3 rbd.c -- Export ceph rados objects as a Linux block device
6 based on drivers/block/osdblk.c:
8 Copyright 2009 Red Hat, Inc.
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License as published by
12 the Free Software Foundation.
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
19 You should have received a copy of the GNU General Public License
20 along with this program; see the file COPYING. If not, write to
21 the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.
25 For usage instructions, please refer to:
27 Documentation/ABI/testing/sysfs-bus-rbd
31 #include <linux/ceph/libceph.h>
32 #include <linux/ceph/osd_client.h>
33 #include <linux/ceph/mon_client.h>
34 #include <linux/ceph/decode.h>
35 #include <linux/parser.h>
36 #include <linux/bsearch.h>
38 #include <linux/kernel.h>
39 #include <linux/device.h>
40 #include <linux/module.h>
41 #include <linux/blk-mq.h>
43 #include <linux/blkdev.h>
44 #include <linux/slab.h>
45 #include <linux/idr.h>
46 #include <linux/workqueue.h>
48 #include "rbd_types.h"
50 #define RBD_DEBUG /* Activate rbd_assert() calls */
53 * Increment the given counter and return its updated value.
54 If the counter is already 0, it will not be incremented.
55 If the counter is already at its maximum value, this returns
56 -EINVAL without updating it.
58 static int atomic_inc_return_safe(atomic_t *v)
62 counter = (unsigned int)__atomic_add_unless(v, 1, 0);
63 if (counter <= (unsigned int)INT_MAX)
71 /* Decrement the counter. Return the resulting value, or -EINVAL */
72 static int atomic_dec_return_safe(atomic_t *v)
76 counter = atomic_dec_return(v);
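/*
 * Illustrative note (added annotation, not in the original source):
 * together these two helpers implement a saturating counter.  Per the
 * comments above, for a counter v:
 *
 *     atomic_inc_return_safe(&v) with v == 1       returns 2
 *     atomic_inc_return_safe(&v) with v == 0       returns 0, v stays 0
 *     atomic_inc_return_safe(&v) near INT_MAX      returns -EINVAL
 *     atomic_dec_return_safe(&v) with v == 0       returns -EINVAL,
 *                                                  v is left at 0
 */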
85 #define RBD_DRV_NAME "rbd"
87 #define RBD_MINORS_PER_MAJOR 256
88 #define RBD_SINGLE_MAJOR_PART_SHIFT 4
90 #define RBD_MAX_PARENT_CHAIN_LEN 16
92 #define RBD_SNAP_DEV_NAME_PREFIX "snap_"
93 #define RBD_MAX_SNAP_NAME_LEN \
94 (NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))
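/*
 * Worked out (illustrative): with NAME_MAX 255 and the 5-byte "snap_"
 * prefix, RBD_MAX_SNAP_NAME_LEN comes to 255 - 5 = 250.
 */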
96 #define RBD_MAX_SNAP_COUNT 510 /* allows max snapc to fit in 4KB */
98 #define RBD_SNAP_HEAD_NAME "-"
100 #define BAD_SNAP_INDEX U32_MAX /* invalid index into snap array */
102 /* This allows a single page to hold an image name sent by the OSD */
103 #define RBD_IMAGE_NAME_LEN_MAX (PAGE_SIZE - sizeof (__le32) - 1)
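/* Illustrative: with 4 KiB pages this is 4096 - 4 - 1 = 4091 bytes. */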
104 #define RBD_IMAGE_ID_LEN_MAX 64
106 #define RBD_OBJ_PREFIX_LEN_MAX 64
110 #define RBD_FEATURE_LAYERING (1<<0)
111 #define RBD_FEATURE_STRIPINGV2 (1<<1)
112 #define RBD_FEATURES_ALL \
113 (RBD_FEATURE_LAYERING | RBD_FEATURE_STRIPINGV2)
115 /* Features supported by this (client software) implementation. */
117 #define RBD_FEATURES_SUPPORTED (RBD_FEATURES_ALL)
120 * An RBD device name will be "rbd#", where the "rbd" comes from
121 * RBD_DRV_NAME above, and # is a unique integer identifier.
122 MAX_INT_FORMAT_WIDTH is used to ensure DEV_NAME_LEN is big
123 * enough to hold all possible device names.
125 #define DEV_NAME_LEN 32
126 #define MAX_INT_FORMAT_WIDTH ((5 * sizeof (int)) / 2 + 1)
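/*
 * Illustrative arithmetic (added annotation): a byte is at most three
 * decimal digits, so (5 * sizeof (int)) / 2 overestimates the digit
 * count of an int, and the + 1 leaves room for a sign.  For a 4-byte
 * int this gives 11 characters, enough for "-2147483648"; DEV_NAME_LEN
 * (32) therefore easily holds "rbd" plus any such identifier.
 */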
129 * block device image metadata (in-memory version)
131 struct rbd_image_header {
132 /* These six fields never change for a given rbd image */
139 u64 features; /* Might be changeable someday? */
141 /* The remaining fields need to be updated occasionally */
143 struct ceph_snap_context *snapc;
144 char *snap_names; /* format 1 only */
145 u64 *snap_sizes; /* format 1 only */
149 * An rbd image specification.
151 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
152 * identify an image. Each rbd_dev structure includes a pointer to
153 * an rbd_spec structure that encapsulates this identity.
155 * Each of the id's in an rbd_spec has an associated name. For a
156 * user-mapped image, the names are supplied and the id's associated
157 * with them are looked up. For a layered image, a parent image is
158 * defined by the tuple, and the names are looked up.
160 * An rbd_dev structure contains a parent_spec pointer which is
161 * non-null if the image it represents is a child in a layered
162 * image. This pointer will refer to the rbd_spec structure used
163 * by the parent rbd_dev for its own identity (i.e., the structure
164 * is shared between the parent and child).
166 * Since these structures are populated once, during the discovery
167 * phase of image construction, they are effectively immutable so
168 * we make no effort to synchronize access to them.
170 * Note that code herein does not assume the image name is known (it
171 * could be a null pointer).
175 const char *pool_name;
177 const char *image_id;
178 const char *image_name;
181 const char *snap_name;
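/*
 * Illustrative example (hypothetical values): a mapping of image "foo"
 * at snapshot "snap1" in pool "rbd" might carry pool_id 2, pool_name
 * "rbd", image_id "1f2e6c9a4b50", image_name "foo", and the looked-up
 * snap_id for "snap1".  For the head revision, snap_id is CEPH_NOSNAP
 * and snap_name is "-" (RBD_SNAP_HEAD_NAME).
 */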
187 * An instance of the client. Multiple devices may share an rbd client.
190 struct ceph_client *client;
192 struct list_head node;
195 struct rbd_img_request;
196 typedef void (*rbd_img_callback_t)(struct rbd_img_request *);
198 #define BAD_WHICH U32_MAX /* Good which or bad which, which? */
200 struct rbd_obj_request;
201 typedef void (*rbd_obj_callback_t)(struct rbd_obj_request *);
203 enum obj_request_type {
204 OBJ_REQUEST_NODATA, OBJ_REQUEST_BIO, OBJ_REQUEST_PAGES
207 enum obj_operation_type {
214 OBJ_REQ_DONE, /* completion flag: not done = 0, done = 1 */
215 OBJ_REQ_IMG_DATA, /* object usage: standalone = 0, image = 1 */
216 OBJ_REQ_KNOWN, /* EXISTS flag valid: no = 0, yes = 1 */
217 OBJ_REQ_EXISTS, /* target exists: no = 0, yes = 1 */
220 struct rbd_obj_request {
221 const char *object_name;
222 u64 offset; /* object start byte */
223 u64 length; /* bytes from offset */
227 * An object request associated with an image will have its
228 * img_data flag set; a standalone object request will not.
230 * A standalone object request will have which == BAD_WHICH
231 * and a null obj_request pointer.
233 * An object request initiated in support of a layered image
234 * object (to check for its existence before a write) will
235 * have which == BAD_WHICH and a non-null obj_request pointer.
237 * Finally, an object request for rbd image data will have
238 * which != BAD_WHICH, and will have a non-null img_request
239 * pointer. The value of which will be in the range
240 * 0..(img_request->obj_request_count-1).
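/*
 * Added summary of the cases above (annotation, not original text):
 *
 *     standalone:              which == BAD_WHICH, obj_request == NULL
 *     layered existence check: which == BAD_WHICH, obj_request != NULL
 *     image data:              which < obj_request_count,
 *                              img_request != NULL
 */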
243 struct rbd_obj_request *obj_request; /* STAT op */
245 struct rbd_img_request *img_request;
247 /* links for img_request->obj_requests list */
248 struct list_head links;
251 u32 which; /* position in image request list */
253 enum obj_request_type type;
255 struct bio *bio_list;
261 struct page **copyup_pages;
262 u32 copyup_page_count;
264 struct ceph_osd_request *osd_req;
266 u64 xferred; /* bytes transferred */
269 rbd_obj_callback_t callback;
270 struct completion completion;
276 IMG_REQ_WRITE, /* I/O direction: read = 0, write = 1 */
277 IMG_REQ_CHILD, /* initiator: block = 0, child image = 1 */
278 IMG_REQ_LAYERED, /* ENOENT handling: normal = 0, layered = 1 */
279 IMG_REQ_DISCARD, /* discard: normal = 0, discard request = 1 */
282 struct rbd_img_request {
283 struct rbd_device *rbd_dev;
284 u64 offset; /* starting image byte offset */
285 u64 length; /* byte count from offset */
288 u64 snap_id; /* for reads */
289 struct ceph_snap_context *snapc; /* for writes */
292 struct request *rq; /* block request */
293 struct rbd_obj_request *obj_request; /* obj req initiator */
295 struct page **copyup_pages;
296 u32 copyup_page_count;
297 spinlock_t completion_lock;/* protects next_completion */
299 rbd_img_callback_t callback;
300 u64 xferred;/* aggregate bytes transferred */
301 int result; /* first nonzero obj_request result */
303 u32 obj_request_count;
304 struct list_head obj_requests; /* rbd_obj_request structs */
309 #define for_each_obj_request(ireq, oreq) \
310 list_for_each_entry(oreq, &(ireq)->obj_requests, links)
311 #define for_each_obj_request_from(ireq, oreq) \
312 list_for_each_entry_from(oreq, &(ireq)->obj_requests, links)
313 #define for_each_obj_request_safe(ireq, oreq, n) \
314 list_for_each_entry_safe_reverse(oreq, n, &(ireq)->obj_requests, links)
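/*
 * Hypothetical usage sketch of the iterators above (img_req and
 * obj_req are illustrative names):
 *
 *     struct rbd_obj_request *obj_req;
 *
 *     for_each_obj_request(img_req, obj_req)
 *             dout("obj %p which %u\n", obj_req, obj_req->which);
 *
 * The _safe variant walks in reverse and tolerates deletion of the
 * current entry, as in rbd_img_request_destroy().
 */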
326 int dev_id; /* blkdev unique id */
328 int major; /* blkdev assigned major */
330 struct gendisk *disk; /* blkdev's gendisk and rq */
332 u32 image_format; /* Either 1 or 2 */
333 struct rbd_client *rbd_client;
335 char name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */
337 spinlock_t lock; /* queue, flags, open_count */
339 struct rbd_image_header header;
340 unsigned long flags; /* possibly lock protected */
341 struct rbd_spec *spec;
342 struct rbd_options *opts;
346 struct ceph_file_layout layout;
348 struct ceph_osd_event *watch_event;
349 struct rbd_obj_request *watch_request;
351 struct rbd_spec *parent_spec;
354 struct rbd_device *parent;
356 /* Block layer tags. */
357 struct blk_mq_tag_set tag_set;
359 /* protects updating the header */
360 struct rw_semaphore header_rwsem;
362 struct rbd_mapping mapping;
364 struct list_head node;
368 unsigned long open_count; /* protected by lock */
372 * Flag bits for rbd_dev->flags. If atomicity is required,
373 * rbd_dev->lock is used to protect access.
375 * Currently, only the "removing" flag (which is coupled with the
376 * "open_count" field) requires atomic access.
379 RBD_DEV_FLAG_EXISTS, /* mapped snapshot has not been deleted */
380 RBD_DEV_FLAG_REMOVING, /* this mapping is being removed */
383 static DEFINE_MUTEX(client_mutex); /* Serialize client creation */
385 static LIST_HEAD(rbd_dev_list); /* devices */
386 static DEFINE_SPINLOCK(rbd_dev_list_lock);
388 static LIST_HEAD(rbd_client_list); /* clients */
389 static DEFINE_SPINLOCK(rbd_client_list_lock);
391 /* Slab caches for frequently-allocated structures */
393 static struct kmem_cache *rbd_img_request_cache;
394 static struct kmem_cache *rbd_obj_request_cache;
395 static struct kmem_cache *rbd_segment_name_cache;
397 static int rbd_major;
398 static DEFINE_IDA(rbd_dev_id_ida);
400 static struct workqueue_struct *rbd_wq;
403 * Default to false for now, as single-major requires version >= 0.75 of
404 the userspace rbd utility.
406 static bool single_major = false;
407 module_param(single_major, bool, S_IRUGO);
408 MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: false)");
410 static int rbd_img_request_submit(struct rbd_img_request *img_request);
412 static ssize_t rbd_add(struct bus_type *bus, const char *buf,
414 static ssize_t rbd_remove(struct bus_type *bus, const char *buf,
416 static ssize_t rbd_add_single_major(struct bus_type *bus, const char *buf,
418 static ssize_t rbd_remove_single_major(struct bus_type *bus, const char *buf,
420 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);
421 static void rbd_spec_put(struct rbd_spec *spec);
423 static int rbd_dev_id_to_minor(int dev_id)
425 return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
428 static int minor_to_rbd_dev_id(int minor)
430 return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
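/*
 * Worked example (illustrative): with RBD_SINGLE_MAJOR_PART_SHIFT 4,
 * dev_id 2 maps to first minor 2 << 4 = 32, and minors 32..47 belong
 * to that device (the whole disk plus up to 15 partitions);
 * conversely, minor_to_rbd_dev_id(37) recovers dev_id 37 >> 4 = 2.
 */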
433 static BUS_ATTR(add, S_IWUSR, NULL, rbd_add);
434 static BUS_ATTR(remove, S_IWUSR, NULL, rbd_remove);
435 static BUS_ATTR(add_single_major, S_IWUSR, NULL, rbd_add_single_major);
436 static BUS_ATTR(remove_single_major, S_IWUSR, NULL, rbd_remove_single_major);
438 static struct attribute *rbd_bus_attrs[] = {
440 &bus_attr_remove.attr,
441 &bus_attr_add_single_major.attr,
442 &bus_attr_remove_single_major.attr,
446 static umode_t rbd_bus_is_visible(struct kobject *kobj,
447 struct attribute *attr, int index)
450 (attr == &bus_attr_add_single_major.attr ||
451 attr == &bus_attr_remove_single_major.attr))
457 static const struct attribute_group rbd_bus_group = {
458 .attrs = rbd_bus_attrs,
459 .is_visible = rbd_bus_is_visible,
461 __ATTRIBUTE_GROUPS(rbd_bus);
463 static struct bus_type rbd_bus_type = {
465 .bus_groups = rbd_bus_groups,
468 static void rbd_root_dev_release(struct device *dev)
472 static struct device rbd_root_dev = {
474 .release = rbd_root_dev_release,
477 static __printf(2, 3)
478 void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
480 struct va_format vaf;
488 printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
489 else if (rbd_dev->disk)
490 printk(KERN_WARNING "%s: %s: %pV\n",
491 RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
492 else if (rbd_dev->spec && rbd_dev->spec->image_name)
493 printk(KERN_WARNING "%s: image %s: %pV\n",
494 RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
495 else if (rbd_dev->spec && rbd_dev->spec->image_id)
496 printk(KERN_WARNING "%s: id %s: %pV\n",
497 RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
499 printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
500 RBD_DRV_NAME, rbd_dev, &vaf);
505 #define rbd_assert(expr) \
506 if (unlikely(!(expr))) { \
507 printk(KERN_ERR "\nAssertion failure in %s() " \
509 "\trbd_assert(%s);\n\n", \
510 __func__, __LINE__, #expr); \
513 #else /* !RBD_DEBUG */
514 # define rbd_assert(expr) ((void) 0)
515 #endif /* !RBD_DEBUG */
517 static void rbd_osd_copyup_callback(struct rbd_obj_request *obj_request);
518 static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request);
519 static void rbd_img_parent_read(struct rbd_obj_request *obj_request);
520 static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);
522 static int rbd_dev_refresh(struct rbd_device *rbd_dev);
523 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev);
524 static int rbd_dev_header_info(struct rbd_device *rbd_dev);
525 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev);
526 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
528 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
529 u8 *order, u64 *snap_size);
530 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
532 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name);
534 static int rbd_open(struct block_device *bdev, fmode_t mode)
536 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
537 bool removing = false;
539 if ((mode & FMODE_WRITE) && rbd_dev->mapping.read_only)
542 spin_lock_irq(&rbd_dev->lock);
543 if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
546 rbd_dev->open_count++;
547 spin_unlock_irq(&rbd_dev->lock);
551 (void) get_device(&rbd_dev->dev);
556 static void rbd_release(struct gendisk *disk, fmode_t mode)
558 struct rbd_device *rbd_dev = disk->private_data;
559 unsigned long open_count_before;
561 spin_lock_irq(&rbd_dev->lock);
562 open_count_before = rbd_dev->open_count--;
563 spin_unlock_irq(&rbd_dev->lock);
564 rbd_assert(open_count_before > 0);
566 put_device(&rbd_dev->dev);
569 static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
574 bool ro_changed = false;
576 /* get_user() may sleep, so call it before taking rbd_dev->lock */
577 if (get_user(val, (int __user *)(arg)))
580 ro = val ? true : false;
581 /* Snapshots do not allow writes */
582 if (rbd_dev->spec->snap_id != CEPH_NOSNAP && !ro)
585 spin_lock_irq(&rbd_dev->lock);
586 /* prevent others from opening this device */
587 if (rbd_dev->open_count > 1) {
592 if (rbd_dev->mapping.read_only != ro) {
593 rbd_dev->mapping.read_only = ro;
598 spin_unlock_irq(&rbd_dev->lock);
599 /* set_disk_ro() may sleep, so call it after releasing rbd_dev->lock */
600 if (ret == 0 && ro_changed)
601 set_disk_ro(rbd_dev->disk, ro ? 1 : 0);
606 static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
607 unsigned int cmd, unsigned long arg)
609 struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
614 ret = rbd_ioctl_set_ro(rbd_dev, arg);
624 static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
625 unsigned int cmd, unsigned long arg)
627 return rbd_ioctl(bdev, mode, cmd, arg);
629 #endif /* CONFIG_COMPAT */
631 static const struct block_device_operations rbd_bd_ops = {
632 .owner = THIS_MODULE,
634 .release = rbd_release,
637 .compat_ioctl = rbd_compat_ioctl,
642 * Initialize an rbd client instance. Success or not, this function
643 * consumes ceph_opts. Caller holds client_mutex.
645 static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
647 struct rbd_client *rbdc;
650 dout("%s:\n", __func__);
651 rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
655 kref_init(&rbdc->kref);
656 INIT_LIST_HEAD(&rbdc->node);
658 rbdc->client = ceph_create_client(ceph_opts, rbdc, 0, 0);
659 if (IS_ERR(rbdc->client))
661 ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */
663 ret = ceph_open_session(rbdc->client);
667 spin_lock(&rbd_client_list_lock);
668 list_add_tail(&rbdc->node, &rbd_client_list);
669 spin_unlock(&rbd_client_list_lock);
671 dout("%s: rbdc %p\n", __func__, rbdc);
675 ceph_destroy_client(rbdc->client);
680 ceph_destroy_options(ceph_opts);
681 dout("%s: error %d\n", __func__, ret);
686 static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
688 kref_get(&rbdc->kref);
694 * Find a ceph client with specific addr and configuration. If
695 * found, bump its reference count.
697 static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
699 struct rbd_client *client_node;
702 if (ceph_opts->flags & CEPH_OPT_NOSHARE)
705 spin_lock(&rbd_client_list_lock);
706 list_for_each_entry(client_node, &rbd_client_list, node) {
707 if (!ceph_compare_options(ceph_opts, client_node->client)) {
708 __rbd_get_client(client_node);
714 spin_unlock(&rbd_client_list_lock);
716 return found ? client_node : NULL;
720 * (Per device) rbd map options
727 /* string args above */
733 static match_table_t rbd_opts_tokens = {
734 {Opt_queue_depth, "queue_depth=%d"},
736 /* string args above */
737 {Opt_read_only, "read_only"},
738 {Opt_read_only, "ro"}, /* Alternate spelling */
739 {Opt_read_write, "read_write"},
740 {Opt_read_write, "rw"}, /* Alternate spelling */
749 #define RBD_QUEUE_DEPTH_DEFAULT BLKDEV_MAX_RQ
750 #define RBD_READ_ONLY_DEFAULT false
752 static int parse_rbd_opts_token(char *c, void *private)
754 struct rbd_options *rbd_opts = private;
755 substring_t argstr[MAX_OPT_ARGS];
756 int token, intval, ret;
758 token = match_token(c, rbd_opts_tokens, argstr);
759 if (token < Opt_last_int) {
760 ret = match_int(&argstr[0], &intval);
762 pr_err("bad mount option arg (not int) at '%s'\n", c);
765 dout("got int token %d val %d\n", token, intval);
766 } else if (token > Opt_last_int && token < Opt_last_string) {
767 dout("got string token %d val %s\n", token, argstr[0].from);
769 dout("got token %d\n", token);
773 case Opt_queue_depth:
775 pr_err("queue_depth out of range\n");
778 rbd_opts->queue_depth = intval;
781 rbd_opts->read_only = true;
784 rbd_opts->read_only = false;
787 /* libceph prints "bad option" msg */
794 static char *obj_op_name(enum obj_operation_type op_type)
809 * Get a ceph client with specific addr and configuration; if one does
810 not exist, create it. Either way, ceph_opts is consumed by this function.
813 static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
815 struct rbd_client *rbdc;
817 mutex_lock_nested(&client_mutex, SINGLE_DEPTH_NESTING);
818 rbdc = rbd_client_find(ceph_opts);
819 if (rbdc) /* using an existing client */
820 ceph_destroy_options(ceph_opts);
822 rbdc = rbd_client_create(ceph_opts);
823 mutex_unlock(&client_mutex);
829 * Destroy ceph client
831 Takes and releases rbd_client_list_lock itself; callers must not hold it.
833 static void rbd_client_release(struct kref *kref)
835 struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);
837 dout("%s: rbdc %p\n", __func__, rbdc);
838 spin_lock(&rbd_client_list_lock);
839 list_del(&rbdc->node);
840 spin_unlock(&rbd_client_list_lock);
842 ceph_destroy_client(rbdc->client);
847 * Drop reference to ceph client node. If it's not referenced anymore, release it.
850 static void rbd_put_client(struct rbd_client *rbdc)
853 kref_put(&rbdc->kref, rbd_client_release);
856 static bool rbd_image_format_valid(u32 image_format)
858 return image_format == 1 || image_format == 2;
861 static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
866 /* The header has to start with the magic rbd header text */
867 if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
870 /* The bio layer requires at least sector-sized I/O */
872 if (ondisk->options.order < SECTOR_SHIFT)
875 /* If we use u64 in a few spots we may be able to loosen this */
877 if (ondisk->options.order > 8 * sizeof (int) - 1)
881 * The size of a snapshot header has to fit in a size_t, and
882 * that limits the number of snapshots.
884 snap_count = le32_to_cpu(ondisk->snap_count);
885 size = SIZE_MAX - sizeof (struct ceph_snap_context);
886 if (snap_count > size / sizeof (__le64))
890 * Not only that, but the size of the entire snapshot
891 * header must also be representable in a size_t.
893 size -= snap_count * sizeof (__le64);
894 if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
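/*
 * Rough numbers (illustrative): the practical cap is
 * RBD_MAX_SNAP_COUNT (510), chosen above so that a snapshot context of
 * about sizeof(struct ceph_snap_context) + 510 * 8 bytes of ids,
 * roughly 4 KiB in total, fits in a single page; the size_t checks
 * here only guard against a corrupt or hostile on-disk header.
 */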
901 * Fill an rbd image header with information from the given format 1 ondisk header.
904 static int rbd_header_from_disk(struct rbd_device *rbd_dev,
905 struct rbd_image_header_ondisk *ondisk)
907 struct rbd_image_header *header = &rbd_dev->header;
908 bool first_time = header->object_prefix == NULL;
909 struct ceph_snap_context *snapc;
910 char *object_prefix = NULL;
911 char *snap_names = NULL;
912 u64 *snap_sizes = NULL;
918 /* Allocate this now to avoid having to handle failure below */
923 len = strnlen(ondisk->object_prefix,
924 sizeof (ondisk->object_prefix));
925 object_prefix = kmalloc(len + 1, GFP_KERNEL);
928 memcpy(object_prefix, ondisk->object_prefix, len);
929 object_prefix[len] = '\0';
932 /* Allocate the snapshot context and fill it in */
934 snap_count = le32_to_cpu(ondisk->snap_count);
935 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
938 snapc->seq = le64_to_cpu(ondisk->snap_seq);
940 struct rbd_image_snap_ondisk *snaps;
941 u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);
943 /* We'll keep a copy of the snapshot names... */
945 if (snap_names_len > (u64)SIZE_MAX)
947 snap_names = kmalloc(snap_names_len, GFP_KERNEL);
951 /* ...as well as the array of their sizes. */
953 size = snap_count * sizeof (*header->snap_sizes);
954 snap_sizes = kmalloc(size, GFP_KERNEL);
959 * Copy the names, and fill in each snapshot's id
962 * Note that rbd_dev_v1_header_info() guarantees the
963 * ondisk buffer we're working with has
964 * snap_names_len bytes beyond the end of the
965 snapshot id array, so this memcpy() is safe.
967 memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
968 snaps = ondisk->snaps;
969 for (i = 0; i < snap_count; i++) {
970 snapc->snaps[i] = le64_to_cpu(snaps[i].id);
971 snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
975 /* We won't fail any more, fill in the header */
978 header->object_prefix = object_prefix;
979 header->obj_order = ondisk->options.order;
980 header->crypt_type = ondisk->options.crypt_type;
981 header->comp_type = ondisk->options.comp_type;
982 /* The rest aren't used for format 1 images */
983 header->stripe_unit = 0;
984 header->stripe_count = 0;
985 header->features = 0;
987 ceph_put_snap_context(header->snapc);
988 kfree(header->snap_names);
989 kfree(header->snap_sizes);
992 /* The remaining fields always get updated (when we refresh) */
994 header->image_size = le64_to_cpu(ondisk->image_size);
995 header->snapc = snapc;
996 header->snap_names = snap_names;
997 header->snap_sizes = snap_sizes;
1005 ceph_put_snap_context(snapc);
1006 kfree(object_prefix);
1011 static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
1013 const char *snap_name;
1015 rbd_assert(which < rbd_dev->header.snapc->num_snaps);
1017 /* Skip over names until we find the one we are looking for */
1019 snap_name = rbd_dev->header.snap_names;
1021 snap_name += strlen(snap_name) + 1;
1023 return kstrdup(snap_name, GFP_KERNEL);
1027 * Snapshot id comparison function for use with qsort()/bsearch().
1028 * Note that result is for snapshots in *descending* order.
1030 static int snapid_compare_reverse(const void *s1, const void *s2)
1032 u64 snap_id1 = *(u64 *)s1;
1033 u64 snap_id2 = *(u64 *)s2;
1035 if (snap_id1 < snap_id2)
1037 return snap_id1 == snap_id2 ? 0 : -1;
1041 * Search a snapshot context to see if the given snapshot id is present.
1044 * Returns the position of the snapshot id in the array if it's found,
1045 * or BAD_SNAP_INDEX otherwise.
1047 Note: The snapshot array is kept sorted (by the osd) in
1048 * reverse order, highest snapshot id first.
1050 static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
1052 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
1055 found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
1056 sizeof (snap_id), snapid_compare_reverse);
1058 return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
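/*
 * Illustrative example (hypothetical ids): with snapc->snaps holding
 * { 12, 7, 3 } (descending, newest first), looking up id 7 returns
 * index 1, while id 5 is absent and yields BAD_SNAP_INDEX.
 */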
1061 static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
1065 const char *snap_name;
1067 which = rbd_dev_snap_index(rbd_dev, snap_id);
1068 if (which == BAD_SNAP_INDEX)
1069 return ERR_PTR(-ENOENT);
1071 snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
1072 return snap_name ? snap_name : ERR_PTR(-ENOMEM);
1075 static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
1077 if (snap_id == CEPH_NOSNAP)
1078 return RBD_SNAP_HEAD_NAME;
1080 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1081 if (rbd_dev->image_format == 1)
1082 return rbd_dev_v1_snap_name(rbd_dev, snap_id);
1084 return rbd_dev_v2_snap_name(rbd_dev, snap_id);
1087 static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
1090 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1091 if (snap_id == CEPH_NOSNAP) {
1092 *snap_size = rbd_dev->header.image_size;
1093 } else if (rbd_dev->image_format == 1) {
1096 which = rbd_dev_snap_index(rbd_dev, snap_id);
1097 if (which == BAD_SNAP_INDEX)
1100 *snap_size = rbd_dev->header.snap_sizes[which];
1105 ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
1114 static int rbd_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
1117 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
1118 if (snap_id == CEPH_NOSNAP) {
1119 *snap_features = rbd_dev->header.features;
1120 } else if (rbd_dev->image_format == 1) {
1121 *snap_features = 0; /* No features for format 1 */
1126 ret = _rbd_dev_v2_snap_features(rbd_dev, snap_id, &features);
1130 *snap_features = features;
1135 static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
1137 u64 snap_id = rbd_dev->spec->snap_id;
1142 ret = rbd_snap_size(rbd_dev, snap_id, &size);
1145 ret = rbd_snap_features(rbd_dev, snap_id, &features);
1149 rbd_dev->mapping.size = size;
1150 rbd_dev->mapping.features = features;
1155 static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
1157 rbd_dev->mapping.size = 0;
1158 rbd_dev->mapping.features = 0;
1161 static void rbd_segment_name_free(const char *name)
1163 /* The explicit cast here is needed to drop the const qualifier */
1165 kmem_cache_free(rbd_segment_name_cache, (void *)name);
1168 static const char *rbd_segment_name(struct rbd_device *rbd_dev, u64 offset)
1175 name = kmem_cache_alloc(rbd_segment_name_cache, GFP_NOIO);
1178 segment = offset >> rbd_dev->header.obj_order;
1179 name_format = "%s.%012llx";
1180 if (rbd_dev->image_format == 2)
1181 name_format = "%s.%016llx";
1182 ret = snprintf(name, CEPH_MAX_OID_NAME_LEN + 1, name_format,
1183 rbd_dev->header.object_prefix, segment);
1184 if (ret < 0 || ret > CEPH_MAX_OID_NAME_LEN) {
1185 pr_err("error formatting segment name for #%llu (%d)\n",
1187 rbd_segment_name_free(name);
1194 static u64 rbd_segment_offset(struct rbd_device *rbd_dev, u64 offset)
1196 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1198 return offset & (segment_size - 1);
1201 static u64 rbd_segment_length(struct rbd_device *rbd_dev,
1202 u64 offset, u64 length)
1204 u64 segment_size = (u64) 1 << rbd_dev->header.obj_order;
1206 offset &= segment_size - 1;
1208 rbd_assert(length <= U64_MAX - offset);
1209 if (offset + length > segment_size)
1210 length = segment_size - offset;
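/*
 * Worked example (hypothetical values): with obj_order 22 the image is
 * carved into 4 MiB objects.  For an I/O at image offset 13 MiB:
 *
 *     segment number: 13 MiB >> 22          = 3
 *     segment offset: 13 MiB & (4 MiB - 1)  = 1 MiB
 *     segment length: clamped to 4 MiB - 1 MiB = at most 3 MiB
 *
 * and for a format 2 image the object is named
 * "<object_prefix>.0000000000000003".
 */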
1216 * returns the size of an object in the image
1218 static u64 rbd_obj_bytes(struct rbd_image_header *header)
1220 return 1 << header->obj_order;
1227 static void bio_chain_put(struct bio *chain)
1233 chain = chain->bi_next;
1239 * zeros a bio chain, starting at a specific offset
1241 static void zero_bio_chain(struct bio *chain, int start_ofs)
1244 struct bvec_iter iter;
1245 unsigned long flags;
1250 bio_for_each_segment(bv, chain, iter) {
1251 if (pos + bv.bv_len > start_ofs) {
1252 int remainder = max(start_ofs - pos, 0);
1253 buf = bvec_kmap_irq(&bv, &flags);
1254 memset(buf + remainder, 0,
1255 bv.bv_len - remainder);
1256 flush_dcache_page(bv.bv_page);
1257 bvec_kunmap_irq(buf, &flags);
1262 chain = chain->bi_next;
1267 * similar to zero_bio_chain(), zeros data defined by a page array,
1268 * starting at the given byte offset from the start of the array and
1269 * continuing up to the given end offset. The pages array is
1270 * assumed to be big enough to hold all bytes up to the end.
1272 static void zero_pages(struct page **pages, u64 offset, u64 end)
1274 struct page **page = &pages[offset >> PAGE_SHIFT];
1276 rbd_assert(end > offset);
1277 rbd_assert(end - offset <= (u64)SIZE_MAX);
1278 while (offset < end) {
1281 unsigned long flags;
1284 page_offset = offset & ~PAGE_MASK;
1285 length = min_t(size_t, PAGE_SIZE - page_offset, end - offset);
1286 local_irq_save(flags);
1287 kaddr = kmap_atomic(*page);
1288 memset(kaddr + page_offset, 0, length);
1289 flush_dcache_page(*page);
1290 kunmap_atomic(kaddr);
1291 local_irq_restore(flags);
1299 * Clone a portion of a bio, starting at the given byte offset
1300 * and continuing for the number of bytes indicated.
1302 static struct bio *bio_clone_range(struct bio *bio_src,
1303 unsigned int offset,
1309 bio = bio_clone(bio_src, gfpmask);
1311 return NULL; /* ENOMEM */
1313 bio_advance(bio, offset);
1314 bio->bi_iter.bi_size = len;
1320 * Clone a portion of a bio chain, starting at the given byte offset
1321 * into the first bio in the source chain and continuing for the
1322 * number of bytes indicated. The result is another bio chain of
1323 * exactly the given length, or a null pointer on error.
1325 * The bio_src and offset parameters are both in-out. On entry they
1326 * refer to the first source bio and the offset into that bio where
1327 * the start of data to be cloned is located.
1329 * On return, bio_src is updated to refer to the bio in the source
1330 chain that contains the first un-cloned byte, and *offset will
1331 * contain the offset of that byte within that bio.
1333 static struct bio *bio_chain_clone_range(struct bio **bio_src,
1334 unsigned int *offset,
1338 struct bio *bi = *bio_src;
1339 unsigned int off = *offset;
1340 struct bio *chain = NULL;
1343 /* Build up a chain of clone bios up to the limit */
1345 if (!bi || off >= bi->bi_iter.bi_size || !len)
1346 return NULL; /* Nothing to clone */
1350 unsigned int bi_size;
1354 rbd_warn(NULL, "bio_chain exhausted with %u left", len);
1355 goto out_err; /* EINVAL; ran out of bio's */
1357 bi_size = min_t(unsigned int, bi->bi_iter.bi_size - off, len);
1358 bio = bio_clone_range(bi, off, bi_size, gfpmask);
1360 goto out_err; /* ENOMEM */
1363 end = &bio->bi_next;
1366 if (off == bi->bi_iter.bi_size) {
1377 bio_chain_put(chain);
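/*
 * Hypothetical usage sketch: to split a request that spans a segment
 * boundary, clone one segment's worth at a time (bio, off, and seg_len
 * are illustrative names):
 *
 *     clone = bio_chain_clone_range(&bio, &off, seg_len, GFP_NOIO);
 *
 * After each call, bio and off have advanced to the first un-cloned
 * byte, so the next call resumes exactly where this one stopped.
 */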
1383 * The default/initial value for all object request flags is 0. For
1384 each flag, once its value is set to 1 it is never reset to 0 again.
1387 static void obj_request_img_data_set(struct rbd_obj_request *obj_request)
1389 if (test_and_set_bit(OBJ_REQ_IMG_DATA, &obj_request->flags)) {
1390 struct rbd_device *rbd_dev;
1392 rbd_dev = obj_request->img_request->rbd_dev;
1393 rbd_warn(rbd_dev, "obj_request %p already marked img_data",
1398 static bool obj_request_img_data_test(struct rbd_obj_request *obj_request)
1401 return test_bit(OBJ_REQ_IMG_DATA, &obj_request->flags) != 0;
1404 static void obj_request_done_set(struct rbd_obj_request *obj_request)
1406 if (test_and_set_bit(OBJ_REQ_DONE, &obj_request->flags)) {
1407 struct rbd_device *rbd_dev = NULL;
1409 if (obj_request_img_data_test(obj_request))
1410 rbd_dev = obj_request->img_request->rbd_dev;
1411 rbd_warn(rbd_dev, "obj_request %p already marked done",
1416 static bool obj_request_done_test(struct rbd_obj_request *obj_request)
1419 return test_bit(OBJ_REQ_DONE, &obj_request->flags) != 0;
1423 * This sets the KNOWN flag after (possibly) setting the EXISTS
1424 * flag. The latter is set based on the "exists" value provided.
1426 * Note that for our purposes once an object exists it never goes
1427 away again. It's possible that the responses from two existence
1428 * checks are separated by the creation of the target object, and
1429 * the first ("doesn't exist") response arrives *after* the second
1430 * ("does exist"). In that case we ignore the second one.
1432 static void obj_request_existence_set(struct rbd_obj_request *obj_request,
1436 set_bit(OBJ_REQ_EXISTS, &obj_request->flags);
1437 set_bit(OBJ_REQ_KNOWN, &obj_request->flags);
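/*
 * Example of the race described above (hypothetical ordering): if a
 * "doesn't exist" reply from an earlier stat arrives after a later
 * "does exist" reply has set OBJ_REQ_EXISTS, only OBJ_REQ_KNOWN is
 * (re)set here and the EXISTS bit is left intact, since these flags
 * are never cleared once set.
 */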
1441 static bool obj_request_known_test(struct rbd_obj_request *obj_request)
1444 return test_bit(OBJ_REQ_KNOWN, &obj_request->flags) != 0;
1447 static bool obj_request_exists_test(struct rbd_obj_request *obj_request)
1450 return test_bit(OBJ_REQ_EXISTS, &obj_request->flags) != 0;
1453 static bool obj_request_overlaps_parent(struct rbd_obj_request *obj_request)
1455 struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
1457 return obj_request->img_offset <
1458 round_up(rbd_dev->parent_overlap, rbd_obj_bytes(&rbd_dev->header));
1461 static void rbd_obj_request_get(struct rbd_obj_request *obj_request)
1463 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1464 atomic_read(&obj_request->kref.refcount));
1465 kref_get(&obj_request->kref);
1468 static void rbd_obj_request_destroy(struct kref *kref);
1469 static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
1471 rbd_assert(obj_request != NULL);
1472 dout("%s: obj %p (was %d)\n", __func__, obj_request,
1473 atomic_read(&obj_request->kref.refcount));
1474 kref_put(&obj_request->kref, rbd_obj_request_destroy);
1477 static void rbd_img_request_get(struct rbd_img_request *img_request)
1479 dout("%s: img %p (was %d)\n", __func__, img_request,
1480 atomic_read(&img_request->kref.refcount));
1481 kref_get(&img_request->kref);
1484 static bool img_request_child_test(struct rbd_img_request *img_request);
1485 static void rbd_parent_request_destroy(struct kref *kref);
1486 static void rbd_img_request_destroy(struct kref *kref);
1487 static void rbd_img_request_put(struct rbd_img_request *img_request)
1489 rbd_assert(img_request != NULL);
1490 dout("%s: img %p (was %d)\n", __func__, img_request,
1491 atomic_read(&img_request->kref.refcount));
1492 if (img_request_child_test(img_request))
1493 kref_put(&img_request->kref, rbd_parent_request_destroy);
1495 kref_put(&img_request->kref, rbd_img_request_destroy);
1498 static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
1499 struct rbd_obj_request *obj_request)
1501 rbd_assert(obj_request->img_request == NULL);
1503 /* Image request now owns object's original reference */
1504 obj_request->img_request = img_request;
1505 obj_request->which = img_request->obj_request_count;
1506 rbd_assert(!obj_request_img_data_test(obj_request));
1507 obj_request_img_data_set(obj_request);
1508 rbd_assert(obj_request->which != BAD_WHICH);
1509 img_request->obj_request_count++;
1510 list_add_tail(&obj_request->links, &img_request->obj_requests);
1511 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1512 obj_request->which);
1515 static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
1516 struct rbd_obj_request *obj_request)
1518 rbd_assert(obj_request->which != BAD_WHICH);
1520 dout("%s: img %p obj %p w=%u\n", __func__, img_request, obj_request,
1521 obj_request->which);
1522 list_del(&obj_request->links);
1523 rbd_assert(img_request->obj_request_count > 0);
1524 img_request->obj_request_count--;
1525 rbd_assert(obj_request->which == img_request->obj_request_count);
1526 obj_request->which = BAD_WHICH;
1527 rbd_assert(obj_request_img_data_test(obj_request));
1528 rbd_assert(obj_request->img_request == img_request);
1529 obj_request->img_request = NULL;
1530 obj_request->callback = NULL;
1531 rbd_obj_request_put(obj_request);
1534 static bool obj_request_type_valid(enum obj_request_type type)
1537 case OBJ_REQUEST_NODATA:
1538 case OBJ_REQUEST_BIO:
1539 case OBJ_REQUEST_PAGES:
1546 static int rbd_obj_request_submit(struct ceph_osd_client *osdc,
1547 struct rbd_obj_request *obj_request)
1549 dout("%s %p\n", __func__, obj_request);
1550 return ceph_osdc_start_request(osdc, obj_request->osd_req, false);
1553 static void rbd_obj_request_end(struct rbd_obj_request *obj_request)
1555 dout("%s %p\n", __func__, obj_request);
1556 ceph_osdc_cancel_request(obj_request->osd_req);
1560 * Wait for an object request to complete. If interrupted, cancel the
1561 * underlying osd request.
1563 * @timeout: in jiffies, 0 means "wait forever"
1565 static int __rbd_obj_request_wait(struct rbd_obj_request *obj_request,
1566 unsigned long timeout)
1570 dout("%s %p\n", __func__, obj_request);
1571 ret = wait_for_completion_interruptible_timeout(
1572 &obj_request->completion,
1573 ceph_timeout_jiffies(timeout));
1577 rbd_obj_request_end(obj_request);
1582 dout("%s %p ret %d\n", __func__, obj_request, (int)ret);
1586 static int rbd_obj_request_wait(struct rbd_obj_request *obj_request)
1588 return __rbd_obj_request_wait(obj_request, 0);
1591 static int rbd_obj_request_wait_timeout(struct rbd_obj_request *obj_request,
1592 unsigned long timeout)
1594 return __rbd_obj_request_wait(obj_request, timeout);
1597 static void rbd_img_request_complete(struct rbd_img_request *img_request)
1600 dout("%s: img %p\n", __func__, img_request);
1603 * If no error occurred, compute the aggregate transfer
1604 * count for the image request. We could instead use
1605 * atomic64_cmpxchg() to update it as each object request
1606 completes; it's not clear offhand which way is better.
1608 if (!img_request->result) {
1609 struct rbd_obj_request *obj_request;
1612 for_each_obj_request(img_request, obj_request)
1613 xferred += obj_request->xferred;
1614 img_request->xferred = xferred;
1617 if (img_request->callback)
1618 img_request->callback(img_request);
1620 rbd_img_request_put(img_request);
1624 * The default/initial value for all image request flags is 0. Each
1625 * is conditionally set to 1 at image request initialization time
1626 and currently never changes thereafter.
1628 static void img_request_write_set(struct rbd_img_request *img_request)
1630 set_bit(IMG_REQ_WRITE, &img_request->flags);
1634 static bool img_request_write_test(struct rbd_img_request *img_request)
1637 return test_bit(IMG_REQ_WRITE, &img_request->flags) != 0;
1641 * Set the discard flag when the img_request is a discard request
1643 static void img_request_discard_set(struct rbd_img_request *img_request)
1645 set_bit(IMG_REQ_DISCARD, &img_request->flags);
1649 static bool img_request_discard_test(struct rbd_img_request *img_request)
1652 return test_bit(IMG_REQ_DISCARD, &img_request->flags) != 0;
1655 static void img_request_child_set(struct rbd_img_request *img_request)
1657 set_bit(IMG_REQ_CHILD, &img_request->flags);
1661 static void img_request_child_clear(struct rbd_img_request *img_request)
1663 clear_bit(IMG_REQ_CHILD, &img_request->flags);
1667 static bool img_request_child_test(struct rbd_img_request *img_request)
1670 return test_bit(IMG_REQ_CHILD, &img_request->flags) != 0;
1673 static void img_request_layered_set(struct rbd_img_request *img_request)
1675 set_bit(IMG_REQ_LAYERED, &img_request->flags);
1679 static void img_request_layered_clear(struct rbd_img_request *img_request)
1681 clear_bit(IMG_REQ_LAYERED, &img_request->flags);
1685 static bool img_request_layered_test(struct rbd_img_request *img_request)
1688 return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
1691 static enum obj_operation_type
1692 rbd_img_request_op_type(struct rbd_img_request *img_request)
1694 if (img_request_write_test(img_request))
1695 return OBJ_OP_WRITE;
1696 else if (img_request_discard_test(img_request))
1697 return OBJ_OP_DISCARD;
1703 rbd_img_obj_request_read_callback(struct rbd_obj_request *obj_request)
1705 u64 xferred = obj_request->xferred;
1706 u64 length = obj_request->length;
1708 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1709 obj_request, obj_request->img_request, obj_request->result,
1712 * ENOENT means a hole in the image. We zero-fill the entire
1713 * length of the request. A short read also implies zero-fill
1714 * to the end of the request. An error requires the whole
1715 * length of the request to be reported finished with an error
1716 * to the block layer. In each case we update the xferred
1717 * count to indicate the whole request was satisfied.
1719 rbd_assert(obj_request->type != OBJ_REQUEST_NODATA);
1720 if (obj_request->result == -ENOENT) {
1721 if (obj_request->type == OBJ_REQUEST_BIO)
1722 zero_bio_chain(obj_request->bio_list, 0);
1724 zero_pages(obj_request->pages, 0, length);
1725 obj_request->result = 0;
1726 } else if (xferred < length && !obj_request->result) {
1727 if (obj_request->type == OBJ_REQUEST_BIO)
1728 zero_bio_chain(obj_request->bio_list, xferred);
1730 zero_pages(obj_request->pages, xferred, length);
1732 obj_request->xferred = length;
1733 obj_request_done_set(obj_request);
1736 static void rbd_obj_request_complete(struct rbd_obj_request *obj_request)
1738 dout("%s: obj %p cb %p\n", __func__, obj_request,
1739 obj_request->callback);
1740 if (obj_request->callback)
1741 obj_request->callback(obj_request);
1743 complete_all(&obj_request->completion);
1746 static void rbd_osd_trivial_callback(struct rbd_obj_request *obj_request)
1748 dout("%s: obj %p\n", __func__, obj_request);
1749 obj_request_done_set(obj_request);
1752 static void rbd_osd_read_callback(struct rbd_obj_request *obj_request)
1754 struct rbd_img_request *img_request = NULL;
1755 struct rbd_device *rbd_dev = NULL;
1756 bool layered = false;
1758 if (obj_request_img_data_test(obj_request)) {
1759 img_request = obj_request->img_request;
1760 layered = img_request && img_request_layered_test(img_request);
1761 rbd_dev = img_request->rbd_dev;
1764 dout("%s: obj %p img %p result %d %llu/%llu\n", __func__,
1765 obj_request, img_request, obj_request->result,
1766 obj_request->xferred, obj_request->length);
1767 if (layered && obj_request->result == -ENOENT &&
1768 obj_request->img_offset < rbd_dev->parent_overlap)
1769 rbd_img_parent_read(obj_request);
1770 else if (img_request)
1771 rbd_img_obj_request_read_callback(obj_request);
1773 obj_request_done_set(obj_request);
1776 static void rbd_osd_write_callback(struct rbd_obj_request *obj_request)
1778 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1779 obj_request->result, obj_request->length);
1781 * There is no such thing as a successful short write. Set
1782 * it to our originally-requested length.
1784 obj_request->xferred = obj_request->length;
1785 obj_request_done_set(obj_request);
1788 static void rbd_osd_discard_callback(struct rbd_obj_request *obj_request)
1790 dout("%s: obj %p result %d %llu\n", __func__, obj_request,
1791 obj_request->result, obj_request->length);
1793 * There is no such thing as a successful short discard. Set
1794 * it to our originally-requested length.
1796 obj_request->xferred = obj_request->length;
1797 /* discarding a non-existent object is not a problem */
1798 if (obj_request->result == -ENOENT)
1799 obj_request->result = 0;
1800 obj_request_done_set(obj_request);
1804 * For a simple stat call there's nothing to do. We'll do more if
1805 * this is part of a write sequence for a layered image.
1807 static void rbd_osd_stat_callback(struct rbd_obj_request *obj_request)
1809 dout("%s: obj %p\n", __func__, obj_request);
1810 obj_request_done_set(obj_request);
1813 static void rbd_osd_call_callback(struct rbd_obj_request *obj_request)
1815 dout("%s: obj %p\n", __func__, obj_request);
1817 if (obj_request_img_data_test(obj_request))
1818 rbd_osd_copyup_callback(obj_request);
1820 obj_request_done_set(obj_request);
1823 static void rbd_osd_req_callback(struct ceph_osd_request *osd_req,
1824 struct ceph_msg *msg)
1826 struct rbd_obj_request *obj_request = osd_req->r_priv;
1829 dout("%s: osd_req %p msg %p\n", __func__, osd_req, msg);
1830 rbd_assert(osd_req == obj_request->osd_req);
1831 if (obj_request_img_data_test(obj_request)) {
1832 rbd_assert(obj_request->img_request);
1833 rbd_assert(obj_request->which != BAD_WHICH);
1835 rbd_assert(obj_request->which == BAD_WHICH);
1838 if (osd_req->r_result < 0)
1839 obj_request->result = osd_req->r_result;
1841 rbd_assert(osd_req->r_num_ops <= CEPH_OSD_MAX_OP);
1844 * We support a 64-bit length, but ultimately it has to be
1845 passed to the block layer, which just supports a 32-bit length field.
1848 obj_request->xferred = osd_req->r_reply_op_len[0];
1849 rbd_assert(obj_request->xferred < (u64)UINT_MAX);
1851 opcode = osd_req->r_ops[0].op;
1853 case CEPH_OSD_OP_READ:
1854 rbd_osd_read_callback(obj_request);
1856 case CEPH_OSD_OP_SETALLOCHINT:
1857 rbd_assert(osd_req->r_ops[1].op == CEPH_OSD_OP_WRITE ||
1858 osd_req->r_ops[1].op == CEPH_OSD_OP_WRITEFULL);
1860 case CEPH_OSD_OP_WRITE:
1861 case CEPH_OSD_OP_WRITEFULL:
1862 rbd_osd_write_callback(obj_request);
1864 case CEPH_OSD_OP_STAT:
1865 rbd_osd_stat_callback(obj_request);
1867 case CEPH_OSD_OP_DELETE:
1868 case CEPH_OSD_OP_TRUNCATE:
1869 case CEPH_OSD_OP_ZERO:
1870 rbd_osd_discard_callback(obj_request);
1872 case CEPH_OSD_OP_CALL:
1873 rbd_osd_call_callback(obj_request);
1875 case CEPH_OSD_OP_NOTIFY_ACK:
1876 case CEPH_OSD_OP_WATCH:
1877 rbd_osd_trivial_callback(obj_request);
1880 rbd_warn(NULL, "%s: unsupported op %hu",
1881 obj_request->object_name, (unsigned short) opcode);
1885 if (obj_request_done_test(obj_request))
1886 rbd_obj_request_complete(obj_request);
1889 static void rbd_osd_req_format_read(struct rbd_obj_request *obj_request)
1891 struct rbd_img_request *img_request = obj_request->img_request;
1892 struct ceph_osd_request *osd_req = obj_request->osd_req;
1895 rbd_assert(osd_req != NULL);
1897 snap_id = img_request ? img_request->snap_id : CEPH_NOSNAP;
1898 ceph_osdc_build_request(osd_req, obj_request->offset,
1899 NULL, snap_id, NULL);
1902 static void rbd_osd_req_format_write(struct rbd_obj_request *obj_request)
1904 struct rbd_img_request *img_request = obj_request->img_request;
1905 struct ceph_osd_request *osd_req = obj_request->osd_req;
1906 struct ceph_snap_context *snapc;
1907 struct timespec mtime = CURRENT_TIME;
1909 rbd_assert(osd_req != NULL);
1911 snapc = img_request ? img_request->snapc : NULL;
1912 ceph_osdc_build_request(osd_req, obj_request->offset,
1913 snapc, CEPH_NOSNAP, &mtime);
1917 * Create an osd request. A read request has one osd op (read).
1918 * A write request has either one (watch) or two (hint+write) osd ops.
1919 * (All rbd data writes are prefixed with an allocation hint op, but
1920 * technically osd watch is a write request, hence this distinction.)
1922 static struct ceph_osd_request *rbd_osd_req_create(
1923 struct rbd_device *rbd_dev,
1924 enum obj_operation_type op_type,
1925 unsigned int num_ops,
1926 struct rbd_obj_request *obj_request)
1928 struct ceph_snap_context *snapc = NULL;
1929 struct ceph_osd_client *osdc;
1930 struct ceph_osd_request *osd_req;
1932 if (obj_request_img_data_test(obj_request) &&
1933 (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_WRITE)) {
1934 struct rbd_img_request *img_request = obj_request->img_request;
1935 if (op_type == OBJ_OP_WRITE) {
1936 rbd_assert(img_request_write_test(img_request));
1938 rbd_assert(img_request_discard_test(img_request));
1940 snapc = img_request->snapc;
1943 rbd_assert(num_ops == 1 || ((op_type == OBJ_OP_WRITE) && num_ops == 2));
1945 /* Allocate and initialize the request, for the num_ops ops */
1947 osdc = &rbd_dev->rbd_client->client->osdc;
1948 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false,
1951 return NULL; /* ENOMEM */
1953 if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
1954 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
1956 osd_req->r_flags = CEPH_OSD_FLAG_READ;
1958 osd_req->r_callback = rbd_osd_req_callback;
1959 osd_req->r_priv = obj_request;
1961 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
1962 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
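/*
 * Illustrative op layouts produced by this helper, per the comment
 * above (added annotation): a read is [ CEPH_OSD_OP_READ ]; a plain
 * data write is [ CEPH_OSD_OP_SETALLOCHINT, CEPH_OSD_OP_WRITE or
 * WRITEFULL ]; a watch request is the single-op write
 * [ CEPH_OSD_OP_WATCH ]; a discard is a single DELETE, TRUNCATE or
 * ZERO op.
 */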
1968 * Create a copyup osd request based on the information in the object
1969 * request supplied. A copyup request has two or three osd ops, a
1970 copyup method call, potentially a hint op, and a write or truncate or zero op.
1973 static struct ceph_osd_request *
1974 rbd_osd_req_create_copyup(struct rbd_obj_request *obj_request)
1976 struct rbd_img_request *img_request;
1977 struct ceph_snap_context *snapc;
1978 struct rbd_device *rbd_dev;
1979 struct ceph_osd_client *osdc;
1980 struct ceph_osd_request *osd_req;
1981 int num_osd_ops = 3;
1983 rbd_assert(obj_request_img_data_test(obj_request));
1984 img_request = obj_request->img_request;
1985 rbd_assert(img_request);
1986 rbd_assert(img_request_write_test(img_request) ||
1987 img_request_discard_test(img_request));
1989 if (img_request_discard_test(img_request))
1992 /* Allocate and initialize the request, for all the ops */
1994 snapc = img_request->snapc;
1995 rbd_dev = img_request->rbd_dev;
1996 osdc = &rbd_dev->rbd_client->client->osdc;
1997 osd_req = ceph_osdc_alloc_request(osdc, snapc, num_osd_ops,
2000 return NULL; /* ENOMEM */
2002 osd_req->r_flags = CEPH_OSD_FLAG_WRITE | CEPH_OSD_FLAG_ONDISK;
2003 osd_req->r_callback = rbd_osd_req_callback;
2004 osd_req->r_priv = obj_request;
2006 osd_req->r_base_oloc.pool = ceph_file_layout_pg_pool(rbd_dev->layout);
2007 ceph_oid_set_name(&osd_req->r_base_oid, obj_request->object_name);
2013 static void rbd_osd_req_destroy(struct ceph_osd_request *osd_req)
2015 ceph_osdc_put_request(osd_req);
2018 /* object_name is assumed to be a non-null pointer and NUL-terminated */
2020 static struct rbd_obj_request *rbd_obj_request_create(const char *object_name,
2021 u64 offset, u64 length,
2022 enum obj_request_type type)
2024 struct rbd_obj_request *obj_request;
2028 rbd_assert(obj_request_type_valid(type));
2030 size = strlen(object_name) + 1;
2031 name = kmalloc(size, GFP_NOIO);
2035 obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
2041 obj_request->object_name = memcpy(name, object_name, size);
2042 obj_request->offset = offset;
2043 obj_request->length = length;
2044 obj_request->flags = 0;
2045 obj_request->which = BAD_WHICH;
2046 obj_request->type = type;
2047 INIT_LIST_HEAD(&obj_request->links);
2048 init_completion(&obj_request->completion);
2049 kref_init(&obj_request->kref);
2051 dout("%s: \"%s\" %llu/%llu %d -> obj %p\n", __func__, object_name,
2052 offset, length, (int)type, obj_request);
2057 static void rbd_obj_request_destroy(struct kref *kref)
2059 struct rbd_obj_request *obj_request;
2061 obj_request = container_of(kref, struct rbd_obj_request, kref);
2063 dout("%s: obj %p\n", __func__, obj_request);
2065 rbd_assert(obj_request->img_request == NULL);
2066 rbd_assert(obj_request->which == BAD_WHICH);
2068 if (obj_request->osd_req)
2069 rbd_osd_req_destroy(obj_request->osd_req);
2071 rbd_assert(obj_request_type_valid(obj_request->type));
2072 switch (obj_request->type) {
2073 case OBJ_REQUEST_NODATA:
2074 break; /* Nothing to do */
2075 case OBJ_REQUEST_BIO:
2076 if (obj_request->bio_list)
2077 bio_chain_put(obj_request->bio_list);
2079 case OBJ_REQUEST_PAGES:
2080 if (obj_request->pages)
2081 ceph_release_page_vector(obj_request->pages,
2082 obj_request->page_count);
2086 kfree(obj_request->object_name);
2087 obj_request->object_name = NULL;
2088 kmem_cache_free(rbd_obj_request_cache, obj_request);
2091 /* It's OK to call this for a device with no parent */
2093 static void rbd_spec_put(struct rbd_spec *spec);
2094 static void rbd_dev_unparent(struct rbd_device *rbd_dev)
2096 rbd_dev_remove_parent(rbd_dev);
2097 rbd_spec_put(rbd_dev->parent_spec);
2098 rbd_dev->parent_spec = NULL;
2099 rbd_dev->parent_overlap = 0;
2103 * Parent image reference counting is used to determine when an
2104 * image's parent fields can be safely torn down--after there are no
2105 * more in-flight requests to the parent image. When the last
2106 * reference is dropped, cleaning them up is safe.
2108 static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
2112 if (!rbd_dev->parent_spec)
2115 counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
2119 /* Last reference; clean up parent data structures */
2122 rbd_dev_unparent(rbd_dev);
2124 rbd_warn(rbd_dev, "parent reference underflow");
2128 * If an image has a non-zero parent overlap, get a reference to its parent.
2131 * Returns true if the rbd device has a parent with a non-zero
2132 overlap and a reference for it was successfully taken, or false otherwise.
2135 static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
2139 if (!rbd_dev->parent_spec)
2142 down_read(&rbd_dev->header_rwsem);
2143 if (rbd_dev->parent_overlap)
2144 counter = atomic_inc_return_safe(&rbd_dev->parent_ref);
2145 up_read(&rbd_dev->header_rwsem);
2148 rbd_warn(rbd_dev, "parent reference overflow");
2154 * Caller is responsible for filling in the list of object requests
2155 * that comprises the image request, and the Linux request pointer
2156 * (if there is one).
2158 static struct rbd_img_request *rbd_img_request_create(
2159 struct rbd_device *rbd_dev,
2160 u64 offset, u64 length,
2161 enum obj_operation_type op_type,
2162 struct ceph_snap_context *snapc)
2164 struct rbd_img_request *img_request;
2166 img_request = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2170 img_request->rq = NULL;
2171 img_request->rbd_dev = rbd_dev;
2172 img_request->offset = offset;
2173 img_request->length = length;
2174 img_request->flags = 0;
2175 if (op_type == OBJ_OP_DISCARD) {
2176 img_request_discard_set(img_request);
2177 img_request->snapc = snapc;
2178 } else if (op_type == OBJ_OP_WRITE) {
2179 img_request_write_set(img_request);
2180 img_request->snapc = snapc;
2182 img_request->snap_id = rbd_dev->spec->snap_id;
2184 if (rbd_dev_parent_get(rbd_dev))
2185 img_request_layered_set(img_request);
2186 spin_lock_init(&img_request->completion_lock);
2187 img_request->next_completion = 0;
2188 img_request->callback = NULL;
2189 img_request->result = 0;
2190 img_request->obj_request_count = 0;
2191 INIT_LIST_HEAD(&img_request->obj_requests);
2192 kref_init(&img_request->kref);
2194 dout("%s: rbd_dev %p %s %llu/%llu -> img %p\n", __func__, rbd_dev,
2195 obj_op_name(op_type), offset, length, img_request);
2200 static void rbd_img_request_destroy(struct kref *kref)
2202 struct rbd_img_request *img_request;
2203 struct rbd_obj_request *obj_request;
2204 struct rbd_obj_request *next_obj_request;
2206 img_request = container_of(kref, struct rbd_img_request, kref);
2208 dout("%s: img %p\n", __func__, img_request);
2210 for_each_obj_request_safe(img_request, obj_request, next_obj_request)
2211 rbd_img_obj_request_del(img_request, obj_request);
2212 rbd_assert(img_request->obj_request_count == 0);
2214 if (img_request_layered_test(img_request)) {
2215 img_request_layered_clear(img_request);
2216 rbd_dev_parent_put(img_request->rbd_dev);
2219 if (img_request_write_test(img_request) ||
2220 img_request_discard_test(img_request))
2221 ceph_put_snap_context(img_request->snapc);
2223 kmem_cache_free(rbd_img_request_cache, img_request);
2226 static struct rbd_img_request *rbd_parent_request_create(
2227 struct rbd_obj_request *obj_request,
2228 u64 img_offset, u64 length)
2230 struct rbd_img_request *parent_request;
2231 struct rbd_device *rbd_dev;
2233 rbd_assert(obj_request->img_request);
2234 rbd_dev = obj_request->img_request->rbd_dev;
2236 parent_request = rbd_img_request_create(rbd_dev->parent, img_offset,
2237 length, OBJ_OP_READ, NULL);
2238 if (!parent_request)
2241 img_request_child_set(parent_request);
2242 rbd_obj_request_get(obj_request);
2243 parent_request->obj_request = obj_request;
2245 return parent_request;
2248 static void rbd_parent_request_destroy(struct kref *kref)
2250 struct rbd_img_request *parent_request;
2251 struct rbd_obj_request *orig_request;
2253 parent_request = container_of(kref, struct rbd_img_request, kref);
2254 orig_request = parent_request->obj_request;
2256 parent_request->obj_request = NULL;
2257 rbd_obj_request_put(orig_request);
2258 img_request_child_clear(parent_request);
2260 rbd_img_request_destroy(kref);
static bool rbd_img_obj_end_request(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	unsigned int xferred;
	int result;
	bool more;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	rbd_assert(obj_request->xferred <= (u64)UINT_MAX);
	xferred = (unsigned int)obj_request->xferred;
	result = obj_request->result;
	if (result) {
		struct rbd_device *rbd_dev = img_request->rbd_dev;
		enum obj_operation_type op_type;

		if (img_request_discard_test(img_request))
			op_type = OBJ_OP_DISCARD;
		else if (img_request_write_test(img_request))
			op_type = OBJ_OP_WRITE;
		else
			op_type = OBJ_OP_READ;

		rbd_warn(rbd_dev, "%s %llx at %llx (%llx)",
			obj_op_name(op_type), obj_request->length,
			obj_request->img_offset, obj_request->offset);
		rbd_warn(rbd_dev, " result %d xferred %x",
			result, xferred);
		if (!img_request->result)
			img_request->result = result;
		/*
		 * Need to end I/O on the entire obj_request worth of
		 * bytes in case of error.
		 */
		xferred = obj_request->length;
	}

	/* Image object requests don't own their page array */

	if (obj_request->type == OBJ_REQUEST_PAGES) {
		obj_request->pages = NULL;
		obj_request->page_count = 0;
	}

	if (img_request_child_test(img_request)) {
		rbd_assert(img_request->obj_request != NULL);
		more = obj_request->which < img_request->obj_request_count - 1;
	} else {
		rbd_assert(img_request->rq != NULL);

		more = blk_update_request(img_request->rq, result, xferred);
		if (!more)
			__blk_mq_end_request(img_request->rq, result);
	}

	return more;
}
static void rbd_img_obj_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	u32 which = obj_request->which;
	bool more = true;

	rbd_assert(obj_request_img_data_test(obj_request));
	img_request = obj_request->img_request;

	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	rbd_assert(img_request != NULL);
	rbd_assert(img_request->obj_request_count > 0);
	rbd_assert(which != BAD_WHICH);
	rbd_assert(which < img_request->obj_request_count);

	spin_lock_irq(&img_request->completion_lock);
	if (which != img_request->next_completion)
		goto out;

	for_each_obj_request_from(img_request, obj_request) {
		rbd_assert(more);
		rbd_assert(which < img_request->obj_request_count);

		if (!obj_request_done_test(obj_request))
			break;
		more = rbd_img_obj_end_request(obj_request);
		which++;
	}

	rbd_assert(more ^ (which == img_request->obj_request_count));
	img_request->next_completion = which;
out:
	spin_unlock_irq(&img_request->completion_lock);
	rbd_img_request_put(img_request);

	if (!more)
		rbd_img_request_complete(img_request);
}
/*
 * Add individual osd ops to the given ceph_osd_request and prepare
 * them for submission.  num_ops is the current number of
 * osd operations already added to the object request.
 */
static void rbd_img_obj_request_fill(struct rbd_obj_request *obj_request,
				struct ceph_osd_request *osd_request,
				enum obj_operation_type op_type,
				unsigned int num_ops)
{
	struct rbd_img_request *img_request = obj_request->img_request;
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	u64 object_size = rbd_obj_bytes(&rbd_dev->header);
	u64 offset = obj_request->offset;
	u64 length = obj_request->length;
	u64 img_end;
	u16 opcode;

	if (op_type == OBJ_OP_DISCARD) {
		if (!offset && length == object_size &&
		    (!img_request_layered_test(img_request) ||
		     !obj_request_overlaps_parent(obj_request))) {
			opcode = CEPH_OSD_OP_DELETE;
		} else if (offset + length == object_size) {
			opcode = CEPH_OSD_OP_TRUNCATE;
		} else {
			down_read(&rbd_dev->header_rwsem);
			img_end = rbd_dev->header.image_size;
			up_read(&rbd_dev->header_rwsem);

			if (obj_request->img_offset + length == img_end)
				opcode = CEPH_OSD_OP_TRUNCATE;
			else
				opcode = CEPH_OSD_OP_ZERO;
		}
	} else if (op_type == OBJ_OP_WRITE) {
		if (!offset && length == object_size)
			opcode = CEPH_OSD_OP_WRITEFULL;
		else
			opcode = CEPH_OSD_OP_WRITE;
		osd_req_op_alloc_hint_init(osd_request, num_ops,
					object_size, object_size);
		num_ops++;
	} else {
		opcode = CEPH_OSD_OP_READ;
	}

	if (opcode == CEPH_OSD_OP_DELETE)
		osd_req_op_init(osd_request, num_ops, opcode, 0);
	else
		osd_req_op_extent_init(osd_request, num_ops, opcode,
					offset, length, 0, 0);

	if (obj_request->type == OBJ_REQUEST_BIO)
		osd_req_op_extent_osd_data_bio(osd_request, num_ops,
					obj_request->bio_list, length);
	else if (obj_request->type == OBJ_REQUEST_PAGES)
		osd_req_op_extent_osd_data_pages(osd_request, num_ops,
					obj_request->pages, length,
					offset & ~PAGE_MASK, false, false);

	/* Discards are also writes */
	if (op_type == OBJ_OP_WRITE || op_type == OBJ_OP_DISCARD)
		rbd_osd_req_format_write(obj_request);
	else
		rbd_osd_req_format_read(obj_request);
}
/*
 * Split up an image request into one or more object requests, each
2431 * to a different object. The "type" parameter indicates whether
2432 * "data_desc" is the pointer to the head of a list of bio
2433 * structures, or the base of a page array. In either case this
2434 * function assumes data_desc describes memory sufficient to hold
2435 * all data described by the image request.
static int rbd_img_request_fill(struct rbd_img_request *img_request,
					enum obj_request_type type,
					void *data_desc)
{
	struct rbd_device *rbd_dev = img_request->rbd_dev;
	struct rbd_obj_request *obj_request = NULL;
	struct rbd_obj_request *next_obj_request;
	struct bio *bio_list = NULL;
	unsigned int bio_offset = 0;
	struct page **pages = NULL;
	enum obj_operation_type op_type;
	u64 img_offset;
	u64 resid;

	dout("%s: img %p type %d data_desc %p\n", __func__, img_request,
		(int)type, data_desc);

	img_offset = img_request->offset;
	resid = img_request->length;
	rbd_assert(resid > 0);
	op_type = rbd_img_request_op_type(img_request);

	if (type == OBJ_REQUEST_BIO) {
		bio_list = data_desc;
		rbd_assert(img_offset ==
			   bio_list->bi_iter.bi_sector << SECTOR_SHIFT);
	} else if (type == OBJ_REQUEST_PAGES) {
		pages = data_desc;
	}

	while (resid) {
		struct ceph_osd_request *osd_req;
		const char *object_name;
		u64 offset;
		u64 length;

		object_name = rbd_segment_name(rbd_dev, img_offset);
		if (!object_name)
			goto out_unwind;
		offset = rbd_segment_offset(rbd_dev, img_offset);
		length = rbd_segment_length(rbd_dev, img_offset, resid);
		obj_request = rbd_obj_request_create(object_name,
						offset, length, type);
		/* object request has its own copy of the object name */
		rbd_segment_name_free(object_name);
		if (!obj_request)
			goto out_unwind;

		/*
		 * set obj_request->img_request before creating the
		 * osd_request so that it gets the right snapc
		 */
		rbd_img_obj_request_add(img_request, obj_request);

		if (type == OBJ_REQUEST_BIO) {
			unsigned int clone_size;

			rbd_assert(length <= (u64)UINT_MAX);
			clone_size = (unsigned int)length;
			obj_request->bio_list =
					bio_chain_clone_range(&bio_list,
								&bio_offset,
								clone_size,
								GFP_NOIO);
			if (!obj_request->bio_list)
				goto out_unwind;
		} else if (type == OBJ_REQUEST_PAGES) {
			unsigned int page_count;

			obj_request->pages = pages;
			page_count = (u32)calc_pages_for(offset, length);
			obj_request->page_count = page_count;
			if ((offset + length) & ~PAGE_MASK)
				page_count--;	/* more on last page */
			pages += page_count;
		}

		osd_req = rbd_osd_req_create(rbd_dev, op_type,
					(op_type == OBJ_OP_WRITE) ? 2 : 1,
					obj_request);
		if (!osd_req)
			goto out_unwind;

		obj_request->osd_req = osd_req;
		obj_request->callback = rbd_img_obj_callback;
		obj_request->img_offset = img_offset;

		rbd_img_obj_request_fill(obj_request, osd_req, op_type, 0);

		rbd_img_request_get(img_request);

		img_offset += length;
		resid -= length;
	}

	return 0;

out_unwind:
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	return -ENOMEM;
}
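/*
 * Completion callback for a copyup request: free the pages that held
 * the parent data and, on success, account the full length of the
 * original write before marking the object request done.
 */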
static void
rbd_osd_copyup_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;
	struct page **pages;
	u32 page_count;

	dout("%s: obj %p\n", __func__, obj_request);
2551 rbd_assert(obj_request->type == OBJ_REQUEST_BIO ||
2552 obj_request->type == OBJ_REQUEST_NODATA);
2553 rbd_assert(obj_request_img_data_test(obj_request));
2554 img_request = obj_request->img_request;
2555 rbd_assert(img_request);
2557 rbd_dev = img_request->rbd_dev;
2558 rbd_assert(rbd_dev);
2560 pages = obj_request->copyup_pages;
2561 rbd_assert(pages != NULL);
2562 obj_request->copyup_pages = NULL;
2563 page_count = obj_request->copyup_page_count;
2564 rbd_assert(page_count);
2565 obj_request->copyup_page_count = 0;
2566 ceph_release_page_vector(pages, page_count);
	/*
	 * We want the transfer count to reflect the size of the
	 * original write request.  There is no such thing as a
	 * successful short write, so if the request was successful
	 * we can just set it to the originally-requested length.
	 */
	if (!obj_request->result)
		obj_request->xferred = obj_request->length;

	obj_request_done_set(obj_request);
}
static void
rbd_img_obj_parent_read_full_callback(struct rbd_img_request *img_request)
{
2583 struct rbd_obj_request *orig_request;
2584 struct ceph_osd_request *osd_req;
2585 struct ceph_osd_client *osdc;
2586 struct rbd_device *rbd_dev;
2587 struct page **pages;
	enum obj_operation_type op_type;
	u32 page_count;
	int img_result;
	u64 parent_length;
2593 rbd_assert(img_request_child_test(img_request));
2595 /* First get what we need from the image request */
2597 pages = img_request->copyup_pages;
2598 rbd_assert(pages != NULL);
2599 img_request->copyup_pages = NULL;
2600 page_count = img_request->copyup_page_count;
2601 rbd_assert(page_count);
2602 img_request->copyup_page_count = 0;
2604 orig_request = img_request->obj_request;
2605 rbd_assert(orig_request != NULL);
2606 rbd_assert(obj_request_type_valid(orig_request->type));
2607 img_result = img_request->result;
2608 parent_length = img_request->length;
2609 rbd_assert(parent_length == img_request->xferred);
2610 rbd_img_request_put(img_request);
2612 rbd_assert(orig_request->img_request);
2613 rbd_dev = orig_request->img_request->rbd_dev;
2614 rbd_assert(rbd_dev);
	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		ceph_release_page_vector(pages, page_count);
		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, orig_request);
		if (!img_result)
			return;
	}

	if (img_result)
		goto out_err;

	/*
	 * The original osd request is of no use to us any more.
	 * We need a new one that can hold the three ops in a copyup
	 * request.  Allocate the new copyup osd request for the
	 * original request, and release the old one.
	 */
	img_result = -ENOMEM;
	osd_req = rbd_osd_req_create_copyup(orig_request);
	if (!osd_req)
		goto out_err;
2644 rbd_osd_req_destroy(orig_request->osd_req);
2645 orig_request->osd_req = osd_req;
2646 orig_request->copyup_pages = pages;
2647 orig_request->copyup_page_count = page_count;
2649 /* Initialize the copyup op */
2651 osd_req_op_cls_init(osd_req, 0, CEPH_OSD_OP_CALL, "rbd", "copyup");
	osd_req_op_cls_request_data_pages(osd_req, 0, pages, parent_length, 0,
						false, false);
2655 /* Add the other op(s) */
2657 op_type = rbd_img_request_op_type(orig_request->img_request);
2658 rbd_img_obj_request_fill(orig_request, osd_req, op_type, 1);
2660 /* All set, send it off. */
	osdc = &rbd_dev->rbd_client->client->osdc;
	img_result = rbd_obj_request_submit(osdc, orig_request);
	if (!img_result)
		return;

out_err:
	/* Record the error code and complete the request */

	orig_request->result = img_result;
	orig_request->xferred = 0;
	obj_request_done_set(orig_request);
	rbd_obj_request_complete(orig_request);
}
/*
 * Read from the parent image the range of data that covers the
2677 * entire target of the given object request. This is used for
2678 * satisfying a layered image write request when the target of an
2679 * object request from the image request does not exist.
2681 * A page array big enough to hold the returned data is allocated
2682 * and supplied to rbd_img_request_fill() as the "data descriptor."
2683 * When the read completes, this page array will be transferred to
2684 * the original object request for the copyup operation.
2686 * If an error occurs, record it as the result of the original
 * object request and mark it done so it gets completed.
 */
static int rbd_img_obj_parent_read_full(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request = NULL;
	struct rbd_img_request *parent_request = NULL;
	struct rbd_device *rbd_dev;
	u64 img_offset;
	u64 length;
	struct page **pages = NULL;
	u32 page_count;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request_type_valid(obj_request->type));

	img_request = obj_request->img_request;
	rbd_assert(img_request != NULL);
	rbd_dev = img_request->rbd_dev;
	rbd_assert(rbd_dev->parent != NULL);

	/*
	 * Determine the byte range covered by the object in the
	 * child image to which the original request was to be sent.
	 */
	img_offset = obj_request->img_offset - obj_request->offset;
	length = (u64)1 << rbd_dev->header.obj_order;

	/*
	 * There is no defined parent data beyond the parent
	 * overlap, so limit what we read at that boundary if
	 * necessary.
	 */
	if (img_offset + length > rbd_dev->parent_overlap) {
		rbd_assert(img_offset < rbd_dev->parent_overlap);
		length = rbd_dev->parent_overlap - img_offset;
	}

	/*
	 * Allocate a page array big enough to receive the data read
	 * from the parent.
	 */
	page_count = (u32)calc_pages_for(0, length);
	pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
	if (IS_ERR(pages)) {
		result = PTR_ERR(pages);
		pages = NULL;
		goto out_err;
	}

	result = -ENOMEM;
	parent_request = rbd_parent_request_create(obj_request,
						img_offset, length);
	if (!parent_request)
		goto out_err;

	result = rbd_img_request_fill(parent_request, OBJ_REQUEST_PAGES, pages);
	if (result)
		goto out_err;

	parent_request->copyup_pages = pages;
	parent_request->copyup_page_count = page_count;
	parent_request->callback = rbd_img_obj_parent_read_full_callback;

	result = rbd_img_request_submit(parent_request);
	if (!result)
		return 0;

	parent_request->copyup_pages = NULL;
	parent_request->copyup_page_count = 0;
	parent_request->obj_request = NULL;
	rbd_obj_request_put(obj_request);
out_err:
	if (pages)
		ceph_release_page_vector(pages, page_count);
	if (parent_request)
		rbd_img_request_put(parent_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);

	return result;
}
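/*
 * Handle completion of the STAT request issued by
 * rbd_img_obj_exists_submit(): record whether the target object
 * exists, then resubmit the original object request.
 */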
static void rbd_img_obj_exists_callback(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *orig_request;
	struct rbd_device *rbd_dev;
	int result;

	rbd_assert(!obj_request_img_data_test(obj_request));

	/*
	 * All we need from the object request is the original
	 * request and the result of the STAT op.  Grab those, then
	 * we're done with the request.
	 */
	orig_request = obj_request->obj_request;
	obj_request->obj_request = NULL;
	rbd_obj_request_put(orig_request);
	rbd_assert(orig_request);
	rbd_assert(orig_request->img_request);

	result = obj_request->result;
	obj_request->result = 0;

	dout("%s: obj %p for obj %p result %d %llu/%llu\n", __func__,
		obj_request, orig_request, result,
		obj_request->xferred, obj_request->length);
	rbd_obj_request_put(obj_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to free the pages
	 * and re-submit the original write request.
	 */
	rbd_dev = orig_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		result = rbd_obj_request_submit(osdc, orig_request);
		if (!result)
			return;
	}

	/*
	 * Our only purpose here is to determine whether the object
	 * exists, and we don't want to treat the non-existence as
	 * an error.  If something else comes back, transfer the
	 * error to the original request and complete it now.
	 */
	if (!result) {
		obj_request_existence_set(orig_request, true);
	} else if (result == -ENOENT) {
		obj_request_existence_set(orig_request, false);
	} else if (result) {
		orig_request->result = result;
	}

	/*
	 * Resubmit the original request now that we have recorded
	 * whether the target object exists.
	 */
	orig_request->result = rbd_img_obj_request_submit(orig_request);

	if (orig_request->result)
		rbd_obj_request_complete(orig_request);
}
static int rbd_img_obj_exists_submit(struct rbd_obj_request *obj_request)
{
	struct rbd_obj_request *stat_request;
	struct rbd_device *rbd_dev;
	struct ceph_osd_client *osdc;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	size = sizeof (__le64) + sizeof (__le32) + sizeof (__le32);
	page_count = (u32)calc_pages_for(0, size);
	pages = ceph_alloc_page_vector(page_count, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	stat_request = rbd_obj_request_create(obj_request->object_name, 0, 0,
						OBJ_REQUEST_PAGES);
	if (!stat_request)
		goto out;

	rbd_obj_request_get(obj_request);
	stat_request->obj_request = obj_request;
	stat_request->pages = pages;
	stat_request->page_count = page_count;

	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	stat_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						   stat_request);
	if (!stat_request->osd_req)
		goto out;
	stat_request->callback = rbd_img_obj_exists_callback;

	osd_req_op_init(stat_request->osd_req, 0, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(stat_request->osd_req, 0, pages, size, 0,
					false, false);
	rbd_osd_req_format_read(stat_request);

	osdc = &rbd_dev->rbd_client->client->osdc;
	ret = rbd_obj_request_submit(osdc, stat_request);
out:
	if (ret)
		rbd_obj_request_put(obj_request);

	return ret;
}
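/*
 * Return true if the object request needs no copyup handling and can
 * be sent directly to the osd: reads, non-layered writes, layered
 * writes beyond the parent overlap or covering a whole object, and
 * writes to objects already known to exist.
 */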
static bool img_obj_request_simple(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	struct rbd_device *rbd_dev;

	rbd_assert(obj_request_img_data_test(obj_request));

	img_request = obj_request->img_request;
	rbd_assert(img_request);
	rbd_dev = img_request->rbd_dev;

	/* Reads */
	if (!img_request_write_test(img_request) &&
	    !img_request_discard_test(img_request))
		return true;

	/* Non-layered writes */
	if (!img_request_layered_test(img_request))
		return true;

	/*
	 * Layered writes outside of the parent overlap range don't
	 * share any data with the parent.
	 */
	if (!obj_request_overlaps_parent(obj_request))
		return true;

	/*
	 * Entire-object layered writes - we will overwrite whatever
	 * parent data there is anyway.
	 */
	if (!obj_request->offset &&
	    obj_request->length == rbd_obj_bytes(&rbd_dev->header))
		return true;

	/*
	 * If the object is known to already exist, its parent data has
	 * already been copied.
	 */
	if (obj_request_known_test(obj_request) &&
	    obj_request_exists_test(obj_request))
		return true;

	return false;
}
static int rbd_img_obj_request_submit(struct rbd_obj_request *obj_request)
{
	if (img_obj_request_simple(obj_request)) {
		struct rbd_device *rbd_dev;
		struct ceph_osd_client *osdc;

		rbd_dev = obj_request->img_request->rbd_dev;
		osdc = &rbd_dev->rbd_client->client->osdc;

		return rbd_obj_request_submit(osdc, obj_request);
	}

	/*
	 * It's a layered write.  The target object might exist but
	 * we may not know that yet.  If we know it doesn't exist,
	 * start by reading the data for the full target object from
	 * the parent so we can use it for a copyup to the target.
	 */
	if (obj_request_known_test(obj_request))
		return rbd_img_obj_parent_read_full(obj_request);

	/* We don't know whether the target exists.  Go find out. */

	return rbd_img_obj_exists_submit(obj_request);
}
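/*
 * Submit each of the object requests that make up an image request.
 * If any submission fails, the whole image request fails.
 */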
static int rbd_img_request_submit(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);
	for_each_obj_request_safe(img_request, obj_request, next_obj_request) {
		int ret;

		ret = rbd_img_obj_request_submit(obj_request);
		if (ret)
			return ret;
	}

	return 0;
}
static void rbd_img_parent_read_callback(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_device *rbd_dev;
	u64 obj_end;
	u64 img_xferred;
	int img_result;

	rbd_assert(img_request_child_test(img_request));

	/* First get what we need from the image request and release it */

	obj_request = img_request->obj_request;
	img_xferred = img_request->xferred;
	img_result = img_request->result;
	rbd_img_request_put(img_request);

	/*
	 * If the overlap has become 0 (most likely because the
	 * image has been flattened) we need to re-submit the
	 * original request.
	 */
	rbd_assert(obj_request);
	rbd_assert(obj_request->img_request);
	rbd_dev = obj_request->img_request->rbd_dev;
	if (!rbd_dev->parent_overlap) {
		struct ceph_osd_client *osdc;

		osdc = &rbd_dev->rbd_client->client->osdc;
		img_result = rbd_obj_request_submit(osdc, obj_request);
		if (!img_result)
			return;
	}

	obj_request->result = img_result;
	if (obj_request->result)
		goto out;

	/*
	 * We need to zero anything beyond the parent overlap
	 * boundary.  Since rbd_img_obj_request_read_callback()
	 * will zero anything beyond the end of a short read, an
	 * easy way to do this is to pretend the data from the
	 * parent came up short--ending at the overlap boundary.
	 */
	rbd_assert(obj_request->img_offset < U64_MAX - obj_request->length);
	obj_end = obj_request->img_offset + obj_request->length;
	if (obj_end > rbd_dev->parent_overlap) {
		u64 xferred = 0;

		if (obj_request->img_offset < rbd_dev->parent_overlap)
			xferred = rbd_dev->parent_overlap -
					obj_request->img_offset;

		obj_request->xferred = min(img_xferred, xferred);
	} else {
		obj_request->xferred = img_xferred;
	}
out:
	rbd_img_obj_request_read_callback(obj_request);
	rbd_obj_request_complete(obj_request);
}
static void rbd_img_parent_read(struct rbd_obj_request *obj_request)
{
	struct rbd_img_request *img_request;
	int result;

	rbd_assert(obj_request_img_data_test(obj_request));
	rbd_assert(obj_request->img_request != NULL);
	rbd_assert(obj_request->result == (s32) -ENOENT);
	rbd_assert(obj_request_type_valid(obj_request->type));

	/* rbd_read_finish(obj_request, obj_request->length); */
	img_request = rbd_parent_request_create(obj_request,
						obj_request->img_offset,
						obj_request->length);
	result = -ENOMEM;
	if (!img_request)
		goto out_err;

	if (obj_request->type == OBJ_REQUEST_BIO)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
						obj_request->bio_list);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_PAGES,
						obj_request->pages);
	if (result)
		goto out_err;

	img_request->callback = rbd_img_parent_read_callback;
	result = rbd_img_request_submit(img_request);
	if (result)
		goto out_err;

	return;
out_err:
	if (img_request)
		rbd_img_request_put(img_request);
	obj_request->result = result;
	obj_request->xferred = 0;
	obj_request_done_set(obj_request);
}
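/*
 * Synchronously acknowledge a notification received on the header
 * object, so the osd does not keep resending it.
 */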
static int rbd_obj_notify_ack_sync(struct rbd_device *rbd_dev, u64 notify_id)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
						OBJ_REQUEST_NODATA);
	if (!obj_request)
		return -ENOMEM;

	ret = -ENOMEM;
	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_NOTIFY_ACK,
					notify_id, 0, 0);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
out:
	rbd_obj_request_put(obj_request);

	return ret;
}
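/*
 * Callback invoked when a notification arrives for the watch we have
 * registered on the header object: refresh the image metadata, then
 * acknowledge the notification.
 */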
static void rbd_watch_cb(u64 ver, u64 notify_id, u8 opcode, void *data)
{
	struct rbd_device *rbd_dev = (struct rbd_device *)data;
	int ret;

	if (!rbd_dev)
		return;

	dout("%s: \"%s\" notify_id %llu opcode %u\n", __func__,
		rbd_dev->header_name, (unsigned long long)notify_id,
		(unsigned int)opcode);

	/*
	 * Until adequate refresh error handling is in place, there is
	 * not much we can do here, except warn.
	 *
	 * See http://tracker.ceph.com/issues/5040
	 */
	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		rbd_warn(rbd_dev, "refresh failed: %d", ret);

	ret = rbd_obj_notify_ack_sync(rbd_dev, notify_id);
	if (ret)
		rbd_warn(rbd_dev, "notify_ack ret %d", ret);
}
/*
 * Send a watch or unwatch request and wait for the ack.  Return a
 * request with a ref held on success or error.
 */
static struct rbd_obj_request *rbd_obj_watch_request_helper(
						struct rbd_device *rbd_dev,
						bool watch)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_options *opts = osdc->client->options;
	struct rbd_obj_request *obj_request;
	int ret;

	obj_request = rbd_obj_request_create(rbd_dev->header_name, 0, 0,
						OBJ_REQUEST_NODATA);
	if (!obj_request)
		return ERR_PTR(-ENOMEM);

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_WRITE, 1,
						  obj_request);
	if (!obj_request->osd_req) {
		ret = -ENOMEM;
		goto out;
	}

	osd_req_op_watch_init(obj_request->osd_req, 0, CEPH_OSD_OP_WATCH,
			      rbd_dev->watch_event->cookie, 0, watch);
	rbd_osd_req_format_write(obj_request);

	if (watch)
		ceph_osdc_set_request_linger(osdc, obj_request->osd_req);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;

	ret = rbd_obj_request_wait_timeout(obj_request, opts->mount_timeout);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret) {
		if (watch)
			rbd_obj_request_end(obj_request);
		goto out;
	}

	return obj_request;

out:
	rbd_obj_request_put(obj_request);
	return ERR_PTR(ret);
}
/*
 * Initiate a watch request, synchronously.
 */
static int rbd_dev_header_watch_sync(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	int ret;

	rbd_assert(!rbd_dev->watch_event);
	rbd_assert(!rbd_dev->watch_request);

	ret = ceph_osdc_create_event(osdc, rbd_watch_cb, rbd_dev,
				     &rbd_dev->watch_event);
	if (ret < 0)
		return ret;

	obj_request = rbd_obj_watch_request_helper(rbd_dev, true);
	if (IS_ERR(obj_request)) {
		ceph_osdc_cancel_event(rbd_dev->watch_event);
		rbd_dev->watch_event = NULL;
		return PTR_ERR(obj_request);
	}

	/*
3224 * A watch request is set to linger, so the underlying osd
3225 * request won't go away until we unregister it. We retain
3226 * a pointer to the object request during that time (in
3227 * rbd_dev->watch_request), so we'll keep a reference to it.
3228 * We'll drop that reference after we've unregistered it in
3229 * rbd_dev_header_unwatch_sync().
	 */
	rbd_dev->watch_request = obj_request;

	return 0;
}
/*
 * Tear down a watch request, synchronously.
 */
static void rbd_dev_header_unwatch_sync(struct rbd_device *rbd_dev)
{
	struct rbd_obj_request *obj_request;
3243 rbd_assert(rbd_dev->watch_event);
3244 rbd_assert(rbd_dev->watch_request);
3246 rbd_obj_request_end(rbd_dev->watch_request);
3247 rbd_obj_request_put(rbd_dev->watch_request);
3248 rbd_dev->watch_request = NULL;
	obj_request = rbd_obj_watch_request_helper(rbd_dev, false);
	if (!IS_ERR(obj_request))
		rbd_obj_request_put(obj_request);
	else
		rbd_warn(rbd_dev, "unable to tear down watch request (%ld)",
			 PTR_ERR(obj_request));
3257 ceph_osdc_cancel_event(rbd_dev->watch_event);
	rbd_dev->watch_event = NULL;
}

/*
 * Synchronous osd object method call.  Returns the number of bytes
 * returned in the inbound buffer, or a negative error code.
 */
static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
			     const char *object_name,
			     const char *class_name,
			     const char *method_name,
			     const void *outbound,
			     size_t outbound_size,
			     void *inbound,
			     size_t inbound_size)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages;
	u32 page_count;
	int ret;

	/*
	 * Method calls are ultimately read operations.  The result
	 * should be placed into the inbound buffer provided.  They
	 * also supply outbound data--parameters for the object
	 * method.  Currently if this is present it will be a
	 * snapshot id.
	 */
	page_count = (u32)calc_pages_for(0, inbound_size);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, 0, inbound_size,
						OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_cls_init(obj_request->osd_req, 0, CEPH_OSD_OP_CALL,
					class_name, method_name);
	if (outbound_size) {
		struct ceph_pagelist *pagelist;

		pagelist = kmalloc(sizeof (*pagelist), GFP_NOFS);
		if (!pagelist)
			goto out;

		ceph_pagelist_init(pagelist);
		ceph_pagelist_append(pagelist, outbound, outbound_size);
		osd_req_op_cls_request_data_pagelist(obj_request->osd_req, 0,
						pagelist);
	}
	osd_req_op_cls_response_data_pages(obj_request->osd_req, 0,
					obj_request->pages, inbound_size,
					0, false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret)
		goto out;

	rbd_assert(obj_request->xferred < (u64)INT_MAX);
	ret = (int)obj_request->xferred;
	ceph_copy_from_page_vector(pages, inbound, 0, obj_request->xferred);
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
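/*
 * Translate a block layer request into an image request and submit
 * it.  Runs in workqueue context (queued from rbd_queue_rq()).
 */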
static void rbd_queue_workfn(struct work_struct *work)
{
	struct request *rq = blk_mq_rq_from_pdu(work);
	struct rbd_device *rbd_dev = rq->q->queuedata;
	struct rbd_img_request *img_request;
	struct ceph_snap_context *snapc = NULL;
	u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
	u64 length = blk_rq_bytes(rq);
	enum obj_operation_type op_type;
	u64 mapping_size;
	int result;

	if (rq->cmd_type != REQ_TYPE_FS) {
		dout("%s: non-fs request type %d\n", __func__,
			(int) rq->cmd_type);
		result = -EIO;
		goto err;
	}

	if (rq->cmd_flags & REQ_DISCARD)
		op_type = OBJ_OP_DISCARD;
	else if (rq->cmd_flags & REQ_WRITE)
		op_type = OBJ_OP_WRITE;
	else
		op_type = OBJ_OP_READ;

	/* Ignore/skip any zero-length requests */

	if (!length) {
		dout("%s: zero-length request\n", __func__);
		result = 0;
		goto err_rq;
	}

	/* Only reads are allowed to a read-only device */

	if (op_type != OBJ_OP_READ) {
		if (rbd_dev->mapping.read_only) {
			result = -EROFS;
			goto err_rq;
		}
		rbd_assert(rbd_dev->spec->snap_id == CEPH_NOSNAP);
	}

	/*
	 * Quit early if the mapped snapshot no longer exists.  It's
	 * still possible the snapshot will have disappeared by the
	 * time our request arrives at the osd, but there's no sense in
	 * sending it if we already know.
	 */
	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags)) {
		dout("request for non-existent snapshot");
		rbd_assert(rbd_dev->spec->snap_id != CEPH_NOSNAP);
		result = -ENXIO;
		goto err_rq;
	}

	if (offset && length > U64_MAX - offset + 1) {
		rbd_warn(rbd_dev, "bad request range (%llu~%llu)", offset,
			 length);
		result = -EINVAL;
		goto err_rq;	/* Shouldn't happen */
	}

	blk_mq_start_request(rq);

	down_read(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;
	if (op_type != OBJ_OP_READ) {
		snapc = rbd_dev->header.snapc;
		ceph_get_snap_context(snapc);
	}
	up_read(&rbd_dev->header_rwsem);

	if (offset + length > mapping_size) {
		rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
			 length, mapping_size);
		result = -EIO;
		goto err_rq;
	}

	img_request = rbd_img_request_create(rbd_dev, offset, length, op_type,
					     snapc);
	if (!img_request) {
		result = -ENOMEM;
		goto err_rq;
	}
	img_request->rq = rq;
	snapc = NULL; /* img_request consumes a ref */

	if (op_type == OBJ_OP_DISCARD)
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_NODATA,
					      NULL);
	else
		result = rbd_img_request_fill(img_request, OBJ_REQUEST_BIO,
					      rq->bio);
	if (result)
		goto err_img_request;

	result = rbd_img_request_submit(img_request);
	if (result)
		goto err_img_request;

	return;

err_img_request:
	rbd_img_request_put(img_request);
err_rq:
	if (result)
		rbd_warn(rbd_dev, "%s %llx at %llx result %d",
			 obj_op_name(op_type), length, offset, result);
	ceph_put_snap_context(snapc);
err:
	blk_mq_end_request(rq, result);
}
static int rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
		const struct blk_mq_queue_data *bd)
{
	struct request *rq = bd->rq;
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	queue_work(rbd_wq, work);
	return BLK_MQ_RQ_QUEUE_OK;
}
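/*
 * Release the gendisk and its request queue, tearing down the blk-mq
 * tag set if the disk had been activated.
 */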
static void rbd_free_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk = rbd_dev->disk;

	if (!disk)
		return;

	rbd_dev->disk = NULL;
	if (disk->flags & GENHD_FL_UP) {
		del_gendisk(disk);
		if (disk->queue)
			blk_cleanup_queue(disk->queue);
		blk_mq_free_tag_set(&rbd_dev->tag_set);
	}
	put_disk(disk);
}
static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
				const char *object_name,
				u64 offset, u64 length, void *buf)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_obj_request *obj_request;
	struct page **pages = NULL;
	u32 page_count;
	size_t size;
	int ret;

	page_count = (u32) calc_pages_for(offset, length);
	pages = ceph_alloc_page_vector(page_count, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	ret = -ENOMEM;
	obj_request = rbd_obj_request_create(object_name, offset, length,
						OBJ_REQUEST_PAGES);
	if (!obj_request)
		goto out;

	obj_request->pages = pages;
	obj_request->page_count = page_count;

	obj_request->osd_req = rbd_osd_req_create(rbd_dev, OBJ_OP_READ, 1,
						  obj_request);
	if (!obj_request->osd_req)
		goto out;

	osd_req_op_extent_init(obj_request->osd_req, 0, CEPH_OSD_OP_READ,
					offset, length, 0, 0);
	osd_req_op_extent_osd_data_pages(obj_request->osd_req, 0,
					obj_request->pages,
					obj_request->length,
					obj_request->offset & ~PAGE_MASK,
					false, false);
	rbd_osd_req_format_read(obj_request);

	ret = rbd_obj_request_submit(osdc, obj_request);
	if (ret)
		goto out;
	ret = rbd_obj_request_wait(obj_request);
	if (ret)
		goto out;

	ret = obj_request->result;
	if (ret < 0)
		goto out;

	rbd_assert(obj_request->xferred <= (u64) SIZE_MAX);
	size = (size_t) obj_request->xferred;
	ceph_copy_from_page_vector(pages, buf, 0, size);
	rbd_assert(size <= (size_t)INT_MAX);
	ret = (int)size;
out:
	if (obj_request)
		rbd_obj_request_put(obj_request);
	else
		ceph_release_page_vector(pages, page_count);

	return ret;
}
/*
 * Read the complete header for the given rbd device.  On successful
 * return, the rbd_dev->header field will contain up-to-date
 * information about the image.
 */
static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev)
{
	struct rbd_image_header_ondisk *ondisk = NULL;
	u32 snap_count = 0;
	u64 names_size = 0;
	u32 want_count;
	int ret;

	/*
	 * The complete header will include an array of its 64-bit
	 * snapshot ids, followed by the names of those snapshots as
	 * a contiguous block of NUL-terminated strings.  Note that
	 * the number of snapshots could change by the time we read
	 * it in, in which case we re-read it.
	 */
	do {
		size_t size;

		kfree(ondisk);

		size = sizeof (*ondisk);
		size += snap_count * sizeof (struct rbd_image_snap_ondisk);
		size += names_size;
		ondisk = kmalloc(size, GFP_KERNEL);
		if (!ondisk)
			return -ENOMEM;

		ret = rbd_obj_read_sync(rbd_dev, rbd_dev->header_name,
					0, size, ondisk);
		if (ret < 0)
			goto out;
		if ((size_t)ret < size) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "short header read (want %zd got %d)",
				size, ret);
			goto out;
		}
		if (!rbd_dev_ondisk_valid(ondisk)) {
			ret = -ENXIO;
			rbd_warn(rbd_dev, "invalid header");
			goto out;
		}

		names_size = le64_to_cpu(ondisk->snap_names_len);
		want_count = snap_count;
		snap_count = le32_to_cpu(ondisk->snap_count);
	} while (snap_count != want_count);

	ret = rbd_header_from_disk(rbd_dev, ondisk);
out:
	kfree(ondisk);

	return ret;
}
/*
 * Clear the rbd device's EXISTS flag if the snapshot it's mapped to
 * has disappeared from the (just updated) snapshot context.
 */
static void rbd_exists_validate(struct rbd_device *rbd_dev)
{
	u64 snap_id;

	if (!test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags))
		return;

	snap_id = rbd_dev->spec->snap_id;
	if (snap_id == CEPH_NOSNAP)
		return;

	if (rbd_dev_snap_index(rbd_dev, snap_id) == BAD_SNAP_INDEX)
		clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
}
static void rbd_dev_update_size(struct rbd_device *rbd_dev)
{
	sector_t size;
	bool removing;

	/*
	 * Don't hold the lock while doing disk operations,
	 * or lock ordering will conflict with the bdev mutex via:
	 * rbd_add() -> blkdev_get() -> rbd_open()
	 */
	spin_lock_irq(&rbd_dev->lock);
	removing = test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags);
	spin_unlock_irq(&rbd_dev->lock);
	/*
	 * If the device is being removed, rbd_dev->disk has
	 * been destroyed, so don't try to update its size.
	 */
	if (!removing) {
		size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
		dout("setting size to %llu sectors", (unsigned long long)size);
		set_capacity(rbd_dev->disk, size);
		revalidate_disk(rbd_dev->disk);
	}
}
static int rbd_dev_refresh(struct rbd_device *rbd_dev)
{
	u64 mapping_size;
	int ret;

	down_write(&rbd_dev->header_rwsem);
	mapping_size = rbd_dev->mapping.size;

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto out;

	/*
	 * If there is a parent, see if it has disappeared due to the
	 * mapped image getting flattened.
	 */
	if (rbd_dev->parent) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto out;
	}

	if (rbd_dev->spec->snap_id == CEPH_NOSNAP) {
		rbd_dev->mapping.size = rbd_dev->header.image_size;
	} else {
		/* validate mapped snapshot's EXISTS flag */
		rbd_exists_validate(rbd_dev);
	}

out:
	up_write(&rbd_dev->header_rwsem);
	if (!ret && mapping_size != rbd_dev->mapping.size)
		rbd_dev_update_size(rbd_dev);

	return ret;
}
static int rbd_init_request(void *data, struct request *rq,
		unsigned int hctx_idx, unsigned int request_idx,
		unsigned int numa_node)
{
	struct work_struct *work = blk_mq_rq_to_pdu(rq);

	INIT_WORK(work, rbd_queue_workfn);
	return 0;
}
static struct blk_mq_ops rbd_mq_ops = {
	.queue_rq	= rbd_queue_rq,
	.map_queue	= blk_mq_map_queue,
	.init_request	= rbd_init_request,
};
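/*
 * Set up the gendisk and its blk-mq request queue for the mapped
 * image.  I/O limits (segment size, discard granularity) are derived
 * from the image's object size.
 */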
static int rbd_init_disk(struct rbd_device *rbd_dev)
{
	struct gendisk *disk;
	struct request_queue *q;
	u64 segment_size;
	int err;

	/* create gendisk info */
	disk = alloc_disk(single_major ?
			  (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
			  RBD_MINORS_PER_MAJOR);
	if (!disk)
		return -ENOMEM;

	snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
		 rbd_dev->dev_id);
	disk->major = rbd_dev->major;
	disk->first_minor = rbd_dev->minor;
	if (single_major)
		disk->flags |= GENHD_FL_EXT_DEVT;
	disk->fops = &rbd_bd_ops;
	disk->private_data = rbd_dev;

	memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
	rbd_dev->tag_set.ops = &rbd_mq_ops;
	rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
	rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
	rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE | BLK_MQ_F_SG_MERGE;
	rbd_dev->tag_set.nr_hw_queues = 1;
	rbd_dev->tag_set.cmd_size = sizeof(struct work_struct);

	err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
	if (err)
		goto out_disk;

	q = blk_mq_init_queue(&rbd_dev->tag_set);
	if (IS_ERR(q)) {
		err = PTR_ERR(q);
		goto out_tag_set;
	}

	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, q);
	/* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */

	/* set io sizes to object size */
	segment_size = rbd_obj_bytes(&rbd_dev->header);
	blk_queue_max_hw_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.max_sectors = queue_max_hw_sectors(q);
	blk_queue_max_segments(q, USHRT_MAX);
	blk_queue_max_segment_size(q, segment_size);
	blk_queue_io_min(q, segment_size);
	blk_queue_io_opt(q, segment_size);

	/* enable the discard support */
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	q->limits.discard_granularity = segment_size;
	q->limits.discard_alignment = segment_size;
	blk_queue_max_discard_sectors(q, segment_size / SECTOR_SIZE);
	q->limits.discard_zeroes_data = 1;

	if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
		q->backing_dev_info.capabilities |= BDI_CAP_STABLE_WRITES;

	disk->queue = q;
	q->queuedata = rbd_dev;

	rbd_dev->disk = disk;

	return 0;
out_tag_set:
	blk_mq_free_tag_set(&rbd_dev->tag_set);
out_disk:
	put_disk(disk);
	return err;
}
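/*
 * sysfs attributes for rbd devices; these appear under
 * /sys/bus/rbd/devices/<id>/ and are documented in
 * Documentation/ABI/testing/sysfs-bus-rbd.
 */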
static struct rbd_device *dev_to_rbd_dev(struct device *dev)
{
	return container_of(dev, struct rbd_device, dev);
}

static ssize_t rbd_size_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long)rbd_dev->mapping.size);
}
/*
 * Note this shows the features for whatever's mapped, which is not
 * necessarily the base image.
 */
static ssize_t rbd_features_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "0x%016llx\n",
		       (unsigned long long)rbd_dev->mapping.features);
}
static ssize_t rbd_major_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->major)
		return sprintf(buf, "%d\n", rbd_dev->major);

	return sprintf(buf, "(none)\n");
}
static ssize_t rbd_minor_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%d\n", rbd_dev->minor);
}
static ssize_t rbd_client_id_show(struct device *dev,
				  struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "client%lld\n",
		       ceph_client_id(rbd_dev->rbd_client->client));
}
static ssize_t rbd_pool_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
}
static ssize_t rbd_pool_id_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%llu\n",
		       (unsigned long long) rbd_dev->spec->pool_id);
}
static ssize_t rbd_name_show(struct device *dev,
			     struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	if (rbd_dev->spec->image_name)
		return sprintf(buf, "%s\n", rbd_dev->spec->image_name);

	return sprintf(buf, "(unknown)\n");
}
static ssize_t rbd_image_id_show(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
}
/*
 * Shows the name of the currently-mapped snapshot (or
 * RBD_SNAP_HEAD_NAME for the base image).
 */
static ssize_t rbd_snap_show(struct device *dev,
			     struct device_attribute *attr,
			     char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);

	return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
}
/*
 * For a v2 image, shows the chain of parent images, separated by empty
 * lines.  For v1 images or if there is no parent, shows "(no parent
 * image)".
 */
static ssize_t rbd_parent_show(struct device *dev,
			       struct device_attribute *attr,
			       char *buf)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	ssize_t count = 0;

	if (!rbd_dev->parent)
		return sprintf(buf, "(no parent image)\n");

	for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
		struct rbd_spec *spec = rbd_dev->parent_spec;

		count += sprintf(&buf[count], "%s"
			    "pool_id %llu\npool_name %s\n"
			    "image_id %s\nimage_name %s\n"
			    "snap_id %llu\nsnap_name %s\n"
			    "overlap %llu\n",
			    !count ? "" : "\n", /* first? */
			    spec->pool_id, spec->pool_name,
			    spec->image_id, spec->image_name ?: "(unknown)",
			    spec->snap_id, spec->snap_name,
			    rbd_dev->parent_overlap);
	}

	return count;
}
static ssize_t rbd_image_refresh(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf,
				 size_t size)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = rbd_dev_refresh(rbd_dev);
	if (ret)
		return ret;

	return size;
}
3949 static DEVICE_ATTR(size, S_IRUGO, rbd_size_show, NULL);
3950 static DEVICE_ATTR(features, S_IRUGO, rbd_features_show, NULL);
3951 static DEVICE_ATTR(major, S_IRUGO, rbd_major_show, NULL);
3952 static DEVICE_ATTR(minor, S_IRUGO, rbd_minor_show, NULL);
3953 static DEVICE_ATTR(client_id, S_IRUGO, rbd_client_id_show, NULL);
3954 static DEVICE_ATTR(pool, S_IRUGO, rbd_pool_show, NULL);
3955 static DEVICE_ATTR(pool_id, S_IRUGO, rbd_pool_id_show, NULL);
3956 static DEVICE_ATTR(name, S_IRUGO, rbd_name_show, NULL);
3957 static DEVICE_ATTR(image_id, S_IRUGO, rbd_image_id_show, NULL);
3958 static DEVICE_ATTR(refresh, S_IWUSR, NULL, rbd_image_refresh);
3959 static DEVICE_ATTR(current_snap, S_IRUGO, rbd_snap_show, NULL);
3960 static DEVICE_ATTR(parent, S_IRUGO, rbd_parent_show, NULL);
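/*
 * Example usage from userspace (illustrative only; assumes a mapped
 * device with id 0):
 *
 *   $ cat /sys/bus/rbd/devices/0/size
 *   $ cat /sys/bus/rbd/devices/0/current_snap
 *   # echo 1 > /sys/bus/rbd/devices/0/refresh
 */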
static struct attribute *rbd_attrs[] = {
	&dev_attr_size.attr,
	&dev_attr_features.attr,
	&dev_attr_major.attr,
	&dev_attr_minor.attr,
	&dev_attr_client_id.attr,
	&dev_attr_pool.attr,
	&dev_attr_pool_id.attr,
	&dev_attr_name.attr,
	&dev_attr_image_id.attr,
	&dev_attr_current_snap.attr,
	&dev_attr_parent.attr,
	&dev_attr_refresh.attr,
	NULL
};

static struct attribute_group rbd_attr_group = {
	.attrs = rbd_attrs,
};

static const struct attribute_group *rbd_attr_groups[] = {
	&rbd_attr_group,
	NULL
};

static void rbd_dev_release(struct device *dev);

static struct device_type rbd_device_type = {
	.name		= "rbd",
	.groups		= rbd_attr_groups,
	.release	= rbd_dev_release,
};
static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
{
	kref_get(&spec->kref);

	return spec;
}
4002 static void rbd_spec_free(struct kref *kref);
static void rbd_spec_put(struct rbd_spec *spec)
{
	if (spec)
		kref_put(&spec->kref, rbd_spec_free);
}
static struct rbd_spec *rbd_spec_alloc(void)
{
	struct rbd_spec *spec;

	spec = kzalloc(sizeof (*spec), GFP_KERNEL);
	if (!spec)
		return NULL;

	spec->pool_id = CEPH_NOPOOL;
	spec->snap_id = CEPH_NOSNAP;
	kref_init(&spec->kref);

	return spec;
}
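/*
 * Free an rbd_spec along with all of its dynamically allocated names.
 */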
static void rbd_spec_free(struct kref *kref)
{
	struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);

	kfree(spec->pool_name);
	kfree(spec->image_id);
	kfree(spec->image_name);
	kfree(spec->snap_name);
	kfree(spec);
}
static void rbd_dev_release(struct device *dev)
{
	struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
	bool need_put = !!rbd_dev->opts;

	rbd_put_client(rbd_dev->rbd_client);
	rbd_spec_put(rbd_dev->spec);
	kfree(rbd_dev->opts);
	kfree(rbd_dev);

	/*
	 * This is racy, but way better than putting the module_put()
	 * outside of the release callback.  The race window is pretty
	 * small, so doing something similar to dm (dm-builtin.c) is
	 * overkill.
	 */
	if (need_put)
		module_put(THIS_MODULE);
}
static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
					 struct rbd_spec *spec,
					 struct rbd_options *opts)
{
	struct rbd_device *rbd_dev;

	rbd_dev = kzalloc(sizeof (*rbd_dev), GFP_KERNEL);
	if (!rbd_dev)
		return NULL;

	spin_lock_init(&rbd_dev->lock);
	atomic_set(&rbd_dev->parent_ref, 0);
	INIT_LIST_HEAD(&rbd_dev->node);
	init_rwsem(&rbd_dev->header_rwsem);

	rbd_dev->dev.bus = &rbd_bus_type;
	rbd_dev->dev.type = &rbd_device_type;
	rbd_dev->dev.parent = &rbd_root_dev;
	device_initialize(&rbd_dev->dev);

	rbd_dev->rbd_client = rbdc;
	rbd_dev->spec = spec;
	rbd_dev->opts = opts;

	/* Initialize the layout used for all rbd requests */

	rbd_dev->layout.fl_stripe_unit = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_stripe_count = cpu_to_le32(1);
	rbd_dev->layout.fl_object_size = cpu_to_le32(1 << RBD_MAX_OBJ_ORDER);
	rbd_dev->layout.fl_pg_pool = cpu_to_le32((u32) spec->pool_id);

	/*
	 * If this is a mapping rbd_dev (as opposed to a parent one),
	 * pin our module.  We have a ref from do_rbd_add(), so use
	 * __module_get().
	 */
	if (rbd_dev->opts)
		__module_get(THIS_MODULE);

	return rbd_dev;
}
static void rbd_dev_destroy(struct rbd_device *rbd_dev)
{
	if (rbd_dev)
		put_device(&rbd_dev->dev);
}
/*
 * Get the size and object order for an image snapshot, or if
 * snap_id is CEPH_NOSNAP, gets this information for the base
 * image.
 */
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u8 *order, u64 *snap_size)
{
	__le64 snapid = cpu_to_le64(snap_id);
	int ret;
	struct {
		u8 order;
		__le64 size;
	} __attribute__ ((packed)) size_buf = { 0 };

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_size",
				&snapid, sizeof (snapid),
				&size_buf, sizeof (size_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (size_buf))
		return -ERANGE;

	if (order) {
		*order = size_buf.order;
		dout(" order %u", (unsigned int)*order);
	}
	*snap_size = le64_to_cpu(size_buf.size);

	dout(" snap_id 0x%016llx snap_size = %llu\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_size);

	return 0;
}
static int rbd_dev_v2_image_size(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
					&rbd_dev->header.obj_order,
					&rbd_dev->header.image_size);
}
static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev)
{
	void *reply_buf;
	int ret;
	void *p;

	reply_buf = kzalloc(RBD_OBJ_PREFIX_LEN_MAX, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_object_prefix", NULL, 0,
				reply_buf, RBD_OBJ_PREFIX_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	rbd_dev->header.object_prefix = ceph_extract_encoded_string(&p,
						p + ret, NULL, GFP_NOIO);
	ret = 0;

	if (IS_ERR(rbd_dev->header.object_prefix)) {
		ret = PTR_ERR(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	} else {
		dout(" object_prefix = %s\n", rbd_dev->header.object_prefix);
	}
out:
	kfree(reply_buf);

	return ret;
}
static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_features)
{
	__le64 snapid = cpu_to_le64(snap_id);
	struct {
		__le64 features;
		__le64 incompat;
	} __attribute__ ((packed)) features_buf = { 0 };
	u64 incompat;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_features",
				&snapid, sizeof (snapid),
				&features_buf, sizeof (features_buf));
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < sizeof (features_buf))
		return -ERANGE;

	incompat = le64_to_cpu(features_buf.incompat);
	if (incompat & ~RBD_FEATURES_SUPPORTED)
		return -ENXIO;

	*snap_features = le64_to_cpu(features_buf.features);

	dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
		(unsigned long long)snap_id,
		(unsigned long long)*snap_features,
		(unsigned long long)le64_to_cpu(features_buf.incompat));

	return 0;
}
static int rbd_dev_v2_features(struct rbd_device *rbd_dev)
{
	return _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
						&rbd_dev->header.features);
}
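/*
 * Query a format 2 image's parent (pool id, image id, snap id and the
 * overlap with the parent) via the "get_parent" class method.  A pool
 * id of CEPH_NOPOOL means the image has no parent, or has been
 * flattened.
 */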
static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev)
{
	struct rbd_spec *parent_spec;
	size_t size;
	void *reply_buf = NULL;
	__le64 snapid;
	void *p;
	void *end;
	u64 pool_id;
	char *image_id;
	u64 snap_id;
	u64 overlap;
	int ret;

	parent_spec = rbd_spec_alloc();
	if (!parent_spec)
		return -ENOMEM;

	size = sizeof (__le64) +				/* pool_id */
		sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX +	/* image_id */
		sizeof (__le64) +				/* snap_id */
		sizeof (__le64);				/* overlap */
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf) {
		ret = -ENOMEM;
		goto out_err;
	}

	snapid = cpu_to_le64(rbd_dev->spec->snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_parent",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out_err;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, pool_id, out_err);
	if (pool_id == CEPH_NOPOOL) {
		/*
		 * Either the parent never existed, or we have
		 * record of it but the image got flattened so it no
		 * longer has a parent.  When the parent of a
		 * layered image disappears we immediately set the
		 * overlap to 0.  The effect of this is that all new
		 * requests will be treated as if the image had no
		 * parent.
		 */
		if (rbd_dev->parent_overlap) {
			rbd_dev->parent_overlap = 0;
			rbd_dev_parent_put(rbd_dev);
			pr_info("%s: clone image has been flattened\n",
				rbd_dev->disk->disk_name);
		}

		goto out;	/* No parent?  No problem. */
	}

	/* The ceph file layout needs to fit pool id in 32 bits */

	ret = -EIO;
	if (pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "parent pool id too large (%llu > %u)",
			(unsigned long long)pool_id, U32_MAX);
		goto out_err;
	}

	image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(image_id)) {
		ret = PTR_ERR(image_id);
		goto out_err;
	}
	ceph_decode_64_safe(&p, end, snap_id, out_err);
	ceph_decode_64_safe(&p, end, overlap, out_err);

	/*
	 * The parent won't change (except when the clone is
	 * flattened, and we already handled that).  So we only need
	 * to record the parent spec if we have not already done so.
	 */
	if (!rbd_dev->parent_spec) {
		parent_spec->pool_id = pool_id;
		parent_spec->image_id = image_id;
		parent_spec->snap_id = snap_id;
		rbd_dev->parent_spec = parent_spec;
		parent_spec = NULL;	/* rbd_dev now owns this */
	} else {
		kfree(image_id);
	}

	/*
	 * We always update the parent overlap.  If it's zero we issue
	 * a warning, as we will proceed as if there was no parent.
	 */
	if (!overlap) {
		if (parent_spec) {
			/* refresh, careful to warn just once */
			if (rbd_dev->parent_overlap)
				rbd_warn(rbd_dev,
				    "clone now standalone (overlap became 0)");
		} else {
			/* initial probe */
			rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
		}
	}
	rbd_dev->parent_overlap = overlap;

out:
	ret = 0;
out_err:
	kfree(reply_buf);
	rbd_spec_put(parent_spec);

	return ret;
}
static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev)
{
	struct {
		__le64 stripe_unit;
		__le64 stripe_count;
	} __attribute__ ((packed)) striping_info_buf = { 0 };
	size_t size = sizeof (striping_info_buf);
	void *p;
	u64 obj_size;
	u64 stripe_unit;
	u64 stripe_count;
	int ret;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_stripe_unit_count", NULL, 0,
				(char *)&striping_info_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		return ret;
	if (ret < size)
		return -ERANGE;

	/*
	 * We don't actually support the "fancy striping" feature
	 * (STRIPINGV2) yet, but if the striping sizes are the
	 * defaults the behavior is the same as before.  So find
	 * out, and only fail if the image has non-default values.
	 */
	obj_size = (u64)1 << rbd_dev->header.obj_order;
	p = &striping_info_buf;
	stripe_unit = ceph_decode_64(&p);
	if (stripe_unit != obj_size) {
		rbd_warn(rbd_dev, "unsupported stripe unit "
				"(got %llu want %llu)",
				stripe_unit, obj_size);
		return -EINVAL;
	}
	stripe_count = ceph_decode_64(&p);
	if (stripe_count != 1) {
		rbd_warn(rbd_dev, "unsupported stripe count "
				"(got %llu want 1)", stripe_count);
		return -EINVAL;
	}
	rbd_dev->header.stripe_unit = stripe_unit;
	rbd_dev->header.stripe_count = stripe_count;

	return 0;
}
static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
{
	size_t image_id_size;
	char *image_id;
	void *p;
	void *end;
	size_t size;
	void *reply_buf = NULL;
	size_t len = 0;
	char *image_name = NULL;
	int ret;

	rbd_assert(!rbd_dev->spec->image_name);

	len = strlen(rbd_dev->spec->image_id);
	image_id_size = sizeof (__le32) + len;
	image_id = kmalloc(image_id_size, GFP_KERNEL);
	if (!image_id)
		return NULL;

	p = image_id;
	end = image_id + image_id_size;
	ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);

	size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		goto out;

	ret = rbd_obj_method_sync(rbd_dev, RBD_DIRECTORY,
				"rbd", "dir_get_name",
				image_id, image_id_size,
				reply_buf, size);
	if (ret < 0)
		goto out;
	p = reply_buf;
	end = reply_buf + ret;

	image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
	if (IS_ERR(image_name))
		image_name = NULL;
	else
		dout("%s: name is %s len is %zd\n", __func__, image_name, len);
out:
	kfree(reply_buf);
	kfree(image_id);

	return image_name;
}
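/*
 * Look up a format 1 snapshot id by name, walking the list of
 * NUL-terminated names that follows the snapshot context.
 */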
static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	const char *snap_name;
	u32 which = 0;

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which < snapc->num_snaps) {
		if (!strcmp(name, snap_name))
			return snapc->snaps[which];
		snap_name += strlen(snap_name) + 1;
		which++;
	}
	return CEPH_NOSNAP;
}
static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 snap_id;
	u32 which;
	bool found = false;

	for (which = 0; !found && which < snapc->num_snaps; which++) {
		const char *snap_name;

		snap_id = snapc->snaps[which];
		snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
		if (IS_ERR(snap_name)) {
			/* ignore no-longer existing snapshots */
			if (PTR_ERR(snap_name) == -ENOENT)
				continue;
			else
				break;
		} else {
			found = !strcmp(name, snap_name);
			kfree(snap_name);
		}
	}
	return found ? snap_id : CEPH_NOSNAP;
}
/*
 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
 * no snapshot by that name is found, or if an error occurs.
 */
static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
{
	if (rbd_dev->image_format == 1)
		return rbd_v1_snap_id_by_name(rbd_dev, name);

	return rbd_v2_snap_id_by_name(rbd_dev, name);
}
/*
 * An image being mapped will have everything but the snap id.
 */
static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;

	rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
	rbd_assert(spec->image_id && spec->image_name);
	rbd_assert(spec->snap_name);

	if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
		u64 snap_id;

		snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
		if (snap_id == CEPH_NOSNAP)
			return -ENOENT;

		spec->snap_id = snap_id;
	} else {
		spec->snap_id = CEPH_NOSNAP;
	}

	return 0;
}
/*
 * A parent image will have all ids but none of the names.
 *
 * All names in an rbd spec are dynamically allocated.  It's OK if we
 * can't figure out the name for an image id.
 */
static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct rbd_spec *spec = rbd_dev->spec;
	const char *pool_name;
	const char *image_name;
	const char *snap_name;
	int ret;

	rbd_assert(spec->pool_id != CEPH_NOPOOL);
	rbd_assert(spec->image_id);
	rbd_assert(spec->snap_id != CEPH_NOSNAP);

	/* Get the pool name; we have to make our own copy of this */

	pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
	if (!pool_name) {
		rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
		return -EIO;
	}
	pool_name = kstrdup(pool_name, GFP_KERNEL);
	if (!pool_name)
		return -ENOMEM;

	/* Fetch the image name; tolerate failure here */

	image_name = rbd_dev_image_name(rbd_dev);
	if (!image_name)
		rbd_warn(rbd_dev, "unable to get image name");

	/* Fetch the snapshot name */

	snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
	if (IS_ERR(snap_name)) {
		ret = PTR_ERR(snap_name);
		goto out_err;
	}

	spec->pool_name = pool_name;
	spec->image_name = image_name;
	spec->snap_name = snap_name;

	return 0;

out_err:
	kfree(image_name);
	kfree(pool_name);

	return ret;
}
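/*
 * Fetch and decode the image's snapshot context via the
 * "get_snapcontext" class method.
 */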
static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev)
{
	struct ceph_snap_context *snapc;
	void *reply_buf, *p, *end;
	size_t size;
	u64 seq;
	u32 snap_count;
	u32 i;
	int ret;

	/*
	 * We'll need room for the seq value (maximum snapshot id),
	 * snapshot count, and array of that many snapshot ids.
	 * For now we have a fixed upper limit on the number we're
	 * prepared to receive.
	 */
	size = sizeof (__le64) + sizeof (__le32) +
			RBD_MAX_SNAP_COUNT * sizeof (__le64);
	reply_buf = kzalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return -ENOMEM;

	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapcontext", NULL, 0,
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0)
		goto out;

	p = reply_buf;
	end = reply_buf + ret;
	ret = -ERANGE;
	ceph_decode_64_safe(&p, end, seq, out);
	ceph_decode_32_safe(&p, end, snap_count, out);

	/*
	 * Make sure the reported number of snapshot ids wouldn't go
	 * beyond the end of our buffer.  But before checking that,
	 * make sure the computed size of the snapshot context we
	 * allocate is representable in a size_t.
	 */
	if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
				/ sizeof (u64)) {
		ret = -EINVAL;
		goto out;
	}
	if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
		goto out;
	ret = 0;

	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc) {
		ret = -ENOMEM;
		goto out;
	}
	snapc->seq = seq;
	for (i = 0; i < snap_count; i++)
		snapc->snaps[i] = ceph_decode_64(&p);

	ceph_put_snap_context(rbd_dev->header.snapc);
	rbd_dev->header.snapc = snapc;

	dout("  snap context seq = %llu, snap_count = %u\n",
		(unsigned long long)seq, (unsigned int)snap_count);
out:
	kfree(reply_buf);
	return ret;
}

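/*
 * For reference, the "get_snapcontext" reply decoded above is laid
 * out as (all fields little-endian):
 *
 *	__le64 seq			(maximum snapshot id)
 *	__le32 snap_count
 *	__le64 snaps[snap_count]
 *
 * which is why the reply buffer is sized for one seq value, one
 * count, and up to RBD_MAX_SNAP_COUNT snapshot ids.
 */
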
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	void *reply_buf, *p, *end;
	char *snap_name;
	size_t size;
	__le64 snapid;
	int ret;

	size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
	reply_buf = kmalloc(size, GFP_KERNEL);
	if (!reply_buf)
		return ERR_PTR(-ENOMEM);

	snapid = cpu_to_le64(snap_id);
	ret = rbd_obj_method_sync(rbd_dev, rbd_dev->header_name,
				"rbd", "get_snapshot_name",
				&snapid, sizeof (snapid),
				reply_buf, size);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret < 0) {
		snap_name = ERR_PTR(ret);
		goto out;
	}

	p = reply_buf;
	end = reply_buf + ret;
	snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
	if (IS_ERR(snap_name))
		goto out;

	dout("  snap_id 0x%016llx snap_name = %s\n",
		(unsigned long long)snap_id, snap_name);
out:
	kfree(reply_buf);
	return snap_name;
}

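/*
 * Note: the "get_snapshot_name" reply handled above is a single
 * length-prefixed string (a __le32 length followed by that many
 * bytes), which is the encoding ceph_extract_encoded_string()
 * expects and why the buffer is sized to hold a __le32 plus
 * RBD_MAX_SNAP_NAME_LEN bytes.
 */
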
static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev)
{
	bool first_time = rbd_dev->header.object_prefix == NULL;
	int ret;

	ret = rbd_dev_v2_image_size(rbd_dev);
	if (ret)
		return ret;

	if (first_time) {
		ret = rbd_dev_v2_header_onetime(rbd_dev);
		if (ret)
			return ret;
	}

	ret = rbd_dev_v2_snap_context(rbd_dev);
	if (ret && first_time) {
		kfree(rbd_dev->header.object_prefix);
		rbd_dev->header.object_prefix = NULL;
	}

	return ret;
}

static int rbd_dev_header_info(struct rbd_device *rbd_dev)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_header_info(rbd_dev);

	return rbd_dev_v2_header_info(rbd_dev);
}

/*
 * Get a unique rbd identifier for the given new rbd_dev, and add
 * the rbd_dev to the global list.
 */
static int rbd_dev_id_get(struct rbd_device *rbd_dev)
{
	int new_dev_id;

	new_dev_id = ida_simple_get(&rbd_dev_id_ida,
				    0, minor_to_rbd_dev_id(1 << MINORBITS),
				    GFP_KERNEL);
	if (new_dev_id < 0)
		return new_dev_id;

	rbd_dev->dev_id = new_dev_id;

	spin_lock(&rbd_dev_list_lock);
	list_add_tail(&rbd_dev->node, &rbd_dev_list);
	spin_unlock(&rbd_dev_list_lock);

	dout("rbd_dev %p given dev id %d\n", rbd_dev, rbd_dev->dev_id);
	return 0;
}

/*
 * Remove an rbd_dev from the global list, and record that its
 * identifier is no longer in use.
 */
static void rbd_dev_id_put(struct rbd_device *rbd_dev)
{
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);

	ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);

	dout("rbd_dev %p released dev id %d\n", rbd_dev, rbd_dev->dev_id);
}

/*
 * Skips over white space at *buf, and updates *buf to point to the
 * first found non-space character (if any).  Returns the length of
 * the token (string of non-white space characters) found.  Note
 * that *buf must be terminated with '\0'.
 */
static inline size_t next_token(const char **buf)
{
	/*
	 * These are the characters that produce nonzero for
	 * isspace() in the "C" and "POSIX" locales.
	 */
	const char *spaces = " \f\n\r\t\v";

	*buf += strspn(*buf, spaces);	/* Find start of token */

	return strcspn(*buf, spaces);	/* Return token length */
}

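/*
 * Illustrative example: with *buf pointing at "  pool img",
 * next_token(&buf) advances *buf to "pool img" and returns 4.
 * The caller is responsible for consuming those 4 characters
 * before calling next_token() again.
 */
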
/*
 * Finds the next token in *buf, dynamically allocates a buffer big
 * enough to hold a copy of it, and copies the token into the new
 * buffer.  The copy is guaranteed to be terminated with '\0'.  Note
 * that a duplicate buffer is created even for a zero-length token.
 *
 * Returns a pointer to the newly-allocated duplicate, or a null
 * pointer if memory for the duplicate was not available.  If
 * the lenp argument is a non-null pointer, the length of the token
 * (not including the '\0') is returned in *lenp.
 *
 * If successful, the *buf pointer will be updated to point beyond
 * the end of the found token.
 *
 * Note: uses GFP_KERNEL for allocation.
 */
static inline char *dup_token(const char **buf, size_t *lenp)
{
	char *dup;
	size_t len;

	len = next_token(buf);
	dup = kmemdup(*buf, len + 1, GFP_KERNEL);
	if (!dup)
		return NULL;
	*(dup + len) = '\0';
	*buf += len;

	if (lenp)
		*lenp = len;

	return dup;
}

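/*
 * A minimal usage sketch (illustrative only):
 *
 *	const char *p = "rbd myimage";
 *	char *pool = dup_token(&p, NULL);
 *
 * leaves pool pointing at a copy of "rbd" and p at " myimage"; each
 * successful call steps *buf past the token just copied, so repeated
 * calls walk the buffer one token at a time.
 */
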
/*
 * Parse the options provided for an "rbd add" (i.e., rbd image
 * mapping) request.  These arrive via a write to /sys/bus/rbd/add,
 * and the data written is passed here via a NUL-terminated buffer.
 * Returns 0 if successful or an error code otherwise.
 *
 * The information extracted from these options is recorded in
 * the other parameters, which return dynamically-allocated
 * structures:
 *  ceph_opts
 *      The address of a pointer that will refer to a ceph options
 *      structure.  Caller must release the returned pointer using
 *      ceph_destroy_options() when it is no longer needed.
 *  opts
 *      Address of an rbd options pointer.  Fully initialized by
 *      this function; caller must release with kfree().
 *  rbd_spec
 *      Address of an rbd image specification pointer.  Fully
 *      initialized by this function based on parsed options.
 *      Caller must release with rbd_spec_put().
 *
 * The options passed take this form:
 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_name>]
 * where:
 *  <mon_addrs>
 *      A comma-separated list of one or more monitor addresses.
 *      A monitor address is an ip address, optionally followed
 *      by a port number (separated by a colon).
 *        I.e.:  ip1[:port1][,ip2[:port2]...]
 *  <options>
 *      A comma-separated list of ceph and/or rbd options.
 *  <pool_name>
 *      The name of the rados pool containing the rbd image.
 *  <image_name>
 *      The name of the image in that pool to map.
 *  <snap_name>
 *      An optional snapshot name.  If provided, the mapping will
 *      present data from the image at the time that snapshot was
 *      created.  The image head is used if no snapshot name is
 *      provided.  Snapshot mappings are always read-only.
 */

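/*
 * For example (all values illustrative only), an image might be
 * mapped with a write like:
 *
 *	$ echo "1.2.3.4:6789 name=admin rbd myimage" > /sys/bus/rbd/add
 */
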
static int rbd_add_parse_args(const char *buf,
				struct ceph_options **ceph_opts,
				struct rbd_options **opts,
				struct rbd_spec **rbd_spec)
{
	size_t len;
	char *options;
	const char *mon_addrs;
	char *snap_name;
	size_t mon_addrs_size;
	struct rbd_spec *spec = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct ceph_options *copts;
	int ret;

	/* The first four tokens are required */

	len = next_token(&buf);
	if (!len) {
		rbd_warn(NULL, "no monitor address(es) provided");
		return -EINVAL;
	}
	mon_addrs = buf;
	mon_addrs_size = len + 1;
	buf += len;

	ret = -EINVAL;
	options = dup_token(&buf, NULL);
	if (!options)
		return -ENOMEM;
	if (!*options) {
		rbd_warn(NULL, "no options provided");
		goto out_err;
	}

	spec = rbd_spec_alloc();
	if (!spec)
		goto out_mem;

	spec->pool_name = dup_token(&buf, NULL);
	if (!spec->pool_name)
		goto out_mem;
	if (!*spec->pool_name) {
		rbd_warn(NULL, "no pool name provided");
		goto out_err;
	}

	spec->image_name = dup_token(&buf, NULL);
	if (!spec->image_name)
		goto out_mem;
	if (!*spec->image_name) {
		rbd_warn(NULL, "no image name provided");
		goto out_err;
	}

	/*
	 * Snapshot name is optional; default is to use "-"
	 * (indicating the head/no snapshot).
	 */
	len = next_token(&buf);
	if (!len) {
		buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
		len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
	} else if (len > RBD_MAX_SNAP_NAME_LEN) {
		ret = -ENAMETOOLONG;
		goto out_err;
	}
	snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
	if (!snap_name)
		goto out_mem;
	*(snap_name + len) = '\0';
	spec->snap_name = snap_name;

	/* Initialize all rbd options to the defaults */

	rbd_opts = kzalloc(sizeof (*rbd_opts), GFP_KERNEL);
	if (!rbd_opts)
		goto out_mem;

	rbd_opts->read_only = RBD_READ_ONLY_DEFAULT;
	rbd_opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;

	copts = ceph_parse_options(options, mon_addrs,
					mon_addrs + mon_addrs_size - 1,
					parse_rbd_opts_token, rbd_opts);
	if (IS_ERR(copts)) {
		ret = PTR_ERR(copts);
		goto out_err;
	}
	kfree(options);

	*ceph_opts = copts;
	*opts = rbd_opts;
	*rbd_spec = spec;

	return 0;
out_mem:
	ret = -ENOMEM;
out_err:
	kfree(rbd_opts);
	rbd_spec_put(spec);
	kfree(options);

	return ret;
}

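/*
 * Tracing the example add string above through this parser:
 * mon_addrs covers "1.2.3.4:6789", options is "name=admin", the
 * pool name is "rbd", the image name is "myimage", and with no
 * fifth token the snapshot name defaults to RBD_SNAP_HEAD_NAME
 * ("-"), i.e. the image head.
 */
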
/*
 * Return pool id (>= 0) or a negative error code.
 */
static int rbd_add_get_pool_id(struct rbd_client *rbdc, const char *pool_name)
{
	struct ceph_options *opts = rbdc->client->options;
	u64 newest_epoch;
	int tries = 0;
	int ret;

again:
	ret = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, pool_name);
	if (ret == -ENOENT && tries++ < 1) {
		ret = ceph_monc_do_get_version(&rbdc->client->monc, "osdmap",
					       &newest_epoch);
		if (ret < 0)
			return ret;

		if (rbdc->client->osdc.osdmap->epoch < newest_epoch) {
			ceph_monc_request_next_osdmap(&rbdc->client->monc);
			(void) ceph_monc_wait_osdmap(&rbdc->client->monc,
						     newest_epoch,
						     opts->mount_timeout);
			goto again;
		}
		/* the osdmap we have is new enough */
		return -ENOENT;
	}

	return ret;
}

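/*
 * Note on the retry above: a pool created after our cached osdmap
 * was fetched won't be found on the first lookup, so on -ENOENT we
 * ask the monitor for the newest osdmap epoch, wait until our map
 * catches up, and then try the name lookup exactly once more.
 */
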
/*
 * An rbd format 2 image has a unique identifier, distinct from the
 * name given to it by the user.  Internally, that identifier is
 * what's used to specify the names of objects related to the image.
 *
 * A special "rbd id" object is used to map an rbd image name to its
 * id.  If that object doesn't exist, then there is no v2 rbd image
 * with the supplied name.
 *
 * This function will record the given rbd_dev's image_id field if
 * it can be determined, and in that case will return 0.  If any
 * errors occur a negative errno will be returned and the rbd_dev's
 * image_id field will be unchanged (and should be NULL).
 */
static int rbd_dev_image_id(struct rbd_device *rbd_dev)
{
	char *object_name;
	void *response;
	char *image_id;
	size_t size;
	int ret;

	/*
	 * When probing a parent image, the image id is already
	 * known (and the image name likely is not).  There's no
	 * need to fetch the image id again in this case.  We
	 * do still need to set the image format though.
	 */
	if (rbd_dev->spec->image_id) {
		rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
		return 0;
	}

	/*
	 * First, see if the format 2 image id file exists, and if
	 * so, get the image's persistent id from it.
	 */
	size = sizeof (RBD_ID_PREFIX) + strlen(rbd_dev->spec->image_name);
	object_name = kmalloc(size, GFP_NOIO);
	if (!object_name)
		return -ENOMEM;
	sprintf(object_name, "%s%s", RBD_ID_PREFIX, rbd_dev->spec->image_name);
	dout("rbd id object name is %s\n", object_name);

	/* Response will be an encoded string, which includes a length */

	size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
	response = kzalloc(size, GFP_NOIO);
	if (!response) {
		ret = -ENOMEM;
		goto out;
	}

	/* If it doesn't exist we'll assume it's a format 1 image */

	ret = rbd_obj_method_sync(rbd_dev, object_name,
				"rbd", "get_id", NULL, 0,
				response, RBD_IMAGE_ID_LEN_MAX);
	dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
	if (ret == -ENOENT) {
		image_id = kstrdup("", GFP_KERNEL);
		ret = image_id ? 0 : -ENOMEM;
		if (!ret)
			rbd_dev->image_format = 1;
	} else if (ret >= 0) {
		void *p = response;

		image_id = ceph_extract_encoded_string(&p, p + ret,
						NULL, GFP_NOIO);
		ret = PTR_ERR_OR_ZERO(image_id);
		if (!ret)
			rbd_dev->image_format = 2;
	}

	if (!ret) {
		rbd_dev->spec->image_id = image_id;
		dout("image_id is %s\n", image_id);
	}
out:
	kfree(response);
	kfree(object_name);
	return ret;
}

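/*
 * Naming sketch (assuming the RBD_ID_PREFIX of "rbd_id." defined in
 * rbd_types.h): probing a format 2 image named "myimage" reads the
 * object "rbd_id.myimage", whose payload is the length-prefixed
 * image id string recorded above.
 */
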
/*
 * Undo whatever state changes are made by v1 or v2 header info
 * routines.
 */
static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
{
	struct rbd_image_header *header;

	rbd_dev_parent_put(rbd_dev);

	/* Free dynamic fields from the header, then zero it out */

	header = &rbd_dev->header;
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);
	kfree(header->object_prefix);
	memset(header, 0, sizeof (*header));
}

static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_dev_v2_object_prefix(rbd_dev);
	if (ret)
		return ret;

	/*
	 * Get and check the features for the image.  Currently the
	 * features are assumed to never change.
	 */
	ret = rbd_dev_v2_features(rbd_dev);
	if (ret)
		goto out_err;

	/* If the image supports fancy striping, get its parameters */

	if (rbd_dev->header.features & RBD_FEATURE_STRIPINGV2) {
		ret = rbd_dev_v2_striping_info(rbd_dev);
		if (ret < 0)
			goto out_err;
	}
	/* No support for crypto and compression type format 2 images */

	return 0;
out_err:
	rbd_dev->header.features = 0;
	kfree(rbd_dev->header.object_prefix);
	rbd_dev->header.object_prefix = NULL;
	return ret;
}

/*
 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
 * rbd_dev_image_probe() recursion depth, which means it's also the
 * length of the already discovered part of the parent chain.
 */
static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
{
	struct rbd_device *parent = NULL;
	int ret;

	if (!rbd_dev->parent_spec)
		return 0;

	if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
		pr_info("parent chain is too long (%d)\n", depth);
		ret = -EINVAL;
		goto out_err;
	}

	parent = rbd_dev_create(rbd_dev->rbd_client, rbd_dev->parent_spec,
				NULL);
	if (!parent) {
		ret = -ENOMEM;
		goto out_err;
	}

	/*
	 * Images related by parent/child relationships always share
	 * rbd_client and spec/parent_spec, so bump their refcounts.
	 */
	__rbd_get_client(rbd_dev->rbd_client);
	rbd_spec_get(rbd_dev->parent_spec);

	ret = rbd_dev_image_probe(parent, depth);
	if (ret < 0)
		goto out_err;

	rbd_dev->parent = parent;
	atomic_set(&rbd_dev->parent_ref, 1);
	return 0;

out_err:
	rbd_dev_unparent(rbd_dev);
	if (parent)
		rbd_dev_destroy(parent);
	return ret;
}

static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
{
	int ret;

	/* Get an id and fill in device name. */

	ret = rbd_dev_id_get(rbd_dev);
	if (ret)
		return ret;

	BUILD_BUG_ON(DEV_NAME_LEN
			< sizeof (RBD_DRV_NAME) + MAX_INT_FORMAT_WIDTH);
	sprintf(rbd_dev->name, "%s%d", RBD_DRV_NAME, rbd_dev->dev_id);

	/* Record our major and minor device numbers. */

	if (!single_major) {
		ret = register_blkdev(0, rbd_dev->name);
		if (ret < 0)
			goto err_out_id;
		rbd_dev->major = ret;
		rbd_dev->minor = 0;
	} else {
		rbd_dev->major = rbd_major;
		rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
	}

	/* Set up the blkdev mapping. */

	ret = rbd_init_disk(rbd_dev);
	if (ret)
		goto err_out_blkdev;

	ret = rbd_dev_mapping_set(rbd_dev);
	if (ret)
		goto err_out_disk;

	set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
	set_disk_ro(rbd_dev->disk, rbd_dev->mapping.read_only);

	dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
	ret = device_add(&rbd_dev->dev);
	if (ret)
		goto err_out_mapping;

	/* Everything's ready.  Announce the disk to the world. */

	set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	add_disk(rbd_dev->disk);

	pr_info("%s: added with size 0x%llx\n", rbd_dev->disk->disk_name,
		(unsigned long long) rbd_dev->mapping.size);

	return 0;

err_out_mapping:
	rbd_dev_mapping_clear(rbd_dev);
err_out_disk:
	rbd_free_disk(rbd_dev);
err_out_blkdev:
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
err_out_id:
	rbd_dev_id_put(rbd_dev);
	return ret;
}

static int rbd_dev_header_name(struct rbd_device *rbd_dev)
{
	struct rbd_spec *spec = rbd_dev->spec;
	size_t size;

	/* Record the header object name for this rbd image. */

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));

	if (rbd_dev->image_format == 1)
		size = strlen(spec->image_name) + sizeof (RBD_SUFFIX);
	else
		size = sizeof (RBD_HEADER_PREFIX) + strlen(spec->image_id);

	rbd_dev->header_name = kmalloc(size, GFP_KERNEL);
	if (!rbd_dev->header_name)
		return -ENOMEM;

	if (rbd_dev->image_format == 1)
		sprintf(rbd_dev->header_name, "%s%s",
			spec->image_name, RBD_SUFFIX);
	else
		sprintf(rbd_dev->header_name, "%s%s",
			RBD_HEADER_PREFIX, spec->image_id);

	return 0;
}

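/*
 * Naming sketch (assuming the RBD_SUFFIX ".rbd" and the
 * RBD_HEADER_PREFIX "rbd_header." defined in rbd_types.h): a format
 * 1 image named "myimage" gets header object "myimage.rbd", while a
 * format 2 image with id "1234" gets header object "rbd_header.1234".
 */
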
static void rbd_dev_image_release(struct rbd_device *rbd_dev)
{
	rbd_dev_unprobe(rbd_dev);
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;

	rbd_dev_destroy(rbd_dev);
}

/*
 * Probe for the existence of the header object for the given rbd
 * device.  If this image is the one being mapped (i.e., not a
 * parent), initiate a watch on its header object before using that
 * object to get detailed information about the rbd image.
 */
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
{
	int ret;

	/*
	 * Get the id from the image id object.  Unless there's an
	 * error, rbd_dev->spec->image_id will be filled in with
	 * a dynamically-allocated string, and rbd_dev->image_format
	 * will be set to either 1 or 2.
	 */
	ret = rbd_dev_image_id(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_header_name(rbd_dev);
	if (ret)
		goto err_out_format;

	if (!depth) {
		ret = rbd_dev_header_watch_sync(rbd_dev);
		if (ret) {
			if (ret == -ENOENT)
				pr_info("image %s/%s does not exist\n",
					rbd_dev->spec->pool_name,
					rbd_dev->spec->image_name);
			goto out_header_name;
		}
	}

	ret = rbd_dev_header_info(rbd_dev);
	if (ret)
		goto err_out_watch;

	/*
	 * If this image is the one being mapped, we have pool name and
	 * id, image name and id, and snap name - need to fill snap id.
	 * Otherwise this is a parent image, identified by pool, image
	 * and snap ids - need to fill in names for those ids.
	 */
	if (!depth)
		ret = rbd_spec_fill_snap_id(rbd_dev);
	else
		ret = rbd_spec_fill_names(rbd_dev);
	if (ret) {
		if (ret == -ENOENT)
			pr_info("snap %s/%s@%s does not exist\n",
				rbd_dev->spec->pool_name,
				rbd_dev->spec->image_name,
				rbd_dev->spec->snap_name);
		goto err_out_probe;
	}

	if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
		ret = rbd_dev_v2_parent_info(rbd_dev);
		if (ret)
			goto err_out_probe;

		/*
		 * Need to warn users if this image is the one being
		 * mapped and has a parent.
		 */
		if (!depth && rbd_dev->parent_spec)
			rbd_warn(rbd_dev,
				 "WARNING: kernel layering is EXPERIMENTAL!");
	}

	ret = rbd_dev_probe_parent(rbd_dev, depth);
	if (ret)
		goto err_out_probe;

	dout("discovered format %u image, header name is %s\n",
		rbd_dev->image_format, rbd_dev->header_name);
	return 0;

err_out_probe:
	rbd_dev_unprobe(rbd_dev);
err_out_watch:
	if (!depth)
		rbd_dev_header_unwatch_sync(rbd_dev);
out_header_name:
	kfree(rbd_dev->header_name);
	rbd_dev->header_name = NULL;
err_out_format:
	rbd_dev->image_format = 0;
	kfree(rbd_dev->spec->image_id);
	rbd_dev->spec->image_id = NULL;
	return ret;
}

static ssize_t do_rbd_add(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct ceph_options *ceph_opts = NULL;
	struct rbd_options *rbd_opts = NULL;
	struct rbd_spec *spec = NULL;
	struct rbd_client *rbdc;
	bool read_only;
	int rc;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (!try_module_get(THIS_MODULE))
		return -ENODEV;

	/* parse add command */
	rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
	if (rc < 0)
		goto out;

	rbdc = rbd_get_client(ceph_opts);
	if (IS_ERR(rbdc)) {
		rc = PTR_ERR(rbdc);
		goto err_out_args;
	}

	/* pick the pool */
	rc = rbd_add_get_pool_id(rbdc, spec->pool_name);
	if (rc < 0) {
		if (rc == -ENOENT)
			pr_info("pool %s does not exist\n", spec->pool_name);
		goto err_out_client;
	}
	spec->pool_id = (u64)rc;

	/* The ceph file layout needs to fit pool id in 32 bits */

	if (spec->pool_id > (u64)U32_MAX) {
		rbd_warn(NULL, "pool id too large (%llu > %u)",
				(unsigned long long)spec->pool_id, U32_MAX);
		rc = -EIO;
		goto err_out_client;
	}

	rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
	if (!rbd_dev) {
		rc = -ENOMEM;
		goto err_out_client;
	}
	rbdc = NULL;		/* rbd_dev now owns this */
	spec = NULL;		/* rbd_dev now owns this */
	rbd_opts = NULL;	/* rbd_dev now owns this */

	rc = rbd_dev_image_probe(rbd_dev, 0);
	if (rc < 0)
		goto err_out_rbd_dev;

	/* If we are mapping a snapshot it must be marked read-only */

	read_only = rbd_dev->opts->read_only;
	if (rbd_dev->spec->snap_id != CEPH_NOSNAP)
		read_only = true;
	rbd_dev->mapping.read_only = read_only;

	rc = rbd_dev_device_setup(rbd_dev);
	if (rc) {
		/*
		 * rbd_dev_header_unwatch_sync() can't be moved into
		 * rbd_dev_image_release() without refactoring, see
		 * commit 1f3ef78861ac.
		 */
		rbd_dev_header_unwatch_sync(rbd_dev);
		rbd_dev_image_release(rbd_dev);
		goto out;
	}

	rc = count;
out:
	module_put(THIS_MODULE);
	return rc;

err_out_rbd_dev:
	rbd_dev_destroy(rbd_dev);
err_out_client:
	rbd_put_client(rbdc);
err_out_args:
	rbd_spec_put(spec);
	kfree(rbd_opts);
	goto out;
}

static ssize_t rbd_add(struct bus_type *bus,
		       const char *buf,
		       size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t rbd_add_single_major(struct bus_type *bus,
				    const char *buf,
				    size_t count)
{
	return do_rbd_add(bus, buf, count);
}

static void rbd_dev_device_release(struct rbd_device *rbd_dev)
{
	rbd_free_disk(rbd_dev);
	clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
	device_del(&rbd_dev->dev);
	rbd_dev_mapping_clear(rbd_dev);
	if (!single_major)
		unregister_blkdev(rbd_dev->major, rbd_dev->name);
	rbd_dev_id_put(rbd_dev);
}

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		first->parent = NULL;
		first->parent_overlap = 0;
		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}

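/*
 * Removal order sketch: for a chain mapped -> parent -> grandparent,
 * each pass of the outer loop walks to the device whose parent has
 * no parent of its own, so the grandparent is released first, then
 * the parent, and the loop ends once the mapped device itself has no
 * parent left.
 */
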
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	unsigned long ul;
	bool already = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	ret = kstrtoul(buf, 10, &ul);
	if (ret)
		return ret;

	/* convert to int; abort if we lost anything in the conversion */
	if (ul > INT_MAX)
		return -EINVAL;
	dev_id = (int)ul;

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count)
			ret = -EBUSY;
		else
			already = test_and_set_bit(RBD_DEV_FLAG_REMOVING,
							&rbd_dev->flags);
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret < 0 || already)
		return ret;

	rbd_dev_header_unwatch_sync(rbd_dev);
	/*
	 * flush remaining watch callbacks - these must be complete
	 * before the osd_client is shutdown
	 */
	dout("%s: flushing notifies\n", __func__);
	ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);

	/*
	 * Don't free anything from rbd_dev->disk until after all
	 * notifies are completely processed.  Otherwise
	 * rbd_bus_del_dev() will race with rbd_watch_cb(), resulting
	 * in a potential use after free of rbd_dev->disk or rbd_dev.
	 */
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);

	return count;
}

static ssize_t rbd_remove(struct bus_type *bus,
			  const char *buf,
			  size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t rbd_remove_single_major(struct bus_type *bus,
				       const char *buf,
				       size_t count)
{
	return do_rbd_remove(bus, buf, count);
}

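/*
 * For example (illustrative only), the device with id 0 would be
 * unmapped with:
 *
 *	$ echo 0 > /sys/bus/rbd/remove
 *
 * (or via /sys/bus/rbd/remove_single_major when the driver was
 * loaded with single_major set).
 */
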
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}

static int rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = kmem_cache_create("rbd_img_request",
					sizeof (struct rbd_img_request),
					__alignof__(struct rbd_img_request),
					0, NULL);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = kmem_cache_create("rbd_obj_request",
					sizeof (struct rbd_obj_request),
					__alignof__(struct rbd_obj_request),
					0, NULL);
	if (!rbd_obj_request_cache)
		goto out_err;

	rbd_assert(!rbd_segment_name_cache);
	rbd_segment_name_cache = kmem_cache_create("rbd_segment_name",
					CEPH_MAX_OID_NAME_LEN + 1, 1, 0, NULL);
	if (rbd_segment_name_cache)
		return 0;
out_err:
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;

	return -ENOMEM;
}

static void rbd_slab_exit(void)
{
	rbd_assert(rbd_segment_name_cache);
	kmem_cache_destroy(rbd_segment_name_cache);
	rbd_segment_name_cache = NULL;

	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}

static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}

static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}

module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");

MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");