/*
   rbd.c -- Export ceph rados objects as a Linux block device

   based on drivers/block/osdblk.c:

   Copyright 2009 Red Hat, Inc.

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; see the file COPYING.  If not, write to
   the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, USA.

   For usage instructions, please refer to:

		 Documentation/ABI/testing/sysfs-bus-rbd

 */

#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/mon_client.h>
#include <linux/ceph/cls_lock_client.h>
#include <linux/ceph/striper.h>
#include <linux/ceph/decode.h>
#include <linux/fs_parser.h>
#include <linux/bsearch.h>

#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/blk-mq.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/idr.h>
#include <linux/workqueue.h>

#include "rbd_types.h"

#define RBD_DEBUG	/* Activate rbd_assert() calls */

/*
 * Increment the given counter and return its updated value.
 * If the counter is already 0 it will not be incremented.
 * If the counter is already at its maximum value, -EINVAL is
 * returned without updating it.
 */
static int atomic_inc_return_safe(atomic_t *v)
{
	unsigned int counter;

	counter = (unsigned int)atomic_fetch_add_unless(v, 1, 0);
	if (counter <= (unsigned int)INT_MAX)
		return (int)counter;

	atomic_dec(v);

	return -EINVAL;
}

/* Decrement the counter.  Return the resulting value, or -EINVAL */
static int atomic_dec_return_safe(atomic_t *v)
{
	int counter;

	counter = atomic_dec_return(v);
	if (counter >= 0)
		return counter;

	atomic_inc(v);

	return -EINVAL;
}

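/*
 * Illustrative note (not upstream documentation): these helpers
 * implement a saturating reference count -- see rbd_dev_parent_get()
 * and rbd_dev_parent_put() below, where parent_ref pins at 0 on
 * underflow and refuses to wrap past INT_MAX on overflow, so a
 * stray get/put cannot resurrect torn-down parent state.
 */
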
#define RBD_DRV_NAME "rbd"

#define RBD_MINORS_PER_MAJOR		256
#define RBD_SINGLE_MAJOR_PART_SHIFT	4

#define RBD_MAX_PARENT_CHAIN_LEN	16

#define RBD_SNAP_DEV_NAME_PREFIX	"snap_"
#define RBD_MAX_SNAP_NAME_LEN	\
			(NAME_MAX - (sizeof (RBD_SNAP_DEV_NAME_PREFIX) - 1))

#define RBD_MAX_SNAP_COUNT	510	/* allows max snapc to fit in 4KB */

#define RBD_SNAP_HEAD_NAME	"-"

#define	BAD_SNAP_INDEX	U32_MAX		/* invalid index into snap array */

/* This allows a single page to hold an image name sent by OSD */
#define RBD_IMAGE_NAME_LEN_MAX	(PAGE_SIZE - sizeof (__le32) - 1)
#define RBD_IMAGE_ID_LEN_MAX	64

#define RBD_OBJ_PREFIX_LEN_MAX	64

#define RBD_NOTIFY_TIMEOUT	5	/* seconds */
#define RBD_RETRY_DELAY		msecs_to_jiffies(1000)

/* Feature bits */

#define RBD_FEATURE_LAYERING		(1ULL<<0)
#define RBD_FEATURE_STRIPINGV2		(1ULL<<1)
#define RBD_FEATURE_EXCLUSIVE_LOCK	(1ULL<<2)
#define RBD_FEATURE_OBJECT_MAP		(1ULL<<3)
#define RBD_FEATURE_FAST_DIFF		(1ULL<<4)
#define RBD_FEATURE_DEEP_FLATTEN	(1ULL<<5)
#define RBD_FEATURE_DATA_POOL		(1ULL<<7)
#define RBD_FEATURE_OPERATIONS		(1ULL<<8)

#define RBD_FEATURES_ALL	(RBD_FEATURE_LAYERING |		\
				 RBD_FEATURE_STRIPINGV2 |	\
				 RBD_FEATURE_EXCLUSIVE_LOCK |	\
				 RBD_FEATURE_OBJECT_MAP |	\
				 RBD_FEATURE_FAST_DIFF |	\
				 RBD_FEATURE_DEEP_FLATTEN |	\
				 RBD_FEATURE_DATA_POOL |	\
				 RBD_FEATURE_OPERATIONS)

/* Features supported by this (client software) implementation. */

#define RBD_FEATURES_SUPPORTED	(RBD_FEATURES_ALL)

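/*
 * Sketch (assumed usage, mirroring the checks done at map time): an
 * image carrying feature bits this client doesn't implement should
 * be rejected rather than mishandled, e.g.:
 *
 *	u64 unsup = features & ~RBD_FEATURES_SUPPORTED;
 *	if (unsup)
 *		rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
 *			 unsup);
 */
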
/*
 * An RBD device name will be "rbd#", where the "rbd" comes from
 * RBD_DRV_NAME above, and # is a unique integer identifier.
 */
#define DEV_NAME_LEN		32

/*
 * block device image metadata (in-memory version)
 */
struct rbd_image_header {
	/* These six fields never change for a given rbd image */
	char *object_prefix;
	__u8 obj_order;
	u64 stripe_unit;
	u64 stripe_count;
	s64 data_pool_id;
	u64 features;		/* Might be changeable someday? */

	/* The remaining fields need to be updated occasionally */
	u64 image_size;
	struct ceph_snap_context *snapc;
	char *snap_names;	/* format 1 only */
	u64 *snap_sizes;	/* format 1 only */
};

/*
 * An rbd image specification.
 *
 * The tuple (pool_id, image_id, snap_id) is sufficient to uniquely
 * identify an image.  Each rbd_dev structure includes a pointer to
 * an rbd_spec structure that encapsulates this identity.
 *
 * Each of the id's in an rbd_spec has an associated name.  For a
 * user-mapped image, the names are supplied and the id's associated
 * with them are looked up.  For a layered image, a parent image is
 * defined by the tuple, and the names are looked up.
 *
 * An rbd_dev structure contains a parent_spec pointer which is
 * non-null if the image it represents is a child in a layered
 * image.  This pointer will refer to the rbd_spec structure used
 * by the parent rbd_dev for its own identity (i.e., the structure
 * is shared between the parent and child).
 *
 * Since these structures are populated once, during the discovery
 * phase of image construction, they are effectively immutable so
 * we make no effort to synchronize access to them.
 *
 * Note that code herein does not assume the image name is known (it
 * could be a null pointer).
 */
struct rbd_spec {
	u64		pool_id;
	const char	*pool_name;
	const char	*pool_ns;	/* NULL if default, never "" */

	const char	*image_id;
	const char	*image_name;

	u64		snap_id;
	const char	*snap_name;

	struct kref	kref;
};

/*
 * an instance of the client.  multiple devices may share an rbd client.
 */
struct rbd_client {
	struct ceph_client	*client;
	struct kref		kref;
	struct list_head	node;
};

struct pending_result {
	int			result;		/* first nonzero result */
	int			num_pending;
};

struct rbd_img_request;

enum obj_request_type {
	OBJ_REQUEST_NODATA = 1,
	OBJ_REQUEST_BIO,	/* pointer into provided bio (list) */
	OBJ_REQUEST_BVECS,	/* pointer into provided bio_vec array */
	OBJ_REQUEST_OWN_BVECS,	/* private bio_vec array, doesn't own pages */
};

enum obj_operation_type {
	OBJ_OP_READ = 1,
	OBJ_OP_WRITE,
	OBJ_OP_DISCARD,
	OBJ_OP_ZEROOUT,
};

#define RBD_OBJ_FLAG_DELETION			(1U << 0)
#define RBD_OBJ_FLAG_COPYUP_ENABLED		(1U << 1)
#define RBD_OBJ_FLAG_COPYUP_ZEROS		(1U << 2)
#define RBD_OBJ_FLAG_MAY_EXIST			(1U << 3)
#define RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT	(1U << 4)

enum rbd_obj_read_state {
	RBD_OBJ_READ_START = 1,
	RBD_OBJ_READ_OBJECT,
	RBD_OBJ_READ_PARENT,
};

/*
 * Writes go through the following state machine to deal with
 * layering:
 *
 * A guarded write starts in RBD_OBJ_WRITE_GUARD.  When the guard
 * fails because the object doesn't exist, the request moves to
 * RBD_OBJ_WRITE_READ_FROM_PARENT to read the backing data, then
 * either directly to RBD_OBJ_WRITE_COPYUP_OPS, or through
 * RBD_OBJ_WRITE_COPYUP_EMPTY_SNAPC first when a deep-copyup is
 * needed, before completing.  If the image has been flattened in
 * the meantime (no copyup needed), the request completes without
 * the copyup ops.
 *
 * Writes start in RBD_OBJ_WRITE_GUARD or _FLAT, depending on whether
 * assert_exists guard is needed or not (in some cases it's not needed
 * even if there is a parent).
 */

enum rbd_obj_write_state {
	RBD_OBJ_WRITE_START = 1,
	RBD_OBJ_WRITE_PRE_OBJECT_MAP,
	RBD_OBJ_WRITE_OBJECT,
	__RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_COPYUP,
	RBD_OBJ_WRITE_POST_OBJECT_MAP,
};

enum rbd_obj_copyup_state {
	RBD_OBJ_COPYUP_START = 1,
	RBD_OBJ_COPYUP_READ_PARENT,
	__RBD_OBJ_COPYUP_OBJECT_MAPS,
	RBD_OBJ_COPYUP_OBJECT_MAPS,
	__RBD_OBJ_COPYUP_WRITE_OBJECT,
	RBD_OBJ_COPYUP_WRITE_OBJECT,
};

struct rbd_obj_request {
	struct ceph_object_extent ex;
	unsigned int		flags;	/* RBD_OBJ_FLAG_* */
	union {
		enum rbd_obj_read_state	 read_state;	/* for reads */
		enum rbd_obj_write_state write_state;	/* for writes */
	};

	struct rbd_img_request	*img_request;
	struct ceph_file_extent	*img_extents;
	u32			num_img_extents;

	union {
		struct ceph_bio_iter	bio_pos;
		struct {
			struct ceph_bvec_iter	bvec_pos;
			u32			bvec_count;
			u32			bvec_idx;
		};
	};

	enum rbd_obj_copyup_state copyup_state;
	struct bio_vec		*copyup_bvecs;
	u32			copyup_bvec_count;

	struct list_head	osd_reqs;	/* w/ r_private_item */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct kref		kref;
};

enum img_req_flags {
	IMG_REQ_CHILD,		/* initiator: block = 0, child image = 1 */
	IMG_REQ_LAYERED,	/* ENOENT handling: normal = 0, layered = 1 */
};

enum rbd_img_state {
	RBD_IMG_START = 1,
	RBD_IMG_EXCLUSIVE_LOCK,
	__RBD_IMG_OBJECT_REQUESTS,
	RBD_IMG_OBJECT_REQUESTS,
};

struct rbd_img_request {
	struct rbd_device	*rbd_dev;
	enum obj_operation_type	op_type;
	enum obj_request_type	data_type;
	unsigned long		flags;
	enum rbd_img_state	state;
	union {
		u64			snap_id;	/* for reads */
		struct ceph_snap_context *snapc;	/* for writes */
	};
	struct rbd_obj_request	*obj_request;	/* obj req initiator */

	struct list_head	lock_item;
	struct list_head	object_extents;	/* obj_req.ex structs */

	struct mutex		state_mutex;
	struct pending_result	pending;
	struct work_struct	work;
	int			work_result;
};

#define for_each_obj_request(ireq, oreq) \
	list_for_each_entry(oreq, &(ireq)->object_extents, ex.oe_item)
#define for_each_obj_request_safe(ireq, oreq, n) \
	list_for_each_entry_safe(oreq, n, &(ireq)->object_extents, ex.oe_item)

enum rbd_watch_state {
	RBD_WATCH_STATE_UNREGISTERED,
	RBD_WATCH_STATE_REGISTERED,
	RBD_WATCH_STATE_ERROR,
};

enum rbd_lock_state {
	RBD_LOCK_STATE_UNLOCKED,
	RBD_LOCK_STATE_LOCKED,
	RBD_LOCK_STATE_RELEASING,
};

/* WatchNotify::ClientId */
struct rbd_client_id {
	u64 gid;
	u64 handle;
};

struct rbd_mapping {
	u64			size;
};

/*
 * a single device
 */
struct rbd_device {
	int			dev_id;		/* blkdev unique id */

	int			major;		/* blkdev assigned major */
	int			minor;
	struct gendisk		*disk;		/* blkdev's gendisk and rq */

	u32			image_format;	/* Either 1 or 2 */
	struct rbd_client	*rbd_client;

	char			name[DEV_NAME_LEN]; /* blkdev name, e.g. rbd3 */

	spinlock_t		lock;		/* queue, flags, open_count */

	struct rbd_image_header	header;
	unsigned long		flags;		/* possibly lock protected */
	struct rbd_spec		*spec;
	struct rbd_options	*opts;
	char			*config_info;	/* add{,_single_major} string */

	struct ceph_object_id	header_oid;
	struct ceph_object_locator header_oloc;

	struct ceph_file_layout	layout;		/* used for all rbd requests */

	struct mutex		watch_mutex;
	enum rbd_watch_state	watch_state;
	struct ceph_osd_linger_request *watch_handle;
	u64			watch_cookie;
	struct delayed_work	watch_dwork;

	struct rw_semaphore	lock_rwsem;
	enum rbd_lock_state	lock_state;
	char			lock_cookie[32];
	struct rbd_client_id	owner_cid;
	struct work_struct	acquired_lock_work;
	struct work_struct	released_lock_work;
	struct delayed_work	lock_dwork;
	struct work_struct	unlock_work;
	spinlock_t		lock_lists_lock;
	struct list_head	acquiring_list;
	struct list_head	running_list;
	struct completion	acquire_wait;
	int			acquire_err;
	struct completion	releasing_wait;

	spinlock_t		object_map_lock;
	u8			*object_map;
	u64			object_map_size;	/* in objects */
	u64			object_map_flags;

	struct workqueue_struct	*task_wq;

	struct rbd_spec		*parent_spec;
	u64			parent_overlap;
	atomic_t		parent_ref;
	struct rbd_device	*parent;

	/* Block layer tags. */
	struct blk_mq_tag_set	tag_set;

	/* protects updating the header */
	struct rw_semaphore	header_rwsem;

	struct rbd_mapping	mapping;

	struct list_head	node;

	/* sysfs related */
	struct device		dev;
	unsigned long		open_count;	/* protected by lock */
};

/*
 * Flag bits for rbd_dev->flags:
 * - REMOVING (which is coupled with rbd_dev->open_count) is protected
 *   by rbd_dev->lock
 */
enum rbd_dev_flags {
	RBD_DEV_FLAG_EXISTS,	/* rbd_dev_device_setup() ran */
	RBD_DEV_FLAG_REMOVING,	/* this mapping is being removed */
	RBD_DEV_FLAG_READONLY,	/* -o ro or snapshot */
};

static DEFINE_MUTEX(client_mutex);	/* Serialize client creation */

static LIST_HEAD(rbd_dev_list);		/* devices */
static DEFINE_SPINLOCK(rbd_dev_list_lock);

static LIST_HEAD(rbd_client_list);	/* clients */
static DEFINE_SPINLOCK(rbd_client_list_lock);

/* Slab caches for frequently-allocated structures */

static struct kmem_cache	*rbd_img_request_cache;
static struct kmem_cache	*rbd_obj_request_cache;

static int rbd_major;
static DEFINE_IDA(rbd_dev_id_ida);

static struct workqueue_struct *rbd_wq;

static struct ceph_snap_context rbd_empty_snapc = {
	.nref = REFCOUNT_INIT(1),
};

/*
 * single-major requires >= 0.75 version of userspace rbd utility.
 */
static bool single_major = true;
module_param(single_major, bool, 0444);
MODULE_PARM_DESC(single_major, "Use a single major number for all rbd devices (default: true)");

static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count);
static ssize_t remove_store(struct bus_type *bus, const char *buf,
			    size_t count);
static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count);
static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count);
static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth);

static int rbd_dev_id_to_minor(int dev_id)
{
	return dev_id << RBD_SINGLE_MAJOR_PART_SHIFT;
}

static int minor_to_rbd_dev_id(int minor)
{
	return minor >> RBD_SINGLE_MAJOR_PART_SHIFT;
}

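/*
 * Example: with RBD_SINGLE_MAJOR_PART_SHIFT == 4, dev_id 3 maps to
 * minor 48, and minors 48..63 (the device plus its partitions) all
 * map back to dev_id 3.
 */
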
static bool rbd_is_ro(struct rbd_device *rbd_dev)
{
	return test_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
}

static bool rbd_is_snap(struct rbd_device *rbd_dev)
{
	return rbd_dev->spec->snap_id != CEPH_NOSNAP;
}

static bool __rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	lockdep_assert_held(&rbd_dev->lock_rwsem);

	return rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED ||
	       rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING;
}

static bool rbd_is_lock_owner(struct rbd_device *rbd_dev)
{
	bool is_lock_owner;

	down_read(&rbd_dev->lock_rwsem);
	is_lock_owner = __rbd_is_lock_owner(rbd_dev);
	up_read(&rbd_dev->lock_rwsem);
	return is_lock_owner;
}

static ssize_t supported_features_show(struct bus_type *bus, char *buf)
{
	return sprintf(buf, "0x%llx\n", RBD_FEATURES_SUPPORTED);
}

static BUS_ATTR_WO(add);
static BUS_ATTR_WO(remove);
static BUS_ATTR_WO(add_single_major);
static BUS_ATTR_WO(remove_single_major);
static BUS_ATTR_RO(supported_features);

static struct attribute *rbd_bus_attrs[] = {
	&bus_attr_add.attr,
	&bus_attr_remove.attr,
	&bus_attr_add_single_major.attr,
	&bus_attr_remove_single_major.attr,
	&bus_attr_supported_features.attr,
	NULL,
};

static umode_t rbd_bus_is_visible(struct kobject *kobj,
				  struct attribute *attr, int index)
{
	if (!single_major &&
	    (attr == &bus_attr_add_single_major.attr ||
	     attr == &bus_attr_remove_single_major.attr))
		return 0;

	return attr->mode;
}

static const struct attribute_group rbd_bus_group = {
	.attrs = rbd_bus_attrs,
	.is_visible = rbd_bus_is_visible,
};

__ATTRIBUTE_GROUPS(rbd_bus);

static struct bus_type rbd_bus_type = {
	.name		= "rbd",
	.bus_groups	= rbd_bus_groups,
};

static void rbd_root_dev_release(struct device *dev)
{
}

static struct device rbd_root_dev = {
	.init_name =    "rbd",
	.release =      rbd_root_dev_release,
};

static __printf(2, 3)
void rbd_warn(struct rbd_device *rbd_dev, const char *fmt, ...)
{
	struct va_format vaf;
	va_list args;

	va_start(args, fmt);
	vaf.fmt = fmt;
	vaf.va = &args;

	if (!rbd_dev)
		printk(KERN_WARNING "%s: %pV\n", RBD_DRV_NAME, &vaf);
	else if (rbd_dev->disk)
		printk(KERN_WARNING "%s: %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->disk->disk_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_name)
		printk(KERN_WARNING "%s: image %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_name, &vaf);
	else if (rbd_dev->spec && rbd_dev->spec->image_id)
		printk(KERN_WARNING "%s: id %s: %pV\n",
			RBD_DRV_NAME, rbd_dev->spec->image_id, &vaf);
	else	/* punt if we cannot figure out what to print */
		printk(KERN_WARNING "%s: rbd_dev %p: %pV\n",
			RBD_DRV_NAME, rbd_dev, &vaf);
	va_end(args);
}

#ifdef RBD_DEBUG
#define rbd_assert(expr)						\
		if (unlikely(!(expr))) {				\
			printk(KERN_ERR "\nAssertion failure in %s() "	\
						"at line %d:\n\n"	\
					"\trbd_assert(%s);\n\n",	\
					__func__, __LINE__, #expr);	\
			BUG();						\
		}
#else /* !RBD_DEBUG */
#  define rbd_assert(expr)	((void) 0)
#endif /* !RBD_DEBUG */

static void rbd_dev_remove_parent(struct rbd_device *rbd_dev);

static int rbd_dev_refresh(struct rbd_device *rbd_dev);
static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
				     struct rbd_image_header *header);
static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id);
static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				 u8 *order, u64 *snap_size);
static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev);

static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result);
static void rbd_img_handle_request(struct rbd_img_request *img_req, int result);

/*
 * Return true if nothing else is pending.
 */
static bool pending_result_dec(struct pending_result *pending, int *result)
{
	rbd_assert(pending->num_pending > 0);

	if (*result && !pending->result)
		pending->result = *result;
	if (--pending->num_pending)
		return false;

	*result = pending->result;
	return true;
}

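/*
 * Illustrative usage (sketch): a state machine that fans out N
 * sub-requests sets pending->num_pending = N; each completion calls
 * pending_result_dec(), the first nonzero result is latched, and
 * only the final call returns true, carrying the aggregate result.
 */
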
static int rbd_open(struct block_device *bdev, fmode_t mode)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	bool removing = false;

	spin_lock_irq(&rbd_dev->lock);
	if (test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags))
		removing = true;
	else
		rbd_dev->open_count++;
	spin_unlock_irq(&rbd_dev->lock);
	if (removing)
		return -ENOENT;

	(void) get_device(&rbd_dev->dev);

	return 0;
}

static void rbd_release(struct gendisk *disk, fmode_t mode)
{
	struct rbd_device *rbd_dev = disk->private_data;
	unsigned long open_count_before;

	spin_lock_irq(&rbd_dev->lock);
	open_count_before = rbd_dev->open_count--;
	spin_unlock_irq(&rbd_dev->lock);
	rbd_assert(open_count_before > 0);

	put_device(&rbd_dev->dev);
}

static int rbd_ioctl_set_ro(struct rbd_device *rbd_dev, unsigned long arg)
{
	int ro;

	if (get_user(ro, (int __user *)arg))
		return -EFAULT;

	/*
	 * Both images mapped read-only and snapshots can't be marked
	 * read-write.
	 */
	if (!ro) {
		if (rbd_is_ro(rbd_dev))
			return -EROFS;

		rbd_assert(!rbd_is_snap(rbd_dev));
	}

	/* Let blkdev_roset() handle it */
	return -ENOTTY;
}

static int rbd_ioctl(struct block_device *bdev, fmode_t mode,
			unsigned int cmd, unsigned long arg)
{
	struct rbd_device *rbd_dev = bdev->bd_disk->private_data;
	int ret;

	switch (cmd) {
	case BLKROSET:
		ret = rbd_ioctl_set_ro(rbd_dev, arg);
		break;
	default:
		ret = -ENOTTY;
	}

	return ret;
}

#ifdef CONFIG_COMPAT
static int rbd_compat_ioctl(struct block_device *bdev, fmode_t mode,
				unsigned int cmd, unsigned long arg)
{
	return rbd_ioctl(bdev, mode, cmd, arg);
}
#endif /* CONFIG_COMPAT */

static const struct block_device_operations rbd_bd_ops = {
	.owner			= THIS_MODULE,
	.open			= rbd_open,
	.release		= rbd_release,
	.ioctl			= rbd_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl		= rbd_compat_ioctl,
#endif
};

/*
 * Initialize an rbd client instance.  Success or not, this function
 * consumes ceph_opts.  Caller holds client_mutex.
 */
static struct rbd_client *rbd_client_create(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret = -ENOMEM;

	dout("%s:\n", __func__);
	rbdc = kmalloc(sizeof(struct rbd_client), GFP_KERNEL);
	if (!rbdc)
		goto out_opt;

	kref_init(&rbdc->kref);
	INIT_LIST_HEAD(&rbdc->node);

	rbdc->client = ceph_create_client(ceph_opts, rbdc);
	if (IS_ERR(rbdc->client))
		goto out_rbdc;
	ceph_opts = NULL; /* Now rbdc->client is responsible for ceph_opts */

	ret = ceph_open_session(rbdc->client);
	if (ret < 0)
		goto out_client;

	spin_lock(&rbd_client_list_lock);
	list_add_tail(&rbdc->node, &rbd_client_list);
	spin_unlock(&rbd_client_list_lock);

	dout("%s: rbdc %p\n", __func__, rbdc);

	return rbdc;
out_client:
	ceph_destroy_client(rbdc->client);
out_rbdc:
	kfree(rbdc);
out_opt:
	if (ceph_opts)
		ceph_destroy_options(ceph_opts);
	dout("%s: error %d\n", __func__, ret);

	return ERR_PTR(ret);
}

static struct rbd_client *__rbd_get_client(struct rbd_client *rbdc)
{
	kref_get(&rbdc->kref);

	return rbdc;
}

/*
 * Find a ceph client with specific addr and configuration.  If
 * found, bump its reference count.
 */
static struct rbd_client *rbd_client_find(struct ceph_options *ceph_opts)
{
	struct rbd_client *client_node;
	bool found = false;

	if (ceph_opts->flags & CEPH_OPT_NOSHARE)
		return NULL;

	spin_lock(&rbd_client_list_lock);
	list_for_each_entry(client_node, &rbd_client_list, node) {
		if (!ceph_compare_options(ceph_opts, client_node->client)) {
			__rbd_get_client(client_node);

			found = true;
			break;
		}
	}
	spin_unlock(&rbd_client_list_lock);

	return found ? client_node : NULL;
}

/*
 * (Per device) rbd map options
 */
enum {
	Opt_queue_depth,
	Opt_alloc_size,
	Opt_lock_timeout,
	/* int args above */
	Opt_pool_ns,
	Opt_compression_hint,
	/* string args above */
	Opt_read_only,
	Opt_read_write,
	Opt_lock_on_read,
	Opt_exclusive,
	Opt_notrim,
};

enum {
	Opt_compression_hint_none,
	Opt_compression_hint_compressible,
	Opt_compression_hint_incompressible,
};

static const struct constant_table rbd_param_compression_hint[] = {
	{"none",		Opt_compression_hint_none},
	{"compressible",	Opt_compression_hint_compressible},
	{"incompressible",	Opt_compression_hint_incompressible},
	{}
};

static const struct fs_parameter_spec rbd_parameters[] = {
	fsparam_u32	("alloc_size",			Opt_alloc_size),
	fsparam_enum	("compression_hint",		Opt_compression_hint,
			 rbd_param_compression_hint),
	fsparam_flag	("exclusive",			Opt_exclusive),
	fsparam_flag	("lock_on_read",		Opt_lock_on_read),
	fsparam_u32	("lock_timeout",		Opt_lock_timeout),
	fsparam_flag	("notrim",			Opt_notrim),
	fsparam_string	("_pool_ns",			Opt_pool_ns),
	fsparam_u32	("queue_depth",			Opt_queue_depth),
	fsparam_flag	("read_only",			Opt_read_only),
	fsparam_flag	("read_write",			Opt_read_write),
	fsparam_flag	("ro",				Opt_read_only),
	fsparam_flag	("rw",				Opt_read_write),
	{}
};

struct rbd_options {
	int	queue_depth;		/* max queue depth */
	int	alloc_size;
	unsigned long	lock_timeout;
	bool	read_only;
	bool	lock_on_read;
	bool	exclusive;
	bool	trim;

	u32 alloc_hint_flags;  /* CEPH_OSD_OP_ALLOC_HINT_FLAG_* */
};

#define RBD_QUEUE_DEPTH_DEFAULT	BLKDEV_MAX_RQ
#define RBD_ALLOC_SIZE_DEFAULT	(64 * 1024)
#define RBD_LOCK_TIMEOUT_DEFAULT 0  /* no timeout */
#define RBD_READ_ONLY_DEFAULT	false
#define RBD_LOCK_ON_READ_DEFAULT false
#define RBD_EXCLUSIVE_DEFAULT	false
#define RBD_TRIM_DEFAULT	true

struct rbd_parse_opts_ctx {
	struct rbd_spec		*spec;
	struct ceph_options	*copts;
	struct rbd_options	*opts;
};

static char* obj_op_name(enum obj_operation_type op_type)
{
	switch (op_type) {
	case OBJ_OP_READ:
		return "read";
	case OBJ_OP_WRITE:
		return "write";
	case OBJ_OP_DISCARD:
		return "discard";
	case OBJ_OP_ZEROOUT:
		return "zeroout";
	default:
		return "???";
	}
}

/*
 * Destroy ceph client
 *
 * Takes rbd_client_list_lock to remove the client from the client
 * list before tearing it down.
 */
static void rbd_client_release(struct kref *kref)
{
	struct rbd_client *rbdc = container_of(kref, struct rbd_client, kref);

	dout("%s: rbdc %p\n", __func__, rbdc);
	spin_lock(&rbd_client_list_lock);
	list_del(&rbdc->node);
	spin_unlock(&rbd_client_list_lock);

	ceph_destroy_client(rbdc->client);
	kfree(rbdc);
}

/*
 * Drop reference to ceph client node. If it's not referenced anymore, release
 * it.
 */
static void rbd_put_client(struct rbd_client *rbdc)
{
	if (rbdc)
		kref_put(&rbdc->kref, rbd_client_release);
}

/*
 * Get a ceph client with specific addr and configuration, if one does
 * not exist create it.  Either way, ceph_opts is consumed by this
 * function.
 */
static struct rbd_client *rbd_get_client(struct ceph_options *ceph_opts)
{
	struct rbd_client *rbdc;
	int ret;

	mutex_lock(&client_mutex);
	rbdc = rbd_client_find(ceph_opts);
	if (rbdc) {
		ceph_destroy_options(ceph_opts);

		/*
		 * Using an existing client.  Make sure ->pg_pools is up to
		 * date before we look up the pool id in do_rbd_add().
		 */
		ret = ceph_wait_for_latest_osdmap(rbdc->client,
					rbdc->client->options->mount_timeout);
		if (ret) {
			rbd_warn(NULL, "failed to get latest osdmap: %d", ret);
			rbd_put_client(rbdc);
			rbdc = ERR_PTR(ret);
		}
	} else {
		rbdc = rbd_client_create(ceph_opts);
	}
	mutex_unlock(&client_mutex);

	return rbdc;
}

static bool rbd_image_format_valid(u32 image_format)
{
	return image_format == 1 || image_format == 2;
}

static bool rbd_dev_ondisk_valid(struct rbd_image_header_ondisk *ondisk)
{
	size_t size;
	u32 snap_count;

	/* The header has to start with the magic rbd header text */
	if (memcmp(&ondisk->text, RBD_HEADER_TEXT, sizeof (RBD_HEADER_TEXT)))
		return false;

	/* The bio layer requires at least sector-sized I/O */

	if (ondisk->options.order < SECTOR_SHIFT)
		return false;

	/* If we use u64 in a few spots we may be able to loosen this */

	if (ondisk->options.order > 8 * sizeof (int) - 1)
		return false;

	/*
	 * The size of a snapshot header has to fit in a size_t, and
	 * that limits the number of snapshots.
	 */
	snap_count = le32_to_cpu(ondisk->snap_count);
	size = SIZE_MAX - sizeof (struct ceph_snap_context);
	if (snap_count > size / sizeof (__le64))
		return false;

	/*
	 * Not only that, but the size of the entire snapshot
	 * header must also be representable in a size_t.
	 */
	size -= snap_count * sizeof (__le64);
	if ((u64) size < le64_to_cpu(ondisk->snap_names_len))
		return false;

	return true;
}

/*
 * returns the size of an object in the image
 */
static u32 rbd_obj_bytes(struct rbd_image_header *header)
{
	return 1U << header->obj_order;
}

static void rbd_init_layout(struct rbd_device *rbd_dev)
{
	if (rbd_dev->header.stripe_unit == 0 ||
	    rbd_dev->header.stripe_count == 0) {
		rbd_dev->header.stripe_unit = rbd_obj_bytes(&rbd_dev->header);
		rbd_dev->header.stripe_count = 1;
	}

	rbd_dev->layout.stripe_unit = rbd_dev->header.stripe_unit;
	rbd_dev->layout.stripe_count = rbd_dev->header.stripe_count;
	rbd_dev->layout.object_size = rbd_obj_bytes(&rbd_dev->header);
	rbd_dev->layout.pool_id = rbd_dev->header.data_pool_id == CEPH_NOPOOL ?
			  rbd_dev->spec->pool_id : rbd_dev->header.data_pool_id;
	RCU_INIT_POINTER(rbd_dev->layout.pool_ns, NULL);
}

static void rbd_image_header_cleanup(struct rbd_image_header *header)
{
	kfree(header->object_prefix);
	ceph_put_snap_context(header->snapc);
	kfree(header->snap_sizes);
	kfree(header->snap_names);

	memset(header, 0, sizeof(*header));
}

/*
 * Fill an rbd image header with information from the given format 1
 * on-disk header.
 */
static int rbd_header_from_disk(struct rbd_image_header *header,
				struct rbd_image_header_ondisk *ondisk,
				bool first_time)
{
	struct ceph_snap_context *snapc;
	char *object_prefix = NULL;
	char *snap_names = NULL;
	u64 *snap_sizes = NULL;
	u32 snap_count;
	int ret = -ENOMEM;
	u32 i;

	/* Allocate this now to avoid having to handle failure below */

	if (first_time) {
		object_prefix = kstrndup(ondisk->object_prefix,
					 sizeof(ondisk->object_prefix),
					 GFP_KERNEL);
		if (!object_prefix)
			return -ENOMEM;
	}

	/* Allocate the snapshot context and fill it in */

	snap_count = le32_to_cpu(ondisk->snap_count);
	snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
	if (!snapc)
		goto out_err;
	snapc->seq = le64_to_cpu(ondisk->snap_seq);
	if (snap_count) {
		struct rbd_image_snap_ondisk *snaps;
		u64 snap_names_len = le64_to_cpu(ondisk->snap_names_len);

		/* We'll keep a copy of the snapshot names... */

		if (snap_names_len > (u64)SIZE_MAX)
			goto out_2big;
		snap_names = kmalloc(snap_names_len, GFP_KERNEL);
		if (!snap_names)
			goto out_err;

		/* ...as well as the array of their sizes. */
		snap_sizes = kmalloc_array(snap_count,
					   sizeof(*header->snap_sizes),
					   GFP_KERNEL);
		if (!snap_sizes)
			goto out_err;

		/*
		 * Copy the names, and fill in each snapshot's id
		 * and size.
		 *
		 * Note that rbd_dev_v1_header_info() guarantees the
		 * ondisk buffer we're working with has
		 * snap_names_len bytes beyond the end of the
		 * snapshot id array, this memcpy() is safe.
		 */
		memcpy(snap_names, &ondisk->snaps[snap_count], snap_names_len);
		snaps = ondisk->snaps;
		for (i = 0; i < snap_count; i++) {
			snapc->snaps[i] = le64_to_cpu(snaps[i].id);
			snap_sizes[i] = le64_to_cpu(snaps[i].image_size);
		}
	}

	/* We won't fail any more, fill in the header */

	if (first_time) {
		header->object_prefix = object_prefix;
		header->obj_order = ondisk->options.order;
	}

	/* The remaining fields always get updated (when we refresh) */

	header->image_size = le64_to_cpu(ondisk->image_size);
	header->snapc = snapc;
	header->snap_names = snap_names;
	header->snap_sizes = snap_sizes;

	return 0;
out_2big:
	ret = -EIO;
out_err:
	kfree(snap_sizes);
	kfree(snap_names);
	ceph_put_snap_context(snapc);
	kfree(object_prefix);

	return ret;
}

static const char *_rbd_dev_v1_snap_name(struct rbd_device *rbd_dev, u32 which)
{
	const char *snap_name;

	rbd_assert(which < rbd_dev->header.snapc->num_snaps);

	/* Skip over names until we find the one we are looking for */

	snap_name = rbd_dev->header.snap_names;
	while (which--)
		snap_name += strlen(snap_name) + 1;

	return kstrdup(snap_name, GFP_KERNEL);
}

/*
 * Snapshot id comparison function for use with qsort()/bsearch().
 * Note that result is for snapshots in *descending* order.
 */
static int snapid_compare_reverse(const void *s1, const void *s2)
{
	u64 snap_id1 = *(u64 *)s1;
	u64 snap_id2 = *(u64 *)s2;

	if (snap_id1 < snap_id2)
		return 1;
	return snap_id1 == snap_id2 ? 0 : -1;
}

/*
 * Search a snapshot context to see if the given snapshot id is
 * present.
 *
 * Returns the position of the snapshot id in the array if it's found,
 * or BAD_SNAP_INDEX otherwise.
 *
 * Note: The snapshot array is kept sorted (by the osd) in
 * reverse order, highest snapshot id first.
 */
static u32 rbd_dev_snap_index(struct rbd_device *rbd_dev, u64 snap_id)
{
	struct ceph_snap_context *snapc = rbd_dev->header.snapc;
	u64 *found;

	found = bsearch(&snap_id, &snapc->snaps, snapc->num_snaps,
				sizeof (snap_id), snapid_compare_reverse);

	return found ? (u32)(found - &snapc->snaps[0]) : BAD_SNAP_INDEX;
}

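/*
 * Example: with snapc->snaps == {9, 5, 2} (descending, as the OSD
 * keeps it), rbd_dev_snap_index(rbd_dev, 5) returns 1, while looking
 * up an id that isn't present (say 7) returns BAD_SNAP_INDEX.
 */
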
static const char *rbd_dev_v1_snap_name(struct rbd_device *rbd_dev,
					u64 snap_id)
{
	u32 which;
	const char *snap_name;

	which = rbd_dev_snap_index(rbd_dev, snap_id);
	if (which == BAD_SNAP_INDEX)
		return ERR_PTR(-ENOENT);

	snap_name = _rbd_dev_v1_snap_name(rbd_dev, which);
	return snap_name ? snap_name : ERR_PTR(-ENOMEM);
}

static const char *rbd_snap_name(struct rbd_device *rbd_dev, u64 snap_id)
{
	if (snap_id == CEPH_NOSNAP)
		return RBD_SNAP_HEAD_NAME;

	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (rbd_dev->image_format == 1)
		return rbd_dev_v1_snap_name(rbd_dev, snap_id);

	return rbd_dev_v2_snap_name(rbd_dev, snap_id);
}

static int rbd_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
				u64 *snap_size)
{
	rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
	if (snap_id == CEPH_NOSNAP) {
		*snap_size = rbd_dev->header.image_size;
	} else if (rbd_dev->image_format == 1) {
		u32 which;

		which = rbd_dev_snap_index(rbd_dev, snap_id);
		if (which == BAD_SNAP_INDEX)
			return -ENOENT;

		*snap_size = rbd_dev->header.snap_sizes[which];
	} else {
		u64 size = 0;
		int ret;

		ret = _rbd_dev_v2_snap_size(rbd_dev, snap_id, NULL, &size);
		if (ret)
			return ret;

		*snap_size = size;
	}
	return 0;
}

static int rbd_dev_mapping_set(struct rbd_device *rbd_dev)
{
	u64 snap_id = rbd_dev->spec->snap_id;
	u64 size = 0;
	int ret;

	ret = rbd_snap_size(rbd_dev, snap_id, &size);
	if (ret)
		return ret;

	rbd_dev->mapping.size = size;
	return 0;
}

static void rbd_dev_mapping_clear(struct rbd_device *rbd_dev)
{
	rbd_dev->mapping.size = 0;
}

static void zero_bvec(struct bio_vec *bv)
{
	void *buf;
	unsigned long flags;

	buf = bvec_kmap_irq(bv, &flags);
	memset(buf, 0, bv->bv_len);
	flush_dcache_page(bv->bv_page);
	bvec_kunmap_irq(buf, &flags);
}

static void zero_bios(struct ceph_bio_iter *bio_pos, u32 off, u32 bytes)
{
	struct ceph_bio_iter it = *bio_pos;

	ceph_bio_iter_advance(&it, off);
	ceph_bio_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

static void zero_bvecs(struct ceph_bvec_iter *bvec_pos, u32 off, u32 bytes)
{
	struct ceph_bvec_iter it = *bvec_pos;

	ceph_bvec_iter_advance(&it, off);
	ceph_bvec_iter_advance_step(&it, bytes, ({
		zero_bvec(&bv);
	}));
}

/*
 * Zero a range in @obj_req data buffer defined by a bio (list) or
 * (private) bio_vec array.
 *
 * @off is relative to the start of the data buffer.
 */
static void rbd_obj_zero_range(struct rbd_obj_request *obj_req, u32 off,
			       u32 bytes)
{
	dout("%s %p data buf %u~%u\n", __func__, obj_req, off, bytes);

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		zero_bios(&obj_req->bio_pos, off, bytes);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		zero_bvecs(&obj_req->bvec_pos, off, bytes);
		break;
	default:
		BUG();
	}
}

static void rbd_obj_request_destroy(struct kref *kref);
static void rbd_obj_request_put(struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request != NULL);
	dout("%s: obj %p (was %d)\n", __func__, obj_request,
		kref_read(&obj_request->kref));
	kref_put(&obj_request->kref, rbd_obj_request_destroy);
}

static inline void rbd_img_obj_request_add(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	rbd_assert(obj_request->img_request == NULL);

	/* Image request now owns object's original reference */
	obj_request->img_request = img_request;
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
}

static inline void rbd_img_obj_request_del(struct rbd_img_request *img_request,
					struct rbd_obj_request *obj_request)
{
	dout("%s: img %p obj %p\n", __func__, img_request, obj_request);
	list_del(&obj_request->ex.oe_item);
	rbd_assert(obj_request->img_request == img_request);
	rbd_obj_request_put(obj_request);
}

static void rbd_osd_submit(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	dout("%s osd_req %p for obj_req %p objno %llu %llu~%llu\n",
	     __func__, osd_req, obj_req, obj_req->ex.oe_objno,
	     obj_req->ex.oe_off, obj_req->ex.oe_len);
	ceph_osdc_start_request(osd_req->r_osdc, osd_req, false);
}

/*
 * The default/initial value for all image request flags is 0.  Each
 * is conditionally set to 1 at image request initialization time
 * and currently never changes thereafter.
 */
static void img_request_layered_set(struct rbd_img_request *img_request)
{
	set_bit(IMG_REQ_LAYERED, &img_request->flags);
}

static bool img_request_layered_test(struct rbd_img_request *img_request)
{
	return test_bit(IMG_REQ_LAYERED, &img_request->flags) != 0;
}

static bool rbd_obj_is_entire(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return !obj_req->ex.oe_off &&
	       obj_req->ex.oe_len == rbd_dev->layout.object_size;
}

static bool rbd_obj_is_tail(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;

	return obj_req->ex.oe_off + obj_req->ex.oe_len ==
					rbd_dev->layout.object_size;
}

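/*
 * Example: with a 4M object size, a request covering 0~4M of an
 * object is "entire"; a request covering 3M~1M is a "tail" (it ends
 * exactly at the object boundary) but not entire.
 */
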
/*
 * Must be called after rbd_obj_calc_img_extents().
 */
static void rbd_obj_set_copyup_enabled(struct rbd_obj_request *obj_req)
{
	rbd_assert(obj_req->img_request->snapc);

	if (obj_req->img_request->op_type == OBJ_OP_DISCARD) {
		dout("%s %p objno %llu discard\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (!obj_req->num_img_extents) {
		dout("%s %p objno %llu not overlapping\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	if (rbd_obj_is_entire(obj_req) &&
	    !obj_req->img_request->snapc->num_snaps) {
		dout("%s %p objno %llu entire\n", __func__, obj_req,
		     obj_req->ex.oe_objno);
		return;
	}

	obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ENABLED;
}

static u64 rbd_obj_img_extents_bytes(struct rbd_obj_request *obj_req)
{
	return ceph_file_extents_bytes(obj_req->img_extents,
				       obj_req->num_img_extents);
}

static bool rbd_img_is_write(struct rbd_img_request *img_req)
{
	switch (img_req->op_type) {
	case OBJ_OP_READ:
		return false;
	case OBJ_OP_WRITE:
	case OBJ_OP_DISCARD:
	case OBJ_OP_ZEROOUT:
		return true;
	default:
		BUG();
	}
}

static void rbd_osd_req_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	/*
	 * Writes aren't allowed to return a data payload.  In some
	 * guarded write cases (e.g. stat + zero on an empty object)
	 * a stat response makes it through, but we don't care.
	 */
	if (osd_req->r_result > 0 && rbd_img_is_write(obj_req->img_request))
		result = 0;
	else
		result = osd_req->r_result;

	rbd_obj_handle_request(obj_req, result);
}

static void rbd_osd_format_read(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_request->img_request->rbd_dev;
	struct ceph_options *opt = rbd_dev->rbd_client->client->options;

	osd_req->r_flags = CEPH_OSD_FLAG_READ | opt->read_from_replica;
	osd_req->r_snapid = obj_request->img_request->snap_id;
}

static void rbd_osd_format_write(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_request = osd_req->r_priv;

	osd_req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&osd_req->r_mtime);
	osd_req->r_data_offset = obj_request->ex.oe_off;
}

static struct ceph_osd_request *
__rbd_obj_add_osd_request(struct rbd_obj_request *obj_req,
			  struct ceph_snap_context *snapc, int num_ops)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	const char *name_format = rbd_dev->image_format == 1 ?
				      RBD_V1_DATA_FORMAT : RBD_V2_DATA_FORMAT;
	int ret;

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, false, GFP_NOIO);
	if (!req)
		return ERR_PTR(-ENOMEM);

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_osd_req_callback;
	req->r_priv = obj_req;

	/*
	 * Data objects may be stored in a separate pool, but always in
	 * the same namespace in that pool as the header in its pool.
	 */
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_base_oloc.pool = rbd_dev->layout.pool_id;

	ret = ceph_oid_aprintf(&req->r_base_oid, GFP_NOIO, name_format,
			       rbd_dev->header.object_prefix,
			       obj_req->ex.oe_objno);
	if (ret)
		return ERR_PTR(ret);

	return req;
}

static struct ceph_osd_request *
rbd_obj_add_osd_request(struct rbd_obj_request *obj_req, int num_ops)
{
	rbd_assert(obj_req->img_request->snapc);
	return __rbd_obj_add_osd_request(obj_req, obj_req->img_request->snapc,
					 num_ops);
}

static struct rbd_obj_request *rbd_obj_request_create(void)
{
	struct rbd_obj_request *obj_request;

	obj_request = kmem_cache_zalloc(rbd_obj_request_cache, GFP_NOIO);
	if (!obj_request)
		return NULL;

	ceph_object_extent_init(&obj_request->ex);
	INIT_LIST_HEAD(&obj_request->osd_reqs);
	mutex_init(&obj_request->state_mutex);
	kref_init(&obj_request->kref);

	dout("%s %p\n", __func__, obj_request);
	return obj_request;
}

static void rbd_obj_request_destroy(struct kref *kref)
{
	struct rbd_obj_request *obj_request;
	struct ceph_osd_request *osd_req;
	u32 i;

	obj_request = container_of(kref, struct rbd_obj_request, kref);

	dout("%s: obj %p\n", __func__, obj_request);

	while (!list_empty(&obj_request->osd_reqs)) {
		osd_req = list_first_entry(&obj_request->osd_reqs,
				    struct ceph_osd_request, r_private_item);
		list_del_init(&osd_req->r_private_item);
		ceph_osdc_put_request(osd_req);
	}

	switch (obj_request->img_request->data_type) {
	case OBJ_REQUEST_NODATA:
	case OBJ_REQUEST_BIO:
	case OBJ_REQUEST_BVECS:
		break;		/* Nothing to do */
	case OBJ_REQUEST_OWN_BVECS:
		kfree(obj_request->bvec_pos.bvecs);
		break;
	default:
		BUG();
	}

	kfree(obj_request->img_extents);
	if (obj_request->copyup_bvecs) {
		for (i = 0; i < obj_request->copyup_bvec_count; i++) {
			if (obj_request->copyup_bvecs[i].bv_page)
				__free_page(obj_request->copyup_bvecs[i].bv_page);
		}
		kfree(obj_request->copyup_bvecs);
	}

	kmem_cache_free(rbd_obj_request_cache, obj_request);
}

/* It's OK to call this for a device with no parent */

static void rbd_spec_put(struct rbd_spec *spec);
static void rbd_dev_unparent(struct rbd_device *rbd_dev)
{
	rbd_dev_remove_parent(rbd_dev);
	rbd_spec_put(rbd_dev->parent_spec);
	rbd_dev->parent_spec = NULL;
	rbd_dev->parent_overlap = 0;
}

/*
 * Parent image reference counting is used to determine when an
 * image's parent fields can be safely torn down--after there are no
 * more in-flight requests to the parent image.  When the last
 * reference is dropped, cleaning them up is safe.
 */
static void rbd_dev_parent_put(struct rbd_device *rbd_dev)
{
	int counter;

	if (!rbd_dev->parent_spec)
		return;

	counter = atomic_dec_return_safe(&rbd_dev->parent_ref);
	if (counter > 0)
		return;

	/* Last reference; clean up parent data structures */

	if (!counter)
		rbd_dev_unparent(rbd_dev);
	else
		rbd_warn(rbd_dev, "parent reference underflow");
}

/*
 * If an image has a non-zero parent overlap, get a reference to its
 * parent.
 *
 * Returns true if the rbd device has a parent with a non-zero
 * overlap and a reference for it was successfully taken, or
 * false otherwise.
 */
static bool rbd_dev_parent_get(struct rbd_device *rbd_dev)
{
	int counter = 0;

	if (!rbd_dev->parent_spec)
		return false;

	if (rbd_dev->parent_overlap)
		counter = atomic_inc_return_safe(&rbd_dev->parent_ref);

	if (counter < 0)
		rbd_warn(rbd_dev, "parent reference overflow");

	return counter > 0;
}

static void rbd_img_request_init(struct rbd_img_request *img_request,
				 struct rbd_device *rbd_dev,
				 enum obj_operation_type op_type)
{
	memset(img_request, 0, sizeof(*img_request));

	img_request->rbd_dev = rbd_dev;
	img_request->op_type = op_type;

	INIT_LIST_HEAD(&img_request->lock_item);
	INIT_LIST_HEAD(&img_request->object_extents);
	mutex_init(&img_request->state_mutex);
}

/*
 * Only snap_id is captured here, for reads.  For writes, snapshot
 * context is captured in rbd_img_object_requests() after exclusive
 * lock is ensured to be held.
 */
static void rbd_img_capture_header(struct rbd_img_request *img_req)
{
	struct rbd_device *rbd_dev = img_req->rbd_dev;

	lockdep_assert_held(&rbd_dev->header_rwsem);

	if (!rbd_img_is_write(img_req))
		img_req->snap_id = rbd_dev->spec->snap_id;

	if (rbd_dev_parent_get(rbd_dev))
		img_request_layered_set(img_req);
}

static void rbd_img_request_destroy(struct rbd_img_request *img_request)
{
	struct rbd_obj_request *obj_request;
	struct rbd_obj_request *next_obj_request;

	dout("%s: img %p\n", __func__, img_request);

	WARN_ON(!list_empty(&img_request->lock_item));
	for_each_obj_request_safe(img_request, obj_request, next_obj_request)
		rbd_img_obj_request_del(img_request, obj_request);

	if (img_request_layered_test(img_request))
		rbd_dev_parent_put(img_request->rbd_dev);

	if (rbd_img_is_write(img_request))
		ceph_put_snap_context(img_request->snapc);

	if (test_bit(IMG_REQ_CHILD, &img_request->flags))
		kmem_cache_free(rbd_img_request_cache, img_request);
}

#define BITS_PER_OBJ	2
#define OBJS_PER_BYTE	(BITS_PER_BYTE / BITS_PER_OBJ)
#define OBJ_MASK	((1 << BITS_PER_OBJ) - 1)

static void __rbd_object_map_index(struct rbd_device *rbd_dev, u64 objno,
				   u64 *index, u8 *shift)
{
	u32 off;

	rbd_assert(objno < rbd_dev->object_map_size);
	*index = div_u64_rem(objno, OBJS_PER_BYTE, &off);
	*shift = (OBJS_PER_BYTE - off - 1) * BITS_PER_OBJ;
}

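/*
 * Example: with BITS_PER_OBJ == 2 there are four object states per
 * byte, so objno 5 lands in byte 1 (index 1) at shift 4 -- i.e. the
 * second-highest bit pair of object_map[1].
 */
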
static u8 __rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u64 index;
	u8 shift;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	return (rbd_dev->object_map[index] >> shift) & OBJ_MASK;
}

static void __rbd_object_map_set(struct rbd_device *rbd_dev, u64 objno, u8 val)
{
	u64 index;
	u8 shift;
	u8 *p;

	lockdep_assert_held(&rbd_dev->object_map_lock);
	rbd_assert(!(val & ~OBJ_MASK));

	__rbd_object_map_index(rbd_dev, objno, &index, &shift);
	p = &rbd_dev->object_map[index];
	*p = (*p & ~(OBJ_MASK << shift)) | (val << shift);
}

static u8 rbd_object_map_get(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	spin_unlock(&rbd_dev->object_map_lock);
	return state;
}

static bool use_object_map(struct rbd_device *rbd_dev)
{
	/*
	 * An image mapped read-only can't use the object map -- it isn't
	 * loaded because the header lock isn't acquired.  Someone else can
	 * write to the image and update the object map behind our back.
	 *
	 * A snapshot can't be written to, so using the object map is always
	 * safe.
	 */
	if (!rbd_is_snap(rbd_dev) && rbd_is_ro(rbd_dev))
		return false;

	return ((rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) &&
		!(rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID));
}

static bool rbd_object_map_may_exist(struct rbd_device *rbd_dev, u64 objno)
{
	u8 state;

	/* fall back to default logic if object map is disabled or invalid */
	if (!use_object_map(rbd_dev))
		return true;

	state = rbd_object_map_get(rbd_dev, objno);
	return state != OBJECT_NONEXISTENT;
}

static void rbd_object_map_name(struct rbd_device *rbd_dev, u64 snap_id,
				struct ceph_object_id *oid)
{
	if (snap_id == CEPH_NOSNAP)
		ceph_oid_printf(oid, "%s%s", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id);
	else
		ceph_oid_printf(oid, "%s%s.%016llx", RBD_OBJECT_MAP_PREFIX,
				rbd_dev->spec->image_id, snap_id);
}

static int rbd_object_map_lock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	u8 lock_type;
	char *lock_tag;
	struct ceph_locker *lockers;
	u32 num_lockers;
	bool broke_lock = false;
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

again:
	ret = ceph_cls_lock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			    CEPH_CLS_LOCK_EXCLUSIVE, "", "", "", 0);
	if (ret != -EBUSY || broke_lock) {
		if (ret == -EEXIST)
			ret = 0; /* already locked by myself */
		if (ret)
			rbd_warn(rbd_dev, "failed to lock object map: %d", ret);
		return ret;
	}

	ret = ceph_cls_lock_info(osdc, &oid, &rbd_dev->header_oloc,
				 RBD_LOCK_NAME, &lock_type, &lock_tag,
				 &lockers, &num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to get object map lockers: %d", ret);
		return ret;
	}

	kfree(lock_tag);
	if (num_lockers == 0)
		goto again;

	rbd_warn(rbd_dev, "breaking object map lock owned by %s%llu",
		 ENTITY_NAME(lockers[0].id.name));

	ret = ceph_cls_break_lock(osdc, &oid, &rbd_dev->header_oloc,
				  RBD_LOCK_NAME, lockers[0].id.cookie,
				  &lockers[0].id.name);
	ceph_free_lockers(lockers, num_lockers);
	if (ret) {
		if (ret == -ENOENT)
			goto again;

		rbd_warn(rbd_dev, "failed to break object map lock: %d", ret);
		return ret;
	}

	broke_lock = true;
	goto again;
}

static void rbd_object_map_unlock(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	int ret;

	rbd_object_map_name(rbd_dev, CEPH_NOSNAP, &oid);

	ret = ceph_cls_unlock(osdc, &oid, &rbd_dev->header_oloc, RBD_LOCK_NAME,
			      "");
	if (ret && ret != -ENOENT)
		rbd_warn(rbd_dev, "failed to unlock object map: %d", ret);
}

static int decode_object_map_header(void **p, void *end, u64 *object_map_size)
{
	u8 struct_v;
	u32 struct_len;
	u32 header_len;
	void *header_end;
	int ret;

	ceph_decode_32_safe(p, end, header_len, e_inval);
	header_end = *p + header_len;

	ret = ceph_start_decoding(p, end, 1, "BitVector header", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	ceph_decode_64_safe(p, end, *object_map_size, e_inval);

	*p = header_end;
	return 0;

e_inval:
	return -EINVAL;
}

static int __rbd_object_map_load(struct rbd_device *rbd_dev)
{
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	CEPH_DEFINE_OID_ONSTACK(oid);
	struct page **pages;
	void *p, *end;
	size_t reply_len;
	u64 num_objects;
	u64 object_map_bytes;
	u64 object_map_size;
	int num_pages;
	int ret;

	rbd_assert(!rbd_dev->object_map && !rbd_dev->object_map_size);

	num_objects = ceph_get_num_objects(&rbd_dev->layout,
					   rbd_dev->mapping.size);
	object_map_bytes = DIV_ROUND_UP_ULL(num_objects * BITS_PER_OBJ,
					    BITS_PER_BYTE);
	num_pages = calc_pages_for(0, object_map_bytes) + 1;
	pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	reply_len = num_pages * PAGE_SIZE;
	rbd_object_map_name(rbd_dev, rbd_dev->spec->snap_id, &oid);
	ret = ceph_osdc_call(osdc, &oid, &rbd_dev->header_oloc,
			     "rbd", "object_map_load", CEPH_OSD_FLAG_READ,
			     NULL, 0, pages, &reply_len);
	if (ret)
		goto out;

	p = page_address(pages[0]);
	end = p + min(reply_len, (size_t)PAGE_SIZE);
	ret = decode_object_map_header(&p, end, &object_map_size);
	if (ret)
		goto out;

	if (object_map_size != num_objects) {
		rbd_warn(rbd_dev, "object map size mismatch: %llu vs %llu",
			 object_map_size, num_objects);
		ret = -EINVAL;
		goto out;
	}

	if (offset_in_page(p) + object_map_bytes > reply_len) {
		ret = -EINVAL;
		goto out;
	}

	rbd_dev->object_map = kvmalloc(object_map_bytes, GFP_KERNEL);
	if (!rbd_dev->object_map) {
		ret = -ENOMEM;
		goto out;
	}

	rbd_dev->object_map_size = object_map_size;
	ceph_copy_from_page_vector(pages, rbd_dev->object_map,
				   offset_in_page(p), object_map_bytes);

out:
	ceph_release_page_vector(pages, num_pages);
	return ret;
}

static void rbd_object_map_free(struct rbd_device *rbd_dev)
{
	kvfree(rbd_dev->object_map);
	rbd_dev->object_map = NULL;
	rbd_dev->object_map_size = 0;
}

static int rbd_object_map_load(struct rbd_device *rbd_dev)
{
	int ret;

	ret = __rbd_object_map_load(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_dev_v2_get_flags(rbd_dev);
	if (ret) {
		rbd_object_map_free(rbd_dev);
		return ret;
	}

	if (rbd_dev->object_map_flags & RBD_FLAG_OBJECT_MAP_INVALID)
		rbd_warn(rbd_dev, "object map is invalid");

	return 0;
}

static int rbd_object_map_open(struct rbd_device *rbd_dev)
{
	int ret;

	ret = rbd_object_map_lock(rbd_dev);
	if (ret)
		return ret;

	ret = rbd_object_map_load(rbd_dev);
	if (ret) {
		rbd_object_map_unlock(rbd_dev);
		return ret;
	}

	return 0;
}

static void rbd_object_map_close(struct rbd_device *rbd_dev)
{
	rbd_object_map_free(rbd_dev);
	rbd_object_map_unlock(rbd_dev);
}

/*
 * This function needs snap_id (or more precisely just something to
 * distinguish between HEAD and snapshot object maps), new_state and
 * current_state that were passed to rbd_object_map_update().
 *
 * To avoid allocating and stashing a context we piggyback on the OSD
 * request.  A HEAD update has two ops (assert_locked).  For new_state
 * and current_state we decode our own object_map_update op, encoded in
 * rbd_cls_object_map_update().
 */
static int rbd_object_map_update_finish(struct rbd_obj_request *obj_req,
					struct ceph_osd_request *osd_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_data *osd_data;
	u64 objno;
	u8 state, new_state, current_state;
	bool has_current_state;
	void *p;

	if (osd_req->r_result)
		return osd_req->r_result;

	/*
	 * Nothing to do for a snapshot object map.
	 */
	if (osd_req->r_num_ops == 1)
		return 0;

	/*
	 * Update in-memory HEAD object map.
	 */
	rbd_assert(osd_req->r_num_ops == 2);
	osd_data = osd_req_op_data(osd_req, 1, cls, request_data);
	rbd_assert(osd_data->type == CEPH_OSD_DATA_TYPE_PAGES);

	p = page_address(osd_data->pages[0]);
	objno = ceph_decode_64(&p);
	rbd_assert(objno == obj_req->ex.oe_objno);
	rbd_assert(ceph_decode_64(&p) == objno + 1);
	new_state = ceph_decode_8(&p);
	has_current_state = ceph_decode_8(&p);
	if (has_current_state)
		current_state = ceph_decode_8(&p);

	spin_lock(&rbd_dev->object_map_lock);
	state = __rbd_object_map_get(rbd_dev, objno);
	if (!has_current_state || current_state == state ||
	    (current_state == OBJECT_EXISTS && state == OBJECT_EXISTS_CLEAN))
		__rbd_object_map_set(rbd_dev, objno, new_state);
	spin_unlock(&rbd_dev->object_map_lock);

	return 0;
}

static void rbd_object_map_callback(struct ceph_osd_request *osd_req)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int result;

	dout("%s osd_req %p result %d for obj_req %p\n", __func__, osd_req,
	     osd_req->r_result, obj_req);

	result = rbd_object_map_update_finish(obj_req, osd_req);
	rbd_obj_handle_request(obj_req, result);
}

static bool update_needed(struct rbd_device *rbd_dev, u64 objno, u8 new_state)
{
	u8 state = rbd_object_map_get(rbd_dev, objno);

	if (state == new_state ||
	    (new_state == OBJECT_PENDING && state == OBJECT_NONEXISTENT) ||
	    (new_state == OBJECT_NONEXISTENT && state != OBJECT_PENDING))
		return false;

	return true;
}

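/*
 * Example: a transition to OBJECT_PENDING is skipped when the object
 * is still OBJECT_NONEXISTENT (discarding a nonexistent object
 * changes nothing), and a transition to OBJECT_NONEXISTENT is only
 * sent for objects previously marked OBJECT_PENDING.
 */
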
static int rbd_cls_object_map_update(struct ceph_osd_request *req,
				     int which, u64 objno, u8 new_state,
				     const u8 *current_state)
{
	struct page **pages;
	void *p, *start;
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "object_map_update");
	if (ret)
		return ret;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	p = start = page_address(pages[0]);
	ceph_encode_64(&p, objno);
	ceph_encode_64(&p, objno + 1);
	ceph_encode_8(&p, new_state);
	if (current_state) {
		ceph_encode_8(&p, 1);
		ceph_encode_8(&p, *current_state);
	} else {
		ceph_encode_8(&p, 0);
	}

	osd_req_op_cls_request_data_pages(req, which, pages, p - start, 0,
					  false, true);
	return 0;
}

/*
 * Return:
 *   0 - object map update sent
 *   1 - object map update isn't needed
 *  <0 - error
 */
static int rbd_object_map_update(struct rbd_obj_request *obj_req, u64 snap_id,
				 u8 new_state, const u8 *current_state)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
	struct ceph_osd_request *req;
	int num_ops = 1;
	int which = 0;
	int ret;

	if (snap_id == CEPH_NOSNAP) {
		if (!update_needed(rbd_dev, obj_req->ex.oe_objno, new_state))
			return 1;

		num_ops++; /* assert_locked */
	}

	req = ceph_osdc_alloc_request(osdc, NULL, num_ops, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	list_add_tail(&req->r_private_item, &obj_req->osd_reqs);
	req->r_callback = rbd_object_map_callback;
	req->r_priv = obj_req;

	rbd_object_map_name(rbd_dev, snap_id, &req->r_base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &rbd_dev->header_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	if (snap_id == CEPH_NOSNAP) {
		/*
		 * Protect against possible race conditions during lock
		 * ownership transitions.
		 */
		ret = ceph_cls_assert_locked(req, which++, RBD_LOCK_NAME,
					     CEPH_CLS_LOCK_EXCLUSIVE, "", "");
		if (ret)
			return ret;
	}

	ret = rbd_cls_object_map_update(req, which, obj_req->ex.oe_objno,
					new_state, current_state);
	if (ret)
		return ret;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		return ret;

	ceph_osdc_start_request(osdc, req, false);
	return 0;
}

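/*
 * Illustrative call (sketch, not a verbatim upstream snippet): before
 * writing object N of the HEAD image, the write state machine would
 * do something like
 *
 *	ret = rbd_object_map_update(obj_req, CEPH_NOSNAP,
 *				    OBJECT_EXISTS, NULL);
 *
 * treating a return of 1 as "map already up to date, proceed".
 */
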
static void prune_extents(struct ceph_file_extent *img_extents,
			  u32 *num_img_extents, u64 overlap)
{
	u32 cnt = *num_img_extents;

	/* drop extents completely beyond the overlap */
	while (cnt && img_extents[cnt - 1].fe_off >= overlap)
		cnt--;

	if (cnt) {
		struct ceph_file_extent *ex = &img_extents[cnt - 1];

		/* trim final overlapping extent */
		if (ex->fe_off + ex->fe_len > overlap)
			ex->fe_len = overlap - ex->fe_off;
	}

	*num_img_extents = cnt;
}

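/*
 * Example: with extents {0~4M, 6M~2M} and a parent overlap of 7M,
 * the second extent is trimmed to 6M~1M; with an overlap of 5M it
 * would be dropped entirely, leaving just 0~4M.
 */
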
/*
 * Determine the byte range(s) covered by either just the object extent
 * or the entire object in the parent image.
 */
static int rbd_obj_calc_img_extents(struct rbd_obj_request *obj_req,
				    bool entire)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	int ret;

	if (!rbd_dev->parent_overlap)
		return 0;

	ret = ceph_extent_to_file(&rbd_dev->layout, obj_req->ex.oe_objno,
				  entire ? 0 : obj_req->ex.oe_off,
				  entire ? rbd_dev->layout.object_size :
							obj_req->ex.oe_len,
				  &obj_req->img_extents,
				  &obj_req->num_img_extents);
	if (ret)
		return ret;

	prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
		      rbd_dev->parent_overlap);
	return 0;
}

static void rbd_osd_setup_data(struct ceph_osd_request *osd_req, int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	switch (obj_req->img_request->data_type) {
	case OBJ_REQUEST_BIO:
		osd_req_op_extent_osd_data_bio(osd_req, which,
					       &obj_req->bio_pos,
					       obj_req->ex.oe_len);
		break;
	case OBJ_REQUEST_BVECS:
	case OBJ_REQUEST_OWN_BVECS:
		rbd_assert(obj_req->bvec_pos.iter.bi_size ==
							obj_req->ex.oe_len);
		rbd_assert(obj_req->bvec_idx == obj_req->bvec_count);
		osd_req_op_extent_osd_data_bvec_pos(osd_req, which,
						    &obj_req->bvec_pos);
		break;
	default:
		BUG();
	}
}

static int rbd_osd_setup_stat(struct ceph_osd_request *osd_req, int which)
{
	struct page **pages;

	/*
	 * The response data for a STAT call consists of:
	 *     le64 length;
	 *     struct {
	 *         le32 tv_sec;
	 *         le32 tv_nsec;
	 *     } mtime;
	 */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	osd_req_op_init(osd_req, which, CEPH_OSD_OP_STAT, 0);
	osd_req_op_raw_data_in_pages(osd_req, which, pages,
				     8 + sizeof(struct ceph_timespec),
				     0, false, true);
	return 0;
}

static int rbd_osd_setup_copyup(struct ceph_osd_request *osd_req, int which,
				u32 bytes)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	int ret;

	ret = osd_req_op_cls_init(osd_req, which, "rbd", "copyup");
	if (ret)
		return ret;

	osd_req_op_cls_request_data_bvecs(osd_req, which, obj_req->copyup_bvecs,
					  obj_req->copyup_bvec_count, bytes);
	return 0;
}

static int rbd_obj_init_read(struct rbd_obj_request *obj_req)
{
	obj_req->read_state = RBD_OBJ_READ_START;
	return 0;
}

static void __rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
				      int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u16 opcode;

	if (!use_object_map(rbd_dev) ||
	    !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST)) {
		osd_req_op_alloc_hint_init(osd_req, which++,
					   rbd_dev->layout.object_size,
					   rbd_dev->layout.object_size,
					   rbd_dev->opts->alloc_hint_flags);
	}

	if (rbd_obj_is_entire(obj_req))
		opcode = CEPH_OSD_OP_WRITEFULL;
	else
		opcode = CEPH_OSD_OP_WRITE;

	osd_req_op_extent_init(osd_req, which, opcode,
			       obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
	rbd_osd_setup_data(osd_req, which);
}

static int rbd_obj_init_write(struct rbd_obj_request *obj_req)
{
	int ret;

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}

static u16 truncate_or_zero_opcode(struct rbd_obj_request *obj_req)
{
	return rbd_obj_is_tail(obj_req) ? CEPH_OSD_OP_TRUNCATE :
					  CEPH_OSD_OP_ZERO;
}

static void __rbd_osd_setup_discard_ops(struct ceph_osd_request *osd_req,
					int which)
{
	struct rbd_obj_request *obj_req = osd_req->r_priv;

	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents) {
		rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
		osd_req_op_init(osd_req, which, CEPH_OSD_OP_DELETE, 0);
	} else {
		osd_req_op_extent_init(osd_req, which,
				       truncate_or_zero_opcode(obj_req),
				       obj_req->ex.oe_off, obj_req->ex.oe_len,
				       0, 0);
	}
}

static int rbd_obj_init_discard(struct rbd_obj_request *obj_req)
{
	struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
	u64 off, next_off;
	int ret;

	/*
	 * Align the range to alloc_size boundary and punt on discards
	 * that are too small to free up any space.
	 *
	 * alloc_size == object_size && is_tail() is a special case for
	 * filestore with filestore_punch_hole = false, needed to allow
	 * truncate (in addition to delete).
	 */
	if (rbd_dev->opts->alloc_size != rbd_dev->layout.object_size ||
	    !rbd_obj_is_tail(obj_req)) {
		off = round_up(obj_req->ex.oe_off, rbd_dev->opts->alloc_size);
		next_off = round_down(obj_req->ex.oe_off + obj_req->ex.oe_len,
				      rbd_dev->opts->alloc_size);
		if (off >= next_off)
			return 1;

		dout("%s %p %llu~%llu -> %llu~%llu\n", __func__,
		     obj_req, obj_req->ex.oe_off, obj_req->ex.oe_len,
		     off, next_off - off);
		obj_req->ex.oe_off = off;
		obj_req->ex.oe_len = next_off - off;
	}

	/* reverse map the entire object onto the parent */
	ret = rbd_obj_calc_img_extents(obj_req, true);
	if (ret)
		return ret;

	obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
	if (rbd_obj_is_entire(obj_req) && !obj_req->num_img_extents)
		obj_req->flags |= RBD_OBJ_FLAG_DELETION;

	obj_req->write_state = RBD_OBJ_WRITE_START;
	return 0;
}

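/*
 * Example: with alloc_size == 64K, a discard of 10K~200K (ending at
 * 210K) is trimmed to the aligned range 64K~128K; a discard of
 * 10K~100K rounds to an empty range and is punted (returns 1).
 */
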
2390 static void __rbd_osd_setup_zeroout_ops(struct ceph_osd_request *osd_req,
2393 struct rbd_obj_request *obj_req = osd_req->r_priv;
2396 if (rbd_obj_is_entire(obj_req)) {
2397 if (obj_req->num_img_extents) {
2398 if (!(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2399 osd_req_op_init(osd_req, which++,
2400 CEPH_OSD_OP_CREATE, 0);
2401 opcode = CEPH_OSD_OP_TRUNCATE;
2403 rbd_assert(obj_req->flags & RBD_OBJ_FLAG_DELETION);
2404 osd_req_op_init(osd_req, which++,
2405 CEPH_OSD_OP_DELETE, 0);
2409 opcode = truncate_or_zero_opcode(obj_req);
2413 osd_req_op_extent_init(osd_req, which, opcode,
2414 obj_req->ex.oe_off, obj_req->ex.oe_len,
2415 0, 0);
2418 static int rbd_obj_init_zeroout(struct rbd_obj_request *obj_req)
2422 /* reverse map the entire object onto the parent */
2423 ret = rbd_obj_calc_img_extents(obj_req, true);
2427 if (!obj_req->num_img_extents) {
2428 obj_req->flags |= RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT;
2429 if (rbd_obj_is_entire(obj_req))
2430 obj_req->flags |= RBD_OBJ_FLAG_DELETION;
2433 obj_req->write_state = RBD_OBJ_WRITE_START;
2437 static int count_write_ops(struct rbd_obj_request *obj_req)
2439 struct rbd_img_request *img_req = obj_req->img_request;
2441 switch (img_req->op_type) {
2443 if (!use_object_map(img_req->rbd_dev) ||
2444 !(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST))
2445 return 2; /* setallochint + write/writefull */
2447 return 1; /* write/writefull */
2448 case OBJ_OP_DISCARD:
2449 return 1; /* delete/truncate/zero */
2450 case OBJ_OP_ZEROOUT:
2451 if (rbd_obj_is_entire(obj_req) && obj_req->num_img_extents &&
2452 !(obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED))
2453 return 2; /* create + truncate */
2455 return 1; /* delete/truncate/zero */
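/*
 * Note that count_write_ops() must agree exactly with the ops emitted
 * by rbd_osd_setup_write_ops() below: the count is used to size the
 * OSD request before the ops are filled in.
 */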
2461 static void rbd_osd_setup_write_ops(struct ceph_osd_request *osd_req,
2464 struct rbd_obj_request *obj_req = osd_req->r_priv;
2466 switch (obj_req->img_request->op_type) {
2468 __rbd_osd_setup_write_ops(osd_req, which);
2470 case OBJ_OP_DISCARD:
2471 __rbd_osd_setup_discard_ops(osd_req, which);
2473 case OBJ_OP_ZEROOUT:
2474 __rbd_osd_setup_zeroout_ops(osd_req, which);
2482 * Prune the list of object requests (adjust offset and/or length, drop
2483 * redundant requests). Prepare object request state machines and image
2484 * request state machine for execution.
2486 static int __rbd_img_fill_request(struct rbd_img_request *img_req)
2488 struct rbd_obj_request *obj_req, *next_obj_req;
2491 for_each_obj_request_safe(img_req, obj_req, next_obj_req) {
2492 switch (img_req->op_type) {
2494 ret = rbd_obj_init_read(obj_req);
2497 ret = rbd_obj_init_write(obj_req);
2499 case OBJ_OP_DISCARD:
2500 ret = rbd_obj_init_discard(obj_req);
2502 case OBJ_OP_ZEROOUT:
2503 ret = rbd_obj_init_zeroout(obj_req);
2511 rbd_img_obj_request_del(img_req, obj_req);
2516 img_req->state = RBD_IMG_START;
2520 union rbd_img_fill_iter {
2521 struct ceph_bio_iter bio_iter;
2522 struct ceph_bvec_iter bvec_iter;
2525 struct rbd_img_fill_ctx {
2526 enum obj_request_type pos_type;
2527 union rbd_img_fill_iter *pos;
2528 union rbd_img_fill_iter iter;
2529 ceph_object_extent_fn_t set_pos_fn;
2530 ceph_object_extent_fn_t count_fn;
2531 ceph_object_extent_fn_t copy_fn;
2534 static struct ceph_object_extent *alloc_object_extent(void *arg)
2536 struct rbd_img_request *img_req = arg;
2537 struct rbd_obj_request *obj_req;
2539 obj_req = rbd_obj_request_create();
2543 rbd_img_obj_request_add(img_req, obj_req);
2544 return &obj_req->ex;
2548 * While su != os && sc == 1 is technically not fancy (it's the same
2549 * layout as su == os && sc == 1), we can't use the nocopy path for it
2550 * because ->set_pos_fn() should be called only once per object.
2551 * ceph_file_to_extents() invokes action_fn once per stripe unit, so
2552 * treat su != os && sc == 1 as fancy.
2554 static bool rbd_layout_is_fancy(struct ceph_file_layout *l)
2556 return l->stripe_unit != l->object_size;
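/*
 * Worked example (hypothetical numbers): with su = 64K, sc = 1 and
 * os = 4M, a 128K request maps to a single object but crosses two
 * stripe units, so the action_fn would run twice for that object.
 * Data placement is identical to su == os, yet the copying path must
 * be taken -- which is exactly what the comment above describes.
 */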
2559 static int rbd_img_fill_request_nocopy(struct rbd_img_request *img_req,
2560 struct ceph_file_extent *img_extents,
2561 u32 num_img_extents,
2562 struct rbd_img_fill_ctx *fctx)
2567 img_req->data_type = fctx->pos_type;
2570 * Create object requests and set each object request's starting
2571 * position in the provided bio (list) or bio_vec array.
2573 fctx->iter = *fctx->pos;
2574 for (i = 0; i < num_img_extents; i++) {
2575 ret = ceph_file_to_extents(&img_req->rbd_dev->layout,
2576 img_extents[i].fe_off,
2577 img_extents[i].fe_len,
2578 &img_req->object_extents,
2579 alloc_object_extent, img_req,
2580 fctx->set_pos_fn, &fctx->iter);
2585 return __rbd_img_fill_request(img_req);
2589 * Map a list of image extents to a list of object extents, create the
2590 * corresponding object requests (normally each to a different object,
2591 * but not always) and add them to @img_req. For each object request,
2592 * set up its data descriptor to point to the corresponding chunk(s) of
2593 * @fctx->pos data buffer.
2595 * Because ceph_file_to_extents() will merge adjacent object extents
2596 * together, each object request's data descriptor may point to multiple
2597 * different chunks of @fctx->pos data buffer.
2599 * @fctx->pos data buffer is assumed to be large enough.
2601 static int rbd_img_fill_request(struct rbd_img_request *img_req,
2602 struct ceph_file_extent *img_extents,
2603 u32 num_img_extents,
2604 struct rbd_img_fill_ctx *fctx)
2606 struct rbd_device *rbd_dev = img_req->rbd_dev;
2607 struct rbd_obj_request *obj_req;
2611 if (fctx->pos_type == OBJ_REQUEST_NODATA ||
2612 !rbd_layout_is_fancy(&rbd_dev->layout))
2613 return rbd_img_fill_request_nocopy(img_req, img_extents,
2614 num_img_extents, fctx);
2616 img_req->data_type = OBJ_REQUEST_OWN_BVECS;
2619 * Create object requests and determine ->bvec_count for each object
2620 * request. Note that ->bvec_count sum over all object requests may
2621 * be greater than the number of bio_vecs in the provided bio (list)
2622 * or bio_vec array because when mapped, those bio_vecs can straddle
2623 * stripe unit boundaries.
2625 fctx->iter = *fctx->pos;
2626 for (i = 0; i < num_img_extents; i++) {
2627 ret = ceph_file_to_extents(&rbd_dev->layout,
2628 img_extents[i].fe_off,
2629 img_extents[i].fe_len,
2630 &img_req->object_extents,
2631 alloc_object_extent, img_req,
2632 fctx->count_fn, &fctx->iter);
2637 for_each_obj_request(img_req, obj_req) {
2638 obj_req->bvec_pos.bvecs = kmalloc_array(obj_req->bvec_count,
2639 sizeof(*obj_req->bvec_pos.bvecs),
2640 GFP_NOIO);
2641 if (!obj_req->bvec_pos.bvecs)
2646 * Fill in each object request's private bio_vec array, splitting and
2647 * rearranging the provided bio_vecs in stripe unit chunks as needed.
2649 fctx->iter = *fctx->pos;
2650 for (i = 0; i < num_img_extents; i++) {
2651 ret = ceph_iterate_extents(&rbd_dev->layout,
2652 img_extents[i].fe_off,
2653 img_extents[i].fe_len,
2654 &img_req->object_extents,
2655 fctx->copy_fn, &fctx->iter);
2660 return __rbd_img_fill_request(img_req);
2663 static int rbd_img_fill_nodata(struct rbd_img_request *img_req,
2666 struct ceph_file_extent ex = { off, len };
2667 union rbd_img_fill_iter dummy = {};
2668 struct rbd_img_fill_ctx fctx = {
2669 .pos_type = OBJ_REQUEST_NODATA,
2673 return rbd_img_fill_request(img_req, &ex, 1, &fctx);
2676 static void set_bio_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2678 struct rbd_obj_request *obj_req =
2679 container_of(ex, struct rbd_obj_request, ex);
2680 struct ceph_bio_iter *it = arg;
2682 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2683 obj_req->bio_pos = *it;
2684 ceph_bio_iter_advance(it, bytes);
2687 static void count_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2689 struct rbd_obj_request *obj_req =
2690 container_of(ex, struct rbd_obj_request, ex);
2691 struct ceph_bio_iter *it = arg;
2693 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2694 ceph_bio_iter_advance_step(it, bytes, ({
2695 obj_req->bvec_count++;
2700 static void copy_bio_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2702 struct rbd_obj_request *obj_req =
2703 container_of(ex, struct rbd_obj_request, ex);
2704 struct ceph_bio_iter *it = arg;
2706 dout("%s objno %llu bytes %u\n", __func__, ex->oe_objno, bytes);
2707 ceph_bio_iter_advance_step(it, bytes, ({
2708 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2709 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2713 static int __rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2714 struct ceph_file_extent *img_extents,
2715 u32 num_img_extents,
2716 struct ceph_bio_iter *bio_pos)
2718 struct rbd_img_fill_ctx fctx = {
2719 .pos_type = OBJ_REQUEST_BIO,
2720 .pos = (union rbd_img_fill_iter *)bio_pos,
2721 .set_pos_fn = set_bio_pos,
2722 .count_fn = count_bio_bvecs,
2723 .copy_fn = copy_bio_bvecs,
2726 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2730 static int rbd_img_fill_from_bio(struct rbd_img_request *img_req,
2731 u64 off, u64 len, struct bio *bio)
2733 struct ceph_file_extent ex = { off, len };
2734 struct ceph_bio_iter it = { .bio = bio, .iter = bio->bi_iter };
2736 return __rbd_img_fill_from_bio(img_req, &ex, 1, &it);
2739 static void set_bvec_pos(struct ceph_object_extent *ex, u32 bytes, void *arg)
2741 struct rbd_obj_request *obj_req =
2742 container_of(ex, struct rbd_obj_request, ex);
2743 struct ceph_bvec_iter *it = arg;
2745 obj_req->bvec_pos = *it;
2746 ceph_bvec_iter_shorten(&obj_req->bvec_pos, bytes);
2747 ceph_bvec_iter_advance(it, bytes);
2750 static void count_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2752 struct rbd_obj_request *obj_req =
2753 container_of(ex, struct rbd_obj_request, ex);
2754 struct ceph_bvec_iter *it = arg;
2756 ceph_bvec_iter_advance_step(it, bytes, ({
2757 obj_req->bvec_count++;
2761 static void copy_bvecs(struct ceph_object_extent *ex, u32 bytes, void *arg)
2763 struct rbd_obj_request *obj_req =
2764 container_of(ex, struct rbd_obj_request, ex);
2765 struct ceph_bvec_iter *it = arg;
2767 ceph_bvec_iter_advance_step(it, bytes, ({
2768 obj_req->bvec_pos.bvecs[obj_req->bvec_idx++] = bv;
2769 obj_req->bvec_pos.iter.bi_size += bv.bv_len;
2773 static int __rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2774 struct ceph_file_extent *img_extents,
2775 u32 num_img_extents,
2776 struct ceph_bvec_iter *bvec_pos)
2778 struct rbd_img_fill_ctx fctx = {
2779 .pos_type = OBJ_REQUEST_BVECS,
2780 .pos = (union rbd_img_fill_iter *)bvec_pos,
2781 .set_pos_fn = set_bvec_pos,
2782 .count_fn = count_bvecs,
2783 .copy_fn = copy_bvecs,
2786 return rbd_img_fill_request(img_req, img_extents, num_img_extents,
2790 static int rbd_img_fill_from_bvecs(struct rbd_img_request *img_req,
2791 struct ceph_file_extent *img_extents,
2792 u32 num_img_extents,
2793 struct bio_vec *bvecs)
2795 struct ceph_bvec_iter it = {
2797 .iter = { .bi_size = ceph_file_extents_bytes(img_extents,
2801 return __rbd_img_fill_from_bvecs(img_req, img_extents, num_img_extents,
2805 static void rbd_img_handle_request_work(struct work_struct *work)
2807 struct rbd_img_request *img_req =
2808 container_of(work, struct rbd_img_request, work);
2810 rbd_img_handle_request(img_req, img_req->work_result);
2813 static void rbd_img_schedule(struct rbd_img_request *img_req, int result)
2815 INIT_WORK(&img_req->work, rbd_img_handle_request_work);
2816 img_req->work_result = result;
2817 queue_work(rbd_wq, &img_req->work);
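/*
 * Bouncing completions through rbd_wq like this keeps the state
 * machines from advancing recursively, e.g. when a child (parent
 * image) request completes inside the context of the request that
 * spawned it -- see the "avoid parent chain recursion" comment below.
 */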
2820 static bool rbd_obj_may_exist(struct rbd_obj_request *obj_req)
2822 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2824 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno)) {
2825 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2829 dout("%s %p objno %llu assuming dne\n", __func__, obj_req,
2830 obj_req->ex.oe_objno);
2834 static int rbd_obj_read_object(struct rbd_obj_request *obj_req)
2836 struct ceph_osd_request *osd_req;
2839 osd_req = __rbd_obj_add_osd_request(obj_req, NULL, 1);
2840 if (IS_ERR(osd_req))
2841 return PTR_ERR(osd_req);
2843 osd_req_op_extent_init(osd_req, 0, CEPH_OSD_OP_READ,
2844 obj_req->ex.oe_off, obj_req->ex.oe_len, 0, 0);
2845 rbd_osd_setup_data(osd_req, 0);
2846 rbd_osd_format_read(osd_req);
2848 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
2852 rbd_osd_submit(osd_req);
2856 static int rbd_obj_read_from_parent(struct rbd_obj_request *obj_req)
2858 struct rbd_img_request *img_req = obj_req->img_request;
2859 struct rbd_device *parent = img_req->rbd_dev->parent;
2860 struct rbd_img_request *child_img_req;
2863 child_img_req = kmem_cache_alloc(rbd_img_request_cache, GFP_NOIO);
2867 rbd_img_request_init(child_img_req, parent, OBJ_OP_READ);
2868 __set_bit(IMG_REQ_CHILD, &child_img_req->flags);
2869 child_img_req->obj_request = obj_req;
2871 down_read(&parent->header_rwsem);
2872 rbd_img_capture_header(child_img_req);
2873 up_read(&parent->header_rwsem);
2875 dout("%s child_img_req %p for obj_req %p\n", __func__, child_img_req,
2878 if (!rbd_img_is_write(img_req)) {
2879 switch (img_req->data_type) {
2880 case OBJ_REQUEST_BIO:
2881 ret = __rbd_img_fill_from_bio(child_img_req,
2882 obj_req->img_extents,
2883 obj_req->num_img_extents,
2886 case OBJ_REQUEST_BVECS:
2887 case OBJ_REQUEST_OWN_BVECS:
2888 ret = __rbd_img_fill_from_bvecs(child_img_req,
2889 obj_req->img_extents,
2890 obj_req->num_img_extents,
2891 &obj_req->bvec_pos);
2897 ret = rbd_img_fill_from_bvecs(child_img_req,
2898 obj_req->img_extents,
2899 obj_req->num_img_extents,
2900 obj_req->copyup_bvecs);
2903 rbd_img_request_destroy(child_img_req);
2907 /* avoid parent chain recursion */
2908 rbd_img_schedule(child_img_req, 0);
2912 static bool rbd_obj_advance_read(struct rbd_obj_request *obj_req, int *result)
2914 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2918 switch (obj_req->read_state) {
2919 case RBD_OBJ_READ_START:
2920 rbd_assert(!*result);
2922 if (!rbd_obj_may_exist(obj_req)) {
2924 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2928 ret = rbd_obj_read_object(obj_req);
2933 obj_req->read_state = RBD_OBJ_READ_OBJECT;
2935 case RBD_OBJ_READ_OBJECT:
2936 if (*result == -ENOENT && rbd_dev->parent_overlap) {
2937 /* reverse map this object extent onto the parent */
2938 ret = rbd_obj_calc_img_extents(obj_req, false);
2943 if (obj_req->num_img_extents) {
2944 ret = rbd_obj_read_from_parent(obj_req);
2949 obj_req->read_state = RBD_OBJ_READ_PARENT;
2955 * -ENOENT means a hole in the image -- zero-fill the entire
2956 * length of the request. A short read also implies zero-fill
2957 * to the end of the request.
2959 if (*result == -ENOENT) {
2960 rbd_obj_zero_range(obj_req, 0, obj_req->ex.oe_len);
2962 } else if (*result >= 0) {
2963 if (*result < obj_req->ex.oe_len)
2964 rbd_obj_zero_range(obj_req, *result,
2965 obj_req->ex.oe_len - *result);
2967 rbd_assert(*result == obj_req->ex.oe_len);
2971 case RBD_OBJ_READ_PARENT:
2973 * The parent image is read only up to the overlap -- zero-fill
2974 * from the overlap to the end of the request.
2977 u32 obj_overlap = rbd_obj_img_extents_bytes(obj_req);
2979 if (obj_overlap < obj_req->ex.oe_len)
2980 rbd_obj_zero_range(obj_req, obj_overlap,
2981 obj_req->ex.oe_len - obj_overlap);
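/*
 * Read state machine in outline: RBD_OBJ_READ_START issues the object
 * read (or, if the object map says the object doesn't exist, skips the
 * OSD and goes straight to the -ENOENT handling); RBD_OBJ_READ_OBJECT
 * turns -ENOENT into a parent image read when there is an overlap, or
 * into zero-fill otherwise; RBD_OBJ_READ_PARENT zero-fills whatever
 * lies beyond the parent overlap.
 */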
2989 static bool rbd_obj_write_is_noop(struct rbd_obj_request *obj_req)
2991 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
2993 if (rbd_object_map_may_exist(rbd_dev, obj_req->ex.oe_objno))
2994 obj_req->flags |= RBD_OBJ_FLAG_MAY_EXIST;
2996 if (!(obj_req->flags & RBD_OBJ_FLAG_MAY_EXIST) &&
2997 (obj_req->flags & RBD_OBJ_FLAG_NOOP_FOR_NONEXISTENT)) {
2998 dout("%s %p noop for nonexistent\n", __func__, obj_req);
3007 * 0 - object map update sent
3008 * 1 - object map update isn't needed
3011 static int rbd_obj_write_pre_object_map(struct rbd_obj_request *obj_req)
3013 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3016 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3019 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3020 new_state = OBJECT_PENDING;
3022 new_state = OBJECT_EXISTS;
3024 return rbd_object_map_update(obj_req, CEPH_NOSNAP, new_state, NULL);
3027 static int rbd_obj_write_object(struct rbd_obj_request *obj_req)
3029 struct ceph_osd_request *osd_req;
3030 int num_ops = count_write_ops(obj_req);
3034 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED)
3035 num_ops++; /* stat */
3037 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3038 if (IS_ERR(osd_req))
3039 return PTR_ERR(osd_req);
3041 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3042 ret = rbd_osd_setup_stat(osd_req, which++);
3047 rbd_osd_setup_write_ops(osd_req, which);
3048 rbd_osd_format_write(osd_req);
3050 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3054 rbd_osd_submit(osd_req);
3059 * copyup_bvecs pages are never highmem pages
3061 static bool is_zero_bvecs(struct bio_vec *bvecs, u32 bytes)
3063 struct ceph_bvec_iter it = {
3065 .iter = { .bi_size = bytes },
3068 ceph_bvec_iter_advance_step(&it, bytes, ({
3069 if (memchr_inv(page_address(bv.bv_page) + bv.bv_offset, 0,
3070 bv.bv_len))
3071 return false;
3076 #define MODS_ONLY U32_MAX
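/*
 * A byte count of MODS_ONLY means "send only the modification ops, no
 * copyup data" -- used when the parent overlap has vanished or when
 * the copyup payload turned out to be all zeros.
 */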
3078 static int rbd_obj_copyup_empty_snapc(struct rbd_obj_request *obj_req,
3081 struct ceph_osd_request *osd_req;
3084 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3085 rbd_assert(bytes > 0 && bytes != MODS_ONLY);
3087 osd_req = __rbd_obj_add_osd_request(obj_req, &rbd_empty_snapc, 1);
3088 if (IS_ERR(osd_req))
3089 return PTR_ERR(osd_req);
3091 ret = rbd_osd_setup_copyup(osd_req, 0, bytes);
3095 rbd_osd_format_write(osd_req);
3097 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3101 rbd_osd_submit(osd_req);
3105 static int rbd_obj_copyup_current_snapc(struct rbd_obj_request *obj_req,
3108 struct ceph_osd_request *osd_req;
3109 int num_ops = count_write_ops(obj_req);
3113 dout("%s obj_req %p bytes %u\n", __func__, obj_req, bytes);
3115 if (bytes != MODS_ONLY)
3116 num_ops++; /* copyup */
3118 osd_req = rbd_obj_add_osd_request(obj_req, num_ops);
3119 if (IS_ERR(osd_req))
3120 return PTR_ERR(osd_req);
3122 if (bytes != MODS_ONLY) {
3123 ret = rbd_osd_setup_copyup(osd_req, which++, bytes);
3128 rbd_osd_setup_write_ops(osd_req, which);
3129 rbd_osd_format_write(osd_req);
3131 ret = ceph_osdc_alloc_messages(osd_req, GFP_NOIO);
3135 rbd_osd_submit(osd_req);
3139 static int setup_copyup_bvecs(struct rbd_obj_request *obj_req, u64 obj_overlap)
3143 rbd_assert(!obj_req->copyup_bvecs);
3144 obj_req->copyup_bvec_count = calc_pages_for(0, obj_overlap);
3145 obj_req->copyup_bvecs = kcalloc(obj_req->copyup_bvec_count,
3146 sizeof(*obj_req->copyup_bvecs),
3148 if (!obj_req->copyup_bvecs)
3151 for (i = 0; i < obj_req->copyup_bvec_count; i++) {
3152 unsigned int len = min(obj_overlap, (u64)PAGE_SIZE);
3154 obj_req->copyup_bvecs[i].bv_page = alloc_page(GFP_NOIO);
3155 if (!obj_req->copyup_bvecs[i].bv_page)
3158 obj_req->copyup_bvecs[i].bv_offset = 0;
3159 obj_req->copyup_bvecs[i].bv_len = len;
3163 rbd_assert(!obj_overlap);
3168 * The target object doesn't exist. Read the data for the entire
3169 * target object up to the overlap point (if any) from the parent,
3170 * so we can use it for a copyup.
3172 static int rbd_obj_copyup_read_parent(struct rbd_obj_request *obj_req)
3174 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3177 rbd_assert(obj_req->num_img_extents);
3178 prune_extents(obj_req->img_extents, &obj_req->num_img_extents,
3179 rbd_dev->parent_overlap);
3180 if (!obj_req->num_img_extents) {
3182 * The overlap has become 0 (most likely because the
3183 * image has been flattened). Re-submit the original write
3184 request -- pass MODS_ONLY since the copyup isn't needed
3185 anymore.
3187 return rbd_obj_copyup_current_snapc(obj_req, MODS_ONLY);
3190 ret = setup_copyup_bvecs(obj_req, rbd_obj_img_extents_bytes(obj_req));
3194 return rbd_obj_read_from_parent(obj_req);
3197 static void rbd_obj_copyup_object_maps(struct rbd_obj_request *obj_req)
3199 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3200 struct ceph_snap_context *snapc = obj_req->img_request->snapc;
3205 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3207 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3210 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3213 for (i = 0; i < snapc->num_snaps; i++) {
3214 if ((rbd_dev->header.features & RBD_FEATURE_FAST_DIFF) &&
3215 i + 1 < snapc->num_snaps)
3216 new_state = OBJECT_EXISTS_CLEAN;
3218 new_state = OBJECT_EXISTS;
3220 ret = rbd_object_map_update(obj_req, snapc->snaps[i],
3223 obj_req->pending.result = ret;
3228 obj_req->pending.num_pending++;
3232 static void rbd_obj_copyup_write_object(struct rbd_obj_request *obj_req)
3234 u32 bytes = rbd_obj_img_extents_bytes(obj_req);
3237 rbd_assert(!obj_req->pending.result && !obj_req->pending.num_pending);
3240 * Only send non-zero copyup data to save some I/O and network
3241 * bandwidth -- zero copyup data is equivalent to the object not
3244 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ZEROS)
3247 if (obj_req->img_request->snapc->num_snaps && bytes > 0) {
3249 * Send a copyup request with an empty snapshot context to
3250 * deep-copyup the object through all existing snapshots.
3251 * A second request with the current snapshot context will be
3252 * sent for the actual modification.
3254 ret = rbd_obj_copyup_empty_snapc(obj_req, bytes);
3256 obj_req->pending.result = ret;
3260 obj_req->pending.num_pending++;
3264 ret = rbd_obj_copyup_current_snapc(obj_req, bytes);
3266 obj_req->pending.result = ret;
3270 obj_req->pending.num_pending++;
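/*
 * Copyup sequencing, roughly: read the parent data; if there are
 * snapshots and the data is non-zero, first issue a copyup with an
 * empty snapshot context so that all existing snapshots see the
 * parent data (deep-copyup); then issue the copyup (or just the
 * modification ops) with the current snapshot context.  Snapshot
 * object map updates are sent in between.
 */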
3273 static bool rbd_obj_advance_copyup(struct rbd_obj_request *obj_req, int *result)
3275 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3279 switch (obj_req->copyup_state) {
3280 case RBD_OBJ_COPYUP_START:
3281 rbd_assert(!*result);
3283 ret = rbd_obj_copyup_read_parent(obj_req);
3288 if (obj_req->num_img_extents)
3289 obj_req->copyup_state = RBD_OBJ_COPYUP_READ_PARENT;
3291 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3293 case RBD_OBJ_COPYUP_READ_PARENT:
3297 if (is_zero_bvecs(obj_req->copyup_bvecs,
3298 rbd_obj_img_extents_bytes(obj_req))) {
3299 dout("%s %p detected zeros\n", __func__, obj_req);
3300 obj_req->flags |= RBD_OBJ_FLAG_COPYUP_ZEROS;
3303 rbd_obj_copyup_object_maps(obj_req);
3304 if (!obj_req->pending.num_pending) {
3305 *result = obj_req->pending.result;
3306 obj_req->copyup_state = RBD_OBJ_COPYUP_OBJECT_MAPS;
3309 obj_req->copyup_state = __RBD_OBJ_COPYUP_OBJECT_MAPS;
3311 case __RBD_OBJ_COPYUP_OBJECT_MAPS:
3312 if (!pending_result_dec(&obj_req->pending, result))
3315 case RBD_OBJ_COPYUP_OBJECT_MAPS:
3317 rbd_warn(rbd_dev, "snap object map update failed: %d",
3318 *result);
3322 rbd_obj_copyup_write_object(obj_req);
3323 if (!obj_req->pending.num_pending) {
3324 *result = obj_req->pending.result;
3325 obj_req->copyup_state = RBD_OBJ_COPYUP_WRITE_OBJECT;
3328 obj_req->copyup_state = __RBD_OBJ_COPYUP_WRITE_OBJECT;
3330 case __RBD_OBJ_COPYUP_WRITE_OBJECT:
3331 if (!pending_result_dec(&obj_req->pending, result))
3334 case RBD_OBJ_COPYUP_WRITE_OBJECT:
3343 * 0 - object map update sent
3344 * 1 - object map update isn't needed
3347 static int rbd_obj_write_post_object_map(struct rbd_obj_request *obj_req)
3349 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3350 u8 current_state = OBJECT_PENDING;
3352 if (!(rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3355 if (!(obj_req->flags & RBD_OBJ_FLAG_DELETION))
3358 return rbd_object_map_update(obj_req, CEPH_NOSNAP, OBJECT_NONEXISTENT,
3362 static bool rbd_obj_advance_write(struct rbd_obj_request *obj_req, int *result)
3364 struct rbd_device *rbd_dev = obj_req->img_request->rbd_dev;
3368 switch (obj_req->write_state) {
3369 case RBD_OBJ_WRITE_START:
3370 rbd_assert(!*result);
3372 rbd_obj_set_copyup_enabled(obj_req);
3373 if (rbd_obj_write_is_noop(obj_req))
3376 ret = rbd_obj_write_pre_object_map(obj_req);
3381 obj_req->write_state = RBD_OBJ_WRITE_PRE_OBJECT_MAP;
3385 case RBD_OBJ_WRITE_PRE_OBJECT_MAP:
3387 rbd_warn(rbd_dev, "pre object map update failed: %d",
3388 *result);
3391 ret = rbd_obj_write_object(obj_req);
3396 obj_req->write_state = RBD_OBJ_WRITE_OBJECT;
3398 case RBD_OBJ_WRITE_OBJECT:
3399 if (*result == -ENOENT) {
3400 if (obj_req->flags & RBD_OBJ_FLAG_COPYUP_ENABLED) {
3402 obj_req->copyup_state = RBD_OBJ_COPYUP_START;
3403 obj_req->write_state = __RBD_OBJ_WRITE_COPYUP;
3407 * On a non-existent object:
3408 * delete - -ENOENT, truncate/zero - 0
3410 if (obj_req->flags & RBD_OBJ_FLAG_DELETION)
3416 obj_req->write_state = RBD_OBJ_WRITE_COPYUP;
3418 case __RBD_OBJ_WRITE_COPYUP:
3419 if (!rbd_obj_advance_copyup(obj_req, result))
3422 case RBD_OBJ_WRITE_COPYUP:
3424 rbd_warn(rbd_dev, "copyup failed: %d", *result);
3427 ret = rbd_obj_write_post_object_map(obj_req);
3432 obj_req->write_state = RBD_OBJ_WRITE_POST_OBJECT_MAP;
3436 case RBD_OBJ_WRITE_POST_OBJECT_MAP:
3438 rbd_warn(rbd_dev, "post object map update failed: %d",
3439 *result);
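/*
 * Write state machine in outline: RBD_OBJ_WRITE_START (noop check and
 * pre object map update) -> RBD_OBJ_WRITE_OBJECT (the write itself;
 * -ENOENT with copyup enabled diverts into the copyup state machine)
 * -> RBD_OBJ_WRITE_COPYUP -> RBD_OBJ_WRITE_POST_OBJECT_MAP (mark the
 * object nonexistent after a successful delete).
 */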
3447 * Return true if @obj_req is completed.
3449 static bool __rbd_obj_handle_request(struct rbd_obj_request *obj_req,
3452 struct rbd_img_request *img_req = obj_req->img_request;
3453 struct rbd_device *rbd_dev = img_req->rbd_dev;
3456 mutex_lock(&obj_req->state_mutex);
3457 if (!rbd_img_is_write(img_req))
3458 done = rbd_obj_advance_read(obj_req, result);
3460 done = rbd_obj_advance_write(obj_req, result);
3461 mutex_unlock(&obj_req->state_mutex);
3463 if (done && *result) {
3464 rbd_assert(*result < 0);
3465 rbd_warn(rbd_dev, "%s at objno %llu %llu~%llu result %d",
3466 obj_op_name(img_req->op_type), obj_req->ex.oe_objno,
3467 obj_req->ex.oe_off, obj_req->ex.oe_len, *result);
3473 * This is open-coded in rbd_img_handle_request() to avoid parent chain
3474 * recursion.
3476 static void rbd_obj_handle_request(struct rbd_obj_request *obj_req, int result)
3478 if (__rbd_obj_handle_request(obj_req, &result))
3479 rbd_img_handle_request(obj_req->img_request, result);
3482 static bool need_exclusive_lock(struct rbd_img_request *img_req)
3484 struct rbd_device *rbd_dev = img_req->rbd_dev;
3486 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK))
3489 if (rbd_is_ro(rbd_dev))
3492 rbd_assert(!test_bit(IMG_REQ_CHILD, &img_req->flags));
3493 if (rbd_dev->opts->lock_on_read ||
3494 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP))
3497 return rbd_img_is_write(img_req);
3500 static bool rbd_lock_add_request(struct rbd_img_request *img_req)
3502 struct rbd_device *rbd_dev = img_req->rbd_dev;
3505 lockdep_assert_held(&rbd_dev->lock_rwsem);
3506 locked = rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED;
3507 spin_lock(&rbd_dev->lock_lists_lock);
3508 rbd_assert(list_empty(&img_req->lock_item));
3510 list_add_tail(&img_req->lock_item, &rbd_dev->acquiring_list);
3512 list_add_tail(&img_req->lock_item, &rbd_dev->running_list);
3513 spin_unlock(&rbd_dev->lock_lists_lock);
3517 static void rbd_lock_del_request(struct rbd_img_request *img_req)
3519 struct rbd_device *rbd_dev = img_req->rbd_dev;
3520 bool need_wakeup = false;
3522 lockdep_assert_held(&rbd_dev->lock_rwsem);
3523 spin_lock(&rbd_dev->lock_lists_lock);
3524 if (!list_empty(&img_req->lock_item)) {
3525 list_del_init(&img_req->lock_item);
3526 need_wakeup = (rbd_dev->lock_state == RBD_LOCK_STATE_RELEASING &&
3527 list_empty(&rbd_dev->running_list));
3529 spin_unlock(&rbd_dev->lock_lists_lock);
3531 complete(&rbd_dev->releasing_wait);
3534 static int rbd_img_exclusive_lock(struct rbd_img_request *img_req)
3536 struct rbd_device *rbd_dev = img_req->rbd_dev;
3538 if (!need_exclusive_lock(img_req))
3541 if (rbd_lock_add_request(img_req))
3544 if (rbd_dev->opts->exclusive) {
3545 WARN_ON(1); /* lock got released? */
3550 * Note the use of mod_delayed_work() in rbd_acquire_lock()
3551 * and cancel_delayed_work() in wake_lock_waiters().
3553 dout("%s rbd_dev %p queueing lock_dwork\n", __func__, rbd_dev);
3554 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
3558 static void rbd_img_object_requests(struct rbd_img_request *img_req)
3560 struct rbd_device *rbd_dev = img_req->rbd_dev;
3561 struct rbd_obj_request *obj_req;
3563 rbd_assert(!img_req->pending.result && !img_req->pending.num_pending);
3564 rbd_assert(!need_exclusive_lock(img_req) ||
3565 __rbd_is_lock_owner(rbd_dev));
3567 if (rbd_img_is_write(img_req)) {
3568 rbd_assert(!img_req->snapc);
3569 down_read(&rbd_dev->header_rwsem);
3570 img_req->snapc = ceph_get_snap_context(rbd_dev->header.snapc);
3571 up_read(&rbd_dev->header_rwsem);
3574 for_each_obj_request(img_req, obj_req) {
3577 if (__rbd_obj_handle_request(obj_req, &result)) {
3579 img_req->pending.result = result;
3583 img_req->pending.num_pending++;
3588 static bool rbd_img_advance(struct rbd_img_request *img_req, int *result)
3593 switch (img_req->state) {
3595 rbd_assert(!*result);
3597 ret = rbd_img_exclusive_lock(img_req);
3602 img_req->state = RBD_IMG_EXCLUSIVE_LOCK;
3606 case RBD_IMG_EXCLUSIVE_LOCK:
3610 rbd_img_object_requests(img_req);
3611 if (!img_req->pending.num_pending) {
3612 *result = img_req->pending.result;
3613 img_req->state = RBD_IMG_OBJECT_REQUESTS;
3616 img_req->state = __RBD_IMG_OBJECT_REQUESTS;
3618 case __RBD_IMG_OBJECT_REQUESTS:
3619 if (!pending_result_dec(&img_req->pending, result))
3622 case RBD_IMG_OBJECT_REQUESTS:
3630 * Return true if @img_req is completed.
3632 static bool __rbd_img_handle_request(struct rbd_img_request *img_req,
3635 struct rbd_device *rbd_dev = img_req->rbd_dev;
3638 if (need_exclusive_lock(img_req)) {
3639 down_read(&rbd_dev->lock_rwsem);
3640 mutex_lock(&img_req->state_mutex);
3641 done = rbd_img_advance(img_req, result);
3643 rbd_lock_del_request(img_req);
3644 mutex_unlock(&img_req->state_mutex);
3645 up_read(&rbd_dev->lock_rwsem);
3647 mutex_lock(&img_req->state_mutex);
3648 done = rbd_img_advance(img_req, result);
3649 mutex_unlock(&img_req->state_mutex);
3652 if (done && *result) {
3653 rbd_assert(*result < 0);
3654 rbd_warn(rbd_dev, "%s%s result %d",
3655 test_bit(IMG_REQ_CHILD, &img_req->flags) ? "child " : "",
3656 obj_op_name(img_req->op_type), *result);
3661 static void rbd_img_handle_request(struct rbd_img_request *img_req, int result)
3664 if (!__rbd_img_handle_request(img_req, &result))
3667 if (test_bit(IMG_REQ_CHILD, &img_req->flags)) {
3668 struct rbd_obj_request *obj_req = img_req->obj_request;
3670 rbd_img_request_destroy(img_req);
3671 if (__rbd_obj_handle_request(obj_req, &result)) {
3672 img_req = obj_req->img_request;
3676 struct request *rq = blk_mq_rq_from_pdu(img_req);
3678 rbd_img_request_destroy(img_req);
3679 blk_mq_end_request(rq, errno_to_blk_status(result));
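/*
 * Completion thus propagates upwards: a child image request hands its
 * result to the object request that spawned it, which may in turn
 * complete its own image request; only a top-level image request ends
 * the originating blk-mq request.
 */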
3683 static const struct rbd_client_id rbd_empty_cid;
3685 static bool rbd_cid_equal(const struct rbd_client_id *lhs,
3686 const struct rbd_client_id *rhs)
3688 return lhs->gid == rhs->gid && lhs->handle == rhs->handle;
3691 static struct rbd_client_id rbd_get_cid(struct rbd_device *rbd_dev)
3693 struct rbd_client_id cid;
3695 mutex_lock(&rbd_dev->watch_mutex);
3696 cid.gid = ceph_client_gid(rbd_dev->rbd_client->client);
3697 cid.handle = rbd_dev->watch_cookie;
3698 mutex_unlock(&rbd_dev->watch_mutex);
3703 * lock_rwsem must be held for write
3705 static void rbd_set_owner_cid(struct rbd_device *rbd_dev,
3706 const struct rbd_client_id *cid)
3708 dout("%s rbd_dev %p %llu-%llu -> %llu-%llu\n", __func__, rbd_dev,
3709 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle,
3710 cid->gid, cid->handle);
3711 rbd_dev->owner_cid = *cid; /* struct */
3714 static void format_lock_cookie(struct rbd_device *rbd_dev, char *buf)
3716 mutex_lock(&rbd_dev->watch_mutex);
3717 sprintf(buf, "%s %llu", RBD_LOCK_COOKIE_PREFIX, rbd_dev->watch_cookie);
3718 mutex_unlock(&rbd_dev->watch_mutex);
3721 static void __rbd_lock(struct rbd_device *rbd_dev, const char *cookie)
3723 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3725 rbd_dev->lock_state = RBD_LOCK_STATE_LOCKED;
3726 strcpy(rbd_dev->lock_cookie, cookie);
3727 rbd_set_owner_cid(rbd_dev, &cid);
3728 queue_work(rbd_dev->task_wq, &rbd_dev->acquired_lock_work);
3732 * lock_rwsem must be held for write
3734 static int rbd_lock(struct rbd_device *rbd_dev)
3736 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3740 WARN_ON(__rbd_is_lock_owner(rbd_dev) ||
3741 rbd_dev->lock_cookie[0] != '\0');
3743 format_lock_cookie(rbd_dev, cookie);
3744 ret = ceph_cls_lock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3745 RBD_LOCK_NAME, CEPH_CLS_LOCK_EXCLUSIVE, cookie,
3746 RBD_LOCK_TAG, "", 0);
3747 if (ret && ret != -EEXIST)
3750 __rbd_lock(rbd_dev, cookie);
3755 * lock_rwsem must be held for write
3757 static void rbd_unlock(struct rbd_device *rbd_dev)
3759 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3762 WARN_ON(!__rbd_is_lock_owner(rbd_dev) ||
3763 rbd_dev->lock_cookie[0] == '\0');
3765 ret = ceph_cls_unlock(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
3766 RBD_LOCK_NAME, rbd_dev->lock_cookie);
3767 if (ret && ret != -ENOENT)
3768 rbd_warn(rbd_dev, "failed to unlock header: %d", ret);
3770 /* treat errors as the image is unlocked */
3771 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
3772 rbd_dev->lock_cookie[0] = '\0';
3773 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
3774 queue_work(rbd_dev->task_wq, &rbd_dev->released_lock_work);
3777 static int __rbd_notify_op_lock(struct rbd_device *rbd_dev,
3778 enum rbd_notify_op notify_op,
3779 struct page ***preply_pages,
3782 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3783 struct rbd_client_id cid = rbd_get_cid(rbd_dev);
3784 char buf[4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN];
3785 int buf_size = sizeof(buf);
3786 void *p = buf;
3788 dout("%s rbd_dev %p notify_op %d\n", __func__, rbd_dev, notify_op);
3790 /* encode *LockPayload NotifyMessage (op + ClientId) */
3791 ceph_start_encoding(&p, 2, 1, buf_size - CEPH_ENCODING_START_BLK_LEN);
3792 ceph_encode_32(&p, notify_op);
3793 ceph_encode_64(&p, cid.gid);
3794 ceph_encode_64(&p, cid.handle);
3796 return ceph_osdc_notify(osdc, &rbd_dev->header_oid,
3797 &rbd_dev->header_oloc, buf, buf_size,
3798 RBD_NOTIFY_TIMEOUT, preply_pages, preply_len);
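/*
 * The payload above, in encoding order: a start block (version 2,
 * compat 1, length), the 32-bit notify op and the 64-bit gid and
 * handle of the ClientId -- matching the buf sizing of
 * 4 + 8 + 8 + CEPH_ENCODING_START_BLK_LEN.
 */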
3801 static void rbd_notify_op_lock(struct rbd_device *rbd_dev,
3802 enum rbd_notify_op notify_op)
3804 __rbd_notify_op_lock(rbd_dev, notify_op, NULL, NULL);
3807 static void rbd_notify_acquired_lock(struct work_struct *work)
3809 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3810 acquired_lock_work);
3812 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_ACQUIRED_LOCK);
3815 static void rbd_notify_released_lock(struct work_struct *work)
3817 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
3818 released_lock_work);
3820 rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_RELEASED_LOCK);
3823 static int rbd_request_lock(struct rbd_device *rbd_dev)
3825 struct page **reply_pages;
3827 bool lock_owner_responded = false;
3830 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3832 ret = __rbd_notify_op_lock(rbd_dev, RBD_NOTIFY_OP_REQUEST_LOCK,
3833 &reply_pages, &reply_len);
3834 if (ret && ret != -ETIMEDOUT) {
3835 rbd_warn(rbd_dev, "failed to request lock: %d", ret);
3839 if (reply_len > 0 && reply_len <= PAGE_SIZE) {
3840 void *p = page_address(reply_pages[0]);
3841 void *const end = p + reply_len;
3844 ceph_decode_32_safe(&p, end, n, e_inval); /* num_acks */
3849 ceph_decode_need(&p, end, 8 + 8, e_inval);
3850 p += 8 + 8; /* skip gid and cookie */
3852 ceph_decode_32_safe(&p, end, len, e_inval);
3856 if (lock_owner_responded) {
3858 "duplicate lock owners detected");
3863 lock_owner_responded = true;
3864 ret = ceph_start_decoding(&p, end, 1, "ResponseMessage",
3865 &struct_v, &len);
3867 rbd_warn(rbd_dev,
3868 "failed to decode ResponseMessage: %d",
3869 ret);
3873 ret = ceph_decode_32(&p);
3877 if (!lock_owner_responded) {
3878 rbd_warn(rbd_dev, "no lock owners detected");
3883 ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
3892 * Either image request state machine(s) or rbd_add_acquire_lock()
3893 * (i.e. "rbd map").
3895 static void wake_lock_waiters(struct rbd_device *rbd_dev, int result)
3897 struct rbd_img_request *img_req;
3899 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
3900 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
3902 cancel_delayed_work(&rbd_dev->lock_dwork);
3903 if (!completion_done(&rbd_dev->acquire_wait)) {
3904 rbd_assert(list_empty(&rbd_dev->acquiring_list) &&
3905 list_empty(&rbd_dev->running_list));
3906 rbd_dev->acquire_err = result;
3907 complete_all(&rbd_dev->acquire_wait);
3911 while (!list_empty(&rbd_dev->acquiring_list)) {
3912 img_req = list_first_entry(&rbd_dev->acquiring_list,
3913 struct rbd_img_request, lock_item);
3914 mutex_lock(&img_req->state_mutex);
3915 rbd_assert(img_req->state == RBD_IMG_EXCLUSIVE_LOCK);
3917 list_move_tail(&img_req->lock_item,
3918 &rbd_dev->running_list);
3920 list_del_init(&img_req->lock_item);
3921 rbd_img_schedule(img_req, result);
3922 mutex_unlock(&img_req->state_mutex);
3926 static bool locker_equal(const struct ceph_locker *lhs,
3927 const struct ceph_locker *rhs)
3929 return lhs->id.name.type == rhs->id.name.type &&
3930 lhs->id.name.num == rhs->id.name.num &&
3931 !strcmp(lhs->id.cookie, rhs->id.cookie) &&
3932 ceph_addr_equal_no_type(&lhs->info.addr, &rhs->info.addr);
3935 static void free_locker(struct ceph_locker *locker)
3938 ceph_free_lockers(locker, 1);
3941 static struct ceph_locker *get_lock_owner_info(struct rbd_device *rbd_dev)
3943 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3944 struct ceph_locker *lockers;
3950 dout("%s rbd_dev %p\n", __func__, rbd_dev);
3952 ret = ceph_cls_lock_info(osdc, &rbd_dev->header_oid,
3953 &rbd_dev->header_oloc, RBD_LOCK_NAME,
3954 &lock_type, &lock_tag, &lockers, &num_lockers);
3956 rbd_warn(rbd_dev, "failed to get header lockers: %d", ret);
3957 return ERR_PTR(ret);
3960 if (num_lockers == 0) {
3961 dout("%s rbd_dev %p no lockers detected\n", __func__, rbd_dev);
3966 if (strcmp(lock_tag, RBD_LOCK_TAG)) {
3967 rbd_warn(rbd_dev, "locked by external mechanism, tag %s",
3972 if (lock_type == CEPH_CLS_LOCK_SHARED) {
3973 rbd_warn(rbd_dev, "shared lock type detected");
3977 WARN_ON(num_lockers != 1);
3978 if (strncmp(lockers[0].id.cookie, RBD_LOCK_COOKIE_PREFIX,
3979 strlen(RBD_LOCK_COOKIE_PREFIX))) {
3980 rbd_warn(rbd_dev, "locked by external mechanism, cookie %s",
3981 lockers[0].id.cookie);
3991 ceph_free_lockers(lockers, num_lockers);
3992 return ERR_PTR(-EBUSY);
3995 static int find_watcher(struct rbd_device *rbd_dev,
3996 const struct ceph_locker *locker)
3998 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
3999 struct ceph_watch_item *watchers;
4005 ret = ceph_osdc_list_watchers(osdc, &rbd_dev->header_oid,
4006 &rbd_dev->header_oloc, &watchers,
4009 rbd_warn(rbd_dev, "failed to get watchers: %d", ret);
4013 sscanf(locker->id.cookie, RBD_LOCK_COOKIE_PREFIX " %llu", &cookie);
4014 for (i = 0; i < num_watchers; i++) {
4016 * Ignore addr->type while comparing. This mimics
4017 * entity_addr_t::get_legacy_str() + strcmp().
4019 if (ceph_addr_equal_no_type(&watchers[i].addr,
4020 &locker->info.addr) &&
4021 watchers[i].cookie == cookie) {
4022 struct rbd_client_id cid = {
4023 .gid = le64_to_cpu(watchers[i].name.num),
4027 dout("%s rbd_dev %p found cid %llu-%llu\n", __func__,
4028 rbd_dev, cid.gid, cid.handle);
4029 rbd_set_owner_cid(rbd_dev, &cid);
4035 dout("%s rbd_dev %p no watchers\n", __func__, rbd_dev);
4043 * lock_rwsem must be held for write
4045 static int rbd_try_lock(struct rbd_device *rbd_dev)
4047 struct ceph_client *client = rbd_dev->rbd_client->client;
4048 struct ceph_locker *locker, *refreshed_locker;
4052 locker = refreshed_locker = NULL;
4054 ret = rbd_lock(rbd_dev);
4057 if (ret != -EBUSY) {
4058 rbd_warn(rbd_dev, "failed to lock header: %d", ret);
4062 /* determine if the current lock holder is still alive */
4063 locker = get_lock_owner_info(rbd_dev);
4064 if (IS_ERR(locker)) {
4065 ret = PTR_ERR(locker);
4072 ret = find_watcher(rbd_dev, locker);
4074 goto out; /* request lock or error */
4076 refreshed_locker = get_lock_owner_info(rbd_dev);
4077 if (IS_ERR(refreshed_locker)) {
4078 ret = PTR_ERR(refreshed_locker);
4079 refreshed_locker = NULL;
4082 if (!refreshed_locker ||
4083 !locker_equal(locker, refreshed_locker))
4086 rbd_warn(rbd_dev, "breaking header lock owned by %s%llu",
4087 ENTITY_NAME(locker->id.name));
4089 ret = ceph_monc_blocklist_add(&client->monc,
4090 &locker->info.addr);
4092 rbd_warn(rbd_dev, "failed to blocklist %s%llu: %d",
4093 ENTITY_NAME(locker->id.name), ret);
4097 ret = ceph_cls_break_lock(&client->osdc, &rbd_dev->header_oid,
4098 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4099 locker->id.cookie, &locker->id.name);
4100 if (ret && ret != -ENOENT) {
4101 rbd_warn(rbd_dev, "failed to break header lock: %d",
4102 ret);
4107 free_locker(refreshed_locker);
4108 free_locker(locker);
4112 free_locker(refreshed_locker);
4113 free_locker(locker);
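/*
 * In outline: try to take the lock; on -EBUSY, look up the current
 * holder and check whether it still has a watch on the header object.
 * If it doesn't (and the holder hasn't changed on re-check), assume
 * the client is dead: blocklist it, break its lock and retry.
 */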
4117 static int rbd_post_acquire_action(struct rbd_device *rbd_dev)
4121 ret = rbd_dev_refresh(rbd_dev);
4125 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP) {
4126 ret = rbd_object_map_open(rbd_dev);
4137 * 1 - caller should call rbd_request_lock()
4140 static int rbd_try_acquire_lock(struct rbd_device *rbd_dev)
4144 down_read(&rbd_dev->lock_rwsem);
4145 dout("%s rbd_dev %p read lock_state %d\n", __func__, rbd_dev,
4146 rbd_dev->lock_state);
4147 if (__rbd_is_lock_owner(rbd_dev)) {
4148 up_read(&rbd_dev->lock_rwsem);
4152 up_read(&rbd_dev->lock_rwsem);
4153 down_write(&rbd_dev->lock_rwsem);
4154 dout("%s rbd_dev %p write lock_state %d\n", __func__, rbd_dev,
4155 rbd_dev->lock_state);
4156 if (__rbd_is_lock_owner(rbd_dev)) {
4157 up_write(&rbd_dev->lock_rwsem);
4161 ret = rbd_try_lock(rbd_dev);
4163 rbd_warn(rbd_dev, "failed to acquire lock: %d", ret);
4167 up_write(&rbd_dev->lock_rwsem);
4171 rbd_assert(rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED);
4172 rbd_assert(list_empty(&rbd_dev->running_list));
4174 ret = rbd_post_acquire_action(rbd_dev);
4176 rbd_warn(rbd_dev, "post-acquire action failed: %d", ret);
4178 * Can't stay in RBD_LOCK_STATE_LOCKED because
4179 * rbd_lock_add_request() would let the request through,
4180 * assuming that e.g. object map is locked and loaded.
4182 rbd_unlock(rbd_dev);
4186 wake_lock_waiters(rbd_dev, ret);
4187 up_write(&rbd_dev->lock_rwsem);
4191 static void rbd_acquire_lock(struct work_struct *work)
4193 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4194 struct rbd_device, lock_dwork);
4197 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4199 ret = rbd_try_acquire_lock(rbd_dev);
4201 dout("%s rbd_dev %p ret %d - done\n", __func__, rbd_dev, ret);
4205 ret = rbd_request_lock(rbd_dev);
4206 if (ret == -ETIMEDOUT) {
4207 goto again; /* treat this as a dead client */
4208 } else if (ret == -EROFS) {
4209 rbd_warn(rbd_dev, "peer will not release lock");
4210 down_write(&rbd_dev->lock_rwsem);
4211 wake_lock_waiters(rbd_dev, ret);
4212 up_write(&rbd_dev->lock_rwsem);
4213 } else if (ret < 0) {
4214 rbd_warn(rbd_dev, "error requesting lock: %d", ret);
4215 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4219 * lock owner acked, but resend if we don't see them
4220 * release the lock
4222 dout("%s rbd_dev %p requeuing lock_dwork\n", __func__,
4224 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork,
4225 msecs_to_jiffies(2 * RBD_NOTIFY_TIMEOUT * MSEC_PER_SEC));
4229 static bool rbd_quiesce_lock(struct rbd_device *rbd_dev)
4231 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4232 lockdep_assert_held_write(&rbd_dev->lock_rwsem);
4234 if (rbd_dev->lock_state != RBD_LOCK_STATE_LOCKED)
4238 * Ensure that all in-flight IO is flushed.
4240 rbd_dev->lock_state = RBD_LOCK_STATE_RELEASING;
4241 rbd_assert(!completion_done(&rbd_dev->releasing_wait));
4242 if (list_empty(&rbd_dev->running_list))
4245 up_write(&rbd_dev->lock_rwsem);
4246 wait_for_completion(&rbd_dev->releasing_wait);
4248 down_write(&rbd_dev->lock_rwsem);
4249 if (rbd_dev->lock_state != RBD_LOCK_STATE_RELEASING)
4252 rbd_assert(list_empty(&rbd_dev->running_list));
4256 static void rbd_pre_release_action(struct rbd_device *rbd_dev)
4258 if (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)
4259 rbd_object_map_close(rbd_dev);
4262 static void __rbd_release_lock(struct rbd_device *rbd_dev)
4264 rbd_assert(list_empty(&rbd_dev->running_list));
4266 rbd_pre_release_action(rbd_dev);
4267 rbd_unlock(rbd_dev);
4271 * lock_rwsem must be held for write
4273 static void rbd_release_lock(struct rbd_device *rbd_dev)
4275 if (!rbd_quiesce_lock(rbd_dev))
4278 __rbd_release_lock(rbd_dev);
4281 * Give others a chance to grab the lock - we would re-acquire
4282 * almost immediately if we got new IO while draining the running
4283 * list otherwise. We need to ack our own notifications, so this
4284 * lock_dwork will be requeued from rbd_handle_released_lock() by
4285 * way of maybe_kick_acquire().
4287 cancel_delayed_work(&rbd_dev->lock_dwork);
4290 static void rbd_release_lock_work(struct work_struct *work)
4292 struct rbd_device *rbd_dev = container_of(work, struct rbd_device,
4295 down_write(&rbd_dev->lock_rwsem);
4296 rbd_release_lock(rbd_dev);
4297 up_write(&rbd_dev->lock_rwsem);
4300 static void maybe_kick_acquire(struct rbd_device *rbd_dev)
4304 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4305 if (__rbd_is_lock_owner(rbd_dev))
4308 spin_lock(&rbd_dev->lock_lists_lock);
4309 have_requests = !list_empty(&rbd_dev->acquiring_list);
4310 spin_unlock(&rbd_dev->lock_lists_lock);
4311 if (have_requests || delayed_work_pending(&rbd_dev->lock_dwork)) {
4312 dout("%s rbd_dev %p kicking lock_dwork\n", __func__, rbd_dev);
4313 mod_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4317 static void rbd_handle_acquired_lock(struct rbd_device *rbd_dev, u8 struct_v,
4320 struct rbd_client_id cid = { 0 };
4322 if (struct_v >= 2) {
4323 cid.gid = ceph_decode_64(p);
4324 cid.handle = ceph_decode_64(p);
4327 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4329 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4330 down_write(&rbd_dev->lock_rwsem);
4331 if (rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4332 dout("%s rbd_dev %p cid %llu-%llu == owner_cid\n",
4333 __func__, rbd_dev, cid.gid, cid.handle);
4335 rbd_set_owner_cid(rbd_dev, &cid);
4337 downgrade_write(&rbd_dev->lock_rwsem);
4339 down_read(&rbd_dev->lock_rwsem);
4342 maybe_kick_acquire(rbd_dev);
4343 up_read(&rbd_dev->lock_rwsem);
4346 static void rbd_handle_released_lock(struct rbd_device *rbd_dev, u8 struct_v,
4349 struct rbd_client_id cid = { 0 };
4351 if (struct_v >= 2) {
4352 cid.gid = ceph_decode_64(p);
4353 cid.handle = ceph_decode_64(p);
4356 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4358 if (!rbd_cid_equal(&cid, &rbd_empty_cid)) {
4359 down_write(&rbd_dev->lock_rwsem);
4360 if (!rbd_cid_equal(&cid, &rbd_dev->owner_cid)) {
4361 dout("%s rbd_dev %p cid %llu-%llu != owner_cid %llu-%llu\n",
4362 __func__, rbd_dev, cid.gid, cid.handle,
4363 rbd_dev->owner_cid.gid, rbd_dev->owner_cid.handle);
4365 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4367 downgrade_write(&rbd_dev->lock_rwsem);
4369 down_read(&rbd_dev->lock_rwsem);
4372 maybe_kick_acquire(rbd_dev);
4373 up_read(&rbd_dev->lock_rwsem);
4377 * Returns result for ResponseMessage to be encoded (<= 0), or 1 if no
4378 * ResponseMessage is needed.
4380 static int rbd_handle_request_lock(struct rbd_device *rbd_dev, u8 struct_v,
4383 struct rbd_client_id my_cid = rbd_get_cid(rbd_dev);
4384 struct rbd_client_id cid = { 0 };
4387 if (struct_v >= 2) {
4388 cid.gid = ceph_decode_64(p);
4389 cid.handle = ceph_decode_64(p);
4392 dout("%s rbd_dev %p cid %llu-%llu\n", __func__, rbd_dev, cid.gid,
4394 if (rbd_cid_equal(&cid, &my_cid))
4397 down_read(&rbd_dev->lock_rwsem);
4398 if (__rbd_is_lock_owner(rbd_dev)) {
4399 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED &&
4400 rbd_cid_equal(&rbd_dev->owner_cid, &rbd_empty_cid))
4404 * encode ResponseMessage(0) so the peer can detect
4409 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED) {
4410 if (!rbd_dev->opts->exclusive) {
4411 dout("%s rbd_dev %p queueing unlock_work\n",
4413 queue_work(rbd_dev->task_wq,
4414 &rbd_dev->unlock_work);
4416 /* refuse to release the lock */
4423 up_read(&rbd_dev->lock_rwsem);
4427 static void __rbd_acknowledge_notify(struct rbd_device *rbd_dev,
4428 u64 notify_id, u64 cookie, s32 *result)
4430 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4431 char buf[4 + CEPH_ENCODING_START_BLK_LEN];
4432 int buf_size = sizeof(buf);
4438 /* encode ResponseMessage */
4439 ceph_start_encoding(&p, 1, 1,
4440 buf_size - CEPH_ENCODING_START_BLK_LEN);
4441 ceph_encode_32(&p, *result);
4446 ret = ceph_osdc_notify_ack(osdc, &rbd_dev->header_oid,
4447 &rbd_dev->header_oloc, notify_id, cookie,
4450 rbd_warn(rbd_dev, "acknowledge_notify failed: %d", ret);
4453 static void rbd_acknowledge_notify(struct rbd_device *rbd_dev, u64 notify_id,
4456 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4457 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, NULL);
4460 static void rbd_acknowledge_notify_result(struct rbd_device *rbd_dev,
4461 u64 notify_id, u64 cookie, s32 result)
4463 dout("%s rbd_dev %p result %d\n", __func__, rbd_dev, result);
4464 __rbd_acknowledge_notify(rbd_dev, notify_id, cookie, &result);
4467 static void rbd_watch_cb(void *arg, u64 notify_id, u64 cookie,
4468 u64 notifier_id, void *data, size_t data_len)
4470 struct rbd_device *rbd_dev = arg;
4471 void *p = data;
4472 void *const end = p + data_len;
4478 dout("%s rbd_dev %p cookie %llu notify_id %llu data_len %zu\n",
4479 __func__, rbd_dev, cookie, notify_id, data_len);
4481 ret = ceph_start_decoding(&p, end, 1, "NotifyMessage",
4484 rbd_warn(rbd_dev, "failed to decode NotifyMessage: %d",
4489 notify_op = ceph_decode_32(&p);
4491 /* legacy notification for header updates */
4492 notify_op = RBD_NOTIFY_OP_HEADER_UPDATE;
4496 dout("%s rbd_dev %p notify_op %u\n", __func__, rbd_dev, notify_op);
4497 switch (notify_op) {
4498 case RBD_NOTIFY_OP_ACQUIRED_LOCK:
4499 rbd_handle_acquired_lock(rbd_dev, struct_v, &p);
4500 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4502 case RBD_NOTIFY_OP_RELEASED_LOCK:
4503 rbd_handle_released_lock(rbd_dev, struct_v, &p);
4504 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4506 case RBD_NOTIFY_OP_REQUEST_LOCK:
4507 ret = rbd_handle_request_lock(rbd_dev, struct_v, &p);
4509 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4512 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4514 case RBD_NOTIFY_OP_HEADER_UPDATE:
4515 ret = rbd_dev_refresh(rbd_dev);
4517 rbd_warn(rbd_dev, "refresh failed: %d", ret);
4519 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4522 if (rbd_is_lock_owner(rbd_dev))
4523 rbd_acknowledge_notify_result(rbd_dev, notify_id,
4524 cookie, -EOPNOTSUPP);
4526 rbd_acknowledge_notify(rbd_dev, notify_id, cookie);
4531 static void __rbd_unregister_watch(struct rbd_device *rbd_dev);
4533 static void rbd_watch_errcb(void *arg, u64 cookie, int err)
4535 struct rbd_device *rbd_dev = arg;
4537 rbd_warn(rbd_dev, "encountered watch error: %d", err);
4539 down_write(&rbd_dev->lock_rwsem);
4540 rbd_set_owner_cid(rbd_dev, &rbd_empty_cid);
4541 up_write(&rbd_dev->lock_rwsem);
4543 mutex_lock(&rbd_dev->watch_mutex);
4544 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED) {
4545 __rbd_unregister_watch(rbd_dev);
4546 rbd_dev->watch_state = RBD_WATCH_STATE_ERROR;
4548 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->watch_dwork, 0);
4550 mutex_unlock(&rbd_dev->watch_mutex);
4554 * watch_mutex must be locked
4556 static int __rbd_register_watch(struct rbd_device *rbd_dev)
4558 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4559 struct ceph_osd_linger_request *handle;
4561 rbd_assert(!rbd_dev->watch_handle);
4562 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4564 handle = ceph_osdc_watch(osdc, &rbd_dev->header_oid,
4565 &rbd_dev->header_oloc, rbd_watch_cb,
4566 rbd_watch_errcb, rbd_dev);
4568 return PTR_ERR(handle);
4570 rbd_dev->watch_handle = handle;
4575 * watch_mutex must be locked
4577 static void __rbd_unregister_watch(struct rbd_device *rbd_dev)
4579 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4582 rbd_assert(rbd_dev->watch_handle);
4583 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4585 ret = ceph_osdc_unwatch(osdc, rbd_dev->watch_handle);
4587 rbd_warn(rbd_dev, "failed to unwatch: %d", ret);
4589 rbd_dev->watch_handle = NULL;
4592 static int rbd_register_watch(struct rbd_device *rbd_dev)
4596 mutex_lock(&rbd_dev->watch_mutex);
4597 rbd_assert(rbd_dev->watch_state == RBD_WATCH_STATE_UNREGISTERED);
4598 ret = __rbd_register_watch(rbd_dev);
4602 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4603 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4606 mutex_unlock(&rbd_dev->watch_mutex);
4610 static void cancel_tasks_sync(struct rbd_device *rbd_dev)
4612 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4614 cancel_work_sync(&rbd_dev->acquired_lock_work);
4615 cancel_work_sync(&rbd_dev->released_lock_work);
4616 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
4617 cancel_work_sync(&rbd_dev->unlock_work);
4621 * header_rwsem must not be held to avoid a deadlock with
4622 * rbd_dev_refresh() when flushing notifies.
4624 static void rbd_unregister_watch(struct rbd_device *rbd_dev)
4626 cancel_tasks_sync(rbd_dev);
4628 mutex_lock(&rbd_dev->watch_mutex);
4629 if (rbd_dev->watch_state == RBD_WATCH_STATE_REGISTERED)
4630 __rbd_unregister_watch(rbd_dev);
4631 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
4632 mutex_unlock(&rbd_dev->watch_mutex);
4634 cancel_delayed_work_sync(&rbd_dev->watch_dwork);
4635 ceph_osdc_flush_notifies(&rbd_dev->rbd_client->client->osdc);
4639 * lock_rwsem must be held for write
4641 static void rbd_reacquire_lock(struct rbd_device *rbd_dev)
4643 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4647 if (!rbd_quiesce_lock(rbd_dev))
4650 format_lock_cookie(rbd_dev, cookie);
4651 ret = ceph_cls_set_cookie(osdc, &rbd_dev->header_oid,
4652 &rbd_dev->header_oloc, RBD_LOCK_NAME,
4653 CEPH_CLS_LOCK_EXCLUSIVE, rbd_dev->lock_cookie,
4654 RBD_LOCK_TAG, cookie);
4656 if (ret != -EOPNOTSUPP)
4657 rbd_warn(rbd_dev, "failed to update lock cookie: %d",
4658 ret);
4661 * Lock cookie cannot be updated on older OSDs, so do
4662 * a manual release and queue an acquire.
4664 __rbd_release_lock(rbd_dev);
4665 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
4667 __rbd_lock(rbd_dev, cookie);
4668 wake_lock_waiters(rbd_dev, 0);
4672 static void rbd_reregister_watch(struct work_struct *work)
4674 struct rbd_device *rbd_dev = container_of(to_delayed_work(work),
4675 struct rbd_device, watch_dwork);
4678 dout("%s rbd_dev %p\n", __func__, rbd_dev);
4680 mutex_lock(&rbd_dev->watch_mutex);
4681 if (rbd_dev->watch_state != RBD_WATCH_STATE_ERROR) {
4682 mutex_unlock(&rbd_dev->watch_mutex);
4686 ret = __rbd_register_watch(rbd_dev);
4688 rbd_warn(rbd_dev, "failed to reregister watch: %d", ret);
4689 if (ret != -EBLOCKLISTED && ret != -ENOENT) {
4690 queue_delayed_work(rbd_dev->task_wq,
4691 &rbd_dev->watch_dwork,
4693 mutex_unlock(&rbd_dev->watch_mutex);
4697 mutex_unlock(&rbd_dev->watch_mutex);
4698 down_write(&rbd_dev->lock_rwsem);
4699 wake_lock_waiters(rbd_dev, ret);
4700 up_write(&rbd_dev->lock_rwsem);
4704 rbd_dev->watch_state = RBD_WATCH_STATE_REGISTERED;
4705 rbd_dev->watch_cookie = rbd_dev->watch_handle->linger_id;
4706 mutex_unlock(&rbd_dev->watch_mutex);
4708 down_write(&rbd_dev->lock_rwsem);
4709 if (rbd_dev->lock_state == RBD_LOCK_STATE_LOCKED)
4710 rbd_reacquire_lock(rbd_dev);
4711 up_write(&rbd_dev->lock_rwsem);
4713 ret = rbd_dev_refresh(rbd_dev);
4715 rbd_warn(rbd_dev, "reregistration refresh failed: %d", ret);
4719 * Synchronous osd object method call. Returns the number of bytes
4720 * returned in the outbound buffer, or a negative error code.
4722 static int rbd_obj_method_sync(struct rbd_device *rbd_dev,
4723 struct ceph_object_id *oid,
4724 struct ceph_object_locator *oloc,
4725 const char *method_name,
4726 const void *outbound,
4727 size_t outbound_size,
4729 size_t inbound_size)
4731 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4732 struct page *req_page = NULL;
4733 struct page *reply_page;
4737 * Method calls are ultimately read operations. The result
4738 should be placed into the inbound buffer provided. They
4739 also supply outbound data--parameters for the object
4740 method. Currently if this is present it will be a
4741 snapshot id.
4744 if (outbound_size > PAGE_SIZE)
4745 return -E2BIG;
4747 req_page = alloc_page(GFP_KERNEL);
4751 memcpy(page_address(req_page), outbound, outbound_size);
4754 reply_page = alloc_page(GFP_KERNEL);
4757 __free_page(req_page);
4761 ret = ceph_osdc_call(osdc, oid, oloc, RBD_DRV_NAME, method_name,
4762 CEPH_OSD_FLAG_READ, req_page, outbound_size,
4763 &reply_page, &inbound_size);
4765 memcpy(inbound, page_address(reply_page), inbound_size);
4770 __free_page(req_page);
4771 __free_page(reply_page);
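/*
 * A typical (illustrative, not verbatim) use is fetching image
 * metadata from the header object, along the lines of:
 *
 *	ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
 *				  &rbd_dev->header_oloc, "get_size",
 *				  &snapid, sizeof(snapid),
 *				  &size_buf, sizeof(size_buf));
 */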
4775 static void rbd_queue_workfn(struct work_struct *work)
4777 struct rbd_img_request *img_request =
4778 container_of(work, struct rbd_img_request, work);
4779 struct rbd_device *rbd_dev = img_request->rbd_dev;
4780 enum obj_operation_type op_type = img_request->op_type;
4781 struct request *rq = blk_mq_rq_from_pdu(img_request);
4782 u64 offset = (u64)blk_rq_pos(rq) << SECTOR_SHIFT;
4783 u64 length = blk_rq_bytes(rq);
4787 /* Ignore/skip any zero-length requests */
4789 dout("%s: zero-length request\n", __func__);
4791 goto err_img_request;
4794 blk_mq_start_request(rq);
4796 down_read(&rbd_dev->header_rwsem);
4797 mapping_size = rbd_dev->mapping.size;
4798 rbd_img_capture_header(img_request);
4799 up_read(&rbd_dev->header_rwsem);
4801 if (offset + length > mapping_size) {
4802 rbd_warn(rbd_dev, "beyond EOD (%llu~%llu > %llu)", offset,
4803 length, mapping_size);
4805 goto err_img_request;
4808 dout("%s rbd_dev %p img_req %p %s %llu~%llu\n", __func__, rbd_dev,
4809 img_request, obj_op_name(op_type), offset, length);
4811 if (op_type == OBJ_OP_DISCARD || op_type == OBJ_OP_ZEROOUT)
4812 result = rbd_img_fill_nodata(img_request, offset, length);
4813 else
4814 result = rbd_img_fill_from_bio(img_request, offset, length,
4815 rq->bio);
4816 if (result)
4817 goto err_img_request;
4819 rbd_img_handle_request(img_request, 0);
4823 rbd_img_request_destroy(img_request);
4825 rbd_warn(rbd_dev, "%s %llx at %llx result %d",
4826 obj_op_name(op_type), length, offset, result);
4827 blk_mq_end_request(rq, errno_to_blk_status(result));
4830 static blk_status_t rbd_queue_rq(struct blk_mq_hw_ctx *hctx,
4831 const struct blk_mq_queue_data *bd)
4833 struct rbd_device *rbd_dev = hctx->queue->queuedata;
4834 struct rbd_img_request *img_req = blk_mq_rq_to_pdu(bd->rq);
4835 enum obj_operation_type op_type;
4837 switch (req_op(bd->rq)) {
4838 case REQ_OP_DISCARD:
4839 op_type = OBJ_OP_DISCARD;
4840 break;
4841 case REQ_OP_WRITE_ZEROES:
4842 op_type = OBJ_OP_ZEROOUT;
4843 break;
4844 case REQ_OP_WRITE:
4845 op_type = OBJ_OP_WRITE;
4846 break;
4847 case REQ_OP_READ:
4848 op_type = OBJ_OP_READ;
4849 break;
4850 default:
4851 rbd_warn(rbd_dev, "unknown req_op %d", req_op(bd->rq));
4852 return BLK_STS_IOERR;
4855 rbd_img_request_init(img_req, rbd_dev, op_type);
4857 if (rbd_img_is_write(img_req)) {
4858 if (rbd_is_ro(rbd_dev)) {
4859 rbd_warn(rbd_dev, "%s on read-only mapping",
4860 obj_op_name(img_req->op_type));
4861 return BLK_STS_IOERR;
4863 rbd_assert(!rbd_is_snap(rbd_dev));
4866 INIT_WORK(&img_req->work, rbd_queue_workfn);
4867 queue_work(rbd_wq, &img_req->work);
4868 return BLK_STS_OK;
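/*
 * Note: rbd_queue_rq() only classifies the request and bounces it to
 * rbd_wq; rbd_queue_workfn() above does the real work because it may
 * block (header_rwsem, exclusive lock waits) and therefore must not
 * run in blk-mq dispatch context.
 */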
4871 static void rbd_free_disk(struct rbd_device *rbd_dev)
4873 blk_cleanup_queue(rbd_dev->disk->queue);
4874 blk_mq_free_tag_set(&rbd_dev->tag_set);
4875 put_disk(rbd_dev->disk);
4876 rbd_dev->disk = NULL;
4879 static int rbd_obj_read_sync(struct rbd_device *rbd_dev,
4880 struct ceph_object_id *oid,
4881 struct ceph_object_locator *oloc,
4882 void *buf, int buf_len)
4885 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
4886 struct ceph_osd_request *req;
4887 struct page **pages;
4888 int num_pages = calc_pages_for(0, buf_len);
4891 req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
4895 ceph_oid_copy(&req->r_base_oid, oid);
4896 ceph_oloc_copy(&req->r_base_oloc, oloc);
4897 req->r_flags = CEPH_OSD_FLAG_READ;
4899 pages = ceph_alloc_page_vector(num_pages, GFP_KERNEL);
4900 if (IS_ERR(pages)) {
4901 ret = PTR_ERR(pages);
4905 osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, 0, buf_len, 0, 0);
4906 osd_req_op_extent_osd_data_pages(req, 0, pages, buf_len, 0, false, true);
4909 ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
4913 ceph_osdc_start_request(osdc, req, false);
4914 ret = ceph_osdc_wait_request(osdc, req);
4915 if (ret >= 0)
4916 ceph_copy_from_page_vector(pages, buf, 0, ret);
4919 ceph_osdc_put_request(req);
4924 * Read the complete header for the given rbd device. On successful
4925 * return, *header will contain up-to-date information about
4926 * the image.
4928 static int rbd_dev_v1_header_info(struct rbd_device *rbd_dev,
4929 struct rbd_image_header *header,
4932 struct rbd_image_header_ondisk *ondisk = NULL;
4939 * The complete header will include an array of its 64-bit
4940 * snapshot ids, followed by the names of those snapshots as
4941 * a contiguous block of NUL-terminated strings. Note that
4942 * the number of snapshots could change by the time we read
4943 * it in, in which case we re-read it.
4950 size = sizeof (*ondisk);
4951 size += snap_count * sizeof (struct rbd_image_snap_ondisk);
4952 size += names_size;
4953 ondisk = kmalloc(size, GFP_KERNEL);
4957 ret = rbd_obj_read_sync(rbd_dev, &rbd_dev->header_oid,
4958 &rbd_dev->header_oloc, ondisk, size);
4961 if ((size_t)ret < size) {
4963 rbd_warn(rbd_dev, "short header read (want %zd got %d)", size, ret);
4967 if (!rbd_dev_ondisk_valid(ondisk)) {
4969 rbd_warn(rbd_dev, "invalid header");
4973 names_size = le64_to_cpu(ondisk->snap_names_len);
4974 want_count = snap_count;
4975 snap_count = le32_to_cpu(ondisk->snap_count);
4976 } while (snap_count != want_count);
4978 ret = rbd_header_from_disk(header, ondisk, first_time);
4985 static void rbd_dev_update_size(struct rbd_device *rbd_dev)
4990 * If EXISTS is not set, rbd_dev->disk may be NULL, so don't
4991 * try to update its size. If REMOVING is set, updating size
4992 * is just useless work since the device can't be opened.
4994 if (test_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags) &&
4995 !test_bit(RBD_DEV_FLAG_REMOVING, &rbd_dev->flags)) {
4996 size = (sector_t)rbd_dev->mapping.size / SECTOR_SIZE;
4997 dout("setting size to %llu sectors", (unsigned long long)size);
4998 set_capacity(rbd_dev->disk, size);
4999 revalidate_disk_size(rbd_dev->disk, true);
5003 static const struct blk_mq_ops rbd_mq_ops = {
5004 .queue_rq = rbd_queue_rq,
5007 static int rbd_init_disk(struct rbd_device *rbd_dev)
5009 struct gendisk *disk;
5010 struct request_queue *q;
5011 unsigned int objset_bytes =
5012 rbd_dev->layout.object_size * rbd_dev->layout.stripe_count;
5015 /* create gendisk info */
5016 disk = alloc_disk(single_major ?
5017 (1 << RBD_SINGLE_MAJOR_PART_SHIFT) :
5018 RBD_MINORS_PER_MAJOR);
5022 snprintf(disk->disk_name, sizeof(disk->disk_name), RBD_DRV_NAME "%d",
5024 disk->major = rbd_dev->major;
5025 disk->first_minor = rbd_dev->minor;
5026 if (single_major)
5027 disk->flags |= GENHD_FL_EXT_DEVT;
5028 disk->fops = &rbd_bd_ops;
5029 disk->private_data = rbd_dev;
5031 memset(&rbd_dev->tag_set, 0, sizeof(rbd_dev->tag_set));
5032 rbd_dev->tag_set.ops = &rbd_mq_ops;
5033 rbd_dev->tag_set.queue_depth = rbd_dev->opts->queue_depth;
5034 rbd_dev->tag_set.numa_node = NUMA_NO_NODE;
5035 rbd_dev->tag_set.flags = BLK_MQ_F_SHOULD_MERGE;
5036 rbd_dev->tag_set.nr_hw_queues = num_present_cpus();
5037 rbd_dev->tag_set.cmd_size = sizeof(struct rbd_img_request);
5039 err = blk_mq_alloc_tag_set(&rbd_dev->tag_set);
5043 q = blk_mq_init_queue(&rbd_dev->tag_set);
5049 blk_queue_flag_set(QUEUE_FLAG_NONROT, q);
5050 /* QUEUE_FLAG_ADD_RANDOM is off by default for blk-mq */
5052 blk_queue_max_hw_sectors(q, objset_bytes >> SECTOR_SHIFT);
5053 q->limits.max_sectors = queue_max_hw_sectors(q);
5054 blk_queue_max_segments(q, USHRT_MAX);
5055 blk_queue_max_segment_size(q, UINT_MAX);
5056 blk_queue_io_min(q, rbd_dev->opts->alloc_size);
5057 blk_queue_io_opt(q, rbd_dev->opts->alloc_size);
5059 if (rbd_dev->opts->trim) {
5060 blk_queue_flag_set(QUEUE_FLAG_DISCARD, q);
5061 q->limits.discard_granularity = rbd_dev->opts->alloc_size;
5062 blk_queue_max_discard_sectors(q, objset_bytes >> SECTOR_SHIFT);
5063 blk_queue_max_write_zeroes_sectors(q, objset_bytes >> SECTOR_SHIFT);
5066 if (!ceph_test_opt(rbd_dev->rbd_client->client, NOCRC))
5067 blk_queue_flag_set(QUEUE_FLAG_STABLE_WRITES, q);
5070 * disk_release() expects a queue ref from add_disk() and will
5071 * put it. Hold an extra ref until add_disk() is called.
5073 WARN_ON(!blk_get_queue(q));
5075 q->queuedata = rbd_dev;
5077 rbd_dev->disk = disk;
5081 blk_mq_free_tag_set(&rbd_dev->tag_set);
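/*
 * Note on the limits set above: objset_bytes is one object set's worth
 * of bytes (object_size * stripe_count), so max_hw_sectors and the
 * discard/write-zeroes limits cap any single request at one object set.
 */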
5091 static struct rbd_device *dev_to_rbd_dev(struct device *dev)
5093 return container_of(dev, struct rbd_device, dev);
5096 static ssize_t rbd_size_show(struct device *dev,
5097 struct device_attribute *attr, char *buf)
5099 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5101 return sprintf(buf, "%llu\n",
5102 (unsigned long long)rbd_dev->mapping.size);
5105 static ssize_t rbd_features_show(struct device *dev,
5106 struct device_attribute *attr, char *buf)
5108 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5110 return sprintf(buf, "0x%016llx\n", rbd_dev->header.features);
5113 static ssize_t rbd_major_show(struct device *dev,
5114 struct device_attribute *attr, char *buf)
5116 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5118 if (rbd_dev->major)
5119 return sprintf(buf, "%d\n", rbd_dev->major);
5121 return sprintf(buf, "(none)\n");
5124 static ssize_t rbd_minor_show(struct device *dev,
5125 struct device_attribute *attr, char *buf)
5127 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5129 return sprintf(buf, "%d\n", rbd_dev->minor);
5132 static ssize_t rbd_client_addr_show(struct device *dev,
5133 struct device_attribute *attr, char *buf)
5135 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5136 struct ceph_entity_addr *client_addr =
5137 ceph_client_addr(rbd_dev->rbd_client->client);
5139 return sprintf(buf, "%pISpc/%u\n", &client_addr->in_addr,
5140 le32_to_cpu(client_addr->nonce));
5143 static ssize_t rbd_client_id_show(struct device *dev,
5144 struct device_attribute *attr, char *buf)
5146 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5148 return sprintf(buf, "client%lld\n",
5149 ceph_client_gid(rbd_dev->rbd_client->client));
5152 static ssize_t rbd_cluster_fsid_show(struct device *dev,
5153 struct device_attribute *attr, char *buf)
5155 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5157 return sprintf(buf, "%pU\n", &rbd_dev->rbd_client->client->fsid);
5160 static ssize_t rbd_config_info_show(struct device *dev,
5161 struct device_attribute *attr, char *buf)
5163 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5165 if (!capable(CAP_SYS_ADMIN))
5166 return -EPERM;
5168 return sprintf(buf, "%s\n", rbd_dev->config_info);
5171 static ssize_t rbd_pool_show(struct device *dev,
5172 struct device_attribute *attr, char *buf)
5174 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5176 return sprintf(buf, "%s\n", rbd_dev->spec->pool_name);
5179 static ssize_t rbd_pool_id_show(struct device *dev,
5180 struct device_attribute *attr, char *buf)
5182 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5184 return sprintf(buf, "%llu\n",
5185 (unsigned long long) rbd_dev->spec->pool_id);
5188 static ssize_t rbd_pool_ns_show(struct device *dev,
5189 struct device_attribute *attr, char *buf)
5191 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5193 return sprintf(buf, "%s\n", rbd_dev->spec->pool_ns ?: "");
5196 static ssize_t rbd_name_show(struct device *dev,
5197 struct device_attribute *attr, char *buf)
5199 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5201 if (rbd_dev->spec->image_name)
5202 return sprintf(buf, "%s\n", rbd_dev->spec->image_name);
5204 return sprintf(buf, "(unknown)\n");
5207 static ssize_t rbd_image_id_show(struct device *dev,
5208 struct device_attribute *attr, char *buf)
5210 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5212 return sprintf(buf, "%s\n", rbd_dev->spec->image_id);
5216 * Shows the name of the currently-mapped snapshot (or
5217 * RBD_SNAP_HEAD_NAME for the base image).
5219 static ssize_t rbd_snap_show(struct device *dev,
5220 struct device_attribute *attr,
5223 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5225 return sprintf(buf, "%s\n", rbd_dev->spec->snap_name);
5228 static ssize_t rbd_snap_id_show(struct device *dev,
5229 struct device_attribute *attr, char *buf)
5231 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5233 return sprintf(buf, "%llu\n", rbd_dev->spec->snap_id);
5237 * For a v2 image, shows the chain of parent images, separated by empty
5238 * lines. For v1 images or if there is no parent, shows "(no parent image)".
5241 static ssize_t rbd_parent_show(struct device *dev,
5242 struct device_attribute *attr,
5245 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5248 if (!rbd_dev->parent)
5249 return sprintf(buf, "(no parent image)\n");
5251 for ( ; rbd_dev->parent; rbd_dev = rbd_dev->parent) {
5252 struct rbd_spec *spec = rbd_dev->parent_spec;
5254 count += sprintf(&buf[count], "%s"
5255 "pool_id %llu\npool_name %s\n"
5256 "pool_ns %s\n"
5257 "image_id %s\nimage_name %s\n"
5258 "snap_id %llu\nsnap_name %s\n"
5259 "overlap %llu\n",
5260 !count ? "" : "\n", /* first? */
5261 spec->pool_id, spec->pool_name,
5262 spec->pool_ns ?: "",
5263 spec->image_id, spec->image_name ?: "(unknown)",
5264 spec->snap_id, spec->snap_name,
5265 rbd_dev->parent_overlap);
5271 static ssize_t rbd_image_refresh(struct device *dev,
5272 struct device_attribute *attr,
5276 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5279 if (!capable(CAP_SYS_ADMIN))
5280 return -EPERM;
5282 ret = rbd_dev_refresh(rbd_dev);
5289 static DEVICE_ATTR(size, 0444, rbd_size_show, NULL);
5290 static DEVICE_ATTR(features, 0444, rbd_features_show, NULL);
5291 static DEVICE_ATTR(major, 0444, rbd_major_show, NULL);
5292 static DEVICE_ATTR(minor, 0444, rbd_minor_show, NULL);
5293 static DEVICE_ATTR(client_addr, 0444, rbd_client_addr_show, NULL);
5294 static DEVICE_ATTR(client_id, 0444, rbd_client_id_show, NULL);
5295 static DEVICE_ATTR(cluster_fsid, 0444, rbd_cluster_fsid_show, NULL);
5296 static DEVICE_ATTR(config_info, 0400, rbd_config_info_show, NULL);
5297 static DEVICE_ATTR(pool, 0444, rbd_pool_show, NULL);
5298 static DEVICE_ATTR(pool_id, 0444, rbd_pool_id_show, NULL);
5299 static DEVICE_ATTR(pool_ns, 0444, rbd_pool_ns_show, NULL);
5300 static DEVICE_ATTR(name, 0444, rbd_name_show, NULL);
5301 static DEVICE_ATTR(image_id, 0444, rbd_image_id_show, NULL);
5302 static DEVICE_ATTR(refresh, 0200, NULL, rbd_image_refresh);
5303 static DEVICE_ATTR(current_snap, 0444, rbd_snap_show, NULL);
5304 static DEVICE_ATTR(snap_id, 0444, rbd_snap_id_show, NULL);
5305 static DEVICE_ATTR(parent, 0444, rbd_parent_show, NULL);
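/*
 * These attributes surface under /sys/bus/rbd/devices/<dev-id>/;
 * illustrative reads (values are made up):
 *
 *	$ cat /sys/bus/rbd/devices/0/pool
 *	rbd
 *	$ cat /sys/bus/rbd/devices/0/current_snap
 *	-
 *
 * See Documentation/ABI/testing/sysfs-bus-rbd for the full list.
 */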
5307 static struct attribute *rbd_attrs[] = {
5308 &dev_attr_size.attr,
5309 &dev_attr_features.attr,
5310 &dev_attr_major.attr,
5311 &dev_attr_minor.attr,
5312 &dev_attr_client_addr.attr,
5313 &dev_attr_client_id.attr,
5314 &dev_attr_cluster_fsid.attr,
5315 &dev_attr_config_info.attr,
5316 &dev_attr_pool.attr,
5317 &dev_attr_pool_id.attr,
5318 &dev_attr_pool_ns.attr,
5319 &dev_attr_name.attr,
5320 &dev_attr_image_id.attr,
5321 &dev_attr_current_snap.attr,
5322 &dev_attr_snap_id.attr,
5323 &dev_attr_parent.attr,
5324 &dev_attr_refresh.attr,
5328 static struct attribute_group rbd_attr_group = {
5332 static const struct attribute_group *rbd_attr_groups[] = {
5337 static void rbd_dev_release(struct device *dev);
5339 static const struct device_type rbd_device_type = {
5341 .groups = rbd_attr_groups,
5342 .release = rbd_dev_release,
5345 static struct rbd_spec *rbd_spec_get(struct rbd_spec *spec)
5347 kref_get(&spec->kref);
5352 static void rbd_spec_free(struct kref *kref);
5353 static void rbd_spec_put(struct rbd_spec *spec)
5356 kref_put(&spec->kref, rbd_spec_free);
5359 static struct rbd_spec *rbd_spec_alloc(void)
5361 struct rbd_spec *spec;
5363 spec = kzalloc(sizeof (*spec), GFP_KERNEL);
5367 spec->pool_id = CEPH_NOPOOL;
5368 spec->snap_id = CEPH_NOSNAP;
5369 kref_init(&spec->kref);
5374 static void rbd_spec_free(struct kref *kref)
5376 struct rbd_spec *spec = container_of(kref, struct rbd_spec, kref);
5378 kfree(spec->pool_name);
5379 kfree(spec->pool_ns);
5380 kfree(spec->image_id);
5381 kfree(spec->image_name);
5382 kfree(spec->snap_name);
5386 static void rbd_dev_free(struct rbd_device *rbd_dev)
5388 WARN_ON(rbd_dev->watch_state != RBD_WATCH_STATE_UNREGISTERED);
5389 WARN_ON(rbd_dev->lock_state != RBD_LOCK_STATE_UNLOCKED);
5391 ceph_oid_destroy(&rbd_dev->header_oid);
5392 ceph_oloc_destroy(&rbd_dev->header_oloc);
5393 kfree(rbd_dev->config_info);
5395 rbd_put_client(rbd_dev->rbd_client);
5396 rbd_spec_put(rbd_dev->spec);
5397 kfree(rbd_dev->opts);
5401 static void rbd_dev_release(struct device *dev)
5403 struct rbd_device *rbd_dev = dev_to_rbd_dev(dev);
5404 bool need_put = !!rbd_dev->opts;
5407 destroy_workqueue(rbd_dev->task_wq);
5408 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5411 rbd_dev_free(rbd_dev);
5414 * This is racy, but way better than dropping the module reference
5415 * outside of the release callback. The race window is pretty small, so
5416 * doing something similar to dm (dm-builtin.c) is overkill.
5419 module_put(THIS_MODULE);
5422 static struct rbd_device *__rbd_dev_create(struct rbd_spec *spec)
5424 struct rbd_device *rbd_dev;
5426 rbd_dev = kzalloc(sizeof(*rbd_dev), GFP_KERNEL);
5430 spin_lock_init(&rbd_dev->lock);
5431 INIT_LIST_HEAD(&rbd_dev->node);
5432 init_rwsem(&rbd_dev->header_rwsem);
5434 rbd_dev->header.data_pool_id = CEPH_NOPOOL;
5435 ceph_oid_init(&rbd_dev->header_oid);
5436 rbd_dev->header_oloc.pool = spec->pool_id;
5437 if (spec->pool_ns) {
5438 WARN_ON(!*spec->pool_ns);
5439 rbd_dev->header_oloc.pool_ns =
5440 ceph_find_or_create_string(spec->pool_ns,
5441 strlen(spec->pool_ns));
5444 mutex_init(&rbd_dev->watch_mutex);
5445 rbd_dev->watch_state = RBD_WATCH_STATE_UNREGISTERED;
5446 INIT_DELAYED_WORK(&rbd_dev->watch_dwork, rbd_reregister_watch);
5448 init_rwsem(&rbd_dev->lock_rwsem);
5449 rbd_dev->lock_state = RBD_LOCK_STATE_UNLOCKED;
5450 INIT_WORK(&rbd_dev->acquired_lock_work, rbd_notify_acquired_lock);
5451 INIT_WORK(&rbd_dev->released_lock_work, rbd_notify_released_lock);
5452 INIT_DELAYED_WORK(&rbd_dev->lock_dwork, rbd_acquire_lock);
5453 INIT_WORK(&rbd_dev->unlock_work, rbd_release_lock_work);
5454 spin_lock_init(&rbd_dev->lock_lists_lock);
5455 INIT_LIST_HEAD(&rbd_dev->acquiring_list);
5456 INIT_LIST_HEAD(&rbd_dev->running_list);
5457 init_completion(&rbd_dev->acquire_wait);
5458 init_completion(&rbd_dev->releasing_wait);
5460 spin_lock_init(&rbd_dev->object_map_lock);
5462 rbd_dev->dev.bus = &rbd_bus_type;
5463 rbd_dev->dev.type = &rbd_device_type;
5464 rbd_dev->dev.parent = &rbd_root_dev;
5465 device_initialize(&rbd_dev->dev);
5471 * Create an rbd_dev for a mapping.
5473 static struct rbd_device *rbd_dev_create(struct rbd_client *rbdc,
5474 struct rbd_spec *spec,
5475 struct rbd_options *opts)
5477 struct rbd_device *rbd_dev;
5479 rbd_dev = __rbd_dev_create(spec);
5483 /* get an id and fill in device name */
5484 rbd_dev->dev_id = ida_simple_get(&rbd_dev_id_ida, 0,
5485 minor_to_rbd_dev_id(1 << MINORBITS), GFP_KERNEL);
5487 if (rbd_dev->dev_id < 0)
5490 sprintf(rbd_dev->name, RBD_DRV_NAME "%d", rbd_dev->dev_id);
5491 rbd_dev->task_wq = alloc_ordered_workqueue("%s-tasks", WQ_MEM_RECLAIM,
5492 rbd_dev->name);
5493 if (!rbd_dev->task_wq)
5496 /* we have a ref from do_rbd_add() */
5497 __module_get(THIS_MODULE);
5499 rbd_dev->rbd_client = rbdc;
5500 rbd_dev->spec = spec;
5501 rbd_dev->opts = opts;
5503 dout("%s rbd_dev %p dev_id %d\n", __func__, rbd_dev, rbd_dev->dev_id);
5507 ida_simple_remove(&rbd_dev_id_ida, rbd_dev->dev_id);
5509 rbd_dev_free(rbd_dev);
5513 static void rbd_dev_destroy(struct rbd_device *rbd_dev)
5516 put_device(&rbd_dev->dev);
5520 * Get the size and object order for an image snapshot, or if
5521 * snap_id is CEPH_NOSNAP, get this information for the base image.
5524 static int _rbd_dev_v2_snap_size(struct rbd_device *rbd_dev, u64 snap_id,
5525 u8 *order, u64 *snap_size)
5527 __le64 snapid = cpu_to_le64(snap_id);
5532 } __attribute__ ((packed)) size_buf = { 0 };
5534 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5535 &rbd_dev->header_oloc, "get_size",
5536 &snapid, sizeof(snapid),
5537 &size_buf, sizeof(size_buf));
5538 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5541 if (ret < sizeof (size_buf))
5542 return -ERANGE;
5545 *order = size_buf.order;
5546 dout(" order %u", (unsigned int)*order);
5548 *snap_size = le64_to_cpu(size_buf.size);
5550 dout(" snap_id 0x%016llx snap_size = %llu\n",
5551 (unsigned long long)snap_id,
5552 (unsigned long long)*snap_size);
5557 static int rbd_dev_v2_object_prefix(struct rbd_device *rbd_dev,
5558 char **pobject_prefix)
5562 char *object_prefix;
5566 /* Response will be an encoded string, which includes a length */
5567 size = sizeof(__le32) + RBD_OBJ_PREFIX_LEN_MAX;
5568 reply_buf = kzalloc(size, GFP_KERNEL);
5572 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5573 &rbd_dev->header_oloc, "get_object_prefix",
5574 NULL, 0, reply_buf, size);
5575 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5578 p = reply_buf;
5580 object_prefix = ceph_extract_encoded_string(&p, p + ret, NULL,
5581 GFP_KERNEL);
5582 if (IS_ERR(object_prefix)) {
5583 ret = PTR_ERR(object_prefix);
5588 *pobject_prefix = object_prefix;
5589 dout(" object_prefix = %s\n", object_prefix);
5596 static int _rbd_dev_v2_snap_features(struct rbd_device *rbd_dev, u64 snap_id,
5597 bool read_only, u64 *snap_features)
5606 } __attribute__ ((packed)) features_buf = { 0 };
5610 features_in.snap_id = cpu_to_le64(snap_id);
5611 features_in.read_only = read_only;
5613 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5614 &rbd_dev->header_oloc, "get_features",
5615 &features_in, sizeof(features_in),
5616 &features_buf, sizeof(features_buf));
5617 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5620 if (ret < sizeof (features_buf))
5621 return -ERANGE;
5623 unsup = le64_to_cpu(features_buf.incompat) & ~RBD_FEATURES_SUPPORTED;
5625 rbd_warn(rbd_dev, "image uses unsupported features: 0x%llx",
5630 *snap_features = le64_to_cpu(features_buf.features);
5632 dout(" snap_id 0x%016llx features = 0x%016llx incompat = 0x%016llx\n",
5633 (unsigned long long)snap_id,
5634 (unsigned long long)*snap_features,
5635 (unsigned long long)le64_to_cpu(features_buf.incompat));
5641 * These are generic image flags, but since they are used only for
5642 * object map, store them in rbd_dev->object_map_flags.
5644 * For the same reason, this function is called only on object map
5645 * (re)load and not on header refresh.
5647 static int rbd_dev_v2_get_flags(struct rbd_device *rbd_dev)
5649 __le64 snapid = cpu_to_le64(rbd_dev->spec->snap_id);
5653 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5654 &rbd_dev->header_oloc, "get_flags",
5655 &snapid, sizeof(snapid),
5656 &flags, sizeof(flags));
5659 if (ret < sizeof(flags))
5660 return -EBADMSG;
5662 rbd_dev->object_map_flags = le64_to_cpu(flags);
5666 struct parent_image_info {
5668 const char *pool_ns;
5669 const char *image_id;
5676 static void rbd_parent_info_cleanup(struct parent_image_info *pii)
5678 kfree(pii->pool_ns);
5679 kfree(pii->image_id);
5681 memset(pii, 0, sizeof(*pii));
5685 * The caller is responsible for @pii.
5687 static int decode_parent_image_spec(void **p, void *end,
5688 struct parent_image_info *pii)
5694 ret = ceph_start_decoding(p, end, 1, "ParentImageSpec",
5695 &struct_v, &struct_len);
5699 ceph_decode_64_safe(p, end, pii->pool_id, e_inval);
5700 pii->pool_ns = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5701 if (IS_ERR(pii->pool_ns)) {
5702 ret = PTR_ERR(pii->pool_ns);
5703 pii->pool_ns = NULL;
5706 pii->image_id = ceph_extract_encoded_string(p, end, NULL, GFP_KERNEL);
5707 if (IS_ERR(pii->image_id)) {
5708 ret = PTR_ERR(pii->image_id);
5709 pii->image_id = NULL;
5712 ceph_decode_64_safe(p, end, pii->snap_id, e_inval);
5719 static int __get_parent_info(struct rbd_device *rbd_dev,
5720 struct page *req_page,
5721 struct page *reply_page,
5722 struct parent_image_info *pii)
5724 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5725 size_t reply_len = PAGE_SIZE;
5729 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5730 "rbd", "parent_get", CEPH_OSD_FLAG_READ,
5731 req_page, sizeof(u64), &reply_page, &reply_len);
5733 return ret == -EOPNOTSUPP ? 1 : ret;
5735 p = page_address(reply_page);
5736 end = p + reply_len;
5737 ret = decode_parent_image_spec(&p, end, pii);
5741 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5742 "rbd", "parent_overlap_get", CEPH_OSD_FLAG_READ,
5743 req_page, sizeof(u64), &reply_page, &reply_len);
5747 p = page_address(reply_page);
5748 end = p + reply_len;
5749 ceph_decode_8_safe(&p, end, pii->has_overlap, e_inval);
5750 if (pii->has_overlap)
5751 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5753 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5754 __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5755 pii->has_overlap, pii->overlap);
5763 * The caller is responsible for @pii.
5765 static int __get_parent_info_legacy(struct rbd_device *rbd_dev,
5766 struct page *req_page,
5767 struct page *reply_page,
5768 struct parent_image_info *pii)
5770 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
5771 size_t reply_len = PAGE_SIZE;
5775 ret = ceph_osdc_call(osdc, &rbd_dev->header_oid, &rbd_dev->header_oloc,
5776 "rbd", "get_parent", CEPH_OSD_FLAG_READ,
5777 req_page, sizeof(u64), &reply_page, &reply_len);
5781 p = page_address(reply_page);
5782 end = p + reply_len;
5783 ceph_decode_64_safe(&p, end, pii->pool_id, e_inval);
5784 pii->image_id = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
5785 if (IS_ERR(pii->image_id)) {
5786 ret = PTR_ERR(pii->image_id);
5787 pii->image_id = NULL;
5790 ceph_decode_64_safe(&p, end, pii->snap_id, e_inval);
5791 pii->has_overlap = true;
5792 ceph_decode_64_safe(&p, end, pii->overlap, e_inval);
5794 dout("%s pool_id %llu pool_ns %s image_id %s snap_id %llu has_overlap %d overlap %llu\n",
5795 __func__, pii->pool_id, pii->pool_ns, pii->image_id, pii->snap_id,
5796 pii->has_overlap, pii->overlap);
5803 static int rbd_dev_v2_parent_info(struct rbd_device *rbd_dev,
5804 struct parent_image_info *pii)
5806 struct page *req_page, *reply_page;
5810 req_page = alloc_page(GFP_KERNEL);
5814 reply_page = alloc_page(GFP_KERNEL);
5816 __free_page(req_page);
5820 p = page_address(req_page);
5821 ceph_encode_64(&p, rbd_dev->spec->snap_id);
5822 ret = __get_parent_info(rbd_dev, req_page, reply_page, pii);
5824 ret = __get_parent_info_legacy(rbd_dev, req_page, reply_page,
5827 __free_page(req_page);
5828 __free_page(reply_page);
5832 static int rbd_dev_setup_parent(struct rbd_device *rbd_dev)
5834 struct rbd_spec *parent_spec;
5835 struct parent_image_info pii = { 0 };
5838 parent_spec = rbd_spec_alloc();
5842 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
5846 if (pii.pool_id == CEPH_NOPOOL || !pii.has_overlap)
5847 goto out; /* No parent? No problem. */
5849 /* The ceph file layout needs to fit pool id in 32 bits */
5852 if (pii.pool_id > (u64)U32_MAX) {
5853 rbd_warn(NULL, "parent pool id too large (%llu > %u)",
5854 (unsigned long long)pii.pool_id, U32_MAX);
5859 * The parent won't change except when the clone is flattened,
5860 * so we only need to record the parent image spec once.
5862 parent_spec->pool_id = pii.pool_id;
5863 if (pii.pool_ns && *pii.pool_ns) {
5864 parent_spec->pool_ns = pii.pool_ns;
5867 parent_spec->image_id = pii.image_id;
5868 pii.image_id = NULL;
5869 parent_spec->snap_id = pii.snap_id;
5871 rbd_assert(!rbd_dev->parent_spec);
5872 rbd_dev->parent_spec = parent_spec;
5873 parent_spec = NULL; /* rbd_dev now owns this */
5876 * Record the parent overlap. If it's zero, issue a warning as
5877 * we will proceed as if there is no parent.
5879 if (!pii.overlap)
5880 rbd_warn(rbd_dev, "clone is standalone (overlap 0)");
5881 rbd_dev->parent_overlap = pii.overlap;
5886 rbd_parent_info_cleanup(&pii);
5887 rbd_spec_put(parent_spec);
5891 static int rbd_dev_v2_striping_info(struct rbd_device *rbd_dev,
5892 u64 *stripe_unit, u64 *stripe_count)
5896 __le64 stripe_count;
5897 } __attribute__ ((packed)) striping_info_buf = { 0 };
5898 size_t size = sizeof (striping_info_buf);
5901 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5902 &rbd_dev->header_oloc, "get_stripe_unit_count",
5903 NULL, 0, &striping_info_buf, size);
5904 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5910 *stripe_unit = le64_to_cpu(striping_info_buf.stripe_unit);
5911 *stripe_count = le64_to_cpu(striping_info_buf.stripe_count);
5912 dout(" stripe_unit = %llu stripe_count = %llu\n", *stripe_unit,
5913 *stripe_count);
5918 static int rbd_dev_v2_data_pool(struct rbd_device *rbd_dev, s64 *data_pool_id)
5920 __le64 data_pool_buf;
5923 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
5924 &rbd_dev->header_oloc, "get_data_pool",
5925 NULL, 0, &data_pool_buf,
5926 sizeof(data_pool_buf));
5927 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
5930 if (ret < sizeof(data_pool_buf))
5931 return -EBADMSG;
5933 *data_pool_id = le64_to_cpu(data_pool_buf);
5934 dout(" data_pool_id = %lld\n", *data_pool_id);
5935 WARN_ON(*data_pool_id == CEPH_NOPOOL);
5940 static char *rbd_dev_image_name(struct rbd_device *rbd_dev)
5942 CEPH_DEFINE_OID_ONSTACK(oid);
5943 size_t image_id_size;
5948 void *reply_buf = NULL;
5950 char *image_name = NULL;
5953 rbd_assert(!rbd_dev->spec->image_name);
5955 len = strlen(rbd_dev->spec->image_id);
5956 image_id_size = sizeof (__le32) + len;
5957 image_id = kmalloc(image_id_size, GFP_KERNEL);
5962 end = image_id + image_id_size;
5963 ceph_encode_string(&p, end, rbd_dev->spec->image_id, (u32)len);
5965 size = sizeof (__le32) + RBD_IMAGE_NAME_LEN_MAX;
5966 reply_buf = kmalloc(size, GFP_KERNEL);
5970 ceph_oid_printf(&oid, "%s", RBD_DIRECTORY);
5971 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
5972 "dir_get_name", image_id, image_id_size,
5977 end = reply_buf + ret;
5979 image_name = ceph_extract_encoded_string(&p, end, &len, GFP_KERNEL);
5980 if (IS_ERR(image_name))
5983 dout("%s: name is %s len is %zd\n", __func__, image_name, len);
5991 static u64 rbd_v1_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
5993 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
5994 const char *snap_name;
5997 /* Skip over names until we find the one we are looking for */
5999 snap_name = rbd_dev->header.snap_names;
6000 while (which < snapc->num_snaps) {
6001 if (!strcmp(name, snap_name))
6002 return snapc->snaps[which];
6003 snap_name += strlen(snap_name) + 1;
6004 which++;
6009 static u64 rbd_v2_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6011 struct ceph_snap_context *snapc = rbd_dev->header.snapc;
6016 for (which = 0; !found && which < snapc->num_snaps; which++) {
6017 const char *snap_name;
6019 snap_id = snapc->snaps[which];
6020 snap_name = rbd_dev_v2_snap_name(rbd_dev, snap_id);
6021 if (IS_ERR(snap_name)) {
6022 /* ignore no-longer existing snapshots */
6023 if (PTR_ERR(snap_name) == -ENOENT)
6024 continue;
6028 found = !strcmp(name, snap_name);
6031 return found ? snap_id : CEPH_NOSNAP;
6035 * Assumes name is never RBD_SNAP_HEAD_NAME; returns CEPH_NOSNAP if
6036 * no snapshot by that name is found, or if an error occurs.
6038 static u64 rbd_snap_id_by_name(struct rbd_device *rbd_dev, const char *name)
6040 if (rbd_dev->image_format == 1)
6041 return rbd_v1_snap_id_by_name(rbd_dev, name);
6043 return rbd_v2_snap_id_by_name(rbd_dev, name);
6047 * An image being mapped will have everything but the snap id.
6049 static int rbd_spec_fill_snap_id(struct rbd_device *rbd_dev)
6051 struct rbd_spec *spec = rbd_dev->spec;
6053 rbd_assert(spec->pool_id != CEPH_NOPOOL && spec->pool_name);
6054 rbd_assert(spec->image_id && spec->image_name);
6055 rbd_assert(spec->snap_name);
6057 if (strcmp(spec->snap_name, RBD_SNAP_HEAD_NAME)) {
6060 snap_id = rbd_snap_id_by_name(rbd_dev, spec->snap_name);
6061 if (snap_id == CEPH_NOSNAP)
6062 return -ENOENT;
6064 spec->snap_id = snap_id;
6066 spec->snap_id = CEPH_NOSNAP;
6073 * A parent image will have all ids but none of the names.
6075 * All names in an rbd spec are dynamically allocated. It's OK if we
6076 * can't figure out the name for an image id.
6078 static int rbd_spec_fill_names(struct rbd_device *rbd_dev)
6080 struct ceph_osd_client *osdc = &rbd_dev->rbd_client->client->osdc;
6081 struct rbd_spec *spec = rbd_dev->spec;
6082 const char *pool_name;
6083 const char *image_name;
6084 const char *snap_name;
6087 rbd_assert(spec->pool_id != CEPH_NOPOOL);
6088 rbd_assert(spec->image_id);
6089 rbd_assert(spec->snap_id != CEPH_NOSNAP);
6091 /* Get the pool name; we have to make our own copy of this */
6093 pool_name = ceph_pg_pool_name_by_id(osdc->osdmap, spec->pool_id);
6094 if (!pool_name) {
6095 rbd_warn(rbd_dev, "no pool with id %llu", spec->pool_id);
6096 return -EIO;
6098 pool_name = kstrdup(pool_name, GFP_KERNEL);
6102 /* Fetch the image name; tolerate failure here */
6104 image_name = rbd_dev_image_name(rbd_dev);
6105 if (!image_name)
6106 rbd_warn(rbd_dev, "unable to get image name");
6108 /* Fetch the snapshot name */
6110 snap_name = rbd_snap_name(rbd_dev, spec->snap_id);
6111 if (IS_ERR(snap_name)) {
6112 ret = PTR_ERR(snap_name);
6116 spec->pool_name = pool_name;
6117 spec->image_name = image_name;
6118 spec->snap_name = snap_name;
6128 static int rbd_dev_v2_snap_context(struct rbd_device *rbd_dev,
6129 struct ceph_snap_context **psnapc)
6138 struct ceph_snap_context *snapc;
6142 * We'll need room for the seq value (maximum snapshot id),
6143 * snapshot count, and array of that many snapshot ids.
6144 * For now we have a fixed upper limit on the number we're
6145 * prepared to receive.
6147 size = sizeof (__le64) + sizeof (__le32) +
6148 RBD_MAX_SNAP_COUNT * sizeof (__le64);
6149 reply_buf = kzalloc(size, GFP_KERNEL);
6153 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6154 &rbd_dev->header_oloc, "get_snapcontext",
6155 NULL, 0, reply_buf, size);
6156 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6161 end = reply_buf + ret;
6163 ceph_decode_64_safe(&p, end, seq, out);
6164 ceph_decode_32_safe(&p, end, snap_count, out);
6167 * Make sure the reported number of snapshot ids wouldn't go
6168 * beyond the end of our buffer. But before checking that,
6169 * make sure the computed size of the snapshot context we
6170 * allocate is representable in a size_t.
6172 if (snap_count > (SIZE_MAX - sizeof (struct ceph_snap_context))
6173 / sizeof (u64))
6174 goto out;
6177 if (!ceph_has_room(&p, end, snap_count * sizeof (__le64)))
6178 goto out;
6181 snapc = ceph_create_snap_context(snap_count, GFP_KERNEL);
6187 for (i = 0; i < snap_count; i++)
6188 snapc->snaps[i] = ceph_decode_64(&p);
6191 dout(" snap context seq = %llu, snap_count = %u\n",
6192 (unsigned long long)seq, (unsigned int)snap_count);
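/*
 * Reply layout consumed above (inferred from the decode calls):
 * a __le64 seq, a __le32 snap_count, then snap_count __le64
 * snapshot ids; RBD_MAX_SNAP_COUNT bounds the buffer we offer.
 */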
6199 static const char *rbd_dev_v2_snap_name(struct rbd_device *rbd_dev,
6210 size = sizeof (__le32) + RBD_MAX_SNAP_NAME_LEN;
6211 reply_buf = kmalloc(size, GFP_KERNEL);
6213 return ERR_PTR(-ENOMEM);
6215 snapid = cpu_to_le64(snap_id);
6216 ret = rbd_obj_method_sync(rbd_dev, &rbd_dev->header_oid,
6217 &rbd_dev->header_oloc, "get_snapshot_name",
6218 &snapid, sizeof(snapid), reply_buf, size);
6219 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6221 snap_name = ERR_PTR(ret);
6226 end = reply_buf + ret;
6227 snap_name = ceph_extract_encoded_string(&p, end, NULL, GFP_KERNEL);
6228 if (IS_ERR(snap_name))
6231 dout(" snap_id 0x%016llx snap_name = %s\n",
6232 (unsigned long long)snap_id, snap_name);
6239 static int rbd_dev_v2_header_info(struct rbd_device *rbd_dev,
6240 struct rbd_image_header *header,
6245 ret = _rbd_dev_v2_snap_size(rbd_dev, CEPH_NOSNAP,
6246 first_time ? &header->obj_order : NULL,
6247 &header->image_size);
6252 ret = rbd_dev_v2_header_onetime(rbd_dev, header);
6257 ret = rbd_dev_v2_snap_context(rbd_dev, &header->snapc);
6264 static int rbd_dev_header_info(struct rbd_device *rbd_dev,
6265 struct rbd_image_header *header,
6268 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6269 rbd_assert(!header->object_prefix && !header->snapc);
6271 if (rbd_dev->image_format == 1)
6272 return rbd_dev_v1_header_info(rbd_dev, header, first_time);
6274 return rbd_dev_v2_header_info(rbd_dev, header, first_time);
6278 * Skips over white space at *buf, and updates *buf to point to the
6279 * first found non-space character (if any). Returns the length of
6280 * the token (string of non-white space characters) found. Note
6281 * that *buf must be terminated with '\0'.
6283 static inline size_t next_token(const char **buf)
6286 * These are the characters that produce nonzero for
6287 * isspace() in the "C" and "POSIX" locales.
6289 const char *spaces = " \f\n\r\t\v";
6291 *buf += strspn(*buf, spaces); /* Find start of token */
6293 return strcspn(*buf, spaces); /* Return token length */
6297 * Finds the next token in *buf, dynamically allocates a buffer big
6298 * enough to hold a copy of it, and copies the token into the new
6299 * buffer. The copy is guaranteed to be terminated with '\0'. Note
6300 * that a duplicate buffer is created even for a zero-length token.
6302 * Returns a pointer to the newly-allocated duplicate, or a null
6303 * pointer if memory for the duplicate was not available. If
6304 * the lenp argument is a non-null pointer, the length of the token
6305 * (not including the '\0') is returned in *lenp.
6307 * If successful, the *buf pointer will be updated to point beyond
6308 * the end of the found token.
6310 * Note: uses GFP_KERNEL for allocation.
6312 static inline char *dup_token(const char **buf, size_t *lenp)
6317 len = next_token(buf);
6318 dup = kmemdup(*buf, len + 1, GFP_KERNEL);
6321 *(dup + len) = '\0';
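/*
 * Tokenizer sketch on a hypothetical buffer: with buf pointing at
 * "rbd myimage mysnap", dup_token(&buf, NULL) returns "rbd" and
 * advances buf to " myimage mysnap"; the next call then yields
 * "myimage", and so on.
 */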
6330 static int rbd_parse_param(struct fs_parameter *param,
6331 struct rbd_parse_opts_ctx *pctx)
6333 struct rbd_options *opt = pctx->opts;
6334 struct fs_parse_result result;
6335 struct p_log log = {.prefix = "rbd"};
6338 ret = ceph_parse_param(param, pctx->copts, NULL);
6339 if (ret != -ENOPARAM)
6342 token = __fs_parse(&log, rbd_parameters, param, &result);
6343 dout("%s fs_parse '%s' token %d\n", __func__, param->key, token);
6345 if (token == -ENOPARAM)
6346 return inval_plog(&log, "Unknown parameter '%s'",
6347 param->key);
6352 case Opt_queue_depth:
6353 if (result.uint_32 < 1)
6354 goto out_of_range;
6355 opt->queue_depth = result.uint_32;
6357 case Opt_alloc_size:
6358 if (result.uint_32 < SECTOR_SIZE)
6359 goto out_of_range;
6360 if (!is_power_of_2(result.uint_32))
6361 return inval_plog(&log, "alloc_size must be a power of 2");
6362 opt->alloc_size = result.uint_32;
6364 case Opt_lock_timeout:
6365 /* 0 is "wait forever" (i.e. infinite timeout) */
6366 if (result.uint_32 > INT_MAX / 1000)
6367 goto out_of_range;
6368 opt->lock_timeout = msecs_to_jiffies(result.uint_32 * 1000);
6371 kfree(pctx->spec->pool_ns);
6372 pctx->spec->pool_ns = param->string;
6373 param->string = NULL;
6375 case Opt_compression_hint:
6376 switch (result.uint_32) {
6377 case Opt_compression_hint_none:
6378 opt->alloc_hint_flags &=
6379 ~(CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE |
6380 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE);
6382 case Opt_compression_hint_compressible:
6383 opt->alloc_hint_flags |=
6384 CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6385 opt->alloc_hint_flags &=
6386 ~CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6388 case Opt_compression_hint_incompressible:
6389 opt->alloc_hint_flags |=
6390 CEPH_OSD_ALLOC_HINT_FLAG_INCOMPRESSIBLE;
6391 opt->alloc_hint_flags &=
6392 ~CEPH_OSD_ALLOC_HINT_FLAG_COMPRESSIBLE;
6399 opt->read_only = true;
6401 case Opt_read_write:
6402 opt->read_only = false;
6404 case Opt_lock_on_read:
6405 opt->lock_on_read = true;
6408 opt->exclusive = true;
6420 return inval_plog(&log, "%s out of range", param->key);
6424 * This duplicates most of generic_parse_monolithic(), untying it from
6425 * fs_context and skipping standard superblock and security options.
6427 static int rbd_parse_options(char *options, struct rbd_parse_opts_ctx *pctx)
6432 dout("%s '%s'\n", __func__, options);
6433 while ((key = strsep(&options, ",")) != NULL) {
6435 struct fs_parameter param = {
6437 .type = fs_value_is_flag,
6439 char *value = strchr(key, '=');
6446 v_len = strlen(value);
6447 param.string = kmemdup_nul(value, v_len,
6448 GFP_KERNEL);
6451 param.type = fs_value_is_string;
6455 ret = rbd_parse_param(&param, pctx);
6456 kfree(param.string);
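/*
 * Illustrative option strings handled here: "read_only,queue_depth=128"
 * or "alloc_size=65536,exclusive". Ceph-level options (e.g. "name=admin")
 * are consumed by ceph_parse_param() first; only the leftovers reach
 * rbd_parse_param().
 */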
6466 * Parse the options provided for an "rbd add" (i.e., rbd image
6467 * mapping) request. These arrive via a write to /sys/bus/rbd/add,
6468 * and the data written is passed here via a NUL-terminated buffer.
6469 * Returns 0 if successful or an error code otherwise.
6471 * The information extracted from these options is recorded in
6472 * the other parameters which return dynamically-allocated
6473 * structures:
6474 *  ceph_opts
6475 *      The address of a pointer that will refer to a ceph options
6476 *      structure. Caller must release the returned pointer using
6477 *      ceph_destroy_options() when it is no longer needed.
6478 *  rbd_opts
6479 *      Address of an rbd options pointer. Fully initialized by
6480 *      this function; caller must release with kfree().
6481 *  rbd_spec
6482 *      Address of an rbd image specification pointer. Fully
6483 *      initialized by this function based on parsed options.
6484 *      Caller must release with rbd_spec_put().
6486 * The options passed take this form:
6487 *  <mon_addrs> <options> <pool_name> <image_name> [<snap_id>]
6488 * where:
6489 *  <mon_addrs>
6490 *      A comma-separated list of one or more monitor addresses.
6491 *      A monitor address is an ip address, optionally followed
6492 *      by a port number (separated by a colon).
6493 *      I.e.: ip1[:port1][,ip2[:port2]...]
6494 *  <options>
6495 *      A comma-separated list of ceph and/or rbd options.
6496 *  <pool_name>
6497 *      The name of the rados pool containing the rbd image.
6498 *  <image_name>
6499 *      The name of the image in that pool to map.
6500 *  <snap_id>
6501 *      An optional snapshot id. If provided, the mapping will
6502 *      present data from the image at the time that snapshot was
6503 *      created. The image head is used if no snapshot id is
6504 *      provided. Snapshot mappings are always read-only.
6505 */
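/*
 * Illustrative "add" string (all field values are made up):
 *
 *	1.2.3.4:6789 name=admin,secret=AQD9... mypool myimage mysnap
 *
 * which maps snapshot "mysnap" of image "myimage" in pool "mypool".
 */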
6506 static int rbd_add_parse_args(const char *buf,
6507 struct ceph_options **ceph_opts,
6508 struct rbd_options **opts,
6509 struct rbd_spec **rbd_spec)
6513 const char *mon_addrs;
6515 size_t mon_addrs_size;
6516 struct rbd_parse_opts_ctx pctx = { 0 };
6519 /* The first four tokens are required */
6521 len = next_token(&buf);
6523 rbd_warn(NULL, "no monitor address(es) provided");
6527 mon_addrs_size = len;
6531 options = dup_token(&buf, NULL);
6535 rbd_warn(NULL, "no options provided");
6539 pctx.spec = rbd_spec_alloc();
6543 pctx.spec->pool_name = dup_token(&buf, NULL);
6544 if (!pctx.spec->pool_name)
6546 if (!*pctx.spec->pool_name) {
6547 rbd_warn(NULL, "no pool name provided");
6551 pctx.spec->image_name = dup_token(&buf, NULL);
6552 if (!pctx.spec->image_name)
6554 if (!*pctx.spec->image_name) {
6555 rbd_warn(NULL, "no image name provided");
6560 * Snapshot name is optional; default is to use "-"
6561 * (indicating the head/no snapshot).
6563 len = next_token(&buf);
6565 buf = RBD_SNAP_HEAD_NAME; /* No snapshot supplied */
6566 len = sizeof (RBD_SNAP_HEAD_NAME) - 1;
6567 } else if (len > RBD_MAX_SNAP_NAME_LEN) {
6568 ret = -ENAMETOOLONG;
6571 snap_name = kmemdup(buf, len + 1, GFP_KERNEL);
6574 *(snap_name + len) = '\0';
6575 pctx.spec->snap_name = snap_name;
6577 pctx.copts = ceph_alloc_options();
6581 /* Initialize all rbd options to the defaults */
6583 pctx.opts = kzalloc(sizeof(*pctx.opts), GFP_KERNEL);
6587 pctx.opts->read_only = RBD_READ_ONLY_DEFAULT;
6588 pctx.opts->queue_depth = RBD_QUEUE_DEPTH_DEFAULT;
6589 pctx.opts->alloc_size = RBD_ALLOC_SIZE_DEFAULT;
6590 pctx.opts->lock_timeout = RBD_LOCK_TIMEOUT_DEFAULT;
6591 pctx.opts->lock_on_read = RBD_LOCK_ON_READ_DEFAULT;
6592 pctx.opts->exclusive = RBD_EXCLUSIVE_DEFAULT;
6593 pctx.opts->trim = RBD_TRIM_DEFAULT;
6595 ret = ceph_parse_mon_ips(mon_addrs, mon_addrs_size, pctx.copts, NULL);
6599 ret = rbd_parse_options(options, &pctx);
6603 *ceph_opts = pctx.copts;
6605 *rbd_spec = pctx.spec;
6613 ceph_destroy_options(pctx.copts);
6614 rbd_spec_put(pctx.spec);
6619 static void rbd_dev_image_unlock(struct rbd_device *rbd_dev)
6621 down_write(&rbd_dev->lock_rwsem);
6622 if (__rbd_is_lock_owner(rbd_dev))
6623 __rbd_release_lock(rbd_dev);
6624 up_write(&rbd_dev->lock_rwsem);
6628 * If the wait is interrupted, an error is returned even if the lock
6629 * was successfully acquired. rbd_dev_image_unlock() will release it if needed.
6632 static int rbd_add_acquire_lock(struct rbd_device *rbd_dev)
6636 if (!(rbd_dev->header.features & RBD_FEATURE_EXCLUSIVE_LOCK)) {
6637 if (!rbd_dev->opts->exclusive && !rbd_dev->opts->lock_on_read)
6640 rbd_warn(rbd_dev, "exclusive-lock feature is not enabled");
6644 if (rbd_is_ro(rbd_dev))
6647 rbd_assert(!rbd_is_lock_owner(rbd_dev));
6648 queue_delayed_work(rbd_dev->task_wq, &rbd_dev->lock_dwork, 0);
6649 ret = wait_for_completion_killable_timeout(&rbd_dev->acquire_wait,
6650 ceph_timeout_jiffies(rbd_dev->opts->lock_timeout));
6652 ret = rbd_dev->acquire_err;
6654 cancel_delayed_work_sync(&rbd_dev->lock_dwork);
6658 rbd_warn(rbd_dev, "failed to acquire lock: %ld", ret);
6664 * The lock may have been released by now, unless automatic lock
6665 * transitions are disabled.
6667 rbd_assert(!rbd_dev->opts->exclusive || rbd_is_lock_owner(rbd_dev));
6672 * An rbd format 2 image has a unique identifier, distinct from the
6673 * name given to it by the user. Internally, that identifier is
6674 * what's used to specify the names of objects related to the image.
6676 * A special "rbd id" object is used to map an rbd image name to its
6677 * id. If that object doesn't exist, then there is no v2 rbd image
6678 * with the supplied name.
6680 * This function will record the given rbd_dev's image_id field if
6681 * it can be determined, and in that case will return 0. If any
6682 * errors occur a negative errno will be returned and the rbd_dev's
6683 * image_id field will be unchanged (and should be NULL).
6685 static int rbd_dev_image_id(struct rbd_device *rbd_dev)
6689 CEPH_DEFINE_OID_ONSTACK(oid);
6694 * When probing a parent image, the image id is already
6695 * known (and the image name likely is not). There's no
6696 * need to fetch the image id again in this case. We
6697 * do still need to set the image format though.
6699 if (rbd_dev->spec->image_id) {
6700 rbd_dev->image_format = *rbd_dev->spec->image_id ? 2 : 1;
6706 * First, see if the format 2 image id file exists, and if
6707 * so, get the image's persistent id from it.
6709 ret = ceph_oid_aprintf(&oid, GFP_KERNEL, "%s%s", RBD_ID_PREFIX,
6710 rbd_dev->spec->image_name);
6714 dout("rbd id object name is %s\n", oid.name);
6716 /* Response will be an encoded string, which includes a length */
6717 size = sizeof (__le32) + RBD_IMAGE_ID_LEN_MAX;
6718 response = kzalloc(size, GFP_NOIO);
6724 /* If it doesn't exist we'll assume it's a format 1 image */
6726 ret = rbd_obj_method_sync(rbd_dev, &oid, &rbd_dev->header_oloc,
6727 "get_id", NULL, 0, response, size);
6729 dout("%s: rbd_obj_method_sync returned %d\n", __func__, ret);
6730 if (ret == -ENOENT) {
6731 image_id = kstrdup("", GFP_KERNEL);
6732 ret = image_id ? 0 : -ENOMEM;
6734 rbd_dev->image_format = 1;
6735 } else if (ret >= 0) {
6736 void *p = response;
6738 image_id = ceph_extract_encoded_string(&p, p + ret,
6739 NULL, GFP_NOIO);
6740 ret = PTR_ERR_OR_ZERO(image_id);
6742 rbd_dev->image_format = 2;
6746 rbd_dev->spec->image_id = image_id;
6747 dout("image_id is %s\n", image_id);
6751 ceph_oid_destroy(&oid);
6756 * Undo whatever state changes are made by v1 or v2 header info call routines.
6759 static void rbd_dev_unprobe(struct rbd_device *rbd_dev)
6761 rbd_dev_parent_put(rbd_dev);
6762 rbd_object_map_free(rbd_dev);
6763 rbd_dev_mapping_clear(rbd_dev);
6765 /* Free dynamic fields from the header, then zero it out */
6767 rbd_image_header_cleanup(&rbd_dev->header);
6770 static int rbd_dev_v2_header_onetime(struct rbd_device *rbd_dev,
6771 struct rbd_image_header *header)
6775 ret = rbd_dev_v2_object_prefix(rbd_dev, &header->object_prefix);
6780 * Get and check the features for the image. Currently the
6781 * features are assumed to never change.
6783 ret = _rbd_dev_v2_snap_features(rbd_dev, CEPH_NOSNAP,
6784 rbd_is_ro(rbd_dev), &header->features);
6788 /* If the image supports fancy striping, get its parameters */
6790 if (header->features & RBD_FEATURE_STRIPINGV2) {
6791 ret = rbd_dev_v2_striping_info(rbd_dev, &header->stripe_unit,
6792 &header->stripe_count);
6797 if (header->features & RBD_FEATURE_DATA_POOL) {
6798 ret = rbd_dev_v2_data_pool(rbd_dev, &header->data_pool_id);
6807 * @depth is rbd_dev_image_probe() -> rbd_dev_probe_parent() ->
6808 * rbd_dev_image_probe() recursion depth, which means it's also the
6809 * length of the already discovered part of the parent chain.
6811 static int rbd_dev_probe_parent(struct rbd_device *rbd_dev, int depth)
6813 struct rbd_device *parent = NULL;
6816 if (!rbd_dev->parent_spec)
6819 if (++depth > RBD_MAX_PARENT_CHAIN_LEN) {
6820 pr_info("parent chain is too long (%d)\n", depth);
6825 parent = __rbd_dev_create(rbd_dev->parent_spec);
6832 * Images related by parent/child relationships always share
6833 * rbd_client and spec/parent_spec, so bump their refcounts.
6835 parent->rbd_client = __rbd_get_client(rbd_dev->rbd_client);
6836 parent->spec = rbd_spec_get(rbd_dev->parent_spec);
6838 __set_bit(RBD_DEV_FLAG_READONLY, &parent->flags);
6840 ret = rbd_dev_image_probe(parent, depth);
6844 rbd_dev->parent = parent;
6845 atomic_set(&rbd_dev->parent_ref, 1);
6849 rbd_dev_unparent(rbd_dev);
6850 rbd_dev_destroy(parent);
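/*
 * Resulting chain for a clone of a clone (illustrative):
 *
 *	mapped rbd_dev -> parent rbd_dev -> grandparent rbd_dev
 *
 * capped at RBD_MAX_PARENT_CHAIN_LEN probed levels; each parent is
 * mapped read-only and shares the child's rbd client and spec.
 */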
6854 static void rbd_dev_device_release(struct rbd_device *rbd_dev)
6856 clear_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6857 rbd_free_disk(rbd_dev);
6859 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6863 * rbd_dev->header_rwsem must be locked for write and will be unlocked upon return.
6866 static int rbd_dev_device_setup(struct rbd_device *rbd_dev)
6870 /* Record our major and minor device numbers. */
6872 if (!single_major) {
6873 ret = register_blkdev(0, rbd_dev->name);
6875 goto err_out_unlock;
6877 rbd_dev->major = ret;
6880 rbd_dev->major = rbd_major;
6881 rbd_dev->minor = rbd_dev_id_to_minor(rbd_dev->dev_id);
6884 /* Set up the blkdev mapping. */
6886 ret = rbd_init_disk(rbd_dev);
6888 goto err_out_blkdev;
6890 set_capacity(rbd_dev->disk, rbd_dev->mapping.size / SECTOR_SIZE);
6891 set_disk_ro(rbd_dev->disk, rbd_is_ro(rbd_dev));
6893 ret = dev_set_name(&rbd_dev->dev, "%d", rbd_dev->dev_id);
6897 set_bit(RBD_DEV_FLAG_EXISTS, &rbd_dev->flags);
6898 up_write(&rbd_dev->header_rwsem);
6902 rbd_free_disk(rbd_dev);
6905 unregister_blkdev(rbd_dev->major, rbd_dev->name);
6907 up_write(&rbd_dev->header_rwsem);
6911 static int rbd_dev_header_name(struct rbd_device *rbd_dev)
6913 struct rbd_spec *spec = rbd_dev->spec;
6916 /* Record the header object name for this rbd image. */
6918 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
6919 if (rbd_dev->image_format == 1)
6920 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6921 spec->image_name, RBD_SUFFIX);
6923 ret = ceph_oid_aprintf(&rbd_dev->header_oid, GFP_KERNEL, "%s%s",
6924 RBD_HEADER_PREFIX, spec->image_id);
6929 static void rbd_print_dne(struct rbd_device *rbd_dev, bool is_snap)
6932 pr_info("image %s/%s%s%s does not exist\n",
6933 rbd_dev->spec->pool_name,
6934 rbd_dev->spec->pool_ns ?: "",
6935 rbd_dev->spec->pool_ns ? "/" : "",
6936 rbd_dev->spec->image_name);
6938 pr_info("snap %s/%s%s%s@%s does not exist\n",
6939 rbd_dev->spec->pool_name,
6940 rbd_dev->spec->pool_ns ?: "",
6941 rbd_dev->spec->pool_ns ? "/" : "",
6942 rbd_dev->spec->image_name,
6943 rbd_dev->spec->snap_name);
6947 static void rbd_dev_image_release(struct rbd_device *rbd_dev)
6949 if (!rbd_is_ro(rbd_dev))
6950 rbd_unregister_watch(rbd_dev);
6952 rbd_dev_unprobe(rbd_dev);
6953 rbd_dev->image_format = 0;
6954 kfree(rbd_dev->spec->image_id);
6955 rbd_dev->spec->image_id = NULL;
6959 * Probe for the existence of the header object for the given rbd
6960 * device. If this image is the one being mapped (i.e., not a
6961 * parent), initiate a watch on its header object before using that
6962 * object to get detailed information about the rbd image.
6964 * On success, returns with header_rwsem held for write if called with @depth == 0.
6967 static int rbd_dev_image_probe(struct rbd_device *rbd_dev, int depth)
6969 bool need_watch = !rbd_is_ro(rbd_dev);
6973 * Get the id from the image id object. Unless there's an
6974 * error, rbd_dev->spec->image_id will be filled in with
6975 * a dynamically-allocated string, and rbd_dev->image_format
6976 * will be set to either 1 or 2.
6978 ret = rbd_dev_image_id(rbd_dev);
6982 ret = rbd_dev_header_name(rbd_dev);
6984 goto err_out_format;
6987 ret = rbd_register_watch(rbd_dev);
6990 rbd_print_dne(rbd_dev, false);
6991 goto err_out_format;
6996 down_write(&rbd_dev->header_rwsem);
6998 ret = rbd_dev_header_info(rbd_dev, &rbd_dev->header, true);
7000 if (ret == -ENOENT && !need_watch)
7001 rbd_print_dne(rbd_dev, false);
7005 rbd_init_layout(rbd_dev);
7008 * If this image is the one being mapped, we have pool name and
7009 * id, image name and id, and snap name - need to fill snap id.
7010 * Otherwise this is a parent image, identified by pool, image
7011 * and snap ids - need to fill in names for those ids.
7014 ret = rbd_spec_fill_snap_id(rbd_dev);
7016 ret = rbd_spec_fill_names(rbd_dev);
7019 rbd_print_dne(rbd_dev, true);
7023 ret = rbd_dev_mapping_set(rbd_dev);
7027 if (rbd_is_snap(rbd_dev) &&
7028 (rbd_dev->header.features & RBD_FEATURE_OBJECT_MAP)) {
7029 ret = rbd_object_map_load(rbd_dev);
7034 if (rbd_dev->header.features & RBD_FEATURE_LAYERING) {
7035 ret = rbd_dev_setup_parent(rbd_dev);
7040 ret = rbd_dev_probe_parent(rbd_dev, depth);
7044 dout("discovered format %u image, header name is %s\n",
7045 rbd_dev->image_format, rbd_dev->header_oid.name);
7050 up_write(&rbd_dev->header_rwsem);
7052 rbd_unregister_watch(rbd_dev);
7053 rbd_dev_unprobe(rbd_dev);
7055 rbd_dev->image_format = 0;
7056 kfree(rbd_dev->spec->image_id);
7057 rbd_dev->spec->image_id = NULL;
7061 static void rbd_dev_update_header(struct rbd_device *rbd_dev,
7062 struct rbd_image_header *header)
7064 rbd_assert(rbd_image_format_valid(rbd_dev->image_format));
7065 rbd_assert(rbd_dev->header.object_prefix); /* !first_time */
7067 if (rbd_dev->header.image_size != header->image_size) {
7068 rbd_dev->header.image_size = header->image_size;
7070 if (!rbd_is_snap(rbd_dev)) {
7071 rbd_dev->mapping.size = header->image_size;
7072 rbd_dev_update_size(rbd_dev);
7076 ceph_put_snap_context(rbd_dev->header.snapc);
7077 rbd_dev->header.snapc = header->snapc;
7078 header->snapc = NULL;
7080 if (rbd_dev->image_format == 1) {
7081 kfree(rbd_dev->header.snap_names);
7082 rbd_dev->header.snap_names = header->snap_names;
7083 header->snap_names = NULL;
7085 kfree(rbd_dev->header.snap_sizes);
7086 rbd_dev->header.snap_sizes = header->snap_sizes;
7087 header->snap_sizes = NULL;
7091 static void rbd_dev_update_parent(struct rbd_device *rbd_dev,
7092 struct parent_image_info *pii)
7094 if (pii->pool_id == CEPH_NOPOOL || !pii->has_overlap) {
7096 * Either the parent never existed, or we have
7097 * record of it but the image got flattened so it no
7098 * longer has a parent. When the parent of a
7099 * layered image disappears we immediately set the
7100 * overlap to 0. The effect of this is that all new
7101 * requests will be treated as if the image had no parent.
7104 * If !pii.has_overlap, the parent image spec is not
7105 * applicable. It's there to avoid duplication in each snapshot record.
7108 if (rbd_dev->parent_overlap) {
7109 rbd_dev->parent_overlap = 0;
7110 rbd_dev_parent_put(rbd_dev);
7111 pr_info("%s: clone has been flattened\n",
7112 rbd_dev->disk->disk_name);
7115 rbd_assert(rbd_dev->parent_spec);
7118 * Update the parent overlap. If it became zero, issue
7119 * a warning as we will proceed as if there is no parent.
7121 if (!pii->overlap && rbd_dev->parent_overlap)
7122 rbd_warn(rbd_dev,
7123 "clone has become standalone (overlap 0)");
7124 rbd_dev->parent_overlap = pii->overlap;
7128 static int rbd_dev_refresh(struct rbd_device *rbd_dev)
7130 struct rbd_image_header header = { 0 };
7131 struct parent_image_info pii = { 0 };
7134 dout("%s rbd_dev %p\n", __func__, rbd_dev);
7136 ret = rbd_dev_header_info(rbd_dev, &header, false);
7141 * If there is a parent, see if it has disappeared due to the
7142 * mapped image getting flattened.
7144 if (rbd_dev->parent) {
7145 ret = rbd_dev_v2_parent_info(rbd_dev, &pii);
7150 down_write(&rbd_dev->header_rwsem);
7151 rbd_dev_update_header(rbd_dev, &header);
7152 if (rbd_dev->parent)
7153 rbd_dev_update_parent(rbd_dev, &pii);
7154 up_write(&rbd_dev->header_rwsem);
7157 rbd_parent_info_cleanup(&pii);
7158 rbd_image_header_cleanup(&header);
7162 static ssize_t do_rbd_add(struct bus_type *bus,
7166 struct rbd_device *rbd_dev = NULL;
7167 struct ceph_options *ceph_opts = NULL;
7168 struct rbd_options *rbd_opts = NULL;
7169 struct rbd_spec *spec = NULL;
7170 struct rbd_client *rbdc;
7173 if (!capable(CAP_SYS_ADMIN))
7174 return -EPERM;
7176 if (!try_module_get(THIS_MODULE))
7177 return -ENODEV;
7179 /* parse add command */
7180 rc = rbd_add_parse_args(buf, &ceph_opts, &rbd_opts, &spec);
7184 rbdc = rbd_get_client(ceph_opts);
7191 rc = ceph_pg_poolid_by_name(rbdc->client->osdc.osdmap, spec->pool_name);
7194 pr_info("pool %s does not exist\n", spec->pool_name);
7195 goto err_out_client;
7197 spec->pool_id = (u64)rc;
7199 rbd_dev = rbd_dev_create(rbdc, spec, rbd_opts);
7202 goto err_out_client;
7204 rbdc = NULL; /* rbd_dev now owns this */
7205 spec = NULL; /* rbd_dev now owns this */
7206 rbd_opts = NULL; /* rbd_dev now owns this */
7208 /* if we are mapping a snapshot it will be a read-only mapping */
7209 if (rbd_dev->opts->read_only ||
7210 strcmp(rbd_dev->spec->snap_name, RBD_SNAP_HEAD_NAME))
7211 __set_bit(RBD_DEV_FLAG_READONLY, &rbd_dev->flags);
7213 rbd_dev->config_info = kstrdup(buf, GFP_KERNEL);
7214 if (!rbd_dev->config_info) {
7216 goto err_out_rbd_dev;
7219 rc = rbd_dev_image_probe(rbd_dev, 0);
7221 goto err_out_rbd_dev;
7223 if (rbd_dev->opts->alloc_size > rbd_dev->layout.object_size) {
7224 rbd_warn(rbd_dev, "alloc_size adjusted to %u",
7225 rbd_dev->layout.object_size);
7226 rbd_dev->opts->alloc_size = rbd_dev->layout.object_size;
7229 rc = rbd_dev_device_setup(rbd_dev);
7231 goto err_out_image_probe;
7233 rc = rbd_add_acquire_lock(rbd_dev);
7235 goto err_out_image_lock;
7237 /* Everything's ready. Announce the disk to the world. */
7239 rc = device_add(&rbd_dev->dev);
7241 goto err_out_image_lock;
7243 device_add_disk(&rbd_dev->dev, rbd_dev->disk, NULL);
7244 /* see rbd_init_disk() */
7245 blk_put_queue(rbd_dev->disk->queue);
7247 spin_lock(&rbd_dev_list_lock);
7248 list_add_tail(&rbd_dev->node, &rbd_dev_list);
7249 spin_unlock(&rbd_dev_list_lock);
7251 pr_info("%s: capacity %llu features 0x%llx\n", rbd_dev->disk->disk_name,
7252 (unsigned long long)get_capacity(rbd_dev->disk) << SECTOR_SHIFT,
7253 rbd_dev->header.features);
7256 module_put(THIS_MODULE);
7260 rbd_dev_image_unlock(rbd_dev);
7261 rbd_dev_device_release(rbd_dev);
7262 err_out_image_probe:
7263 rbd_dev_image_release(rbd_dev);
7265 rbd_dev_destroy(rbd_dev);
7267 rbd_put_client(rbdc);
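
/*
 * Note on the unwind above: rbdc, spec and rbd_opts are set to NULL as
 * soon as ownership passes to rbd_dev, and rbd_put_client(),
 * rbd_spec_put() and kfree() all accept NULL, so the error labels can
 * be shared by every failure point without double-freeing.
 */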
static ssize_t add_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_add(bus, buf, count);
}

static ssize_t add_single_major_store(struct bus_type *bus, const char *buf,
				      size_t count)
{
	return do_rbd_add(bus, buf, count);
}
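
/*
 * When the single_major module parameter is set, all images share one
 * block device major and mapping must go through
 * /sys/bus/rbd/add_single_major; the plain add file then refuses
 * writes with -EINVAL (see add_store() above).
 */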
static void rbd_dev_remove_parent(struct rbd_device *rbd_dev)
{
	while (rbd_dev->parent) {
		struct rbd_device *first = rbd_dev;
		struct rbd_device *second = first->parent;
		struct rbd_device *third;

		/*
		 * Follow to the parent with no grandparent and
		 * remove it.
		 */
		while (second && (third = second->parent)) {
			first = second;
			second = third;
		}
		rbd_assert(second);
		rbd_dev_image_release(second);
		rbd_dev_destroy(second);
		first->parent = NULL;
		first->parent_overlap = 0;

		rbd_assert(first->parent_spec);
		rbd_spec_put(first->parent_spec);
		first->parent_spec = NULL;
	}
}
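
/*
 * Illustration (device names are hypothetical): for a clone chain
 * base <- mid <- mapped, the outer loop above first releases "base",
 * then "mid" -- the deepest ancestor goes first, so each device's
 * parent pointer is already NULL by the time it is destroyed itself.
 */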
static ssize_t do_rbd_remove(struct bus_type *bus,
			     const char *buf,
			     size_t count)
{
	struct rbd_device *rbd_dev = NULL;
	struct list_head *tmp;
	int dev_id;
	char opt_buf[6];
	bool force = false;
	int ret;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	dev_id = -1;
	opt_buf[0] = '\0';
	sscanf(buf, "%d %5s", &dev_id, opt_buf);
	if (dev_id < 0) {
		pr_err("dev_id out of range\n");
		return -EINVAL;
	}
	if (opt_buf[0] != '\0') {
		if (!strcmp(opt_buf, "force")) {
			force = true;
		} else {
			pr_err("bad remove option at '%s'\n", opt_buf);
			return -EINVAL;
		}
	}

	ret = -ENOENT;
	spin_lock(&rbd_dev_list_lock);
	list_for_each(tmp, &rbd_dev_list) {
		rbd_dev = list_entry(tmp, struct rbd_device, node);
		if (rbd_dev->dev_id == dev_id) {
			ret = 0;
			break;
		}
	}
	if (!ret) {
		spin_lock_irq(&rbd_dev->lock);
		if (rbd_dev->open_count && !force)
			ret = -EBUSY;
		else if (test_and_set_bit(RBD_DEV_FLAG_REMOVING,
					  &rbd_dev->flags))
			ret = -EINPROGRESS;
		spin_unlock_irq(&rbd_dev->lock);
	}
	spin_unlock(&rbd_dev_list_lock);
	if (ret)
		return ret;

	if (force) {
		/*
		 * Prevent new IO from being queued and wait for existing
		 * IO to complete/fail.
		 */
		blk_mq_freeze_queue(rbd_dev->disk->queue);
		blk_set_queue_dying(rbd_dev->disk->queue);
	}

	del_gendisk(rbd_dev->disk);
	spin_lock(&rbd_dev_list_lock);
	list_del_init(&rbd_dev->node);
	spin_unlock(&rbd_dev_list_lock);
	device_del(&rbd_dev->dev);

	rbd_dev_image_unlock(rbd_dev);
	rbd_dev_device_release(rbd_dev);
	rbd_dev_image_release(rbd_dev);
	rbd_dev_destroy(rbd_dev);
	return count;
}
static ssize_t remove_store(struct bus_type *bus, const char *buf, size_t count)
{
	if (single_major)
		return -EINVAL;

	return do_rbd_remove(bus, buf, count);
}

static ssize_t remove_single_major_store(struct bus_type *bus, const char *buf,
					 size_t count)
{
	return do_rbd_remove(bus, buf, count);
}
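
/*
 * Unmapping sketch (device id 0 is a placeholder):
 *
 *   $ echo 0 > /sys/bus/rbd/remove
 *   $ echo "0 force" > /sys/bus/rbd/remove    # ignore open_count
 *
 * "force" is the only option do_rbd_remove() recognizes.
 */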
/*
 * create control files in sysfs
 * /sys/bus/rbd/...
 */
static int __init rbd_sysfs_init(void)
{
	int ret;

	ret = device_register(&rbd_root_dev);
	if (ret < 0)
		return ret;

	ret = bus_register(&rbd_bus_type);
	if (ret < 0)
		device_unregister(&rbd_root_dev);

	return ret;
}

static void __exit rbd_sysfs_cleanup(void)
{
	bus_unregister(&rbd_bus_type);
	device_unregister(&rbd_root_dev);
}
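
/*
 * KMEM_CACHE() below is the generic slab helper, not rbd-specific:
 * KMEM_CACHE(rbd_img_request, 0) is roughly equivalent to
 *
 *	kmem_cache_create("rbd_img_request",
 *			  sizeof(struct rbd_img_request),
 *			  __alignof__(struct rbd_img_request), 0, NULL);
 *
 * so the cache is automatically named and sized after the struct.
 */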
static int __init rbd_slab_init(void)
{
	rbd_assert(!rbd_img_request_cache);
	rbd_img_request_cache = KMEM_CACHE(rbd_img_request, 0);
	if (!rbd_img_request_cache)
		return -ENOMEM;

	rbd_assert(!rbd_obj_request_cache);
	rbd_obj_request_cache = KMEM_CACHE(rbd_obj_request, 0);
	if (!rbd_obj_request_cache)
		goto out_err;

	return 0;

out_err:
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
	return -ENOMEM;
}
static void rbd_slab_exit(void)
{
	rbd_assert(rbd_obj_request_cache);
	kmem_cache_destroy(rbd_obj_request_cache);
	rbd_obj_request_cache = NULL;

	rbd_assert(rbd_img_request_cache);
	kmem_cache_destroy(rbd_img_request_cache);
	rbd_img_request_cache = NULL;
}
static int __init rbd_init(void)
{
	int rc;

	if (!libceph_compatible(NULL)) {
		rbd_warn(NULL, "libceph incompatibility (quitting)");
		return -EINVAL;
	}

	rc = rbd_slab_init();
	if (rc)
		return rc;

	/*
	 * The number of active work items is limited by the number of
	 * rbd devices * queue depth, so leave @max_active at default.
	 */
	rbd_wq = alloc_workqueue(RBD_DRV_NAME, WQ_MEM_RECLAIM, 0);
	if (!rbd_wq) {
		rc = -ENOMEM;
		goto err_out_slab;
	}

	if (single_major) {
		rbd_major = register_blkdev(0, RBD_DRV_NAME);
		if (rbd_major < 0) {
			rc = rbd_major;
			goto err_out_wq;
		}
	}

	rc = rbd_sysfs_init();
	if (rc)
		goto err_out_blkdev;

	if (single_major)
		pr_info("loaded (major %d)\n", rbd_major);
	else
		pr_info("loaded\n");

	return 0;

err_out_blkdev:
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
err_out_wq:
	destroy_workqueue(rbd_wq);
err_out_slab:
	rbd_slab_exit();
	return rc;
}
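
/*
 * Load-time sketch: single_major (default true on modern kernels) can
 * also be set explicitly on the modprobe command line, e.g.:
 *
 *   $ modprobe rbd single_major=Y
 */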
static void __exit rbd_exit(void)
{
	ida_destroy(&rbd_dev_id_ida);
	rbd_sysfs_cleanup();
	if (single_major)
		unregister_blkdev(rbd_major, RBD_DRV_NAME);
	destroy_workqueue(rbd_wq);
	rbd_slab_exit();
}
module_init(rbd_init);
module_exit(rbd_exit);

MODULE_AUTHOR("Alex Elder <elder@inktank.com>");
MODULE_AUTHOR("Sage Weil <sage@newdream.net>");
MODULE_AUTHOR("Yehuda Sadeh <yehuda@hq.newdream.net>");
/* following authorship retained from original osdblk.c */
MODULE_AUTHOR("Jeff Garzik <jeff@garzik.org>");
MODULE_DESCRIPTION("RADOS Block Device (RBD) driver");
MODULE_LICENSE("GPL");