2 * Copyright (C) 2001, 2002 Sistina Software (UK) Limited.
3 * Copyright (C) 2004 - 2006 Red Hat, Inc. All rights reserved.
5 * This file is released under the GPL.
10 #include <linux/module.h>
11 #include <linux/vmalloc.h>
12 #include <linux/miscdevice.h>
13 #include <linux/sched/mm.h>
14 #include <linux/init.h>
15 #include <linux/wait.h>
16 #include <linux/slab.h>
17 #include <linux/dm-ioctl.h>
18 #include <linux/hdreg.h>
19 #include <linux/compat.h>
20 #include <linux/nospec.h>
22 #include <linux/uaccess.h>
24 #define DM_MSG_PREFIX "ioctl"
25 #define DM_DRIVER_EMAIL "dm-devel@redhat.com"
29 * poll will wait until the global event number is greater than this value.
32 volatile unsigned global_event_nr;
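/*
 * Illustrative userspace sketch (not part of this file): a monitor arms the
 * per-fd event number with DM_DEV_ARM_POLL and then sleeps in poll() until
 * the global event counter moves past the value recorded at arm time. Error
 * handling and full dm_ioctl initialisation are omitted.
 *
 *	struct dm_ioctl dmi = {
 *		.version   = { DM_VERSION_MAJOR, DM_VERSION_MINOR, 0 },
 *		.data_size = sizeof(dmi),
 *	};
 *	int fd = open("/dev/" DM_DIR "/" DM_CONTROL_NODE, O_RDWR);
 *	ioctl(fd, DM_DEV_ARM_POLL, &dmi);
 *	struct pollfd pfd = { .fd = fd, .events = POLLIN };
 *	poll(&pfd, 1, -1);   (returns once a new dm event has been issued)
 */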
35 /*-----------------------------------------------------------------
36 * The ioctl interface needs to be able to look up devices by name or uuid.
38 *---------------------------------------------------------------*/
40 struct list_head name_list;
41 struct list_head uuid_list;
45 struct mapped_device *md;
46 struct dm_table *new_map;
51 struct dm_target_versions *vers, *old_vers;
57 #define NUM_BUCKETS 64
58 #define MASK_BUCKETS (NUM_BUCKETS - 1)
59 static struct list_head _name_buckets[NUM_BUCKETS];
60 static struct list_head _uuid_buckets[NUM_BUCKETS];
62 static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred);
65 * Guards access to both hash tables.
67 static DECLARE_RWSEM(_hash_lock);
70 * Protects use of mdptr to obtain hash cell name and uuid from mapped device.
72 static DEFINE_MUTEX(dm_hash_cells_mutex);
74 static void init_buckets(struct list_head *buckets)
78 for (i = 0; i < NUM_BUCKETS; i++)
79 INIT_LIST_HEAD(buckets + i);
82 static int dm_hash_init(void)
84 init_buckets(_name_buckets);
85 init_buckets(_uuid_buckets);
89 static void dm_hash_exit(void)
91 dm_hash_remove_all(false, false, false);
94 /*-----------------------------------------------------------------
96 * We're not really concerned with the str hash function being
97 * fast since it's only used by the ioctl interface.
98 *---------------------------------------------------------------*/
99 static unsigned int hash_str(const char *str)
101 const unsigned int hash_mult = 2654435387U;
105 h = (h + (unsigned int) *str++) * hash_mult;
107 return h & MASK_BUCKETS;
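/*
 * Because NUM_BUCKETS is a power of two, masking with MASK_BUCKETS is
 * equivalent to reducing the hash modulo the table size:
 *
 *	(h & MASK_BUCKETS) == (h % NUM_BUCKETS)
 *
 * so every name and uuid maps to one of the 64 list heads above.
 */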
110 /*-----------------------------------------------------------------
111 * Code for looking up a device by name
112 *---------------------------------------------------------------*/
113 static struct hash_cell *__get_name_cell(const char *str)
115 struct hash_cell *hc;
116 unsigned int h = hash_str(str);
118 list_for_each_entry (hc, _name_buckets + h, name_list)
119 if (!strcmp(hc->name, str)) {
127 static struct hash_cell *__get_uuid_cell(const char *str)
129 struct hash_cell *hc;
130 unsigned int h = hash_str(str);
132 list_for_each_entry (hc, _uuid_buckets + h, uuid_list)
133 if (!strcmp(hc->uuid, str)) {
141 static struct hash_cell *__get_dev_cell(uint64_t dev)
143 struct mapped_device *md;
144 struct hash_cell *hc;
146 md = dm_get_md(huge_decode_dev(dev));
150 hc = dm_get_mdptr(md);
159 /*-----------------------------------------------------------------
160 * Inserting, removing and renaming a device.
161 *---------------------------------------------------------------*/
162 static struct hash_cell *alloc_cell(const char *name, const char *uuid,
163 struct mapped_device *md)
165 struct hash_cell *hc;
167 hc = kmalloc(sizeof(*hc), GFP_KERNEL);
171 hc->name = kstrdup(name, GFP_KERNEL);
181 hc->uuid = kstrdup(uuid, GFP_KERNEL);
189 INIT_LIST_HEAD(&hc->name_list);
190 INIT_LIST_HEAD(&hc->uuid_list);
196 static void free_cell(struct hash_cell *hc)
206 * The kdev_t and uuid of a device can never change once it is
207 * initially inserted.
209 static int dm_hash_insert(const char *name, const char *uuid, struct mapped_device *md)
211 struct hash_cell *cell, *hc;
214 * Allocate the new cells.
216 cell = alloc_cell(name, uuid, md);
221 * Insert the cell into both hash tables.
223 down_write(&_hash_lock);
224 hc = __get_name_cell(name);
230 list_add(&cell->name_list, _name_buckets + hash_str(name));
233 hc = __get_uuid_cell(uuid);
235 list_del(&cell->name_list);
239 list_add(&cell->uuid_list, _uuid_buckets + hash_str(uuid));
242 mutex_lock(&dm_hash_cells_mutex);
243 dm_set_mdptr(md, cell);
244 mutex_unlock(&dm_hash_cells_mutex);
245 up_write(&_hash_lock);
250 up_write(&_hash_lock);
255 static struct dm_table *__hash_remove(struct hash_cell *hc)
257 struct dm_table *table;
260 /* remove from the dev hash */
261 list_del(&hc->uuid_list);
262 list_del(&hc->name_list);
263 mutex_lock(&dm_hash_cells_mutex);
264 dm_set_mdptr(hc->md, NULL);
265 mutex_unlock(&dm_hash_cells_mutex);
267 table = dm_get_live_table(hc->md, &srcu_idx);
269 dm_table_event(table);
270 dm_put_live_table(hc->md, srcu_idx);
281 static void dm_hash_remove_all(bool keep_open_devices, bool mark_deferred, bool only_deferred)
284 struct hash_cell *hc;
285 struct mapped_device *md;
291 down_write(&_hash_lock);
293 for (i = 0; i < NUM_BUCKETS; i++) {
294 list_for_each_entry(hc, _name_buckets + i, name_list) {
298 if (keep_open_devices &&
299 dm_lock_for_deletion(md, mark_deferred, only_deferred)) {
305 t = __hash_remove(hc);
307 up_write(&_hash_lock);
314 if (likely(keep_open_devices))
dm_destroy(md);
else
317 dm_destroy_immediate(md);
320 * Some mapped devices may be using other mapped
321 * devices, so repeat until we make no further
322 * progress. If a new mapped device is created
323 * here it will also get removed.
329 up_write(&_hash_lock);
332 DMWARN("remove_all left %d open device(s)", dev_skipped);
336 * Set the uuid of a hash_cell that isn't already set.
338 static void __set_cell_uuid(struct hash_cell *hc, char *new_uuid)
340 mutex_lock(&dm_hash_cells_mutex);
342 mutex_unlock(&dm_hash_cells_mutex);
344 list_add(&hc->uuid_list, _uuid_buckets + hash_str(new_uuid));
348 * Changes the name of a hash_cell and returns the old name for
349 * the caller to free.
351 static char *__change_cell_name(struct hash_cell *hc, char *new_name)
356 * Rename and move the name cell.
358 list_del(&hc->name_list);
361 mutex_lock(&dm_hash_cells_mutex);
363 mutex_unlock(&dm_hash_cells_mutex);
365 list_add(&hc->name_list, _name_buckets + hash_str(new_name));
370 static struct mapped_device *dm_hash_rename(struct dm_ioctl *param,
373 char *new_data, *old_name = NULL;
374 struct hash_cell *hc;
375 struct dm_table *table;
376 struct mapped_device *md;
377 unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
383 new_data = kstrdup(new, GFP_KERNEL);
385 return ERR_PTR(-ENOMEM);
387 down_write(&_hash_lock);
393 hc = __get_uuid_cell(new);
395 hc = __get_name_cell(new);
398 DMWARN("Unable to change %s on mapped device %s to one that "
399 "already exists: %s",
400 change_uuid ? "uuid" : "name",
403 up_write(&_hash_lock);
405 return ERR_PTR(-EBUSY);
409 * Is there such a device as 'old' ?
411 hc = __get_name_cell(param->name);
413 DMWARN("Unable to rename non-existent device, %s to %s%s",
414 param->name, change_uuid ? "uuid " : "", new);
415 up_write(&_hash_lock);
417 return ERR_PTR(-ENXIO);
421 * Does this device already have a uuid?
423 if (change_uuid && hc->uuid) {
424 DMWARN("Unable to change uuid of mapped device %s to %s "
425 "because uuid is already set to %s",
426 param->name, new, hc->uuid);
428 up_write(&_hash_lock);
430 return ERR_PTR(-EINVAL);
434 __set_cell_uuid(hc, new_data);
436 old_name = __change_cell_name(hc, new_data);
439 * Wake up any dm event waiters.
441 table = dm_get_live_table(hc->md, &srcu_idx);
443 dm_table_event(table);
444 dm_put_live_table(hc->md, srcu_idx);
446 if (!dm_kobject_uevent(hc->md, KOBJ_CHANGE, param->event_nr))
447 param->flags |= DM_UEVENT_GENERATED_FLAG;
450 up_write(&_hash_lock);
456 void dm_deferred_remove(void)
458 dm_hash_remove_all(true, false, true);
461 /*-----------------------------------------------------------------
462 * Implementation of the ioctl commands
463 *---------------------------------------------------------------*/
465 * All the ioctl commands get dispatched to functions with this prototype.
468 typedef int (*ioctl_fn)(struct file *filp, struct dm_ioctl *param, size_t param_size);
470 static int remove_all(struct file *filp, struct dm_ioctl *param, size_t param_size)
472 dm_hash_remove_all(true, !!(param->flags & DM_DEFERRED_REMOVE), false);
473 param->data_size = 0;
478 * Round up the ptr to an 8-byte boundary.
481 static inline size_t align_val(size_t val)
483 return (val + ALIGN_MASK) & ~ALIGN_MASK;
485 static inline void *align_ptr(void *ptr)
487 return (void *)align_val((size_t)ptr);
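/*
 * Worked example, with ALIGN_MASK == 7: align_val(0) == 0, align_val(1) == 8,
 * align_val(8) == 8 and align_val(13) == 16, i.e. values are rounded up to
 * the next multiple of 8 so that records handed back to userspace start on
 * 8-byte boundaries.
 */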
491 * Retrieves the data payload buffer from an already allocated struct dm_ioctl.
494 static void *get_result_buffer(struct dm_ioctl *param, size_t param_size,
497 param->data_start = align_ptr(param + 1) - (void *) param;
499 if (param->data_start < param_size)
500 *len = param_size - param->data_start;
504 return ((void *) param) + param->data_start;
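/*
 * Resulting buffer layout (offsets in bytes from the start of *param):
 *
 *	0                 data_start                        data_size
 *	| struct dm_ioctl | pad to 8 | result payload ...        |
 *
 * *len is the space left for results and is 0 when the caller's buffer is
 * no larger than the header itself.
 */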
507 static int list_devices(struct file *filp, struct dm_ioctl *param, size_t param_size)
510 struct hash_cell *hc;
511 size_t len, needed = 0;
512 struct gendisk *disk;
513 struct dm_name_list *orig_nl, *nl, *old_nl = NULL;
516 down_write(&_hash_lock);
519 * Loop through all the devices working out how much space we need.
522 for (i = 0; i < NUM_BUCKETS; i++) {
523 list_for_each_entry (hc, _name_buckets + i, name_list) {
524 needed += align_val(offsetof(struct dm_name_list, name) + strlen(hc->name) + 1);
525 needed += align_val(sizeof(uint32_t));
530 * Grab our output buffer.
532 nl = orig_nl = get_result_buffer(param, param_size, &len);
533 if (len < needed || len < sizeof(nl->dev)) {
534 param->flags |= DM_BUFFER_FULL_FLAG;
537 param->data_size = param->data_start + needed;
539 nl->dev = 0; /* Flags no data */
542 * Now loop through filling out the names.
544 for (i = 0; i < NUM_BUCKETS; i++) {
545 list_for_each_entry (hc, _name_buckets + i, name_list) {
547 old_nl->next = (uint32_t) ((void *) nl -
(void *) old_nl);
549 disk = dm_disk(hc->md);
550 nl->dev = huge_encode_dev(disk_devt(disk));
552 strcpy(nl->name, hc->name);
555 event_nr = align_ptr(nl->name + strlen(hc->name) + 1);
556 *event_nr = dm_get_event_nr(hc->md);
557 nl = align_ptr(event_nr + 1);
561 * If a mismatch happens, security may be compromised due to buffer
562 * overflow, so it's better to crash.
564 BUG_ON((char *)nl - (char *)orig_nl != needed);
567 up_write(&_hash_lock);
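/*
 * Each record emitted above is a struct dm_name_list (dev, next, name)
 * followed by an aligned uint32_t event counter; nl->next holds the byte
 * offset from one record to the next and stays 0 on the final entry, which
 * is how userspace knows where the list ends.
 */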
571 static void list_version_get_needed(struct target_type *tt, void *needed_param)
573 size_t *needed = needed_param;
575 *needed += sizeof(struct dm_target_versions);
576 *needed += strlen(tt->name);
577 *needed += ALIGN_MASK;
580 static void list_version_get_info(struct target_type *tt, void *param)
582 struct vers_iter *info = param;
584 /* Check space - it might have changed since the first iteration */
585 if ((char *)info->vers + sizeof(tt->version) + strlen(tt->name) + 1 >
info->end) {
588 info->flags = DM_BUFFER_FULL_FLAG;
593 info->old_vers->next = (uint32_t) ((void *)info->vers -
594 (void *)info->old_vers);
595 info->vers->version[0] = tt->version[0];
596 info->vers->version[1] = tt->version[1];
597 info->vers->version[2] = tt->version[2];
598 info->vers->next = 0;
599 strcpy(info->vers->name, tt->name);
601 info->old_vers = info->vers;
602 info->vers = align_ptr(((void *) ++info->vers) + strlen(tt->name) + 1);
605 static int __list_versions(struct dm_ioctl *param, size_t param_size, const char *name)
607 size_t len, needed = 0;
608 struct dm_target_versions *vers;
609 struct vers_iter iter_info;
610 struct target_type *tt = NULL;
613 tt = dm_get_target_type(name);
619 * Loop through all the targets working out how much space we need.
623 dm_target_iterate(list_version_get_needed, &needed);
625 list_version_get_needed(tt, &needed);
628 * Grab our output buffer.
630 vers = get_result_buffer(param, param_size, &len);
632 param->flags |= DM_BUFFER_FULL_FLAG;
635 param->data_size = param->data_start + needed;
637 iter_info.param_size = param_size;
638 iter_info.old_vers = NULL;
639 iter_info.vers = vers;
641 iter_info.end = (char *)vers + len;
644 * Now loop through filling out the names & versions.
647 dm_target_iterate(list_version_get_info, &iter_info);
649 list_version_get_info(tt, &iter_info);
650 param->flags |= iter_info.flags;
654 dm_put_target_type(tt);
658 static int list_versions(struct file *filp, struct dm_ioctl *param, size_t param_size)
660 return __list_versions(param, param_size, NULL);
663 static int get_target_version(struct file *filp, struct dm_ioctl *param, size_t param_size)
665 return __list_versions(param, param_size, param->name);
668 static int check_name(const char *name)
670 if (strchr(name, '/')) {
671 DMWARN("invalid device name");
679 * On successful return, the caller must not attempt to acquire
680 * _hash_lock without first calling dm_put_live_table, because dm_table_destroy
681 * waits for this dm_put_live_table and could be called under this lock.
683 static struct dm_table *dm_get_inactive_table(struct mapped_device *md, int *srcu_idx)
685 struct hash_cell *hc;
686 struct dm_table *table = NULL;
688 /* increment rcu count, we don't care about the table pointer */
689 dm_get_live_table(md, srcu_idx);
691 down_read(&_hash_lock);
692 hc = dm_get_mdptr(md);
693 if (!hc || hc->md != md) {
694 DMWARN("device has been removed from the dev hash table.");
701 up_read(&_hash_lock);
706 static struct dm_table *dm_get_live_or_inactive_table(struct mapped_device *md,
707 struct dm_ioctl *param,
710 return (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) ?
711 dm_get_inactive_table(md, srcu_idx) : dm_get_live_table(md, srcu_idx);
715 * Fills in a dm_ioctl structure, ready for sending back to userland.
718 static void __dev_status(struct mapped_device *md, struct dm_ioctl *param)
720 struct gendisk *disk = dm_disk(md);
721 struct dm_table *table;
724 param->flags &= ~(DM_SUSPEND_FLAG | DM_READONLY_FLAG |
725 DM_ACTIVE_PRESENT_FLAG | DM_INTERNAL_SUSPEND_FLAG);
727 if (dm_suspended_md(md))
728 param->flags |= DM_SUSPEND_FLAG;
730 if (dm_suspended_internally_md(md))
731 param->flags |= DM_INTERNAL_SUSPEND_FLAG;
733 if (dm_test_deferred_remove_flag(md))
734 param->flags |= DM_DEFERRED_REMOVE;
736 param->dev = huge_encode_dev(disk_devt(disk));
739 * Yes, this will be out of date by the time it gets back
740 * to userland, but it is still very useful for reference.
743 param->open_count = dm_open_count(md);
745 param->event_nr = dm_get_event_nr(md);
746 param->target_count = 0;
748 table = dm_get_live_table(md, &srcu_idx);
750 if (!(param->flags & DM_QUERY_INACTIVE_TABLE_FLAG)) {
751 if (get_disk_ro(disk))
752 param->flags |= DM_READONLY_FLAG;
753 param->target_count = dm_table_get_num_targets(table);
756 param->flags |= DM_ACTIVE_PRESENT_FLAG;
758 dm_put_live_table(md, srcu_idx);
760 if (param->flags & DM_QUERY_INACTIVE_TABLE_FLAG) {
762 table = dm_get_inactive_table(md, &srcu_idx);
764 if (!(dm_table_get_mode(table) & FMODE_WRITE))
765 param->flags |= DM_READONLY_FLAG;
766 param->target_count = dm_table_get_num_targets(table);
768 dm_put_live_table(md, srcu_idx);
772 static int dev_create(struct file *filp, struct dm_ioctl *param, size_t param_size)
774 int r, m = DM_ANY_MINOR;
775 struct mapped_device *md;
777 r = check_name(param->name);
781 if (param->flags & DM_PERSISTENT_DEV_FLAG)
782 m = MINOR(huge_decode_dev(param->dev));
784 r = dm_create(m, &md);
788 r = dm_hash_insert(param->name, *param->uuid ? param->uuid : NULL, md);
795 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
797 __dev_status(md, param);
805 * Always use UUID for lookups if it's present, otherwise use name or dev.
807 static struct hash_cell *__find_device_hash_cell(struct dm_ioctl *param)
809 struct hash_cell *hc = NULL;
812 if (*param->name || param->dev)
815 hc = __get_uuid_cell(param->uuid);
818 } else if (*param->name) {
822 hc = __get_name_cell(param->name);
825 } else if (param->dev) {
826 hc = __get_dev_cell(param->dev);
833 * Sneakily write in both the name and the uuid
834 * while we have the cell.
836 strlcpy(param->name, hc->name, sizeof(param->name));
838 strlcpy(param->uuid, hc->uuid, sizeof(param->uuid));
840 param->uuid[0] = '\0';
843 param->flags |= DM_INACTIVE_PRESENT_FLAG;
845 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
850 static struct mapped_device *find_device(struct dm_ioctl *param)
852 struct hash_cell *hc;
853 struct mapped_device *md = NULL;
855 down_read(&_hash_lock);
856 hc = __find_device_hash_cell(param);
859 up_read(&_hash_lock);
864 static int dev_remove(struct file *filp, struct dm_ioctl *param, size_t param_size)
866 struct hash_cell *hc;
867 struct mapped_device *md;
871 down_write(&_hash_lock);
872 hc = __find_device_hash_cell(param);
875 DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
876 up_write(&_hash_lock);
883 * Ensure the device is not open and nothing further can open it.
885 r = dm_lock_for_deletion(md, !!(param->flags & DM_DEFERRED_REMOVE), false);
887 if (r == -EBUSY && param->flags & DM_DEFERRED_REMOVE) {
888 up_write(&_hash_lock);
892 DMDEBUG_LIMIT("unable to remove open device %s", hc->name);
893 up_write(&_hash_lock);
898 t = __hash_remove(hc);
899 up_write(&_hash_lock);
906 param->flags &= ~DM_DEFERRED_REMOVE;
908 if (!dm_kobject_uevent(md, KOBJ_REMOVE, param->event_nr))
909 param->flags |= DM_UEVENT_GENERATED_FLAG;
917 * Check a string doesn't overrun the chunk of
918 * memory we copied from userland.
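* Returns 0 if a terminating NUL is found before @end, -EINVAL otherwise.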
920 static int invalid_str(char *str, void *end)
922 while ((void *) str < end)
929 static int dev_rename(struct file *filp, struct dm_ioctl *param, size_t param_size)
932 char *new_data = (char *) param + param->data_start;
933 struct mapped_device *md;
934 unsigned change_uuid = (param->flags & DM_UUID_FLAG) ? 1 : 0;
936 if (new_data < param->data ||
937 invalid_str(new_data, (void *) param + param_size) || !*new_data ||
938 strlen(new_data) > (change_uuid ? DM_UUID_LEN - 1 : DM_NAME_LEN - 1)) {
939 DMWARN("Invalid new mapped device name or uuid string supplied.");
944 r = check_name(new_data);
949 md = dm_hash_rename(param, new_data);
953 __dev_status(md, param);
959 static int dev_set_geometry(struct file *filp, struct dm_ioctl *param, size_t param_size)
962 struct mapped_device *md;
963 struct hd_geometry geometry;
964 unsigned long indata[4];
965 char *geostr = (char *) param + param->data_start;
968 md = find_device(param);
972 if (geostr < param->data ||
973 invalid_str(geostr, (void *) param + param_size)) {
974 DMWARN("Invalid geometry supplied.");
978 x = sscanf(geostr, "%lu %lu %lu %lu%c", indata,
979 indata + 1, indata + 2, indata + 3, &dummy);
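/*
 * The geometry arrives as a single whitespace-separated string such as
 * "16383 255 63 0" (cylinders, heads, sectors, start); the trailing %c only
 * matches if junk follows the four numbers, letting the sanity check below
 * reject malformed input.
 */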
982 DMWARN("Unable to interpret geometry settings.");
986 if (indata[0] > 65535 || indata[1] > 255 ||
987 indata[2] > 255 || indata[3] > ULONG_MAX) {
988 DMWARN("Geometry exceeds range limits.");
992 geometry.cylinders = indata[0];
993 geometry.heads = indata[1];
994 geometry.sectors = indata[2];
995 geometry.start = indata[3];
997 r = dm_set_geometry(md, &geometry);
999 param->data_size = 0;
1006 static int do_suspend(struct dm_ioctl *param)
1009 unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
1010 struct mapped_device *md;
1012 md = find_device(param);
1016 if (param->flags & DM_SKIP_LOCKFS_FLAG)
1017 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
1018 if (param->flags & DM_NOFLUSH_FLAG)
1019 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
1021 if (!dm_suspended_md(md)) {
1022 r = dm_suspend(md, suspend_flags);
1027 __dev_status(md, param);
1035 static int do_resume(struct dm_ioctl *param)
1038 unsigned suspend_flags = DM_SUSPEND_LOCKFS_FLAG;
1039 struct hash_cell *hc;
1040 struct mapped_device *md;
1041 struct dm_table *new_map, *old_map = NULL;
1043 down_write(&_hash_lock);
1045 hc = __find_device_hash_cell(param);
1047 DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
1048 up_write(&_hash_lock);
1054 new_map = hc->new_map;
1056 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
1058 up_write(&_hash_lock);
1060 /* Do we need to load a new map ? */
1062 /* Suspend if it isn't already suspended */
1063 if (param->flags & DM_SKIP_LOCKFS_FLAG)
1064 suspend_flags &= ~DM_SUSPEND_LOCKFS_FLAG;
1065 if (param->flags & DM_NOFLUSH_FLAG)
1066 suspend_flags |= DM_SUSPEND_NOFLUSH_FLAG;
1067 if (!dm_suspended_md(md))
1068 dm_suspend(md, suspend_flags);
1070 old_map = dm_swap_table(md, new_map);
1071 if (IS_ERR(old_map)) {
1073 dm_table_destroy(new_map);
1075 return PTR_ERR(old_map);
1078 if (dm_table_get_mode(new_map) & FMODE_WRITE)
1079 set_disk_ro(dm_disk(md), 0);
else
1081 set_disk_ro(dm_disk(md), 1);
1084 if (dm_suspended_md(md)) {
r = dm_resume(md);
1086 if (!r && !dm_kobject_uevent(md, KOBJ_CHANGE, param->event_nr))
1087 param->flags |= DM_UEVENT_GENERATED_FLAG;
1091 * Since dm_swap_table synchronizes RCU, nobody should be in
1092 * read-side critical section already.
1095 dm_table_destroy(old_map);
1098 __dev_status(md, param);
1105 * Set or unset the suspension state of a device.
1106 * If the device is already in the requested state we just return its status.
1108 static int dev_suspend(struct file *filp, struct dm_ioctl *param, size_t param_size)
1110 if (param->flags & DM_SUSPEND_FLAG)
1111 return do_suspend(param);
1113 return do_resume(param);
1117 * Copies device info back to user space, used by
1118 * the create and info ioctls.
1120 static int dev_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
1122 struct mapped_device *md;
1124 md = find_device(param);
1128 __dev_status(md, param);
1135 * Build up the status struct for each target
1137 static void retrieve_status(struct dm_table *table,
1138 struct dm_ioctl *param, size_t param_size)
1140 unsigned int i, num_targets;
1141 struct dm_target_spec *spec;
1142 char *outbuf, *outptr;
1144 size_t remaining, len, used = 0;
1145 unsigned status_flags = 0;
1147 outptr = outbuf = get_result_buffer(param, param_size, &len);
1149 if (param->flags & DM_STATUS_TABLE_FLAG)
1150 type = STATUSTYPE_TABLE;
1152 type = STATUSTYPE_INFO;
1154 /* Get all the target info */
1155 num_targets = dm_table_get_num_targets(table);
1156 for (i = 0; i < num_targets; i++) {
1157 struct dm_target *ti = dm_table_get_target(table, i);
1160 remaining = len - (outptr - outbuf);
1161 if (remaining <= sizeof(struct dm_target_spec)) {
1162 param->flags |= DM_BUFFER_FULL_FLAG;
1166 spec = (struct dm_target_spec *) outptr;
1169 spec->sector_start = ti->begin;
1170 spec->length = ti->len;
1171 strncpy(spec->target_type, ti->type->name,
1172 sizeof(spec->target_type) - 1);
1174 outptr += sizeof(struct dm_target_spec);
1175 remaining = len - (outptr - outbuf);
1176 if (remaining <= 0) {
1177 param->flags |= DM_BUFFER_FULL_FLAG;
1181 /* Get the status/table string from the target driver */
1182 if (ti->type->status) {
1183 if (param->flags & DM_NOFLUSH_FLAG)
1184 status_flags |= DM_STATUS_NOFLUSH_FLAG;
1185 ti->type->status(ti, type, status_flags, outptr, remaining);
1189 l = strlen(outptr) + 1;
1190 if (l == remaining) {
1191 param->flags |= DM_BUFFER_FULL_FLAG;
1196 used = param->data_start + (outptr - outbuf);
1198 outptr = align_ptr(outptr);
1199 spec->next = outptr - outbuf;
1203 param->data_size = used;
1205 param->target_count = num_targets;
1209 * Wait for a device to report an event
1211 static int dev_wait(struct file *filp, struct dm_ioctl *param, size_t param_size)
1214 struct mapped_device *md;
1215 struct dm_table *table;
1218 md = find_device(param);
1223 * Wait for a notification event
1225 if (dm_wait_event(md, param->event_nr)) {
1231 * The userland program is going to want to know what
1232 * changed to trigger the event, so we may as well tell
1233 * it and save an ioctl.
1235 __dev_status(md, param);
1237 table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
1239 retrieve_status(table, param, param_size);
1240 dm_put_live_table(md, srcu_idx);
1249 * Remember the global event number and make it possible to poll
1250 * for further events.
1252 static int dev_arm_poll(struct file *filp, struct dm_ioctl *param, size_t param_size)
1254 struct dm_file *priv = filp->private_data;
1256 priv->global_event_nr = atomic_read(&dm_global_event_nr);
1261 static inline fmode_t get_mode(struct dm_ioctl *param)
1263 fmode_t mode = FMODE_READ | FMODE_WRITE;
1265 if (param->flags & DM_READONLY_FLAG)
1271 static int next_target(struct dm_target_spec *last, uint32_t next, void *end,
1272 struct dm_target_spec **spec, char **target_params)
1274 *spec = (struct dm_target_spec *) ((unsigned char *) last + next);
1275 *target_params = (char *) (*spec + 1);
1277 if (*spec < (last + 1))
return -EINVAL;
1280 return invalid_str(*target_params, end);
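/*
 * Layout assumed in the input buffer: the first dm_target_spec sits at
 * param->data_start and each spec's 'next' field is the byte offset from
 * that spec to the following one, with its parameter string packed
 * immediately after the struct:
 *
 *	dm_ioctl | spec 0 | "params 0\0" | spec 1 | "params 1\0" | ...
 *
 * The *spec < (last + 1) test rejects offsets that would step backwards.
 */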
1283 static int populate_table(struct dm_table *table,
1284 struct dm_ioctl *param, size_t param_size)
1288 struct dm_target_spec *spec = (struct dm_target_spec *) param;
1289 uint32_t next = param->data_start;
1290 void *end = (void *) param + param_size;
1291 char *target_params;
1293 if (!param->target_count) {
1294 DMWARN("populate_table: no targets specified");
1298 for (i = 0; i < param->target_count; i++) {
1300 r = next_target(spec, next, end, &spec, &target_params);
1302 DMWARN("unable to find target");
1306 r = dm_table_add_target(table, spec->target_type,
1307 (sector_t) spec->sector_start,
1308 (sector_t) spec->length,
target_params);
1311 DMWARN("error adding target to table");
1318 return dm_table_complete(table);
1321 static bool is_valid_type(enum dm_queue_mode cur, enum dm_queue_mode new)
1324 (cur == DM_TYPE_BIO_BASED && new == DM_TYPE_DAX_BIO_BASED))
1330 static int table_load(struct file *filp, struct dm_ioctl *param, size_t param_size)
1333 struct hash_cell *hc;
1334 struct dm_table *t, *old_map = NULL;
1335 struct mapped_device *md;
1336 struct target_type *immutable_target_type;
1338 md = find_device(param);
1342 r = dm_table_create(&t, get_mode(param), param->target_count, md);
1346 /* Protect md->type and md->queue against concurrent table loads. */
1347 dm_lock_md_type(md);
1348 r = populate_table(t, param, param_size);
1350 goto err_unlock_md_type;
1352 immutable_target_type = dm_get_immutable_target_type(md);
1353 if (immutable_target_type &&
1354 (immutable_target_type != dm_table_get_immutable_target_type(t)) &&
1355 !dm_table_get_wildcard_target(t)) {
1356 DMWARN("can't replace immutable target type %s",
1357 immutable_target_type->name);
1359 goto err_unlock_md_type;
1362 if (dm_get_md_type(md) == DM_TYPE_NONE) {
1363 /* Initial table load: acquire type of table. */
1364 dm_set_md_type(md, dm_table_get_type(t));
1366 /* setup md->queue to reflect md's type (may block) */
1367 r = dm_setup_md_queue(md, t);
1369 DMWARN("unable to set up device queue for new table.");
1370 goto err_unlock_md_type;
1372 } else if (!is_valid_type(dm_get_md_type(md), dm_table_get_type(t))) {
1373 DMWARN("can't change device type (old=%u vs new=%u) after initial table load.",
1374 dm_get_md_type(md), dm_table_get_type(t));
1376 goto err_unlock_md_type;
1379 dm_unlock_md_type(md);
1381 /* stage inactive table */
1382 down_write(&_hash_lock);
1383 hc = dm_get_mdptr(md);
1384 if (!hc || hc->md != md) {
1385 DMWARN("device has been removed from the dev hash table.");
1386 up_write(&_hash_lock);
1388 goto err_destroy_table;
1392 old_map = hc->new_map;
1394 up_write(&_hash_lock);
1396 param->flags |= DM_INACTIVE_PRESENT_FLAG;
1397 __dev_status(md, param);
1401 dm_table_destroy(old_map);
1409 dm_unlock_md_type(md);
1411 dm_table_destroy(t);
1418 static int table_clear(struct file *filp, struct dm_ioctl *param, size_t param_size)
1420 struct hash_cell *hc;
1421 struct mapped_device *md;
1422 struct dm_table *old_map = NULL;
1424 down_write(&_hash_lock);
1426 hc = __find_device_hash_cell(param);
1428 DMDEBUG_LIMIT("device doesn't appear to be in the dev hash table.");
1429 up_write(&_hash_lock);
1434 old_map = hc->new_map;
1438 param->flags &= ~DM_INACTIVE_PRESENT_FLAG;
1440 __dev_status(hc->md, param);
1442 up_write(&_hash_lock);
1445 dm_table_destroy(old_map);
1453 * Retrieves a list of devices used by a particular dm device.
1455 static void retrieve_deps(struct dm_table *table,
1456 struct dm_ioctl *param, size_t param_size)
1458 unsigned int count = 0;
1459 struct list_head *tmp;
1461 struct dm_dev_internal *dd;
1462 struct dm_target_deps *deps;
1464 deps = get_result_buffer(param, param_size, &len);
1467 * Count the devices.
1469 list_for_each (tmp, dm_table_get_devices(table))
1473 * Check we have enough space.
1475 needed = struct_size(deps, dev, count);
1477 param->flags |= DM_BUFFER_FULL_FLAG;
1482 * Fill in the devices.
1484 deps->count = count;
1486 list_for_each_entry (dd, dm_table_get_devices(table), list)
1487 deps->dev[count++] = huge_encode_dev(dd->dm_dev->bdev->bd_dev);
1489 param->data_size = param->data_start + needed;
1492 static int table_deps(struct file *filp, struct dm_ioctl *param, size_t param_size)
1494 struct mapped_device *md;
1495 struct dm_table *table;
1498 md = find_device(param);
1502 __dev_status(md, param);
1504 table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
1506 retrieve_deps(table, param, param_size);
1507 dm_put_live_table(md, srcu_idx);
1515 * Return the status of a device as a text string for each target.
1518 static int table_status(struct file *filp, struct dm_ioctl *param, size_t param_size)
1520 struct mapped_device *md;
1521 struct dm_table *table;
1524 md = find_device(param);
1528 __dev_status(md, param);
1530 table = dm_get_live_or_inactive_table(md, param, &srcu_idx);
1532 retrieve_status(table, param, param_size);
1533 dm_put_live_table(md, srcu_idx);
1541 * Process device-mapper dependent messages. Messages prefixed with '@'
1542 * are processed by the DM core. All others are delivered to the target.
1543 * Returns a number <= 1 if message was processed by device mapper.
1544 * Returns 2 if message should be delivered to the target.
1546 static int message_for_md(struct mapped_device *md, unsigned argc, char **argv,
1547 char *result, unsigned maxlen)
1552 return 2; /* no '@' prefix, deliver to target */
1554 if (!strcasecmp(argv[0], "@cancel_deferred_remove")) {
1556 DMERR("Invalid arguments for @cancel_deferred_remove");
1559 return dm_cancel_deferred_remove(md);
1562 r = dm_stats_message(md, argc, argv, result, maxlen);
1566 DMERR("Unsupported message sent to DM core: %s", argv[0]);
1571 * Pass a message to the target that's at the supplied device offset.
1573 static int target_message(struct file *filp, struct dm_ioctl *param, size_t param_size)
1577 struct mapped_device *md;
1578 struct dm_table *table;
1579 struct dm_target *ti;
1580 struct dm_target_msg *tmsg = (void *) param + param->data_start;
1582 char *result = get_result_buffer(param, param_size, &maxlen);
1585 md = find_device(param);
1589 if (tmsg < (struct dm_target_msg *) param->data ||
1590 invalid_str(tmsg->message, (void *) param + param_size)) {
1591 DMWARN("Invalid target message parameters.");
1596 r = dm_split_args(&argc, &argv, tmsg->message);
1598 DMWARN("Failed to split target message parameters");
1603 DMWARN("Empty message received.");
1608 r = message_for_md(md, argc, argv, result, maxlen);
1612 table = dm_get_live_table(md, &srcu_idx);
1616 if (dm_deleting_md(md)) {
1621 ti = dm_table_find_target(table, tmsg->sector);
1623 DMWARN("Target message sector outside device.");
1625 } else if (ti->type->message)
1626 r = ti->type->message(ti, argc, argv, result, maxlen);
1628 DMWARN("Target type does not support messages");
1633 dm_put_live_table(md, srcu_idx);
1638 __dev_status(md, param);
1641 param->flags |= DM_DATA_OUT_FLAG;
1642 if (dm_message_test_buffer_overflow(result, maxlen))
1643 param->flags |= DM_BUFFER_FULL_FLAG;
1645 param->data_size = param->data_start + strlen(result) + 1;
1654 * The ioctl parameter block consists of two parts, a dm_ioctl struct
1655 * followed by a data buffer. This flag is set if the second part,
1656 * which has a variable size, is not used by the function processing the ioctl.
1659 #define IOCTL_FLAGS_NO_PARAMS 1
1660 #define IOCTL_FLAGS_ISSUE_GLOBAL_EVENT 2
1662 /*-----------------------------------------------------------------
1663 * Implementation of open/close/ioctl on the special char device.
1665 *---------------------------------------------------------------*/
1666 static ioctl_fn lookup_ioctl(unsigned int cmd, int *ioctl_flags)
1668 static const struct {
1673 {DM_VERSION_CMD, 0, NULL}, /* version is dealt with elsewhere */
1674 {DM_REMOVE_ALL_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, remove_all},
1675 {DM_LIST_DEVICES_CMD, 0, list_devices},
1677 {DM_DEV_CREATE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_create},
1678 {DM_DEV_REMOVE_CMD, IOCTL_FLAGS_NO_PARAMS | IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_remove},
1679 {DM_DEV_RENAME_CMD, IOCTL_FLAGS_ISSUE_GLOBAL_EVENT, dev_rename},
1680 {DM_DEV_SUSPEND_CMD, IOCTL_FLAGS_NO_PARAMS, dev_suspend},
1681 {DM_DEV_STATUS_CMD, IOCTL_FLAGS_NO_PARAMS, dev_status},
1682 {DM_DEV_WAIT_CMD, 0, dev_wait},
1684 {DM_TABLE_LOAD_CMD, 0, table_load},
1685 {DM_TABLE_CLEAR_CMD, IOCTL_FLAGS_NO_PARAMS, table_clear},
1686 {DM_TABLE_DEPS_CMD, 0, table_deps},
1687 {DM_TABLE_STATUS_CMD, 0, table_status},
1689 {DM_LIST_VERSIONS_CMD, 0, list_versions},
1691 {DM_TARGET_MSG_CMD, 0, target_message},
1692 {DM_DEV_SET_GEOMETRY_CMD, 0, dev_set_geometry},
1693 {DM_DEV_ARM_POLL, IOCTL_FLAGS_NO_PARAMS, dev_arm_poll},
1694 {DM_GET_TARGET_VERSION, 0, get_target_version},
1697 if (unlikely(cmd >= ARRAY_SIZE(_ioctls)))
1700 cmd = array_index_nospec(cmd, ARRAY_SIZE(_ioctls));
1701 *ioctl_flags = _ioctls[cmd].flags;
1702 return _ioctls[cmd].fn;
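/*
 * The range check plus array_index_nospec() prevent a mispredicted branch
 * from speculatively indexing past _ioctls[] with a user-controlled cmd
 * (Spectre v1 hardening).
 */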
1706 * As well as checking the version compatibility this always
1707 * copies the kernel interface version out.
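* The major number must match exactly and the kernel's minor must be at
* least the caller's; the patchlevel is not compared.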
1709 static int check_version(unsigned int cmd, struct dm_ioctl __user *user)
1711 uint32_t version[3];
1714 if (copy_from_user(version, user->version, sizeof(version)))
1717 if ((DM_VERSION_MAJOR != version[0]) ||
1718 (DM_VERSION_MINOR < version[1])) {
1719 DMWARN("ioctl interface mismatch: "
1720 "kernel(%u.%u.%u), user(%u.%u.%u), cmd(%d)",
1721 DM_VERSION_MAJOR, DM_VERSION_MINOR,
1722 DM_VERSION_PATCHLEVEL,
1723 version[0], version[1], version[2], cmd);
1728 * Fill in the kernel version.
1730 version[0] = DM_VERSION_MAJOR;
1731 version[1] = DM_VERSION_MINOR;
1732 version[2] = DM_VERSION_PATCHLEVEL;
1733 if (copy_to_user(user->version, version, sizeof(version)))
1739 #define DM_PARAMS_MALLOC 0x0001 /* Params allocated with kvmalloc() */
1740 #define DM_WIPE_BUFFER 0x0010 /* Wipe input buffer before returning from ioctl */
1742 static void free_params(struct dm_ioctl *param, size_t param_size, int param_flags)
1744 if (param_flags & DM_WIPE_BUFFER)
1745 memset(param, 0, param_size);
1747 if (param_flags & DM_PARAMS_MALLOC)
1751 static int copy_params(struct dm_ioctl __user *user, struct dm_ioctl *param_kernel,
1752 int ioctl_flags, struct dm_ioctl **param, int *param_flags)
1754 struct dm_ioctl *dmi;
1756 const size_t minimum_data_size = offsetof(struct dm_ioctl, data);
1759 if (copy_from_user(param_kernel, user, minimum_data_size))
1762 if (param_kernel->data_size < minimum_data_size)
1765 secure_data = param_kernel->flags & DM_SECURE_DATA_FLAG;
1767 *param_flags = secure_data ? DM_WIPE_BUFFER : 0;
1769 if (ioctl_flags & IOCTL_FLAGS_NO_PARAMS) {
1771 dmi->data_size = minimum_data_size;
1776 * Use __GFP_HIGH to avoid low memory issues when a device is
1777 * suspended and the ioctl is needed to resume it.
1778 * Use kmalloc() rather than vmalloc() when we can.
1781 noio_flag = memalloc_noio_save();
1782 dmi = kvmalloc(param_kernel->data_size, GFP_KERNEL | __GFP_HIGH);
1783 memalloc_noio_restore(noio_flag);
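/*
 * The noio scope makes the allocation behave as GFP_NOIO: this ioctl may be
 * resuming a suspended device, so memory reclaim must not issue I/O that
 * could end up queued behind that same device.
 */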
1786 if (secure_data && clear_user(user, param_kernel->data_size))
1791 *param_flags |= DM_PARAMS_MALLOC;
1793 /* Copy from param_kernel (which was already copied from user) */
1794 memcpy(dmi, param_kernel, minimum_data_size);
1796 if (copy_from_user(&dmi->data, (char __user *)user + minimum_data_size,
1797 param_kernel->data_size - minimum_data_size))
1800 /* Wipe the user buffer so we do not return it to userspace */
1801 if (secure_data && clear_user(user, param_kernel->data_size))
1808 free_params(dmi, param_kernel->data_size, *param_flags);
1813 static int validate_params(uint cmd, struct dm_ioctl *param)
1815 /* Always clear this flag */
1816 param->flags &= ~DM_BUFFER_FULL_FLAG;
1817 param->flags &= ~DM_UEVENT_GENERATED_FLAG;
1818 param->flags &= ~DM_SECURE_DATA_FLAG;
1819 param->flags &= ~DM_DATA_OUT_FLAG;
1821 /* Ignores parameters */
1822 if (cmd == DM_REMOVE_ALL_CMD ||
1823 cmd == DM_LIST_DEVICES_CMD ||
1824 cmd == DM_LIST_VERSIONS_CMD)
1827 if (cmd == DM_DEV_CREATE_CMD) {
1828 if (!*param->name) {
1829 DMWARN("name not supplied when creating device");
1832 } else if (*param->uuid && *param->name) {
1833 DMWARN("only supply one of name or uuid, cmd(%u)", cmd);
1837 /* Ensure strings are terminated */
1838 param->name[DM_NAME_LEN - 1] = '\0';
1839 param->uuid[DM_UUID_LEN - 1] = '\0';
1844 static int ctl_ioctl(struct file *file, uint command, struct dm_ioctl __user *user)
1850 struct dm_ioctl *param;
1852 size_t input_param_size;
1853 struct dm_ioctl param_kernel;
1855 /* only root can play with this */
1856 if (!capable(CAP_SYS_ADMIN))
1859 if (_IOC_TYPE(command) != DM_IOCTL)
1862 cmd = _IOC_NR(command);
1865 * Check the interface version passed in. This also
1866 * writes out the kernel's interface version.
1868 r = check_version(cmd, user);
1873 * Nothing more to do for the version command.
1875 if (cmd == DM_VERSION_CMD)
1878 fn = lookup_ioctl(cmd, &ioctl_flags);
1880 DMWARN("dm_ctl_ioctl: unknown command 0x%x", command);
1885 * Copy the parameters into kernel space.
1887 r = copy_params(user, &param_kernel, ioctl_flags, &param, &param_flags);
1892 input_param_size = param->data_size;
1893 r = validate_params(cmd, param);
1897 param->data_size = offsetof(struct dm_ioctl, data);
1898 r = fn(file, param, input_param_size);
1900 if (unlikely(param->flags & DM_BUFFER_FULL_FLAG) &&
1901 unlikely(ioctl_flags & IOCTL_FLAGS_NO_PARAMS))
1902 DMERR("ioctl %d tried to output some data but has IOCTL_FLAGS_NO_PARAMS set", cmd);
1904 if (!r && ioctl_flags & IOCTL_FLAGS_ISSUE_GLOBAL_EVENT)
1905 dm_issue_global_event();
1908 * Copy the results back to userland.
1910 if (!r && copy_to_user(user, param, param->data_size))
1914 free_params(param, input_param_size, param_flags);
1918 static long dm_ctl_ioctl(struct file *file, uint command, ulong u)
1920 return (long)ctl_ioctl(file, command, (struct dm_ioctl __user *)u);
1923 #ifdef CONFIG_COMPAT
1924 static long dm_compat_ctl_ioctl(struct file *file, uint command, ulong u)
1926 return (long)dm_ctl_ioctl(file, command, (ulong) compat_ptr(u));
1929 #define dm_compat_ctl_ioctl NULL
1932 static int dm_open(struct inode *inode, struct file *filp)
1935 struct dm_file *priv;
1937 r = nonseekable_open(inode, filp);
1941 priv = filp->private_data = kmalloc(sizeof(struct dm_file), GFP_KERNEL);
1945 priv->global_event_nr = atomic_read(&dm_global_event_nr);
1950 static int dm_release(struct inode *inode, struct file *filp)
1952 kfree(filp->private_data);
1956 static __poll_t dm_poll(struct file *filp, poll_table *wait)
1958 struct dm_file *priv = filp->private_data;
1961 poll_wait(filp, &dm_global_eventq, wait);
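/*
 * The difference is evaluated as a signed 32-bit value so the test keeps
 * working after dm_global_event_nr wraps around.
 */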
1963 if ((int)(atomic_read(&dm_global_event_nr) - priv->global_event_nr) > 0)
1969 static const struct file_operations _ctl_fops = {
1971 .release = dm_release,
1973 .unlocked_ioctl = dm_ctl_ioctl,
1974 .compat_ioctl = dm_compat_ctl_ioctl,
1975 .owner = THIS_MODULE,
1976 .llseek = noop_llseek,
1979 static struct miscdevice _dm_misc = {
1980 .minor = MAPPER_CTRL_MINOR,
1982 .nodename = DM_DIR "/" DM_CONTROL_NODE,
1986 MODULE_ALIAS_MISCDEV(MAPPER_CTRL_MINOR);
1987 MODULE_ALIAS("devname:" DM_DIR "/" DM_CONTROL_NODE);
1990 * Create misc character device and link to DM_DIR/control.
1992 int __init dm_interface_init(void)
2000 r = misc_register(&_dm_misc);
2002 DMERR("misc_register failed for control device");
2007 DMINFO("%d.%d.%d%s initialised: %s", DM_VERSION_MAJOR,
2008 DM_VERSION_MINOR, DM_VERSION_PATCHLEVEL, DM_VERSION_EXTRA,
2013 void dm_interface_exit(void)
2015 misc_deregister(&_dm_misc);
2020 * dm_copy_name_and_uuid - Copy mapped device name & uuid into supplied buffers
2021 * @md: Pointer to mapped_device
2022 * @name: Buffer (size DM_NAME_LEN) for name
2023 * @uuid: Buffer (size DM_UUID_LEN) for uuid or empty string if uuid not defined
2025 int dm_copy_name_and_uuid(struct mapped_device *md, char *name, char *uuid)
2028 struct hash_cell *hc;
2033 mutex_lock(&dm_hash_cells_mutex);
2034 hc = dm_get_mdptr(md);
2035 if (!hc || hc->md != md) {
2041 strcpy(name, hc->name);
2043 strcpy(uuid, hc->uuid ? : "");
2046 mutex_unlock(&dm_hash_cells_mutex);
2050 EXPORT_SYMBOL_GPL(dm_copy_name_and_uuid);
2053 * dm_early_create - create a mapped device in early boot.
2055 * @dmi: Contains the main information of the device mapping to be created.
2056 * @spec_array: array of pointers to struct dm_target_spec. Describes the
2057 * mapping table of the device.
2058 * @target_params_array: array of strings with the parameters to a specific target.
2061 * Instead of having the struct dm_target_spec and the parameters for every
2062 * target embedded at the end of struct dm_ioctl (as performed in a normal
2063 * ioctl), pass them as arguments, so the caller doesn't need to serialize them.
2064 * The size of the spec_array and target_params_array is given by
2065 * @dmi->target_count.
2066 * This function is supposed to be called in early boot, so locking mechanisms
2067 * to protect against concurrent loads are not required.
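*
* For reference, this is normally invoked from dm-init.c when the kernel is
* booted with a dm-mod.create= parameter describing the device and its table.
*/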
2069 int __init dm_early_create(struct dm_ioctl *dmi,
2070 struct dm_target_spec **spec_array,
2071 char **target_params_array)
2073 int r, m = DM_ANY_MINOR;
2074 struct dm_table *t, *old_map;
2075 struct mapped_device *md;
2078 if (!dmi->target_count)
2081 r = check_name(dmi->name);
2085 if (dmi->flags & DM_PERSISTENT_DEV_FLAG)
2086 m = MINOR(huge_decode_dev(dmi->dev));
2088 /* alloc dm device */
2089 r = dm_create(m, &md);
2094 r = dm_hash_insert(dmi->name, *dmi->uuid ? dmi->uuid : NULL, md);
2096 goto err_destroy_dm;
2099 r = dm_table_create(&t, get_mode(dmi), dmi->target_count, md);
2101 goto err_hash_remove;
2104 for (i = 0; i < dmi->target_count; i++) {
2105 r = dm_table_add_target(t, spec_array[i]->target_type,
2106 (sector_t) spec_array[i]->sector_start,
2107 (sector_t) spec_array[i]->length,
2108 target_params_array[i]);
2110 DMWARN("error adding target to table");
2111 goto err_destroy_table;
2116 r = dm_table_complete(t);
2118 goto err_destroy_table;
2120 md->type = dm_table_get_type(t);
2121 /* setup md->queue to reflect md's type (may block) */
2122 r = dm_setup_md_queue(md, t);
2124 DMWARN("unable to set up device queue for new table.");
2125 goto err_destroy_table;
2130 old_map = dm_swap_table(md, t);
2131 if (IS_ERR(old_map)) {
2132 r = PTR_ERR(old_map);
2133 goto err_destroy_table;
2135 set_disk_ro(dm_disk(md), !!(dmi->flags & DM_READONLY_FLAG));
2140 goto err_destroy_table;
2142 DMINFO("%s (%s) is ready", md->disk->disk_name, dmi->name);
2147 dm_table_destroy(t);
2149 (void) __hash_remove(__get_name_cell(dmi->name));
2150 /* release reference from __get_name_cell */