2 #include <linux/ceph/ceph_debug.h>
4 #include <linux/module.h>
5 #include <linux/slab.h>
8 #include <linux/ceph/libceph.h>
9 #include <linux/ceph/osdmap.h>
10 #include <linux/ceph/decode.h>
11 #include <linux/crush/hash.h>
12 #include <linux/crush/mapper.h>
14 char *ceph_osdmap_state_str(char *str, int len, int state)
19 if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
20 snprintf(str, len, "exists, up");
21 else if (state & CEPH_OSD_EXISTS)
22 snprintf(str, len, "exists");
23 else if (state & CEPH_OSD_UP)
24 snprintf(str, len, "up");
26 snprintf(str, len, "doesn't exist");
33 static int calc_bits_of(unsigned int t)
44 * the foo_mask is the smallest value of the form 2^n-1 that is >= foo-1.
46 static void calc_pg_masks(struct ceph_pg_pool_info *pi)
48 pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
49 pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
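/*
 * Worked example (illustrative): with pg_num = 12, calc_bits_of(11) = 4,
 * so pg_num_mask = (1 << 4) - 1 = 15.  ceph_stable_mod() later folds a
 * raw placement seed into [0, pg_num) with this mask: seed 7 stays 7
 * (7 & 15 < 12), while seed 13 wraps to 13 & 7 = 5.
 */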
55 static int crush_decode_uniform_bucket(void **p, void *end,
56 struct crush_bucket_uniform *b)
58 dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
59 ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
60 b->item_weight = ceph_decode_32(p);
66 static int crush_decode_list_bucket(void **p, void *end,
67 struct crush_bucket_list *b)
70 dout("crush_decode_list_bucket %p to %p\n", *p, end);
71 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
72 if (b->item_weights == NULL)
74 b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
75 if (b->sum_weights == NULL)
77 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
78 for (j = 0; j < b->h.size; j++) {
79 b->item_weights[j] = ceph_decode_32(p);
80 b->sum_weights[j] = ceph_decode_32(p);
87 static int crush_decode_tree_bucket(void **p, void *end,
88 struct crush_bucket_tree *b)
91 dout("crush_decode_tree_bucket %p to %p\n", *p, end);
92 ceph_decode_8_safe(p, end, b->num_nodes, bad);
93 b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
94 if (b->node_weights == NULL)
96 ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
97 for (j = 0; j < b->num_nodes; j++)
98 b->node_weights[j] = ceph_decode_32(p);
104 static int crush_decode_straw_bucket(void **p, void *end,
105 struct crush_bucket_straw *b)
108 dout("crush_decode_straw_bucket %p to %p\n", *p, end);
109 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
110 if (b->item_weights == NULL)
112 b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
113 if (b->straws == NULL)
115 ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
116 for (j = 0; j < b->h.size; j++) {
117 b->item_weights[j] = ceph_decode_32(p);
118 b->straws[j] = ceph_decode_32(p);
125 static int crush_decode_straw2_bucket(void **p, void *end,
126 struct crush_bucket_straw2 *b)
129 dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
130 b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
131 if (b->item_weights == NULL)
133 ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
134 for (j = 0; j < b->h.size; j++)
135 b->item_weights[j] = ceph_decode_32(p);
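/*
 * All of the per-algorithm bucket decoders above follow the same pattern:
 * crush_decode() has already filled in the common header (including
 * h.size), so only the per-item arrays are allocated and decoded here.
 * In the full source the elided error paths return -ENOMEM on allocation
 * failure or -EINVAL (via the bad: label) on a truncated buffer, and a
 * partially built map is torn down by crush_destroy() in the caller.
 */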
141 static int skip_name_map(void **p, void *end)
144 ceph_decode_32_safe(p, end, len, bad);
148 ceph_decode_32_safe(p, end, strlen, bad);
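/*
 * skip_name_map() consumes one string map: a u32 count followed by
 * (u32 key, length-prefixed string) pairs.  A CRUSH map carries three
 * such maps (type names, item/bucket names, rule names); the kernel
 * client has no use for the names, so crush_decode() below simply skips
 * all three (the num_name_maps < 3 loop).
 */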
156 static struct crush_map *crush_decode(void *pbyval, void *end)
162 void *start = pbyval;
166 dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));
168 c = kzalloc(sizeof(*c), GFP_NOFS);
170 return ERR_PTR(-ENOMEM);
172 /* set tunables to default values */
173 c->choose_local_tries = 2;
174 c->choose_local_fallback_tries = 5;
175 c->choose_total_tries = 19;
176 c->chooseleaf_descend_once = 0;
178 ceph_decode_need(p, end, 4*sizeof(u32), bad);
179 magic = ceph_decode_32(p);
180 if (magic != CRUSH_MAGIC) {
181 pr_err("crush_decode magic %x != current %x\n",
182 (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
185 c->max_buckets = ceph_decode_32(p);
186 c->max_rules = ceph_decode_32(p);
187 c->max_devices = ceph_decode_32(p);
189 c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
190 if (c->buckets == NULL)
192 c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
193 if (c->rules == NULL)
197 for (i = 0; i < c->max_buckets; i++) {
200 struct crush_bucket *b;
202 ceph_decode_32_safe(p, end, alg, bad);
204 c->buckets[i] = NULL;
207 dout("crush_decode bucket %d off %x %p to %p\n",
208 i, (int)(*p-start), *p, end);
211 case CRUSH_BUCKET_UNIFORM:
212 size = sizeof(struct crush_bucket_uniform);
214 case CRUSH_BUCKET_LIST:
215 size = sizeof(struct crush_bucket_list);
217 case CRUSH_BUCKET_TREE:
218 size = sizeof(struct crush_bucket_tree);
220 case CRUSH_BUCKET_STRAW:
221 size = sizeof(struct crush_bucket_straw);
223 case CRUSH_BUCKET_STRAW2:
224 size = sizeof(struct crush_bucket_straw2);
231 b = c->buckets[i] = kzalloc(size, GFP_NOFS);
235 ceph_decode_need(p, end, 4*sizeof(u32), bad);
236 b->id = ceph_decode_32(p);
237 b->type = ceph_decode_16(p);
238 b->alg = ceph_decode_8(p);
239 b->hash = ceph_decode_8(p);
240 b->weight = ceph_decode_32(p);
241 b->size = ceph_decode_32(p);
243 dout("crush_decode bucket size %d off %x %p to %p\n",
244 b->size, (int)(*p-start), *p, end);
246 b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
247 if (b->items == NULL)
249 b->perm = kcalloc(b->size, sizeof(u32), GFP_NOFS);
254 ceph_decode_need(p, end, b->size*sizeof(u32), bad);
255 for (j = 0; j < b->size; j++)
256 b->items[j] = ceph_decode_32(p);
259 case CRUSH_BUCKET_UNIFORM:
260 err = crush_decode_uniform_bucket(p, end,
261 (struct crush_bucket_uniform *)b);
265 case CRUSH_BUCKET_LIST:
266 err = crush_decode_list_bucket(p, end,
267 (struct crush_bucket_list *)b);
271 case CRUSH_BUCKET_TREE:
272 err = crush_decode_tree_bucket(p, end,
273 (struct crush_bucket_tree *)b);
277 case CRUSH_BUCKET_STRAW:
278 err = crush_decode_straw_bucket(p, end,
279 (struct crush_bucket_straw *)b);
283 case CRUSH_BUCKET_STRAW2:
284 err = crush_decode_straw2_bucket(p, end,
285 (struct crush_bucket_straw2 *)b);
293 dout("rule vec is %p\n", c->rules);
294 for (i = 0; i < c->max_rules; i++) {
296 struct crush_rule *r;
299 ceph_decode_32_safe(p, end, yes, bad);
301 dout("crush_decode NO rule %d off %x %p to %p\n",
302 i, (int)(*p-start), *p, end);
307 dout("crush_decode rule %d off %x %p to %p\n",
308 i, (int)(*p-start), *p, end);
311 ceph_decode_32_safe(p, end, yes, bad);
312 #if BITS_PER_LONG == 32
314 if (yes > (ULONG_MAX - sizeof(*r))
315 / sizeof(struct crush_rule_step))
318 r = c->rules[i] = kmalloc(sizeof(*r) +
319 yes*sizeof(struct crush_rule_step),
323 dout(" rule %d is at %p\n", i, r);
325 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
326 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
327 for (j = 0; j < r->len; j++) {
328 r->steps[j].op = ceph_decode_32(p);
329 r->steps[j].arg1 = ceph_decode_32(p);
330 r->steps[j].arg2 = ceph_decode_32(p);
334 /* ignore trailing name maps. */
335 for (num_name_maps = 0; num_name_maps < 3; num_name_maps++) {
336 err = skip_name_map(p, end);
342 ceph_decode_need(p, end, 3*sizeof(u32), done);
343 c->choose_local_tries = ceph_decode_32(p);
344 c->choose_local_fallback_tries = ceph_decode_32(p);
345 c->choose_total_tries = ceph_decode_32(p);
346 dout("crush decode tunable choose_local_tries = %d\n",
347 c->choose_local_tries);
348 dout("crush decode tunable choose_local_fallback_tries = %d\n",
349 c->choose_local_fallback_tries);
350 dout("crush decode tunable choose_total_tries = %d\n",
351 c->choose_total_tries);
353 ceph_decode_need(p, end, sizeof(u32), done);
354 c->chooseleaf_descend_once = ceph_decode_32(p);
355 dout("crush decode tunable chooseleaf_descend_once = %d\n",
356 c->chooseleaf_descend_once);
358 ceph_decode_need(p, end, sizeof(u8), done);
359 c->chooseleaf_vary_r = ceph_decode_8(p);
360 dout("crush decode tunable chooseleaf_vary_r = %d\n",
361 c->chooseleaf_vary_r);
363 /* skip straw_calc_version, allowed_bucket_algs */
364 ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
365 *p += sizeof(u8) + sizeof(u32);
367 ceph_decode_need(p, end, sizeof(u8), done);
368 c->chooseleaf_stable = ceph_decode_8(p);
369 dout("crush decode tunable chooseleaf_stable = %d\n",
370 c->chooseleaf_stable);
373 dout("crush_decode success\n");
379 dout("crush_decode fail %d\n", err);
384 int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
386 if (lhs->pool < rhs->pool)
388 if (lhs->pool > rhs->pool)
390 if (lhs->seed < rhs->seed)
392 if (lhs->seed > rhs->seed)
399 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
400 * to a set of osds) and primary_temp (explicit primary setting)
402 static int __insert_pg_mapping(struct ceph_pg_mapping *new,
403 struct rb_root *root)
405 struct rb_node **p = &root->rb_node;
406 struct rb_node *parent = NULL;
407 struct ceph_pg_mapping *pg = NULL;
410 dout("__insert_pg_mapping %llx %p\n", *(u64 *)&new->pgid, new);
413 pg = rb_entry(parent, struct ceph_pg_mapping, node);
414 c = ceph_pg_compare(&new->pgid, &pg->pgid);
423 rb_link_node(&new->node, parent, p);
424 rb_insert_color(&new->node, root);
428 static struct ceph_pg_mapping *__lookup_pg_mapping(struct rb_root *root,
431 struct rb_node *n = root->rb_node;
432 struct ceph_pg_mapping *pg;
436 pg = rb_entry(n, struct ceph_pg_mapping, node);
437 c = ceph_pg_compare(&pgid, &pg->pgid);
443 dout("__lookup_pg_mapping %lld.%x got %p\n",
444 pgid.pool, pgid.seed, pg);
451 static int __remove_pg_mapping(struct rb_root *root, struct ceph_pg pgid)
453 struct ceph_pg_mapping *pg = __lookup_pg_mapping(root, pgid);
456 dout("__remove_pg_mapping %lld.%x %p\n", pgid.pool, pgid.seed,
458 rb_erase(&pg->node, root);
462 dout("__remove_pg_mapping %lld.%x dne\n", pgid.pool, pgid.seed);
467 * rbtree of pg pool info
469 static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
471 struct rb_node **p = &root->rb_node;
472 struct rb_node *parent = NULL;
473 struct ceph_pg_pool_info *pi = NULL;
477 pi = rb_entry(parent, struct ceph_pg_pool_info, node);
478 if (new->id < pi->id)
480 else if (new->id > pi->id)
486 rb_link_node(&new->node, parent, p);
487 rb_insert_color(&new->node, root);
491 static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
493 struct ceph_pg_pool_info *pi;
494 struct rb_node *n = root->rb_node;
497 pi = rb_entry(n, struct ceph_pg_pool_info, node);
500 else if (id > pi->id)
508 struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
510 return __lookup_pg_pool(&map->pg_pools, id);
513 const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
515 struct ceph_pg_pool_info *pi;
517 if (id == CEPH_NOPOOL)
520 if (WARN_ON_ONCE(id > (u64) INT_MAX))
523 pi = __lookup_pg_pool(&map->pg_pools, (int) id);
525 return pi ? pi->name : NULL;
527 EXPORT_SYMBOL(ceph_pg_pool_name_by_id);
529 int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
533 for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
534 struct ceph_pg_pool_info *pi =
535 rb_entry(rbp, struct ceph_pg_pool_info, node);
536 if (pi->name && strcmp(pi->name, name) == 0)
541 EXPORT_SYMBOL(ceph_pg_poolid_by_name);
543 static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
545 rb_erase(&pi->node, root);
550 static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
556 ceph_decode_need(p, end, 2 + 4, bad);
557 ev = ceph_decode_8(p); /* encoding version */
558 cv = ceph_decode_8(p); /* compat version */
560 pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
564 pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
567 len = ceph_decode_32(p);
568 ceph_decode_need(p, end, len, bad);
571 pi->type = ceph_decode_8(p);
572 pi->size = ceph_decode_8(p);
573 pi->crush_ruleset = ceph_decode_8(p);
574 pi->object_hash = ceph_decode_8(p);
576 pi->pg_num = ceph_decode_32(p);
577 pi->pgp_num = ceph_decode_32(p);
579 *p += 4 + 4; /* skip lpg* */
580 *p += 4; /* skip last_change */
581 *p += 8 + 4; /* skip snap_seq, snap_epoch */
584 num = ceph_decode_32(p);
586 *p += 8; /* snapid key */
587 *p += 1 + 1; /* versions */
588 len = ceph_decode_32(p);
592 /* skip removed_snaps */
593 num = ceph_decode_32(p);
596 *p += 8; /* skip auid */
597 pi->flags = ceph_decode_64(p);
598 *p += 4; /* skip crash_replay_interval */
601 pi->min_size = ceph_decode_8(p);
603 pi->min_size = pi->size - pi->size / 2;
606 *p += 8 + 8; /* skip quota_max_* */
610 num = ceph_decode_32(p);
613 *p += 8; /* skip tier_of */
614 *p += 1; /* skip cache_mode */
616 pi->read_tier = ceph_decode_64(p);
617 pi->write_tier = ceph_decode_64(p);
624 /* skip properties */
625 num = ceph_decode_32(p);
627 len = ceph_decode_32(p);
629 len = ceph_decode_32(p);
635 /* skip hit_set_params */
636 *p += 1 + 1; /* versions */
637 len = ceph_decode_32(p);
640 *p += 4; /* skip hit_set_period */
641 *p += 4; /* skip hit_set_count */
645 *p += 4; /* skip stripe_width */
648 *p += 8; /* skip target_max_bytes */
649 *p += 8; /* skip target_max_objects */
650 *p += 4; /* skip cache_target_dirty_ratio_micro */
651 *p += 4; /* skip cache_target_full_ratio_micro */
652 *p += 4; /* skip cache_min_flush_age */
653 *p += 4; /* skip cache_min_evict_age */
657 /* skip erasure_code_profile */
658 len = ceph_decode_32(p);
663 pi->last_force_request_resend = ceph_decode_32(p);
665 pi->last_force_request_resend = 0;
667 /* ignore the rest */
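/*
 * decode_pool() is versioned: ev is the encoding version and cv the
 * oldest version still able to decode it, both checked against a
 * supported window up front.  In the full source the later fields
 * (min_size, tiering, hit_set, last_force_request_resend, ...) are each
 * guarded by "if (ev >= N)" checks (elided here), and anything beyond the
 * known fields is skipped via the length prefix decoded at the top, so
 * both older and newer pool encodings parse cleanly.
 */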
677 static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
679 struct ceph_pg_pool_info *pi;
683 ceph_decode_32_safe(p, end, num, bad);
684 dout(" %d pool names\n", num);
686 ceph_decode_64_safe(p, end, pool, bad);
687 ceph_decode_32_safe(p, end, len, bad);
688 dout(" pool %llu len %d\n", pool, len);
689 ceph_decode_need(p, end, len, bad);
690 pi = __lookup_pg_pool(&map->pg_pools, pool);
692 char *name = kstrndup(*p, len, GFP_NOFS);
698 dout(" name is %s\n", pi->name);
711 struct ceph_osdmap *ceph_osdmap_alloc(void)
713 struct ceph_osdmap *map;
715 map = kzalloc(sizeof(*map), GFP_NOIO);
719 map->pg_pools = RB_ROOT;
721 map->pg_temp = RB_ROOT;
722 map->primary_temp = RB_ROOT;
723 mutex_init(&map->crush_scratch_mutex);
728 void ceph_osdmap_destroy(struct ceph_osdmap *map)
730 dout("osdmap_destroy %p\n", map);
732 crush_destroy(map->crush);
733 while (!RB_EMPTY_ROOT(&map->pg_temp)) {
734 struct ceph_pg_mapping *pg =
735 rb_entry(rb_first(&map->pg_temp),
736 struct ceph_pg_mapping, node);
737 rb_erase(&pg->node, &map->pg_temp);
740 while (!RB_EMPTY_ROOT(&map->primary_temp)) {
741 struct ceph_pg_mapping *pg =
742 rb_entry(rb_first(&map->primary_temp),
743 struct ceph_pg_mapping, node);
744 rb_erase(&pg->node, &map->primary_temp);
747 while (!RB_EMPTY_ROOT(&map->pg_pools)) {
748 struct ceph_pg_pool_info *pi =
749 rb_entry(rb_first(&map->pg_pools),
750 struct ceph_pg_pool_info, node);
751 __remove_pg_pool(&map->pg_pools, pi);
753 kfree(map->osd_state);
754 kfree(map->osd_weight);
755 kfree(map->osd_addr);
756 kfree(map->osd_primary_affinity);
761 * Adjust max_osd value, (re)allocate arrays.
763 * The new elements are properly initialized.
765 static int osdmap_set_max_osd(struct ceph_osdmap *map, int max)
769 struct ceph_entity_addr *addr;
772 state = krealloc(map->osd_state, max*sizeof(*state), GFP_NOFS);
775 map->osd_state = state;
777 weight = krealloc(map->osd_weight, max*sizeof(*weight), GFP_NOFS);
780 map->osd_weight = weight;
782 addr = krealloc(map->osd_addr, max*sizeof(*addr), GFP_NOFS);
785 map->osd_addr = addr;
787 for (i = map->max_osd; i < max; i++) {
788 map->osd_state[i] = 0;
789 map->osd_weight[i] = CEPH_OSD_OUT;
790 memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
793 if (map->osd_primary_affinity) {
796 affinity = krealloc(map->osd_primary_affinity,
797 max*sizeof(*affinity), GFP_NOFS);
800 map->osd_primary_affinity = affinity;
802 for (i = map->max_osd; i < max; i++)
803 map->osd_primary_affinity[i] =
804 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
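/*
 * krealloc() preserves the existing entries, so only the slots from the
 * old max_osd up to the new max need initializing: new OSDs start out as
 * "doesn't exist" (state 0) with weight CEPH_OSD_OUT and a zeroed
 * address.  The primary-affinity array is grown only if it already
 * exists; while it is absent, every OSD implicitly has the default
 * affinity (see ceph_get_primary_affinity()).
 */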
812 #define OSDMAP_WRAPPER_COMPAT_VER 7
813 #define OSDMAP_CLIENT_DATA_COMPAT_VER 1
816 * Return 0 or error. On success, *v is set to 0 for old (v6) osdmaps,
817 * to struct_v of the client_data section for new (v7 and above)
820 static int get_osdmap_client_data_v(void **p, void *end,
821 const char *prefix, u8 *v)
825 ceph_decode_8_safe(p, end, struct_v, e_inval);
829 ceph_decode_8_safe(p, end, struct_compat, e_inval);
830 if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
831 pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
832 struct_v, struct_compat,
833 OSDMAP_WRAPPER_COMPAT_VER, prefix);
836 *p += 4; /* ignore wrapper struct_len */
838 ceph_decode_8_safe(p, end, struct_v, e_inval);
839 ceph_decode_8_safe(p, end, struct_compat, e_inval);
840 if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
841 pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
842 struct_v, struct_compat,
843 OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
846 *p += 4; /* ignore client data struct_len */
851 ceph_decode_16_safe(p, end, version, e_inval);
853 pr_warn("got v %d < 6 of %s ceph_osdmap\n",
858 /* old osdmap encoding */
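/*
 * To summarize the two wire formats handled above: v7+ osdmaps wrap the
 * client-usable data in (struct_v, struct_compat, struct_len) headers,
 * one for the whole map and one for the client-data section, and *v is
 * set to the client-data struct_v; pre-v7 maps are a flat encoding with
 * a single u16 version and are reported as *v = 0.  Callers use *v to
 * gate newer sections such as primary_temp (struct_v >= 1) and
 * primary_affinity (struct_v >= 2).
 */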
869 static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
874 ceph_decode_32_safe(p, end, n, e_inval);
876 struct ceph_pg_pool_info *pi;
880 ceph_decode_64_safe(p, end, pool, e_inval);
882 pi = __lookup_pg_pool(&map->pg_pools, pool);
883 if (!incremental || !pi) {
884 pi = kzalloc(sizeof(*pi), GFP_NOFS);
890 ret = __insert_pg_pool(&map->pg_pools, pi);
897 ret = decode_pool(p, end, pi);
908 static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
910 return __decode_pools(p, end, map, false);
913 static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
915 return __decode_pools(p, end, map, true);
918 static int __decode_pg_temp(void **p, void *end, struct ceph_osdmap *map,
923 ceph_decode_32_safe(p, end, n, e_inval);
929 ret = ceph_decode_pgid(p, end, &pgid);
933 ceph_decode_32_safe(p, end, len, e_inval);
935 ret = __remove_pg_mapping(&map->pg_temp, pgid);
936 BUG_ON(!incremental && ret != -ENOENT);
938 if (!incremental || len > 0) {
939 struct ceph_pg_mapping *pg;
941 ceph_decode_need(p, end, len*sizeof(u32), e_inval);
943 if (len > (UINT_MAX - sizeof(*pg)) / sizeof(u32))
946 pg = kzalloc(sizeof(*pg) + len*sizeof(u32), GFP_NOFS);
951 pg->pg_temp.len = len;
952 for (i = 0; i < len; i++)
953 pg->pg_temp.osds[i] = ceph_decode_32(p);
955 ret = __insert_pg_mapping(pg, &map->pg_temp);
969 static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
971 return __decode_pg_temp(p, end, map, false);
974 static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
976 return __decode_pg_temp(p, end, map, true);
979 static int __decode_primary_temp(void **p, void *end, struct ceph_osdmap *map,
984 ceph_decode_32_safe(p, end, n, e_inval);
990 ret = ceph_decode_pgid(p, end, &pgid);
994 ceph_decode_32_safe(p, end, osd, e_inval);
996 ret = __remove_pg_mapping(&map->primary_temp, pgid);
997 BUG_ON(!incremental && ret != -ENOENT);
999 if (!incremental || osd != (u32)-1) {
1000 struct ceph_pg_mapping *pg;
1002 pg = kzalloc(sizeof(*pg), GFP_NOFS);
1007 pg->primary_temp.osd = osd;
1009 ret = __insert_pg_mapping(pg, &map->primary_temp);
1023 static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
1025 return __decode_primary_temp(p, end, map, false);
1028 static int decode_new_primary_temp(void **p, void *end,
1029 struct ceph_osdmap *map)
1031 return __decode_primary_temp(p, end, map, true);
1034 u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
1036 BUG_ON(osd >= map->max_osd);
1038 if (!map->osd_primary_affinity)
1039 return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1041 return map->osd_primary_affinity[osd];
1044 static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
1046 BUG_ON(osd >= map->max_osd);
1048 if (!map->osd_primary_affinity) {
1051 map->osd_primary_affinity = kmalloc(map->max_osd*sizeof(u32),
1053 if (!map->osd_primary_affinity)
1056 for (i = 0; i < map->max_osd; i++)
1057 map->osd_primary_affinity[i] =
1058 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
1061 map->osd_primary_affinity[osd] = aff;
1066 static int decode_primary_affinity(void **p, void *end,
1067 struct ceph_osdmap *map)
1071 ceph_decode_32_safe(p, end, len, e_inval);
1073 kfree(map->osd_primary_affinity);
1074 map->osd_primary_affinity = NULL;
1077 if (len != map->max_osd)
1080 ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);
1082 for (i = 0; i < map->max_osd; i++) {
1085 ret = set_primary_affinity(map, i, ceph_decode_32(p));
1096 static int decode_new_primary_affinity(void **p, void *end,
1097 struct ceph_osdmap *map)
1101 ceph_decode_32_safe(p, end, n, e_inval);
1106 ceph_decode_32_safe(p, end, osd, e_inval);
1107 ceph_decode_32_safe(p, end, aff, e_inval);
1109 ret = set_primary_affinity(map, osd, aff);
1113 pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
1123 * decode a full map.
1125 static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
1134 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1136 err = get_osdmap_client_data_v(p, end, "full", &struct_v);
1140 /* fsid, epoch, created, modified */
1141 ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
1142 sizeof(map->created) + sizeof(map->modified), e_inval);
1143 ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
1144 epoch = map->epoch = ceph_decode_32(p);
1145 ceph_decode_copy(p, &map->created, sizeof(map->created));
1146 ceph_decode_copy(p, &map->modified, sizeof(map->modified));
1149 err = decode_pools(p, end, map);
1154 err = decode_pool_names(p, end, map);
1158 ceph_decode_32_safe(p, end, map->pool_max, e_inval);
1160 ceph_decode_32_safe(p, end, map->flags, e_inval);
1163 ceph_decode_32_safe(p, end, max, e_inval);
1165 /* (re)alloc osd arrays */
1166 err = osdmap_set_max_osd(map, max);
1170 /* osd_state, osd_weight, osd_addrs->client_addr */
1171 ceph_decode_need(p, end, 3*sizeof(u32) +
1172 map->max_osd*(1 + sizeof(*map->osd_weight) +
1173 sizeof(*map->osd_addr)), e_inval);
1175 if (ceph_decode_32(p) != map->max_osd)
1178 ceph_decode_copy(p, map->osd_state, map->max_osd);
1180 if (ceph_decode_32(p) != map->max_osd)
1183 for (i = 0; i < map->max_osd; i++)
1184 map->osd_weight[i] = ceph_decode_32(p);
1186 if (ceph_decode_32(p) != map->max_osd)
1189 ceph_decode_copy(p, map->osd_addr, map->max_osd*sizeof(*map->osd_addr));
1190 for (i = 0; i < map->max_osd; i++)
1191 ceph_decode_addr(&map->osd_addr[i]);
1194 err = decode_pg_temp(p, end, map);
1199 if (struct_v >= 1) {
1200 err = decode_primary_temp(p, end, map);
1205 /* primary_affinity */
1206 if (struct_v >= 2) {
1207 err = decode_primary_affinity(p, end, map);
1211 /* XXX can this happen? */
1212 kfree(map->osd_primary_affinity);
1213 map->osd_primary_affinity = NULL;
1217 ceph_decode_32_safe(p, end, len, e_inval);
1218 map->crush = crush_decode(*p, min(*p + len, end));
1219 if (IS_ERR(map->crush)) {
1220 err = PTR_ERR(map->crush);
1226 /* ignore the rest */
1229 dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1235 pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1236 err, epoch, (int)(*p - start), *p, start, end);
1237 print_hex_dump(KERN_DEBUG, "osdmap: ",
1238 DUMP_PREFIX_OFFSET, 16, 1,
1239 start, end - start, true);
1244 * Allocate and decode a full map.
1246 struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
1248 struct ceph_osdmap *map;
1251 map = ceph_osdmap_alloc();
1253 return ERR_PTR(-ENOMEM);
1255 ret = osdmap_decode(p, end, map);
1257 ceph_osdmap_destroy(map);
1258 return ERR_PTR(ret);
1265 * Encoding order is (new_up_client, new_state, new_weight). Need to
1266 * apply in the (new_weight, new_state, new_up_client) order, because
1267 * an incremental map may look like e.g.
1269 * new_up_client: { osd=6, addr=... } # set osd_state and addr
1270 * new_state: { osd=6, xorstate=EXISTS } # clear osd_state
1272 static int decode_new_up_state_weight(void **p, void *end,
1273 struct ceph_osdmap *map)
1275 void *new_up_client;
1277 void *new_weight_end;
1281 ceph_decode_32_safe(p, end, len, e_inval);
1282 len *= sizeof(u32) + sizeof(struct ceph_entity_addr);
1283 ceph_decode_need(p, end, len, e_inval);
1287 ceph_decode_32_safe(p, end, len, e_inval);
1288 len *= sizeof(u32) + sizeof(u8);
1289 ceph_decode_need(p, end, len, e_inval);
1293 ceph_decode_32_safe(p, end, len, e_inval);
1298 ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
1299 osd = ceph_decode_32(p);
1300 w = ceph_decode_32(p);
1301 BUG_ON(osd >= map->max_osd);
1302 pr_info("osd%d weight 0x%x %s\n", osd, w,
1303 w == CEPH_OSD_IN ? "(in)" :
1304 (w == CEPH_OSD_OUT ? "(out)" : ""));
1305 map->osd_weight[osd] = w;
1308 * If we are marking in, set the EXISTS, and clear the
1309 * AUTOOUT and NEW bits.
1312 map->osd_state[osd] |= CEPH_OSD_EXISTS;
1313 map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
1317 new_weight_end = *p;
1319 /* new_state (up/down) */
1321 len = ceph_decode_32(p);
1327 osd = ceph_decode_32(p);
1328 xorstate = ceph_decode_8(p);
1330 xorstate = CEPH_OSD_UP;
1331 BUG_ON(osd >= map->max_osd);
1332 if ((map->osd_state[osd] & CEPH_OSD_UP) &&
1333 (xorstate & CEPH_OSD_UP))
1334 pr_info("osd%d down\n", osd);
1335 if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
1336 (xorstate & CEPH_OSD_EXISTS)) {
1337 pr_info("osd%d does not exist\n", osd);
1338 ret = set_primary_affinity(map, osd,
1339 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
1342 memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
1343 map->osd_state[osd] = 0;
1345 map->osd_state[osd] ^= xorstate;
1351 len = ceph_decode_32(p);
1354 struct ceph_entity_addr addr;
1356 osd = ceph_decode_32(p);
1357 ceph_decode_copy(p, &addr, sizeof(addr));
1358 ceph_decode_addr(&addr);
1359 BUG_ON(osd >= map->max_osd);
1360 pr_info("osd%d up\n", osd);
1361 map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
1362 map->osd_addr[osd] = addr;
1365 *p = new_weight_end;
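/*
 * The pointer juggling above implements the apply order described in the
 * comment before this function: the starts of the new_up_client and
 * new_state sections are remembered and skipped so the weights can be
 * applied first, *p is then rewound to apply new_state and rewound again
 * for new_up_client, and finally set to new_weight_end so the caller
 * resumes after all three sections.
 */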
1373 * decode and apply an incremental map update.
1375 struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
1376 struct ceph_osdmap *map)
1378 struct crush_map *newcrush = NULL;
1379 struct ceph_fsid fsid;
1381 struct ceph_timespec modified;
1385 __s32 new_flags, max;
1390 dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));
1392 err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
1396 /* fsid, epoch, modified, new_pool_max, new_flags */
1397 ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
1398 sizeof(u64) + sizeof(u32), e_inval);
1399 ceph_decode_copy(p, &fsid, sizeof(fsid));
1400 epoch = ceph_decode_32(p);
1401 BUG_ON(epoch != map->epoch+1);
1402 ceph_decode_copy(p, &modified, sizeof(modified));
1403 new_pool_max = ceph_decode_64(p);
1404 new_flags = ceph_decode_32(p);
1407 ceph_decode_32_safe(p, end, len, e_inval);
1409 dout("apply_incremental full map len %d, %p to %p\n",
1411 return ceph_osdmap_decode(p, min(*p+len, end));
1415 ceph_decode_32_safe(p, end, len, e_inval);
1417 newcrush = crush_decode(*p, min(*p+len, end));
1418 if (IS_ERR(newcrush)) {
1419 err = PTR_ERR(newcrush);
1428 map->flags = new_flags;
1429 if (new_pool_max >= 0)
1430 map->pool_max = new_pool_max;
1433 ceph_decode_32_safe(p, end, max, e_inval);
1435 err = osdmap_set_max_osd(map, max);
1441 map->modified = modified;
1444 crush_destroy(map->crush);
1445 map->crush = newcrush;
1450 err = decode_new_pools(p, end, map);
1454 /* new_pool_names */
1455 err = decode_pool_names(p, end, map);
1460 ceph_decode_32_safe(p, end, len, e_inval);
1462 struct ceph_pg_pool_info *pi;
1464 ceph_decode_64_safe(p, end, pool, e_inval);
1465 pi = __lookup_pg_pool(&map->pg_pools, pool);
1467 __remove_pg_pool(&map->pg_pools, pi);
1470 /* new_up_client, new_state, new_weight */
1471 err = decode_new_up_state_weight(p, end, map);
1476 err = decode_new_pg_temp(p, end, map);
1480 /* new_primary_temp */
1481 if (struct_v >= 1) {
1482 err = decode_new_primary_temp(p, end, map);
1487 /* new_primary_affinity */
1488 if (struct_v >= 2) {
1489 err = decode_new_primary_affinity(p, end, map);
1494 /* ignore the rest */
1497 dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
1503 pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
1504 err, epoch, (int)(*p - start), *p, start, end);
1505 print_hex_dump(KERN_DEBUG, "osdmap: ",
1506 DUMP_PREFIX_OFFSET, 16, 1,
1507 start, end - start, true);
1509 crush_destroy(newcrush);
1510 return ERR_PTR(err);
1513 void ceph_oloc_copy(struct ceph_object_locator *dest,
1514 const struct ceph_object_locator *src)
1516 WARN_ON(!ceph_oloc_empty(dest));
1517 WARN_ON(dest->pool_ns); /* empty() only covers ->pool */
1519 dest->pool = src->pool;
1521 dest->pool_ns = ceph_get_string(src->pool_ns);
1523 EXPORT_SYMBOL(ceph_oloc_copy);
1525 void ceph_oloc_destroy(struct ceph_object_locator *oloc)
1527 ceph_put_string(oloc->pool_ns);
1529 EXPORT_SYMBOL(ceph_oloc_destroy);
1531 void ceph_oid_copy(struct ceph_object_id *dest,
1532 const struct ceph_object_id *src)
1534 WARN_ON(!ceph_oid_empty(dest));
1536 if (src->name != src->inline_name) {
1537 /* very rare, see ceph_object_id definition */
1538 dest->name = kmalloc(src->name_len + 1,
1539 GFP_NOIO | __GFP_NOFAIL);
1542 memcpy(dest->name, src->name, src->name_len + 1);
1543 dest->name_len = src->name_len;
1545 EXPORT_SYMBOL(ceph_oid_copy);
1547 static __printf(2, 0)
1548 int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
1552 WARN_ON(!ceph_oid_empty(oid));
1554 len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
1555 if (len >= sizeof(oid->inline_name))
1558 oid->name_len = len;
1563 * If oid doesn't fit into inline buffer, BUG.
1565 void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
1570 BUG_ON(oid_printf_vargs(oid, fmt, ap));
1573 EXPORT_SYMBOL(ceph_oid_printf);
1575 static __printf(3, 0)
1576 int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
1577 const char *fmt, va_list ap)
1583 len = oid_printf_vargs(oid, fmt, aq);
1587 char *external_name;
1589 external_name = kmalloc(len + 1, gfp);
1593 oid->name = external_name;
1594 WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
1595 oid->name_len = len;
1602 * If oid doesn't fit into inline buffer, allocate.
1604 int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
1605 const char *fmt, ...)
1611 ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
1616 EXPORT_SYMBOL(ceph_oid_aprintf);
1618 void ceph_oid_destroy(struct ceph_object_id *oid)
1620 if (oid->name != oid->inline_name)
1623 EXPORT_SYMBOL(ceph_oid_destroy);
1628 static bool __osds_equal(const struct ceph_osds *lhs,
1629 const struct ceph_osds *rhs)
1631 if (lhs->size == rhs->size &&
1632 !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
1641 static bool osds_equal(const struct ceph_osds *lhs,
1642 const struct ceph_osds *rhs)
1644 if (__osds_equal(lhs, rhs) &&
1645 lhs->primary == rhs->primary)
1651 static bool osds_valid(const struct ceph_osds *set)
1654 if (set->size > 0 && set->primary >= 0)
1657 /* empty can_shift_osds set */
1658 if (!set->size && set->primary == -1)
1661 /* empty !can_shift_osds set - all NONE */
1662 if (set->size > 0 && set->primary == -1) {
1665 for (i = 0; i < set->size; i++) {
1666 if (set->osds[i] != CRUSH_ITEM_NONE)
1676 void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
1678 memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
1679 dest->size = src->size;
1680 dest->primary = src->primary;
1683 static bool is_split(const struct ceph_pg *pgid,
1687 int old_bits = calc_bits_of(old_pg_num);
1688 int old_mask = (1 << old_bits) - 1;
1691 WARN_ON(pgid->seed >= old_pg_num);
1692 if (new_pg_num <= old_pg_num)
1695 for (n = 1; ; n++) {
1696 int next_bit = n << (old_bits - 1);
1697 u32 s = next_bit | pgid->seed;
1699 if (s < old_pg_num || s == pgid->seed)
1701 if (s >= new_pg_num)
1704 s = ceph_stable_mod(s, old_pg_num, old_mask);
1705 if (s == pgid->seed)
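/*
 * Worked example (illustrative): a pool going from old_pg_num = 4 to
 * new_pg_num = 8 with pgid->seed = 1.  old_bits = 3, old_mask = 7; for
 * n = 1, s = 4 | 1 = 5 is a valid new PG (< 8) and
 * ceph_stable_mod(5, 4, 7) = 1 == seed, so PG x.1 gains a child x.5 in
 * the new map and is_split() returns true.
 */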
1712 bool ceph_is_new_interval(const struct ceph_osds *old_acting,
1713 const struct ceph_osds *new_acting,
1714 const struct ceph_osds *old_up,
1715 const struct ceph_osds *new_up,
1722 bool old_sort_bitwise,
1723 bool new_sort_bitwise,
1724 const struct ceph_pg *pgid)
1726 return !osds_equal(old_acting, new_acting) ||
1727 !osds_equal(old_up, new_up) ||
1728 old_size != new_size ||
1729 old_min_size != new_min_size ||
1730 is_split(pgid, old_pg_num, new_pg_num) ||
1731 old_sort_bitwise != new_sort_bitwise;
1734 static int calc_pg_rank(int osd, const struct ceph_osds *acting)
1738 for (i = 0; i < acting->size; i++) {
1739 if (acting->osds[i] == osd)
1746 static bool primary_changed(const struct ceph_osds *old_acting,
1747 const struct ceph_osds *new_acting)
1749 if (!old_acting->size && !new_acting->size)
1750 return false; /* both still empty */
1752 if (!old_acting->size ^ !new_acting->size)
1753 return true; /* was empty, now not, or vice versa */
1755 if (old_acting->primary != new_acting->primary)
1756 return true; /* primary changed */
1758 if (calc_pg_rank(old_acting->primary, old_acting) !=
1759 calc_pg_rank(new_acting->primary, new_acting))
1762 return false; /* same primary (though replicas may have changed) */
1765 bool ceph_osds_changed(const struct ceph_osds *old_acting,
1766 const struct ceph_osds *new_acting,
1769 if (primary_changed(old_acting, new_acting))
1772 if (any_change && !__osds_equal(old_acting, new_acting))
1779 * calculate file layout from given offset, length.
1780 * fill in correct oid, logical length, and object extent
1783 * for now, we write only a single su, until we can
1784 * pass a stride back to the caller.
1786 int ceph_calc_file_object_mapping(struct ceph_file_layout *layout,
1789 u64 *oxoff, u64 *oxlen)
1791 u32 osize = layout->object_size;
1792 u32 su = layout->stripe_unit;
1793 u32 sc = layout->stripe_count;
1794 u32 bl, stripeno, stripepos, objsetno;
1798 dout("mapping %llu~%llu osize %u fl_su %u\n", off, len,
1800 if (su == 0 || sc == 0)
1802 su_per_object = osize / su;
1803 if (su_per_object == 0)
1805 dout("osize %u / su %u = su_per_object %u\n", osize, su,
1808 if ((su & ~PAGE_MASK) != 0)
1811 /* bl = *off / su; */
1815 dout("off %llu / su %u = bl %u\n", off, su, bl);
1818 stripepos = bl % sc;
1819 objsetno = stripeno / su_per_object;
1821 *ono = objsetno * sc + stripepos;
1822 dout("objset %u * sc %u = ono %u\n", objsetno, sc, (unsigned int)*ono);
1824 /* *oxoff = *off % layout->fl_stripe_unit; # offset in su */
1826 su_offset = do_div(t, su);
1827 *oxoff = su_offset + (stripeno % su_per_object) * su;
1830 * Calculate the length of the extent being written to the selected
1831 * object. This is the minimum of the full length requested (len) or
1832 * the remainder of the current stripe being written to.
1834 *oxlen = min_t(u64, len, su - su_offset);
1836 dout(" obj extent %llu~%llu\n", *oxoff, *oxlen);
1840 dout(" invalid layout\n");
1846 EXPORT_SYMBOL(ceph_calc_file_object_mapping);
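/*
 * Worked example (illustrative): stripe_unit = 1M, stripe_count = 4,
 * object_size = 4M, so su_per_object = 4.  For off = 5M, len = 2M:
 * bl = 5, stripeno = 1, stripepos = 1, objsetno = 0, giving *ono = 1,
 * *oxoff = 1M, *oxlen = 1M -- only the remainder of the current stripe
 * unit is mapped, as noted above, so the caller must iterate to cover
 * the full 2M.
 */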
1849 * Map an object into a PG.
1851 * Should only be called with target_oid and target_oloc (as opposed to
1852 * base_oid and base_oloc), since tiering isn't taken into account.
1854 int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
1855 struct ceph_object_id *oid,
1856 struct ceph_object_locator *oloc,
1857 struct ceph_pg *raw_pgid)
1859 struct ceph_pg_pool_info *pi;
1861 pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
1865 if (!oloc->pool_ns) {
1866 raw_pgid->pool = oloc->pool;
1867 raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
1869 dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
1870 raw_pgid->pool, raw_pgid->seed);
1872 char stack_buf[256];
1873 char *buf = stack_buf;
1874 int nsl = oloc->pool_ns->len;
1875 size_t total = nsl + 1 + oid->name_len;
1877 if (total > sizeof(stack_buf)) {
1878 buf = kmalloc(total, GFP_NOIO);
1882 memcpy(buf, oloc->pool_ns->str, nsl);
1884 memcpy(buf + nsl + 1, oid->name, oid->name_len);
1885 raw_pgid->pool = oloc->pool;
1886 raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
1887 if (buf != stack_buf)
1889 dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
1890 oid->name, nsl, oloc->pool_ns->str,
1891 raw_pgid->pool, raw_pgid->seed);
1895 EXPORT_SYMBOL(ceph_object_locator_to_pg);
1898 * Map a raw PG (full precision ps) into an actual PG.
1900 static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
1901 const struct ceph_pg *raw_pgid,
1902 struct ceph_pg *pgid)
1904 pgid->pool = raw_pgid->pool;
1905 pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
1910 * Map a raw PG (full precision ps) into a placement ps (placement
1911 * seed). Include pool id in that value so that different pools don't
1912 * use the same seeds.
1914 static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
1915 const struct ceph_pg *raw_pgid)
1917 if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
1918 /* hash pool id and seed so that pool PGs do not overlap */
1919 return crush_hash32_2(CRUSH_HASH_RJENKINS1,
1920 ceph_stable_mod(raw_pgid->seed,
1926 * legacy behavior: add ps and pool together. this is
1927 * not a great approach because the PGs from each pool
1928 * will overlap on top of each other: 0.5 == 1.4 ==
1931 return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
1933 (unsigned)raw_pgid->pool;
1937 static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
1938 int *result, int result_max,
1939 const __u32 *weight, int weight_max)
1943 BUG_ON(result_max > CEPH_PG_MAX_SIZE);
1945 mutex_lock(&map->crush_scratch_mutex);
1946 r = crush_do_rule(map->crush, ruleno, x, result, result_max,
1947 weight, weight_max, map->crush_scratch_ary);
1948 mutex_unlock(&map->crush_scratch_mutex);
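/*
 * crush_do_rule() needs per-call scratch space; this wrapper serializes
 * all CRUSH computations on the map through crush_scratch_mutex so that
 * the scratch array embedded in struct ceph_osdmap can be reused, and
 * returns the size of the computed set (callers treat a negative value
 * as an error).
 */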
1954 * Calculate raw set (CRUSH output) for given PG. The result may
1955 * contain nonexistent OSDs. ->primary is undefined for a raw set.
1957 * Placement seed (CRUSH input) is returned through @ppps.
1959 static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
1960 struct ceph_pg_pool_info *pi,
1961 const struct ceph_pg *raw_pgid,
1962 struct ceph_osds *raw,
1965 u32 pps = raw_pg_to_pps(pi, raw_pgid);
1969 ceph_osds_init(raw);
1973 ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
1976 pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
1977 pi->id, pi->crush_ruleset, pi->type, pi->size);
1981 len = do_crush(osdmap, ruleno, pps, raw->osds,
1982 min_t(int, pi->size, ARRAY_SIZE(raw->osds)),
1983 osdmap->osd_weight, osdmap->max_osd);
1985 pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
1986 len, ruleno, pi->id, pi->crush_ruleset, pi->type,
1995 * Given raw set, calculate up set and up primary. By definition of an
1996 * up set, the result won't contain nonexistent or down OSDs.
1998 * This is done in-place - on return @set is the up set. If it's
1999 * empty, ->primary will remain undefined.
2001 static void raw_to_up_osds(struct ceph_osdmap *osdmap,
2002 struct ceph_pg_pool_info *pi,
2003 struct ceph_osds *set)
2007 /* ->primary is undefined for a raw set */
2008 BUG_ON(set->primary != -1);
2010 if (ceph_can_shift_osds(pi)) {
2014 for (i = 0; i < set->size; i++) {
2015 if (ceph_osd_is_down(osdmap, set->osds[i])) {
2020 set->osds[i - removed] = set->osds[i];
2022 set->size -= removed;
2024 set->primary = set->osds[0];
2026 /* set down/dne devices to NONE */
2027 for (i = set->size - 1; i >= 0; i--) {
2028 if (ceph_osd_is_down(osdmap, set->osds[i]))
2029 set->osds[i] = CRUSH_ITEM_NONE;
2031 set->primary = set->osds[i];
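/*
 * For replicated pools (ceph_can_shift_osds()) the up set is compacted:
 * down OSDs are dropped and the primary is simply the first survivor.
 * Erasure-coded pools must keep shard positions stable, so down or
 * nonexistent OSDs are replaced with CRUSH_ITEM_NONE placeholders and
 * the primary ends up as the first non-NONE entry (found by the reverse
 * scan above).
 */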
2036 static void apply_primary_affinity(struct ceph_osdmap *osdmap,
2037 struct ceph_pg_pool_info *pi,
2039 struct ceph_osds *up)
2045 * Do we have any non-default primary_affinity values for these
2048 if (!osdmap->osd_primary_affinity)
2051 for (i = 0; i < up->size; i++) {
2052 int osd = up->osds[i];
2054 if (osd != CRUSH_ITEM_NONE &&
2055 osdmap->osd_primary_affinity[osd] !=
2056 CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
2064 * Pick the primary. Feed both the seed (for the pg) and the
2065 * osd into the hash/rng so that a proportional fraction of an
2066 * osd's pgs get rejected as primary.
2068 for (i = 0; i < up->size; i++) {
2069 int osd = up->osds[i];
2072 if (osd == CRUSH_ITEM_NONE)
2075 aff = osdmap->osd_primary_affinity[osd];
2076 if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
2077 (crush_hash32_2(CRUSH_HASH_RJENKINS1,
2078 pps, osd) >> 16) >= aff) {
2080 * We chose not to use this primary. Note it
2081 * anyway as a fallback in case we don't pick
2082 * anyone else, but keep looking.
2094 up->primary = up->osds[pos];
2096 if (ceph_can_shift_osds(pi) && pos > 0) {
2097 /* move the new primary to the front */
2098 for (i = pos; i > 0; i--)
2099 up->osds[i] = up->osds[i - 1];
2100 up->osds[0] = up->primary;
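/*
 * Primary affinity is effectively a fixed-point fraction of
 * CEPH_OSD_MAX_PRIMARY_AFFINITY: hashing (pps, osd) yields a roughly
 * uniform 16-bit value, so an OSD with affinity aff keeps the primary
 * role for about aff / CEPH_OSD_MAX_PRIMARY_AFFINITY of its PGs and
 * declines the rest, which fall through to later entries in the up set
 * (or back to the first candidate if every OSD declines).
 */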
2105 * Get pg_temp and primary_temp mappings for given PG.
2107 * Note that a PG may have none, only pg_temp, only primary_temp or
2108 * both pg_temp and primary_temp mappings. This means @temp isn't
2109 * always a valid OSD set on return: in the "only primary_temp" case,
2110 * @temp will have its ->primary >= 0 but ->size == 0.
2112 static void get_temp_osds(struct ceph_osdmap *osdmap,
2113 struct ceph_pg_pool_info *pi,
2114 const struct ceph_pg *raw_pgid,
2115 struct ceph_osds *temp)
2117 struct ceph_pg pgid;
2118 struct ceph_pg_mapping *pg;
2121 raw_pg_to_pg(pi, raw_pgid, &pgid);
2122 ceph_osds_init(temp);
2125 pg = __lookup_pg_mapping(&osdmap->pg_temp, pgid);
2127 for (i = 0; i < pg->pg_temp.len; i++) {
2128 if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
2129 if (ceph_can_shift_osds(pi))
2132 temp->osds[temp->size++] = CRUSH_ITEM_NONE;
2134 temp->osds[temp->size++] = pg->pg_temp.osds[i];
2138 /* apply pg_temp's primary */
2139 for (i = 0; i < temp->size; i++) {
2140 if (temp->osds[i] != CRUSH_ITEM_NONE) {
2141 temp->primary = temp->osds[i];
2148 pg = __lookup_pg_mapping(&osdmap->primary_temp, pgid);
2150 temp->primary = pg->primary_temp.osd;
2154 * Map a PG to its acting set as well as its up set.
2156 * Acting set is used for data mapping purposes, while up set can be
2157 * recorded for detecting interval changes and deciding whether to
2160 void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
2161 const struct ceph_pg *raw_pgid,
2162 struct ceph_osds *up,
2163 struct ceph_osds *acting)
2165 struct ceph_pg_pool_info *pi;
2168 pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
2171 ceph_osds_init(acting);
2175 pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
2176 raw_to_up_osds(osdmap, pi, up);
2177 apply_primary_affinity(osdmap, pi, pps, up);
2178 get_temp_osds(osdmap, pi, raw_pgid, acting);
2179 if (!acting->size) {
2180 memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
2181 acting->size = up->size;
2182 if (acting->primary == -1)
2183 acting->primary = up->primary;
2186 WARN_ON(!osds_valid(up) || !osds_valid(acting));
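/*
 * Putting the helpers above together: CRUSH produces the raw set, which
 * is reduced to the up set (down/nonexistent OSDs dropped or NONE'd out),
 * primary affinity may then move the primary, and any pg_temp /
 * primary_temp overrides yield the acting set.  With no pg_temp mapping
 * the acting set is simply a copy of the up set.
 */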
2190 * Return acting primary for given PG, or -1 if none.
2192 int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
2193 const struct ceph_pg *raw_pgid)
2195 struct ceph_osds up, acting;
2197 ceph_pg_to_up_acting_osds(osdmap, raw_pgid, &up, &acting);
2198 return acting.primary;
2200 EXPORT_SYMBOL(ceph_pg_to_acting_primary);