// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/slab.h>

#include <linux/ceph/libceph.h>
#include <linux/ceph/osdmap.h>
#include <linux/ceph/decode.h>
#include <linux/crush/hash.h>
#include <linux/crush/mapper.h>
char *ceph_osdmap_state_str(char *str, int len, u32 state)
{
	if (!len)
		return str;

	if ((state & CEPH_OSD_EXISTS) && (state & CEPH_OSD_UP))
		snprintf(str, len, "exists, up");
	else if (state & CEPH_OSD_EXISTS)
		snprintf(str, len, "exists");
	else if (state & CEPH_OSD_UP)
		snprintf(str, len, "up");
	else
		snprintf(str, len, "doesn't exist");

	return str;
}
static int calc_bits_of(unsigned int t)
{
	int b = 0;

	while (t) {
		t = t >> 1;
		b++;
	}
	return b;
}

/*
 * the foo_mask is the smallest value 2^n-1 that is >= foo.
 */
static void calc_pg_masks(struct ceph_pg_pool_info *pi)
{
	pi->pg_num_mask = (1 << calc_bits_of(pi->pg_num-1)) - 1;
	pi->pgp_num_mask = (1 << calc_bits_of(pi->pgp_num-1)) - 1;
}
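/*
 * Worked example (illustrative, not from the original source): for
 * pg_num = 12, calc_bits_of(11) = 4 and pg_num_mask = (1 << 4) - 1 =
 * 0xf, the smallest 2^n-1 >= 11.  ceph_stable_mod() later uses this
 * mask to fold a full-precision hash into one of the pool's 12 PGs.
 */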
/*
 * decode crush map
 */
static int crush_decode_uniform_bucket(void **p, void *end,
				       struct crush_bucket_uniform *b)
{
	dout("crush_decode_uniform_bucket %p to %p\n", *p, end);
	ceph_decode_need(p, end, (1+b->h.size) * sizeof(u32), bad);
	b->item_weight = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
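/*
 * A note on the pattern above: ceph_decode_need(p, end, n, bad) jumps
 * to the "bad" label when fewer than n bytes remain between *p and
 * end, so the unchecked ceph_decode_32() calls that follow it cannot
 * read past the end of the buffer.  The same pattern recurs in all of
 * the bucket decoders below.
 */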
static int crush_decode_list_bucket(void **p, void *end,
				    struct crush_bucket_list *b)
{
	int j;

	dout("crush_decode_list_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->sum_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->sum_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->sum_weights[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_tree_bucket(void **p, void *end,
				    struct crush_bucket_tree *b)
{
	int j;

	dout("crush_decode_tree_bucket %p to %p\n", *p, end);
	ceph_decode_8_safe(p, end, b->num_nodes, bad);
	b->node_weights = kcalloc(b->num_nodes, sizeof(u32), GFP_NOFS);
	if (b->node_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->num_nodes * sizeof(u32), bad);
	for (j = 0; j < b->num_nodes; j++)
		b->node_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw_bucket(void **p, void *end,
				     struct crush_bucket_straw *b)
{
	int j;

	dout("crush_decode_straw_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	b->straws = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->straws == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, 2 * b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++) {
		b->item_weights[j] = ceph_decode_32(p);
		b->straws[j] = ceph_decode_32(p);
	}
	return 0;
bad:
	return -EINVAL;
}
static int crush_decode_straw2_bucket(void **p, void *end,
				      struct crush_bucket_straw2 *b)
{
	int j;

	dout("crush_decode_straw2_bucket %p to %p\n", *p, end);
	b->item_weights = kcalloc(b->h.size, sizeof(u32), GFP_NOFS);
	if (b->item_weights == NULL)
		return -ENOMEM;
	ceph_decode_need(p, end, b->h.size * sizeof(u32), bad);
	for (j = 0; j < b->h.size; j++)
		b->item_weights[j] = ceph_decode_32(p);
	return 0;
bad:
	return -EINVAL;
}
static struct crush_choose_arg_map *alloc_choose_arg_map(void)
{
	struct crush_choose_arg_map *arg_map;

	arg_map = kzalloc(sizeof(*arg_map), GFP_NOIO);
	if (!arg_map)
		return NULL;

	RB_CLEAR_NODE(&arg_map->node);
	return arg_map;
}

static void free_choose_arg_map(struct crush_choose_arg_map *arg_map)
{
	if (arg_map) {
		int i, j;

		WARN_ON(!RB_EMPTY_NODE(&arg_map->node));

		for (i = 0; i < arg_map->size; i++) {
			struct crush_choose_arg *arg = &arg_map->args[i];

			for (j = 0; j < arg->weight_set_size; j++)
				kfree(arg->weight_set[j].weights);
			kfree(arg->weight_set);
			kfree(arg->ids);
		}
		kfree(arg_map->args);
		kfree(arg_map);
	}
}
DEFINE_RB_FUNCS(choose_arg_map, struct crush_choose_arg_map,
		choose_args_index, node)

void clear_choose_args(struct crush_map *c)
{
	while (!RB_EMPTY_ROOT(&c->choose_args)) {
		struct crush_choose_arg_map *arg_map =
		    rb_entry(rb_first(&c->choose_args),
			     struct crush_choose_arg_map, node);

		erase_choose_arg_map(&c->choose_args, arg_map);
		free_choose_arg_map(arg_map);
	}
}
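/*
 * DEFINE_RB_FUNCS(choose_arg_map, ...) above generates the
 * lookup_choose_arg_map(), insert_choose_arg_map() and
 * erase_choose_arg_map() helpers used in this file, keyed by
 * choose_args_index; the macro lives in include/linux/ceph/libceph.h.
 * DEFINE_RB_FUNCS2() further down works the same way for
 * by-pointer keys.
 */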
static u32 *decode_array_32_alloc(void **p, void *end, u32 *plen)
{
	u32 *a = NULL;
	u32 len;
	int ret;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len) {
		u32 i;

		a = kmalloc_array(len, sizeof(u32), GFP_NOIO);
		if (!a) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_need(p, end, len * sizeof(u32), e_inval);
		for (i = 0; i < len; i++)
			a[i] = ceph_decode_32(p);
	}

	*plen = len;
	return a;

e_inval:
	ret = -EINVAL;
fail:
	kfree(a);
	return ERR_PTR(ret);
}
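/*
 * The wire format consumed above is a u32 count followed by that many
 * little-endian u32s; on short input the _safe/_need checks bail out
 * to e_inval instead of overrunning the buffer.
 */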
/*
 * Assumes @arg is zero-initialized.
 */
static int decode_choose_arg(void **p, void *end, struct crush_choose_arg *arg)
{
	int ret;

	ceph_decode_32_safe(p, end, arg->weight_set_size, e_inval);
	if (arg->weight_set_size) {
		u32 i;

		arg->weight_set = kmalloc_array(arg->weight_set_size,
						sizeof(*arg->weight_set),
						GFP_NOIO);
		if (!arg->weight_set)
			return -ENOMEM;

		for (i = 0; i < arg->weight_set_size; i++) {
			struct crush_weight_set *w = &arg->weight_set[i];

			w->weights = decode_array_32_alloc(p, end, &w->size);
			if (IS_ERR(w->weights)) {
				ret = PTR_ERR(w->weights);
				w->weights = NULL;
				return ret;
			}
		}
	}

	arg->ids = decode_array_32_alloc(p, end, &arg->ids_size);
	if (IS_ERR(arg->ids)) {
		ret = PTR_ERR(arg->ids);
		arg->ids = NULL;
		return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}
static int decode_choose_args(void **p, void *end, struct crush_map *c)
{
	struct crush_choose_arg_map *arg_map = NULL;
	u32 num_choose_arg_maps, num_buckets;
	int ret;

	ceph_decode_32_safe(p, end, num_choose_arg_maps, e_inval);
	while (num_choose_arg_maps--) {
		arg_map = alloc_choose_arg_map();
		if (!arg_map) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_64_safe(p, end, arg_map->choose_args_index,
				    e_inval);
		arg_map->size = c->max_buckets;
		arg_map->args = kcalloc(arg_map->size, sizeof(*arg_map->args),
					GFP_NOIO);
		if (!arg_map->args) {
			ret = -ENOMEM;
			goto fail;
		}

		ceph_decode_32_safe(p, end, num_buckets, e_inval);
		while (num_buckets--) {
			struct crush_choose_arg *arg;
			u32 bucket_index;

			ceph_decode_32_safe(p, end, bucket_index, e_inval);
			if (bucket_index >= arg_map->size)
				goto e_inval;

			arg = &arg_map->args[bucket_index];
			ret = decode_choose_arg(p, end, arg);
			if (ret)
				goto fail;

			if (arg->ids_size &&
			    arg->ids_size != c->buckets[bucket_index]->size)
				goto e_inval;
		}

		insert_choose_arg_map(&c->choose_args, arg_map);
	}

	return 0;

e_inval:
	ret = -EINVAL;
fail:
	free_choose_arg_map(arg_map);
	return ret;
}
static void crush_finalize(struct crush_map *c)
{
	__s32 b;

	/* Space for the array of pointers to per-bucket workspace */
	c->working_size = sizeof(struct crush_work) +
	    c->max_buckets * sizeof(struct crush_work_bucket *);

	for (b = 0; b < c->max_buckets; b++) {
		if (!c->buckets[b])
			continue;

		switch (c->buckets[b]->alg) {
		default:
			/*
			 * The base case, permutation variables and
			 * the pointer to the permutation array.
			 */
			c->working_size += sizeof(struct crush_work_bucket);
			break;
		}

		/* Every bucket has a permutation array. */
		c->working_size += c->buckets[b]->size * sizeof(__u32);
	}
}
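/*
 * Illustrative sizing (assumed numbers): a map with max_buckets = 2
 * but only one real bucket of size 3 needs sizeof(struct crush_work)
 * + 2 pointer slots + one struct crush_work_bucket + 3 * sizeof(__u32)
 * for that bucket's permutation array; the NULL bucket slot
 * contributes only its pointer.
 */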
static struct crush_map *crush_decode(void *pbyval, void *end)
{
	struct crush_map *c;
	int err;
	int i, j;
	void **p = &pbyval;
	void *start = pbyval;
	u32 magic;

	dout("crush_decode %p to %p len %d\n", *p, end, (int)(end - *p));

	c = kzalloc(sizeof(*c), GFP_NOFS);
	if (c == NULL)
		return ERR_PTR(-ENOMEM);

	c->choose_args = RB_ROOT;

	/* set tunables to default values */
	c->choose_local_tries = 2;
	c->choose_local_fallback_tries = 5;
	c->choose_total_tries = 19;
	c->chooseleaf_descend_once = 0;

	ceph_decode_need(p, end, 4*sizeof(u32), bad);
	magic = ceph_decode_32(p);
	if (magic != CRUSH_MAGIC) {
		pr_err("crush_decode magic %x != current %x\n",
		       (unsigned int)magic, (unsigned int)CRUSH_MAGIC);
		goto bad;
	}
	c->max_buckets = ceph_decode_32(p);
	c->max_rules = ceph_decode_32(p);
	c->max_devices = ceph_decode_32(p);

	c->buckets = kcalloc(c->max_buckets, sizeof(*c->buckets), GFP_NOFS);
	if (c->buckets == NULL)
		goto badmem;
	c->rules = kcalloc(c->max_rules, sizeof(*c->rules), GFP_NOFS);
	if (c->rules == NULL)
		goto badmem;

	/* buckets */
	for (i = 0; i < c->max_buckets; i++) {
		int size = 0;
		u32 alg;
		struct crush_bucket *b;

		ceph_decode_32_safe(p, end, alg, bad);
		if (alg == 0) {
			c->buckets[i] = NULL;
			continue;
		}
		dout("crush_decode bucket %d off %x %p to %p\n",
		     i, (int)(*p-start), *p, end);
		switch (alg) {
		case CRUSH_BUCKET_UNIFORM:
			size = sizeof(struct crush_bucket_uniform);
			break;
		case CRUSH_BUCKET_LIST:
			size = sizeof(struct crush_bucket_list);
			break;
		case CRUSH_BUCKET_TREE:
			size = sizeof(struct crush_bucket_tree);
			break;
		case CRUSH_BUCKET_STRAW:
			size = sizeof(struct crush_bucket_straw);
			break;
		case CRUSH_BUCKET_STRAW2:
			size = sizeof(struct crush_bucket_straw2);
			break;
		default:
			goto bad;
		}
		BUG_ON(size == 0);
		b = c->buckets[i] = kzalloc(size, GFP_NOFS);
		if (b == NULL)
			goto badmem;

		ceph_decode_need(p, end, 4*sizeof(u32), bad);
		b->id = ceph_decode_32(p);
		b->type = ceph_decode_16(p);
		b->alg = ceph_decode_8(p);
		b->hash = ceph_decode_8(p);
		b->weight = ceph_decode_32(p);
		b->size = ceph_decode_32(p);

		dout("crush_decode bucket size %d off %x %p to %p\n",
		     b->size, (int)(*p-start), *p, end);

		b->items = kcalloc(b->size, sizeof(__s32), GFP_NOFS);
		if (b->items == NULL)
			goto badmem;

		ceph_decode_need(p, end, b->size*sizeof(u32), bad);
		for (j = 0; j < b->size; j++)
			b->items[j] = ceph_decode_32(p);
		switch (b->alg) {
		case CRUSH_BUCKET_UNIFORM:
			err = crush_decode_uniform_bucket(p, end,
				  (struct crush_bucket_uniform *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_LIST:
			err = crush_decode_list_bucket(p, end,
			       (struct crush_bucket_list *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_TREE:
			err = crush_decode_tree_bucket(p, end,
				(struct crush_bucket_tree *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW:
			err = crush_decode_straw_bucket(p, end,
				(struct crush_bucket_straw *)b);
			if (err < 0)
				goto fail;
			break;
		case CRUSH_BUCKET_STRAW2:
			err = crush_decode_straw2_bucket(p, end,
				(struct crush_bucket_straw2 *)b);
			if (err < 0)
				goto fail;
			break;
		}
	}
475 dout("rule vec is %p\n", c->rules);
476 for (i = 0; i < c->max_rules; i++) {
478 struct crush_rule *r;
480 ceph_decode_32_safe(p, end, yes, bad);
482 dout("crush_decode NO rule %d off %x %p to %p\n",
483 i, (int)(*p-start), *p, end);
488 dout("crush_decode rule %d off %x %p to %p\n",
489 i, (int)(*p-start), *p, end);
492 ceph_decode_32_safe(p, end, yes, bad);
493 #if BITS_PER_LONG == 32
494 if (yes > (ULONG_MAX - sizeof(*r))
495 / sizeof(struct crush_rule_step))
498 r = kmalloc(struct_size(r, steps, yes), GFP_NOFS);
502 dout(" rule %d is at %p\n", i, r);
504 ceph_decode_copy_safe(p, end, &r->mask, 4, bad); /* 4 u8's */
505 ceph_decode_need(p, end, r->len*3*sizeof(u32), bad);
506 for (j = 0; j < r->len; j++) {
507 r->steps[j].op = ceph_decode_32(p);
508 r->steps[j].arg1 = ceph_decode_32(p);
509 r->steps[j].arg2 = ceph_decode_32(p);
	ceph_decode_skip_map(p, end, 32, string, bad); /* type_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* name_map */
	ceph_decode_skip_map(p, end, 32, string, bad); /* rule_name_map */

	/* tunables */
	ceph_decode_need(p, end, 3*sizeof(u32), done);
	c->choose_local_tries = ceph_decode_32(p);
	c->choose_local_fallback_tries = ceph_decode_32(p);
	c->choose_total_tries = ceph_decode_32(p);
	dout("crush decode tunable choose_local_tries = %d\n",
	     c->choose_local_tries);
	dout("crush decode tunable choose_local_fallback_tries = %d\n",
	     c->choose_local_fallback_tries);
	dout("crush decode tunable choose_total_tries = %d\n",
	     c->choose_total_tries);

	ceph_decode_need(p, end, sizeof(u32), done);
	c->chooseleaf_descend_once = ceph_decode_32(p);
	dout("crush decode tunable chooseleaf_descend_once = %d\n",
	     c->chooseleaf_descend_once);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_vary_r = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_vary_r = %d\n",
	     c->chooseleaf_vary_r);

	/* skip straw_calc_version, allowed_bucket_algs */
	ceph_decode_need(p, end, sizeof(u8) + sizeof(u32), done);
	*p += sizeof(u8) + sizeof(u32);

	ceph_decode_need(p, end, sizeof(u8), done);
	c->chooseleaf_stable = ceph_decode_8(p);
	dout("crush decode tunable chooseleaf_stable = %d\n",
	     c->chooseleaf_stable);
	if (*p != end) {
		/* class_map */
		ceph_decode_skip_map(p, end, 32, 32, bad);
		/* class_name */
		ceph_decode_skip_map(p, end, 32, string, bad);
		/* class_bucket */
		ceph_decode_skip_map_of_map(p, end, 32, 32, 32, bad);
	}

	if (*p != end) {
		err = decode_choose_args(p, end, c);
		if (err)
			goto fail;
	}

done:
	crush_finalize(c);
	dout("crush_decode success\n");
	return c;

badmem:
	err = -ENOMEM;
fail:
	dout("crush_decode fail %d\n", err);
	crush_destroy(c);
	return ERR_PTR(err);

bad:
	err = -EINVAL;
	goto fail;
}
int ceph_pg_compare(const struct ceph_pg *lhs, const struct ceph_pg *rhs)
{
	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;
	if (lhs->seed < rhs->seed)
		return -1;
	if (lhs->seed > rhs->seed)
		return 1;

	return 0;
}

int ceph_spg_compare(const struct ceph_spg *lhs, const struct ceph_spg *rhs)
{
	int ret;

	ret = ceph_pg_compare(&lhs->pgid, &rhs->pgid);
	if (ret)
		return ret;

	if (lhs->shard < rhs->shard)
		return -1;
	if (lhs->shard > rhs->shard)
		return 1;

	return 0;
}
static struct ceph_pg_mapping *alloc_pg_mapping(size_t payload_len)
{
	struct ceph_pg_mapping *pg;

	pg = kmalloc(sizeof(*pg) + payload_len, GFP_NOIO);
	if (!pg)
		return NULL;

	RB_CLEAR_NODE(&pg->node);
	return pg;
}

static void free_pg_mapping(struct ceph_pg_mapping *pg)
{
	WARN_ON(!RB_EMPTY_NODE(&pg->node));

	kfree(pg);
}

/*
 * rbtree of pg_mapping for handling pg_temp (explicit mapping of pgid
 * to a set of osds) and primary_temp (explicit primary setting)
 */
DEFINE_RB_FUNCS2(pg_mapping, struct ceph_pg_mapping, pgid, ceph_pg_compare,
		 RB_BYPTR, const struct ceph_pg *, node)
/*
 * rbtree of pg pool info
 */
static int __insert_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *new)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct ceph_pg_pool_info *pi = NULL;

	while (*p) {
		parent = *p;
		pi = rb_entry(parent, struct ceph_pg_pool_info, node);
		if (new->id < pi->id)
			p = &(*p)->rb_left;
		else if (new->id > pi->id)
			p = &(*p)->rb_right;
		else
			return -EEXIST;
	}

	rb_link_node(&new->node, parent, p);
	rb_insert_color(&new->node, root);
	return 0;
}
static struct ceph_pg_pool_info *__lookup_pg_pool(struct rb_root *root, u64 id)
{
	struct ceph_pg_pool_info *pi;
	struct rb_node *n = root->rb_node;

	while (n) {
		pi = rb_entry(n, struct ceph_pg_pool_info, node);
		if (id < pi->id)
			n = n->rb_left;
		else if (id > pi->id)
			n = n->rb_right;
		else
			return pi;
	}
	return NULL;
}

struct ceph_pg_pool_info *ceph_pg_pool_by_id(struct ceph_osdmap *map, u64 id)
{
	return __lookup_pg_pool(&map->pg_pools, id);
}
const char *ceph_pg_pool_name_by_id(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	if (id == CEPH_NOPOOL)
		return NULL;

	if (WARN_ON_ONCE(id > (u64) INT_MAX))
		return NULL;

	pi = __lookup_pg_pool(&map->pg_pools, (int) id);
	return pi ? pi->name : NULL;
}
EXPORT_SYMBOL(ceph_pg_pool_name_by_id);

int ceph_pg_poolid_by_name(struct ceph_osdmap *map, const char *name)
{
	struct rb_node *rbp;

	for (rbp = rb_first(&map->pg_pools); rbp; rbp = rb_next(rbp)) {
		struct ceph_pg_pool_info *pi =
			rb_entry(rbp, struct ceph_pg_pool_info, node);
		if (pi->name && strcmp(pi->name, name) == 0)
			return pi->id;
	}
	return -ENOENT;
}
EXPORT_SYMBOL(ceph_pg_poolid_by_name);

u64 ceph_pg_pool_flags(struct ceph_osdmap *map, u64 id)
{
	struct ceph_pg_pool_info *pi;

	pi = __lookup_pg_pool(&map->pg_pools, id);
	return pi ? pi->flags : 0;
}
EXPORT_SYMBOL(ceph_pg_pool_flags);
static void __remove_pg_pool(struct rb_root *root, struct ceph_pg_pool_info *pi)
{
	rb_erase(&pi->node, root);
	kfree(pi->name);
	kfree(pi);
}
static int decode_pool(void **p, void *end, struct ceph_pg_pool_info *pi)
{
	u8 ev, cv;
	unsigned len, num;
	void *pool_end;

	ceph_decode_need(p, end, 2 + 4, bad);
	ev = ceph_decode_8(p);  /* encoding version */
	cv = ceph_decode_8(p);  /* compat version */
	if (ev < 5) {
		pr_warn("got v %d < 5 cv %d of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	if (cv > 9) {
		pr_warn("got v %d cv %d > 9 of ceph_pg_pool\n", ev, cv);
		return -EINVAL;
	}
	len = ceph_decode_32(p);
	ceph_decode_need(p, end, len, bad);
	pool_end = *p + len;

	pi->type = ceph_decode_8(p);
	pi->size = ceph_decode_8(p);
	pi->crush_ruleset = ceph_decode_8(p);
	pi->object_hash = ceph_decode_8(p);

	pi->pg_num = ceph_decode_32(p);
	pi->pgp_num = ceph_decode_32(p);

	*p += 4 + 4;  /* skip lpg* */
	*p += 4;      /* skip last_change */
	*p += 8 + 4;  /* skip snap_seq, snap_epoch */

	/* skip snaps */
	num = ceph_decode_32(p);
	while (num--) {
		*p += 8;      /* snapid key */
		*p += 1 + 1;  /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	/* skip removed_snaps */
	num = ceph_decode_32(p);
	*p += num * (8 + 8);

	*p += 8;  /* skip auid */
	pi->flags = ceph_decode_64(p);
	*p += 4;  /* skip crash_replay_interval */

	if (ev >= 7)
		pi->min_size = ceph_decode_8(p);
	else
		pi->min_size = pi->size - pi->size / 2;

	if (ev >= 8)
		*p += 8 + 8;  /* skip quota_max_* */

	if (ev >= 9) {
		/* skip tiers */
		num = ceph_decode_32(p);
		*p += num * 8;

		*p += 8;  /* skip tier_of */
		*p += 1;  /* skip cache_mode */

		pi->read_tier = ceph_decode_64(p);
		pi->write_tier = ceph_decode_64(p);
	} else {
		pi->read_tier = -1;
		pi->write_tier = -1;
	}

	if (ev >= 10) {
		/* skip properties */
		num = ceph_decode_32(p);
		while (num--) {
			len = ceph_decode_32(p);
			*p += len; /* key */
			len = ceph_decode_32(p);
			*p += len; /* val */
		}
	}

	if (ev >= 11) {
		/* skip hit_set_params */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;

		*p += 4; /* skip hit_set_period */
		*p += 4; /* skip hit_set_count */
	}

	if (ev >= 12)
		*p += 4; /* skip stripe_width */

	if (ev >= 13) {
		*p += 8; /* skip target_max_bytes */
		*p += 8; /* skip target_max_objects */
		*p += 4; /* skip cache_target_dirty_ratio_micro */
		*p += 4; /* skip cache_target_full_ratio_micro */
		*p += 4; /* skip cache_min_flush_age */
		*p += 4; /* skip cache_min_evict_age */
	}

	if (ev >= 14) {
		/* skip erasure_code_profile */
		len = ceph_decode_32(p);
		*p += len;
	}

	/*
	 * last_force_op_resend_preluminous, will be overridden if the
	 * map was encoded with RESEND_ON_SPLIT
	 */
	if (ev >= 15)
		pi->last_force_request_resend = ceph_decode_32(p);
	else
		pi->last_force_request_resend = 0;

	if (ev >= 16)
		*p += 4; /* skip min_read_recency_for_promote */

	if (ev >= 17)
		*p += 8; /* skip expected_num_objects */

	if (ev >= 19)
		*p += 4; /* skip cache_target_dirty_high_ratio_micro */

	if (ev >= 20)
		*p += 4; /* skip min_write_recency_for_promote */

	if (ev >= 21)
		*p += 1; /* skip use_gmt_hitset */

	if (ev >= 22)
		*p += 1; /* skip fast_read */

	if (ev >= 23) {
		*p += 4; /* skip hit_set_grade_decay_rate */
		*p += 4; /* skip hit_set_search_last_n */
	}

	if (ev >= 24) {
		/* skip opts */
		*p += 1 + 1; /* versions */
		len = ceph_decode_32(p);
		*p += len;
	}

	if (ev >= 25)
		pi->last_force_request_resend = ceph_decode_32(p);

	/* ignore the rest */

	*p = pool_end;
	calc_pg_masks(pi);
	return 0;

bad:
	return -EINVAL;
}
static int decode_pool_names(void **p, void *end, struct ceph_osdmap *map)
{
	struct ceph_pg_pool_info *pi;
	u32 num, len;
	u64 pool;

	ceph_decode_32_safe(p, end, num, bad);
	dout(" %d pool names\n", num);
	while (num--) {
		ceph_decode_64_safe(p, end, pool, bad);
		ceph_decode_32_safe(p, end, len, bad);
		dout(" pool %llu len %d\n", pool, len);
		ceph_decode_need(p, end, len, bad);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi) {
			char *name = kstrndup(*p, len, GFP_NOFS);

			if (!name)
				return -ENOMEM;
			kfree(pi->name);
			pi->name = name;
			dout(" name is %s\n", pi->name);
		}
		*p += len;
	}
	return 0;

bad:
	return -EINVAL;
}
/*
 * osd map
 */
struct ceph_osdmap *ceph_osdmap_alloc(void)
{
	struct ceph_osdmap *map;

	map = kzalloc(sizeof(*map), GFP_NOIO);
	if (!map)
		return NULL;

	map->pg_pools = RB_ROOT;
	map->pool_max = -1;
	map->pg_temp = RB_ROOT;
	map->primary_temp = RB_ROOT;
	map->pg_upmap = RB_ROOT;
	map->pg_upmap_items = RB_ROOT;
	mutex_init(&map->crush_workspace_mutex);

	return map;
}
void ceph_osdmap_destroy(struct ceph_osdmap *map)
{
	dout("osdmap_destroy %p\n", map);
	if (map->crush)
		crush_destroy(map->crush);
	while (!RB_EMPTY_ROOT(&map->pg_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_temp),
			     struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->pg_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->primary_temp)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->primary_temp),
			     struct ceph_pg_mapping, node);

		erase_pg_mapping(&map->primary_temp, pg);
		free_pg_mapping(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap),
			     struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_upmap_items)) {
		struct ceph_pg_mapping *pg =
		    rb_entry(rb_first(&map->pg_upmap_items),
			     struct ceph_pg_mapping, node);

		rb_erase(&pg->node, &map->pg_upmap_items);
		kfree(pg);
	}
	while (!RB_EMPTY_ROOT(&map->pg_pools)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(rb_first(&map->pg_pools),
			     struct ceph_pg_pool_info, node);

		__remove_pg_pool(&map->pg_pools, pi);
	}
	kvfree(map->osd_state);
	kvfree(map->osd_weight);
	kvfree(map->osd_addr);
	kvfree(map->osd_primary_affinity);
	kvfree(map->crush_workspace);
	kfree(map);
}
/*
 * Adjust max_osd value, (re)allocate arrays.
 *
 * The new elements are properly initialized.
 */
static int osdmap_set_max_osd(struct ceph_osdmap *map, u32 max)
{
	u32 *state;
	u32 *weight;
	struct ceph_entity_addr *addr;
	u32 to_copy;
	int i;

	dout("%s old %u new %u\n", __func__, map->max_osd, max);
	if (max == map->max_osd)
		return 0;

	state = ceph_kvmalloc(array_size(max, sizeof(*state)), GFP_NOFS);
	weight = ceph_kvmalloc(array_size(max, sizeof(*weight)), GFP_NOFS);
	addr = ceph_kvmalloc(array_size(max, sizeof(*addr)), GFP_NOFS);
	if (!state || !weight || !addr) {
		kvfree(state);
		kvfree(weight);
		kvfree(addr);
		return -ENOMEM;
	}

	to_copy = min(map->max_osd, max);
	if (map->osd_state) {
		memcpy(state, map->osd_state, to_copy * sizeof(*state));
		memcpy(weight, map->osd_weight, to_copy * sizeof(*weight));
		memcpy(addr, map->osd_addr, to_copy * sizeof(*addr));
		kvfree(map->osd_state);
		kvfree(map->osd_weight);
		kvfree(map->osd_addr);
	}

	map->osd_state = state;
	map->osd_weight = weight;
	map->osd_addr = addr;
	for (i = map->max_osd; i < max; i++) {
		map->osd_state[i] = 0;
		map->osd_weight[i] = CEPH_OSD_OUT;
		memset(map->osd_addr + i, 0, sizeof(*map->osd_addr));
	}

	if (map->osd_primary_affinity) {
		u32 *affinity;

		affinity = ceph_kvmalloc(array_size(max, sizeof(*affinity)),
					 GFP_NOFS);
		if (!affinity)
			return -ENOMEM;

		memcpy(affinity, map->osd_primary_affinity,
		       to_copy * sizeof(*affinity));
		kvfree(map->osd_primary_affinity);

		map->osd_primary_affinity = affinity;
		for (i = map->max_osd; i < max; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->max_osd = max;

	return 0;
}
static int osdmap_set_crush(struct ceph_osdmap *map, struct crush_map *crush)
{
	void *workspace;
	size_t work_size;

	if (IS_ERR(crush))
		return PTR_ERR(crush);

	work_size = crush_work_size(crush, CEPH_PG_MAX_SIZE);
	dout("%s work_size %zu bytes\n", __func__, work_size);
	workspace = ceph_kvmalloc(work_size, GFP_NOIO);
	if (!workspace) {
		crush_destroy(crush);
		return -ENOMEM;
	}
	crush_init_workspace(crush, workspace);

	if (map->crush)
		crush_destroy(map->crush);
	kvfree(map->crush_workspace);
	map->crush = crush;
	map->crush_workspace = workspace;
	return 0;
}
#define OSDMAP_WRAPPER_COMPAT_VER	7
#define OSDMAP_CLIENT_DATA_COMPAT_VER	1

/*
 * Return 0 or error.  On success, *v is set to 0 for old (v6) osdmaps,
 * to struct_v of the client_data section for new (v7 and above)
 * osdmaps.
 */
static int get_osdmap_client_data_v(void **p, void *end,
				    const char *prefix, u8 *v)
{
	u8 struct_v;

	ceph_decode_8_safe(p, end, struct_v, e_inval);
	if (struct_v >= 7) {
		u8 struct_compat;

		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_WRAPPER_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap\n",
				struct_v, struct_compat,
				OSDMAP_WRAPPER_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore wrapper struct_len */

		ceph_decode_8_safe(p, end, struct_v, e_inval);
		ceph_decode_8_safe(p, end, struct_compat, e_inval);
		if (struct_compat > OSDMAP_CLIENT_DATA_COMPAT_VER) {
			pr_warn("got v %d cv %d > %d of %s ceph_osdmap client data\n",
				struct_v, struct_compat,
				OSDMAP_CLIENT_DATA_COMPAT_VER, prefix);
			return -EINVAL;
		}
		*p += 4; /* ignore client data struct_len */
	} else {
		u16 version;

		*p -= 1;
		ceph_decode_16_safe(p, end, version, e_inval);
		if (version < 6) {
			pr_warn("got v %d < 6 of %s ceph_osdmap\n",
				version, prefix);
			return -EINVAL;
		}

		/* old osdmap encoding */
		struct_v = 0;
	}

	*v = struct_v;
	return 0;

e_inval:
	return -EINVAL;
}
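/*
 * Layout being distinguished above: a v7+ osdmap starts with a
 * (struct_v, struct_compat, struct_len) wrapper followed by the same
 * triple for the client_data section, while a v6 map begins with a
 * bare le16 version -- hence the one-byte rewind of *p before the
 * 16-bit read in the legacy branch.
 */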
static int __decode_pools(void **p, void *end, struct ceph_osdmap *map,
			  bool incremental)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_pool_info *pi;
		u64 pool;
		int ret;

		ceph_decode_64_safe(p, end, pool, e_inval);

		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (!incremental || !pi) {
			pi = kzalloc(sizeof(*pi), GFP_NOFS);
			if (!pi)
				return -ENOMEM;

			pi->id = pool;

			ret = __insert_pg_pool(&map->pg_pools, pi);
			if (ret) {
				kfree(pi);
				return ret;
			}
		}

		ret = decode_pool(p, end, pi);
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, false);
}

static int decode_new_pools(void **p, void *end, struct ceph_osdmap *map)
{
	return __decode_pools(p, end, map, true);
}
typedef struct ceph_pg_mapping *(*decode_mapping_fn_t)(void **, void *, bool);

static int decode_pg_mapping(void **p, void *end, struct rb_root *mapping_root,
			     decode_mapping_fn_t fn, bool incremental)
{
	u32 n;

	WARN_ON(!incremental && !fn);

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		struct ceph_pg_mapping *pg;
		struct ceph_pg pgid;
		int ret;

		ret = ceph_decode_pgid(p, end, &pgid);
		if (ret)
			return ret;

		pg = lookup_pg_mapping(mapping_root, &pgid);
		if (pg) {
			WARN_ON(!incremental);
			erase_pg_mapping(mapping_root, pg);
			free_pg_mapping(pg);
		}

		if (fn) {
			pg = fn(p, end, incremental);
			if (IS_ERR(pg))
				return PTR_ERR(pg);

			if (pg) {
				pg->pgid = pgid; /* struct */
				insert_pg_mapping(mapping_root, pg);
			}
		}
	}

	return 0;

e_inval:
	return -EINVAL;
}
static struct ceph_pg_mapping *__decode_pg_temp(void **p, void *end,
						bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0 && incremental)
		return NULL;	/* new_pg_temp: [] to remove */
	if (len > (SIZE_MAX - sizeof(*pg)) / sizeof(u32))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_temp.len = len;
	for (i = 0; i < len; i++)
		pg->pg_temp.osds[i] = ceph_decode_32(p);

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 false);
}

static int decode_new_pg_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_temp, __decode_pg_temp,
				 true);
}
static struct ceph_pg_mapping *__decode_primary_temp(void **p, void *end,
						     bool incremental)
{
	struct ceph_pg_mapping *pg;
	u32 osd;

	ceph_decode_32_safe(p, end, osd, e_inval);
	if (osd == (u32)-1 && incremental)
		return NULL;	/* new_primary_temp: -1 to remove */

	pg = alloc_pg_mapping(0);
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->primary_temp.osd = osd;
	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_primary_temp(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, false);
}

static int decode_new_primary_temp(void **p, void *end,
				   struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->primary_temp,
				 __decode_primary_temp, true);
}
u32 ceph_get_primary_affinity(struct ceph_osdmap *map, int osd)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity)
		return CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;

	return map->osd_primary_affinity[osd];
}

static int set_primary_affinity(struct ceph_osdmap *map, int osd, u32 aff)
{
	BUG_ON(osd >= map->max_osd);

	if (!map->osd_primary_affinity) {
		int i;

		map->osd_primary_affinity = ceph_kvmalloc(
		    array_size(map->max_osd, sizeof(*map->osd_primary_affinity)),
		    GFP_NOFS);
		if (!map->osd_primary_affinity)
			return -ENOMEM;

		for (i = 0; i < map->max_osd; i++)
			map->osd_primary_affinity[i] =
			    CEPH_OSD_DEFAULT_PRIMARY_AFFINITY;
	}

	map->osd_primary_affinity[osd] = aff;

	return 0;
}
static int decode_primary_affinity(void **p, void *end,
				   struct ceph_osdmap *map)
{
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len == 0) {
		kvfree(map->osd_primary_affinity);
		map->osd_primary_affinity = NULL;
		return 0;
	}
	if (len != map->max_osd)
		goto e_inval;

	ceph_decode_need(p, end, map->max_osd*sizeof(u32), e_inval);

	for (i = 0; i < map->max_osd; i++) {
		int ret;

		ret = set_primary_affinity(map, i, ceph_decode_32(p));
		if (ret)
			return ret;
	}

	return 0;

e_inval:
	return -EINVAL;
}

static int decode_new_primary_affinity(void **p, void *end,
				       struct ceph_osdmap *map)
{
	u32 n;

	ceph_decode_32_safe(p, end, n, e_inval);
	while (n--) {
		u32 osd, aff;
		int ret;

		ceph_decode_32_safe(p, end, osd, e_inval);
		ceph_decode_32_safe(p, end, aff, e_inval);

		ret = set_primary_affinity(map, osd, aff);
		if (ret)
			return ret;

		pr_info("osd%d primary-affinity 0x%x\n", osd, aff);
	}

	return 0;

e_inval:
	return -EINVAL;
}
static struct ceph_pg_mapping *__decode_pg_upmap(void **p, void *end,
						 bool __unused)
{
	return __decode_pg_temp(p, end, false);
}

static int decode_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 false);
}

static int decode_new_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, __decode_pg_upmap,
				 true);
}

static int decode_old_pg_upmap(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap, NULL, true);
}

static struct ceph_pg_mapping *__decode_pg_upmap_items(void **p, void *end,
						       bool __unused)
{
	struct ceph_pg_mapping *pg;
	u32 len, i;

	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > (SIZE_MAX - sizeof(*pg)) / (2 * sizeof(u32)))
		return ERR_PTR(-EINVAL);

	ceph_decode_need(p, end, 2 * len * sizeof(u32), e_inval);
	pg = alloc_pg_mapping(2 * len * sizeof(u32));
	if (!pg)
		return ERR_PTR(-ENOMEM);

	pg->pg_upmap_items.len = len;
	for (i = 0; i < len; i++) {
		pg->pg_upmap_items.from_to[i][0] = ceph_decode_32(p);
		pg->pg_upmap_items.from_to[i][1] = ceph_decode_32(p);
	}

	return pg;

e_inval:
	return ERR_PTR(-EINVAL);
}

static int decode_pg_upmap_items(void **p, void *end, struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, false);
}

static int decode_new_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items,
				 __decode_pg_upmap_items, true);
}

static int decode_old_pg_upmap_items(void **p, void *end,
				     struct ceph_osdmap *map)
{
	return decode_pg_mapping(p, end, &map->pg_upmap_items, NULL, true);
}
/*
 * decode a full map.
 */
static int osdmap_decode(void **p, void *end, struct ceph_osdmap *map)
{
	u8 struct_v;
	u32 epoch = 0;
	void *start = *p;
	u32 max;
	u32 len, i;
	int err;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "full", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, created, modified */
	ceph_decode_need(p, end, sizeof(map->fsid) + sizeof(u32) +
			 sizeof(map->created) + sizeof(map->modified), e_inval);
	ceph_decode_copy(p, &map->fsid, sizeof(map->fsid));
	epoch = map->epoch = ceph_decode_32(p);
	ceph_decode_copy(p, &map->created, sizeof(map->created));
	ceph_decode_copy(p, &map->modified, sizeof(map->modified));

	/* pools */
	err = decode_pools(p, end, map);
	if (err)
		goto bad;

	/* pool_name */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	ceph_decode_32_safe(p, end, map->pool_max, e_inval);

	ceph_decode_32_safe(p, end, map->flags, e_inval);

	/* max_osd */
	ceph_decode_32_safe(p, end, max, e_inval);

	/* (re)alloc osd arrays */
	err = osdmap_set_max_osd(map, max);
	if (err)
		goto bad;

	/* osd_state, osd_weight, osd_addrs->client_addr */
	ceph_decode_need(p, end, 3*sizeof(u32) +
			 map->max_osd*(struct_v >= 5 ? sizeof(u32) :
						       sizeof(u8)) +
			 sizeof(*map->osd_weight), e_inval);
	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	if (struct_v >= 5) {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_32(p);
	} else {
		for (i = 0; i < map->max_osd; i++)
			map->osd_state[i] = ceph_decode_8(p);
	}

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++)
		map->osd_weight[i] = ceph_decode_32(p);

	if (ceph_decode_32(p) != map->max_osd)
		goto e_inval;

	for (i = 0; i < map->max_osd; i++) {
		err = ceph_decode_entity_addr(p, end, &map->osd_addr[i]);
		if (err)
			goto bad;
	}

	/* pg_temp */
	err = decode_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* primary_temp */
	if (struct_v >= 1) {
		err = decode_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* primary_affinity */
	if (struct_v >= 2) {
		err = decode_primary_affinity(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(map->osd_primary_affinity);
	}

	/* crush */
	ceph_decode_32_safe(p, end, len, e_inval);
	err = osdmap_set_crush(map, crush_decode(*p, min(*p + len, end)));
	if (err)
		goto bad;
	*p += len;

	if (struct_v >= 3) {
		/* erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
	}

	if (struct_v >= 4) {
		err = decode_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	} else {
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap));
		WARN_ON(!RB_EMPTY_ROOT(&map->pg_upmap_items));
	}

	/* ignore the rest */
	*p = end;

	dout("full osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return 0;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt full osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return err;
}
/*
 * Allocate and decode a full map.
 */
struct ceph_osdmap *ceph_osdmap_decode(void **p, void *end)
{
	struct ceph_osdmap *map;
	int ret;

	map = ceph_osdmap_alloc();
	if (!map)
		return ERR_PTR(-ENOMEM);

	ret = osdmap_decode(p, end, map);
	if (ret) {
		ceph_osdmap_destroy(map);
		return ERR_PTR(ret);
	}

	return map;
}
/*
 * Encoding order is (new_up_client, new_state, new_weight).  Need to
 * apply in the (new_weight, new_state, new_up_client) order, because
 * an incremental map may look like e.g.
 *
 *     new_up_client: { osd=6, addr=... } # set osd_state and addr
 *     new_state: { osd=6, xorstate=EXISTS } # clear osd_state
 */
static int decode_new_up_state_weight(void **p, void *end, u8 struct_v,
				      struct ceph_osdmap *map)
{
	void *new_up_client;
	void *new_state;
	void *new_weight_end;
	u32 len;
	int i;

	new_up_client = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	for (i = 0; i < len; ++i) {
		struct ceph_entity_addr addr;

		ceph_decode_skip_32(p, end, e_inval);
		if (ceph_decode_entity_addr(p, end, &addr))
			goto e_inval;
	}

	new_state = *p;
	ceph_decode_32_safe(p, end, len, e_inval);
	len *= sizeof(u32) + (struct_v >= 5 ? sizeof(u32) : sizeof(u8));
	ceph_decode_need(p, end, len, e_inval);
	*p += len;

	/* new_weight */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		s32 osd;
		u32 w;

		ceph_decode_need(p, end, 2*sizeof(u32), e_inval);
		osd = ceph_decode_32(p);
		w = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		pr_info("osd%d weight 0x%x %s\n", osd, w,
		     w == CEPH_OSD_IN ? "(in)" :
		     (w == CEPH_OSD_OUT ? "(out)" : ""));
		map->osd_weight[osd] = w;

		/*
		 * If we are marking in, set the EXISTS, and clear the
		 * AUTOOUT and NEW bits.
		 */
		if (w) {
			map->osd_state[osd] |= CEPH_OSD_EXISTS;
			map->osd_state[osd] &= ~(CEPH_OSD_AUTOOUT |
						 CEPH_OSD_NEW);
		}
	}
	new_weight_end = *p;

	/* new_state (up/down) */
	*p = new_state;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		u32 xorstate;
		int ret;

		osd = ceph_decode_32(p);
		if (struct_v >= 5)
			xorstate = ceph_decode_32(p);
		else
			xorstate = ceph_decode_8(p);
		if (xorstate == 0)
			xorstate = CEPH_OSD_UP;
		BUG_ON(osd >= map->max_osd);
		if ((map->osd_state[osd] & CEPH_OSD_UP) &&
		    (xorstate & CEPH_OSD_UP))
			pr_info("osd%d down\n", osd);
		if ((map->osd_state[osd] & CEPH_OSD_EXISTS) &&
		    (xorstate & CEPH_OSD_EXISTS)) {
			pr_info("osd%d does not exist\n", osd);
			ret = set_primary_affinity(map, osd,
				   CEPH_OSD_DEFAULT_PRIMARY_AFFINITY);
			if (ret)
				return ret;
			memset(map->osd_addr + osd, 0, sizeof(*map->osd_addr));
			map->osd_state[osd] = 0;
		} else {
			map->osd_state[osd] ^= xorstate;
		}
	}

	/* new_up_client */
	*p = new_up_client;
	len = ceph_decode_32(p);
	while (len--) {
		s32 osd;
		struct ceph_entity_addr addr;

		osd = ceph_decode_32(p);
		BUG_ON(osd >= map->max_osd);
		if (ceph_decode_entity_addr(p, end, &addr))
			goto e_inval;
		pr_info("osd%d up\n", osd);
		map->osd_state[osd] |= CEPH_OSD_EXISTS | CEPH_OSD_UP;
		map->osd_addr[osd] = addr;
	}

	*p = new_weight_end;
	return 0;

e_inval:
	return -EINVAL;
}
/*
 * decode and apply an incremental map update.
 */
struct ceph_osdmap *osdmap_apply_incremental(void **p, void *end,
					     struct ceph_osdmap *map)
{
	struct ceph_fsid fsid;
	u32 epoch = 0;
	struct ceph_timespec modified;
	s32 len;
	u64 pool;
	__s64 new_pool_max;
	__s32 new_flags, max;
	void *start = *p;
	int err;
	u8 struct_v;

	dout("%s %p to %p len %d\n", __func__, *p, end, (int)(end - *p));

	err = get_osdmap_client_data_v(p, end, "inc", &struct_v);
	if (err)
		goto bad;

	/* fsid, epoch, modified, new_pool_max, new_flags */
	ceph_decode_need(p, end, sizeof(fsid) + sizeof(u32) + sizeof(modified) +
			 sizeof(u64) + sizeof(u32), e_inval);
	ceph_decode_copy(p, &fsid, sizeof(fsid));
	epoch = ceph_decode_32(p);
	BUG_ON(epoch != map->epoch+1);
	ceph_decode_copy(p, &modified, sizeof(modified));
	new_pool_max = ceph_decode_64(p);
	new_flags = ceph_decode_32(p);

	/* full map? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		dout("apply_incremental full map len %d, %p to %p\n",
		     len, *p, end);
		return ceph_osdmap_decode(p, min(*p+len, end));
	}

	/* new crush? */
	ceph_decode_32_safe(p, end, len, e_inval);
	if (len > 0) {
		err = osdmap_set_crush(map,
				       crush_decode(*p, min(*p + len, end)));
		if (err)
			goto bad;
		*p += len;
	}

	/* new flags? */
	if (new_flags >= 0)
		map->flags = new_flags;
	if (new_pool_max >= 0)
		map->pool_max = new_pool_max;

	/* new max? */
	ceph_decode_32_safe(p, end, max, e_inval);
	if (max >= 0) {
		err = osdmap_set_max_osd(map, max);
		if (err)
			goto bad;
	}

	map->epoch++;
	map->modified = modified;

	/* new_pools */
	err = decode_new_pools(p, end, map);
	if (err)
		goto bad;

	/* new_pool_names */
	err = decode_pool_names(p, end, map);
	if (err)
		goto bad;

	/* old_pool */
	ceph_decode_32_safe(p, end, len, e_inval);
	while (len--) {
		struct ceph_pg_pool_info *pi;

		ceph_decode_64_safe(p, end, pool, e_inval);
		pi = __lookup_pg_pool(&map->pg_pools, pool);
		if (pi)
			__remove_pg_pool(&map->pg_pools, pi);
	}

	/* new_up_client, new_state, new_weight */
	err = decode_new_up_state_weight(p, end, struct_v, map);
	if (err)
		goto bad;

	/* new_pg_temp */
	err = decode_new_pg_temp(p, end, map);
	if (err)
		goto bad;

	/* new_primary_temp */
	if (struct_v >= 1) {
		err = decode_new_primary_temp(p, end, map);
		if (err)
			goto bad;
	}

	/* new_primary_affinity */
	if (struct_v >= 2) {
		err = decode_new_primary_affinity(p, end, map);
		if (err)
			goto bad;
	}

	if (struct_v >= 3) {
		/* new_erasure_code_profiles */
		ceph_decode_skip_map_of_map(p, end, string, string, string,
					    e_inval);
		/* old_erasure_code_profiles */
		ceph_decode_skip_set(p, end, string, e_inval);
	}

	if (struct_v >= 4) {
		err = decode_new_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap(p, end, map);
		if (err)
			goto bad;

		err = decode_new_pg_upmap_items(p, end, map);
		if (err)
			goto bad;

		err = decode_old_pg_upmap_items(p, end, map);
		if (err)
			goto bad;
	}

	/* ignore the rest */
	*p = end;

	dout("inc osdmap epoch %d max_osd %d\n", map->epoch, map->max_osd);
	return map;

e_inval:
	err = -EINVAL;
bad:
	pr_err("corrupt inc osdmap (%d) epoch %d off %d (%p of %p-%p)\n",
	       err, epoch, (int)(*p - start), *p, start, end);
	print_hex_dump(KERN_DEBUG, "osdmap: ",
		       DUMP_PREFIX_OFFSET, 16, 1,
		       start, end - start, true);
	return ERR_PTR(err);
}
void ceph_oloc_copy(struct ceph_object_locator *dest,
		    const struct ceph_object_locator *src)
{
	ceph_oloc_destroy(dest);

	dest->pool = src->pool;
	if (src->pool_ns)
		dest->pool_ns = ceph_get_string(src->pool_ns);
	else
		dest->pool_ns = NULL;
}
EXPORT_SYMBOL(ceph_oloc_copy);

void ceph_oloc_destroy(struct ceph_object_locator *oloc)
{
	ceph_put_string(oloc->pool_ns);
}
EXPORT_SYMBOL(ceph_oloc_destroy);

void ceph_oid_copy(struct ceph_object_id *dest,
		   const struct ceph_object_id *src)
{
	ceph_oid_destroy(dest);

	if (src->name != src->inline_name) {
		/* very rare, see ceph_object_id definition */
		dest->name = kmalloc(src->name_len + 1,
				     GFP_NOIO | __GFP_NOFAIL);
	} else {
		dest->name = dest->inline_name;
	}
	memcpy(dest->name, src->name, src->name_len + 1);
	dest->name_len = src->name_len;
}
EXPORT_SYMBOL(ceph_oid_copy);
static __printf(2, 0)
int oid_printf_vargs(struct ceph_object_id *oid, const char *fmt, va_list ap)
{
	int len;

	WARN_ON(!ceph_oid_empty(oid));

	len = vsnprintf(oid->inline_name, sizeof(oid->inline_name), fmt, ap);
	if (len >= sizeof(oid->inline_name))
		return len;

	oid->name_len = len;
	return 0;
}

/*
 * If oid doesn't fit into inline buffer, BUG.
 */
void ceph_oid_printf(struct ceph_object_id *oid, const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	BUG_ON(oid_printf_vargs(oid, fmt, ap));
	va_end(ap);
}
EXPORT_SYMBOL(ceph_oid_printf);

static __printf(3, 0)
int oid_aprintf_vargs(struct ceph_object_id *oid, gfp_t gfp,
		      const char *fmt, va_list ap)
{
	va_list aq;
	int len;

	va_copy(aq, ap);
	len = oid_printf_vargs(oid, fmt, aq);
	va_end(aq);

	if (len) {
		char *external_name;

		external_name = kmalloc(len + 1, gfp);
		if (!external_name)
			return -ENOMEM;

		oid->name = external_name;
		WARN_ON(vsnprintf(oid->name, len + 1, fmt, ap) != len);
		oid->name_len = len;
	}

	return 0;
}

/*
 * If oid doesn't fit into inline buffer, allocate.
 */
int ceph_oid_aprintf(struct ceph_object_id *oid, gfp_t gfp,
		     const char *fmt, ...)
{
	va_list ap;
	int ret;

	va_start(ap, fmt);
	ret = oid_aprintf_vargs(oid, gfp, fmt, ap);
	va_end(ap);

	return ret;
}
EXPORT_SYMBOL(ceph_oid_aprintf);

void ceph_oid_destroy(struct ceph_object_id *oid)
{
	if (oid->name != oid->inline_name)
		kfree(oid->name);
}
EXPORT_SYMBOL(ceph_oid_destroy);
static bool __osds_equal(const struct ceph_osds *lhs,
			 const struct ceph_osds *rhs)
{
	if (lhs->size == rhs->size &&
	    !memcmp(lhs->osds, rhs->osds, rhs->size * sizeof(rhs->osds[0])))
		return true;

	return false;
}

static bool osds_equal(const struct ceph_osds *lhs,
		       const struct ceph_osds *rhs)
{
	if (__osds_equal(lhs, rhs) &&
	    lhs->primary == rhs->primary)
		return true;

	return false;
}

static bool osds_valid(const struct ceph_osds *set)
{
	/* non-empty can_shift_osds set */
	if (set->size > 0 && set->primary >= 0)
		return true;

	/* empty can_shift_osds set */
	if (!set->size && set->primary == -1)
		return true;

	/* empty !can_shift_osds set - all NONE */
	if (set->size > 0 && set->primary == -1) {
		int i;

		for (i = 0; i < set->size; i++) {
			if (set->osds[i] != CRUSH_ITEM_NONE)
				return false;
		}

		return true;
	}

	return false;
}

void ceph_osds_copy(struct ceph_osds *dest, const struct ceph_osds *src)
{
	memcpy(dest->osds, src->osds, src->size * sizeof(src->osds[0]));
	dest->size = src->size;
	dest->primary = src->primary;
}
bool ceph_pg_is_split(const struct ceph_pg *pgid, u32 old_pg_num,
		      u32 new_pg_num)
{
	int old_bits = calc_bits_of(old_pg_num);
	int old_mask = (1 << old_bits) - 1;
	int n;

	WARN_ON(pgid->seed >= old_pg_num);
	if (new_pg_num <= old_pg_num)
		return false;

	for (n = 1; ; n++) {
		int next_bit = n << (old_bits - 1);
		u32 s = next_bit | pgid->seed;

		if (s < old_pg_num || s == pgid->seed)
			continue;
		if (s >= new_pg_num)
			break;

		s = ceph_stable_mod(s, old_pg_num, old_mask);
		if (s == pgid->seed)
			return true;
	}

	return false;
}
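/*
 * Illustrative example (values assumed): growing a pool from pg_num 4
 * to 8 with pgid->seed == 1 gives the candidate s = 4 | 1 = 5; 5 is in
 * [4, 8) and ceph_stable_mod(5, 4, 7) folds back to 1, so PG seed 1 is
 * split (new PG seed 5 takes part of its objects) and the function
 * returns true.
 */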
bool ceph_is_new_interval(const struct ceph_osds *old_acting,
			  const struct ceph_osds *new_acting,
			  const struct ceph_osds *old_up,
			  const struct ceph_osds *new_up,
			  int old_size,
			  int new_size,
			  int old_min_size,
			  int new_min_size,
			  u32 old_pg_num,
			  u32 new_pg_num,
			  bool old_sort_bitwise,
			  bool new_sort_bitwise,
			  bool old_recovery_deletes,
			  bool new_recovery_deletes,
			  const struct ceph_pg *pgid)
{
	return !osds_equal(old_acting, new_acting) ||
	       !osds_equal(old_up, new_up) ||
	       old_size != new_size ||
	       old_min_size != new_min_size ||
	       ceph_pg_is_split(pgid, old_pg_num, new_pg_num) ||
	       old_sort_bitwise != new_sort_bitwise ||
	       old_recovery_deletes != new_recovery_deletes;
}
static int calc_pg_rank(int osd, const struct ceph_osds *acting)
{
	int i;

	for (i = 0; i < acting->size; i++) {
		if (acting->osds[i] == osd)
			return i;
	}

	return -1;
}

static bool primary_changed(const struct ceph_osds *old_acting,
			    const struct ceph_osds *new_acting)
{
	if (!old_acting->size && !new_acting->size)
		return false; /* both still empty */

	if (!old_acting->size ^ !new_acting->size)
		return true; /* was empty, now not, or vice versa */

	if (old_acting->primary != new_acting->primary)
		return true; /* primary changed */

	if (calc_pg_rank(old_acting->primary, old_acting) !=
	    calc_pg_rank(new_acting->primary, new_acting))
		return true;

	return false; /* same primary (tho replicas may have changed) */
}
bool ceph_osds_changed(const struct ceph_osds *old_acting,
		       const struct ceph_osds *new_acting,
		       bool any_change)
{
	if (primary_changed(old_acting, new_acting))
		return true;

	if (any_change && !__osds_equal(old_acting, new_acting))
		return true;

	return false;
}
/*
 * Map an object into a PG.
 *
 * Should only be called with target_oid and target_oloc (as opposed to
 * base_oid and base_oloc), since tiering isn't taken into account.
 */
void __ceph_object_locator_to_pg(struct ceph_pg_pool_info *pi,
				 const struct ceph_object_id *oid,
				 const struct ceph_object_locator *oloc,
				 struct ceph_pg *raw_pgid)
{
	WARN_ON(pi->id != oloc->pool);

	if (!oloc->pool_ns) {
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, oid->name,
					       oid->name_len);
		dout("%s %s -> raw_pgid %llu.%x\n", __func__, oid->name,
		     raw_pgid->pool, raw_pgid->seed);
	} else {
		char stack_buf[256];
		char *buf = stack_buf;
		int nsl = oloc->pool_ns->len;
		size_t total = nsl + 1 + oid->name_len;

		if (total > sizeof(stack_buf))
			buf = kmalloc(total, GFP_NOIO | __GFP_NOFAIL);
		memcpy(buf, oloc->pool_ns->str, nsl);
		buf[nsl] = '\037';
		memcpy(buf + nsl + 1, oid->name, oid->name_len);
		raw_pgid->pool = oloc->pool;
		raw_pgid->seed = ceph_str_hash(pi->object_hash, buf, total);
		if (buf != stack_buf)
			kfree(buf);
		dout("%s %s ns %.*s -> raw_pgid %llu.%x\n", __func__,
		     oid->name, nsl, oloc->pool_ns->str,
		     raw_pgid->pool, raw_pgid->seed);
	}
}
int ceph_object_locator_to_pg(struct ceph_osdmap *osdmap,
			      const struct ceph_object_id *oid,
			      const struct ceph_object_locator *oloc,
			      struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdmap, oloc->pool);
	if (!pi)
		return -ENOENT;

	__ceph_object_locator_to_pg(pi, oid, oloc, raw_pgid);
	return 0;
}
EXPORT_SYMBOL(ceph_object_locator_to_pg);
/*
 * Map a raw PG (full precision ps) into an actual PG.
 */
static void raw_pg_to_pg(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid,
			 struct ceph_pg *pgid)
{
	pgid->pool = raw_pgid->pool;
	pgid->seed = ceph_stable_mod(raw_pgid->seed, pi->pg_num,
				     pi->pg_num_mask);
}
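/*
 * Illustrative (assumed values): with pg_num = 12 and pg_num_mask =
 * 0xf, raw seed 0x17 masks to 7, which is < 12 and is used as is;
 * raw seed 0x1d masks to 13, which is >= 12, so ceph_stable_mod()
 * drops to the next-smaller mask and yields 0x1d & 0x7 = 5.
 */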
/*
 * Map a raw PG (full precision ps) into a placement ps (placement
 * seed).  Include pool id in that value so that different pools don't
 * use the same seeds.
 */
static u32 raw_pg_to_pps(struct ceph_pg_pool_info *pi,
			 const struct ceph_pg *raw_pgid)
{
	if (pi->flags & CEPH_POOL_FLAG_HASHPSPOOL) {
		/* hash pool id and seed so that pool PGs do not overlap */
		return crush_hash32_2(CRUSH_HASH_RJENKINS1,
				      ceph_stable_mod(raw_pgid->seed,
						      pi->pgp_num,
						      pi->pgp_num_mask),
				      raw_pgid->pool);
	} else {
		/*
		 * legacy behavior: add ps and pool together.  This is
		 * not a great approach because the PGs from each pool
		 * will overlap on top of each other: 0.5 == 1.4 ==
		 * 2.3 == ...
		 */
		return ceph_stable_mod(raw_pgid->seed, pi->pgp_num,
				       pi->pgp_num_mask) +
		       (unsigned)raw_pgid->pool;
	}
}
/*
 * Magic value used for a "default" fallback choose_args, used if the
 * crush_choose_arg_map passed to do_crush() does not exist.  If this
 * also doesn't exist, fall back to canonical weights.
 */
#define CEPH_DEFAULT_CHOOSE_ARGS	-1

static int do_crush(struct ceph_osdmap *map, int ruleno, int x,
		    int *result, int result_max,
		    const __u32 *weight, int weight_max,
		    s64 choose_args_index)
{
	struct crush_choose_arg_map *arg_map;
	int r;

	BUG_ON(result_max > CEPH_PG_MAX_SIZE);

	arg_map = lookup_choose_arg_map(&map->crush->choose_args,
					choose_args_index);
	if (!arg_map)
		arg_map = lookup_choose_arg_map(&map->crush->choose_args,
						CEPH_DEFAULT_CHOOSE_ARGS);

	mutex_lock(&map->crush_workspace_mutex);
	r = crush_do_rule(map->crush, ruleno, x, result, result_max,
			  weight, weight_max, map->crush_workspace,
			  arg_map ? arg_map->args : NULL);
	mutex_unlock(&map->crush_workspace_mutex);

	return r;
}
static void remove_nonexistent_osds(struct ceph_osdmap *osdmap,
				    struct ceph_pg_pool_info *pi,
				    struct ceph_osds *set)
{
	int i;

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
	} else {
		/* set dne devices to NONE */
		for (i = 0; i < set->size; i++) {
			if (!ceph_osd_exists(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
		}
	}
}
/*
 * Calculate raw set (CRUSH output) for given PG and filter out
 * nonexistent OSDs.  ->primary is undefined for a raw set.
 *
 * Placement seed (CRUSH input) is returned through @ppps.
 */
static void pg_to_raw_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   const struct ceph_pg *raw_pgid,
			   struct ceph_osds *raw,
			   u32 *ppps)
{
	u32 pps = raw_pg_to_pps(pi, raw_pgid);
	int ruleno;
	int len;

	ceph_osds_init(raw);
	if (ppps)
		*ppps = pps;

	ruleno = crush_find_rule(osdmap->crush, pi->crush_ruleset, pi->type,
				 pi->size);
	if (ruleno < 0) {
		pr_err("no crush rule: pool %lld ruleset %d type %d size %d\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size);
		return;
	}

	if (pi->size > ARRAY_SIZE(raw->osds)) {
		pr_err_ratelimited("pool %lld ruleset %d type %d too wide: size %d > %zu\n",
		       pi->id, pi->crush_ruleset, pi->type, pi->size,
		       ARRAY_SIZE(raw->osds));
		return;
	}

	len = do_crush(osdmap, ruleno, pps, raw->osds, pi->size,
		       osdmap->osd_weight, osdmap->max_osd, pi->id);
	if (len < 0) {
		pr_err("error %d from crush rule %d: pool %lld ruleset %d type %d size %d\n",
		       len, ruleno, pi->id, pi->crush_ruleset, pi->type,
		       pi->size);
		return;
	}

	raw->size = len;
	remove_nonexistent_osds(osdmap, pi, raw);
}
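/*
 * Note that the pool id is passed to do_crush() as the choose_args
 * index, so any per-pool choose_args decoded earlier (or the
 * CEPH_DEFAULT_CHOOSE_ARGS fallback) are applied when the rule runs.
 */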
/* apply pg_upmap[_items] mappings */
static void apply_upmap(struct ceph_osdmap *osdmap,
			const struct ceph_pg *pgid,
			struct ceph_osds *raw)
{
	struct ceph_pg_mapping *pg;
	int i, j;

	pg = lookup_pg_mapping(&osdmap->pg_upmap, pgid);
	if (pg) {
		/* make sure targets aren't marked out */
		for (i = 0; i < pg->pg_upmap.len; i++) {
			int osd = pg->pg_upmap.osds[i];

			if (osd != CRUSH_ITEM_NONE &&
			    osd < osdmap->max_osd &&
			    osdmap->osd_weight[osd] == 0) {
				/* reject/ignore explicit mapping */
				return;
			}
		}
		for (i = 0; i < pg->pg_upmap.len; i++)
			raw->osds[i] = pg->pg_upmap.osds[i];
		raw->size = pg->pg_upmap.len;
		/* check and apply pg_upmap_items, if any */
	}

	pg = lookup_pg_mapping(&osdmap->pg_upmap_items, pgid);
	if (pg) {
		/*
		 * Note: this approach does not allow a bidirectional swap,
		 * e.g., [[1,2],[2,1]] applied to [0,1,2] -> [0,2,1].
		 */
		for (i = 0; i < pg->pg_upmap_items.len; i++) {
			int from = pg->pg_upmap_items.from_to[i][0];
			int to = pg->pg_upmap_items.from_to[i][1];
			int pos = -1;
			bool exists = false;

			/* make sure replacement doesn't already appear */
			for (j = 0; j < raw->size; j++) {
				int osd = raw->osds[j];

				if (osd == to) {
					exists = true;
					break;
				}
				/* ignore mapping if target is marked out */
				if (osd == from && pos < 0 &&
				    !(to != CRUSH_ITEM_NONE &&
				      to < osdmap->max_osd &&
				      osdmap->osd_weight[to] == 0)) {
					pos = j;
				}
			}
			if (!exists && pos >= 0)
				raw->osds[pos] = to;
		}
	}
}
/*
 * Given raw set, calculate up set and up primary.  By definition of an
 * up set, the result won't contain nonexistent or down OSDs.
 *
 * This is done in-place - on return @set is the up set.  If it's
 * empty, ->primary will remain undefined.
 */
static void raw_to_up_osds(struct ceph_osdmap *osdmap,
			   struct ceph_pg_pool_info *pi,
			   struct ceph_osds *set)
{
	int i;

	/* ->primary is undefined for a raw set */
	BUG_ON(set->primary != -1);

	if (ceph_can_shift_osds(pi)) {
		int removed = 0;

		/* shift left */
		for (i = 0; i < set->size; i++) {
			if (ceph_osd_is_down(osdmap, set->osds[i])) {
				removed++;
				continue;
			}
			if (removed)
				set->osds[i - removed] = set->osds[i];
		}
		set->size -= removed;
		if (set->size > 0)
			set->primary = set->osds[0];
	} else {
		/* set down/dne devices to NONE */
		for (i = set->size - 1; i >= 0; i--) {
			if (ceph_osd_is_down(osdmap, set->osds[i]))
				set->osds[i] = CRUSH_ITEM_NONE;
			else
				set->primary = set->osds[i];
		}
	}
}
static void apply_primary_affinity(struct ceph_osdmap *osdmap,
				   struct ceph_pg_pool_info *pi,
				   u32 pps,
				   struct ceph_osds *up)
{
	int i;
	int pos = -1;

	/*
	 * Do we have any non-default primary_affinity values for these
	 * osds?
	 */
	if (!osdmap->osd_primary_affinity)
		return;

	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];

		if (osd != CRUSH_ITEM_NONE &&
		    osdmap->osd_primary_affinity[osd] !=
					CEPH_OSD_DEFAULT_PRIMARY_AFFINITY) {
			break;
		}
	}
	if (i == up->size)
		return;

	/*
	 * Pick the primary.  Feed both the seed (for the pg) and the
	 * osd into the hash/rng so that a proportional fraction of an
	 * osd's pgs get rejected as primary.
	 */
	for (i = 0; i < up->size; i++) {
		int osd = up->osds[i];
		u32 aff;

		if (osd == CRUSH_ITEM_NONE)
			continue;

		aff = osdmap->osd_primary_affinity[osd];
		if (aff < CEPH_OSD_MAX_PRIMARY_AFFINITY &&
		    (crush_hash32_2(CRUSH_HASH_RJENKINS1,
				    pps, osd) >> 16) >= aff) {
			/*
			 * We chose not to use this primary.  Note it
			 * anyway as a fallback in case we don't pick
			 * anyone else, but keep looking.
			 */
			if (pos < 0)
				pos = i;
		} else {
			pos = i;
			break;
		}
	}
	if (pos < 0)
		return;

	up->primary = up->osds[pos];

	if (ceph_can_shift_osds(pi) && pos > 0) {
		/* move the new primary to the front */
		for (i = pos; i > 0; i--)
			up->osds[i] = up->osds[i - 1];
		up->osds[0] = up->primary;
	}
}
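/*
 * Illustrative probability (assuming CEPH_OSD_MAX_PRIMARY_AFFINITY =
 * 0x10000): an osd with aff = 0x8000 sees (hash >> 16) >= aff about
 * half the time, so it is passed over as primary for roughly half of
 * the PGs it would otherwise lead.
 */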
/*
 * Get pg_temp and primary_temp mappings for given PG.
 *
 * Note that a PG may have none, only pg_temp, only primary_temp or
 * both pg_temp and primary_temp mappings.  This means @temp isn't
 * always a valid OSD set on return: in the "only primary_temp" case,
 * @temp will have its ->primary >= 0 but ->size == 0.
 */
static void get_temp_osds(struct ceph_osdmap *osdmap,
			  struct ceph_pg_pool_info *pi,
			  const struct ceph_pg *pgid,
			  struct ceph_osds *temp)
{
	struct ceph_pg_mapping *pg;
	int i;

	ceph_osds_init(temp);

	/* pg_temp? */
	pg = lookup_pg_mapping(&osdmap->pg_temp, pgid);
	if (pg) {
		for (i = 0; i < pg->pg_temp.len; i++) {
			if (ceph_osd_is_down(osdmap, pg->pg_temp.osds[i])) {
				if (ceph_can_shift_osds(pi))
					continue;

				temp->osds[temp->size++] = CRUSH_ITEM_NONE;
			} else {
				temp->osds[temp->size++] = pg->pg_temp.osds[i];
			}
		}

		/* apply pg_temp's primary */
		for (i = 0; i < temp->size; i++) {
			if (temp->osds[i] != CRUSH_ITEM_NONE) {
				temp->primary = temp->osds[i];
				break;
			}
		}
	}

	/* primary_temp? */
	pg = lookup_pg_mapping(&osdmap->primary_temp, pgid);
	if (pg)
		temp->primary = pg->primary_temp.osd;
}
/*
 * Map a PG to its acting set as well as its up set.
 *
 * Acting set is used for data mapping purposes, while up set can be
 * recorded for detecting interval changes and deciding whether to
 * resend a request.
 */
void ceph_pg_to_up_acting_osds(struct ceph_osdmap *osdmap,
			       struct ceph_pg_pool_info *pi,
			       const struct ceph_pg *raw_pgid,
			       struct ceph_osds *up,
			       struct ceph_osds *acting)
{
	struct ceph_pg pgid;
	u32 pps;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	pg_to_raw_osds(osdmap, pi, raw_pgid, up, &pps);
	apply_upmap(osdmap, &pgid, up);
	raw_to_up_osds(osdmap, pi, up);
	apply_primary_affinity(osdmap, pi, pps, up);
	get_temp_osds(osdmap, pi, &pgid, acting);
	if (!acting->size) {
		memcpy(acting->osds, up->osds, up->size * sizeof(up->osds[0]));
		acting->size = up->size;
		if (acting->primary == -1)
			acting->primary = up->primary;
	}
	WARN_ON(!osds_valid(up) || !osds_valid(acting));
}
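/*
 * Usage sketch (illustrative, not part of the original file):
 *
 *	struct ceph_pg raw_pgid;
 *	struct ceph_osds up, acting;
 *
 *	ceph_object_locator_to_pg(osdmap, oid, oloc, &raw_pgid);
 *	pi = ceph_pg_pool_by_id(osdmap, raw_pgid.pool);
 *	ceph_pg_to_up_acting_osds(osdmap, pi, &raw_pgid, &up, &acting);
 *
 * is roughly the sequence the osd_client request path follows to find
 * the OSDs serving an object.
 */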
bool ceph_pg_to_primary_shard(struct ceph_osdmap *osdmap,
			      struct ceph_pg_pool_info *pi,
			      const struct ceph_pg *raw_pgid,
			      struct ceph_spg *spgid)
{
	struct ceph_pg pgid;
	struct ceph_osds up, acting;
	int i;

	WARN_ON(pi->id != raw_pgid->pool);
	raw_pg_to_pg(pi, raw_pgid, &pgid);

	if (ceph_can_shift_osds(pi)) {
		spgid->pgid = pgid; /* struct */
		spgid->shard = CEPH_SPG_NOSHARD;
		return true;
	}

	ceph_pg_to_up_acting_osds(osdmap, pi, &pgid, &up, &acting);
	for (i = 0; i < acting.size; i++) {
		if (acting.osds[i] == acting.primary) {
			spgid->pgid = pgid; /* struct */
			spgid->shard = i;
			return true;
		}
	}

	return false;
}
/*
 * Return acting primary for given PG, or -1 if none.
 */
int ceph_pg_to_acting_primary(struct ceph_osdmap *osdmap,
			      const struct ceph_pg *raw_pgid)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_osds up, acting;

	pi = ceph_pg_pool_by_id(osdmap, raw_pgid->pool);
	if (!pi)
		return -1;

	ceph_pg_to_up_acting_osds(osdmap, pi, raw_pgid, &up, &acting);
	return acting.primary;
}
EXPORT_SYMBOL(ceph_pg_to_acting_primary);