// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * object storage within the cluster.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif
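
/*
 * Note on rwsem_is_wrlocked() above: if down_read_trylock() succeeds,
 * no writer can currently hold the semaphore, so it is not write-locked.
 * This is advisory only -- trylock may also fail without the write lock
 * being held (e.g. with a queued writer) -- which is why it is used
 * strictly under WARN_ON() in the verify_*() helpers.
 */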

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
					  objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		ceph_pagelist_release(osd_data->pagelist);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		ceph_osd_data_release(&op->copy_from.osd_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_private_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_private_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}
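
/*
 * Breakdown of the fixed part above: 8 (pool) + 4 (preferred) +
 * 4 (key length, always 0 here) + 4 (namespace length), plus the
 * namespace bytes themselves -- matching what encode_oloc() below
 * actually emits.
 */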

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(req->r_request || req->r_reply);
	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
				       num_request_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
				    num_request_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
				       num_reply_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
				    num_reply_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{
	struct ceph_osd_req_op *op;

	*num_request_data_items = 0;
	*num_reply_data_items = 0;

	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
		case CEPH_OSD_OP_NOTIFY_ACK:
		case CEPH_OSD_OP_COPY_FROM2:
			*num_request_data_items += 1;
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
		case CEPH_OSD_OP_READ:
		case CEPH_OSD_OP_LIST_WATCHERS:
			*num_reply_data_items += 1;
			break;

		/* both */
		case CEPH_OSD_OP_NOTIFY:
			*num_request_data_items += 1;
			*num_reply_data_items += 1;
			break;
		case CEPH_OSD_OP_CALL:
			*num_request_data_items += 2;
			*num_reply_data_items += 1;
			break;

		default:
			WARN_ON(!osd_req_opcode_valid(op->op));
		}
	}
}
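
/*
 * Note: CEPH_OSD_OP_CALL counts two request data items because
 * setup_request_data() adds both cls.request_info (class/method names)
 * and cls.request_data (input blob) to the request message.
 */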

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	int num_request_data_items, num_reply_data_items;

	get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
					  num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
			    unsigned int which, u16 opcode,
			    u64 offset, u64 length,
			    u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
				unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;
	int ret;

	op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ret = ceph_pagelist_append(pagelist, class, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ret = ceph_pagelist_append(pagelist, method, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
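
/*
 * The request_info pagelist built above carries just the raw class and
 * method name bytes; their lengths travel separately in the encoded op
 * (cls.class_len/cls.method_len, see osd_req_encode_op()).  Typical use
 * is something like osd_req_op_cls_init(req, 0, "lock", "lock"), with
 * the method's input attached afterwards via
 * osd_req_op_cls_request_data_*().
 */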

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;
	int ret;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ret = ceph_pagelist_append(pagelist, name, payload_len);
	if (ret)
		goto err_pagelist_free;

	op->xattr.value_len = size;
	ret = ceph_pagelist_append(pagelist, value, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

/*
 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
 */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size,
				u32 flags)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;
	op->alloc_hint.flags = flags;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				   struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment, false);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
		dst->copy_from.src_version =
			cpu_to_le64(src->copy_from.src_version);
		dst->copy_from.flags = src->copy_from.flags;
		dst->copy_from.src_fadvise_flags =
			cpu_to_le32(src->copy_from.src_fadvise_flags);
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}
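
/*
 * The return value above is the op's contribution to the data payload
 * (indata_len); encode_request_partial() sums these per-op values into
 * the message header's data_len.
 */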

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_flags = flags;
	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
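
/*
 * Object names generated above follow the "<inode hex>.<object number>"
 * convention of the "%llx.%08llx" format: e.g. (illustrative) vino.ino
 * 0x10000003456 and objnum 5 yield "10000003456.00000005".
 */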

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}
1200 * Assumes @osd is zero-initialized.
1202 static void osd_init(struct ceph_osd *osd)
1204 refcount_set(&osd->o_ref, 1);
1205 RB_CLEAR_NODE(&osd->o_node);
1206 osd->o_requests = RB_ROOT;
1207 osd->o_linger_requests = RB_ROOT;
1208 osd->o_backoff_mappings = RB_ROOT;
1209 osd->o_backoffs_by_id = RB_ROOT;
1210 INIT_LIST_HEAD(&osd->o_osd_lru);
1211 INIT_LIST_HEAD(&osd->o_keepalive_item);
1212 osd->o_incarnation = 1;
1213 mutex_init(&osd->lock);
1216 static void osd_cleanup(struct ceph_osd *osd)
1218 WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1219 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1220 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1221 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
1222 WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
1223 WARN_ON(!list_empty(&osd->o_osd_lru));
1224 WARN_ON(!list_empty(&osd->o_keepalive_item));
1226 if (osd->o_auth.authorizer) {
1227 WARN_ON(osd_homeless(osd));
1228 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1233 * Track open sessions with osds.
1235 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1237 struct ceph_osd *osd;
1239 WARN_ON(onum == CEPH_HOMELESS_OSD);
1241 osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1246 ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1251 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1253 if (refcount_inc_not_zero(&osd->o_ref)) {
1254 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
1255 refcount_read(&osd->o_ref));
1258 dout("get_osd %p FAIL\n", osd);
1263 static void put_osd(struct ceph_osd *osd)
1265 dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
1266 refcount_read(&osd->o_ref) - 1);
1267 if (refcount_dec_and_test(&osd->o_ref)) {

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
	    !ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
		for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
			struct ceph_osd_request *req =
			    rb_entry(n, struct ceph_osd_request, r_node);
			req->r_stamp = jiffies;
		}

		return -EAGAIN;
	}

	ceph_con_close(&osd->o_con);
	ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
	osd->o_incarnation++;

	return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
					  bool wrlocked)
{
	struct ceph_osd *osd;

	if (wrlocked)
		verify_osdc_wrlocked(osdc);
	else
		verify_osdc_locked(osdc);

	if (o != CEPH_HOMELESS_OSD)
		osd = lookup_osd(&osdc->osds, o);
	else
		osd = &osdc->homeless_osd;
	if (!osd) {
		if (!wrlocked)
			return ERR_PTR(-EAGAIN);

		osd = create_osd(osdc, o);
		insert_osd(&osdc->osds, osd);
		ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
			      &osdc->osdmap->osd_addr[osd->o_osd]);
	}

	dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
	return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(!req->r_tid || req->r_osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	if (!osd_homeless(osd))
		__remove_osd_from_lru(osd);
	else
		atomic_inc(&osd->o_osdc->num_homeless);

	get_osd(osd);
	insert_request(&osd->o_requests, req);
	req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
	verify_osd_locked(osd);
	WARN_ON(req->r_osd != osd);
	dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
	     req, req->r_tid);

	req->r_osd = NULL;
	erase_request(&osd->o_requests, req);
	put_osd(osd);

	if (!osd_homeless(osd))
		maybe_move_osd_to_lru(osd);
	else
		atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
	return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
		struct ceph_pg_pool_info *pi =
		    rb_entry(n, struct ceph_pg_pool_info, node);

		if (__pool_full(pi))
			return true;
	}

	return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
	struct ceph_pg_pool_info *pi;

	pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
	if (!pi)
		return false;

	return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
				    const struct ceph_osd_request_target *t,
				    struct ceph_pg_pool_info *pi)
{
	bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
	bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
		       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
		       __pool_full(pi);

	WARN_ON(pi->id != t->target_oloc.pool);
	return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
	       ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
	       (osdc->osdmap->epoch < osdc->epoch_barrier);
}

static int pick_random_replica(const struct ceph_osds *acting)
{
	int i = prandom_u32() % acting->size;

	dout("%s picked osd%d, primary osd%d\n", __func__,
	     acting->osds[i], acting->primary);
	return i;
}

/*
 * Picks the closest replica based on client's location given by
 * crush_location option.  Prefers the primary if the locality is
 * the same.
 */
static int pick_closest_replica(struct ceph_osd_client *osdc,
				const struct ceph_osds *acting)
{
	struct ceph_options *opt = osdc->client->options;
	int best_i, best_locality;
	int i = 0, locality;

	do {
		locality = ceph_get_crush_locality(osdc->osdmap,
						   acting->osds[i],
						   &opt->crush_locs);
		if (i == 0 ||
		    (locality >= 0 && best_locality < 0) ||
		    (locality >= 0 && best_locality >= 0 &&
		     locality < best_locality)) {
			best_i = i;
			best_locality = locality;
		}
	} while (++i < acting->size);

	dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
	     acting->osds[best_i], best_locality, acting->primary);
	return best_i;
}
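
/*
 * A note on the loop above: best_i/best_locality are only read after the
 * i == 0 iteration initializes them.  ceph_get_crush_locality() is
 * assumed to return a nonnegative "distance" (smaller is closer) or a
 * negative value when the OSD's location doesn't match any configured
 * crush location -- hence a negative locality never displaces a
 * nonnegative one, and ties go to the earlier (primary-first) OSD.
 */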

enum calc_target_result {
	CALC_TARGET_NO_ACTION = 0,
	CALC_TARGET_NEED_RESEND,
	CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
					   struct ceph_osd_request_target *t,
					   bool any_change)
{
	struct ceph_pg_pool_info *pi;
	struct ceph_pg pgid, last_pgid;
	struct ceph_osds up, acting;
	bool is_read = t->flags & CEPH_OSD_FLAG_READ;
	bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
	bool force_resend = false;
	bool unpaused = false;
	bool legacy_change = false;
	bool split = false;
	bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
	bool recovery_deletes = ceph_osdmap_flag(osdc,
						 CEPH_OSDMAP_RECOVERY_DELETES);
	enum calc_target_result ct_res;

	t->epoch = osdc->osdmap->epoch;
	pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
	if (!pi) {
		t->osd = CEPH_HOMELESS_OSD;
		ct_res = CALC_TARGET_POOL_DNE;
		goto out;
	}

	if (osdc->osdmap->epoch == pi->last_force_request_resend) {
		if (t->last_force_resend < pi->last_force_request_resend) {
			t->last_force_resend = pi->last_force_request_resend;
			force_resend = true;
		} else if (t->last_force_resend == 0) {
			force_resend = true;
		}
	}

	/* apply tiering */
	ceph_oid_copy(&t->target_oid, &t->base_oid);
	ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
	if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
		if (is_read && pi->read_tier >= 0)
			t->target_oloc.pool = pi->read_tier;
		if (is_write && pi->write_tier >= 0)
			t->target_oloc.pool = pi->write_tier;

		pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
		if (!pi) {
			t->osd = CEPH_HOMELESS_OSD;
			ct_res = CALC_TARGET_POOL_DNE;
			goto out;
		}
	}

	__ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
	last_pgid.pool = pgid.pool;
	last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

	ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
	if (any_change &&
	    ceph_is_new_interval(&t->acting,
				 &acting,
				 &t->up,
				 &up,
				 t->size,
				 pi->size,
				 t->min_size,
				 pi->min_size,
				 t->pg_num,
				 pi->pg_num,
				 t->sort_bitwise,
				 sort_bitwise,
				 t->recovery_deletes,
				 recovery_deletes,
				 &last_pgid))
		force_resend = true;

	if (t->paused && !target_should_be_paused(osdc, t, pi)) {
		t->paused = false;
		unpaused = true;
	}
	legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
			ceph_osds_changed(&t->acting, &acting,
					  t->used_replica || any_change);
	if (t->pg_num)
		split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);

	if (legacy_change || force_resend || split) {
		t->pgid = pgid; /* struct */
		ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
		ceph_osds_copy(&t->acting, &acting);
		ceph_osds_copy(&t->up, &up);
		t->size = pi->size;
		t->min_size = pi->min_size;
		t->pg_num = pi->pg_num;
		t->pg_num_mask = pi->pg_num_mask;
		t->sort_bitwise = sort_bitwise;
		t->recovery_deletes = recovery_deletes;

		if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
				 CEPH_OSD_FLAG_LOCALIZE_READS)) &&
		    !is_write && pi->type == CEPH_POOL_TYPE_REP &&
		    acting.size > 1) {
			int pos;

			WARN_ON(!is_read || acting.osds[0] != acting.primary);
			if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
				pos = pick_random_replica(&acting);
			} else {
				pos = pick_closest_replica(osdc, &acting);
			}
			t->osd = acting.osds[pos];
			t->used_replica = pos > 0;
		} else {
			t->osd = acting.primary;
			t->used_replica = false;
		}
	}

	if (unpaused || legacy_change || force_resend || split)
		ct_res = CALC_TARGET_NEED_RESEND;
	else
		ct_res = CALC_TARGET_NO_ACTION;

out:
	dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
	     legacy_change, force_resend, split, ct_res, t->osd);
	return ct_res;
}

static struct ceph_spg_mapping *alloc_spg_mapping(void)
{
	struct ceph_spg_mapping *spg;

	spg = kmalloc(sizeof(*spg), GFP_NOIO);
	if (!spg)
		return NULL;

	RB_CLEAR_NODE(&spg->node);
	spg->backoffs = RB_ROOT;
	return spg;
}

static void free_spg_mapping(struct ceph_spg_mapping *spg)
{
	WARN_ON(!RB_EMPTY_NODE(&spg->node));
	WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));

	kfree(spg);
}

/*
 * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
 * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
 * defined only within a specific spgid; it does not pass anything to
 * children on split, or to another primary.
 */
DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
		 RB_BYPTR, const struct ceph_spg *, node)

static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
{
	return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
}
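
/*
 * hobjects sort by their 32-bit reversed hash; mapping is_max to
 * 0x100000000 (one past the largest possible u32) makes the MAX
 * sentinel sort after every real object without needing a special
 * case in hoid_compare() below.
 */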

static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
				   void **pkey, size_t *pkey_len)
{
	if (hoid->key_len) {
		*pkey = hoid->key;
		*pkey_len = hoid->key_len;
	} else {
		*pkey = hoid->oid;
		*pkey_len = hoid->oid_len;
	}
}

static int compare_names(const void *name1, size_t name1_len,
			 const void *name2, size_t name2_len)
{
	int ret;

	ret = memcmp(name1, name2, min(name1_len, name2_len));
	if (!ret) {
		if (name1_len < name2_len)
			ret = -1;
		else if (name1_len > name2_len)
			ret = 1;
	}
	return ret;
}

static int hoid_compare(const struct ceph_hobject_id *lhs,
			const struct ceph_hobject_id *rhs)
{
	void *effective_key1, *effective_key2;
	size_t effective_key1_len, effective_key2_len;
	int ret;

	if (lhs->is_max < rhs->is_max)
		return -1;
	if (lhs->is_max > rhs->is_max)
		return 1;

	if (lhs->pool < rhs->pool)
		return -1;
	if (lhs->pool > rhs->pool)
		return 1;

	if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
		return -1;
	if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
		return 1;

	ret = compare_names(lhs->nspace, lhs->nspace_len,
			    rhs->nspace, rhs->nspace_len);
	if (ret)
		return ret;

	hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
	hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
	ret = compare_names(effective_key1, effective_key1_len,
			    effective_key2, effective_key2_len);
	if (ret)
		return ret;

	ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
	if (ret)
		return ret;

	if (lhs->snapid < rhs->snapid)
		return -1;
	if (lhs->snapid > rhs->snapid)
		return 1;

	return 0;
}

/*
 * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
 * compat stuff here.
 *
 * Assumes @hoid is zero-initialized.
 */
static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
				  &struct_len);
	if (ret)
		return ret;

	if (struct_v < 4) {
		pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
		goto e_inval;
	}

	hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
						GFP_NOIO);
	if (IS_ERR(hoid->key)) {
		ret = PTR_ERR(hoid->key);
		hoid->key = NULL;
		return ret;
	}

	hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
						GFP_NOIO);
	if (IS_ERR(hoid->oid)) {
		ret = PTR_ERR(hoid->oid);
		hoid->oid = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
	ceph_decode_32_safe(p, end, hoid->hash, e_inval);
	ceph_decode_8_safe(p, end, hoid->is_max, e_inval);

	hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
						   GFP_NOIO);
	if (IS_ERR(hoid->nspace)) {
		ret = PTR_ERR(hoid->nspace);
		hoid->nspace = NULL;
		return ret;
	}

	ceph_decode_64_safe(p, end, hoid->pool, e_inval);

	ceph_hoid_build_hash_cache(hoid);
	return 0;

e_inval:
	return -EINVAL;
}

static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
{
	return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
	       4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
}

static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
{
	ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
	ceph_encode_string(p, end, hoid->key, hoid->key_len);
	ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
	ceph_encode_64(p, hoid->snapid);
	ceph_encode_32(p, hoid->hash);
	ceph_encode_8(p, hoid->is_max);
	ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
	ceph_encode_64(p, hoid->pool);
}

static void free_hoid(struct ceph_hobject_id *hoid)
{
	if (hoid) {
		kfree(hoid->key);
		kfree(hoid->oid);
		kfree(hoid->nspace);
		kfree(hoid);
	}
}

static struct ceph_osd_backoff *alloc_backoff(void)
{
	struct ceph_osd_backoff *backoff;

	backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
	if (!backoff)
		return NULL;

	RB_CLEAR_NODE(&backoff->spg_node);
	RB_CLEAR_NODE(&backoff->id_node);
	return backoff;
}

static void free_backoff(struct ceph_osd_backoff *backoff)
{
	WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
	WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));

	free_hoid(backoff->begin);
	free_hoid(backoff->end);
	kfree(backoff);
}

/*
 * Within a specific spgid, backoffs are managed by ->begin hoid.
 */
DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
			RB_BYVAL, spg_node);

static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
					const struct ceph_hobject_id *hoid)
{
	struct rb_node *n = root->rb_node;

	while (n) {
		struct ceph_osd_backoff *cur =
		    rb_entry(n, struct ceph_osd_backoff, spg_node);
		int cmp;

		cmp = hoid_compare(hoid, cur->begin);
		if (cmp < 0) {
			n = n->rb_left;
		} else if (cmp > 0) {
			if (hoid_compare(hoid, cur->end) < 0)
				return cur;

			n = n->rb_right;
		} else {
			return cur;
		}
	}

	return NULL;
}
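
/*
 * Backoff ranges within an spgid don't overlap, so a tree keyed by
 * ->begin is sufficient: walk towards @hoid's insertion point and
 * report any node whose [begin, end) interval contains it.
 */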

/*
 * Each backoff has a unique id within its OSD session.
 */
DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)

static void clear_backoffs(struct ceph_osd *osd)
{
	while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
		struct ceph_spg_mapping *spg =
		    rb_entry(rb_first(&osd->o_backoff_mappings),
			     struct ceph_spg_mapping, node);

		while (!RB_EMPTY_ROOT(&spg->backoffs)) {
			struct ceph_osd_backoff *backoff =
			    rb_entry(rb_first(&spg->backoffs),
				     struct ceph_osd_backoff, spg_node);

			erase_backoff(&spg->backoffs, backoff);
			erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
			free_backoff(backoff);
		}
		erase_spg_mapping(&osd->o_backoff_mappings, spg);
		free_spg_mapping(spg);
	}
}

/*
 * Set up a temporary, non-owning view into @t.
 */
static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
				  const struct ceph_osd_request_target *t)
{
	hoid->key = NULL;
	hoid->key_len = 0;
	hoid->oid = t->target_oid.name;
	hoid->oid_len = t->target_oid.name_len;
	hoid->snapid = CEPH_NOSNAP;
	hoid->hash = t->pgid.seed;
	hoid->is_max = false;
	if (t->target_oloc.pool_ns) {
		hoid->nspace = t->target_oloc.pool_ns->str;
		hoid->nspace_len = t->target_oloc.pool_ns->len;
	} else {
		hoid->nspace = NULL;
		hoid->nspace_len = 0;
	}
	hoid->pool = t->target_oloc.pool;
	ceph_hoid_build_hash_cache(hoid);
}

static bool should_plug_request(struct ceph_osd_request *req)
{
	struct ceph_osd *osd = req->r_osd;
	struct ceph_spg_mapping *spg;
	struct ceph_osd_backoff *backoff;
	struct ceph_hobject_id hoid;

	spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
	if (!spg)
		return false;

	hoid_fill_from_target(&hoid, &req->r_t);
	backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
	if (!backoff)
		return false;

	dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
	     __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
	     backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
	return true;
}

/*
 * Keep get_num_data_items() in sync with this function.
 */
static void setup_request_data(struct ceph_osd_request *req)
{
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;
	struct ceph_osd_req_op *op;

	if (req->r_request->num_data_items || req->r_reply->num_data_items)
		return;

	WARN_ON(request_msg->data_length || reply_msg->data_length);
	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
			WARN_ON(op->indata_len != op->extent.length);
			ceph_osdc_msg_data_add(request_msg,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
			WARN_ON(op->indata_len != op->xattr.name_len +
						  op->xattr.value_len);
			ceph_osdc_msg_data_add(request_msg,
					       &op->xattr.osd_data);
			break;
		case CEPH_OSD_OP_NOTIFY_ACK:
			ceph_osdc_msg_data_add(request_msg,
					       &op->notify_ack.request_data);
			break;
		case CEPH_OSD_OP_COPY_FROM2:
			ceph_osdc_msg_data_add(request_msg,
					       &op->copy_from.osd_data);
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
			ceph_osdc_msg_data_add(reply_msg,
					       &op->raw_data_in);
			break;
		case CEPH_OSD_OP_READ:
			ceph_osdc_msg_data_add(reply_msg,
					       &op->extent.osd_data);
			break;
		case CEPH_OSD_OP_LIST_WATCHERS:
			ceph_osdc_msg_data_add(reply_msg,
					       &op->list_watchers.response_data);
			break;

		/* both */
		case CEPH_OSD_OP_CALL:
			WARN_ON(op->indata_len != op->cls.class_len +
						  op->cls.method_len +
						  op->cls.indata_len);
			ceph_osdc_msg_data_add(request_msg,
					       &op->cls.request_info);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(request_msg,
					       &op->cls.request_data);
			/* optional, can be NONE */
			ceph_osdc_msg_data_add(reply_msg,
					       &op->cls.response_data);
			break;
		case CEPH_OSD_OP_NOTIFY:
			ceph_osdc_msg_data_add(request_msg,
					       &op->notify.request_data);
			ceph_osdc_msg_data_add(reply_msg,
					       &op->notify.response_data);
			break;
		}
	}
}

static void encode_pgid(void **p, const struct ceph_pg *pgid)
{
	ceph_encode_8(p, 1);
	ceph_encode_64(p, pgid->pool);
	ceph_encode_32(p, pgid->seed);
	ceph_encode_32(p, -1); /* preferred */
}

static void encode_spgid(void **p, const struct ceph_spg *spgid)
{
	ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
	encode_pgid(p, &spgid->pgid);
	ceph_encode_8(p, spgid->shard);
}

static void encode_oloc(void **p, void *end,
			const struct ceph_object_locator *oloc)
{
	ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
	ceph_encode_64(p, oloc->pool);
	ceph_encode_32(p, -1); /* preferred */
	ceph_encode_32(p, 0);  /* key len */
	if (oloc->pool_ns)
		ceph_encode_string(p, end, oloc->pool_ns->str,
				   oloc->pool_ns->len);
	else
		ceph_encode_32(p, 0);
}

static void encode_request_partial(struct ceph_osd_request *req,
				   struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front_alloc_len;
	u32 data_len = 0;
	int i;

	if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
		/* snapshots aren't writeable */
		WARN_ON(req->r_snapid != CEPH_NOSNAP);
	} else {
		WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
			req->r_data_offset || req->r_snapc);
	}

	setup_request_data(req);

	encode_spgid(&p, &req->r_t.spgid); /* actual spg */
	ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
	ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
	ceph_encode_32(&p, req->r_flags);

	/* reqid */
	ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
	memset(p, 0, sizeof(struct ceph_osd_reqid));
	p += sizeof(struct ceph_osd_reqid);

	/* trace */
	memset(p, 0, sizeof(struct ceph_blkin_trace_info));
	p += sizeof(struct ceph_blkin_trace_info);

	ceph_encode_32(&p, 0); /* client_inc, always 0 */
	ceph_encode_timespec64(p, &req->r_mtime);
	p += sizeof(struct ceph_timespec);

	encode_oloc(&p, end, &req->r_t.target_oloc);
	ceph_encode_string(&p, end, req->r_t.target_oid.name,
			   req->r_t.target_oid.name_len);

	/* ops, can imply data */
	ceph_encode_16(&p, req->r_num_ops);
	for (i = 0; i < req->r_num_ops; i++) {
		data_len += osd_req_encode_op(p, &req->r_ops[i]);
		p += sizeof(struct ceph_osd_op);
	}

	ceph_encode_64(&p, req->r_snapid); /* snapid */
	if (req->r_snapc) {
		ceph_encode_64(&p, req->r_snapc->seq);
		ceph_encode_32(&p, req->r_snapc->num_snaps);
		for (i = 0; i < req->r_snapc->num_snaps; i++)
			ceph_encode_64(&p, req->r_snapc->snaps[i]);
	} else {
		ceph_encode_64(&p, 0); /* snap_seq */
		ceph_encode_32(&p, 0); /* snaps len */
	}

	ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
	BUG_ON(p > end - 8); /* space for features */

	msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
	/* front_len is finalized in encode_request_finish() */
	msg->front.iov_len = p - msg->front.iov_base;
	msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
	msg->hdr.data_len = cpu_to_le32(data_len);
	/*
	 * The header "data_off" is a hint to the receiver allowing it
	 * to align received data into its buffers such that there's no
	 * need to re-copy it before writing it to disk (direct I/O).
	 */
	msg->hdr.data_off = cpu_to_le16(req->r_data_offset);

	dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
	     req->r_t.target_oid.name, req->r_t.target_oid.name_len);
}
2183 static void encode_request_finish(struct ceph_msg *msg)
2185 void *p = msg->front.iov_base;
2186 void *const partial_end = p + msg->front.iov_len;
2187 void *const end = p + msg->front_alloc_len;
2189 if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2190 /* luminous OSD -- encode features and be done */
2192 ceph_encode_64(&p, msg->con->peer_features);
2195 char spgid[CEPH_ENCODING_START_BLK_LEN +
2196 CEPH_PGID_ENCODING_LEN + 1];
2200 char reqid[CEPH_ENCODING_START_BLK_LEN +
2201 sizeof(struct ceph_osd_reqid)];
2202 char trace[sizeof(struct ceph_blkin_trace_info)];
2204 struct ceph_timespec mtime;
2206 struct ceph_pg pgid;
2207 void *oloc, *oid, *tail;
2208 int oloc_len, oid_len, tail_len;
2212 * Pre-luminous OSD -- reencode v8 into v4 using @head
2213 * as a temporary buffer. Encode the raw PG; the rest
2214 * is just a matter of moving oloc, oid and tail blobs
2217 memcpy(&head, p, sizeof(head));
2221 p += CEPH_ENCODING_START_BLK_LEN;
2222 pgid.pool = ceph_decode_64(&p);
2223 p += 4 + 4; /* preferred, key len */
2224 len = ceph_decode_32(&p);
2225 p += len; /* nspace */
2226 oloc_len = p - oloc;
2228 oid = p;
2229 len = ceph_decode_32(&p);
2230 p += len;
2231 oid_len = p - oid;
2233 tail = p;
2234 tail_len = partial_end - p;
2236 p = msg->front.iov_base;
2237 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2238 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2239 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2240 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2242 /* reassert_version */
2243 memset(p, 0, sizeof(struct ceph_eversion));
2244 p += sizeof(struct ceph_eversion);
2246 /* oloc */
2247 memmove(p, oloc, oloc_len);
2248 p += oloc_len;
2250 pgid.seed = le32_to_cpu(head.hash);
2251 encode_pgid(&p, &pgid); /* raw pg */
2253 /* oid */
2254 memmove(p, oid, oid_len);
2255 p += oid_len;
2257 /* tail -- ops, snapid, snapc, retry_attempt */
2259 memmove(p, tail, tail_len);
2260 p += tail_len;
2262 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2266 msg->front.iov_len = p - msg->front.iov_base;
2267 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2269 dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2270 le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2271 le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2272 le16_to_cpu(msg->hdr.version));
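/*
 * Editorial summary of the two front layouts produced above:
 *
 *   MOSDOp v8 (luminous+): spgid, raw hash, map epoch, flags, reqid,
 *	blkin trace, client_inc, mtime, oloc, oid, ops, snapid, snap
 *	context, retry_attempt, peer features
 *   MOSDOp v4 (pre-luminous): client_inc, epoch, flags, mtime,
 *	reassert_version, oloc, raw pgid, oid, then the same tail
 *	(ops, snapid, snap context, retry_attempt)
 *
 * encode_request_partial() always emits v8; encode_request_finish()
 * then either appends the feature bits or rearranges the blobs into
 * the v4 layout, depending on the peer.
 */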
2276 * @req has to be assigned a tid and registered.
2278 static void send_request(struct ceph_osd_request *req)
2280 struct ceph_osd *osd = req->r_osd;
2282 verify_osd_locked(osd);
2283 WARN_ON(osd->o_osd != req->r_t.osd);
2286 if (should_plug_request(req))
2290 * We may have a previously queued request message hanging
2291 * around. Cancel it to avoid corrupting the msgr.
2294 ceph_msg_revoke(req->r_request);
2296 req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2297 if (req->r_attempts)
2298 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2299 else
2300 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2302 encode_request_partial(req, req->r_request);
2304 dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2305 __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2306 req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2307 req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2310 req->r_t.paused = false;
2311 req->r_stamp = jiffies;
2314 req->r_sent = osd->o_incarnation;
2315 req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2316 ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2319 static void maybe_request_map(struct ceph_osd_client *osdc)
2321 bool continuous = false;
2323 verify_osdc_locked(osdc);
2324 WARN_ON(!osdc->osdmap->epoch);
2326 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2327 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2328 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2329 dout("%s osdc %p continuous\n", __func__, osdc);
2330 continuous = true;
2331 } else {
2332 dout("%s osdc %p onetime\n", __func__, osdc);
2333 }
2335 if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2336 osdc->osdmap->epoch + 1, continuous))
2337 ceph_monc_renew_subs(&osdc->client->monc);
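/*
 * Editorial note: a "continuous" subscription keeps osdmap updates
 * flowing from the monitor until cancelled, which is needed while a
 * FULL/PAUSERD/PAUSEWR flag is set because only a later map can clear
 * it; a "onetime" subscription just fetches the next epoch.
 */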
2340 static void complete_request(struct ceph_osd_request *req, int err);
2341 static void send_map_check(struct ceph_osd_request *req);
2343 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2345 struct ceph_osd_client *osdc = req->r_osdc;
2346 struct ceph_osd *osd;
2347 enum calc_target_result ct_res;
2349 bool need_send = false;
2350 bool promoted = false;
2352 WARN_ON(req->r_tid);
2353 dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2355 again:
2356 ct_res = calc_target(osdc, &req->r_t, false);
2357 if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2358 goto promote;
2360 osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2361 if (IS_ERR(osd)) {
2362 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2363 goto promote;
2364 }
2366 if (osdc->abort_err) {
2367 dout("req %p abort_err %d\n", req, osdc->abort_err);
2368 err = osdc->abort_err;
2369 } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2370 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2371 osdc->epoch_barrier);
2372 req->r_t.paused = true;
2373 maybe_request_map(osdc);
2374 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2375 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2376 dout("req %p pausewr\n", req);
2377 req->r_t.paused = true;
2378 maybe_request_map(osdc);
2379 } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2380 ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2381 dout("req %p pauserd\n", req);
2382 req->r_t.paused = true;
2383 maybe_request_map(osdc);
2384 } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2385 !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2386 CEPH_OSD_FLAG_FULL_FORCE)) &&
2387 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2388 pool_full(osdc, req->r_t.base_oloc.pool))) {
2389 dout("req %p full/pool_full\n", req);
2390 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2391 err = -ENOSPC;
2392 } else {
2393 pr_warn_ratelimited("FULL or reached pool quota\n");
2394 req->r_t.paused = true;
2395 maybe_request_map(osdc);
2396 }
2397 } else if (!osd_homeless(osd)) {
2398 need_send = true;
2399 } else {
2400 maybe_request_map(osdc);
2401 }
2403 mutex_lock(&osd->lock);
2405 * Assign the tid atomically with send_request() to protect
2406 * multiple writes to the same object from racing with each
2407 * other, resulting in out of order ops on the OSDs.
2409 req->r_tid = atomic64_inc_return(&osdc->last_tid);
2410 link_request(osd, req);
2411 if (need_send)
2412 send_request(req);
2413 else if (err)
2414 complete_request(req, err);
2415 mutex_unlock(&osd->lock);
2417 if (!err && ct_res == CALC_TARGET_POOL_DNE)
2418 send_map_check(req);
2420 if (promoted)
2421 downgrade_write(&osdc->lock);
2422 return;
2424 promote:
2425 up_read(&osdc->lock);
2426 down_write(&osdc->lock);
2427 wrlocked = true;
2428 promoted = true;
2429 goto again;
2430 }
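/*
 * Editorial note: the promote path upgrades the shared osdc->lock to
 * exclusive.  An rwsem cannot be upgraded atomically, so the lock is
 * released and retaken for write and the whole target calculation is
 * redone, since the osdmap may have changed in the unlocked window.
 */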
2432 static void account_request(struct ceph_osd_request *req)
2434 struct ceph_osd_client *osdc = req->r_osdc;
2436 WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2437 WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2439 req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2440 req->r_flags |= osdc->client->options->osd_req_flags;
2441 atomic_inc(&osdc->num_requests);
2443 req->r_start_stamp = jiffies;
2444 req->r_start_latency = ktime_get();
2447 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2449 ceph_osdc_get_request(req);
2450 account_request(req);
2451 __submit_request(req, wrlocked);
2454 static void finish_request(struct ceph_osd_request *req)
2456 struct ceph_osd_client *osdc = req->r_osdc;
2458 WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2459 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2461 req->r_end_latency = ktime_get();
2464 unlink_request(req->r_osd, req);
2465 atomic_dec(&osdc->num_requests);
2468 * If an OSD has failed or returned and a request has been sent
2469 * twice, it's possible to get a reply and end up here while the
2470 * request message is queued for delivery. We will ignore the
2471 * reply, so not a big deal, but better to try and catch it.
2473 ceph_msg_revoke(req->r_request);
2474 ceph_msg_revoke_incoming(req->r_reply);
2477 static void __complete_request(struct ceph_osd_request *req)
2479 dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2480 req->r_tid, req->r_callback, req->r_result);
2482 if (req->r_callback)
2483 req->r_callback(req);
2484 complete_all(&req->r_completion);
2485 ceph_osdc_put_request(req);
2488 static void complete_request_workfn(struct work_struct *work)
2490 struct ceph_osd_request *req =
2491 container_of(work, struct ceph_osd_request, r_complete_work);
2493 __complete_request(req);
2497 * This is open-coded in handle_reply().
2499 static void complete_request(struct ceph_osd_request *req, int err)
2501 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2503 req->r_result = err;
2504 finish_request(req);
2506 INIT_WORK(&req->r_complete_work, complete_request_workfn);
2507 queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2510 static void cancel_map_check(struct ceph_osd_request *req)
2512 struct ceph_osd_client *osdc = req->r_osdc;
2513 struct ceph_osd_request *lookup_req;
2515 verify_osdc_wrlocked(osdc);
2517 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2518 if (!lookup_req)
2519 return;
2521 WARN_ON(lookup_req != req);
2522 erase_request_mc(&osdc->map_checks, req);
2523 ceph_osdc_put_request(req);
2526 static void cancel_request(struct ceph_osd_request *req)
2528 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2530 cancel_map_check(req);
2531 finish_request(req);
2532 complete_all(&req->r_completion);
2533 ceph_osdc_put_request(req);
2536 static void abort_request(struct ceph_osd_request *req, int err)
2538 dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2540 cancel_map_check(req);
2541 complete_request(req, err);
2544 static int abort_fn(struct ceph_osd_request *req, void *arg)
2546 int err = *(int *)arg;
2548 abort_request(req, err);
2549 return 0; /* continue iteration */
2553 * Abort all in-flight requests with @err and arrange for all future
2554 * requests to be failed immediately.
2556 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2558 dout("%s osdc %p err %d\n", __func__, osdc, err);
2559 down_write(&osdc->lock);
2560 for_each_request(osdc, abort_fn, &err);
2561 osdc->abort_err = err;
2562 up_write(&osdc->lock);
2564 EXPORT_SYMBOL(ceph_osdc_abort_requests);
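/*
 * Illustrative usage (editorial; the error value here is only an
 * example): a client being torn down might fail everything in flight
 * and make future submissions fail fast with
 *
 *	ceph_osdc_abort_requests(osdc, -ENOTCONN);
 *
 * and later re-enable submissions with
 *
 *	ceph_osdc_clear_abort_err(osdc);
 */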
2566 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2568 down_write(&osdc->lock);
2569 osdc->abort_err = 0;
2570 up_write(&osdc->lock);
2572 EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
2574 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2576 if (likely(eb > osdc->epoch_barrier)) {
2577 dout("updating epoch_barrier from %u to %u\n",
2578 osdc->epoch_barrier, eb);
2579 osdc->epoch_barrier = eb;
2580 /* Request map if we're not to the barrier yet */
2581 if (eb > osdc->osdmap->epoch)
2582 maybe_request_map(osdc);
2586 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2588 down_read(&osdc->lock);
2589 if (unlikely(eb > osdc->epoch_barrier)) {
2590 up_read(&osdc->lock);
2591 down_write(&osdc->lock);
2592 update_epoch_barrier(osdc, eb);
2593 up_write(&osdc->lock);
2595 up_read(&osdc->lock);
2598 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
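/*
 * Editorial note: this is an optimistic double-checked update -- the
 * common case (barrier already current) is served under the shared
 * lock, and only when an update looks necessary is the lock dropped
 * and retaken exclusively; update_epoch_barrier() then re-tests @eb
 * because another thread may have raised the barrier in the window.
 */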
2601 * We can end up releasing caps as a result of abort_request().
2602 * In that case, we probably want to ensure that the cap release message
2603 * has an updated epoch barrier in it, so set the epoch barrier prior to
2604 * aborting the first request.
2606 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2608 struct ceph_osd_client *osdc = req->r_osdc;
2609 bool *victims = arg;
2611 if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2612 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2613 pool_full(osdc, req->r_t.base_oloc.pool))) {
2614 if (!*victims) {
2615 update_epoch_barrier(osdc, osdc->osdmap->epoch);
2616 *victims = true;
2617 }
2618 abort_request(req, -ENOSPC);
2621 return 0; /* continue iteration */
2625 * Drop all pending requests that are stalled waiting on a full condition to
2626 * clear, and complete them with ENOSPC as the return code. Set the
2627 * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2628 * aborted.
2629 */
2630 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2632 bool victims = false;
2634 if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2635 (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2636 for_each_request(osdc, abort_on_full_fn, &victims);
2639 static void check_pool_dne(struct ceph_osd_request *req)
2641 struct ceph_osd_client *osdc = req->r_osdc;
2642 struct ceph_osdmap *map = osdc->osdmap;
2644 verify_osdc_wrlocked(osdc);
2645 WARN_ON(!map->epoch);
2647 if (req->r_attempts) {
2649 * We sent a request earlier, which means that
2650 * previously the pool existed, and now it does not
2651 * (i.e., it was deleted).
2653 req->r_map_dne_bound = map->epoch;
2654 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2655 req->r_tid);
2656 } else {
2657 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2658 req, req->r_tid, req->r_map_dne_bound, map->epoch);
2659 }
2661 if (req->r_map_dne_bound) {
2662 if (map->epoch >= req->r_map_dne_bound) {
2663 /* we had a new enough map */
2664 pr_info_ratelimited("tid %llu pool does not exist\n",
2665 req->r_tid);
2666 complete_request(req, -ENOENT);
2669 send_map_check(req);
2673 static void map_check_cb(struct ceph_mon_generic_request *greq)
2675 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2676 struct ceph_osd_request *req;
2677 u64 tid = greq->private_data;
2679 WARN_ON(greq->result || !greq->u.newest);
2681 down_write(&osdc->lock);
2682 req = lookup_request_mc(&osdc->map_checks, tid);
2684 dout("%s tid %llu dne\n", __func__, tid);
2688 dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2689 req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2690 if (!req->r_map_dne_bound)
2691 req->r_map_dne_bound = greq->u.newest;
2692 erase_request_mc(&osdc->map_checks, req);
2693 check_pool_dne(req);
2695 ceph_osdc_put_request(req);
2697 up_write(&osdc->lock);
2700 static void send_map_check(struct ceph_osd_request *req)
2702 struct ceph_osd_client *osdc = req->r_osdc;
2703 struct ceph_osd_request *lookup_req;
2704 int ret;
2706 verify_osdc_wrlocked(osdc);
2708 lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2709 if (lookup_req) {
2710 WARN_ON(lookup_req != req);
2711 return;
2712 }
2714 ceph_osdc_get_request(req);
2715 insert_request_mc(&osdc->map_checks, req);
2716 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2717 map_check_cb, req->r_tid);
2722 * lingering requests, watch/notify v2 infrastructure
2724 static void linger_release(struct kref *kref)
2726 struct ceph_osd_linger_request *lreq =
2727 container_of(kref, struct ceph_osd_linger_request, kref);
2729 dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2730 lreq->reg_req, lreq->ping_req);
2731 WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2732 WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2733 WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2734 WARN_ON(!list_empty(&lreq->scan_item));
2735 WARN_ON(!list_empty(&lreq->pending_lworks));
2738 if (lreq->reg_req)
2739 ceph_osdc_put_request(lreq->reg_req);
2740 if (lreq->ping_req)
2741 ceph_osdc_put_request(lreq->ping_req);
2742 target_destroy(&lreq->t);
2743 kfree(lreq);
2744 }
2746 static void linger_put(struct ceph_osd_linger_request *lreq)
2749 kref_put(&lreq->kref, linger_release);
2752 static struct ceph_osd_linger_request *
2753 linger_get(struct ceph_osd_linger_request *lreq)
2755 kref_get(&lreq->kref);
2759 static struct ceph_osd_linger_request *
2760 linger_alloc(struct ceph_osd_client *osdc)
2762 struct ceph_osd_linger_request *lreq;
2764 lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2765 if (!lreq)
2766 return NULL;
2768 kref_init(&lreq->kref);
2769 mutex_init(&lreq->lock);
2770 RB_CLEAR_NODE(&lreq->node);
2771 RB_CLEAR_NODE(&lreq->osdc_node);
2772 RB_CLEAR_NODE(&lreq->mc_node);
2773 INIT_LIST_HEAD(&lreq->scan_item);
2774 INIT_LIST_HEAD(&lreq->pending_lworks);
2775 init_completion(&lreq->reg_commit_wait);
2776 init_completion(&lreq->notify_finish_wait);
2778 lreq->osdc = osdc;
2779 target_init(&lreq->t);
2781 dout("%s lreq %p\n", __func__, lreq);
2782 return lreq;
2783 }
2785 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2786 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2787 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
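/*
 * Editorial note: DEFINE_RB_FUNCS(name, type, keyfld, nodefld) (see
 * linux/ceph/libceph.h) generates lookup_<name>(), insert_<name>()
 * and erase_<name>() helpers keyed on @keyfld, while
 * DEFINE_RB_INSDEL_FUNCS() omits the lookup.  A linger request can
 * therefore sit in three trees at once, all keyed by linger_id: its
 * OSD's (node), the client-wide set (osdc_node) and the map-check
 * set (mc_node).
 */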
2790 * Create linger request <-> OSD session relation.
2792 * @lreq has to be registered, @osd may be homeless.
2794 static void link_linger(struct ceph_osd *osd,
2795 struct ceph_osd_linger_request *lreq)
2797 verify_osd_locked(osd);
2798 WARN_ON(!lreq->linger_id || lreq->osd);
2799 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2800 osd->o_osd, lreq, lreq->linger_id);
2802 if (!osd_homeless(osd))
2803 __remove_osd_from_lru(osd);
2805 atomic_inc(&osd->o_osdc->num_homeless);
2808 insert_linger(&osd->o_linger_requests, lreq);
2812 static void unlink_linger(struct ceph_osd *osd,
2813 struct ceph_osd_linger_request *lreq)
2815 verify_osd_locked(osd);
2816 WARN_ON(lreq->osd != osd);
2817 dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2818 osd->o_osd, lreq, lreq->linger_id);
2821 erase_linger(&osd->o_linger_requests, lreq);
2824 if (!osd_homeless(osd))
2825 maybe_move_osd_to_lru(osd);
2827 atomic_dec(&osd->o_osdc->num_homeless);
2830 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2832 verify_osdc_locked(lreq->osdc);
2834 return !RB_EMPTY_NODE(&lreq->osdc_node);
2837 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2839 struct ceph_osd_client *osdc = lreq->osdc;
2842 down_read(&osdc->lock);
2843 registered = __linger_registered(lreq);
2844 up_read(&osdc->lock);
2849 static void linger_register(struct ceph_osd_linger_request *lreq)
2851 struct ceph_osd_client *osdc = lreq->osdc;
2853 verify_osdc_wrlocked(osdc);
2854 WARN_ON(lreq->linger_id);
2857 lreq->linger_id = ++osdc->last_linger_id;
2858 insert_linger_osdc(&osdc->linger_requests, lreq);
2861 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2863 struct ceph_osd_client *osdc = lreq->osdc;
2865 verify_osdc_wrlocked(osdc);
2867 erase_linger_osdc(&osdc->linger_requests, lreq);
2871 static void cancel_linger_request(struct ceph_osd_request *req)
2873 struct ceph_osd_linger_request *lreq = req->r_priv;
2875 WARN_ON(!req->r_linger);
2876 cancel_request(req);
2880 struct linger_work {
2881 struct work_struct work;
2882 struct ceph_osd_linger_request *lreq;
2883 struct list_head pending_item;
2884 unsigned long queued_stamp;
2886 union {
2887 struct {
2888 u64 notify_id;
2889 u64 notifier_id;
2890 void *payload; /* points into @msg front */
2891 size_t payload_len;
2893 struct ceph_msg *msg; /* for ceph_msg_put() */
2894 } notify;
2895 struct {
2896 int err;
2897 } error;
2898 };
2899 };
2901 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2902 work_func_t workfn)
2903 {
2904 struct linger_work *lwork;
2906 lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2910 INIT_WORK(&lwork->work, workfn);
2911 INIT_LIST_HEAD(&lwork->pending_item);
2912 lwork->lreq = linger_get(lreq);
2917 static void lwork_free(struct linger_work *lwork)
2919 struct ceph_osd_linger_request *lreq = lwork->lreq;
2921 mutex_lock(&lreq->lock);
2922 list_del(&lwork->pending_item);
2923 mutex_unlock(&lreq->lock);
2929 static void lwork_queue(struct linger_work *lwork)
2931 struct ceph_osd_linger_request *lreq = lwork->lreq;
2932 struct ceph_osd_client *osdc = lreq->osdc;
2934 verify_lreq_locked(lreq);
2935 WARN_ON(!list_empty(&lwork->pending_item));
2937 lwork->queued_stamp = jiffies;
2938 list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2939 queue_work(osdc->notify_wq, &lwork->work);
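/*
 * Editorial note on the linger work item lifecycle: lwork_alloc()
 * takes a ref on the lreq, lwork_queue() stamps the item and queues
 * it on osdc->notify_wq, the work function (do_watch_notify() or
 * do_watch_error()) invokes the user callback, and lwork_free()
 * unlinks the item and drops the lreq ref.
 */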
2942 static void do_watch_notify(struct work_struct *w)
2944 struct linger_work *lwork = container_of(w, struct linger_work, work);
2945 struct ceph_osd_linger_request *lreq = lwork->lreq;
2947 if (!linger_registered(lreq)) {
2948 dout("%s lreq %p not registered\n", __func__, lreq);
2952 WARN_ON(!lreq->is_watch);
2953 dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2954 __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2955 lwork->notify.payload_len);
2956 lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2957 lwork->notify.notifier_id, lwork->notify.payload,
2958 lwork->notify.payload_len);
2961 ceph_msg_put(lwork->notify.msg);
2965 static void do_watch_error(struct work_struct *w)
2967 struct linger_work *lwork = container_of(w, struct linger_work, work);
2968 struct ceph_osd_linger_request *lreq = lwork->lreq;
2970 if (!linger_registered(lreq)) {
2971 dout("%s lreq %p not registered\n", __func__, lreq);
2975 dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2976 lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2982 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2984 struct linger_work *lwork;
2986 lwork = lwork_alloc(lreq, do_watch_error);
2988 pr_err("failed to allocate error-lwork\n");
2992 lwork->error.err = lreq->last_error;
2996 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2999 if (!completion_done(&lreq->reg_commit_wait)) {
3000 lreq->reg_commit_error = (result <= 0 ? result : 0);
3001 complete_all(&lreq->reg_commit_wait);
3005 static void linger_commit_cb(struct ceph_osd_request *req)
3007 struct ceph_osd_linger_request *lreq = req->r_priv;
3009 mutex_lock(&lreq->lock);
3010 dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
3011 lreq->linger_id, req->r_result);
3012 linger_reg_commit_complete(lreq, req->r_result);
3013 lreq->committed = true;
3015 if (!lreq->is_watch) {
3016 struct ceph_osd_data *osd_data =
3017 osd_req_op_data(req, 0, notify, response_data);
3018 void *p = page_address(osd_data->pages[0]);
3020 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
3021 osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
3023 /* make note of the notify_id */
3024 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
3025 lreq->notify_id = ceph_decode_64(&p);
3026 dout("lreq %p notify_id %llu\n", lreq,
3029 dout("lreq %p no notify_id\n", lreq);
3033 mutex_unlock(&lreq->lock);
3037 static int normalize_watch_error(int err)
3040 * Translate ENOENT -> ENOTCONN so that a delete->disconnection
3041 * notification and a failure to reconnect because we raced with
3042 * the delete appear the same to the user.
3043 */
3044 if (err == -ENOENT)
3045 err = -ENOTCONN;
3047 return err;
3048 }
3050 static void linger_reconnect_cb(struct ceph_osd_request *req)
3052 struct ceph_osd_linger_request *lreq = req->r_priv;
3054 mutex_lock(&lreq->lock);
3055 dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
3056 lreq, lreq->linger_id, req->r_result, lreq->last_error);
3057 if (req->r_result < 0) {
3058 if (!lreq->last_error) {
3059 lreq->last_error = normalize_watch_error(req->r_result);
3060 queue_watch_error(lreq);
3064 mutex_unlock(&lreq->lock);
3068 static void send_linger(struct ceph_osd_linger_request *lreq)
3070 struct ceph_osd_request *req = lreq->reg_req;
3071 struct ceph_osd_req_op *op = &req->r_ops[0];
3073 verify_osdc_wrlocked(req->r_osdc);
3074 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3077 cancel_linger_request(req);
3079 request_reinit(req);
3080 ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3081 ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3082 req->r_flags = lreq->t.flags;
3083 req->r_mtime = lreq->mtime;
3085 mutex_lock(&lreq->lock);
3086 if (lreq->is_watch && lreq->committed) {
3087 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3088 op->watch.cookie != lreq->linger_id);
3089 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
3090 op->watch.gen = ++lreq->register_gen;
3091 dout("lreq %p reconnect register_gen %u\n", lreq,
3093 req->r_callback = linger_reconnect_cb;
3095 if (!lreq->is_watch)
3096 lreq->notify_id = 0;
3098 WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
3099 dout("lreq %p register\n", lreq);
3100 req->r_callback = linger_commit_cb;
3102 mutex_unlock(&lreq->lock);
3104 req->r_priv = linger_get(lreq);
3105 req->r_linger = true;
3107 submit_request(req, true);
3110 static void linger_ping_cb(struct ceph_osd_request *req)
3112 struct ceph_osd_linger_request *lreq = req->r_priv;
3114 mutex_lock(&lreq->lock);
3115 dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3116 __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3118 if (lreq->register_gen == req->r_ops[0].watch.gen) {
3119 if (!req->r_result) {
3120 lreq->watch_valid_thru = lreq->ping_sent;
3121 } else if (!lreq->last_error) {
3122 lreq->last_error = normalize_watch_error(req->r_result);
3123 queue_watch_error(lreq);
3126 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3127 lreq->register_gen, req->r_ops[0].watch.gen);
3130 mutex_unlock(&lreq->lock);
3134 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3136 struct ceph_osd_client *osdc = lreq->osdc;
3137 struct ceph_osd_request *req = lreq->ping_req;
3138 struct ceph_osd_req_op *op = &req->r_ops[0];
3140 if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3141 dout("%s PAUSERD\n", __func__);
3145 lreq->ping_sent = jiffies;
3146 dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3147 __func__, lreq, lreq->linger_id, lreq->ping_sent,
3148 lreq->register_gen);
3151 cancel_linger_request(req);
3153 request_reinit(req);
3154 target_copy(&req->r_t, &lreq->t);
3156 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3157 op->watch.cookie != lreq->linger_id ||
3158 op->watch.op != CEPH_OSD_WATCH_OP_PING);
3159 op->watch.gen = lreq->register_gen;
3160 req->r_callback = linger_ping_cb;
3161 req->r_priv = linger_get(lreq);
3162 req->r_linger = true;
3164 ceph_osdc_get_request(req);
3165 account_request(req);
3166 req->r_tid = atomic64_inc_return(&osdc->last_tid);
3167 link_request(lreq->osd, req);
3171 static void linger_submit(struct ceph_osd_linger_request *lreq)
3173 struct ceph_osd_client *osdc = lreq->osdc;
3174 struct ceph_osd *osd;
3176 down_write(&osdc->lock);
3177 linger_register(lreq);
3178 if (lreq->is_watch) {
3179 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
3180 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
3182 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
3185 calc_target(osdc, &lreq->t, false);
3186 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3187 link_linger(osd, lreq);
3190 up_write(&osdc->lock);
3193 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3195 struct ceph_osd_client *osdc = lreq->osdc;
3196 struct ceph_osd_linger_request *lookup_lreq;
3198 verify_osdc_wrlocked(osdc);
3200 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3201 lreq->linger_id);
3202 if (!lookup_lreq)
3203 return;
3205 WARN_ON(lookup_lreq != lreq);
3206 erase_linger_mc(&osdc->linger_map_checks, lreq);
3207 linger_put(lreq);
3208 }
3211 * @lreq has to be both registered and linked.
3213 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3215 if (lreq->is_watch && lreq->ping_req->r_osd)
3216 cancel_linger_request(lreq->ping_req);
3217 if (lreq->reg_req->r_osd)
3218 cancel_linger_request(lreq->reg_req);
3219 cancel_linger_map_check(lreq);
3220 unlink_linger(lreq->osd, lreq);
3221 linger_unregister(lreq);
3224 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3226 struct ceph_osd_client *osdc = lreq->osdc;
3228 down_write(&osdc->lock);
3229 if (__linger_registered(lreq))
3230 __linger_cancel(lreq);
3231 up_write(&osdc->lock);
3234 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3236 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3238 struct ceph_osd_client *osdc = lreq->osdc;
3239 struct ceph_osdmap *map = osdc->osdmap;
3241 verify_osdc_wrlocked(osdc);
3242 WARN_ON(!map->epoch);
3244 if (lreq->register_gen) {
3245 lreq->map_dne_bound = map->epoch;
3246 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3247 lreq, lreq->linger_id);
3249 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3250 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3254 if (lreq->map_dne_bound) {
3255 if (map->epoch >= lreq->map_dne_bound) {
3256 /* we had a new enough map */
3257 pr_info("linger_id %llu pool does not exist\n",
3258 lreq->linger_id);
3259 linger_reg_commit_complete(lreq, -ENOENT);
3260 __linger_cancel(lreq);
3263 send_linger_map_check(lreq);
3267 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3269 struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3270 struct ceph_osd_linger_request *lreq;
3271 u64 linger_id = greq->private_data;
3273 WARN_ON(greq->result || !greq->u.newest);
3275 down_write(&osdc->lock);
3276 lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3278 dout("%s linger_id %llu dne\n", __func__, linger_id);
3282 dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3283 __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3285 if (!lreq->map_dne_bound)
3286 lreq->map_dne_bound = greq->u.newest;
3287 erase_linger_mc(&osdc->linger_map_checks, lreq);
3288 check_linger_pool_dne(lreq);
3292 up_write(&osdc->lock);
3295 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3297 struct ceph_osd_client *osdc = lreq->osdc;
3298 struct ceph_osd_linger_request *lookup_lreq;
3299 int ret;
3301 verify_osdc_wrlocked(osdc);
3303 lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3304 lreq->linger_id);
3305 if (lookup_lreq) {
3306 WARN_ON(lookup_lreq != lreq);
3307 return;
3308 }
3310 linger_get(lreq);
3311 insert_linger_mc(&osdc->linger_map_checks, lreq);
3312 ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3313 linger_map_check_cb, lreq->linger_id);
3317 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3321 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3322 ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3323 return ret ?: lreq->reg_commit_error;
3326 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3330 dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3331 ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3332 return ret ?: lreq->notify_finish_error;
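/*
 * Editorial note: these waiters pair with linger_commit_cb() and the
 * CEPH_WATCH_EVENT_NOTIFY_COMPLETE branch of handle_watch_notify():
 * registration completes reg_commit_wait, and a notify additionally
 * waits on notify_finish_wait for the completion event that carries
 * the aggregate return code from the notified watchers.
 */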
3336 * Timeout callback, called every N seconds. When one or more OSD
3337 * requests have been active for more than N seconds, we send a keepalive
3338 * (tag + timestamp) to its OSD to ensure any communications channel
3339 * reset is detected.
3341 static void handle_timeout(struct work_struct *work)
3343 struct ceph_osd_client *osdc =
3344 container_of(work, struct ceph_osd_client, timeout_work.work);
3345 struct ceph_options *opts = osdc->client->options;
3346 unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3347 unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3348 LIST_HEAD(slow_osds);
3349 struct rb_node *n, *p;
3351 dout("%s osdc %p\n", __func__, osdc);
3352 down_write(&osdc->lock);
3355 * ping osds that are a bit slow. this ensures that if there
3356 * is a break in the TCP connection we will notice, and reopen
3357 * a connection with that osd (from the fault callback).
3359 for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3360 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3363 for (p = rb_first(&osd->o_requests); p; ) {
3364 struct ceph_osd_request *req =
3365 rb_entry(p, struct ceph_osd_request, r_node);
3367 p = rb_next(p); /* abort_request() */
3369 if (time_before(req->r_stamp, cutoff)) {
3370 dout(" req %p tid %llu on osd%d is laggy\n",
3371 req, req->r_tid, osd->o_osd);
3374 if (opts->osd_request_timeout &&
3375 time_before(req->r_start_stamp, expiry_cutoff)) {
3376 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3377 req->r_tid, osd->o_osd);
3378 abort_request(req, -ETIMEDOUT);
3381 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3382 struct ceph_osd_linger_request *lreq =
3383 rb_entry(p, struct ceph_osd_linger_request, node);
3385 dout(" lreq %p linger_id %llu is served by osd%d\n",
3386 lreq, lreq->linger_id, osd->o_osd);
3389 mutex_lock(&lreq->lock);
3390 if (lreq->is_watch && lreq->committed && !lreq->last_error)
3391 send_linger_ping(lreq);
3392 mutex_unlock(&lreq->lock);
3396 list_move_tail(&osd->o_keepalive_item, &slow_osds);
3399 if (opts->osd_request_timeout) {
3400 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3401 struct ceph_osd_request *req =
3402 rb_entry(p, struct ceph_osd_request, r_node);
3404 p = rb_next(p); /* abort_request() */
3406 if (time_before(req->r_start_stamp, expiry_cutoff)) {
3407 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3408 req->r_tid, osdc->homeless_osd.o_osd);
3409 abort_request(req, -ETIMEDOUT);
3414 if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3415 maybe_request_map(osdc);
3417 while (!list_empty(&slow_osds)) {
3418 struct ceph_osd *osd = list_first_entry(&slow_osds,
3421 list_del_init(&osd->o_keepalive_item);
3422 ceph_con_keepalive(&osd->o_con);
3425 up_write(&osdc->lock);
3426 schedule_delayed_work(&osdc->timeout_work,
3427 osdc->client->options->osd_keepalive_timeout);
3430 static void handle_osds_timeout(struct work_struct *work)
3432 struct ceph_osd_client *osdc =
3433 container_of(work, struct ceph_osd_client,
3434 osds_timeout_work.work);
3435 unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3436 struct ceph_osd *osd, *nosd;
3438 dout("%s osdc %p\n", __func__, osdc);
3439 down_write(&osdc->lock);
3440 list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3441 if (time_before(jiffies, osd->lru_ttl))
3444 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3445 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3449 up_write(&osdc->lock);
3450 schedule_delayed_work(&osdc->osds_timeout_work,
3451 round_jiffies_relative(delay));
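/*
 * Editorial example (default value assumed): with osd_idle_ttl at its
 * usual 60 second default this reaper runs roughly every 15 seconds
 * (ttl / 4) and closes sessions whose lru_ttl has expired and which
 * hold no requests or linger requests.
 */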
3454 static int ceph_oloc_decode(void **p, void *end,
3455 struct ceph_object_locator *oloc)
3457 u8 struct_v, struct_cv;
3462 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3463 struct_v = ceph_decode_8(p);
3464 struct_cv = ceph_decode_8(p);
3465 if (struct_v < 3) {
3466 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3467 struct_v, struct_cv);
3468 goto e_inval;
3469 }
3470 if (struct_cv > 6) {
3471 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3472 struct_v, struct_cv);
3473 goto e_inval;
3474 }
3475 len = ceph_decode_32(p);
3476 ceph_decode_need(p, end, len, e_inval);
3477 struct_end = *p + len;
3479 oloc->pool = ceph_decode_64(p);
3480 *p += 4; /* skip preferred */
3482 len = ceph_decode_32(p);
3483 if (len > 0) {
3484 pr_warn("ceph_object_locator::key is set\n");
3485 goto e_inval;
3486 }
3488 if (struct_v >= 5) {
3489 bool changed = false;
3491 len = ceph_decode_32(p);
3493 ceph_decode_need(p, end, len, e_inval);
3494 if (!oloc->pool_ns ||
3495 ceph_compare_string(oloc->pool_ns, *p, len))
3503 /* redirect changes namespace */
3504 pr_warn("ceph_object_locator::nspace is changed\n");
3509 if (struct_v >= 6) {
3510 s64 hash = ceph_decode_64(p);
3511 if (hash != -1) {
3512 pr_warn("ceph_object_locator::hash is set\n");
3513 goto e_inval;
3514 }
3527 static int ceph_redirect_decode(void **p, void *end,
3528 struct ceph_request_redirect *redir)
3530 u8 struct_v, struct_cv;
3535 ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3536 struct_v = ceph_decode_8(p);
3537 struct_cv = ceph_decode_8(p);
3538 if (struct_cv > 1) {
3539 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3540 struct_v, struct_cv);
3543 len = ceph_decode_32(p);
3544 ceph_decode_need(p, end, len, e_inval);
3545 struct_end = *p + len;
3547 ret = ceph_oloc_decode(p, end, &redir->oloc);
3551 len = ceph_decode_32(p);
3552 if (len > 0) {
3553 pr_warn("ceph_request_redirect::object_name is set\n");
3554 goto e_inval;
3555 }
3567 struct MOSDOpReply {
3568 struct ceph_pg pgid;
3569 u64 flags;
3570 int result;
3571 u32 epoch;
3572 int num_ops;
3573 u32 outdata_len[CEPH_OSD_MAX_OPS];
3574 s32 rval[CEPH_OSD_MAX_OPS];
3575 int retry_attempt;
3576 struct ceph_eversion replay_version;
3577 u64 user_version;
3578 struct ceph_request_redirect redirect;
3579 };
3581 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3583 void *p = msg->front.iov_base;
3584 void *const end = p + msg->front.iov_len;
3585 u16 version = le16_to_cpu(msg->hdr.version);
3586 struct ceph_eversion bad_replay_version;
3592 ceph_decode_32_safe(&p, end, len, e_inval);
3593 ceph_decode_need(&p, end, len, e_inval);
3594 p += len; /* skip oid */
3596 ret = ceph_decode_pgid(&p, end, &m->pgid);
3600 ceph_decode_64_safe(&p, end, m->flags, e_inval);
3601 ceph_decode_32_safe(&p, end, m->result, e_inval);
3602 ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3603 memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3604 p += sizeof(bad_replay_version);
3605 ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3607 ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3608 if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3611 ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3613 for (i = 0; i < m->num_ops; i++) {
3614 struct ceph_osd_op *op = p;
3616 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3620 ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3621 for (i = 0; i < m->num_ops; i++)
3622 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3625 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3626 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3627 p += sizeof(m->replay_version);
3628 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3630 m->replay_version = bad_replay_version; /* struct */
3631 m->user_version = le64_to_cpu(m->replay_version.version);
3636 ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3644 ret = ceph_redirect_decode(&p, end, &m->redirect);
3648 ceph_oloc_init(&m->redirect.oloc);
3658 * Handle MOSDOpReply. Set ->r_result and call the callback if it is
3659 * registered.
3660 */
3661 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3663 struct ceph_osd_client *osdc = osd->o_osdc;
3664 struct ceph_osd_request *req;
3665 struct MOSDOpReply m;
3666 u64 tid = le64_to_cpu(msg->hdr.tid);
3671 dout("%s msg %p tid %llu\n", __func__, msg, tid);
3673 down_read(&osdc->lock);
3674 if (!osd_registered(osd)) {
3675 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3676 goto out_unlock_osdc;
3678 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3680 mutex_lock(&osd->lock);
3681 req = lookup_request(&osd->o_requests, tid);
3683 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3684 goto out_unlock_session;
3687 m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3688 ret = decode_MOSDOpReply(msg, &m);
3689 m.redirect.oloc.pool_ns = NULL;
3691 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3696 dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3697 __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3698 m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3699 le64_to_cpu(m.replay_version.version), m.user_version);
3701 if (m.retry_attempt >= 0) {
3702 if (m.retry_attempt != req->r_attempts - 1) {
3703 dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3704 req, req->r_tid, m.retry_attempt,
3705 req->r_attempts - 1);
3706 goto out_unlock_session;
3707 }
3708 } else {
3709 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3710 }
3712 if (!ceph_oloc_empty(&m.redirect.oloc)) {
3713 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3714 m.redirect.oloc.pool);
3715 unlink_request(osd, req);
3716 mutex_unlock(&osd->lock);
3719 * Not ceph_oloc_copy() - changing pool_ns is not
3722 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3723 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3724 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3725 CEPH_OSD_FLAG_IGNORE_CACHE;
3727 __submit_request(req, false);
3728 goto out_unlock_osdc;
3731 if (m.result == -EAGAIN) {
3732 dout("req %p tid %llu EAGAIN\n", req, req->r_tid);
3733 unlink_request(osd, req);
3734 mutex_unlock(&osd->lock);
3737 * The object is missing on the replica or not (yet)
3738 * readable. Clear pgid to force a resend to the primary
3739 * via legacy_change.
3741 req->r_t.pgid.pool = 0;
3742 req->r_t.pgid.seed = 0;
3743 WARN_ON(!req->r_t.used_replica);
3744 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
3745 CEPH_OSD_FLAG_LOCALIZE_READS);
3747 __submit_request(req, false);
3748 goto out_unlock_osdc;
3751 if (m.num_ops != req->r_num_ops) {
3752 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3753 req->r_num_ops, req->r_tid);
3756 for (i = 0; i < req->r_num_ops; i++) {
3757 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3758 req->r_tid, i, m.rval[i], m.outdata_len[i]);
3759 req->r_ops[i].rval = m.rval[i];
3760 req->r_ops[i].outdata_len = m.outdata_len[i];
3761 data_len += m.outdata_len[i];
3763 if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3764 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3765 le32_to_cpu(msg->hdr.data_len), req->r_tid);
3768 dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3769 req, req->r_tid, m.result, data_len);
3772 * Since we only ever request ONDISK, we should only ever get
3773 * one (type of) reply back.
3775 WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3776 req->r_result = m.result ?: data_len;
3777 finish_request(req);
3778 mutex_unlock(&osd->lock);
3779 up_read(&osdc->lock);
3781 __complete_request(req);
3785 complete_request(req, -EIO);
3787 mutex_unlock(&osd->lock);
3789 up_read(&osdc->lock);
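/*
 * Editorial recap of handle_reply() outcomes: a redirect or -EAGAIN
 * resubmits the request with adjusted targeting, a decode or
 * consistency failure completes it with -EIO, and on success
 * ->r_result becomes m.result if non-zero, otherwise the number of
 * data bytes returned; __complete_request() runs only after both
 * locks are dropped so the callback may itself submit requests.
 */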
3792 static void set_pool_was_full(struct ceph_osd_client *osdc)
3796 for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3797 struct ceph_pg_pool_info *pi =
3798 rb_entry(n, struct ceph_pg_pool_info, node);
3800 pi->was_full = __pool_full(pi);
3804 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3806 struct ceph_pg_pool_info *pi;
3808 pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3812 return pi->was_full && !__pool_full(pi);
3815 static enum calc_target_result
3816 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3818 struct ceph_osd_client *osdc = lreq->osdc;
3819 enum calc_target_result ct_res;
3821 ct_res = calc_target(osdc, &lreq->t, true);
3822 if (ct_res == CALC_TARGET_NEED_RESEND) {
3823 struct ceph_osd *osd;
3825 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3826 if (osd != lreq->osd) {
3827 unlink_linger(lreq->osd, lreq);
3828 link_linger(osd, lreq);
3829 }
3830 }
3832 return ct_res;
3833 }
3836 * Requeue requests whose mapping to an OSD has changed.
3838 static void scan_requests(struct ceph_osd *osd,
3839 bool force_resend,
3840 bool cleared_full,
3841 bool check_pool_cleared_full,
3842 struct rb_root *need_resend,
3843 struct list_head *need_resend_linger)
3845 struct ceph_osd_client *osdc = osd->o_osdc;
3847 bool force_resend_writes;
3849 for (n = rb_first(&osd->o_linger_requests); n; ) {
3850 struct ceph_osd_linger_request *lreq =
3851 rb_entry(n, struct ceph_osd_linger_request, node);
3852 enum calc_target_result ct_res;
3854 n = rb_next(n); /* recalc_linger_target() */
3856 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3858 ct_res = recalc_linger_target(lreq);
3860 case CALC_TARGET_NO_ACTION:
3861 force_resend_writes = cleared_full ||
3862 (check_pool_cleared_full &&
3863 pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3864 if (!force_resend && !force_resend_writes)
3868 case CALC_TARGET_NEED_RESEND:
3869 cancel_linger_map_check(lreq);
3871 * scan_requests() for the previous epoch(s)
3872 * may have already added it to the list, since
3873 * it's not unlinked here.
3875 if (list_empty(&lreq->scan_item))
3876 list_add_tail(&lreq->scan_item, need_resend_linger);
3878 case CALC_TARGET_POOL_DNE:
3879 list_del_init(&lreq->scan_item);
3880 check_linger_pool_dne(lreq);
3885 for (n = rb_first(&osd->o_requests); n; ) {
3886 struct ceph_osd_request *req =
3887 rb_entry(n, struct ceph_osd_request, r_node);
3888 enum calc_target_result ct_res;
3890 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3892 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3893 ct_res = calc_target(osdc, &req->r_t, false);
3895 case CALC_TARGET_NO_ACTION:
3896 force_resend_writes = cleared_full ||
3897 (check_pool_cleared_full &&
3898 pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3899 if (!force_resend &&
3900 (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3901 !force_resend_writes))
3905 case CALC_TARGET_NEED_RESEND:
3906 cancel_map_check(req);
3907 unlink_request(osd, req);
3908 insert_request(need_resend, req);
3910 case CALC_TARGET_POOL_DNE:
3911 check_pool_dne(req);
3917 static int handle_one_map(struct ceph_osd_client *osdc,
3918 void *p, void *end, bool incremental,
3919 struct rb_root *need_resend,
3920 struct list_head *need_resend_linger)
3922 struct ceph_osdmap *newmap;
3924 bool skipped_map = false;
3927 was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3928 set_pool_was_full(osdc);
3931 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3933 newmap = ceph_osdmap_decode(&p, end);
3935 return PTR_ERR(newmap);
3937 if (newmap != osdc->osdmap) {
3939 * Preserve ->was_full before destroying the old map.
3940 * For pools that weren't in the old map, ->was_full
3941 * should be false.
3942 */
3943 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3944 struct ceph_pg_pool_info *pi =
3945 rb_entry(n, struct ceph_pg_pool_info, node);
3946 struct ceph_pg_pool_info *old_pi;
3948 old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3950 pi->was_full = old_pi->was_full;
3952 WARN_ON(pi->was_full);
3955 if (osdc->osdmap->epoch &&
3956 osdc->osdmap->epoch + 1 < newmap->epoch) {
3957 WARN_ON(incremental);
3961 ceph_osdmap_destroy(osdc->osdmap);
3962 osdc->osdmap = newmap;
3965 was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3966 scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3967 need_resend, need_resend_linger);
3969 for (n = rb_first(&osdc->osds); n; ) {
3970 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3972 n = rb_next(n); /* close_osd() */
3974 scan_requests(osd, skipped_map, was_full, true, need_resend,
3975 need_resend_linger);
3976 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3977 memcmp(&osd->o_con.peer_addr,
3978 ceph_osd_addr(osdc->osdmap, osd->o_osd),
3979 sizeof(struct ceph_entity_addr)))
3986 static void kick_requests(struct ceph_osd_client *osdc,
3987 struct rb_root *need_resend,
3988 struct list_head *need_resend_linger)
3990 struct ceph_osd_linger_request *lreq, *nlreq;
3991 enum calc_target_result ct_res;
3994 /* make sure need_resend targets reflect latest map */
3995 for (n = rb_first(need_resend); n; ) {
3996 struct ceph_osd_request *req =
3997 rb_entry(n, struct ceph_osd_request, r_node);
4001 if (req->r_t.epoch < osdc->osdmap->epoch) {
4002 ct_res = calc_target(osdc, &req->r_t, false);
4003 if (ct_res == CALC_TARGET_POOL_DNE) {
4004 erase_request(need_resend, req);
4005 check_pool_dne(req);
4010 for (n = rb_first(need_resend); n; ) {
4011 struct ceph_osd_request *req =
4012 rb_entry(n, struct ceph_osd_request, r_node);
4013 struct ceph_osd *osd;
4016 erase_request(need_resend, req); /* before link_request() */
4018 osd = lookup_create_osd(osdc, req->r_t.osd, true);
4019 link_request(osd, req);
4020 if (!req->r_linger) {
4021 if (!osd_homeless(osd) && !req->r_t.paused)
4024 cancel_linger_request(req);
4028 list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
4029 if (!osd_homeless(lreq->osd))
4032 list_del_init(&lreq->scan_item);
4037 * Process updated osd map.
4039 * The message contains any number of incremental and full maps, normally
4040 * indicating some sort of topology change in the cluster. Kick requests
4041 * off to different OSDs as needed.
4043 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
4045 void *p = msg->front.iov_base;
4046 void *const end = p + msg->front.iov_len;
4047 u32 nr_maps, maplen;
4049 struct ceph_fsid fsid;
4050 struct rb_root need_resend = RB_ROOT;
4051 LIST_HEAD(need_resend_linger);
4052 bool handled_incremental = false;
4053 bool was_pauserd, was_pausewr;
4054 bool pauserd, pausewr;
4057 dout("%s have %u\n", __func__, osdc->osdmap->epoch);
4058 down_write(&osdc->lock);
4061 ceph_decode_need(&p, end, sizeof(fsid), bad);
4062 ceph_decode_copy(&p, &fsid, sizeof(fsid));
4063 if (ceph_check_fsid(osdc->client, &fsid) < 0)
4066 was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4067 was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4068 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4069 have_pool_full(osdc);
4071 /* incremental maps */
4072 ceph_decode_32_safe(&p, end, nr_maps, bad);
4073 dout(" %d inc maps\n", nr_maps);
4074 while (nr_maps > 0) {
4075 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4076 epoch = ceph_decode_32(&p);
4077 maplen = ceph_decode_32(&p);
4078 ceph_decode_need(&p, end, maplen, bad);
4079 if (osdc->osdmap->epoch &&
4080 osdc->osdmap->epoch + 1 == epoch) {
4081 dout("applying incremental map %u len %d\n",
4083 err = handle_one_map(osdc, p, p + maplen, true,
4084 &need_resend, &need_resend_linger);
4087 handled_incremental = true;
4089 dout("ignoring incremental map %u len %d\n",
4095 if (handled_incremental)
4099 ceph_decode_32_safe(&p, end, nr_maps, bad);
4100 dout(" %d full maps\n", nr_maps);
4102 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4103 epoch = ceph_decode_32(&p);
4104 maplen = ceph_decode_32(&p);
4105 ceph_decode_need(&p, end, maplen, bad);
4107 dout("skipping non-latest full map %u len %d\n",
4109 } else if (osdc->osdmap->epoch >= epoch) {
4110 dout("skipping full map %u len %d, "
4111 "older than our %u\n", epoch, maplen,
4112 osdc->osdmap->epoch);
4114 dout("taking full map %u len %d\n", epoch, maplen);
4115 err = handle_one_map(osdc, p, p + maplen, false,
4116 &need_resend, &need_resend_linger);
4126 * subscribe to subsequent osdmap updates if full to ensure
4127 * we find out when we are no longer full and stop returning
4128 * ENOSPC.
4129 */
4130 pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4131 pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4132 ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4133 have_pool_full(osdc);
4134 if (was_pauserd || was_pausewr || pauserd || pausewr ||
4135 osdc->osdmap->epoch < osdc->epoch_barrier)
4136 maybe_request_map(osdc);
4138 kick_requests(osdc, &need_resend, &need_resend_linger);
4140 ceph_osdc_abort_on_full(osdc);
4141 ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4142 osdc->osdmap->epoch);
4143 up_write(&osdc->lock);
4144 wake_up_all(&osdc->client->auth_wq);
4148 pr_err("osdc handle_map corrupt msg\n");
4150 up_write(&osdc->lock);
4154 * Resubmit requests pending on the given osd.
4156 static void kick_osd_requests(struct ceph_osd *osd)
4160 clear_backoffs(osd);
4162 for (n = rb_first(&osd->o_requests); n; ) {
4163 struct ceph_osd_request *req =
4164 rb_entry(n, struct ceph_osd_request, r_node);
4166 n = rb_next(n); /* cancel_linger_request() */
4168 if (!req->r_linger) {
4169 if (!req->r_t.paused)
4172 cancel_linger_request(req);
4175 for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4176 struct ceph_osd_linger_request *lreq =
4177 rb_entry(n, struct ceph_osd_linger_request, node);
4184 * If the osd connection drops, we need to resubmit all requests.
4186 static void osd_fault(struct ceph_connection *con)
4188 struct ceph_osd *osd = con->private;
4189 struct ceph_osd_client *osdc = osd->o_osdc;
4191 dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4193 down_write(&osdc->lock);
4194 if (!osd_registered(osd)) {
4195 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4199 if (!reopen_osd(osd))
4200 kick_osd_requests(osd);
4201 maybe_request_map(osdc);
4204 up_write(&osdc->lock);
4207 struct MOSDBackoff {
4208 struct ceph_spg spgid;
4212 struct ceph_hobject_id *begin;
4213 struct ceph_hobject_id *end;
4216 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4218 void *p = msg->front.iov_base;
4219 void *const end = p + msg->front.iov_len;
4224 ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4228 ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4232 ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4233 ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4234 ceph_decode_8_safe(&p, end, m->op, e_inval);
4235 ceph_decode_64_safe(&p, end, m->id, e_inval);
4237 m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4241 ret = decode_hoid(&p, end, m->begin);
4243 free_hoid(m->begin);
4247 m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4249 free_hoid(m->begin);
4253 ret = decode_hoid(&p, end, m->end);
4255 free_hoid(m->begin);
4266 static struct ceph_msg *create_backoff_message(
4267 const struct ceph_osd_backoff *backoff,
4268 u32 map_epoch)
4269 {
4270 struct ceph_msg *msg;
4274 msg_size = CEPH_ENCODING_START_BLK_LEN +
4275 CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4276 msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4277 msg_size += CEPH_ENCODING_START_BLK_LEN +
4278 hoid_encoding_size(backoff->begin);
4279 msg_size += CEPH_ENCODING_START_BLK_LEN +
4280 hoid_encoding_size(backoff->end);
4282 msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4286 p = msg->front.iov_base;
4287 end = p + msg->front_alloc_len;
4289 encode_spgid(&p, &backoff->spgid);
4290 ceph_encode_32(&p, map_epoch);
4291 ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4292 ceph_encode_64(&p, backoff->id);
4293 encode_hoid(&p, end, backoff->begin);
4294 encode_hoid(&p, end, backoff->end);
4297 msg->front.iov_len = p - msg->front.iov_base;
4298 msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4299 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4304 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4306 struct ceph_spg_mapping *spg;
4307 struct ceph_osd_backoff *backoff;
4308 struct ceph_msg *msg;
4310 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4311 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4313 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4315 spg = alloc_spg_mapping();
4317 pr_err("%s failed to allocate spg\n", __func__);
4320 spg->spgid = m->spgid; /* struct */
4321 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4324 backoff = alloc_backoff();
4326 pr_err("%s failed to allocate backoff\n", __func__);
4329 backoff->spgid = m->spgid; /* struct */
4330 backoff->id = m->id;
4331 backoff->begin = m->begin;
4332 m->begin = NULL; /* backoff now owns this */
4333 backoff->end = m->end;
4334 m->end = NULL; /* ditto */
4336 insert_backoff(&spg->backoffs, backoff);
4337 insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4340 * Ack with original backoff's epoch so that the OSD can
4341 * discard this if there was a PG split.
4343 msg = create_backoff_message(backoff, m->map_epoch);
4345 pr_err("%s failed to allocate msg\n", __func__);
4348 ceph_con_send(&osd->o_con, msg);
4351 static bool target_contained_by(const struct ceph_osd_request_target *t,
4352 const struct ceph_hobject_id *begin,
4353 const struct ceph_hobject_id *end)
4355 struct ceph_hobject_id hoid;
4358 hoid_fill_from_target(&hoid, t);
4359 cmp = hoid_compare(&hoid, begin);
4360 return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4363 static void handle_backoff_unblock(struct ceph_osd *osd,
4364 const struct MOSDBackoff *m)
4366 struct ceph_spg_mapping *spg;
4367 struct ceph_osd_backoff *backoff;
4370 dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4371 m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4373 backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4375 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4376 __func__, osd->o_osd, m->spgid.pgid.pool,
4377 m->spgid.pgid.seed, m->spgid.shard, m->id);
4381 if (hoid_compare(backoff->begin, m->begin) &&
4382 hoid_compare(backoff->end, m->end)) {
4383 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4384 __func__, osd->o_osd, m->spgid.pgid.pool,
4385 m->spgid.pgid.seed, m->spgid.shard, m->id);
4386 /* unblock it anyway... */
4389 spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4392 erase_backoff(&spg->backoffs, backoff);
4393 erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4394 free_backoff(backoff);
4396 if (RB_EMPTY_ROOT(&spg->backoffs)) {
4397 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4398 free_spg_mapping(spg);
4401 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4402 struct ceph_osd_request *req =
4403 rb_entry(n, struct ceph_osd_request, r_node);
4405 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4407 * Match against @m, not @backoff -- the PG may
4408 * have split on the OSD.
4410 if (target_contained_by(&req->r_t, m->begin, m->end)) {
4411 /*
4412 * If no other installed backoff applies,
4413 * resend.
4414 */
4415 send_request(req);
4421 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4423 struct ceph_osd_client *osdc = osd->o_osdc;
4424 struct MOSDBackoff m;
4427 down_read(&osdc->lock);
4428 if (!osd_registered(osd)) {
4429 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4430 up_read(&osdc->lock);
4433 WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4435 mutex_lock(&osd->lock);
4436 ret = decode_MOSDBackoff(msg, &m);
4438 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4444 case CEPH_OSD_BACKOFF_OP_BLOCK:
4445 handle_backoff_block(osd, &m);
4447 case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4448 handle_backoff_unblock(osd, &m);
4451 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4458 mutex_unlock(&osd->lock);
4459 up_read(&osdc->lock);
/*
 * Process osd watch notifications
 */
static void handle_watch_notify(struct ceph_osd_client *osdc,
				struct ceph_msg *msg)
{
	void *p = msg->front.iov_base;
	void *const end = p + msg->front.iov_len;
	struct ceph_osd_linger_request *lreq;
	struct linger_work *lwork;
	u8 proto_ver, opcode;
	u64 cookie, notify_id;
	u64 notifier_id = 0;
	s32 return_code = 0;
	void *payload = NULL;
	u32 payload_len = 0;

	ceph_decode_8_safe(&p, end, proto_ver, bad);
	ceph_decode_8_safe(&p, end, opcode, bad);
	ceph_decode_64_safe(&p, end, cookie, bad);
	p += 8; /* skip ver */
	ceph_decode_64_safe(&p, end, notify_id, bad);

	if (proto_ver >= 1) {
		ceph_decode_32_safe(&p, end, payload_len, bad);
		ceph_decode_need(&p, end, payload_len, bad);
		payload = p;
		p += payload_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 2)
		ceph_decode_32_safe(&p, end, return_code, bad);

	if (le16_to_cpu(msg->hdr.version) >= 3)
		ceph_decode_64_safe(&p, end, notifier_id, bad);

	down_read(&osdc->lock);
	lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
	if (!lreq) {
		dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
		     cookie);
		goto out_unlock_osdc;
	}

	mutex_lock(&lreq->lock);
	dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
	     opcode, cookie, lreq, lreq->is_watch);
	if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
		if (!lreq->last_error) {
			lreq->last_error = -ENOTCONN;
			queue_watch_error(lreq);
		}
	} else if (!lreq->is_watch) {
		/* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
		if (lreq->notify_id && lreq->notify_id != notify_id) {
			dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
			     lreq->notify_id, notify_id);
		} else if (!completion_done(&lreq->notify_finish_wait)) {
			struct ceph_msg_data *data =
			    msg->num_data_items ? &msg->data[0] : NULL;

			if (data) {
				if (lreq->preply_pages) {
					WARN_ON(data->type !=
							CEPH_MSG_DATA_PAGES);
					*lreq->preply_pages = data->pages;
					*lreq->preply_len = data->length;
					data->own_pages = false;
				}
			}
			lreq->notify_finish_error = return_code;
			complete_all(&lreq->notify_finish_wait);
		}
	} else {
		/* CEPH_WATCH_EVENT_NOTIFY */
		lwork = lwork_alloc(lreq, do_watch_notify);
		if (!lwork) {
			pr_err("failed to allocate notify-lwork\n");
			goto out_unlock_lreq;
		}

		lwork->notify.notify_id = notify_id;
		lwork->notify.notifier_id = notifier_id;
		lwork->notify.payload = payload;
		lwork->notify.payload_len = payload_len;
		lwork->notify.msg = ceph_msg_get(msg);
		lwork_queue(lwork);
	}

out_unlock_lreq:
	mutex_unlock(&lreq->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return;

bad:
	pr_err("osdc handle_watch_notify corrupt msg\n");
}
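/*
 * For reference, the front of CEPH_MSG_WATCH_NOTIFY as decoded above
 * (all fields little-endian on the wire):
 *
 *	u8  proto_ver
 *	u8  opcode		CEPH_WATCH_EVENT_*
 *	u64 cookie
 *	u64 ver			(skipped)
 *	u64 notify_id
 *	u32 payload_len		if proto_ver >= 1, followed by payload bytes
 *	s32 return_code		if hdr.version >= 2
 *	u64 notifier_id		if hdr.version >= 3
 */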
/*
 * Register request, send initial attempt.
 */
int ceph_osdc_start_request(struct ceph_osd_client *osdc,
			    struct ceph_osd_request *req,
			    bool nofail)
{
	down_read(&osdc->lock);
	submit_request(req, false);
	up_read(&osdc->lock);

	return 0;
}
EXPORT_SYMBOL(ceph_osdc_start_request);
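/*
 * Illustrative sketch (not compiled in): the typical synchronous call
 * pattern built on ceph_osdc_start_request(), as used by the helpers
 * later in this file.  "req" is assumed to have been set up with
 * ceph_osdc_alloc_request() and ceph_osdc_alloc_messages():
 *
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	if (ret >= 0)
 *		ret = req->r_ops[0].rval;
 *	ceph_osdc_put_request(req);
 */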
/*
 * Unregister a registered request.  The request is not completed:
 * ->r_result isn't set and __complete_request() isn't called.
 */
void ceph_osdc_cancel_request(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;

	down_write(&osdc->lock);
	if (req->r_osd)
		cancel_request(req);
	up_write(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_cancel_request);
/*
 * @timeout: in jiffies, 0 means "wait forever"
 */
static int wait_request_timeout(struct ceph_osd_request *req,
				unsigned long timeout)
{
	long left;

	dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
	left = wait_for_completion_killable_timeout(&req->r_completion,
						ceph_timeout_jiffies(timeout));
	if (left <= 0) {
		left = left ?: -ETIMEDOUT;
		ceph_osdc_cancel_request(req);
	} else {
		left = req->r_result; /* completed */
	}

	return left;
}
/*
 * wait for a request to complete
 */
int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
			   struct ceph_osd_request *req)
{
	return wait_request_timeout(req, 0);
}
EXPORT_SYMBOL(ceph_osdc_wait_request);
/*
 * sync - wait for all in-flight requests to flush.  avoid starvation.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
	struct rb_node *n, *p;
	u64 last_tid = atomic64_read(&osdc->last_tid);

again:
	down_read(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		mutex_lock(&osd->lock);
		for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			if (req->r_tid > last_tid)
				break;

			if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
				continue;

			ceph_osdc_get_request(req);
			mutex_unlock(&osd->lock);
			up_read(&osdc->lock);
			dout("%s waiting on req %p tid %llu last_tid %llu\n",
			     __func__, req, req->r_tid, last_tid);
			wait_for_completion(&req->r_completion);
			ceph_osdc_put_request(req);
			goto again;
		}

		mutex_unlock(&osd->lock);
	}

	up_read(&osdc->lock);
	dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
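/*
 * Illustrative sketch (not compiled in): a sync(2)-style path flushing
 * every in-flight write before returning.  Note that the locks are
 * dropped and the scan restarted around each wait above, so new
 * requests are never blocked:
 *
 *	ceph_osdc_sync(osdc);
 */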
static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
	struct ceph_osd_request *req;

	req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return NULL;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	return req;
}
static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
	struct ceph_osd_request *req;

	req = alloc_linger_request(lreq);
	if (!req)
		return NULL;

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	osd_req_op_watch_init(req, 0, 0, watch_opcode);

	if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
		ceph_osdc_put_request(req);
		return NULL;
	}

	return req;
}
/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
		struct ceph_object_id *oid,
		struct ceph_object_locator *oloc,
		rados_watchcb2_t wcb,
		rados_watcherrcb_t errcb,
		void *data)
{
	struct ceph_osd_linger_request *lreq;
	int ret;

	lreq = linger_alloc(osdc);
	if (!lreq)
		return ERR_PTR(-ENOMEM);

	lreq->is_watch = true;
	lreq->wcb = wcb;
	lreq->errcb = errcb;
	lreq->data = data;
	lreq->watch_valid_thru = jiffies;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&lreq->mtime);

	lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
	if (!lreq->ping_req) {
		ret = -ENOMEM;
		goto err_put_lreq;
	}

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (ret) {
		linger_cancel(lreq);
		goto err_put_lreq;
	}

	return lreq;

err_put_lreq:
	linger_put(lreq);
	return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
		      struct ceph_osd_linger_request *lreq)
{
	struct ceph_options *opts = osdc->client->options;
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
	ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);
	osd_req_op_watch_init(req, 0, lreq->linger_id,
			      CEPH_OSD_WATCH_OP_UNWATCH);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	linger_cancel(lreq);
	linger_put(lreq);
	ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);
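/*
 * Illustrative sketch (not compiled in): the watch lifecycle.
 * "my_watch_cb" and "my_watch_errcb" are hypothetical callbacks with
 * the rados_watchcb2_t and rados_watcherrcb_t signatures:
 *
 *	struct ceph_osd_linger_request *handle;
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
 *				 my_watch_errcb, my_data);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *
 *	... notify events are delivered via my_watch_cb ...
 *
 *	ret = ceph_osdc_unwatch(osdc, handle);	releases the watch ref
 */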
static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
				      u64 notify_id, u64 cookie, void *payload,
				      u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_64(pl, notify_id);
	ret |= ceph_pagelist_encode_64(pl, cookie);
	if (payload) {
		ret |= ceph_pagelist_encode_32(pl, payload_len);
		ret |= ceph_pagelist_append(pl, payload, payload_len);
	} else {
		ret |= ceph_pagelist_encode_32(pl, 0);
	}
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
			 struct ceph_object_id *oid,
			 struct ceph_object_locator *oloc,
			 u64 notify_id,
			 u64 cookie,
			 void *payload,
			 u32 payload_len)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
					 payload_len);
	if (ret)
		goto out_put_req;

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
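/*
 * Illustrative sketch (not compiled in): a watch callback is expected
 * to acknowledge each notify it receives; an unacked notify holds up
 * the notifier until the notify timeout expires.  "my_watch_cb" is
 * hypothetical; the argument list is the rados_watchcb2_t signature:
 *
 *	static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *				u64 notifier_id, void *data, size_t data_len)
 *	{
 *		struct ceph_osd_client *osdc = ...;
 *
 *		... handle the payload in data ...
 *
 *		ceph_osdc_notify_ack(osdc, &oid, &oloc, notify_id, cookie,
 *				     NULL, 0);
 *	}
 */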
static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u32 prot_ver, u32 timeout,
				  void *payload, u32 payload_len)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pl;
	int ret;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
	op->notify.cookie = cookie;

	pl = ceph_pagelist_alloc(GFP_NOIO);
	if (!pl)
		return -ENOMEM;

	ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
	ret |= ceph_pagelist_encode_32(pl, timeout);
	ret |= ceph_pagelist_encode_32(pl, payload_len);
	ret |= ceph_pagelist_append(pl, payload, payload_len);
	if (ret) {
		ceph_pagelist_release(pl);
		return -ENOMEM;
	}

	ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
	op->indata_len = pl->length;
	return 0;
}
/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
		     struct ceph_object_id *oid,
		     struct ceph_object_locator *oloc,
		     void *payload,
		     u32 payload_len,
		     u32 timeout,
		     struct page ***preply_pages,
		     size_t *preply_len)
{
	struct ceph_osd_linger_request *lreq;
	struct page **pages;
	int ret;

	WARN_ON(!timeout);
	if (preply_pages) {
		*preply_pages = NULL;
		*preply_len = 0;
	}

	lreq = linger_alloc(osdc);
	if (!lreq)
		return -ENOMEM;

	lreq->preply_pages = preply_pages;
	lreq->preply_len = preply_len;

	ceph_oid_copy(&lreq->t.base_oid, oid);
	ceph_oloc_copy(&lreq->t.base_oloc, oloc);
	lreq->t.flags = CEPH_OSD_FLAG_READ;

	lreq->reg_req = alloc_linger_request(lreq);
	if (!lreq->reg_req) {
		ret = -ENOMEM;
		goto out_put_lreq;
	}

	/*
	 * Pass 0 for cookie because we don't know it yet, it will be
	 * filled in by linger_submit().
	 */
	ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
				     payload, payload_len);
	if (ret)
		goto out_put_lreq;

	/* for notify_id */
	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_lreq;
	}
	ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
	if (ret)
		goto out_put_lreq;

	linger_submit(lreq);
	ret = linger_reg_commit_wait(lreq);
	if (!ret)
		ret = linger_notify_finish_wait(lreq);
	else
		dout("lreq %p failed to initiate notify %d\n", lreq, ret);

	linger_cancel(lreq);
out_put_lreq:
	linger_put(lreq);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
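/*
 * Illustrative sketch (not compiled in): sending a notify and freeing
 * the reply, following the rule in the comment above.  The 10-second
 * timeout is an arbitrary example value:
 *
 *	struct page **reply_pages;
 *	size_t reply_len;
 *	int ret;
 *
 *	ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
 *			       10, &reply_pages, &reply_len);
 *	if (!ret)
 *		... inspect the notify replies in reply_pages ...
 *	if (reply_pages)
 *		ceph_release_page_vector(reply_pages,
 *					 calc_pages_for(0, reply_len));
 */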
/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
			  struct ceph_osd_linger_request *lreq)
{
	unsigned long stamp, age;
	int ret;

	down_read(&osdc->lock);
	mutex_lock(&lreq->lock);
	stamp = lreq->watch_valid_thru;
	if (!list_empty(&lreq->pending_lworks)) {
		struct linger_work *lwork =
		    list_first_entry(&lreq->pending_lworks,
				     struct linger_work,
				     pending_item);

		if (time_before(lwork->queued_stamp, stamp))
			stamp = lwork->queued_stamp;
	}
	age = jiffies - stamp;
	dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
	     lreq, lreq->linger_id, age, lreq->last_error);
	/* we are truncating to msecs, so return a safe upper bound */
	ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

	mutex_unlock(&lreq->lock);
	up_read(&osdc->lock);
	return ret;
}
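/*
 * Illustrative sketch (not compiled in): a periodic health check for a
 * watch established with ceph_osdc_watch().  "handle" is the linger
 * request returned by ceph_osdc_watch():
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0) {
 *		... watch is broken (e.g. -ENOTCONN), tear it down with
 *		    ceph_osdc_unwatch() and re-establish it ...
 *	} else {
 *		... watch was confirmed no more than "ret" ms ago ...
 *	}
 */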
static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
	u8 struct_v;
	u32 struct_len;
	int ret;

	ret = ceph_start_decoding(p, end, 2, "watch_item_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	ret = -EINVAL;
	ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
	ceph_decode_64_safe(p, end, item->cookie, bad);
	ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */

	if (struct_v >= 2) {
		ret = ceph_decode_entity_addr(p, end, &item->addr);
		if (ret)
			return ret;
	} else {
		ret = 0;
	}

	dout("%s %s%llu cookie %llu addr %s\n", __func__,
	     ENTITY_NAME(item->name), item->cookie,
	     ceph_pr_addr(&item->addr));
	return ret;

bad:
	return -EINVAL;
}
static int decode_watchers(void **p, void *end,
			   struct ceph_watch_item **watchers,
			   u32 *num_watchers)
{
	u8 struct_v;
	u32 struct_len;
	int i;
	int ret;

	ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
				  &struct_v, &struct_len);
	if (ret)
		return ret;

	*num_watchers = ceph_decode_32(p);
	*watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
	if (!*watchers)
		return -ENOMEM;

	for (i = 0; i < *num_watchers; i++) {
		ret = decode_watcher(p, end, *watchers + i);
		if (ret) {
			kfree(*watchers);
			return ret;
		}
	}

	return 0;
}
/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
			    struct ceph_object_id *oid,
			    struct ceph_object_locator *oloc,
			    struct ceph_watch_item **watchers,
			    u32 *num_watchers)
{
	struct ceph_osd_request *req;
	struct page **pages;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = CEPH_OSD_FLAG_READ;

	pages = ceph_alloc_page_vector(1, GFP_NOIO);
	if (IS_ERR(pages)) {
		ret = PTR_ERR(pages);
		goto out_put_req;
	}

	osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
	ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
						 response_data),
				 pages, PAGE_SIZE, 0, false, true);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		void *p = page_address(pages[0]);
		void *const end = p + req->r_ops[0].outdata_len;

		ret = decode_watchers(&p, end, watchers, num_watchers);
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
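/*
 * Illustrative sketch (not compiled in): listing watchers and freeing
 * the array per the comment above:
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers;
 *	u32 i;
 *	int ret;
 *
 *	ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *				      &num_watchers);
 *	if (ret)
 *		return ret;
 *
 *	for (i = 0; i < num_watchers; i++)
 *		... watchers[i].name, watchers[i].cookie, watchers[i].addr ...
 *
 *	kfree(watchers);
 */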
/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
	dout("%s osdc %p\n", __func__, osdc);
	flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);
void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
	down_read(&osdc->lock);
	maybe_request_map(osdc);
	up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
		   struct ceph_object_id *oid,
		   struct ceph_object_locator *oloc,
		   const char *class, const char *method,
		   unsigned int flags,
		   struct page *req_page, size_t req_len,
		   struct page **resp_pages, size_t *resp_len)
{
	struct ceph_osd_request *req;
	int ret;

	if (req_len > PAGE_SIZE)
		return -E2BIG;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
	if (!req)
		return -ENOMEM;

	ceph_oid_copy(&req->r_base_oid, oid);
	ceph_oloc_copy(&req->r_base_oloc, oloc);
	req->r_flags = flags;

	ret = osd_req_op_cls_init(req, 0, class, method);
	if (ret)
		goto out_put_req;

	if (req_page)
		osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
						  0, false, false);
	if (resp_pages)
		osd_req_op_cls_response_data_pages(req, 0, resp_pages,
						   *resp_len, 0, false, false);

	ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
	if (ret)
		goto out_put_req;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);
	if (ret >= 0) {
		ret = req->r_ops[0].rval;
		if (resp_pages)
			*resp_len = req->r_ops[0].outdata_len;
	}

out_put_req:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
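/*
 * Illustrative sketch (not compiled in): invoking a made-up
 * "foo.get_info" class method with a single-page request and reply.
 * The class and method names are hypothetical; the input must fit in
 * one page (req_len <= PAGE_SIZE):
 *
 *	struct page *req_page = alloc_page(GFP_NOIO);
 *	struct page *resp_page = alloc_page(GFP_NOIO);
 *	size_t resp_len = PAGE_SIZE;
 *	int ret;
 *
 *	... fill page_address(req_page) with req_len bytes of input ...
 *
 *	ret = ceph_osdc_call(osdc, &oid, &oloc, "foo", "get_info",
 *			     CEPH_OSD_FLAG_READ, req_page, req_len,
 *			     &resp_page, &resp_len);
 *	if (ret >= 0)
 *		... resp_len bytes of output in page_address(resp_page) ...
 */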
/*
 * reset all osd connections
 */
void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
{
	struct rb_node *n;

	down_write(&osdc->lock);
	for (n = rb_first(&osdc->osds); n; ) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		n = rb_next(n);
		if (!reopen_osd(osd))
			kick_osd_requests(osd);
	}
	up_write(&osdc->lock);
}
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
	int err;

	dout("init\n");
	osdc->client = client;
	init_rwsem(&osdc->lock);
	osdc->osds = RB_ROOT;
	INIT_LIST_HEAD(&osdc->osd_lru);
	spin_lock_init(&osdc->osd_lru_lock);
	osd_init(&osdc->homeless_osd);
	osdc->homeless_osd.o_osdc = osdc;
	osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
	osdc->last_linger_id = CEPH_LINGER_ID_START;
	osdc->linger_requests = RB_ROOT;
	osdc->map_checks = RB_ROOT;
	osdc->linger_map_checks = RB_ROOT;
	INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
	INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

	err = -ENOMEM;
	osdc->osdmap = ceph_osdmap_alloc();
	if (!osdc->osdmap)
		goto out;

	osdc->req_mempool = mempool_create_slab_pool(10,
						     ceph_osd_request_cache);
	if (!osdc->req_mempool)
		goto out_map;

	err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
	if (err < 0)
		goto out_mempool;
	err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
				PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
				"osd_op_reply");
	if (err < 0)
		goto out_msgpool;

	err = -ENOMEM;
	osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
	if (!osdc->notify_wq)
		goto out_msgpool_reply;

	osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
	if (!osdc->completion_wq)
		goto out_notify_wq;

	schedule_delayed_work(&osdc->timeout_work,
			      osdc->client->options->osd_keepalive_timeout);
	schedule_delayed_work(&osdc->osds_timeout_work,
	    round_jiffies_relative(osdc->client->options->osd_idle_ttl));

	return 0;

out_notify_wq:
	destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
	ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
	mempool_destroy(osdc->req_mempool);
out_map:
	ceph_osdmap_destroy(osdc->osdmap);
out:
	return err;
}
void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
	destroy_workqueue(osdc->completion_wq);
	destroy_workqueue(osdc->notify_wq);
	cancel_delayed_work_sync(&osdc->timeout_work);
	cancel_delayed_work_sync(&osdc->osds_timeout_work);

	down_write(&osdc->lock);
	while (!RB_EMPTY_ROOT(&osdc->osds)) {
		struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
						struct ceph_osd, o_node);
		close_osd(osd);
	}
	up_write(&osdc->lock);
	WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
	osd_cleanup(&osdc->homeless_osd);

	WARN_ON(!list_empty(&osdc->osd_lru));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
	WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
	WARN_ON(atomic_read(&osdc->num_requests));
	WARN_ON(atomic_read(&osdc->num_homeless));

	ceph_osdmap_destroy(osdc->osdmap);
	mempool_destroy(osdc->req_mempool);
	ceph_msgpool_destroy(&osdc->msgpool_op);
	ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}
static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
				     u64 src_snapid, u64 src_version,
				     struct ceph_object_id *src_oid,
				     struct ceph_object_locator *src_oloc,
				     u32 src_fadvise_flags,
				     u32 dst_fadvise_flags,
				     u32 truncate_seq, u64 truncate_size,
				     u8 copy_from_flags)
{
	struct ceph_osd_req_op *op;
	struct page **pages;
	void *p, *end;

	pages = ceph_alloc_page_vector(1, GFP_KERNEL);
	if (IS_ERR(pages))
		return PTR_ERR(pages);

	op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
			      dst_fadvise_flags);
	op->copy_from.snapid = src_snapid;
	op->copy_from.src_version = src_version;
	op->copy_from.flags = copy_from_flags;
	op->copy_from.src_fadvise_flags = src_fadvise_flags;

	p = page_address(pages[0]);
	end = p + PAGE_SIZE;
	ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
	encode_oloc(&p, end, src_oloc);
	ceph_encode_32(&p, truncate_seq);
	ceph_encode_64(&p, truncate_size);
	op->indata_len = PAGE_SIZE - (end - p);

	ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
				 op->indata_len, 0, false, true);
	return 0;
}
int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
			u64 src_snapid, u64 src_version,
			struct ceph_object_id *src_oid,
			struct ceph_object_locator *src_oloc,
			u32 src_fadvise_flags,
			struct ceph_object_id *dst_oid,
			struct ceph_object_locator *dst_oloc,
			u32 dst_fadvise_flags,
			u32 truncate_seq, u64 truncate_size,
			u8 copy_from_flags)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->r_flags = CEPH_OSD_FLAG_WRITE;

	ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
	ceph_oid_copy(&req->r_t.base_oid, dst_oid);

	ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
					src_oloc, src_fadvise_flags,
					dst_fadvise_flags, truncate_seq,
					truncate_size, copy_from_flags);
	if (ret)
		goto out;

	ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
	if (ret)
		goto out;

	ceph_osdc_start_request(osdc, req, false);
	ret = ceph_osdc_wait_request(osdc, req);

out:
	ceph_osdc_put_request(req);
	return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);
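/*
 * Illustrative sketch (not compiled in): a server-side object copy, as
 * a copy_file_range()-style path might issue it.  CEPH_NOSNAP and
 * CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ are plausible example values,
 * not requirements:
 *
 *	ret = ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0,
 *				  &src_oid, &src_oloc, 0,
 *				  &dst_oid, &dst_oloc, 0,
 *				  truncate_seq, truncate_size,
 *				  CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
 */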
int __init ceph_osdc_setup(void)
{
	size_t size = sizeof(struct ceph_osd_request) +
	    CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

	BUG_ON(ceph_osd_request_cache);
	ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
						   0, 0, NULL);

	return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
	BUG_ON(!ceph_osd_request_cache);
	kmem_cache_destroy(ceph_osd_request_cache);
	ceph_osd_request_cache = NULL;
}
/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	int type = le16_to_cpu(msg->hdr.type);

	switch (type) {
	case CEPH_MSG_OSD_MAP:
		ceph_osdc_handle_map(osdc, msg);
		break;
	case CEPH_MSG_OSD_OPREPLY:
		handle_reply(osd, msg);
		break;
	case CEPH_MSG_OSD_BACKOFF:
		handle_backoff(osd, msg);
		break;
	case CEPH_MSG_WATCH_NOTIFY:
		handle_watch_notify(osdc, msg);
		break;

	default:
		pr_err("received unknown message type %d %s\n", type,
		       ceph_msg_type_name(type));
	}

	ceph_msg_put(msg);
}
/*
 * Lookup and return message for incoming reply.  Don't try to do
 * anything about a larger than preallocated data portion of the
 * message at the moment - for now, just skip the message.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct ceph_msg *m = NULL;
	struct ceph_osd_request *req;
	int front_len = le32_to_cpu(hdr->front_len);
	int data_len = le32_to_cpu(hdr->data_len);
	u64 tid = le64_to_cpu(hdr->tid);

	down_read(&osdc->lock);
	if (!osd_registered(osd)) {
		dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
		*skip = 1;
		goto out_unlock_osdc;
	}
	WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

	mutex_lock(&osd->lock);
	req = lookup_request(&osd->o_requests, tid);
	if (!req) {
		dout("%s osd%d tid %llu unknown, skipping\n", __func__,
		     osd->o_osd, tid);
		*skip = 1;
		goto out_unlock_session;
	}

	ceph_msg_revoke_incoming(req->r_reply);

	if (front_len > req->r_reply->front_alloc_len) {
		pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
			__func__, osd->o_osd, req->r_tid, front_len,
			req->r_reply->front_alloc_len);
		m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
				 false);
		if (!m)
			goto out_unlock_session;
		ceph_msg_put(req->r_reply);
		req->r_reply = m;
	}

	if (data_len > req->r_reply->data_length) {
		pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
			__func__, osd->o_osd, req->r_tid, data_len,
			req->r_reply->data_length);
		m = NULL;
		*skip = 1;
		goto out_unlock_session;
	}

	m = ceph_msg_get(req->r_reply);
	dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
	mutex_unlock(&osd->lock);
out_unlock_osdc:
	up_read(&osdc->lock);
	return m;
}
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
	struct ceph_msg *m;
	int type = le16_to_cpu(hdr->type);
	u32 front_len = le32_to_cpu(hdr->front_len);
	u32 data_len = le32_to_cpu(hdr->data_len);

	m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
	if (!m)
		return NULL;

	if (data_len) {
		struct page **pages;

		pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
					       GFP_NOIO);
		if (IS_ERR(pages)) {
			ceph_msg_put(m);
			return NULL;
		}

		ceph_msg_data_add_pages(m, pages, data_len, 0, true);
	}

	return m;
}
static struct ceph_msg *alloc_msg(struct ceph_connection *con,
				  struct ceph_msg_header *hdr,
				  int *skip)
{
	struct ceph_osd *osd = con->private;
	int type = le16_to_cpu(hdr->type);

	*skip = 0;
	switch (type) {
	case CEPH_MSG_OSD_MAP:
	case CEPH_MSG_OSD_BACKOFF:
	case CEPH_MSG_WATCH_NOTIFY:
		return alloc_msg_with_page_vector(hdr);
	case CEPH_MSG_OSD_OPREPLY:
		return get_reply(con, hdr, skip);
	default:
		pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
			osd->o_osd, type);
		*skip = 1;
		return NULL;
	}
}
/*
 * Wrappers to refcount containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	if (get_osd(osd))
		return con;
	return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
	struct ceph_osd *osd = con->private;
	put_osd(osd);
}
/*
 * authentication
 */

/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
						  int *proto, int force_new)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;
	struct ceph_auth_handshake *auth = &o->o_auth;

	if (force_new && auth->authorizer) {
		ceph_auth_destroy_authorizer(auth->authorizer);
		auth->authorizer = NULL;
	}
	if (!auth->authorizer) {
		int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	} else {
		int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
						      auth);
		if (ret)
			return ERR_PTR(ret);
	}
	*proto = ac->protocol;

	return auth;
}
static int add_authorizer_challenge(struct ceph_connection *con,
				    void *challenge_buf, int challenge_buf_len)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
					    challenge_buf, challenge_buf_len);
}
static int verify_authorizer_reply(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
	struct ceph_osd *o = con->private;
	struct ceph_osd_client *osdc = o->o_osdc;
	struct ceph_auth_client *ac = osdc->client->monc.auth;

	ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
	return ceph_monc_validate_auth(&osdc->client->monc);
}
static void osd_reencode_message(struct ceph_msg *msg)
{
	int type = le16_to_cpu(msg->hdr.type);

	if (type == CEPH_MSG_OSD_OP)
		encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
	struct ceph_osd *o = msg->con->private;
	struct ceph_auth_handshake *auth = &o->o_auth;

	return ceph_auth_check_message_signature(auth, msg);
}
static const struct ceph_connection_operations osd_con_ops = {
	.get = get_osd_con,
	.put = put_osd_con,
	.dispatch = dispatch,
	.get_authorizer = get_authorizer,
	.add_authorizer_challenge = add_authorizer_challenge,
	.verify_authorizer_reply = verify_authorizer_reply,
	.invalidate_authorizer = invalidate_authorizer,
	.alloc_msg = alloc_msg,
	.reencode_message = osd_reencode_message,
	.sign_message = osd_sign_message,
	.check_message_signature = osd_check_message_signature,
	.fault = osd_fault,
};