// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN   512

static struct kmem_cache        *ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
                        struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
                          struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
        bool wrlocked = true;

        if (unlikely(down_read_trylock(sem))) {
                wrlocked = false;
                up_read(sem);
        }

        return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
        WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        WARN_ON(!(mutex_is_locked(&osd->lock) &&
                  rwsem_is_locked(&osdc->lock)) &&
                !rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
        WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * Calculate the mapping of a file extent onto an object: shorten the
 * extent as necessary if it crosses an object boundary, and return the
 * object number, offset and length.  The caller fills out the osd op in
 * the request message accordingly.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
                        u64 *objnum, u64 *objoff, u64 *objlen)
{
        u64 orig_len = *plen;
        u32 xlen;

        /* object extent? */
        ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
                                          objoff, &xlen);
        *objlen = xlen;
        if (*objlen < orig_len) {
                *plen = *objlen;
                dout(" skipping last %llu, final file extent %llu~%llu\n",
                     orig_len - *plen, off, *plen);
        }

        dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
        return 0;
}
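
/*
 * Worked example (illustrative only; layout parameters assumed): with a
 * simple layout where object_size == stripe_unit == 4M and
 * stripe_count == 1, a 4M read at file offset 6M comes back from
 * ceph_calc_file_object_mapping() as
 *
 *      objnum = 6M / 4M = 1
 *      objoff = 6M % 4M = 2M
 *      xlen   = min(4M, 4M - 2M) = 2M  (clamped at the object boundary)
 *
 * so *plen is shortened from 4M to 2M and the caller has to issue a
 * second request against the next object for the remaining 2M.
 */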

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
        memset(osd_data, 0, sizeof (*osd_data));
        osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
                        struct page **pages, u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
        osd_data->pages = pages;
        osd_data->length = length;
        osd_data->alignment = alignment;
        osd_data->pages_from_pool = pages_from_pool;
        osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
                        struct ceph_pagelist *pagelist)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
        osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
                                   struct ceph_bio_iter *bio_pos,
                                   u32 bio_length)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
        osd_data->bio_pos = *bio_pos;
        osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
                                     struct ceph_bvec_iter *bvec_pos,
                                     u32 num_bvecs)
{
        osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
        osd_data->bvec_pos = *bvec_pos;
        osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
        BUG_ON(which >= osd_req->r_num_ops);

        return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_raw_data_in(osd_req, which);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages,
                        u64 length, u32 alignment,
                        bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
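
/*
 * Usage sketch (hypothetical caller; names assumed): a reader that owns
 * a page vector might wire it into an extent op as
 *
 *      pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *      osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *      osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false,
 *                                       false);
 *
 * Passing own_pages == true instead hands the vector over to the
 * request, which then releases it in ceph_osd_data_release().
 */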

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
                                    unsigned int which,
                                    struct ceph_bio_iter *bio_pos,
                                    u32 bio_length)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
                                      unsigned int which,
                                      struct bio_vec *bvecs, u32 num_bvecs,
                                      u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
                                         unsigned int which,
                                         struct ceph_bvec_iter *bvec_pos)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
        ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_info);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
                        struct ceph_osd_request *osd_req,
                        unsigned int which, struct ceph_pagelist *pagelist)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pagelist_init(osd_data, pagelist);
        osd_req->r_ops[which].cls.indata_len += pagelist->length;
        osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
        osd_req->r_ops[which].cls.indata_len += length;
        osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
                                       unsigned int which,
                                       struct bio_vec *bvecs, u32 num_bvecs,
                                       u32 bytes)
{
        struct ceph_osd_data *osd_data;
        struct ceph_bvec_iter it = {
                .bvecs = bvecs,
                .iter = { .bi_size = bytes },
        };

        osd_data = osd_req_op_data(osd_req, which, cls, request_data);
        ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
        osd_req->r_ops[which].cls.indata_len += bytes;
        osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
                        unsigned int which, struct page **pages, u64 length,
                        u32 alignment, bool pages_from_pool, bool own_pages)
{
        struct ceph_osd_data *osd_data;

        osd_data = osd_req_op_data(osd_req, which, cls, response_data);
        ceph_osd_data_pages_init(osd_data, pages, length, alignment,
                                pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
        switch (osd_data->type) {
        case CEPH_OSD_DATA_TYPE_NONE:
                return 0;
        case CEPH_OSD_DATA_TYPE_PAGES:
                return osd_data->length;
        case CEPH_OSD_DATA_TYPE_PAGELIST:
                return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
        case CEPH_OSD_DATA_TYPE_BIO:
                return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
        case CEPH_OSD_DATA_TYPE_BVECS:
                return osd_data->bvec_pos.iter.bi_size;
        default:
                WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
                return 0;
        }
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
                int num_pages;

                num_pages = calc_pages_for((u64)osd_data->alignment,
                                                (u64)osd_data->length);
                ceph_release_page_vector(osd_data->pages, num_pages);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                ceph_pagelist_release(osd_data->pagelist);
        }
        ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
                        unsigned int which)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];

        switch (op->op) {
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
                ceph_osd_data_release(&op->extent.osd_data);
                break;
        case CEPH_OSD_OP_CALL:
                ceph_osd_data_release(&op->cls.request_info);
                ceph_osd_data_release(&op->cls.request_data);
                ceph_osd_data_release(&op->cls.response_data);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                ceph_osd_data_release(&op->xattr.osd_data);
                break;
        case CEPH_OSD_OP_STAT:
                ceph_osd_data_release(&op->raw_data_in);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                ceph_osd_data_release(&op->notify_ack.request_data);
                break;
        case CEPH_OSD_OP_NOTIFY:
                ceph_osd_data_release(&op->notify.request_data);
                ceph_osd_data_release(&op->notify.response_data);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                ceph_osd_data_release(&op->list_watchers.response_data);
                break;
        case CEPH_OSD_OP_COPY_FROM:
                ceph_osd_data_release(&op->copy_from.osd_data);
                break;
        default:
                break;
        }
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
        ceph_oid_init(&t->base_oid);
        ceph_oloc_init(&t->base_oloc);
        ceph_oid_init(&t->target_oid);
        ceph_oloc_init(&t->target_oloc);

        ceph_osds_init(&t->acting);
        ceph_osds_init(&t->up);
        t->size = -1;
        t->min_size = -1;

        t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
                        const struct ceph_osd_request_target *src)
{
        ceph_oid_copy(&dest->base_oid, &src->base_oid);
        ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
        ceph_oid_copy(&dest->target_oid, &src->target_oid);
        ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

        dest->pgid = src->pgid; /* struct */
        dest->spgid = src->spgid; /* struct */
        dest->pg_num = src->pg_num;
        dest->pg_num_mask = src->pg_num_mask;
        ceph_osds_copy(&dest->acting, &src->acting);
        ceph_osds_copy(&dest->up, &src->up);
        dest->size = src->size;
        dest->min_size = src->min_size;
        dest->sort_bitwise = src->sort_bitwise;

        dest->flags = src->flags;
        dest->paused = src->paused;

        dest->epoch = src->epoch;
        dest->last_force_resend = src->last_force_resend;

        dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
        ceph_oid_destroy(&t->base_oid);
        ceph_oloc_destroy(&t->base_oloc);
        ceph_oid_destroy(&t->target_oid);
        ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
        WARN_ON(!RB_EMPTY_NODE(&req->r_node));
        WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
        WARN_ON(!list_empty(&req->r_private_item));
        WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
        struct ceph_osd_request *req = container_of(kref,
                                            struct ceph_osd_request, r_kref);
        unsigned int which;

        dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
             req->r_request, req->r_reply);
        request_release_checks(req);

        if (req->r_request)
                ceph_msg_put(req->r_request);
        if (req->r_reply)
                ceph_msg_put(req->r_reply);

        for (which = 0; which < req->r_num_ops; which++)
                osd_req_op_data_release(req, which);

        target_destroy(&req->r_t);
        ceph_put_snap_context(req->r_snapc);

        if (req->r_mempool)
                mempool_free(req, req->r_osdc->req_mempool);
        else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
                kmem_cache_free(ceph_osd_request_cache, req);
        else
                kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
        dout("%s %p (was %d)\n", __func__, req,
             kref_read(&req->r_kref));
        kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
        if (req) {
                dout("%s %p (was %d)\n", __func__, req,
                     kref_read(&req->r_kref));
                kref_put(&req->r_kref, ceph_osdc_release_request);
        }
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
        /* req only, each op is zeroed in _osd_req_op_init() */
        memset(req, 0, sizeof(*req));

        kref_init(&req->r_kref);
        init_completion(&req->r_completion);
        RB_CLEAR_NODE(&req->r_node);
        RB_CLEAR_NODE(&req->r_mc_node);
        INIT_LIST_HEAD(&req->r_private_item);

        target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        bool mempool = req->r_mempool;
        unsigned int num_ops = req->r_num_ops;
        u64 snapid = req->r_snapid;
        struct ceph_snap_context *snapc = req->r_snapc;
        bool linger = req->r_linger;
        struct ceph_msg *request_msg = req->r_request;
        struct ceph_msg *reply_msg = req->r_reply;

        dout("%s req %p\n", __func__, req);
        WARN_ON(kref_read(&req->r_kref) != 1);
        request_release_checks(req);

        WARN_ON(kref_read(&request_msg->kref) != 1);
        WARN_ON(kref_read(&reply_msg->kref) != 1);
        target_destroy(&req->r_t);

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = snapid;
        req->r_snapc = snapc;
        req->r_linger = linger;
        req->r_request = request_msg;
        req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
                                               struct ceph_snap_context *snapc,
                                               unsigned int num_ops,
                                               bool use_mempool,
                                               gfp_t gfp_flags)
{
        struct ceph_osd_request *req;

        if (use_mempool) {
                BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
                req = mempool_alloc(osdc->req_mempool, gfp_flags);
        } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
                req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
        } else {
                BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
                req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
        }
        if (unlikely(!req))
                return NULL;

        request_init(req);
        req->r_osdc = osdc;
        req->r_mempool = use_mempool;
        req->r_num_ops = num_ops;
        req->r_snapid = CEPH_NOSNAP;
        req->r_snapc = ceph_get_snap_context(snapc);

        dout("%s req %p\n", __func__, req);
        return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
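
/*
 * Sketch of the manual request-building sequence (hedged; details vary
 * by caller, and object_name/oloc below are caller-provided):
 *
 *      req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *      if (!req)
 *              return -ENOMEM;
 *      ceph_oid_printf(&req->r_base_oid, "%s", object_name);
 *      ceph_oloc_copy(&req->r_base_oloc, &oloc);
 *      osd_req_op_init(req, 0, CEPH_OSD_OP_STAT, 0);
 *      ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *
 * Note that ceph_osdc_alloc_messages() must come after oid, oloc and
 * the opcodes are filled in (see its comment below).
 */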

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
        return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
                                      int num_request_data_items,
                                      int num_reply_data_items)
{
        struct ceph_osd_client *osdc = req->r_osdc;
        struct ceph_msg *msg;
        int msg_size;

        WARN_ON(req->r_request || req->r_reply);
        WARN_ON(ceph_oid_empty(&req->r_base_oid));
        WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

        /* create request message */
        msg_size = CEPH_ENCODING_START_BLK_LEN +
                        CEPH_PGID_ENCODING_LEN + 1; /* spgid */
        msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        sizeof(struct ceph_osd_reqid); /* reqid */
        msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
        msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
        msg_size += CEPH_ENCODING_START_BLK_LEN +
                        ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
        msg_size += 4 + req->r_base_oid.name_len; /* oid */
        msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
        msg_size += 8; /* snapid */
        msg_size += 8; /* snap_seq */
        msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
        msg_size += 4 + 8; /* retry_attempt, features */

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
                                       num_request_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
                                    num_request_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        memset(msg->front.iov_base, 0, msg->front.iov_len);
        req->r_request = msg;

        /* create reply message */
        msg_size = OSD_OPREPLY_FRONT_LEN;
        msg_size += req->r_base_oid.name_len;
        msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

        if (req->r_mempool)
                msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
                                       num_reply_data_items);
        else
                msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
                                    num_reply_data_items, gfp, true);
        if (!msg)
                return -ENOMEM;

        req->r_reply = msg;

        return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
        switch (opcode) {
#define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
        default:
                return false;
        }
}
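
/*
 * For reference: __CEPH_FORALL_OSD_OPS is an X-macro over the opcode
 * table, so each GENERATE_CASE invocation above expands to something
 * like (sketch)
 *
 *      case CEPH_OSD_OP_READ: return true;
 *
 * giving a complete switch over every known opcode without a
 * hand-maintained list here.
 */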

static void get_num_data_items(struct ceph_osd_request *req,
                               int *num_request_data_items,
                               int *num_reply_data_items)
{
        struct ceph_osd_req_op *op;

        *num_request_data_items = 0;
        *num_reply_data_items = 0;

        for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
                switch (op->op) {
                /* request */
                case CEPH_OSD_OP_WRITE:
                case CEPH_OSD_OP_WRITEFULL:
                case CEPH_OSD_OP_SETXATTR:
                case CEPH_OSD_OP_CMPXATTR:
                case CEPH_OSD_OP_NOTIFY_ACK:
                case CEPH_OSD_OP_COPY_FROM:
                        *num_request_data_items += 1;
                        break;

                /* reply */
                case CEPH_OSD_OP_STAT:
                case CEPH_OSD_OP_READ:
                case CEPH_OSD_OP_LIST_WATCHERS:
                        *num_reply_data_items += 1;
                        break;

                /* both */
                case CEPH_OSD_OP_NOTIFY:
                        *num_request_data_items += 1;
                        *num_reply_data_items += 1;
                        break;
                case CEPH_OSD_OP_CALL:
                        *num_request_data_items += 2;
                        *num_reply_data_items += 1;
                        break;

                default:
                        WARN_ON(!osd_req_opcode_valid(op->op));
                        break;
                }
        }
}

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
        int num_request_data_items, num_reply_data_items;

        get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
        return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
                                          num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
                 u16 opcode, u32 flags)
{
        struct ceph_osd_req_op *op;

        BUG_ON(which >= osd_req->r_num_ops);
        BUG_ON(!osd_req_opcode_valid(opcode));

        op = &osd_req->r_ops[which];
        memset(op, 0, sizeof (*op));
        op->op = opcode;
        op->flags = flags;

        return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
                     unsigned int which, u16 opcode, u32 flags)
{
        (void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
                                unsigned int which, u16 opcode,
                                u64 offset, u64 length,
                                u64 truncate_size, u32 truncate_seq)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        size_t payload_len = 0;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
               opcode != CEPH_OSD_OP_TRUNCATE);

        op->extent.offset = offset;
        op->extent.length = length;
        op->extent.truncate_size = truncate_size;
        op->extent.truncate_seq = truncate_seq;
        if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
                payload_len += length;

        op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 length)
{
        struct ceph_osd_req_op *op;
        u64 previous;

        BUG_ON(which >= osd_req->r_num_ops);
        op = &osd_req->r_ops[which];
        previous = op->extent.length;

        if (length == previous)
                return;         /* Nothing to do */
        BUG_ON(length > previous);

        op->extent.length = length;
        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
                                unsigned int which, u64 offset_inc)
{
        struct ceph_osd_req_op *op, *prev_op;

        BUG_ON(which + 1 >= osd_req->r_num_ops);

        prev_op = &osd_req->r_ops[which];
        op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
        /* dup previous one */
        op->indata_len = prev_op->indata_len;
        op->outdata_len = prev_op->outdata_len;
        op->extent = prev_op->extent;
        /* adjust offset */
        op->extent.offset += offset_inc;
        op->extent.length -= offset_inc;

        if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
                op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
                        const char *class, const char *method)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pagelist;
        size_t payload_len = 0;
        size_t size;

        op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        op->cls.class_name = class;
        size = strlen(class);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.class_len = size;
        ceph_pagelist_append(pagelist, class, size);
        payload_len += size;

        op->cls.method_name = method;
        size = strlen(method);
        BUG_ON(size > (size_t) U8_MAX);
        op->cls.method_len = size;
        ceph_pagelist_append(pagelist, method, size);
        payload_len += size;

        osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);

        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
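
/*
 * Usage sketch (class and method names below are hypothetical): a CALL
 * op with inline input and a page for the reply might be set up as
 *
 *      ret = osd_req_op_cls_init(req, 0, "myclass", "mymethod");
 *      if (ret)
 *              return ret;
 *      osd_req_op_cls_request_data_pagelist(req, 0, in_pagelist);
 *      osd_req_op_cls_response_data_pages(req, 0, &reply_page, PAGE_SIZE,
 *                                         0, false, false);
 *
 * osd_req_op_cls_init() only fills in request_info (the class and
 * method names); request_data and response_data are wired up by the
 * caller as above.
 */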

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
                          u16 opcode, const char *name, const void *value,
                          size_t size, u8 cmp_op, u8 cmp_mode)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      opcode, 0);
        struct ceph_pagelist *pagelist;
        size_t payload_len;

        BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

        pagelist = ceph_pagelist_alloc(GFP_NOFS);
        if (!pagelist)
                return -ENOMEM;

        payload_len = strlen(name);
        op->xattr.name_len = payload_len;
        ceph_pagelist_append(pagelist, name, payload_len);

        op->xattr.value_len = size;
        ceph_pagelist_append(pagelist, value, size);
        payload_len += size;

        op->xattr.cmp_op = cmp_op;
        op->xattr.cmp_mode = cmp_mode;

        ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
        op->indata_len = payload_len;
        return 0;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u8 watch_opcode)
{
        struct ceph_osd_req_op *op;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
        op->watch.cookie = cookie;
        op->watch.op = watch_opcode;
        op->watch.gen = 0;
}

void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
                                unsigned int which,
                                u64 expected_object_size,
                                u64 expected_write_size)
{
        struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
                                                      CEPH_OSD_OP_SETALLOCHINT,
                                                      0);

        op->alloc_hint.expected_object_size = expected_object_size;
        op->alloc_hint.expected_write_size = expected_write_size;

        /*
         * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
         * not worth a feature bit.  Set FAILOK per-op flag to make
         * sure older osds don't trip over an unsupported opcode.
         */
        op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
                                struct ceph_osd_data *osd_data)
{
        u64 length = ceph_osd_data_length(osd_data);

        if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
                BUG_ON(length > (u64) SIZE_MAX);
                if (length)
                        ceph_msg_data_add_pages(msg, osd_data->pages,
                                        length, osd_data->alignment);
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
                BUG_ON(!length);
                ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
                ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
        } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
                ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
        } else {
                BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
        }
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
                             const struct ceph_osd_req_op *src)
{
        switch (src->op) {
        case CEPH_OSD_OP_STAT:
                break;
        case CEPH_OSD_OP_READ:
        case CEPH_OSD_OP_WRITE:
        case CEPH_OSD_OP_WRITEFULL:
        case CEPH_OSD_OP_ZERO:
        case CEPH_OSD_OP_TRUNCATE:
                dst->extent.offset = cpu_to_le64(src->extent.offset);
                dst->extent.length = cpu_to_le64(src->extent.length);
                dst->extent.truncate_size =
                        cpu_to_le64(src->extent.truncate_size);
                dst->extent.truncate_seq =
                        cpu_to_le32(src->extent.truncate_seq);
                break;
        case CEPH_OSD_OP_CALL:
                dst->cls.class_len = src->cls.class_len;
                dst->cls.method_len = src->cls.method_len;
                dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
                break;
        case CEPH_OSD_OP_WATCH:
                dst->watch.cookie = cpu_to_le64(src->watch.cookie);
                dst->watch.ver = cpu_to_le64(0);
                dst->watch.op = src->watch.op;
                dst->watch.gen = cpu_to_le32(src->watch.gen);
                break;
        case CEPH_OSD_OP_NOTIFY_ACK:
                break;
        case CEPH_OSD_OP_NOTIFY:
                dst->notify.cookie = cpu_to_le64(src->notify.cookie);
                break;
        case CEPH_OSD_OP_LIST_WATCHERS:
                break;
        case CEPH_OSD_OP_SETALLOCHINT:
                dst->alloc_hint.expected_object_size =
                    cpu_to_le64(src->alloc_hint.expected_object_size);
                dst->alloc_hint.expected_write_size =
                    cpu_to_le64(src->alloc_hint.expected_write_size);
                break;
        case CEPH_OSD_OP_SETXATTR:
        case CEPH_OSD_OP_CMPXATTR:
                dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
                dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
                dst->xattr.cmp_op = src->xattr.cmp_op;
                dst->xattr.cmp_mode = src->xattr.cmp_mode;
                break;
        case CEPH_OSD_OP_CREATE:
        case CEPH_OSD_OP_DELETE:
                break;
        case CEPH_OSD_OP_COPY_FROM:
                dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
                dst->copy_from.src_version =
                        cpu_to_le64(src->copy_from.src_version);
                dst->copy_from.flags = src->copy_from.flags;
                dst->copy_from.src_fadvise_flags =
                        cpu_to_le32(src->copy_from.src_fadvise_flags);
                break;
        default:
                pr_err("unsupported osd opcode %s\n",
                        ceph_osd_op_name(src->op));
                WARN_ON(1);

                return 0;
        }

        dst->op = cpu_to_le16(src->op);
        dst->flags = cpu_to_le32(src->flags);
        dst->payload_len = cpu_to_le32(src->indata_len);

        return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
                                               struct ceph_file_layout *layout,
                                               struct ceph_vino vino,
                                               u64 off, u64 *plen,
                                               unsigned int which, int num_ops,
                                               int opcode, int flags,
                                               struct ceph_snap_context *snapc,
                                               u32 truncate_seq,
                                               u64 truncate_size,
                                               bool use_mempool)
{
        struct ceph_osd_request *req;
        u64 objnum = 0;
        u64 objoff = 0;
        u64 objlen = 0;
        int r;

        BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
               opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
               opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

        req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
                                        GFP_NOFS);
        if (!req) {
                r = -ENOMEM;
                goto fail;
        }

        /* calculate max write size */
        r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
        if (r)
                goto fail;

        if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
                osd_req_op_init(req, which, opcode, 0);
        } else {
                u32 object_size = layout->object_size;
                u32 object_base = off - objoff;
                if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
                        if (truncate_size <= object_base) {
                                truncate_size = 0;
                        } else {
                                truncate_size -= object_base;
                                if (truncate_size > object_size)
                                        truncate_size = object_size;
                        }
                }
                osd_req_op_extent_init(req, which, opcode, objoff, objlen,
                                       truncate_size, truncate_seq);
        }

        req->r_flags = flags;
        req->r_base_oloc.pool = layout->pool_id;
        req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
        ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);

        req->r_snapid = vino.snap;
        if (flags & CEPH_OSD_FLAG_WRITE)
                req->r_data_offset = off;

        if (num_ops > 1)
                /*
                 * This is a special case for ceph_writepages_start(), but it
                 * also covers ceph_uninline_data().  If more multi-op request
                 * use cases emerge, we will need a separate helper.
                 */
                r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
        else
                r = ceph_osdc_alloc_messages(req, GFP_NOFS);
        if (r)
                goto fail;

        return req;

fail:
        ceph_osdc_put_request(req);
        return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
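
/*
 * Usage sketch (hedged; vino/layout come from the caller's inode and
 * the surrounding names are assumed): a file-backed read might look like
 *
 *      u64 len = want;
 *
 *      req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *                                  CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *                                  NULL, truncate_seq, truncate_size,
 *                                  false);
 *      if (IS_ERR(req))
 *              return PTR_ERR(req);
 *
 * On return, len may have been shortened to the object boundary (see
 * calc_layout()) and the caller still has to attach data buffers with
 * osd_req_op_extent_osd_data_pages() or similar before submitting.
 */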

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
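
/*
 * Note: each DEFINE_RB_FUNCS(request, ...) invocation above generates
 * the insert_request(), erase_request() and lookup_request() helpers
 * used throughout this file (and the *_request_mc() variants for the
 * map-check tree), all keyed by r_tid.
 */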

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
                        int (*fn)(struct ceph_osd_request *req, void *arg),
                        void *arg)
{
        struct rb_node *n, *p;

        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                for (p = rb_first(&osd->o_requests); p; ) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        p = rb_next(p);
                        if (fn(req, arg))
                                return;
                }
        }

        for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
                struct ceph_osd_request *req =
                    rb_entry(p, struct ceph_osd_request, r_node);

                p = rb_next(p);
                if (fn(req, arg))
                        return;
        }
}

static bool osd_homeless(struct ceph_osd *osd)
{
        return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
        verify_osdc_locked(osd->o_osdc);

        return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
        refcount_set(&osd->o_ref, 1);
        RB_CLEAR_NODE(&osd->o_node);
        osd->o_requests = RB_ROOT;
        osd->o_linger_requests = RB_ROOT;
        osd->o_backoff_mappings = RB_ROOT;
        osd->o_backoffs_by_id = RB_ROOT;
        INIT_LIST_HEAD(&osd->o_osd_lru);
        INIT_LIST_HEAD(&osd->o_keepalive_item);
        osd->o_incarnation = 1;
        mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
        WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
        WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
        WARN_ON(!list_empty(&osd->o_osd_lru));
        WARN_ON(!list_empty(&osd->o_keepalive_item));

        if (osd->o_auth.authorizer) {
                WARN_ON(osd_homeless(osd));
                ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
        }
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
        struct ceph_osd *osd;

        WARN_ON(onum == CEPH_HOMELESS_OSD);

        osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
        osd_init(osd);
        osd->o_osdc = osdc;
        osd->o_osd = onum;

        ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

        return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
        if (refcount_inc_not_zero(&osd->o_ref)) {
                dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
                     refcount_read(&osd->o_ref));
                return osd;
        } else {
                dout("get_osd %p FAIL\n", osd);
                return NULL;
        }
}

static void put_osd(struct ceph_osd *osd)
{
        dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
             refcount_read(&osd->o_ref) - 1);
        if (refcount_dec_and_test(&osd->o_ref)) {
                osd_cleanup(osd);
                kfree(osd);
        }
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
        BUG_ON(!list_empty(&osd->o_osd_lru));

        spin_lock(&osdc->osd_lru_lock);
        list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
        spin_unlock(&osdc->osd_lru_lock);

        osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests))
                __move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        spin_lock(&osdc->osd_lru_lock);
        if (!list_empty(&osd->o_osd_lru))
                list_del_init(&osd->o_osd_lru);
        spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct rb_node *n;

        verify_osdc_wrlocked(osdc);
        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        ceph_con_close(&osd->o_con);

        for (n = rb_first(&osd->o_requests); n; ) {
                struct ceph_osd_request *req =
                    rb_entry(n, struct ceph_osd_request, r_node);

                n = rb_next(n); /* unlink_request() */

                dout(" reassigning req %p tid %llu\n", req, req->r_tid);
                unlink_request(osd, req);
                link_request(&osdc->homeless_osd, req);
        }
        for (n = rb_first(&osd->o_linger_requests); n; ) {
                struct ceph_osd_linger_request *lreq =
                    rb_entry(n, struct ceph_osd_linger_request, node);

                n = rb_next(n); /* unlink_linger() */

                dout(" reassigning lreq %p linger_id %llu\n", lreq,
                     lreq->linger_id);
                unlink_linger(osd, lreq);
                link_linger(&osdc->homeless_osd, lreq);
        }
        clear_backoffs(osd);

        __remove_osd_from_lru(osd);
        erase_osd(&osdc->osds, osd);
        put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
        struct ceph_entity_addr *peer_addr;

        dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

        if (RB_EMPTY_ROOT(&osd->o_requests) &&
            RB_EMPTY_ROOT(&osd->o_linger_requests)) {
                close_osd(osd);
                return -ENODEV;
        }

        peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
        if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
                        !ceph_con_opened(&osd->o_con)) {
                struct rb_node *n;

                dout("osd addr hasn't changed and connection never opened, "
                     "letting msgr retry\n");
                /* touch each r_stamp for handle_timeout()'s benefit */
                for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
                        struct ceph_osd_request *req =
                            rb_entry(n, struct ceph_osd_request, r_node);
                        req->r_stamp = jiffies;
                }

                return -EAGAIN;
        }

        ceph_con_close(&osd->o_con);
        ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
        osd->o_incarnation++;

        return 0;
}

static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
                                          bool wrlocked)
{
        struct ceph_osd *osd;

        if (wrlocked)
                verify_osdc_wrlocked(osdc);
        else
                verify_osdc_locked(osdc);

        if (o != CEPH_HOMELESS_OSD)
                osd = lookup_osd(&osdc->osds, o);
        else
                osd = &osdc->homeless_osd;
        if (!osd) {
                if (!wrlocked)
                        return ERR_PTR(-EAGAIN);

                osd = create_osd(osdc, o);
                insert_osd(&osdc->osds, osd);
                ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
                              &osdc->osdmap->osd_addr[osd->o_osd]);
        }

        dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
        return osd;
}

/*
 * Create request <-> OSD session relation.
 *
 * @req has to be assigned a tid, @osd may be homeless.
 */
static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(!req->r_tid || req->r_osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        if (!osd_homeless(osd))
                __remove_osd_from_lru(osd);
        else
                atomic_inc(&osd->o_osdc->num_homeless);

        get_osd(osd);
        insert_request(&osd->o_requests, req);
        req->r_osd = osd;
}

static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
{
        verify_osd_locked(osd);
        WARN_ON(req->r_osd != osd);
        dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
             req, req->r_tid);

        req->r_osd = NULL;
        erase_request(&osd->o_requests, req);
        put_osd(osd);

        if (!osd_homeless(osd))
                maybe_move_osd_to_lru(osd);
        else
                atomic_dec(&osd->o_osdc->num_homeless);
}

static bool __pool_full(struct ceph_pg_pool_info *pi)
{
        return pi->flags & CEPH_POOL_FLAG_FULL;
}

static bool have_pool_full(struct ceph_osd_client *osdc)
{
        struct rb_node *n;

        for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
                struct ceph_pg_pool_info *pi =
                    rb_entry(n, struct ceph_pg_pool_info, node);

                if (__pool_full(pi))
                        return true;
        }

        return false;
}

static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
{
        struct ceph_pg_pool_info *pi;

        pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
        if (!pi)
                return false;

        return __pool_full(pi);
}

/*
 * Returns whether a request should be blocked from being sent
 * based on the current osdmap and osd_client settings.
 */
static bool target_should_be_paused(struct ceph_osd_client *osdc,
                                    const struct ceph_osd_request_target *t,
                                    struct ceph_pg_pool_info *pi)
{
        bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
        bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
                       __pool_full(pi);

        WARN_ON(pi->id != t->target_oloc.pool);
        return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
               ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
               (osdc->osdmap->epoch < osdc->epoch_barrier);
}

enum calc_target_result {
        CALC_TARGET_NO_ACTION = 0,
        CALC_TARGET_NEED_RESEND,
        CALC_TARGET_POOL_DNE,
};

static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
                                           struct ceph_osd_request_target *t,
                                           struct ceph_connection *con,
                                           bool any_change)
{
        struct ceph_pg_pool_info *pi;
        struct ceph_pg pgid, last_pgid;
        struct ceph_osds up, acting;
        bool force_resend = false;
        bool unpaused = false;
        bool legacy_change;
        bool split = false;
        bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
        bool recovery_deletes = ceph_osdmap_flag(osdc,
                                                 CEPH_OSDMAP_RECOVERY_DELETES);
        enum calc_target_result ct_res;

        t->epoch = osdc->osdmap->epoch;
        pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
        if (!pi) {
                t->osd = CEPH_HOMELESS_OSD;
                ct_res = CALC_TARGET_POOL_DNE;
                goto out;
        }

        if (osdc->osdmap->epoch == pi->last_force_request_resend) {
                if (t->last_force_resend < pi->last_force_request_resend) {
                        t->last_force_resend = pi->last_force_request_resend;
                        force_resend = true;
                } else if (t->last_force_resend == 0) {
                        force_resend = true;
                }
        }

        /* apply tiering */
        ceph_oid_copy(&t->target_oid, &t->base_oid);
        ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
        if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
                if (t->flags & CEPH_OSD_FLAG_READ && pi->read_tier >= 0)
                        t->target_oloc.pool = pi->read_tier;
                if (t->flags & CEPH_OSD_FLAG_WRITE && pi->write_tier >= 0)
                        t->target_oloc.pool = pi->write_tier;

                pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
                if (!pi) {
                        t->osd = CEPH_HOMELESS_OSD;
                        ct_res = CALC_TARGET_POOL_DNE;
                        goto out;
                }
        }

        __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
        last_pgid.pool = pgid.pool;
        last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);

        ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
        if (any_change &&
            ceph_is_new_interval(&t->acting,
                                 &acting,
                                 &t->up,
                                 &up,
                                 t->size,
                                 pi->size,
1552                                  t->min_size,
1553                                  pi->min_size,
1554                                  t->pg_num,
1555                                  pi->pg_num,
1556                                  t->sort_bitwise,
1557                                  sort_bitwise,
1558                                  t->recovery_deletes,
1559                                  recovery_deletes,
1560                                  &last_pgid))
1561                 force_resend = true;
1562
1563         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1564                 t->paused = false;
1565                 unpaused = true;
1566         }
1567         legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1568                         ceph_osds_changed(&t->acting, &acting, any_change);
1569         if (t->pg_num)
1570                 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1571
1572         if (legacy_change || force_resend || split) {
1573                 t->pgid = pgid; /* struct */
1574                 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1575                 ceph_osds_copy(&t->acting, &acting);
1576                 ceph_osds_copy(&t->up, &up);
1577                 t->size = pi->size;
1578                 t->min_size = pi->min_size;
1579                 t->pg_num = pi->pg_num;
1580                 t->pg_num_mask = pi->pg_num_mask;
1581                 t->sort_bitwise = sort_bitwise;
1582                 t->recovery_deletes = recovery_deletes;
1583
1584                 t->osd = acting.primary;
1585         }
1586
1587         if (unpaused || legacy_change || force_resend ||
1588             (split && con && CEPH_HAVE_FEATURE(con->peer_features,
1589                                                RESEND_ON_SPLIT)))
1590                 ct_res = CALC_TARGET_NEED_RESEND;
1591         else
1592                 ct_res = CALC_TARGET_NO_ACTION;
1593
1594 out:
1595         dout("%s t %p -> ct_res %d osd %d\n", __func__, t, ct_res, t->osd);
1596         return ct_res;
1597 }
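/*
 * Illustrative sketch of how a caller reacts to the result (compare
 * __submit_request() and check_pool_dne() below; the exact handling
 * varies by call site):
 *
 *	switch (calc_target(osdc, &req->r_t, NULL, false)) {
 *	case CALC_TARGET_NO_ACTION:
 *		break;		nothing changed, leave req alone
 *	case CALC_TARGET_NEED_RESEND:
 *		...		move req to the new OSD session and re-send
 *	case CALC_TARGET_POOL_DNE:
 *		...		start a map check, see send_map_check() below
 *	}
 */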
1598
1599 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1600 {
1601         struct ceph_spg_mapping *spg;
1602
1603         spg = kmalloc(sizeof(*spg), GFP_NOIO);
1604         if (!spg)
1605                 return NULL;
1606
1607         RB_CLEAR_NODE(&spg->node);
1608         spg->backoffs = RB_ROOT;
1609         return spg;
1610 }
1611
1612 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1613 {
1614         WARN_ON(!RB_EMPTY_NODE(&spg->node));
1615         WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1616
1617         kfree(spg);
1618 }
1619
1620 /*
1621  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1622  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1623  * defined only within a specific spgid; it does not pass anything to
1624  * children on split, or to another primary.
1625  */
1626 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1627                  RB_BYPTR, const struct ceph_spg *, node)
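/*
 * Note: DEFINE_RB_FUNCS2() above generates the helpers
 * lookup_spg_mapping(), insert_spg_mapping() and erase_spg_mapping(),
 * keyed by struct ceph_spg; lookup and erase are visible below in
 * should_plug_request() and clear_backoffs() respectively.
 */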
1628
1629 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1630 {
1631         return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1632 }
1633
1634 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1635                                    void **pkey, size_t *pkey_len)
1636 {
1637         if (hoid->key_len) {
1638                 *pkey = hoid->key;
1639                 *pkey_len = hoid->key_len;
1640         } else {
1641                 *pkey = hoid->oid;
1642                 *pkey_len = hoid->oid_len;
1643         }
1644 }
1645
1646 static int compare_names(const void *name1, size_t name1_len,
1647                          const void *name2, size_t name2_len)
1648 {
1649         int ret;
1650
1651         ret = memcmp(name1, name2, min(name1_len, name2_len));
1652         if (!ret) {
1653                 if (name1_len < name2_len)
1654                         ret = -1;
1655                 else if (name1_len > name2_len)
1656                         ret = 1;
1657         }
1658         return ret;
1659 }
1660
1661 static int hoid_compare(const struct ceph_hobject_id *lhs,
1662                         const struct ceph_hobject_id *rhs)
1663 {
1664         void *effective_key1, *effective_key2;
1665         size_t effective_key1_len, effective_key2_len;
1666         int ret;
1667
1668         if (lhs->is_max < rhs->is_max)
1669                 return -1;
1670         if (lhs->is_max > rhs->is_max)
1671                 return 1;
1672
1673         if (lhs->pool < rhs->pool)
1674                 return -1;
1675         if (lhs->pool > rhs->pool)
1676                 return 1;
1677
1678         if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1679                 return -1;
1680         if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1681                 return 1;
1682
1683         ret = compare_names(lhs->nspace, lhs->nspace_len,
1684                             rhs->nspace, rhs->nspace_len);
1685         if (ret)
1686                 return ret;
1687
1688         hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1689         hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1690         ret = compare_names(effective_key1, effective_key1_len,
1691                             effective_key2, effective_key2_len);
1692         if (ret)
1693                 return ret;
1694
1695         ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1696         if (ret)
1697                 return ret;
1698
1699         if (lhs->snapid < rhs->snapid)
1700                 return -1;
1701         if (lhs->snapid > rhs->snapid)
1702                 return 1;
1703
1704         return 0;
1705 }
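/*
 * Worked example (hypothetical values): bitwise ordering means the
 * reversed hash dominates the object name.  For two hoids in the same
 * pool with empty namespaces and no locator keys,
 *
 *	lhs: oid "zzz", hash_reverse_bits 0x00000001
 *	rhs: oid "aaa", hash_reverse_bits 0x00000002
 *
 * hoid_compare(lhs, rhs) returns -1 -- the bitwise keys differ before
 * the names are ever consulted.
 */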
1706
1707 /*
1708  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1709  * compat stuff here.
1710  *
1711  * Assumes @hoid is zero-initialized.
1712  */
1713 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1714 {
1715         u8 struct_v;
1716         u32 struct_len;
1717         int ret;
1718
1719         ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1720                                   &struct_len);
1721         if (ret)
1722                 return ret;
1723
1724         if (struct_v < 4) {
1725                 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1726                 goto e_inval;
1727         }
1728
1729         hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1730                                                 GFP_NOIO);
1731         if (IS_ERR(hoid->key)) {
1732                 ret = PTR_ERR(hoid->key);
1733                 hoid->key = NULL;
1734                 return ret;
1735         }
1736
1737         hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1738                                                 GFP_NOIO);
1739         if (IS_ERR(hoid->oid)) {
1740                 ret = PTR_ERR(hoid->oid);
1741                 hoid->oid = NULL;
1742                 return ret;
1743         }
1744
1745         ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1746         ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1747         ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1748
1749         hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1750                                                    GFP_NOIO);
1751         if (IS_ERR(hoid->nspace)) {
1752                 ret = PTR_ERR(hoid->nspace);
1753                 hoid->nspace = NULL;
1754                 return ret;
1755         }
1756
1757         ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1758
1759         ceph_hoid_build_hash_cache(hoid);
1760         return 0;
1761
1762 e_inval:
1763         return -EINVAL;
1764 }
1765
1766 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1767 {
1768         return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1769                4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1770 }
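/*
 * Worked example: a hoid with no locator key, oid "rbd_header.abc"
 * (oid_len 14) and an empty nspace encodes to
 * (8 + 4 + 1 + 8) + (4 + 0) + (4 + 14) + (4 + 0) = 47 bytes, not
 * counting the ceph_start_encoding() header added by encode_hoid().
 */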
1771
1772 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1773 {
1774         ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1775         ceph_encode_string(p, end, hoid->key, hoid->key_len);
1776         ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1777         ceph_encode_64(p, hoid->snapid);
1778         ceph_encode_32(p, hoid->hash);
1779         ceph_encode_8(p, hoid->is_max);
1780         ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1781         ceph_encode_64(p, hoid->pool);
1782 }
1783
1784 static void free_hoid(struct ceph_hobject_id *hoid)
1785 {
1786         if (hoid) {
1787                 kfree(hoid->key);
1788                 kfree(hoid->oid);
1789                 kfree(hoid->nspace);
1790                 kfree(hoid);
1791         }
1792 }
1793
1794 static struct ceph_osd_backoff *alloc_backoff(void)
1795 {
1796         struct ceph_osd_backoff *backoff;
1797
1798         backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1799         if (!backoff)
1800                 return NULL;
1801
1802         RB_CLEAR_NODE(&backoff->spg_node);
1803         RB_CLEAR_NODE(&backoff->id_node);
1804         return backoff;
1805 }
1806
1807 static void free_backoff(struct ceph_osd_backoff *backoff)
1808 {
1809         WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1810         WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1811
1812         free_hoid(backoff->begin);
1813         free_hoid(backoff->end);
1814         kfree(backoff);
1815 }
1816
1817 /*
1818  * Within a specific spgid, backoffs are managed by ->begin hoid.
1819  */
1820 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1821                         RB_BYVAL, spg_node);
1822
1823 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1824                                             const struct ceph_hobject_id *hoid)
1825 {
1826         struct rb_node *n = root->rb_node;
1827
1828         while (n) {
1829                 struct ceph_osd_backoff *cur =
1830                     rb_entry(n, struct ceph_osd_backoff, spg_node);
1831                 int cmp;
1832
1833                 cmp = hoid_compare(hoid, cur->begin);
1834                 if (cmp < 0) {
1835                         n = n->rb_left;
1836                 } else if (cmp > 0) {
1837                         if (hoid_compare(hoid, cur->end) < 0)
1838                                 return cur;
1839
1840                         n = n->rb_right;
1841                 } else {
1842                         return cur;
1843                 }
1844         }
1845
1846         return NULL;
1847 }
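/*
 * Note: the interval is effectively half-open, [begin, end).  An hoid
 * equal to cur->begin matches via cmp == 0, while an hoid equal to
 * cur->end does not, because containment on the right side requires
 * hoid_compare(hoid, cur->end) < 0.
 */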
1848
1849 /*
1850  * Each backoff has a unique id within its OSD session.
1851  */
1852 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1853
1854 static void clear_backoffs(struct ceph_osd *osd)
1855 {
1856         while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1857                 struct ceph_spg_mapping *spg =
1858                     rb_entry(rb_first(&osd->o_backoff_mappings),
1859                              struct ceph_spg_mapping, node);
1860
1861                 while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1862                         struct ceph_osd_backoff *backoff =
1863                             rb_entry(rb_first(&spg->backoffs),
1864                                      struct ceph_osd_backoff, spg_node);
1865
1866                         erase_backoff(&spg->backoffs, backoff);
1867                         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1868                         free_backoff(backoff);
1869                 }
1870                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
1871                 free_spg_mapping(spg);
1872         }
1873 }
1874
1875 /*
1876  * Set up a temporary, non-owning view into @t.
1877  */
1878 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1879                                   const struct ceph_osd_request_target *t)
1880 {
1881         hoid->key = NULL;
1882         hoid->key_len = 0;
1883         hoid->oid = t->target_oid.name;
1884         hoid->oid_len = t->target_oid.name_len;
1885         hoid->snapid = CEPH_NOSNAP;
1886         hoid->hash = t->pgid.seed;
1887         hoid->is_max = false;
1888         if (t->target_oloc.pool_ns) {
1889                 hoid->nspace = t->target_oloc.pool_ns->str;
1890                 hoid->nspace_len = t->target_oloc.pool_ns->len;
1891         } else {
1892                 hoid->nspace = NULL;
1893                 hoid->nspace_len = 0;
1894         }
1895         hoid->pool = t->target_oloc.pool;
1896         ceph_hoid_build_hash_cache(hoid);
1897 }
1898
1899 static bool should_plug_request(struct ceph_osd_request *req)
1900 {
1901         struct ceph_osd *osd = req->r_osd;
1902         struct ceph_spg_mapping *spg;
1903         struct ceph_osd_backoff *backoff;
1904         struct ceph_hobject_id hoid;
1905
1906         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1907         if (!spg)
1908                 return false;
1909
1910         hoid_fill_from_target(&hoid, &req->r_t);
1911         backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1912         if (!backoff)
1913                 return false;
1914
1915         dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1916              __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1917              backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1918         return true;
1919 }
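/*
 * Note: plugging only suppresses the send.  The request keeps its tid
 * and stays linked to the OSD session, so it is re-sent once the OSD
 * lifts the backoff (or the session is reset).
 */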
1920
1921 /*
1922  * Keep get_num_data_items() in sync with this function.
1923  */
1924 static void setup_request_data(struct ceph_osd_request *req)
1925 {
1926         struct ceph_msg *request_msg = req->r_request;
1927         struct ceph_msg *reply_msg = req->r_reply;
1928         struct ceph_osd_req_op *op;
1929
1930         if (req->r_request->num_data_items || req->r_reply->num_data_items)
1931                 return;
1932
1933         WARN_ON(request_msg->data_length || reply_msg->data_length);
1934         for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
1935                 switch (op->op) {
1936                 /* request */
1937                 case CEPH_OSD_OP_WRITE:
1938                 case CEPH_OSD_OP_WRITEFULL:
1939                         WARN_ON(op->indata_len != op->extent.length);
1940                         ceph_osdc_msg_data_add(request_msg,
1941                                                &op->extent.osd_data);
1942                         break;
1943                 case CEPH_OSD_OP_SETXATTR:
1944                 case CEPH_OSD_OP_CMPXATTR:
1945                         WARN_ON(op->indata_len != op->xattr.name_len +
1946                                                   op->xattr.value_len);
1947                         ceph_osdc_msg_data_add(request_msg,
1948                                                &op->xattr.osd_data);
1949                         break;
1950                 case CEPH_OSD_OP_NOTIFY_ACK:
1951                         ceph_osdc_msg_data_add(request_msg,
1952                                                &op->notify_ack.request_data);
1953                         break;
1954                 case CEPH_OSD_OP_COPY_FROM:
1955                         ceph_osdc_msg_data_add(request_msg,
1956                                                &op->copy_from.osd_data);
1957                         break;
1958
1959                 /* reply */
1960                 case CEPH_OSD_OP_STAT:
1961                         ceph_osdc_msg_data_add(reply_msg,
1962                                                &op->raw_data_in);
1963                         break;
1964                 case CEPH_OSD_OP_READ:
1965                         ceph_osdc_msg_data_add(reply_msg,
1966                                                &op->extent.osd_data);
1967                         break;
1968                 case CEPH_OSD_OP_LIST_WATCHERS:
1969                         ceph_osdc_msg_data_add(reply_msg,
1970                                                &op->list_watchers.response_data);
1971                         break;
1972
1973                 /* both */
1974                 case CEPH_OSD_OP_CALL:
1975                         WARN_ON(op->indata_len != op->cls.class_len +
1976                                                   op->cls.method_len +
1977                                                   op->cls.indata_len);
1978                         ceph_osdc_msg_data_add(request_msg,
1979                                                &op->cls.request_info);
1980                         /* optional, can be NONE */
1981                         ceph_osdc_msg_data_add(request_msg,
1982                                                &op->cls.request_data);
1983                         /* optional, can be NONE */
1984                         ceph_osdc_msg_data_add(reply_msg,
1985                                                &op->cls.response_data);
1986                         break;
1987                 case CEPH_OSD_OP_NOTIFY:
1988                         ceph_osdc_msg_data_add(request_msg,
1989                                                &op->notify.request_data);
1990                         ceph_osdc_msg_data_add(reply_msg,
1991                                                &op->notify.response_data);
1992                         break;
1993                 }
1994         }
1995 }
1996
1997 static void encode_pgid(void **p, const struct ceph_pg *pgid)
1998 {
1999         ceph_encode_8(p, 1);
2000         ceph_encode_64(p, pgid->pool);
2001         ceph_encode_32(p, pgid->seed);
2002         ceph_encode_32(p, -1); /* preferred */
2003 }
2004
2005 static void encode_spgid(void **p, const struct ceph_spg *spgid)
2006 {
2007         ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
2008         encode_pgid(p, &spgid->pgid);
2009         ceph_encode_8(p, spgid->shard);
2010 }
2011
2012 static void encode_oloc(void **p, void *end,
2013                         const struct ceph_object_locator *oloc)
2014 {
2015         ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
2016         ceph_encode_64(p, oloc->pool);
2017         ceph_encode_32(p, -1); /* preferred */
2018         ceph_encode_32(p, 0);  /* key len */
2019         if (oloc->pool_ns)
2020                 ceph_encode_string(p, end, oloc->pool_ns->str,
2021                                    oloc->pool_ns->len);
2022         else
2023                 ceph_encode_32(p, 0);
2024 }
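/*
 * Sketch of the resulting wire format (object_locator_t v5, compat 4),
 * assuming the default empty namespace:
 *
 *	u8  struct_v = 5, u8 compat = 4, u32 struct_len
 *	s64 pool
 *	s32 preferred = -1	(always -1, unused)
 *	u32 key_len = 0		(locator keys are not supported)
 *	u32 nspace_len = 0	(or u32 len + pool_ns bytes)
 */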
2025
2026 static void encode_request_partial(struct ceph_osd_request *req,
2027                                    struct ceph_msg *msg)
2028 {
2029         void *p = msg->front.iov_base;
2030         void *const end = p + msg->front_alloc_len;
2031         u32 data_len = 0;
2032         int i;
2033
2034         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
2035                 /* snapshots aren't writeable */
2036                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
2037         } else {
2038                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
2039                         req->r_data_offset || req->r_snapc);
2040         }
2041
2042         setup_request_data(req);
2043
2044         encode_spgid(&p, &req->r_t.spgid); /* actual spg */
2045         ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
2046         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
2047         ceph_encode_32(&p, req->r_flags);
2048
2049         /* reqid */
2050         ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
2051         memset(p, 0, sizeof(struct ceph_osd_reqid));
2052         p += sizeof(struct ceph_osd_reqid);
2053
2054         /* trace */
2055         memset(p, 0, sizeof(struct ceph_blkin_trace_info));
2056         p += sizeof(struct ceph_blkin_trace_info);
2057
2058         ceph_encode_32(&p, 0); /* client_inc, always 0 */
2059         ceph_encode_timespec64(p, &req->r_mtime);
2060         p += sizeof(struct ceph_timespec);
2061
2062         encode_oloc(&p, end, &req->r_t.target_oloc);
2063         ceph_encode_string(&p, end, req->r_t.target_oid.name,
2064                            req->r_t.target_oid.name_len);
2065
2066         /* ops, can imply data */
2067         ceph_encode_16(&p, req->r_num_ops);
2068         for (i = 0; i < req->r_num_ops; i++) {
2069                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
2070                 p += sizeof(struct ceph_osd_op);
2071         }
2072
2073         ceph_encode_64(&p, req->r_snapid); /* snapid */
2074         if (req->r_snapc) {
2075                 ceph_encode_64(&p, req->r_snapc->seq);
2076                 ceph_encode_32(&p, req->r_snapc->num_snaps);
2077                 for (i = 0; i < req->r_snapc->num_snaps; i++)
2078                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2079         } else {
2080                 ceph_encode_64(&p, 0); /* snap_seq */
2081                 ceph_encode_32(&p, 0); /* snaps len */
2082         }
2083
2084         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2085         BUG_ON(p > end - 8); /* space for features */
2086
2087         msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2088         /* front_len is finalized in encode_request_finish() */
2089         msg->front.iov_len = p - msg->front.iov_base;
2090         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2091         msg->hdr.data_len = cpu_to_le32(data_len);
2092         /*
2093          * The header "data_off" is a hint to the receiver allowing it
2094          * to align received data into its buffers such that there's no
2095          * need to re-copy it before writing it to disk (direct I/O).
2096          */
2097         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2098
2099         dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2100              req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2101 }
2102
2103 static void encode_request_finish(struct ceph_msg *msg)
2104 {
2105         void *p = msg->front.iov_base;
2106         void *const partial_end = p + msg->front.iov_len;
2107         void *const end = p + msg->front_alloc_len;
2108
2109         if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2110                 /* luminous OSD -- encode features and be done */
2111                 p = partial_end;
2112                 ceph_encode_64(&p, msg->con->peer_features);
2113         } else {
2114                 struct {
2115                         char spgid[CEPH_ENCODING_START_BLK_LEN +
2116                                    CEPH_PGID_ENCODING_LEN + 1];
2117                         __le32 hash;
2118                         __le32 epoch;
2119                         __le32 flags;
2120                         char reqid[CEPH_ENCODING_START_BLK_LEN +
2121                                    sizeof(struct ceph_osd_reqid)];
2122                         char trace[sizeof(struct ceph_blkin_trace_info)];
2123                         __le32 client_inc;
2124                         struct ceph_timespec mtime;
2125                 } __packed head;
2126                 struct ceph_pg pgid;
2127                 void *oloc, *oid, *tail;
2128                 int oloc_len, oid_len, tail_len;
2129                 int len;
2130
2131                 /*
2132                  * Pre-luminous OSD -- reencode v8 into v4 using @head
2133                  * as a temporary buffer.  Encode the raw PG; the rest
2134                  * is just a matter of moving oloc, oid and tail blobs
2135                  * around.
2136                  */
2137                 memcpy(&head, p, sizeof(head));
2138                 p += sizeof(head);
2139
2140                 oloc = p;
2141                 p += CEPH_ENCODING_START_BLK_LEN;
2142                 pgid.pool = ceph_decode_64(&p);
2143                 p += 4 + 4; /* preferred, key len */
2144                 len = ceph_decode_32(&p);
2145                 p += len;   /* nspace */
2146                 oloc_len = p - oloc;
2147
2148                 oid = p;
2149                 len = ceph_decode_32(&p);
2150                 p += len;
2151                 oid_len = p - oid;
2152
2153                 tail = p;
2154                 tail_len = partial_end - p;
2155
2156                 p = msg->front.iov_base;
2157                 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2158                 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2159                 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2160                 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2161
2162                 /* reassert_version */
2163                 memset(p, 0, sizeof(struct ceph_eversion));
2164                 p += sizeof(struct ceph_eversion);
2165
2166                 BUG_ON(p >= oloc);
2167                 memmove(p, oloc, oloc_len);
2168                 p += oloc_len;
2169
2170                 pgid.seed = le32_to_cpu(head.hash);
2171                 encode_pgid(&p, &pgid); /* raw pg */
2172
2173                 BUG_ON(p >= oid);
2174                 memmove(p, oid, oid_len);
2175                 p += oid_len;
2176
2177                 /* tail -- ops, snapid, snapc, retry_attempt */
2178                 BUG_ON(p >= tail);
2179                 memmove(p, tail, tail_len);
2180                 p += tail_len;
2181
2182                 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2183         }
2184
2185         BUG_ON(p > end);
2186         msg->front.iov_len = p - msg->front.iov_base;
2187         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2188
2189         dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2190              le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2191              le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2192              le16_to_cpu(msg->hdr.version));
2193 }
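/*
 * Note: the BUG_ON(p >= oloc/oid/tail) checks above rely on the v4
 * head (client_inc, epoch, flags, mtime, reassert_version) being
 * strictly shorter than the v8 head that was decoded, so each blob
 * only ever moves toward the front of the buffer and the write
 * pointer can never overtake its source.
 */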
2194
2195 /*
2196  * @req has to be assigned a tid and registered.
2197  */
2198 static void send_request(struct ceph_osd_request *req)
2199 {
2200         struct ceph_osd *osd = req->r_osd;
2201
2202         verify_osd_locked(osd);
2203         WARN_ON(osd->o_osd != req->r_t.osd);
2204
2205         /* backoff? */
2206         if (should_plug_request(req))
2207                 return;
2208
2209         /*
2210          * We may have a previously queued request message hanging
2211          * around.  Cancel it to avoid corrupting the msgr.
2212          */
2213         if (req->r_sent)
2214                 ceph_msg_revoke(req->r_request);
2215
2216         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2217         if (req->r_attempts)
2218                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2219         else
2220                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2221
2222         encode_request_partial(req, req->r_request);
2223
2224         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2225              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2226              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2227              req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2228              req->r_attempts);
2229
2230         req->r_t.paused = false;
2231         req->r_stamp = jiffies;
2232         req->r_attempts++;
2233
2234         req->r_sent = osd->o_incarnation;
2235         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2236         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2237 }
2238
2239 static void maybe_request_map(struct ceph_osd_client *osdc)
2240 {
2241         bool continuous = false;
2242
2243         verify_osdc_locked(osdc);
2244         WARN_ON(!osdc->osdmap->epoch);
2245
2246         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2247             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2248             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2249                 dout("%s osdc %p continuous\n", __func__, osdc);
2250                 continuous = true;
2251         } else {
2252                 dout("%s osdc %p onetime\n", __func__, osdc);
2253         }
2254
2255         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2256                                osdc->osdmap->epoch + 1, continuous))
2257                 ceph_monc_renew_subs(&osdc->client->monc);
2258 }
2259
2260 static void complete_request(struct ceph_osd_request *req, int err);
2261 static void send_map_check(struct ceph_osd_request *req);
2262
2263 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2264 {
2265         struct ceph_osd_client *osdc = req->r_osdc;
2266         struct ceph_osd *osd;
2267         enum calc_target_result ct_res;
2268         int err = 0;
2269         bool need_send = false;
2270         bool promoted = false;
2271
2272         WARN_ON(req->r_tid);
2273         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2274
2275 again:
2276         ct_res = calc_target(osdc, &req->r_t, NULL, false);
2277         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2278                 goto promote;
2279
2280         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2281         if (IS_ERR(osd)) {
2282                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2283                 goto promote;
2284         }
2285
2286         if (osdc->abort_err) {
2287                 dout("req %p abort_err %d\n", req, osdc->abort_err);
2288                 err = osdc->abort_err;
2289         } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2290                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2291                      osdc->epoch_barrier);
2292                 req->r_t.paused = true;
2293                 maybe_request_map(osdc);
2294         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2295                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2296                 dout("req %p pausewr\n", req);
2297                 req->r_t.paused = true;
2298                 maybe_request_map(osdc);
2299         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2300                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2301                 dout("req %p pauserd\n", req);
2302                 req->r_t.paused = true;
2303                 maybe_request_map(osdc);
2304         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2305                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2306                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
2307                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2308                     pool_full(osdc, req->r_t.base_oloc.pool))) {
2309                 dout("req %p full/pool_full\n", req);
2310                 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2311                         err = -ENOSPC;
2312                 } else {
2313                         pr_warn_ratelimited("FULL or reached pool quota\n");
2314                         req->r_t.paused = true;
2315                         maybe_request_map(osdc);
2316                 }
2317         } else if (!osd_homeless(osd)) {
2318                 need_send = true;
2319         } else {
2320                 maybe_request_map(osdc);
2321         }
2322
2323         mutex_lock(&osd->lock);
2324         /*
2325          * Assign the tid atomically with send_request() to protect
2326          * multiple writes to the same object from racing with each
2327          * other, resulting in out of order ops on the OSDs.
2328          */
2329         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2330         link_request(osd, req);
2331         if (need_send)
2332                 send_request(req);
2333         else if (err)
2334                 complete_request(req, err);
2335         mutex_unlock(&osd->lock);
2336
2337         if (!err && ct_res == CALC_TARGET_POOL_DNE)
2338                 send_map_check(req);
2339
2340         if (promoted)
2341                 downgrade_write(&osdc->lock);
2342         return;
2343
2344 promote:
2345         up_read(&osdc->lock);
2346         down_write(&osdc->lock);
2347         wrlocked = true;
2348         promoted = true;
2349         goto again;
2350 }
2351
2352 static void account_request(struct ceph_osd_request *req)
2353 {
2354         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2355         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2356
2357         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2358         atomic_inc(&req->r_osdc->num_requests);
2359
2360         req->r_start_stamp = jiffies;
2361 }
2362
2363 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2364 {
2365         ceph_osdc_get_request(req);
2366         account_request(req);
2367         __submit_request(req, wrlocked);
2368 }
2369
2370 static void finish_request(struct ceph_osd_request *req)
2371 {
2372         struct ceph_osd_client *osdc = req->r_osdc;
2373
2374         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2375         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2376
2377         if (req->r_osd)
2378                 unlink_request(req->r_osd, req);
2379         atomic_dec(&osdc->num_requests);
2380
2381         /*
2382          * If an OSD has failed or returned and a request has been sent
2383          * twice, it's possible to get a reply and end up here while the
2384          * request message is queued for delivery.  We will ignore the
2385          * reply, so not a big deal, but better to try and catch it.
2386          */
2387         ceph_msg_revoke(req->r_request);
2388         ceph_msg_revoke_incoming(req->r_reply);
2389 }
2390
2391 static void __complete_request(struct ceph_osd_request *req)
2392 {
2393         dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2394              req->r_tid, req->r_callback, req->r_result);
2395
2396         if (req->r_callback)
2397                 req->r_callback(req);
2398         complete_all(&req->r_completion);
2399         ceph_osdc_put_request(req);
2400 }
2401
2402 static void complete_request_workfn(struct work_struct *work)
2403 {
2404         struct ceph_osd_request *req =
2405             container_of(work, struct ceph_osd_request, r_complete_work);
2406
2407         __complete_request(req);
2408 }
2409
2410 /*
2411  * This is open-coded in handle_reply().
2412  */
2413 static void complete_request(struct ceph_osd_request *req, int err)
2414 {
2415         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2416
2417         req->r_result = err;
2418         finish_request(req);
2419
2420         INIT_WORK(&req->r_complete_work, complete_request_workfn);
2421         queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2422 }
2423
2424 static void cancel_map_check(struct ceph_osd_request *req)
2425 {
2426         struct ceph_osd_client *osdc = req->r_osdc;
2427         struct ceph_osd_request *lookup_req;
2428
2429         verify_osdc_wrlocked(osdc);
2430
2431         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2432         if (!lookup_req)
2433                 return;
2434
2435         WARN_ON(lookup_req != req);
2436         erase_request_mc(&osdc->map_checks, req);
2437         ceph_osdc_put_request(req);
2438 }
2439
2440 static void cancel_request(struct ceph_osd_request *req)
2441 {
2442         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2443
2444         cancel_map_check(req);
2445         finish_request(req);
2446         complete_all(&req->r_completion);
2447         ceph_osdc_put_request(req);
2448 }
2449
2450 static void abort_request(struct ceph_osd_request *req, int err)
2451 {
2452         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2453
2454         cancel_map_check(req);
2455         complete_request(req, err);
2456 }
2457
2458 static int abort_fn(struct ceph_osd_request *req, void *arg)
2459 {
2460         int err = *(int *)arg;
2461
2462         abort_request(req, err);
2463         return 0; /* continue iteration */
2464 }
2465
2466 /*
2467  * Abort all in-flight requests with @err and arrange for all future
2468  * requests to be failed immediately.
2469  */
2470 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2471 {
2472         dout("%s osdc %p err %d\n", __func__, osdc, err);
2473         down_write(&osdc->lock);
2474         for_each_request(osdc, abort_fn, &err);
2475         osdc->abort_err = err;
2476         up_write(&osdc->lock);
2477 }
2478 EXPORT_SYMBOL(ceph_osdc_abort_requests);
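/*
 * Sketch of a hypothetical caller: a client that decides the session
 * is beyond recovery, e.g. on forced unmount, might do
 *
 *	ceph_osdc_abort_requests(osdc, -EIO);
 *
 * after which in-flight requests complete with -EIO and any request
 * submitted later is failed immediately via osdc->abort_err in
 * __submit_request().
 */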
2479
2480 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2481 {
        verify_osdc_wrlocked(osdc);

2482         if (likely(eb > osdc->epoch_barrier)) {
2483                 dout("updating epoch_barrier from %u to %u\n",
2484                                 osdc->epoch_barrier, eb);
2485                 osdc->epoch_barrier = eb;
2486                 /* Request map if we're not to the barrier yet */
2487                 if (eb > osdc->osdmap->epoch)
2488                         maybe_request_map(osdc);
2489         }
2490 }
2491
2492 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2493 {
2494         down_read(&osdc->lock);
2495         if (unlikely(eb > osdc->epoch_barrier)) {
2496                 up_read(&osdc->lock);
2497                 down_write(&osdc->lock);
2498                 update_epoch_barrier(osdc, eb);
2499                 up_write(&osdc->lock);
2500         } else {
2501                 up_read(&osdc->lock);
2502         }
2503 }
2504 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
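/*
 * Note: this is a classic double-checked locking pattern.  The barrier
 * is tested under the read lock first; only a raising update takes the
 * write lock, and update_epoch_barrier() re-checks the condition
 * because another writer may have raised the barrier between the
 * up_read() and the down_write().
 */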
2505
2506 /*
2507  * We can end up releasing caps as a result of abort_request().
2508  * In that case, we probably want to ensure that the cap release message
2509  * has an updated epoch barrier in it, so set the epoch barrier prior to
2510  * aborting the first request.
2511  */
2512 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2513 {
2514         struct ceph_osd_client *osdc = req->r_osdc;
2515         bool *victims = arg;
2516
2517         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2518             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2519              pool_full(osdc, req->r_t.base_oloc.pool))) {
2520                 if (!*victims) {
2521                         update_epoch_barrier(osdc, osdc->osdmap->epoch);
2522                         *victims = true;
2523                 }
2524                 abort_request(req, -ENOSPC);
2525         }
2526
2527         return 0; /* continue iteration */
2528 }
2529
2530 /*
2531  * Drop all pending requests that are stalled waiting on a full condition to
2532  * clear, and complete them with ENOSPC as the return code. Set the
2533  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2534  * cancelled.
2535  */
2536 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2537 {
2538         bool victims = false;
2539
2540         if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2541             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2542                 for_each_request(osdc, abort_on_full_fn, &victims);
2543 }
2544
2545 static void check_pool_dne(struct ceph_osd_request *req)
2546 {
2547         struct ceph_osd_client *osdc = req->r_osdc;
2548         struct ceph_osdmap *map = osdc->osdmap;
2549
2550         verify_osdc_wrlocked(osdc);
2551         WARN_ON(!map->epoch);
2552
2553         if (req->r_attempts) {
2554                 /*
2555                  * We sent a request earlier, which means that
2556                  * previously the pool existed, and now it does not
2557                  * (i.e., it was deleted).
2558                  */
2559                 req->r_map_dne_bound = map->epoch;
2560                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2561                      req->r_tid);
2562         } else {
2563                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2564                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
2565         }
2566
2567         if (req->r_map_dne_bound) {
2568                 if (map->epoch >= req->r_map_dne_bound) {
2569                         /* we had a new enough map */
2570                         pr_info_ratelimited("tid %llu pool does not exist\n",
2571                                             req->r_tid);
2572                         complete_request(req, -ENOENT);
2573                 }
2574         } else {
2575                 send_map_check(req);
2576         }
2577 }
2578
2579 static void map_check_cb(struct ceph_mon_generic_request *greq)
2580 {
2581         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2582         struct ceph_osd_request *req;
2583         u64 tid = greq->private_data;
2584
2585         WARN_ON(greq->result || !greq->u.newest);
2586
2587         down_write(&osdc->lock);
2588         req = lookup_request_mc(&osdc->map_checks, tid);
2589         if (!req) {
2590                 dout("%s tid %llu dne\n", __func__, tid);
2591                 goto out_unlock;
2592         }
2593
2594         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2595              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2596         if (!req->r_map_dne_bound)
2597                 req->r_map_dne_bound = greq->u.newest;
2598         erase_request_mc(&osdc->map_checks, req);
2599         check_pool_dne(req);
2600
2601         ceph_osdc_put_request(req);
2602 out_unlock:
2603         up_write(&osdc->lock);
2604 }
2605
2606 static void send_map_check(struct ceph_osd_request *req)
2607 {
2608         struct ceph_osd_client *osdc = req->r_osdc;
2609         struct ceph_osd_request *lookup_req;
2610         int ret;
2611
2612         verify_osdc_wrlocked(osdc);
2613
2614         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2615         if (lookup_req) {
2616                 WARN_ON(lookup_req != req);
2617                 return;
2618         }
2619
2620         ceph_osdc_get_request(req);
2621         insert_request_mc(&osdc->map_checks, req);
2622         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2623                                           map_check_cb, req->r_tid);
2624         WARN_ON(ret);
2625 }
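/*
 * Note on the map-check round trip: send_map_check() parks the request
 * in osdc->map_checks and asks the monitor for the newest osdmap
 * version; map_check_cb() above then records that version in
 * r_map_dne_bound and calls check_pool_dne(), which completes the
 * request with -ENOENT once the locally held map is new enough.
 */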
2626
2627 /*
2628  * lingering requests, watch/notify v2 infrastructure
2629  */
2630 static void linger_release(struct kref *kref)
2631 {
2632         struct ceph_osd_linger_request *lreq =
2633             container_of(kref, struct ceph_osd_linger_request, kref);
2634
2635         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2636              lreq->reg_req, lreq->ping_req);
2637         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2638         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2639         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2640         WARN_ON(!list_empty(&lreq->scan_item));
2641         WARN_ON(!list_empty(&lreq->pending_lworks));
2642         WARN_ON(lreq->osd);
2643
2644         if (lreq->reg_req)
2645                 ceph_osdc_put_request(lreq->reg_req);
2646         if (lreq->ping_req)
2647                 ceph_osdc_put_request(lreq->ping_req);
2648         target_destroy(&lreq->t);
2649         kfree(lreq);
2650 }
2651
2652 static void linger_put(struct ceph_osd_linger_request *lreq)
2653 {
2654         if (lreq)
2655                 kref_put(&lreq->kref, linger_release);
2656 }
2657
2658 static struct ceph_osd_linger_request *
2659 linger_get(struct ceph_osd_linger_request *lreq)
2660 {
2661         kref_get(&lreq->kref);
2662         return lreq;
2663 }
2664
2665 static struct ceph_osd_linger_request *
2666 linger_alloc(struct ceph_osd_client *osdc)
2667 {
2668         struct ceph_osd_linger_request *lreq;
2669
2670         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2671         if (!lreq)
2672                 return NULL;
2673
2674         kref_init(&lreq->kref);
2675         mutex_init(&lreq->lock);
2676         RB_CLEAR_NODE(&lreq->node);
2677         RB_CLEAR_NODE(&lreq->osdc_node);
2678         RB_CLEAR_NODE(&lreq->mc_node);
2679         INIT_LIST_HEAD(&lreq->scan_item);
2680         INIT_LIST_HEAD(&lreq->pending_lworks);
2681         init_completion(&lreq->reg_commit_wait);
2682         init_completion(&lreq->notify_finish_wait);
2683
2684         lreq->osdc = osdc;
2685         target_init(&lreq->t);
2686
2687         dout("%s lreq %p\n", __func__, lreq);
2688         return lreq;
2689 }
2690
2691 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2692 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2693 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
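/*
 * Note: the three macros above generate, keyed by linger_id,
 * insert_linger()/erase_linger() for the per-OSD tree (used by
 * link_linger() and unlink_linger() below), and full lookup/insert/
 * erase helper sets for osdc->linger_requests (*_linger_osdc) and the
 * linger map-check tree (*_linger_mc).
 */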
2694
2695 /*
2696  * Create linger request <-> OSD session relation.
2697  *
2698  * @lreq has to be registered, @osd may be homeless.
2699  */
2700 static void link_linger(struct ceph_osd *osd,
2701                         struct ceph_osd_linger_request *lreq)
2702 {
2703         verify_osd_locked(osd);
2704         WARN_ON(!lreq->linger_id || lreq->osd);
2705         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2706              osd->o_osd, lreq, lreq->linger_id);
2707
2708         if (!osd_homeless(osd))
2709                 __remove_osd_from_lru(osd);
2710         else
2711                 atomic_inc(&osd->o_osdc->num_homeless);
2712
2713         get_osd(osd);
2714         insert_linger(&osd->o_linger_requests, lreq);
2715         lreq->osd = osd;
2716 }
2717
2718 static void unlink_linger(struct ceph_osd *osd,
2719                           struct ceph_osd_linger_request *lreq)
2720 {
2721         verify_osd_locked(osd);
2722         WARN_ON(lreq->osd != osd);
2723         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2724              osd->o_osd, lreq, lreq->linger_id);
2725
2726         lreq->osd = NULL;
2727         erase_linger(&osd->o_linger_requests, lreq);
2728         put_osd(osd);
2729
2730         if (!osd_homeless(osd))
2731                 maybe_move_osd_to_lru(osd);
2732         else
2733                 atomic_dec(&osd->o_osdc->num_homeless);
2734 }
2735
2736 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2737 {
2738         verify_osdc_locked(lreq->osdc);
2739
2740         return !RB_EMPTY_NODE(&lreq->osdc_node);
2741 }
2742
2743 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2744 {
2745         struct ceph_osd_client *osdc = lreq->osdc;
2746         bool registered;
2747
2748         down_read(&osdc->lock);
2749         registered = __linger_registered(lreq);
2750         up_read(&osdc->lock);
2751
2752         return registered;
2753 }
2754
2755 static void linger_register(struct ceph_osd_linger_request *lreq)
2756 {
2757         struct ceph_osd_client *osdc = lreq->osdc;
2758
2759         verify_osdc_wrlocked(osdc);
2760         WARN_ON(lreq->linger_id);
2761
2762         linger_get(lreq);
2763         lreq->linger_id = ++osdc->last_linger_id;
2764         insert_linger_osdc(&osdc->linger_requests, lreq);
2765 }
2766
2767 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2768 {
2769         struct ceph_osd_client *osdc = lreq->osdc;
2770
2771         verify_osdc_wrlocked(osdc);
2772
2773         erase_linger_osdc(&osdc->linger_requests, lreq);
2774         linger_put(lreq);
2775 }
2776
2777 static void cancel_linger_request(struct ceph_osd_request *req)
2778 {
2779         struct ceph_osd_linger_request *lreq = req->r_priv;
2780
2781         WARN_ON(!req->r_linger);
2782         cancel_request(req);
2783         linger_put(lreq);
2784 }
2785
2786 struct linger_work {
2787         struct work_struct work;
2788         struct ceph_osd_linger_request *lreq;
2789         struct list_head pending_item;
2790         unsigned long queued_stamp;
2791
2792         union {
2793                 struct {
2794                         u64 notify_id;
2795                         u64 notifier_id;
2796                         void *payload; /* points into @msg front */
2797                         size_t payload_len;
2798
2799                         struct ceph_msg *msg; /* for ceph_msg_put() */
2800                 } notify;
2801                 struct {
2802                         int err;
2803                 } error;
2804         };
2805 };
2806
2807 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2808                                        work_func_t workfn)
2809 {
2810         struct linger_work *lwork;
2811
2812         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2813         if (!lwork)
2814                 return NULL;
2815
2816         INIT_WORK(&lwork->work, workfn);
2817         INIT_LIST_HEAD(&lwork->pending_item);
2818         lwork->lreq = linger_get(lreq);
2819
2820         return lwork;
2821 }
2822
2823 static void lwork_free(struct linger_work *lwork)
2824 {
2825         struct ceph_osd_linger_request *lreq = lwork->lreq;
2826
2827         mutex_lock(&lreq->lock);
2828         list_del(&lwork->pending_item);
2829         mutex_unlock(&lreq->lock);
2830
2831         linger_put(lreq);
2832         kfree(lwork);
2833 }
2834
2835 static void lwork_queue(struct linger_work *lwork)
2836 {
2837         struct ceph_osd_linger_request *lreq = lwork->lreq;
2838         struct ceph_osd_client *osdc = lreq->osdc;
2839
2840         verify_lreq_locked(lreq);
2841         WARN_ON(!list_empty(&lwork->pending_item));
2842
2843         lwork->queued_stamp = jiffies;
2844         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2845         queue_work(osdc->notify_wq, &lwork->work);
2846 }
2847
2848 static void do_watch_notify(struct work_struct *w)
2849 {
2850         struct linger_work *lwork = container_of(w, struct linger_work, work);
2851         struct ceph_osd_linger_request *lreq = lwork->lreq;
2852
2853         if (!linger_registered(lreq)) {
2854                 dout("%s lreq %p not registered\n", __func__, lreq);
2855                 goto out;
2856         }
2857
2858         WARN_ON(!lreq->is_watch);
2859         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2860              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2861              lwork->notify.payload_len);
2862         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2863                   lwork->notify.notifier_id, lwork->notify.payload,
2864                   lwork->notify.payload_len);
2865
2866 out:
2867         ceph_msg_put(lwork->notify.msg);
2868         lwork_free(lwork);
2869 }
2870
2871 static void do_watch_error(struct work_struct *w)
2872 {
2873         struct linger_work *lwork = container_of(w, struct linger_work, work);
2874         struct ceph_osd_linger_request *lreq = lwork->lreq;
2875
2876         if (!linger_registered(lreq)) {
2877                 dout("%s lreq %p not registered\n", __func__, lreq);
2878                 goto out;
2879         }
2880
2881         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2882         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2883
2884 out:
2885         lwork_free(lwork);
2886 }
2887
2888 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2889 {
2890         struct linger_work *lwork;
2891
2892         lwork = lwork_alloc(lreq, do_watch_error);
2893         if (!lwork) {
2894                 pr_err("failed to allocate error-lwork\n");
2895                 return;
2896         }
2897
2898         lwork->error.err = lreq->last_error;
2899         lwork_queue(lwork);
2900 }
2901
2902 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2903                                        int result)
2904 {
2905         if (!completion_done(&lreq->reg_commit_wait)) {
2906                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2907                 complete_all(&lreq->reg_commit_wait);
2908         }
2909 }
2910
2911 static void linger_commit_cb(struct ceph_osd_request *req)
2912 {
2913         struct ceph_osd_linger_request *lreq = req->r_priv;
2914
2915         mutex_lock(&lreq->lock);
2916         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
2917              lreq->linger_id, req->r_result);
2918         linger_reg_commit_complete(lreq, req->r_result);
2919         lreq->committed = true;
2920
2921         if (!lreq->is_watch) {
2922                 struct ceph_osd_data *osd_data =
2923                     osd_req_op_data(req, 0, notify, response_data);
2924                 void *p = page_address(osd_data->pages[0]);
2925
2926                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
2927                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
2928
2929                 /* make note of the notify_id */
2930                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
2931                         lreq->notify_id = ceph_decode_64(&p);
2932                         dout("lreq %p notify_id %llu\n", lreq,
2933                              lreq->notify_id);
2934                 } else {
2935                         dout("lreq %p no notify_id\n", lreq);
2936                 }
2937         }
2938
2939         mutex_unlock(&lreq->lock);
2940         linger_put(lreq);
2941 }
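
/*
 * For a notify, the commit reply's response data starts with the
 * notify_id assigned by the OSD.  handle_watch_notify() later uses it
 * to match NOTIFY_COMPLETE events against this particular notify and
 * to ignore completes meant for a different notify.
 */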
2942
2943 static int normalize_watch_error(int err)
2944 {
2945         /*
2946          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
2947          * notification and a failure to reconnect because we raced with
2948          * the delete appear the same to the user.
2949          */
2950         if (err == -ENOENT)
2951                 err = -ENOTCONN;
2952
2953         return err;
2954 }
2955
2956 static void linger_reconnect_cb(struct ceph_osd_request *req)
2957 {
2958         struct ceph_osd_linger_request *lreq = req->r_priv;
2959
2960         mutex_lock(&lreq->lock);
2961         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
2962              lreq, lreq->linger_id, req->r_result, lreq->last_error);
2963         if (req->r_result < 0) {
2964                 if (!lreq->last_error) {
2965                         lreq->last_error = normalize_watch_error(req->r_result);
2966                         queue_watch_error(lreq);
2967                 }
2968         }
2969
2970         mutex_unlock(&lreq->lock);
2971         linger_put(lreq);
2972 }
2973
2974 static void send_linger(struct ceph_osd_linger_request *lreq)
2975 {
2976         struct ceph_osd_request *req = lreq->reg_req;
2977         struct ceph_osd_req_op *op = &req->r_ops[0];
2978
2979         verify_osdc_wrlocked(req->r_osdc);
2980         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
2981
2982         if (req->r_osd)
2983                 cancel_linger_request(req);
2984
2985         request_reinit(req);
2986         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
2987         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
2988         req->r_flags = lreq->t.flags;
2989         req->r_mtime = lreq->mtime;
2990
2991         mutex_lock(&lreq->lock);
2992         if (lreq->is_watch && lreq->committed) {
2993                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
2994                         op->watch.cookie != lreq->linger_id);
2995                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
2996                 op->watch.gen = ++lreq->register_gen;
2997                 dout("lreq %p reconnect register_gen %u\n", lreq,
2998                      op->watch.gen);
2999                 req->r_callback = linger_reconnect_cb;
3000         } else {
3001                 if (!lreq->is_watch)
3002                         lreq->notify_id = 0;
3003                 else
3004                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
3005                 dout("lreq %p register\n", lreq);
3006                 req->r_callback = linger_commit_cb;
3007         }
3008         mutex_unlock(&lreq->lock);
3009
3010         req->r_priv = linger_get(lreq);
3011         req->r_linger = true;
3012
3013         submit_request(req, true);
3014 }
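
/*
 * send_linger() thus has two modes: initial registration (WATCH or
 * NOTIFY op, completing via linger_commit_cb) and, once a watch has
 * committed, RECONNECT with a bumped register_gen (completing via
 * linger_reconnect_cb).  register_gen is what lets linger_ping_cb()
 * discard pongs that were solicited before the latest reconnect.
 */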
3015
3016 static void linger_ping_cb(struct ceph_osd_request *req)
3017 {
3018         struct ceph_osd_linger_request *lreq = req->r_priv;
3019
3020         mutex_lock(&lreq->lock);
3021         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3022              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3023              lreq->last_error);
3024         if (lreq->register_gen == req->r_ops[0].watch.gen) {
3025                 if (!req->r_result) {
3026                         lreq->watch_valid_thru = lreq->ping_sent;
3027                 } else if (!lreq->last_error) {
3028                         lreq->last_error = normalize_watch_error(req->r_result);
3029                         queue_watch_error(lreq);
3030                 }
3031         } else {
3032                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3033                      lreq->register_gen, req->r_ops[0].watch.gen);
3034         }
3035
3036         mutex_unlock(&lreq->lock);
3037         linger_put(lreq);
3038 }
3039
3040 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3041 {
3042         struct ceph_osd_client *osdc = lreq->osdc;
3043         struct ceph_osd_request *req = lreq->ping_req;
3044         struct ceph_osd_req_op *op = &req->r_ops[0];
3045
3046         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3047                 dout("%s PAUSERD\n", __func__);
3048                 return;
3049         }
3050
3051         lreq->ping_sent = jiffies;
3052         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3053              __func__, lreq, lreq->linger_id, lreq->ping_sent,
3054              lreq->register_gen);
3055
3056         if (req->r_osd)
3057                 cancel_linger_request(req);
3058
3059         request_reinit(req);
3060         target_copy(&req->r_t, &lreq->t);
3061
3062         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3063                 op->watch.cookie != lreq->linger_id ||
3064                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
3065         op->watch.gen = lreq->register_gen;
3066         req->r_callback = linger_ping_cb;
3067         req->r_priv = linger_get(lreq);
3068         req->r_linger = true;
3069
3070         ceph_osdc_get_request(req);
3071         account_request(req);
3072         req->r_tid = atomic64_inc_return(&osdc->last_tid);
3073         link_request(lreq->osd, req);
3074         send_request(req);
3075 }
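
/*
 * Note that the ping bypasses submit_request(): the target is copied
 * from the linger request and the ping is linked directly to the OSD
 * session that already serves the watch, so a ping can never pick a
 * new OSD on its own.  watch_valid_thru only advances when a pong for
 * the current register_gen comes back without error.
 */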
3076
3077 static void linger_submit(struct ceph_osd_linger_request *lreq)
3078 {
3079         struct ceph_osd_client *osdc = lreq->osdc;
3080         struct ceph_osd *osd;
3081
3082         down_write(&osdc->lock);
3083         linger_register(lreq);
3084         if (lreq->is_watch) {
3085                 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
3086                 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
3087         } else {
3088                 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
3089         }
3090
3091         calc_target(osdc, &lreq->t, NULL, false);
3092         osd = lookup_create_osd(osdc, lreq->t.osd, true);
3093         link_linger(osd, lreq);
3094
3095         send_linger(lreq);
3096         up_write(&osdc->lock);
3097 }
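
/*
 * linger_submit() is the internal entry point for the watch/notify
 * API exported later in this file.  Caller-side sketch (illustrative
 * only):
 *
 *	handle = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
 *				 my_watch_errcb, priv);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ceph_osdc_unwatch(osdc, handle);
 */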
3098
3099 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3100 {
3101         struct ceph_osd_client *osdc = lreq->osdc;
3102         struct ceph_osd_linger_request *lookup_lreq;
3103
3104         verify_osdc_wrlocked(osdc);
3105
3106         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3107                                        lreq->linger_id);
3108         if (!lookup_lreq)
3109                 return;
3110
3111         WARN_ON(lookup_lreq != lreq);
3112         erase_linger_mc(&osdc->linger_map_checks, lreq);
3113         linger_put(lreq);
3114 }
3115
3116 /*
3117  * @lreq has to be both registered and linked.
3118  */
3119 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3120 {
3121         if (lreq->is_watch && lreq->ping_req->r_osd)
3122                 cancel_linger_request(lreq->ping_req);
3123         if (lreq->reg_req->r_osd)
3124                 cancel_linger_request(lreq->reg_req);
3125         cancel_linger_map_check(lreq);
3126         unlink_linger(lreq->osd, lreq);
3127         linger_unregister(lreq);
3128 }
3129
3130 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3131 {
3132         struct ceph_osd_client *osdc = lreq->osdc;
3133
3134         down_write(&osdc->lock);
3135         if (__linger_registered(lreq))
3136                 __linger_cancel(lreq);
3137         up_write(&osdc->lock);
3138 }
3139
3140 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3141
3142 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3143 {
3144         struct ceph_osd_client *osdc = lreq->osdc;
3145         struct ceph_osdmap *map = osdc->osdmap;
3146
3147         verify_osdc_wrlocked(osdc);
3148         WARN_ON(!map->epoch);
3149
3150         if (lreq->register_gen) {
3151                 lreq->map_dne_bound = map->epoch;
3152                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3153                      lreq, lreq->linger_id);
3154         } else {
3155                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3156                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3157                      map->epoch);
3158         }
3159
3160         if (lreq->map_dne_bound) {
3161                 if (map->epoch >= lreq->map_dne_bound) {
3162                         /* we had a new enough map */
3163                         pr_info("linger_id %llu pool does not exist\n",
3164                                 lreq->linger_id);
3165                         linger_reg_commit_complete(lreq, -ENOENT);
3166                         __linger_cancel(lreq);
3167                 }
3168         } else {
3169                 send_linger_map_check(lreq);
3170         }
3171 }
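
/*
 * "Pool does not exist" cannot be decided from a single map: the pool
 * may simply not be in the (possibly stale) map we have.  So we ask
 * the monitor for the newest osdmap epoch, record it in map_dne_bound
 * and declare the pool gone only once our own map has caught up to
 * that bound with the pool still absent.
 */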
3172
3173 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3174 {
3175         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3176         struct ceph_osd_linger_request *lreq;
3177         u64 linger_id = greq->private_data;
3178
3179         WARN_ON(greq->result || !greq->u.newest);
3180
3181         down_write(&osdc->lock);
3182         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3183         if (!lreq) {
3184                 dout("%s linger_id %llu dne\n", __func__, linger_id);
3185                 goto out_unlock;
3186         }
3187
3188         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3189              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3190              greq->u.newest);
3191         if (!lreq->map_dne_bound)
3192                 lreq->map_dne_bound = greq->u.newest;
3193         erase_linger_mc(&osdc->linger_map_checks, lreq);
3194         check_linger_pool_dne(lreq);
3195
3196         linger_put(lreq);
3197 out_unlock:
3198         up_write(&osdc->lock);
3199 }
3200
3201 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3202 {
3203         struct ceph_osd_client *osdc = lreq->osdc;
3204         struct ceph_osd_linger_request *lookup_lreq;
3205         int ret;
3206
3207         verify_osdc_wrlocked(osdc);
3208
3209         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3210                                        lreq->linger_id);
3211         if (lookup_lreq) {
3212                 WARN_ON(lookup_lreq != lreq);
3213                 return;
3214         }
3215
3216         linger_get(lreq);
3217         insert_linger_mc(&osdc->linger_map_checks, lreq);
3218         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3219                                           linger_map_check_cb, lreq->linger_id);
3220         WARN_ON(ret);
3221 }
3222
3223 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3224 {
3225         int ret;
3226
3227         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3228         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3229         return ret ?: lreq->reg_commit_error;
3230 }
3231
3232 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3233 {
3234         int ret;
3235
3236         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3237         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3238         return ret ?: lreq->notify_finish_error;
3239 }
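
/*
 * Both waits are interruptible: a signal yields -ERESTARTSYS, which
 * the "ret ?: ..." idiom gives precedence over any error that was
 * recorded at completion time.
 */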
3240
3241 /*
3242  * Timeout callback, called every N seconds.  When one or more OSD
3243  * requests have been active for more than N seconds, we send a keepalive
3244  * (tag + timestamp) to the OSD handling them to ensure that any reset
3245  * of the communications channel is detected.
3246  */
3247 static void handle_timeout(struct work_struct *work)
3248 {
3249         struct ceph_osd_client *osdc =
3250                 container_of(work, struct ceph_osd_client, timeout_work.work);
3251         struct ceph_options *opts = osdc->client->options;
3252         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3253         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3254         LIST_HEAD(slow_osds);
3255         struct rb_node *n, *p;
3256
3257         dout("%s osdc %p\n", __func__, osdc);
3258         down_write(&osdc->lock);
3259
3260         /*
3261          * ping osds that are a bit slow.  this ensures that if there
3262          * is a break in the TCP connection we will notice, and reopen
3263          * a connection with that osd (from the fault callback).
3264          */
3265         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3266                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3267                 bool found = false;
3268
3269                 for (p = rb_first(&osd->o_requests); p; ) {
3270                         struct ceph_osd_request *req =
3271                             rb_entry(p, struct ceph_osd_request, r_node);
3272
3273                         p = rb_next(p); /* abort_request() */
3274
3275                         if (time_before(req->r_stamp, cutoff)) {
3276                                 dout(" req %p tid %llu on osd%d is laggy\n",
3277                                      req, req->r_tid, osd->o_osd);
3278                                 found = true;
3279                         }
3280                         if (opts->osd_request_timeout &&
3281                             time_before(req->r_start_stamp, expiry_cutoff)) {
3282                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3283                                        req->r_tid, osd->o_osd);
3284                                 abort_request(req, -ETIMEDOUT);
3285                         }
3286                 }
3287                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3288                         struct ceph_osd_linger_request *lreq =
3289                             rb_entry(p, struct ceph_osd_linger_request, node);
3290
3291                         dout(" lreq %p linger_id %llu is served by osd%d\n",
3292                              lreq, lreq->linger_id, osd->o_osd);
3293                         found = true;
3294
3295                         mutex_lock(&lreq->lock);
3296                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
3297                                 send_linger_ping(lreq);
3298                         mutex_unlock(&lreq->lock);
3299                 }
3300
3301                 if (found)
3302                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
3303         }
3304
3305         if (opts->osd_request_timeout) {
3306                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3307                         struct ceph_osd_request *req =
3308                             rb_entry(p, struct ceph_osd_request, r_node);
3309
3310                         p = rb_next(p); /* abort_request() */
3311
3312                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
3313                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3314                                        req->r_tid, osdc->homeless_osd.o_osd);
3315                                 abort_request(req, -ETIMEDOUT);
3316                         }
3317                 }
3318         }
3319
3320         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3321                 maybe_request_map(osdc);
3322
3323         while (!list_empty(&slow_osds)) {
3324                 struct ceph_osd *osd = list_first_entry(&slow_osds,
3325                                                         struct ceph_osd,
3326                                                         o_keepalive_item);
3327                 list_del_init(&osd->o_keepalive_item);
3328                 ceph_con_keepalive(&osd->o_con);
3329         }
3330
3331         up_write(&osdc->lock);
3332         schedule_delayed_work(&osdc->timeout_work,
3333                               osdc->client->options->osd_keepalive_timeout);
3334 }
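
/*
 * handle_timeout() reschedules itself every osd_keepalive_timeout
 * (5 seconds by default).  osd_request_timeout defaults to 0, i.e.
 * requests never expire -- note that both expiry_cutoff checks above
 * are gated on the option being non-zero, so the meaningless cutoff
 * that results from subtracting 0 from jiffies is never used.
 */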
3335
3336 static void handle_osds_timeout(struct work_struct *work)
3337 {
3338         struct ceph_osd_client *osdc =
3339                 container_of(work, struct ceph_osd_client,
3340                              osds_timeout_work.work);
3341         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3342         struct ceph_osd *osd, *nosd;
3343
3344         dout("%s osdc %p\n", __func__, osdc);
3345         down_write(&osdc->lock);
3346         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3347                 if (time_before(jiffies, osd->lru_ttl))
3348                         break;
3349
3350                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3351                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3352                 close_osd(osd);
3353         }
3354
3355         up_write(&osdc->lock);
3356         schedule_delayed_work(&osdc->osds_timeout_work,
3357                               round_jiffies_relative(delay));
3358 }
3359
3360 static int ceph_oloc_decode(void **p, void *end,
3361                             struct ceph_object_locator *oloc)
3362 {
3363         u8 struct_v, struct_cv;
3364         u32 len;
3365         void *struct_end;
3366         int ret = 0;
3367
3368         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3369         struct_v = ceph_decode_8(p);
3370         struct_cv = ceph_decode_8(p);
3371         if (struct_v < 3) {
3372                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3373                         struct_v, struct_cv);
3374                 goto e_inval;
3375         }
3376         if (struct_cv > 6) {
3377                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3378                         struct_v, struct_cv);
3379                 goto e_inval;
3380         }
3381         len = ceph_decode_32(p);
3382         ceph_decode_need(p, end, len, e_inval);
3383         struct_end = *p + len;
3384
3385         oloc->pool = ceph_decode_64(p);
3386         *p += 4; /* skip preferred */
3387
3388         len = ceph_decode_32(p);
3389         if (len > 0) {
3390                 pr_warn("ceph_object_locator::key is set\n");
3391                 goto e_inval;
3392         }
3393
3394         if (struct_v >= 5) {
3395                 bool changed = false;
3396
3397                 len = ceph_decode_32(p);
3398                 if (len > 0) {
3399                         ceph_decode_need(p, end, len, e_inval);
3400                         if (!oloc->pool_ns ||
3401                             ceph_compare_string(oloc->pool_ns, *p, len))
3402                                 changed = true;
3403                         *p += len;
3404                 } else {
3405                         if (oloc->pool_ns)
3406                                 changed = true;
3407                 }
3408                 if (changed) {
3409                         /* redirect changes namespace */
3410                         pr_warn("ceph_object_locator::nspace is changed\n");
3411                         goto e_inval;
3412                 }
3413         }
3414
3415         if (struct_v >= 6) {
3416                 s64 hash = ceph_decode_64(p);
3417                 if (hash != -1) {
3418                         pr_warn("ceph_object_locator::hash is set\n");
3419                         goto e_inval;
3420                 }
3421         }
3422
3423         /* skip the rest */
3424         *p = struct_end;
3425 out:
3426         return ret;
3427
3428 e_inval:
3429         ret = -EINVAL;
3430         goto out;
3431 }
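
/*
 * Shape of the object_locator encoding accepted above (v >= 3):
 *
 *	u8 struct_v, u8 struct_compat, u32 struct_len
 *	s64 pool
 *	u32 preferred (ignored)
 *	u32 key_len -- must be 0, locator keys aren't supported
 *	v5+: u32 nspace_len + nspace -- must match the request's pool_ns
 *	v6+: s64 hash -- must be -1, an explicit hash isn't supported
 *
 * A set key, a changed nspace or a set hash are features this client
 * doesn't support, so they fail the decode with -EINVAL instead of
 * being silently ignored.
 */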
3432
3433 static int ceph_redirect_decode(void **p, void *end,
3434                                 struct ceph_request_redirect *redir)
3435 {
3436         u8 struct_v, struct_cv;
3437         u32 len;
3438         void *struct_end;
3439         int ret;
3440
3441         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3442         struct_v = ceph_decode_8(p);
3443         struct_cv = ceph_decode_8(p);
3444         if (struct_cv > 1) {
3445                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3446                         struct_v, struct_cv);
3447                 goto e_inval;
3448         }
3449         len = ceph_decode_32(p);
3450         ceph_decode_need(p, end, len, e_inval);
3451         struct_end = *p + len;
3452
3453         ret = ceph_oloc_decode(p, end, &redir->oloc);
3454         if (ret)
3455                 goto out;
3456
3457         len = ceph_decode_32(p);
3458         if (len > 0) {
3459                 pr_warn("ceph_request_redirect::object_name is set\n");
3460                 goto e_inval;
3461         }
3462
3463         len = ceph_decode_32(p);
3464         *p += len; /* skip osd_instructions */
3465
3466         /* skip the rest */
3467         *p = struct_end;
3468 out:
3469         return ret;
3470
3471 e_inval:
3472         ret = -EINVAL;
3473         goto out;
3474 }
3475
3476 struct MOSDOpReply {
3477         struct ceph_pg pgid;
3478         u64 flags;
3479         int result;
3480         u32 epoch;
3481         int num_ops;
3482         u32 outdata_len[CEPH_OSD_MAX_OPS];
3483         s32 rval[CEPH_OSD_MAX_OPS];
3484         int retry_attempt;
3485         struct ceph_eversion replay_version;
3486         u64 user_version;
3487         struct ceph_request_redirect redirect;
3488 };
3489
3490 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3491 {
3492         void *p = msg->front.iov_base;
3493         void *const end = p + msg->front.iov_len;
3494         u16 version = le16_to_cpu(msg->hdr.version);
3495         struct ceph_eversion bad_replay_version;
3496         u8 decode_redir;
3497         u32 len;
3498         int ret;
3499         int i;
3500
3501         ceph_decode_32_safe(&p, end, len, e_inval);
3502         ceph_decode_need(&p, end, len, e_inval);
3503         p += len; /* skip oid */
3504
3505         ret = ceph_decode_pgid(&p, end, &m->pgid);
3506         if (ret)
3507                 return ret;
3508
3509         ceph_decode_64_safe(&p, end, m->flags, e_inval);
3510         ceph_decode_32_safe(&p, end, m->result, e_inval);
3511         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3512         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3513         p += sizeof(bad_replay_version);
3514         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3515
3516         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3517         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3518                 goto e_inval;
3519
3520         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3521                          e_inval);
3522         for (i = 0; i < m->num_ops; i++) {
3523                 struct ceph_osd_op *op = p;
3524
3525                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3526                 p += sizeof(*op);
3527         }
3528
3529         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3530         for (i = 0; i < m->num_ops; i++)
3531                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3532
3533         if (version >= 5) {
3534                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3535                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3536                 p += sizeof(m->replay_version);
3537                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3538         } else {
3539                 m->replay_version = bad_replay_version; /* struct */
3540                 m->user_version = le64_to_cpu(m->replay_version.version);
3541         }
3542
3543         if (version >= 6) {
3544                 if (version >= 7)
3545                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3546                 else
3547                         decode_redir = 1;
3548         } else {
3549                 decode_redir = 0;
3550         }
3551
3552         if (decode_redir) {
3553                 ret = ceph_redirect_decode(&p, end, &m->redirect);
3554                 if (ret)
3555                         return ret;
3556         } else {
3557                 ceph_oloc_init(&m->redirect.oloc);
3558         }
3559
3560         return 0;
3561
3562 e_inval:
3563         return -EINVAL;
3564 }
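
/*
 * Pre-v5 replies carry no user_version, so it is synthesized from the
 * (bad) replay_version decoded earlier; v6 introduced the redirect
 * blob and v7 the explicit decode_redir byte.  Any decode failure
 * propagates up so that handle_reply() can fail the request instead
 * of acting on garbage.
 */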
3565
3566 /*
3567  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3568  * specified.
3569  */
3570 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3571 {
3572         struct ceph_osd_client *osdc = osd->o_osdc;
3573         struct ceph_osd_request *req;
3574         struct MOSDOpReply m;
3575         u64 tid = le64_to_cpu(msg->hdr.tid);
3576         u32 data_len = 0;
3577         int ret;
3578         int i;
3579
3580         dout("%s msg %p tid %llu\n", __func__, msg, tid);
3581
3582         down_read(&osdc->lock);
3583         if (!osd_registered(osd)) {
3584                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3585                 goto out_unlock_osdc;
3586         }
3587         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3588
3589         mutex_lock(&osd->lock);
3590         req = lookup_request(&osd->o_requests, tid);
3591         if (!req) {
3592                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3593                 goto out_unlock_session;
3594         }
3595
3596         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3597         ret = decode_MOSDOpReply(msg, &m);
3598         m.redirect.oloc.pool_ns = NULL;
3599         if (ret) {
3600                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3601                        req->r_tid, ret);
3602                 ceph_msg_dump(msg);
3603                 goto fail_request;
3604         }
3605         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3606              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3607              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3608              le64_to_cpu(m.replay_version.version), m.user_version);
3609
3610         if (m.retry_attempt >= 0) {
3611                 if (m.retry_attempt != req->r_attempts - 1) {
3612                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3613                              req, req->r_tid, m.retry_attempt,
3614                              req->r_attempts - 1);
3615                         goto out_unlock_session;
3616                 }
3617         } else {
3618                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3619         }
3620
3621         if (!ceph_oloc_empty(&m.redirect.oloc)) {
3622                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3623                      m.redirect.oloc.pool);
3624                 unlink_request(osd, req);
3625                 mutex_unlock(&osd->lock);
3626
3627                 /*
3628                  * Not ceph_oloc_copy() - changing pool_ns is not
3629                  * supported.
3630                  */
3631                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3632                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED;
3633                 req->r_tid = 0;
3634                 __submit_request(req, false);
3635                 goto out_unlock_osdc;
3636         }
3637
3638         if (m.num_ops != req->r_num_ops) {
3639                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3640                        req->r_num_ops, req->r_tid);
3641                 goto fail_request;
3642         }
3643         for (i = 0; i < req->r_num_ops; i++) {
3644                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3645                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3646                 req->r_ops[i].rval = m.rval[i];
3647                 req->r_ops[i].outdata_len = m.outdata_len[i];
3648                 data_len += m.outdata_len[i];
3649         }
3650         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3651                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3652                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3653                 goto fail_request;
3654         }
3655         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3656              req, req->r_tid, m.result, data_len);
3657
3658         /*
3659          * Since we only ever request ONDISK, we should only ever get
3660          * one (type of) reply back.
3661          */
3662         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3663         req->r_result = m.result ?: data_len;
3664         finish_request(req);
3665         mutex_unlock(&osd->lock);
3666         up_read(&osdc->lock);
3667
3668         __complete_request(req);
3669         return;
3670
3671 fail_request:
3672         complete_request(req, -EIO);
3673 out_unlock_session:
3674         mutex_unlock(&osd->lock);
3675 out_unlock_osdc:
3676         up_read(&osdc->lock);
3677 }
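
/*
 * Locking in handle_reply() is osdc->lock (read) -> osd->lock, and
 * __complete_request() runs with both dropped so that the callback is
 * free to submit new requests.  A redirect takes the resubmit path
 * instead: r_tid is cleared so that a fresh tid is assigned when the
 * request is submitted to the new target.
 */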
3678
3679 static void set_pool_was_full(struct ceph_osd_client *osdc)
3680 {
3681         struct rb_node *n;
3682
3683         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3684                 struct ceph_pg_pool_info *pi =
3685                     rb_entry(n, struct ceph_pg_pool_info, node);
3686
3687                 pi->was_full = __pool_full(pi);
3688         }
3689 }
3690
3691 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3692 {
3693         struct ceph_pg_pool_info *pi;
3694
3695         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3696         if (!pi)
3697                 return false;
3698
3699         return pi->was_full && !__pool_full(pi);
3700 }
3701
3702 static enum calc_target_result
3703 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3704 {
3705         struct ceph_osd_client *osdc = lreq->osdc;
3706         enum calc_target_result ct_res;
3707
3708         ct_res = calc_target(osdc, &lreq->t, NULL, true);
3709         if (ct_res == CALC_TARGET_NEED_RESEND) {
3710                 struct ceph_osd *osd;
3711
3712                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3713                 if (osd != lreq->osd) {
3714                         unlink_linger(lreq->osd, lreq);
3715                         link_linger(osd, lreq);
3716                 }
3717         }
3718
3719         return ct_res;
3720 }
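
/*
 * Unlike a plain request, a linger that needs a resend is moved to
 * its new OSD right here; the actual (re)send is deferred to the
 * caller, which collects it on the need_resend_linger list.
 */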
3721
3722 /*
3723  * Requeue requests whose mapping to an OSD has changed.
3724  */
3725 static void scan_requests(struct ceph_osd *osd,
3726                           bool force_resend,
3727                           bool cleared_full,
3728                           bool check_pool_cleared_full,
3729                           struct rb_root *need_resend,
3730                           struct list_head *need_resend_linger)
3731 {
3732         struct ceph_osd_client *osdc = osd->o_osdc;
3733         struct rb_node *n;
3734         bool force_resend_writes;
3735
3736         for (n = rb_first(&osd->o_linger_requests); n; ) {
3737                 struct ceph_osd_linger_request *lreq =
3738                     rb_entry(n, struct ceph_osd_linger_request, node);
3739                 enum calc_target_result ct_res;
3740
3741                 n = rb_next(n); /* recalc_linger_target() */
3742
3743                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3744                      lreq->linger_id);
3745                 ct_res = recalc_linger_target(lreq);
3746                 switch (ct_res) {
3747                 case CALC_TARGET_NO_ACTION:
3748                         force_resend_writes = cleared_full ||
3749                             (check_pool_cleared_full &&
3750                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3751                         if (!force_resend && !force_resend_writes)
3752                                 break;
3753
3754                         /* fall through */
3755                 case CALC_TARGET_NEED_RESEND:
3756                         cancel_linger_map_check(lreq);
3757                         /*
3758                          * scan_requests() for the previous epoch(s)
3759                          * may have already added it to the list, since
3760                          * it's not unlinked here.
3761                          */
3762                         if (list_empty(&lreq->scan_item))
3763                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3764                         break;
3765                 case CALC_TARGET_POOL_DNE:
3766                         list_del_init(&lreq->scan_item);
3767                         check_linger_pool_dne(lreq);
3768                         break;
3769                 }
3770         }
3771
3772         for (n = rb_first(&osd->o_requests); n; ) {
3773                 struct ceph_osd_request *req =
3774                     rb_entry(n, struct ceph_osd_request, r_node);
3775                 enum calc_target_result ct_res;
3776
3777                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3778
3779                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3780                 ct_res = calc_target(osdc, &req->r_t, &req->r_osd->o_con,
3781                                      false);
3782                 switch (ct_res) {
3783                 case CALC_TARGET_NO_ACTION:
3784                         force_resend_writes = cleared_full ||
3785                             (check_pool_cleared_full &&
3786                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3787                         if (!force_resend &&
3788                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3789                              !force_resend_writes))
3790                                 break;
3791
3792                         /* fall through */
3793                 case CALC_TARGET_NEED_RESEND:
3794                         cancel_map_check(req);
3795                         unlink_request(osd, req);
3796                         insert_request(need_resend, req);
3797                         break;
3798                 case CALC_TARGET_POOL_DNE:
3799                         check_pool_dne(req);
3800                         break;
3801                 }
3802         }
3803 }
3804
3805 static int handle_one_map(struct ceph_osd_client *osdc,
3806                           void *p, void *end, bool incremental,
3807                           struct rb_root *need_resend,
3808                           struct list_head *need_resend_linger)
3809 {
3810         struct ceph_osdmap *newmap;
3811         struct rb_node *n;
3812         bool skipped_map = false;
3813         bool was_full;
3814
3815         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3816         set_pool_was_full(osdc);
3817
3818         if (incremental)
3819                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3820         else
3821                 newmap = ceph_osdmap_decode(&p, end);
3822         if (IS_ERR(newmap))
3823                 return PTR_ERR(newmap);
3824
3825         if (newmap != osdc->osdmap) {
3826                 /*
3827                  * Preserve ->was_full before destroying the old map.
3828                  * For pools that weren't in the old map, ->was_full
3829                  * should be false.
3830                  */
3831                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3832                         struct ceph_pg_pool_info *pi =
3833                             rb_entry(n, struct ceph_pg_pool_info, node);
3834                         struct ceph_pg_pool_info *old_pi;
3835
3836                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3837                         if (old_pi)
3838                                 pi->was_full = old_pi->was_full;
3839                         else
3840                                 WARN_ON(pi->was_full);
3841                 }
3842
3843                 if (osdc->osdmap->epoch &&
3844                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3845                         WARN_ON(incremental);
3846                         skipped_map = true;
3847                 }
3848
3849                 ceph_osdmap_destroy(osdc->osdmap);
3850                 osdc->osdmap = newmap;
3851         }
3852
3853         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3854         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3855                       need_resend, need_resend_linger);
3856
3857         for (n = rb_first(&osdc->osds); n; ) {
3858                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3859
3860                 n = rb_next(n); /* close_osd() */
3861
3862                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3863                               need_resend_linger);
3864                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3865                     memcmp(&osd->o_con.peer_addr,
3866                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3867                            sizeof(struct ceph_entity_addr)))
3868                         close_osd(osd);
3869         }
3870
3871         return 0;
3872 }
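
/*
 * skipped_map is the pessimistic case: a full map replaced ours and
 * at least one intermediate epoch was never seen, so any request may
 * have missed a remapping and everything is force-resent.  The
 * cluster or a pool going from full to non-full also forces a resend
 * -- for plain requests only writes, since only those could have been
 * blocked by the full condition.
 */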
3873
3874 static void kick_requests(struct ceph_osd_client *osdc,
3875                           struct rb_root *need_resend,
3876                           struct list_head *need_resend_linger)
3877 {
3878         struct ceph_osd_linger_request *lreq, *nlreq;
3879         enum calc_target_result ct_res;
3880         struct rb_node *n;
3881
3882         /* make sure need_resend targets reflect latest map */
3883         for (n = rb_first(need_resend); n; ) {
3884                 struct ceph_osd_request *req =
3885                     rb_entry(n, struct ceph_osd_request, r_node);
3886
3887                 n = rb_next(n);
3888
3889                 if (req->r_t.epoch < osdc->osdmap->epoch) {
3890                         ct_res = calc_target(osdc, &req->r_t, NULL, false);
3891                         if (ct_res == CALC_TARGET_POOL_DNE) {
3892                                 erase_request(need_resend, req);
3893                                 check_pool_dne(req);
3894                         }
3895                 }
3896         }
3897
3898         for (n = rb_first(need_resend); n; ) {
3899                 struct ceph_osd_request *req =
3900                     rb_entry(n, struct ceph_osd_request, r_node);
3901                 struct ceph_osd *osd;
3902
3903                 n = rb_next(n);
3904                 erase_request(need_resend, req); /* before link_request() */
3905
3906                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
3907                 link_request(osd, req);
3908                 if (!req->r_linger) {
3909                         if (!osd_homeless(osd) && !req->r_t.paused)
3910                                 send_request(req);
3911                 } else {
3912                         cancel_linger_request(req);
3913                 }
3914         }
3915
3916         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
3917                 if (!osd_homeless(lreq->osd))
3918                         send_linger(lreq);
3919
3920                 list_del_init(&lreq->scan_item);
3921         }
3922 }
3923
3924 /*
3925  * Process updated osd map.
3926  *
3927  * The message contains any number of incremental and full maps, normally
3928  * indicating some sort of topology change in the cluster.  Kick requests
3929  * off to different OSDs as needed.
3930  */
3931 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
3932 {
3933         void *p = msg->front.iov_base;
3934         void *const end = p + msg->front.iov_len;
3935         u32 nr_maps, maplen;
3936         u32 epoch;
3937         struct ceph_fsid fsid;
3938         struct rb_root need_resend = RB_ROOT;
3939         LIST_HEAD(need_resend_linger);
3940         bool handled_incremental = false;
3941         bool was_pauserd, was_pausewr;
3942         bool pauserd, pausewr;
3943         int err;
3944
3945         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
3946         down_write(&osdc->lock);
3947
3948         /* verify fsid */
3949         ceph_decode_need(&p, end, sizeof(fsid), bad);
3950         ceph_decode_copy(&p, &fsid, sizeof(fsid));
3951         if (ceph_check_fsid(osdc->client, &fsid) < 0)
3952                 goto bad;
3953
3954         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
3955         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
3956                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
3957                       have_pool_full(osdc);
3958
3959         /* incremental maps */
3960         ceph_decode_32_safe(&p, end, nr_maps, bad);
3961         dout(" %d inc maps\n", nr_maps);
3962         while (nr_maps > 0) {
3963                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3964                 epoch = ceph_decode_32(&p);
3965                 maplen = ceph_decode_32(&p);
3966                 ceph_decode_need(&p, end, maplen, bad);
3967                 if (osdc->osdmap->epoch &&
3968                     osdc->osdmap->epoch + 1 == epoch) {
3969                         dout("applying incremental map %u len %d\n",
3970                              epoch, maplen);
3971                         err = handle_one_map(osdc, p, p + maplen, true,
3972                                              &need_resend, &need_resend_linger);
3973                         if (err)
3974                                 goto bad;
3975                         handled_incremental = true;
3976                 } else {
3977                         dout("ignoring incremental map %u len %d\n",
3978                              epoch, maplen);
3979                 }
3980                 p += maplen;
3981                 nr_maps--;
3982         }
3983         if (handled_incremental)
3984                 goto done;
3985
3986         /* full maps */
3987         ceph_decode_32_safe(&p, end, nr_maps, bad);
3988         dout(" %d full maps\n", nr_maps);
3989         while (nr_maps) {
3990                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3991                 epoch = ceph_decode_32(&p);
3992                 maplen = ceph_decode_32(&p);
3993                 ceph_decode_need(&p, end, maplen, bad);
3994                 if (nr_maps > 1) {
3995                         dout("skipping non-latest full map %u len %d\n",
3996                              epoch, maplen);
3997                 } else if (osdc->osdmap->epoch >= epoch) {
3998                         dout("skipping full map %u len %d, "
3999                              "older than our %u\n", epoch, maplen,
4000                              osdc->osdmap->epoch);
4001                 } else {
4002                         dout("taking full map %u len %d\n", epoch, maplen);
4003                         err = handle_one_map(osdc, p, p + maplen, false,
4004                                              &need_resend, &need_resend_linger);
4005                         if (err)
4006                                 goto bad;
4007                 }
4008                 p += maplen;
4009                 nr_maps--;
4010         }
4011
4012 done:
4013         /*
4014          * subscribe to subsequent osdmap updates if full to ensure
4015          * we find out when we are no longer full and stop returning
4016          * ENOSPC.
4017          */
4018         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4019         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4020                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4021                   have_pool_full(osdc);
4022         if (was_pauserd || was_pausewr || pauserd || pausewr ||
4023             osdc->osdmap->epoch < osdc->epoch_barrier)
4024                 maybe_request_map(osdc);
4025
4026         kick_requests(osdc, &need_resend, &need_resend_linger);
4027
4028         ceph_osdc_abort_on_full(osdc);
4029         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4030                           osdc->osdmap->epoch);
4031         up_write(&osdc->lock);
4032         wake_up_all(&osdc->client->auth_wq);
4033         return;
4034
4035 bad:
4036         pr_err("osdc handle_map corrupt msg\n");
4037         ceph_msg_dump(msg);
4038         up_write(&osdc->lock);
4039 }
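
/*
 * The entire map application above runs under osdc->lock held for
 * write, so no request can be submitted or completed against a
 * half-installed map; resends are collected first and only kicked
 * once the new map is fully in place.
 */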
4040
4041 /*
4042  * Resubmit requests pending on the given osd.
4043  */
4044 static void kick_osd_requests(struct ceph_osd *osd)
4045 {
4046         struct rb_node *n;
4047
4048         clear_backoffs(osd);
4049
4050         for (n = rb_first(&osd->o_requests); n; ) {
4051                 struct ceph_osd_request *req =
4052                     rb_entry(n, struct ceph_osd_request, r_node);
4053
4054                 n = rb_next(n); /* cancel_linger_request() */
4055
4056                 if (!req->r_linger) {
4057                         if (!req->r_t.paused)
4058                                 send_request(req);
4059                 } else {
4060                         cancel_linger_request(req);
4061                 }
4062         }
4063         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4064                 struct ceph_osd_linger_request *lreq =
4065                     rb_entry(n, struct ceph_osd_linger_request, node);
4066
4067                 send_linger(lreq);
4068         }
4069 }
4070
4071 /*
4072  * If the osd connection drops, we need to resubmit all requests.
4073  */
4074 static void osd_fault(struct ceph_connection *con)
4075 {
4076         struct ceph_osd *osd = con->private;
4077         struct ceph_osd_client *osdc = osd->o_osdc;
4078
4079         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4080
4081         down_write(&osdc->lock);
4082         if (!osd_registered(osd)) {
4083                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4084                 goto out_unlock;
4085         }
4086
4087         if (!reopen_osd(osd))
4088                 kick_osd_requests(osd);
4089         maybe_request_map(osdc);
4090
4091 out_unlock:
4092         up_write(&osdc->lock);
4093 }
4094
4095 struct MOSDBackoff {
4096         struct ceph_spg spgid;
4097         u32 map_epoch;
4098         u8 op;
4099         u64 id;
4100         struct ceph_hobject_id *begin;
4101         struct ceph_hobject_id *end;
4102 };
4103
4104 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4105 {
4106         void *p = msg->front.iov_base;
4107         void *const end = p + msg->front.iov_len;
4108         u8 struct_v;
4109         u32 struct_len;
4110         int ret;
4111
4112         ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4113         if (ret)
4114                 return ret;
4115
4116         ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4117         if (ret)
4118                 return ret;
4119
4120         ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4121         ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4122         ceph_decode_8_safe(&p, end, m->op, e_inval);
4123         ceph_decode_64_safe(&p, end, m->id, e_inval);
4124
4125         m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4126         if (!m->begin)
4127                 return -ENOMEM;
4128
4129         ret = decode_hoid(&p, end, m->begin);
4130         if (ret) {
4131                 free_hoid(m->begin);
4132                 return ret;
4133         }
4134
4135         m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4136         if (!m->end) {
4137                 free_hoid(m->begin);
4138                 return -ENOMEM;
4139         }
4140
4141         ret = decode_hoid(&p, end, m->end);
4142         if (ret) {
4143                 free_hoid(m->begin);
4144                 free_hoid(m->end);
4145                 return ret;
4146         }
4147
4148         return 0;
4149
4150 e_inval:
4151         return -EINVAL;
4152 }
4153
4154 static struct ceph_msg *create_backoff_message(
4155                                 const struct ceph_osd_backoff *backoff,
4156                                 u32 map_epoch)
4157 {
4158         struct ceph_msg *msg;
4159         void *p, *end;
4160         int msg_size;
4161
4162         msg_size = CEPH_ENCODING_START_BLK_LEN +
4163                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4164         msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4165         msg_size += CEPH_ENCODING_START_BLK_LEN +
4166                         hoid_encoding_size(backoff->begin);
4167         msg_size += CEPH_ENCODING_START_BLK_LEN +
4168                         hoid_encoding_size(backoff->end);
4169
4170         msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4171         if (!msg)
4172                 return NULL;
4173
4174         p = msg->front.iov_base;
4175         end = p + msg->front_alloc_len;
4176
4177         encode_spgid(&p, &backoff->spgid);
4178         ceph_encode_32(&p, map_epoch);
4179         ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4180         ceph_encode_64(&p, backoff->id);
4181         encode_hoid(&p, end, backoff->begin);
4182         encode_hoid(&p, end, backoff->end);
4183         BUG_ON(p != end);
4184
4185         msg->front.iov_len = p - msg->front.iov_base;
4186         msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4187         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4188
4189         return msg;
4190 }
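
/*
 * msg_size above must mirror the encoders exactly; the BUG_ON(p != end)
 * after encoding is the cross-check that every byte accounted for was
 * actually written, catching any drift between the two.
 */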
4191
4192 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4193 {
4194         struct ceph_spg_mapping *spg;
4195         struct ceph_osd_backoff *backoff;
4196         struct ceph_msg *msg;
4197
4198         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4199              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4200
4201         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4202         if (!spg) {
4203                 spg = alloc_spg_mapping();
4204                 if (!spg) {
4205                         pr_err("%s failed to allocate spg\n", __func__);
4206                         return;
4207                 }
4208                 spg->spgid = m->spgid; /* struct */
4209                 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4210         }
4211
4212         backoff = alloc_backoff();
4213         if (!backoff) {
4214                 pr_err("%s failed to allocate backoff\n", __func__);
4215                 return;
4216         }
4217         backoff->spgid = m->spgid; /* struct */
4218         backoff->id = m->id;
4219         backoff->begin = m->begin;
4220         m->begin = NULL; /* backoff now owns this */
4221         backoff->end = m->end;
4222         m->end = NULL;   /* ditto */
4223
4224         insert_backoff(&spg->backoffs, backoff);
4225         insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4226
4227         /*
4228          * Ack with the original backoff's epoch so that the OSD can
4229          * discard this ack if there was a PG split in the meantime.
4230          */
4231         msg = create_backoff_message(backoff, m->map_epoch);
4232         if (!msg) {
4233                 pr_err("%s failed to allocate msg\n", __func__);
4234                 return;
4235         }
4236         ceph_con_send(&osd->o_con, msg);
4237 }
4238
4239 static bool target_contained_by(const struct ceph_osd_request_target *t,
4240                                 const struct ceph_hobject_id *begin,
4241                                 const struct ceph_hobject_id *end)
4242 {
4243         struct ceph_hobject_id hoid;
4244         int cmp;
4245
4246         hoid_fill_from_target(&hoid, t);
4247         cmp = hoid_compare(&hoid, begin);
4248         return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4249 }
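
/*
 * I.e. the backoff interval is half-open, [begin, end): a target
 * equal to begin is contained, one equal to end is not.
 */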
4250
4251 static void handle_backoff_unblock(struct ceph_osd *osd,
4252                                    const struct MOSDBackoff *m)
4253 {
4254         struct ceph_spg_mapping *spg;
4255         struct ceph_osd_backoff *backoff;
4256         struct rb_node *n;
4257
4258         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4259              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4260
4261         backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4262         if (!backoff) {
4263                 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4264                        __func__, osd->o_osd, m->spgid.pgid.pool,
4265                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4266                 return;
4267         }
4268
4269         if (hoid_compare(backoff->begin, m->begin) &&
4270             hoid_compare(backoff->end, m->end)) {
4271                 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4272                        __func__, osd->o_osd, m->spgid.pgid.pool,
4273                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4274                 /* unblock it anyway... */
4275         }
4276
4277         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4278         BUG_ON(!spg);
4279
4280         erase_backoff(&spg->backoffs, backoff);
4281         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4282         free_backoff(backoff);
4283
4284         if (RB_EMPTY_ROOT(&spg->backoffs)) {
4285                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4286                 free_spg_mapping(spg);
4287         }
4288
4289         for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4290                 struct ceph_osd_request *req =
4291                     rb_entry(n, struct ceph_osd_request, r_node);
4292
4293                 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4294                         /*
4295                          * Match against @m, not @backoff -- the PG may
4296                          * have split on the OSD.
4297                          */
4298                         if (target_contained_by(&req->r_t, m->begin, m->end)) {
4299                                 /*
4300                                  * If no other installed backoff applies,
4301                                  * resend.
4302                                  */
4303                                 send_request(req);
4304                         }
4305                 }
4306         }
4307 }
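
/*
 * Note that the range sanity check in handle_backoff_unblock() only
 * complains when *both* endpoints differ from the installed backoff;
 * either way the backoff is torn down and requests in the unblocked
 * range are resent.
 */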
4308
4309 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4310 {
4311         struct ceph_osd_client *osdc = osd->o_osdc;
4312         struct MOSDBackoff m;
4313         int ret;
4314
4315         down_read(&osdc->lock);
4316         if (!osd_registered(osd)) {
4317                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4318                 up_read(&osdc->lock);
4319                 return;
4320         }
4321         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4322
4323         mutex_lock(&osd->lock);
4324         ret = decode_MOSDBackoff(msg, &m);
4325         if (ret) {
4326                 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4327                 ceph_msg_dump(msg);
4328                 goto out_unlock;
4329         }
4330
4331         switch (m.op) {
4332         case CEPH_OSD_BACKOFF_OP_BLOCK:
4333                 handle_backoff_block(osd, &m);
4334                 break;
4335         case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4336                 handle_backoff_unblock(osd, &m);
4337                 break;
4338         default:
4339                 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4340         }
4341
4342         free_hoid(m.begin);
4343         free_hoid(m.end);
4344
4345 out_unlock:
4346         mutex_unlock(&osd->lock);
4347         up_read(&osdc->lock);
4348 }
4349
4350 /*
4351  * Process osd watch notifications
4352  */
4353 static void handle_watch_notify(struct ceph_osd_client *osdc,
4354                                 struct ceph_msg *msg)
4355 {
4356         void *p = msg->front.iov_base;
4357         void *const end = p + msg->front.iov_len;
4358         struct ceph_osd_linger_request *lreq;
4359         struct linger_work *lwork;
4360         u8 proto_ver, opcode;
4361         u64 cookie, notify_id;
4362         u64 notifier_id = 0;
4363         s32 return_code = 0;
4364         void *payload = NULL;
4365         u32 payload_len = 0;
4366
4367         ceph_decode_8_safe(&p, end, proto_ver, bad);
4368         ceph_decode_8_safe(&p, end, opcode, bad);
4369         ceph_decode_64_safe(&p, end, cookie, bad);
4370         p += 8; /* skip ver */
4371         ceph_decode_64_safe(&p, end, notify_id, bad);
4372
4373         if (proto_ver >= 1) {
4374                 ceph_decode_32_safe(&p, end, payload_len, bad);
4375                 ceph_decode_need(&p, end, payload_len, bad);
4376                 payload = p;
4377                 p += payload_len;
4378         }
4379
4380         if (le16_to_cpu(msg->hdr.version) >= 2)
4381                 ceph_decode_32_safe(&p, end, return_code, bad);
4382
4383         if (le16_to_cpu(msg->hdr.version) >= 3)
4384                 ceph_decode_64_safe(&p, end, notifier_id, bad);
4385
4386         down_read(&osdc->lock);
4387         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4388         if (!lreq) {
4389                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4390                      cookie);
4391                 goto out_unlock_osdc;
4392         }
4393
4394         mutex_lock(&lreq->lock);
4395         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4396              opcode, cookie, lreq, lreq->is_watch);
4397         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4398                 if (!lreq->last_error) {
4399                         lreq->last_error = -ENOTCONN;
4400                         queue_watch_error(lreq);
4401                 }
4402         } else if (!lreq->is_watch) {
4403                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4404                 if (lreq->notify_id && lreq->notify_id != notify_id) {
4405                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4406                              lreq->notify_id, notify_id);
4407                 } else if (!completion_done(&lreq->notify_finish_wait)) {
4408                         struct ceph_msg_data *data =
4409                             msg->num_data_items ? &msg->data[0] : NULL;
4410
4411                         if (data) {
4412                                 if (lreq->preply_pages) {
4413                                         WARN_ON(data->type !=
4414                                                         CEPH_MSG_DATA_PAGES);
4415                                         *lreq->preply_pages = data->pages;
4416                                         *lreq->preply_len = data->length;
4417                                 } else {
4418                                         ceph_release_page_vector(data->pages,
4419                                                calc_pages_for(0, data->length));
4420                                 }
4421                         }
4422                         lreq->notify_finish_error = return_code;
4423                         complete_all(&lreq->notify_finish_wait);
4424                 }
4425         } else {
4426                 /* CEPH_WATCH_EVENT_NOTIFY */
4427                 lwork = lwork_alloc(lreq, do_watch_notify);
4428                 if (!lwork) {
4429                         pr_err("failed to allocate notify-lwork\n");
4430                         goto out_unlock_lreq;
4431                 }
4432
4433                 lwork->notify.notify_id = notify_id;
4434                 lwork->notify.notifier_id = notifier_id;
4435                 lwork->notify.payload = payload;
4436                 lwork->notify.payload_len = payload_len;
4437                 lwork->notify.msg = ceph_msg_get(msg);
4438                 lwork_queue(lwork);
4439         }
4440
4441 out_unlock_lreq:
4442         mutex_unlock(&lreq->lock);
4443 out_unlock_osdc:
4444         up_read(&osdc->lock);
4445         return;
4446
4447 bad:
4448         pr_err("osdc handle_watch_notify corrupt msg\n");
4449 }
4450
4451 /*
4452  * Register request, send initial attempt.
4453  */
4454 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4455                             struct ceph_osd_request *req,
4456                             bool nofail)
4457 {
4458         down_read(&osdc->lock);
4459         submit_request(req, false);
4460         up_read(&osdc->lock);
4461
4462         return 0;
4463 }
4464 EXPORT_SYMBOL(ceph_osdc_start_request);
4465
4466 /*
4467  * Unregister a registered request.  The request is not completed:
4468  * ->r_result isn't set and __complete_request() isn't called.
4469  */
4470 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4471 {
4472         struct ceph_osd_client *osdc = req->r_osdc;
4473
4474         down_write(&osdc->lock);
4475         if (req->r_osd)
4476                 cancel_request(req);
4477         up_write(&osdc->lock);
4478 }
4479 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4480
4481 /*
4482  * @timeout: in jiffies, 0 means "wait forever"
4483  */
4484 static int wait_request_timeout(struct ceph_osd_request *req,
4485                                 unsigned long timeout)
4486 {
4487         long left;
4488
4489         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4490         left = wait_for_completion_killable_timeout(&req->r_completion,
4491                                                 ceph_timeout_jiffies(timeout));
4492         if (left <= 0) {
4493                 left = left ?: -ETIMEDOUT;
4494                 ceph_osdc_cancel_request(req);
4495         } else {
4496                 left = req->r_result; /* completed */
4497         }
4498
4499         return left;
4500 }
4501
4502 /*
4503  * wait for a request to complete
4504  */
4505 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4506                            struct ceph_osd_request *req)
4507 {
4508         return wait_request_timeout(req, 0);
4509 }
4510 EXPORT_SYMBOL(ceph_osdc_wait_request);
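
/*
 * Example (illustrative sketch, not part of this file): the usual
 * submit-and-wait pattern for a one-shot request.  "osdc" and "req"
 * are assumed to have been prepared by the caller, e.g. with
 * ceph_osdc_new_request():
 *
 *      ceph_osdc_start_request(osdc, req, false);
 *      ret = ceph_osdc_wait_request(osdc, req);
 *      ceph_osdc_put_request(req);    // drop the caller's ref
 *      if (ret < 0)
 *              pr_err("request failed: %d\n", ret);
 */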

/*
 * sync - wait for all in-flight write requests to flush.  Only
 * requests with a tid at or below the last_tid snapshot taken on
 * entry are waited for, so writes submitted afterwards can't starve
 * the caller.
 */
void ceph_osdc_sync(struct ceph_osd_client *osdc)
{
        struct rb_node *n, *p;
        u64 last_tid = atomic64_read(&osdc->last_tid);

again:
        down_read(&osdc->lock);
        for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
                struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

                mutex_lock(&osd->lock);
                for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
                        struct ceph_osd_request *req =
                            rb_entry(p, struct ceph_osd_request, r_node);

                        if (req->r_tid > last_tid)
                                break;

                        if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
                                continue;

                        ceph_osdc_get_request(req);
                        mutex_unlock(&osd->lock);
                        up_read(&osdc->lock);
                        dout("%s waiting on req %p tid %llu last_tid %llu\n",
                             __func__, req, req->r_tid, last_tid);
                        wait_for_completion(&req->r_completion);
                        ceph_osdc_put_request(req);
                        goto again;
                }

                mutex_unlock(&osd->lock);
        }

        up_read(&osdc->lock);
        dout("%s done last_tid %llu\n", __func__, last_tid);
}
EXPORT_SYMBOL(ceph_osdc_sync);
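
/*
 * Example (illustrative sketch, not part of this file): a filesystem
 * typically uses this as a write barrier, e.g. from its ->sync_fs
 * method, before assuming all acked writes are stable:
 *
 *      ceph_osdc_sync(osdc);           // wait out in-flight writes
 *      ceph_osdc_flush_notifies(osdc); // then drain notify callbacks
 */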

static struct ceph_osd_request *
alloc_linger_request(struct ceph_osd_linger_request *lreq)
{
        struct ceph_osd_request *req;

        req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return NULL;

        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
        return req;
}

static struct ceph_osd_request *
alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
{
        struct ceph_osd_request *req;

        req = alloc_linger_request(lreq);
        if (!req)
                return NULL;

        /*
         * Pass 0 for cookie because we don't know it yet, it will be
         * filled in by linger_submit().
         */
        osd_req_op_watch_init(req, 0, 0, watch_opcode);

        if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
                ceph_osdc_put_request(req);
                return NULL;
        }

        return req;
}

/*
 * Returns a handle, caller owns a ref.
 */
struct ceph_osd_linger_request *
ceph_osdc_watch(struct ceph_osd_client *osdc,
                struct ceph_object_id *oid,
                struct ceph_object_locator *oloc,
                rados_watchcb2_t wcb,
                rados_watcherrcb_t errcb,
                void *data)
{
        struct ceph_osd_linger_request *lreq;
        int ret;

        lreq = linger_alloc(osdc);
        if (!lreq)
                return ERR_PTR(-ENOMEM);

        lreq->is_watch = true;
        lreq->wcb = wcb;
        lreq->errcb = errcb;
        lreq->data = data;
        lreq->watch_valid_thru = jiffies;

        ceph_oid_copy(&lreq->t.base_oid, oid);
        ceph_oloc_copy(&lreq->t.base_oloc, oloc);
        lreq->t.flags = CEPH_OSD_FLAG_WRITE;
        ktime_get_real_ts64(&lreq->mtime);

        lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
        if (!lreq->reg_req) {
                ret = -ENOMEM;
                goto err_put_lreq;
        }

        lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
        if (!lreq->ping_req) {
                ret = -ENOMEM;
                goto err_put_lreq;
        }

        linger_submit(lreq);
        ret = linger_reg_commit_wait(lreq);
        if (ret) {
                linger_cancel(lreq);
                goto err_put_lreq;
        }

        return lreq;

err_put_lreq:
        linger_put(lreq);
        return ERR_PTR(ret);
}
EXPORT_SYMBOL(ceph_osdc_watch);
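
/*
 * Example (illustrative sketch, not part of this file): establishing
 * and later tearing down a watch.  "my_watch_cb" and "my_watch_err"
 * are hypothetical callbacks with the rados_watchcb2_t and
 * rados_watcherrcb_t signatures, supplied by the caller:
 *
 *      struct ceph_osd_linger_request *handle;
 *
 *      handle = ceph_osdc_watch(osdc, &oid, &oloc, my_watch_cb,
 *                               my_watch_err, my_data);
 *      if (IS_ERR(handle))
 *              return PTR_ERR(handle);
 *      ...
 *      ret = ceph_osdc_unwatch(osdc, handle);  // releases the ref
 */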

/*
 * Releases a ref.
 *
 * Times out after mount_timeout to preserve rbd unmap behaviour
 * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
 * with mount_timeout").
 */
int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
                      struct ceph_osd_linger_request *lreq)
{
        struct ceph_options *opts = osdc->client->options;
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
        ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
        req->r_flags = CEPH_OSD_FLAG_WRITE;
        ktime_get_real_ts64(&req->r_mtime);
        osd_req_op_watch_init(req, 0, lreq->linger_id,
                              CEPH_OSD_WATCH_OP_UNWATCH);

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        linger_cancel(lreq);
        linger_put(lreq);
        ret = wait_request_timeout(req, opts->mount_timeout);

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_unwatch);

static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
                                      u64 notify_id, u64 cookie, void *payload,
                                      u32 payload_len)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pl;
        int ret;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);

        pl = ceph_pagelist_alloc(GFP_NOIO);
        if (!pl)
                return -ENOMEM;

        ret = ceph_pagelist_encode_64(pl, notify_id);
        ret |= ceph_pagelist_encode_64(pl, cookie);
        if (payload) {
                ret |= ceph_pagelist_encode_32(pl, payload_len);
                ret |= ceph_pagelist_append(pl, payload, payload_len);
        } else {
                ret |= ceph_pagelist_encode_32(pl, 0);
        }
        if (ret) {
                ceph_pagelist_release(pl);
                return -ENOMEM;
        }

        ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
        op->indata_len = pl->length;
        return 0;
}

int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
                         struct ceph_object_id *oid,
                         struct ceph_object_locator *oloc,
                         u64 notify_id,
                         u64 cookie,
                         void *payload,
                         u32 payload_len)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, oid);
        ceph_oloc_copy(&req->r_base_oloc, oloc);
        req->r_flags = CEPH_OSD_FLAG_READ;

        ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
                                         payload_len);
        if (ret)
                goto out_put_req;

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify_ack);
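
/*
 * Example (illustrative sketch, not part of this file): a watcher
 * normally acks each notify from its rados_watchcb2_t callback,
 * echoing back the notify_id and cookie it was handed.  "struct
 * my_dev" is a hypothetical holder for the caller's osdc/oid/oloc:
 *
 *      static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *                              u64 notifier_id, void *data, size_t data_len)
 *      {
 *              struct my_dev *dev = arg;
 *
 *              ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
 *                                   notify_id, cookie, NULL, 0);
 *      }
 */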

static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
                                  u64 cookie, u32 prot_ver, u32 timeout,
                                  void *payload, u32 payload_len)
{
        struct ceph_osd_req_op *op;
        struct ceph_pagelist *pl;
        int ret;

        op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
        op->notify.cookie = cookie;

        pl = ceph_pagelist_alloc(GFP_NOIO);
        if (!pl)
                return -ENOMEM;

        ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
        ret |= ceph_pagelist_encode_32(pl, timeout);
        ret |= ceph_pagelist_encode_32(pl, payload_len);
        ret |= ceph_pagelist_append(pl, payload, payload_len);
        if (ret) {
                ceph_pagelist_release(pl);
                return -ENOMEM;
        }

        ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
        op->indata_len = pl->length;
        return 0;
}

/*
 * @timeout: in seconds
 *
 * @preply_{pages,len} are initialized both on success and error.
 * The caller is responsible for:
 *
 *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
 */
int ceph_osdc_notify(struct ceph_osd_client *osdc,
                     struct ceph_object_id *oid,
                     struct ceph_object_locator *oloc,
                     void *payload,
                     u32 payload_len,
                     u32 timeout,
                     struct page ***preply_pages,
                     size_t *preply_len)
{
        struct ceph_osd_linger_request *lreq;
        struct page **pages;
        int ret;

        WARN_ON(!timeout);
        if (preply_pages) {
                *preply_pages = NULL;
                *preply_len = 0;
        }

        lreq = linger_alloc(osdc);
        if (!lreq)
                return -ENOMEM;

        lreq->preply_pages = preply_pages;
        lreq->preply_len = preply_len;

        ceph_oid_copy(&lreq->t.base_oid, oid);
        ceph_oloc_copy(&lreq->t.base_oloc, oloc);
        lreq->t.flags = CEPH_OSD_FLAG_READ;

        lreq->reg_req = alloc_linger_request(lreq);
        if (!lreq->reg_req) {
                ret = -ENOMEM;
                goto out_put_lreq;
        }

        /*
         * Pass 0 for cookie because we don't know it yet, it will be
         * filled in by linger_submit().
         */
        ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
                                     payload, payload_len);
        if (ret)
                goto out_put_lreq;

        /* for notify_id */
        pages = ceph_alloc_page_vector(1, GFP_NOIO);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_put_lreq;
        }
        ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
                                                 response_data),
                                 pages, PAGE_SIZE, 0, false, true);

        ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
        if (ret)
                goto out_put_lreq;

        linger_submit(lreq);
        ret = linger_reg_commit_wait(lreq);
        if (!ret)
                ret = linger_notify_finish_wait(lreq);
        else
                dout("lreq %p failed to initiate notify %d\n", lreq, ret);

        linger_cancel(lreq);
out_put_lreq:
        linger_put(lreq);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_notify);
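
/*
 * Example (illustrative sketch, not part of this file): sending a
 * notify with a 10 second timeout and releasing the reply buffer as
 * required by the comment above:
 *
 *      struct page **reply_pages = NULL;
 *      size_t reply_len = 0;
 *
 *      ret = ceph_osdc_notify(osdc, &oid, &oloc, payload, payload_len,
 *                             10, &reply_pages, &reply_len);
 *      if (!ret)
 *              ...examine the notify_ack payloads in reply_pages...
 *      ceph_release_page_vector(reply_pages,
 *                               calc_pages_for(0, reply_len));
 */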

/*
 * Return the number of milliseconds since the watch was last
 * confirmed, or an error.  If there is an error, the watch is no
 * longer valid, and should be destroyed with ceph_osdc_unwatch().
 */
int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
                          struct ceph_osd_linger_request *lreq)
{
        unsigned long stamp, age;
        int ret;

        down_read(&osdc->lock);
        mutex_lock(&lreq->lock);
        stamp = lreq->watch_valid_thru;
        if (!list_empty(&lreq->pending_lworks)) {
                struct linger_work *lwork =
                    list_first_entry(&lreq->pending_lworks,
                                     struct linger_work,
                                     pending_item);

                if (time_before(lwork->queued_stamp, stamp))
                        stamp = lwork->queued_stamp;
        }
        age = jiffies - stamp;
        dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
             lreq, lreq->linger_id, age, lreq->last_error);
        /* we are truncating to msecs, so return a safe upper bound */
        ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);

        mutex_unlock(&lreq->lock);
        up_read(&osdc->lock);
        return ret;
}
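
/*
 * Example (illustrative sketch, not part of this file): a keepalive
 * worker can poll an established watch and re-establish it once it
 * goes stale.  "MY_STALE_MS" is a hypothetical staleness bound chosen
 * by the caller:
 *
 *      ret = ceph_osdc_watch_check(osdc, handle);
 *      if (ret < 0 || ret > MY_STALE_MS) {
 *              ceph_osdc_unwatch(osdc, handle);
 *              // ...re-watch or fail over...
 *      }
 */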

static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
{
        u8 struct_v;
        u32 struct_len;
        int ret;

        ret = ceph_start_decoding(p, end, 2, "watch_item_t",
                                  &struct_v, &struct_len);
        if (ret)
                goto bad;

        ret = -EINVAL;
        ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
        ceph_decode_64_safe(p, end, item->cookie, bad);
        ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */

        if (struct_v >= 2) {
                ret = ceph_decode_entity_addr(p, end, &item->addr);
                if (ret)
                        goto bad;
        } else {
                ret = 0;
        }

        dout("%s %s%llu cookie %llu addr %s\n", __func__,
             ENTITY_NAME(item->name), item->cookie,
             ceph_pr_addr(&item->addr));
bad:
        return ret;
}

static int decode_watchers(void **p, void *end,
                           struct ceph_watch_item **watchers,
                           u32 *num_watchers)
{
        u8 struct_v;
        u32 struct_len;
        int i;
        int ret;

        ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
                                  &struct_v, &struct_len);
        if (ret)
                return ret;

        *num_watchers = ceph_decode_32(p);
        *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
        if (!*watchers)
                return -ENOMEM;

        for (i = 0; i < *num_watchers; i++) {
                ret = decode_watcher(p, end, *watchers + i);
                if (ret) {
                        kfree(*watchers);
                        return ret;
                }
        }

        return 0;
}

/*
 * On success, the caller is responsible for:
 *
 *     kfree(watchers);
 */
int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
                            struct ceph_object_id *oid,
                            struct ceph_object_locator *oloc,
                            struct ceph_watch_item **watchers,
                            u32 *num_watchers)
{
        struct ceph_osd_request *req;
        struct page **pages;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, oid);
        ceph_oloc_copy(&req->r_base_oloc, oloc);
        req->r_flags = CEPH_OSD_FLAG_READ;

        pages = ceph_alloc_page_vector(1, GFP_NOIO);
        if (IS_ERR(pages)) {
                ret = PTR_ERR(pages);
                goto out_put_req;
        }

        osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
        ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
                                                 response_data),
                                 pages, PAGE_SIZE, 0, false, true);

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);
        if (ret >= 0) {
                void *p = page_address(pages[0]);
                void *const end = p + req->r_ops[0].outdata_len;

                ret = decode_watchers(&p, end, watchers, num_watchers);
        }

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_list_watchers);
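
/*
 * Example (illustrative sketch, not part of this file): dumping the
 * watchers on an object and freeing the array as required above:
 *
 *      struct ceph_watch_item *watchers;
 *      u32 num_watchers;
 *      u32 i;
 *
 *      ret = ceph_osdc_list_watchers(osdc, &oid, &oloc, &watchers,
 *                                    &num_watchers);
 *      if (ret)
 *              return ret;
 *      for (i = 0; i < num_watchers; i++)
 *              pr_info("watcher %s%llu cookie %llu\n",
 *                      ENTITY_NAME(watchers[i].name),
 *                      watchers[i].cookie);
 *      kfree(watchers);
 */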

/*
 * Call all pending notify callbacks - for use after a watch is
 * unregistered, to make sure no more callbacks for it will be invoked.
 */
void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
{
        dout("%s osdc %p\n", __func__, osdc);
        flush_workqueue(osdc->notify_wq);
}
EXPORT_SYMBOL(ceph_osdc_flush_notifies);

void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
{
        down_read(&osdc->lock);
        maybe_request_map(osdc);
        up_read(&osdc->lock);
}
EXPORT_SYMBOL(ceph_osdc_maybe_request_map);

/*
 * Execute an OSD class method on an object.
 *
 * @flags: CEPH_OSD_FLAG_*
 * @resp_len: in/out param for reply length
 */
int ceph_osdc_call(struct ceph_osd_client *osdc,
                   struct ceph_object_id *oid,
                   struct ceph_object_locator *oloc,
                   const char *class, const char *method,
                   unsigned int flags,
                   struct page *req_page, size_t req_len,
                   struct page **resp_pages, size_t *resp_len)
{
        struct ceph_osd_request *req;
        int ret;

        if (req_len > PAGE_SIZE)
                return -E2BIG;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
        if (!req)
                return -ENOMEM;

        ceph_oid_copy(&req->r_base_oid, oid);
        ceph_oloc_copy(&req->r_base_oloc, oloc);
        req->r_flags = flags;

        ret = osd_req_op_cls_init(req, 0, class, method);
        if (ret)
                goto out_put_req;

        if (req_page)
                osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
                                                  0, false, false);
        if (resp_pages)
                osd_req_op_cls_response_data_pages(req, 0, resp_pages,
                                                   *resp_len, 0, false, false);

        ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
        if (ret)
                goto out_put_req;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);
        if (ret >= 0) {
                ret = req->r_ops[0].rval;
                if (resp_pages)
                        *resp_len = req->r_ops[0].outdata_len;
        }

out_put_req:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_call);
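
/*
 * Example (illustrative sketch, not part of this file): invoking an
 * OSD class method with no request payload and a single-page reply,
 * in the style of rbd's metadata queries ("rbd"/"get_id" is just a
 * sample class/method pair):
 *
 *      struct page *reply_page = alloc_page(GFP_NOIO);
 *      size_t reply_len = PAGE_SIZE;   // in: buffer size, out: bytes used
 *
 *      if (!reply_page)
 *              return -ENOMEM;
 *      ret = ceph_osdc_call(osdc, &oid, &oloc, "rbd", "get_id",
 *                           CEPH_OSD_FLAG_READ, NULL, 0,
 *                           &reply_page, &reply_len);
 *      if (ret >= 0)
 *              ...decode reply_len bytes from reply_page...
 *      __free_page(reply_page);
 */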

/*
 * init, shutdown
 */
int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
{
        int err;

        dout("init\n");
        osdc->client = client;
        init_rwsem(&osdc->lock);
        osdc->osds = RB_ROOT;
        INIT_LIST_HEAD(&osdc->osd_lru);
        spin_lock_init(&osdc->osd_lru_lock);
        osd_init(&osdc->homeless_osd);
        osdc->homeless_osd.o_osdc = osdc;
        osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
        osdc->last_linger_id = CEPH_LINGER_ID_START;
        osdc->linger_requests = RB_ROOT;
        osdc->map_checks = RB_ROOT;
        osdc->linger_map_checks = RB_ROOT;
        INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
        INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);

        err = -ENOMEM;
        osdc->osdmap = ceph_osdmap_alloc();
        if (!osdc->osdmap)
                goto out;

        osdc->req_mempool = mempool_create_slab_pool(10,
                                                     ceph_osd_request_cache);
        if (!osdc->req_mempool)
                goto out_map;

        err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
                                PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
        if (err < 0)
                goto out_mempool;
        err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
                                PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
                                "osd_op_reply");
        if (err < 0)
                goto out_msgpool;

        err = -ENOMEM;
        osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
        if (!osdc->notify_wq)
                goto out_msgpool_reply;

        osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
        if (!osdc->completion_wq)
                goto out_notify_wq;

        schedule_delayed_work(&osdc->timeout_work,
                              osdc->client->options->osd_keepalive_timeout);
        schedule_delayed_work(&osdc->osds_timeout_work,
            round_jiffies_relative(osdc->client->options->osd_idle_ttl));

        return 0;

out_notify_wq:
        destroy_workqueue(osdc->notify_wq);
out_msgpool_reply:
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
out_msgpool:
        ceph_msgpool_destroy(&osdc->msgpool_op);
out_mempool:
        mempool_destroy(osdc->req_mempool);
out_map:
        ceph_osdmap_destroy(osdc->osdmap);
out:
        return err;
}

void ceph_osdc_stop(struct ceph_osd_client *osdc)
{
        destroy_workqueue(osdc->completion_wq);
        destroy_workqueue(osdc->notify_wq);
        cancel_delayed_work_sync(&osdc->timeout_work);
        cancel_delayed_work_sync(&osdc->osds_timeout_work);

        down_write(&osdc->lock);
        while (!RB_EMPTY_ROOT(&osdc->osds)) {
                struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
                                                struct ceph_osd, o_node);
                close_osd(osd);
        }
        up_write(&osdc->lock);
        WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
        osd_cleanup(&osdc->homeless_osd);

        WARN_ON(!list_empty(&osdc->osd_lru));
        WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
        WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
        WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
        WARN_ON(atomic_read(&osdc->num_requests));
        WARN_ON(atomic_read(&osdc->num_homeless));

        ceph_osdmap_destroy(osdc->osdmap);
        mempool_destroy(osdc->req_mempool);
        ceph_msgpool_destroy(&osdc->msgpool_op);
        ceph_msgpool_destroy(&osdc->msgpool_op_reply);
}

/*
 * Read some contiguous pages.  If we cross a stripe boundary, shorten
 * *plen.  Return number of bytes read, or error.
 */
int ceph_osdc_readpages(struct ceph_osd_client *osdc,
                        struct ceph_vino vino, struct ceph_file_layout *layout,
                        u64 off, u64 *plen,
                        u32 truncate_seq, u64 truncate_size,
                        struct page **pages, int num_pages, int page_align)
{
        struct ceph_osd_request *req;
        int rc = 0;

        dout("readpages on ino %llx.%llx on %llu~%llu\n", vino.ino,
             vino.snap, off, *plen);
        req = ceph_osdc_new_request(osdc, layout, vino, off, plen, 0, 1,
                                    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
                                    NULL, truncate_seq, truncate_size,
                                    false);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short read due to an object boundary */
        osd_req_op_extent_osd_data_pages(req, 0,
                                pages, *plen, page_align, false, false);

        dout("readpages  final extent is %llu~%llu (%llu bytes align %d)\n",
             off, *plen, *plen, page_align);

        rc = ceph_osdc_start_request(osdc, req, false);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        dout("readpages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_readpages);

/*
 * do a synchronous write on N pages
 */
int ceph_osdc_writepages(struct ceph_osd_client *osdc, struct ceph_vino vino,
                         struct ceph_file_layout *layout,
                         struct ceph_snap_context *snapc,
                         u64 off, u64 len,
                         u32 truncate_seq, u64 truncate_size,
                         struct timespec64 *mtime,
                         struct page **pages, int num_pages)
{
        struct ceph_osd_request *req;
        int rc = 0;
        int page_align = off & ~PAGE_MASK;

        req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
                                    CEPH_OSD_OP_WRITE, CEPH_OSD_FLAG_WRITE,
                                    snapc, truncate_seq, truncate_size,
                                    true);
        if (IS_ERR(req))
                return PTR_ERR(req);

        /* it may be a short write due to an object boundary */
        osd_req_op_extent_osd_data_pages(req, 0, pages, len, page_align,
                                false, false);
        dout("writepages %llu~%llu (%llu bytes)\n", off, len, len);

        req->r_mtime = *mtime;
        rc = ceph_osdc_start_request(osdc, req, true);
        if (!rc)
                rc = ceph_osdc_wait_request(osdc, req);

        ceph_osdc_put_request(req);
        if (rc == 0)
                rc = len;
        dout("writepages result %d\n", rc);
        return rc;
}
EXPORT_SYMBOL(ceph_osdc_writepages);
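
/*
 * Example (illustrative sketch, not part of this file): synchronously
 * writing one page of a file object and reading it back.  "vino",
 * "layout", "snapc" and the truncate_* values are assumed to come
 * from the caller's inode state:
 *
 *      u64 len = PAGE_SIZE;
 *
 *      ret = ceph_osdc_writepages(osdc, vino, layout, snapc, off, len,
 *                                 truncate_seq, truncate_size, &mtime,
 *                                 &page, 1);
 *      if (ret < 0)
 *              return ret;
 *      ret = ceph_osdc_readpages(osdc, vino, layout, off, &len,
 *                                truncate_seq, truncate_size,
 *                                &page, 1, 0);
 */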

static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
                                     u64 src_snapid, u64 src_version,
                                     struct ceph_object_id *src_oid,
                                     struct ceph_object_locator *src_oloc,
                                     u32 src_fadvise_flags,
                                     u32 dst_fadvise_flags,
                                     u8 copy_from_flags)
{
        struct ceph_osd_req_op *op;
        struct page **pages;
        void *p, *end;

        pages = ceph_alloc_page_vector(1, GFP_KERNEL);
        if (IS_ERR(pages))
                return PTR_ERR(pages);

        op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM, dst_fadvise_flags);
        op->copy_from.snapid = src_snapid;
        op->copy_from.src_version = src_version;
        op->copy_from.flags = copy_from_flags;
        op->copy_from.src_fadvise_flags = src_fadvise_flags;

        p = page_address(pages[0]);
        end = p + PAGE_SIZE;
        ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
        encode_oloc(&p, end, src_oloc);
        op->indata_len = PAGE_SIZE - (end - p);

        ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
                                 op->indata_len, 0, false, true);
        return 0;
}

int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
                        u64 src_snapid, u64 src_version,
                        struct ceph_object_id *src_oid,
                        struct ceph_object_locator *src_oloc,
                        u32 src_fadvise_flags,
                        struct ceph_object_id *dst_oid,
                        struct ceph_object_locator *dst_oloc,
                        u32 dst_fadvise_flags,
                        u8 copy_from_flags)
{
        struct ceph_osd_request *req;
        int ret;

        req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
        if (!req)
                return -ENOMEM;

        req->r_flags = CEPH_OSD_FLAG_WRITE;

        ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
        ceph_oid_copy(&req->r_t.base_oid, dst_oid);

        ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
                                        src_oloc, src_fadvise_flags,
                                        dst_fadvise_flags, copy_from_flags);
        if (ret)
                goto out;

        ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
        if (ret)
                goto out;

        ceph_osdc_start_request(osdc, req, false);
        ret = ceph_osdc_wait_request(osdc, req);

out:
        ceph_osdc_put_request(req);
        return ret;
}
EXPORT_SYMBOL(ceph_osdc_copy_from);
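
/*
 * Example (illustrative sketch, not part of this file): asking an OSD
 * to copy one object to another server-side, reading the head version
 * of the source.  The zeros (src_version, copy_from_flags) and the
 * fadvise flags are illustrative choices, not requirements:
 *
 *      ret = ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0,
 *                                &src_oid, &src_oloc,
 *                                CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
 *                                &dst_oid, &dst_oloc,
 *                                CEPH_OSD_OP_FLAG_FADVISE_DONTNEED, 0);
 */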

int __init ceph_osdc_setup(void)
{
        size_t size = sizeof(struct ceph_osd_request) +
            CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);

        BUG_ON(ceph_osd_request_cache);
        ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
                                                   0, 0, NULL);

        return ceph_osd_request_cache ? 0 : -ENOMEM;
}

void ceph_osdc_cleanup(void)
{
        BUG_ON(!ceph_osd_request_cache);
        kmem_cache_destroy(ceph_osd_request_cache);
        ceph_osd_request_cache = NULL;
}

/*
 * handle incoming message
 */
static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        int type = le16_to_cpu(msg->hdr.type);

        switch (type) {
        case CEPH_MSG_OSD_MAP:
                ceph_osdc_handle_map(osdc, msg);
                break;
        case CEPH_MSG_OSD_OPREPLY:
                handle_reply(osd, msg);
                break;
        case CEPH_MSG_OSD_BACKOFF:
                handle_backoff(osd, msg);
                break;
        case CEPH_MSG_WATCH_NOTIFY:
                handle_watch_notify(osdc, msg);
                break;

        default:
                pr_err("received unknown message type %d %s\n", type,
                       ceph_msg_type_name(type));
        }

        ceph_msg_put(msg);
}

/*
 * Look up and return the message for an incoming reply.  We don't try
 * to handle a data portion larger than the preallocated one at the
 * moment - for now, such a message is simply skipped.
 */
static struct ceph_msg *get_reply(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        struct ceph_osd_client *osdc = osd->o_osdc;
        struct ceph_msg *m = NULL;
        struct ceph_osd_request *req;
        int front_len = le32_to_cpu(hdr->front_len);
        int data_len = le32_to_cpu(hdr->data_len);
        u64 tid = le64_to_cpu(hdr->tid);

        down_read(&osdc->lock);
        if (!osd_registered(osd)) {
                dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
                *skip = 1;
                goto out_unlock_osdc;
        }
        WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));

        mutex_lock(&osd->lock);
        req = lookup_request(&osd->o_requests, tid);
        if (!req) {
                dout("%s osd%d tid %llu unknown, skipping\n", __func__,
                     osd->o_osd, tid);
                *skip = 1;
                goto out_unlock_session;
        }

        ceph_msg_revoke_incoming(req->r_reply);

        if (front_len > req->r_reply->front_alloc_len) {
                pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
                        __func__, osd->o_osd, req->r_tid, front_len,
                        req->r_reply->front_alloc_len);
                m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
                                 false);
                if (!m)
                        goto out_unlock_session;
                ceph_msg_put(req->r_reply);
                req->r_reply = m;
        }

        if (data_len > req->r_reply->data_length) {
                pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
                        __func__, osd->o_osd, req->r_tid, data_len,
                        req->r_reply->data_length);
                m = NULL;
                *skip = 1;
                goto out_unlock_session;
        }

        m = ceph_msg_get(req->r_reply);
        dout("get_reply tid %lld %p\n", tid, m);

out_unlock_session:
        mutex_unlock(&osd->lock);
out_unlock_osdc:
        up_read(&osdc->lock);
        return m;
}

/*
 * TODO: switch to a msg-owned pagelist
 */
static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
{
        struct ceph_msg *m;
        int type = le16_to_cpu(hdr->type);
        u32 front_len = le32_to_cpu(hdr->front_len);
        u32 data_len = le32_to_cpu(hdr->data_len);

        m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
        if (!m)
                return NULL;

        if (data_len) {
                struct page **pages;
                struct ceph_osd_data osd_data;

                pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
                                               GFP_NOIO);
                if (IS_ERR(pages)) {
                        ceph_msg_put(m);
                        return NULL;
                }

                ceph_osd_data_pages_init(&osd_data, pages, data_len, 0, false,
                                         false);
                ceph_osdc_msg_data_add(m, &osd_data);
        }

        return m;
}

static struct ceph_msg *alloc_msg(struct ceph_connection *con,
                                  struct ceph_msg_header *hdr,
                                  int *skip)
{
        struct ceph_osd *osd = con->private;
        int type = le16_to_cpu(hdr->type);

        *skip = 0;
        switch (type) {
        case CEPH_MSG_OSD_MAP:
        case CEPH_MSG_OSD_BACKOFF:
        case CEPH_MSG_WATCH_NOTIFY:
                return alloc_msg_with_page_vector(hdr);
        case CEPH_MSG_OSD_OPREPLY:
                return get_reply(con, hdr, skip);
        default:
                pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
                        osd->o_osd, type);
                *skip = 1;
                return NULL;
        }
}

/*
 * Wrappers to refcount the containing ceph_osd struct
 */
static struct ceph_connection *get_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        if (get_osd(osd))
                return con;
        return NULL;
}

static void put_osd_con(struct ceph_connection *con)
{
        struct ceph_osd *osd = con->private;
        put_osd(osd);
}

/*
 * authentication
 */
/*
 * Note: returned pointer is the address of a structure that's
 * managed separately.  Caller must *not* attempt to free it.
 */
static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
                                        int *proto, int force_new)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;
        struct ceph_auth_handshake *auth = &o->o_auth;

        if (force_new && auth->authorizer) {
                ceph_auth_destroy_authorizer(auth->authorizer);
                auth->authorizer = NULL;
        }
        if (!auth->authorizer) {
                int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
                                                      auth);
                if (ret)
                        return ERR_PTR(ret);
        } else {
                int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
                                                     auth);
                if (ret)
                        return ERR_PTR(ret);
        }
        *proto = ac->protocol;

        return auth;
}

static int add_authorizer_challenge(struct ceph_connection *con,
                                    void *challenge_buf, int challenge_buf_len)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
                                            challenge_buf, challenge_buf_len);
}

static int verify_authorizer_reply(struct ceph_connection *con)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
}

static int invalidate_authorizer(struct ceph_connection *con)
{
        struct ceph_osd *o = con->private;
        struct ceph_osd_client *osdc = o->o_osdc;
        struct ceph_auth_client *ac = osdc->client->monc.auth;

        ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
        return ceph_monc_validate_auth(&osdc->client->monc);
}

static void osd_reencode_message(struct ceph_msg *msg)
{
        int type = le16_to_cpu(msg->hdr.type);

        if (type == CEPH_MSG_OSD_OP)
                encode_request_finish(msg);
}

static int osd_sign_message(struct ceph_msg *msg)
{
        struct ceph_osd *o = msg->con->private;
        struct ceph_auth_handshake *auth = &o->o_auth;

        return ceph_auth_sign_message(auth, msg);
}

static int osd_check_message_signature(struct ceph_msg *msg)
{
        struct ceph_osd *o = msg->con->private;
        struct ceph_auth_handshake *auth = &o->o_auth;

        return ceph_auth_check_message_signature(auth, msg);
}

static const struct ceph_connection_operations osd_con_ops = {
        .get = get_osd_con,
        .put = put_osd_con,
        .dispatch = dispatch,
        .get_authorizer = get_authorizer,
        .add_authorizer_challenge = add_authorizer_challenge,
        .verify_authorizer_reply = verify_authorizer_reply,
        .invalidate_authorizer = invalidate_authorizer,
        .alloc_msg = alloc_msg,
        .reencode_message = osd_reencode_message,
        .sign_message = osd_sign_message,
        .check_message_signature = osd_check_message_signature,
        .fault = osd_fault,
};