libceph: support for balanced and localized reads
[linux-2.6-microblaze.git] / net / ceph / osd_client.c
1 // SPDX-License-Identifier: GPL-2.0
2
3 #include <linux/ceph/ceph_debug.h>
4
5 #include <linux/module.h>
6 #include <linux/err.h>
7 #include <linux/highmem.h>
8 #include <linux/mm.h>
9 #include <linux/pagemap.h>
10 #include <linux/slab.h>
11 #include <linux/uaccess.h>
12 #ifdef CONFIG_BLOCK
13 #include <linux/bio.h>
14 #endif
15
16 #include <linux/ceph/ceph_features.h>
17 #include <linux/ceph/libceph.h>
18 #include <linux/ceph/osd_client.h>
19 #include <linux/ceph/messenger.h>
20 #include <linux/ceph/decode.h>
21 #include <linux/ceph/auth.h>
22 #include <linux/ceph/pagelist.h>
23 #include <linux/ceph/striper.h>
24
25 #define OSD_OPREPLY_FRONT_LEN   512
26
27 static struct kmem_cache        *ceph_osd_request_cache;
28
29 static const struct ceph_connection_operations osd_con_ops;
30
31 /*
32  * Implement client access to distributed object storage cluster.
33  *
34  * All data objects are stored within a cluster/cloud of OSDs, or
35  * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
36  * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
37  * remote daemons serving up and coordinating consistent and safe
38  * access to storage.
39  *
40  * Cluster membership and the mapping of data objects onto storage devices
41  * are described by the osd map.
42  *
43  * We keep track of pending OSD requests (read, write), resubmit
44  * requests to different OSDs when the cluster topology/data layout
45  * change, or retry the affected requests when the communications
46  * channel with an OSD is reset.
47  */
48
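/*
 * A minimal sketch of how a libceph user (fs/ceph, rbd) typically drives
 * this API to read a single file extent.  The helper name and the bare-bones
 * error handling are illustrative only; real callers also deal with snapshot
 * contexts, truncation state and multi-op requests.
 */
static int example_read_extent(struct ceph_osd_client *osdc,
			       struct ceph_file_layout *layout,
			       struct ceph_vino vino, u64 off, u64 len,
			       struct page **pages)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
				    NULL, 0, 0, false);
	if (IS_ERR(req))
		return PTR_ERR(req);

	/* hang the destination pages off of op 0 */
	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0, false, false);

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req); /* bytes read or -errno */

	ceph_osdc_put_request(req);
	return ret;
}
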
49 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
50 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
51 static void link_linger(struct ceph_osd *osd,
52                         struct ceph_osd_linger_request *lreq);
53 static void unlink_linger(struct ceph_osd *osd,
54                           struct ceph_osd_linger_request *lreq);
55 static void clear_backoffs(struct ceph_osd *osd);
56
57 #if 1
58 static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
59 {
60         bool wrlocked = true;
61
62         if (unlikely(down_read_trylock(sem))) {
63                 wrlocked = false;
64                 up_read(sem);
65         }
66
67         return wrlocked;
68 }
69 static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
70 {
71         WARN_ON(!rwsem_is_locked(&osdc->lock));
72 }
73 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
74 {
75         WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
76 }
77 static inline void verify_osd_locked(struct ceph_osd *osd)
78 {
79         struct ceph_osd_client *osdc = osd->o_osdc;
80
81         WARN_ON(!(mutex_is_locked(&osd->lock) &&
82                   rwsem_is_locked(&osdc->lock)) &&
83                 !rwsem_is_wrlocked(&osdc->lock));
84 }
85 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
86 {
87         WARN_ON(!mutex_is_locked(&lreq->lock));
88 }
89 #else
90 static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
91 static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
92 static inline void verify_osd_locked(struct ceph_osd *osd) { }
93 static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
94 #endif
95
96 /*
97  * calculate the mapping of a file extent onto an object, and fill out the
98  * request accordingly.  shorten extent as necessary if it crosses an
99  * object boundary.
100  *
101  * fill osd op in request message.
102  */
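/*
 * For example, assuming the simple default layout (4M objects with
 * stripe_unit == object_size and stripe_count == 1), a 4M read at file
 * offset 6M yields objnum=1, objoff=2M, objlen=2M, and *plen is shortened
 * from 4M to 2M because the extent crosses into the next object.
 */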
103 static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
104                         u64 *objnum, u64 *objoff, u64 *objlen)
105 {
106         u64 orig_len = *plen;
107         u32 xlen;
108
109         /* object extent? */
110         ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
111                                           objoff, &xlen);
112         *objlen = xlen;
113         if (*objlen < orig_len) {
114                 *plen = *objlen;
115                 dout(" skipping last %llu, final file extent %llu~%llu\n",
116                      orig_len - *plen, off, *plen);
117         }
118
119         dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
120         return 0;
121 }
122
123 static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
124 {
125         memset(osd_data, 0, sizeof (*osd_data));
126         osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
127 }
128
129 /*
130  * Consumes @pages if @own_pages is true.
131  */
132 static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
133                         struct page **pages, u64 length, u32 alignment,
134                         bool pages_from_pool, bool own_pages)
135 {
136         osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
137         osd_data->pages = pages;
138         osd_data->length = length;
139         osd_data->alignment = alignment;
140         osd_data->pages_from_pool = pages_from_pool;
141         osd_data->own_pages = own_pages;
142 }
143
144 /*
145  * Consumes a ref on @pagelist.
146  */
147 static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
148                         struct ceph_pagelist *pagelist)
149 {
150         osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
151         osd_data->pagelist = pagelist;
152 }
153
154 #ifdef CONFIG_BLOCK
155 static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
156                                    struct ceph_bio_iter *bio_pos,
157                                    u32 bio_length)
158 {
159         osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
160         osd_data->bio_pos = *bio_pos;
161         osd_data->bio_length = bio_length;
162 }
163 #endif /* CONFIG_BLOCK */
164
165 static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
166                                      struct ceph_bvec_iter *bvec_pos,
167                                      u32 num_bvecs)
168 {
169         osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
170         osd_data->bvec_pos = *bvec_pos;
171         osd_data->num_bvecs = num_bvecs;
172 }
173
174 static struct ceph_osd_data *
175 osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
176 {
177         BUG_ON(which >= osd_req->r_num_ops);
178
179         return &osd_req->r_ops[which].raw_data_in;
180 }
181
182 struct ceph_osd_data *
183 osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
184                         unsigned int which)
185 {
186         return osd_req_op_data(osd_req, which, extent, osd_data);
187 }
188 EXPORT_SYMBOL(osd_req_op_extent_osd_data);
189
190 void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
191                         unsigned int which, struct page **pages,
192                         u64 length, u32 alignment,
193                         bool pages_from_pool, bool own_pages)
194 {
195         struct ceph_osd_data *osd_data;
196
197         osd_data = osd_req_op_raw_data_in(osd_req, which);
198         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
199                                 pages_from_pool, own_pages);
200 }
201 EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);
202
203 void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
204                         unsigned int which, struct page **pages,
205                         u64 length, u32 alignment,
206                         bool pages_from_pool, bool own_pages)
207 {
208         struct ceph_osd_data *osd_data;
209
210         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
211         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
212                                 pages_from_pool, own_pages);
213 }
214 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
215
216 void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
217                         unsigned int which, struct ceph_pagelist *pagelist)
218 {
219         struct ceph_osd_data *osd_data;
220
221         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
222         ceph_osd_data_pagelist_init(osd_data, pagelist);
223 }
224 EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);
225
226 #ifdef CONFIG_BLOCK
227 void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
228                                     unsigned int which,
229                                     struct ceph_bio_iter *bio_pos,
230                                     u32 bio_length)
231 {
232         struct ceph_osd_data *osd_data;
233
234         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
235         ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
236 }
237 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
238 #endif /* CONFIG_BLOCK */
239
240 void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
241                                       unsigned int which,
242                                       struct bio_vec *bvecs, u32 num_bvecs,
243                                       u32 bytes)
244 {
245         struct ceph_osd_data *osd_data;
246         struct ceph_bvec_iter it = {
247                 .bvecs = bvecs,
248                 .iter = { .bi_size = bytes },
249         };
250
251         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
252         ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
253 }
254 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);
255
256 void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
257                                          unsigned int which,
258                                          struct ceph_bvec_iter *bvec_pos)
259 {
260         struct ceph_osd_data *osd_data;
261
262         osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
263         ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
264 }
265 EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);
266
267 static void osd_req_op_cls_request_info_pagelist(
268                         struct ceph_osd_request *osd_req,
269                         unsigned int which, struct ceph_pagelist *pagelist)
270 {
271         struct ceph_osd_data *osd_data;
272
273         osd_data = osd_req_op_data(osd_req, which, cls, request_info);
274         ceph_osd_data_pagelist_init(osd_data, pagelist);
275 }
276
277 void osd_req_op_cls_request_data_pagelist(
278                         struct ceph_osd_request *osd_req,
279                         unsigned int which, struct ceph_pagelist *pagelist)
280 {
281         struct ceph_osd_data *osd_data;
282
283         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
284         ceph_osd_data_pagelist_init(osd_data, pagelist);
285         osd_req->r_ops[which].cls.indata_len += pagelist->length;
286         osd_req->r_ops[which].indata_len += pagelist->length;
287 }
288 EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);
289
290 void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
291                         unsigned int which, struct page **pages, u64 length,
292                         u32 alignment, bool pages_from_pool, bool own_pages)
293 {
294         struct ceph_osd_data *osd_data;
295
296         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
297         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
298                                 pages_from_pool, own_pages);
299         osd_req->r_ops[which].cls.indata_len += length;
300         osd_req->r_ops[which].indata_len += length;
301 }
302 EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);
303
304 void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
305                                        unsigned int which,
306                                        struct bio_vec *bvecs, u32 num_bvecs,
307                                        u32 bytes)
308 {
309         struct ceph_osd_data *osd_data;
310         struct ceph_bvec_iter it = {
311                 .bvecs = bvecs,
312                 .iter = { .bi_size = bytes },
313         };
314
315         osd_data = osd_req_op_data(osd_req, which, cls, request_data);
316         ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
317         osd_req->r_ops[which].cls.indata_len += bytes;
318         osd_req->r_ops[which].indata_len += bytes;
319 }
320 EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);
321
322 void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
323                         unsigned int which, struct page **pages, u64 length,
324                         u32 alignment, bool pages_from_pool, bool own_pages)
325 {
326         struct ceph_osd_data *osd_data;
327
328         osd_data = osd_req_op_data(osd_req, which, cls, response_data);
329         ceph_osd_data_pages_init(osd_data, pages, length, alignment,
330                                 pages_from_pool, own_pages);
331 }
332 EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);
333
334 static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
335 {
336         switch (osd_data->type) {
337         case CEPH_OSD_DATA_TYPE_NONE:
338                 return 0;
339         case CEPH_OSD_DATA_TYPE_PAGES:
340                 return osd_data->length;
341         case CEPH_OSD_DATA_TYPE_PAGELIST:
342                 return (u64)osd_data->pagelist->length;
343 #ifdef CONFIG_BLOCK
344         case CEPH_OSD_DATA_TYPE_BIO:
345                 return (u64)osd_data->bio_length;
346 #endif /* CONFIG_BLOCK */
347         case CEPH_OSD_DATA_TYPE_BVECS:
348                 return osd_data->bvec_pos.iter.bi_size;
349         default:
350                 WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
351                 return 0;
352         }
353 }
354
355 static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
356 {
357         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
358                 int num_pages;
359
360                 num_pages = calc_pages_for((u64)osd_data->alignment,
361                                                 (u64)osd_data->length);
362                 ceph_release_page_vector(osd_data->pages, num_pages);
363         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
364                 ceph_pagelist_release(osd_data->pagelist);
365         }
366         ceph_osd_data_init(osd_data);
367 }
368
369 static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
370                         unsigned int which)
371 {
372         struct ceph_osd_req_op *op;
373
374         BUG_ON(which >= osd_req->r_num_ops);
375         op = &osd_req->r_ops[which];
376
377         switch (op->op) {
378         case CEPH_OSD_OP_READ:
379         case CEPH_OSD_OP_WRITE:
380         case CEPH_OSD_OP_WRITEFULL:
381                 ceph_osd_data_release(&op->extent.osd_data);
382                 break;
383         case CEPH_OSD_OP_CALL:
384                 ceph_osd_data_release(&op->cls.request_info);
385                 ceph_osd_data_release(&op->cls.request_data);
386                 ceph_osd_data_release(&op->cls.response_data);
387                 break;
388         case CEPH_OSD_OP_SETXATTR:
389         case CEPH_OSD_OP_CMPXATTR:
390                 ceph_osd_data_release(&op->xattr.osd_data);
391                 break;
392         case CEPH_OSD_OP_STAT:
393                 ceph_osd_data_release(&op->raw_data_in);
394                 break;
395         case CEPH_OSD_OP_NOTIFY_ACK:
396                 ceph_osd_data_release(&op->notify_ack.request_data);
397                 break;
398         case CEPH_OSD_OP_NOTIFY:
399                 ceph_osd_data_release(&op->notify.request_data);
400                 ceph_osd_data_release(&op->notify.response_data);
401                 break;
402         case CEPH_OSD_OP_LIST_WATCHERS:
403                 ceph_osd_data_release(&op->list_watchers.response_data);
404                 break;
405         case CEPH_OSD_OP_COPY_FROM2:
406                 ceph_osd_data_release(&op->copy_from.osd_data);
407                 break;
408         default:
409                 break;
410         }
411 }
412
413 /*
414  * Assumes @t is zero-initialized.
415  */
416 static void target_init(struct ceph_osd_request_target *t)
417 {
418         ceph_oid_init(&t->base_oid);
419         ceph_oloc_init(&t->base_oloc);
420         ceph_oid_init(&t->target_oid);
421         ceph_oloc_init(&t->target_oloc);
422
423         ceph_osds_init(&t->acting);
424         ceph_osds_init(&t->up);
425         t->size = -1;
426         t->min_size = -1;
427
428         t->osd = CEPH_HOMELESS_OSD;
429 }
430
431 static void target_copy(struct ceph_osd_request_target *dest,
432                         const struct ceph_osd_request_target *src)
433 {
434         ceph_oid_copy(&dest->base_oid, &src->base_oid);
435         ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
436         ceph_oid_copy(&dest->target_oid, &src->target_oid);
437         ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);
438
439         dest->pgid = src->pgid; /* struct */
440         dest->spgid = src->spgid; /* struct */
441         dest->pg_num = src->pg_num;
442         dest->pg_num_mask = src->pg_num_mask;
443         ceph_osds_copy(&dest->acting, &src->acting);
444         ceph_osds_copy(&dest->up, &src->up);
445         dest->size = src->size;
446         dest->min_size = src->min_size;
447         dest->sort_bitwise = src->sort_bitwise;
448
449         dest->flags = src->flags;
450         dest->paused = src->paused;
451
452         dest->epoch = src->epoch;
453         dest->last_force_resend = src->last_force_resend;
454
455         dest->osd = src->osd;
456 }
457
458 static void target_destroy(struct ceph_osd_request_target *t)
459 {
460         ceph_oid_destroy(&t->base_oid);
461         ceph_oloc_destroy(&t->base_oloc);
462         ceph_oid_destroy(&t->target_oid);
463         ceph_oloc_destroy(&t->target_oloc);
464 }
465
466 /*
467  * requests
468  */
469 static void request_release_checks(struct ceph_osd_request *req)
470 {
471         WARN_ON(!RB_EMPTY_NODE(&req->r_node));
472         WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
473         WARN_ON(!list_empty(&req->r_private_item));
474         WARN_ON(req->r_osd);
475 }
476
477 static void ceph_osdc_release_request(struct kref *kref)
478 {
479         struct ceph_osd_request *req = container_of(kref,
480                                             struct ceph_osd_request, r_kref);
481         unsigned int which;
482
483         dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
484              req->r_request, req->r_reply);
485         request_release_checks(req);
486
487         if (req->r_request)
488                 ceph_msg_put(req->r_request);
489         if (req->r_reply)
490                 ceph_msg_put(req->r_reply);
491
492         for (which = 0; which < req->r_num_ops; which++)
493                 osd_req_op_data_release(req, which);
494
495         target_destroy(&req->r_t);
496         ceph_put_snap_context(req->r_snapc);
497
498         if (req->r_mempool)
499                 mempool_free(req, req->r_osdc->req_mempool);
500         else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
501                 kmem_cache_free(ceph_osd_request_cache, req);
502         else
503                 kfree(req);
504 }
505
506 void ceph_osdc_get_request(struct ceph_osd_request *req)
507 {
508         dout("%s %p (was %d)\n", __func__, req,
509              kref_read(&req->r_kref));
510         kref_get(&req->r_kref);
511 }
512 EXPORT_SYMBOL(ceph_osdc_get_request);
513
514 void ceph_osdc_put_request(struct ceph_osd_request *req)
515 {
516         if (req) {
517                 dout("%s %p (was %d)\n", __func__, req,
518                      kref_read(&req->r_kref));
519                 kref_put(&req->r_kref, ceph_osdc_release_request);
520         }
521 }
522 EXPORT_SYMBOL(ceph_osdc_put_request);
523
524 static void request_init(struct ceph_osd_request *req)
525 {
526         /* req only, each op is zeroed in _osd_req_op_init() */
527         memset(req, 0, sizeof(*req));
528
529         kref_init(&req->r_kref);
530         init_completion(&req->r_completion);
531         RB_CLEAR_NODE(&req->r_node);
532         RB_CLEAR_NODE(&req->r_mc_node);
533         INIT_LIST_HEAD(&req->r_private_item);
534
535         target_init(&req->r_t);
536 }
537
538 /*
539  * This is ugly, but it allows us to reuse linger registration and ping
540  * requests, keeping the structure of the code around send_linger{_ping}()
541  * reasonable.  Setting up a min_nr=2 mempool for each linger request
542  * and dealing with copying ops (this blasts req only, watch op remains
543  * intact) isn't any better.
544  */
545 static void request_reinit(struct ceph_osd_request *req)
546 {
547         struct ceph_osd_client *osdc = req->r_osdc;
548         bool mempool = req->r_mempool;
549         unsigned int num_ops = req->r_num_ops;
550         u64 snapid = req->r_snapid;
551         struct ceph_snap_context *snapc = req->r_snapc;
552         bool linger = req->r_linger;
553         struct ceph_msg *request_msg = req->r_request;
554         struct ceph_msg *reply_msg = req->r_reply;
555
556         dout("%s req %p\n", __func__, req);
557         WARN_ON(kref_read(&req->r_kref) != 1);
558         request_release_checks(req);
559
560         WARN_ON(kref_read(&request_msg->kref) != 1);
561         WARN_ON(kref_read(&reply_msg->kref) != 1);
562         target_destroy(&req->r_t);
563
564         request_init(req);
565         req->r_osdc = osdc;
566         req->r_mempool = mempool;
567         req->r_num_ops = num_ops;
568         req->r_snapid = snapid;
569         req->r_snapc = snapc;
570         req->r_linger = linger;
571         req->r_request = request_msg;
572         req->r_reply = reply_msg;
573 }
574
575 struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
576                                                struct ceph_snap_context *snapc,
577                                                unsigned int num_ops,
578                                                bool use_mempool,
579                                                gfp_t gfp_flags)
580 {
581         struct ceph_osd_request *req;
582
583         if (use_mempool) {
584                 BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
585                 req = mempool_alloc(osdc->req_mempool, gfp_flags);
586         } else if (num_ops <= CEPH_OSD_SLAB_OPS) {
587                 req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
588         } else {
589                 BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
590                 req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
591         }
592         if (unlikely(!req))
593                 return NULL;
594
595         request_init(req);
596         req->r_osdc = osdc;
597         req->r_mempool = use_mempool;
598         req->r_num_ops = num_ops;
599         req->r_snapid = CEPH_NOSNAP;
600         req->r_snapc = ceph_get_snap_context(snapc);
601
602         dout("%s req %p\n", __func__, req);
603         return req;
604 }
605 EXPORT_SYMBOL(ceph_osdc_alloc_request);
606
607 static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
608 {
609         return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
610 }
611
612 static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
613                                       int num_request_data_items,
614                                       int num_reply_data_items)
615 {
616         struct ceph_osd_client *osdc = req->r_osdc;
617         struct ceph_msg *msg;
618         int msg_size;
619
620         WARN_ON(req->r_request || req->r_reply);
621         WARN_ON(ceph_oid_empty(&req->r_base_oid));
622         WARN_ON(ceph_oloc_empty(&req->r_base_oloc));
623
624         /* create request message */
625         msg_size = CEPH_ENCODING_START_BLK_LEN +
626                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
627         msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
628         msg_size += CEPH_ENCODING_START_BLK_LEN +
629                         sizeof(struct ceph_osd_reqid); /* reqid */
630         msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
631         msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
632         msg_size += CEPH_ENCODING_START_BLK_LEN +
633                         ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
634         msg_size += 4 + req->r_base_oid.name_len; /* oid */
635         msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
636         msg_size += 8; /* snapid */
637         msg_size += 8; /* snap_seq */
638         msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
639         msg_size += 4 + 8; /* retry_attempt, features */
640
641         if (req->r_mempool)
642                 msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
643                                        num_request_data_items);
644         else
645                 msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
646                                     num_request_data_items, gfp, true);
647         if (!msg)
648                 return -ENOMEM;
649
650         memset(msg->front.iov_base, 0, msg->front.iov_len);
651         req->r_request = msg;
652
653         /* create reply message */
654         msg_size = OSD_OPREPLY_FRONT_LEN;
655         msg_size += req->r_base_oid.name_len;
656         msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);
657
658         if (req->r_mempool)
659                 msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
660                                        num_reply_data_items);
661         else
662                 msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
663                                     num_reply_data_items, gfp, true);
664         if (!msg)
665                 return -ENOMEM;
666
667         req->r_reply = msg;
668
669         return 0;
670 }
671
672 static bool osd_req_opcode_valid(u16 opcode)
673 {
674         switch (opcode) {
675 #define GENERATE_CASE(op, opcode, str)  case CEPH_OSD_OP_##op: return true;
676 __CEPH_FORALL_OSD_OPS(GENERATE_CASE)
677 #undef GENERATE_CASE
678         default:
679                 return false;
680         }
681 }
682
683 static void get_num_data_items(struct ceph_osd_request *req,
684                                int *num_request_data_items,
685                                int *num_reply_data_items)
686 {
687         struct ceph_osd_req_op *op;
688
689         *num_request_data_items = 0;
690         *num_reply_data_items = 0;
691
692         for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
693                 switch (op->op) {
694                 /* request */
695                 case CEPH_OSD_OP_WRITE:
696                 case CEPH_OSD_OP_WRITEFULL:
697                 case CEPH_OSD_OP_SETXATTR:
698                 case CEPH_OSD_OP_CMPXATTR:
699                 case CEPH_OSD_OP_NOTIFY_ACK:
700                 case CEPH_OSD_OP_COPY_FROM2:
701                         *num_request_data_items += 1;
702                         break;
703
704                 /* reply */
705                 case CEPH_OSD_OP_STAT:
706                 case CEPH_OSD_OP_READ:
707                 case CEPH_OSD_OP_LIST_WATCHERS:
708                         *num_reply_data_items += 1;
709                         break;
710
711                 /* both */
712                 case CEPH_OSD_OP_NOTIFY:
713                         *num_request_data_items += 1;
714                         *num_reply_data_items += 1;
715                         break;
716                 case CEPH_OSD_OP_CALL:
717                         *num_request_data_items += 2;
718                         *num_reply_data_items += 1;
719                         break;
720
721                 default:
722                         WARN_ON(!osd_req_opcode_valid(op->op));
723                         break;
724                 }
725         }
726 }
727
728 /*
729  * oid, oloc and OSD op opcode(s) must be filled in before this function
730  * is called.
731  */
732 int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
733 {
734         int num_request_data_items, num_reply_data_items;
735
736         get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
737         return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
738                                           num_reply_data_items);
739 }
740 EXPORT_SYMBOL(ceph_osdc_alloc_messages);
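
/*
 * Sketch of the required ordering when building a request by hand, modelled
 * on rbd's object requests.  The pool id and object name are placeholders;
 * a delete op is used here because it carries no data payload in either
 * direction.
 */
static int example_delete_object(struct ceph_osd_client *osdc, s64 pool_id,
				 const char *oid_name)
{
	struct ceph_osd_request *req;
	int ret;

	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
	if (!req)
		return -ENOMEM;

	/* opcode, oid and oloc must be set before allocating messages */
	osd_req_op_init(req, 0, CEPH_OSD_OP_DELETE, 0);
	req->r_base_oloc.pool = pool_id;
	ceph_oid_printf(&req->r_base_oid, "%s", oid_name);
	req->r_flags = CEPH_OSD_FLAG_WRITE;
	ktime_get_real_ts64(&req->r_mtime);

	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (ret)
		goto out_put;

	ret = ceph_osdc_start_request(osdc, req, false);
	if (!ret)
		ret = ceph_osdc_wait_request(osdc, req);
out_put:
	ceph_osdc_put_request(req);
	return ret;
}
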
741
742 /*
743  * This is an osd op init function for opcodes that have no data or
744  * other information associated with them.  It also serves as a
745  * common init routine for all the other init functions, below.
746  */
747 static struct ceph_osd_req_op *
748 _osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
749                  u16 opcode, u32 flags)
750 {
751         struct ceph_osd_req_op *op;
752
753         BUG_ON(which >= osd_req->r_num_ops);
754         BUG_ON(!osd_req_opcode_valid(opcode));
755
756         op = &osd_req->r_ops[which];
757         memset(op, 0, sizeof (*op));
758         op->op = opcode;
759         op->flags = flags;
760
761         return op;
762 }
763
764 void osd_req_op_init(struct ceph_osd_request *osd_req,
765                      unsigned int which, u16 opcode, u32 flags)
766 {
767         (void)_osd_req_op_init(osd_req, which, opcode, flags);
768 }
769 EXPORT_SYMBOL(osd_req_op_init);
770
771 void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
772                                 unsigned int which, u16 opcode,
773                                 u64 offset, u64 length,
774                                 u64 truncate_size, u32 truncate_seq)
775 {
776         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
777                                                       opcode, 0);
778         size_t payload_len = 0;
779
780         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
781                opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
782                opcode != CEPH_OSD_OP_TRUNCATE);
783
784         op->extent.offset = offset;
785         op->extent.length = length;
786         op->extent.truncate_size = truncate_size;
787         op->extent.truncate_seq = truncate_seq;
788         if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
789                 payload_len += length;
790
791         op->indata_len = payload_len;
792 }
793 EXPORT_SYMBOL(osd_req_op_extent_init);
794
795 void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
796                                 unsigned int which, u64 length)
797 {
798         struct ceph_osd_req_op *op;
799         u64 previous;
800
801         BUG_ON(which >= osd_req->r_num_ops);
802         op = &osd_req->r_ops[which];
803         previous = op->extent.length;
804
805         if (length == previous)
806                 return;         /* Nothing to do */
807         BUG_ON(length > previous);
808
809         op->extent.length = length;
810         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
811                 op->indata_len -= previous - length;
812 }
813 EXPORT_SYMBOL(osd_req_op_extent_update);
814
815 void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
816                                 unsigned int which, u64 offset_inc)
817 {
818         struct ceph_osd_req_op *op, *prev_op;
819
820         BUG_ON(which + 1 >= osd_req->r_num_ops);
821
822         prev_op = &osd_req->r_ops[which];
823         op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
824         /* dup previous one */
825         op->indata_len = prev_op->indata_len;
826         op->outdata_len = prev_op->outdata_len;
827         op->extent = prev_op->extent;
828         /* adjust offset */
829         op->extent.offset += offset_inc;
830         op->extent.length -= offset_inc;
831
832         if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
833                 op->indata_len -= offset_inc;
834 }
835 EXPORT_SYMBOL(osd_req_op_extent_dup_last);
836
837 int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
838                         const char *class, const char *method)
839 {
840         struct ceph_osd_req_op *op;
841         struct ceph_pagelist *pagelist;
842         size_t payload_len = 0;
843         size_t size;
844         int ret;
845
846         op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);
847
848         pagelist = ceph_pagelist_alloc(GFP_NOFS);
849         if (!pagelist)
850                 return -ENOMEM;
851
852         op->cls.class_name = class;
853         size = strlen(class);
854         BUG_ON(size > (size_t) U8_MAX);
855         op->cls.class_len = size;
856         ret = ceph_pagelist_append(pagelist, class, size);
857         if (ret)
858                 goto err_pagelist_free;
859         payload_len += size;
860
861         op->cls.method_name = method;
862         size = strlen(method);
863         BUG_ON(size > (size_t) U8_MAX);
864         op->cls.method_len = size;
865         ret = ceph_pagelist_append(pagelist, method, size);
866         if (ret)
867                 goto err_pagelist_free;
868         payload_len += size;
869
870         osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
871         op->indata_len = payload_len;
872         return 0;
873
874 err_pagelist_free:
875         ceph_pagelist_release(pagelist);
876         return ret;
877 }
878 EXPORT_SYMBOL(osd_req_op_cls_init);
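
/*
 * Typical CEPH_OSD_OP_CALL wiring, sketched after rbd's use of object
 * classes.  Class/method names and the single reply page are illustrative;
 * @reply_pages must stay valid until the request completes because only
 * the pointer array is recorded here.
 */
static int example_setup_call(struct ceph_osd_request *req, int which,
			      struct ceph_pagelist *indata,
			      struct page **reply_pages)
{
	int ret;

	ret = osd_req_op_cls_init(req, which, "rbd", "get_size");
	if (ret)
		return ret;

	if (indata)
		osd_req_op_cls_request_data_pagelist(req, which, indata);
	osd_req_op_cls_response_data_pages(req, which, reply_pages, PAGE_SIZE,
					   0, false, false);
	return 0;
}
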
879
880 int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
881                           u16 opcode, const char *name, const void *value,
882                           size_t size, u8 cmp_op, u8 cmp_mode)
883 {
884         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
885                                                       opcode, 0);
886         struct ceph_pagelist *pagelist;
887         size_t payload_len;
888         int ret;
889
890         BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);
891
892         pagelist = ceph_pagelist_alloc(GFP_NOFS);
893         if (!pagelist)
894                 return -ENOMEM;
895
896         payload_len = strlen(name);
897         op->xattr.name_len = payload_len;
898         ret = ceph_pagelist_append(pagelist, name, payload_len);
899         if (ret)
900                 goto err_pagelist_free;
901
902         op->xattr.value_len = size;
903         ret = ceph_pagelist_append(pagelist, value, size);
904         if (ret)
905                 goto err_pagelist_free;
906         payload_len += size;
907
908         op->xattr.cmp_op = cmp_op;
909         op->xattr.cmp_mode = cmp_mode;
910
911         ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
912         op->indata_len = payload_len;
913         return 0;
914
915 err_pagelist_free:
916         ceph_pagelist_release(pagelist);
917         return ret;
918 }
919 EXPORT_SYMBOL(osd_req_op_xattr_init);
920
921 /*
922  * @watch_opcode: CEPH_OSD_WATCH_OP_*
923  */
924 static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
925                                   u64 cookie, u8 watch_opcode)
926 {
927         struct ceph_osd_req_op *op;
928
929         op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
930         op->watch.cookie = cookie;
931         op->watch.op = watch_opcode;
932         op->watch.gen = 0;
933 }
934
935 void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
936                                 unsigned int which,
937                                 u64 expected_object_size,
938                                 u64 expected_write_size)
939 {
940         struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
941                                                       CEPH_OSD_OP_SETALLOCHINT,
942                                                       0);
943
944         op->alloc_hint.expected_object_size = expected_object_size;
945         op->alloc_hint.expected_write_size = expected_write_size;
946
947         /*
948          * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
949          * not worth a feature bit.  Set FAILOK per-op flag to make
950          * sure older osds don't trip over an unsupported opcode.
951          */
952         op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
953 }
954 EXPORT_SYMBOL(osd_req_op_alloc_hint_init);
955
956 static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
957                                 struct ceph_osd_data *osd_data)
958 {
959         u64 length = ceph_osd_data_length(osd_data);
960
961         if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
962                 BUG_ON(length > (u64) SIZE_MAX);
963                 if (length)
964                         ceph_msg_data_add_pages(msg, osd_data->pages,
965                                         length, osd_data->alignment, false);
966         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
967                 BUG_ON(!length);
968                 ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
969 #ifdef CONFIG_BLOCK
970         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
971                 ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
972 #endif
973         } else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
974                 ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
975         } else {
976                 BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
977         }
978 }
979
980 static u32 osd_req_encode_op(struct ceph_osd_op *dst,
981                              const struct ceph_osd_req_op *src)
982 {
983         switch (src->op) {
984         case CEPH_OSD_OP_STAT:
985                 break;
986         case CEPH_OSD_OP_READ:
987         case CEPH_OSD_OP_WRITE:
988         case CEPH_OSD_OP_WRITEFULL:
989         case CEPH_OSD_OP_ZERO:
990         case CEPH_OSD_OP_TRUNCATE:
991                 dst->extent.offset = cpu_to_le64(src->extent.offset);
992                 dst->extent.length = cpu_to_le64(src->extent.length);
993                 dst->extent.truncate_size =
994                         cpu_to_le64(src->extent.truncate_size);
995                 dst->extent.truncate_seq =
996                         cpu_to_le32(src->extent.truncate_seq);
997                 break;
998         case CEPH_OSD_OP_CALL:
999                 dst->cls.class_len = src->cls.class_len;
1000                 dst->cls.method_len = src->cls.method_len;
1001                 dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
1002                 break;
1003         case CEPH_OSD_OP_WATCH:
1004                 dst->watch.cookie = cpu_to_le64(src->watch.cookie);
1005                 dst->watch.ver = cpu_to_le64(0);
1006                 dst->watch.op = src->watch.op;
1007                 dst->watch.gen = cpu_to_le32(src->watch.gen);
1008                 break;
1009         case CEPH_OSD_OP_NOTIFY_ACK:
1010                 break;
1011         case CEPH_OSD_OP_NOTIFY:
1012                 dst->notify.cookie = cpu_to_le64(src->notify.cookie);
1013                 break;
1014         case CEPH_OSD_OP_LIST_WATCHERS:
1015                 break;
1016         case CEPH_OSD_OP_SETALLOCHINT:
1017                 dst->alloc_hint.expected_object_size =
1018                     cpu_to_le64(src->alloc_hint.expected_object_size);
1019                 dst->alloc_hint.expected_write_size =
1020                     cpu_to_le64(src->alloc_hint.expected_write_size);
1021                 break;
1022         case CEPH_OSD_OP_SETXATTR:
1023         case CEPH_OSD_OP_CMPXATTR:
1024                 dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
1025                 dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
1026                 dst->xattr.cmp_op = src->xattr.cmp_op;
1027                 dst->xattr.cmp_mode = src->xattr.cmp_mode;
1028                 break;
1029         case CEPH_OSD_OP_CREATE:
1030         case CEPH_OSD_OP_DELETE:
1031                 break;
1032         case CEPH_OSD_OP_COPY_FROM2:
1033                 dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
1034                 dst->copy_from.src_version =
1035                         cpu_to_le64(src->copy_from.src_version);
1036                 dst->copy_from.flags = src->copy_from.flags;
1037                 dst->copy_from.src_fadvise_flags =
1038                         cpu_to_le32(src->copy_from.src_fadvise_flags);
1039                 break;
1040         default:
1041                 pr_err("unsupported osd opcode %s\n",
1042                         ceph_osd_op_name(src->op));
1043                 WARN_ON(1);
1044
1045                 return 0;
1046         }
1047
1048         dst->op = cpu_to_le16(src->op);
1049         dst->flags = cpu_to_le32(src->flags);
1050         dst->payload_len = cpu_to_le32(src->indata_len);
1051
1052         return src->indata_len;
1053 }
1054
1055 /*
1056  * build new request AND message, calculate layout, and adjust file
1057  * extent as needed.
1058  *
1059  * if the file was recently truncated, we include information about its
1060  * old and new size so that the object can be updated appropriately.  (we
1061  * avoid synchronously deleting truncated objects because it's slow.)
1062  */
1063 struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
1064                                                struct ceph_file_layout *layout,
1065                                                struct ceph_vino vino,
1066                                                u64 off, u64 *plen,
1067                                                unsigned int which, int num_ops,
1068                                                int opcode, int flags,
1069                                                struct ceph_snap_context *snapc,
1070                                                u32 truncate_seq,
1071                                                u64 truncate_size,
1072                                                bool use_mempool)
1073 {
1074         struct ceph_osd_request *req;
1075         u64 objnum = 0;
1076         u64 objoff = 0;
1077         u64 objlen = 0;
1078         int r;
1079
1080         BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
1081                opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
1082                opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);
1083
1084         req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
1085                                         GFP_NOFS);
1086         if (!req) {
1087                 r = -ENOMEM;
1088                 goto fail;
1089         }
1090
1091         /* calculate max write size */
1092         r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
1093         if (r)
1094                 goto fail;
1095
1096         if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
1097                 osd_req_op_init(req, which, opcode, 0);
1098         } else {
1099                 u32 object_size = layout->object_size;
1100                 u32 object_base = off - objoff;
1101                 if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
1102                         if (truncate_size <= object_base) {
1103                                 truncate_size = 0;
1104                         } else {
1105                                 truncate_size -= object_base;
1106                                 if (truncate_size > object_size)
1107                                         truncate_size = object_size;
1108                         }
1109                 }
1110                 osd_req_op_extent_init(req, which, opcode, objoff, objlen,
1111                                        truncate_size, truncate_seq);
1112         }
1113
1114         req->r_flags = flags;
1115         req->r_base_oloc.pool = layout->pool_id;
1116         req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
1117         ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
1118
1119         req->r_snapid = vino.snap;
1120         if (flags & CEPH_OSD_FLAG_WRITE)
1121                 req->r_data_offset = off;
1122
1123         if (num_ops > 1)
1124                 /*
1125                  * This is a special case for ceph_writepages_start(), but it
1126                  * also covers ceph_uninline_data().  If more multi-op request
1127                  * use cases emerge, we will need a separate helper.
1128                  */
1129                 r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
1130         else
1131                 r = ceph_osdc_alloc_messages(req, GFP_NOFS);
1132         if (r)
1133                 goto fail;
1134
1135         return req;
1136
1137 fail:
1138         ceph_osdc_put_request(req);
1139         return ERR_PTR(r);
1140 }
1141 EXPORT_SYMBOL(ceph_osdc_new_request);
1142
1143 /*
1144  * We keep osd requests in an rbtree, sorted by ->r_tid.
1145  */
1146 DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
1147 DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
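/*
 * DEFINE_RB_FUNCS() (from linux/ceph/libceph.h) expands to the rbtree
 * helpers used below: lookup_request(), insert_request() and
 * erase_request() keyed by r_tid, plus the *_request_mc() variants for
 * the map check tree hanging off of r_mc_node.
 */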
1148
1149 /*
1150  * Call @fn on each OSD request as long as @fn returns 0.
1151  */
1152 static void for_each_request(struct ceph_osd_client *osdc,
1153                         int (*fn)(struct ceph_osd_request *req, void *arg),
1154                         void *arg)
1155 {
1156         struct rb_node *n, *p;
1157
1158         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
1159                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
1160
1161                 for (p = rb_first(&osd->o_requests); p; ) {
1162                         struct ceph_osd_request *req =
1163                             rb_entry(p, struct ceph_osd_request, r_node);
1164
1165                         p = rb_next(p);
1166                         if (fn(req, arg))
1167                                 return;
1168                 }
1169         }
1170
1171         for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
1172                 struct ceph_osd_request *req =
1173                     rb_entry(p, struct ceph_osd_request, r_node);
1174
1175                 p = rb_next(p);
1176                 if (fn(req, arg))
1177                         return;
1178         }
1179 }
1180
1181 static bool osd_homeless(struct ceph_osd *osd)
1182 {
1183         return osd->o_osd == CEPH_HOMELESS_OSD;
1184 }
1185
1186 static bool osd_registered(struct ceph_osd *osd)
1187 {
1188         verify_osdc_locked(osd->o_osdc);
1189
1190         return !RB_EMPTY_NODE(&osd->o_node);
1191 }
1192
1193 /*
1194  * Assumes @osd is zero-initialized.
1195  */
1196 static void osd_init(struct ceph_osd *osd)
1197 {
1198         refcount_set(&osd->o_ref, 1);
1199         RB_CLEAR_NODE(&osd->o_node);
1200         osd->o_requests = RB_ROOT;
1201         osd->o_linger_requests = RB_ROOT;
1202         osd->o_backoff_mappings = RB_ROOT;
1203         osd->o_backoffs_by_id = RB_ROOT;
1204         INIT_LIST_HEAD(&osd->o_osd_lru);
1205         INIT_LIST_HEAD(&osd->o_keepalive_item);
1206         osd->o_incarnation = 1;
1207         mutex_init(&osd->lock);
1208 }
1209
1210 static void osd_cleanup(struct ceph_osd *osd)
1211 {
1212         WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
1213         WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
1214         WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
1215         WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
1216         WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
1217         WARN_ON(!list_empty(&osd->o_osd_lru));
1218         WARN_ON(!list_empty(&osd->o_keepalive_item));
1219
1220         if (osd->o_auth.authorizer) {
1221                 WARN_ON(osd_homeless(osd));
1222                 ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
1223         }
1224 }
1225
1226 /*
1227  * Track open sessions with osds.
1228  */
1229 static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
1230 {
1231         struct ceph_osd *osd;
1232
1233         WARN_ON(onum == CEPH_HOMELESS_OSD);
1234
1235         osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
1236         osd_init(osd);
1237         osd->o_osdc = osdc;
1238         osd->o_osd = onum;
1239
1240         ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);
1241
1242         return osd;
1243 }
1244
1245 static struct ceph_osd *get_osd(struct ceph_osd *osd)
1246 {
1247         if (refcount_inc_not_zero(&osd->o_ref)) {
1248                 dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
1249                      refcount_read(&osd->o_ref));
1250                 return osd;
1251         } else {
1252                 dout("get_osd %p FAIL\n", osd);
1253                 return NULL;
1254         }
1255 }
1256
1257 static void put_osd(struct ceph_osd *osd)
1258 {
1259         dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
1260              refcount_read(&osd->o_ref) - 1);
1261         if (refcount_dec_and_test(&osd->o_ref)) {
1262                 osd_cleanup(osd);
1263                 kfree(osd);
1264         }
1265 }
1266
1267 DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)
1268
1269 static void __move_osd_to_lru(struct ceph_osd *osd)
1270 {
1271         struct ceph_osd_client *osdc = osd->o_osdc;
1272
1273         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1274         BUG_ON(!list_empty(&osd->o_osd_lru));
1275
1276         spin_lock(&osdc->osd_lru_lock);
1277         list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
1278         spin_unlock(&osdc->osd_lru_lock);
1279
1280         osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
1281 }
1282
1283 static void maybe_move_osd_to_lru(struct ceph_osd *osd)
1284 {
1285         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1286             RB_EMPTY_ROOT(&osd->o_linger_requests))
1287                 __move_osd_to_lru(osd);
1288 }
1289
1290 static void __remove_osd_from_lru(struct ceph_osd *osd)
1291 {
1292         struct ceph_osd_client *osdc = osd->o_osdc;
1293
1294         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1295
1296         spin_lock(&osdc->osd_lru_lock);
1297         if (!list_empty(&osd->o_osd_lru))
1298                 list_del_init(&osd->o_osd_lru);
1299         spin_unlock(&osdc->osd_lru_lock);
1300 }
1301
1302 /*
1303  * Close the connection and assign any leftover requests to the
1304  * homeless session.
1305  */
1306 static void close_osd(struct ceph_osd *osd)
1307 {
1308         struct ceph_osd_client *osdc = osd->o_osdc;
1309         struct rb_node *n;
1310
1311         verify_osdc_wrlocked(osdc);
1312         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1313
1314         ceph_con_close(&osd->o_con);
1315
1316         for (n = rb_first(&osd->o_requests); n; ) {
1317                 struct ceph_osd_request *req =
1318                     rb_entry(n, struct ceph_osd_request, r_node);
1319
1320                 n = rb_next(n); /* unlink_request() */
1321
1322                 dout(" reassigning req %p tid %llu\n", req, req->r_tid);
1323                 unlink_request(osd, req);
1324                 link_request(&osdc->homeless_osd, req);
1325         }
1326         for (n = rb_first(&osd->o_linger_requests); n; ) {
1327                 struct ceph_osd_linger_request *lreq =
1328                     rb_entry(n, struct ceph_osd_linger_request, node);
1329
1330                 n = rb_next(n); /* unlink_linger() */
1331
1332                 dout(" reassigning lreq %p linger_id %llu\n", lreq,
1333                      lreq->linger_id);
1334                 unlink_linger(osd, lreq);
1335                 link_linger(&osdc->homeless_osd, lreq);
1336         }
1337         clear_backoffs(osd);
1338
1339         __remove_osd_from_lru(osd);
1340         erase_osd(&osdc->osds, osd);
1341         put_osd(osd);
1342 }
1343
1344 /*
1345  * reset osd connect
1346  */
1347 static int reopen_osd(struct ceph_osd *osd)
1348 {
1349         struct ceph_entity_addr *peer_addr;
1350
1351         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
1352
1353         if (RB_EMPTY_ROOT(&osd->o_requests) &&
1354             RB_EMPTY_ROOT(&osd->o_linger_requests)) {
1355                 close_osd(osd);
1356                 return -ENODEV;
1357         }
1358
1359         peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
1360         if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
1361                         !ceph_con_opened(&osd->o_con)) {
1362                 struct rb_node *n;
1363
1364                 dout("osd addr hasn't changed and connection never opened, "
1365                      "letting msgr retry\n");
1366                 /* touch each r_stamp for handle_timeout()'s benefit */
1367                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1368                         struct ceph_osd_request *req =
1369                             rb_entry(n, struct ceph_osd_request, r_node);
1370                         req->r_stamp = jiffies;
1371                 }
1372
1373                 return -EAGAIN;
1374         }
1375
1376         ceph_con_close(&osd->o_con);
1377         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1378         osd->o_incarnation++;
1379
1380         return 0;
1381 }
1382
1383 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1384                                           bool wrlocked)
1385 {
1386         struct ceph_osd *osd;
1387
1388         if (wrlocked)
1389                 verify_osdc_wrlocked(osdc);
1390         else
1391                 verify_osdc_locked(osdc);
1392
1393         if (o != CEPH_HOMELESS_OSD)
1394                 osd = lookup_osd(&osdc->osds, o);
1395         else
1396                 osd = &osdc->homeless_osd;
1397         if (!osd) {
1398                 if (!wrlocked)
1399                         return ERR_PTR(-EAGAIN);
1400
1401                 osd = create_osd(osdc, o);
1402                 insert_osd(&osdc->osds, osd);
1403                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1404                               &osdc->osdmap->osd_addr[osd->o_osd]);
1405         }
1406
1407         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1408         return osd;
1409 }
1410
1411 /*
1412  * Create request <-> OSD session relation.
1413  *
1414  * @req has to be assigned a tid, @osd may be homeless.
1415  */
1416 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1417 {
1418         verify_osd_locked(osd);
1419         WARN_ON(!req->r_tid || req->r_osd);
1420         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1421              req, req->r_tid);
1422
1423         if (!osd_homeless(osd))
1424                 __remove_osd_from_lru(osd);
1425         else
1426                 atomic_inc(&osd->o_osdc->num_homeless);
1427
1428         get_osd(osd);
1429         insert_request(&osd->o_requests, req);
1430         req->r_osd = osd;
1431 }
1432
1433 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1434 {
1435         verify_osd_locked(osd);
1436         WARN_ON(req->r_osd != osd);
1437         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1438              req, req->r_tid);
1439
1440         req->r_osd = NULL;
1441         erase_request(&osd->o_requests, req);
1442         put_osd(osd);
1443
1444         if (!osd_homeless(osd))
1445                 maybe_move_osd_to_lru(osd);
1446         else
1447                 atomic_dec(&osd->o_osdc->num_homeless);
1448 }
1449
1450 static bool __pool_full(struct ceph_pg_pool_info *pi)
1451 {
1452         return pi->flags & CEPH_POOL_FLAG_FULL;
1453 }
1454
1455 static bool have_pool_full(struct ceph_osd_client *osdc)
1456 {
1457         struct rb_node *n;
1458
1459         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1460                 struct ceph_pg_pool_info *pi =
1461                     rb_entry(n, struct ceph_pg_pool_info, node);
1462
1463                 if (__pool_full(pi))
1464                         return true;
1465         }
1466
1467         return false;
1468 }
1469
1470 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1471 {
1472         struct ceph_pg_pool_info *pi;
1473
1474         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1475         if (!pi)
1476                 return false;
1477
1478         return __pool_full(pi);
1479 }
1480
1481 /*
1482  * Returns whether a request should be blocked from being sent
1483  * based on the current osdmap and osd_client settings.
1484  */
1485 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1486                                     const struct ceph_osd_request_target *t,
1487                                     struct ceph_pg_pool_info *pi)
1488 {
1489         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1490         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1491                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1492                        __pool_full(pi);
1493
1494         WARN_ON(pi->id != t->target_oloc.pool);
1495         return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1496                ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1497                (osdc->osdmap->epoch < osdc->epoch_barrier);
1498 }
1499
1500 static int pick_random_replica(const struct ceph_osds *acting)
1501 {
1502         int i = prandom_u32() % acting->size;
1503
1504         dout("%s picked osd%d, primary osd%d\n", __func__,
1505              acting->osds[i], acting->primary);
1506         return i;
1507 }
1508
1509 /*
1510  * Picks the closest replica based on the client's location, as given
1511  * by the crush_location option.  Prefers the primary if the locality
1512  * is the same.
1513  */
1514 static int pick_closest_replica(struct ceph_osd_client *osdc,
1515                                 const struct ceph_osds *acting)
1516 {
1517         struct ceph_options *opt = osdc->client->options;
1518         int best_i, best_locality;
1519         int i = 0, locality;
1520
1521         do {
1522                 locality = ceph_get_crush_locality(osdc->osdmap,
1523                                                    acting->osds[i],
1524                                                    &opt->crush_locs);
1525                 if (i == 0 ||
1526                     (locality >= 0 && best_locality < 0) ||
1527                     (locality >= 0 && best_locality >= 0 &&
1528                      locality < best_locality)) {
1529                         best_i = i;
1530                         best_locality = locality;
1531                 }
1532         } while (++i < acting->size);
1533
1534         dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
1535              acting->osds[best_i], best_locality, acting->primary);
1536         return best_i;
1537 }
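
/*
 * Example (hypothetical values): with acting = [osd3 (primary), osd1, osd5]
 * and ceph_get_crush_locality() returning -1, 2 and 1 for them in turn,
 * the loop above settles on index 2 (osd5) -- any locality >= 0 beats a
 * negative one, and among non-negative localities the smallest wins.
 * If every locality were negative, index 0 (the primary) would be kept.
 */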
1538
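/*
 * calc_target() return values:
 *
 * CALC_TARGET_NO_ACTION - the target didn't change, nothing to do
 * CALC_TARGET_NEED_RESEND - the mapping (or pause state) changed and the
 *                           request needs to be (re)sent
 * CALC_TARGET_POOL_DNE - the target pool isn't in our osdmap; the caller
 *                        kicks off a map check to tell a deleted pool
 *                        apart from an osdmap that is simply too old
 */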
1539 enum calc_target_result {
1540         CALC_TARGET_NO_ACTION = 0,
1541         CALC_TARGET_NEED_RESEND,
1542         CALC_TARGET_POOL_DNE,
1543 };
1544
1545 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1546                                            struct ceph_osd_request_target *t,
1547                                            bool any_change)
1548 {
1549         struct ceph_pg_pool_info *pi;
1550         struct ceph_pg pgid, last_pgid;
1551         struct ceph_osds up, acting;
1552         bool is_read = t->flags & CEPH_OSD_FLAG_READ;
1553         bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
1554         bool force_resend = false;
1555         bool unpaused = false;
1556         bool legacy_change = false;
1557         bool split = false;
1558         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1559         bool recovery_deletes = ceph_osdmap_flag(osdc,
1560                                                  CEPH_OSDMAP_RECOVERY_DELETES);
1561         enum calc_target_result ct_res;
1562
1563         t->epoch = osdc->osdmap->epoch;
1564         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1565         if (!pi) {
1566                 t->osd = CEPH_HOMELESS_OSD;
1567                 ct_res = CALC_TARGET_POOL_DNE;
1568                 goto out;
1569         }
1570
1571         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1572                 if (t->last_force_resend < pi->last_force_request_resend) {
1573                         t->last_force_resend = pi->last_force_request_resend;
1574                         force_resend = true;
1575                 } else if (t->last_force_resend == 0) {
1576                         force_resend = true;
1577                 }
1578         }
1579
1580         /* apply tiering */
1581         ceph_oid_copy(&t->target_oid, &t->base_oid);
1582         ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1583         if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1584                 if (is_read && pi->read_tier >= 0)
1585                         t->target_oloc.pool = pi->read_tier;
1586                 if (is_write && pi->write_tier >= 0)
1587                         t->target_oloc.pool = pi->write_tier;
1588
1589                 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1590                 if (!pi) {
1591                         t->osd = CEPH_HOMELESS_OSD;
1592                         ct_res = CALC_TARGET_POOL_DNE;
1593                         goto out;
1594                 }
1595         }
1596
1597         __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
1598         last_pgid.pool = pgid.pool;
1599         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1600
1601         ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1602         if (any_change &&
1603             ceph_is_new_interval(&t->acting,
1604                                  &acting,
1605                                  &t->up,
1606                                  &up,
1607                                  t->size,
1608                                  pi->size,
1609                                  t->min_size,
1610                                  pi->min_size,
1611                                  t->pg_num,
1612                                  pi->pg_num,
1613                                  t->sort_bitwise,
1614                                  sort_bitwise,
1615                                  t->recovery_deletes,
1616                                  recovery_deletes,
1617                                  &last_pgid))
1618                 force_resend = true;
1619
1620         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1621                 t->paused = false;
1622                 unpaused = true;
1623         }
1624         legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1625                         ceph_osds_changed(&t->acting, &acting,
1626                                           t->used_replica || any_change);
1627         if (t->pg_num)
1628                 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1629
1630         if (legacy_change || force_resend || split) {
1631                 t->pgid = pgid; /* struct */
1632                 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1633                 ceph_osds_copy(&t->acting, &acting);
1634                 ceph_osds_copy(&t->up, &up);
1635                 t->size = pi->size;
1636                 t->min_size = pi->min_size;
1637                 t->pg_num = pi->pg_num;
1638                 t->pg_num_mask = pi->pg_num_mask;
1639                 t->sort_bitwise = sort_bitwise;
1640                 t->recovery_deletes = recovery_deletes;
1641
1642                 if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
1643                                  CEPH_OSD_FLAG_LOCALIZE_READS)) &&
1644                     !is_write && pi->type == CEPH_POOL_TYPE_REP &&
1645                     acting.size > 1) {
1646                         int pos;
1647
1648                         WARN_ON(!is_read || acting.osds[0] != acting.primary);
1649                         if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
1650                                 pos = pick_random_replica(&acting);
1651                         } else {
1652                                 pos = pick_closest_replica(osdc, &acting);
1653                         }
1654                         t->osd = acting.osds[pos];
1655                         t->used_replica = pos > 0;
1656                 } else {
1657                         t->osd = acting.primary;
1658                         t->used_replica = false;
1659                 }
1660         }
1661
1662         if (unpaused || legacy_change || force_resend || split)
1663                 ct_res = CALC_TARGET_NEED_RESEND;
1664         else
1665                 ct_res = CALC_TARGET_NO_ACTION;
1666
1667 out:
1668         dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1669              legacy_change, force_resend, split, ct_res, t->osd);
1670         return ct_res;
1671 }
1672
1673 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1674 {
1675         struct ceph_spg_mapping *spg;
1676
1677         spg = kmalloc(sizeof(*spg), GFP_NOIO);
1678         if (!spg)
1679                 return NULL;
1680
1681         RB_CLEAR_NODE(&spg->node);
1682         spg->backoffs = RB_ROOT;
1683         return spg;
1684 }
1685
1686 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1687 {
1688         WARN_ON(!RB_EMPTY_NODE(&spg->node));
1689         WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1690
1691         kfree(spg);
1692 }
1693
1694 /*
1695  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1696  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1697  * defined only within a specific spgid; it does not pass anything to
1698  * children on split, or to another primary.
1699  */
1700 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1701                  RB_BYPTR, const struct ceph_spg *, node)
1702
1703 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1704 {
1705         return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1706 }
1707
1708 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1709                                    void **pkey, size_t *pkey_len)
1710 {
1711         if (hoid->key_len) {
1712                 *pkey = hoid->key;
1713                 *pkey_len = hoid->key_len;
1714         } else {
1715                 *pkey = hoid->oid;
1716                 *pkey_len = hoid->oid_len;
1717         }
1718 }
1719
1720 static int compare_names(const void *name1, size_t name1_len,
1721                          const void *name2, size_t name2_len)
1722 {
1723         int ret;
1724
1725         ret = memcmp(name1, name2, min(name1_len, name2_len));
1726         if (!ret) {
1727                 if (name1_len < name2_len)
1728                         ret = -1;
1729                 else if (name1_len > name2_len)
1730                         ret = 1;
1731         }
1732         return ret;
1733 }
1734
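/*
 * Total order on hobjects: is_max, then pool, then the bitwise-reversed
 * hash, then namespace, then the effective key (the locator key if set,
 * the object name otherwise), then the object name, then snapid.  Name
 * fields are compared with compare_names(): memcmp() over the common
 * prefix, with the shorter name ordering first on a tie -- e.g.
 * "ab" < "abc" < "ac".
 */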
1735 static int hoid_compare(const struct ceph_hobject_id *lhs,
1736                         const struct ceph_hobject_id *rhs)
1737 {
1738         void *effective_key1, *effective_key2;
1739         size_t effective_key1_len, effective_key2_len;
1740         int ret;
1741
1742         if (lhs->is_max < rhs->is_max)
1743                 return -1;
1744         if (lhs->is_max > rhs->is_max)
1745                 return 1;
1746
1747         if (lhs->pool < rhs->pool)
1748                 return -1;
1749         if (lhs->pool > rhs->pool)
1750                 return 1;
1751
1752         if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1753                 return -1;
1754         if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1755                 return 1;
1756
1757         ret = compare_names(lhs->nspace, lhs->nspace_len,
1758                             rhs->nspace, rhs->nspace_len);
1759         if (ret)
1760                 return ret;
1761
1762         hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1763         hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1764         ret = compare_names(effective_key1, effective_key1_len,
1765                             effective_key2, effective_key2_len);
1766         if (ret)
1767                 return ret;
1768
1769         ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1770         if (ret)
1771                 return ret;
1772
1773         if (lhs->snapid < rhs->snapid)
1774                 return -1;
1775         if (lhs->snapid > rhs->snapid)
1776                 return 1;
1777
1778         return 0;
1779 }
1780
1781 /*
1782  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1783  * compat stuff here.
1784  *
1785  * Assumes @hoid is zero-initialized.
1786  */
1787 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1788 {
1789         u8 struct_v;
1790         u32 struct_len;
1791         int ret;
1792
1793         ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1794                                   &struct_len);
1795         if (ret)
1796                 return ret;
1797
1798         if (struct_v < 4) {
1799                 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1800                 goto e_inval;
1801         }
1802
1803         hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1804                                                 GFP_NOIO);
1805         if (IS_ERR(hoid->key)) {
1806                 ret = PTR_ERR(hoid->key);
1807                 hoid->key = NULL;
1808                 return ret;
1809         }
1810
1811         hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1812                                                 GFP_NOIO);
1813         if (IS_ERR(hoid->oid)) {
1814                 ret = PTR_ERR(hoid->oid);
1815                 hoid->oid = NULL;
1816                 return ret;
1817         }
1818
1819         ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1820         ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1821         ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1822
1823         hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1824                                                    GFP_NOIO);
1825         if (IS_ERR(hoid->nspace)) {
1826                 ret = PTR_ERR(hoid->nspace);
1827                 hoid->nspace = NULL;
1828                 return ret;
1829         }
1830
1831         ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1832
1833         ceph_hoid_build_hash_cache(hoid);
1834         return 0;
1835
1836 e_inval:
1837         return -EINVAL;
1838 }
1839
1840 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1841 {
1842         return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1843                4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1844 }
1845
1846 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1847 {
1848         ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1849         ceph_encode_string(p, end, hoid->key, hoid->key_len);
1850         ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1851         ceph_encode_64(p, hoid->snapid);
1852         ceph_encode_32(p, hoid->hash);
1853         ceph_encode_8(p, hoid->is_max);
1854         ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1855         ceph_encode_64(p, hoid->pool);
1856 }
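
/*
 * Worked example (hypothetical object): key "", oid "rbd_header.abc"
 * (14 bytes), empty namespace.  hoid_encoding_size() yields
 * 8 + 4 + 1 + 8 (snapid, hash, is_max, pool) + (4 + 0) + (4 + 14) +
 * (4 + 0) = 47 bytes of payload; encode_hoid() prepends the usual
 * version/compat/length header (CEPH_ENCODING_START_BLK_LEN bytes) via
 * ceph_start_encoding() on top of that.
 */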
1857
1858 static void free_hoid(struct ceph_hobject_id *hoid)
1859 {
1860         if (hoid) {
1861                 kfree(hoid->key);
1862                 kfree(hoid->oid);
1863                 kfree(hoid->nspace);
1864                 kfree(hoid);
1865         }
1866 }
1867
1868 static struct ceph_osd_backoff *alloc_backoff(void)
1869 {
1870         struct ceph_osd_backoff *backoff;
1871
1872         backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1873         if (!backoff)
1874                 return NULL;
1875
1876         RB_CLEAR_NODE(&backoff->spg_node);
1877         RB_CLEAR_NODE(&backoff->id_node);
1878         return backoff;
1879 }
1880
1881 static void free_backoff(struct ceph_osd_backoff *backoff)
1882 {
1883         WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1884         WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1885
1886         free_hoid(backoff->begin);
1887         free_hoid(backoff->end);
1888         kfree(backoff);
1889 }
1890
1891 /*
1892  * Within a specific spgid, backoffs are managed by ->begin hoid.
1893  */
1894 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1895                         RB_BYVAL, spg_node);
1896
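/*
 * Find the backoff (if any) whose [begin, end) range contains @hoid,
 * walking the per-spgid tree that is keyed by ->begin.
 */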
1897 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1898                                             const struct ceph_hobject_id *hoid)
1899 {
1900         struct rb_node *n = root->rb_node;
1901
1902         while (n) {
1903                 struct ceph_osd_backoff *cur =
1904                     rb_entry(n, struct ceph_osd_backoff, spg_node);
1905                 int cmp;
1906
1907                 cmp = hoid_compare(hoid, cur->begin);
1908                 if (cmp < 0) {
1909                         n = n->rb_left;
1910                 } else if (cmp > 0) {
1911                         if (hoid_compare(hoid, cur->end) < 0)
1912                                 return cur;
1913
1914                         n = n->rb_right;
1915                 } else {
1916                         return cur;
1917                 }
1918         }
1919
1920         return NULL;
1921 }
1922
1923 /*
1924  * Each backoff has a unique id within its OSD session.
1925  */
1926 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1927
1928 static void clear_backoffs(struct ceph_osd *osd)
1929 {
1930         while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1931                 struct ceph_spg_mapping *spg =
1932                     rb_entry(rb_first(&osd->o_backoff_mappings),
1933                              struct ceph_spg_mapping, node);
1934
1935                 while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1936                         struct ceph_osd_backoff *backoff =
1937                             rb_entry(rb_first(&spg->backoffs),
1938                                      struct ceph_osd_backoff, spg_node);
1939
1940                         erase_backoff(&spg->backoffs, backoff);
1941                         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1942                         free_backoff(backoff);
1943                 }
1944                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
1945                 free_spg_mapping(spg);
1946         }
1947 }
1948
1949 /*
1950  * Set up a temporary, non-owning view into @t.
1951  */
1952 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1953                                   const struct ceph_osd_request_target *t)
1954 {
1955         hoid->key = NULL;
1956         hoid->key_len = 0;
1957         hoid->oid = t->target_oid.name;
1958         hoid->oid_len = t->target_oid.name_len;
1959         hoid->snapid = CEPH_NOSNAP;
1960         hoid->hash = t->pgid.seed;
1961         hoid->is_max = false;
1962         if (t->target_oloc.pool_ns) {
1963                 hoid->nspace = t->target_oloc.pool_ns->str;
1964                 hoid->nspace_len = t->target_oloc.pool_ns->len;
1965         } else {
1966                 hoid->nspace = NULL;
1967                 hoid->nspace_len = 0;
1968         }
1969         hoid->pool = t->target_oloc.pool;
1970         ceph_hoid_build_hash_cache(hoid);
1971 }
1972
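/*
 * Check whether the OSD has asked us to back off from this target.
 * A plugged request stays linked to its session but is not sent; it is
 * resent once the covering backoff is removed.
 */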
1973 static bool should_plug_request(struct ceph_osd_request *req)
1974 {
1975         struct ceph_osd *osd = req->r_osd;
1976         struct ceph_spg_mapping *spg;
1977         struct ceph_osd_backoff *backoff;
1978         struct ceph_hobject_id hoid;
1979
1980         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1981         if (!spg)
1982                 return false;
1983
1984         hoid_fill_from_target(&hoid, &req->r_t);
1985         backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1986         if (!backoff)
1987                 return false;
1988
1989         dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1990              __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1991              backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
1992         return true;
1993 }
1994
1995 /*
1996  * Keep get_num_data_items() in sync with this function.
1997  */
1998 static void setup_request_data(struct ceph_osd_request *req)
1999 {
2000         struct ceph_msg *request_msg = req->r_request;
2001         struct ceph_msg *reply_msg = req->r_reply;
2002         struct ceph_osd_req_op *op;
2003
2004         if (req->r_request->num_data_items || req->r_reply->num_data_items)
2005                 return;
2006
2007         WARN_ON(request_msg->data_length || reply_msg->data_length);
2008         for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
2009                 switch (op->op) {
2010                 /* request */
2011                 case CEPH_OSD_OP_WRITE:
2012                 case CEPH_OSD_OP_WRITEFULL:
2013                         WARN_ON(op->indata_len != op->extent.length);
2014                         ceph_osdc_msg_data_add(request_msg,
2015                                                &op->extent.osd_data);
2016                         break;
2017                 case CEPH_OSD_OP_SETXATTR:
2018                 case CEPH_OSD_OP_CMPXATTR:
2019                         WARN_ON(op->indata_len != op->xattr.name_len +
2020                                                   op->xattr.value_len);
2021                         ceph_osdc_msg_data_add(request_msg,
2022                                                &op->xattr.osd_data);
2023                         break;
2024                 case CEPH_OSD_OP_NOTIFY_ACK:
2025                         ceph_osdc_msg_data_add(request_msg,
2026                                                &op->notify_ack.request_data);
2027                         break;
2028                 case CEPH_OSD_OP_COPY_FROM2:
2029                         ceph_osdc_msg_data_add(request_msg,
2030                                                &op->copy_from.osd_data);
2031                         break;
2032
2033                 /* reply */
2034                 case CEPH_OSD_OP_STAT:
2035                         ceph_osdc_msg_data_add(reply_msg,
2036                                                &op->raw_data_in);
2037                         break;
2038                 case CEPH_OSD_OP_READ:
2039                         ceph_osdc_msg_data_add(reply_msg,
2040                                                &op->extent.osd_data);
2041                         break;
2042                 case CEPH_OSD_OP_LIST_WATCHERS:
2043                         ceph_osdc_msg_data_add(reply_msg,
2044                                                &op->list_watchers.response_data);
2045                         break;
2046
2047                 /* both */
2048                 case CEPH_OSD_OP_CALL:
2049                         WARN_ON(op->indata_len != op->cls.class_len +
2050                                                   op->cls.method_len +
2051                                                   op->cls.indata_len);
2052                         ceph_osdc_msg_data_add(request_msg,
2053                                                &op->cls.request_info);
2054                         /* optional, can be NONE */
2055                         ceph_osdc_msg_data_add(request_msg,
2056                                                &op->cls.request_data);
2057                         /* optional, can be NONE */
2058                         ceph_osdc_msg_data_add(reply_msg,
2059                                                &op->cls.response_data);
2060                         break;
2061                 case CEPH_OSD_OP_NOTIFY:
2062                         ceph_osdc_msg_data_add(request_msg,
2063                                                &op->notify.request_data);
2064                         ceph_osdc_msg_data_add(reply_msg,
2065                                                &op->notify.response_data);
2066                         break;
2067                 }
2068         }
2069 }
2070
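/*
 * encode_pgid() below emits 1 (version) + 8 (pool) + 4 (seed) +
 * 4 (preferred, always -1) = 17 bytes, which is what
 * CEPH_PGID_ENCODING_LEN accounts for.  encode_spgid() wraps that in a
 * start-encoding header and appends a single shard byte.
 */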
2071 static void encode_pgid(void **p, const struct ceph_pg *pgid)
2072 {
2073         ceph_encode_8(p, 1);
2074         ceph_encode_64(p, pgid->pool);
2075         ceph_encode_32(p, pgid->seed);
2076         ceph_encode_32(p, -1); /* preferred */
2077 }
2078
2079 static void encode_spgid(void **p, const struct ceph_spg *spgid)
2080 {
2081         ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
2082         encode_pgid(p, &spgid->pgid);
2083         ceph_encode_8(p, spgid->shard);
2084 }
2085
2086 static void encode_oloc(void **p, void *end,
2087                         const struct ceph_object_locator *oloc)
2088 {
2089         ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
2090         ceph_encode_64(p, oloc->pool);
2091         ceph_encode_32(p, -1); /* preferred */
2092         ceph_encode_32(p, 0);  /* key len */
2093         if (oloc->pool_ns)
2094                 ceph_encode_string(p, end, oloc->pool_ns->str,
2095                                    oloc->pool_ns->len);
2096         else
2097                 ceph_encode_32(p, 0);
2098 }
2099
2100 static void encode_request_partial(struct ceph_osd_request *req,
2101                                    struct ceph_msg *msg)
2102 {
2103         void *p = msg->front.iov_base;
2104         void *const end = p + msg->front_alloc_len;
2105         u32 data_len = 0;
2106         int i;
2107
2108         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
2109                 /* snapshots aren't writeable */
2110                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
2111         } else {
2112                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
2113                         req->r_data_offset || req->r_snapc);
2114         }
2115
2116         setup_request_data(req);
2117
2118         encode_spgid(&p, &req->r_t.spgid); /* actual spg */
2119         ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
2120         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
2121         ceph_encode_32(&p, req->r_flags);
2122
2123         /* reqid */
2124         ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
2125         memset(p, 0, sizeof(struct ceph_osd_reqid));
2126         p += sizeof(struct ceph_osd_reqid);
2127
2128         /* trace */
2129         memset(p, 0, sizeof(struct ceph_blkin_trace_info));
2130         p += sizeof(struct ceph_blkin_trace_info);
2131
2132         ceph_encode_32(&p, 0); /* client_inc, always 0 */
2133         ceph_encode_timespec64(p, &req->r_mtime);
2134         p += sizeof(struct ceph_timespec);
2135
2136         encode_oloc(&p, end, &req->r_t.target_oloc);
2137         ceph_encode_string(&p, end, req->r_t.target_oid.name,
2138                            req->r_t.target_oid.name_len);
2139
2140         /* ops, can imply data */
2141         ceph_encode_16(&p, req->r_num_ops);
2142         for (i = 0; i < req->r_num_ops; i++) {
2143                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
2144                 p += sizeof(struct ceph_osd_op);
2145         }
2146
2147         ceph_encode_64(&p, req->r_snapid); /* snapid */
2148         if (req->r_snapc) {
2149                 ceph_encode_64(&p, req->r_snapc->seq);
2150                 ceph_encode_32(&p, req->r_snapc->num_snaps);
2151                 for (i = 0; i < req->r_snapc->num_snaps; i++)
2152                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2153         } else {
2154                 ceph_encode_64(&p, 0); /* snap_seq */
2155                 ceph_encode_32(&p, 0); /* snaps len */
2156         }
2157
2158         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2159         BUG_ON(p > end - 8); /* space for features */
2160
2161         msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2162         /* front_len is finalized in encode_request_finish() */
2163         msg->front.iov_len = p - msg->front.iov_base;
2164         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2165         msg->hdr.data_len = cpu_to_le32(data_len);
2166         /*
2167          * The header "data_off" is a hint to the receiver allowing it
2168          * to align received data into its buffers such that there's no
2169          * need to re-copy it before writing it to disk (direct I/O).
2170          */
2171         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2172
2173         dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2174              req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2175 }
2176
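/*
 * Front layout after encode_request_partial() (MOSDOp v8, luminous+):
 *
 *     spgid, hash, epoch, flags, reqid, trace, client_inc, mtime,
 *     oloc, oid, ops, snapid, snap context, retry_attempt
 *
 * For pre-luminous OSDs this is shuffled into the v4 layout below:
 *
 *     client_inc, epoch, flags, mtime, reassert_version, oloc, raw pgid,
 *     oid, ops, snapid, snap context, retry_attempt
 */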
2177 static void encode_request_finish(struct ceph_msg *msg)
2178 {
2179         void *p = msg->front.iov_base;
2180         void *const partial_end = p + msg->front.iov_len;
2181         void *const end = p + msg->front_alloc_len;
2182
2183         if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2184                 /* luminous OSD -- encode features and be done */
2185                 p = partial_end;
2186                 ceph_encode_64(&p, msg->con->peer_features);
2187         } else {
2188                 struct {
2189                         char spgid[CEPH_ENCODING_START_BLK_LEN +
2190                                    CEPH_PGID_ENCODING_LEN + 1];
2191                         __le32 hash;
2192                         __le32 epoch;
2193                         __le32 flags;
2194                         char reqid[CEPH_ENCODING_START_BLK_LEN +
2195                                    sizeof(struct ceph_osd_reqid)];
2196                         char trace[sizeof(struct ceph_blkin_trace_info)];
2197                         __le32 client_inc;
2198                         struct ceph_timespec mtime;
2199                 } __packed head;
2200                 struct ceph_pg pgid;
2201                 void *oloc, *oid, *tail;
2202                 int oloc_len, oid_len, tail_len;
2203                 int len;
2204
2205                 /*
2206                  * Pre-luminous OSD -- reencode v8 into v4 using @head
2207                  * as a temporary buffer.  Encode the raw PG; the rest
2208                  * is just a matter of moving oloc, oid and tail blobs
2209                  * around.
2210                  */
2211                 memcpy(&head, p, sizeof(head));
2212                 p += sizeof(head);
2213
2214                 oloc = p;
2215                 p += CEPH_ENCODING_START_BLK_LEN;
2216                 pgid.pool = ceph_decode_64(&p);
2217                 p += 4 + 4; /* preferred, key len */
2218                 len = ceph_decode_32(&p);
2219                 p += len;   /* nspace */
2220                 oloc_len = p - oloc;
2221
2222                 oid = p;
2223                 len = ceph_decode_32(&p);
2224                 p += len;
2225                 oid_len = p - oid;
2226
2227                 tail = p;
2228                 tail_len = partial_end - p;
2229
2230                 p = msg->front.iov_base;
2231                 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2232                 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2233                 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2234                 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2235
2236                 /* reassert_version */
2237                 memset(p, 0, sizeof(struct ceph_eversion));
2238                 p += sizeof(struct ceph_eversion);
2239
2240                 BUG_ON(p >= oloc);
2241                 memmove(p, oloc, oloc_len);
2242                 p += oloc_len;
2243
2244                 pgid.seed = le32_to_cpu(head.hash);
2245                 encode_pgid(&p, &pgid); /* raw pg */
2246
2247                 BUG_ON(p >= oid);
2248                 memmove(p, oid, oid_len);
2249                 p += oid_len;
2250
2251                 /* tail -- ops, snapid, snapc, retry_attempt */
2252                 BUG_ON(p >= tail);
2253                 memmove(p, tail, tail_len);
2254                 p += tail_len;
2255
2256                 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2257         }
2258
2259         BUG_ON(p > end);
2260         msg->front.iov_len = p - msg->front.iov_base;
2261         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2262
2263         dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2264              le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2265              le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2266              le16_to_cpu(msg->hdr.version));
2267 }
2268
2269 /*
2270  * @req has to be assigned a tid and registered.
2271  */
2272 static void send_request(struct ceph_osd_request *req)
2273 {
2274         struct ceph_osd *osd = req->r_osd;
2275
2276         verify_osd_locked(osd);
2277         WARN_ON(osd->o_osd != req->r_t.osd);
2278
2279         /* backoff? */
2280         if (should_plug_request(req))
2281                 return;
2282
2283         /*
2284          * We may have a previously queued request message hanging
2285          * around.  Cancel it to avoid corrupting the msgr.
2286          */
2287         if (req->r_sent)
2288                 ceph_msg_revoke(req->r_request);
2289
2290         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2291         if (req->r_attempts)
2292                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2293         else
2294                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2295
2296         encode_request_partial(req, req->r_request);
2297
2298         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2299              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2300              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2301              req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2302              req->r_attempts);
2303
2304         req->r_t.paused = false;
2305         req->r_stamp = jiffies;
2306         req->r_attempts++;
2307
2308         req->r_sent = osd->o_incarnation;
2309         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2310         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2311 }
2312
2313 static void maybe_request_map(struct ceph_osd_client *osdc)
2314 {
2315         bool continuous = false;
2316
2317         verify_osdc_locked(osdc);
2318         WARN_ON(!osdc->osdmap->epoch);
2319
2320         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2321             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2322             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2323                 dout("%s osdc %p continuous\n", __func__, osdc);
2324                 continuous = true;
2325         } else {
2326                 dout("%s osdc %p onetime\n", __func__, osdc);
2327         }
2328
2329         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2330                                osdc->osdmap->epoch + 1, continuous))
2331                 ceph_monc_renew_subs(&osdc->client->monc);
2332 }
2333
2334 static void complete_request(struct ceph_osd_request *req, int err);
2335 static void send_map_check(struct ceph_osd_request *req);
2336
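/*
 * Map @req to an OSD and send it if possible.  Called with osdc->lock
 * held for read (wrlocked == false) or write (wrlocked == true).  If the
 * target pool is missing from our osdmap or a new OSD session has to be
 * created, the read lock is promoted to a write lock and downgraded
 * again before returning.
 */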
2337 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2338 {
2339         struct ceph_osd_client *osdc = req->r_osdc;
2340         struct ceph_osd *osd;
2341         enum calc_target_result ct_res;
2342         int err = 0;
2343         bool need_send = false;
2344         bool promoted = false;
2345
2346         WARN_ON(req->r_tid);
2347         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2348
2349 again:
2350         ct_res = calc_target(osdc, &req->r_t, false);
2351         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2352                 goto promote;
2353
2354         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2355         if (IS_ERR(osd)) {
2356                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2357                 goto promote;
2358         }
2359
2360         if (osdc->abort_err) {
2361                 dout("req %p abort_err %d\n", req, osdc->abort_err);
2362                 err = osdc->abort_err;
2363         } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2364                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2365                      osdc->epoch_barrier);
2366                 req->r_t.paused = true;
2367                 maybe_request_map(osdc);
2368         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2369                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2370                 dout("req %p pausewr\n", req);
2371                 req->r_t.paused = true;
2372                 maybe_request_map(osdc);
2373         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2374                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2375                 dout("req %p pauserd\n", req);
2376                 req->r_t.paused = true;
2377                 maybe_request_map(osdc);
2378         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2379                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2380                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
2381                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2382                     pool_full(osdc, req->r_t.base_oloc.pool))) {
2383                 dout("req %p full/pool_full\n", req);
2384                 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2385                         err = -ENOSPC;
2386                 } else {
2387                         pr_warn_ratelimited("FULL or reached pool quota\n");
2388                         req->r_t.paused = true;
2389                         maybe_request_map(osdc);
2390                 }
2391         } else if (!osd_homeless(osd)) {
2392                 need_send = true;
2393         } else {
2394                 maybe_request_map(osdc);
2395         }
2396
2397         mutex_lock(&osd->lock);
2398         /*
2399          * Assign the tid atomically with send_request() to protect
2400          * multiple writes to the same object from racing with each
2401          * other, which would result in out-of-order ops on the OSDs.
2402          */
2403         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2404         link_request(osd, req);
2405         if (need_send)
2406                 send_request(req);
2407         else if (err)
2408                 complete_request(req, err);
2409         mutex_unlock(&osd->lock);
2410
2411         if (!err && ct_res == CALC_TARGET_POOL_DNE)
2412                 send_map_check(req);
2413
2414         if (promoted)
2415                 downgrade_write(&osdc->lock);
2416         return;
2417
2418 promote:
2419         up_read(&osdc->lock);
2420         down_write(&osdc->lock);
2421         wrlocked = true;
2422         promoted = true;
2423         goto again;
2424 }
2425
2426 static void account_request(struct ceph_osd_request *req)
2427 {
2428         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2429         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2430
2431         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2432         atomic_inc(&req->r_osdc->num_requests);
2433
2434         req->r_start_stamp = jiffies;
2435         req->r_start_latency = ktime_get();
2436 }
2437
2438 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2439 {
2440         ceph_osdc_get_request(req);
2441         account_request(req);
2442         __submit_request(req, wrlocked);
2443 }
2444
2445 static void finish_request(struct ceph_osd_request *req)
2446 {
2447         struct ceph_osd_client *osdc = req->r_osdc;
2448
2449         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2450         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2451
2452         req->r_end_latency = ktime_get();
2453
2454         if (req->r_osd)
2455                 unlink_request(req->r_osd, req);
2456         atomic_dec(&osdc->num_requests);
2457
2458         /*
2459          * If an OSD has failed or returned and a request has been sent
2460          * twice, it's possible to get a reply and end up here while the
2461          * request message is queued for delivery.  We will ignore the
2462          * reply, so not a big deal, but better to try and catch it.
2463          */
2464         ceph_msg_revoke(req->r_request);
2465         ceph_msg_revoke_incoming(req->r_reply);
2466 }
2467
2468 static void __complete_request(struct ceph_osd_request *req)
2469 {
2470         dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2471              req->r_tid, req->r_callback, req->r_result);
2472
2473         if (req->r_callback)
2474                 req->r_callback(req);
2475         complete_all(&req->r_completion);
2476         ceph_osdc_put_request(req);
2477 }
2478
2479 static void complete_request_workfn(struct work_struct *work)
2480 {
2481         struct ceph_osd_request *req =
2482             container_of(work, struct ceph_osd_request, r_complete_work);
2483
2484         __complete_request(req);
2485 }
2486
2487 /*
2488  * This is open-coded in handle_reply().
2489  */
2490 static void complete_request(struct ceph_osd_request *req, int err)
2491 {
2492         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2493
2494         req->r_result = err;
2495         finish_request(req);
2496
2497         INIT_WORK(&req->r_complete_work, complete_request_workfn);
2498         queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2499 }
2500
2501 static void cancel_map_check(struct ceph_osd_request *req)
2502 {
2503         struct ceph_osd_client *osdc = req->r_osdc;
2504         struct ceph_osd_request *lookup_req;
2505
2506         verify_osdc_wrlocked(osdc);
2507
2508         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2509         if (!lookup_req)
2510                 return;
2511
2512         WARN_ON(lookup_req != req);
2513         erase_request_mc(&osdc->map_checks, req);
2514         ceph_osdc_put_request(req);
2515 }
2516
2517 static void cancel_request(struct ceph_osd_request *req)
2518 {
2519         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2520
2521         cancel_map_check(req);
2522         finish_request(req);
2523         complete_all(&req->r_completion);
2524         ceph_osdc_put_request(req);
2525 }
2526
2527 static void abort_request(struct ceph_osd_request *req, int err)
2528 {
2529         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2530
2531         cancel_map_check(req);
2532         complete_request(req, err);
2533 }
2534
2535 static int abort_fn(struct ceph_osd_request *req, void *arg)
2536 {
2537         int err = *(int *)arg;
2538
2539         abort_request(req, err);
2540         return 0; /* continue iteration */
2541 }
2542
2543 /*
2544  * Abort all in-flight requests with @err and arrange for all future
2545  * requests to be failed immediately.
2546  */
2547 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2548 {
2549         dout("%s osdc %p err %d\n", __func__, osdc, err);
2550         down_write(&osdc->lock);
2551         for_each_request(osdc, abort_fn, &err);
2552         osdc->abort_err = err;
2553         up_write(&osdc->lock);
2554 }
2555 EXPORT_SYMBOL(ceph_osdc_abort_requests);
2556
2557 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2558 {
2559         down_write(&osdc->lock);
2560         osdc->abort_err = 0;
2561         up_write(&osdc->lock);
2562 }
2563 EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
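
/*
 * Sketch of how a libceph user might drive the two helpers above
 * (hypothetical caller, for illustration only):
 */
#if 0
static void example_fence_client(struct ceph_client *client)
{
        /* fail everything in flight and every future submission */
        ceph_osdc_abort_requests(&client->osdc, -EIO);

        /* ... once it is safe to issue I/O again ... */
        ceph_osdc_clear_abort_err(&client->osdc);
}
#endif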
2564
2565 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2566 {
2567         if (likely(eb > osdc->epoch_barrier)) {
2568                 dout("updating epoch_barrier from %u to %u\n",
2569                                 osdc->epoch_barrier, eb);
2570                 osdc->epoch_barrier = eb;
2571                 /* Request map if we're not at the barrier yet */
2572                 if (eb > osdc->osdmap->epoch)
2573                         maybe_request_map(osdc);
2574         }
2575 }
2576
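/*
 * Raise the epoch barrier to @eb if it is newer than what we have.  The
 * common case is checked under the read lock; only if the barrier
 * actually needs to move is osdc->lock retaken for write and the check
 * repeated in update_epoch_barrier().
 */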
2577 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2578 {
2579         down_read(&osdc->lock);
2580         if (unlikely(eb > osdc->epoch_barrier)) {
2581                 up_read(&osdc->lock);
2582                 down_write(&osdc->lock);
2583                 update_epoch_barrier(osdc, eb);
2584                 up_write(&osdc->lock);
2585         } else {
2586                 up_read(&osdc->lock);
2587         }
2588 }
2589 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2590
2591 /*
2592  * We can end up releasing caps as a result of abort_request().
2593  * In that case, we want to ensure that the cap release message
2594  * has an updated epoch barrier in it, so set the epoch barrier prior to
2595  * aborting the first request.
2596  */
2597 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2598 {
2599         struct ceph_osd_client *osdc = req->r_osdc;
2600         bool *victims = arg;
2601
2602         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2603             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2604              pool_full(osdc, req->r_t.base_oloc.pool))) {
2605                 if (!*victims) {
2606                         update_epoch_barrier(osdc, osdc->osdmap->epoch);
2607                         *victims = true;
2608                 }
2609                 abort_request(req, -ENOSPC);
2610         }
2611
2612         return 0; /* continue iteration */
2613 }
2614
2615 /*
2616  * Drop all pending requests that are stalled waiting on a full condition to
2617  * clear, and complete them with ENOSPC as the return code. Set the
2618  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2619  * cancelled.
2620  */
2621 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2622 {
2623         bool victims = false;
2624
2625         if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2626             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2627                 for_each_request(osdc, abort_on_full_fn, &victims);
2628 }
2629
2630 static void check_pool_dne(struct ceph_osd_request *req)
2631 {
2632         struct ceph_osd_client *osdc = req->r_osdc;
2633         struct ceph_osdmap *map = osdc->osdmap;
2634
2635         verify_osdc_wrlocked(osdc);
2636         WARN_ON(!map->epoch);
2637
2638         if (req->r_attempts) {
2639                 /*
2640                  * We sent a request earlier, which means that
2641                  * previously the pool existed, and now it does not
2642                  * (i.e., it was deleted).
2643                  */
2644                 req->r_map_dne_bound = map->epoch;
2645                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2646                      req->r_tid);
2647         } else {
2648                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2649                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
2650         }
2651
2652         if (req->r_map_dne_bound) {
2653                 if (map->epoch >= req->r_map_dne_bound) {
2654                         /* we had a new enough map */
2655                         pr_info_ratelimited("tid %llu pool does not exist\n",
2656                                             req->r_tid);
2657                         complete_request(req, -ENOENT);
2658                 }
2659         } else {
2660                 send_map_check(req);
2661         }
2662 }
2663
2664 static void map_check_cb(struct ceph_mon_generic_request *greq)
2665 {
2666         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2667         struct ceph_osd_request *req;
2668         u64 tid = greq->private_data;
2669
2670         WARN_ON(greq->result || !greq->u.newest);
2671
2672         down_write(&osdc->lock);
2673         req = lookup_request_mc(&osdc->map_checks, tid);
2674         if (!req) {
2675                 dout("%s tid %llu dne\n", __func__, tid);
2676                 goto out_unlock;
2677         }
2678
2679         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2680              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2681         if (!req->r_map_dne_bound)
2682                 req->r_map_dne_bound = greq->u.newest;
2683         erase_request_mc(&osdc->map_checks, req);
2684         check_pool_dne(req);
2685
2686         ceph_osdc_put_request(req);
2687 out_unlock:
2688         up_write(&osdc->lock);
2689 }
2690
2691 static void send_map_check(struct ceph_osd_request *req)
2692 {
2693         struct ceph_osd_client *osdc = req->r_osdc;
2694         struct ceph_osd_request *lookup_req;
2695         int ret;
2696
2697         verify_osdc_wrlocked(osdc);
2698
2699         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2700         if (lookup_req) {
2701                 WARN_ON(lookup_req != req);
2702                 return;
2703         }
2704
2705         ceph_osdc_get_request(req);
2706         insert_request_mc(&osdc->map_checks, req);
2707         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2708                                           map_check_cb, req->r_tid);
2709         WARN_ON(ret);
2710 }
2711
2712 /*
2713  * lingering requests, watch/notify v2 infrastructure
2714  */
2715 static void linger_release(struct kref *kref)
2716 {
2717         struct ceph_osd_linger_request *lreq =
2718             container_of(kref, struct ceph_osd_linger_request, kref);
2719
2720         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2721              lreq->reg_req, lreq->ping_req);
2722         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2723         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2724         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2725         WARN_ON(!list_empty(&lreq->scan_item));
2726         WARN_ON(!list_empty(&lreq->pending_lworks));
2727         WARN_ON(lreq->osd);
2728
2729         if (lreq->reg_req)
2730                 ceph_osdc_put_request(lreq->reg_req);
2731         if (lreq->ping_req)
2732                 ceph_osdc_put_request(lreq->ping_req);
2733         target_destroy(&lreq->t);
2734         kfree(lreq);
2735 }
2736
2737 static void linger_put(struct ceph_osd_linger_request *lreq)
2738 {
2739         if (lreq)
2740                 kref_put(&lreq->kref, linger_release);
2741 }
2742
2743 static struct ceph_osd_linger_request *
2744 linger_get(struct ceph_osd_linger_request *lreq)
2745 {
2746         kref_get(&lreq->kref);
2747         return lreq;
2748 }
2749
2750 static struct ceph_osd_linger_request *
2751 linger_alloc(struct ceph_osd_client *osdc)
2752 {
2753         struct ceph_osd_linger_request *lreq;
2754
2755         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2756         if (!lreq)
2757                 return NULL;
2758
2759         kref_init(&lreq->kref);
2760         mutex_init(&lreq->lock);
2761         RB_CLEAR_NODE(&lreq->node);
2762         RB_CLEAR_NODE(&lreq->osdc_node);
2763         RB_CLEAR_NODE(&lreq->mc_node);
2764         INIT_LIST_HEAD(&lreq->scan_item);
2765         INIT_LIST_HEAD(&lreq->pending_lworks);
2766         init_completion(&lreq->reg_commit_wait);
2767         init_completion(&lreq->notify_finish_wait);
2768
2769         lreq->osdc = osdc;
2770         target_init(&lreq->t);
2771
2772         dout("%s lreq %p\n", __func__, lreq);
2773         return lreq;
2774 }
2775
2776 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2777 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2778 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2779
2780 /*
2781  * Create linger request <-> OSD session relation.
2782  *
2783  * @lreq has to be registered, @osd may be homeless.
2784  */
2785 static void link_linger(struct ceph_osd *osd,
2786                         struct ceph_osd_linger_request *lreq)
2787 {
2788         verify_osd_locked(osd);
2789         WARN_ON(!lreq->linger_id || lreq->osd);
2790         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2791              osd->o_osd, lreq, lreq->linger_id);
2792
2793         if (!osd_homeless(osd))
2794                 __remove_osd_from_lru(osd);
2795         else
2796                 atomic_inc(&osd->o_osdc->num_homeless);
2797
2798         get_osd(osd);
2799         insert_linger(&osd->o_linger_requests, lreq);
2800         lreq->osd = osd;
2801 }
2802
2803 static void unlink_linger(struct ceph_osd *osd,
2804                           struct ceph_osd_linger_request *lreq)
2805 {
2806         verify_osd_locked(osd);
2807         WARN_ON(lreq->osd != osd);
2808         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2809              osd->o_osd, lreq, lreq->linger_id);
2810
2811         lreq->osd = NULL;
2812         erase_linger(&osd->o_linger_requests, lreq);
2813         put_osd(osd);
2814
2815         if (!osd_homeless(osd))
2816                 maybe_move_osd_to_lru(osd);
2817         else
2818                 atomic_dec(&osd->o_osdc->num_homeless);
2819 }
2820
2821 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2822 {
2823         verify_osdc_locked(lreq->osdc);
2824
2825         return !RB_EMPTY_NODE(&lreq->osdc_node);
2826 }
2827
2828 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2829 {
2830         struct ceph_osd_client *osdc = lreq->osdc;
2831         bool registered;
2832
2833         down_read(&osdc->lock);
2834         registered = __linger_registered(lreq);
2835         up_read(&osdc->lock);
2836
2837         return registered;
2838 }
2839
2840 static void linger_register(struct ceph_osd_linger_request *lreq)
2841 {
2842         struct ceph_osd_client *osdc = lreq->osdc;
2843
2844         verify_osdc_wrlocked(osdc);
2845         WARN_ON(lreq->linger_id);
2846
2847         linger_get(lreq);
2848         lreq->linger_id = ++osdc->last_linger_id;
2849         insert_linger_osdc(&osdc->linger_requests, lreq);
2850 }
2851
2852 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2853 {
2854         struct ceph_osd_client *osdc = lreq->osdc;
2855
2856         verify_osdc_wrlocked(osdc);
2857
2858         erase_linger_osdc(&osdc->linger_requests, lreq);
2859         linger_put(lreq);
2860 }
2861
2862 static void cancel_linger_request(struct ceph_osd_request *req)
2863 {
2864         struct ceph_osd_linger_request *lreq = req->r_priv;
2865
2866         WARN_ON(!req->r_linger);
2867         cancel_request(req);
2868         linger_put(lreq);
2869 }
2870
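/*
 * A unit of work queued on osdc->notify_wq on behalf of a linger
 * request: either a received notify (the payload points into the
 * message front, which is pinned via ->notify.msg until the callback
 * has run) or a watch error.  Pending lworks are tracked on
 * lreq->pending_lworks.
 */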
2871 struct linger_work {
2872         struct work_struct work;
2873         struct ceph_osd_linger_request *lreq;
2874         struct list_head pending_item;
2875         unsigned long queued_stamp;
2876
2877         union {
2878                 struct {
2879                         u64 notify_id;
2880                         u64 notifier_id;
2881                         void *payload; /* points into @msg front */
2882                         size_t payload_len;
2883
2884                         struct ceph_msg *msg; /* for ceph_msg_put() */
2885                 } notify;
2886                 struct {
2887                         int err;
2888                 } error;
2889         };
2890 };
2891
2892 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2893                                        work_func_t workfn)
2894 {
2895         struct linger_work *lwork;
2896
2897         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2898         if (!lwork)
2899                 return NULL;
2900
2901         INIT_WORK(&lwork->work, workfn);
2902         INIT_LIST_HEAD(&lwork->pending_item);
2903         lwork->lreq = linger_get(lreq);
2904
2905         return lwork;
2906 }
2907
2908 static void lwork_free(struct linger_work *lwork)
2909 {
2910         struct ceph_osd_linger_request *lreq = lwork->lreq;
2911
2912         mutex_lock(&lreq->lock);
2913         list_del(&lwork->pending_item);
2914         mutex_unlock(&lreq->lock);
2915
2916         linger_put(lreq);
2917         kfree(lwork);
2918 }
2919
2920 static void lwork_queue(struct linger_work *lwork)
2921 {
2922         struct ceph_osd_linger_request *lreq = lwork->lreq;
2923         struct ceph_osd_client *osdc = lreq->osdc;
2924
2925         verify_lreq_locked(lreq);
2926         WARN_ON(!list_empty(&lwork->pending_item));
2927
2928         lwork->queued_stamp = jiffies;
2929         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2930         queue_work(osdc->notify_wq, &lwork->work);
2931 }
2932
2933 static void do_watch_notify(struct work_struct *w)
2934 {
2935         struct linger_work *lwork = container_of(w, struct linger_work, work);
2936         struct ceph_osd_linger_request *lreq = lwork->lreq;
2937
2938         if (!linger_registered(lreq)) {
2939                 dout("%s lreq %p not registered\n", __func__, lreq);
2940                 goto out;
2941         }
2942
2943         WARN_ON(!lreq->is_watch);
2944         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2945              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2946              lwork->notify.payload_len);
2947         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2948                   lwork->notify.notifier_id, lwork->notify.payload,
2949                   lwork->notify.payload_len);
2950
2951 out:
2952         ceph_msg_put(lwork->notify.msg);
2953         lwork_free(lwork);
2954 }
2955
2956 static void do_watch_error(struct work_struct *w)
2957 {
2958         struct linger_work *lwork = container_of(w, struct linger_work, work);
2959         struct ceph_osd_linger_request *lreq = lwork->lreq;
2960
2961         if (!linger_registered(lreq)) {
2962                 dout("%s lreq %p not registered\n", __func__, lreq);
2963                 goto out;
2964         }
2965
2966         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2967         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2968
2969 out:
2970         lwork_free(lwork);
2971 }
2972
2973 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2974 {
2975         struct linger_work *lwork;
2976
2977         lwork = lwork_alloc(lreq, do_watch_error);
2978         if (!lwork) {
2979                 pr_err("failed to allocate error-lwork\n");
2980                 return;
2981         }
2982
2983         lwork->error.err = lreq->last_error;
2984         lwork_queue(lwork);
2985 }
2986
2987 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2988                                        int result)
2989 {
2990         if (!completion_done(&lreq->reg_commit_wait)) {
2991                 lreq->reg_commit_error = (result <= 0 ? result : 0);
2992                 complete_all(&lreq->reg_commit_wait);
2993         }
2994 }
2995
2996 static void linger_commit_cb(struct ceph_osd_request *req)
2997 {
2998         struct ceph_osd_linger_request *lreq = req->r_priv;
2999
3000         mutex_lock(&lreq->lock);
3001         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
3002              lreq->linger_id, req->r_result);
3003         linger_reg_commit_complete(lreq, req->r_result);
3004         lreq->committed = true;
3005
3006         if (!lreq->is_watch) {
3007                 struct ceph_osd_data *osd_data =
3008                     osd_req_op_data(req, 0, notify, response_data);
3009                 void *p = page_address(osd_data->pages[0]);
3010
3011                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
3012                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
3013
3014                 /* make note of the notify_id */
3015                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
3016                         lreq->notify_id = ceph_decode_64(&p);
3017                         dout("lreq %p notify_id %llu\n", lreq,
3018                              lreq->notify_id);
3019                 } else {
3020                         dout("lreq %p no notify_id\n", lreq);
3021                 }
3022         }
3023
3024         mutex_unlock(&lreq->lock);
3025         linger_put(lreq);
3026 }
3027
3028 static int normalize_watch_error(int err)
3029 {
3030         /*
3031          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
3032          * notification and a failure to reconnect because we raced with
3033          * the delete appear the same to the user.
3034          */
3035         if (err == -ENOENT)
3036                 err = -ENOTCONN;
3037
3038         return err;
3039 }
3040
3041 static void linger_reconnect_cb(struct ceph_osd_request *req)
3042 {
3043         struct ceph_osd_linger_request *lreq = req->r_priv;
3044
3045         mutex_lock(&lreq->lock);
3046         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
3047              lreq, lreq->linger_id, req->r_result, lreq->last_error);
3048         if (req->r_result < 0) {
3049                 if (!lreq->last_error) {
3050                         lreq->last_error = normalize_watch_error(req->r_result);
3051                         queue_watch_error(lreq);
3052                 }
3053         }
3054
3055         mutex_unlock(&lreq->lock);
3056         linger_put(lreq);
3057 }
3058
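/*
 * Reinitialize and (re)send the registration request for a linger
 * (watch or notify) request.  If the watch has already been committed,
 * the op becomes CEPH_OSD_WATCH_OP_RECONNECT with a bumped
 * register_gen; otherwise the original register/notify op is sent and
 * linger_commit_cb() handles the reply.
 *
 * Called with osdc->lock held for write.
 */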
3059 static void send_linger(struct ceph_osd_linger_request *lreq)
3060 {
3061         struct ceph_osd_request *req = lreq->reg_req;
3062         struct ceph_osd_req_op *op = &req->r_ops[0];
3063
3064         verify_osdc_wrlocked(req->r_osdc);
3065         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3066
3067         if (req->r_osd)
3068                 cancel_linger_request(req);
3069
3070         request_reinit(req);
3071         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3072         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3073         req->r_flags = lreq->t.flags;
3074         req->r_mtime = lreq->mtime;
3075
3076         mutex_lock(&lreq->lock);
3077         if (lreq->is_watch && lreq->committed) {
3078                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3079                         op->watch.cookie != lreq->linger_id);
3080                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
3081                 op->watch.gen = ++lreq->register_gen;
3082                 dout("lreq %p reconnect register_gen %u\n", lreq,
3083                      op->watch.gen);
3084                 req->r_callback = linger_reconnect_cb;
3085         } else {
3086                 if (!lreq->is_watch)
3087                         lreq->notify_id = 0;
3088                 else
3089                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
3090                 dout("lreq %p register\n", lreq);
3091                 req->r_callback = linger_commit_cb;
3092         }
3093         mutex_unlock(&lreq->lock);
3094
3095         req->r_priv = linger_get(lreq);
3096         req->r_linger = true;
3097
3098         submit_request(req, true);
3099 }
3100
3101 static void linger_ping_cb(struct ceph_osd_request *req)
3102 {
3103         struct ceph_osd_linger_request *lreq = req->r_priv;
3104
3105         mutex_lock(&lreq->lock);
3106         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3107              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3108              lreq->last_error);
3109         if (lreq->register_gen == req->r_ops[0].watch.gen) {
3110                 if (!req->r_result) {
3111                         lreq->watch_valid_thru = lreq->ping_sent;
3112                 } else if (!lreq->last_error) {
3113                         lreq->last_error = normalize_watch_error(req->r_result);
3114                         queue_watch_error(lreq);
3115                 }
3116         } else {
3117                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3118                      lreq->register_gen, req->r_ops[0].watch.gen);
3119         }
3120
3121         mutex_unlock(&lreq->lock);
3122         linger_put(lreq);
3123 }
3124
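/*
 * Send a CEPH_OSD_WATCH_OP_PING for an established watch.  Called from
 * handle_timeout() to verify that the watch is still connected;
 * linger_ping_cb() either advances watch_valid_thru or queues a watch
 * error.  Skipped while the osdmap PAUSERD flag is set.
 */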
3125 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3126 {
3127         struct ceph_osd_client *osdc = lreq->osdc;
3128         struct ceph_osd_request *req = lreq->ping_req;
3129         struct ceph_osd_req_op *op = &req->r_ops[0];
3130
3131         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3132                 dout("%s PAUSERD\n", __func__);
3133                 return;
3134         }
3135
3136         lreq->ping_sent = jiffies;
3137         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3138              __func__, lreq, lreq->linger_id, lreq->ping_sent,
3139              lreq->register_gen);
3140
3141         if (req->r_osd)
3142                 cancel_linger_request(req);
3143
3144         request_reinit(req);
3145         target_copy(&req->r_t, &lreq->t);
3146
3147         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3148                 op->watch.cookie != lreq->linger_id ||
3149                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
3150         op->watch.gen = lreq->register_gen;
3151         req->r_callback = linger_ping_cb;
3152         req->r_priv = linger_get(lreq);
3153         req->r_linger = true;
3154
3155         ceph_osdc_get_request(req);
3156         account_request(req);
3157         req->r_tid = atomic64_inc_return(&osdc->last_tid);
3158         link_request(lreq->osd, req);
3159         send_request(req);
3160 }
3161
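/*
 * Register a new linger (watch/notify) request: assign a linger_id,
 * use it as the watch/notify cookie, map the request to an OSD and
 * send the initial registration.
 *
 * Rough lifecycle, for orientation:
 *
 *   linger_submit() -> send_linger() -> linger_commit_cb()
 *   handle_timeout() -> send_linger_ping() -> linger_ping_cb()
 *   osd_fault() / osdmap change -> send_linger() (WATCH_OP_RECONNECT)
 */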
3162 static void linger_submit(struct ceph_osd_linger_request *lreq)
3163 {
3164         struct ceph_osd_client *osdc = lreq->osdc;
3165         struct ceph_osd *osd;
3166
3167         down_write(&osdc->lock);
3168         linger_register(lreq);
3169         if (lreq->is_watch) {
3170                 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
3171                 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
3172         } else {
3173                 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
3174         }
3175
3176         calc_target(osdc, &lreq->t, false);
3177         osd = lookup_create_osd(osdc, lreq->t.osd, true);
3178         link_linger(osd, lreq);
3179
3180         send_linger(lreq);
3181         up_write(&osdc->lock);
3182 }
3183
3184 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3185 {
3186         struct ceph_osd_client *osdc = lreq->osdc;
3187         struct ceph_osd_linger_request *lookup_lreq;
3188
3189         verify_osdc_wrlocked(osdc);
3190
3191         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3192                                        lreq->linger_id);
3193         if (!lookup_lreq)
3194                 return;
3195
3196         WARN_ON(lookup_lreq != lreq);
3197         erase_linger_mc(&osdc->linger_map_checks, lreq);
3198         linger_put(lreq);
3199 }
3200
3201 /*
3202  * @lreq has to be both registered and linked.
3203  */
3204 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3205 {
3206         if (lreq->is_watch && lreq->ping_req->r_osd)
3207                 cancel_linger_request(lreq->ping_req);
3208         if (lreq->reg_req->r_osd)
3209                 cancel_linger_request(lreq->reg_req);
3210         cancel_linger_map_check(lreq);
3211         unlink_linger(lreq->osd, lreq);
3212         linger_unregister(lreq);
3213 }
3214
3215 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3216 {
3217         struct ceph_osd_client *osdc = lreq->osdc;
3218
3219         down_write(&osdc->lock);
3220         if (__linger_registered(lreq))
3221                 __linger_cancel(lreq);
3222         up_write(&osdc->lock);
3223 }
3224
3225 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3226
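/*
 * Decide whether the pool for @lreq is really gone.  If the request was
 * already mapped at least once (register_gen is set), the pool must
 * have disappeared in the current epoch.  Once the osdmap is at least
 * as new as map_dne_bound, complete the registration with -ENOENT and
 * cancel the linger request; until then, ask the monitor for the
 * newest osdmap epoch.
 */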
3227 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3228 {
3229         struct ceph_osd_client *osdc = lreq->osdc;
3230         struct ceph_osdmap *map = osdc->osdmap;
3231
3232         verify_osdc_wrlocked(osdc);
3233         WARN_ON(!map->epoch);
3234
3235         if (lreq->register_gen) {
3236                 lreq->map_dne_bound = map->epoch;
3237                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3238                      lreq, lreq->linger_id);
3239         } else {
3240                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3241                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3242                      map->epoch);
3243         }
3244
3245         if (lreq->map_dne_bound) {
3246                 if (map->epoch >= lreq->map_dne_bound) {
3247                         /* we had a new enough map */
3248                         pr_info("linger_id %llu pool does not exist\n",
3249                                 lreq->linger_id);
3250                         linger_reg_commit_complete(lreq, -ENOENT);
3251                         __linger_cancel(lreq);
3252                 }
3253         } else {
3254                 send_linger_map_check(lreq);
3255         }
3256 }
3257
3258 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3259 {
3260         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3261         struct ceph_osd_linger_request *lreq;
3262         u64 linger_id = greq->private_data;
3263
3264         WARN_ON(greq->result || !greq->u.newest);
3265
3266         down_write(&osdc->lock);
3267         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3268         if (!lreq) {
3269                 dout("%s linger_id %llu dne\n", __func__, linger_id);
3270                 goto out_unlock;
3271         }
3272
3273         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3274              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3275              greq->u.newest);
3276         if (!lreq->map_dne_bound)
3277                 lreq->map_dne_bound = greq->u.newest;
3278         erase_linger_mc(&osdc->linger_map_checks, lreq);
3279         check_linger_pool_dne(lreq);
3280
3281         linger_put(lreq);
3282 out_unlock:
3283         up_write(&osdc->lock);
3284 }
3285
3286 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3287 {
3288         struct ceph_osd_client *osdc = lreq->osdc;
3289         struct ceph_osd_linger_request *lookup_lreq;
3290         int ret;
3291
3292         verify_osdc_wrlocked(osdc);
3293
3294         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3295                                        lreq->linger_id);
3296         if (lookup_lreq) {
3297                 WARN_ON(lookup_lreq != lreq);
3298                 return;
3299         }
3300
3301         linger_get(lreq);
3302         insert_linger_mc(&osdc->linger_map_checks, lreq);
3303         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3304                                           linger_map_check_cb, lreq->linger_id);
3305         WARN_ON(ret);
3306 }
3307
3308 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3309 {
3310         int ret;
3311
3312         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3313         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3314         return ret ?: lreq->reg_commit_error;
3315 }
3316
3317 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3318 {
3319         int ret;
3320
3321         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3322         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3323         return ret ?: lreq->notify_finish_error;
3324 }
3325
3326 /*
3327  * Timeout callback, called every N seconds.  When one or more OSD
3328  * requests have been active for more than N seconds, we send a keepalive
3329  * (tag + timestamp) to the OSD serving them to ensure that any reset of
3330  * the communications channel is detected.
3331  */
3332 static void handle_timeout(struct work_struct *work)
3333 {
3334         struct ceph_osd_client *osdc =
3335                 container_of(work, struct ceph_osd_client, timeout_work.work);
3336         struct ceph_options *opts = osdc->client->options;
3337         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3338         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3339         LIST_HEAD(slow_osds);
3340         struct rb_node *n, *p;
3341
3342         dout("%s osdc %p\n", __func__, osdc);
3343         down_write(&osdc->lock);
3344
3345         /*
3346          * ping osds that are a bit slow.  this ensures that if there
3347          * is a break in the TCP connection we will notice, and reopen
3348          * a connection with that osd (from the fault callback).
3349          */
3350         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3351                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3352                 bool found = false;
3353
3354                 for (p = rb_first(&osd->o_requests); p; ) {
3355                         struct ceph_osd_request *req =
3356                             rb_entry(p, struct ceph_osd_request, r_node);
3357
3358                         p = rb_next(p); /* abort_request() */
3359
3360                         if (time_before(req->r_stamp, cutoff)) {
3361                                 dout(" req %p tid %llu on osd%d is laggy\n",
3362                                      req, req->r_tid, osd->o_osd);
3363                                 found = true;
3364                         }
3365                         if (opts->osd_request_timeout &&
3366                             time_before(req->r_start_stamp, expiry_cutoff)) {
3367                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3368                                        req->r_tid, osd->o_osd);
3369                                 abort_request(req, -ETIMEDOUT);
3370                         }
3371                 }
3372                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3373                         struct ceph_osd_linger_request *lreq =
3374                             rb_entry(p, struct ceph_osd_linger_request, node);
3375
3376                         dout(" lreq %p linger_id %llu is served by osd%d\n",
3377                              lreq, lreq->linger_id, osd->o_osd);
3378                         found = true;
3379
3380                         mutex_lock(&lreq->lock);
3381                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
3382                                 send_linger_ping(lreq);
3383                         mutex_unlock(&lreq->lock);
3384                 }
3385
3386                 if (found)
3387                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
3388         }
3389
3390         if (opts->osd_request_timeout) {
3391                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3392                         struct ceph_osd_request *req =
3393                             rb_entry(p, struct ceph_osd_request, r_node);
3394
3395                         p = rb_next(p); /* abort_request() */
3396
3397                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
3398                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3399                                        req->r_tid, osdc->homeless_osd.o_osd);
3400                                 abort_request(req, -ETIMEDOUT);
3401                         }
3402                 }
3403         }
3404
3405         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3406                 maybe_request_map(osdc);
3407
3408         while (!list_empty(&slow_osds)) {
3409                 struct ceph_osd *osd = list_first_entry(&slow_osds,
3410                                                         struct ceph_osd,
3411                                                         o_keepalive_item);
3412                 list_del_init(&osd->o_keepalive_item);
3413                 ceph_con_keepalive(&osd->o_con);
3414         }
3415
3416         up_write(&osdc->lock);
3417         schedule_delayed_work(&osdc->timeout_work,
3418                               osdc->client->options->osd_keepalive_timeout);
3419 }
3420
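/*
 * Periodically close OSD sessions that have been idle (no requests or
 * linger requests) for longer than osd_idle_ttl.
 */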
3421 static void handle_osds_timeout(struct work_struct *work)
3422 {
3423         struct ceph_osd_client *osdc =
3424                 container_of(work, struct ceph_osd_client,
3425                              osds_timeout_work.work);
3426         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3427         struct ceph_osd *osd, *nosd;
3428
3429         dout("%s osdc %p\n", __func__, osdc);
3430         down_write(&osdc->lock);
3431         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3432                 if (time_before(jiffies, osd->lru_ttl))
3433                         break;
3434
3435                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3436                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3437                 close_osd(osd);
3438         }
3439
3440         up_write(&osdc->lock);
3441         schedule_delayed_work(&osdc->osds_timeout_work,
3442                               round_jiffies_relative(delay));
3443 }
3444
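/*
 * Decode a ceph object_locator_t.  The kernel client only honours the
 * pool id: a locator key, a changed namespace or an explicit hash make
 * the redirect unusable and are rejected with -EINVAL.
 */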
3445 static int ceph_oloc_decode(void **p, void *end,
3446                             struct ceph_object_locator *oloc)
3447 {
3448         u8 struct_v, struct_cv;
3449         u32 len;
3450         void *struct_end;
3451         int ret = 0;
3452
3453         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3454         struct_v = ceph_decode_8(p);
3455         struct_cv = ceph_decode_8(p);
3456         if (struct_v < 3) {
3457                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3458                         struct_v, struct_cv);
3459                 goto e_inval;
3460         }
3461         if (struct_cv > 6) {
3462                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3463                         struct_v, struct_cv);
3464                 goto e_inval;
3465         }
3466         len = ceph_decode_32(p);
3467         ceph_decode_need(p, end, len, e_inval);
3468         struct_end = *p + len;
3469
3470         oloc->pool = ceph_decode_64(p);
3471         *p += 4; /* skip preferred */
3472
3473         len = ceph_decode_32(p);
3474         if (len > 0) {
3475                 pr_warn("ceph_object_locator::key is set\n");
3476                 goto e_inval;
3477         }
3478
3479         if (struct_v >= 5) {
3480                 bool changed = false;
3481
3482                 len = ceph_decode_32(p);
3483                 if (len > 0) {
3484                         ceph_decode_need(p, end, len, e_inval);
3485                         if (!oloc->pool_ns ||
3486                             ceph_compare_string(oloc->pool_ns, *p, len))
3487                                 changed = true;
3488                         *p += len;
3489                 } else {
3490                         if (oloc->pool_ns)
3491                                 changed = true;
3492                 }
3493                 if (changed) {
3494                         /* redirect changes namespace */
3495                         pr_warn("ceph_object_locator::nspace is changed\n");
3496                         goto e_inval;
3497                 }
3498         }
3499
3500         if (struct_v >= 6) {
3501                 s64 hash = ceph_decode_64(p);
3502                 if (hash != -1) {
3503                         pr_warn("ceph_object_locator::hash is set\n");
3504                         goto e_inval;
3505                 }
3506         }
3507
3508         /* skip the rest */
3509         *p = struct_end;
3510 out:
3511         return ret;
3512
3513 e_inval:
3514         ret = -EINVAL;
3515         goto out;
3516 }
3517
3518 static int ceph_redirect_decode(void **p, void *end,
3519                                 struct ceph_request_redirect *redir)
3520 {
3521         u8 struct_v, struct_cv;
3522         u32 len;
3523         void *struct_end;
3524         int ret;
3525
3526         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3527         struct_v = ceph_decode_8(p);
3528         struct_cv = ceph_decode_8(p);
3529         if (struct_cv > 1) {
3530                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3531                         struct_v, struct_cv);
3532                 goto e_inval;
3533         }
3534         len = ceph_decode_32(p);
3535         ceph_decode_need(p, end, len, e_inval);
3536         struct_end = *p + len;
3537
3538         ret = ceph_oloc_decode(p, end, &redir->oloc);
3539         if (ret)
3540                 goto out;
3541
3542         len = ceph_decode_32(p);
3543         if (len > 0) {
3544                 pr_warn("ceph_request_redirect::object_name is set\n");
3545                 goto e_inval;
3546         }
3547
3548         /* skip the rest */
3549         *p = struct_end;
3550 out:
3551         return ret;
3552
3553 e_inval:
3554         ret = -EINVAL;
3555         goto out;
3556 }
3557
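/*
 * Decoded form of an MOSDOpReply message: per-op return values and
 * output data lengths, the replay/user versions and an optional
 * redirect.
 */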
3558 struct MOSDOpReply {
3559         struct ceph_pg pgid;
3560         u64 flags;
3561         int result;
3562         u32 epoch;
3563         int num_ops;
3564         u32 outdata_len[CEPH_OSD_MAX_OPS];
3565         s32 rval[CEPH_OSD_MAX_OPS];
3566         int retry_attempt;
3567         struct ceph_eversion replay_version;
3568         u64 user_version;
3569         struct ceph_request_redirect redirect;
3570 };
3571
3572 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3573 {
3574         void *p = msg->front.iov_base;
3575         void *const end = p + msg->front.iov_len;
3576         u16 version = le16_to_cpu(msg->hdr.version);
3577         struct ceph_eversion bad_replay_version;
3578         u8 decode_redir;
3579         u32 len;
3580         int ret;
3581         int i;
3582
3583         ceph_decode_32_safe(&p, end, len, e_inval);
3584         ceph_decode_need(&p, end, len, e_inval);
3585         p += len; /* skip oid */
3586
3587         ret = ceph_decode_pgid(&p, end, &m->pgid);
3588         if (ret)
3589                 return ret;
3590
3591         ceph_decode_64_safe(&p, end, m->flags, e_inval);
3592         ceph_decode_32_safe(&p, end, m->result, e_inval);
3593         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3594         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3595         p += sizeof(bad_replay_version);
3596         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3597
3598         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3599         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3600                 goto e_inval;
3601
3602         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3603                          e_inval);
3604         for (i = 0; i < m->num_ops; i++) {
3605                 struct ceph_osd_op *op = p;
3606
3607                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3608                 p += sizeof(*op);
3609         }
3610
3611         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3612         for (i = 0; i < m->num_ops; i++)
3613                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3614
3615         if (version >= 5) {
3616                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3617                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3618                 p += sizeof(m->replay_version);
3619                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3620         } else {
3621                 m->replay_version = bad_replay_version; /* struct */
3622                 m->user_version = le64_to_cpu(m->replay_version.version);
3623         }
3624
3625         if (version >= 6) {
3626                 if (version >= 7)
3627                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3628                 else
3629                         decode_redir = 1;
3630         } else {
3631                 decode_redir = 0;
3632         }
3633
3634         if (decode_redir) {
3635                 ret = ceph_redirect_decode(&p, end, &m->redirect);
3636                 if (ret)
3637                         return ret;
3638         } else {
3639                 ceph_oloc_init(&m->redirect.oloc);
3640         }
3641
3642         return 0;
3643
3644 e_inval:
3645         return -EINVAL;
3646 }
3647
3648 /*
3649  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3650  * specified.
3651  */
3652 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3653 {
3654         struct ceph_osd_client *osdc = osd->o_osdc;
3655         struct ceph_osd_request *req;
3656         struct MOSDOpReply m;
3657         u64 tid = le64_to_cpu(msg->hdr.tid);
3658         u32 data_len = 0;
3659         int ret;
3660         int i;
3661
3662         dout("%s msg %p tid %llu\n", __func__, msg, tid);
3663
3664         down_read(&osdc->lock);
3665         if (!osd_registered(osd)) {
3666                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3667                 goto out_unlock_osdc;
3668         }
3669         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3670
3671         mutex_lock(&osd->lock);
3672         req = lookup_request(&osd->o_requests, tid);
3673         if (!req) {
3674                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3675                 goto out_unlock_session;
3676         }
3677
3678         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3679         ret = decode_MOSDOpReply(msg, &m);
3680         m.redirect.oloc.pool_ns = NULL;
3681         if (ret) {
3682                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3683                        req->r_tid, ret);
3684                 ceph_msg_dump(msg);
3685                 goto fail_request;
3686         }
3687         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3688              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3689              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3690              le64_to_cpu(m.replay_version.version), m.user_version);
3691
3692         if (m.retry_attempt >= 0) {
3693                 if (m.retry_attempt != req->r_attempts - 1) {
3694                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3695                              req, req->r_tid, m.retry_attempt,
3696                              req->r_attempts - 1);
3697                         goto out_unlock_session;
3698                 }
3699         } else {
3700                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3701         }
3702
3703         if (!ceph_oloc_empty(&m.redirect.oloc)) {
3704                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3705                      m.redirect.oloc.pool);
3706                 unlink_request(osd, req);
3707                 mutex_unlock(&osd->lock);
3708
3709                 /*
3710                  * Not ceph_oloc_copy() - changing pool_ns is not
3711                  * supported.
3712                  */
3713                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3714                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3715                                 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3716                                 CEPH_OSD_FLAG_IGNORE_CACHE;
3717                 req->r_tid = 0;
3718                 __submit_request(req, false);
3719                 goto out_unlock_osdc;
3720         }
3721
3722         if (m.result == -EAGAIN) {
3723                 dout("req %p tid %llu EAGAIN\n", req, req->r_tid);
3724                 unlink_request(osd, req);
3725                 mutex_unlock(&osd->lock);
3726
3727                 /*
3728                  * The object is missing on the replica or not (yet)
3729                  * readable.  Clear pgid to force a resend to the primary
3730                  * via legacy_change.
3731                  */
3732                 req->r_t.pgid.pool = 0;
3733                 req->r_t.pgid.seed = 0;
3734                 WARN_ON(!req->r_t.used_replica);
3735                 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
3736                                   CEPH_OSD_FLAG_LOCALIZE_READS);
3737                 req->r_tid = 0;
3738                 __submit_request(req, false);
3739                 goto out_unlock_osdc;
3740         }
3741
3742         if (m.num_ops != req->r_num_ops) {
3743                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3744                        req->r_num_ops, req->r_tid);
3745                 goto fail_request;
3746         }
3747         for (i = 0; i < req->r_num_ops; i++) {
3748                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3749                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3750                 req->r_ops[i].rval = m.rval[i];
3751                 req->r_ops[i].outdata_len = m.outdata_len[i];
3752                 data_len += m.outdata_len[i];
3753         }
3754         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3755                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3756                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3757                 goto fail_request;
3758         }
3759         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3760              req, req->r_tid, m.result, data_len);
3761
3762         /*
3763          * Since we only ever request ONDISK, we should only ever get
3764          * one (type of) reply back.
3765          */
3766         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3767         req->r_result = m.result ?: data_len;
3768         finish_request(req);
3769         mutex_unlock(&osd->lock);
3770         up_read(&osdc->lock);
3771
3772         __complete_request(req);
3773         return;
3774
3775 fail_request:
3776         complete_request(req, -EIO);
3777 out_unlock_session:
3778         mutex_unlock(&osd->lock);
3779 out_unlock_osdc:
3780         up_read(&osdc->lock);
3781 }
3782
3783 static void set_pool_was_full(struct ceph_osd_client *osdc)
3784 {
3785         struct rb_node *n;
3786
3787         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3788                 struct ceph_pg_pool_info *pi =
3789                     rb_entry(n, struct ceph_pg_pool_info, node);
3790
3791                 pi->was_full = __pool_full(pi);
3792         }
3793 }
3794
3795 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3796 {
3797         struct ceph_pg_pool_info *pi;
3798
3799         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3800         if (!pi)
3801                 return false;
3802
3803         return pi->was_full && !__pool_full(pi);
3804 }
3805
3806 static enum calc_target_result
3807 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3808 {
3809         struct ceph_osd_client *osdc = lreq->osdc;
3810         enum calc_target_result ct_res;
3811
3812         ct_res = calc_target(osdc, &lreq->t, true);
3813         if (ct_res == CALC_TARGET_NEED_RESEND) {
3814                 struct ceph_osd *osd;
3815
3816                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3817                 if (osd != lreq->osd) {
3818                         unlink_linger(lreq->osd, lreq);
3819                         link_linger(osd, lreq);
3820                 }
3821         }
3822
3823         return ct_res;
3824 }
3825
3826 /*
3827  * Requeue requests whose mapping to an OSD has changed.
3828  */
3829 static void scan_requests(struct ceph_osd *osd,
3830                           bool force_resend,
3831                           bool cleared_full,
3832                           bool check_pool_cleared_full,
3833                           struct rb_root *need_resend,
3834                           struct list_head *need_resend_linger)
3835 {
3836         struct ceph_osd_client *osdc = osd->o_osdc;
3837         struct rb_node *n;
3838         bool force_resend_writes;
3839
3840         for (n = rb_first(&osd->o_linger_requests); n; ) {
3841                 struct ceph_osd_linger_request *lreq =
3842                     rb_entry(n, struct ceph_osd_linger_request, node);
3843                 enum calc_target_result ct_res;
3844
3845                 n = rb_next(n); /* recalc_linger_target() */
3846
3847                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3848                      lreq->linger_id);
3849                 ct_res = recalc_linger_target(lreq);
3850                 switch (ct_res) {
3851                 case CALC_TARGET_NO_ACTION:
3852                         force_resend_writes = cleared_full ||
3853                             (check_pool_cleared_full &&
3854                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3855                         if (!force_resend && !force_resend_writes)
3856                                 break;
3857
3858                         /* fall through */
3859                 case CALC_TARGET_NEED_RESEND:
3860                         cancel_linger_map_check(lreq);
3861                         /*
3862                          * scan_requests() for the previous epoch(s)
3863                          * may have already added it to the list, since
3864                          * it's not unlinked here.
3865                          */
3866                         if (list_empty(&lreq->scan_item))
3867                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3868                         break;
3869                 case CALC_TARGET_POOL_DNE:
3870                         list_del_init(&lreq->scan_item);
3871                         check_linger_pool_dne(lreq);
3872                         break;
3873                 }
3874         }
3875
3876         for (n = rb_first(&osd->o_requests); n; ) {
3877                 struct ceph_osd_request *req =
3878                     rb_entry(n, struct ceph_osd_request, r_node);
3879                 enum calc_target_result ct_res;
3880
3881                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3882
3883                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3884                 ct_res = calc_target(osdc, &req->r_t, false);
3885                 switch (ct_res) {
3886                 case CALC_TARGET_NO_ACTION:
3887                         force_resend_writes = cleared_full ||
3888                             (check_pool_cleared_full &&
3889                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3890                         if (!force_resend &&
3891                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3892                              !force_resend_writes))
3893                                 break;
3894
3895                         /* fall through */
3896                 case CALC_TARGET_NEED_RESEND:
3897                         cancel_map_check(req);
3898                         unlink_request(osd, req);
3899                         insert_request(need_resend, req);
3900                         break;
3901                 case CALC_TARGET_POOL_DNE:
3902                         check_pool_dne(req);
3903                         break;
3904                 }
3905         }
3906 }
3907
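/*
 * Apply a single incremental or full osdmap and scan both homed and
 * homeless requests for targets that changed, collecting them into
 * @need_resend / @need_resend_linger.  OSD sessions that went down or
 * whose address changed are closed.
 */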
3908 static int handle_one_map(struct ceph_osd_client *osdc,
3909                           void *p, void *end, bool incremental,
3910                           struct rb_root *need_resend,
3911                           struct list_head *need_resend_linger)
3912 {
3913         struct ceph_osdmap *newmap;
3914         struct rb_node *n;
3915         bool skipped_map = false;
3916         bool was_full;
3917
3918         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3919         set_pool_was_full(osdc);
3920
3921         if (incremental)
3922                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3923         else
3924                 newmap = ceph_osdmap_decode(&p, end);
3925         if (IS_ERR(newmap))
3926                 return PTR_ERR(newmap);
3927
3928         if (newmap != osdc->osdmap) {
3929                 /*
3930                  * Preserve ->was_full before destroying the old map.
3931                  * For pools that weren't in the old map, ->was_full
3932                  * should be false.
3933                  */
3934                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3935                         struct ceph_pg_pool_info *pi =
3936                             rb_entry(n, struct ceph_pg_pool_info, node);
3937                         struct ceph_pg_pool_info *old_pi;
3938
3939                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3940                         if (old_pi)
3941                                 pi->was_full = old_pi->was_full;
3942                         else
3943                                 WARN_ON(pi->was_full);
3944                 }
3945
3946                 if (osdc->osdmap->epoch &&
3947                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3948                         WARN_ON(incremental);
3949                         skipped_map = true;
3950                 }
3951
3952                 ceph_osdmap_destroy(osdc->osdmap);
3953                 osdc->osdmap = newmap;
3954         }
3955
3956         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3957         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3958                       need_resend, need_resend_linger);
3959
3960         for (n = rb_first(&osdc->osds); n; ) {
3961                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3962
3963                 n = rb_next(n); /* close_osd() */
3964
3965                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3966                               need_resend_linger);
3967                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3968                     memcmp(&osd->o_con.peer_addr,
3969                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3970                            sizeof(struct ceph_entity_addr)))
3971                         close_osd(osd);
3972         }
3973
3974         return 0;
3975 }
3976
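/*
 * Resend the requests collected by scan_requests(): recheck targets
 * against the latest osdmap epoch, relink each request to its
 * (possibly new) OSD and send it, then resend any affected linger
 * requests.
 */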
3977 static void kick_requests(struct ceph_osd_client *osdc,
3978                           struct rb_root *need_resend,
3979                           struct list_head *need_resend_linger)
3980 {
3981         struct ceph_osd_linger_request *lreq, *nlreq;
3982         enum calc_target_result ct_res;
3983         struct rb_node *n;
3984
3985         /* make sure need_resend targets reflect latest map */
3986         for (n = rb_first(need_resend); n; ) {
3987                 struct ceph_osd_request *req =
3988                     rb_entry(n, struct ceph_osd_request, r_node);
3989
3990                 n = rb_next(n);
3991
3992                 if (req->r_t.epoch < osdc->osdmap->epoch) {
3993                         ct_res = calc_target(osdc, &req->r_t, false);
3994                         if (ct_res == CALC_TARGET_POOL_DNE) {
3995                                 erase_request(need_resend, req);
3996                                 check_pool_dne(req);
3997                         }
3998                 }
3999         }
4000
4001         for (n = rb_first(need_resend); n; ) {
4002                 struct ceph_osd_request *req =
4003                     rb_entry(n, struct ceph_osd_request, r_node);
4004                 struct ceph_osd *osd;
4005
4006                 n = rb_next(n);
4007                 erase_request(need_resend, req); /* before link_request() */
4008
4009                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
4010                 link_request(osd, req);
4011                 if (!req->r_linger) {
4012                         if (!osd_homeless(osd) && !req->r_t.paused)
4013                                 send_request(req);
4014                 } else {
4015                         cancel_linger_request(req);
4016                 }
4017         }
4018
4019         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
4020                 if (!osd_homeless(lreq->osd))
4021                         send_linger(lreq);
4022
4023                 list_del_init(&lreq->scan_item);
4024         }
4025 }
4026
4027 /*
4028  * Process updated osd map.
4029  *
4030  * The message contains any number of incremental and full maps, normally
4031  * indicating some sort of topology change in the cluster.  Kick requests
4032  * off to different OSDs as needed.
4033  */
4034 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
4035 {
4036         void *p = msg->front.iov_base;
4037         void *const end = p + msg->front.iov_len;
4038         u32 nr_maps, maplen;
4039         u32 epoch;
4040         struct ceph_fsid fsid;
4041         struct rb_root need_resend = RB_ROOT;
4042         LIST_HEAD(need_resend_linger);
4043         bool handled_incremental = false;
4044         bool was_pauserd, was_pausewr;
4045         bool pauserd, pausewr;
4046         int err;
4047
4048         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
4049         down_write(&osdc->lock);
4050
4051         /* verify fsid */
4052         ceph_decode_need(&p, end, sizeof(fsid), bad);
4053         ceph_decode_copy(&p, &fsid, sizeof(fsid));
4054         if (ceph_check_fsid(osdc->client, &fsid) < 0)
4055                 goto bad;
4056
4057         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4058         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4059                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4060                       have_pool_full(osdc);
4061
4062         /* incremental maps */
4063         ceph_decode_32_safe(&p, end, nr_maps, bad);
4064         dout(" %d inc maps\n", nr_maps);
4065         while (nr_maps > 0) {
4066                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4067                 epoch = ceph_decode_32(&p);
4068                 maplen = ceph_decode_32(&p);
4069                 ceph_decode_need(&p, end, maplen, bad);
4070                 if (osdc->osdmap->epoch &&
4071                     osdc->osdmap->epoch + 1 == epoch) {
4072                         dout("applying incremental map %u len %d\n",
4073                              epoch, maplen);
4074                         err = handle_one_map(osdc, p, p + maplen, true,
4075                                              &need_resend, &need_resend_linger);
4076                         if (err)
4077                                 goto bad;
4078                         handled_incremental = true;
4079                 } else {
4080                         dout("ignoring incremental map %u len %d\n",
4081                              epoch, maplen);
4082                 }
4083                 p += maplen;
4084                 nr_maps--;
4085         }
4086         if (handled_incremental)
4087                 goto done;
4088
4089         /* full maps */
4090         ceph_decode_32_safe(&p, end, nr_maps, bad);
4091         dout(" %d full maps\n", nr_maps);
4092         while (nr_maps) {
4093                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4094                 epoch = ceph_decode_32(&p);
4095                 maplen = ceph_decode_32(&p);
4096                 ceph_decode_need(&p, end, maplen, bad);
4097                 if (nr_maps > 1) {
4098                         dout("skipping non-latest full map %u len %d\n",
4099                              epoch, maplen);
4100                 } else if (osdc->osdmap->epoch >= epoch) {
4101                         dout("skipping full map %u len %d, "
4102                              "older than our %u\n", epoch, maplen,
4103                              osdc->osdmap->epoch);
4104                 } else {
4105                         dout("taking full map %u len %d\n", epoch, maplen);
4106                         err = handle_one_map(osdc, p, p + maplen, false,
4107                                              &need_resend, &need_resend_linger);
4108                         if (err)
4109                                 goto bad;
4110                 }
4111                 p += maplen;
4112                 nr_maps--;
4113         }
4114
4115 done:
4116         /*
4117          * subscribe to subsequent osdmap updates if full to ensure
4118          * we find out when we are no longer full and stop returning
4119          * ENOSPC.
4120          */
4121         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4122         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4123                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4124                   have_pool_full(osdc);
4125         if (was_pauserd || was_pausewr || pauserd || pausewr ||
4126             osdc->osdmap->epoch < osdc->epoch_barrier)
4127                 maybe_request_map(osdc);
4128
4129         kick_requests(osdc, &need_resend, &need_resend_linger);
4130
4131         ceph_osdc_abort_on_full(osdc);
4132         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4133                           osdc->osdmap->epoch);
4134         up_write(&osdc->lock);
4135         wake_up_all(&osdc->client->auth_wq);
4136         return;
4137
4138 bad:
4139         pr_err("osdc handle_map corrupt msg\n");
4140         ceph_msg_dump(msg);
4141         up_write(&osdc->lock);
4142 }
4143
4144 /*
4145  * Resubmit requests pending on the given osd.
4146  */
4147 static void kick_osd_requests(struct ceph_osd *osd)
4148 {
4149         struct rb_node *n;
4150
4151         clear_backoffs(osd);
4152
4153         for (n = rb_first(&osd->o_requests); n; ) {
4154                 struct ceph_osd_request *req =
4155                     rb_entry(n, struct ceph_osd_request, r_node);
4156
4157                 n = rb_next(n); /* cancel_linger_request() */
4158
4159                 if (!req->r_linger) {
4160                         if (!req->r_t.paused)
4161                                 send_request(req);
4162                 } else {
4163                         cancel_linger_request(req);
4164                 }
4165         }
4166         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4167                 struct ceph_osd_linger_request *lreq =
4168                     rb_entry(n, struct ceph_osd_linger_request, node);
4169
4170                 send_linger(lreq);
4171         }
4172 }
4173
4174 /*
4175  * If the osd connection drops, we need to resubmit all requests.
4176  */
4177 static void osd_fault(struct ceph_connection *con)
4178 {
4179         struct ceph_osd *osd = con->private;
4180         struct ceph_osd_client *osdc = osd->o_osdc;
4181
4182         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4183
4184         down_write(&osdc->lock);
4185         if (!osd_registered(osd)) {
4186                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4187                 goto out_unlock;
4188         }
4189
4190         if (!reopen_osd(osd))
4191                 kick_osd_requests(osd);
4192         maybe_request_map(osdc);
4193
4194 out_unlock:
4195         up_write(&osdc->lock);
4196 }
4197
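/*
 * Decoded form of an MOSDBackoff message: the PG shard it applies to,
 * the backoff id and the hobject range being blocked or unblocked.
 */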
4198 struct MOSDBackoff {
4199         struct ceph_spg spgid;
4200         u32 map_epoch;
4201         u8 op;
4202         u64 id;
4203         struct ceph_hobject_id *begin;
4204         struct ceph_hobject_id *end;
4205 };
4206
4207 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4208 {
4209         void *p = msg->front.iov_base;
4210         void *const end = p + msg->front.iov_len;
4211         u8 struct_v;
4212         u32 struct_len;
4213         int ret;
4214
4215         ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4216         if (ret)
4217                 return ret;
4218
4219         ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4220         if (ret)
4221                 return ret;
4222
4223         ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4224         ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4225         ceph_decode_8_safe(&p, end, m->op, e_inval);
4226         ceph_decode_64_safe(&p, end, m->id, e_inval);
4227
4228         m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4229         if (!m->begin)
4230                 return -ENOMEM;
4231
4232         ret = decode_hoid(&p, end, m->begin);
4233         if (ret) {
4234                 free_hoid(m->begin);
4235                 return ret;
4236         }
4237
4238         m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4239         if (!m->end) {
4240                 free_hoid(m->begin);
4241                 return -ENOMEM;
4242         }
4243
4244         ret = decode_hoid(&p, end, m->end);
4245         if (ret) {
4246                 free_hoid(m->begin);
4247                 free_hoid(m->end);
4248                 return ret;
4249         }
4250
4251         return 0;
4252
4253 e_inval:
4254         return -EINVAL;
4255 }
4256
4257 static struct ceph_msg *create_backoff_message(
4258                                 const struct ceph_osd_backoff *backoff,
4259                                 u32 map_epoch)
4260 {
4261         struct ceph_msg *msg;
4262         void *p, *end;
4263         int msg_size;
4264
4265         msg_size = CEPH_ENCODING_START_BLK_LEN +
4266                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4267         msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4268         msg_size += CEPH_ENCODING_START_BLK_LEN +
4269                         hoid_encoding_size(backoff->begin);
4270         msg_size += CEPH_ENCODING_START_BLK_LEN +
4271                         hoid_encoding_size(backoff->end);
4272
4273         msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4274         if (!msg)
4275                 return NULL;
4276
4277         p = msg->front.iov_base;
4278         end = p + msg->front_alloc_len;
4279
4280         encode_spgid(&p, &backoff->spgid);
4281         ceph_encode_32(&p, map_epoch);
4282         ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4283         ceph_encode_64(&p, backoff->id);
4284         encode_hoid(&p, end, backoff->begin);
4285         encode_hoid(&p, end, backoff->end);
4286         BUG_ON(p != end);
4287
4288         msg->front.iov_len = p - msg->front.iov_base;
4289         msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4290         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4291
4292         return msg;
4293 }
4294
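/*
 * Handle CEPH_OSD_BACKOFF_OP_BLOCK: record the backoff for the given
 * PG shard and hobject range and acknowledge it back to the OSD with a
 * CEPH_OSD_BACKOFF_OP_ACK_BLOCK message carrying the same epoch.
 */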
4295 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4296 {
4297         struct ceph_spg_mapping *spg;
4298         struct ceph_osd_backoff *backoff;
4299         struct ceph_msg *msg;
4300
4301         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4302              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4303
4304         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4305         if (!spg) {
4306                 spg = alloc_spg_mapping();
4307                 if (!spg) {
4308                         pr_err("%s failed to allocate spg\n", __func__);
4309                         return;
4310                 }
4311                 spg->spgid = m->spgid; /* struct */
4312                 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4313         }
4314
4315         backoff = alloc_backoff();
4316         if (!backoff) {
4317                 pr_err("%s failed to allocate backoff\n", __func__);
4318                 return;
4319         }
4320         backoff->spgid = m->spgid; /* struct */
4321         backoff->id = m->id;
4322         backoff->begin = m->begin;
4323         m->begin = NULL; /* backoff now owns this */
4324         backoff->end = m->end;
4325         m->end = NULL;   /* ditto */
4326
4327         insert_backoff(&spg->backoffs, backoff);
4328         insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4329
4330         /*
4331          * Ack with original backoff's epoch so that the OSD can
4332          * discard this if there was a PG split.
4333          */
4334         msg = create_backoff_message(backoff, m->map_epoch);
4335         if (!msg) {
4336                 pr_err("%s failed to allocate msg\n", __func__);
4337                 return;
4338         }
4339         ceph_con_send(&osd->o_con, msg);
4340 }
4341
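/*
 * Return true if the request target falls within the half-open backoff
 * range [begin, end).
 */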
4342 static bool target_contained_by(const struct ceph_osd_request_target *t,
4343                                 const struct ceph_hobject_id *begin,
4344                                 const struct ceph_hobject_id *end)
4345 {
4346         struct ceph_hobject_id hoid;
4347         int cmp;
4348
4349         hoid_fill_from_target(&hoid, t);
4350         cmp = hoid_compare(&hoid, begin);
4351         return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4352 }
4353
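/*
 * Handle CEPH_OSD_BACKOFF_OP_UNBLOCK: drop the matching backoff and
 * resend any requests whose target falls within the unblocked range.
 */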
4354 static void handle_backoff_unblock(struct ceph_osd *osd,
4355                                    const struct MOSDBackoff *m)
4356 {
4357         struct ceph_spg_mapping *spg;
4358         struct ceph_osd_backoff *backoff;
4359         struct rb_node *n;
4360
4361         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4362              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4363
4364         backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4365         if (!backoff) {
4366                 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4367                        __func__, osd->o_osd, m->spgid.pgid.pool,
4368                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4369                 return;
4370         }
4371
4372         if (hoid_compare(backoff->begin, m->begin) &&
4373             hoid_compare(backoff->end, m->end)) {
4374                 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4375                        __func__, osd->o_osd, m->spgid.pgid.pool,
4376                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4377                 /* unblock it anyway... */
4378         }
4379
4380         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4381         BUG_ON(!spg);
4382
4383         erase_backoff(&spg->backoffs, backoff);
4384         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4385         free_backoff(backoff);
4386
4387         if (RB_EMPTY_ROOT(&spg->backoffs)) {
4388                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4389                 free_spg_mapping(spg);
4390         }
4391
4392         for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4393                 struct ceph_osd_request *req =
4394                     rb_entry(n, struct ceph_osd_request, r_node);
4395
4396                 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4397                         /*
4398                          * Match against @m, not @backoff -- the PG may
4399                          * have split on the OSD.
4400                          */
4401                         if (target_contained_by(&req->r_t, m->begin, m->end)) {
4402                                 /*
4403                                  * If no other installed backoff applies,
4404                                  * resend.
4405                                  */
4406                                 send_request(req);
4407                         }
4408                 }
4409         }
4410 }
4411
4412 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4413 {
4414         struct ceph_osd_client *osdc = osd->o_osdc;
4415         struct MOSDBackoff m;
4416         int ret;
4417
4418         down_read(&osdc->lock);
4419         if (!osd_registered(osd)) {
4420                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4421                 up_read(&osdc->lock);
4422                 return;
4423         }
4424         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4425
4426         mutex_lock(&osd->lock);
4427         ret = decode_MOSDBackoff(msg, &m);
4428         if (ret) {
4429                 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4430                 ceph_msg_dump(msg);
4431                 goto out_unlock;
4432         }
4433
4434         switch (m.op) {
4435         case CEPH_OSD_BACKOFF_OP_BLOCK:
4436                 handle_backoff_block(osd, &m);
4437                 break;
4438         case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4439                 handle_backoff_unblock(osd, &m);
4440                 break;
4441         default:
4442                 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4443         }
4444
4445         free_hoid(m.begin);
4446         free_hoid(m.end);
4447
4448 out_unlock:
4449         mutex_unlock(&osd->lock);
4450         up_read(&osdc->lock);
4451 }
4452
4453 /*
4454  * Process osd watch notifications
4455  */
4456 static void handle_watch_notify(struct ceph_osd_client *osdc,
4457                                 struct ceph_msg *msg)
4458 {
4459         void *p = msg->front.iov_base;
4460         void *const end = p + msg->front.iov_len;
4461         struct ceph_osd_linger_request *lreq;
4462         struct linger_work *lwork;
4463         u8 proto_ver, opcode;
4464         u64 cookie, notify_id;
4465         u64 notifier_id = 0;
4466         s32 return_code = 0;
4467         void *payload = NULL;
4468         u32 payload_len = 0;
4469
4470         ceph_decode_8_safe(&p, end, proto_ver, bad);
4471         ceph_decode_8_safe(&p, end, opcode, bad);
4472         ceph_decode_64_safe(&p, end, cookie, bad);
4473         p += 8; /* skip ver */
4474         ceph_decode_64_safe(&p, end, notify_id, bad);
4475
4476         if (proto_ver >= 1) {
4477                 ceph_decode_32_safe(&p, end, payload_len, bad);
4478                 ceph_decode_need(&p, end, payload_len, bad);
4479                 payload = p;
4480                 p += payload_len;
4481         }
4482
4483         if (le16_to_cpu(msg->hdr.version) >= 2)
4484                 ceph_decode_32_safe(&p, end, return_code, bad);
4485
4486         if (le16_to_cpu(msg->hdr.version) >= 3)
4487                 ceph_decode_64_safe(&p, end, notifier_id, bad);
4488
4489         down_read(&osdc->lock);
4490         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4491         if (!lreq) {
4492                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4493                      cookie);
4494                 goto out_unlock_osdc;
4495         }
4496
4497         mutex_lock(&lreq->lock);
4498         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4499              opcode, cookie, lreq, lreq->is_watch);
4500         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4501                 if (!lreq->last_error) {
4502                         lreq->last_error = -ENOTCONN;
4503                         queue_watch_error(lreq);
4504                 }
4505         } else if (!lreq->is_watch) {
4506                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4507                 if (lreq->notify_id && lreq->notify_id != notify_id) {
4508                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4509                              lreq->notify_id, notify_id);
4510                 } else if (!completion_done(&lreq->notify_finish_wait)) {
4511                         struct ceph_msg_data *data =
4512                             msg->num_data_items ? &msg->data[0] : NULL;
4513
4514                         if (data) {
4515                                 if (lreq->preply_pages) {
4516                                         WARN_ON(data->type !=
4517                                                         CEPH_MSG_DATA_PAGES);
4518                                         *lreq->preply_pages = data->pages;
4519                                         *lreq->preply_len = data->length;
4520                                         data->own_pages = false;
4521                                 }
4522                         }
4523                         lreq->notify_finish_error = return_code;
4524                         complete_all(&lreq->notify_finish_wait);
4525                 }
4526         } else {
4527                 /* CEPH_WATCH_EVENT_NOTIFY */
4528                 lwork = lwork_alloc(lreq, do_watch_notify);
4529                 if (!lwork) {
4530                         pr_err("failed to allocate notify-lwork\n");
4531                         goto out_unlock_lreq;
4532                 }
4533
4534                 lwork->notify.notify_id = notify_id;
4535                 lwork->notify.notifier_id = notifier_id;
4536                 lwork->notify.payload = payload;
4537                 lwork->notify.payload_len = payload_len;
4538                 lwork->notify.msg = ceph_msg_get(msg);
4539                 lwork_queue(lwork);
4540         }
4541
4542 out_unlock_lreq:
4543         mutex_unlock(&lreq->lock);
4544 out_unlock_osdc:
4545         up_read(&osdc->lock);
4546         return;
4547
4548 bad:
4549         pr_err("osdc handle_watch_notify corrupt msg\n");
4550 }
4551
4552 /*
4553  * Register request, send initial attempt.
4554  */
4555 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4556                             struct ceph_osd_request *req,
4557                             bool nofail)
4558 {
4559         down_read(&osdc->lock);
4560         submit_request(req, false);
4561         up_read(&osdc->lock);
4562
4563         return 0;
4564 }
4565 EXPORT_SYMBOL(ceph_osdc_start_request);
4566
4567 /*
4568  * Unregister a registered request.  The request is not completed:
4569  * ->r_result isn't set and __complete_request() isn't called.
4570  */
4571 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4572 {
4573         struct ceph_osd_client *osdc = req->r_osdc;
4574
4575         down_write(&osdc->lock);
4576         if (req->r_osd)
4577                 cancel_request(req);
4578         up_write(&osdc->lock);
4579 }
4580 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4581
4582 /*
4583  * @timeout: in jiffies, 0 means "wait forever"
4584  */
4585 static int wait_request_timeout(struct ceph_osd_request *req,
4586                                 unsigned long timeout)
4587 {
4588         long left;
4589
4590         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4591         left = wait_for_completion_killable_timeout(&req->r_completion,
4592                                                 ceph_timeout_jiffies(timeout));
4593         if (left <= 0) {
4594                 left = left ?: -ETIMEDOUT;
4595                 ceph_osdc_cancel_request(req);
4596         } else {
4597                 left = req->r_result; /* completed */
4598         }
4599
4600         return left;
4601 }
4602
4603 /*
4604  * wait for a request to complete
4605  */
4606 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4607                            struct ceph_osd_request *req)
4608 {
4609         return wait_request_timeout(req, 0);
4610 }
4611 EXPORT_SYMBOL(ceph_osdc_wait_request);
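/*
 * Illustrative submit-and-wait pattern (a sketch only, assuming @req has
 * already been fully set up, e.g. via ceph_osdc_new_request()):
 *
 *	ceph_osdc_start_request(osdc, req, false);
 *	ret = ceph_osdc_wait_request(osdc, req);
 *	if (ret < 0)
 *		...			// cancelled on timeout/signal or failed
 *	ceph_osdc_put_request(req);
 */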
4612
4613 /*
4614  * sync - wait for all in-flight write requests to flush; avoid starvation.
4615  */
4616 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4617 {
4618         struct rb_node *n, *p;
4619         u64 last_tid = atomic64_read(&osdc->last_tid);
4620
4621 again:
4622         down_read(&osdc->lock);
4623         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4624                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4625
4626                 mutex_lock(&osd->lock);
4627                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4628                         struct ceph_osd_request *req =
4629                             rb_entry(p, struct ceph_osd_request, r_node);
4630
4631                         if (req->r_tid > last_tid)
4632                                 break;
4633
4634                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4635                                 continue;
4636
4637                         ceph_osdc_get_request(req);
4638                         mutex_unlock(&osd->lock);
4639                         up_read(&osdc->lock);
4640                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
4641                              __func__, req, req->r_tid, last_tid);
4642                         wait_for_completion(&req->r_completion);
4643                         ceph_osdc_put_request(req);
4644                         goto again;
4645                 }
4646
4647                 mutex_unlock(&osd->lock);
4648         }
4649
4650         up_read(&osdc->lock);
4651         dout("%s done last_tid %llu\n", __func__, last_tid);
4652 }
4653 EXPORT_SYMBOL(ceph_osdc_sync);
4654
4655 static struct ceph_osd_request *
4656 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4657 {
4658         struct ceph_osd_request *req;
4659
4660         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4661         if (!req)
4662                 return NULL;
4663
4664         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4665         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4666         return req;
4667 }
4668
4669 static struct ceph_osd_request *
4670 alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
4671 {
4672         struct ceph_osd_request *req;
4673
4674         req = alloc_linger_request(lreq);
4675         if (!req)
4676                 return NULL;
4677
4678         /*
4679          * Pass 0 for cookie because we don't know it yet, it will be
4680          * filled in by linger_submit().
4681          */
4682         osd_req_op_watch_init(req, 0, 0, watch_opcode);
4683
4684         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4685                 ceph_osdc_put_request(req);
4686                 return NULL;
4687         }
4688
4689         return req;
4690 }
4691
4692 /*
4693  * Returns a handle, caller owns a ref.
4694  */
4695 struct ceph_osd_linger_request *
4696 ceph_osdc_watch(struct ceph_osd_client *osdc,
4697                 struct ceph_object_id *oid,
4698                 struct ceph_object_locator *oloc,
4699                 rados_watchcb2_t wcb,
4700                 rados_watcherrcb_t errcb,
4701                 void *data)
4702 {
4703         struct ceph_osd_linger_request *lreq;
4704         int ret;
4705
4706         lreq = linger_alloc(osdc);
4707         if (!lreq)
4708                 return ERR_PTR(-ENOMEM);
4709
4710         lreq->is_watch = true;
4711         lreq->wcb = wcb;
4712         lreq->errcb = errcb;
4713         lreq->data = data;
4714         lreq->watch_valid_thru = jiffies;
4715
4716         ceph_oid_copy(&lreq->t.base_oid, oid);
4717         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4718         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4719         ktime_get_real_ts64(&lreq->mtime);
4720
4721         lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
4722         if (!lreq->reg_req) {
4723                 ret = -ENOMEM;
4724                 goto err_put_lreq;
4725         }
4726
4727         lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
4728         if (!lreq->ping_req) {
4729                 ret = -ENOMEM;
4730                 goto err_put_lreq;
4731         }
4732
4733         linger_submit(lreq);
4734         ret = linger_reg_commit_wait(lreq);
4735         if (ret) {
4736                 linger_cancel(lreq);
4737                 goto err_put_lreq;
4738         }
4739
4740         return lreq;
4741
4742 err_put_lreq:
4743         linger_put(lreq);
4744         return ERR_PTR(ret);
4745 }
4746 EXPORT_SYMBOL(ceph_osdc_watch);
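/*
 * Sketch of typical watch setup and teardown (my_watch_cb and my_err_cb
 * are hypothetical caller-supplied rados_watchcb2_t / rados_watcherrcb_t
 * callbacks, not part of libceph):
 *
 *	handle = ceph_osdc_watch(osdc, oid, oloc, my_watch_cb, my_err_cb, priv);
 *	if (IS_ERR(handle))
 *		return PTR_ERR(handle);
 *	...
 *	ret = ceph_osdc_unwatch(osdc, handle);	// drops the ref from watch
 */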
4747
4748 /*
4749  * Releases a ref.
4750  *
4751  * Times out after mount_timeout to preserve rbd unmap behaviour
4752  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4753  * with mount_timeout").
4754  */
4755 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4756                       struct ceph_osd_linger_request *lreq)
4757 {
4758         struct ceph_options *opts = osdc->client->options;
4759         struct ceph_osd_request *req;
4760         int ret;
4761
4762         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4763         if (!req)
4764                 return -ENOMEM;
4765
4766         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4767         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4768         req->r_flags = CEPH_OSD_FLAG_WRITE;
4769         ktime_get_real_ts64(&req->r_mtime);
4770         osd_req_op_watch_init(req, 0, lreq->linger_id,
4771                               CEPH_OSD_WATCH_OP_UNWATCH);
4772
4773         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4774         if (ret)
4775                 goto out_put_req;
4776
4777         ceph_osdc_start_request(osdc, req, false);
4778         linger_cancel(lreq);
4779         linger_put(lreq);
4780         ret = wait_request_timeout(req, opts->mount_timeout);
4781
4782 out_put_req:
4783         ceph_osdc_put_request(req);
4784         return ret;
4785 }
4786 EXPORT_SYMBOL(ceph_osdc_unwatch);
4787
4788 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4789                                       u64 notify_id, u64 cookie, void *payload,
4790                                       u32 payload_len)
4791 {
4792         struct ceph_osd_req_op *op;
4793         struct ceph_pagelist *pl;
4794         int ret;
4795
4796         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4797
4798         pl = ceph_pagelist_alloc(GFP_NOIO);
4799         if (!pl)
4800                 return -ENOMEM;
4801
4802         ret = ceph_pagelist_encode_64(pl, notify_id);
4803         ret |= ceph_pagelist_encode_64(pl, cookie);
4804         if (payload) {
4805                 ret |= ceph_pagelist_encode_32(pl, payload_len);
4806                 ret |= ceph_pagelist_append(pl, payload, payload_len);
4807         } else {
4808                 ret |= ceph_pagelist_encode_32(pl, 0);
4809         }
4810         if (ret) {
4811                 ceph_pagelist_release(pl);
4812                 return -ENOMEM;
4813         }
4814
4815         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4816         op->indata_len = pl->length;
4817         return 0;
4818 }
4819
4820 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4821                          struct ceph_object_id *oid,
4822                          struct ceph_object_locator *oloc,
4823                          u64 notify_id,
4824                          u64 cookie,
4825                          void *payload,
4826                          u32 payload_len)
4827 {
4828         struct ceph_osd_request *req;
4829         int ret;
4830
4831         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4832         if (!req)
4833                 return -ENOMEM;
4834
4835         ceph_oid_copy(&req->r_base_oid, oid);
4836         ceph_oloc_copy(&req->r_base_oloc, oloc);
4837         req->r_flags = CEPH_OSD_FLAG_READ;
4838
4839         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4840                                          payload_len);
4841         if (ret)
4842                 goto out_put_req;
4843
4844         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4845         if (ret)
4846                 goto out_put_req;
4847
4848         ceph_osdc_start_request(osdc, req, false);
4849         ret = ceph_osdc_wait_request(osdc, req);
4850
4851 out_put_req:
4852         ceph_osdc_put_request(req);
4853         return ret;
4854 }
4855 EXPORT_SYMBOL(ceph_osdc_notify_ack);
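/*
 * A notify delivered through a watch callback is normally acknowledged
 * from that callback.  Sketch only; struct my_dev is a hypothetical
 * per-watch context and the parameter list mirrors rados_watchcb2_t:
 *
 *	static void my_watch_cb(void *arg, u64 notify_id, u64 cookie,
 *				u64 notifier_id, void *data, size_t data_len)
 *	{
 *		struct my_dev *dev = arg;
 *
 *		ceph_osdc_notify_ack(dev->osdc, &dev->oid, &dev->oloc,
 *				     notify_id, cookie, NULL, 0);
 *	}
 */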
4856
4857 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4858                                   u64 cookie, u32 prot_ver, u32 timeout,
4859                                   void *payload, u32 payload_len)
4860 {
4861         struct ceph_osd_req_op *op;
4862         struct ceph_pagelist *pl;
4863         int ret;
4864
4865         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4866         op->notify.cookie = cookie;
4867
4868         pl = ceph_pagelist_alloc(GFP_NOIO);
4869         if (!pl)
4870                 return -ENOMEM;
4871
4872         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4873         ret |= ceph_pagelist_encode_32(pl, timeout);
4874         ret |= ceph_pagelist_encode_32(pl, payload_len);
4875         ret |= ceph_pagelist_append(pl, payload, payload_len);
4876         if (ret) {
4877                 ceph_pagelist_release(pl);
4878                 return -ENOMEM;
4879         }
4880
4881         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4882         op->indata_len = pl->length;
4883         return 0;
4884 }
4885
4886 /*
4887  * @timeout: in seconds
4888  *
4889  * @preply_{pages,len} are initialized both on success and error.
4890  * The caller is responsible for:
4891  *
4892  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4893  */
4894 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4895                      struct ceph_object_id *oid,
4896                      struct ceph_object_locator *oloc,
4897                      void *payload,
4898                      u32 payload_len,
4899                      u32 timeout,
4900                      struct page ***preply_pages,
4901                      size_t *preply_len)
4902 {
4903         struct ceph_osd_linger_request *lreq;
4904         struct page **pages;
4905         int ret;
4906
4907         WARN_ON(!timeout);
4908         if (preply_pages) {
4909                 *preply_pages = NULL;
4910                 *preply_len = 0;
4911         }
4912
4913         lreq = linger_alloc(osdc);
4914         if (!lreq)
4915                 return -ENOMEM;
4916
4917         lreq->preply_pages = preply_pages;
4918         lreq->preply_len = preply_len;
4919
4920         ceph_oid_copy(&lreq->t.base_oid, oid);
4921         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4922         lreq->t.flags = CEPH_OSD_FLAG_READ;
4923
4924         lreq->reg_req = alloc_linger_request(lreq);
4925         if (!lreq->reg_req) {
4926                 ret = -ENOMEM;
4927                 goto out_put_lreq;
4928         }
4929
4930         /*
4931          * Pass 0 for cookie because we don't know it yet, it will be
4932          * filled in by linger_submit().
4933          */
4934         ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
4935                                      payload, payload_len);
4936         if (ret)
4937                 goto out_put_lreq;
4938
4939         /* for notify_id */
4940         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4941         if (IS_ERR(pages)) {
4942                 ret = PTR_ERR(pages);
4943                 goto out_put_lreq;
4944         }
4945         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4946                                                  response_data),
4947                                  pages, PAGE_SIZE, 0, false, true);
4948
4949         ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
4950         if (ret)
4951                 goto out_put_lreq;
4952
4953         linger_submit(lreq);
4954         ret = linger_reg_commit_wait(lreq);
4955         if (!ret)
4956                 ret = linger_notify_finish_wait(lreq);
4957         else
4958                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4959
4960         linger_cancel(lreq);
4961 out_put_lreq:
4962         linger_put(lreq);
4963         return ret;
4964 }
4965 EXPORT_SYMBOL(ceph_osdc_notify);
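/*
 * Sketch of a synchronous notify that collects replies, following the
 * ownership rule documented above (the timeout value is illustrative):
 *
 *	struct page **reply_pages = NULL;
 *	size_t reply_len = 0;
 *
 *	ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
 *			       10, &reply_pages, &reply_len);
 *	...			// 10 is the timeout in seconds
 *	ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len));
 */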
4966
4967 /*
4968  * Return the number of milliseconds since the watch was last
4969  * confirmed, or an error.  If there is an error, the watch is no
4970  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4971  */
4972 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4973                           struct ceph_osd_linger_request *lreq)
4974 {
4975         unsigned long stamp, age;
4976         int ret;
4977
4978         down_read(&osdc->lock);
4979         mutex_lock(&lreq->lock);
4980         stamp = lreq->watch_valid_thru;
4981         if (!list_empty(&lreq->pending_lworks)) {
4982                 struct linger_work *lwork =
4983                     list_first_entry(&lreq->pending_lworks,
4984                                      struct linger_work,
4985                                      pending_item);
4986
4987                 if (time_before(lwork->queued_stamp, stamp))
4988                         stamp = lwork->queued_stamp;
4989         }
4990         age = jiffies - stamp;
4991         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
4992              lreq, lreq->linger_id, age, lreq->last_error);
4993         /* we are truncating to msecs, so return a safe upper bound */
4994         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
4995
4996         mutex_unlock(&lreq->lock);
4997         up_read(&osdc->lock);
4998         return ret;
4999 }
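/*
 * Sketch of a periodic keepalive check built on the above (a watcher such
 * as rbd might run this from a delayed work item):
 *
 *	ret = ceph_osdc_watch_check(osdc, handle);
 *	if (ret < 0)
 *		...		// watch is no longer valid, unwatch and re-watch
 *	else
 *		...		// watch was valid no more than ret msecs ago
 */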
5000
5001 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
5002 {
5003         u8 struct_v;
5004         u32 struct_len;
5005         int ret;
5006
5007         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
5008                                   &struct_v, &struct_len);
5009         if (ret)
5010                 goto bad;
5011
5012         ret = -EINVAL;
5013         ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
5014         ceph_decode_64_safe(p, end, item->cookie, bad);
5015         ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
5016
5017         if (struct_v >= 2) {
5018                 ret = ceph_decode_entity_addr(p, end, &item->addr);
5019                 if (ret)
5020                         goto bad;
5021         } else {
5022                 ret = 0;
5023         }
5024
5025         dout("%s %s%llu cookie %llu addr %s\n", __func__,
5026              ENTITY_NAME(item->name), item->cookie,
5027              ceph_pr_addr(&item->addr));
5028 bad:
5029         return ret;
5030 }
5031
5032 static int decode_watchers(void **p, void *end,
5033                            struct ceph_watch_item **watchers,
5034                            u32 *num_watchers)
5035 {
5036         u8 struct_v;
5037         u32 struct_len;
5038         int i;
5039         int ret;
5040
5041         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
5042                                   &struct_v, &struct_len);
5043         if (ret)
5044                 return ret;
5045
5046         *num_watchers = ceph_decode_32(p);
5047         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
5048         if (!*watchers)
5049                 return -ENOMEM;
5050
5051         for (i = 0; i < *num_watchers; i++) {
5052                 ret = decode_watcher(p, end, *watchers + i);
5053                 if (ret) {
5054                         kfree(*watchers);
5055                         return ret;
5056                 }
5057         }
5058
5059         return 0;
5060 }
5061
5062 /*
5063  * On success, the caller is responsible for:
5064  *
5065  *     kfree(watchers);
5066  */
5067 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
5068                             struct ceph_object_id *oid,
5069                             struct ceph_object_locator *oloc,
5070                             struct ceph_watch_item **watchers,
5071                             u32 *num_watchers)
5072 {
5073         struct ceph_osd_request *req;
5074         struct page **pages;
5075         int ret;
5076
5077         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5078         if (!req)
5079                 return -ENOMEM;
5080
5081         ceph_oid_copy(&req->r_base_oid, oid);
5082         ceph_oloc_copy(&req->r_base_oloc, oloc);
5083         req->r_flags = CEPH_OSD_FLAG_READ;
5084
5085         pages = ceph_alloc_page_vector(1, GFP_NOIO);
5086         if (IS_ERR(pages)) {
5087                 ret = PTR_ERR(pages);
5088                 goto out_put_req;
5089         }
5090
5091         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
5092         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
5093                                                  response_data),
5094                                  pages, PAGE_SIZE, 0, false, true);
5095
5096         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5097         if (ret)
5098                 goto out_put_req;
5099
5100         ceph_osdc_start_request(osdc, req, false);
5101         ret = ceph_osdc_wait_request(osdc, req);
5102         if (ret >= 0) {
5103                 void *p = page_address(pages[0]);
5104                 void *const end = p + req->r_ops[0].outdata_len;
5105
5106                 ret = decode_watchers(&p, end, watchers, num_watchers);
5107         }
5108
5109 out_put_req:
5110         ceph_osdc_put_request(req);
5111         return ret;
5112 }
5113 EXPORT_SYMBOL(ceph_osdc_list_watchers);
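/*
 * Sketch of listing watchers and honouring the kfree() rule above:
 *
 *	struct ceph_watch_item *watchers;
 *	u32 num_watchers, i;
 *
 *	ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers, &num_watchers);
 *	if (ret)
 *		return ret;
 *	for (i = 0; i < num_watchers; i++)
 *		...		// watchers[i].name, .cookie, .addr
 *	kfree(watchers);
 */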
5114
5115 /*
5116  * Call all pending notify callbacks - for use after a watch is
5117  * unregistered, to make sure no more callbacks for it will be invoked.
5118  */
5119 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5120 {
5121         dout("%s osdc %p\n", __func__, osdc);
5122         flush_workqueue(osdc->notify_wq);
5123 }
5124 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
5125
5126 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5127 {
5128         down_read(&osdc->lock);
5129         maybe_request_map(osdc);
5130         up_read(&osdc->lock);
5131 }
5132 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
5133
5134 /*
5135  * Execute an OSD class method on an object.
5136  *
5137  * @flags: CEPH_OSD_FLAG_*
5138  * @resp_len: in/out param for reply length
5139  */
5140 int ceph_osdc_call(struct ceph_osd_client *osdc,
5141                    struct ceph_object_id *oid,
5142                    struct ceph_object_locator *oloc,
5143                    const char *class, const char *method,
5144                    unsigned int flags,
5145                    struct page *req_page, size_t req_len,
5146                    struct page **resp_pages, size_t *resp_len)
5147 {
5148         struct ceph_osd_request *req;
5149         int ret;
5150
5151         if (req_len > PAGE_SIZE)
5152                 return -E2BIG;
5153
5154         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5155         if (!req)
5156                 return -ENOMEM;
5157
5158         ceph_oid_copy(&req->r_base_oid, oid);
5159         ceph_oloc_copy(&req->r_base_oloc, oloc);
5160         req->r_flags = flags;
5161
5162         ret = osd_req_op_cls_init(req, 0, class, method);
5163         if (ret)
5164                 goto out_put_req;
5165
5166         if (req_page)
5167                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
5168                                                   0, false, false);
5169         if (resp_pages)
5170                 osd_req_op_cls_response_data_pages(req, 0, resp_pages,
5171                                                    *resp_len, 0, false, false);
5172
5173         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5174         if (ret)
5175                 goto out_put_req;
5176
5177         ceph_osdc_start_request(osdc, req, false);
5178         ret = ceph_osdc_wait_request(osdc, req);
5179         if (ret >= 0) {
5180                 ret = req->r_ops[0].rval;
5181                 if (resp_pages)
5182                         *resp_len = req->r_ops[0].outdata_len;
5183         }
5184
5185 out_put_req:
5186         ceph_osdc_put_request(req);
5187         return ret;
5188 }
5189 EXPORT_SYMBOL(ceph_osdc_call);
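/*
 * Sketch of invoking an OSD class method; "foo" and "bar" are placeholder
 * class/method names, not real cls interfaces:
 *
 *	struct page *reply_page = alloc_page(GFP_NOIO);
 *	size_t reply_len = PAGE_SIZE;
 *
 *	ret = ceph_osdc_call(osdc, oid, oloc, "foo", "bar",
 *			     CEPH_OSD_FLAG_READ, NULL, 0,
 *			     &reply_page, &reply_len);
 *	...			// on success ret is the method's rval and
 *				// reply_len is the actual reply size
 *	__free_page(reply_page);
 */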
5190
5191 /*
5192  * reset all osd connections
5193  */
5194 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5195 {
5196         struct rb_node *n;
5197
5198         down_write(&osdc->lock);
5199         for (n = rb_first(&osdc->osds); n; ) {
5200                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
5201
5202                 n = rb_next(n);
5203                 if (!reopen_osd(osd))
5204                         kick_osd_requests(osd);
5205         }
5206         up_write(&osdc->lock);
5207 }
5208
5209 /*
5210  * init, shutdown
5211  */
5212 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5213 {
5214         int err;
5215
5216         dout("init\n");
5217         osdc->client = client;
5218         init_rwsem(&osdc->lock);
5219         osdc->osds = RB_ROOT;
5220         INIT_LIST_HEAD(&osdc->osd_lru);
5221         spin_lock_init(&osdc->osd_lru_lock);
5222         osd_init(&osdc->homeless_osd);
5223         osdc->homeless_osd.o_osdc = osdc;
5224         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5225         osdc->last_linger_id = CEPH_LINGER_ID_START;
5226         osdc->linger_requests = RB_ROOT;
5227         osdc->map_checks = RB_ROOT;
5228         osdc->linger_map_checks = RB_ROOT;
5229         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5230         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5231
5232         err = -ENOMEM;
5233         osdc->osdmap = ceph_osdmap_alloc();
5234         if (!osdc->osdmap)
5235                 goto out;
5236
5237         osdc->req_mempool = mempool_create_slab_pool(10,
5238                                                      ceph_osd_request_cache);
5239         if (!osdc->req_mempool)
5240                 goto out_map;
5241
5242         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5243                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
5244         if (err < 0)
5245                 goto out_mempool;
5246         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5247                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
5248                                 "osd_op_reply");
5249         if (err < 0)
5250                 goto out_msgpool;
5251
5252         err = -ENOMEM;
5253         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5254         if (!osdc->notify_wq)
5255                 goto out_msgpool_reply;
5256
5257         osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5258         if (!osdc->completion_wq)
5259                 goto out_notify_wq;
5260
5261         schedule_delayed_work(&osdc->timeout_work,
5262                               osdc->client->options->osd_keepalive_timeout);
5263         schedule_delayed_work(&osdc->osds_timeout_work,
5264             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5265
5266         return 0;
5267
5268 out_notify_wq:
5269         destroy_workqueue(osdc->notify_wq);
5270 out_msgpool_reply:
5271         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5272 out_msgpool:
5273         ceph_msgpool_destroy(&osdc->msgpool_op);
5274 out_mempool:
5275         mempool_destroy(osdc->req_mempool);
5276 out_map:
5277         ceph_osdmap_destroy(osdc->osdmap);
5278 out:
5279         return err;
5280 }
5281
5282 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5283 {
5284         destroy_workqueue(osdc->completion_wq);
5285         destroy_workqueue(osdc->notify_wq);
5286         cancel_delayed_work_sync(&osdc->timeout_work);
5287         cancel_delayed_work_sync(&osdc->osds_timeout_work);
5288
5289         down_write(&osdc->lock);
5290         while (!RB_EMPTY_ROOT(&osdc->osds)) {
5291                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5292                                                 struct ceph_osd, o_node);
5293                 close_osd(osd);
5294         }
5295         up_write(&osdc->lock);
5296         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5297         osd_cleanup(&osdc->homeless_osd);
5298
5299         WARN_ON(!list_empty(&osdc->osd_lru));
5300         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5301         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5302         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5303         WARN_ON(atomic_read(&osdc->num_requests));
5304         WARN_ON(atomic_read(&osdc->num_homeless));
5305
5306         ceph_osdmap_destroy(osdc->osdmap);
5307         mempool_destroy(osdc->req_mempool);
5308         ceph_msgpool_destroy(&osdc->msgpool_op);
5309         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5310 }
5311
5312 static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
5313                                      u64 src_snapid, u64 src_version,
5314                                      struct ceph_object_id *src_oid,
5315                                      struct ceph_object_locator *src_oloc,
5316                                      u32 src_fadvise_flags,
5317                                      u32 dst_fadvise_flags,
5318                                      u32 truncate_seq, u64 truncate_size,
5319                                      u8 copy_from_flags)
5320 {
5321         struct ceph_osd_req_op *op;
5322         struct page **pages;
5323         void *p, *end;
5324
5325         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
5326         if (IS_ERR(pages))
5327                 return PTR_ERR(pages);
5328
5329         op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
5330                               dst_fadvise_flags);
5331         op->copy_from.snapid = src_snapid;
5332         op->copy_from.src_version = src_version;
5333         op->copy_from.flags = copy_from_flags;
5334         op->copy_from.src_fadvise_flags = src_fadvise_flags;
5335
5336         p = page_address(pages[0]);
5337         end = p + PAGE_SIZE;
5338         ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
5339         encode_oloc(&p, end, src_oloc);
5340         ceph_encode_32(&p, truncate_seq);
5341         ceph_encode_64(&p, truncate_size);
5342         op->indata_len = PAGE_SIZE - (end - p);
5343
5344         ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
5345                                  op->indata_len, 0, false, true);
5346         return 0;
5347 }
5348
5349 int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
5350                         u64 src_snapid, u64 src_version,
5351                         struct ceph_object_id *src_oid,
5352                         struct ceph_object_locator *src_oloc,
5353                         u32 src_fadvise_flags,
5354                         struct ceph_object_id *dst_oid,
5355                         struct ceph_object_locator *dst_oloc,
5356                         u32 dst_fadvise_flags,
5357                         u32 truncate_seq, u64 truncate_size,
5358                         u8 copy_from_flags)
5359 {
5360         struct ceph_osd_request *req;
5361         int ret;
5362
5363         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
5364         if (!req)
5365                 return -ENOMEM;
5366
5367         req->r_flags = CEPH_OSD_FLAG_WRITE;
5368
5369         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
5370         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
5371
5372         ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
5373                                         src_oloc, src_fadvise_flags,
5374                                         dst_fadvise_flags, truncate_seq,
5375                                         truncate_size, copy_from_flags);
5376         if (ret)
5377                 goto out;
5378
5379         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
5380         if (ret)
5381                 goto out;
5382
5383         ceph_osdc_start_request(osdc, req, false);
5384         ret = ceph_osdc_wait_request(osdc, req);
5385
5386 out:
5387         ceph_osdc_put_request(req);
5388         return ret;
5389 }
5390 EXPORT_SYMBOL(ceph_osdc_copy_from);
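/*
 * Sketch of a whole-object copy as a filesystem client might issue it
 * (apart from CEPH_NOSNAP, the argument values are illustrative
 * placeholders; 0 for copy_from_flags means no special behaviour):
 *
 *	ret = ceph_osdc_copy_from(osdc, CEPH_NOSNAP, 0,
 *				  &src_oid, &src_oloc, 0,
 *				  &dst_oid, &dst_oloc, 0,
 *				  truncate_seq, truncate_size, 0);
 */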
5391
5392 int __init ceph_osdc_setup(void)
5393 {
5394         size_t size = sizeof(struct ceph_osd_request) +
5395             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5396
5397         BUG_ON(ceph_osd_request_cache);
5398         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5399                                                    0, 0, NULL);
5400
5401         return ceph_osd_request_cache ? 0 : -ENOMEM;
5402 }
5403
5404 void ceph_osdc_cleanup(void)
5405 {
5406         BUG_ON(!ceph_osd_request_cache);
5407         kmem_cache_destroy(ceph_osd_request_cache);
5408         ceph_osd_request_cache = NULL;
5409 }
5410
5411 /*
5412  * handle incoming message
5413  */
5414 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5415 {
5416         struct ceph_osd *osd = con->private;
5417         struct ceph_osd_client *osdc = osd->o_osdc;
5418         int type = le16_to_cpu(msg->hdr.type);
5419
5420         switch (type) {
5421         case CEPH_MSG_OSD_MAP:
5422                 ceph_osdc_handle_map(osdc, msg);
5423                 break;
5424         case CEPH_MSG_OSD_OPREPLY:
5425                 handle_reply(osd, msg);
5426                 break;
5427         case CEPH_MSG_OSD_BACKOFF:
5428                 handle_backoff(osd, msg);
5429                 break;
5430         case CEPH_MSG_WATCH_NOTIFY:
5431                 handle_watch_notify(osdc, msg);
5432                 break;
5433
5434         default:
5435                 pr_err("received unknown message type %d %s\n", type,
5436                        ceph_msg_type_name(type));
5437         }
5438
5439         ceph_msg_put(msg);
5440 }
5441
5442 /*
5443  * Look up and return the preallocated message for an incoming reply.
5444  * We don't yet handle a data portion larger than what was preallocated;
5445  * for now, such a message is simply skipped.
5446  */
5447 static struct ceph_msg *get_reply(struct ceph_connection *con,
5448                                   struct ceph_msg_header *hdr,
5449                                   int *skip)
5450 {
5451         struct ceph_osd *osd = con->private;
5452         struct ceph_osd_client *osdc = osd->o_osdc;
5453         struct ceph_msg *m = NULL;
5454         struct ceph_osd_request *req;
5455         int front_len = le32_to_cpu(hdr->front_len);
5456         int data_len = le32_to_cpu(hdr->data_len);
5457         u64 tid = le64_to_cpu(hdr->tid);
5458
5459         down_read(&osdc->lock);
5460         if (!osd_registered(osd)) {
5461                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5462                 *skip = 1;
5463                 goto out_unlock_osdc;
5464         }
5465         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5466
5467         mutex_lock(&osd->lock);
5468         req = lookup_request(&osd->o_requests, tid);
5469         if (!req) {
5470                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5471                      osd->o_osd, tid);
5472                 *skip = 1;
5473                 goto out_unlock_session;
5474         }
5475
5476         ceph_msg_revoke_incoming(req->r_reply);
5477
5478         if (front_len > req->r_reply->front_alloc_len) {
5479                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5480                         __func__, osd->o_osd, req->r_tid, front_len,
5481                         req->r_reply->front_alloc_len);
5482                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5483                                  false);
5484                 if (!m)
5485                         goto out_unlock_session;
5486                 ceph_msg_put(req->r_reply);
5487                 req->r_reply = m;
5488         }
5489
5490         if (data_len > req->r_reply->data_length) {
5491                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5492                         __func__, osd->o_osd, req->r_tid, data_len,
5493                         req->r_reply->data_length);
5494                 m = NULL;
5495                 *skip = 1;
5496                 goto out_unlock_session;
5497         }
5498
5499         m = ceph_msg_get(req->r_reply);
5500         dout("get_reply tid %lld %p\n", tid, m);
5501
5502 out_unlock_session:
5503         mutex_unlock(&osd->lock);
5504 out_unlock_osdc:
5505         up_read(&osdc->lock);
5506         return m;
5507 }
5508
5509 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5510 {
5511         struct ceph_msg *m;
5512         int type = le16_to_cpu(hdr->type);
5513         u32 front_len = le32_to_cpu(hdr->front_len);
5514         u32 data_len = le32_to_cpu(hdr->data_len);
5515
5516         m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
5517         if (!m)
5518                 return NULL;
5519
5520         if (data_len) {
5521                 struct page **pages;
5522
5523                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5524                                                GFP_NOIO);
5525                 if (IS_ERR(pages)) {
5526                         ceph_msg_put(m);
5527                         return NULL;
5528                 }
5529
5530                 ceph_msg_data_add_pages(m, pages, data_len, 0, true);
5531         }
5532
5533         return m;
5534 }
5535
5536 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5537                                   struct ceph_msg_header *hdr,
5538                                   int *skip)
5539 {
5540         struct ceph_osd *osd = con->private;
5541         int type = le16_to_cpu(hdr->type);
5542
5543         *skip = 0;
5544         switch (type) {
5545         case CEPH_MSG_OSD_MAP:
5546         case CEPH_MSG_OSD_BACKOFF:
5547         case CEPH_MSG_WATCH_NOTIFY:
5548                 return alloc_msg_with_page_vector(hdr);
5549         case CEPH_MSG_OSD_OPREPLY:
5550                 return get_reply(con, hdr, skip);
5551         default:
5552                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5553                         osd->o_osd, type);
5554                 *skip = 1;
5555                 return NULL;
5556         }
5557 }
5558
5559 /*
5560  * Wrappers to refcount the containing ceph_osd struct
5561  */
5562 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5563 {
5564         struct ceph_osd *osd = con->private;
5565         if (get_osd(osd))
5566                 return con;
5567         return NULL;
5568 }
5569
5570 static void put_osd_con(struct ceph_connection *con)
5571 {
5572         struct ceph_osd *osd = con->private;
5573         put_osd(osd);
5574 }
5575
5576 /*
5577  * authentication
5578  */
5579 /*
5580  * Note: returned pointer is the address of a structure that's
5581  * managed separately.  Caller must *not* attempt to free it.
5582  */
5583 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5584                                         int *proto, int force_new)
5585 {
5586         struct ceph_osd *o = con->private;
5587         struct ceph_osd_client *osdc = o->o_osdc;
5588         struct ceph_auth_client *ac = osdc->client->monc.auth;
5589         struct ceph_auth_handshake *auth = &o->o_auth;
5590
5591         if (force_new && auth->authorizer) {
5592                 ceph_auth_destroy_authorizer(auth->authorizer);
5593                 auth->authorizer = NULL;
5594         }
5595         if (!auth->authorizer) {
5596                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5597                                                       auth);
5598                 if (ret)
5599                         return ERR_PTR(ret);
5600         } else {
5601                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5602                                                      auth);
5603                 if (ret)
5604                         return ERR_PTR(ret);
5605         }
5606         *proto = ac->protocol;
5607
5608         return auth;
5609 }
5610
5611 static int add_authorizer_challenge(struct ceph_connection *con,
5612                                     void *challenge_buf, int challenge_buf_len)
5613 {
5614         struct ceph_osd *o = con->private;
5615         struct ceph_osd_client *osdc = o->o_osdc;
5616         struct ceph_auth_client *ac = osdc->client->monc.auth;
5617
5618         return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5619                                             challenge_buf, challenge_buf_len);
5620 }
5621
5622 static int verify_authorizer_reply(struct ceph_connection *con)
5623 {
5624         struct ceph_osd *o = con->private;
5625         struct ceph_osd_client *osdc = o->o_osdc;
5626         struct ceph_auth_client *ac = osdc->client->monc.auth;
5627
5628         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5629 }
5630
5631 static int invalidate_authorizer(struct ceph_connection *con)
5632 {
5633         struct ceph_osd *o = con->private;
5634         struct ceph_osd_client *osdc = o->o_osdc;
5635         struct ceph_auth_client *ac = osdc->client->monc.auth;
5636
5637         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5638         return ceph_monc_validate_auth(&osdc->client->monc);
5639 }
5640
5641 static void osd_reencode_message(struct ceph_msg *msg)
5642 {
5643         int type = le16_to_cpu(msg->hdr.type);
5644
5645         if (type == CEPH_MSG_OSD_OP)
5646                 encode_request_finish(msg);
5647 }
5648
5649 static int osd_sign_message(struct ceph_msg *msg)
5650 {
5651         struct ceph_osd *o = msg->con->private;
5652         struct ceph_auth_handshake *auth = &o->o_auth;
5653
5654         return ceph_auth_sign_message(auth, msg);
5655 }
5656
5657 static int osd_check_message_signature(struct ceph_msg *msg)
5658 {
5659         struct ceph_osd *o = msg->con->private;
5660         struct ceph_auth_handshake *auth = &o->o_auth;
5661
5662         return ceph_auth_check_message_signature(auth, msg);
5663 }
5664
5665 static const struct ceph_connection_operations osd_con_ops = {
5666         .get = get_osd_con,
5667         .put = put_osd_con,
5668         .dispatch = dispatch,
5669         .get_authorizer = get_authorizer,
5670         .add_authorizer_challenge = add_authorizer_challenge,
5671         .verify_authorizer_reply = verify_authorizer_reply,
5672         .invalidate_authorizer = invalidate_authorizer,
5673         .alloc_msg = alloc_msg,
5674         .reencode_message = osd_reencode_message,
5675         .sign_message = osd_sign_message,
5676         .check_message_signature = osd_check_message_signature,
5677         .fault = osd_fault,
5678 };