// SPDX-License-Identifier: GPL-2.0

#include <linux/ceph/ceph_debug.h>

#include <linux/module.h>
#include <linux/err.h>
#include <linux/highmem.h>
#include <linux/mm.h>
#include <linux/pagemap.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#ifdef CONFIG_BLOCK
#include <linux/bio.h>
#endif

#include <linux/ceph/ceph_features.h>
#include <linux/ceph/libceph.h>
#include <linux/ceph/osd_client.h>
#include <linux/ceph/messenger.h>
#include <linux/ceph/decode.h>
#include <linux/ceph/auth.h>
#include <linux/ceph/pagelist.h>
#include <linux/ceph/striper.h>

#define OSD_OPREPLY_FRONT_LEN	512

static struct kmem_cache	*ceph_osd_request_cache;

static const struct ceph_connection_operations osd_con_ops;

/*
 * Implement client access to distributed object storage cluster.
 *
 * All data objects are stored within a cluster/cloud of OSDs, or
 * "object storage devices."  (Note that Ceph OSDs have _nothing_ to
 * do with the T10 OSD extensions to SCSI.)  Ceph OSDs are simply
 * remote daemons serving up and coordinating consistent and safe
 * access to storage.
 *
 * Cluster membership and the mapping of data objects onto storage devices
 * are described by the osd map.
 *
 * We keep track of pending OSD requests (read, write), resubmit
 * requests to different OSDs when the cluster topology/data layout
 * change, or retry the affected requests when the communications
 * channel with an OSD is reset.
 */

static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req);
static void link_linger(struct ceph_osd *osd,
			struct ceph_osd_linger_request *lreq);
static void unlink_linger(struct ceph_osd *osd,
			  struct ceph_osd_linger_request *lreq);
static void clear_backoffs(struct ceph_osd *osd);

#if 1
static inline bool rwsem_is_wrlocked(struct rw_semaphore *sem)
{
	bool wrlocked = true;

	if (unlikely(down_read_trylock(sem))) {
		wrlocked = false;
		up_read(sem);
	}

	return wrlocked;
}
static inline void verify_osdc_locked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_locked(&osdc->lock));
}
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc)
{
	WARN_ON(!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_osd_locked(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	WARN_ON(!(mutex_is_locked(&osd->lock) &&
		  rwsem_is_locked(&osdc->lock)) &&
		!rwsem_is_wrlocked(&osdc->lock));
}
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq)
{
	WARN_ON(!mutex_is_locked(&lreq->lock));
}
#else
static inline void verify_osdc_locked(struct ceph_osd_client *osdc) { }
static inline void verify_osdc_wrlocked(struct ceph_osd_client *osdc) { }
static inline void verify_osd_locked(struct ceph_osd *osd) { }
static inline void verify_lreq_locked(struct ceph_osd_linger_request *lreq) { }
#endif

/*
 * calculate the mapping of a file extent onto an object, and fill out the
 * request accordingly.  shorten extent as necessary if it crosses an
 * object boundary.
 *
 * fill osd op in request message.
 */
static int calc_layout(struct ceph_file_layout *layout, u64 off, u64 *plen,
			u64 *objnum, u64 *objoff, u64 *objlen)
{
	u64 orig_len = *plen;
	u32 xlen;

	/* object extent? */
	ceph_calc_file_object_mapping(layout, off, orig_len, objnum,
				      objoff, &xlen);
	*objlen = xlen;
	if (*objlen < orig_len) {
		*plen = *objlen;
		dout(" skipping last %llu, final file extent %llu~%llu\n",
		     orig_len - *plen, off, *plen);
	}

	dout("calc_layout objnum=%llx %llu~%llu\n", *objnum, *objoff, *objlen);
	return 0;
}
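
/*
 * Worked example (illustrative, assuming the default layout of
 * stripe_count == 1 with 4M objects): a file extent off=3M, *plen=2M
 * crosses the first object boundary, so ceph_calc_file_object_mapping()
 * yields objnum=0, objoff=3M, xlen=1M.  Since *objlen < orig_len,
 * *plen is shortened to 1M and the caller must issue a follow-up
 * request for the remaining 1M, which maps to objnum=1, objoff=0.
 */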

static void ceph_osd_data_init(struct ceph_osd_data *osd_data)
{
	memset(osd_data, 0, sizeof (*osd_data));
	osd_data->type = CEPH_OSD_DATA_TYPE_NONE;
}

/*
 * Consumes @pages if @own_pages is true.
 */
static void ceph_osd_data_pages_init(struct ceph_osd_data *osd_data,
			struct page **pages, u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGES;
	osd_data->pages = pages;
	osd_data->length = length;
	osd_data->alignment = alignment;
	osd_data->pages_from_pool = pages_from_pool;
	osd_data->own_pages = own_pages;
}

/*
 * Consumes a ref on @pagelist.
 */
static void ceph_osd_data_pagelist_init(struct ceph_osd_data *osd_data,
			struct ceph_pagelist *pagelist)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_PAGELIST;
	osd_data->pagelist = pagelist;
}

#ifdef CONFIG_BLOCK
static void ceph_osd_data_bio_init(struct ceph_osd_data *osd_data,
				   struct ceph_bio_iter *bio_pos,
				   u32 bio_length)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BIO;
	osd_data->bio_pos = *bio_pos;
	osd_data->bio_length = bio_length;
}
#endif /* CONFIG_BLOCK */

static void ceph_osd_data_bvecs_init(struct ceph_osd_data *osd_data,
				     struct ceph_bvec_iter *bvec_pos,
				     u32 num_bvecs)
{
	osd_data->type = CEPH_OSD_DATA_TYPE_BVECS;
	osd_data->bvec_pos = *bvec_pos;
	osd_data->num_bvecs = num_bvecs;
}

static struct ceph_osd_data *
osd_req_op_raw_data_in(struct ceph_osd_request *osd_req, unsigned int which)
{
	BUG_ON(which >= osd_req->r_num_ops);

	return &osd_req->r_ops[which].raw_data_in;
}

struct ceph_osd_data *
osd_req_op_extent_osd_data(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	return osd_req_op_data(osd_req, which, extent, osd_data);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data);

void osd_req_op_raw_data_in_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_raw_data_in(osd_req, which);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_raw_data_in_pages);

void osd_req_op_extent_osd_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages,
			u64 length, u32 alignment,
			bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pages);
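
/*
 * Illustrative sketch (not a verbatim caller): a page-vector read
 * pairs an extent op with the helper above.  ceph_alloc_page_vector()
 * and calc_pages_for() are the usual libceph companions:
 *
 *	pages = ceph_alloc_page_vector(calc_pages_for(0, len), GFP_NOFS);
 *	osd_req_op_extent_init(req, 0, CEPH_OSD_OP_READ, off, len, 0, 0);
 *	osd_req_op_extent_osd_data_pages(req, 0, pages, len, 0,
 *					 false, false);
 *
 * With own_pages == false the caller keeps ownership of the vector;
 * passing true hands it to the request, which then frees it in
 * ceph_osd_data_release().
 */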

void osd_req_op_extent_osd_data_pagelist(struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_pagelist);

#ifdef CONFIG_BLOCK
void osd_req_op_extent_osd_data_bio(struct ceph_osd_request *osd_req,
				    unsigned int which,
				    struct ceph_bio_iter *bio_pos,
				    u32 bio_length)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bio_init(osd_data, bio_pos, bio_length);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bio);
#endif /* CONFIG_BLOCK */

void osd_req_op_extent_osd_data_bvecs(struct ceph_osd_request *osd_req,
				      unsigned int which,
				      struct bio_vec *bvecs, u32 num_bvecs,
				      u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvecs);

void osd_req_op_extent_osd_data_bvec_pos(struct ceph_osd_request *osd_req,
					 unsigned int which,
					 struct ceph_bvec_iter *bvec_pos)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, extent, osd_data);
	ceph_osd_data_bvecs_init(osd_data, bvec_pos, 0);
}
EXPORT_SYMBOL(osd_req_op_extent_osd_data_bvec_pos);

static void osd_req_op_cls_request_info_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_info);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
}

void osd_req_op_cls_request_data_pagelist(
			struct ceph_osd_request *osd_req,
			unsigned int which, struct ceph_pagelist *pagelist)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pagelist_init(osd_data, pagelist);
	osd_req->r_ops[which].cls.indata_len += pagelist->length;
	osd_req->r_ops[which].indata_len += pagelist->length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pagelist);

void osd_req_op_cls_request_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
	osd_req->r_ops[which].cls.indata_len += length;
	osd_req->r_ops[which].indata_len += length;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_pages);

void osd_req_op_cls_request_data_bvecs(struct ceph_osd_request *osd_req,
				       unsigned int which,
				       struct bio_vec *bvecs, u32 num_bvecs,
				       u32 bytes)
{
	struct ceph_osd_data *osd_data;
	struct ceph_bvec_iter it = {
		.bvecs = bvecs,
		.iter = { .bi_size = bytes },
	};

	osd_data = osd_req_op_data(osd_req, which, cls, request_data);
	ceph_osd_data_bvecs_init(osd_data, &it, num_bvecs);
	osd_req->r_ops[which].cls.indata_len += bytes;
	osd_req->r_ops[which].indata_len += bytes;
}
EXPORT_SYMBOL(osd_req_op_cls_request_data_bvecs);

void osd_req_op_cls_response_data_pages(struct ceph_osd_request *osd_req,
			unsigned int which, struct page **pages, u64 length,
			u32 alignment, bool pages_from_pool, bool own_pages)
{
	struct ceph_osd_data *osd_data;

	osd_data = osd_req_op_data(osd_req, which, cls, response_data);
	ceph_osd_data_pages_init(osd_data, pages, length, alignment,
				pages_from_pool, own_pages);
}
EXPORT_SYMBOL(osd_req_op_cls_response_data_pages);

static u64 ceph_osd_data_length(struct ceph_osd_data *osd_data)
{
	switch (osd_data->type) {
	case CEPH_OSD_DATA_TYPE_NONE:
		return 0;
	case CEPH_OSD_DATA_TYPE_PAGES:
		return osd_data->length;
	case CEPH_OSD_DATA_TYPE_PAGELIST:
		return (u64)osd_data->pagelist->length;
#ifdef CONFIG_BLOCK
	case CEPH_OSD_DATA_TYPE_BIO:
		return (u64)osd_data->bio_length;
#endif /* CONFIG_BLOCK */
	case CEPH_OSD_DATA_TYPE_BVECS:
		return osd_data->bvec_pos.iter.bi_size;
	default:
		WARN(true, "unrecognized data type %d\n", (int)osd_data->type);
		return 0;
	}
}

static void ceph_osd_data_release(struct ceph_osd_data *osd_data)
{
	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES && osd_data->own_pages) {
		int num_pages;

		num_pages = calc_pages_for((u64)osd_data->alignment,
						(u64)osd_data->length);
		ceph_release_page_vector(osd_data->pages, num_pages);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		ceph_pagelist_release(osd_data->pagelist);
	}
	ceph_osd_data_init(osd_data);
}

static void osd_req_op_data_release(struct ceph_osd_request *osd_req,
			unsigned int which)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];

	switch (op->op) {
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
		ceph_osd_data_release(&op->extent.osd_data);
		break;
	case CEPH_OSD_OP_CALL:
		ceph_osd_data_release(&op->cls.request_info);
		ceph_osd_data_release(&op->cls.request_data);
		ceph_osd_data_release(&op->cls.response_data);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		ceph_osd_data_release(&op->xattr.osd_data);
		break;
	case CEPH_OSD_OP_STAT:
		ceph_osd_data_release(&op->raw_data_in);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		ceph_osd_data_release(&op->notify_ack.request_data);
		break;
	case CEPH_OSD_OP_NOTIFY:
		ceph_osd_data_release(&op->notify.request_data);
		ceph_osd_data_release(&op->notify.response_data);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		ceph_osd_data_release(&op->list_watchers.response_data);
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		ceph_osd_data_release(&op->copy_from.osd_data);
		break;
	default:
		break;
	}
}

/*
 * Assumes @t is zero-initialized.
 */
static void target_init(struct ceph_osd_request_target *t)
{
	ceph_oid_init(&t->base_oid);
	ceph_oloc_init(&t->base_oloc);
	ceph_oid_init(&t->target_oid);
	ceph_oloc_init(&t->target_oloc);

	ceph_osds_init(&t->acting);
	ceph_osds_init(&t->up);
	t->size = -1;
	t->min_size = -1;

	t->osd = CEPH_HOMELESS_OSD;
}

static void target_copy(struct ceph_osd_request_target *dest,
			const struct ceph_osd_request_target *src)
{
	ceph_oid_copy(&dest->base_oid, &src->base_oid);
	ceph_oloc_copy(&dest->base_oloc, &src->base_oloc);
	ceph_oid_copy(&dest->target_oid, &src->target_oid);
	ceph_oloc_copy(&dest->target_oloc, &src->target_oloc);

	dest->pgid = src->pgid; /* struct */
	dest->spgid = src->spgid; /* struct */
	dest->pg_num = src->pg_num;
	dest->pg_num_mask = src->pg_num_mask;
	ceph_osds_copy(&dest->acting, &src->acting);
	ceph_osds_copy(&dest->up, &src->up);
	dest->size = src->size;
	dest->min_size = src->min_size;
	dest->sort_bitwise = src->sort_bitwise;
	dest->recovery_deletes = src->recovery_deletes;

	dest->flags = src->flags;
	dest->used_replica = src->used_replica;
	dest->paused = src->paused;

	dest->epoch = src->epoch;
	dest->last_force_resend = src->last_force_resend;

	dest->osd = src->osd;
}

static void target_destroy(struct ceph_osd_request_target *t)
{
	ceph_oid_destroy(&t->base_oid);
	ceph_oloc_destroy(&t->base_oloc);
	ceph_oid_destroy(&t->target_oid);
	ceph_oloc_destroy(&t->target_oloc);
}

/*
 * requests
 */
static void request_release_checks(struct ceph_osd_request *req)
{
	WARN_ON(!RB_EMPTY_NODE(&req->r_node));
	WARN_ON(!RB_EMPTY_NODE(&req->r_mc_node));
	WARN_ON(!list_empty(&req->r_private_item));
	WARN_ON(req->r_osd);
}

static void ceph_osdc_release_request(struct kref *kref)
{
	struct ceph_osd_request *req = container_of(kref,
					    struct ceph_osd_request, r_kref);
	unsigned int which;

	dout("%s %p (r_request %p r_reply %p)\n", __func__, req,
	     req->r_request, req->r_reply);
	request_release_checks(req);

	if (req->r_request)
		ceph_msg_put(req->r_request);
	if (req->r_reply)
		ceph_msg_put(req->r_reply);

	for (which = 0; which < req->r_num_ops; which++)
		osd_req_op_data_release(req, which);

	target_destroy(&req->r_t);
	ceph_put_snap_context(req->r_snapc);

	if (req->r_mempool)
		mempool_free(req, req->r_osdc->req_mempool);
	else if (req->r_num_ops <= CEPH_OSD_SLAB_OPS)
		kmem_cache_free(ceph_osd_request_cache, req);
	else
		kfree(req);
}

void ceph_osdc_get_request(struct ceph_osd_request *req)
{
	dout("%s %p (was %d)\n", __func__, req,
	     kref_read(&req->r_kref));
	kref_get(&req->r_kref);
}
EXPORT_SYMBOL(ceph_osdc_get_request);

void ceph_osdc_put_request(struct ceph_osd_request *req)
{
	if (req) {
		dout("%s %p (was %d)\n", __func__, req,
		     kref_read(&req->r_kref));
		kref_put(&req->r_kref, ceph_osdc_release_request);
	}
}
EXPORT_SYMBOL(ceph_osdc_put_request);

static void request_init(struct ceph_osd_request *req)
{
	/* req only, each op is zeroed in _osd_req_op_init() */
	memset(req, 0, sizeof(*req));

	kref_init(&req->r_kref);
	init_completion(&req->r_completion);
	RB_CLEAR_NODE(&req->r_node);
	RB_CLEAR_NODE(&req->r_mc_node);
	INIT_LIST_HEAD(&req->r_private_item);

	target_init(&req->r_t);
}

/*
 * This is ugly, but it allows us to reuse linger registration and ping
 * requests, keeping the structure of the code around send_linger{_ping}()
 * reasonable.  Setting up a min_nr=2 mempool for each linger request
 * and dealing with copying ops (this blasts req only, watch op remains
 * intact) isn't any better.
 */
static void request_reinit(struct ceph_osd_request *req)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	bool mempool = req->r_mempool;
	unsigned int num_ops = req->r_num_ops;
	u64 snapid = req->r_snapid;
	struct ceph_snap_context *snapc = req->r_snapc;
	bool linger = req->r_linger;
	struct ceph_msg *request_msg = req->r_request;
	struct ceph_msg *reply_msg = req->r_reply;

	dout("%s req %p\n", __func__, req);
	WARN_ON(kref_read(&req->r_kref) != 1);
	request_release_checks(req);

	WARN_ON(kref_read(&request_msg->kref) != 1);
	WARN_ON(kref_read(&reply_msg->kref) != 1);
	target_destroy(&req->r_t);

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = snapid;
	req->r_snapc = snapc;
	req->r_linger = linger;
	req->r_request = request_msg;
	req->r_reply = reply_msg;
}

struct ceph_osd_request *ceph_osdc_alloc_request(struct ceph_osd_client *osdc,
					       struct ceph_snap_context *snapc,
					       unsigned int num_ops,
					       bool use_mempool,
					       gfp_t gfp_flags)
{
	struct ceph_osd_request *req;

	if (use_mempool) {
		BUG_ON(num_ops > CEPH_OSD_SLAB_OPS);
		req = mempool_alloc(osdc->req_mempool, gfp_flags);
	} else if (num_ops <= CEPH_OSD_SLAB_OPS) {
		req = kmem_cache_alloc(ceph_osd_request_cache, gfp_flags);
	} else {
		BUG_ON(num_ops > CEPH_OSD_MAX_OPS);
		req = kmalloc(struct_size(req, r_ops, num_ops), gfp_flags);
	}
	if (unlikely(!req))
		return NULL;

	request_init(req);
	req->r_osdc = osdc;
	req->r_mempool = use_mempool;
	req->r_num_ops = num_ops;
	req->r_snapid = CEPH_NOSNAP;
	req->r_snapc = ceph_get_snap_context(snapc);

	dout("%s req %p\n", __func__, req);
	return req;
}
EXPORT_SYMBOL(ceph_osdc_alloc_request);
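
/*
 * Illustrative allocation flow (a sketch, not a verbatim caller):
 *
 *	req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOFS);
 *	if (!req)
 *		return -ENOMEM;
 *	...set up r_base_oid, r_base_oloc and the op(s)...
 *	ret = ceph_osdc_alloc_messages(req, GFP_NOFS);
 *
 * Requests with up to CEPH_OSD_SLAB_OPS ops come from the slab cache;
 * larger ones fall back to kmalloc().  Mempool-backed requests are
 * capped at CEPH_OSD_SLAB_OPS ops (see the BUG_ON above) so they fit
 * the preallocated mempool elements.
 */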

static int ceph_oloc_encoding_size(const struct ceph_object_locator *oloc)
{
	return 8 + 4 + 4 + 4 + (oloc->pool_ns ? oloc->pool_ns->len : 0);
}

static int __ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp,
				      int num_request_data_items,
				      int num_reply_data_items)
{
	struct ceph_osd_client *osdc = req->r_osdc;
	struct ceph_msg *msg;
	int msg_size;

	WARN_ON(req->r_request || req->r_reply);
	WARN_ON(ceph_oid_empty(&req->r_base_oid));
	WARN_ON(ceph_oloc_empty(&req->r_base_oloc));

	/* create request message */
	msg_size = CEPH_ENCODING_START_BLK_LEN +
			CEPH_PGID_ENCODING_LEN + 1; /* spgid */
	msg_size += 4 + 4 + 4; /* hash, osdmap_epoch, flags */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			sizeof(struct ceph_osd_reqid); /* reqid */
	msg_size += sizeof(struct ceph_blkin_trace_info); /* trace */
	msg_size += 4 + sizeof(struct ceph_timespec); /* client_inc, mtime */
	msg_size += CEPH_ENCODING_START_BLK_LEN +
			ceph_oloc_encoding_size(&req->r_base_oloc); /* oloc */
	msg_size += 4 + req->r_base_oid.name_len; /* oid */
	msg_size += 2 + req->r_num_ops * sizeof(struct ceph_osd_op);
	msg_size += 8; /* snapid */
	msg_size += 8; /* snap_seq */
	msg_size += 4 + 8 * (req->r_snapc ? req->r_snapc->num_snaps : 0);
	msg_size += 4 + 8; /* retry_attempt, features */

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op, msg_size,
				       num_request_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OP, msg_size,
				    num_request_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	memset(msg->front.iov_base, 0, msg->front.iov_len);
	req->r_request = msg;

	/* create reply message */
	msg_size = OSD_OPREPLY_FRONT_LEN;
	msg_size += req->r_base_oid.name_len;
	msg_size += req->r_num_ops * sizeof(struct ceph_osd_op);

	if (req->r_mempool)
		msg = ceph_msgpool_get(&osdc->msgpool_op_reply, msg_size,
				       num_reply_data_items);
	else
		msg = ceph_msg_new2(CEPH_MSG_OSD_OPREPLY, msg_size,
				    num_reply_data_items, gfp, true);
	if (!msg)
		return -ENOMEM;

	req->r_reply = msg;

	return 0;
}

static bool osd_req_opcode_valid(u16 opcode)
{
	switch (opcode) {
#define GENERATE_CASE(op, opcode, str)	case CEPH_OSD_OP_##op: return true;
__CEPH_FORALL_OSD_OPS(GENERATE_CASE)
#undef GENERATE_CASE
	default:
		return false;
	}
}

static void get_num_data_items(struct ceph_osd_request *req,
			       int *num_request_data_items,
			       int *num_reply_data_items)
{
	struct ceph_osd_req_op *op;

	*num_request_data_items = 0;
	*num_reply_data_items = 0;

	for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
		switch (op->op) {
		/* request */
		case CEPH_OSD_OP_WRITE:
		case CEPH_OSD_OP_WRITEFULL:
		case CEPH_OSD_OP_SETXATTR:
		case CEPH_OSD_OP_CMPXATTR:
		case CEPH_OSD_OP_NOTIFY_ACK:
		case CEPH_OSD_OP_COPY_FROM2:
			*num_request_data_items += 1;
			break;

		/* reply */
		case CEPH_OSD_OP_STAT:
		case CEPH_OSD_OP_READ:
		case CEPH_OSD_OP_LIST_WATCHERS:
			*num_reply_data_items += 1;
			break;

		/* both */
		case CEPH_OSD_OP_NOTIFY:
			*num_request_data_items += 1;
			*num_reply_data_items += 1;
			break;
		case CEPH_OSD_OP_CALL:
			*num_request_data_items += 2;
			*num_reply_data_items += 1;
			break;

		default:
			WARN_ON(!osd_req_opcode_valid(op->op));
			break;
		}
	}
}
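
/*
 * For example, a single CEPH_OSD_OP_CALL op counts as two request
 * data items (class/method info and input data) plus one reply data
 * item (response data) -- exactly the three ceph_osd_data fields
 * released for it in osd_req_op_data_release().
 */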

/*
 * oid, oloc and OSD op opcode(s) must be filled in before this function
 * is called.
 */
int ceph_osdc_alloc_messages(struct ceph_osd_request *req, gfp_t gfp)
{
	int num_request_data_items, num_reply_data_items;

	get_num_data_items(req, &num_request_data_items, &num_reply_data_items);
	return __ceph_osdc_alloc_messages(req, gfp, num_request_data_items,
					  num_reply_data_items);
}
EXPORT_SYMBOL(ceph_osdc_alloc_messages);

/*
 * This is an osd op init function for opcodes that have no data or
 * other information associated with them.  It also serves as a
 * common init routine for all the other init functions, below.
 */
static struct ceph_osd_req_op *
_osd_req_op_init(struct ceph_osd_request *osd_req, unsigned int which,
		 u16 opcode, u32 flags)
{
	struct ceph_osd_req_op *op;

	BUG_ON(which >= osd_req->r_num_ops);
	BUG_ON(!osd_req_opcode_valid(opcode));

	op = &osd_req->r_ops[which];
	memset(op, 0, sizeof (*op));
	op->op = opcode;
	op->flags = flags;

	return op;
}

void osd_req_op_init(struct ceph_osd_request *osd_req,
		     unsigned int which, u16 opcode, u32 flags)
{
	(void)_osd_req_op_init(osd_req, which, opcode, flags);
}
EXPORT_SYMBOL(osd_req_op_init);

void osd_req_op_extent_init(struct ceph_osd_request *osd_req,
				unsigned int which, u16 opcode,
				u64 offset, u64 length,
				u64 truncate_size, u32 truncate_seq)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	size_t payload_len = 0;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_WRITEFULL && opcode != CEPH_OSD_OP_ZERO &&
	       opcode != CEPH_OSD_OP_TRUNCATE);

	op->extent.offset = offset;
	op->extent.length = length;
	op->extent.truncate_size = truncate_size;
	op->extent.truncate_seq = truncate_seq;
	if (opcode == CEPH_OSD_OP_WRITE || opcode == CEPH_OSD_OP_WRITEFULL)
		payload_len += length;

	op->indata_len = payload_len;
}
EXPORT_SYMBOL(osd_req_op_extent_init);

void osd_req_op_extent_update(struct ceph_osd_request *osd_req,
				unsigned int which, u64 length)
{
	struct ceph_osd_req_op *op;
	u64 previous;

	BUG_ON(which >= osd_req->r_num_ops);
	op = &osd_req->r_ops[which];
	previous = op->extent.length;

	if (length == previous)
		return;		/* Nothing to do */
	BUG_ON(length > previous);

	op->extent.length = length;
	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= previous - length;
}
EXPORT_SYMBOL(osd_req_op_extent_update);

void osd_req_op_extent_dup_last(struct ceph_osd_request *osd_req,
				unsigned int which, u64 offset_inc)
{
	struct ceph_osd_req_op *op, *prev_op;

	BUG_ON(which + 1 >= osd_req->r_num_ops);

	prev_op = &osd_req->r_ops[which];
	op = _osd_req_op_init(osd_req, which + 1, prev_op->op, prev_op->flags);
	/* dup previous one */
	op->indata_len = prev_op->indata_len;
	op->outdata_len = prev_op->outdata_len;
	op->extent = prev_op->extent;
	/* adjust offset */
	op->extent.offset += offset_inc;
	op->extent.length -= offset_inc;

	if (op->op == CEPH_OSD_OP_WRITE || op->op == CEPH_OSD_OP_WRITEFULL)
		op->indata_len -= offset_inc;
}
EXPORT_SYMBOL(osd_req_op_extent_dup_last);
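
/*
 * Example: duplicating a 0~4096 WRITE at @which with offset_inc=1024
 * makes op @which + 1 a WRITE of extent 1024~3072, with indata_len
 * likewise reduced by 1024.
 */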

int osd_req_op_cls_init(struct ceph_osd_request *osd_req, unsigned int which,
			const char *class, const char *method)
{
	struct ceph_osd_req_op *op;
	struct ceph_pagelist *pagelist;
	size_t payload_len = 0;
	size_t size;
	int ret;

	op = _osd_req_op_init(osd_req, which, CEPH_OSD_OP_CALL, 0);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	op->cls.class_name = class;
	size = strlen(class);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.class_len = size;
	ret = ceph_pagelist_append(pagelist, class, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->cls.method_name = method;
	size = strlen(method);
	BUG_ON(size > (size_t) U8_MAX);
	op->cls.method_len = size;
	ret = ceph_pagelist_append(pagelist, method, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	osd_req_op_cls_request_info_pagelist(osd_req, which, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_cls_init);
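
/*
 * Illustrative sketch of invoking an object class method (the class
 * and method names here are just examples):
 *
 *	ret = osd_req_op_cls_init(req, 0, "lock", "lock");
 *	if (ret)
 *		return ret;
 *	osd_req_op_cls_request_data_pagelist(req, 0, in_pl);
 *	osd_req_op_cls_response_data_pages(req, 0, out_pages, out_len,
 *					   0, false, false);
 *
 * request_info (the encoded class and method names) is set up here;
 * input and output buffers are attached with the helpers above.
 */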

int osd_req_op_xattr_init(struct ceph_osd_request *osd_req, unsigned int which,
			  u16 opcode, const char *name, const void *value,
			  size_t size, u8 cmp_op, u8 cmp_mode)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      opcode, 0);
	struct ceph_pagelist *pagelist;
	size_t payload_len;
	int ret;

	BUG_ON(opcode != CEPH_OSD_OP_SETXATTR && opcode != CEPH_OSD_OP_CMPXATTR);

	pagelist = ceph_pagelist_alloc(GFP_NOFS);
	if (!pagelist)
		return -ENOMEM;

	payload_len = strlen(name);
	op->xattr.name_len = payload_len;
	ret = ceph_pagelist_append(pagelist, name, payload_len);
	if (ret)
		goto err_pagelist_free;

	op->xattr.value_len = size;
	ret = ceph_pagelist_append(pagelist, value, size);
	if (ret)
		goto err_pagelist_free;
	payload_len += size;

	op->xattr.cmp_op = cmp_op;
	op->xattr.cmp_mode = cmp_mode;

	ceph_osd_data_pagelist_init(&op->xattr.osd_data, pagelist);
	op->indata_len = payload_len;
	return 0;

err_pagelist_free:
	ceph_pagelist_release(pagelist);
	return ret;
}
EXPORT_SYMBOL(osd_req_op_xattr_init);

/*
 * @watch_opcode: CEPH_OSD_WATCH_OP_*
 */
static void osd_req_op_watch_init(struct ceph_osd_request *req, int which,
				  u64 cookie, u8 watch_opcode)
{
	struct ceph_osd_req_op *op;

	op = _osd_req_op_init(req, which, CEPH_OSD_OP_WATCH, 0);
	op->watch.cookie = cookie;
	op->watch.op = watch_opcode;
	op->watch.gen = 0;
}

/*
 * @flags: CEPH_OSD_OP_ALLOC_HINT_FLAG_*
 */
void osd_req_op_alloc_hint_init(struct ceph_osd_request *osd_req,
				unsigned int which,
				u64 expected_object_size,
				u64 expected_write_size,
				u32 flags)
{
	struct ceph_osd_req_op *op = _osd_req_op_init(osd_req, which,
						      CEPH_OSD_OP_SETALLOCHINT,
						      0);

	op->alloc_hint.expected_object_size = expected_object_size;
	op->alloc_hint.expected_write_size = expected_write_size;
	op->alloc_hint.flags = flags;

	/*
	 * CEPH_OSD_OP_SETALLOCHINT op is advisory and therefore deemed
	 * not worth a feature bit.  Set FAILOK per-op flag to make
	 * sure older osds don't trip over an unsupported opcode.
	 */
	op->flags |= CEPH_OSD_OP_FLAG_FAILOK;
}
EXPORT_SYMBOL(osd_req_op_alloc_hint_init);

static void ceph_osdc_msg_data_add(struct ceph_msg *msg,
				struct ceph_osd_data *osd_data)
{
	u64 length = ceph_osd_data_length(osd_data);

	if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGES) {
		BUG_ON(length > (u64) SIZE_MAX);
		if (length)
			ceph_msg_data_add_pages(msg, osd_data->pages,
					length, osd_data->alignment, false);
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_PAGELIST) {
		BUG_ON(!length);
		ceph_msg_data_add_pagelist(msg, osd_data->pagelist);
#ifdef CONFIG_BLOCK
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BIO) {
		ceph_msg_data_add_bio(msg, &osd_data->bio_pos, length);
#endif
	} else if (osd_data->type == CEPH_OSD_DATA_TYPE_BVECS) {
		ceph_msg_data_add_bvecs(msg, &osd_data->bvec_pos);
	} else {
		BUG_ON(osd_data->type != CEPH_OSD_DATA_TYPE_NONE);
	}
}

static u32 osd_req_encode_op(struct ceph_osd_op *dst,
			     const struct ceph_osd_req_op *src)
{
	switch (src->op) {
	case CEPH_OSD_OP_STAT:
		break;
	case CEPH_OSD_OP_READ:
	case CEPH_OSD_OP_WRITE:
	case CEPH_OSD_OP_WRITEFULL:
	case CEPH_OSD_OP_ZERO:
	case CEPH_OSD_OP_TRUNCATE:
		dst->extent.offset = cpu_to_le64(src->extent.offset);
		dst->extent.length = cpu_to_le64(src->extent.length);
		dst->extent.truncate_size =
			cpu_to_le64(src->extent.truncate_size);
		dst->extent.truncate_seq =
			cpu_to_le32(src->extent.truncate_seq);
		break;
	case CEPH_OSD_OP_CALL:
		dst->cls.class_len = src->cls.class_len;
		dst->cls.method_len = src->cls.method_len;
		dst->cls.indata_len = cpu_to_le32(src->cls.indata_len);
		break;
	case CEPH_OSD_OP_WATCH:
		dst->watch.cookie = cpu_to_le64(src->watch.cookie);
		dst->watch.ver = cpu_to_le64(0);
		dst->watch.op = src->watch.op;
		dst->watch.gen = cpu_to_le32(src->watch.gen);
		break;
	case CEPH_OSD_OP_NOTIFY_ACK:
		break;
	case CEPH_OSD_OP_NOTIFY:
		dst->notify.cookie = cpu_to_le64(src->notify.cookie);
		break;
	case CEPH_OSD_OP_LIST_WATCHERS:
		break;
	case CEPH_OSD_OP_SETALLOCHINT:
		dst->alloc_hint.expected_object_size =
		    cpu_to_le64(src->alloc_hint.expected_object_size);
		dst->alloc_hint.expected_write_size =
		    cpu_to_le64(src->alloc_hint.expected_write_size);
		dst->alloc_hint.flags = cpu_to_le32(src->alloc_hint.flags);
		break;
	case CEPH_OSD_OP_SETXATTR:
	case CEPH_OSD_OP_CMPXATTR:
		dst->xattr.name_len = cpu_to_le32(src->xattr.name_len);
		dst->xattr.value_len = cpu_to_le32(src->xattr.value_len);
		dst->xattr.cmp_op = src->xattr.cmp_op;
		dst->xattr.cmp_mode = src->xattr.cmp_mode;
		break;
	case CEPH_OSD_OP_CREATE:
	case CEPH_OSD_OP_DELETE:
		break;
	case CEPH_OSD_OP_COPY_FROM2:
		dst->copy_from.snapid = cpu_to_le64(src->copy_from.snapid);
		dst->copy_from.src_version =
			cpu_to_le64(src->copy_from.src_version);
		dst->copy_from.flags = src->copy_from.flags;
		dst->copy_from.src_fadvise_flags =
			cpu_to_le32(src->copy_from.src_fadvise_flags);
		break;
	default:
		pr_err("unsupported osd opcode %s\n",
			ceph_osd_op_name(src->op));
		WARN_ON(1);

		return 0;
	}

	dst->op = cpu_to_le16(src->op);
	dst->flags = cpu_to_le32(src->flags);
	dst->payload_len = cpu_to_le32(src->indata_len);

	return src->indata_len;
}

/*
 * build new request AND message, calculate layout, and adjust file
 * extent as needed.
 *
 * if the file was recently truncated, we include information about its
 * old and new size so that the object can be updated appropriately.  (we
 * avoid synchronously deleting truncated objects because it's slow.)
 */
struct ceph_osd_request *ceph_osdc_new_request(struct ceph_osd_client *osdc,
					       struct ceph_file_layout *layout,
					       struct ceph_vino vino,
					       u64 off, u64 *plen,
					       unsigned int which, int num_ops,
					       int opcode, int flags,
					       struct ceph_snap_context *snapc,
					       u32 truncate_seq,
					       u64 truncate_size,
					       bool use_mempool)
{
	struct ceph_osd_request *req;
	u64 objnum = 0;
	u64 objoff = 0;
	u64 objlen = 0;
	int r;

	BUG_ON(opcode != CEPH_OSD_OP_READ && opcode != CEPH_OSD_OP_WRITE &&
	       opcode != CEPH_OSD_OP_ZERO && opcode != CEPH_OSD_OP_TRUNCATE &&
	       opcode != CEPH_OSD_OP_CREATE && opcode != CEPH_OSD_OP_DELETE);

	req = ceph_osdc_alloc_request(osdc, snapc, num_ops, use_mempool,
					GFP_NOFS);
	if (!req) {
		r = -ENOMEM;
		goto fail;
	}

	/* calculate max write size */
	r = calc_layout(layout, off, plen, &objnum, &objoff, &objlen);
	if (r)
		goto fail;

	if (opcode == CEPH_OSD_OP_CREATE || opcode == CEPH_OSD_OP_DELETE) {
		osd_req_op_init(req, which, opcode, 0);
	} else {
		u32 object_size = layout->object_size;
		u32 object_base = off - objoff;
		if (!(truncate_seq == 1 && truncate_size == -1ULL)) {
			if (truncate_size <= object_base) {
				truncate_size = 0;
			} else {
				truncate_size -= object_base;
				if (truncate_size > object_size)
					truncate_size = object_size;
			}
		}
		osd_req_op_extent_init(req, which, opcode, objoff, objlen,
				       truncate_size, truncate_seq);
	}

	req->r_base_oloc.pool = layout->pool_id;
	req->r_base_oloc.pool_ns = ceph_try_get_string(layout->pool_ns);
	ceph_oid_printf(&req->r_base_oid, "%llx.%08llx", vino.ino, objnum);
	req->r_flags = flags | osdc->client->options->read_from_replica;

	req->r_snapid = vino.snap;
	if (flags & CEPH_OSD_FLAG_WRITE)
		req->r_data_offset = off;

	if (num_ops > 1)
		/*
		 * This is a special case for ceph_writepages_start(), but it
		 * also covers ceph_uninline_data().  If more multi-op request
		 * use cases emerge, we will need a separate helper.
		 */
		r = __ceph_osdc_alloc_messages(req, GFP_NOFS, num_ops, 0);
	else
		r = ceph_osdc_alloc_messages(req, GFP_NOFS);
	if (r)
		goto fail;

	return req;

fail:
	ceph_osdc_put_request(req);
	return ERR_PTR(r);
}
EXPORT_SYMBOL(ceph_osdc_new_request);
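
/*
 * Illustrative sketch of a single-object file read using the helper
 * above (loosely modeled on fs/ceph callers; ci is an assumed ceph
 * inode):
 *
 *	req = ceph_osdc_new_request(osdc, layout, vino, off, &len, 0, 1,
 *				    CEPH_OSD_OP_READ, CEPH_OSD_FLAG_READ,
 *				    NULL, ci->i_truncate_seq,
 *				    ci->i_truncate_size, false);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *
 * On return, len may have been shortened to the object boundary by
 * calc_layout(), in which case the caller loops for the remainder.
 */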

/*
 * We keep osd requests in an rbtree, sorted by ->r_tid.
 */
DEFINE_RB_FUNCS(request, struct ceph_osd_request, r_tid, r_node)
DEFINE_RB_FUNCS(request_mc, struct ceph_osd_request, r_tid, r_mc_node)
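
/*
 * Each DEFINE_RB_FUNCS(request, ...) expands to insert_request(),
 * erase_request() and lookup_request() keyed by r_tid; the request_mc
 * variants do the same for r_mc_node, which is used by the map check
 * machinery.
 */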

/*
 * Call @fn on each OSD request as long as @fn returns 0.
 */
static void for_each_request(struct ceph_osd_client *osdc,
			int (*fn)(struct ceph_osd_request *req, void *arg),
			void *arg)
{
	struct rb_node *n, *p;

	for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
		struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);

		for (p = rb_first(&osd->o_requests); p; ) {
			struct ceph_osd_request *req =
			    rb_entry(p, struct ceph_osd_request, r_node);

			p = rb_next(p);
			if (fn(req, arg))
				return;
		}
	}

	for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
		struct ceph_osd_request *req =
		    rb_entry(p, struct ceph_osd_request, r_node);

		p = rb_next(p);
		if (fn(req, arg))
			return;
	}
}
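
/*
 * Example callback shape (a sketch; real callers pass helpers such as
 * the abort/complete functions elsewhere in this file):
 *
 *	static int count_one(struct ceph_osd_request *req, void *arg)
 *	{
 *		int *count = arg;
 *
 *		(*count)++;
 *		return 0;	(returning non-zero stops the walk)
 *	}
 */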

static bool osd_homeless(struct ceph_osd *osd)
{
	return osd->o_osd == CEPH_HOMELESS_OSD;
}

static bool osd_registered(struct ceph_osd *osd)
{
	verify_osdc_locked(osd->o_osdc);

	return !RB_EMPTY_NODE(&osd->o_node);
}

/*
 * Assumes @osd is zero-initialized.
 */
static void osd_init(struct ceph_osd *osd)
{
	refcount_set(&osd->o_ref, 1);
	RB_CLEAR_NODE(&osd->o_node);
	osd->o_requests = RB_ROOT;
	osd->o_linger_requests = RB_ROOT;
	osd->o_backoff_mappings = RB_ROOT;
	osd->o_backoffs_by_id = RB_ROOT;
	INIT_LIST_HEAD(&osd->o_osd_lru);
	INIT_LIST_HEAD(&osd->o_keepalive_item);
	osd->o_incarnation = 1;
	mutex_init(&osd->lock);
}

static void osd_cleanup(struct ceph_osd *osd)
{
	WARN_ON(!RB_EMPTY_NODE(&osd->o_node));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoff_mappings));
	WARN_ON(!RB_EMPTY_ROOT(&osd->o_backoffs_by_id));
	WARN_ON(!list_empty(&osd->o_osd_lru));
	WARN_ON(!list_empty(&osd->o_keepalive_item));

	if (osd->o_auth.authorizer) {
		WARN_ON(osd_homeless(osd));
		ceph_auth_destroy_authorizer(osd->o_auth.authorizer);
	}
}

/*
 * Track open sessions with osds.
 */
static struct ceph_osd *create_osd(struct ceph_osd_client *osdc, int onum)
{
	struct ceph_osd *osd;

	WARN_ON(onum == CEPH_HOMELESS_OSD);

	osd = kzalloc(sizeof(*osd), GFP_NOIO | __GFP_NOFAIL);
	osd_init(osd);
	osd->o_osdc = osdc;
	osd->o_osd = onum;

	ceph_con_init(&osd->o_con, osd, &osd_con_ops, &osdc->client->msgr);

	return osd;
}

static struct ceph_osd *get_osd(struct ceph_osd *osd)
{
	if (refcount_inc_not_zero(&osd->o_ref)) {
		dout("get_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref)-1,
		     refcount_read(&osd->o_ref));
		return osd;
	} else {
		dout("get_osd %p FAIL\n", osd);
		return NULL;
	}
}

static void put_osd(struct ceph_osd *osd)
{
	dout("put_osd %p %d -> %d\n", osd, refcount_read(&osd->o_ref),
	     refcount_read(&osd->o_ref) - 1);
	if (refcount_dec_and_test(&osd->o_ref)) {
		osd_cleanup(osd);
		kfree(osd);
	}
}

DEFINE_RB_FUNCS(osd, struct ceph_osd, o_osd, o_node)

static void __move_osd_to_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
	BUG_ON(!list_empty(&osd->o_osd_lru));

	spin_lock(&osdc->osd_lru_lock);
	list_add_tail(&osd->o_osd_lru, &osdc->osd_lru);
	spin_unlock(&osdc->osd_lru_lock);

	osd->lru_ttl = jiffies + osdc->client->options->osd_idle_ttl;
}

static void maybe_move_osd_to_lru(struct ceph_osd *osd)
{
	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests))
		__move_osd_to_lru(osd);
}

static void __remove_osd_from_lru(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	spin_lock(&osdc->osd_lru_lock);
	if (!list_empty(&osd->o_osd_lru))
		list_del_init(&osd->o_osd_lru);
	spin_unlock(&osdc->osd_lru_lock);
}

/*
 * Close the connection and assign any leftover requests to the
 * homeless session.
 */
static void close_osd(struct ceph_osd *osd)
{
	struct ceph_osd_client *osdc = osd->o_osdc;
	struct rb_node *n;

	verify_osdc_wrlocked(osdc);
	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	ceph_con_close(&osd->o_con);

	for (n = rb_first(&osd->o_requests); n; ) {
		struct ceph_osd_request *req =
		    rb_entry(n, struct ceph_osd_request, r_node);

		n = rb_next(n); /* unlink_request() */

		dout(" reassigning req %p tid %llu\n", req, req->r_tid);
		unlink_request(osd, req);
		link_request(&osdc->homeless_osd, req);
	}
	for (n = rb_first(&osd->o_linger_requests); n; ) {
		struct ceph_osd_linger_request *lreq =
		    rb_entry(n, struct ceph_osd_linger_request, node);

		n = rb_next(n); /* unlink_linger() */

		dout(" reassigning lreq %p linger_id %llu\n", lreq,
		     lreq->linger_id);
		unlink_linger(osd, lreq);
		link_linger(&osdc->homeless_osd, lreq);
	}
	clear_backoffs(osd);

	__remove_osd_from_lru(osd);
	erase_osd(&osdc->osds, osd);
	put_osd(osd);
}

/*
 * reset osd connect
 */
static int reopen_osd(struct ceph_osd *osd)
{
	struct ceph_entity_addr *peer_addr;

	dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);

	if (RB_EMPTY_ROOT(&osd->o_requests) &&
	    RB_EMPTY_ROOT(&osd->o_linger_requests)) {
		close_osd(osd);
		return -ENODEV;
	}

	peer_addr = &osd->o_osdc->osdmap->osd_addr[osd->o_osd];
	if (!memcmp(peer_addr, &osd->o_con.peer_addr, sizeof (*peer_addr)) &&
			!ceph_con_opened(&osd->o_con)) {
		struct rb_node *n;

		dout("osd addr hasn't changed and connection never opened, "
		     "letting msgr retry\n");
		/* touch each r_stamp for handle_timeout()'s benefit */
1375                 for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
1376                         struct ceph_osd_request *req =
1377                             rb_entry(n, struct ceph_osd_request, r_node);
1378                         req->r_stamp = jiffies;
1379                 }
1380
1381                 return -EAGAIN;
1382         }
1383
1384         ceph_con_close(&osd->o_con);
1385         ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd, peer_addr);
1386         osd->o_incarnation++;
1387
1388         return 0;
1389 }
1390
1391 static struct ceph_osd *lookup_create_osd(struct ceph_osd_client *osdc, int o,
1392                                           bool wrlocked)
1393 {
1394         struct ceph_osd *osd;
1395
1396         if (wrlocked)
1397                 verify_osdc_wrlocked(osdc);
1398         else
1399                 verify_osdc_locked(osdc);
1400
1401         if (o != CEPH_HOMELESS_OSD)
1402                 osd = lookup_osd(&osdc->osds, o);
1403         else
1404                 osd = &osdc->homeless_osd;
1405         if (!osd) {
1406                 if (!wrlocked)
1407                         return ERR_PTR(-EAGAIN);
1408
1409                 osd = create_osd(osdc, o);
1410                 insert_osd(&osdc->osds, osd);
1411                 ceph_con_open(&osd->o_con, CEPH_ENTITY_TYPE_OSD, osd->o_osd,
1412                               &osdc->osdmap->osd_addr[osd->o_osd]);
1413         }
1414
1415         dout("%s osdc %p osd%d -> osd %p\n", __func__, osdc, o, osd);
1416         return osd;
1417 }
1418
1419 /*
1420  * Create request <-> OSD session relation.
1421  *
1422  * @req has to be assigned a tid, @osd may be homeless.
1423  */
1424 static void link_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1425 {
1426         verify_osd_locked(osd);
1427         WARN_ON(!req->r_tid || req->r_osd);
1428         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1429              req, req->r_tid);
1430
1431         if (!osd_homeless(osd))
1432                 __remove_osd_from_lru(osd);
1433         else
1434                 atomic_inc(&osd->o_osdc->num_homeless);
1435
1436         get_osd(osd);
1437         insert_request(&osd->o_requests, req);
1438         req->r_osd = osd;
1439 }
1440
1441 static void unlink_request(struct ceph_osd *osd, struct ceph_osd_request *req)
1442 {
1443         verify_osd_locked(osd);
1444         WARN_ON(req->r_osd != osd);
1445         dout("%s osd %p osd%d req %p tid %llu\n", __func__, osd, osd->o_osd,
1446              req, req->r_tid);
1447
1448         req->r_osd = NULL;
1449         erase_request(&osd->o_requests, req);
1450         put_osd(osd);
1451
1452         if (!osd_homeless(osd))
1453                 maybe_move_osd_to_lru(osd);
1454         else
1455                 atomic_dec(&osd->o_osdc->num_homeless);
1456 }
1457
1458 static bool __pool_full(struct ceph_pg_pool_info *pi)
1459 {
1460         return pi->flags & CEPH_POOL_FLAG_FULL;
1461 }
1462
1463 static bool have_pool_full(struct ceph_osd_client *osdc)
1464 {
1465         struct rb_node *n;
1466
1467         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
1468                 struct ceph_pg_pool_info *pi =
1469                     rb_entry(n, struct ceph_pg_pool_info, node);
1470
1471                 if (__pool_full(pi))
1472                         return true;
1473         }
1474
1475         return false;
1476 }
1477
1478 static bool pool_full(struct ceph_osd_client *osdc, s64 pool_id)
1479 {
1480         struct ceph_pg_pool_info *pi;
1481
1482         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
1483         if (!pi)
1484                 return false;
1485
1486         return __pool_full(pi);
1487 }
1488
1489 /*
1490  * Returns whether a request should be blocked from being sent
1491  * based on the current osdmap and osd_client settings.
1492  */
1493 static bool target_should_be_paused(struct ceph_osd_client *osdc,
1494                                     const struct ceph_osd_request_target *t,
1495                                     struct ceph_pg_pool_info *pi)
1496 {
1497         bool pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
1498         bool pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
1499                        ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
1500                        __pool_full(pi);
1501
1502         WARN_ON(pi->id != t->target_oloc.pool);
1503         return ((t->flags & CEPH_OSD_FLAG_READ) && pauserd) ||
1504                ((t->flags & CEPH_OSD_FLAG_WRITE) && pausewr) ||
1505                (osdc->osdmap->epoch < osdc->epoch_barrier);
1506 }
1507
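/*
 * For example, target_should_be_paused() above pauses a read
 * (CEPH_OSD_FLAG_READ) when PAUSERD is set, and a write
 * (CEPH_OSD_FLAG_WRITE) when PAUSEWR is set, the map is flagged FULL
 * or the target pool is full.  Any request is paused while the osdmap
 * epoch is still behind the epoch barrier.
 */
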
1508 static int pick_random_replica(const struct ceph_osds *acting)
1509 {
1510         int i = prandom_u32() % acting->size;
1511
1512         dout("%s picked osd%d, primary osd%d\n", __func__,
1513              acting->osds[i], acting->primary);
1514         return i;
1515 }
1516
1517 /*
1518  * Picks the closest replica based on client's location given by
1519  * crush_location option.  Prefers the primary if the locality is
1520  * the same.
1521  */
1522 static int pick_closest_replica(struct ceph_osd_client *osdc,
1523                                 const struct ceph_osds *acting)
1524 {
1525         struct ceph_options *opt = osdc->client->options;
1526         int best_i, best_locality;
1527         int i = 0, locality;
1528
1529         do {
1530                 locality = ceph_get_crush_locality(osdc->osdmap,
1531                                                    acting->osds[i],
1532                                                    &opt->crush_locs);
1533                 if (i == 0 ||
1534                     (locality >= 0 && best_locality < 0) ||
1535                     (locality >= 0 && best_locality >= 0 &&
1536                      locality < best_locality)) {
1537                         best_i = i;
1538                         best_locality = locality;
1539                 }
1540         } while (++i < acting->size);
1541
1542         dout("%s picked osd%d with locality %d, primary osd%d\n", __func__,
1543              acting->osds[best_i], best_locality, acting->primary);
1544         return best_i;
1545 }
1546
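/*
 * Locality in pick_closest_replica() above is, roughly, the CRUSH
 * hierarchy level of the lowest bucket shared between the client's
 * crush_location and the OSD: smaller means closer, negative means no
 * common ancestor was found.  The strict "locality < best_locality"
 * test therefore keeps the smallest non-negative locality and, on
 * ties, the earliest index -- i.e. the primary, which sits at
 * acting->osds[0] and is visited first.
 */
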
1547 enum calc_target_result {
1548         CALC_TARGET_NO_ACTION = 0,
1549         CALC_TARGET_NEED_RESEND,
1550         CALC_TARGET_POOL_DNE,
1551 };
1552
1553 static enum calc_target_result calc_target(struct ceph_osd_client *osdc,
1554                                            struct ceph_osd_request_target *t,
1555                                            bool any_change)
1556 {
1557         struct ceph_pg_pool_info *pi;
1558         struct ceph_pg pgid, last_pgid;
1559         struct ceph_osds up, acting;
1560         bool is_read = t->flags & CEPH_OSD_FLAG_READ;
1561         bool is_write = t->flags & CEPH_OSD_FLAG_WRITE;
1562         bool force_resend = false;
1563         bool unpaused = false;
1564         bool legacy_change = false;
1565         bool split = false;
1566         bool sort_bitwise = ceph_osdmap_flag(osdc, CEPH_OSDMAP_SORTBITWISE);
1567         bool recovery_deletes = ceph_osdmap_flag(osdc,
1568                                                  CEPH_OSDMAP_RECOVERY_DELETES);
1569         enum calc_target_result ct_res;
1570
1571         t->epoch = osdc->osdmap->epoch;
1572         pi = ceph_pg_pool_by_id(osdc->osdmap, t->base_oloc.pool);
1573         if (!pi) {
1574                 t->osd = CEPH_HOMELESS_OSD;
1575                 ct_res = CALC_TARGET_POOL_DNE;
1576                 goto out;
1577         }
1578
1579         if (osdc->osdmap->epoch == pi->last_force_request_resend) {
1580                 if (t->last_force_resend < pi->last_force_request_resend) {
1581                         t->last_force_resend = pi->last_force_request_resend;
1582                         force_resend = true;
1583                 } else if (t->last_force_resend == 0) {
1584                         force_resend = true;
1585                 }
1586         }
1587
1588         /* apply tiering */
1589         ceph_oid_copy(&t->target_oid, &t->base_oid);
1590         ceph_oloc_copy(&t->target_oloc, &t->base_oloc);
1591         if ((t->flags & CEPH_OSD_FLAG_IGNORE_OVERLAY) == 0) {
1592                 if (is_read && pi->read_tier >= 0)
1593                         t->target_oloc.pool = pi->read_tier;
1594                 if (is_write && pi->write_tier >= 0)
1595                         t->target_oloc.pool = pi->write_tier;
1596
1597                 pi = ceph_pg_pool_by_id(osdc->osdmap, t->target_oloc.pool);
1598                 if (!pi) {
1599                         t->osd = CEPH_HOMELESS_OSD;
1600                         ct_res = CALC_TARGET_POOL_DNE;
1601                         goto out;
1602                 }
1603         }
1604
1605         __ceph_object_locator_to_pg(pi, &t->target_oid, &t->target_oloc, &pgid);
1606         last_pgid.pool = pgid.pool;
1607         last_pgid.seed = ceph_stable_mod(pgid.seed, t->pg_num, t->pg_num_mask);
1608
1609         ceph_pg_to_up_acting_osds(osdc->osdmap, pi, &pgid, &up, &acting);
1610         if (any_change &&
1611             ceph_is_new_interval(&t->acting,
1612                                  &acting,
1613                                  &t->up,
1614                                  &up,
1615                                  t->size,
1616                                  pi->size,
1617                                  t->min_size,
1618                                  pi->min_size,
1619                                  t->pg_num,
1620                                  pi->pg_num,
1621                                  t->sort_bitwise,
1622                                  sort_bitwise,
1623                                  t->recovery_deletes,
1624                                  recovery_deletes,
1625                                  &last_pgid))
1626                 force_resend = true;
1627
1628         if (t->paused && !target_should_be_paused(osdc, t, pi)) {
1629                 t->paused = false;
1630                 unpaused = true;
1631         }
1632         legacy_change = ceph_pg_compare(&t->pgid, &pgid) ||
1633                         ceph_osds_changed(&t->acting, &acting,
1634                                           t->used_replica || any_change);
1635         if (t->pg_num)
1636                 split = ceph_pg_is_split(&last_pgid, t->pg_num, pi->pg_num);
1637
1638         if (legacy_change || force_resend || split) {
1639                 t->pgid = pgid; /* struct */
1640                 ceph_pg_to_primary_shard(osdc->osdmap, pi, &pgid, &t->spgid);
1641                 ceph_osds_copy(&t->acting, &acting);
1642                 ceph_osds_copy(&t->up, &up);
1643                 t->size = pi->size;
1644                 t->min_size = pi->min_size;
1645                 t->pg_num = pi->pg_num;
1646                 t->pg_num_mask = pi->pg_num_mask;
1647                 t->sort_bitwise = sort_bitwise;
1648                 t->recovery_deletes = recovery_deletes;
1649
1650                 if ((t->flags & (CEPH_OSD_FLAG_BALANCE_READS |
1651                                  CEPH_OSD_FLAG_LOCALIZE_READS)) &&
1652                     !is_write && pi->type == CEPH_POOL_TYPE_REP &&
1653                     acting.size > 1) {
1654                         int pos;
1655
1656                         WARN_ON(!is_read || acting.osds[0] != acting.primary);
1657                         if (t->flags & CEPH_OSD_FLAG_BALANCE_READS) {
1658                                 pos = pick_random_replica(&acting);
1659                         } else {
1660                                 pos = pick_closest_replica(osdc, &acting);
1661                         }
1662                         t->osd = acting.osds[pos];
1663                         t->used_replica = pos > 0;
1664                 } else {
1665                         t->osd = acting.primary;
1666                         t->used_replica = false;
1667                 }
1668         }
1669
1670         if (unpaused || legacy_change || force_resend || split)
1671                 ct_res = CALC_TARGET_NEED_RESEND;
1672         else
1673                 ct_res = CALC_TARGET_NO_ACTION;
1674
1675 out:
1676         dout("%s t %p -> %d%d%d%d ct_res %d osd%d\n", __func__, t, unpaused,
1677              legacy_change, force_resend, split, ct_res, t->osd);
1678         return ct_res;
1679 }
1680
1681 static struct ceph_spg_mapping *alloc_spg_mapping(void)
1682 {
1683         struct ceph_spg_mapping *spg;
1684
1685         spg = kmalloc(sizeof(*spg), GFP_NOIO);
1686         if (!spg)
1687                 return NULL;
1688
1689         RB_CLEAR_NODE(&spg->node);
1690         spg->backoffs = RB_ROOT;
1691         return spg;
1692 }
1693
1694 static void free_spg_mapping(struct ceph_spg_mapping *spg)
1695 {
1696         WARN_ON(!RB_EMPTY_NODE(&spg->node));
1697         WARN_ON(!RB_EMPTY_ROOT(&spg->backoffs));
1698
1699         kfree(spg);
1700 }
1701
1702 /*
1703  * rbtree of ceph_spg_mapping for handling map<spg_t, ...>, similar to
1704  * ceph_pg_mapping.  Used to track OSD backoffs -- a backoff [range] is
1705  * defined only within a specific spgid; it does not pass anything to
1706  * children on split, or to another primary.
1707  */
1708 DEFINE_RB_FUNCS2(spg_mapping, struct ceph_spg_mapping, spgid, ceph_spg_compare,
1709                  RB_BYPTR, const struct ceph_spg *, node)
1710
1711 static u64 hoid_get_bitwise_key(const struct ceph_hobject_id *hoid)
1712 {
1713         return hoid->is_max ? 0x100000000ull : hoid->hash_reverse_bits;
1714 }
1715
1716 static void hoid_get_effective_key(const struct ceph_hobject_id *hoid,
1717                                    void **pkey, size_t *pkey_len)
1718 {
1719         if (hoid->key_len) {
1720                 *pkey = hoid->key;
1721                 *pkey_len = hoid->key_len;
1722         } else {
1723                 *pkey = hoid->oid;
1724                 *pkey_len = hoid->oid_len;
1725         }
1726 }
1727
1728 static int compare_names(const void *name1, size_t name1_len,
1729                          const void *name2, size_t name2_len)
1730 {
1731         int ret;
1732
1733         ret = memcmp(name1, name2, min(name1_len, name2_len));
1734         if (!ret) {
1735                 if (name1_len < name2_len)
1736                         ret = -1;
1737                 else if (name1_len > name2_len)
1738                         ret = 1;
1739         }
1740         return ret;
1741 }
1742
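/*
 * compare_names() is memcmp() with a length tiebreak: a name that is
 * a strict prefix of another sorts first.  For example ("abc", 3) vs
 * ("abcd", 4) ties over the first three bytes, so the shorter name
 * wins and -1 is returned.
 */
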
1743 static int hoid_compare(const struct ceph_hobject_id *lhs,
1744                         const struct ceph_hobject_id *rhs)
1745 {
1746         void *effective_key1, *effective_key2;
1747         size_t effective_key1_len, effective_key2_len;
1748         int ret;
1749
1750         if (lhs->is_max < rhs->is_max)
1751                 return -1;
1752         if (lhs->is_max > rhs->is_max)
1753                 return 1;
1754
1755         if (lhs->pool < rhs->pool)
1756                 return -1;
1757         if (lhs->pool > rhs->pool)
1758                 return 1;
1759
1760         if (hoid_get_bitwise_key(lhs) < hoid_get_bitwise_key(rhs))
1761                 return -1;
1762         if (hoid_get_bitwise_key(lhs) > hoid_get_bitwise_key(rhs))
1763                 return 1;
1764
1765         ret = compare_names(lhs->nspace, lhs->nspace_len,
1766                             rhs->nspace, rhs->nspace_len);
1767         if (ret)
1768                 return ret;
1769
1770         hoid_get_effective_key(lhs, &effective_key1, &effective_key1_len);
1771         hoid_get_effective_key(rhs, &effective_key2, &effective_key2_len);
1772         ret = compare_names(effective_key1, effective_key1_len,
1773                             effective_key2, effective_key2_len);
1774         if (ret)
1775                 return ret;
1776
1777         ret = compare_names(lhs->oid, lhs->oid_len, rhs->oid, rhs->oid_len);
1778         if (ret)
1779                 return ret;
1780
1781         if (lhs->snapid < rhs->snapid)
1782                 return -1;
1783         if (lhs->snapid > rhs->snapid)
1784                 return 1;
1785
1786         return 0;
1787 }
1788
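/*
 * To summarize, hoid_compare() sorts by is_max first, then pool, then
 * the bitwise key (reversed hash bits), then namespace, then the
 * effective key (locator key if set, object name otherwise), then the
 * object name itself and finally the snapshot id -- matching the
 * bitwise sort order used on the OSDs.
 */
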
1789 /*
1790  * For decoding ->begin and ->end of MOSDBackoff only -- no MIN/MAX
1791  * compat stuff here.
1792  *
1793  * Assumes @hoid is zero-initialized.
1794  */
1795 static int decode_hoid(void **p, void *end, struct ceph_hobject_id *hoid)
1796 {
1797         u8 struct_v;
1798         u32 struct_len;
1799         int ret;
1800
1801         ret = ceph_start_decoding(p, end, 4, "hobject_t", &struct_v,
1802                                   &struct_len);
1803         if (ret)
1804                 return ret;
1805
1806         if (struct_v < 4) {
1807                 pr_err("got struct_v %d < 4 of hobject_t\n", struct_v);
1808                 goto e_inval;
1809         }
1810
1811         hoid->key = ceph_extract_encoded_string(p, end, &hoid->key_len,
1812                                                 GFP_NOIO);
1813         if (IS_ERR(hoid->key)) {
1814                 ret = PTR_ERR(hoid->key);
1815                 hoid->key = NULL;
1816                 return ret;
1817         }
1818
1819         hoid->oid = ceph_extract_encoded_string(p, end, &hoid->oid_len,
1820                                                 GFP_NOIO);
1821         if (IS_ERR(hoid->oid)) {
1822                 ret = PTR_ERR(hoid->oid);
1823                 hoid->oid = NULL;
1824                 return ret;
1825         }
1826
1827         ceph_decode_64_safe(p, end, hoid->snapid, e_inval);
1828         ceph_decode_32_safe(p, end, hoid->hash, e_inval);
1829         ceph_decode_8_safe(p, end, hoid->is_max, e_inval);
1830
1831         hoid->nspace = ceph_extract_encoded_string(p, end, &hoid->nspace_len,
1832                                                    GFP_NOIO);
1833         if (IS_ERR(hoid->nspace)) {
1834                 ret = PTR_ERR(hoid->nspace);
1835                 hoid->nspace = NULL;
1836                 return ret;
1837         }
1838
1839         ceph_decode_64_safe(p, end, hoid->pool, e_inval);
1840
1841         ceph_hoid_build_hash_cache(hoid);
1842         return 0;
1843
1844 e_inval:
1845         return -EINVAL;
1846 }
1847
1848 static int hoid_encoding_size(const struct ceph_hobject_id *hoid)
1849 {
1850         return 8 + 4 + 1 + 8 + /* snapid, hash, is_max, pool */
1851                4 + hoid->key_len + 4 + hoid->oid_len + 4 + hoid->nspace_len;
1852 }
1853
1854 static void encode_hoid(void **p, void *end, const struct ceph_hobject_id *hoid)
1855 {
1856         ceph_start_encoding(p, 4, 3, hoid_encoding_size(hoid));
1857         ceph_encode_string(p, end, hoid->key, hoid->key_len);
1858         ceph_encode_string(p, end, hoid->oid, hoid->oid_len);
1859         ceph_encode_64(p, hoid->snapid);
1860         ceph_encode_32(p, hoid->hash);
1861         ceph_encode_8(p, hoid->is_max);
1862         ceph_encode_string(p, end, hoid->nspace, hoid->nspace_len);
1863         ceph_encode_64(p, hoid->pool);
1864 }
1865
1866 static void free_hoid(struct ceph_hobject_id *hoid)
1867 {
1868         if (hoid) {
1869                 kfree(hoid->key);
1870                 kfree(hoid->oid);
1871                 kfree(hoid->nspace);
1872                 kfree(hoid);
1873         }
1874 }
1875
1876 static struct ceph_osd_backoff *alloc_backoff(void)
1877 {
1878         struct ceph_osd_backoff *backoff;
1879
1880         backoff = kzalloc(sizeof(*backoff), GFP_NOIO);
1881         if (!backoff)
1882                 return NULL;
1883
1884         RB_CLEAR_NODE(&backoff->spg_node);
1885         RB_CLEAR_NODE(&backoff->id_node);
1886         return backoff;
1887 }
1888
1889 static void free_backoff(struct ceph_osd_backoff *backoff)
1890 {
1891         WARN_ON(!RB_EMPTY_NODE(&backoff->spg_node));
1892         WARN_ON(!RB_EMPTY_NODE(&backoff->id_node));
1893
1894         free_hoid(backoff->begin);
1895         free_hoid(backoff->end);
1896         kfree(backoff);
1897 }
1898
1899 /*
1900  * Within a specific spgid, backoffs are managed by ->begin hoid.
1901  */
1902 DEFINE_RB_INSDEL_FUNCS2(backoff, struct ceph_osd_backoff, begin, hoid_compare,
1903                         RB_BYVAL, spg_node);
1904
1905 static struct ceph_osd_backoff *lookup_containing_backoff(struct rb_root *root,
1906                                             const struct ceph_hobject_id *hoid)
1907 {
1908         struct rb_node *n = root->rb_node;
1909
1910         while (n) {
1911                 struct ceph_osd_backoff *cur =
1912                     rb_entry(n, struct ceph_osd_backoff, spg_node);
1913                 int cmp;
1914
1915                 cmp = hoid_compare(hoid, cur->begin);
1916                 if (cmp < 0) {
1917                         n = n->rb_left;
1918                 } else if (cmp > 0) {
1919                         if (hoid_compare(hoid, cur->end) < 0)
1920                                 return cur;
1921
1922                         n = n->rb_right;
1923                 } else {
1924                         return cur;
1925                 }
1926         }
1927
1928         return NULL;
1929 }
1930
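/*
 * In other words, lookup_containing_backoff() above treats a backoff
 * as the half-open range [begin, end): an hoid equal to ->begin or
 * strictly inside the range matches, an hoid equal to ->end belongs
 * to the next range.  Backoffs within one spgid are expected not to
 * overlap, so descending the tree keyed by ->begin finds the single
 * containing backoff, if any.
 */
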
1931 /*
1932  * Each backoff has a unique id within its OSD session.
1933  */
1934 DEFINE_RB_FUNCS(backoff_by_id, struct ceph_osd_backoff, id, id_node)
1935
1936 static void clear_backoffs(struct ceph_osd *osd)
1937 {
1938         while (!RB_EMPTY_ROOT(&osd->o_backoff_mappings)) {
1939                 struct ceph_spg_mapping *spg =
1940                     rb_entry(rb_first(&osd->o_backoff_mappings),
1941                              struct ceph_spg_mapping, node);
1942
1943                 while (!RB_EMPTY_ROOT(&spg->backoffs)) {
1944                         struct ceph_osd_backoff *backoff =
1945                             rb_entry(rb_first(&spg->backoffs),
1946                                      struct ceph_osd_backoff, spg_node);
1947
1948                         erase_backoff(&spg->backoffs, backoff);
1949                         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
1950                         free_backoff(backoff);
1951                 }
1952                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
1953                 free_spg_mapping(spg);
1954         }
1955 }
1956
1957 /*
1958  * Set up a temporary, non-owning view into @t.
1959  */
1960 static void hoid_fill_from_target(struct ceph_hobject_id *hoid,
1961                                   const struct ceph_osd_request_target *t)
1962 {
1963         hoid->key = NULL;
1964         hoid->key_len = 0;
1965         hoid->oid = t->target_oid.name;
1966         hoid->oid_len = t->target_oid.name_len;
1967         hoid->snapid = CEPH_NOSNAP;
1968         hoid->hash = t->pgid.seed;
1969         hoid->is_max = false;
1970         if (t->target_oloc.pool_ns) {
1971                 hoid->nspace = t->target_oloc.pool_ns->str;
1972                 hoid->nspace_len = t->target_oloc.pool_ns->len;
1973         } else {
1974                 hoid->nspace = NULL;
1975                 hoid->nspace_len = 0;
1976         }
1977         hoid->pool = t->target_oloc.pool;
1978         ceph_hoid_build_hash_cache(hoid);
1979 }
1980
1981 static bool should_plug_request(struct ceph_osd_request *req)
1982 {
1983         struct ceph_osd *osd = req->r_osd;
1984         struct ceph_spg_mapping *spg;
1985         struct ceph_osd_backoff *backoff;
1986         struct ceph_hobject_id hoid;
1987
1988         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &req->r_t.spgid);
1989         if (!spg)
1990                 return false;
1991
1992         hoid_fill_from_target(&hoid, &req->r_t);
1993         backoff = lookup_containing_backoff(&spg->backoffs, &hoid);
1994         if (!backoff)
1995                 return false;
1996
1997         dout("%s req %p tid %llu backoff osd%d spgid %llu.%xs%d id %llu\n",
1998              __func__, req, req->r_tid, osd->o_osd, backoff->spgid.pgid.pool,
1999              backoff->spgid.pgid.seed, backoff->spgid.shard, backoff->id);
2000         return true;
2001 }
2002
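/*
 * A request plugged by should_plug_request() above is simply left
 * linked on its OSD session without being sent; it goes out later,
 * once the OSD lifts the backoff with the corresponding MOSDBackoff
 * unblock message and the backoff is erased from the session.
 */
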
2003 /*
2004  * Keep get_num_data_items() in sync with this function.
2005  */
2006 static void setup_request_data(struct ceph_osd_request *req)
2007 {
2008         struct ceph_msg *request_msg = req->r_request;
2009         struct ceph_msg *reply_msg = req->r_reply;
2010         struct ceph_osd_req_op *op;
2011
2012         if (req->r_request->num_data_items || req->r_reply->num_data_items)
2013                 return;
2014
2015         WARN_ON(request_msg->data_length || reply_msg->data_length);
2016         for (op = req->r_ops; op != &req->r_ops[req->r_num_ops]; op++) {
2017                 switch (op->op) {
2018                 /* request */
2019                 case CEPH_OSD_OP_WRITE:
2020                 case CEPH_OSD_OP_WRITEFULL:
2021                         WARN_ON(op->indata_len != op->extent.length);
2022                         ceph_osdc_msg_data_add(request_msg,
2023                                                &op->extent.osd_data);
2024                         break;
2025                 case CEPH_OSD_OP_SETXATTR:
2026                 case CEPH_OSD_OP_CMPXATTR:
2027                         WARN_ON(op->indata_len != op->xattr.name_len +
2028                                                   op->xattr.value_len);
2029                         ceph_osdc_msg_data_add(request_msg,
2030                                                &op->xattr.osd_data);
2031                         break;
2032                 case CEPH_OSD_OP_NOTIFY_ACK:
2033                         ceph_osdc_msg_data_add(request_msg,
2034                                                &op->notify_ack.request_data);
2035                         break;
2036                 case CEPH_OSD_OP_COPY_FROM2:
2037                         ceph_osdc_msg_data_add(request_msg,
2038                                                &op->copy_from.osd_data);
2039                         break;
2040
2041                 /* reply */
2042                 case CEPH_OSD_OP_STAT:
2043                         ceph_osdc_msg_data_add(reply_msg,
2044                                                &op->raw_data_in);
2045                         break;
2046                 case CEPH_OSD_OP_READ:
2047                         ceph_osdc_msg_data_add(reply_msg,
2048                                                &op->extent.osd_data);
2049                         break;
2050                 case CEPH_OSD_OP_LIST_WATCHERS:
2051                         ceph_osdc_msg_data_add(reply_msg,
2052                                                &op->list_watchers.response_data);
2053                         break;
2054
2055                 /* both */
2056                 case CEPH_OSD_OP_CALL:
2057                         WARN_ON(op->indata_len != op->cls.class_len +
2058                                                   op->cls.method_len +
2059                                                   op->cls.indata_len);
2060                         ceph_osdc_msg_data_add(request_msg,
2061                                                &op->cls.request_info);
2062                         /* optional, can be NONE */
2063                         ceph_osdc_msg_data_add(request_msg,
2064                                                &op->cls.request_data);
2065                         /* optional, can be NONE */
2066                         ceph_osdc_msg_data_add(reply_msg,
2067                                                &op->cls.response_data);
2068                         break;
2069                 case CEPH_OSD_OP_NOTIFY:
2070                         ceph_osdc_msg_data_add(request_msg,
2071                                                &op->notify.request_data);
2072                         ceph_osdc_msg_data_add(reply_msg,
2073                                                &op->notify.response_data);
2074                         break;
2075                 }
2076         }
2077 }
2078
2079 static void encode_pgid(void **p, const struct ceph_pg *pgid)
2080 {
2081         ceph_encode_8(p, 1);
2082         ceph_encode_64(p, pgid->pool);
2083         ceph_encode_32(p, pgid->seed);
2084         ceph_encode_32(p, -1); /* preferred */
2085 }
2086
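/*
 * On the wire (little-endian) the raw pgid written by encode_pgid()
 * above is:
 *
 *	u8  encoding version (1)
 *	u64 pool id
 *	u32 placement seed
 *	u32 preferred osd (legacy field, always -1)
 */
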
2087 static void encode_spgid(void **p, const struct ceph_spg *spgid)
2088 {
2089         ceph_start_encoding(p, 1, 1, CEPH_PGID_ENCODING_LEN + 1);
2090         encode_pgid(p, &spgid->pgid);
2091         ceph_encode_8(p, spgid->shard);
2092 }
2093
2094 static void encode_oloc(void **p, void *end,
2095                         const struct ceph_object_locator *oloc)
2096 {
2097         ceph_start_encoding(p, 5, 4, ceph_oloc_encoding_size(oloc));
2098         ceph_encode_64(p, oloc->pool);
2099         ceph_encode_32(p, -1); /* preferred */
2100         ceph_encode_32(p, 0);  /* key len */
2101         if (oloc->pool_ns)
2102                 ceph_encode_string(p, end, oloc->pool_ns->str,
2103                                    oloc->pool_ns->len);
2104         else
2105                 ceph_encode_32(p, 0);
2106 }
2107
2108 static void encode_request_partial(struct ceph_osd_request *req,
2109                                    struct ceph_msg *msg)
2110 {
2111         void *p = msg->front.iov_base;
2112         void *const end = p + msg->front_alloc_len;
2113         u32 data_len = 0;
2114         int i;
2115
2116         if (req->r_flags & CEPH_OSD_FLAG_WRITE) {
2117                 /* snapshots aren't writeable */
2118                 WARN_ON(req->r_snapid != CEPH_NOSNAP);
2119         } else {
2120                 WARN_ON(req->r_mtime.tv_sec || req->r_mtime.tv_nsec ||
2121                         req->r_data_offset || req->r_snapc);
2122         }
2123
2124         setup_request_data(req);
2125
2126         encode_spgid(&p, &req->r_t.spgid); /* actual spg */
2127         ceph_encode_32(&p, req->r_t.pgid.seed); /* raw hash */
2128         ceph_encode_32(&p, req->r_osdc->osdmap->epoch);
2129         ceph_encode_32(&p, req->r_flags);
2130
2131         /* reqid */
2132         ceph_start_encoding(&p, 2, 2, sizeof(struct ceph_osd_reqid));
2133         memset(p, 0, sizeof(struct ceph_osd_reqid));
2134         p += sizeof(struct ceph_osd_reqid);
2135
2136         /* trace */
2137         memset(p, 0, sizeof(struct ceph_blkin_trace_info));
2138         p += sizeof(struct ceph_blkin_trace_info);
2139
2140         ceph_encode_32(&p, 0); /* client_inc, always 0 */
2141         ceph_encode_timespec64(p, &req->r_mtime);
2142         p += sizeof(struct ceph_timespec);
2143
2144         encode_oloc(&p, end, &req->r_t.target_oloc);
2145         ceph_encode_string(&p, end, req->r_t.target_oid.name,
2146                            req->r_t.target_oid.name_len);
2147
2148         /* ops, can imply data */
2149         ceph_encode_16(&p, req->r_num_ops);
2150         for (i = 0; i < req->r_num_ops; i++) {
2151                 data_len += osd_req_encode_op(p, &req->r_ops[i]);
2152                 p += sizeof(struct ceph_osd_op);
2153         }
2154
2155         ceph_encode_64(&p, req->r_snapid); /* snapid */
2156         if (req->r_snapc) {
2157                 ceph_encode_64(&p, req->r_snapc->seq);
2158                 ceph_encode_32(&p, req->r_snapc->num_snaps);
2159                 for (i = 0; i < req->r_snapc->num_snaps; i++)
2160                         ceph_encode_64(&p, req->r_snapc->snaps[i]);
2161         } else {
2162                 ceph_encode_64(&p, 0); /* snap_seq */
2163                 ceph_encode_32(&p, 0); /* snaps len */
2164         }
2165
2166         ceph_encode_32(&p, req->r_attempts); /* retry_attempt */
2167         BUG_ON(p > end - 8); /* space for features */
2168
2169         msg->hdr.version = cpu_to_le16(8); /* MOSDOp v8 */
2170         /* front_len is finalized in encode_request_finish() */
2171         msg->front.iov_len = p - msg->front.iov_base;
2172         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2173         msg->hdr.data_len = cpu_to_le32(data_len);
2174         /*
2175          * The header "data_off" is a hint to the receiver allowing it
2176          * to align received data into its buffers such that there's no
2177          * need to re-copy it before writing it to disk (direct I/O).
2178          */
2179         msg->hdr.data_off = cpu_to_le16(req->r_data_offset);
2180
2181         dout("%s req %p msg %p oid %s oid_len %d\n", __func__, req, msg,
2182              req->r_t.target_oid.name, req->r_t.target_oid.name_len);
2183 }
2184
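/*
 * Front layout produced by encode_request_partial() (MOSDOp v8):
 * spgid, raw hash, osdmap epoch, flags, reqid (zeroed), blkin trace
 * (zeroed), client_inc (0), mtime, oloc, oid, ops array, snapid, snap
 * context, retry_attempt.  encode_request_finish() below then either
 * appends the peer features (luminous+) or re-encodes the front as v4
 * for older OSDs.
 */
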
2185 static void encode_request_finish(struct ceph_msg *msg)
2186 {
2187         void *p = msg->front.iov_base;
2188         void *const partial_end = p + msg->front.iov_len;
2189         void *const end = p + msg->front_alloc_len;
2190
2191         if (CEPH_HAVE_FEATURE(msg->con->peer_features, RESEND_ON_SPLIT)) {
2192                 /* luminous OSD -- encode features and be done */
2193                 p = partial_end;
2194                 ceph_encode_64(&p, msg->con->peer_features);
2195         } else {
2196                 struct {
2197                         char spgid[CEPH_ENCODING_START_BLK_LEN +
2198                                    CEPH_PGID_ENCODING_LEN + 1];
2199                         __le32 hash;
2200                         __le32 epoch;
2201                         __le32 flags;
2202                         char reqid[CEPH_ENCODING_START_BLK_LEN +
2203                                    sizeof(struct ceph_osd_reqid)];
2204                         char trace[sizeof(struct ceph_blkin_trace_info)];
2205                         __le32 client_inc;
2206                         struct ceph_timespec mtime;
2207                 } __packed head;
2208                 struct ceph_pg pgid;
2209                 void *oloc, *oid, *tail;
2210                 int oloc_len, oid_len, tail_len;
2211                 int len;
2212
2213                 /*
2214                  * Pre-luminous OSD -- reencode v8 into v4 using @head
2215                  * as a temporary buffer.  Encode the raw PG; the rest
2216                  * is just a matter of moving oloc, oid and tail blobs
2217                  * around.
2218                  */
2219                 memcpy(&head, p, sizeof(head));
2220                 p += sizeof(head);
2221
2222                 oloc = p;
2223                 p += CEPH_ENCODING_START_BLK_LEN;
2224                 pgid.pool = ceph_decode_64(&p);
2225                 p += 4 + 4; /* preferred, key len */
2226                 len = ceph_decode_32(&p);
2227                 p += len;   /* nspace */
2228                 oloc_len = p - oloc;
2229
2230                 oid = p;
2231                 len = ceph_decode_32(&p);
2232                 p += len;
2233                 oid_len = p - oid;
2234
2235                 tail = p;
2236                 tail_len = partial_end - p;
2237
2238                 p = msg->front.iov_base;
2239                 ceph_encode_copy(&p, &head.client_inc, sizeof(head.client_inc));
2240                 ceph_encode_copy(&p, &head.epoch, sizeof(head.epoch));
2241                 ceph_encode_copy(&p, &head.flags, sizeof(head.flags));
2242                 ceph_encode_copy(&p, &head.mtime, sizeof(head.mtime));
2243
2244                 /* reassert_version */
2245                 memset(p, 0, sizeof(struct ceph_eversion));
2246                 p += sizeof(struct ceph_eversion);
2247
2248                 BUG_ON(p >= oloc);
2249                 memmove(p, oloc, oloc_len);
2250                 p += oloc_len;
2251
2252                 pgid.seed = le32_to_cpu(head.hash);
2253                 encode_pgid(&p, &pgid); /* raw pg */
2254
2255                 BUG_ON(p >= oid);
2256                 memmove(p, oid, oid_len);
2257                 p += oid_len;
2258
2259                 /* tail -- ops, snapid, snapc, retry_attempt */
2260                 BUG_ON(p >= tail);
2261                 memmove(p, tail, tail_len);
2262                 p += tail_len;
2263
2264                 msg->hdr.version = cpu_to_le16(4); /* MOSDOp v4 */
2265         }
2266
2267         BUG_ON(p > end);
2268         msg->front.iov_len = p - msg->front.iov_base;
2269         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2270
2271         dout("%s msg %p tid %llu %u+%u+%u v%d\n", __func__, msg,
2272              le64_to_cpu(msg->hdr.tid), le32_to_cpu(msg->hdr.front_len),
2273              le32_to_cpu(msg->hdr.middle_len), le32_to_cpu(msg->hdr.data_len),
2274              le16_to_cpu(msg->hdr.version));
2275 }
2276
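/*
 * Note the in-place trick in the pre-luminous branch above: the v8
 * prefix is snapshotted into the on-stack @head, then oloc, oid and
 * the tail are slid towards the front with memmove() (source and
 * destination may overlap, hence not memcpy()) while the v4-only
 * fields are written in between.
 */
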
2277 /*
2278  * @req has to be assigned a tid and registered.
2279  */
2280 static void send_request(struct ceph_osd_request *req)
2281 {
2282         struct ceph_osd *osd = req->r_osd;
2283
2284         verify_osd_locked(osd);
2285         WARN_ON(osd->o_osd != req->r_t.osd);
2286
2287         /* backoff? */
2288         if (should_plug_request(req))
2289                 return;
2290
2291         /*
2292          * We may have a previously queued request message hanging
2293          * around.  Cancel it to avoid corrupting the msgr.
2294          */
2295         if (req->r_sent)
2296                 ceph_msg_revoke(req->r_request);
2297
2298         req->r_flags |= CEPH_OSD_FLAG_KNOWN_REDIR;
2299         if (req->r_attempts)
2300                 req->r_flags |= CEPH_OSD_FLAG_RETRY;
2301         else
2302                 WARN_ON(req->r_flags & CEPH_OSD_FLAG_RETRY);
2303
2304         encode_request_partial(req, req->r_request);
2305
2306         dout("%s req %p tid %llu to pgid %llu.%x spgid %llu.%xs%d osd%d e%u flags 0x%x attempt %d\n",
2307              __func__, req, req->r_tid, req->r_t.pgid.pool, req->r_t.pgid.seed,
2308              req->r_t.spgid.pgid.pool, req->r_t.spgid.pgid.seed,
2309              req->r_t.spgid.shard, osd->o_osd, req->r_t.epoch, req->r_flags,
2310              req->r_attempts);
2311
2312         req->r_t.paused = false;
2313         req->r_stamp = jiffies;
2314         req->r_attempts++;
2315
2316         req->r_sent = osd->o_incarnation;
2317         req->r_request->hdr.tid = cpu_to_le64(req->r_tid);
2318         ceph_con_send(&osd->o_con, ceph_msg_get(req->r_request));
2319 }
2320
2321 static void maybe_request_map(struct ceph_osd_client *osdc)
2322 {
2323         bool continuous = false;
2324
2325         verify_osdc_locked(osdc);
2326         WARN_ON(!osdc->osdmap->epoch);
2327
2328         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2329             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD) ||
2330             ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2331                 dout("%s osdc %p continuous\n", __func__, osdc);
2332                 continuous = true;
2333         } else {
2334                 dout("%s osdc %p onetime\n", __func__, osdc);
2335         }
2336
2337         if (ceph_monc_want_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
2338                                osdc->osdmap->epoch + 1, continuous))
2339                 ceph_monc_renew_subs(&osdc->client->monc);
2340 }
2341
2342 static void complete_request(struct ceph_osd_request *req, int err);
2343 static void send_map_check(struct ceph_osd_request *req);
2344
2345 static void __submit_request(struct ceph_osd_request *req, bool wrlocked)
2346 {
2347         struct ceph_osd_client *osdc = req->r_osdc;
2348         struct ceph_osd *osd;
2349         enum calc_target_result ct_res;
2350         int err = 0;
2351         bool need_send = false;
2352         bool promoted = false;
2353
2354         WARN_ON(req->r_tid);
2355         dout("%s req %p wrlocked %d\n", __func__, req, wrlocked);
2356
2357 again:
2358         ct_res = calc_target(osdc, &req->r_t, false);
2359         if (ct_res == CALC_TARGET_POOL_DNE && !wrlocked)
2360                 goto promote;
2361
2362         osd = lookup_create_osd(osdc, req->r_t.osd, wrlocked);
2363         if (IS_ERR(osd)) {
2364                 WARN_ON(PTR_ERR(osd) != -EAGAIN || wrlocked);
2365                 goto promote;
2366         }
2367
2368         if (osdc->abort_err) {
2369                 dout("req %p abort_err %d\n", req, osdc->abort_err);
2370                 err = osdc->abort_err;
2371         } else if (osdc->osdmap->epoch < osdc->epoch_barrier) {
2372                 dout("req %p epoch %u barrier %u\n", req, osdc->osdmap->epoch,
2373                      osdc->epoch_barrier);
2374                 req->r_t.paused = true;
2375                 maybe_request_map(osdc);
2376         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2377                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR)) {
2378                 dout("req %p pausewr\n", req);
2379                 req->r_t.paused = true;
2380                 maybe_request_map(osdc);
2381         } else if ((req->r_flags & CEPH_OSD_FLAG_READ) &&
2382                    ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
2383                 dout("req %p pauserd\n", req);
2384                 req->r_t.paused = true;
2385                 maybe_request_map(osdc);
2386         } else if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2387                    !(req->r_flags & (CEPH_OSD_FLAG_FULL_TRY |
2388                                      CEPH_OSD_FLAG_FULL_FORCE)) &&
2389                    (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2390                     pool_full(osdc, req->r_t.base_oloc.pool))) {
2391                 dout("req %p full/pool_full\n", req);
2392                 if (ceph_test_opt(osdc->client, ABORT_ON_FULL)) {
2393                         err = -ENOSPC;
2394                 } else {
2395                         pr_warn_ratelimited("FULL or reached pool quota\n");
2396                         req->r_t.paused = true;
2397                         maybe_request_map(osdc);
2398                 }
2399         } else if (!osd_homeless(osd)) {
2400                 need_send = true;
2401         } else {
2402                 maybe_request_map(osdc);
2403         }
2404
2405         mutex_lock(&osd->lock);
2406         /*
2407          * Assign the tid atomically with send_request() to protect
2408          * multiple writes to the same object from racing with each
2409          * other, resulting in out of order ops on the OSDs.
2410          */
2411         req->r_tid = atomic64_inc_return(&osdc->last_tid);
2412         link_request(osd, req);
2413         if (need_send)
2414                 send_request(req);
2415         else if (err)
2416                 complete_request(req, err);
2417         mutex_unlock(&osd->lock);
2418
2419         if (!err && ct_res == CALC_TARGET_POOL_DNE)
2420                 send_map_check(req);
2421
2422         if (promoted)
2423                 downgrade_write(&osdc->lock);
2424         return;
2425
2426 promote:
2427         up_read(&osdc->lock);
2428         down_write(&osdc->lock);
2429         wrlocked = true;
2430         promoted = true;
2431         goto again;
2432 }
2433
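/*
 * The promote: path in __submit_request() is the usual rwsem upgrade
 * idiom: an rwsem cannot be upgraded in place, so the read lock is
 * dropped and the write lock taken, opening a window in which the
 * osdmap may have changed.  That is why control goes back to again:
 * to recompute the target rather than resuming, and why a promoted
 * call merely downgrades the lock on the way out instead of dropping
 * it.
 */
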
2434 static void account_request(struct ceph_osd_request *req)
2435 {
2436         WARN_ON(req->r_flags & (CEPH_OSD_FLAG_ACK | CEPH_OSD_FLAG_ONDISK));
2437         WARN_ON(!(req->r_flags & (CEPH_OSD_FLAG_READ | CEPH_OSD_FLAG_WRITE)));
2438
2439         req->r_flags |= CEPH_OSD_FLAG_ONDISK;
2440         atomic_inc(&req->r_osdc->num_requests);
2441
2442         req->r_start_stamp = jiffies;
2443         req->r_start_latency = ktime_get();
2444 }
2445
2446 static void submit_request(struct ceph_osd_request *req, bool wrlocked)
2447 {
2448         ceph_osdc_get_request(req);
2449         account_request(req);
2450         __submit_request(req, wrlocked);
2451 }
2452
2453 static void finish_request(struct ceph_osd_request *req)
2454 {
2455         struct ceph_osd_client *osdc = req->r_osdc;
2456
2457         WARN_ON(lookup_request_mc(&osdc->map_checks, req->r_tid));
2458         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2459
2460         req->r_end_latency = ktime_get();
2461
2462         if (req->r_osd)
2463                 unlink_request(req->r_osd, req);
2464         atomic_dec(&osdc->num_requests);
2465
2466         /*
2467          * If an OSD has failed or returned and a request has been sent
2468          * twice, it's possible to get a reply and end up here while the
2469          * request message is queued for delivery.  We will ignore the
2470          * reply, so not a big deal, but better to try and catch it.
2471          */
2472         ceph_msg_revoke(req->r_request);
2473         ceph_msg_revoke_incoming(req->r_reply);
2474 }
2475
2476 static void __complete_request(struct ceph_osd_request *req)
2477 {
2478         dout("%s req %p tid %llu cb %ps result %d\n", __func__, req,
2479              req->r_tid, req->r_callback, req->r_result);
2480
2481         if (req->r_callback)
2482                 req->r_callback(req);
2483         complete_all(&req->r_completion);
2484         ceph_osdc_put_request(req);
2485 }
2486
2487 static void complete_request_workfn(struct work_struct *work)
2488 {
2489         struct ceph_osd_request *req =
2490             container_of(work, struct ceph_osd_request, r_complete_work);
2491
2492         __complete_request(req);
2493 }
2494
2495 /*
2496  * This is open-coded in handle_reply().
2497  */
2498 static void complete_request(struct ceph_osd_request *req, int err)
2499 {
2500         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2501
2502         req->r_result = err;
2503         finish_request(req);
2504
2505         INIT_WORK(&req->r_complete_work, complete_request_workfn);
2506         queue_work(req->r_osdc->completion_wq, &req->r_complete_work);
2507 }
2508
2509 static void cancel_map_check(struct ceph_osd_request *req)
2510 {
2511         struct ceph_osd_client *osdc = req->r_osdc;
2512         struct ceph_osd_request *lookup_req;
2513
2514         verify_osdc_wrlocked(osdc);
2515
2516         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2517         if (!lookup_req)
2518                 return;
2519
2520         WARN_ON(lookup_req != req);
2521         erase_request_mc(&osdc->map_checks, req);
2522         ceph_osdc_put_request(req);
2523 }
2524
2525 static void cancel_request(struct ceph_osd_request *req)
2526 {
2527         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
2528
2529         cancel_map_check(req);
2530         finish_request(req);
2531         complete_all(&req->r_completion);
2532         ceph_osdc_put_request(req);
2533 }
2534
2535 static void abort_request(struct ceph_osd_request *req, int err)
2536 {
2537         dout("%s req %p tid %llu err %d\n", __func__, req, req->r_tid, err);
2538
2539         cancel_map_check(req);
2540         complete_request(req, err);
2541 }
2542
2543 static int abort_fn(struct ceph_osd_request *req, void *arg)
2544 {
2545         int err = *(int *)arg;
2546
2547         abort_request(req, err);
2548         return 0; /* continue iteration */
2549 }
2550
2551 /*
2552  * Abort all in-flight requests with @err and arrange for all future
2553  * requests to be failed immediately.
2554  */
2555 void ceph_osdc_abort_requests(struct ceph_osd_client *osdc, int err)
2556 {
2557         dout("%s osdc %p err %d\n", __func__, osdc, err);
2558         down_write(&osdc->lock);
2559         for_each_request(osdc, abort_fn, &err);
2560         osdc->abort_err = err;
2561         up_write(&osdc->lock);
2562 }
2563 EXPORT_SYMBOL(ceph_osdc_abort_requests);
2564
2565 void ceph_osdc_clear_abort_err(struct ceph_osd_client *osdc)
2566 {
2567         down_write(&osdc->lock);
2568         osdc->abort_err = 0;
2569         up_write(&osdc->lock);
2570 }
2571 EXPORT_SYMBOL(ceph_osdc_clear_abort_err);
2572
2573 static void update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2574 {
2575         if (likely(eb > osdc->epoch_barrier)) {
2576                 dout("updating epoch_barrier from %u to %u\n",
2577                                 osdc->epoch_barrier, eb);
2578                 osdc->epoch_barrier = eb;
2579                 /* Request map if we're not to the barrier yet */
2580                 if (eb > osdc->osdmap->epoch)
2581                         maybe_request_map(osdc);
2582         }
2583 }
2584
2585 void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb)
2586 {
2587         down_read(&osdc->lock);
2588         if (unlikely(eb > osdc->epoch_barrier)) {
2589                 up_read(&osdc->lock);
2590                 down_write(&osdc->lock);
2591                 update_epoch_barrier(osdc, eb);
2592                 up_write(&osdc->lock);
2593         } else {
2594                 up_read(&osdc->lock);
2595         }
2596 }
2597 EXPORT_SYMBOL(ceph_osdc_update_epoch_barrier);
2598
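/*
 * This is double-checked locking: the test under the read lock only
 * optimizes the common no-op case.  update_epoch_barrier() re-checks
 * under the write lock, so racing callers cannot move the barrier
 * backwards.
 */
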
2599 /*
2600  * We can end up releasing caps as a result of abort_request().
2601  * In that case, we probably want to ensure that the cap release message
2602  * has an updated epoch barrier in it, so set the epoch barrier prior to
2603  * aborting the first request.
2604  */
2605 static int abort_on_full_fn(struct ceph_osd_request *req, void *arg)
2606 {
2607         struct ceph_osd_client *osdc = req->r_osdc;
2608         bool *victims = arg;
2609
2610         if ((req->r_flags & CEPH_OSD_FLAG_WRITE) &&
2611             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
2612              pool_full(osdc, req->r_t.base_oloc.pool))) {
2613                 if (!*victims) {
2614                         update_epoch_barrier(osdc, osdc->osdmap->epoch);
2615                         *victims = true;
2616                 }
2617                 abort_request(req, -ENOSPC);
2618         }
2619
2620         return 0; /* continue iteration */
2621 }
2622
2623 /*
2624  * Drop all pending requests that are stalled waiting on a full condition to
2625  * clear, and complete them with ENOSPC as the return code. Set the
2626  * osdc->epoch_barrier to the latest map epoch that we've seen if any were
2627  * cancelled.
2628  */
2629 static void ceph_osdc_abort_on_full(struct ceph_osd_client *osdc)
2630 {
2631         bool victims = false;
2632
2633         if (ceph_test_opt(osdc->client, ABORT_ON_FULL) &&
2634             (ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) || have_pool_full(osdc)))
2635                 for_each_request(osdc, abort_on_full_fn, &victims);
2636 }
2637
2638 static void check_pool_dne(struct ceph_osd_request *req)
2639 {
2640         struct ceph_osd_client *osdc = req->r_osdc;
2641         struct ceph_osdmap *map = osdc->osdmap;
2642
2643         verify_osdc_wrlocked(osdc);
2644         WARN_ON(!map->epoch);
2645
2646         if (req->r_attempts) {
2647                 /*
2648                  * We sent a request earlier, which means that
2649                  * previously the pool existed, and now it does not
2650                  * (i.e., it was deleted).
2651                  */
2652                 req->r_map_dne_bound = map->epoch;
2653                 dout("%s req %p tid %llu pool disappeared\n", __func__, req,
2654                      req->r_tid);
2655         } else {
2656                 dout("%s req %p tid %llu map_dne_bound %u have %u\n", __func__,
2657                      req, req->r_tid, req->r_map_dne_bound, map->epoch);
2658         }
2659
2660         if (req->r_map_dne_bound) {
2661                 if (map->epoch >= req->r_map_dne_bound) {
2662                         /* we had a new enough map */
2663                         pr_info_ratelimited("tid %llu pool does not exist\n",
2664                                             req->r_tid);
2665                         complete_request(req, -ENOENT);
2666                 }
2667         } else {
2668                 send_map_check(req);
2669         }
2670 }
2671
2672 static void map_check_cb(struct ceph_mon_generic_request *greq)
2673 {
2674         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
2675         struct ceph_osd_request *req;
2676         u64 tid = greq->private_data;
2677
2678         WARN_ON(greq->result || !greq->u.newest);
2679
2680         down_write(&osdc->lock);
2681         req = lookup_request_mc(&osdc->map_checks, tid);
2682         if (!req) {
2683                 dout("%s tid %llu dne\n", __func__, tid);
2684                 goto out_unlock;
2685         }
2686
2687         dout("%s req %p tid %llu map_dne_bound %u newest %llu\n", __func__,
2688              req, req->r_tid, req->r_map_dne_bound, greq->u.newest);
2689         if (!req->r_map_dne_bound)
2690                 req->r_map_dne_bound = greq->u.newest;
2691         erase_request_mc(&osdc->map_checks, req);
2692         check_pool_dne(req);
2693
2694         ceph_osdc_put_request(req);
2695 out_unlock:
2696         up_write(&osdc->lock);
2697 }
2698
2699 static void send_map_check(struct ceph_osd_request *req)
2700 {
2701         struct ceph_osd_client *osdc = req->r_osdc;
2702         struct ceph_osd_request *lookup_req;
2703         int ret;
2704
2705         verify_osdc_wrlocked(osdc);
2706
2707         lookup_req = lookup_request_mc(&osdc->map_checks, req->r_tid);
2708         if (lookup_req) {
2709                 WARN_ON(lookup_req != req);
2710                 return;
2711         }
2712
2713         ceph_osdc_get_request(req);
2714         insert_request_mc(&osdc->map_checks, req);
2715         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
2716                                           map_check_cb, req->r_tid);
2717         WARN_ON(ret);
2718 }
2719
2720 /*
2721  * lingering requests, watch/notify v2 infrastructure
2722  */
2723 static void linger_release(struct kref *kref)
2724 {
2725         struct ceph_osd_linger_request *lreq =
2726             container_of(kref, struct ceph_osd_linger_request, kref);
2727
2728         dout("%s lreq %p reg_req %p ping_req %p\n", __func__, lreq,
2729              lreq->reg_req, lreq->ping_req);
2730         WARN_ON(!RB_EMPTY_NODE(&lreq->node));
2731         WARN_ON(!RB_EMPTY_NODE(&lreq->osdc_node));
2732         WARN_ON(!RB_EMPTY_NODE(&lreq->mc_node));
2733         WARN_ON(!list_empty(&lreq->scan_item));
2734         WARN_ON(!list_empty(&lreq->pending_lworks));
2735         WARN_ON(lreq->osd);
2736
2737         if (lreq->reg_req)
2738                 ceph_osdc_put_request(lreq->reg_req);
2739         if (lreq->ping_req)
2740                 ceph_osdc_put_request(lreq->ping_req);
2741         target_destroy(&lreq->t);
2742         kfree(lreq);
2743 }
2744
2745 static void linger_put(struct ceph_osd_linger_request *lreq)
2746 {
2747         if (lreq)
2748                 kref_put(&lreq->kref, linger_release);
2749 }
2750
2751 static struct ceph_osd_linger_request *
2752 linger_get(struct ceph_osd_linger_request *lreq)
2753 {
2754         kref_get(&lreq->kref);
2755         return lreq;
2756 }
2757
2758 static struct ceph_osd_linger_request *
2759 linger_alloc(struct ceph_osd_client *osdc)
2760 {
2761         struct ceph_osd_linger_request *lreq;
2762
2763         lreq = kzalloc(sizeof(*lreq), GFP_NOIO);
2764         if (!lreq)
2765                 return NULL;
2766
2767         kref_init(&lreq->kref);
2768         mutex_init(&lreq->lock);
2769         RB_CLEAR_NODE(&lreq->node);
2770         RB_CLEAR_NODE(&lreq->osdc_node);
2771         RB_CLEAR_NODE(&lreq->mc_node);
2772         INIT_LIST_HEAD(&lreq->scan_item);
2773         INIT_LIST_HEAD(&lreq->pending_lworks);
2774         init_completion(&lreq->reg_commit_wait);
2775         init_completion(&lreq->notify_finish_wait);
2776
2777         lreq->osdc = osdc;
2778         target_init(&lreq->t);
2779
2780         dout("%s lreq %p\n", __func__, lreq);
2781         return lreq;
2782 }
2783
2784 DEFINE_RB_INSDEL_FUNCS(linger, struct ceph_osd_linger_request, linger_id, node)
2785 DEFINE_RB_FUNCS(linger_osdc, struct ceph_osd_linger_request, linger_id, osdc_node)
2786 DEFINE_RB_FUNCS(linger_mc, struct ceph_osd_linger_request, linger_id, mc_node)
2787
2788 /*
2789  * Create linger request <-> OSD session relation.
2790  *
2791  * @lreq has to be registered, @osd may be homeless.
2792  */
2793 static void link_linger(struct ceph_osd *osd,
2794                         struct ceph_osd_linger_request *lreq)
2795 {
2796         verify_osd_locked(osd);
2797         WARN_ON(!lreq->linger_id || lreq->osd);
2798         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2799              osd->o_osd, lreq, lreq->linger_id);
2800
2801         if (!osd_homeless(osd))
2802                 __remove_osd_from_lru(osd);
2803         else
2804                 atomic_inc(&osd->o_osdc->num_homeless);
2805
2806         get_osd(osd);
2807         insert_linger(&osd->o_linger_requests, lreq);
2808         lreq->osd = osd;
2809 }
2810
2811 static void unlink_linger(struct ceph_osd *osd,
2812                           struct ceph_osd_linger_request *lreq)
2813 {
2814         verify_osd_locked(osd);
2815         WARN_ON(lreq->osd != osd);
2816         dout("%s osd %p osd%d lreq %p linger_id %llu\n", __func__, osd,
2817              osd->o_osd, lreq, lreq->linger_id);
2818
2819         lreq->osd = NULL;
2820         erase_linger(&osd->o_linger_requests, lreq);
2821         put_osd(osd);
2822
2823         if (!osd_homeless(osd))
2824                 maybe_move_osd_to_lru(osd);
2825         else
2826                 atomic_dec(&osd->o_osdc->num_homeless);
2827 }
2828
2829 static bool __linger_registered(struct ceph_osd_linger_request *lreq)
2830 {
2831         verify_osdc_locked(lreq->osdc);
2832
2833         return !RB_EMPTY_NODE(&lreq->osdc_node);
2834 }
2835
2836 static bool linger_registered(struct ceph_osd_linger_request *lreq)
2837 {
2838         struct ceph_osd_client *osdc = lreq->osdc;
2839         bool registered;
2840
2841         down_read(&osdc->lock);
2842         registered = __linger_registered(lreq);
2843         up_read(&osdc->lock);
2844
2845         return registered;
2846 }
2847
2848 static void linger_register(struct ceph_osd_linger_request *lreq)
2849 {
2850         struct ceph_osd_client *osdc = lreq->osdc;
2851
2852         verify_osdc_wrlocked(osdc);
2853         WARN_ON(lreq->linger_id);
2854
2855         linger_get(lreq);
2856         lreq->linger_id = ++osdc->last_linger_id;
2857         insert_linger_osdc(&osdc->linger_requests, lreq);
2858 }
2859
2860 static void linger_unregister(struct ceph_osd_linger_request *lreq)
2861 {
2862         struct ceph_osd_client *osdc = lreq->osdc;
2863
2864         verify_osdc_wrlocked(osdc);
2865
2866         erase_linger_osdc(&osdc->linger_requests, lreq);
2867         linger_put(lreq);
2868 }
2869
2870 static void cancel_linger_request(struct ceph_osd_request *req)
2871 {
2872         struct ceph_osd_linger_request *lreq = req->r_priv;
2873
2874         WARN_ON(!req->r_linger);
2875         cancel_request(req);
2876         linger_put(lreq);
2877 }
2878
2879 struct linger_work {
2880         struct work_struct work;
2881         struct ceph_osd_linger_request *lreq;
2882         struct list_head pending_item;
2883         unsigned long queued_stamp;
2884
2885         union {
2886                 struct {
2887                         u64 notify_id;
2888                         u64 notifier_id;
2889                         void *payload; /* points into @msg front */
2890                         size_t payload_len;
2891
2892                         struct ceph_msg *msg; /* for ceph_msg_put() */
2893                 } notify;
2894                 struct {
2895                         int err;
2896                 } error;
2897         };
2898 };
2899
2900 static struct linger_work *lwork_alloc(struct ceph_osd_linger_request *lreq,
2901                                        work_func_t workfn)
2902 {
2903         struct linger_work *lwork;
2904
2905         lwork = kzalloc(sizeof(*lwork), GFP_NOIO);
2906         if (!lwork)
2907                 return NULL;
2908
2909         INIT_WORK(&lwork->work, workfn);
2910         INIT_LIST_HEAD(&lwork->pending_item);
2911         lwork->lreq = linger_get(lreq);
2912
2913         return lwork;
2914 }
2915
2916 static void lwork_free(struct linger_work *lwork)
2917 {
2918         struct ceph_osd_linger_request *lreq = lwork->lreq;
2919
2920         mutex_lock(&lreq->lock);
2921         list_del(&lwork->pending_item);
2922         mutex_unlock(&lreq->lock);
2923
2924         linger_put(lreq);
2925         kfree(lwork);
2926 }
2927
2928 static void lwork_queue(struct linger_work *lwork)
2929 {
2930         struct ceph_osd_linger_request *lreq = lwork->lreq;
2931         struct ceph_osd_client *osdc = lreq->osdc;
2932
2933         verify_lreq_locked(lreq);
2934         WARN_ON(!list_empty(&lwork->pending_item));
2935
2936         lwork->queued_stamp = jiffies;
2937         list_add_tail(&lwork->pending_item, &lreq->pending_lworks);
2938         queue_work(osdc->notify_wq, &lwork->work);
2939 }
2940
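/*
 * lwork lifecycle, as followed by do_watch_notify() and
 * do_watch_error() below (an illustrative sketch, not a separate
 * API):
 *
 *        lwork = lwork_alloc(lreq, workfn);   -- takes a linger ref
 *        <fill in lwork->notify or lwork->error>
 *        lwork_queue(lwork);                  -- lreq->lock must be held
 *        ... workfn runs from osdc->notify_wq ...
 *        lwork_free(lwork);                   -- drops the linger ref
 */
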
2941 static void do_watch_notify(struct work_struct *w)
2942 {
2943         struct linger_work *lwork = container_of(w, struct linger_work, work);
2944         struct ceph_osd_linger_request *lreq = lwork->lreq;
2945
2946         if (!linger_registered(lreq)) {
2947                 dout("%s lreq %p not registered\n", __func__, lreq);
2948                 goto out;
2949         }
2950
2951         WARN_ON(!lreq->is_watch);
2952         dout("%s lreq %p notify_id %llu notifier_id %llu payload_len %zu\n",
2953              __func__, lreq, lwork->notify.notify_id, lwork->notify.notifier_id,
2954              lwork->notify.payload_len);
2955         lreq->wcb(lreq->data, lwork->notify.notify_id, lreq->linger_id,
2956                   lwork->notify.notifier_id, lwork->notify.payload,
2957                   lwork->notify.payload_len);
2958
2959 out:
2960         ceph_msg_put(lwork->notify.msg);
2961         lwork_free(lwork);
2962 }
2963
2964 static void do_watch_error(struct work_struct *w)
2965 {
2966         struct linger_work *lwork = container_of(w, struct linger_work, work);
2967         struct ceph_osd_linger_request *lreq = lwork->lreq;
2968
2969         if (!linger_registered(lreq)) {
2970                 dout("%s lreq %p not registered\n", __func__, lreq);
2971                 goto out;
2972         }
2973
2974         dout("%s lreq %p err %d\n", __func__, lreq, lwork->error.err);
2975         lreq->errcb(lreq->data, lreq->linger_id, lwork->error.err);
2976
2977 out:
2978         lwork_free(lwork);
2979 }
2980
2981 static void queue_watch_error(struct ceph_osd_linger_request *lreq)
2982 {
2983         struct linger_work *lwork;
2984
2985         lwork = lwork_alloc(lreq, do_watch_error);
2986         if (!lwork) {
2987                 pr_err("failed to allocate error-lwork\n");
2988                 return;
2989         }
2990
2991         lwork->error.err = lreq->last_error;
2992         lwork_queue(lwork);
2993 }
2994
2995 static void linger_reg_commit_complete(struct ceph_osd_linger_request *lreq,
2996                                        int result)
2997 {
2998         if (!completion_done(&lreq->reg_commit_wait)) {
2999                 lreq->reg_commit_error = (result <= 0 ? result : 0);
3000                 complete_all(&lreq->reg_commit_wait);
3001         }
3002 }
3003
3004 static void linger_commit_cb(struct ceph_osd_request *req)
3005 {
3006         struct ceph_osd_linger_request *lreq = req->r_priv;
3007
3008         mutex_lock(&lreq->lock);
3009         dout("%s lreq %p linger_id %llu result %d\n", __func__, lreq,
3010              lreq->linger_id, req->r_result);
3011         linger_reg_commit_complete(lreq, req->r_result);
3012         lreq->committed = true;
3013
3014         if (!lreq->is_watch) {
3015                 struct ceph_osd_data *osd_data =
3016                     osd_req_op_data(req, 0, notify, response_data);
3017                 void *p = page_address(osd_data->pages[0]);
3018
3019                 WARN_ON(req->r_ops[0].op != CEPH_OSD_OP_NOTIFY ||
3020                         osd_data->type != CEPH_OSD_DATA_TYPE_PAGES);
3021
3022                 /* make note of the notify_id */
3023                 if (req->r_ops[0].outdata_len >= sizeof(u64)) {
3024                         lreq->notify_id = ceph_decode_64(&p);
3025                         dout("lreq %p notify_id %llu\n", lreq,
3026                              lreq->notify_id);
3027                 } else {
3028                         dout("lreq %p no notify_id\n", lreq);
3029                 }
3030         }
3031
3032         mutex_unlock(&lreq->lock);
3033         linger_put(lreq);
3034 }
3035
3036 static int normalize_watch_error(int err)
3037 {
3038         /*
3039          * Translate ENOENT -> ENOTCONN so that a delete->disconnection
3040          * notification and a failure to reconnect because we raced with
3041          * the delete appear the same to the user.
3042          */
3043         if (err == -ENOENT)
3044                 err = -ENOTCONN;
3045
3046         return err;
3047 }
3048
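/*
 * Both consumers of normalize_watch_error() below follow the same
 * report-once pattern; a minimal sketch (lreq->lock held, as in the
 * real callers):
 *
 *        if (result < 0 && !lreq->last_error) {
 *                lreq->last_error = normalize_watch_error(result);
 *                queue_watch_error(lreq);
 *        }
 *
 * last_error acts as a latch, so the user's error callback fires at
 * most once per established watch.
 */
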
3049 static void linger_reconnect_cb(struct ceph_osd_request *req)
3050 {
3051         struct ceph_osd_linger_request *lreq = req->r_priv;
3052
3053         mutex_lock(&lreq->lock);
3054         dout("%s lreq %p linger_id %llu result %d last_error %d\n", __func__,
3055              lreq, lreq->linger_id, req->r_result, lreq->last_error);
3056         if (req->r_result < 0) {
3057                 if (!lreq->last_error) {
3058                         lreq->last_error = normalize_watch_error(req->r_result);
3059                         queue_watch_error(lreq);
3060                 }
3061         }
3062
3063         mutex_unlock(&lreq->lock);
3064         linger_put(lreq);
3065 }
3066
3067 static void send_linger(struct ceph_osd_linger_request *lreq)
3068 {
3069         struct ceph_osd_request *req = lreq->reg_req;
3070         struct ceph_osd_req_op *op = &req->r_ops[0];
3071
3072         verify_osdc_wrlocked(req->r_osdc);
3073         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3074
3075         if (req->r_osd)
3076                 cancel_linger_request(req);
3077
3078         request_reinit(req);
3079         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
3080         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
3081         req->r_flags = lreq->t.flags;
3082         req->r_mtime = lreq->mtime;
3083
3084         mutex_lock(&lreq->lock);
3085         if (lreq->is_watch && lreq->committed) {
3086                 WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3087                         op->watch.cookie != lreq->linger_id);
3088                 op->watch.op = CEPH_OSD_WATCH_OP_RECONNECT;
3089                 op->watch.gen = ++lreq->register_gen;
3090                 dout("lreq %p reconnect register_gen %u\n", lreq,
3091                      op->watch.gen);
3092                 req->r_callback = linger_reconnect_cb;
3093         } else {
3094                 if (!lreq->is_watch)
3095                         lreq->notify_id = 0;
3096                 else
3097                         WARN_ON(op->watch.op != CEPH_OSD_WATCH_OP_WATCH);
3098                 dout("lreq %p register\n", lreq);
3099                 req->r_callback = linger_commit_cb;
3100         }
3101         mutex_unlock(&lreq->lock);
3102
3103         req->r_priv = linger_get(lreq);
3104         req->r_linger = true;
3105
3106         submit_request(req, true);
3107 }
3108
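/*
 * Summary of the branches in send_linger() above (derived from the
 * code, for reference only):
 *
 *        is_watch && committed -> CEPH_OSD_WATCH_OP_RECONNECT,
 *                                 register_gen++, linger_reconnect_cb
 *        otherwise             -> initial registration (watch or
 *                                 notify), linger_commit_cb
 */
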
3109 static void linger_ping_cb(struct ceph_osd_request *req)
3110 {
3111         struct ceph_osd_linger_request *lreq = req->r_priv;
3112
3113         mutex_lock(&lreq->lock);
3114         dout("%s lreq %p linger_id %llu result %d ping_sent %lu last_error %d\n",
3115              __func__, lreq, lreq->linger_id, req->r_result, lreq->ping_sent,
3116              lreq->last_error);
3117         if (lreq->register_gen == req->r_ops[0].watch.gen) {
3118                 if (!req->r_result) {
3119                         lreq->watch_valid_thru = lreq->ping_sent;
3120                 } else if (!lreq->last_error) {
3121                         lreq->last_error = normalize_watch_error(req->r_result);
3122                         queue_watch_error(lreq);
3123                 }
3124         } else {
3125                 dout("lreq %p register_gen %u ignoring old pong %u\n", lreq,
3126                      lreq->register_gen, req->r_ops[0].watch.gen);
3127         }
3128
3129         mutex_unlock(&lreq->lock);
3130         linger_put(lreq);
3131 }
3132
3133 static void send_linger_ping(struct ceph_osd_linger_request *lreq)
3134 {
3135         struct ceph_osd_client *osdc = lreq->osdc;
3136         struct ceph_osd_request *req = lreq->ping_req;
3137         struct ceph_osd_req_op *op = &req->r_ops[0];
3138
3139         if (ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD)) {
3140                 dout("%s PAUSERD\n", __func__);
3141                 return;
3142         }
3143
3144         lreq->ping_sent = jiffies;
3145         dout("%s lreq %p linger_id %llu ping_sent %lu register_gen %u\n",
3146              __func__, lreq, lreq->linger_id, lreq->ping_sent,
3147              lreq->register_gen);
3148
3149         if (req->r_osd)
3150                 cancel_linger_request(req);
3151
3152         request_reinit(req);
3153         target_copy(&req->r_t, &lreq->t);
3154
3155         WARN_ON(op->op != CEPH_OSD_OP_WATCH ||
3156                 op->watch.cookie != lreq->linger_id ||
3157                 op->watch.op != CEPH_OSD_WATCH_OP_PING);
3158         op->watch.gen = lreq->register_gen;
3159         req->r_callback = linger_ping_cb;
3160         req->r_priv = linger_get(lreq);
3161         req->r_linger = true;
3162
3163         ceph_osdc_get_request(req);
3164         account_request(req);
3165         req->r_tid = atomic64_inc_return(&osdc->last_tid);
3166         link_request(lreq->osd, req);
3167         send_request(req);
3168 }
3169
3170 static void linger_submit(struct ceph_osd_linger_request *lreq)
3171 {
3172         struct ceph_osd_client *osdc = lreq->osdc;
3173         struct ceph_osd *osd;
3174
3175         down_write(&osdc->lock);
3176         linger_register(lreq);
3177         if (lreq->is_watch) {
3178                 lreq->reg_req->r_ops[0].watch.cookie = lreq->linger_id;
3179                 lreq->ping_req->r_ops[0].watch.cookie = lreq->linger_id;
3180         } else {
3181                 lreq->reg_req->r_ops[0].notify.cookie = lreq->linger_id;
3182         }
3183
3184         calc_target(osdc, &lreq->t, false);
3185         osd = lookup_create_osd(osdc, lreq->t.osd, true);
3186         link_linger(osd, lreq);
3187
3188         send_linger(lreq);
3189         up_write(&osdc->lock);
3190 }
3191
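/*
 * The watch/notify cookie is simply the linger_id assigned by
 * linger_register(), which is why send_linger() and
 * send_linger_ping() can WARN_ON() a cookie/linger_id mismatch
 * instead of tracking the cookie separately.
 */
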
3192 static void cancel_linger_map_check(struct ceph_osd_linger_request *lreq)
3193 {
3194         struct ceph_osd_client *osdc = lreq->osdc;
3195         struct ceph_osd_linger_request *lookup_lreq;
3196
3197         verify_osdc_wrlocked(osdc);
3198
3199         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3200                                        lreq->linger_id);
3201         if (!lookup_lreq)
3202                 return;
3203
3204         WARN_ON(lookup_lreq != lreq);
3205         erase_linger_mc(&osdc->linger_map_checks, lreq);
3206         linger_put(lreq);
3207 }
3208
3209 /*
3210  * @lreq has to be both registered and linked.
3211  */
3212 static void __linger_cancel(struct ceph_osd_linger_request *lreq)
3213 {
3214         if (lreq->is_watch && lreq->ping_req->r_osd)
3215                 cancel_linger_request(lreq->ping_req);
3216         if (lreq->reg_req->r_osd)
3217                 cancel_linger_request(lreq->reg_req);
3218         cancel_linger_map_check(lreq);
3219         unlink_linger(lreq->osd, lreq);
3220         linger_unregister(lreq);
3221 }
3222
3223 static void linger_cancel(struct ceph_osd_linger_request *lreq)
3224 {
3225         struct ceph_osd_client *osdc = lreq->osdc;
3226
3227         down_write(&osdc->lock);
3228         if (__linger_registered(lreq))
3229                 __linger_cancel(lreq);
3230         up_write(&osdc->lock);
3231 }
3232
3233 static void send_linger_map_check(struct ceph_osd_linger_request *lreq);
3234
3235 static void check_linger_pool_dne(struct ceph_osd_linger_request *lreq)
3236 {
3237         struct ceph_osd_client *osdc = lreq->osdc;
3238         struct ceph_osdmap *map = osdc->osdmap;
3239
3240         verify_osdc_wrlocked(osdc);
3241         WARN_ON(!map->epoch);
3242
3243         if (lreq->register_gen) {
3244                 lreq->map_dne_bound = map->epoch;
3245                 dout("%s lreq %p linger_id %llu pool disappeared\n", __func__,
3246                      lreq, lreq->linger_id);
3247         } else {
3248                 dout("%s lreq %p linger_id %llu map_dne_bound %u have %u\n",
3249                      __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3250                      map->epoch);
3251         }
3252
3253         if (lreq->map_dne_bound) {
3254                 if (map->epoch >= lreq->map_dne_bound) {
3255                         /* we had a new enough map */
3256                         pr_info("linger_id %llu pool does not exist\n",
3257                                 lreq->linger_id);
3258                         linger_reg_commit_complete(lreq, -ENOENT);
3259                         __linger_cancel(lreq);
3260                 }
3261         } else {
3262                 send_linger_map_check(lreq);
3263         }
3264 }
3265
3266 static void linger_map_check_cb(struct ceph_mon_generic_request *greq)
3267 {
3268         struct ceph_osd_client *osdc = &greq->monc->client->osdc;
3269         struct ceph_osd_linger_request *lreq;
3270         u64 linger_id = greq->private_data;
3271
3272         WARN_ON(greq->result || !greq->u.newest);
3273
3274         down_write(&osdc->lock);
3275         lreq = lookup_linger_mc(&osdc->linger_map_checks, linger_id);
3276         if (!lreq) {
3277                 dout("%s linger_id %llu dne\n", __func__, linger_id);
3278                 goto out_unlock;
3279         }
3280
3281         dout("%s lreq %p linger_id %llu map_dne_bound %u newest %llu\n",
3282              __func__, lreq, lreq->linger_id, lreq->map_dne_bound,
3283              greq->u.newest);
3284         if (!lreq->map_dne_bound)
3285                 lreq->map_dne_bound = greq->u.newest;
3286         erase_linger_mc(&osdc->linger_map_checks, lreq);
3287         check_linger_pool_dne(lreq);
3288
3289         linger_put(lreq);
3290 out_unlock:
3291         up_write(&osdc->lock);
3292 }
3293
3294 static void send_linger_map_check(struct ceph_osd_linger_request *lreq)
3295 {
3296         struct ceph_osd_client *osdc = lreq->osdc;
3297         struct ceph_osd_linger_request *lookup_lreq;
3298         int ret;
3299
3300         verify_osdc_wrlocked(osdc);
3301
3302         lookup_lreq = lookup_linger_mc(&osdc->linger_map_checks,
3303                                        lreq->linger_id);
3304         if (lookup_lreq) {
3305                 WARN_ON(lookup_lreq != lreq);
3306                 return;
3307         }
3308
3309         linger_get(lreq);
3310         insert_linger_mc(&osdc->linger_map_checks, lreq);
3311         ret = ceph_monc_get_version_async(&osdc->client->monc, "osdmap",
3312                                           linger_map_check_cb, lreq->linger_id);
3313         WARN_ON(ret);
3314 }
3315
3316 static int linger_reg_commit_wait(struct ceph_osd_linger_request *lreq)
3317 {
3318         int ret;
3319
3320         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3321         ret = wait_for_completion_interruptible(&lreq->reg_commit_wait);
3322         return ret ?: lreq->reg_commit_error;
3323 }
3324
3325 static int linger_notify_finish_wait(struct ceph_osd_linger_request *lreq)
3326 {
3327         int ret;
3328
3329         dout("%s lreq %p linger_id %llu\n", __func__, lreq, lreq->linger_id);
3330         ret = wait_for_completion_interruptible(&lreq->notify_finish_wait);
3331         return ret ?: lreq->notify_finish_error;
3332 }
3333
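/*
 * Both waiters use the "ret ?: error" idiom: the interruptible wait
 * result (0 or -ERESTARTSYS) wins if non-zero, otherwise the error
 * recorded by the completion callback is returned.  A sketch of how
 * a caller consumes this (the real consumers, e.g. ceph_osdc_watch(),
 * appear later in this file):
 *
 *        ret = linger_reg_commit_wait(lreq);
 *        if (ret)
 *                <registration was interrupted or failed>
 */
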
3334 /*
3335  * Timeout callback, called every N seconds.  When one or more OSD
3336  * requests have been active for more than N seconds, we send a
3337  * keepalive (tag + timestamp) to the OSD serving them to ensure any
3338  * communications channel reset is detected.
3339  */
3340 static void handle_timeout(struct work_struct *work)
3341 {
3342         struct ceph_osd_client *osdc =
3343                 container_of(work, struct ceph_osd_client, timeout_work.work);
3344         struct ceph_options *opts = osdc->client->options;
3345         unsigned long cutoff = jiffies - opts->osd_keepalive_timeout;
3346         unsigned long expiry_cutoff = jiffies - opts->osd_request_timeout;
3347         LIST_HEAD(slow_osds);
3348         struct rb_node *n, *p;
3349
3350         dout("%s osdc %p\n", __func__, osdc);
3351         down_write(&osdc->lock);
3352
3353         /*
3354          * Ping osds that are a bit slow.  This ensures that if there
3355          * is a break in the TCP connection we will notice and reopen
3356          * a connection with that osd (from the fault callback).
3357          */
3358         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
3359                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3360                 bool found = false;
3361
3362                 for (p = rb_first(&osd->o_requests); p; ) {
3363                         struct ceph_osd_request *req =
3364                             rb_entry(p, struct ceph_osd_request, r_node);
3365
3366                         p = rb_next(p); /* abort_request() */
3367
3368                         if (time_before(req->r_stamp, cutoff)) {
3369                                 dout(" req %p tid %llu on osd%d is laggy\n",
3370                                      req, req->r_tid, osd->o_osd);
3371                                 found = true;
3372                         }
3373                         if (opts->osd_request_timeout &&
3374                             time_before(req->r_start_stamp, expiry_cutoff)) {
3375                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3376                                        req->r_tid, osd->o_osd);
3377                                 abort_request(req, -ETIMEDOUT);
3378                         }
3379                 }
3380                 for (p = rb_first(&osd->o_linger_requests); p; p = rb_next(p)) {
3381                         struct ceph_osd_linger_request *lreq =
3382                             rb_entry(p, struct ceph_osd_linger_request, node);
3383
3384                         dout(" lreq %p linger_id %llu is served by osd%d\n",
3385                              lreq, lreq->linger_id, osd->o_osd);
3386                         found = true;
3387
3388                         mutex_lock(&lreq->lock);
3389                         if (lreq->is_watch && lreq->committed && !lreq->last_error)
3390                                 send_linger_ping(lreq);
3391                         mutex_unlock(&lreq->lock);
3392                 }
3393
3394                 if (found)
3395                         list_move_tail(&osd->o_keepalive_item, &slow_osds);
3396         }
3397
3398         if (opts->osd_request_timeout) {
3399                 for (p = rb_first(&osdc->homeless_osd.o_requests); p; ) {
3400                         struct ceph_osd_request *req =
3401                             rb_entry(p, struct ceph_osd_request, r_node);
3402
3403                         p = rb_next(p); /* abort_request() */
3404
3405                         if (time_before(req->r_start_stamp, expiry_cutoff)) {
3406                                 pr_err_ratelimited("tid %llu on osd%d timeout\n",
3407                                        req->r_tid, osdc->homeless_osd.o_osd);
3408                                 abort_request(req, -ETIMEDOUT);
3409                         }
3410                 }
3411         }
3412
3413         if (atomic_read(&osdc->num_homeless) || !list_empty(&slow_osds))
3414                 maybe_request_map(osdc);
3415
3416         while (!list_empty(&slow_osds)) {
3417                 struct ceph_osd *osd = list_first_entry(&slow_osds,
3418                                                         struct ceph_osd,
3419                                                         o_keepalive_item);
3420                 list_del_init(&osd->o_keepalive_item);
3421                 ceph_con_keepalive(&osd->o_con);
3422         }
3423
3424         up_write(&osdc->lock);
3425         schedule_delayed_work(&osdc->timeout_work,
3426                               osdc->client->options->osd_keepalive_timeout);
3427 }
3428
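/*
 * The cutoff arithmetic in handle_timeout() is plain jiffies math;
 * an illustrative expansion:
 *
 *        cutoff = jiffies - opts->osd_keepalive_timeout;
 *        laggy  = time_before(req->r_stamp, cutoff);
 *
 * i.e. a request stamped before the cutoff has been in flight for
 * longer than the keepalive timeout.  time_before() is used rather
 * than a raw '<' so the comparison stays correct across jiffies
 * wraparound.
 */
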
3429 static void handle_osds_timeout(struct work_struct *work)
3430 {
3431         struct ceph_osd_client *osdc =
3432                 container_of(work, struct ceph_osd_client,
3433                              osds_timeout_work.work);
3434         unsigned long delay = osdc->client->options->osd_idle_ttl / 4;
3435         struct ceph_osd *osd, *nosd;
3436
3437         dout("%s osdc %p\n", __func__, osdc);
3438         down_write(&osdc->lock);
3439         list_for_each_entry_safe(osd, nosd, &osdc->osd_lru, o_osd_lru) {
3440                 if (time_before(jiffies, osd->lru_ttl))
3441                         break;
3442
3443                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_requests));
3444                 WARN_ON(!RB_EMPTY_ROOT(&osd->o_linger_requests));
3445                 close_osd(osd);
3446         }
3447
3448         up_write(&osdc->lock);
3449         schedule_delayed_work(&osdc->osds_timeout_work,
3450                               round_jiffies_relative(delay));
3451 }
3452
3453 static int ceph_oloc_decode(void **p, void *end,
3454                             struct ceph_object_locator *oloc)
3455 {
3456         u8 struct_v, struct_cv;
3457         u32 len;
3458         void *struct_end;
3459         int ret = 0;
3460
3461         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3462         struct_v = ceph_decode_8(p);
3463         struct_cv = ceph_decode_8(p);
3464         if (struct_v < 3) {
3465                 pr_warn("got v %d < 3 cv %d of ceph_object_locator\n",
3466                         struct_v, struct_cv);
3467                 goto e_inval;
3468         }
3469         if (struct_cv > 6) {
3470                 pr_warn("got v %d cv %d > 6 of ceph_object_locator\n",
3471                         struct_v, struct_cv);
3472                 goto e_inval;
3473         }
3474         len = ceph_decode_32(p);
3475         ceph_decode_need(p, end, len, e_inval);
3476         struct_end = *p + len;
3477
3478         oloc->pool = ceph_decode_64(p);
3479         *p += 4; /* skip preferred */
3480
3481         len = ceph_decode_32(p);
3482         if (len > 0) {
3483                 pr_warn("ceph_object_locator::key is set\n");
3484                 goto e_inval;
3485         }
3486
3487         if (struct_v >= 5) {
3488                 bool changed = false;
3489
3490                 len = ceph_decode_32(p);
3491                 if (len > 0) {
3492                         ceph_decode_need(p, end, len, e_inval);
3493                         if (!oloc->pool_ns ||
3494                             ceph_compare_string(oloc->pool_ns, *p, len))
3495                                 changed = true;
3496                         *p += len;
3497                 } else {
3498                         if (oloc->pool_ns)
3499                                 changed = true;
3500                 }
3501                 if (changed) {
3502                         /* redirect changes namespace */
3503                         pr_warn("ceph_object_locator::nspace is changed\n");
3504                         goto e_inval;
3505                 }
3506         }
3507
3508         if (struct_v >= 6) {
3509                 s64 hash = ceph_decode_64(p);

3510                 if (hash != -1) {
3511                         pr_warn("ceph_object_locator::hash is set\n");
3512                         goto e_inval;
3513                 }
3514         }
3515
3516         /* skip the rest */
3517         *p = struct_end;
3518 out:
3519         return ret;
3520
3521 e_inval:
3522         ret = -EINVAL;
3523         goto out;
3524 }
3525
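/*
 * ceph_oloc_decode() follows the standard Ceph versioned-encoding
 * skeleton, repeated by ceph_redirect_decode() below; a minimal
 * sketch of the pattern:
 *
 *        ceph_decode_need(p, end, 1 + 1 + 4, e_inval);  -- v, cv, len
 *        struct_v = ceph_decode_8(p);
 *        struct_cv = ceph_decode_8(p);  -- reject unknown compat versions
 *        len = ceph_decode_32(p);
 *        ceph_decode_need(p, end, len, e_inval);
 *        struct_end = *p + len;
 *        <decode known fields, gated on struct_v>
 *        *p = struct_end;  -- skip any fields this decoder doesn't know
 *
 * Jumping to struct_end is what keeps the decoder forward compatible
 * with newer encodings.
 */
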
3526 static int ceph_redirect_decode(void **p, void *end,
3527                                 struct ceph_request_redirect *redir)
3528 {
3529         u8 struct_v, struct_cv;
3530         u32 len;
3531         void *struct_end;
3532         int ret;
3533
3534         ceph_decode_need(p, end, 1 + 1 + 4, e_inval);
3535         struct_v = ceph_decode_8(p);
3536         struct_cv = ceph_decode_8(p);
3537         if (struct_cv > 1) {
3538                 pr_warn("got v %d cv %d > 1 of ceph_request_redirect\n",
3539                         struct_v, struct_cv);
3540                 goto e_inval;
3541         }
3542         len = ceph_decode_32(p);
3543         ceph_decode_need(p, end, len, e_inval);
3544         struct_end = *p + len;
3545
3546         ret = ceph_oloc_decode(p, end, &redir->oloc);
3547         if (ret)
3548                 goto out;
3549
3550         len = ceph_decode_32(p);
3551         if (len > 0) {
3552                 pr_warn("ceph_request_redirect::object_name is set\n");
3553                 goto e_inval;
3554         }
3555
3556         /* skip the rest */
3557         *p = struct_end;
3558 out:
3559         return ret;
3560
3561 e_inval:
3562         ret = -EINVAL;
3563         goto out;
3564 }
3565
3566 struct MOSDOpReply {
3567         struct ceph_pg pgid;
3568         u64 flags;
3569         int result;
3570         u32 epoch;
3571         int num_ops;
3572         u32 outdata_len[CEPH_OSD_MAX_OPS];
3573         s32 rval[CEPH_OSD_MAX_OPS];
3574         int retry_attempt;
3575         struct ceph_eversion replay_version;
3576         u64 user_version;
3577         struct ceph_request_redirect redirect;
3578 };
3579
3580 static int decode_MOSDOpReply(const struct ceph_msg *msg, struct MOSDOpReply *m)
3581 {
3582         void *p = msg->front.iov_base;
3583         void *const end = p + msg->front.iov_len;
3584         u16 version = le16_to_cpu(msg->hdr.version);
3585         struct ceph_eversion bad_replay_version;
3586         u8 decode_redir;
3587         u32 len;
3588         int ret;
3589         int i;
3590
3591         ceph_decode_32_safe(&p, end, len, e_inval);
3592         ceph_decode_need(&p, end, len, e_inval);
3593         p += len; /* skip oid */
3594
3595         ret = ceph_decode_pgid(&p, end, &m->pgid);
3596         if (ret)
3597                 return ret;
3598
3599         ceph_decode_64_safe(&p, end, m->flags, e_inval);
3600         ceph_decode_32_safe(&p, end, m->result, e_inval);
3601         ceph_decode_need(&p, end, sizeof(bad_replay_version), e_inval);
3602         memcpy(&bad_replay_version, p, sizeof(bad_replay_version));
3603         p += sizeof(bad_replay_version);
3604         ceph_decode_32_safe(&p, end, m->epoch, e_inval);
3605
3606         ceph_decode_32_safe(&p, end, m->num_ops, e_inval);
3607         if (m->num_ops > ARRAY_SIZE(m->outdata_len))
3608                 goto e_inval;
3609
3610         ceph_decode_need(&p, end, m->num_ops * sizeof(struct ceph_osd_op),
3611                          e_inval);
3612         for (i = 0; i < m->num_ops; i++) {
3613                 struct ceph_osd_op *op = p;
3614
3615                 m->outdata_len[i] = le32_to_cpu(op->payload_len);
3616                 p += sizeof(*op);
3617         }
3618
3619         ceph_decode_32_safe(&p, end, m->retry_attempt, e_inval);
3620         for (i = 0; i < m->num_ops; i++)
3621                 ceph_decode_32_safe(&p, end, m->rval[i], e_inval);
3622
3623         if (version >= 5) {
3624                 ceph_decode_need(&p, end, sizeof(m->replay_version), e_inval);
3625                 memcpy(&m->replay_version, p, sizeof(m->replay_version));
3626                 p += sizeof(m->replay_version);
3627                 ceph_decode_64_safe(&p, end, m->user_version, e_inval);
3628         } else {
3629                 m->replay_version = bad_replay_version; /* struct */
3630                 m->user_version = le64_to_cpu(m->replay_version.version);
3631         }
3632
3633         if (version >= 6) {
3634                 if (version >= 7)
3635                         ceph_decode_8_safe(&p, end, decode_redir, e_inval);
3636                 else
3637                         decode_redir = 1;
3638         } else {
3639                 decode_redir = 0;
3640         }
3641
3642         if (decode_redir) {
3643                 ret = ceph_redirect_decode(&p, end, &m->redirect);
3644                 if (ret)
3645                         return ret;
3646         } else {
3647                 ceph_oloc_init(&m->redirect.oloc);
3648         }
3649
3650         return 0;
3651
3652 e_inval:
3653         return -EINVAL;
3654 }
3655
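/*
 * Front-section layout walked by decode_MOSDOpReply() above (v4
 * base, as implied by the code; for reference only):
 *
 *        oid (length-prefixed, skipped)
 *        pgid
 *        flags (64), result (32), bad_replay_version, epoch (32)
 *        num_ops, then num_ops * ceph_osd_op (payload_len harvested)
 *        retry_attempt, then num_ops * rval
 *        v5+: replay_version, user_version
 *        v7+: decode_redir byte; v6: redirect always present
 */
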
3656 /*
3657  * Handle MOSDOpReply.  Set ->r_result and call the callback if it is
3658  * specified.
3659  */
3660 static void handle_reply(struct ceph_osd *osd, struct ceph_msg *msg)
3661 {
3662         struct ceph_osd_client *osdc = osd->o_osdc;
3663         struct ceph_osd_request *req;
3664         struct MOSDOpReply m;
3665         u64 tid = le64_to_cpu(msg->hdr.tid);
3666         u32 data_len = 0;
3667         int ret;
3668         int i;
3669
3670         dout("%s msg %p tid %llu\n", __func__, msg, tid);
3671
3672         down_read(&osdc->lock);
3673         if (!osd_registered(osd)) {
3674                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
3675                 goto out_unlock_osdc;
3676         }
3677         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
3678
3679         mutex_lock(&osd->lock);
3680         req = lookup_request(&osd->o_requests, tid);
3681         if (!req) {
3682                 dout("%s osd%d tid %llu unknown\n", __func__, osd->o_osd, tid);
3683                 goto out_unlock_session;
3684         }
3685
3686         m.redirect.oloc.pool_ns = req->r_t.target_oloc.pool_ns;
3687         ret = decode_MOSDOpReply(msg, &m);
3688         m.redirect.oloc.pool_ns = NULL;
3689         if (ret) {
3690                 pr_err("failed to decode MOSDOpReply for tid %llu: %d\n",
3691                        req->r_tid, ret);
3692                 ceph_msg_dump(msg);
3693                 goto fail_request;
3694         }
3695         dout("%s req %p tid %llu flags 0x%llx pgid %llu.%x epoch %u attempt %d v %u'%llu uv %llu\n",
3696              __func__, req, req->r_tid, m.flags, m.pgid.pool, m.pgid.seed,
3697              m.epoch, m.retry_attempt, le32_to_cpu(m.replay_version.epoch),
3698              le64_to_cpu(m.replay_version.version), m.user_version);
3699
3700         if (m.retry_attempt >= 0) {
3701                 if (m.retry_attempt != req->r_attempts - 1) {
3702                         dout("req %p tid %llu retry_attempt %d != %d, ignoring\n",
3703                              req, req->r_tid, m.retry_attempt,
3704                              req->r_attempts - 1);
3705                         goto out_unlock_session;
3706                 }
3707         } else {
3708                 WARN_ON(1); /* MOSDOpReply v4 is assumed */
3709         }
3710
3711         if (!ceph_oloc_empty(&m.redirect.oloc)) {
3712                 dout("req %p tid %llu redirect pool %lld\n", req, req->r_tid,
3713                      m.redirect.oloc.pool);
3714                 unlink_request(osd, req);
3715                 mutex_unlock(&osd->lock);
3716
3717                 /*
3718                  * Not ceph_oloc_copy() - changing pool_ns is not
3719                  * supported.
3720                  */
3721                 req->r_t.target_oloc.pool = m.redirect.oloc.pool;
3722                 req->r_flags |= CEPH_OSD_FLAG_REDIRECTED |
3723                                 CEPH_OSD_FLAG_IGNORE_OVERLAY |
3724                                 CEPH_OSD_FLAG_IGNORE_CACHE;
3725                 req->r_tid = 0;
3726                 __submit_request(req, false);
3727                 goto out_unlock_osdc;
3728         }
3729
3730         if (m.result == -EAGAIN) {
3731                 dout("req %p tid %llu EAGAIN\n", req, req->r_tid);
3732                 unlink_request(osd, req);
3733                 mutex_unlock(&osd->lock);
3734
3735                 /*
3736                  * The object is missing on the replica or not (yet)
3737                  * readable.  Clear pgid to force a resend to the primary
3738                  * via legacy_change.
3739                  */
3740                 req->r_t.pgid.pool = 0;
3741                 req->r_t.pgid.seed = 0;
3742                 WARN_ON(!req->r_t.used_replica);
3743                 req->r_flags &= ~(CEPH_OSD_FLAG_BALANCE_READS |
3744                                   CEPH_OSD_FLAG_LOCALIZE_READS);
3745                 req->r_tid = 0;
3746                 __submit_request(req, false);
3747                 goto out_unlock_osdc;
3748         }
3749
3750         if (m.num_ops != req->r_num_ops) {
3751                 pr_err("num_ops %d != %d for tid %llu\n", m.num_ops,
3752                        req->r_num_ops, req->r_tid);
3753                 goto fail_request;
3754         }
3755         for (i = 0; i < req->r_num_ops; i++) {
3756                 dout(" req %p tid %llu op %d rval %d len %u\n", req,
3757                      req->r_tid, i, m.rval[i], m.outdata_len[i]);
3758                 req->r_ops[i].rval = m.rval[i];
3759                 req->r_ops[i].outdata_len = m.outdata_len[i];
3760                 data_len += m.outdata_len[i];
3761         }
3762         if (data_len != le32_to_cpu(msg->hdr.data_len)) {
3763                 pr_err("sum of lens %u != %u for tid %llu\n", data_len,
3764                        le32_to_cpu(msg->hdr.data_len), req->r_tid);
3765                 goto fail_request;
3766         }
3767         dout("%s req %p tid %llu result %d data_len %u\n", __func__,
3768              req, req->r_tid, m.result, data_len);
3769
3770         /*
3771          * Since we only ever request ONDISK, we should only ever get
3772          * one (type of) reply back.
3773          */
3774         WARN_ON(!(m.flags & CEPH_OSD_FLAG_ONDISK));
3775         req->r_result = m.result ?: data_len;
3776         finish_request(req);
3777         mutex_unlock(&osd->lock);
3778         up_read(&osdc->lock);
3779
3780         __complete_request(req);
3781         return;
3782
3783 fail_request:
3784         complete_request(req, -EIO);
3785 out_unlock_session:
3786         mutex_unlock(&osd->lock);
3787 out_unlock_osdc:
3788         up_read(&osdc->lock);
3789 }
3790
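/*
 * Locking pattern used by handle_reply() and the other message
 * handlers (a sketch of the nesting, not new API):
 *
 *        down_read(&osdc->lock);   -- pin the osd registration
 *        mutex_lock(&osd->lock);   -- protect osd->o_requests lookup
 *        ...
 *        mutex_unlock(&osd->lock);
 *        up_read(&osdc->lock);
 *
 * The success path drops both locks before __complete_request(), so
 * user callbacks never run under them.
 */
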
3791 static void set_pool_was_full(struct ceph_osd_client *osdc)
3792 {
3793         struct rb_node *n;
3794
3795         for (n = rb_first(&osdc->osdmap->pg_pools); n; n = rb_next(n)) {
3796                 struct ceph_pg_pool_info *pi =
3797                     rb_entry(n, struct ceph_pg_pool_info, node);
3798
3799                 pi->was_full = __pool_full(pi);
3800         }
3801 }
3802
3803 static bool pool_cleared_full(struct ceph_osd_client *osdc, s64 pool_id)
3804 {
3805         struct ceph_pg_pool_info *pi;
3806
3807         pi = ceph_pg_pool_by_id(osdc->osdmap, pool_id);
3808         if (!pi)
3809                 return false;
3810
3811         return pi->was_full && !__pool_full(pi);
3812 }
3813
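/*
 * set_pool_was_full() and pool_cleared_full() bracket a map update:
 * ->was_full is snapshotted before the new map is applied and then
 * compared against the new fullness, so a full -> not-full
 * transition can force paused writes to be resent (see
 * force_resend_writes in scan_requests()).
 */
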
3814 static enum calc_target_result
3815 recalc_linger_target(struct ceph_osd_linger_request *lreq)
3816 {
3817         struct ceph_osd_client *osdc = lreq->osdc;
3818         enum calc_target_result ct_res;
3819
3820         ct_res = calc_target(osdc, &lreq->t, true);
3821         if (ct_res == CALC_TARGET_NEED_RESEND) {
3822                 struct ceph_osd *osd;
3823
3824                 osd = lookup_create_osd(osdc, lreq->t.osd, true);
3825                 if (osd != lreq->osd) {
3826                         unlink_linger(lreq->osd, lreq);
3827                         link_linger(osd, lreq);
3828                 }
3829         }
3830
3831         return ct_res;
3832 }
3833
3834 /*
3835  * Requeue requests whose mapping to an OSD has changed.
3836  */
3837 static void scan_requests(struct ceph_osd *osd,
3838                           bool force_resend,
3839                           bool cleared_full,
3840                           bool check_pool_cleared_full,
3841                           struct rb_root *need_resend,
3842                           struct list_head *need_resend_linger)
3843 {
3844         struct ceph_osd_client *osdc = osd->o_osdc;
3845         struct rb_node *n;
3846         bool force_resend_writes;
3847
3848         for (n = rb_first(&osd->o_linger_requests); n; ) {
3849                 struct ceph_osd_linger_request *lreq =
3850                     rb_entry(n, struct ceph_osd_linger_request, node);
3851                 enum calc_target_result ct_res;
3852
3853                 n = rb_next(n); /* recalc_linger_target() */
3854
3855                 dout("%s lreq %p linger_id %llu\n", __func__, lreq,
3856                      lreq->linger_id);
3857                 ct_res = recalc_linger_target(lreq);
3858                 switch (ct_res) {
3859                 case CALC_TARGET_NO_ACTION:
3860                         force_resend_writes = cleared_full ||
3861                             (check_pool_cleared_full &&
3862                              pool_cleared_full(osdc, lreq->t.base_oloc.pool));
3863                         if (!force_resend && !force_resend_writes)
3864                                 break;
3865
3866                         /* fall through */
3867                 case CALC_TARGET_NEED_RESEND:
3868                         cancel_linger_map_check(lreq);
3869                         /*
3870                          * scan_requests() for the previous epoch(s)
3871                          * may have already added it to the list, since
3872                          * it's not unlinked here.
3873                          */
3874                         if (list_empty(&lreq->scan_item))
3875                                 list_add_tail(&lreq->scan_item, need_resend_linger);
3876                         break;
3877                 case CALC_TARGET_POOL_DNE:
3878                         list_del_init(&lreq->scan_item);
3879                         check_linger_pool_dne(lreq);
3880                         break;
3881                 }
3882         }
3883
3884         for (n = rb_first(&osd->o_requests); n; ) {
3885                 struct ceph_osd_request *req =
3886                     rb_entry(n, struct ceph_osd_request, r_node);
3887                 enum calc_target_result ct_res;
3888
3889                 n = rb_next(n); /* unlink_request(), check_pool_dne() */
3890
3891                 dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
3892                 ct_res = calc_target(osdc, &req->r_t, false);
3893                 switch (ct_res) {
3894                 case CALC_TARGET_NO_ACTION:
3895                         force_resend_writes = cleared_full ||
3896                             (check_pool_cleared_full &&
3897                              pool_cleared_full(osdc, req->r_t.base_oloc.pool));
3898                         if (!force_resend &&
3899                             (!(req->r_flags & CEPH_OSD_FLAG_WRITE) ||
3900                              !force_resend_writes))
3901                                 break;
3902
3903                         /* fall through */
3904                 case CALC_TARGET_NEED_RESEND:
3905                         cancel_map_check(req);
3906                         unlink_request(osd, req);
3907                         insert_request(need_resend, req);
3908                         break;
3909                 case CALC_TARGET_POOL_DNE:
3910                         check_pool_dne(req);
3911                         break;
3912                 }
3913         }
3914 }
3915
3916 static int handle_one_map(struct ceph_osd_client *osdc,
3917                           void *p, void *end, bool incremental,
3918                           struct rb_root *need_resend,
3919                           struct list_head *need_resend_linger)
3920 {
3921         struct ceph_osdmap *newmap;
3922         struct rb_node *n;
3923         bool skipped_map = false;
3924         bool was_full;
3925
3926         was_full = ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3927         set_pool_was_full(osdc);
3928
3929         if (incremental)
3930                 newmap = osdmap_apply_incremental(&p, end, osdc->osdmap);
3931         else
3932                 newmap = ceph_osdmap_decode(&p, end);
3933         if (IS_ERR(newmap))
3934                 return PTR_ERR(newmap);
3935
3936         if (newmap != osdc->osdmap) {
3937                 /*
3938                  * Preserve ->was_full before destroying the old map.
3939                  * For pools that weren't in the old map, ->was_full
3940                  * should be false.
3941                  */
3942                 for (n = rb_first(&newmap->pg_pools); n; n = rb_next(n)) {
3943                         struct ceph_pg_pool_info *pi =
3944                             rb_entry(n, struct ceph_pg_pool_info, node);
3945                         struct ceph_pg_pool_info *old_pi;
3946
3947                         old_pi = ceph_pg_pool_by_id(osdc->osdmap, pi->id);
3948                         if (old_pi)
3949                                 pi->was_full = old_pi->was_full;
3950                         else
3951                                 WARN_ON(pi->was_full);
3952                 }
3953
3954                 if (osdc->osdmap->epoch &&
3955                     osdc->osdmap->epoch + 1 < newmap->epoch) {
3956                         WARN_ON(incremental);
3957                         skipped_map = true;
3958                 }
3959
3960                 ceph_osdmap_destroy(osdc->osdmap);
3961                 osdc->osdmap = newmap;
3962         }
3963
3964         was_full &= !ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL);
3965         scan_requests(&osdc->homeless_osd, skipped_map, was_full, true,
3966                       need_resend, need_resend_linger);
3967
3968         for (n = rb_first(&osdc->osds); n; ) {
3969                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
3970
3971                 n = rb_next(n); /* close_osd() */
3972
3973                 scan_requests(osd, skipped_map, was_full, true, need_resend,
3974                               need_resend_linger);
3975                 if (!ceph_osd_is_up(osdc->osdmap, osd->o_osd) ||
3976                     memcmp(&osd->o_con.peer_addr,
3977                            ceph_osd_addr(osdc->osdmap, osd->o_osd),
3978                            sizeof(struct ceph_entity_addr)))
3979                         close_osd(osd);
3980         }
3981
3982         return 0;
3983 }
3984
3985 static void kick_requests(struct ceph_osd_client *osdc,
3986                           struct rb_root *need_resend,
3987                           struct list_head *need_resend_linger)
3988 {
3989         struct ceph_osd_linger_request *lreq, *nlreq;
3990         enum calc_target_result ct_res;
3991         struct rb_node *n;
3992
3993         /* make sure need_resend targets reflect latest map */
3994         for (n = rb_first(need_resend); n; ) {
3995                 struct ceph_osd_request *req =
3996                     rb_entry(n, struct ceph_osd_request, r_node);
3997
3998                 n = rb_next(n);
3999
4000                 if (req->r_t.epoch < osdc->osdmap->epoch) {
4001                         ct_res = calc_target(osdc, &req->r_t, false);
4002                         if (ct_res == CALC_TARGET_POOL_DNE) {
4003                                 erase_request(need_resend, req);
4004                                 check_pool_dne(req);
4005                         }
4006                 }
4007         }
4008
4009         for (n = rb_first(need_resend); n; ) {
4010                 struct ceph_osd_request *req =
4011                     rb_entry(n, struct ceph_osd_request, r_node);
4012                 struct ceph_osd *osd;
4013
4014                 n = rb_next(n);
4015                 erase_request(need_resend, req); /* before link_request() */
4016
4017                 osd = lookup_create_osd(osdc, req->r_t.osd, true);
4018                 link_request(osd, req);
4019                 if (!req->r_linger) {
4020                         if (!osd_homeless(osd) && !req->r_t.paused)
4021                                 send_request(req);
4022                 } else {
4023                         cancel_linger_request(req);
4024                 }
4025         }
4026
4027         list_for_each_entry_safe(lreq, nlreq, need_resend_linger, scan_item) {
4028                 if (!osd_homeless(lreq->osd))
4029                         send_linger(lreq);
4030
4031                 list_del_init(&lreq->scan_item);
4032         }
4033 }
4034
4035 /*
4036  * Process updated osd map.
4037  *
4038  * The message contains any number of incremental and full maps, normally
4039  * indicating some sort of topology change in the cluster.  Kick requests
4040  * off to different OSDs as needed.
4041  */
4042 void ceph_osdc_handle_map(struct ceph_osd_client *osdc, struct ceph_msg *msg)
4043 {
4044         void *p = msg->front.iov_base;
4045         void *const end = p + msg->front.iov_len;
4046         u32 nr_maps, maplen;
4047         u32 epoch;
4048         struct ceph_fsid fsid;
4049         struct rb_root need_resend = RB_ROOT;
4050         LIST_HEAD(need_resend_linger);
4051         bool handled_incremental = false;
4052         bool was_pauserd, was_pausewr;
4053         bool pauserd, pausewr;
4054         int err;
4055
4056         dout("%s have %u\n", __func__, osdc->osdmap->epoch);
4057         down_write(&osdc->lock);
4058
4059         /* verify fsid */
4060         ceph_decode_need(&p, end, sizeof(fsid), bad);
4061         ceph_decode_copy(&p, &fsid, sizeof(fsid));
4062         if (ceph_check_fsid(osdc->client, &fsid) < 0)
4063                 goto bad;
4064
4065         was_pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4066         was_pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4067                       ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4068                       have_pool_full(osdc);
4069
4070         /* incremental maps */
4071         ceph_decode_32_safe(&p, end, nr_maps, bad);
4072         dout(" %d inc maps\n", nr_maps);
4073         while (nr_maps > 0) {
4074                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4075                 epoch = ceph_decode_32(&p);
4076                 maplen = ceph_decode_32(&p);
4077                 ceph_decode_need(&p, end, maplen, bad);
4078                 if (osdc->osdmap->epoch &&
4079                     osdc->osdmap->epoch + 1 == epoch) {
4080                         dout("applying incremental map %u len %d\n",
4081                              epoch, maplen);
4082                         err = handle_one_map(osdc, p, p + maplen, true,
4083                                              &need_resend, &need_resend_linger);
4084                         if (err)
4085                                 goto bad;
4086                         handled_incremental = true;
4087                 } else {
4088                         dout("ignoring incremental map %u len %d\n",
4089                              epoch, maplen);
4090                 }
4091                 p += maplen;
4092                 nr_maps--;
4093         }
4094         if (handled_incremental)
4095                 goto done;
4096
4097         /* full maps */
4098         ceph_decode_32_safe(&p, end, nr_maps, bad);
4099         dout(" %d full maps\n", nr_maps);
4100         while (nr_maps) {
4101                 ceph_decode_need(&p, end, 2*sizeof(u32), bad);
4102                 epoch = ceph_decode_32(&p);
4103                 maplen = ceph_decode_32(&p);
4104                 ceph_decode_need(&p, end, maplen, bad);
4105                 if (nr_maps > 1) {
4106                         dout("skipping non-latest full map %u len %d\n",
4107                              epoch, maplen);
4108                 } else if (osdc->osdmap->epoch >= epoch) {
4109                         dout("skipping full map %u len %d, "
4110                              "older than our %u\n", epoch, maplen,
4111                              osdc->osdmap->epoch);
4112                 } else {
4113                         dout("taking full map %u len %d\n", epoch, maplen);
4114                         err = handle_one_map(osdc, p, p + maplen, false,
4115                                              &need_resend, &need_resend_linger);
4116                         if (err)
4117                                 goto bad;
4118                 }
4119                 p += maplen;
4120                 nr_maps--;
4121         }
4122
4123 done:
4124         /*
4125          * Subscribe to subsequent osdmap updates if full, to ensure
4126          * we find out when we are no longer full and can stop
4127          * returning ENOSPC.
4128          */
4129         pauserd = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSERD);
4130         pausewr = ceph_osdmap_flag(osdc, CEPH_OSDMAP_PAUSEWR) ||
4131                   ceph_osdmap_flag(osdc, CEPH_OSDMAP_FULL) ||
4132                   have_pool_full(osdc);
4133         if (was_pauserd || was_pausewr || pauserd || pausewr ||
4134             osdc->osdmap->epoch < osdc->epoch_barrier)
4135                 maybe_request_map(osdc);
4136
4137         kick_requests(osdc, &need_resend, &need_resend_linger);
4138
4139         ceph_osdc_abort_on_full(osdc);
4140         ceph_monc_got_map(&osdc->client->monc, CEPH_SUB_OSDMAP,
4141                           osdc->osdmap->epoch);
4142         up_write(&osdc->lock);
4143         wake_up_all(&osdc->client->auth_wq);
4144         return;
4145
4146 bad:
4147         pr_err("osdc handle_map corrupt msg\n");
4148         ceph_msg_dump(msg);
4149         up_write(&osdc->lock);
4150 }
4151
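/*
 * Shape of the OSDMAP message handled above, as implied by the
 * decode sequence (for reference only):
 *
 *        fsid
 *        u32 nr_inc_maps,  nr * { u32 epoch; u32 len; u8 data[len]; }
 *        u32 nr_full_maps, nr * { u32 epoch; u32 len; u8 data[len]; }
 *
 * An incremental map is applied only if it chains directly onto the
 * current epoch; otherwise the latest full map is taken instead.
 */
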
4152 /*
4153  * Resubmit requests pending on the given osd.
4154  */
4155 static void kick_osd_requests(struct ceph_osd *osd)
4156 {
4157         struct rb_node *n;
4158
4159         clear_backoffs(osd);
4160
4161         for (n = rb_first(&osd->o_requests); n; ) {
4162                 struct ceph_osd_request *req =
4163                     rb_entry(n, struct ceph_osd_request, r_node);
4164
4165                 n = rb_next(n); /* cancel_linger_request() */
4166
4167                 if (!req->r_linger) {
4168                         if (!req->r_t.paused)
4169                                 send_request(req);
4170                 } else {
4171                         cancel_linger_request(req);
4172                 }
4173         }
4174         for (n = rb_first(&osd->o_linger_requests); n; n = rb_next(n)) {
4175                 struct ceph_osd_linger_request *lreq =
4176                     rb_entry(n, struct ceph_osd_linger_request, node);
4177
4178                 send_linger(lreq);
4179         }
4180 }
4181
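/*
 * Note the split above, mirrored in kick_requests(): a plain request
 * is resent directly, while a request owned by a linger (r_linger)
 * is cancelled and reissued through send_linger(), which rebuilds it
 * as a register or reconnect as appropriate.
 */
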
4182 /*
4183  * If the osd connection drops, we need to resubmit all requests.
4184  */
4185 static void osd_fault(struct ceph_connection *con)
4186 {
4187         struct ceph_osd *osd = con->private;
4188         struct ceph_osd_client *osdc = osd->o_osdc;
4189
4190         dout("%s osd %p osd%d\n", __func__, osd, osd->o_osd);
4191
4192         down_write(&osdc->lock);
4193         if (!osd_registered(osd)) {
4194                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4195                 goto out_unlock;
4196         }
4197
4198         if (!reopen_osd(osd))
4199                 kick_osd_requests(osd);
4200         maybe_request_map(osdc);
4201
4202 out_unlock:
4203         up_write(&osdc->lock);
4204 }
4205
4206 struct MOSDBackoff {
4207         struct ceph_spg spgid;
4208         u32 map_epoch;
4209         u8 op;
4210         u64 id;
4211         struct ceph_hobject_id *begin;
4212         struct ceph_hobject_id *end;
4213 };
4214
4215 static int decode_MOSDBackoff(const struct ceph_msg *msg, struct MOSDBackoff *m)
4216 {
4217         void *p = msg->front.iov_base;
4218         void *const end = p + msg->front.iov_len;
4219         u8 struct_v;
4220         u32 struct_len;
4221         int ret;
4222
4223         ret = ceph_start_decoding(&p, end, 1, "spg_t", &struct_v, &struct_len);
4224         if (ret)
4225                 return ret;
4226
4227         ret = ceph_decode_pgid(&p, end, &m->spgid.pgid);
4228         if (ret)
4229                 return ret;
4230
4231         ceph_decode_8_safe(&p, end, m->spgid.shard, e_inval);
4232         ceph_decode_32_safe(&p, end, m->map_epoch, e_inval);
4233         ceph_decode_8_safe(&p, end, m->op, e_inval);
4234         ceph_decode_64_safe(&p, end, m->id, e_inval);
4235
4236         m->begin = kzalloc(sizeof(*m->begin), GFP_NOIO);
4237         if (!m->begin)
4238                 return -ENOMEM;
4239
4240         ret = decode_hoid(&p, end, m->begin);
4241         if (ret) {
4242                 free_hoid(m->begin);
4243                 return ret;
4244         }
4245
4246         m->end = kzalloc(sizeof(*m->end), GFP_NOIO);
4247         if (!m->end) {
4248                 free_hoid(m->begin);
4249                 return -ENOMEM;
4250         }
4251
4252         ret = decode_hoid(&p, end, m->end);
4253         if (ret) {
4254                 free_hoid(m->begin);
4255                 free_hoid(m->end);
4256                 return ret;
4257         }
4258
4259         return 0;
4260
4261 e_inval:
4262         return -EINVAL;
4263 }
4264
4265 static struct ceph_msg *create_backoff_message(
4266                                 const struct ceph_osd_backoff *backoff,
4267                                 u32 map_epoch)
4268 {
4269         struct ceph_msg *msg;
4270         void *p, *end;
4271         int msg_size;
4272
4273         msg_size = CEPH_ENCODING_START_BLK_LEN +
4274                         CEPH_PGID_ENCODING_LEN + 1; /* spgid */
4275         msg_size += 4 + 1 + 8; /* map_epoch, op, id */
4276         msg_size += CEPH_ENCODING_START_BLK_LEN +
4277                         hoid_encoding_size(backoff->begin);
4278         msg_size += CEPH_ENCODING_START_BLK_LEN +
4279                         hoid_encoding_size(backoff->end);
4280
4281         msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
4282         if (!msg)
4283                 return NULL;
4284
4285         p = msg->front.iov_base;
4286         end = p + msg->front_alloc_len;
4287
4288         encode_spgid(&p, &backoff->spgid);
4289         ceph_encode_32(&p, map_epoch);
4290         ceph_encode_8(&p, CEPH_OSD_BACKOFF_OP_ACK_BLOCK);
4291         ceph_encode_64(&p, backoff->id);
4292         encode_hoid(&p, end, backoff->begin);
4293         encode_hoid(&p, end, backoff->end);
4294         BUG_ON(p != end);
4295
4296         msg->front.iov_len = p - msg->front.iov_base;
4297         msg->hdr.version = cpu_to_le16(1); /* MOSDBackoff v1 */
4298         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
4299
4300         return msg;
4301 }
4302
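/*
 * create_backoff_message() uses the usual compute-size-then-encode
 * pattern; a condensed sketch:
 *
 *        msg_size = <sum of encoded field sizes>;
 *        msg = ceph_msg_new(CEPH_MSG_OSD_BACKOFF, msg_size, GFP_NOIO, true);
 *        p = msg->front.iov_base;
 *        end = p + msg->front_alloc_len;
 *        <encode fields into p>
 *        BUG_ON(p != end);  -- the size estimate must be exact
 *
 * The BUG_ON() keeps the size computation and the encoders honest
 * with respect to each other.
 */
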
4303 static void handle_backoff_block(struct ceph_osd *osd, struct MOSDBackoff *m)
4304 {
4305         struct ceph_spg_mapping *spg;
4306         struct ceph_osd_backoff *backoff;
4307         struct ceph_msg *msg;
4308
4309         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4310              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4311
4312         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &m->spgid);
4313         if (!spg) {
4314                 spg = alloc_spg_mapping();
4315                 if (!spg) {
4316                         pr_err("%s failed to allocate spg\n", __func__);
4317                         return;
4318                 }
4319                 spg->spgid = m->spgid; /* struct */
4320                 insert_spg_mapping(&osd->o_backoff_mappings, spg);
4321         }
4322
4323         backoff = alloc_backoff();
4324         if (!backoff) {
4325                 pr_err("%s failed to allocate backoff\n", __func__);
4326                 return;
4327         }
4328         backoff->spgid = m->spgid; /* struct */
4329         backoff->id = m->id;
4330         backoff->begin = m->begin;
4331         m->begin = NULL; /* backoff now owns this */
4332         backoff->end = m->end;
4333         m->end = NULL;   /* ditto */
4334
4335         insert_backoff(&spg->backoffs, backoff);
4336         insert_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4337
4338         /*
4339          * Ack with original backoff's epoch so that the OSD can
4340          * discard this if there was a PG split.
4341          */
4342         msg = create_backoff_message(backoff, m->map_epoch);
4343         if (!msg) {
4344                 pr_err("%s failed to allocate msg\n", __func__);
4345                 return;
4346         }
4347         ceph_con_send(&osd->o_con, msg);
4348 }
4349
4350 static bool target_contained_by(const struct ceph_osd_request_target *t,
4351                                 const struct ceph_hobject_id *begin,
4352                                 const struct ceph_hobject_id *end)
4353 {
4354         struct ceph_hobject_id hoid;
4355         int cmp;
4356
4357         hoid_fill_from_target(&hoid, t);
4358         cmp = hoid_compare(&hoid, begin);
4359         return !cmp || (cmp > 0 && hoid_compare(&hoid, end) < 0);
4360 }
4361
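/*
 * target_contained_by() implements a half-open interval test,
 * [begin, end): a target equal to begin is contained, one equal to
 * end is not.  E.g. with hoids ordered A < B < C, a backoff over
 * [A, C) covers A and B but not C.
 */
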
4362 static void handle_backoff_unblock(struct ceph_osd *osd,
4363                                    const struct MOSDBackoff *m)
4364 {
4365         struct ceph_spg_mapping *spg;
4366         struct ceph_osd_backoff *backoff;
4367         struct rb_node *n;
4368
4369         dout("%s osd%d spgid %llu.%xs%d id %llu\n", __func__, osd->o_osd,
4370              m->spgid.pgid.pool, m->spgid.pgid.seed, m->spgid.shard, m->id);
4371
4372         backoff = lookup_backoff_by_id(&osd->o_backoffs_by_id, m->id);
4373         if (!backoff) {
4374                 pr_err("%s osd%d spgid %llu.%xs%d id %llu backoff dne\n",
4375                        __func__, osd->o_osd, m->spgid.pgid.pool,
4376                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4377                 return;
4378         }
4379
4380         if (hoid_compare(backoff->begin, m->begin) ||
4381             hoid_compare(backoff->end, m->end)) {
4382                 pr_err("%s osd%d spgid %llu.%xs%d id %llu bad range?\n",
4383                        __func__, osd->o_osd, m->spgid.pgid.pool,
4384                        m->spgid.pgid.seed, m->spgid.shard, m->id);
4385                 /* unblock it anyway... */
4386         }
4387
4388         spg = lookup_spg_mapping(&osd->o_backoff_mappings, &backoff->spgid);
4389         BUG_ON(!spg);
4390
4391         erase_backoff(&spg->backoffs, backoff);
4392         erase_backoff_by_id(&osd->o_backoffs_by_id, backoff);
4393         free_backoff(backoff);
4394
4395         if (RB_EMPTY_ROOT(&spg->backoffs)) {
4396                 erase_spg_mapping(&osd->o_backoff_mappings, spg);
4397                 free_spg_mapping(spg);
4398         }
4399
4400         for (n = rb_first(&osd->o_requests); n; n = rb_next(n)) {
4401                 struct ceph_osd_request *req =
4402                     rb_entry(n, struct ceph_osd_request, r_node);
4403
4404                 if (!ceph_spg_compare(&req->r_t.spgid, &m->spgid)) {
4405                         /*
4406                          * Match against @m, not @backoff -- the PG may
4407                          * have split on the OSD.
4408                          */
4409                         if (target_contained_by(&req->r_t, m->begin, m->end)) {
4410                                 /*
4411                                  * If no other installed backoff applies,
4412                                  * resend.
4413                                  */
4414                                 send_request(req);
4415                         }
4416                 }
4417         }
4418 }
4419
4420 static void handle_backoff(struct ceph_osd *osd, struct ceph_msg *msg)
4421 {
4422         struct ceph_osd_client *osdc = osd->o_osdc;
4423         struct MOSDBackoff m;
4424         int ret;
4425
4426         down_read(&osdc->lock);
4427         if (!osd_registered(osd)) {
4428                 dout("%s osd%d unknown\n", __func__, osd->o_osd);
4429                 up_read(&osdc->lock);
4430                 return;
4431         }
4432         WARN_ON(osd->o_osd != le64_to_cpu(msg->hdr.src.num));
4433
4434         mutex_lock(&osd->lock);
4435         ret = decode_MOSDBackoff(msg, &m);
4436         if (ret) {
4437                 pr_err("failed to decode MOSDBackoff: %d\n", ret);
4438                 ceph_msg_dump(msg);
4439                 goto out_unlock;
4440         }
4441
4442         switch (m.op) {
4443         case CEPH_OSD_BACKOFF_OP_BLOCK:
4444                 handle_backoff_block(osd, &m);
4445                 break;
4446         case CEPH_OSD_BACKOFF_OP_UNBLOCK:
4447                 handle_backoff_unblock(osd, &m);
4448                 break;
4449         default:
4450                 pr_err("%s osd%d unknown op %d\n", __func__, osd->o_osd, m.op);
4451         }
4452
4453         free_hoid(m.begin);
4454         free_hoid(m.end);
4455
4456 out_unlock:
4457         mutex_unlock(&osd->lock);
4458         up_read(&osdc->lock);
4459 }
4460
4461 /*
4462  * Process osd watch notifications
4463  */
4464 static void handle_watch_notify(struct ceph_osd_client *osdc,
4465                                 struct ceph_msg *msg)
4466 {
4467         void *p = msg->front.iov_base;
4468         void *const end = p + msg->front.iov_len;
4469         struct ceph_osd_linger_request *lreq;
4470         struct linger_work *lwork;
4471         u8 proto_ver, opcode;
4472         u64 cookie, notify_id;
4473         u64 notifier_id = 0;
4474         s32 return_code = 0;
4475         void *payload = NULL;
4476         u32 payload_len = 0;
4477
4478         ceph_decode_8_safe(&p, end, proto_ver, bad);
4479         ceph_decode_8_safe(&p, end, opcode, bad);
4480         ceph_decode_64_safe(&p, end, cookie, bad);
4481         p += 8; /* skip ver */
4482         ceph_decode_64_safe(&p, end, notify_id, bad);
4483
4484         if (proto_ver >= 1) {
4485                 ceph_decode_32_safe(&p, end, payload_len, bad);
4486                 ceph_decode_need(&p, end, payload_len, bad);
4487                 payload = p;
4488                 p += payload_len;
4489         }
4490
4491         if (le16_to_cpu(msg->hdr.version) >= 2)
4492                 ceph_decode_32_safe(&p, end, return_code, bad);
4493
4494         if (le16_to_cpu(msg->hdr.version) >= 3)
4495                 ceph_decode_64_safe(&p, end, notifier_id, bad);
4496
4497         down_read(&osdc->lock);
4498         lreq = lookup_linger_osdc(&osdc->linger_requests, cookie);
4499         if (!lreq) {
4500                 dout("%s opcode %d cookie %llu dne\n", __func__, opcode,
4501                      cookie);
4502                 goto out_unlock_osdc;
4503         }
4504
4505         mutex_lock(&lreq->lock);
4506         dout("%s opcode %d cookie %llu lreq %p is_watch %d\n", __func__,
4507              opcode, cookie, lreq, lreq->is_watch);
4508         if (opcode == CEPH_WATCH_EVENT_DISCONNECT) {
4509                 if (!lreq->last_error) {
4510                         lreq->last_error = -ENOTCONN;
4511                         queue_watch_error(lreq);
4512                 }
4513         } else if (!lreq->is_watch) {
4514                 /* CEPH_WATCH_EVENT_NOTIFY_COMPLETE */
4515                 if (lreq->notify_id && lreq->notify_id != notify_id) {
4516                         dout("lreq %p notify_id %llu != %llu, ignoring\n", lreq,
4517                              lreq->notify_id, notify_id);
4518                 } else if (!completion_done(&lreq->notify_finish_wait)) {
4519                         struct ceph_msg_data *data =
4520                             msg->num_data_items ? &msg->data[0] : NULL;
4521
4522                         if (data) {
4523                                 if (lreq->preply_pages) {
4524                                         WARN_ON(data->type !=
4525                                                         CEPH_MSG_DATA_PAGES);
4526                                         *lreq->preply_pages = data->pages;
4527                                         *lreq->preply_len = data->length;
4528                                         data->own_pages = false;
4529                                 }
4530                         }
4531                         lreq->notify_finish_error = return_code;
4532                         complete_all(&lreq->notify_finish_wait);
4533                 }
4534         } else {
4535                 /* CEPH_WATCH_EVENT_NOTIFY */
4536                 lwork = lwork_alloc(lreq, do_watch_notify);
4537                 if (!lwork) {
4538                         pr_err("failed to allocate notify-lwork\n");
4539                         goto out_unlock_lreq;
4540                 }
4541
4542                 lwork->notify.notify_id = notify_id;
4543                 lwork->notify.notifier_id = notifier_id;
4544                 lwork->notify.payload = payload;
4545                 lwork->notify.payload_len = payload_len;
4546                 lwork->notify.msg = ceph_msg_get(msg);
4547                 lwork_queue(lwork);
4548         }
4549
4550 out_unlock_lreq:
4551         mutex_unlock(&lreq->lock);
4552 out_unlock_osdc:
4553         up_read(&osdc->lock);
4554         return;
4555
4556 bad:
4557         pr_err("osdc handle_watch_notify corrupt msg\n");
4558 }
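
/*
 * Front layout of CEPH_MSG_WATCH_NOTIFY, as decoded above:
 *
 *     proto_ver             (u8)
 *     opcode                (u8)
 *     cookie                (u64)
 *     ver                   (u64, skipped)
 *     notify_id             (u64)
 *     payload_len, payload  (proto_ver >= 1 only)
 *     return_code           (s32, hdr.version >= 2 only)
 *     notifier_id           (u64, hdr.version >= 3 only)
 */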
4559
4560 /*
4561  * Register request, send initial attempt.
4562  */
4563 int ceph_osdc_start_request(struct ceph_osd_client *osdc,
4564                             struct ceph_osd_request *req,
4565                             bool nofail)
4566 {
4567         down_read(&osdc->lock);
4568         submit_request(req, false);
4569         up_read(&osdc->lock);
4570
4571         return 0;
4572 }
4573 EXPORT_SYMBOL(ceph_osdc_start_request);
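
/*
 * Illustrative sketch of the typical synchronous call pattern built
 * on this function (error handling trimmed; see ceph_osdc_call()
 * below for a complete in-tree example):
 *
 *     req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
 *     ceph_oid_copy(&req->r_base_oid, oid);
 *     ceph_oloc_copy(&req->r_base_oloc, oloc);
 *     req->r_flags = CEPH_OSD_FLAG_READ;
 *     ... set up r_ops[0] ...
 *     ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
 *     ceph_osdc_start_request(osdc, req, false);
 *     ret = ceph_osdc_wait_request(osdc, req);
 *     ceph_osdc_put_request(req);
 */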
4574
4575 /*
4576  * Unregister a registered request.  The request is not completed:
4577  * ->r_result isn't set and __complete_request() isn't called.
4578  */
4579 void ceph_osdc_cancel_request(struct ceph_osd_request *req)
4580 {
4581         struct ceph_osd_client *osdc = req->r_osdc;
4582
4583         down_write(&osdc->lock);
4584         if (req->r_osd)
4585                 cancel_request(req);
4586         up_write(&osdc->lock);
4587 }
4588 EXPORT_SYMBOL(ceph_osdc_cancel_request);
4589
4590 /*
4591  * @timeout: in jiffies, 0 means "wait forever"
4592  */
4593 static int wait_request_timeout(struct ceph_osd_request *req,
4594                                 unsigned long timeout)
4595 {
4596         long left;
4597
4598         dout("%s req %p tid %llu\n", __func__, req, req->r_tid);
4599         left = wait_for_completion_killable_timeout(&req->r_completion,
4600                                                 ceph_timeout_jiffies(timeout));
4601         if (left <= 0) {
4602                 left = left ?: -ETIMEDOUT;
4603                 ceph_osdc_cancel_request(req);
4604         } else {
4605                 left = req->r_result; /* completed */
4606         }
4607
4608         return left;
4609 }
4610
4611 /*
4612  * wait for a request to complete
4613  */
4614 int ceph_osdc_wait_request(struct ceph_osd_client *osdc,
4615                            struct ceph_osd_request *req)
4616 {
4617         return wait_request_timeout(req, 0);
4618 }
4619 EXPORT_SYMBOL(ceph_osdc_wait_request);
4620
4621 /*
4622  * sync - wait for all in-flight write requests to flush; avoid starvation.
4623  */
4624 void ceph_osdc_sync(struct ceph_osd_client *osdc)
4625 {
4626         struct rb_node *n, *p;
4627         u64 last_tid = atomic64_read(&osdc->last_tid);
4628
4629 again:
4630         down_read(&osdc->lock);
4631         for (n = rb_first(&osdc->osds); n; n = rb_next(n)) {
4632                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
4633
4634                 mutex_lock(&osd->lock);
4635                 for (p = rb_first(&osd->o_requests); p; p = rb_next(p)) {
4636                         struct ceph_osd_request *req =
4637                             rb_entry(p, struct ceph_osd_request, r_node);
4638
4639                         if (req->r_tid > last_tid)
4640                                 break;
4641
4642                         if (!(req->r_flags & CEPH_OSD_FLAG_WRITE))
4643                                 continue;
4644
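                        /*
                         * We must drop both locks to wait, so the
                         * trees may change under us - restart the
                         * scan from the top each time.  The last_tid
                         * snapshot keeps newly submitted requests
                         * from starving us.
                         */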
4645                         ceph_osdc_get_request(req);
4646                         mutex_unlock(&osd->lock);
4647                         up_read(&osdc->lock);
4648                         dout("%s waiting on req %p tid %llu last_tid %llu\n",
4649                              __func__, req, req->r_tid, last_tid);
4650                         wait_for_completion(&req->r_completion);
4651                         ceph_osdc_put_request(req);
4652                         goto again;
4653                 }
4654
4655                 mutex_unlock(&osd->lock);
4656         }
4657
4658         up_read(&osdc->lock);
4659         dout("%s done last_tid %llu\n", __func__, last_tid);
4660 }
4661 EXPORT_SYMBOL(ceph_osdc_sync);
4662
4663 static struct ceph_osd_request *
4664 alloc_linger_request(struct ceph_osd_linger_request *lreq)
4665 {
4666         struct ceph_osd_request *req;
4667
4668         req = ceph_osdc_alloc_request(lreq->osdc, NULL, 1, false, GFP_NOIO);
4669         if (!req)
4670                 return NULL;
4671
4672         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4673         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4674         return req;
4675 }
4676
4677 static struct ceph_osd_request *
4678 alloc_watch_request(struct ceph_osd_linger_request *lreq, u8 watch_opcode)
4679 {
4680         struct ceph_osd_request *req;
4681
4682         req = alloc_linger_request(lreq);
4683         if (!req)
4684                 return NULL;
4685
4686         /*
4687          * Pass 0 for cookie because we don't know it yet, it will be
4688          * filled in by linger_submit().
4689          */
4690         osd_req_op_watch_init(req, 0, 0, watch_opcode);
4691
4692         if (ceph_osdc_alloc_messages(req, GFP_NOIO)) {
4693                 ceph_osdc_put_request(req);
4694                 return NULL;
4695         }
4696
4697         return req;
4698 }
4699
4700 /*
4701  * Returns a handle, caller owns a ref.
4702  */
4703 struct ceph_osd_linger_request *
4704 ceph_osdc_watch(struct ceph_osd_client *osdc,
4705                 struct ceph_object_id *oid,
4706                 struct ceph_object_locator *oloc,
4707                 rados_watchcb2_t wcb,
4708                 rados_watcherrcb_t errcb,
4709                 void *data)
4710 {
4711         struct ceph_osd_linger_request *lreq;
4712         int ret;
4713
4714         lreq = linger_alloc(osdc);
4715         if (!lreq)
4716                 return ERR_PTR(-ENOMEM);
4717
4718         lreq->is_watch = true;
4719         lreq->wcb = wcb;
4720         lreq->errcb = errcb;
4721         lreq->data = data;
4722         lreq->watch_valid_thru = jiffies;
4723
4724         ceph_oid_copy(&lreq->t.base_oid, oid);
4725         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4726         lreq->t.flags = CEPH_OSD_FLAG_WRITE;
4727         ktime_get_real_ts64(&lreq->mtime);
4728
4729         lreq->reg_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_WATCH);
4730         if (!lreq->reg_req) {
4731                 ret = -ENOMEM;
4732                 goto err_put_lreq;
4733         }
4734
4735         lreq->ping_req = alloc_watch_request(lreq, CEPH_OSD_WATCH_OP_PING);
4736         if (!lreq->ping_req) {
4737                 ret = -ENOMEM;
4738                 goto err_put_lreq;
4739         }
4740
4741         linger_submit(lreq);
4742         ret = linger_reg_commit_wait(lreq);
4743         if (ret) {
4744                 linger_cancel(lreq);
4745                 goto err_put_lreq;
4746         }
4747
4748         return lreq;
4749
4750 err_put_lreq:
4751         linger_put(lreq);
4752         return ERR_PTR(ret);
4753 }
4754 EXPORT_SYMBOL(ceph_osdc_watch);
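
/*
 * Illustrative sketch of the watch lifecycle (wcb/errcb are
 * hypothetical caller-supplied rados_watchcb2_t/rados_watcherrcb_t
 * callbacks, not defined here):
 *
 *     lreq = ceph_osdc_watch(osdc, oid, oloc, wcb, errcb, data);
 *     if (IS_ERR(lreq))
 *             return PTR_ERR(lreq);
 *     ...
 *     ret = ceph_osdc_watch_check(osdc, lreq);  (< 0: watch broken)
 *     ...
 *     ceph_osdc_unwatch(osdc, lreq);            (releases the ref)
 */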
4755
4756 /*
4757  * Releases a ref.
4758  *
4759  * Times out after mount_timeout to preserve rbd unmap behaviour
4760  * introduced in 2894e1d76974 ("rbd: timeout watch teardown on unmap
4761  * with mount_timeout").
4762  */
4763 int ceph_osdc_unwatch(struct ceph_osd_client *osdc,
4764                       struct ceph_osd_linger_request *lreq)
4765 {
4766         struct ceph_options *opts = osdc->client->options;
4767         struct ceph_osd_request *req;
4768         int ret;
4769
4770         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4771         if (!req)
4772                 return -ENOMEM;
4773
4774         ceph_oid_copy(&req->r_base_oid, &lreq->t.base_oid);
4775         ceph_oloc_copy(&req->r_base_oloc, &lreq->t.base_oloc);
4776         req->r_flags = CEPH_OSD_FLAG_WRITE;
4777         ktime_get_real_ts64(&req->r_mtime);
4778         osd_req_op_watch_init(req, 0, lreq->linger_id,
4779                               CEPH_OSD_WATCH_OP_UNWATCH);
4780
4781         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4782         if (ret)
4783                 goto out_put_req;
4784
4785         ceph_osdc_start_request(osdc, req, false);
4786         linger_cancel(lreq);
4787         linger_put(lreq);
4788         ret = wait_request_timeout(req, opts->mount_timeout);
4789
4790 out_put_req:
4791         ceph_osdc_put_request(req);
4792         return ret;
4793 }
4794 EXPORT_SYMBOL(ceph_osdc_unwatch);
4795
4796 static int osd_req_op_notify_ack_init(struct ceph_osd_request *req, int which,
4797                                       u64 notify_id, u64 cookie, void *payload,
4798                                       u32 payload_len)
4799 {
4800         struct ceph_osd_req_op *op;
4801         struct ceph_pagelist *pl;
4802         int ret;
4803
4804         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY_ACK, 0);
4805
4806         pl = ceph_pagelist_alloc(GFP_NOIO);
4807         if (!pl)
4808                 return -ENOMEM;
4809
4810         ret = ceph_pagelist_encode_64(pl, notify_id);
4811         ret |= ceph_pagelist_encode_64(pl, cookie);
4812         if (payload) {
4813                 ret |= ceph_pagelist_encode_32(pl, payload_len);
4814                 ret |= ceph_pagelist_append(pl, payload, payload_len);
4815         } else {
4816                 ret |= ceph_pagelist_encode_32(pl, 0);
4817         }
4818         if (ret) {
4819                 ceph_pagelist_release(pl);
4820                 return -ENOMEM;
4821         }
4822
4823         ceph_osd_data_pagelist_init(&op->notify_ack.request_data, pl);
4824         op->indata_len = pl->length;
4825         return 0;
4826 }
4827
4828 int ceph_osdc_notify_ack(struct ceph_osd_client *osdc,
4829                          struct ceph_object_id *oid,
4830                          struct ceph_object_locator *oloc,
4831                          u64 notify_id,
4832                          u64 cookie,
4833                          void *payload,
4834                          u32 payload_len)
4835 {
4836         struct ceph_osd_request *req;
4837         int ret;
4838
4839         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
4840         if (!req)
4841                 return -ENOMEM;
4842
4843         ceph_oid_copy(&req->r_base_oid, oid);
4844         ceph_oloc_copy(&req->r_base_oloc, oloc);
4845         req->r_flags = CEPH_OSD_FLAG_READ;
4846
4847         ret = osd_req_op_notify_ack_init(req, 0, notify_id, cookie, payload,
4848                                          payload_len);
4849         if (ret)
4850                 goto out_put_req;
4851
4852         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
4853         if (ret)
4854                 goto out_put_req;
4855
4856         ceph_osdc_start_request(osdc, req, false);
4857         ret = ceph_osdc_wait_request(osdc, req);
4858
4859 out_put_req:
4860         ceph_osdc_put_request(req);
4861         return ret;
4862 }
4863 EXPORT_SYMBOL(ceph_osdc_notify_ack);
4864
4865 static int osd_req_op_notify_init(struct ceph_osd_request *req, int which,
4866                                   u64 cookie, u32 prot_ver, u32 timeout,
4867                                   void *payload, u32 payload_len)
4868 {
4869         struct ceph_osd_req_op *op;
4870         struct ceph_pagelist *pl;
4871         int ret;
4872
4873         op = _osd_req_op_init(req, which, CEPH_OSD_OP_NOTIFY, 0);
4874         op->notify.cookie = cookie;
4875
4876         pl = ceph_pagelist_alloc(GFP_NOIO);
4877         if (!pl)
4878                 return -ENOMEM;
4879
4880         ret = ceph_pagelist_encode_32(pl, 1); /* prot_ver */
4881         ret |= ceph_pagelist_encode_32(pl, timeout);
4882         ret |= ceph_pagelist_encode_32(pl, payload_len);
4883         ret |= ceph_pagelist_append(pl, payload, payload_len);
4884         if (ret) {
4885                 ceph_pagelist_release(pl);
4886                 return -ENOMEM;
4887         }
4888
4889         ceph_osd_data_pagelist_init(&op->notify.request_data, pl);
4890         op->indata_len = pl->length;
4891         return 0;
4892 }
4893
4894 /*
4895  * @timeout: in seconds
4896  *
4897  * @preply_{pages,len} are initialized both on success and error.
4898  * The caller is responsible for:
4899  *
4900  *     ceph_release_page_vector(reply_pages, calc_pages_for(0, reply_len))
4901  */
4902 int ceph_osdc_notify(struct ceph_osd_client *osdc,
4903                      struct ceph_object_id *oid,
4904                      struct ceph_object_locator *oloc,
4905                      void *payload,
4906                      u32 payload_len,
4907                      u32 timeout,
4908                      struct page ***preply_pages,
4909                      size_t *preply_len)
4910 {
4911         struct ceph_osd_linger_request *lreq;
4912         struct page **pages;
4913         int ret;
4914
4915         WARN_ON(!timeout);
4916         if (preply_pages) {
4917                 *preply_pages = NULL;
4918                 *preply_len = 0;
4919         }
4920
4921         lreq = linger_alloc(osdc);
4922         if (!lreq)
4923                 return -ENOMEM;
4924
4925         lreq->preply_pages = preply_pages;
4926         lreq->preply_len = preply_len;
4927
4928         ceph_oid_copy(&lreq->t.base_oid, oid);
4929         ceph_oloc_copy(&lreq->t.base_oloc, oloc);
4930         lreq->t.flags = CEPH_OSD_FLAG_READ;
4931
4932         lreq->reg_req = alloc_linger_request(lreq);
4933         if (!lreq->reg_req) {
4934                 ret = -ENOMEM;
4935                 goto out_put_lreq;
4936         }
4937
4938         /*
4939          * Pass 0 for cookie because we don't know it yet, it will be
4940          * filled in by linger_submit().
4941          */
4942         ret = osd_req_op_notify_init(lreq->reg_req, 0, 0, 1, timeout,
4943                                      payload, payload_len);
4944         if (ret)
4945                 goto out_put_lreq;
4946
4947         /* for notify_id */
4948         pages = ceph_alloc_page_vector(1, GFP_NOIO);
4949         if (IS_ERR(pages)) {
4950                 ret = PTR_ERR(pages);
4951                 goto out_put_lreq;
4952         }
4953         ceph_osd_data_pages_init(osd_req_op_data(lreq->reg_req, 0, notify,
4954                                                  response_data),
4955                                  pages, PAGE_SIZE, 0, false, true);
4956
4957         ret = ceph_osdc_alloc_messages(lreq->reg_req, GFP_NOIO);
4958         if (ret)
4959                 goto out_put_lreq;
4960
4961         linger_submit(lreq);
4962         ret = linger_reg_commit_wait(lreq);
4963         if (!ret)
4964                 ret = linger_notify_finish_wait(lreq);
4965         else
4966                 dout("lreq %p failed to initiate notify %d\n", lreq, ret);
4967
4968         linger_cancel(lreq);
4969 out_put_lreq:
4970         linger_put(lreq);
4971         return ret;
4972 }
4973 EXPORT_SYMBOL(ceph_osdc_notify);
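
/*
 * Illustrative sketch of a notify that collects a reply, honouring
 * the caller responsibility documented above (10 is an arbitrary
 * timeout in seconds):
 *
 *     struct page **reply_pages;
 *     size_t reply_len;
 *
 *     ret = ceph_osdc_notify(osdc, oid, oloc, payload, payload_len,
 *                            10, &reply_pages, &reply_len);
 *     if (!ret)
 *             ... inspect page_address(reply_pages[0]), reply_len ...
 *     if (reply_pages)
 *             ceph_release_page_vector(reply_pages,
 *                                      calc_pages_for(0, reply_len));
 */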
4974
4975 /*
4976  * Return the number of milliseconds since the watch was last
4977  * confirmed, or an error.  If there is an error, the watch is no
4978  * longer valid, and should be destroyed with ceph_osdc_unwatch().
4979  */
4980 int ceph_osdc_watch_check(struct ceph_osd_client *osdc,
4981                           struct ceph_osd_linger_request *lreq)
4982 {
4983         unsigned long stamp, age;
4984         int ret;
4985
4986         down_read(&osdc->lock);
4987         mutex_lock(&lreq->lock);
4988         stamp = lreq->watch_valid_thru;
4989         if (!list_empty(&lreq->pending_lworks)) {
4990                 struct linger_work *lwork =
4991                     list_first_entry(&lreq->pending_lworks,
4992                                      struct linger_work,
4993                                      pending_item);
4994
4995                 if (time_before(lwork->queued_stamp, stamp))
4996                         stamp = lwork->queued_stamp;
4997         }
4998         age = jiffies - stamp;
4999         dout("%s lreq %p linger_id %llu age %lu last_error %d\n", __func__,
5000              lreq, lreq->linger_id, age, lreq->last_error);
5001         /* we are truncating to msecs, so return a safe upper bound */
5002         ret = lreq->last_error ?: 1 + jiffies_to_msecs(age);
5003
5004         mutex_unlock(&lreq->lock);
5005         up_read(&osdc->lock);
5006         return ret;
5007 }
5008
5009 static int decode_watcher(void **p, void *end, struct ceph_watch_item *item)
5010 {
5011         u8 struct_v;
5012         u32 struct_len;
5013         int ret;
5014
5015         ret = ceph_start_decoding(p, end, 2, "watch_item_t",
5016                                   &struct_v, &struct_len);
5017         if (ret)
5018                 goto bad;
5019
5020         ret = -EINVAL;
5021         ceph_decode_copy_safe(p, end, &item->name, sizeof(item->name), bad);
5022         ceph_decode_64_safe(p, end, item->cookie, bad);
5023         ceph_decode_skip_32(p, end, bad); /* skip timeout seconds */
5024
5025         if (struct_v >= 2) {
5026                 ret = ceph_decode_entity_addr(p, end, &item->addr);
5027                 if (ret)
5028                         goto bad;
5029         } else {
5030                 ret = 0;
5031         }
5032
5033         dout("%s %s%llu cookie %llu addr %s\n", __func__,
5034              ENTITY_NAME(item->name), item->cookie,
5035              ceph_pr_addr(&item->addr));
5036 bad:
5037         return ret;
5038 }
5039
5040 static int decode_watchers(void **p, void *end,
5041                            struct ceph_watch_item **watchers,
5042                            u32 *num_watchers)
5043 {
5044         u8 struct_v;
5045         u32 struct_len;
5046         int i;
5047         int ret;
5048
5049         ret = ceph_start_decoding(p, end, 1, "obj_list_watch_response_t",
5050                                   &struct_v, &struct_len);
5051         if (ret)
5052                 return ret;
5053
5054         *num_watchers = ceph_decode_32(p);
5055         *watchers = kcalloc(*num_watchers, sizeof(**watchers), GFP_NOIO);
5056         if (!*watchers)
5057                 return -ENOMEM;
5058
5059         for (i = 0; i < *num_watchers; i++) {
5060                 ret = decode_watcher(p, end, *watchers + i);
5061                 if (ret) {
5062                         kfree(*watchers);
5063                         return ret;
5064                 }
5065         }
5066
5067         return 0;
5068 }
5069
5070 /*
5071  * On success, the caller is responsible for:
5072  *
5073  *     kfree(watchers);
5074  */
5075 int ceph_osdc_list_watchers(struct ceph_osd_client *osdc,
5076                             struct ceph_object_id *oid,
5077                             struct ceph_object_locator *oloc,
5078                             struct ceph_watch_item **watchers,
5079                             u32 *num_watchers)
5080 {
5081         struct ceph_osd_request *req;
5082         struct page **pages;
5083         int ret;
5084
5085         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5086         if (!req)
5087                 return -ENOMEM;
5088
5089         ceph_oid_copy(&req->r_base_oid, oid);
5090         ceph_oloc_copy(&req->r_base_oloc, oloc);
5091         req->r_flags = CEPH_OSD_FLAG_READ;
5092
5093         pages = ceph_alloc_page_vector(1, GFP_NOIO);
5094         if (IS_ERR(pages)) {
5095                 ret = PTR_ERR(pages);
5096                 goto out_put_req;
5097         }
5098
5099         osd_req_op_init(req, 0, CEPH_OSD_OP_LIST_WATCHERS, 0);
5100         ceph_osd_data_pages_init(osd_req_op_data(req, 0, list_watchers,
5101                                                  response_data),
5102                                  pages, PAGE_SIZE, 0, false, true);
5103
5104         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5105         if (ret)
5106                 goto out_put_req;
5107
5108         ceph_osdc_start_request(osdc, req, false);
5109         ret = ceph_osdc_wait_request(osdc, req);
5110         if (ret >= 0) {
5111                 void *p = page_address(pages[0]);
5112                 void *const end = p + req->r_ops[0].outdata_len;
5113
5114                 ret = decode_watchers(&p, end, watchers, num_watchers);
5115         }
5116
5117 out_put_req:
5118         ceph_osdc_put_request(req);
5119         return ret;
5120 }
5121 EXPORT_SYMBOL(ceph_osdc_list_watchers);
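
/*
 * Illustrative sketch, per the kfree() note above:
 *
 *     struct ceph_watch_item *watchers;
 *     u32 num_watchers, i;
 *
 *     ret = ceph_osdc_list_watchers(osdc, oid, oloc, &watchers,
 *                                   &num_watchers);
 *     if (ret)
 *             return ret;
 *     for (i = 0; i < num_watchers; i++)
 *             ... use watchers[i].name, .cookie, .addr ...
 *     kfree(watchers);
 */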
5122
5123 /*
5124  * Call all pending notify callbacks - for use after a watch is
5125  * unregistered, to make sure no more callbacks for it will be invoked.
5126  */
5127 void ceph_osdc_flush_notifies(struct ceph_osd_client *osdc)
5128 {
5129         dout("%s osdc %p\n", __func__, osdc);
5130         flush_workqueue(osdc->notify_wq);
5131 }
5132 EXPORT_SYMBOL(ceph_osdc_flush_notifies);
5133
5134 void ceph_osdc_maybe_request_map(struct ceph_osd_client *osdc)
5135 {
5136         down_read(&osdc->lock);
5137         maybe_request_map(osdc);
5138         up_read(&osdc->lock);
5139 }
5140 EXPORT_SYMBOL(ceph_osdc_maybe_request_map);
5141
5142 /*
5143  * Execute an OSD class method on an object.
5144  *
5145  * @flags: CEPH_OSD_FLAG_*
5146  * @resp_len: in/out param for reply length
5147  */
5148 int ceph_osdc_call(struct ceph_osd_client *osdc,
5149                    struct ceph_object_id *oid,
5150                    struct ceph_object_locator *oloc,
5151                    const char *class, const char *method,
5152                    unsigned int flags,
5153                    struct page *req_page, size_t req_len,
5154                    struct page **resp_pages, size_t *resp_len)
5155 {
5156         struct ceph_osd_request *req;
5157         int ret;
5158
5159         if (req_len > PAGE_SIZE)
5160                 return -E2BIG;
5161
5162         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_NOIO);
5163         if (!req)
5164                 return -ENOMEM;
5165
5166         ceph_oid_copy(&req->r_base_oid, oid);
5167         ceph_oloc_copy(&req->r_base_oloc, oloc);
5168         req->r_flags = flags;
5169
5170         ret = osd_req_op_cls_init(req, 0, class, method);
5171         if (ret)
5172                 goto out_put_req;
5173
5174         if (req_page)
5175                 osd_req_op_cls_request_data_pages(req, 0, &req_page, req_len,
5176                                                   0, false, false);
5177         if (resp_pages)
5178                 osd_req_op_cls_response_data_pages(req, 0, resp_pages,
5179                                                    *resp_len, 0, false, false);
5180
5181         ret = ceph_osdc_alloc_messages(req, GFP_NOIO);
5182         if (ret)
5183                 goto out_put_req;
5184
5185         ceph_osdc_start_request(osdc, req, false);
5186         ret = ceph_osdc_wait_request(osdc, req);
5187         if (ret >= 0) {
5188                 ret = req->r_ops[0].rval;
5189                 if (resp_pages)
5190                         *resp_len = req->r_ops[0].outdata_len;
5191         }
5192
5193 out_put_req:
5194         ceph_osdc_put_request(req);
5195         return ret;
5196 }
5197 EXPORT_SYMBOL(ceph_osdc_call);
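
/*
 * Illustrative sketch of a class method call ("rbd"/"get_id" is just
 * an example class/method pair); req_len may not exceed PAGE_SIZE:
 *
 *     struct page *reply_page = alloc_page(GFP_NOIO);
 *     size_t reply_len = PAGE_SIZE;
 *
 *     ret = ceph_osdc_call(osdc, oid, oloc, "rbd", "get_id",
 *                          CEPH_OSD_FLAG_READ, NULL, 0,
 *                          &reply_page, &reply_len);
 *     if (ret >= 0)
 *             ... parse page_address(reply_page), reply_len bytes ...
 *     __free_page(reply_page);
 */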
5198
5199 /*
5200  * reset all osd connections
5201  */
5202 void ceph_osdc_reopen_osds(struct ceph_osd_client *osdc)
5203 {
5204         struct rb_node *n;
5205
5206         down_write(&osdc->lock);
5207         for (n = rb_first(&osdc->osds); n; ) {
5208                 struct ceph_osd *osd = rb_entry(n, struct ceph_osd, o_node);
5209
5210                 n = rb_next(n);
5211                 if (!reopen_osd(osd))
5212                         kick_osd_requests(osd);
5213         }
5214         up_write(&osdc->lock);
5215 }
5216
5217 /*
5218  * init, shutdown
5219  */
5220 int ceph_osdc_init(struct ceph_osd_client *osdc, struct ceph_client *client)
5221 {
5222         int err;
5223
5224         dout("init\n");
5225         osdc->client = client;
5226         init_rwsem(&osdc->lock);
5227         osdc->osds = RB_ROOT;
5228         INIT_LIST_HEAD(&osdc->osd_lru);
5229         spin_lock_init(&osdc->osd_lru_lock);
5230         osd_init(&osdc->homeless_osd);
5231         osdc->homeless_osd.o_osdc = osdc;
5232         osdc->homeless_osd.o_osd = CEPH_HOMELESS_OSD;
5233         osdc->last_linger_id = CEPH_LINGER_ID_START;
5234         osdc->linger_requests = RB_ROOT;
5235         osdc->map_checks = RB_ROOT;
5236         osdc->linger_map_checks = RB_ROOT;
5237         INIT_DELAYED_WORK(&osdc->timeout_work, handle_timeout);
5238         INIT_DELAYED_WORK(&osdc->osds_timeout_work, handle_osds_timeout);
5239
5240         err = -ENOMEM;
5241         osdc->osdmap = ceph_osdmap_alloc();
5242         if (!osdc->osdmap)
5243                 goto out;
5244
5245         osdc->req_mempool = mempool_create_slab_pool(10,
5246                                                      ceph_osd_request_cache);
5247         if (!osdc->req_mempool)
5248                 goto out_map;
5249
5250         err = ceph_msgpool_init(&osdc->msgpool_op, CEPH_MSG_OSD_OP,
5251                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10, "osd_op");
5252         if (err < 0)
5253                 goto out_mempool;
5254         err = ceph_msgpool_init(&osdc->msgpool_op_reply, CEPH_MSG_OSD_OPREPLY,
5255                                 PAGE_SIZE, CEPH_OSD_SLAB_OPS, 10,
5256                                 "osd_op_reply");
5257         if (err < 0)
5258                 goto out_msgpool;
5259
5260         err = -ENOMEM;
5261         osdc->notify_wq = create_singlethread_workqueue("ceph-watch-notify");
5262         if (!osdc->notify_wq)
5263                 goto out_msgpool_reply;
5264
5265         osdc->completion_wq = create_singlethread_workqueue("ceph-completion");
5266         if (!osdc->completion_wq)
5267                 goto out_notify_wq;
5268
5269         schedule_delayed_work(&osdc->timeout_work,
5270                               osdc->client->options->osd_keepalive_timeout);
5271         schedule_delayed_work(&osdc->osds_timeout_work,
5272             round_jiffies_relative(osdc->client->options->osd_idle_ttl));
5273
5274         return 0;
5275
5276 out_notify_wq:
5277         destroy_workqueue(osdc->notify_wq);
5278 out_msgpool_reply:
5279         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5280 out_msgpool:
5281         ceph_msgpool_destroy(&osdc->msgpool_op);
5282 out_mempool:
5283         mempool_destroy(osdc->req_mempool);
5284 out_map:
5285         ceph_osdmap_destroy(osdc->osdmap);
5286 out:
5287         return err;
5288 }
5289
5290 void ceph_osdc_stop(struct ceph_osd_client *osdc)
5291 {
5292         destroy_workqueue(osdc->completion_wq);
5293         destroy_workqueue(osdc->notify_wq);
5294         cancel_delayed_work_sync(&osdc->timeout_work);
5295         cancel_delayed_work_sync(&osdc->osds_timeout_work);
5296
5297         down_write(&osdc->lock);
5298         while (!RB_EMPTY_ROOT(&osdc->osds)) {
5299                 struct ceph_osd *osd = rb_entry(rb_first(&osdc->osds),
5300                                                 struct ceph_osd, o_node);
5301                 close_osd(osd);
5302         }
5303         up_write(&osdc->lock);
5304         WARN_ON(refcount_read(&osdc->homeless_osd.o_ref) != 1);
5305         osd_cleanup(&osdc->homeless_osd);
5306
5307         WARN_ON(!list_empty(&osdc->osd_lru));
5308         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_requests));
5309         WARN_ON(!RB_EMPTY_ROOT(&osdc->map_checks));
5310         WARN_ON(!RB_EMPTY_ROOT(&osdc->linger_map_checks));
5311         WARN_ON(atomic_read(&osdc->num_requests));
5312         WARN_ON(atomic_read(&osdc->num_homeless));
5313
5314         ceph_osdmap_destroy(osdc->osdmap);
5315         mempool_destroy(osdc->req_mempool);
5316         ceph_msgpool_destroy(&osdc->msgpool_op);
5317         ceph_msgpool_destroy(&osdc->msgpool_op_reply);
5318 }
5319
5320 static int osd_req_op_copy_from_init(struct ceph_osd_request *req,
5321                                      u64 src_snapid, u64 src_version,
5322                                      struct ceph_object_id *src_oid,
5323                                      struct ceph_object_locator *src_oloc,
5324                                      u32 src_fadvise_flags,
5325                                      u32 dst_fadvise_flags,
5326                                      u32 truncate_seq, u64 truncate_size,
5327                                      u8 copy_from_flags)
5328 {
5329         struct ceph_osd_req_op *op;
5330         struct page **pages;
5331         void *p, *end;
5332
5333         pages = ceph_alloc_page_vector(1, GFP_KERNEL);
5334         if (IS_ERR(pages))
5335                 return PTR_ERR(pages);
5336
5337         op = _osd_req_op_init(req, 0, CEPH_OSD_OP_COPY_FROM2,
5338                               dst_fadvise_flags);
5339         op->copy_from.snapid = src_snapid;
5340         op->copy_from.src_version = src_version;
5341         op->copy_from.flags = copy_from_flags;
5342         op->copy_from.src_fadvise_flags = src_fadvise_flags;
5343
5344         p = page_address(pages[0]);
5345         end = p + PAGE_SIZE;
5346         ceph_encode_string(&p, end, src_oid->name, src_oid->name_len);
5347         encode_oloc(&p, end, src_oloc);
5348         ceph_encode_32(&p, truncate_seq);
5349         ceph_encode_64(&p, truncate_size);
5350         op->indata_len = PAGE_SIZE - (end - p);
5351
5352         ceph_osd_data_pages_init(&op->copy_from.osd_data, pages,
5353                                  op->indata_len, 0, false, true);
5354         return 0;
5355 }
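
/*
 * The page filled in above is the COPY_FROM2 op's input payload:
 * the source object name as a length-prefixed string, the encoded
 * source locator, then truncate_seq (u32) and truncate_size (u64).
 */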
5356
5357 int ceph_osdc_copy_from(struct ceph_osd_client *osdc,
5358                         u64 src_snapid, u64 src_version,
5359                         struct ceph_object_id *src_oid,
5360                         struct ceph_object_locator *src_oloc,
5361                         u32 src_fadvise_flags,
5362                         struct ceph_object_id *dst_oid,
5363                         struct ceph_object_locator *dst_oloc,
5364                         u32 dst_fadvise_flags,
5365                         u32 truncate_seq, u64 truncate_size,
5366                         u8 copy_from_flags)
5367 {
5368         struct ceph_osd_request *req;
5369         int ret;
5370
5371         req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
5372         if (!req)
5373                 return -ENOMEM;
5374
5375         req->r_flags = CEPH_OSD_FLAG_WRITE;
5376
5377         ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
5378         ceph_oid_copy(&req->r_t.base_oid, dst_oid);
5379
5380         ret = osd_req_op_copy_from_init(req, src_snapid, src_version, src_oid,
5381                                         src_oloc, src_fadvise_flags,
5382                                         dst_fadvise_flags, truncate_seq,
5383                                         truncate_size, copy_from_flags);
5384         if (ret)
5385                 goto out;
5386
5387         ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
5388         if (ret)
5389                 goto out;
5390
5391         ceph_osdc_start_request(osdc, req, false);
5392         ret = ceph_osdc_wait_request(osdc, req);
5393
5394 out:
5395         ceph_osdc_put_request(req);
5396         return ret;
5397 }
5398 EXPORT_SYMBOL(ceph_osdc_copy_from);
5399
5400 int __init ceph_osdc_setup(void)
5401 {
5402         size_t size = sizeof(struct ceph_osd_request) +
5403             CEPH_OSD_SLAB_OPS * sizeof(struct ceph_osd_req_op);
5404
5405         BUG_ON(ceph_osd_request_cache);
5406         ceph_osd_request_cache = kmem_cache_create("ceph_osd_request", size,
5407                                                    0, 0, NULL);
5408
5409         return ceph_osd_request_cache ? 0 : -ENOMEM;
5410 }
5411
5412 void ceph_osdc_cleanup(void)
5413 {
5414         BUG_ON(!ceph_osd_request_cache);
5415         kmem_cache_destroy(ceph_osd_request_cache);
5416         ceph_osd_request_cache = NULL;
5417 }
5418
5419 /*
5420  * handle incoming message
5421  */
5422 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
5423 {
5424         struct ceph_osd *osd = con->private;
5425         struct ceph_osd_client *osdc = osd->o_osdc;
5426         int type = le16_to_cpu(msg->hdr.type);
5427
5428         switch (type) {
5429         case CEPH_MSG_OSD_MAP:
5430                 ceph_osdc_handle_map(osdc, msg);
5431                 break;
5432         case CEPH_MSG_OSD_OPREPLY:
5433                 handle_reply(osd, msg);
5434                 break;
5435         case CEPH_MSG_OSD_BACKOFF:
5436                 handle_backoff(osd, msg);
5437                 break;
5438         case CEPH_MSG_WATCH_NOTIFY:
5439                 handle_watch_notify(osdc, msg);
5440                 break;
5441
5442         default:
5443                 pr_err("received unknown message type %d %s\n", type,
5444                        ceph_msg_type_name(type));
5445         }
5446
5447         ceph_msg_put(msg);
5448 }
5449
5450 /*
5451  * Look up and return the preallocated message for an incoming reply.
5452  * A reply whose data portion is larger than preallocated is not
5453  * handled at the moment - it is simply skipped.
5454  */
5455 static struct ceph_msg *get_reply(struct ceph_connection *con,
5456                                   struct ceph_msg_header *hdr,
5457                                   int *skip)
5458 {
5459         struct ceph_osd *osd = con->private;
5460         struct ceph_osd_client *osdc = osd->o_osdc;
5461         struct ceph_msg *m = NULL;
5462         struct ceph_osd_request *req;
5463         int front_len = le32_to_cpu(hdr->front_len);
5464         int data_len = le32_to_cpu(hdr->data_len);
5465         u64 tid = le64_to_cpu(hdr->tid);
5466
5467         down_read(&osdc->lock);
5468         if (!osd_registered(osd)) {
5469                 dout("%s osd%d unknown, skipping\n", __func__, osd->o_osd);
5470                 *skip = 1;
5471                 goto out_unlock_osdc;
5472         }
5473         WARN_ON(osd->o_osd != le64_to_cpu(hdr->src.num));
5474
5475         mutex_lock(&osd->lock);
5476         req = lookup_request(&osd->o_requests, tid);
5477         if (!req) {
5478                 dout("%s osd%d tid %llu unknown, skipping\n", __func__,
5479                      osd->o_osd, tid);
5480                 *skip = 1;
5481                 goto out_unlock_session;
5482         }
5483
5484         ceph_msg_revoke_incoming(req->r_reply);
5485
5486         if (front_len > req->r_reply->front_alloc_len) {
5487                 pr_warn("%s osd%d tid %llu front %d > preallocated %d\n",
5488                         __func__, osd->o_osd, req->r_tid, front_len,
5489                         req->r_reply->front_alloc_len);
5490                 m = ceph_msg_new(CEPH_MSG_OSD_OPREPLY, front_len, GFP_NOFS,
5491                                  false);
5492                 if (!m)
5493                         goto out_unlock_session;
5494                 ceph_msg_put(req->r_reply);
5495                 req->r_reply = m;
5496         }
5497
5498         if (data_len > req->r_reply->data_length) {
5499                 pr_warn("%s osd%d tid %llu data %d > preallocated %zu, skipping\n",
5500                         __func__, osd->o_osd, req->r_tid, data_len,
5501                         req->r_reply->data_length);
5502                 m = NULL;
5503                 *skip = 1;
5504                 goto out_unlock_session;
5505         }
5506
5507         m = ceph_msg_get(req->r_reply);
5508         dout("get_reply tid %llu %p\n", tid, m);
5509
5510 out_unlock_session:
5511         mutex_unlock(&osd->lock);
5512 out_unlock_osdc:
5513         up_read(&osdc->lock);
5514         return m;
5515 }
5516
5517 static struct ceph_msg *alloc_msg_with_page_vector(struct ceph_msg_header *hdr)
5518 {
5519         struct ceph_msg *m;
5520         int type = le16_to_cpu(hdr->type);
5521         u32 front_len = le32_to_cpu(hdr->front_len);
5522         u32 data_len = le32_to_cpu(hdr->data_len);
5523
5524         m = ceph_msg_new2(type, front_len, 1, GFP_NOIO, false);
5525         if (!m)
5526                 return NULL;
5527
5528         if (data_len) {
5529                 struct page **pages;
5530
5531                 pages = ceph_alloc_page_vector(calc_pages_for(0, data_len),
5532                                                GFP_NOIO);
5533                 if (IS_ERR(pages)) {
5534                         ceph_msg_put(m);
5535                         return NULL;
5536                 }
5537
5538                 ceph_msg_data_add_pages(m, pages, data_len, 0, true);
5539         }
5540
5541         return m;
5542 }
5543
5544 static struct ceph_msg *alloc_msg(struct ceph_connection *con,
5545                                   struct ceph_msg_header *hdr,
5546                                   int *skip)
5547 {
5548         struct ceph_osd *osd = con->private;
5549         int type = le16_to_cpu(hdr->type);
5550
5551         *skip = 0;
5552         switch (type) {
5553         case CEPH_MSG_OSD_MAP:
5554         case CEPH_MSG_OSD_BACKOFF:
5555         case CEPH_MSG_WATCH_NOTIFY:
5556                 return alloc_msg_with_page_vector(hdr);
5557         case CEPH_MSG_OSD_OPREPLY:
5558                 return get_reply(con, hdr, skip);
5559         default:
5560                 pr_warn("%s osd%d unknown msg type %d, skipping\n", __func__,
5561                         osd->o_osd, type);
5562                 *skip = 1;
5563                 return NULL;
5564         }
5565 }
5566
5567 /*
5568  * Wrappers to refcount the containing ceph_osd struct
5569  */
5570 static struct ceph_connection *get_osd_con(struct ceph_connection *con)
5571 {
5572         struct ceph_osd *osd = con->private;
5573         if (get_osd(osd))
5574                 return con;
5575         return NULL;
5576 }
5577
5578 static void put_osd_con(struct ceph_connection *con)
5579 {
5580         struct ceph_osd *osd = con->private;
5581         put_osd(osd);
5582 }
5583
5584 /*
5585  * authentication
5586  */
5587 /*
5588  * Note: returned pointer is the address of a structure that's
5589  * managed separately.  Caller must *not* attempt to free it.
5590  */
5591 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
5592                                         int *proto, int force_new)
5593 {
5594         struct ceph_osd *o = con->private;
5595         struct ceph_osd_client *osdc = o->o_osdc;
5596         struct ceph_auth_client *ac = osdc->client->monc.auth;
5597         struct ceph_auth_handshake *auth = &o->o_auth;
5598
5599         if (force_new && auth->authorizer) {
5600                 ceph_auth_destroy_authorizer(auth->authorizer);
5601                 auth->authorizer = NULL;
5602         }
5603         if (!auth->authorizer) {
5604                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5605                                                       auth);
5606                 if (ret)
5607                         return ERR_PTR(ret);
5608         } else {
5609                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_OSD,
5610                                                      auth);
5611                 if (ret)
5612                         return ERR_PTR(ret);
5613         }
5614         *proto = ac->protocol;
5615
5616         return auth;
5617 }
5618
5619 static int add_authorizer_challenge(struct ceph_connection *con,
5620                                     void *challenge_buf, int challenge_buf_len)
5621 {
5622         struct ceph_osd *o = con->private;
5623         struct ceph_osd_client *osdc = o->o_osdc;
5624         struct ceph_auth_client *ac = osdc->client->monc.auth;
5625
5626         return ceph_auth_add_authorizer_challenge(ac, o->o_auth.authorizer,
5627                                             challenge_buf, challenge_buf_len);
5628 }
5629
5630 static int verify_authorizer_reply(struct ceph_connection *con)
5631 {
5632         struct ceph_osd *o = con->private;
5633         struct ceph_osd_client *osdc = o->o_osdc;
5634         struct ceph_auth_client *ac = osdc->client->monc.auth;
5635
5636         return ceph_auth_verify_authorizer_reply(ac, o->o_auth.authorizer);
5637 }
5638
5639 static int invalidate_authorizer(struct ceph_connection *con)
5640 {
5641         struct ceph_osd *o = con->private;
5642         struct ceph_osd_client *osdc = o->o_osdc;
5643         struct ceph_auth_client *ac = osdc->client->monc.auth;
5644
5645         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_OSD);
5646         return ceph_monc_validate_auth(&osdc->client->monc);
5647 }
5648
5649 static void osd_reencode_message(struct ceph_msg *msg)
5650 {
5651         int type = le16_to_cpu(msg->hdr.type);
5652
5653         if (type == CEPH_MSG_OSD_OP)
5654                 encode_request_finish(msg);
5655 }
5656
5657 static int osd_sign_message(struct ceph_msg *msg)
5658 {
5659         struct ceph_osd *o = msg->con->private;
5660         struct ceph_auth_handshake *auth = &o->o_auth;
5661
5662         return ceph_auth_sign_message(auth, msg);
5663 }
5664
5665 static int osd_check_message_signature(struct ceph_msg *msg)
5666 {
5667         struct ceph_osd *o = msg->con->private;
5668         struct ceph_auth_handshake *auth = &o->o_auth;
5669
5670         return ceph_auth_check_message_signature(auth, msg);
5671 }
5672
5673 static const struct ceph_connection_operations osd_con_ops = {
5674         .get = get_osd_con,
5675         .put = put_osd_con,
5676         .dispatch = dispatch,
5677         .get_authorizer = get_authorizer,
5678         .add_authorizer_challenge = add_authorizer_challenge,
5679         .verify_authorizer_reply = verify_authorizer_reply,
5680         .invalidate_authorizer = invalidate_authorizer,
5681         .alloc_msg = alloc_msg,
5682         .reencode_message = osd_reencode_message,
5683         .sign_message = osd_sign_message,
5684         .check_message_signature = osd_check_message_signature,
5685         .fault = osd_fault,
5686 };