ceph: make __take_cap_refs non-static
[linux-2.6-microblaze.git] / fs/ceph/mds_client.c
1 // SPDX-License-Identifier: GPL-2.0
2 #include <linux/ceph/ceph_debug.h>
3
4 #include <linux/fs.h>
5 #include <linux/wait.h>
6 #include <linux/slab.h>
7 #include <linux/gfp.h>
8 #include <linux/sched.h>
9 #include <linux/debugfs.h>
10 #include <linux/seq_file.h>
11 #include <linux/ratelimit.h>
12 #include <linux/bits.h>
13
14 #include "super.h"
15 #include "mds_client.h"
16
17 #include <linux/ceph/ceph_features.h>
18 #include <linux/ceph/messenger.h>
19 #include <linux/ceph/decode.h>
20 #include <linux/ceph/pagelist.h>
21 #include <linux/ceph/auth.h>
22 #include <linux/ceph/debugfs.h>
23
24 #define RECONNECT_MAX_SIZE (INT_MAX - PAGE_SIZE)
25
26 /*
27  * A cluster of MDS (metadata server) daemons is responsible for
28  * managing the file system namespace (the directory hierarchy and
29  * inodes) and for coordinating shared access to storage.  Metadata is
30  * partitioned hierarchically across a number of servers, and that
31  * partition varies over time as the cluster adjusts the distribution
32  * in order to balance load.
33  *
34  * The MDS client is primarily responsible for managing synchronous
35  * metadata requests for operations like open, unlink, and so forth.
36  * If there is an MDS failure, we find out about it when we (possibly
37  * request and) receive a new MDS map, and can resubmit affected
38  * requests.
39  *
40  * For the most part, though, we take advantage of a lossless
41  * communications channel to the MDS, and do not need to worry about
42  * timing out or resubmitting requests.
43  *
44  * We maintain a stateful "session" with each MDS we interact with.
45  * Within each session, we send periodic heartbeat messages to ensure
46  * any capabilities or leases we have been issued remain valid.  If
47  * the session times out and goes stale, our leases and capabilities
48  * are no longer valid.
49  */
50
51 struct ceph_reconnect_state {
52         struct ceph_mds_session *session;
53         int nr_caps, nr_realms;
54         struct ceph_pagelist *pagelist;
55         unsigned msg_version;
56         bool allow_multi;
57 };
58
59 static void __wake_requests(struct ceph_mds_client *mdsc,
60                             struct list_head *head);
61 static void ceph_cap_release_work(struct work_struct *work);
62 static void ceph_cap_reclaim_work(struct work_struct *work);
63
64 static const struct ceph_connection_operations mds_con_ops;
65
66
67 /*
68  * mds reply parsing
69  */
70
71 static int parse_reply_info_quota(void **p, void *end,
72                                   struct ceph_mds_reply_info_in *info)
73 {
74         u8 struct_v, struct_compat;
75         u32 struct_len;
76
77         ceph_decode_8_safe(p, end, struct_v, bad);
78         ceph_decode_8_safe(p, end, struct_compat, bad);
79         /* struct_v is expected to be >= 1. we only
80          * understand encoding with struct_compat == 1. */
81         if (!struct_v || struct_compat != 1)
82                 goto bad;
83         ceph_decode_32_safe(p, end, struct_len, bad);
84         ceph_decode_need(p, end, struct_len, bad);
85         end = *p + struct_len;
86         ceph_decode_64_safe(p, end, info->max_bytes, bad);
87         ceph_decode_64_safe(p, end, info->max_files, bad);
88         *p = end;
89         return 0;
90 bad:
91         return -EIO;
92 }
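
/*
 * A minimal sketch (not part of this file) of the versioned-decode pattern
 * used throughout the parsers above, for a hypothetical two-field blob.
 * The ceph_decode_*_safe() helpers bounds-check against 'end' and jump to
 * the supplied label on a short buffer; clamping 'end' to *p + struct_len
 * and finishing with '*p = end' lets newer encodings append fields that
 * older clients silently skip.
 */
static inline int parse_example_blob(void **p, void *end, u64 *a, u32 *b)
{
        u8 struct_v, struct_compat;
        u32 struct_len;

        ceph_decode_8_safe(p, end, struct_v, bad);
        ceph_decode_8_safe(p, end, struct_compat, bad);
        if (!struct_v || struct_compat != 1)
                goto bad;
        ceph_decode_32_safe(p, end, struct_len, bad);
        ceph_decode_need(p, end, struct_len, bad);
        end = *p + struct_len;          /* don't read past this struct */

        ceph_decode_64_safe(p, end, *a, bad);
        ceph_decode_32_safe(p, end, *b, bad);
        *p = end;                       /* skip unknown trailing fields */
        return 0;
bad:
        return -EIO;
}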
93
94 /*
95  * parse individual inode info
96  */
97 static int parse_reply_info_in(void **p, void *end,
98                                struct ceph_mds_reply_info_in *info,
99                                u64 features)
100 {
101         int err = 0;
102         u8 struct_v = 0;
103
104         if (features == (u64)-1) {
105                 u32 struct_len;
106                 u8 struct_compat;
107                 ceph_decode_8_safe(p, end, struct_v, bad);
108                 ceph_decode_8_safe(p, end, struct_compat, bad);
109                 /* struct_v is expected to be >= 1. we only understand
110                  * encoding with struct_compat == 1. */
111                 if (!struct_v || struct_compat != 1)
112                         goto bad;
113                 ceph_decode_32_safe(p, end, struct_len, bad);
114                 ceph_decode_need(p, end, struct_len, bad);
115                 end = *p + struct_len;
116         }
117
118         ceph_decode_need(p, end, sizeof(struct ceph_mds_reply_inode), bad);
119         info->in = *p;
120         *p += sizeof(struct ceph_mds_reply_inode) +
121                 sizeof(*info->in->fragtree.splits) *
122                 le32_to_cpu(info->in->fragtree.nsplits);
123
124         ceph_decode_32_safe(p, end, info->symlink_len, bad);
125         ceph_decode_need(p, end, info->symlink_len, bad);
126         info->symlink = *p;
127         *p += info->symlink_len;
128
129         ceph_decode_copy_safe(p, end, &info->dir_layout,
130                               sizeof(info->dir_layout), bad);
131         ceph_decode_32_safe(p, end, info->xattr_len, bad);
132         ceph_decode_need(p, end, info->xattr_len, bad);
133         info->xattr_data = *p;
134         *p += info->xattr_len;
135
136         if (features == (u64)-1) {
137                 /* inline data */
138                 ceph_decode_64_safe(p, end, info->inline_version, bad);
139                 ceph_decode_32_safe(p, end, info->inline_len, bad);
140                 ceph_decode_need(p, end, info->inline_len, bad);
141                 info->inline_data = *p;
142                 *p += info->inline_len;
143                 /* quota */
144                 err = parse_reply_info_quota(p, end, info);
145                 if (err < 0)
146                         goto out_bad;
147                 /* pool namespace */
148                 ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
149                 if (info->pool_ns_len > 0) {
150                         ceph_decode_need(p, end, info->pool_ns_len, bad);
151                         info->pool_ns_data = *p;
152                         *p += info->pool_ns_len;
153                 }
154
155                 /* btime */
156                 ceph_decode_need(p, end, sizeof(info->btime), bad);
157                 ceph_decode_copy(p, &info->btime, sizeof(info->btime));
158
159                 /* change attribute */
160                 ceph_decode_64_safe(p, end, info->change_attr, bad);
161
162                 /* dir pin */
163                 if (struct_v >= 2) {
164                         ceph_decode_32_safe(p, end, info->dir_pin, bad);
165                 } else {
166                         info->dir_pin = -ENODATA;
167                 }
168
169                 /* snapshot birth time, remains zero for v<=2 */
170                 if (struct_v >= 3) {
171                         ceph_decode_need(p, end, sizeof(info->snap_btime), bad);
172                         ceph_decode_copy(p, &info->snap_btime,
173                                          sizeof(info->snap_btime));
174                 } else {
175                         memset(&info->snap_btime, 0, sizeof(info->snap_btime));
176                 }
177
178                 *p = end;
179         } else {
180                 if (features & CEPH_FEATURE_MDS_INLINE_DATA) {
181                         ceph_decode_64_safe(p, end, info->inline_version, bad);
182                         ceph_decode_32_safe(p, end, info->inline_len, bad);
183                         ceph_decode_need(p, end, info->inline_len, bad);
184                         info->inline_data = *p;
185                         *p += info->inline_len;
186                 } else
187                         info->inline_version = CEPH_INLINE_NONE;
188
189                 if (features & CEPH_FEATURE_MDS_QUOTA) {
190                         err = parse_reply_info_quota(p, end, info);
191                         if (err < 0)
192                                 goto out_bad;
193                 } else {
194                         info->max_bytes = 0;
195                         info->max_files = 0;
196                 }
197
198                 info->pool_ns_len = 0;
199                 info->pool_ns_data = NULL;
200                 if (features & CEPH_FEATURE_FS_FILE_LAYOUT_V2) {
201                         ceph_decode_32_safe(p, end, info->pool_ns_len, bad);
202                         if (info->pool_ns_len > 0) {
203                                 ceph_decode_need(p, end, info->pool_ns_len, bad);
204                                 info->pool_ns_data = *p;
205                                 *p += info->pool_ns_len;
206                         }
207                 }
208
209                 if (features & CEPH_FEATURE_FS_BTIME) {
210                         ceph_decode_need(p, end, sizeof(info->btime), bad);
211                         ceph_decode_copy(p, &info->btime, sizeof(info->btime));
212                         ceph_decode_64_safe(p, end, info->change_attr, bad);
213                 }
214
215                 info->dir_pin = -ENODATA;
216                 /* info->snap_btime remains zero */
217         }
218         return 0;
219 bad:
220         err = -EIO;
221 out_bad:
222         return err;
223 }
224
225 static int parse_reply_info_dir(void **p, void *end,
226                                 struct ceph_mds_reply_dirfrag **dirfrag,
227                                 u64 features)
228 {
229         if (features == (u64)-1) {
230                 u8 struct_v, struct_compat;
231                 u32 struct_len;
232                 ceph_decode_8_safe(p, end, struct_v, bad);
233                 ceph_decode_8_safe(p, end, struct_compat, bad);
234                 /* struct_v is expected to be >= 1. we only understand
235                  * encoding with struct_compat == 1. */
236                 if (!struct_v || struct_compat != 1)
237                         goto bad;
238                 ceph_decode_32_safe(p, end, struct_len, bad);
239                 ceph_decode_need(p, end, struct_len, bad);
240                 end = *p + struct_len;
241         }
242
243         ceph_decode_need(p, end, sizeof(**dirfrag), bad);
244         *dirfrag = *p;
245         *p += sizeof(**dirfrag) + sizeof(u32) * le32_to_cpu((*dirfrag)->ndist);
246         if (unlikely(*p > end))
247                 goto bad;
248         if (features == (u64)-1)
249                 *p = end;
250         return 0;
251 bad:
252         return -EIO;
253 }
254
255 static int parse_reply_info_lease(void **p, void *end,
256                                   struct ceph_mds_reply_lease **lease,
257                                   u64 features)
258 {
259         if (features == (u64)-1) {
260                 u8 struct_v, struct_compat;
261                 u32 struct_len;
262                 ceph_decode_8_safe(p, end, struct_v, bad);
263                 ceph_decode_8_safe(p, end, struct_compat, bad);
264                 /* struct_v is expected to be >= 1. we only understand
265                  * encoding with struct_compat == 1. */
266                 if (!struct_v || struct_compat != 1)
267                         goto bad;
268                 ceph_decode_32_safe(p, end, struct_len, bad);
269                 ceph_decode_need(p, end, struct_len, bad);
270                 end = *p + struct_len;
271         }
272
273         ceph_decode_need(p, end, sizeof(**lease), bad);
274         *lease = *p;
275         *p += sizeof(**lease);
276         if (features == (u64)-1)
277                 *p = end;
278         return 0;
279 bad:
280         return -EIO;
281 }
282
283 /*
284  * parse a normal reply, which may contain a (dir+)dentry and/or a
285  * target inode.
286  */
287 static int parse_reply_info_trace(void **p, void *end,
288                                   struct ceph_mds_reply_info_parsed *info,
289                                   u64 features)
290 {
291         int err;
292
293         if (info->head->is_dentry) {
294                 err = parse_reply_info_in(p, end, &info->diri, features);
295                 if (err < 0)
296                         goto out_bad;
297
298                 err = parse_reply_info_dir(p, end, &info->dirfrag, features);
299                 if (err < 0)
300                         goto out_bad;
301
302                 ceph_decode_32_safe(p, end, info->dname_len, bad);
303                 ceph_decode_need(p, end, info->dname_len, bad);
304                 info->dname = *p;
305                 *p += info->dname_len;
306
307                 err = parse_reply_info_lease(p, end, &info->dlease, features);
308                 if (err < 0)
309                         goto out_bad;
310         }
311
312         if (info->head->is_target) {
313                 err = parse_reply_info_in(p, end, &info->targeti, features);
314                 if (err < 0)
315                         goto out_bad;
316         }
317
318         if (unlikely(*p != end))
319                 goto bad;
320         return 0;
321
322 bad:
323         err = -EIO;
324 out_bad:
325         pr_err("problem parsing mds trace %d\n", err);
326         return err;
327 }
328
329 /*
330  * parse readdir results
331  */
332 static int parse_reply_info_readdir(void **p, void *end,
333                                 struct ceph_mds_reply_info_parsed *info,
334                                 u64 features)
335 {
336         u32 num, i = 0;
337         int err;
338
339         err = parse_reply_info_dir(p, end, &info->dir_dir, features);
340         if (err < 0)
341                 goto out_bad;
342
343         ceph_decode_need(p, end, sizeof(num) + 2, bad);
344         num = ceph_decode_32(p);
345         {
346                 u16 flags = ceph_decode_16(p);
347                 info->dir_end = !!(flags & CEPH_READDIR_FRAG_END);
348                 info->dir_complete = !!(flags & CEPH_READDIR_FRAG_COMPLETE);
349                 info->hash_order = !!(flags & CEPH_READDIR_HASH_ORDER);
350                 info->offset_hash = !!(flags & CEPH_READDIR_OFFSET_HASH);
351         }
352         if (num == 0)
353                 goto done;
354
355         BUG_ON(!info->dir_entries);
356         if ((unsigned long)(info->dir_entries + num) >
357             (unsigned long)info->dir_entries + info->dir_buf_size) {
358                 pr_err("dir contents are larger than expected\n");
359                 WARN_ON(1);
360                 goto bad;
361         }
362
363         info->dir_nr = num;
364         while (num) {
365                 struct ceph_mds_reply_dir_entry *rde = info->dir_entries + i;
366                 /* dentry */
367                 ceph_decode_32_safe(p, end, rde->name_len, bad);
368                 ceph_decode_need(p, end, rde->name_len, bad);
369                 rde->name = *p;
370                 *p += rde->name_len;
371                 dout("parsed dir dname '%.*s'\n", rde->name_len, rde->name);
372
373                 /* dentry lease */
374                 err = parse_reply_info_lease(p, end, &rde->lease, features);
375                 if (err)
376                         goto out_bad;
377                 /* inode */
378                 err = parse_reply_info_in(p, end, &rde->inode, features);
379                 if (err < 0)
380                         goto out_bad;
381                 /* ceph_readdir_prepopulate() will update it */
382                 rde->offset = 0;
383                 i++;
384                 num--;
385         }
386
387 done:
388         /* Skip over any unrecognized fields */
389         *p = end;
390         return 0;
391
392 bad:
393         err = -EIO;
394 out_bad:
395         pr_err("problem parsing dir contents %d\n", err);
396         return err;
397 }
398
399 /*
400  * parse fcntl F_GETLK results
401  */
402 static int parse_reply_info_filelock(void **p, void *end,
403                                      struct ceph_mds_reply_info_parsed *info,
404                                      u64 features)
405 {
406         if (*p + sizeof(*info->filelock_reply) > end)
407                 goto bad;
408
409         info->filelock_reply = *p;
410
411         /* Skip over any unrecognized fields */
412         *p = end;
413         return 0;
414 bad:
415         return -EIO;
416 }
417
418 /*
419  * parse create results
420  */
421 static int parse_reply_info_create(void **p, void *end,
422                                   struct ceph_mds_reply_info_parsed *info,
423                                   u64 features)
424 {
425         if (features == (u64)-1 ||
426             (features & CEPH_FEATURE_REPLY_CREATE_INODE)) {
427                 /* Malformed reply? */
428                 if (*p == end) {
429                         info->has_create_ino = false;
430                 } else {
431                         info->has_create_ino = true;
432                         ceph_decode_64_safe(p, end, info->ino, bad);
433                 }
434         } else {
435                 if (*p != end)
436                         goto bad;
437         }
438
439         /* Skip over any unrecognized fields */
440         *p = end;
441         return 0;
442 bad:
443         return -EIO;
444 }
445
446 /*
447  * parse extra results
448  */
449 static int parse_reply_info_extra(void **p, void *end,
450                                   struct ceph_mds_reply_info_parsed *info,
451                                   u64 features)
452 {
453         u32 op = le32_to_cpu(info->head->op);
454
455         if (op == CEPH_MDS_OP_GETFILELOCK)
456                 return parse_reply_info_filelock(p, end, info, features);
457         else if (op == CEPH_MDS_OP_READDIR || op == CEPH_MDS_OP_LSSNAP)
458                 return parse_reply_info_readdir(p, end, info, features);
459         else if (op == CEPH_MDS_OP_CREATE)
460                 return parse_reply_info_create(p, end, info, features);
461         else
462                 return -EIO;
463 }
464
465 /*
466  * parse entire mds reply
467  */
468 static int parse_reply_info(struct ceph_msg *msg,
469                             struct ceph_mds_reply_info_parsed *info,
470                             u64 features)
471 {
472         void *p, *end;
473         u32 len;
474         int err;
475
476         info->head = msg->front.iov_base;
477         p = msg->front.iov_base + sizeof(struct ceph_mds_reply_head);
478         end = p + msg->front.iov_len - sizeof(struct ceph_mds_reply_head);
479
480         /* trace */
481         ceph_decode_32_safe(&p, end, len, bad);
482         if (len > 0) {
483                 ceph_decode_need(&p, end, len, bad);
484                 err = parse_reply_info_trace(&p, p+len, info, features);
485                 if (err < 0)
486                         goto out_bad;
487         }
488
489         /* extra */
490         ceph_decode_32_safe(&p, end, len, bad);
491         if (len > 0) {
492                 ceph_decode_need(&p, end, len, bad);
493                 err = parse_reply_info_extra(&p, p+len, info, features);
494                 if (err < 0)
495                         goto out_bad;
496         }
497
498         /* snap blob */
499         ceph_decode_32_safe(&p, end, len, bad);
500         info->snapblob_len = len;
501         info->snapblob = p;
502         p += len;
503
504         if (p != end)
505                 goto bad;
506         return 0;
507
508 bad:
509         err = -EIO;
510 out_bad:
511         pr_err("mds parse_reply err %d\n", err);
512         return err;
513 }
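
/*
 * For reference, the reply body that parse_reply_info() walks, following
 * the fixed ceph_mds_reply_head, is three length-prefixed blobs laid out
 * back to back (all lengths little-endian u32):
 *
 *     u32 trace_len | trace bytes   (dentry/inode trace, may be empty)
 *     u32 extra_len | extra bytes   (op-specific: readdir/filelock/create)
 *     u32 snap_len  | snap bytes    (snap realm blob, decoded elsewhere)
 *
 * Any bytes left over after the snap blob make the reply -EIO.
 */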
514
515 static void destroy_reply_info(struct ceph_mds_reply_info_parsed *info)
516 {
517         if (!info->dir_entries)
518                 return;
519         free_pages((unsigned long)info->dir_entries, get_order(info->dir_buf_size));
520 }
521
522
523 /*
524  * sessions
525  */
526 const char *ceph_session_state_name(int s)
527 {
528         switch (s) {
529         case CEPH_MDS_SESSION_NEW: return "new";
530         case CEPH_MDS_SESSION_OPENING: return "opening";
531         case CEPH_MDS_SESSION_OPEN: return "open";
532         case CEPH_MDS_SESSION_HUNG: return "hung";
533         case CEPH_MDS_SESSION_CLOSING: return "closing";
534         case CEPH_MDS_SESSION_CLOSED: return "closed";
535         case CEPH_MDS_SESSION_RESTARTING: return "restarting";
536         case CEPH_MDS_SESSION_RECONNECTING: return "reconnecting";
537         case CEPH_MDS_SESSION_REJECTED: return "rejected";
538         default: return "???";
539         }
540 }
541
542 struct ceph_mds_session *ceph_get_mds_session(struct ceph_mds_session *s)
543 {
544         if (refcount_inc_not_zero(&s->s_ref)) {
545                 dout("mdsc get_session %p %d -> %d\n", s,
546                      refcount_read(&s->s_ref)-1, refcount_read(&s->s_ref));
547                 return s;
548         } else {
549                 dout("mdsc get_session %p 0 -- FAIL\n", s);
550                 return NULL;
551         }
552 }
553
554 void ceph_put_mds_session(struct ceph_mds_session *s)
555 {
556         dout("mdsc put_session %p %d -> %d\n", s,
557              refcount_read(&s->s_ref), refcount_read(&s->s_ref)-1);
558         if (refcount_dec_and_test(&s->s_ref)) {
559                 if (s->s_auth.authorizer)
560                         ceph_auth_destroy_authorizer(s->s_auth.authorizer);
561                 kfree(s);
562         }
563 }
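
/*
 * A hedged usage sketch (hypothetical caller, not used in this file):
 * session pointers fetched from mdsc->sessions[] must be pinned via
 * ceph_get_mds_session()/__ceph_lookup_mds_session() before mdsc->mutex
 * is dropped, and released with ceph_put_mds_session() afterwards.
 */
static inline void example_use_session(struct ceph_mds_client *mdsc, int mds)
{
        struct ceph_mds_session *s;

        mutex_lock(&mdsc->mutex);
        s = __ceph_lookup_mds_session(mdsc, mds);  /* bumps s_ref on success */
        mutex_unlock(&mdsc->mutex);
        if (!s)
                return;
        /* ... safe to use s here without mdsc->mutex held ... */
        ceph_put_mds_session(s);
}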
564
565 /*
566  * called under mdsc->mutex
567  */
568 struct ceph_mds_session *__ceph_lookup_mds_session(struct ceph_mds_client *mdsc,
569                                                    int mds)
570 {
571         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
572                 return NULL;
573         return ceph_get_mds_session(mdsc->sessions[mds]);
574 }
575
576 static bool __have_session(struct ceph_mds_client *mdsc, int mds)
577 {
578         if (mds >= mdsc->max_sessions || !mdsc->sessions[mds])
579                 return false;
580         else
581                 return true;
582 }
583
584 static int __verify_registered_session(struct ceph_mds_client *mdsc,
585                                        struct ceph_mds_session *s)
586 {
587         if (s->s_mds >= mdsc->max_sessions ||
588             mdsc->sessions[s->s_mds] != s)
589                 return -ENOENT;
590         return 0;
591 }
592
593 /*
594  * create+register a new session for given mds.
595  * called under mdsc->mutex.
596  */
597 static struct ceph_mds_session *register_session(struct ceph_mds_client *mdsc,
598                                                  int mds)
599 {
600         struct ceph_mds_session *s;
601
602         if (mds >= mdsc->mdsmap->possible_max_rank)
603                 return ERR_PTR(-EINVAL);
604
605         s = kzalloc(sizeof(*s), GFP_NOFS);
606         if (!s)
607                 return ERR_PTR(-ENOMEM);
608
609         if (mds >= mdsc->max_sessions) {
610                 int newmax = 1 << get_count_order(mds + 1);
611                 struct ceph_mds_session **sa;
612
613                 dout("%s: realloc to %d\n", __func__, newmax);
614                 sa = kcalloc(newmax, sizeof(void *), GFP_NOFS);
615                 if (!sa)
616                         goto fail_realloc;
617                 if (mdsc->sessions) {
618                         memcpy(sa, mdsc->sessions,
619                                mdsc->max_sessions * sizeof(void *));
620                         kfree(mdsc->sessions);
621                 }
622                 mdsc->sessions = sa;
623                 mdsc->max_sessions = newmax;
624         }
625
626         dout("%s: mds%d\n", __func__, mds);
627         s->s_mdsc = mdsc;
628         s->s_mds = mds;
629         s->s_state = CEPH_MDS_SESSION_NEW;
630         s->s_ttl = 0;
631         s->s_seq = 0;
632         mutex_init(&s->s_mutex);
633
634         ceph_con_init(&s->s_con, s, &mds_con_ops, &mdsc->fsc->client->msgr);
635
636         spin_lock_init(&s->s_gen_ttl_lock);
637         s->s_cap_gen = 1;
638         s->s_cap_ttl = jiffies - 1;
639
640         spin_lock_init(&s->s_cap_lock);
641         s->s_renew_requested = 0;
642         s->s_renew_seq = 0;
643         INIT_LIST_HEAD(&s->s_caps);
644         s->s_nr_caps = 0;
645         refcount_set(&s->s_ref, 1);
646         INIT_LIST_HEAD(&s->s_waiting);
647         INIT_LIST_HEAD(&s->s_unsafe);
648         s->s_num_cap_releases = 0;
649         s->s_cap_reconnect = 0;
650         s->s_cap_iterator = NULL;
651         INIT_LIST_HEAD(&s->s_cap_releases);
652         INIT_WORK(&s->s_cap_release_work, ceph_cap_release_work);
653
654         INIT_LIST_HEAD(&s->s_cap_flushing);
655
656         mdsc->sessions[mds] = s;
657         atomic_inc(&mdsc->num_sessions);
658         refcount_inc(&s->s_ref);  /* one ref to sessions[], one to caller */
659
660         ceph_con_open(&s->s_con, CEPH_ENTITY_TYPE_MDS, mds,
661                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
662
663         return s;
664
665 fail_realloc:
666         kfree(s);
667         return ERR_PTR(-ENOMEM);
668 }
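
/*
 * Example of the growth policy in register_session() above:
 * get_count_order(mds + 1) rounds up to the next power of two, so
 * registering mds5 while max_sessions == 4 reallocates the array to
 * 1 << get_count_order(6) == 8 slots.
 */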
669
670 /*
671  * called under mdsc->mutex
672  */
673 static void __unregister_session(struct ceph_mds_client *mdsc,
674                                struct ceph_mds_session *s)
675 {
676         dout("__unregister_session mds%d %p\n", s->s_mds, s);
677         BUG_ON(mdsc->sessions[s->s_mds] != s);
678         mdsc->sessions[s->s_mds] = NULL;
679         ceph_con_close(&s->s_con);
680         ceph_put_mds_session(s);
681         atomic_dec(&mdsc->num_sessions);
682 }
683
684 /*
685  * drop session refs in request.
686  *
687  * should be last request ref, or hold mdsc->mutex
688  */
689 static void put_request_session(struct ceph_mds_request *req)
690 {
691         if (req->r_session) {
692                 ceph_put_mds_session(req->r_session);
693                 req->r_session = NULL;
694         }
695 }
696
697 void ceph_mdsc_release_request(struct kref *kref)
698 {
699         struct ceph_mds_request *req = container_of(kref,
700                                                     struct ceph_mds_request,
701                                                     r_kref);
702         destroy_reply_info(&req->r_reply_info);
703         if (req->r_request)
704                 ceph_msg_put(req->r_request);
705         if (req->r_reply)
706                 ceph_msg_put(req->r_reply);
707         if (req->r_inode) {
708                 ceph_put_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
709                 /* avoid calling iput_final() in mds dispatch threads */
710                 ceph_async_iput(req->r_inode);
711         }
712         if (req->r_parent) {
713                 ceph_put_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
714                 ceph_async_iput(req->r_parent);
715         }
716         ceph_async_iput(req->r_target_inode);
717         if (req->r_dentry)
718                 dput(req->r_dentry);
719         if (req->r_old_dentry)
720                 dput(req->r_old_dentry);
721         if (req->r_old_dentry_dir) {
722                 /*
723                  * track (and drop pins for) r_old_dentry_dir
724                  * separately, since r_old_dentry's d_parent may have
725                  * changed between the dir mutex being dropped and
726                  * this request being freed.
727                  */
728                 ceph_put_cap_refs(ceph_inode(req->r_old_dentry_dir),
729                                   CEPH_CAP_PIN);
730                 ceph_async_iput(req->r_old_dentry_dir);
731         }
732         kfree(req->r_path1);
733         kfree(req->r_path2);
734         if (req->r_pagelist)
735                 ceph_pagelist_release(req->r_pagelist);
736         put_request_session(req);
737         ceph_unreserve_caps(req->r_mdsc, &req->r_caps_reservation);
738         WARN_ON_ONCE(!list_empty(&req->r_wait));
739         kmem_cache_free(ceph_mds_request_cachep, req);
740 }
741
742 DEFINE_RB_FUNCS(request, struct ceph_mds_request, r_tid, r_node)
743
744 /*
745  * lookup request, bump ref if found.
746  *
747  * called under mdsc->mutex.
748  */
749 static struct ceph_mds_request *
750 lookup_get_request(struct ceph_mds_client *mdsc, u64 tid)
751 {
752         struct ceph_mds_request *req;
753
754         req = lookup_request(&mdsc->request_tree, tid);
755         if (req)
756                 ceph_mdsc_get_request(req);
757
758         return req;
759 }
760
761 /*
762  * Register an in-flight request, and assign a tid.  Link to the
763  * directory we are modifying (if any).
764  *
765  * Called under mdsc->mutex.
766  */
767 static void __register_request(struct ceph_mds_client *mdsc,
768                                struct ceph_mds_request *req,
769                                struct inode *dir)
770 {
771         int ret = 0;
772
773         req->r_tid = ++mdsc->last_tid;
774         if (req->r_num_caps) {
775                 ret = ceph_reserve_caps(mdsc, &req->r_caps_reservation,
776                                         req->r_num_caps);
777                 if (ret < 0) {
778                         pr_err("__register_request %p "
779                                "failed to reserve caps: %d\n", req, ret);
780                         /* set req->r_err to fail early from __do_request */
781                         req->r_err = ret;
782                         return;
783                 }
784         }
785         dout("__register_request %p tid %lld\n", req, req->r_tid);
786         ceph_mdsc_get_request(req);
787         insert_request(&mdsc->request_tree, req);
788
789         req->r_uid = current_fsuid();
790         req->r_gid = current_fsgid();
791
792         if (mdsc->oldest_tid == 0 && req->r_op != CEPH_MDS_OP_SETFILELOCK)
793                 mdsc->oldest_tid = req->r_tid;
794
795         if (dir) {
796                 struct ceph_inode_info *ci = ceph_inode(dir);
797
798                 ihold(dir);
799                 req->r_unsafe_dir = dir;
800                 spin_lock(&ci->i_unsafe_lock);
801                 list_add_tail(&req->r_unsafe_dir_item, &ci->i_unsafe_dirops);
802                 spin_unlock(&ci->i_unsafe_lock);
803         }
804 }
805
806 static void __unregister_request(struct ceph_mds_client *mdsc,
807                                  struct ceph_mds_request *req)
808 {
809         dout("__unregister_request %p tid %lld\n", req, req->r_tid);
810
811         /* Never leave an unregistered request on an unsafe list! */
812         list_del_init(&req->r_unsafe_item);
813
814         if (req->r_tid == mdsc->oldest_tid) {
815                 struct rb_node *p = rb_next(&req->r_node);
816                 mdsc->oldest_tid = 0;
817                 while (p) {
818                         struct ceph_mds_request *next_req =
819                                 rb_entry(p, struct ceph_mds_request, r_node);
820                         if (next_req->r_op != CEPH_MDS_OP_SETFILELOCK) {
821                                 mdsc->oldest_tid = next_req->r_tid;
822                                 break;
823                         }
824                         p = rb_next(p);
825                 }
826         }
827
828         erase_request(&mdsc->request_tree, req);
829
830         if (req->r_unsafe_dir) {
831                 struct ceph_inode_info *ci = ceph_inode(req->r_unsafe_dir);
832                 spin_lock(&ci->i_unsafe_lock);
833                 list_del_init(&req->r_unsafe_dir_item);
834                 spin_unlock(&ci->i_unsafe_lock);
835         }
836         if (req->r_target_inode &&
837             test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
838                 struct ceph_inode_info *ci = ceph_inode(req->r_target_inode);
839                 spin_lock(&ci->i_unsafe_lock);
840                 list_del_init(&req->r_unsafe_target_item);
841                 spin_unlock(&ci->i_unsafe_lock);
842         }
843
844         if (req->r_unsafe_dir) {
845                 /* avoid calling iput_final() in mds dispatch threads */
846                 ceph_async_iput(req->r_unsafe_dir);
847                 req->r_unsafe_dir = NULL;
848         }
849
850         complete_all(&req->r_safe_completion);
851
852         ceph_mdsc_put_request(req);
853 }
854
855 /*
856  * Walk back up the dentry tree until we hit a dentry representing a
857  * non-snapshot inode. We do this using the rcu_read_lock (which must be held
858  * when calling this) to ensure that the objects won't disappear while we're
859  * working with them. Once we hit a candidate dentry, we attempt to take a
860  * reference to it, and return that as the result.
861  */
862 static struct inode *get_nonsnap_parent(struct dentry *dentry)
863 {
864         struct inode *inode = NULL;
865
866         while (dentry && !IS_ROOT(dentry)) {
867                 inode = d_inode_rcu(dentry);
868                 if (!inode || ceph_snap(inode) == CEPH_NOSNAP)
869                         break;
870                 dentry = dentry->d_parent;
871         }
872         if (inode)
873                 inode = igrab(inode);
874         return inode;
875 }
876
877 /*
878  * Choose mds to send request to next.  If there is a hint set in the
879  * request (e.g., due to a prior forward hint from the mds), use that.
880  * Otherwise, consult frag tree and/or caps to identify the
881  * appropriate mds.  If all else fails, choose randomly.
882  *
883  * Called under mdsc->mutex.
884  */
885 static int __choose_mds(struct ceph_mds_client *mdsc,
886                         struct ceph_mds_request *req,
887                         bool *random)
888 {
889         struct inode *inode;
890         struct ceph_inode_info *ci;
891         struct ceph_cap *cap;
892         int mode = req->r_direct_mode;
893         int mds = -1;
894         u32 hash = req->r_direct_hash;
895         bool is_hash = test_bit(CEPH_MDS_R_DIRECT_IS_HASH, &req->r_req_flags);
896
897         if (random)
898                 *random = false;
899
900         /*
901          * is there a specific mds we should try?  ignore hint if we have
902          * no session and the mds is not up (active or recovering).
903          */
904         if (req->r_resend_mds >= 0 &&
905             (__have_session(mdsc, req->r_resend_mds) ||
906              ceph_mdsmap_get_state(mdsc->mdsmap, req->r_resend_mds) > 0)) {
907                 dout("%s using resend_mds mds%d\n", __func__,
908                      req->r_resend_mds);
909                 return req->r_resend_mds;
910         }
911
912         if (mode == USE_RANDOM_MDS)
913                 goto random;
914
915         inode = NULL;
916         if (req->r_inode) {
917                 if (ceph_snap(req->r_inode) != CEPH_SNAPDIR) {
918                         inode = req->r_inode;
919                         ihold(inode);
920                 } else {
921                         /* req->r_dentry is non-null for LSSNAP request */
922                         rcu_read_lock();
923                         inode = get_nonsnap_parent(req->r_dentry);
924                         rcu_read_unlock();
925                         dout("%s using snapdir's parent %p\n", __func__, inode);
926                 }
927         } else if (req->r_dentry) {
928                 /* ignore race with rename; old or new d_parent is okay */
929                 struct dentry *parent;
930                 struct inode *dir;
931
932                 rcu_read_lock();
933                 parent = READ_ONCE(req->r_dentry->d_parent);
934                 dir = req->r_parent ? : d_inode_rcu(parent);
935
936                 if (!dir || dir->i_sb != mdsc->fsc->sb) {
937                         /*  not this fs or parent went negative */
938                         inode = d_inode(req->r_dentry);
939                         if (inode)
940                                 ihold(inode);
941                 } else if (ceph_snap(dir) != CEPH_NOSNAP) {
942                         /* direct snapped/virtual snapdir requests
943                          * based on parent dir inode */
944                         inode = get_nonsnap_parent(parent);
945                         dout("%s using nonsnap parent %p\n", __func__, inode);
946                 } else {
947                         /* dentry target */
948                         inode = d_inode(req->r_dentry);
949                         if (!inode || mode == USE_AUTH_MDS) {
950                                 /* dir + name */
951                                 inode = igrab(dir);
952                                 hash = ceph_dentry_hash(dir, req->r_dentry);
953                                 is_hash = true;
954                         } else {
955                                 ihold(inode);
956                         }
957                 }
958                 rcu_read_unlock();
959         }
960
961         dout("%s %p is_hash=%d (0x%x) mode %d\n", __func__, inode, (int)is_hash,
962              hash, mode);
963         if (!inode)
964                 goto random;
965         ci = ceph_inode(inode);
966
967         if (is_hash && S_ISDIR(inode->i_mode)) {
968                 struct ceph_inode_frag frag;
969                 int found;
970
971                 ceph_choose_frag(ci, hash, &frag, &found);
972                 if (found) {
973                         if (mode == USE_ANY_MDS && frag.ndist > 0) {
974                                 u8 r;
975
976                                 /* choose a random replica */
977                                 get_random_bytes(&r, 1);
978                                 r %= frag.ndist;
979                                 mds = frag.dist[r];
980                                 dout("%s %p %llx.%llx frag %u mds%d (%d/%d)\n",
981                                      __func__, inode, ceph_vinop(inode),
982                                      frag.frag, mds, (int)r, frag.ndist);
983                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
984                                     CEPH_MDS_STATE_ACTIVE &&
985                                     !ceph_mdsmap_is_laggy(mdsc->mdsmap, mds))
986                                         goto out;
987                         }
988
989                         /* since this file/dir wasn't known to be
990                          * replicated, we want to look for the
991                          * authoritative mds. */
992                         if (frag.mds >= 0) {
993                                 /* choose auth mds */
994                                 mds = frag.mds;
995                                 dout("%s %p %llx.%llx frag %u mds%d (auth)\n",
996                                      __func__, inode, ceph_vinop(inode),
997                                      frag.frag, mds);
998                                 if (ceph_mdsmap_get_state(mdsc->mdsmap, mds) >=
999                                     CEPH_MDS_STATE_ACTIVE) {
1000                                         if (mode == USE_ANY_MDS &&
1001                                             !ceph_mdsmap_is_laggy(mdsc->mdsmap,
1002                                                                   mds))
1003                                                 goto out;
1004                                 }
1005                         }
1006                         mode = USE_AUTH_MDS;
1007                 }
1008         }
1009
1010         spin_lock(&ci->i_ceph_lock);
1011         cap = NULL;
1012         if (mode == USE_AUTH_MDS)
1013                 cap = ci->i_auth_cap;
1014         if (!cap && !RB_EMPTY_ROOT(&ci->i_caps))
1015                 cap = rb_entry(rb_first(&ci->i_caps), struct ceph_cap, ci_node);
1016         if (!cap) {
1017                 spin_unlock(&ci->i_ceph_lock);
1018                 ceph_async_iput(inode);
1019                 goto random;
1020         }
1021         mds = cap->session->s_mds;
1022         dout("%s %p %llx.%llx mds%d (%scap %p)\n", __func__,
1023              inode, ceph_vinop(inode), mds,
1024              cap == ci->i_auth_cap ? "auth " : "", cap);
1025         spin_unlock(&ci->i_ceph_lock);
1026 out:
1027         /* avoid calling iput_final() while holding mdsc->mutex or
1028          * in mds dispatch threads */
1029         ceph_async_iput(inode);
1030         return mds;
1031
1032 random:
1033         if (random)
1034                 *random = true;
1035
1036         mds = ceph_mdsmap_get_random_mds(mdsc->mdsmap);
1037         dout("%s chose random mds%d\n", __func__, mds);
1038         return mds;
1039 }
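
/*
 * To recap the selection order implemented above: an explicit resend /
 * forward hint wins; otherwise a hashed dirfrag can name a random replica
 * (USE_ANY_MDS) or the frag's auth mds; otherwise the inode's auth cap
 * (or, failing that, any cap) names the mds; and only when no usable
 * inode or cap exists do we fall back to a random mds from the map.
 */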
1040
1041
1042 /*
1043  * session messages
1044  */
1045 static struct ceph_msg *create_session_msg(u32 op, u64 seq)
1046 {
1047         struct ceph_msg *msg;
1048         struct ceph_mds_session_head *h;
1049
1050         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h), GFP_NOFS,
1051                            false);
1052         if (!msg) {
1053                 pr_err("create_session_msg ENOMEM creating msg\n");
1054                 return NULL;
1055         }
1056         h = msg->front.iov_base;
1057         h->op = cpu_to_le32(op);
1058         h->seq = cpu_to_le64(seq);
1059
1060         return msg;
1061 }
1062
1063 static const unsigned char feature_bits[] = CEPHFS_FEATURES_CLIENT_SUPPORTED;
1064 #define FEATURE_BYTES(c) (DIV_ROUND_UP((size_t)feature_bits[c - 1] + 1, 64) * 8)
1065 static void encode_supported_features(void **p, void *end)
1066 {
1067         static const size_t count = ARRAY_SIZE(feature_bits);
1068
1069         if (count > 0) {
1070                 size_t i;
1071                 size_t size = FEATURE_BYTES(count);
1072
1073                 BUG_ON(*p + 4 + size > end);
1074                 ceph_encode_32(p, size);
1075                 memset(*p, 0, size);
1076                 for (i = 0; i < count; i++)
1077                         ((unsigned char*)(*p))[i / 8] |= BIT(feature_bits[i] % 8);
1078                 *p += size;
1079         } else {
1080                 BUG_ON(*p + 4 > end);
1081                 ceph_encode_32(p, 0);
1082         }
1083 }
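
/*
 * Worked example for FEATURE_BYTES(): if the highest supported feature
 * bit is, say, 10, then DIV_ROUND_UP(10 + 1, 64) = 1 64-bit word, so the
 * bitmap occupies 8 bytes on the wire, preceded by a u32 length.  Note
 * that the fill loop above indexes the byte with 'i / 8', which only
 * matches 'feature_bits[i] / 8' because CEPHFS_FEATURES_CLIENT_SUPPORTED
 * lists consecutive bits starting at 0 in this tree.
 */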
1084
1085 /*
1086  * session message, specialization for CEPH_SESSION_REQUEST_OPEN
1087  * to include additional client metadata fields.
1088  */
1089 static struct ceph_msg *create_session_open_msg(struct ceph_mds_client *mdsc, u64 seq)
1090 {
1091         struct ceph_msg *msg;
1092         struct ceph_mds_session_head *h;
1093         int i = -1;
1094         int extra_bytes = 0;
1095         int metadata_key_count = 0;
1096         struct ceph_options *opt = mdsc->fsc->client->options;
1097         struct ceph_mount_options *fsopt = mdsc->fsc->mount_options;
1098         size_t size, count;
1099         void *p, *end;
1100
1101         const char* metadata[][2] = {
1102                 {"hostname", mdsc->nodename},
1103                 {"kernel_version", init_utsname()->release},
1104                 {"entity_id", opt->name ? : ""},
1105                 {"root", fsopt->server_path ? : "/"},
1106                 {NULL, NULL}
1107         };
1108
1109         /* Calculate serialized length of metadata */
1110         extra_bytes = 4;  /* map length */
1111         for (i = 0; metadata[i][0]; ++i) {
1112                 extra_bytes += 8 + strlen(metadata[i][0]) +
1113                         strlen(metadata[i][1]);
1114                 metadata_key_count++;
1115         }
1116
1117         /* supported feature */
1118         size = 0;
1119         count = ARRAY_SIZE(feature_bits);
1120         if (count > 0)
1121                 size = FEATURE_BYTES(count);
1122         extra_bytes += 4 + size;
1123
1124         /* Allocate the message */
1125         msg = ceph_msg_new(CEPH_MSG_CLIENT_SESSION, sizeof(*h) + extra_bytes,
1126                            GFP_NOFS, false);
1127         if (!msg) {
1128                 pr_err("create_session_open_msg ENOMEM creating msg\n");
1129                 return NULL;
1130         }
1131         p = msg->front.iov_base;
1132         end = p + msg->front.iov_len;
1133
1134         h = p;
1135         h->op = cpu_to_le32(CEPH_SESSION_REQUEST_OPEN);
1136         h->seq = cpu_to_le64(seq);
1137
1138         /*
1139          * Serialize client metadata into waiting buffer space, using
1140          * the format that userspace expects for map<string, string>
1141          *
1142          * ClientSession messages with metadata are v3
1143          */
1144         msg->hdr.version = cpu_to_le16(3);
1145         msg->hdr.compat_version = cpu_to_le16(1);
1146
1147         /* The write pointer, following the session_head structure */
1148         p += sizeof(*h);
1149
1150         /* Number of entries in the map */
1151         ceph_encode_32(&p, metadata_key_count);
1152
1153         /* Two length-prefixed strings for each entry in the map */
1154         for (i = 0; metadata[i][0]; ++i) {
1155                 size_t const key_len = strlen(metadata[i][0]);
1156                 size_t const val_len = strlen(metadata[i][1]);
1157
1158                 ceph_encode_32(&p, key_len);
1159                 memcpy(p, metadata[i][0], key_len);
1160                 p += key_len;
1161                 ceph_encode_32(&p, val_len);
1162                 memcpy(p, metadata[i][1], val_len);
1163                 p += val_len;
1164         }
1165
1166         encode_supported_features(&p, end);
1167         msg->front.iov_len = p - msg->front.iov_base;
1168         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1169
1170         return msg;
1171 }
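
/*
 * The client metadata serialized above uses the userspace encoding for
 * map<string, string>: a u32 entry count, then two length-prefixed byte
 * strings per entry (u32 key_len | key | u32 val_len | value), followed
 * by the supported-feature bitmap.  For example, a "hostname" -> "node1"
 * entry encodes as:  08 00 00 00 "hostname" 05 00 00 00 "node1"
 * (lengths little-endian, strings unterminated).
 */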
1172
1173 /*
1174  * send session open request.
1175  *
1176  * called under mdsc->mutex
1177  */
1178 static int __open_session(struct ceph_mds_client *mdsc,
1179                           struct ceph_mds_session *session)
1180 {
1181         struct ceph_msg *msg;
1182         int mstate;
1183         int mds = session->s_mds;
1184
1185         /* wait for mds to go active? */
1186         mstate = ceph_mdsmap_get_state(mdsc->mdsmap, mds);
1187         dout("open_session to mds%d (%s)\n", mds,
1188              ceph_mds_state_name(mstate));
1189         session->s_state = CEPH_MDS_SESSION_OPENING;
1190         session->s_renew_requested = jiffies;
1191
1192         /* send connect message */
1193         msg = create_session_open_msg(mdsc, session->s_seq);
1194         if (!msg)
1195                 return -ENOMEM;
1196         ceph_con_send(&session->s_con, msg);
1197         return 0;
1198 }
1199
1200 /*
1201  * open sessions for any export targets for the given mds
1202  *
1203  * called under mdsc->mutex
1204  */
1205 static struct ceph_mds_session *
1206 __open_export_target_session(struct ceph_mds_client *mdsc, int target)
1207 {
1208         struct ceph_mds_session *session;
1209
1210         session = __ceph_lookup_mds_session(mdsc, target);
1211         if (!session) {
1212                 session = register_session(mdsc, target);
1213                 if (IS_ERR(session))
1214                         return session;
1215         }
1216         if (session->s_state == CEPH_MDS_SESSION_NEW ||
1217             session->s_state == CEPH_MDS_SESSION_CLOSING)
1218                 __open_session(mdsc, session);
1219
1220         return session;
1221 }
1222
1223 struct ceph_mds_session *
1224 ceph_mdsc_open_export_target_session(struct ceph_mds_client *mdsc, int target)
1225 {
1226         struct ceph_mds_session *session;
1227
1228         dout("open_export_target_session to mds%d\n", target);
1229
1230         mutex_lock(&mdsc->mutex);
1231         session = __open_export_target_session(mdsc, target);
1232         mutex_unlock(&mdsc->mutex);
1233
1234         return session;
1235 }
1236
1237 static void __open_export_target_sessions(struct ceph_mds_client *mdsc,
1238                                           struct ceph_mds_session *session)
1239 {
1240         struct ceph_mds_info *mi;
1241         struct ceph_mds_session *ts;
1242         int i, mds = session->s_mds;
1243
1244         if (mds >= mdsc->mdsmap->possible_max_rank)
1245                 return;
1246
1247         mi = &mdsc->mdsmap->m_info[mds];
1248         dout("open_export_target_sessions for mds%d (%d targets)\n",
1249              session->s_mds, mi->num_export_targets);
1250
1251         for (i = 0; i < mi->num_export_targets; i++) {
1252                 ts = __open_export_target_session(mdsc, mi->export_targets[i]);
1253                 if (!IS_ERR(ts))
1254                         ceph_put_mds_session(ts);
1255         }
1256 }
1257
1258 void ceph_mdsc_open_export_target_sessions(struct ceph_mds_client *mdsc,
1259                                            struct ceph_mds_session *session)
1260 {
1261         mutex_lock(&mdsc->mutex);
1262         __open_export_target_sessions(mdsc, session);
1263         mutex_unlock(&mdsc->mutex);
1264 }
1265
1266 /*
1267  * session caps
1268  */
1269
1270 static void detach_cap_releases(struct ceph_mds_session *session,
1271                                 struct list_head *target)
1272 {
1273         lockdep_assert_held(&session->s_cap_lock);
1274
1275         list_splice_init(&session->s_cap_releases, target);
1276         session->s_num_cap_releases = 0;
1277         dout("dispose_cap_releases mds%d\n", session->s_mds);
1278 }
1279
1280 static void dispose_cap_releases(struct ceph_mds_client *mdsc,
1281                                  struct list_head *dispose)
1282 {
1283         while (!list_empty(dispose)) {
1284                 struct ceph_cap *cap;
1285                 /* zero out the in-progress message */
1286                 cap = list_first_entry(dispose, struct ceph_cap, session_caps);
1287                 list_del(&cap->session_caps);
1288                 ceph_put_cap(mdsc, cap);
1289         }
1290 }
1291
1292 static void cleanup_session_requests(struct ceph_mds_client *mdsc,
1293                                      struct ceph_mds_session *session)
1294 {
1295         struct ceph_mds_request *req;
1296         struct rb_node *p;
1297         struct ceph_inode_info *ci;
1298
1299         dout("cleanup_session_requests mds%d\n", session->s_mds);
1300         mutex_lock(&mdsc->mutex);
1301         while (!list_empty(&session->s_unsafe)) {
1302                 req = list_first_entry(&session->s_unsafe,
1303                                        struct ceph_mds_request, r_unsafe_item);
1304                 pr_warn_ratelimited(" dropping unsafe request %llu\n",
1305                                     req->r_tid);
1306                 if (req->r_target_inode) {
1307                         /* dropping unsafe change of inode's attributes */
1308                         ci = ceph_inode(req->r_target_inode);
1309                         errseq_set(&ci->i_meta_err, -EIO);
1310                 }
1311                 if (req->r_unsafe_dir) {
1312                         /* dropping unsafe directory operation */
1313                         ci = ceph_inode(req->r_unsafe_dir);
1314                         errseq_set(&ci->i_meta_err, -EIO);
1315                 }
1316                 __unregister_request(mdsc, req);
1317         }
1318         /* zero r_attempts, so kick_requests() will re-send requests */
1319         p = rb_first(&mdsc->request_tree);
1320         while (p) {
1321                 req = rb_entry(p, struct ceph_mds_request, r_node);
1322                 p = rb_next(p);
1323                 if (req->r_session &&
1324                     req->r_session->s_mds == session->s_mds)
1325                         req->r_attempts = 0;
1326         }
1327         mutex_unlock(&mdsc->mutex);
1328 }
1329
1330 /*
1331  * Helper to safely iterate over all caps associated with a session, with
1332  * special care taken to handle a racing __ceph_remove_cap().
1333  *
1334  * Caller must hold session s_mutex.
1335  */
1336 int ceph_iterate_session_caps(struct ceph_mds_session *session,
1337                               int (*cb)(struct inode *, struct ceph_cap *,
1338                                         void *), void *arg)
1339 {
1340         struct list_head *p;
1341         struct ceph_cap *cap;
1342         struct inode *inode, *last_inode = NULL;
1343         struct ceph_cap *old_cap = NULL;
1344         int ret;
1345
1346         dout("iterate_session_caps %p mds%d\n", session, session->s_mds);
1347         spin_lock(&session->s_cap_lock);
1348         p = session->s_caps.next;
1349         while (p != &session->s_caps) {
1350                 cap = list_entry(p, struct ceph_cap, session_caps);
1351                 inode = igrab(&cap->ci->vfs_inode);
1352                 if (!inode) {
1353                         p = p->next;
1354                         continue;
1355                 }
1356                 session->s_cap_iterator = cap;
1357                 spin_unlock(&session->s_cap_lock);
1358
1359                 if (last_inode) {
1360                         /* avoid calling iput_final() while holding
1361                          * s_mutex or in mds dispatch threads */
1362                         ceph_async_iput(last_inode);
1363                         last_inode = NULL;
1364                 }
1365                 if (old_cap) {
1366                         ceph_put_cap(session->s_mdsc, old_cap);
1367                         old_cap = NULL;
1368                 }
1369
1370                 ret = cb(inode, cap, arg);
1371                 last_inode = inode;
1372
1373                 spin_lock(&session->s_cap_lock);
1374                 p = p->next;
1375                 if (!cap->ci) {
1376                         dout("iterate_session_caps  finishing cap %p removal\n",
1377                              cap);
1378                         BUG_ON(cap->session != session);
1379                         cap->session = NULL;
1380                         list_del_init(&cap->session_caps);
1381                         session->s_nr_caps--;
1382                         if (cap->queue_release)
1383                                 __ceph_queue_cap_release(session, cap);
1384                         else
1385                                 old_cap = cap;  /* put_cap it w/o locks held */
1386                 }
1387                 if (ret < 0)
1388                         goto out;
1389         }
1390         ret = 0;
1391 out:
1392         session->s_cap_iterator = NULL;
1393         spin_unlock(&session->s_cap_lock);
1394
1395         ceph_async_iput(last_inode);
1396         if (old_cap)
1397                 ceph_put_cap(session->s_mdsc, old_cap);
1398
1399         return ret;
1400 }
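
/*
 * A minimal callback sketch (hypothetical, not used in this file): the
 * iterator calls cb() with a referenced inode and its cap, outside
 * s_cap_lock, and a negative return value aborts the walk and is
 * propagated back to the ceph_iterate_session_caps() caller.
 */
static inline int example_count_caps_cb(struct inode *inode,
                                        struct ceph_cap *cap, void *arg)
{
        int *count = arg;

        (*count)++;
        return 0;       /* keep iterating; return < 0 to stop */
}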
1401
1402 static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
1403                                   void *arg)
1404 {
1405         struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
1406         struct ceph_inode_info *ci = ceph_inode(inode);
1407         LIST_HEAD(to_remove);
1408         bool dirty_dropped = false;
1409         bool invalidate = false;
1410
1411         dout("removing cap %p, ci is %p, inode is %p\n",
1412              cap, ci, &ci->vfs_inode);
1413         spin_lock(&ci->i_ceph_lock);
1414         if (cap->mds_wanted | cap->issued)
1415                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1416         __ceph_remove_cap(cap, false);
1417         if (!ci->i_auth_cap) {
1418                 struct ceph_cap_flush *cf;
1419                 struct ceph_mds_client *mdsc = fsc->mdsc;
1420
1421                 if (READ_ONCE(fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
1422                         if (inode->i_data.nrpages > 0)
1423                                 invalidate = true;
1424                         if (ci->i_wrbuffer_ref > 0)
1425                                 mapping_set_error(&inode->i_data, -EIO);
1426                 }
1427
1428                 while (!list_empty(&ci->i_cap_flush_list)) {
1429                         cf = list_first_entry(&ci->i_cap_flush_list,
1430                                               struct ceph_cap_flush, i_list);
1431                         list_move(&cf->i_list, &to_remove);
1432                 }
1433
1434                 spin_lock(&mdsc->cap_dirty_lock);
1435
1436                 list_for_each_entry(cf, &to_remove, i_list)
1437                         list_del(&cf->g_list);
1438
1439                 if (!list_empty(&ci->i_dirty_item)) {
1440                         pr_warn_ratelimited(
1441                                 " dropping dirty %s state for %p %lld\n",
1442                                 ceph_cap_string(ci->i_dirty_caps),
1443                                 inode, ceph_ino(inode));
1444                         ci->i_dirty_caps = 0;
1445                         list_del_init(&ci->i_dirty_item);
1446                         dirty_dropped = true;
1447                 }
1448                 if (!list_empty(&ci->i_flushing_item)) {
1449                         pr_warn_ratelimited(
1450                                 " dropping dirty+flushing %s state for %p %lld\n",
1451                                 ceph_cap_string(ci->i_flushing_caps),
1452                                 inode, ceph_ino(inode));
1453                         ci->i_flushing_caps = 0;
1454                         list_del_init(&ci->i_flushing_item);
1455                         mdsc->num_cap_flushing--;
1456                         dirty_dropped = true;
1457                 }
1458                 spin_unlock(&mdsc->cap_dirty_lock);
1459
1460                 if (dirty_dropped) {
1461                         errseq_set(&ci->i_meta_err, -EIO);
1462
1463                         if (ci->i_wrbuffer_ref_head == 0 &&
1464                             ci->i_wr_ref == 0 &&
1465                             ci->i_dirty_caps == 0 &&
1466                             ci->i_flushing_caps == 0) {
1467                                 ceph_put_snap_context(ci->i_head_snapc);
1468                                 ci->i_head_snapc = NULL;
1469                         }
1470                 }
1471
1472                 if (atomic_read(&ci->i_filelock_ref) > 0) {
1473                         /* make further file lock syscall return -EIO */
1474                         ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
1475                         pr_warn_ratelimited(" dropping file locks for %p %lld\n",
1476                                             inode, ceph_ino(inode));
1477                 }
1478
1479                 if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
1480                         list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
1481                         ci->i_prealloc_cap_flush = NULL;
1482                 }
1483         }
1484         spin_unlock(&ci->i_ceph_lock);
1485         while (!list_empty(&to_remove)) {
1486                 struct ceph_cap_flush *cf;
1487                 cf = list_first_entry(&to_remove,
1488                                       struct ceph_cap_flush, i_list);
1489                 list_del(&cf->i_list);
1490                 ceph_free_cap_flush(cf);
1491         }
1492
1493         wake_up_all(&ci->i_cap_wq);
1494         if (invalidate)
1495                 ceph_queue_invalidate(inode);
1496         if (dirty_dropped)
1497                 iput(inode);
1498         return 0;
1499 }
1500
1501 /*
1502  * caller must hold session s_mutex
1503  */
1504 static void remove_session_caps(struct ceph_mds_session *session)
1505 {
1506         struct ceph_fs_client *fsc = session->s_mdsc->fsc;
1507         struct super_block *sb = fsc->sb;
1508         LIST_HEAD(dispose);
1509
1510         dout("remove_session_caps on %p\n", session);
1511         ceph_iterate_session_caps(session, remove_session_caps_cb, fsc);
1512
1513         wake_up_all(&fsc->mdsc->cap_flushing_wq);
1514
1515         spin_lock(&session->s_cap_lock);
1516         if (session->s_nr_caps > 0) {
1517                 struct inode *inode;
1518                 struct ceph_cap *cap, *prev = NULL;
1519                 struct ceph_vino vino;
1520                 /*
1521                  * iterate_session_caps() skips inodes that are being
1522                  * deleted; we need to wait until those deletions are
1523                  * complete.  __wait_on_freeing_inode() is designed for
1524                  * the job, but it is not exported, so use the inode
1525                  * lookup function to access it indirectly.
1526                  */
1527                 while (!list_empty(&session->s_caps)) {
1528                         cap = list_entry(session->s_caps.next,
1529                                          struct ceph_cap, session_caps);
1530                         if (cap == prev)
1531                                 break;
1532                         prev = cap;
1533                         vino = cap->ci->i_vino;
1534                         spin_unlock(&session->s_cap_lock);
1535
1536                         inode = ceph_find_inode(sb, vino);
1537                         /* avoid calling iput_final() while holding s_mutex */
1538                         ceph_async_iput(inode);
1539
1540                         spin_lock(&session->s_cap_lock);
1541                 }
1542         }
1543
1544         /* detach queued cap releases; s_cap_lock is unlocked just below */
1545         detach_cap_releases(session, &dispose);
1546
1547         BUG_ON(session->s_nr_caps > 0);
1548         BUG_ON(!list_empty(&session->s_cap_flushing));
1549         spin_unlock(&session->s_cap_lock);
1550         dispose_cap_releases(session->s_mdsc, &dispose);
1551 }
1552
1553 enum {
1554         RECONNECT,
1555         RENEWCAPS,
1556         FORCE_RO,
1557 };
1558
1559 /*
1560  * wake up any threads waiting on this session's caps.  if the cap is
1561  * old (didn't get renewed on the client reconnect), downgrade it now.
1562  *
1563  * caller must hold s_mutex.
1564  */
1565 static int wake_up_session_cb(struct inode *inode, struct ceph_cap *cap,
1566                               void *arg)
1567 {
1568         struct ceph_inode_info *ci = ceph_inode(inode);
1569         unsigned long ev = (unsigned long)arg;
1570
1571         if (ev == RECONNECT) {
1572                 spin_lock(&ci->i_ceph_lock);
1573                 ci->i_wanted_max_size = 0;
1574                 ci->i_requested_max_size = 0;
1575                 spin_unlock(&ci->i_ceph_lock);
1576         } else if (ev == RENEWCAPS) {
1577                 if (cap->cap_gen < cap->session->s_cap_gen) {
1578                         /* mds did not re-issue stale cap */
1579                         spin_lock(&ci->i_ceph_lock);
1580                         cap->issued = cap->implemented = CEPH_CAP_PIN;
1581                         /* make sure mds knows what we want */
1582                         if (__ceph_caps_file_wanted(ci) & ~cap->mds_wanted)
1583                                 ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
1584                         spin_unlock(&ci->i_ceph_lock);
1585                 }
1586         } else if (ev == FORCE_RO) {
1587         }
1588         wake_up_all(&ci->i_cap_wq);
1589         return 0;
1590 }
1591
1592 static void wake_up_session_caps(struct ceph_mds_session *session, int ev)
1593 {
1594         dout("wake_up_session_caps %p mds%d\n", session, session->s_mds);
1595         ceph_iterate_session_caps(session, wake_up_session_cb,
1596                                   (void *)(unsigned long)ev);
1597 }
1598
1599 /*
1600  * Send periodic message to MDS renewing all currently held caps.  The
1601  * ack will reset the expiration for all caps from this session.
1602  *
1603  * caller holds s_mutex
1604  */
1605 static int send_renew_caps(struct ceph_mds_client *mdsc,
1606                            struct ceph_mds_session *session)
1607 {
1608         struct ceph_msg *msg;
1609         int state;
1610
1611         if (time_after_eq(jiffies, session->s_cap_ttl) &&
1612             time_after_eq(session->s_cap_ttl, session->s_renew_requested))
1613                 pr_info("mds%d caps stale\n", session->s_mds);
1614         session->s_renew_requested = jiffies;
1615
1616         /* do not try to renew caps until a recovering mds has reconnected
1617          * with its clients. */
1618         state = ceph_mdsmap_get_state(mdsc->mdsmap, session->s_mds);
1619         if (state < CEPH_MDS_STATE_RECONNECT) {
1620                 dout("send_renew_caps ignoring mds%d (%s)\n",
1621                      session->s_mds, ceph_mds_state_name(state));
1622                 return 0;
1623         }
1624
1625         dout("send_renew_caps to mds%d (%s)\n", session->s_mds,
1626                 ceph_mds_state_name(state));
1627         msg = create_session_msg(CEPH_SESSION_REQUEST_RENEWCAPS,
1628                                  ++session->s_renew_seq);
1629         if (!msg)
1630                 return -ENOMEM;
1631         ceph_con_send(&session->s_con, msg);
1632         return 0;
1633 }
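/*
 * Example (sketch, not part of this file's build): renewal is driven
 * periodically, not per-operation.  A periodic driver -- the delayed
 * worker in this file follows roughly this shape -- renews each open
 * session under its own s_mutex:
 *
 *	mutex_lock(&session->s_mutex);
 *	if (renew_caps)
 *		send_renew_caps(mdsc, session);
 *	mutex_unlock(&session->s_mutex);
 *
 * where renew_caps is a flag the driver computes from the renewal
 * interval (a fraction of mdsmap->m_session_timeout).
 */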
1634
1635 static int send_flushmsg_ack(struct ceph_mds_client *mdsc,
1636                              struct ceph_mds_session *session, u64 seq)
1637 {
1638         struct ceph_msg *msg;
1639
1640         dout("send_flushmsg_ack to mds%d (%s) seq %lld\n",
1641              session->s_mds, ceph_session_state_name(session->s_state), seq);
1642         msg = create_session_msg(CEPH_SESSION_FLUSHMSG_ACK, seq);
1643         if (!msg)
1644                 return -ENOMEM;
1645         ceph_con_send(&session->s_con, msg);
1646         return 0;
1647 }
1648
1649
1650 /*
1651  * Note the new cap ttl, and any transition from stale -> fresh.
1652  *
1653  * Called under session->s_mutex
1654  */
1655 static void renewed_caps(struct ceph_mds_client *mdsc,
1656                          struct ceph_mds_session *session, int is_renew)
1657 {
1658         int was_stale;
1659         int wake = 0;
1660
1661         spin_lock(&session->s_cap_lock);
1662         was_stale = is_renew && time_after_eq(jiffies, session->s_cap_ttl);
1663
1664         session->s_cap_ttl = session->s_renew_requested +
1665                 mdsc->mdsmap->m_session_timeout*HZ;
1666
1667         if (was_stale) {
1668                 if (time_before(jiffies, session->s_cap_ttl)) {
1669                         pr_info("mds%d caps renewed\n", session->s_mds);
1670                         wake = 1;
1671                 } else {
1672                         pr_info("mds%d caps still stale\n", session->s_mds);
1673                 }
1674         }
1675         dout("renewed_caps mds%d ttl now %lu, was %s, now %s\n",
1676              session->s_mds, session->s_cap_ttl, was_stale ? "stale" : "fresh",
1677              time_before(jiffies, session->s_cap_ttl) ? "fresh" : "stale");
1678         spin_unlock(&session->s_cap_lock);
1679
1680         if (wake)
1681                 wake_up_session_caps(session, RENEWCAPS);
1682 }
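/*
 * Worked example of the ttl arithmetic above (illustrative numbers
 * only): with m_session_timeout == 60 and HZ == 250, a renew ack
 * received within 60s of s_renew_requested yields
 *
 *	s_cap_ttl = s_renew_requested + 60 * 250 jiffies
 *
 * which is in the future, so a previously stale session logs "caps
 * renewed" and wakes waiters.  If the ack arrives more than 60s after
 * the renew was requested, the computed ttl is already in the past
 * and the caps remain stale.
 */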
1683
1684 /*
1685  * send a session close request
1686  */
1687 static int request_close_session(struct ceph_mds_client *mdsc,
1688                                  struct ceph_mds_session *session)
1689 {
1690         struct ceph_msg *msg;
1691
1692         dout("request_close_session mds%d state %s seq %lld\n",
1693              session->s_mds, ceph_session_state_name(session->s_state),
1694              session->s_seq);
1695         msg = create_session_msg(CEPH_SESSION_REQUEST_CLOSE, session->s_seq);
1696         if (!msg)
1697                 return -ENOMEM;
1698         ceph_con_send(&session->s_con, msg);
1699         return 1;
1700 }
1701
1702 /*
1703  * Called with s_mutex held.
1704  */
1705 static int __close_session(struct ceph_mds_client *mdsc,
1706                          struct ceph_mds_session *session)
1707 {
1708         if (session->s_state >= CEPH_MDS_SESSION_CLOSING)
1709                 return 0;
1710         session->s_state = CEPH_MDS_SESSION_CLOSING;
1711         return request_close_session(mdsc, session);
1712 }
1713
1714 static bool drop_negative_children(struct dentry *dentry)
1715 {
1716         struct dentry *child;
1717         bool all_negative = true;
1718
1719         if (!d_is_dir(dentry))
1720                 goto out;
1721
1722         spin_lock(&dentry->d_lock);
1723         list_for_each_entry(child, &dentry->d_subdirs, d_child) {
1724                 if (d_really_is_positive(child)) {
1725                         all_negative = false;
1726                         break;
1727                 }
1728         }
1729         spin_unlock(&dentry->d_lock);
1730
1731         if (all_negative)
1732                 shrink_dcache_parent(dentry);
1733 out:
1734         return all_negative;
1735 }
1736
1737 /*
1738  * Trim old(er) caps.
1739  *
1740  * Because we can't cache an inode without one or more caps, we do
1741  * this indirectly: if a cap is unused, we prune its aliases, at which
1742  * point the inode will hopefully get dropped too.
1743  *
1744  * Yes, this is a bit sloppy.  Our only real goal here is to respond to
1745  * memory pressure from the MDS, though, so it needn't be perfect.
1746  */
1747 static int trim_caps_cb(struct inode *inode, struct ceph_cap *cap, void *arg)
1748 {
1749         int *remaining = arg;
1750         struct ceph_inode_info *ci = ceph_inode(inode);
1751         int used, wanted, oissued, mine;
1752
1753         if (*remaining <= 0)
1754                 return -1;
1755
1756         spin_lock(&ci->i_ceph_lock);
1757         mine = cap->issued | cap->implemented;
1758         used = __ceph_caps_used(ci);
1759         wanted = __ceph_caps_file_wanted(ci);
1760         oissued = __ceph_caps_issued_other(ci, cap);
1761
1762         dout("trim_caps_cb %p cap %p mine %s oissued %s used %s wanted %s\n",
1763              inode, cap, ceph_cap_string(mine), ceph_cap_string(oissued),
1764              ceph_cap_string(used), ceph_cap_string(wanted));
1765         if (cap == ci->i_auth_cap) {
1766                 if (ci->i_dirty_caps || ci->i_flushing_caps ||
1767                     !list_empty(&ci->i_cap_snaps))
1768                         goto out;
1769                 if ((used | wanted) & CEPH_CAP_ANY_WR)
1770                         goto out;
1771                 /* Note: it's possible that i_filelock_ref becomes non-zero
1772                  * after dropping auth caps. It doesn't hurt because reply
1773                  * of lock mds request will re-add auth caps. */
1774                 if (atomic_read(&ci->i_filelock_ref) > 0)
1775                         goto out;
1776         }
1777         /* The inode has cached pages, but it's no longer used.
1778          * We can safely drop it. */
1779         if (S_ISREG(inode->i_mode) &&
1780             wanted == 0 && used == CEPH_CAP_FILE_CACHE &&
1781             !(oissued & CEPH_CAP_FILE_CACHE)) {
1782                 used = 0;
1783                 oissued = 0;
1784         }
1785         if ((used | wanted) & ~oissued & mine)
1786                 goto out;   /* we need these caps */
1787
1788         if (oissued) {
1789                 /* we aren't the only cap.. just remove us */
1790                 __ceph_remove_cap(cap, true);
1791                 (*remaining)--;
1792         } else {
1793                 struct dentry *dentry;
1794                 /* try dropping referring dentries */
1795                 spin_unlock(&ci->i_ceph_lock);
1796                 dentry = d_find_any_alias(inode);
1797                 if (dentry && drop_negative_children(dentry)) {
1798                         int count;
1799                         dput(dentry);
1800                         d_prune_aliases(inode);
1801                         count = atomic_read(&inode->i_count);
1802                         if (count == 1)
1803                                 (*remaining)--;
1804                         dout("trim_caps_cb %p cap %p pruned, count now %d\n",
1805                              inode, cap, count);
1806                 } else {
1807                         dput(dentry);
1808                 }
1809                 return 0;
1810         }
1811
1812 out:
1813         spin_unlock(&ci->i_ceph_lock);
1814         return 0;
1815 }
1816
1817 /*
1818  * Trim session cap count down to some max number.
1819  */
1820 int ceph_trim_caps(struct ceph_mds_client *mdsc,
1821                    struct ceph_mds_session *session,
1822                    int max_caps)
1823 {
1824         int trim_caps = session->s_nr_caps - max_caps;
1825
1826         dout("trim_caps mds%d start: %d / %d, trim %d\n",
1827              session->s_mds, session->s_nr_caps, max_caps, trim_caps);
1828         if (trim_caps > 0) {
1829                 int remaining = trim_caps;
1830
1831                 ceph_iterate_session_caps(session, trim_caps_cb, &remaining);
1832                 dout("trim_caps mds%d done: %d / %d, trimmed %d\n",
1833                      session->s_mds, session->s_nr_caps, max_caps,
1834                         trim_caps - remaining);
1835         }
1836
1837         ceph_flush_cap_releases(mdsc, session);
1838         return 0;
1839 }
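/*
 * Example (sketch): trimming is normally initiated by the MDS rather
 * than locally.  A session-message handler that receives a
 * RECALL_STATE request carrying a target cap count would call,
 * roughly:
 *
 *	if (op == CEPH_SESSION_RECALL_STATE)
 *		ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
 *
 * where h is the decoded session message body; the local names here
 * are illustrative.
 */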
1840
1841 static int check_caps_flush(struct ceph_mds_client *mdsc,
1842                             u64 want_flush_tid)
1843 {
1844         int ret = 1;
1845
1846         spin_lock(&mdsc->cap_dirty_lock);
1847         if (!list_empty(&mdsc->cap_flush_list)) {
1848                 struct ceph_cap_flush *cf =
1849                         list_first_entry(&mdsc->cap_flush_list,
1850                                          struct ceph_cap_flush, g_list);
1851                 if (cf->tid <= want_flush_tid) {
1852                         dout("check_caps_flush still flushing tid "
1853                              "%llu <= %llu\n", cf->tid, want_flush_tid);
1854                         ret = 0;
1855                 }
1856         }
1857         spin_unlock(&mdsc->cap_dirty_lock);
1858         return ret;
1859 }
1860
1861 /*
1862  * flush all dirty inode data to disk.
1863  *
1864  * returns once we've flushed through want_flush_tid
1865  */
1866 static void wait_caps_flush(struct ceph_mds_client *mdsc,
1867                             u64 want_flush_tid)
1868 {
1869         dout("check_caps_flush want %llu\n", want_flush_tid);
1870
1871         wait_event(mdsc->cap_flushing_wq,
1872                    check_caps_flush(mdsc, want_flush_tid));
1873
1874         dout("check_caps_flush ok, flushed thru %llu\n", want_flush_tid);
1875 }
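/*
 * Worked example: cap flushes are assigned monotonically increasing
 * tids and mdsc->cap_flush_list is kept in tid order, so only the
 * head of the list needs checking.  If the list holds tids {5, 7, 9}
 * and want_flush_tid == 6, the head (5) is <= 6 and we keep waiting;
 * once flush 5 completes and is removed, the new head (7) is > 6,
 * check_caps_flush() returns 1, and the wait finishes.
 */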
1876
1877 /*
1878  * called under s_mutex
1879  */
1880 static void ceph_send_cap_releases(struct ceph_mds_client *mdsc,
1881                                    struct ceph_mds_session *session)
1882 {
1883         struct ceph_msg *msg = NULL;
1884         struct ceph_mds_cap_release *head;
1885         struct ceph_mds_cap_item *item;
1886         struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
1887         struct ceph_cap *cap;
1888         LIST_HEAD(tmp_list);
1889         int num_cap_releases;
1890         __le32  barrier, *cap_barrier;
1891
1892         down_read(&osdc->lock);
1893         barrier = cpu_to_le32(osdc->epoch_barrier);
1894         up_read(&osdc->lock);
1895
1896         spin_lock(&session->s_cap_lock);
1897 again:
1898         list_splice_init(&session->s_cap_releases, &tmp_list);
1899         num_cap_releases = session->s_num_cap_releases;
1900         session->s_num_cap_releases = 0;
1901         spin_unlock(&session->s_cap_lock);
1902
1903         while (!list_empty(&tmp_list)) {
1904                 if (!msg) {
1905                         msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPRELEASE,
1906                                         PAGE_SIZE, GFP_NOFS, false);
1907                         if (!msg)
1908                                 goto out_err;
1909                         head = msg->front.iov_base;
1910                         head->num = cpu_to_le32(0);
1911                         msg->front.iov_len = sizeof(*head);
1912
1913                         msg->hdr.version = cpu_to_le16(2);
1914                         msg->hdr.compat_version = cpu_to_le16(1);
1915                 }
1916
1917                 cap = list_first_entry(&tmp_list, struct ceph_cap,
1918                                         session_caps);
1919                 list_del(&cap->session_caps);
1920                 num_cap_releases--;
1921
1922                 head = msg->front.iov_base;
1923                 put_unaligned_le32(get_unaligned_le32(&head->num) + 1,
1924                                    &head->num);
1925                 item = msg->front.iov_base + msg->front.iov_len;
1926                 item->ino = cpu_to_le64(cap->cap_ino);
1927                 item->cap_id = cpu_to_le64(cap->cap_id);
1928                 item->migrate_seq = cpu_to_le32(cap->mseq);
1929                 item->seq = cpu_to_le32(cap->issue_seq);
1930                 msg->front.iov_len += sizeof(*item);
1931
1932                 ceph_put_cap(mdsc, cap);
1933
1934                 if (le32_to_cpu(head->num) == CEPH_CAPS_PER_RELEASE) {
1935                         /* append cap_barrier field */
1936                         cap_barrier = msg->front.iov_base + msg->front.iov_len;
1937                         *cap_barrier = barrier;
1938                         msg->front.iov_len += sizeof(*cap_barrier);
1939
1940                         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1941                         dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1942                         ceph_con_send(&session->s_con, msg);
1943                         msg = NULL;
1944                 }
1945         }
1946
1947         BUG_ON(num_cap_releases != 0);
1948
1949         spin_lock(&session->s_cap_lock);
1950         if (!list_empty(&session->s_cap_releases))
1951                 goto again;
1952         spin_unlock(&session->s_cap_lock);
1953
1954         if (msg) {
1955                 /* append cap_barrier field */
1956                 cap_barrier = msg->front.iov_base + msg->front.iov_len;
1957                 *cap_barrier = barrier;
1958                 msg->front.iov_len += sizeof(*cap_barrier);
1959
1960                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
1961                 dout("send_cap_releases mds%d %p\n", session->s_mds, msg);
1962                 ceph_con_send(&session->s_con, msg);
1963         }
1964         return;
1965 out_err:
1966         pr_err("send_cap_releases mds%d, failed to allocate message\n",
1967                 session->s_mds);
1968         spin_lock(&session->s_cap_lock);
1969         list_splice(&tmp_list, &session->s_cap_releases);
1970         session->s_num_cap_releases += num_cap_releases;
1971         spin_unlock(&session->s_cap_lock);
1972 }
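/*
 * Wire layout produced above (v2 CAPRELEASE message), for reference:
 *
 *	struct ceph_mds_cap_release  head;       head.num == N
 *	struct ceph_mds_cap_item    items[N];    ino/cap_id/migrate_seq/seq
 *	__le32                       cap_barrier; osdmap epoch barrier
 *
 * A message is flushed either when it fills up (N reaches
 * CEPH_CAPS_PER_RELEASE) or when the release list is drained.
 */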
1973
1974 static void ceph_cap_release_work(struct work_struct *work)
1975 {
1976         struct ceph_mds_session *session =
1977                 container_of(work, struct ceph_mds_session, s_cap_release_work);
1978
1979         mutex_lock(&session->s_mutex);
1980         if (session->s_state == CEPH_MDS_SESSION_OPEN ||
1981             session->s_state == CEPH_MDS_SESSION_HUNG)
1982                 ceph_send_cap_releases(session->s_mdsc, session);
1983         mutex_unlock(&session->s_mutex);
1984         ceph_put_mds_session(session);
1985 }
1986
1987 void ceph_flush_cap_releases(struct ceph_mds_client *mdsc,
1988                              struct ceph_mds_session *session)
1989 {
1990         if (mdsc->stopping)
1991                 return;
1992
1993         ceph_get_mds_session(session);
1994         if (queue_work(mdsc->fsc->cap_wq,
1995                        &session->s_cap_release_work)) {
1996                 dout("cap release work queued\n");
1997         } else {
1998                 ceph_put_mds_session(session);
1999                 dout("failed to queue cap release work\n");
2000         }
2001 }
2002
2003 /*
2004  * caller holds session->s_cap_lock
2005  */
2006 void __ceph_queue_cap_release(struct ceph_mds_session *session,
2007                               struct ceph_cap *cap)
2008 {
2009         list_add_tail(&cap->session_caps, &session->s_cap_releases);
2010         session->s_num_cap_releases++;
2011
2012         if (!(session->s_num_cap_releases % CEPH_CAPS_PER_RELEASE))
2013                 ceph_flush_cap_releases(session->s_mdsc, session);
2014 }
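/*
 * e.g. if CEPH_CAPS_PER_RELEASE were 26 (illustrative value), the
 * 26th, 52nd, ... queued release would each kick the release work, so
 * a full message is sent promptly rather than waiting for an
 * unrelated flush.
 */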
2015
2016 static void ceph_cap_reclaim_work(struct work_struct *work)
2017 {
2018         struct ceph_mds_client *mdsc =
2019                 container_of(work, struct ceph_mds_client, cap_reclaim_work);
2020         int ret = ceph_trim_dentries(mdsc);
2021         if (ret == -EAGAIN)
2022                 ceph_queue_cap_reclaim_work(mdsc);
2023 }
2024
2025 void ceph_queue_cap_reclaim_work(struct ceph_mds_client *mdsc)
2026 {
2027         if (mdsc->stopping)
2028                 return;
2029
2030         if (queue_work(mdsc->fsc->cap_wq, &mdsc->cap_reclaim_work)) {
2031                 dout("caps reclaim work queued\n");
2032         } else {
2033                 dout("failed to queue caps reclaim work\n");
2034         }
2035 }
2036
2037 void ceph_reclaim_caps_nr(struct ceph_mds_client *mdsc, int nr)
2038 {
2039         int val;
2040         if (!nr)
2041                 return;
2042         val = atomic_add_return(nr, &mdsc->cap_reclaim_pending);
2043         if ((val % CEPH_CAPS_PER_RELEASE) < nr) {
2044                 atomic_set(&mdsc->cap_reclaim_pending, 0);
2045                 ceph_queue_cap_reclaim_work(mdsc);
2046         }
2047 }
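/*
 * Worked example of the crossing test above: a multiple of
 * CEPH_CAPS_PER_RELEASE was passed between val - nr and val exactly
 * when (val % CEPH_CAPS_PER_RELEASE) < nr.  Assuming an illustrative
 * CEPH_CAPS_PER_RELEASE of 26: pending going 20 -> 30 via nr == 10
 * gives val == 30 and 30 % 26 == 4 < 10, so the reclaim work is
 * queued and the counter reset; pending 4 -> 14 gives 14 % 26 == 14,
 * which is >= 10, so nothing is queued yet.
 */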
2048
2049 /*
2050  * requests
2051  */
2052
2053 int ceph_alloc_readdir_reply_buffer(struct ceph_mds_request *req,
2054                                     struct inode *dir)
2055 {
2056         struct ceph_inode_info *ci = ceph_inode(dir);
2057         struct ceph_mds_reply_info_parsed *rinfo = &req->r_reply_info;
2058         struct ceph_mount_options *opt = req->r_mdsc->fsc->mount_options;
2059         size_t size = sizeof(struct ceph_mds_reply_dir_entry);
2060         unsigned int num_entries;
2061         int order;
2062
2063         spin_lock(&ci->i_ceph_lock);
2064         num_entries = ci->i_files + ci->i_subdirs;
2065         spin_unlock(&ci->i_ceph_lock);
2066         num_entries = max(num_entries, 1U);
2067         num_entries = min(num_entries, opt->max_readdir);
2068
2069         order = get_order(size * num_entries);
2070         while (order >= 0) {
2071                 rinfo->dir_entries = (void*)__get_free_pages(GFP_KERNEL |
2072                                                              __GFP_NOWARN,
2073                                                              order);
2074                 if (rinfo->dir_entries)
2075                         break;
2076                 order--;
2077         }
2078         if (!rinfo->dir_entries)
2079                 return -ENOMEM;
2080
2081         num_entries = (PAGE_SIZE << order) / size;
2082         num_entries = min(num_entries, opt->max_readdir);
2083
2084         rinfo->dir_buf_size = PAGE_SIZE << order;
2085         req->r_num_caps = num_entries + 1;
2086         req->r_args.readdir.max_entries = cpu_to_le32(num_entries);
2087         req->r_args.readdir.max_bytes = cpu_to_le32(opt->max_readdir_bytes);
2088         return 0;
2089 }
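/*
 * Worked example of the sizing above (illustrative numbers): with a
 * 40-byte ceph_mds_reply_dir_entry, 4 KiB pages and a directory
 * reporting 1000 entries, get_order(40 * 1000) == 4, i.e. a 64 KiB
 * buffer is attempted first.  If that allocation fails, the loop
 * retries at order 3, 2, ..., and on success the entry count is
 * recomputed as (PAGE_SIZE << order) / size, clamped again to
 * opt->max_readdir.
 */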
2090
2091 /*
2092  * Create an mds request.
2093  */
2094 struct ceph_mds_request *
2095 ceph_mdsc_create_request(struct ceph_mds_client *mdsc, int op, int mode)
2096 {
2097         struct ceph_mds_request *req;
2098
2099         req = kmem_cache_zalloc(ceph_mds_request_cachep, GFP_NOFS);
2100         if (!req)
2101                 return ERR_PTR(-ENOMEM);
2102
2103         mutex_init(&req->r_fill_mutex);
2104         req->r_mdsc = mdsc;
2105         req->r_started = jiffies;
2106         req->r_resend_mds = -1;
2107         INIT_LIST_HEAD(&req->r_unsafe_dir_item);
2108         INIT_LIST_HEAD(&req->r_unsafe_target_item);
2109         req->r_fmode = -1;
2110         kref_init(&req->r_kref);
2111         RB_CLEAR_NODE(&req->r_node);
2112         INIT_LIST_HEAD(&req->r_wait);
2113         init_completion(&req->r_completion);
2114         init_completion(&req->r_safe_completion);
2115         INIT_LIST_HEAD(&req->r_unsafe_item);
2116
2117         ktime_get_coarse_real_ts64(&req->r_stamp);
2118
2119         req->r_op = op;
2120         req->r_direct_mode = mode;
2121         return req;
2122 }
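/*
 * Typical caller pattern (sketch, loosely modeled on the directory
 * operations in fs/ceph/dir.c; the op and field choices here are
 * illustrative):
 *
 *	req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUP,
 *				       USE_ANY_MDS);
 *	if (IS_ERR(req))
 *		return PTR_ERR(req);
 *	req->r_dentry = dget(dentry);
 *	req->r_num_caps = 2;
 *	err = ceph_mdsc_do_request(mdsc, dir, req);
 *	ceph_mdsc_put_request(req);
 *
 * The request is refcounted (r_kref), so the caller always drops its
 * reference with ceph_mdsc_put_request() when done.
 */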
2123
2124 /*
2125  * return the oldest (lowest tid) request in the request tree, or NULL if none.
2126  *
2127  * called under mdsc->mutex.
2128  */
2129 static struct ceph_mds_request *__get_oldest_req(struct ceph_mds_client *mdsc)
2130 {
2131         if (RB_EMPTY_ROOT(&mdsc->request_tree))
2132                 return NULL;
2133         return rb_entry(rb_first(&mdsc->request_tree),
2134                         struct ceph_mds_request, r_node);
2135 }
2136
2137 static inline u64 __get_oldest_tid(struct ceph_mds_client *mdsc)
2138 {
2139         return mdsc->oldest_tid;
2140 }
2141
2142 /*
2143  * Build a dentry's path.  Allocate on heap; caller must kfree.  Based
2144  * Build a dentry's path.  The buffer comes from __getname(); the caller
2145  * frees it with ceph_mdsc_free_path().  Based on build_path_from_dentry in fs/cifs/dir.c.
2146  * If @stop_on_nosnap, generate path relative to the first non-snapped
2147  * inode.
2148  *
2149  * Encode hidden .snap dirs as a double /, i.e.
2150  *   foo/.snap/bar -> foo//bar
2151  */
2152 char *ceph_mdsc_build_path(struct dentry *dentry, int *plen, u64 *pbase,
2153                            int stop_on_nosnap)
2154 {
2155         struct dentry *temp;
2156         char *path;
2157         int pos;
2158         unsigned seq;
2159         u64 base;
2160
2161         if (!dentry)
2162                 return ERR_PTR(-EINVAL);
2163
2164         path = __getname();
2165         if (!path)
2166                 return ERR_PTR(-ENOMEM);
2167 retry:
2168         pos = PATH_MAX - 1;
2169         path[pos] = '\0';
2170
2171         seq = read_seqbegin(&rename_lock);
2172         rcu_read_lock();
2173         temp = dentry;
2174         for (;;) {
2175                 struct inode *inode;
2176
2177                 spin_lock(&temp->d_lock);
2178                 inode = d_inode(temp);
2179                 if (inode && ceph_snap(inode) == CEPH_SNAPDIR) {
2180                         dout("build_path path+%d: %p SNAPDIR\n",
2181                              pos, temp);
2182                 } else if (stop_on_nosnap && inode && dentry != temp &&
2183                            ceph_snap(inode) == CEPH_NOSNAP) {
2184                         spin_unlock(&temp->d_lock);
2185                         pos++; /* get rid of any prepended '/' */
2186                         break;
2187                 } else {
2188                         pos -= temp->d_name.len;
2189                         if (pos < 0) {
2190                                 spin_unlock(&temp->d_lock);
2191                                 break;
2192                         }
2193                         memcpy(path + pos, temp->d_name.name, temp->d_name.len);
2194                 }
2195                 spin_unlock(&temp->d_lock);
2196                 temp = READ_ONCE(temp->d_parent);
2197
2198                 /* Are we at the root? */
2199                 if (IS_ROOT(temp))
2200                         break;
2201
2202                 /* Are we out of buffer? */
2203                 if (--pos < 0)
2204                         break;
2205
2206                 path[pos] = '/';
2207         }
2208         base = ceph_ino(d_inode(temp));
2209         rcu_read_unlock();
2210
2211         if (read_seqretry(&rename_lock, seq))
2212                 goto retry;
2213
2214         if (pos < 0) {
2215                 /*
2216                  * A rename didn't occur, but somehow we didn't end up where
2217                  * we thought we would. Throw a warning and try again.
2218                  */
2219                 pr_warn("build_path did not end path lookup where "
2220                         "expected, pos is %d\n", pos);
2221                 goto retry;
2222         }
2223
2224         *pbase = base;
2225         *plen = PATH_MAX - 1 - pos;
2226         dout("build_path on %p %d built %llx '%.*s'\n",
2227              dentry, d_count(dentry), base, *plen, path + pos);
2228         return path + pos;
2229 }
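/*
 * Example use (sketch): the returned pointer points into a
 * __getname() buffer, so it must be released with
 * ceph_mdsc_free_path() (as the request-message builder below does),
 * not with kfree():
 *
 *	path = ceph_mdsc_build_path(dentry, &pathlen, &base, 0);
 *	if (IS_ERR(path))
 *		return PTR_ERR(path);
 *	...				use path, pathlen, base
 *	ceph_mdsc_free_path(path, pathlen);
 */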
2230
2231 static int build_dentry_path(struct dentry *dentry, struct inode *dir,
2232                              const char **ppath, int *ppathlen, u64 *pino,
2233                              bool *pfreepath, bool parent_locked)
2234 {
2235         char *path;
2236
2237         rcu_read_lock();
2238         if (!dir)
2239                 dir = d_inode_rcu(dentry->d_parent);
2240         if (dir && parent_locked && ceph_snap(dir) == CEPH_NOSNAP) {
2241                 *pino = ceph_ino(dir);
2242                 rcu_read_unlock();
2243                 *ppath = dentry->d_name.name;
2244                 *ppathlen = dentry->d_name.len;
2245                 return 0;
2246         }
2247         rcu_read_unlock();
2248         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2249         if (IS_ERR(path))
2250                 return PTR_ERR(path);
2251         *ppath = path;
2252         *pfreepath = true;
2253         return 0;
2254 }
2255
2256 static int build_inode_path(struct inode *inode,
2257                             const char **ppath, int *ppathlen, u64 *pino,
2258                             bool *pfreepath)
2259 {
2260         struct dentry *dentry;
2261         char *path;
2262
2263         if (ceph_snap(inode) == CEPH_NOSNAP) {
2264                 *pino = ceph_ino(inode);
2265                 *ppathlen = 0;
2266                 return 0;
2267         }
2268         dentry = d_find_alias(inode);
2269         path = ceph_mdsc_build_path(dentry, ppathlen, pino, 1);
2270         dput(dentry);
2271         if (IS_ERR(path))
2272                 return PTR_ERR(path);
2273         *ppath = path;
2274         *pfreepath = true;
2275         return 0;
2276 }
2277
2278 /*
2279  * request arguments may be specified via an inode *, a dentry *, or
2280  * an explicit ino+path.
2281  */
2282 static int set_request_path_attr(struct inode *rinode, struct dentry *rdentry,
2283                                   struct inode *rdiri, const char *rpath,
2284                                   u64 rino, const char **ppath, int *pathlen,
2285                                   u64 *ino, bool *freepath, bool parent_locked)
2286 {
2287         int r = 0;
2288
2289         if (rinode) {
2290                 r = build_inode_path(rinode, ppath, pathlen, ino, freepath);
2291                 dout(" inode %p %llx.%llx\n", rinode, ceph_ino(rinode),
2292                      ceph_snap(rinode));
2293         } else if (rdentry) {
2294                 r = build_dentry_path(rdentry, rdiri, ppath, pathlen, ino,
2295                                         freepath, parent_locked);
2296                 dout(" dentry %p %llx/%.*s\n", rdentry, *ino, *pathlen,
2297                      *ppath);
2298         } else if (rpath || rino) {
2299                 *ino = rino;
2300                 *ppath = rpath;
2301                 *pathlen = rpath ? strlen(rpath) : 0;
2302                 dout(" path %.*s\n", *pathlen, rpath);
2303         }
2304
2305         return r;
2306 }
2307
2308 /*
2309  * called under mdsc->mutex
2310  */
2311 static struct ceph_msg *create_request_message(struct ceph_mds_client *mdsc,
2312                                                struct ceph_mds_request *req,
2313                                                int mds, bool drop_cap_releases)
2314 {
2315         struct ceph_msg *msg;
2316         struct ceph_mds_request_head *head;
2317         const char *path1 = NULL;
2318         const char *path2 = NULL;
2319         u64 ino1 = 0, ino2 = 0;
2320         int pathlen1 = 0, pathlen2 = 0;
2321         bool freepath1 = false, freepath2 = false;
2322         int len;
2323         u16 releases;
2324         void *p, *end;
2325         int ret;
2326
2327         ret = set_request_path_attr(req->r_inode, req->r_dentry,
2328                               req->r_parent, req->r_path1, req->r_ino1.ino,
2329                               &path1, &pathlen1, &ino1, &freepath1,
2330                               test_bit(CEPH_MDS_R_PARENT_LOCKED,
2331                                         &req->r_req_flags));
2332         if (ret < 0) {
2333                 msg = ERR_PTR(ret);
2334                 goto out;
2335         }
2336
2337         /* If r_old_dentry is set, then assume that its parent is locked */
2338         ret = set_request_path_attr(NULL, req->r_old_dentry,
2339                               req->r_old_dentry_dir,
2340                               req->r_path2, req->r_ino2.ino,
2341                               &path2, &pathlen2, &ino2, &freepath2, true);
2342         if (ret < 0) {
2343                 msg = ERR_PTR(ret);
2344                 goto out_free1;
2345         }
2346
2347         len = sizeof(*head) +
2348                 pathlen1 + pathlen2 + 2*(1 + sizeof(u32) + sizeof(u64)) +
2349                 sizeof(struct ceph_timespec);
2350
2351         /* calculate (max) length for cap releases */
2352         len += sizeof(struct ceph_mds_request_release) *
2353                 (!!req->r_inode_drop + !!req->r_dentry_drop +
2354                  !!req->r_old_inode_drop + !!req->r_old_dentry_drop);
2355         if (req->r_dentry_drop)
2356                 len += pathlen1;
2357         if (req->r_old_dentry_drop)
2358                 len += pathlen2;
2359
2360         msg = ceph_msg_new2(CEPH_MSG_CLIENT_REQUEST, len, 1, GFP_NOFS, false);
2361         if (!msg) {
2362                 msg = ERR_PTR(-ENOMEM);
2363                 goto out_free2;
2364         }
2365
2366         msg->hdr.version = cpu_to_le16(2);
2367         msg->hdr.tid = cpu_to_le64(req->r_tid);
2368
2369         head = msg->front.iov_base;
2370         p = msg->front.iov_base + sizeof(*head);
2371         end = msg->front.iov_base + msg->front.iov_len;
2372
2373         head->mdsmap_epoch = cpu_to_le32(mdsc->mdsmap->m_epoch);
2374         head->op = cpu_to_le32(req->r_op);
2375         head->caller_uid = cpu_to_le32(from_kuid(&init_user_ns, req->r_uid));
2376         head->caller_gid = cpu_to_le32(from_kgid(&init_user_ns, req->r_gid));
2377         head->ino = 0;
2378         head->args = req->r_args;
2379
2380         ceph_encode_filepath(&p, end, ino1, path1);
2381         ceph_encode_filepath(&p, end, ino2, path2);
2382
2383         /* make note of release offset, in case we need to replay */
2384         req->r_request_release_offset = p - msg->front.iov_base;
2385
2386         /* cap releases */
2387         releases = 0;
2388         if (req->r_inode_drop)
2389                 releases += ceph_encode_inode_release(&p,
2390                       req->r_inode ? req->r_inode : d_inode(req->r_dentry),
2391                       mds, req->r_inode_drop, req->r_inode_unless, 0);
2392         if (req->r_dentry_drop)
2393                 releases += ceph_encode_dentry_release(&p, req->r_dentry,
2394                                 req->r_parent, mds, req->r_dentry_drop,
2395                                 req->r_dentry_unless);
2396         if (req->r_old_dentry_drop)
2397                 releases += ceph_encode_dentry_release(&p, req->r_old_dentry,
2398                                 req->r_old_dentry_dir, mds,
2399                                 req->r_old_dentry_drop,
2400                                 req->r_old_dentry_unless);
2401         if (req->r_old_inode_drop)
2402                 releases += ceph_encode_inode_release(&p,
2403                       d_inode(req->r_old_dentry),
2404                       mds, req->r_old_inode_drop, req->r_old_inode_unless, 0);
2405
2406         if (drop_cap_releases) {
2407                 releases = 0;
2408                 p = msg->front.iov_base + req->r_request_release_offset;
2409         }
2410
2411         head->num_releases = cpu_to_le16(releases);
2412
2413         /* time stamp */
2414         {
2415                 struct ceph_timespec ts;
2416                 ceph_encode_timespec64(&ts, &req->r_stamp);
2417                 ceph_encode_copy(&p, &ts, sizeof(ts));
2418         }
2419
2420         BUG_ON(p > end);
2421         msg->front.iov_len = p - msg->front.iov_base;
2422         msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2423
2424         if (req->r_pagelist) {
2425                 struct ceph_pagelist *pagelist = req->r_pagelist;
2426                 ceph_msg_data_add_pagelist(msg, pagelist);
2427                 msg->hdr.data_len = cpu_to_le32(pagelist->length);
2428         } else {
2429                 msg->hdr.data_len = 0;
2430         }
2431
2432         msg->hdr.data_off = cpu_to_le16(0);
2433
2434 out_free2:
2435         if (freepath2)
2436                 ceph_mdsc_free_path((char *)path2, pathlen2);
2437 out_free1:
2438         if (freepath1)
2439                 ceph_mdsc_free_path((char *)path1, pathlen1);
2440 out:
2441         return msg;
2442 }
2443
2444 /*
2445  * called under mdsc->mutex if error, under no mutex if
2446  * success.
2447  */
2448 static void complete_request(struct ceph_mds_client *mdsc,
2449                              struct ceph_mds_request *req)
2450 {
2451         if (req->r_callback)
2452                 req->r_callback(mdsc, req);
2453         complete_all(&req->r_completion);
2454 }
2455
2456 /*
2457  * called under mdsc->mutex
2458  */
2459 static int __prepare_send_request(struct ceph_mds_client *mdsc,
2460                                   struct ceph_mds_request *req,
2461                                   int mds, bool drop_cap_releases)
2462 {
2463         struct ceph_mds_request_head *rhead;
2464         struct ceph_msg *msg;
2465         int flags = 0;
2466
2467         req->r_attempts++;
2468         if (req->r_inode) {
2469                 struct ceph_cap *cap =
2470                         ceph_get_cap_for_mds(ceph_inode(req->r_inode), mds);
2471
2472                 if (cap)
2473                         req->r_sent_on_mseq = cap->mseq;
2474                 else
2475                         req->r_sent_on_mseq = -1;
2476         }
2477         dout("prepare_send_request %p tid %lld %s (attempt %d)\n", req,
2478              req->r_tid, ceph_mds_op_name(req->r_op), req->r_attempts);
2479
2480         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2481                 void *p;
2482                 /*
2483                  * Replay.  Do not regenerate message (and rebuild
2484                  * paths, etc.); just use the original message.
2485                  * Rebuilding paths will break for renames because
2486                  * d_move mangles the src name.
2487                  */
2488                 msg = req->r_request;
2489                 rhead = msg->front.iov_base;
2490
2491                 flags = le32_to_cpu(rhead->flags);
2492                 flags |= CEPH_MDS_FLAG_REPLAY;
2493                 rhead->flags = cpu_to_le32(flags);
2494
2495                 if (req->r_target_inode)
2496                         rhead->ino = cpu_to_le64(ceph_ino(req->r_target_inode));
2497
2498                 rhead->num_retry = req->r_attempts - 1;
2499
2500                 /* remove cap/dentry releases from message */
2501                 rhead->num_releases = 0;
2502
2503                 /* time stamp */
2504                 p = msg->front.iov_base + req->r_request_release_offset;
2505                 {
2506                         struct ceph_timespec ts;
2507                         ceph_encode_timespec64(&ts, &req->r_stamp);
2508                         ceph_encode_copy(&p, &ts, sizeof(ts));
2509                 }
2510
2511                 msg->front.iov_len = p - msg->front.iov_base;
2512                 msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
2513                 return 0;
2514         }
2515
2516         if (req->r_request) {
2517                 ceph_msg_put(req->r_request);
2518                 req->r_request = NULL;
2519         }
2520         msg = create_request_message(mdsc, req, mds, drop_cap_releases);
2521         if (IS_ERR(msg)) {
2522                 req->r_err = PTR_ERR(msg);
2523                 return PTR_ERR(msg);
2524         }
2525         req->r_request = msg;
2526
2527         rhead = msg->front.iov_base;
2528         rhead->oldest_client_tid = cpu_to_le64(__get_oldest_tid(mdsc));
2529         if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2530                 flags |= CEPH_MDS_FLAG_REPLAY;
2531         if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags))
2532                 flags |= CEPH_MDS_FLAG_ASYNC;
2533         if (req->r_parent)
2534                 flags |= CEPH_MDS_FLAG_WANT_DENTRY;
2535         rhead->flags = cpu_to_le32(flags);
2536         rhead->num_fwd = req->r_num_fwd;
2537         rhead->num_retry = req->r_attempts - 1;
2538         rhead->ino = 0;
2539
2540         dout(" r_parent = %p\n", req->r_parent);
2541         return 0;
2542 }
2543
2544 /*
2545  * called under mdsc->mutex
2546  */
2547 static int __send_request(struct ceph_mds_client *mdsc,
2548                           struct ceph_mds_session *session,
2549                           struct ceph_mds_request *req,
2550                           bool drop_cap_releases)
2551 {
2552         int err;
2553
2554         err = __prepare_send_request(mdsc, req, session->s_mds,
2555                                      drop_cap_releases);
2556         if (!err) {
2557                 ceph_msg_get(req->r_request);
2558                 ceph_con_send(&session->s_con, req->r_request);
2559         }
2560
2561         return err;
2562 }
2563
2564 /*
2565  * send request, or put it on the appropriate wait list.
2566  */
2567 static void __do_request(struct ceph_mds_client *mdsc,
2568                         struct ceph_mds_request *req)
2569 {
2570         struct ceph_mds_session *session = NULL;
2571         int mds = -1;
2572         int err = 0;
2573         bool random;
2574
2575         if (req->r_err || test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2576                 if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags))
2577                         __unregister_request(mdsc, req);
2578                 return;
2579         }
2580
2581         if (req->r_timeout &&
2582             time_after_eq(jiffies, req->r_started + req->r_timeout)) {
2583                 dout("do_request timed out\n");
2584                 err = -ETIMEDOUT;
2585                 goto finish;
2586         }
2587         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN) {
2588                 dout("do_request forced umount\n");
2589                 err = -EIO;
2590                 goto finish;
2591         }
2592         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_MOUNTING) {
2593                 if (mdsc->mdsmap_err) {
2594                         err = mdsc->mdsmap_err;
2595                         dout("do_request mdsmap err %d\n", err);
2596                         goto finish;
2597                 }
2598                 if (mdsc->mdsmap->m_epoch == 0) {
2599                         dout("do_request no mdsmap, waiting for map\n");
2600                         list_add(&req->r_wait, &mdsc->waiting_for_map);
2601                         return;
2602                 }
2603                 if (!(mdsc->fsc->mount_options->flags &
2604                       CEPH_MOUNT_OPT_MOUNTWAIT) &&
2605                     !ceph_mdsmap_is_cluster_available(mdsc->mdsmap)) {
2606                         err = -EHOSTUNREACH;
2607                         goto finish;
2608                 }
2609         }
2610
2611         put_request_session(req);
2612
2613         mds = __choose_mds(mdsc, req, &random);
2614         if (mds < 0 ||
2615             ceph_mdsmap_get_state(mdsc->mdsmap, mds) < CEPH_MDS_STATE_ACTIVE) {
2616                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2617                         err = -EJUKEBOX;
2618                         goto finish;
2619                 }
2620                 dout("do_request no mds or not active, waiting for map\n");
2621                 list_add(&req->r_wait, &mdsc->waiting_for_map);
2622                 return;
2623         }
2624
2625         /* get, open session */
2626         session = __ceph_lookup_mds_session(mdsc, mds);
2627         if (!session) {
2628                 session = register_session(mdsc, mds);
2629                 if (IS_ERR(session)) {
2630                         err = PTR_ERR(session);
2631                         goto finish;
2632                 }
2633         }
2634         req->r_session = ceph_get_mds_session(session);
2635
2636         dout("do_request mds%d session %p state %s\n", mds, session,
2637              ceph_session_state_name(session->s_state));
2638         if (session->s_state != CEPH_MDS_SESSION_OPEN &&
2639             session->s_state != CEPH_MDS_SESSION_HUNG) {
2640                 if (session->s_state == CEPH_MDS_SESSION_REJECTED) {
2641                         err = -EACCES;
2642                         goto out_session;
2643                 }
2644                 /*
2645                  * We cannot queue async requests since the caps and delegated
2646                  * inodes are bound to the session. Just return -EJUKEBOX and
2647                  * let the caller retry a sync request in that case.
2648                  */
2649                 if (test_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags)) {
2650                         err = -EJUKEBOX;
2651                         goto out_session;
2652                 }
2653                 if (session->s_state == CEPH_MDS_SESSION_NEW ||
2654                     session->s_state == CEPH_MDS_SESSION_CLOSING) {
2655                         __open_session(mdsc, session);
2656                         /* retry the same mds later */
2657                         if (random)
2658                                 req->r_resend_mds = mds;
2659                 }
2660                 list_add(&req->r_wait, &session->s_waiting);
2661                 goto out_session;
2662         }
2663
2664         /* send request */
2665         req->r_resend_mds = -1;   /* forget any previous mds hint */
2666
2667         if (req->r_request_started == 0)   /* note request start time */
2668                 req->r_request_started = jiffies;
2669
2670         err = __send_request(mdsc, session, req, false);
2671
2672 out_session:
2673         ceph_put_mds_session(session);
2674 finish:
2675         if (err) {
2676                 dout("__do_request early error %d\n", err);
2677                 req->r_err = err;
2678                 complete_request(mdsc, req);
2679                 __unregister_request(mdsc, req);
2680         }
2681         return;
2682 }
2683
2684 /*
2685  * called under mdsc->mutex
2686  */
2687 static void __wake_requests(struct ceph_mds_client *mdsc,
2688                             struct list_head *head)
2689 {
2690         struct ceph_mds_request *req;
2691         LIST_HEAD(tmp_list);
2692
2693         list_splice_init(head, &tmp_list);
2694
2695         while (!list_empty(&tmp_list)) {
2696                 req = list_entry(tmp_list.next,
2697                                  struct ceph_mds_request, r_wait);
2698                 list_del_init(&req->r_wait);
2699                 dout(" wake request %p tid %llu\n", req, req->r_tid);
2700                 __do_request(mdsc, req);
2701         }
2702 }
2703
2704 /*
2705  * Wake up threads with requests pending for @mds, so that they can
2706  * resubmit their requests to a possibly different mds.
2707  */
2708 static void kick_requests(struct ceph_mds_client *mdsc, int mds)
2709 {
2710         struct ceph_mds_request *req;
2711         struct rb_node *p = rb_first(&mdsc->request_tree);
2712
2713         dout("kick_requests mds%d\n", mds);
2714         while (p) {
2715                 req = rb_entry(p, struct ceph_mds_request, r_node);
2716                 p = rb_next(p);
2717                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
2718                         continue;
2719                 if (req->r_attempts > 0)
2720                         continue; /* only new requests */
2721                 if (req->r_session &&
2722                     req->r_session->s_mds == mds) {
2723                         dout(" kicking tid %llu\n", req->r_tid);
2724                         list_del_init(&req->r_wait);
2725                         __do_request(mdsc, req);
2726                 }
2727         }
2728 }
2729
2730 int ceph_mdsc_submit_request(struct ceph_mds_client *mdsc, struct inode *dir,
2731                               struct ceph_mds_request *req)
2732 {
2733         int err = 0;
2734
2735         /* take CAP_PIN refs for r_inode, r_parent, r_old_dentry */
2736         if (req->r_inode)
2737                 ceph_get_cap_refs(ceph_inode(req->r_inode), CEPH_CAP_PIN);
2738         if (req->r_parent) {
2739                 ceph_get_cap_refs(ceph_inode(req->r_parent), CEPH_CAP_PIN);
2740                 ihold(req->r_parent);
2741         }
2742         if (req->r_old_dentry_dir)
2743                 ceph_get_cap_refs(ceph_inode(req->r_old_dentry_dir),
2744                                   CEPH_CAP_PIN);
2745
2746         if (req->r_inode) {
2747                 err = ceph_wait_on_async_create(req->r_inode);
2748                 if (err) {
2749                         dout("%s: wait for async create returned: %d\n",
2750                              __func__, err);
2751                         return err;
2752                 }
2753         }
2754
2755         if (!err && req->r_old_inode) {
2756                 err = ceph_wait_on_async_create(req->r_old_inode);
2757                 if (err) {
2758                         dout("%s: wait for async create returned: %d\n",
2759                              __func__, err);
2760                         return err;
2761                 }
2762         }
2763
2764         dout("submit_request on %p for inode %p\n", req, dir);
2765         mutex_lock(&mdsc->mutex);
2766         __register_request(mdsc, req, dir);
2767         __do_request(mdsc, req);
2768         err = req->r_err;
2769         mutex_unlock(&mdsc->mutex);
2770         return err;
2771 }
2772
2773 static int ceph_mdsc_wait_request(struct ceph_mds_client *mdsc,
2774                                   struct ceph_mds_request *req)
2775 {
2776         int err;
2777
2778         /* wait */
2779         dout("do_request waiting\n");
2780         if (!req->r_timeout && req->r_wait_for_completion) {
2781                 err = req->r_wait_for_completion(mdsc, req);
2782         } else {
2783                 long timeleft = wait_for_completion_killable_timeout(
2784                                         &req->r_completion,
2785                                         ceph_timeout_jiffies(req->r_timeout));
2786                 if (timeleft > 0)
2787                         err = 0;
2788                 else if (!timeleft)
2789                         err = -ETIMEDOUT;  /* timed out */
2790                 else
2791                         err = timeleft;  /* killed */
2792         }
2793         dout("do_request waited, got %d\n", err);
2794         mutex_lock(&mdsc->mutex);
2795
2796         /* only abort if we didn't race with a real reply */
2797         if (test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags)) {
2798                 err = le32_to_cpu(req->r_reply_info.head->result);
2799         } else if (err < 0) {
2800                 dout("aborted request %lld with %d\n", req->r_tid, err);
2801
2802                 /*
2803                  * ensure we aren't running concurrently with
2804                  * ceph_fill_trace or ceph_readdir_prepopulate, which
2805                  * rely on locks (dir mutex) held by our caller.
2806                  */
2807                 mutex_lock(&req->r_fill_mutex);
2808                 req->r_err = err;
2809                 set_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags);
2810                 mutex_unlock(&req->r_fill_mutex);
2811
2812                 if (req->r_parent &&
2813                     (req->r_op & CEPH_MDS_OP_WRITE))
2814                         ceph_invalidate_dir_request(req);
2815         } else {
2816                 err = req->r_err;
2817         }
2818
2819         mutex_unlock(&mdsc->mutex);
2820         return err;
2821 }
2822
2823 /*
2824  * Synchronously perform an mds request.  Take care of all of the
2825  * session setup, forwarding, and retry details.
2826  */
2827 int ceph_mdsc_do_request(struct ceph_mds_client *mdsc,
2828                          struct inode *dir,
2829                          struct ceph_mds_request *req)
2830 {
2831         int err;
2832
2833         dout("do_request on %p\n", req);
2834
2835         /* issue */
2836         err = ceph_mdsc_submit_request(mdsc, dir, req);
2837         if (!err)
2838                 err = ceph_mdsc_wait_request(mdsc, req);
2839         dout("do_request %p done, result %d\n", req, err);
2840         return err;
2841 }
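/*
 * Asynchronous variant (sketch): a caller that must not block can set
 * a completion callback and stop after submission; complete_request()
 * will invoke the callback when the reply (or an abort) arrives:
 *
 *	req->r_callback = my_done_cb;	(my_done_cb is hypothetical)
 *	set_bit(CEPH_MDS_R_ASYNC, &req->r_req_flags);
 *	err = ceph_mdsc_submit_request(mdsc, dir, req);
 *
 * Async requests are failed with -EJUKEBOX in __do_request() when no
 * open session is available, so such callers must be prepared to fall
 * back to a synchronous attempt.
 */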
2842
2843 /*
2844  * Invalidate dir's completeness, dentry lease state on an aborted MDS
2845  * namespace request.
2846  */
2847 void ceph_invalidate_dir_request(struct ceph_mds_request *req)
2848 {
2849         struct inode *dir = req->r_parent;
2850         struct inode *old_dir = req->r_old_dentry_dir;
2851
2852         dout("invalidate_dir_request %p %p (complete, lease(s))\n", dir, old_dir);
2853
2854         ceph_dir_clear_complete(dir);
2855         if (old_dir)
2856                 ceph_dir_clear_complete(old_dir);
2857         if (req->r_dentry)
2858                 ceph_invalidate_dentry_lease(req->r_dentry);
2859         if (req->r_old_dentry)
2860                 ceph_invalidate_dentry_lease(req->r_old_dentry);
2861 }
2862
2863 /*
2864  * Handle mds reply.
2865  *
2866  * We take the session mutex and parse and process the reply immediately.
2867  * This preserves the logical ordering of replies, capabilities, etc., sent
2868  * by the MDS as they are applied to our local cache.
2869  */
2870 static void handle_reply(struct ceph_mds_session *session, struct ceph_msg *msg)
2871 {
2872         struct ceph_mds_client *mdsc = session->s_mdsc;
2873         struct ceph_mds_request *req;
2874         struct ceph_mds_reply_head *head = msg->front.iov_base;
2875         struct ceph_mds_reply_info_parsed *rinfo;  /* parsed reply info */
2876         struct ceph_snap_realm *realm;
2877         u64 tid;
2878         int err, result;
2879         int mds = session->s_mds;
2880
2881         if (msg->front.iov_len < sizeof(*head)) {
2882                 pr_err("mdsc_handle_reply got corrupt (short) reply\n");
2883                 ceph_msg_dump(msg);
2884                 return;
2885         }
2886
2887         /* get request, session */
2888         tid = le64_to_cpu(msg->hdr.tid);
2889         mutex_lock(&mdsc->mutex);
2890         req = lookup_get_request(mdsc, tid);
2891         if (!req) {
2892                 dout("handle_reply on unknown tid %llu\n", tid);
2893                 mutex_unlock(&mdsc->mutex);
2894                 return;
2895         }
2896         dout("handle_reply %p\n", req);
2897
2898         /* correct session? */
2899         if (req->r_session != session) {
2900                 pr_err("mdsc_handle_reply got %llu on session mds%d"
2901                        " not mds%d\n", tid, session->s_mds,
2902                        req->r_session ? req->r_session->s_mds : -1);
2903                 mutex_unlock(&mdsc->mutex);
2904                 goto out;
2905         }
2906
2907         /* dup? */
2908         if ((test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags) && !head->safe) ||
2909             (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags) && head->safe)) {
2910                 pr_warn("got a dup %s reply on %llu from mds%d\n",
2911                            head->safe ? "safe" : "unsafe", tid, mds);
2912                 mutex_unlock(&mdsc->mutex);
2913                 goto out;
2914         }
2915         if (test_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags)) {
2916                 pr_warn("got unsafe after safe on %llu from mds%d\n",
2917                            tid, mds);
2918                 mutex_unlock(&mdsc->mutex);
2919                 goto out;
2920         }
2921
2922         result = le32_to_cpu(head->result);
2923
2924         /*
2925          * Handle an ESTALE:
2926          * if we're not talking to the authority, send the request to it;
2927          * if the authority has changed while we weren't looking,
2928          * send the request to the new authority.
2929          * Otherwise we just have to return an ESTALE.
2930          */
2931         if (result == -ESTALE) {
2932                 dout("got ESTALE on request %llu\n", req->r_tid);
2933                 req->r_resend_mds = -1;
2934                 if (req->r_direct_mode != USE_AUTH_MDS) {
2935                         dout("not using auth, setting for that now\n");
2936                         req->r_direct_mode = USE_AUTH_MDS;
2937                         __do_request(mdsc, req);
2938                         mutex_unlock(&mdsc->mutex);
2939                         goto out;
2940                 } else {
2941                         int mds = __choose_mds(mdsc, req, NULL);
2942                         if (mds >= 0 && mds != req->r_session->s_mds) {
2943                                 dout("but auth changed, so resending\n");
2944                                 __do_request(mdsc, req);
2945                                 mutex_unlock(&mdsc->mutex);
2946                                 goto out;
2947                         }
2948                 }
2949                 dout("have to return ESTALE on request %llu\n", req->r_tid);
2950         }
2951
2952
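             /*
              * The MDS sends an "unsafe" reply once an operation has been
              * applied in memory, and a "safe" reply once it has been
              * committed to the journal; only the safe reply lets us
              * unregister the request.
              */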
2953         if (head->safe) {
2954                 set_bit(CEPH_MDS_R_GOT_SAFE, &req->r_req_flags);
2955                 __unregister_request(mdsc, req);
2956
2957                 /* last request during umount? */
2958                 if (mdsc->stopping && !__get_oldest_req(mdsc))
2959                         complete_all(&mdsc->safe_umount_waiters);
2960
2961                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
2962                         /*
2963                          * We already handled the unsafe response, now do the
2964                          * cleanup.  No need to examine the response; the MDS
2965                          * doesn't include any result info in the safe
2966                          * response.  And even if it did, there is nothing
2967                          * useful we could do with a revised return value.
2968                          */
2969                         dout("got safe reply %llu, mds%d\n", tid, mds);
2970
2971                         mutex_unlock(&mdsc->mutex);
2972                         goto out;
2973                 }
2974         } else {
2975                 set_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags);
2976                 list_add_tail(&req->r_unsafe_item, &req->r_session->s_unsafe);
2977         }
2978
2979         dout("handle_reply tid %lld result %d\n", tid, result);
2980         rinfo = &req->r_reply_info;
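             /*
              * If the session supports the new reply encoding, parse with
              * all feature bits set; otherwise parse according to the
              * connection's negotiated peer features.
              */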
2981         if (test_bit(CEPHFS_FEATURE_REPLY_ENCODING, &session->s_features))
2982                 err = parse_reply_info(msg, rinfo, (u64)-1);
2983         else
2984                 err = parse_reply_info(msg, rinfo, session->s_con.peer_features);
2985         mutex_unlock(&mdsc->mutex);
2986
2987         mutex_lock(&session->s_mutex);
2988         if (err < 0) {
2989                 pr_err("mdsc_handle_reply got corrupt reply mds%d(tid:%lld)\n", mds, tid);
2990                 ceph_msg_dump(msg);
2991                 goto out_err;
2992         }
2993
2994         /* snap trace */
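             /*
              * Apply any snap trace under snap_rwsem held for write, then
              * downgrade to read so the realm hierarchy stays stable while
              * we fill the trace into our cache.
              */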
2995         realm = NULL;
2996         if (rinfo->snapblob_len) {
2997                 down_write(&mdsc->snap_rwsem);
2998                 ceph_update_snap_trace(mdsc, rinfo->snapblob,
2999                                 rinfo->snapblob + rinfo->snapblob_len,
3000                                 le32_to_cpu(head->op) == CEPH_MDS_OP_RMSNAP,
3001                                 &realm);
3002                 downgrade_write(&mdsc->snap_rwsem);
3003         } else {
3004                 down_read(&mdsc->snap_rwsem);
3005         }
3006
3007         /* insert trace into our cache */
3008         mutex_lock(&req->r_fill_mutex);
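             /*
              * Stash the request in current->journal_info so code called
              * while filling the trace can tell it is running in the
              * context of this reply.
              */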
3009         current->journal_info = req;
3010         err = ceph_fill_trace(mdsc->fsc->sb, req);
3011         if (err == 0) {
3012                 if (result == 0 && (req->r_op == CEPH_MDS_OP_READDIR ||
3013                                     req->r_op == CEPH_MDS_OP_LSSNAP))
3014                         ceph_readdir_prepopulate(req, req->r_session);
3015         }
3016         current->journal_info = NULL;
3017         mutex_unlock(&req->r_fill_mutex);
3018
3019         up_read(&mdsc->snap_rwsem);
3020         if (realm)
3021                 ceph_put_snap_realm(mdsc, realm);
3022
3023         if (err == 0) {
3024                 if (req->r_target_inode &&
3025                     test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags)) {
3026                         struct ceph_inode_info *ci =
3027                                 ceph_inode(req->r_target_inode);
3028                         spin_lock(&ci->i_unsafe_lock);
3029                         list_add_tail(&req->r_unsafe_target_item,
3030                                       &ci->i_unsafe_iops);
3031                         spin_unlock(&ci->i_unsafe_lock);
3032                 }
3033
3034                 ceph_unreserve_caps(mdsc, &req->r_caps_reservation);
3035         }
3036 out_err:
3037         mutex_lock(&mdsc->mutex);
3038         if (!test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3039                 if (err) {
3040                         req->r_err = err;
3041                 } else {
3042                         req->r_reply = ceph_msg_get(msg);
3043                         set_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags);
3044                 }
3045         } else {
3046                 dout("reply arrived after request %lld was aborted\n", tid);
3047         }
3048         mutex_unlock(&mdsc->mutex);
3049
3050         mutex_unlock(&session->s_mutex);
3051
3052         /* kick calling process */
3053         complete_request(mdsc, req);
3054 out:
3055         ceph_mdsc_put_request(req);
3056         return;
3057 }
3058
3059
3060
3061 /*
3062  * handle mds notification that our request has been forwarded.
3063  */
3064 static void handle_forward(struct ceph_mds_client *mdsc,
3065                            struct ceph_mds_session *session,
3066                            struct ceph_msg *msg)
3067 {
3068         struct ceph_mds_request *req;
3069         u64 tid = le64_to_cpu(msg->hdr.tid);
3070         u32 next_mds;
3071         u32 fwd_seq;
3072         int err = -EINVAL;
3073         void *p = msg->front.iov_base;
3074         void *end = p + msg->front.iov_len;
3075
3076         ceph_decode_need(&p, end, 2*sizeof(u32), bad);
3077         next_mds = ceph_decode_32(&p);
3078         fwd_seq = ceph_decode_32(&p);
3079
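             /*
              * next_mds is the rank this request was forwarded to; fwd_seq
              * counts the forwards, letting us ignore stale forward
              * notifications below.
              */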
3080         mutex_lock(&mdsc->mutex);
3081         req = lookup_get_request(mdsc, tid);
3082         if (!req) {
3083                 dout("forward tid %llu to mds%d - req dne\n", tid, next_mds);
3084                 goto out;  /* dup reply? */
3085         }
3086
3087         if (test_bit(CEPH_MDS_R_ABORTED, &req->r_req_flags)) {
3088                 dout("forward tid %llu aborted, unregistering\n", tid);
3089                 __unregister_request(mdsc, req);
3090         } else if (fwd_seq <= req->r_num_fwd) {
3091                 dout("forward tid %llu to mds%d - old seq %d <= %d\n",
3092                      tid, next_mds, fwd_seq, req->r_num_fwd);
3093         } else {
3094                 /* resend. forward race not possible; mds would drop */
3095                 dout("forward tid %llu to mds%d (we resend)\n", tid, next_mds);
3096                 BUG_ON(req->r_err);
3097                 BUG_ON(test_bit(CEPH_MDS_R_GOT_RESULT, &req->r_req_flags));
3098                 req->r_attempts = 0;
3099                 req->r_num_fwd = fwd_seq;
3100                 req->r_resend_mds = next_mds;
3101                 put_request_session(req);
3102                 __do_request(mdsc, req);
3103         }
3104         ceph_mdsc_put_request(req);
3105 out:
3106         mutex_unlock(&mdsc->mutex);
3107         return;
3108
3109 bad:
3110         pr_err("mdsc_handle_forward decode error err=%d\n", err);
3111 }
3112
3113 static int __decode_session_metadata(void **p, void *end,
3114                                      bool *blacklisted)
3115 {
3116         /* map<string,string> */
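             /*
              * We only scan the metadata for an "error_string" entry that
              * mentions "blacklisted"; the caller uses it to mark the
              * client as blacklisted by the cluster.
              */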
3117         u32 n;
3118         bool err_str;
3119         ceph_decode_32_safe(p, end, n, bad);
3120         while (n-- > 0) {
3121                 u32 len;
3122                 ceph_decode_32_safe(p, end, len, bad);
3123                 ceph_decode_need(p, end, len, bad);
3124                 err_str = !strncmp(*p, "error_string", len);
3125                 *p += len;
3126                 ceph_decode_32_safe(p, end, len, bad);
3127                 ceph_decode_need(p, end, len, bad);
3128                 if (err_str && strnstr(*p, "blacklisted", len))
3129                         *blacklisted = true;
3130                 *p += len;
3131         }
3132         return 0;
3133 bad:
3134         return -1;
3135 }
3136
3137 /*
3138  * handle a mds session control message
3139  */
3140 static void handle_session(struct ceph_mds_session *session,
3141                            struct ceph_msg *msg)
3142 {
3143         struct ceph_mds_client *mdsc = session->s_mdsc;
3144         int mds = session->s_mds;
3145         int msg_version = le16_to_cpu(msg->hdr.version);
3146         void *p = msg->front.iov_base;
3147         void *end = p + msg->front.iov_len;
3148         struct ceph_mds_session_head *h;
3149         u32 op;
3150         u64 seq;
3151         unsigned long features = 0;
3152         int wake = 0;
3153         bool blacklisted = false;
3154
3155         /* decode */
3156         ceph_decode_need(&p, end, sizeof(*h), bad);
3157         h = p;
3158         p += sizeof(*h);
3159
3160         op = le32_to_cpu(h->op);
3161         seq = le64_to_cpu(h->seq);
3162
3163         if (msg_version >= 3) {
3164                 u32 len;
3165                 /* version >= 2, metadata */
3166                 if (__decode_session_metadata(&p, end, &blacklisted) < 0)
3167                         goto bad;
3168                 /* version >= 3, feature bits */
3169                 ceph_decode_32_safe(&p, end, len, bad);
3170                 ceph_decode_need(&p, end, len, bad);
3171                 memcpy(&features, p, min_t(size_t, len, sizeof(features)));
3172                 p += len;
3173         }
3174
3175         mutex_lock(&mdsc->mutex);
3176         if (op == CEPH_SESSION_CLOSE) {
3177                 ceph_get_mds_session(session);
3178                 __unregister_session(mdsc, session);
3179         }
3180         /* FIXME: this ttl calculation is generous */
3181         session->s_ttl = jiffies + HZ*mdsc->mdsmap->m_session_autoclose;
3182         mutex_unlock(&mdsc->mutex);
3183
3184         mutex_lock(&session->s_mutex);
3185
3186         dout("handle_session mds%d %s %p state %s seq %llu\n",
3187              mds, ceph_session_op_name(op), session,
3188              ceph_session_state_name(session->s_state), seq);
3189
3190         if (session->s_state == CEPH_MDS_SESSION_HUNG) {
3191                 session->s_state = CEPH_MDS_SESSION_OPEN;
3192                 pr_info("mds%d came back\n", session->s_mds);
3193         }
3194
3195         switch (op) {
3196         case CEPH_SESSION_OPEN:
3197                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3198                         pr_info("mds%d reconnect success\n", session->s_mds);
3199                 session->s_state = CEPH_MDS_SESSION_OPEN;
3200                 session->s_features = features;
3201                 renewed_caps(mdsc, session, 0);
3202                 wake = 1;
3203                 if (mdsc->stopping)
3204                         __close_session(mdsc, session);
3205                 break;
3206
3207         case CEPH_SESSION_RENEWCAPS:
3208                 if (session->s_renew_seq == seq)
3209                         renewed_caps(mdsc, session, 1);
3210                 break;
3211
3212         case CEPH_SESSION_CLOSE:
3213                 if (session->s_state == CEPH_MDS_SESSION_RECONNECTING)
3214                         pr_info("mds%d reconnect denied\n", session->s_mds);
3215                 session->s_state = CEPH_MDS_SESSION_CLOSED;
3216                 cleanup_session_requests(mdsc, session);
3217                 remove_session_caps(session);
3218                 wake = 2; /* for good measure */
3219                 wake_up_all(&mdsc->session_close_wq);
3220                 break;
3221
3222         case CEPH_SESSION_STALE:
3223                 pr_info("mds%d caps went stale, renewing\n",
3224                         session->s_mds);
3225                 spin_lock(&session->s_gen_ttl_lock);
3226                 session->s_cap_gen++;
3227                 session->s_cap_ttl = jiffies - 1;
3228                 spin_unlock(&session->s_gen_ttl_lock);
3229                 send_renew_caps(mdsc, session);
3230                 break;
3231
3232         case CEPH_SESSION_RECALL_STATE:
3233                 ceph_trim_caps(mdsc, session, le32_to_cpu(h->max_caps));
3234                 break;
3235
3236         case CEPH_SESSION_FLUSHMSG:
3237                 send_flushmsg_ack(mdsc, session, seq);
3238                 break;
3239
3240         case CEPH_SESSION_FORCE_RO:
3241                 dout("force_session_readonly %p\n", session);
3242                 spin_lock(&session->s_cap_lock);
3243                 session->s_readonly = true;
3244                 spin_unlock(&session->s_cap_lock);
3245                 wake_up_session_caps(session, FORCE_RO);
3246                 break;
3247
3248         case CEPH_SESSION_REJECT:
3249                 WARN_ON(session->s_state != CEPH_MDS_SESSION_OPENING);
3250                 pr_info("mds%d rejected session\n", session->s_mds);
3251                 session->s_state = CEPH_MDS_SESSION_REJECTED;
3252                 cleanup_session_requests(mdsc, session);
3253                 remove_session_caps(session);
3254                 if (blacklisted)
3255                         mdsc->fsc->blacklisted = true;
3256                 wake = 2; /* for good measure */
3257                 break;
3258
3259         default:
3260                 pr_err("mdsc_handle_session bad op %d mds%d\n", op, mds);
3261                 WARN_ON(1);
3262         }
3263
3264         mutex_unlock(&session->s_mutex);
3265         if (wake) {
3266                 mutex_lock(&mdsc->mutex);
3267                 __wake_requests(mdsc, &session->s_waiting);
3268                 if (wake == 2)
3269                         kick_requests(mdsc, mds);
3270                 mutex_unlock(&mdsc->mutex);
3271         }
3272         if (op == CEPH_SESSION_CLOSE)
3273                 ceph_put_mds_session(session);
3274         return;
3275
3276 bad:
3277         pr_err("mdsc_handle_session corrupt message mds%d len %d\n", mds,
3278                (int)msg->front.iov_len);
3279         ceph_msg_dump(msg);
3280         return;
3281 }
3282
3283 /*
3284  * called under session->mutex.
3285  */
3286 static void replay_unsafe_requests(struct ceph_mds_client *mdsc,
3287                                    struct ceph_mds_session *session)
3288 {
3289         struct ceph_mds_request *req, *nreq;
3290         struct rb_node *p;
3291
3292         dout("replay_unsafe_requests mds%d\n", session->s_mds);
3293
3294         mutex_lock(&mdsc->mutex);
3295         list_for_each_entry_safe(req, nreq, &session->s_unsafe, r_unsafe_item)
3296                 __send_request(mdsc, session, req, true);
3297
3298         /*
3299          * Also re-send old requests when the MDS enters the reconnect stage,
3300          * so that the MDS can process completed requests in its clientreplay stage.
3301          */
3302         p = rb_first(&mdsc->request_tree);
3303         while (p) {
3304                 req = rb_entry(p, struct ceph_mds_request, r_node);
3305                 p = rb_next(p);
3306                 if (test_bit(CEPH_MDS_R_GOT_UNSAFE, &req->r_req_flags))
3307                         continue;
3308                 if (req->r_attempts == 0)
3309                         continue; /* only old requests */
3310                 if (req->r_session &&
3311                     req->r_session->s_mds == session->s_mds)
3312                         __send_request(mdsc, session, req, true);
3313         }
3314         mutex_unlock(&mdsc->mutex);
3315 }
3316
3317 static int send_reconnect_partial(struct ceph_reconnect_state *recon_state)
3318 {
3319         struct ceph_msg *reply;
3320         struct ceph_pagelist *_pagelist;
3321         struct page *page;
3322         __le32 *addr;
3323         int err = -ENOMEM;
3324
3325         if (!recon_state->allow_multi)
3326                 return -ENOSPC;
3327
3328         /* can't handle a message that contains both caps and realms */
3329         BUG_ON(!recon_state->nr_caps == !recon_state->nr_realms);
3330
3331         /* pre-allocate new pagelist */
3332         _pagelist = ceph_pagelist_alloc(GFP_NOFS);
3333         if (!_pagelist)
3334                 return -ENOMEM;
3335
3336         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3337         if (!reply)
3338                 goto fail_msg;
3339
3340         /* placeholder for nr_caps */
3341         err = ceph_pagelist_encode_32(_pagelist, 0);
3342         if (err < 0)
3343                 goto fail;
3344
3345         if (recon_state->nr_caps) {
3346                 /* currently encoding caps */
3347                 err = ceph_pagelist_encode_32(recon_state->pagelist, 0);
3348                 if (err)
3349                         goto fail;
3350         } else {
3351                 /* placeholder for nr_realms (currently encoding realms) */
3352                 err = ceph_pagelist_encode_32(_pagelist, 0);
3353                 if (err < 0)
3354                         goto fail;
3355         }
3356
3357         err = ceph_pagelist_encode_8(recon_state->pagelist, 1);
3358         if (err)
3359                 goto fail;
3360
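             /*
              * Fix up the placeholder counts at the head of the outgoing
              * pagelist: the first __le32 holds nr_caps, the second
              * nr_realms.  The '1' encoded above is the "more to come"
              * flag telling the MDS another reconnect message follows.
              */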
3361         page = list_first_entry(&recon_state->pagelist->head, struct page, lru);
3362         addr = kmap_atomic(page);
3363         if (recon_state->nr_caps) {
3364                 /* currently encoding caps */
3365                 *addr = cpu_to_le32(recon_state->nr_caps);
3366         } else {
3367                 /* currently encoding realms */
3368                 *(addr + 1) = cpu_to_le32(recon_state->nr_realms);
3369         }
3370         kunmap_atomic(addr);
3371
3372         reply->hdr.version = cpu_to_le16(5);
3373         reply->hdr.compat_version = cpu_to_le16(4);
3374
3375         reply->hdr.data_len = cpu_to_le32(recon_state->pagelist->length);
3376         ceph_msg_data_add_pagelist(reply, recon_state->pagelist);
3377
3378         ceph_con_send(&recon_state->session->s_con, reply);
3379         ceph_pagelist_release(recon_state->pagelist);
3380
3381         recon_state->pagelist = _pagelist;
3382         recon_state->nr_caps = 0;
3383         recon_state->nr_realms = 0;
3384         recon_state->msg_version = 5;
3385         return 0;
3386 fail:
3387         ceph_msg_put(reply);
3388 fail_msg:
3389         ceph_pagelist_release(_pagelist);
3390         return err;
3391 }
3392
3393 /*
3394  * Encode information about a cap for a reconnect with the MDS.
3395  */
3396 static int encode_caps_cb(struct inode *inode, struct ceph_cap *cap,
3397                           void *arg)
3398 {
3399         union {
3400                 struct ceph_mds_cap_reconnect v2;
3401                 struct ceph_mds_cap_reconnect_v1 v1;
3402         } rec;
3403         struct ceph_inode_info *ci = cap->ci;
3404         struct ceph_reconnect_state *recon_state = arg;
3405         struct ceph_pagelist *pagelist = recon_state->pagelist;
3406         int err;
3407         u64 snap_follows;
3408
3409         dout(" adding %p ino %llx.%llx cap %p %lld %s\n",
3410              inode, ceph_vinop(inode), cap, cap->cap_id,
3411              ceph_cap_string(cap->issued));
3412
3413         spin_lock(&ci->i_ceph_lock);
3414         cap->seq = 0;        /* reset cap seq */
3415         cap->issue_seq = 0;  /* and issue_seq */
3416         cap->mseq = 0;       /* and migrate_seq */
3417         cap->cap_gen = cap->session->s_cap_gen;
3418
3419         if (recon_state->msg_version >= 2) {
3420                 rec.v2.cap_id = cpu_to_le64(cap->cap_id);
3421                 rec.v2.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3422                 rec.v2.issued = cpu_to_le32(cap->issued);
3423                 rec.v2.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3424                 rec.v2.pathbase = 0;
3425                 rec.v2.flock_len = (__force __le32)
3426                         ((ci->i_ceph_flags & CEPH_I_ERROR_FILELOCK) ? 0 : 1);
3427         } else {
3428                 rec.v1.cap_id = cpu_to_le64(cap->cap_id);
3429                 rec.v1.wanted = cpu_to_le32(__ceph_caps_wanted(ci));
3430                 rec.v1.issued = cpu_to_le32(cap->issued);
3431                 rec.v1.size = cpu_to_le64(inode->i_size);
3432                 ceph_encode_timespec64(&rec.v1.mtime, &inode->i_mtime);
3433                 ceph_encode_timespec64(&rec.v1.atime, &inode->i_atime);
3434                 rec.v1.snaprealm = cpu_to_le64(ci->i_snap_realm->ino);
3435                 rec.v1.pathbase = 0;
3436         }
3437
3438         if (list_empty(&ci->i_cap_snaps)) {
3439                 snap_follows = ci->i_head_snapc ? ci->i_head_snapc->seq : 0;
3440         } else {
3441                 struct ceph_cap_snap *capsnap =
3442                         list_first_entry(&ci->i_cap_snaps,
3443                                          struct ceph_cap_snap, ci_item);
3444                 snap_follows = capsnap->follows;
3445         }
3446         spin_unlock(&ci->i_ceph_lock);
3447
3448         if (recon_state->msg_version >= 2) {
3449                 int num_fcntl_locks, num_flock_locks;
3450                 struct ceph_filelock *flocks = NULL;
3451                 size_t struct_len, total_len = sizeof(u64);
3452                 u8 struct_v = 0;
3453
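                     /*
                      * File locks can change while we encode them: if
                      * ceph_encode_locks_to_buffer() finds more locks than
                      * were counted it returns -ENOSPC, and we recount and
                      * retry.
                      */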
3454 encode_again:
3455                 if (rec.v2.flock_len) {
3456                         ceph_count_locks(inode, &num_fcntl_locks, &num_flock_locks);
3457                 } else {
3458                         num_fcntl_locks = 0;
3459                         num_flock_locks = 0;
3460                 }
3461                 if (num_fcntl_locks + num_flock_locks > 0) {
3462                         flocks = kmalloc_array(num_fcntl_locks + num_flock_locks,
3463                                                sizeof(struct ceph_filelock),
3464                                                GFP_NOFS);
3465                         if (!flocks) {
3466                                 err = -ENOMEM;
3467                                 goto out_err;
3468                         }
3469                         err = ceph_encode_locks_to_buffer(inode, flocks,
3470                                                           num_fcntl_locks,
3471                                                           num_flock_locks);
3472                         if (err) {
3473                                 kfree(flocks);
3474                                 flocks = NULL;
3475                                 if (err == -ENOSPC)
3476                                         goto encode_again;
3477                                 goto out_err;
3478                         }
3479                 } else {
3480                         kfree(flocks);
3481                         flocks = NULL;
3482                 }
3483
3484                 if (recon_state->msg_version >= 3) {
3485                         /* version, compat_version and struct_len */
3486                         total_len += 2 * sizeof(u8) + sizeof(u32);
3487                         struct_v = 2;
3488                 }
3489                 /*
3490                  * number of encoded locks is stable, so copy to pagelist
3491                  */
3492                 struct_len = 2 * sizeof(u32) +
3493                             (num_fcntl_locks + num_flock_locks) *
3494                             sizeof(struct ceph_filelock);
3495                 rec.v2.flock_len = cpu_to_le32(struct_len);
3496
3497                 struct_len += sizeof(u32) + sizeof(rec.v2);
3498
3499                 if (struct_v >= 2)
3500                         struct_len += sizeof(u64); /* snap_follows */
3501
3502                 total_len += struct_len;
3503
3504                 if (pagelist->length + total_len > RECONNECT_MAX_SIZE) {
3505                         err = send_reconnect_partial(recon_state);
3506                         if (err)
3507                                 goto out_freeflocks;
3508                         pagelist = recon_state->pagelist;
3509                 }
3510
3511                 err = ceph_pagelist_reserve(pagelist, total_len);
3512                 if (err)
3513                         goto out_freeflocks;
3514
3515                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3516                 if (recon_state->msg_version >= 3) {
3517                         ceph_pagelist_encode_8(pagelist, struct_v);
3518                         ceph_pagelist_encode_8(pagelist, 1);
3519                         ceph_pagelist_encode_32(pagelist, struct_len);
3520                 }
3521                 ceph_pagelist_encode_string(pagelist, NULL, 0);
3522                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v2));
3523                 ceph_locks_to_pagelist(flocks, pagelist,
3524                                        num_fcntl_locks, num_flock_locks);
3525                 if (struct_v >= 2)
3526                         ceph_pagelist_encode_64(pagelist, snap_follows);
3527 out_freeflocks:
3528                 kfree(flocks);
3529         } else {
3530                 u64 pathbase = 0;
3531                 int pathlen = 0;
3532                 char *path = NULL;
3533                 struct dentry *dentry;
3534
3535                 dentry = d_find_alias(inode);
3536                 if (dentry) {
3537                         path = ceph_mdsc_build_path(dentry,
3538                                                 &pathlen, &pathbase, 0);
3539                         dput(dentry);
3540                         if (IS_ERR(path)) {
3541                                 err = PTR_ERR(path);
3542                                 goto out_err;
3543                         }
3544                         rec.v1.pathbase = cpu_to_le64(pathbase);
3545                 }
3546
3547                 err = ceph_pagelist_reserve(pagelist,
3548                                             sizeof(u64) + sizeof(u32) +
3549                                             pathlen + sizeof(rec.v1));
3550                 if (err)
3551                         goto out_freepath;
3553
3554                 ceph_pagelist_encode_64(pagelist, ceph_ino(inode));
3555                 ceph_pagelist_encode_string(pagelist, path, pathlen);
3556                 ceph_pagelist_append(pagelist, &rec, sizeof(rec.v1));
3557 out_freepath:
3558                 ceph_mdsc_free_path(path, pathlen);
3559         }
3560
3561 out_err:
3562         if (err >= 0)
3563                 recon_state->nr_caps++;
3564         return err;
3565 }
3566
3567 static int encode_snap_realms(struct ceph_mds_client *mdsc,
3568                               struct ceph_reconnect_state *recon_state)
3569 {
3570         struct rb_node *p;
3571         struct ceph_pagelist *pagelist = recon_state->pagelist;
3572         int err = 0;
3573
3574         if (recon_state->msg_version >= 4) {
3575                 err = ceph_pagelist_encode_32(pagelist, mdsc->num_snap_realms);
3576                 if (err < 0)
3577                         goto fail;
3578         }
3579
3580         /*
3581          * snaprealms.  we provide mds with the ino, seq (version), and
3582          * parent for all of our realms.  If the mds has any newer info,
3583          * it will tell us.
3584          */
3585         for (p = rb_first(&mdsc->snap_realms); p; p = rb_next(p)) {
3586                 struct ceph_snap_realm *realm =
3587                        rb_entry(p, struct ceph_snap_realm, node);
3588                 struct ceph_mds_snaprealm_reconnect sr_rec;
3589
3590                 if (recon_state->msg_version >= 4) {
3591                         size_t need = sizeof(u8) * 2 + sizeof(u32) +
3592                                       sizeof(sr_rec);
3593
3594                         if (pagelist->length + need > RECONNECT_MAX_SIZE) {
3595                                 err = send_reconnect_partial(recon_state);
3596                                 if (err)
3597                                         goto fail;
3598                                 pagelist = recon_state->pagelist;
3599                         }
3600
3601                         err = ceph_pagelist_reserve(pagelist, need);
3602                         if (err)
3603                                 goto fail;
3604
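                             /* struct_v, struct_compat and struct_len */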
3605                         ceph_pagelist_encode_8(pagelist, 1);
3606                         ceph_pagelist_encode_8(pagelist, 1);
3607                         ceph_pagelist_encode_32(pagelist, sizeof(sr_rec));
3608                 }
3609
3610                 dout(" adding snap realm %llx seq %lld parent %llx\n",
3611                      realm->ino, realm->seq, realm->parent_ino);
3612                 sr_rec.ino = cpu_to_le64(realm->ino);
3613                 sr_rec.seq = cpu_to_le64(realm->seq);
3614                 sr_rec.parent = cpu_to_le64(realm->parent_ino);
3615
3616                 err = ceph_pagelist_append(pagelist, &sr_rec, sizeof(sr_rec));
3617                 if (err)
3618                         goto fail;
3619
3620                 recon_state->nr_realms++;
3621         }
3622 fail:
3623         return err;
3624 }
3625
3626
3627 /*
3628  * If an MDS fails and recovers, clients need to reconnect in order to
3629  * reestablish shared state.  This includes all caps issued through
3630  * this session _and_ the snap_realm hierarchy.  Because it's not
3631  * clear which snap realms the mds cares about, we send everything we
3632  * know about; that ensures we'll then get any new info the
3633  * recovering MDS might have.
3634  *
3635  * This is a relatively heavyweight operation, but it's rare.
3636  *
3637  * called with mdsc->mutex held.
3638  */
3639 static void send_mds_reconnect(struct ceph_mds_client *mdsc,
3640                                struct ceph_mds_session *session)
3641 {
3642         struct ceph_msg *reply;
3643         int mds = session->s_mds;
3644         int err = -ENOMEM;
3645         struct ceph_reconnect_state recon_state = {
3646                 .session = session,
3647         };
3648         LIST_HEAD(dispose);
3649
3650         pr_info("mds%d reconnect start\n", mds);
3651
3652         recon_state.pagelist = ceph_pagelist_alloc(GFP_NOFS);
3653         if (!recon_state.pagelist)
3654                 goto fail_nopagelist;
3655
3656         reply = ceph_msg_new2(CEPH_MSG_CLIENT_RECONNECT, 0, 1, GFP_NOFS, false);
3657         if (!reply)
3658                 goto fail_nomsg;
3659
3660         mutex_lock(&session->s_mutex);
3661         session->s_state = CEPH_MDS_SESSION_RECONNECTING;
3662         session->s_seq = 0;
3663
3664         dout("session %p state %s\n", session,
3665              ceph_session_state_name(session->s_state));
3666
3667         spin_lock(&session->s_gen_ttl_lock);
3668         session->s_cap_gen++;
3669         spin_unlock(&session->s_gen_ttl_lock);
3670
3671         spin_lock(&session->s_cap_lock);
3672         /* don't know if session is readonly */
3673         session->s_readonly = 0;
3674         /*
3675          * Notify __ceph_remove_cap() that we are composing the cap reconnect.
3676          * If a cap gets released before being added to the cap reconnect,
3677          * __ceph_remove_cap() should skip queuing the cap release.
3678          */
3679         session->s_cap_reconnect = 1;
3680         /* drop old cap expires; we're about to reestablish that state */
3681         detach_cap_releases(session, &dispose);
3682         spin_unlock(&session->s_cap_lock);
3683         dispose_cap_releases(mdsc, &dispose);
3684
3685         /* trim unused caps to reduce MDS's cache rejoin time */
3686         if (mdsc->fsc->sb->s_root)
3687                 shrink_dcache_parent(mdsc->fsc->sb->s_root);
3688
3689         ceph_con_close(&session->s_con);
3690         ceph_con_open(&session->s_con,
3691                       CEPH_ENTITY_TYPE_MDS, mds,
3692                       ceph_mdsmap_get_addr(mdsc->mdsmap, mds));
3693
3694         /* replay unsafe requests */
3695         replay_unsafe_requests(mdsc, session);
3696
3697         ceph_early_kick_flushing_caps(mdsc, session);
3698
3699         down_read(&mdsc->snap_rwsem);
3700
3701         /* placeholder for nr_caps */
3702         err = ceph_pagelist_encode_32(recon_state.pagelist, 0);
3703         if (err)
3704                 goto fail;
3705
3706         if (test_bit(CEPHFS_FEATURE_MULTI_RECONNECT, &session->s_features)) {
3707                 recon_state.msg_version = 3;
3708                 recon_state.allow_multi = true;
3709         } else if (session->s_con.peer_features & CEPH_FEATURE_MDSENC) {
3710                 recon_state.msg_version = 3;
3711         } else {
3712                 recon_state.msg_version = 2;
3713         }
3714         /* traverse this session's caps */
3715         err = ceph_iterate_session_caps(session, encode_caps_cb, &recon_state);
3716
3717         spin_lock(&session->s_cap_lock);
3718         session->s_cap_reconnect = 0;
3719         spin_unlock(&session->s_cap_lock);
3720
3721         if (err < 0)
3722                 goto fail;
3723
3724         /* check if all realms can be encoded into current message */
3725         if (mdsc->num_snap_realms) {
3726                 size_t total_len =
3727                         recon_state.pagelist->length +
3728                         mdsc->num_snap_realms *
3729                         sizeof(struct ceph_mds_snaprealm_reconnect);
3730                 if (recon_state.msg_version >= 4) {
3731                         /* number of realms */
3732                         total_len += sizeof(u32);
3733                         /* version, compat_version and struct_len */
3734                         total_len += mdsc->num_snap_realms *
3735                                      (2 * sizeof(u8) + sizeof(u32));
3736                 }
3737                 if (total_len > RECONNECT_MAX_SIZE) {
3738                         if (!recon_state.allow_multi) {
3739                                 err = -ENOSPC;
3740                                 goto fail;
3741                         }
3742                         if (recon_state.nr_caps) {
3743                                 err = send_reconnect_partial(&recon_state);
3744                                 if (err)
3745                                         goto fail;
3746                         }
3747                         recon_state.msg_version = 5;
3748                 }
3749         }
3750
3751         err = encode_snap_realms(mdsc, &recon_state);
3752         if (err < 0)
3753                 goto fail;
3754
3755         if (recon_state.msg_version >= 5) {
3756                 err = ceph_pagelist_encode_8(recon_state.pagelist, 0);
3757                 if (err < 0)
3758                         goto fail;
3759         }
3760
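             /*
              * Go back and fill in the placeholder count(s) at the head of
              * the pagelist: nr_caps in the first __le32 and, for v4+
              * encodings, nr_realms in the second.
              */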
3761         if (recon_state.nr_caps || recon_state.nr_realms) {
3762                 struct page *page =
3763                         list_first_entry(&recon_state.pagelist->head,
3764                                         struct page, lru);
3765                 __le32 *addr = kmap_atomic(page);
3766                 if (recon_state.nr_caps) {
3767                         WARN_ON(recon_state.nr_realms != mdsc->num_snap_realms);
3768                         *addr = cpu_to_le32(recon_state.nr_caps);
3769                 } else if (recon_state.msg_version >= 4) {
3770                         *(addr + 1) = cpu_to_le32(recon_state.nr_realms);
3771                 }
3772                 kunmap_atomic(addr);
3773         }
3774
3775         reply->hdr.version = cpu_to_le16(recon_state.msg_version);
3776         if (recon_state.msg_version >= 4)
3777                 reply->hdr.compat_version = cpu_to_le16(4);
3778
3779         reply->hdr.data_len = cpu_to_le32(recon_state.pagelist->length);
3780         ceph_msg_data_add_pagelist(reply, recon_state.pagelist);
3781
3782         ceph_con_send(&session->s_con, reply);
3783
3784         mutex_unlock(&session->s_mutex);
3785
3786         mutex_lock(&mdsc->mutex);
3787         __wake_requests(mdsc, &session->s_waiting);
3788         mutex_unlock(&mdsc->mutex);
3789
3790         up_read(&mdsc->snap_rwsem);
3791         ceph_pagelist_release(recon_state.pagelist);
3792         return;
3793
3794 fail:
3795         ceph_msg_put(reply);
3796         up_read(&mdsc->snap_rwsem);
3797         mutex_unlock(&session->s_mutex);
3798 fail_nomsg:
3799         ceph_pagelist_release(recon_state.pagelist);
3800 fail_nopagelist:
3801         pr_err("error %d preparing reconnect for mds%d\n", err, mds);
3802         return;
3803 }
3804
3805
3806 /*
3807  * compare old and new mdsmaps, kicking requests
3808  * and closing out old connections as necessary
3809  *
3810  * called under mdsc->mutex.
3811  */
3812 static void check_new_map(struct ceph_mds_client *mdsc,
3813                           struct ceph_mdsmap *newmap,
3814                           struct ceph_mdsmap *oldmap)
3815 {
3816         int i;
3817         int oldstate, newstate;
3818         struct ceph_mds_session *s;
3819
3820         dout("check_new_map new %u old %u\n",
3821              newmap->m_epoch, oldmap->m_epoch);
3822
3823         for (i = 0; i < oldmap->possible_max_rank && i < mdsc->max_sessions; i++) {
3824                 if (!mdsc->sessions[i])
3825                         continue;
3826                 s = mdsc->sessions[i];
3827                 oldstate = ceph_mdsmap_get_state(oldmap, i);
3828                 newstate = ceph_mdsmap_get_state(newmap, i);
3829
3830                 dout("check_new_map mds%d state %s%s -> %s%s (session %s)\n",
3831                      i, ceph_mds_state_name(oldstate),
3832                      ceph_mdsmap_is_laggy(oldmap, i) ? " (laggy)" : "",
3833                      ceph_mds_state_name(newstate),
3834                      ceph_mdsmap_is_laggy(newmap, i) ? " (laggy)" : "",
3835                      ceph_session_state_name(s->s_state));
3836
3837                 if (i >= newmap->possible_max_rank) {
3838                         /* force close session for stopped mds */
3839                         ceph_get_mds_session(s);
3840                         __unregister_session(mdsc, s);
3841                         __wake_requests(mdsc, &s->s_waiting);
3842                         mutex_unlock(&mdsc->mutex);
3843
3844                         mutex_lock(&s->s_mutex);
3845                         cleanup_session_requests(mdsc, s);
3846                         remove_session_caps(s);
3847                         mutex_unlock(&s->s_mutex);
3848
3849                         ceph_put_mds_session(s);
3850
3851                         mutex_lock(&mdsc->mutex);
3852                         kick_requests(mdsc, i);
3853                         continue;
3854                 }
3855
3856                 if (memcmp(ceph_mdsmap_get_addr(oldmap, i),
3857                            ceph_mdsmap_get_addr(newmap, i),
3858                            sizeof(struct ceph_entity_addr))) {
3859                         /* just close it */
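                             /*
                              * Drop mdsc->mutex so the session mutex can be
                              * taken first, preserving the s_mutex ->
                              * mdsc->mutex lock ordering.
                              */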
3860                         mutex_unlock(&mdsc->mutex);
3861                         mutex_lock(&s->s_mutex);
3862                         mutex_lock(&mdsc->mutex);
3863                         ceph_con_close(&s->s_con);
3864                         mutex_unlock(&s->s_mutex);
3865                         s->s_state = CEPH_MDS_SESSION_RESTARTING;
3866                 } else if (oldstate == newstate) {
3867                         continue;  /* nothing new with this mds */
3868                 }
3869
3870                 /*
3871                  * send reconnect?
3872                  */
3873                 if (s->s_state == CEPH_MDS_SESSION_RESTARTING &&
3874                     newstate >= CEPH_MDS_STATE_RECONNECT) {
3875                         mutex_unlock(&mdsc->mutex);
3876                         send_mds_reconnect(mdsc, s);
3877                         mutex_lock(&mdsc->mutex);
3878                 }
3879
3880                 /*
3881                  * kick requests on any mds that has gone active.
3882                  */
3883                 if (oldstate < CEPH_MDS_STATE_ACTIVE &&
3884                     newstate >= CEPH_MDS_STATE_ACTIVE) {
3885                         if (oldstate != CEPH_MDS_STATE_CREATING &&
3886                             oldstate != CEPH_MDS_STATE_STARTING)
3887                                 pr_info("mds%d recovery completed\n", s->s_mds);
3888                         kick_requests(mdsc, i);
3889                         ceph_kick_flushing_caps(mdsc, s);
3890                         wake_up_session_caps(s, RECONNECT);
3891                 }
3892         }
3893
3894         for (i = 0; i < newmap->possible_max_rank && i < mdsc->max_sessions; i++) {
3895                 s = mdsc->sessions[i];
3896                 if (!s)
3897                         continue;
3898                 if (!ceph_mdsmap_is_laggy(newmap, i))
3899                         continue;
3900                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
3901                     s->s_state == CEPH_MDS_SESSION_HUNG ||
3902                     s->s_state == CEPH_MDS_SESSION_CLOSING) {
3903                         dout(" connecting to export targets of laggy mds%d\n",
3904                              i);
3905                         __open_export_target_sessions(mdsc, s);
3906                 }
3907         }
3908 }
3909
3910
3911
3912 /*
3913  * leases
3914  */
3915
3916 /*
3917  * caller must hold session s_mutex, dentry->d_lock
3918  */
3919 void __ceph_mdsc_drop_dentry_lease(struct dentry *dentry)
3920 {
3921         struct ceph_dentry_info *di = ceph_dentry(dentry);
3922
3923         ceph_put_mds_session(di->lease_session);
3924         di->lease_session = NULL;
3925 }
3926
3927 static void handle_lease(struct ceph_mds_client *mdsc,
3928                          struct ceph_mds_session *session,
3929                          struct ceph_msg *msg)
3930 {
3931         struct super_block *sb = mdsc->fsc->sb;
3932         struct inode *inode;
3933         struct dentry *parent, *dentry;
3934         struct ceph_dentry_info *di;
3935         int mds = session->s_mds;
3936         struct ceph_mds_lease *h = msg->front.iov_base;
3937         u32 seq;
3938         struct ceph_vino vino;
3939         struct qstr dname;
3940         int release = 0;
3941
3942         dout("handle_lease from mds%d\n", mds);
3943
3944         /* decode */
3945         if (msg->front.iov_len < sizeof(*h) + sizeof(u32))
3946                 goto bad;
3947         vino.ino = le64_to_cpu(h->ino);
3948         vino.snap = CEPH_NOSNAP;
3949         seq = le32_to_cpu(h->seq);
3950         dname.len = get_unaligned_le32(h + 1);
3951         if (msg->front.iov_len < sizeof(*h) + sizeof(u32) + dname.len)
3952                 goto bad;
3953         dname.name = (void *)(h + 1) + sizeof(u32);
3954
3955         /* lookup inode */
3956         inode = ceph_find_inode(sb, vino);
3957         dout("handle_lease %s, ino %llx %p %.*s\n",
3958              ceph_lease_op_name(h->action), vino.ino, inode,
3959              dname.len, dname.name);
3960
3961         mutex_lock(&session->s_mutex);
3962         session->s_seq++;
3963
3964         if (!inode) {
3965                 dout("handle_lease no inode %llx\n", vino.ino);
3966                 goto release;
3967         }
3968
3969         /* dentry */
3970         parent = d_find_alias(inode);
3971         if (!parent) {
3972                 dout("no parent dentry on inode %p\n", inode);
3973                 WARN_ON(1);
3974                 goto release;  /* hrm... */
3975         }
3976         dname.hash = full_name_hash(parent, dname.name, dname.len);
3977         dentry = d_lookup(parent, &dname);
3978         dput(parent);
3979         if (!dentry)
3980                 goto release;
3981
3982         spin_lock(&dentry->d_lock);
3983         di = ceph_dentry(dentry);
3984         switch (h->action) {
3985         case CEPH_MDS_LEASE_REVOKE:
3986                 if (di->lease_session == session) {
3987                         if (ceph_seq_cmp(di->lease_seq, seq) > 0)
3988                                 h->seq = cpu_to_le32(di->lease_seq);
3989                         __ceph_mdsc_drop_dentry_lease(dentry);
3990                 }
3991                 release = 1;
3992                 break;
3993
3994         case CEPH_MDS_LEASE_RENEW:
3995                 if (di->lease_session == session &&
3996                     di->lease_gen == session->s_cap_gen &&
3997                     di->lease_renew_from &&
3998                     di->lease_renew_after == 0) {
3999                         unsigned long duration =
4000                                 msecs_to_jiffies(le32_to_cpu(h->duration_ms));
4001
4002                         di->lease_seq = seq;
4003                         di->time = di->lease_renew_from + duration;
4004                         di->lease_renew_after = di->lease_renew_from +
4005                                 (duration >> 1);
4006                         di->lease_renew_from = 0;
4007                 }
4008                 break;
4009         }
4010         spin_unlock(&dentry->d_lock);
4011         dput(dentry);
4012
4013         if (!release)
4014                 goto out;
4015
4016 release:
4017         /* let's just reuse the same message */
4018         h->action = CEPH_MDS_LEASE_REVOKE_ACK;
4019         ceph_msg_get(msg);
4020         ceph_con_send(&session->s_con, msg);
4021
4022 out:
4023         mutex_unlock(&session->s_mutex);
4024         /* avoid calling iput_final() in mds dispatch threads */
4025         ceph_async_iput(inode);
4026         return;
4027
4028 bad:
4029         pr_err("corrupt lease message\n");
4030         ceph_msg_dump(msg);
4031 }
4032
4033 void ceph_mdsc_lease_send_msg(struct ceph_mds_session *session,
4034                               struct dentry *dentry, char action,
4035                               u32 seq)
4036 {
4037         struct ceph_msg *msg;
4038         struct ceph_mds_lease *lease;
4039         struct inode *dir;
4040         int len = sizeof(*lease) + sizeof(u32) + NAME_MAX;
4041
4042         dout("lease_send_msg dentry %p %s to mds%d\n",
4043              dentry, ceph_lease_op_name(action), session->s_mds);
4044
4045         msg = ceph_msg_new(CEPH_MSG_CLIENT_LEASE, len, GFP_NOFS, false);
4046         if (!msg)
4047                 return;
4048         lease = msg->front.iov_base;
4049         lease->action = action;
4050         lease->seq = cpu_to_le32(seq);
4051
4052         spin_lock(&dentry->d_lock);
4053         dir = d_inode(dentry->d_parent);
4054         lease->ino = cpu_to_le64(ceph_ino(dir));
4055         lease->first = lease->last = cpu_to_le64(ceph_snap(dir));
4056
4057         put_unaligned_le32(dentry->d_name.len, lease + 1);
4058         memcpy((void *)(lease + 1) + 4,
4059                dentry->d_name.name, dentry->d_name.len);
4060         spin_unlock(&dentry->d_lock);
4061         /*
4062          * if this is a preemptive lease RELEASE, no need to
4063          * flush request stream, since the actual request will
4064          * soon follow.
4065          */
4066         msg->more_to_follow = (action == CEPH_MDS_LEASE_RELEASE);
4067
4068         ceph_con_send(&session->s_con, msg);
4069 }
4070
4071 /*
4072  * Lock and unlock each session's mutex in turn, to wait for ongoing
4073  * session activity to finish.
4073  */
4074 static void lock_unlock_sessions(struct ceph_mds_client *mdsc)
4075 {
4076         int i;
4077
4078         mutex_lock(&mdsc->mutex);
4079         for (i = 0; i < mdsc->max_sessions; i++) {
4080                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4081                 if (!s)
4082                         continue;
4083                 mutex_unlock(&mdsc->mutex);
4084                 mutex_lock(&s->s_mutex);
4085                 mutex_unlock(&s->s_mutex);
4086                 ceph_put_mds_session(s);
4087                 mutex_lock(&mdsc->mutex);
4088         }
4089         mutex_unlock(&mdsc->mutex);
4090 }
4091
4092 static void maybe_recover_session(struct ceph_mds_client *mdsc)
4093 {
4094         struct ceph_fs_client *fsc = mdsc->fsc;
4095
4096         if (!ceph_test_mount_opt(fsc, CLEANRECOVER))
4097                 return;
4098
4099         if (READ_ONCE(fsc->mount_state) != CEPH_MOUNT_MOUNTED)
4100                 return;
4101
4102         if (!READ_ONCE(fsc->blacklisted))
4103                 return;
4104
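             /* rate-limit: attempt auto reconnect at most every 30 minutes */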
4105         if (fsc->last_auto_reconnect &&
4106             time_before(jiffies, fsc->last_auto_reconnect + HZ * 60 * 30))
4107                 return;
4108
4109         pr_info("auto reconnect after blacklisted\n");
4110         fsc->last_auto_reconnect = jiffies;
4111         ceph_force_reconnect(fsc->sb);
4112 }
4113
4114 /*
4115  * delayed work -- periodically trim expired leases, renew caps with mds
4116  */
4117 static void schedule_delayed(struct ceph_mds_client *mdsc)
4118 {
4119         int delay = 5;
4120         unsigned hz = round_jiffies_relative(HZ * delay);
4121         schedule_delayed_work(&mdsc->delayed_work, hz);
4122 }
4123
4124 static void delayed_work(struct work_struct *work)
4125 {
4126         int i;
4127         struct ceph_mds_client *mdsc =
4128                 container_of(work, struct ceph_mds_client, delayed_work.work);
4129         int renew_interval;
4130         int renew_caps;
4131
4132         dout("mdsc delayed_work\n");
4133
4134         mutex_lock(&mdsc->mutex);
4135         renew_interval = mdsc->mdsmap->m_session_timeout >> 2;
4136         renew_caps = time_after_eq(jiffies, HZ*renew_interval +
4137                                    mdsc->last_renew_caps);
4138         if (renew_caps)
4139                 mdsc->last_renew_caps = jiffies;
4140
4141         for (i = 0; i < mdsc->max_sessions; i++) {
4142                 struct ceph_mds_session *s = __ceph_lookup_mds_session(mdsc, i);
4143                 if (!s)
4144                         continue;
4145                 if (s->s_state == CEPH_MDS_SESSION_CLOSING) {
4146                         dout("resending session close request for mds%d\n",
4147                              s->s_mds);
4148                         request_close_session(mdsc, s);
4149                         ceph_put_mds_session(s);
4150                         continue;
4151                 }
4152                 if (s->s_ttl && time_after(jiffies, s->s_ttl)) {
4153                         if (s->s_state == CEPH_MDS_SESSION_OPEN) {
4154                                 s->s_state = CEPH_MDS_SESSION_HUNG;
4155                                 pr_info("mds%d hung\n", s->s_mds);
4156                         }
4157                 }
4158                 if (s->s_state == CEPH_MDS_SESSION_NEW ||
4159                     s->s_state == CEPH_MDS_SESSION_RESTARTING ||
4160                     s->s_state == CEPH_MDS_SESSION_REJECTED) {
4161                         /* this mds has failed or is recovering, just wait */
4162                         ceph_put_mds_session(s);
4163                         continue;
4164                 }
4165                 mutex_unlock(&mdsc->mutex);
4166
4167                 mutex_lock(&s->s_mutex);
4168                 if (renew_caps)
4169                         send_renew_caps(mdsc, s);
4170                 else
4171                         ceph_con_keepalive(&s->s_con);
4172                 if (s->s_state == CEPH_MDS_SESSION_OPEN ||
4173                     s->s_state == CEPH_MDS_SESSION_HUNG)
4174                         ceph_send_cap_releases(mdsc, s);
4175                 mutex_unlock(&s->s_mutex);
4176                 ceph_put_mds_session(s);
4177
4178                 mutex_lock(&mdsc->mutex);
4179         }
4180         mutex_unlock(&mdsc->mutex);
4181
4182         ceph_check_delayed_caps(mdsc);
4183
4184         ceph_queue_cap_reclaim_work(mdsc);
4185
4186         ceph_trim_snapid_map(mdsc);
4187
4188         maybe_recover_session(mdsc);
4189
4190         schedule_delayed(mdsc);
4191 }
4192
4193 int ceph_mdsc_init(struct ceph_fs_client *fsc)
4195 {
4196         struct ceph_mds_client *mdsc;
4197
4198         mdsc = kzalloc(sizeof(struct ceph_mds_client), GFP_NOFS);
4199         if (!mdsc)
4200                 return -ENOMEM;
4201         mdsc->fsc = fsc;
4202         mutex_init(&mdsc->mutex);
4203         mdsc->mdsmap = kzalloc(sizeof(*mdsc->mdsmap), GFP_NOFS);
4204         if (!mdsc->mdsmap) {
4205                 kfree(mdsc);
4206                 return -ENOMEM;
4207         }
4208
4209         fsc->mdsc = mdsc;
4210         init_completion(&mdsc->safe_umount_waiters);
4211         init_waitqueue_head(&mdsc->session_close_wq);
4212         INIT_LIST_HEAD(&mdsc->waiting_for_map);
4213         mdsc->sessions = NULL;
4214         atomic_set(&mdsc->num_sessions, 0);
4215         mdsc->max_sessions = 0;
4216         mdsc->stopping = 0;
4217         atomic64_set(&mdsc->quotarealms_count, 0);
4218         mdsc->quotarealms_inodes = RB_ROOT;
4219         mutex_init(&mdsc->quotarealms_inodes_mutex);
4220         mdsc->last_snap_seq = 0;
4221         init_rwsem(&mdsc->snap_rwsem);
4222         mdsc->snap_realms = RB_ROOT;
4223         INIT_LIST_HEAD(&mdsc->snap_empty);
4224         mdsc->num_snap_realms = 0;
4225         spin_lock_init(&mdsc->snap_empty_lock);
4226         mdsc->last_tid = 0;
4227         mdsc->oldest_tid = 0;
4228         mdsc->request_tree = RB_ROOT;
4229         INIT_DELAYED_WORK(&mdsc->delayed_work, delayed_work);
4230         mdsc->last_renew_caps = jiffies;
4231         INIT_LIST_HEAD(&mdsc->cap_delay_list);
4232         INIT_LIST_HEAD(&mdsc->cap_wait_list);
4233         spin_lock_init(&mdsc->cap_delay_lock);
4234         INIT_LIST_HEAD(&mdsc->snap_flush_list);
4235         spin_lock_init(&mdsc->snap_flush_lock);
4236         mdsc->last_cap_flush_tid = 1;
4237         INIT_LIST_HEAD(&mdsc->cap_flush_list);
4238         INIT_LIST_HEAD(&mdsc->cap_dirty);
4239         INIT_LIST_HEAD(&mdsc->cap_dirty_migrating);
4240         mdsc->num_cap_flushing = 0;
4241         spin_lock_init(&mdsc->cap_dirty_lock);
4242         init_waitqueue_head(&mdsc->cap_flushing_wq);
4243         INIT_WORK(&mdsc->cap_reclaim_work, ceph_cap_reclaim_work);
4244         atomic_set(&mdsc->cap_reclaim_pending, 0);
4245
4246         spin_lock_init(&mdsc->dentry_list_lock);
4247         INIT_LIST_HEAD(&mdsc->dentry_leases);
4248         INIT_LIST_HEAD(&mdsc->dentry_dir_leases);
4249
4250         ceph_caps_init(mdsc);
4251         ceph_adjust_caps_max_min(mdsc, fsc->mount_options);
4252
4253         spin_lock_init(&mdsc->snapid_map_lock);
4254         mdsc->snapid_map_tree = RB_ROOT;
4255         INIT_LIST_HEAD(&mdsc->snapid_map_lru);
4256
4257         init_rwsem(&mdsc->pool_perm_rwsem);
4258         mdsc->pool_perm_tree = RB_ROOT;
4259
4260         strscpy(mdsc->nodename, utsname()->nodename,
4261                 sizeof(mdsc->nodename));
4262         return 0;
4263 }
4264
4265 /*
4266  * Wait for safe replies on open mds requests.  If we time out, drop
4267  * all requests from the tree to avoid dangling dentry refs.
4268  */
4269 static void wait_requests(struct ceph_mds_client *mdsc)
4270 {
4271         struct ceph_options *opts = mdsc->fsc->client->options;
4272         struct ceph_mds_request *req;
4273
4274         mutex_lock(&mdsc->mutex);
4275         if (__get_oldest_req(mdsc)) {
4276                 mutex_unlock(&mdsc->mutex);
4277
4278                 dout("wait_requests waiting for requests\n");
4279                 wait_for_completion_timeout(&mdsc->safe_umount_waiters,
4280                                     ceph_timeout_jiffies(opts->mount_timeout));
4281
4282                 /* tear down remaining requests */
4283                 mutex_lock(&mdsc->mutex);
4284                 while ((req = __get_oldest_req(mdsc))) {
4285                         dout("wait_requests timed out on tid %llu\n",
4286                              req->r_tid);
4287                         list_del_init(&req->r_wait);
4288                         __unregister_request(mdsc, req);
4289                 }
4290         }
4291         mutex_unlock(&mdsc->mutex);
4292         dout("wait_requests done\n");
4293 }
4294
4295 /*
4296  * called before the mount goes read-only, and before dentries are torn down.
4297  * (hmm, does this still race with new lookups?)
4298  */
4299 void ceph_mdsc_pre_umount(struct ceph_mds_client *mdsc)
4300 {
4301         dout("pre_umount\n");
4302         mdsc->stopping = 1;
4303
4304         lock_unlock_sessions(mdsc);
4305         ceph_flush_dirty_caps(mdsc);
4306         wait_requests(mdsc);
4307
4308         /*
4309          * wait for reply handlers to drop their request refs and
4310          * their inode/dcache refs
4311          */
4312         ceph_msgr_flush();
4313
4314         ceph_cleanup_quotarealms_inodes(mdsc);
4315 }
4316
4317 /*
4318  * wait for all write mds requests to flush.
4319  */
4320 static void wait_unsafe_requests(struct ceph_mds_client *mdsc, u64 want_tid)
4321 {
4322         struct ceph_mds_request *req = NULL, *nextreq;
4323         struct rb_node *n;
4324
4325         mutex_lock(&mdsc->mutex);
4326         dout("wait_unsafe_requests want %llu\n", want_tid);
4327 restart:
4328         req = __get_oldest_req(mdsc);
4329         while (req && req->r_tid <= want_tid) {
4330                 /* find next request */
4331                 n = rb_next(&req->r_node);
4332                 if (n)
4333                         nextreq = rb_entry(n, struct ceph_mds_request, r_node);
4334                 else
4335                         nextreq = NULL;
4336                 if (req->r_op != CEPH_MDS_OP_SETFILELOCK &&
4337                     (req->r_op & CEPH_MDS_OP_WRITE)) {
4338                         /* write op */
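                             /*
                              * Pin req (and nextreq, if any) so neither can
                              * be freed while we drop mdsc->mutex to wait
                              * for the safe reply.
                              */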
4339                         ceph_mdsc_get_request(req);
4340                         if (nextreq)
4341                                 ceph_mdsc_get_request(nextreq);
4342                         mutex_unlock(&mdsc->mutex);
4343                         dout("wait_unsafe_requests wait on %llu (want %llu)\n",
4344                              req->r_tid, want_tid);
4345                         wait_for_completion(&req->r_safe_completion);
4346                         mutex_lock(&mdsc->mutex);
4347                         ceph_mdsc_put_request(req);
4348                         if (!nextreq)
4349                                 break;  /* no next request existed, so we're done */
4350                         if (RB_EMPTY_NODE(&nextreq->r_node)) {
4351                                 /* next request was removed from tree */
4352                                 ceph_mdsc_put_request(nextreq);
4353                                 goto restart;
4354                         }
4355                         ceph_mdsc_put_request(nextreq);  /* won't go away */
4356                 }
4357                 req = nextreq;
4358         }
4359         mutex_unlock(&mdsc->mutex);
4360         dout("wait_unsafe_requests done\n");
4361 }
4362
4363 void ceph_mdsc_sync(struct ceph_mds_client *mdsc)
4364 {
4365         u64 want_tid, want_flush;
4366
4367         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4368                 return;
4369
4370         dout("sync\n");
4371         mutex_lock(&mdsc->mutex);
4372         want_tid = mdsc->last_tid;
4373         mutex_unlock(&mdsc->mutex);
4374
4375         ceph_flush_dirty_caps(mdsc);
4376         spin_lock(&mdsc->cap_dirty_lock);
4377         want_flush = mdsc->last_cap_flush_tid;
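             /*
              * Tag the last pending cap flush so that its completion wakes
              * anyone waiting in wait_caps_flush() below.
              */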
4378         if (!list_empty(&mdsc->cap_flush_list)) {
4379                 struct ceph_cap_flush *cf =
4380                         list_last_entry(&mdsc->cap_flush_list,
4381                                         struct ceph_cap_flush, g_list);
4382                 cf->wake = true;
4383         }
4384         spin_unlock(&mdsc->cap_dirty_lock);
4385
4386         dout("sync want tid %llu flush_seq %llu\n",
4387              want_tid, want_flush);
4388
4389         wait_unsafe_requests(mdsc, want_tid);
4390         wait_caps_flush(mdsc, want_flush);
4391 }
4392
4393 /*
4394  * true if all sessions are closed, or we force unmount
4395  */
4396 static bool done_closing_sessions(struct ceph_mds_client *mdsc, int skipped)
4397 {
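             /*
              * sessions that could not be cleanly closed still count toward
              * num_sessions, so allow 'skipped' of them to remain.
              */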
4398         if (READ_ONCE(mdsc->fsc->mount_state) == CEPH_MOUNT_SHUTDOWN)
4399                 return true;
4400         return atomic_read(&mdsc->num_sessions) <= skipped;
4401 }
4402
4403 /*
4404  * called after the superblock has been made read-only.
4405  */
4406 void ceph_mdsc_close_sessions(struct ceph_mds_client *mdsc)
4407 {
4408         struct ceph_options *opts = mdsc->fsc->client->options;
4409         struct ceph_mds_session *session;
4410         int i;
4411         int skipped = 0;
4412
4413         dout("close_sessions\n");
4414
4415         /* close sessions */
4416         mutex_lock(&mdsc->mutex);
4417         for (i = 0; i < mdsc->max_sessions; i++) {
4418                 session = __ceph_lookup_mds_session(mdsc, i);
4419                 if (!session)
4420                         continue;
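                     /* drop mdsc->mutex before taking s_mutex to respect the lock ordering */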
4421                 mutex_unlock(&mdsc->mutex);
4422                 mutex_lock(&session->s_mutex);
4423                 if (__close_session(mdsc, session) <= 0)
4424                         skipped++;
4425                 mutex_unlock(&session->s_mutex);
4426                 ceph_put_mds_session(session);
4427                 mutex_lock(&mdsc->mutex);
4428         }
4429         mutex_unlock(&mdsc->mutex);
4430
4431         dout("waiting for sessions to close\n");
4432         wait_event_timeout(mdsc->session_close_wq,
4433                            done_closing_sessions(mdsc, skipped),
4434                            ceph_timeout_jiffies(opts->mount_timeout));
4435
4436         /* tear down remaining sessions */
4437         mutex_lock(&mdsc->mutex);
4438         for (i = 0; i < mdsc->max_sessions; i++) {
4439                 if (mdsc->sessions[i]) {
4440                         session = ceph_get_mds_session(mdsc->sessions[i]);
4441                         __unregister_session(mdsc, session);
4442                         mutex_unlock(&mdsc->mutex);
4443                         mutex_lock(&session->s_mutex);
4444                         remove_session_caps(session);
4445                         mutex_unlock(&session->s_mutex);
4446                         ceph_put_mds_session(session);
4447                         mutex_lock(&mdsc->mutex);
4448                 }
4449         }
4450         WARN_ON(!list_empty(&mdsc->cap_delay_list));
4451         mutex_unlock(&mdsc->mutex);
4452
4453         ceph_cleanup_snapid_map(mdsc);
4454         ceph_cleanup_empty_realms(mdsc);
4455
4456         cancel_work_sync(&mdsc->cap_reclaim_work);
4457         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4458
4459         dout("stopped\n");
4460 }
4461
4462 void ceph_mdsc_force_umount(struct ceph_mds_client *mdsc)
4463 {
4464         struct ceph_mds_session *session;
4465         int mds;
4466
4467         dout("force umount\n");
4468
4469         mutex_lock(&mdsc->mutex);
4470         for (mds = 0; mds < mdsc->max_sessions; mds++) {
4471                 session = __ceph_lookup_mds_session(mdsc, mds);
4472                 if (!session)
4473                         continue;
4474
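                     /* a rejected session will never recover; unregister it immediately */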
4475                 if (session->s_state == CEPH_MDS_SESSION_REJECTED)
4476                         __unregister_session(mdsc, session);
4477                 __wake_requests(mdsc, &session->s_waiting);
4478                 mutex_unlock(&mdsc->mutex);
4479
4480                 mutex_lock(&session->s_mutex);
4481                 __close_session(mdsc, session);
4482                 if (session->s_state == CEPH_MDS_SESSION_CLOSING) {
4483                         cleanup_session_requests(mdsc, session);
4484                         remove_session_caps(session);
4485                 }
4486                 mutex_unlock(&session->s_mutex);
4487                 ceph_put_mds_session(session);
4488
4489                 mutex_lock(&mdsc->mutex);
4490                 kick_requests(mdsc, mds);
4491         }
4492         __wake_requests(mdsc, &mdsc->waiting_for_map);
4493         mutex_unlock(&mdsc->mutex);
4494 }
4495
4496 static void ceph_mdsc_stop(struct ceph_mds_client *mdsc)
4497 {
4498         dout("stop\n");
4499         cancel_delayed_work_sync(&mdsc->delayed_work); /* cancel timer */
4500         if (mdsc->mdsmap)
4501                 ceph_mdsmap_destroy(mdsc->mdsmap);
4502         kfree(mdsc->sessions);
4503         ceph_caps_finalize(mdsc);
4504         ceph_pool_perm_destroy(mdsc);
4505 }
4506
4507 void ceph_mdsc_destroy(struct ceph_fs_client *fsc)
4508 {
4509         struct ceph_mds_client *mdsc = fsc->mdsc;
4510         dout("mdsc_destroy %p\n", mdsc);
4511
4512         if (!mdsc)
4513                 return;
4514
4515         /* flush out any connection work with references to us */
4516         ceph_msgr_flush();
4517
4518         ceph_mdsc_stop(mdsc);
4519
4520         fsc->mdsc = NULL;
4521         kfree(mdsc);
4522         dout("mdsc_destroy %p done\n", mdsc);
4523 }
4524
4525 void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4526 {
4527         struct ceph_fs_client *fsc = mdsc->fsc;
4528         const char *mds_namespace = fsc->mount_options->mds_namespace;
4529         void *p = msg->front.iov_base;
4530         void *end = p + msg->front.iov_len;
4531         u32 epoch;
4532         u32 map_len;
4533         u32 num_fs;
4534         u32 mount_fscid = (u32)-1;
4535         u8 struct_v, struct_cv;
4536         int err = -EINVAL;
4537
4538         ceph_decode_need(&p, end, sizeof(u32), bad);
4539         epoch = ceph_decode_32(&p);
4540
4541         dout("handle_fsmap epoch %u\n", epoch);
4542
4543         ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4544         struct_v = ceph_decode_8(&p);
4545         struct_cv = ceph_decode_8(&p);
4546         map_len = ceph_decode_32(&p);
4547
4548         ceph_decode_need(&p, end, sizeof(u32) * 3, bad);
4549         p += sizeof(u32) * 2; /* skip epoch and legacy_client_fscid */
4550
4551         num_fs = ceph_decode_32(&p);
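             /*
              * Scan the per-filesystem entries for a name matching the
              * mds_namespace mount option and note its fscid.
              */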
4552         while (num_fs-- > 0) {
4553                 void *info_p, *info_end;
4554                 u32 info_len;
4555                 u8 info_v, info_cv;
4556                 u32 fscid, namelen;
4557
4558                 ceph_decode_need(&p, end, 2 + sizeof(u32), bad);
4559                 info_v = ceph_decode_8(&p);
4560                 info_cv = ceph_decode_8(&p);
4561                 info_len = ceph_decode_32(&p);
4562                 ceph_decode_need(&p, end, info_len, bad);
4563                 info_p = p;
4564                 info_end = p + info_len;
4565                 p = info_end;
4566
4567                 ceph_decode_need(&info_p, info_end, sizeof(u32) * 2, bad);
4568                 fscid = ceph_decode_32(&info_p);
4569                 namelen = ceph_decode_32(&info_p);
4570                 ceph_decode_need(&info_p, info_end, namelen, bad);
4571
4572                 if (mds_namespace &&
4573                     strlen(mds_namespace) == namelen &&
4574                     !strncmp(mds_namespace, (char *)info_p, namelen)) {
4575                         mount_fscid = fscid;
4576                         break;
4577                 }
4578         }
4579
4580         ceph_monc_got_map(&fsc->client->monc, CEPH_SUB_FSMAP, epoch);
4581         if (mount_fscid != (u32)-1) {
4582                 fsc->client->monc.fs_cluster_id = mount_fscid;
4583                 ceph_monc_want_map(&fsc->client->monc, CEPH_SUB_MDSMAP,
4584                                    0, true);
4585                 ceph_monc_renew_subs(&fsc->client->monc);
4586         } else {
4587                 err = -ENOENT;
4588                 goto err_out;
4589         }
4590         return;
4591
4592 bad:
4593         pr_err("error decoding fsmap\n");
4594 err_out:
4595         mutex_lock(&mdsc->mutex);
4596         mdsc->mdsmap_err = err;
4597         __wake_requests(mdsc, &mdsc->waiting_for_map);
4598         mutex_unlock(&mdsc->mutex);
4599 }
4600
4601 /*
4602  * handle mds map update.
4603  */
4604 void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
4605 {
4606         u32 epoch;
4607         u32 maplen;
4608         void *p = msg->front.iov_base;
4609         void *end = p + msg->front.iov_len;
4610         struct ceph_mdsmap *newmap, *oldmap;
4611         struct ceph_fsid fsid;
4612         int err = -EINVAL;
4613
4614         ceph_decode_need(&p, end, sizeof(fsid)+2*sizeof(u32), bad);
4615         ceph_decode_copy(&p, &fsid, sizeof(fsid));
4616         if (ceph_check_fsid(mdsc->fsc->client, &fsid) < 0)
4617                 return;
4618         epoch = ceph_decode_32(&p);
4619         maplen = ceph_decode_32(&p);
4620         dout("handle_map epoch %u len %d\n", epoch, (int)maplen);
4621
4622         /* do we need it? */
4623         mutex_lock(&mdsc->mutex);
4624         if (mdsc->mdsmap && epoch <= mdsc->mdsmap->m_epoch) {
4625                 dout("handle_map epoch %u <= our %u\n",
4626                      epoch, mdsc->mdsmap->m_epoch);
4627                 mutex_unlock(&mdsc->mutex);
4628                 return;
4629         }
4630
4631         newmap = ceph_mdsmap_decode(&p, end);
4632         if (IS_ERR(newmap)) {
4633                 err = PTR_ERR(newmap);
4634                 goto bad_unlock;
4635         }
4636
4637         /* swap into place */
4638         if (mdsc->mdsmap) {
4639                 oldmap = mdsc->mdsmap;
4640                 mdsc->mdsmap = newmap;
4641                 check_new_map(mdsc, newmap, oldmap);
4642                 ceph_mdsmap_destroy(oldmap);
4643         } else {
4644                 mdsc->mdsmap = newmap;  /* first mds map */
4645         }
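             /* clamp the MDS-advertised max file size to what the VFS supports */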
4646         mdsc->fsc->max_file_size = min((loff_t)mdsc->mdsmap->m_max_file_size,
4647                                         MAX_LFS_FILESIZE);
4648
4649         __wake_requests(mdsc, &mdsc->waiting_for_map);
4650         ceph_monc_got_map(&mdsc->fsc->client->monc, CEPH_SUB_MDSMAP,
4651                           mdsc->mdsmap->m_epoch);
4652
4653         mutex_unlock(&mdsc->mutex);
4654         schedule_delayed(mdsc);
4655         return;
4656
4657 bad_unlock:
4658         mutex_unlock(&mdsc->mutex);
4659 bad:
4660         pr_err("error decoding mdsmap %d\n", err);
4661         return;
4662 }
4663
4664 static struct ceph_connection *con_get(struct ceph_connection *con)
4665 {
4666         struct ceph_mds_session *s = con->private;
4667
4668         if (ceph_get_mds_session(s))
4669                 return con;
4670         return NULL;
4671 }
4672
4673 static void con_put(struct ceph_connection *con)
4674 {
4675         struct ceph_mds_session *s = con->private;
4676
4677         ceph_put_mds_session(s);
4678 }
4679
4680 /*
4681  * if the client is unresponsive for long enough, the mds will kill
4682  * the session entirely.
4683  */
4684 static void peer_reset(struct ceph_connection *con)
4685 {
4686         struct ceph_mds_session *s = con->private;
4687         struct ceph_mds_client *mdsc = s->s_mdsc;
4688
4689         pr_warn("mds%d closed our session\n", s->s_mds);
4690         send_mds_reconnect(mdsc, s);
4691 }
4692
4693 static void dispatch(struct ceph_connection *con, struct ceph_msg *msg)
4694 {
4695         struct ceph_mds_session *s = con->private;
4696         struct ceph_mds_client *mdsc = s->s_mdsc;
4697         int type = le16_to_cpu(msg->hdr.type);
4698
4699         mutex_lock(&mdsc->mutex);
4700         if (__verify_registered_session(mdsc, s) < 0) {
4701                 mutex_unlock(&mdsc->mutex);
4702                 goto out;
4703         }
4704         mutex_unlock(&mdsc->mutex);
4705
4706         switch (type) {
4707         case CEPH_MSG_MDS_MAP:
4708                 ceph_mdsc_handle_mdsmap(mdsc, msg);
4709                 break;
4710         case CEPH_MSG_FS_MAP_USER:
4711                 ceph_mdsc_handle_fsmap(mdsc, msg);
4712                 break;
4713         case CEPH_MSG_CLIENT_SESSION:
4714                 handle_session(s, msg);
4715                 break;
4716         case CEPH_MSG_CLIENT_REPLY:
4717                 handle_reply(s, msg);
4718                 break;
4719         case CEPH_MSG_CLIENT_REQUEST_FORWARD:
4720                 handle_forward(mdsc, s, msg);
4721                 break;
4722         case CEPH_MSG_CLIENT_CAPS:
4723                 ceph_handle_caps(s, msg);
4724                 break;
4725         case CEPH_MSG_CLIENT_SNAP:
4726                 ceph_handle_snap(mdsc, s, msg);
4727                 break;
4728         case CEPH_MSG_CLIENT_LEASE:
4729                 handle_lease(mdsc, s, msg);
4730                 break;
4731         case CEPH_MSG_CLIENT_QUOTA:
4732                 ceph_handle_quota(mdsc, s, msg);
4733                 break;
4735         default:
4736                 pr_err("received unknown message type %d %s\n", type,
4737                        ceph_msg_type_name(type));
4738         }
4739 out:
4740         ceph_msg_put(msg);
4741 }
4742
4743 /*
4744  * authentication
4745  */
4746
4747 /*
4748  * Note: returned pointer is the address of a structure that's
4749  * managed separately.  Caller must *not* attempt to free it.
4750  */
4751 static struct ceph_auth_handshake *get_authorizer(struct ceph_connection *con,
4752                                         int *proto, int force_new)
4753 {
4754         struct ceph_mds_session *s = con->private;
4755         struct ceph_mds_client *mdsc = s->s_mdsc;
4756         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4757         struct ceph_auth_handshake *auth = &s->s_auth;
4758
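             /* on force_new, discard any previous authorizer and build a fresh one */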
4759         if (force_new && auth->authorizer) {
4760                 ceph_auth_destroy_authorizer(auth->authorizer);
4761                 auth->authorizer = NULL;
4762         }
4763         if (!auth->authorizer) {
4764                 int ret = ceph_auth_create_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
4765                                                       auth);
4766                 if (ret)
4767                         return ERR_PTR(ret);
4768         } else {
4769                 int ret = ceph_auth_update_authorizer(ac, CEPH_ENTITY_TYPE_MDS,
4770                                                       auth);
4771                 if (ret)
4772                         return ERR_PTR(ret);
4773         }
4774         *proto = ac->protocol;
4775
4776         return auth;
4777 }
4778
4779 static int add_authorizer_challenge(struct ceph_connection *con,
4780                                     void *challenge_buf, int challenge_buf_len)
4781 {
4782         struct ceph_mds_session *s = con->private;
4783         struct ceph_mds_client *mdsc = s->s_mdsc;
4784         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4785
4786         return ceph_auth_add_authorizer_challenge(ac, s->s_auth.authorizer,
4787                                             challenge_buf, challenge_buf_len);
4788 }
4789
4790 static int verify_authorizer_reply(struct ceph_connection *con)
4791 {
4792         struct ceph_mds_session *s = con->private;
4793         struct ceph_mds_client *mdsc = s->s_mdsc;
4794         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4795
4796         return ceph_auth_verify_authorizer_reply(ac, s->s_auth.authorizer);
4797 }
4798
4799 static int invalidate_authorizer(struct ceph_connection *con)
4800 {
4801         struct ceph_mds_session *s = con->private;
4802         struct ceph_mds_client *mdsc = s->s_mdsc;
4803         struct ceph_auth_client *ac = mdsc->fsc->client->monc.auth;
4804
4805         ceph_auth_invalidate_authorizer(ac, CEPH_ENTITY_TYPE_MDS);
4806
4807         return ceph_monc_validate_auth(&mdsc->fsc->client->monc);
4808 }
4809
4810 static struct ceph_msg *mds_alloc_msg(struct ceph_connection *con,
4811                                 struct ceph_msg_header *hdr, int *skip)
4812 {
4813         struct ceph_msg *msg;
4814         int type = (int) le16_to_cpu(hdr->type);
4815         int front_len = (int) le32_to_cpu(hdr->front_len);
4816
4817         if (con->in_msg)
4818                 return con->in_msg;
4819
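             /*
              * *skip stays 0: returning NULL is treated by the messenger as
              * an allocation failure rather than a message to be discarded.
              */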
4820         *skip = 0;
4821         msg = ceph_msg_new(type, front_len, GFP_NOFS, false);
4822         if (!msg) {
4823                 pr_err("unable to allocate msg type %d len %d\n",
4824                        type, front_len);
4825                 return NULL;
4826         }
4827
4828         return msg;
4829 }
4830
4831 static int mds_sign_message(struct ceph_msg *msg)
4832 {
4833         struct ceph_mds_session *s = msg->con->private;
4834         struct ceph_auth_handshake *auth = &s->s_auth;
4835
4836         return ceph_auth_sign_message(auth, msg);
4837 }
4838
4839 static int mds_check_message_signature(struct ceph_msg *msg)
4840 {
4841         struct ceph_mds_session *s = msg->con->private;
4842         struct ceph_auth_handshake *auth = &s->s_auth;
4843
4844         return ceph_auth_check_message_signature(auth, msg);
4845 }
4846
4847 static const struct ceph_connection_operations mds_con_ops = {
4848         .get = con_get,
4849         .put = con_put,
4850         .dispatch = dispatch,
4851         .get_authorizer = get_authorizer,
4852         .add_authorizer_challenge = add_authorizer_challenge,
4853         .verify_authorizer_reply = verify_authorizer_reply,
4854         .invalidate_authorizer = invalidate_authorizer,
4855         .peer_reset = peer_reset,
4856         .alloc_msg = mds_alloc_msg,
4857         .sign_message = mds_sign_message,
4858         .check_message_signature = mds_check_message_signature,
4859 };
4860
4861 /* eof */