#include <linux/ceph/ceph_debug.h>

#include <linux/fs.h>
#include <linux/kernel.h>
#include <linux/sched/signal.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>
#include <linux/writeback.h>

#include "super.h"
#include "mds_client.h"
#include "cache.h"
#include <linux/ceph/decode.h>
#include <linux/ceph/messenger.h>
/*
 * Capability management
 *
 * The Ceph metadata servers control client access to inode metadata
 * and file data by issuing capabilities, granting clients permission
 * to read and/or write both inode fields and file data to OSDs
 * (storage nodes).  Each capability consists of a set of bits
 * indicating which operations are allowed.
 *
 * If the client holds a *_SHARED cap, the client has a coherent value
 * that can be safely read from the cached inode.
 *
 * In the case of *_EXCL (exclusive) or FILE_WR capabilities, the
 * client is allowed to change inode attributes (e.g., file size,
 * mtime), note its dirty state in the ceph_cap, and asynchronously
 * flush that metadata change to the MDS.
 *
 * In the event of a conflicting operation (perhaps by another
 * client), the MDS will revoke the conflicting client capabilities.
 *
 * In order for a client to cache an inode, it must hold a capability
 * with at least one MDS server.  When inodes are released, release
 * notifications are batched and periodically sent en masse to the MDS
 * cluster to release server state.
 */
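/*
 * A cap's bits are composed per component (AUTH, LINK, XATTR, FILE),
 * each carrying SHARED/EXCL bits plus, for FILE, RD/WR/CACHE/BUFFER
 * and LAZYIO.  For example, CEPH_CAP_FILE_SHARED|CEPH_CAP_FILE_RD
 * (printed as "Fsr" by ceph_cap_string() below) lets a client trust
 * its cached file metadata and read file data from the OSDs.
 */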
static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc);
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid);

/*
 * Generate readable cap strings for debugging output.
 */
#define MAX_CAP_STR 20
static char cap_str[MAX_CAP_STR][40];
static DEFINE_SPINLOCK(cap_str_lock);
static int last_cap_str;
static char *gcap_string(char *s, int c)
{
	if (c & CEPH_CAP_GSHARED)
		*s++ = 's';
	if (c & CEPH_CAP_GEXCL)
		*s++ = 'x';
	if (c & CEPH_CAP_GCACHE)
		*s++ = 'c';
	if (c & CEPH_CAP_GRD)
		*s++ = 'r';
	if (c & CEPH_CAP_GWR)
		*s++ = 'w';
	if (c & CEPH_CAP_GBUFFER)
		*s++ = 'b';
	if (c & CEPH_CAP_GLAZYIO)
		*s++ = 'l';
	return s;
}
const char *ceph_cap_string(int caps)
{
	int i;
	char *s;
	int c;

	spin_lock(&cap_str_lock);
	i = last_cap_str++;
	if (last_cap_str == MAX_CAP_STR)
		last_cap_str = 0;
	spin_unlock(&cap_str_lock);

	s = cap_str[i];

	if (caps & CEPH_CAP_PIN)
		*s++ = 'p';

	c = (caps >> CEPH_CAP_SAUTH) & 3;
	if (c) {
		*s++ = 'A';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SLINK) & 3;
	if (c) {
		*s++ = 'L';
		s = gcap_string(s, c);
	}

	c = (caps >> CEPH_CAP_SXATTR) & 3;
	if (c) {
		*s++ = 'X';
		s = gcap_string(s, c);
	}

	c = caps >> CEPH_CAP_SFILE;
	if (c) {
		*s++ = 'F';
		s = gcap_string(s, c);
	}

	if (s == cap_str[i])
		*s++ = '-';
	*s = 0;
	return cap_str[i];
}
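/*
 * Sample renderings: CEPH_CAP_FILE_CACHE alone prints as "Fc", an
 * empty cap set as "-".  The string lives in a small rotating pool of
 * static buffers, so it is only suitable for transient debug output.
 */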
void ceph_caps_init(struct ceph_mds_client *mdsc)
{
	INIT_LIST_HEAD(&mdsc->caps_list);
	spin_lock_init(&mdsc->caps_list_lock);
}

void ceph_caps_finalize(struct ceph_mds_client *mdsc)
{
	struct ceph_cap *cap;

	spin_lock(&mdsc->caps_list_lock);
	while (!list_empty(&mdsc->caps_list)) {
		cap = list_first_entry(&mdsc->caps_list,
				       struct ceph_cap, caps_item);
		list_del(&cap->caps_item);
		kmem_cache_free(ceph_cap_cachep, cap);
	}
	mdsc->caps_total_count = 0;
	mdsc->caps_avail_count = 0;
	mdsc->caps_use_count = 0;
	mdsc->caps_reserve_count = 0;
	mdsc->caps_min_count = 0;
	spin_unlock(&mdsc->caps_list_lock);
}
void ceph_adjust_min_caps(struct ceph_mds_client *mdsc, int delta)
{
	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_min_count += delta;
	BUG_ON(mdsc->caps_min_count < 0);
	spin_unlock(&mdsc->caps_list_lock);
}
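/*
 * Cap reservations: a ceph_cap_reservation pins preallocated
 * ceph_cap structs so cap-adding paths cannot hit ENOMEM at an
 * awkward moment.  The pool counters below always preserve the
 * invariant (asserted by the BUG_ONs):
 *
 *   caps_total_count == caps_use_count + caps_reserve_count +
 *			 caps_avail_count
 */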
void ceph_reserve_caps(struct ceph_mds_client *mdsc,
		       struct ceph_cap_reservation *ctx, int need)
{
	int i;
	struct ceph_cap *cap;
	int have;
	int alloc = 0;
	LIST_HEAD(newcaps);

	dout("reserve caps ctx=%p need=%d\n", ctx, need);

	/* first reserve any caps that are already allocated */
	spin_lock(&mdsc->caps_list_lock);
	if (mdsc->caps_avail_count >= need)
		have = need;
	else
		have = mdsc->caps_avail_count;
	mdsc->caps_avail_count -= have;
	mdsc->caps_reserve_count += have;
	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	for (i = have; i < need; i++) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (!cap)
			break;
		list_add(&cap->caps_item, &newcaps);
		alloc++;
	}
	/* we didn't manage to reserve as much as we needed */
	if (have + alloc != need)
		pr_warn("reserve caps ctx=%p ENOMEM need=%d got=%d\n",
			ctx, need, have + alloc);

	spin_lock(&mdsc->caps_list_lock);
	mdsc->caps_total_count += alloc;
	mdsc->caps_reserve_count += alloc;
	list_splice(&newcaps, &mdsc->caps_list);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
					 mdsc->caps_reserve_count +
					 mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);

	ctx->count = need;
	dout("reserve caps ctx=%p %d = %d used + %d resv + %d avail\n",
	     ctx, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
}
int ceph_unreserve_caps(struct ceph_mds_client *mdsc,
			struct ceph_cap_reservation *ctx)
{
	dout("unreserve caps ctx=%p count=%d\n", ctx, ctx->count);
	if (ctx->count) {
		spin_lock(&mdsc->caps_list_lock);
		BUG_ON(mdsc->caps_reserve_count < ctx->count);
		mdsc->caps_reserve_count -= ctx->count;
		mdsc->caps_avail_count += ctx->count;
		ctx->count = 0;
		dout("unreserve caps %d = %d used + %d resv + %d avail\n",
		     mdsc->caps_total_count, mdsc->caps_use_count,
		     mdsc->caps_reserve_count, mdsc->caps_avail_count);
		BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
						 mdsc->caps_reserve_count +
						 mdsc->caps_avail_count);
		spin_unlock(&mdsc->caps_list_lock);
	}
	return 0;
}
struct ceph_cap *ceph_get_cap(struct ceph_mds_client *mdsc,
			      struct ceph_cap_reservation *ctx)
{
	struct ceph_cap *cap = NULL;

	/* temporary, until we do something about cap import/export */
	if (!ctx) {
		cap = kmem_cache_alloc(ceph_cap_cachep, GFP_NOFS);
		if (cap) {
			spin_lock(&mdsc->caps_list_lock);
			mdsc->caps_use_count++;
			mdsc->caps_total_count++;
			spin_unlock(&mdsc->caps_list_lock);
		}
		return cap;
	}

	spin_lock(&mdsc->caps_list_lock);
	dout("get_cap ctx=%p (%d) %d = %d used + %d resv + %d avail\n",
	     ctx, ctx->count, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	BUG_ON(!ctx->count);
	BUG_ON(ctx->count > mdsc->caps_reserve_count);
	BUG_ON(list_empty(&mdsc->caps_list));

	ctx->count--;
	mdsc->caps_reserve_count--;
	mdsc->caps_use_count++;

	cap = list_first_entry(&mdsc->caps_list, struct ceph_cap, caps_item);
	list_del(&cap->caps_item);

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
	return cap;
}
void ceph_put_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap)
{
	spin_lock(&mdsc->caps_list_lock);
	dout("put_cap %p %d = %d used + %d resv + %d avail\n",
	     cap, mdsc->caps_total_count, mdsc->caps_use_count,
	     mdsc->caps_reserve_count, mdsc->caps_avail_count);
	mdsc->caps_use_count--;
	/*
	 * Keep some preallocated caps around (ceph_min_count), to
	 * avoid lots of free/alloc churn.
	 */
	if (mdsc->caps_avail_count >= mdsc->caps_reserve_count +
				      mdsc->caps_min_count) {
		mdsc->caps_total_count--;
		kmem_cache_free(ceph_cap_cachep, cap);
	} else {
		mdsc->caps_avail_count++;
		list_add(&cap->caps_item, &mdsc->caps_list);
	}

	BUG_ON(mdsc->caps_total_count != mdsc->caps_use_count +
	       mdsc->caps_reserve_count + mdsc->caps_avail_count);
	spin_unlock(&mdsc->caps_list_lock);
}
void ceph_reservation_status(struct ceph_fs_client *fsc,
			     int *total, int *avail, int *used, int *reserved,
			     int *min)
{
	struct ceph_mds_client *mdsc = fsc->mdsc;

	if (total)
		*total = mdsc->caps_total_count;
	if (avail)
		*avail = mdsc->caps_avail_count;
	if (used)
		*used = mdsc->caps_use_count;
	if (reserved)
		*reserved = mdsc->caps_reserve_count;
	if (min)
		*min = mdsc->caps_min_count;
}
/*
 * Find ceph_cap for given mds, if any.
 *
 * Called with i_ceph_lock held.
 */
static struct ceph_cap *__get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;
	struct rb_node *n = ci->i_caps.rb_node;

	while (n) {
		cap = rb_entry(n, struct ceph_cap, ci_node);
		if (mds < cap->mds)
			n = n->rb_left;
		else if (mds > cap->mds)
			n = n->rb_right;
		else
			return cap;
	}
	return NULL;
}

struct ceph_cap *ceph_get_cap_for_mds(struct ceph_inode_info *ci, int mds)
{
	struct ceph_cap *cap;

	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	spin_unlock(&ci->i_ceph_lock);
	return cap;
}
/*
 * Return id of any MDS with a cap, preferably FILE_WR|BUFFER|EXCL, else -1.
 */
static int __ceph_get_cap_mds(struct ceph_inode_info *ci)
{
	struct ceph_cap *cap;
	int mds = -1;
	struct rb_node *p;

	/* prefer mds with WR|BUFFER|EXCL caps */
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		mds = cap->mds;
		if (cap->issued & (CEPH_CAP_FILE_WR |
				   CEPH_CAP_FILE_BUFFER |
				   CEPH_CAP_FILE_EXCL))
			break;
	}
	return mds;
}

int ceph_get_cap_mds(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds;
	spin_lock(&ci->i_ceph_lock);
	mds = __ceph_get_cap_mds(ceph_inode(inode));
	spin_unlock(&ci->i_ceph_lock);
	return mds;
}
/*
 * Called under i_ceph_lock.
 */
static void __insert_cap_node(struct ceph_inode_info *ci,
			      struct ceph_cap *new)
{
	struct rb_node **p = &ci->i_caps.rb_node;
	struct rb_node *parent = NULL;
	struct ceph_cap *cap = NULL;

	while (*p) {
		parent = *p;
		cap = rb_entry(parent, struct ceph_cap, ci_node);
		if (new->mds < cap->mds)
			p = &(*p)->rb_left;
		else if (new->mds > cap->mds)
			p = &(*p)->rb_right;
		else
			BUG();
	}

	rb_link_node(&new->ci_node, parent, p);
	rb_insert_color(&new->ci_node, &ci->i_caps);
}
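/* ci->i_caps is kept as an rb-tree keyed by mds id, with at most one
 * cap per MDS; hence the BUG() above on a duplicate insert. */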
/*
 * (re)set cap hold timeouts, which control the delayed release
 * of unused caps back to the MDS.  Should be called on cap use.
 */
static void __cap_set_timeouts(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	struct ceph_mount_options *ma = mdsc->fsc->mount_options;

	ci->i_hold_caps_min = round_jiffies(jiffies +
					    ma->caps_wanted_delay_min * HZ);
	ci->i_hold_caps_max = round_jiffies(jiffies +
					    ma->caps_wanted_delay_max * HZ);
	dout("__cap_set_timeouts %p min %lu max %lu\n", &ci->vfs_inode,
	     ci->i_hold_caps_min - jiffies, ci->i_hold_caps_max - jiffies);
}
/*
 * (Re)queue cap at the end of the delayed cap release list.
 *
 * If I_FLUSH is set, leave the inode at the front of the list.
 *
 * Caller holds i_ceph_lock
 *    -> we take mdsc->cap_delay_lock
 */
static void __cap_delay_requeue(struct ceph_mds_client *mdsc,
				struct ceph_inode_info *ci)
{
	__cap_set_timeouts(mdsc, ci);
	dout("__cap_delay_requeue %p flags %d at %lu\n", &ci->vfs_inode,
	     ci->i_ceph_flags, ci->i_hold_caps_max);
	if (!mdsc->stopping) {
		spin_lock(&mdsc->cap_delay_lock);
		if (!list_empty(&ci->i_cap_delay_list)) {
			if (ci->i_ceph_flags & CEPH_I_FLUSH)
				goto no_change;
			list_del_init(&ci->i_cap_delay_list);
		}
		list_add_tail(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
no_change:
		spin_unlock(&mdsc->cap_delay_lock);
	}
}
/*
 * Queue an inode for immediate writeback.  Mark inode with I_FLUSH,
 * indicating we should send a cap message to flush dirty metadata
 * asap, and move to the front of the delayed cap list.
 */
static void __cap_delay_requeue_front(struct ceph_mds_client *mdsc,
				      struct ceph_inode_info *ci)
{
	dout("__cap_delay_requeue_front %p\n", &ci->vfs_inode);
	spin_lock(&mdsc->cap_delay_lock);
	ci->i_ceph_flags |= CEPH_I_FLUSH;
	if (!list_empty(&ci->i_cap_delay_list))
		list_del_init(&ci->i_cap_delay_list);
	list_add(&ci->i_cap_delay_list, &mdsc->cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Cancel delayed work on cap.
 *
 * Caller must hold i_ceph_lock.
 */
static void __cap_delay_cancel(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci)
{
	dout("__cap_delay_cancel %p\n", &ci->vfs_inode);
	if (list_empty(&ci->i_cap_delay_list))
		return;
	spin_lock(&mdsc->cap_delay_lock);
	list_del_init(&ci->i_cap_delay_list);
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Common issue checks for add_cap, handle_cap_grant.
 */
static void __check_cap_issue(struct ceph_inode_info *ci, struct ceph_cap *cap,
			      unsigned issued)
{
	unsigned had = __ceph_caps_issued(ci, NULL);

	/*
	 * Each time we receive FILE_CACHE anew, we increment
	 * i_rdcache_gen.
	 */
	if ((issued & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
	    (had & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) == 0) {
		ci->i_rdcache_gen++;
	}

	/*
	 * If FILE_SHARED is newly issued, mark dir not complete. We don't
	 * know what happened to this directory while we didn't have the cap.
	 * If FILE_SHARED is being revoked, also mark dir not complete. It
	 * stops on-going cached readdir.
	 */
	if ((issued & CEPH_CAP_FILE_SHARED) != (had & CEPH_CAP_FILE_SHARED)) {
		if (issued & CEPH_CAP_FILE_SHARED)
			ci->i_shared_gen++;
		if (S_ISDIR(ci->vfs_inode.i_mode)) {
			dout(" marking %p NOT complete\n", &ci->vfs_inode);
			__ceph_dir_clear_complete(ci);
		}
	}
}
/*
 * Add a capability under the given MDS session.
 *
 * Caller should hold session snap_rwsem (read) and s_mutex.
 *
 * @fmode is the open file mode, if we are opening a file, otherwise
 * it is < 0.  (This is so we can atomically add the cap and add an
 * open file reference to it.)
 */
void ceph_add_cap(struct inode *inode,
		  struct ceph_mds_session *session, u64 cap_id,
		  int fmode, unsigned issued, unsigned wanted,
		  unsigned seq, unsigned mseq, u64 realmino, int flags,
		  struct ceph_cap **new_cap)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	int mds = session->s_mds;
	int actual_wanted;

	dout("add_cap %p mds%d cap %llx %s seq %d\n", inode,
	     session->s_mds, cap_id, ceph_cap_string(issued), seq);

	/*
	 * If we are opening the file, include file mode wanted bits
	 * in wanted.
	 */
	if (fmode >= 0)
		wanted |= ceph_caps_for_mode(fmode);

	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		cap = *new_cap;
		*new_cap = NULL;

		cap->issued = 0;
		cap->implemented = 0;
		cap->mds = mds;
		cap->mds_wanted = 0;
		cap->mseq = 0;

		cap->ci = ci;
		__insert_cap_node(ci, cap);

		/* add to session cap list */
		cap->session = session;
		spin_lock(&session->s_cap_lock);
		list_add_tail(&cap->session_caps, &session->s_caps);
		session->s_nr_caps++;
		spin_unlock(&session->s_cap_lock);
	} else {
		/*
		 * auth mds of the inode changed. we received the cap export
		 * message, but still haven't received the cap import message.
		 * handle_cap_export() updated the new auth MDS' cap.
		 *
		 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing
		 * a message that was sent before the cap import message. So
		 * don't remove caps.
		 */
		if (ceph_seq_cmp(seq, cap->seq) <= 0) {
			WARN_ON(cap != ci->i_auth_cap);
			WARN_ON(cap->cap_id != cap_id);
			seq = cap->seq;
			mseq = cap->mseq;
			issued |= cap->issued;
			flags |= CEPH_CAP_FLAG_AUTH;
		}
	}

	if (!ci->i_snap_realm) {
		/*
		 * add this inode to the appropriate snap realm
		 */
		struct ceph_snap_realm *realm = ceph_lookup_snap_realm(mdsc,
							       realmino);
		if (realm) {
			spin_lock(&realm->inodes_with_caps_lock);
			ci->i_snap_realm = realm;
			list_add(&ci->i_snap_realm_item,
				 &realm->inodes_with_caps);
			spin_unlock(&realm->inodes_with_caps_lock);
		} else {
			pr_err("ceph_add_cap: couldn't find snap realm %llx\n",
			       realmino);
			WARN_ON(!realm);
		}
	}

	__check_cap_issue(ci, cap, issued);

	/*
	 * If we are issued caps we don't want, or the mds' wanted
	 * value appears to be off, queue a check so we'll release
	 * later and/or update the mds wanted value.
	 */
	actual_wanted = __ceph_caps_wanted(ci);
	if ((wanted & ~actual_wanted) ||
	    (issued & ~actual_wanted & CEPH_CAP_ANY_WR)) {
		dout(" issued %s, mds wanted %s, actual %s, queueing\n",
		     ceph_cap_string(issued), ceph_cap_string(wanted),
		     ceph_cap_string(actual_wanted));
		__cap_delay_requeue(mdsc, ci);
	}

	if (flags & CEPH_CAP_FLAG_AUTH) {
		if (!ci->i_auth_cap ||
		    ceph_seq_cmp(ci->i_auth_cap->mseq, mseq) < 0) {
			ci->i_auth_cap = cap;
			cap->mds_wanted = wanted;
		}
	} else {
		WARN_ON(ci->i_auth_cap == cap);
	}

	dout("add_cap inode %p (%llx.%llx) cap %p %s now %s seq %d mds%d\n",
	     inode, ceph_vinop(inode), cap, ceph_cap_string(issued),
	     ceph_cap_string(issued|cap->issued), seq, mds);
	cap->cap_id = cap_id;
	cap->issued = issued;
	cap->implemented |= issued;
	if (ceph_seq_cmp(mseq, cap->mseq) > 0)
		cap->mds_wanted = wanted;
	else
		cap->mds_wanted |= wanted;
	cap->seq = seq;
	cap->issue_seq = seq;
	cap->mseq = mseq;
	cap->cap_gen = session->s_cap_gen;

	if (fmode >= 0)
		__ceph_get_fmode(ci, fmode);
}
/*
 * Return true if cap has not timed out and belongs to the current
 * generation of the MDS session (i.e. has not gone 'stale' due to
 * us losing touch with the mds).
 */
static int __cap_is_valid(struct ceph_cap *cap)
{
	unsigned long ttl;
	u32 gen;

	spin_lock(&cap->session->s_gen_ttl_lock);
	gen = cap->session->s_cap_gen;
	ttl = cap->session->s_cap_ttl;
	spin_unlock(&cap->session->s_gen_ttl_lock);

	if (cap->cap_gen < gen || time_after_eq(jiffies, ttl)) {
		dout("__cap_is_valid %p cap %p issued %s "
		     "but STALE (gen %u vs %u)\n", &cap->ci->vfs_inode,
		     cap, ceph_cap_string(cap->issued), cap->cap_gen, gen);
		return 0;
	}

	return 1;
}
/*
 * Return set of valid cap bits issued to us.  Note that caps time
 * out, and may be invalidated in bulk if the client session times out
 * and session->s_cap_gen is bumped.
 */
int __ceph_caps_issued(struct ceph_inode_info *ci, int *implemented)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	if (implemented)
		*implemented = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		dout("__ceph_caps_issued %p cap %p issued %s\n",
		     &ci->vfs_inode, cap, ceph_cap_string(cap->issued));
		have |= cap->issued;
		if (implemented)
			*implemented |= cap->implemented;
	}
	/*
	 * exclude caps issued by a non-auth MDS but being revoked by
	 * the auth MDS. The non-auth MDS should be revoking/exporting
	 * these caps, but the message is delayed.
	 */
	if (ci->i_auth_cap) {
		cap = ci->i_auth_cap;
		have &= ~cap->implemented | cap->issued;
	}
	return have;
}
/*
 * Get cap bits issued by caps other than @ocap
 */
int __ceph_caps_issued_other(struct ceph_inode_info *ci, struct ceph_cap *ocap)
{
	int have = ci->i_snap_caps;
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap == ocap)
			continue;
		if (!__cap_is_valid(cap))
			continue;
		have |= cap->issued;
	}
	return have;
}
/*
 * Move a cap to the end of the LRU (oldest caps at list head, newest
 * at list tail).
 */
static void __touch_cap(struct ceph_cap *cap)
{
	struct ceph_mds_session *s = cap->session;

	spin_lock(&s->s_cap_lock);
	if (!s->s_cap_iterator) {
		dout("__touch_cap %p cap %p mds%d\n", &cap->ci->vfs_inode, cap,
		     s->s_mds);
		list_move_tail(&cap->session_caps, &s->s_caps);
	} else {
		dout("__touch_cap %p cap %p mds%d NOP, iterating over caps\n",
		     &cap->ci->vfs_inode, cap, s->s_mds);
	}
	spin_unlock(&s->s_cap_lock);
}
/*
 * Check if we hold the given mask.  If so, move the cap(s) to the
 * front of their respective LRUs.  (This is the preferred way for
 * callers to check for caps they want.)
 */
int __ceph_caps_issued_mask(struct ceph_inode_info *ci, int mask, int touch)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int have = ci->i_snap_caps;

	if ((have & mask) == mask) {
		dout("__ceph_caps_issued_mask %p snap issued %s"
		     " (mask %s)\n", &ci->vfs_inode,
		     ceph_cap_string(have),
		     ceph_cap_string(mask));
		return 1;
	}

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (!__cap_is_valid(cap))
			continue;
		if ((cap->issued & mask) == mask) {
			dout("__ceph_caps_issued_mask %p cap %p issued %s"
			     " (mask %s)\n", &ci->vfs_inode, cap,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch)
				__touch_cap(cap);
			return 1;
		}

		/* does a combination of caps satisfy mask? */
		have |= cap->issued;
		if ((have & mask) == mask) {
			dout("__ceph_caps_issued_mask %p combo issued %s"
			     " (mask %s)\n", &ci->vfs_inode,
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(mask));
			if (touch) {
				struct rb_node *q;

				/* touch this + preceding caps */
				__touch_cap(cap);
				for (q = rb_first(&ci->i_caps); q != p;
				     q = rb_next(q)) {
					cap = rb_entry(q, struct ceph_cap,
						       ci_node);
					if (!__cap_is_valid(cap))
						continue;
					__touch_cap(cap);
				}
			}
			return 1;
		}
	}

	return 0;
}
/*
 * Return true if mask caps are currently being revoked by an MDS.
 */
int __ceph_caps_revoking_other(struct ceph_inode_info *ci,
			       struct ceph_cap *ocap, int mask)
{
	struct ceph_cap *cap;
	struct rb_node *p;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (cap != ocap &&
		    (cap->implemented & ~cap->issued & mask))
			return 1;
	}
	return 0;
}

int ceph_caps_revoking(struct ceph_inode_info *ci, int mask)
{
	struct inode *inode = &ci->vfs_inode;
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_caps_revoking_other(ci, NULL, mask);
	spin_unlock(&ci->i_ceph_lock);
	dout("ceph_caps_revoking %p %s = %d\n", inode,
	     ceph_cap_string(mask), ret);
	return ret;
}
int __ceph_caps_used(struct ceph_inode_info *ci)
{
	int used = 0;
	if (ci->i_pin_ref)
		used |= CEPH_CAP_PIN;
	if (ci->i_rd_ref)
		used |= CEPH_CAP_FILE_RD;
	if (ci->i_rdcache_ref ||
	    (!S_ISDIR(ci->vfs_inode.i_mode) && /* ignore readdir cache */
	     ci->vfs_inode.i_data.nrpages))
		used |= CEPH_CAP_FILE_CACHE;
	if (ci->i_wr_ref)
		used |= CEPH_CAP_FILE_WR;
	if (ci->i_wb_ref || ci->i_wrbuffer_ref)
		used |= CEPH_CAP_FILE_BUFFER;
	return used;
}
/*
 * wanted, by virtue of open file modes
 */
int __ceph_caps_file_wanted(struct ceph_inode_info *ci)
{
	int i, bits = 0;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (ci->i_nr_by_mode[i])
			bits |= 1 << i;
	}
	if (bits == 0)
		return 0;
	return ceph_caps_for_mode(bits >> 1);
}
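/*
 * Note: bit 0 of the accumulated bitmask is the pin-only slot (every
 * open pins the inode; see __ceph_get_fmode()), so it is shifted off
 * before asking ceph_caps_for_mode() for the union of caps wanted by
 * the remaining open modes.
 */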
/*
 * Return caps we have registered with the MDS(s) as 'wanted'.
 */
int __ceph_caps_mds_wanted(struct ceph_inode_info *ci, bool check)
{
	struct ceph_cap *cap;
	struct rb_node *p;
	int mds_wanted = 0;

	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		if (check && !__cap_is_valid(cap))
			continue;
		if (cap == ci->i_auth_cap)
			mds_wanted |= cap->mds_wanted;
		else
			mds_wanted |= (cap->mds_wanted & ~CEPH_CAP_ANY_FILE_WR);
	}
	return mds_wanted;
}
/*
 * called under i_ceph_lock
 */
static int __ceph_is_any_caps(struct ceph_inode_info *ci)
{
	return !RB_EMPTY_ROOT(&ci->i_caps);
}

int ceph_is_any_caps(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret;

	spin_lock(&ci->i_ceph_lock);
	ret = __ceph_is_any_caps(ci);
	spin_unlock(&ci->i_ceph_lock);

	return ret;
}
static void drop_inode_snap_realm(struct ceph_inode_info *ci)
{
	struct ceph_snap_realm *realm = ci->i_snap_realm;
	spin_lock(&realm->inodes_with_caps_lock);
	list_del_init(&ci->i_snap_realm_item);
	ci->i_snap_realm_counter++;
	ci->i_snap_realm = NULL;
	spin_unlock(&realm->inodes_with_caps_lock);
	ceph_put_snap_realm(ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc,
			    realm);
}
/*
 * Remove a cap.  Take steps to deal with a racing iterate_session_caps.
 *
 * caller should hold i_ceph_lock.
 * caller will not hold session s_mutex if called from destroy_inode.
 */
void __ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
{
	struct ceph_mds_session *session = cap->session;
	struct ceph_inode_info *ci = cap->ci;
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	int removed = 0;

	dout("__ceph_remove_cap %p from %p\n", cap, &ci->vfs_inode);

	/* remove from session list */
	spin_lock(&session->s_cap_lock);
	if (session->s_cap_iterator == cap) {
		/* not yet, we are iterating over this very cap */
		dout("__ceph_remove_cap delaying %p removal from session %p\n",
		     cap, cap->session);
	} else {
		list_del_init(&cap->session_caps);
		session->s_nr_caps--;
		cap->session = NULL;
		removed = 1;
	}
	/* protect backpointer with s_cap_lock: see iterate_session_caps */
	cap->ci = NULL;

	/*
	 * s_cap_reconnect is protected by s_cap_lock. no one changes
	 * s_cap_gen while session is in the reconnect state.
	 */
	if (queue_release &&
	    (!session->s_cap_reconnect || cap->cap_gen == session->s_cap_gen)) {
		cap->queue_release = 1;
		if (removed) {
			list_add_tail(&cap->session_caps,
				      &session->s_cap_releases);
			session->s_num_cap_releases++;
			removed = 0;
		}
	} else {
		cap->queue_release = 0;
	}
	cap->cap_ino = ci->i_vino.ino;

	spin_unlock(&session->s_cap_lock);

	/* remove from inode list */
	rb_erase(&cap->ci_node, &ci->i_caps);
	if (ci->i_auth_cap == cap)
		ci->i_auth_cap = NULL;

	if (removed)
		ceph_put_cap(mdsc, cap);

	/* when reconnect denied, we remove session caps forcibly,
	 * i_wr_ref can be non-zero. If there is an ongoing write,
	 * keep i_snap_realm.
	 */
	if (!__ceph_is_any_caps(ci) && ci->i_wr_ref == 0 && ci->i_snap_realm)
		drop_inode_snap_realm(ci);

	if (!__ceph_is_any_real_caps(ci))
		__cap_delay_cancel(mdsc, ci);
}
struct cap_msg_args {
	struct ceph_mds_session	*session;
	u64			ino, cid, follows;
	u64			flush_tid, oldest_flush_tid, size, max_size;
	u64			xattr_version;
	struct ceph_buffer	*xattr_buf;
	struct timespec		atime, mtime, ctime;
	int			op, caps, wanted, dirty;
	u32			seq, issue_seq, mseq, time_warp_seq;
	u32			flags;
	kuid_t			uid;
	kgid_t			gid;
	umode_t			mode;
	bool			inline_data;
};
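/*
 * cap_msg_args exists so that callers such as __send_cap() can
 * snapshot everything send_cap_msg() needs while holding i_ceph_lock,
 * then drop the lock before building and sending the message.
 */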
/*
 * Build and send a cap message to the given MDS.
 *
 * Caller should be holding s_mutex.
 */
static int send_cap_msg(struct cap_msg_args *arg)
{
	struct ceph_mds_caps *fc;
	struct ceph_msg *msg;
	void *p;
	size_t extra_len;
	struct timespec zerotime = {0};
	struct ceph_osd_client *osdc = &arg->session->s_mdsc->fsc->client->osdc;

	dout("send_cap_msg %s %llx %llx caps %s wanted %s dirty %s"
	     " seq %u/%u tid %llu/%llu mseq %u follows %lld size %llu/%llu"
	     " xattr_ver %llu xattr_len %d\n", ceph_cap_op_name(arg->op),
	     arg->cid, arg->ino, ceph_cap_string(arg->caps),
	     ceph_cap_string(arg->wanted), ceph_cap_string(arg->dirty),
	     arg->seq, arg->issue_seq, arg->flush_tid, arg->oldest_flush_tid,
	     arg->mseq, arg->follows, arg->size, arg->max_size,
	     arg->xattr_version,
	     arg->xattr_buf ? (int)arg->xattr_buf->vec.iov_len : 0);

	/* flock buffer size + inline version + inline data size +
	 * osd_epoch_barrier + oldest_flush_tid */
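	/* extra_len byte breakdown, matching the v2..v10 fields encoded
	 * below: 4 flock len + 8 inline version + 4 inline len
	 * + 4 osd_epoch_barrier + 8 oldest_flush_tid + 4+4 caller uid/gid
	 * + 4 pool namespace len + 8 btime + 8 change_attr + 4 flags */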
	extra_len = 4 + 8 + 4 + 4 + 8 + 4 + 4 + 4 + 8 + 8 + 4;
	msg = ceph_msg_new(CEPH_MSG_CLIENT_CAPS, sizeof(*fc) + extra_len,
			   GFP_NOFS, false);
	if (!msg)
		return -ENOMEM;

	msg->hdr.version = cpu_to_le16(10);
	msg->hdr.tid = cpu_to_le64(arg->flush_tid);

	fc = msg->front.iov_base;
	memset(fc, 0, sizeof(*fc));

	fc->cap_id = cpu_to_le64(arg->cid);
	fc->op = cpu_to_le32(arg->op);
	fc->seq = cpu_to_le32(arg->seq);
	fc->issue_seq = cpu_to_le32(arg->issue_seq);
	fc->migrate_seq = cpu_to_le32(arg->mseq);
	fc->caps = cpu_to_le32(arg->caps);
	fc->wanted = cpu_to_le32(arg->wanted);
	fc->dirty = cpu_to_le32(arg->dirty);
	fc->ino = cpu_to_le64(arg->ino);
	fc->snap_follows = cpu_to_le64(arg->follows);

	fc->size = cpu_to_le64(arg->size);
	fc->max_size = cpu_to_le64(arg->max_size);
	ceph_encode_timespec(&fc->mtime, &arg->mtime);
	ceph_encode_timespec(&fc->atime, &arg->atime);
	ceph_encode_timespec(&fc->ctime, &arg->ctime);
	fc->time_warp_seq = cpu_to_le32(arg->time_warp_seq);

	fc->uid = cpu_to_le32(from_kuid(&init_user_ns, arg->uid));
	fc->gid = cpu_to_le32(from_kgid(&init_user_ns, arg->gid));
	fc->mode = cpu_to_le32(arg->mode);

	fc->xattr_version = cpu_to_le64(arg->xattr_version);
	if (arg->xattr_buf) {
		msg->middle = ceph_buffer_get(arg->xattr_buf);
		fc->xattr_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
		msg->hdr.middle_len = cpu_to_le32(arg->xattr_buf->vec.iov_len);
	}

	p = fc + 1;
	/* flock buffer size (version 2) */
	ceph_encode_32(&p, 0);
	/* inline version (version 4) */
	ceph_encode_64(&p, arg->inline_data ? 0 : CEPH_INLINE_NONE);
	/* inline data size */
	ceph_encode_32(&p, 0);
	/*
	 * osd_epoch_barrier (version 5)
	 * The epoch_barrier is protected by osdc->lock, so READ_ONCE here in
	 * case it was recently changed
	 */
	ceph_encode_32(&p, READ_ONCE(osdc->epoch_barrier));
	/* oldest_flush_tid (version 6) */
	ceph_encode_64(&p, arg->oldest_flush_tid);

	/*
	 * caller_uid/caller_gid (version 7)
	 *
	 * Currently, we don't properly track which caller dirtied the caps
	 * last, and force a flush of them when there is a conflict. For now,
	 * just set this to 0:0, to emulate how the MDS has worked up to now.
	 */
	ceph_encode_32(&p, 0);
	ceph_encode_32(&p, 0);

	/* pool namespace (version 8) (mds always ignores this) */
	ceph_encode_32(&p, 0);

	/*
	 * btime and change_attr (version 9)
	 *
	 * We just zero these out for now, as the MDS ignores them unless
	 * the requisite feature flags are set (which we don't do yet).
	 */
	ceph_encode_timespec(p, &zerotime);
	p += sizeof(struct ceph_timespec);
	ceph_encode_64(&p, 0);

	/* Advisory flags (version 10) */
	ceph_encode_32(&p, arg->flags);

	ceph_con_send(&arg->session->s_con, msg);
	return 0;
}
/*
 * Queue cap releases when an inode is dropped from our cache.  Since
 * inode is about to be destroyed, there is no need for i_ceph_lock.
 */
void ceph_queue_caps_release(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct rb_node *p;

	p = rb_first(&ci->i_caps);
	while (p) {
		struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
		p = rb_next(p);
		__ceph_remove_cap(cap, true);
	}
}
/*
 * Send a cap msg on the given inode.  Update our caps state, then
 * drop i_ceph_lock and send the message.
 *
 * Make note of max_size reported/requested from mds, revoked caps
 * that have now been implemented.
 *
 * Make a half-hearted attempt to invalidate page cache if we are
 * dropping RDCACHE.  Note that this will leave behind locked pages
 * that we'll then need to deal with elsewhere.
 *
 * Return non-zero if we delayed release, or experienced an error
 * such that the caller should requeue + retry later.
 *
 * called with i_ceph_lock, then drops it.
 * caller should hold snap_rwsem (read), s_mutex.
 */
static int __send_cap(struct ceph_mds_client *mdsc, struct ceph_cap *cap,
		      int op, bool sync, int used, int want, int retain,
		      int flushing, u64 flush_tid, u64 oldest_flush_tid)
	__releases(cap->ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = cap->ci;
	struct inode *inode = &ci->vfs_inode;
	struct cap_msg_args arg;
	int held, revoking, dropping;
	int wake = 0;
	int delayed = 0;
	int ret;

	held = cap->issued | cap->implemented;
	revoking = cap->implemented & ~cap->issued;
	retain &= ~revoking;
	dropping = cap->issued & ~retain;

	dout("__send_cap %p cap %p session %p %s -> %s (revoking %s)\n",
	     inode, cap, cap->session,
	     ceph_cap_string(held), ceph_cap_string(held & retain),
	     ceph_cap_string(revoking));
	BUG_ON((retain & CEPH_CAP_PIN) == 0);

	arg.session = cap->session;

	/* don't release wanted unless we've waited a bit. */
	if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
	    time_before(jiffies, ci->i_hold_caps_min)) {
		dout(" delaying issued %s -> %s, wanted %s -> %s on send\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->issued & retain),
		     ceph_cap_string(cap->mds_wanted),
		     ceph_cap_string(want));
		want |= cap->mds_wanted;
		retain |= cap->issued;
		delayed = 1;
	}
	ci->i_ceph_flags &= ~(CEPH_I_NODELAY | CEPH_I_FLUSH);
	if (want & ~cap->mds_wanted) {
		/* user space may open/close single file frequently.
		 * This avoids dropping mds_wanted immediately after
		 * requesting new mds_wanted.
		 */
		__cap_set_timeouts(mdsc, ci);
	}

	cap->issued &= retain;  /* drop bits we don't want */
	if (cap->implemented & ~cap->issued) {
		/*
		 * Wake up any waiters on wanted -> needed transition.
		 * This is due to the weird transition from buffered
		 * to sync IO... we need to flush dirty pages _before_
		 * allowing sync writes to avoid reordering.
		 */
		wake = 1;
	}
	cap->implemented &= cap->issued | used;
	cap->mds_wanted = want;

	arg.ino = ceph_vino(inode).ino;
	arg.cid = cap->cap_id;
	arg.follows = flushing ? ci->i_head_snapc->seq : 0;
	arg.flush_tid = flush_tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = inode->i_size;
	ci->i_reported_size = arg.size;
	arg.max_size = ci->i_wanted_max_size;
	ci->i_requested_max_size = arg.max_size;

	if (flushing & CEPH_CAP_XATTR_EXCL) {
		__ceph_build_xattrs_blob(ci);
		arg.xattr_version = ci->i_xattrs.version;
		arg.xattr_buf = ci->i_xattrs.blob;
	} else {
		arg.xattr_buf = NULL;
	}

	arg.mtime = inode->i_mtime;
	arg.atime = inode->i_atime;
	arg.ctime = inode->i_ctime;

	arg.op = op;
	arg.caps = cap->implemented;
	arg.wanted = want;
	arg.dirty = flushing;

	arg.seq = cap->seq;
	arg.issue_seq = cap->issue_seq;
	arg.mseq = cap->mseq;
	arg.time_warp_seq = ci->i_time_warp_seq;

	arg.uid = inode->i_uid;
	arg.gid = inode->i_gid;
	arg.mode = inode->i_mode;

	arg.inline_data = ci->i_inline_version != CEPH_INLINE_NONE;
	if (list_empty(&ci->i_cap_snaps))
		arg.flags = CEPH_CLIENT_CAPS_NO_CAPSNAP;
	else
		arg.flags = CEPH_CLIENT_CAPS_PENDING_CAPSNAP;
	if (sync)
		arg.flags |= CEPH_CLIENT_CAPS_SYNC;

	spin_unlock(&ci->i_ceph_lock);

	ret = send_cap_msg(&arg);
	if (ret < 0) {
		dout("error sending cap msg, must requeue %p\n", inode);
		delayed = 1;
	}

	if (wake)
		wake_up_all(&ci->i_cap_wq);

	return delayed;
}
static inline int __send_flush_snap(struct inode *inode,
				    struct ceph_mds_session *session,
				    struct ceph_cap_snap *capsnap,
				    u32 mseq, u64 oldest_flush_tid)
{
	struct cap_msg_args arg;

	arg.session = session;
	arg.ino = ceph_vino(inode).ino;
	arg.cid = 0;
	arg.follows = capsnap->follows;
	arg.flush_tid = capsnap->cap_flush.tid;
	arg.oldest_flush_tid = oldest_flush_tid;

	arg.size = capsnap->size;
	arg.max_size = 0;
	arg.xattr_version = capsnap->xattr_version;
	arg.xattr_buf = capsnap->xattr_blob;

	arg.atime = capsnap->atime;
	arg.mtime = capsnap->mtime;
	arg.ctime = capsnap->ctime;

	arg.op = CEPH_CAP_OP_FLUSHSNAP;
	arg.caps = capsnap->issued;
	arg.wanted = 0;
	arg.dirty = capsnap->dirty;

	arg.seq = 0;
	arg.issue_seq = 0;
	arg.mseq = mseq;
	arg.time_warp_seq = capsnap->time_warp_seq;

	arg.uid = capsnap->uid;
	arg.gid = capsnap->gid;
	arg.mode = capsnap->mode;

	arg.inline_data = capsnap->inline_data;
	arg.flags = 0;

	return send_cap_msg(&arg);
}
/*
 * When a snapshot is taken, clients accumulate dirty metadata on
 * inodes with capabilities in ceph_cap_snaps to describe the file
 * state at the time the snapshot was taken.  This must be flushed
 * asynchronously back to the MDS once sync writes complete and dirty
 * data is written out.
 *
 * Called under i_ceph_lock.  Takes s_mutex as needed.
 */
static void __ceph_flush_snaps(struct ceph_inode_info *ci,
			       struct ceph_mds_session *session)
		__releases(ci->i_ceph_lock)
		__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct ceph_cap_snap *capsnap;
	u64 oldest_flush_tid = 0;
	u64 first_tid = 1, last_tid = 0;

	dout("__flush_snaps %p session %p\n", inode, session);

	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		/*
		 * we need to wait for sync writes to complete and for dirty
		 * pages to be written out.
		 */
		if (capsnap->dirty_pages || capsnap->writing)
			break;

		/* should be removed by ceph_try_drop_cap_snap() */
		BUG_ON(!capsnap->need_flush);

		/* only flush each capsnap once */
		if (capsnap->cap_flush.tid > 0) {
			dout(" already flushed %p, skipping\n", capsnap);
			continue;
		}

		spin_lock(&mdsc->cap_dirty_lock);
		capsnap->cap_flush.tid = ++mdsc->last_cap_flush_tid;
		list_add_tail(&capsnap->cap_flush.g_list,
			      &mdsc->cap_flush_list);
		if (oldest_flush_tid == 0)
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		if (list_empty(&ci->i_flushing_item)) {
			list_add_tail(&ci->i_flushing_item,
				      &session->s_cap_flushing);
		}
		spin_unlock(&mdsc->cap_dirty_lock);

		list_add_tail(&capsnap->cap_flush.i_list,
			      &ci->i_cap_flush_list);

		if (first_tid == 1)
			first_tid = capsnap->cap_flush.tid;
		last_tid = capsnap->cap_flush.tid;
	}

	ci->i_ceph_flags &= ~CEPH_I_FLUSH_SNAPS;

	while (first_tid <= last_tid) {
		struct ceph_cap *cap = ci->i_auth_cap;
		struct ceph_cap_flush *cf;
		int ret;

		if (!(cap && cap->session == session)) {
			dout("__flush_snaps %p auth cap %p not mds%d, "
			     "stop\n", inode, cap, session->s_mds);
			break;
		}

		ret = -ENOENT;
		list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
			if (cf->tid >= first_tid) {
				ret = 0;
				break;
			}
		}
		if (ret < 0)
			break;

		first_tid = cf->tid + 1;

		capsnap = container_of(cf, struct ceph_cap_snap, cap_flush);
		refcount_inc(&capsnap->nref);
		spin_unlock(&ci->i_ceph_lock);

		dout("__flush_snaps %p capsnap %p tid %llu %s\n",
		     inode, capsnap, cf->tid, ceph_cap_string(capsnap->dirty));

		ret = __send_flush_snap(inode, session, capsnap, cap->mseq,
					oldest_flush_tid);
		if (ret < 0) {
			pr_err("__flush_snaps: error sending cap flushsnap, "
			       "ino (%llx.%llx) tid %llu follows %llu\n",
			       ceph_vinop(inode), cf->tid, capsnap->follows);
		}

		ceph_put_cap_snap(capsnap);
		spin_lock(&ci->i_ceph_lock);
	}
}
void ceph_flush_snaps(struct ceph_inode_info *ci,
		      struct ceph_mds_session **psession)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *session = NULL;
	int mds;

	dout("ceph_flush_snaps %p\n", inode);
	if (psession)
		session = *psession;
retry:
	spin_lock(&ci->i_ceph_lock);
	if (!(ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)) {
		dout(" no capsnap needs flush, doing nothing\n");
		goto out;
	}
	if (!ci->i_auth_cap) {
		dout(" no auth cap (migrating?), doing nothing\n");
		goto out;
	}

	mds = ci->i_auth_cap->session->s_mds;
	if (session && session->s_mds != mds) {
		dout(" oops, wrong session %p mutex\n", session);
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
		session = NULL;
	}
	if (!session) {
		spin_unlock(&ci->i_ceph_lock);
		mutex_lock(&mdsc->mutex);
		session = __ceph_lookup_mds_session(mdsc, mds);
		mutex_unlock(&mdsc->mutex);
		if (session) {
			dout(" inverting session/ino locks on %p\n", session);
			mutex_lock(&session->s_mutex);
		}
		goto retry;
	}

	// make sure flushsnap messages are sent in proper order.
	if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
		__kick_flushing_caps(mdsc, session, ci, 0);
		ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
	}

	__ceph_flush_snaps(ci, session);
out:
	spin_unlock(&ci->i_ceph_lock);

	if (psession) {
		*psession = session;
	} else if (session) {
		mutex_unlock(&session->s_mutex);
		ceph_put_mds_session(session);
	}
	/* we flushed them all; remove this inode from the queue */
	spin_lock(&mdsc->snap_flush_lock);
	list_del_init(&ci->i_snap_flush_item);
	spin_unlock(&mdsc->snap_flush_lock);
}
/*
 * Mark caps dirty.  If inode is newly dirty, return the dirty flags.
 * Caller is then responsible for calling __mark_inode_dirty with the
 * returned flags value.
 */
int __ceph_mark_dirty_caps(struct ceph_inode_info *ci, int mask,
			   struct ceph_cap_flush **pcf)
{
	struct ceph_mds_client *mdsc =
		ceph_sb_to_client(ci->vfs_inode.i_sb)->mdsc;
	struct inode *inode = &ci->vfs_inode;
	int was = ci->i_dirty_caps;
	int dirty = 0;

	if (!ci->i_auth_cap) {
		pr_warn("__mark_dirty_caps %p %llx mask %s, "
			"but no auth cap (session was closed?)\n",
			inode, ceph_ino(inode), ceph_cap_string(mask));
		return 0;
	}

	dout("__mark_dirty_caps %p %s dirty %s -> %s\n", &ci->vfs_inode,
	     ceph_cap_string(mask), ceph_cap_string(was),
	     ceph_cap_string(was | mask));
	ci->i_dirty_caps |= mask;
	if (was == 0) {
		WARN_ON_ONCE(ci->i_prealloc_cap_flush);
		swap(ci->i_prealloc_cap_flush, *pcf);

		if (!ci->i_head_snapc) {
			WARN_ON_ONCE(!rwsem_is_locked(&mdsc->snap_rwsem));
			ci->i_head_snapc = ceph_get_snap_context(
				ci->i_snap_realm->cached_context);
		}
		dout(" inode %p now dirty snapc %p auth cap %p\n",
		     &ci->vfs_inode, ci->i_head_snapc, ci->i_auth_cap);
		BUG_ON(!list_empty(&ci->i_dirty_item));
		spin_lock(&mdsc->cap_dirty_lock);
		list_add(&ci->i_dirty_item, &mdsc->cap_dirty);
		spin_unlock(&mdsc->cap_dirty_lock);
		if (ci->i_flushing_caps == 0) {
			ihold(inode);
			dirty |= I_DIRTY_SYNC;
		}
	} else {
		WARN_ON_ONCE(!ci->i_prealloc_cap_flush);
	}
	BUG_ON(list_empty(&ci->i_dirty_item));
	if (((was | ci->i_flushing_caps) & CEPH_CAP_FILE_BUFFER) &&
	    (mask & CEPH_CAP_FILE_BUFFER))
		dirty |= I_DIRTY_DATASYNC;
	__cap_delay_requeue(mdsc, ci);
	return dirty;
}
struct ceph_cap_flush *ceph_alloc_cap_flush(void)
{
	return kmem_cache_alloc(ceph_cap_flush_cachep, GFP_KERNEL);
}

void ceph_free_cap_flush(struct ceph_cap_flush *cf)
{
	if (cf)
		kmem_cache_free(ceph_cap_flush_cachep, cf);
}
static u64 __get_oldest_flush_tid(struct ceph_mds_client *mdsc)
{
	if (!list_empty(&mdsc->cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&mdsc->cap_flush_list,
					 struct ceph_cap_flush, g_list);
		return cf->tid;
	}
	return 0;
}
/*
 * Remove cap_flush from the mdsc's or inode's flushing cap list.
 * Return true if caller needs to wake up flush waiters.
 */
static bool __finish_cap_flush(struct ceph_mds_client *mdsc,
			       struct ceph_inode_info *ci,
			       struct ceph_cap_flush *cf)
{
	struct ceph_cap_flush *prev;
	bool wake = cf->wake;
	if (mdsc) {
		/* are there older pending cap flushes? */
		if (wake && cf->g_list.prev != &mdsc->cap_flush_list) {
			prev = list_prev_entry(cf, g_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->g_list);
	} else if (ci) {
		if (wake && cf->i_list.prev != &ci->i_cap_flush_list) {
			prev = list_prev_entry(cf, i_list);
			prev->wake = true;
			wake = false;
		}
		list_del(&cf->i_list);
	} else {
		BUG_ON(!mdsc && !ci);
	}
	return wake;
}
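/*
 * Note the wake handoff above: if an older flush is still pending on
 * the same list, the wake flag is transferred to it, so waiters are
 * only woken once every flush up to their tid has completed.
 */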
/*
 * Add dirty inode to the flushing list.  Assign a seq number so we
 * can wait for caps to flush without starving.
 *
 * Called under i_ceph_lock.
 */
static int __mark_caps_flushing(struct inode *inode,
				struct ceph_mds_session *session, bool wake,
				u64 *flush_tid, u64 *oldest_flush_tid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap_flush *cf = NULL;
	int flushing;

	BUG_ON(ci->i_dirty_caps == 0);
	BUG_ON(list_empty(&ci->i_dirty_item));
	BUG_ON(!ci->i_prealloc_cap_flush);

	flushing = ci->i_dirty_caps;
	dout("__mark_caps_flushing flushing %s, flushing_caps %s -> %s\n",
	     ceph_cap_string(flushing),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps | flushing));
	ci->i_flushing_caps |= flushing;
	ci->i_dirty_caps = 0;
	dout(" inode %p now !dirty\n", inode);

	swap(cf, ci->i_prealloc_cap_flush);
	cf->caps = flushing;
	cf->wake = wake;

	spin_lock(&mdsc->cap_dirty_lock);
	list_del_init(&ci->i_dirty_item);

	cf->tid = ++mdsc->last_cap_flush_tid;
	list_add_tail(&cf->g_list, &mdsc->cap_flush_list);
	*oldest_flush_tid = __get_oldest_flush_tid(mdsc);

	if (list_empty(&ci->i_flushing_item)) {
		list_add_tail(&ci->i_flushing_item, &session->s_cap_flushing);
		mdsc->num_cap_flushing++;
	}
	spin_unlock(&mdsc->cap_dirty_lock);

	list_add_tail(&cf->i_list, &ci->i_cap_flush_list);

	*flush_tid = cf->tid;
	return flushing;
}
/*
 * try to invalidate mapping pages without blocking.
 */
static int try_nonblocking_invalidate(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u32 invalidating_gen = ci->i_rdcache_gen;

	spin_unlock(&ci->i_ceph_lock);
	invalidate_mapping_pages(&inode->i_data, 0, -1);
	spin_lock(&ci->i_ceph_lock);

	if (inode->i_data.nrpages == 0 &&
	    invalidating_gen == ci->i_rdcache_gen) {
		/* success. */
		dout("try_nonblocking_invalidate %p success\n", inode);
		/* save any racing async invalidate some trouble */
		ci->i_rdcache_revoking = ci->i_rdcache_gen - 1;
		return 0;
	}
	dout("try_nonblocking_invalidate %p failed\n", inode);
	return -1;
}
bool __ceph_should_report_size(struct ceph_inode_info *ci)
{
	loff_t size = ci->vfs_inode.i_size;
	/* mds will adjust max size according to the reported size */
	if (ci->i_flushing_caps & CEPH_CAP_FILE_WR)
		return false;
	if (size >= ci->i_max_size)
		return true;
	/* half of previous max_size increment has been used */
	if (ci->i_max_size > ci->i_reported_size &&
	    (size << 1) >= ci->i_max_size + ci->i_reported_size)
		return true;
	return false;
}
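/*
 * Example: with i_reported_size 0 and i_max_size 4M, the size is
 * reported once it reaches 2M ((size << 1) >= 0 + 4M), giving the MDS
 * a chance to grow max_size before the writer actually hits it.
 */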
/*
 * Swiss army knife function to examine currently used and wanted
 * versus held caps.  Release, flush, ack revoked caps to mds as
 * appropriate.
 *
 *  CHECK_CAPS_NODELAY - caller is delayed work and we should not delay
 *    cap release further.
 *  CHECK_CAPS_AUTHONLY - we should only check the auth cap
 *  CHECK_CAPS_FLUSH - we should flush any dirty caps immediately, without
 *    further delay.
 */
void ceph_check_caps(struct ceph_inode_info *ci, int flags,
		     struct ceph_mds_session *session)
{
	struct ceph_fs_client *fsc = ceph_inode_to_client(&ci->vfs_inode);
	struct ceph_mds_client *mdsc = fsc->mdsc;
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	u64 flush_tid, oldest_flush_tid;
	int file_wanted, used, cap_used;
	int took_snap_rwsem = 0;             /* true if mdsc->snap_rwsem held */
	int issued, implemented, want, retain, revoking, flushing = 0;
	int mds = -1;   /* keep track of how far we've gone through i_caps list
			   to avoid an infinite loop on retry */
	struct rb_node *p;
	int delayed = 0, sent = 0, num;
	bool is_delayed = flags & CHECK_CAPS_NODELAY;
	bool queue_invalidate = false;
	bool force_requeue = false;
	bool tried_invalidate = false;

	/* if we are unmounting, flush any unused caps immediately. */
	if (mdsc->stopping)
		is_delayed = true;

	spin_lock(&ci->i_ceph_lock);

	if (ci->i_ceph_flags & CEPH_I_FLUSH)
		flags |= CHECK_CAPS_FLUSH;

	goto retry_locked;
retry:
	spin_lock(&ci->i_ceph_lock);
retry_locked:
	file_wanted = __ceph_caps_file_wanted(ci);
	used = __ceph_caps_used(ci);
	issued = __ceph_caps_issued(ci, &implemented);
	revoking = implemented & ~issued;

	want = file_wanted;
	retain = file_wanted | used | CEPH_CAP_PIN;
	if (!mdsc->stopping && inode->i_nlink > 0) {
		if (file_wanted) {
			retain |= CEPH_CAP_ANY;       /* be greedy */
		} else if (S_ISDIR(inode->i_mode) &&
			   (issued & CEPH_CAP_FILE_SHARED) &&
			   __ceph_dir_is_complete(ci)) {
			/*
			 * If a directory is complete, we want to keep
			 * the exclusive cap. So that MDS does not end up
			 * revoking the shared cap on every create/unlink
			 * operation.
			 */
			want = CEPH_CAP_ANY_SHARED | CEPH_CAP_FILE_EXCL;
			retain |= want;
		} else {
			retain |= CEPH_CAP_ANY_SHARED;
			/*
			 * keep RD only if we didn't have the file open RW,
			 * because then the mds would revoke it anyway to
			 * journal max_size=0.
			 */
			if (ci->i_max_size == 0)
				retain |= CEPH_CAP_ANY_RD;
		}
	}

	dout("check_caps %p file_want %s used %s dirty %s flushing %s"
	     " issued %s revoking %s retain %s %s%s%s\n", inode,
	     ceph_cap_string(file_wanted),
	     ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
	     ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(issued), ceph_cap_string(revoking),
	     ceph_cap_string(retain),
	     (flags & CHECK_CAPS_AUTHONLY) ? " AUTHONLY" : "",
	     (flags & CHECK_CAPS_NODELAY) ? " NODELAY" : "",
	     (flags & CHECK_CAPS_FLUSH) ? " FLUSH" : "");

	/*
	 * If we no longer need to hold onto our old caps, and we may
	 * have cached pages, but don't want them, then try to invalidate.
	 * If we fail, it's because pages are locked.... try again later.
	 */
	if ((!is_delayed || mdsc->stopping) &&
	    !S_ISDIR(inode->i_mode) &&		/* ignore readdir cache */
	    !(ci->i_wb_ref || ci->i_wrbuffer_ref) &&   /* no dirty pages... */
	    inode->i_data.nrpages &&		/* have cached pages */
	    (revoking & (CEPH_CAP_FILE_CACHE|
			 CEPH_CAP_FILE_LAZYIO)) && /*  or revoking cache */
	    !tried_invalidate) {
		dout("check_caps trying to invalidate on %p\n", inode);
		if (try_nonblocking_invalidate(inode) < 0) {
			if (revoking & (CEPH_CAP_FILE_CACHE|
					CEPH_CAP_FILE_LAZYIO)) {
				dout("check_caps queuing invalidate\n");
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			} else {
				dout("check_caps failed to invalidate pages\n");
				/* we failed to invalidate pages.  check these
				   caps again later. */
				force_requeue = true;
				__cap_set_timeouts(mdsc, ci);
			}
		}
		tried_invalidate = true;
		goto retry_locked;
	}

	num = 0;
	for (p = rb_first(&ci->i_caps); p; p = rb_next(p)) {
		cap = rb_entry(p, struct ceph_cap, ci_node);
		num++;

		/* avoid looping forever */
		if (mds >= cap->mds ||
		    ((flags & CHECK_CAPS_AUTHONLY) && cap != ci->i_auth_cap))
			continue;

		/* NOTE: no side-effects allowed, until we take s_mutex */

		cap_used = used;
		if (ci->i_auth_cap && cap != ci->i_auth_cap)
			cap_used &= ~ci->i_auth_cap->issued;

		revoking = cap->implemented & ~cap->issued;
		dout(" mds%d cap %p used %s issued %s implemented %s revoking %s\n",
		     cap->mds, cap, ceph_cap_string(cap_used),
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(cap->implemented),
		     ceph_cap_string(revoking));

		if (cap == ci->i_auth_cap &&
		    (cap->issued & CEPH_CAP_FILE_WR)) {
			/* request larger max_size from MDS? */
			if (ci->i_wanted_max_size > ci->i_max_size &&
			    ci->i_wanted_max_size > ci->i_requested_max_size) {
				dout("requesting new max_size\n");
				goto ack;
			}

			/* approaching file_max? */
			if (__ceph_should_report_size(ci)) {
				dout("i_size approaching max_size\n");
				goto ack;
			}
		}
		/* flush anything dirty? */
		if (cap == ci->i_auth_cap) {
			if ((flags & CHECK_CAPS_FLUSH) && ci->i_dirty_caps) {
				dout("flushing dirty caps\n");
				goto ack;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS) {
				dout("flushing snap caps\n");
				goto ack;
			}
		}

		/* completed revocation? going down and there are no caps? */
		if (revoking && (revoking & cap_used) == 0) {
			dout("completed revocation of %s\n",
			     ceph_cap_string(cap->implemented & ~cap->issued));
			goto ack;
		}

		/* want more caps from mds? */
		if (want & ~(cap->mds_wanted | cap->issued))
			goto ack;

		/* things we might delay */
		if ((cap->issued & ~retain) == 0 &&
		    cap->mds_wanted == want)
			continue;     /* nope, all good */

		if (is_delayed)
			goto ack;

		/* delay? */
		if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max)) {
			dout(" delaying issued %s -> %s, wanted %s -> %s\n",
			     ceph_cap_string(cap->issued),
			     ceph_cap_string(cap->issued & retain),
			     ceph_cap_string(cap->mds_wanted),
			     ceph_cap_string(want));
			delayed++;
			continue;
		}

ack:
		if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
			dout(" skipping %p I_NOFLUSH set\n", inode);
			continue;
		}

		if (session && session != cap->session) {
			dout("oops, wrong session %p mutex\n", session);
			mutex_unlock(&session->s_mutex);
			session = NULL;
		}
		if (!session) {
			session = cap->session;
			if (mutex_trylock(&session->s_mutex) == 0) {
				dout("inverting session/ino locks on %p\n",
				     session);
				spin_unlock(&ci->i_ceph_lock);
				if (took_snap_rwsem) {
					up_read(&mdsc->snap_rwsem);
					took_snap_rwsem = 0;
				}
				mutex_lock(&session->s_mutex);
				goto retry;
			}
		}

		/* kick flushing and flush snaps before sending normal
		 * cap message */
		if (cap == ci->i_auth_cap &&
		    (ci->i_ceph_flags &
		     (CEPH_I_KICK_FLUSH | CEPH_I_FLUSH_SNAPS))) {
			if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
				__kick_flushing_caps(mdsc, session, ci, 0);
				ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			}
			if (ci->i_ceph_flags & CEPH_I_FLUSH_SNAPS)
				__ceph_flush_snaps(ci, session);

			goto retry_locked;
		}

		/* take snap_rwsem after session mutex */
		if (!took_snap_rwsem) {
			if (down_read_trylock(&mdsc->snap_rwsem) == 0) {
				dout("inverting snap/in locks on %p\n",
				     inode);
				spin_unlock(&ci->i_ceph_lock);
				down_read(&mdsc->snap_rwsem);
				took_snap_rwsem = 1;
				goto retry;
			}
			took_snap_rwsem = 1;
		}

		if (cap == ci->i_auth_cap && ci->i_dirty_caps) {
			flushing = __mark_caps_flushing(inode, session, false,
							&flush_tid,
							&oldest_flush_tid);
		} else {
			flushing = 0;
			flush_tid = 0;
			spin_lock(&mdsc->cap_dirty_lock);
			oldest_flush_tid = __get_oldest_flush_tid(mdsc);
			spin_unlock(&mdsc->cap_dirty_lock);
		}

		mds = cap->mds;  /* remember mds, so we don't repeat */
		sent++;

		/* __send_cap drops i_ceph_lock */
		delayed += __send_cap(mdsc, cap, CEPH_CAP_OP_UPDATE, false,
				cap_used, want, retain, flushing,
				flush_tid, oldest_flush_tid);
		goto retry; /* retake i_ceph_lock and restart our cap scan. */
	}

	/*
	 * Reschedule delayed caps release if we delayed anything,
	 * otherwise cancel.
	 */
	if (delayed && is_delayed)
		force_requeue = true;   /* __send_cap delayed release; requeue */
	if (!delayed && !is_delayed)
		__cap_delay_cancel(mdsc, ci);
	else if (!is_delayed || force_requeue)
		__cap_delay_requeue(mdsc, ci);

	spin_unlock(&ci->i_ceph_lock);

	if (queue_invalidate)
		ceph_queue_invalidate(inode);

	if (session)
		mutex_unlock(&session->s_mutex);
	if (took_snap_rwsem)
		up_read(&mdsc->snap_rwsem);
}
/*
 * Try to flush dirty caps back to the auth mds.
 */
static int try_flush_caps(struct inode *inode, u64 *ptid)
{
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_session *session = NULL;
	int flushing = 0;
	u64 flush_tid = 0, oldest_flush_tid = 0;

retry:
	spin_lock(&ci->i_ceph_lock);
	if (ci->i_ceph_flags & CEPH_I_NOFLUSH) {
		spin_unlock(&ci->i_ceph_lock);
		dout("try_flush_caps skipping %p I_NOFLUSH set\n", inode);
		goto out;
	}
	if (ci->i_dirty_caps && ci->i_auth_cap) {
		struct ceph_cap *cap = ci->i_auth_cap;
		int used = __ceph_caps_used(ci);
		int want = __ceph_caps_wanted(ci);
		int delayed;

		if (!session || session != cap->session) {
			spin_unlock(&ci->i_ceph_lock);
			if (session)
				mutex_unlock(&session->s_mutex);
			session = cap->session;
			mutex_lock(&session->s_mutex);
			goto retry;
		}
		if (cap->session->s_state < CEPH_MDS_SESSION_OPEN) {
			spin_unlock(&ci->i_ceph_lock);
			goto out;
		}

		flushing = __mark_caps_flushing(inode, session, true,
						&flush_tid, &oldest_flush_tid);

		/* __send_cap drops i_ceph_lock */
		delayed = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH, true,
				used, want, (cap->issued | cap->implemented),
				flushing, flush_tid, oldest_flush_tid);

		if (delayed) {
			spin_lock(&ci->i_ceph_lock);
			__cap_delay_requeue(mdsc, ci);
			spin_unlock(&ci->i_ceph_lock);
		}
	} else {
		if (!list_empty(&ci->i_cap_flush_list)) {
			struct ceph_cap_flush *cf =
				list_last_entry(&ci->i_cap_flush_list,
						struct ceph_cap_flush, i_list);
			cf->wake = true;
			flush_tid = cf->tid;
		}
		flushing = ci->i_flushing_caps;
		spin_unlock(&ci->i_ceph_lock);
	}
out:
	if (session)
		mutex_unlock(&session->s_mutex);

	*ptid = flush_tid;
	return flushing;
}
/*
 * Return true if we've flushed caps through the given flush_tid.
 */
static int caps_are_flushed(struct inode *inode, u64 flush_tid)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int ret = 1;

	spin_lock(&ci->i_ceph_lock);
	if (!list_empty(&ci->i_cap_flush_list)) {
		struct ceph_cap_flush *cf =
			list_first_entry(&ci->i_cap_flush_list,
					 struct ceph_cap_flush, i_list);
		if (cf->tid <= flush_tid)
			ret = 0;
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
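/*
 * Looking only at the head of i_cap_flush_list suffices because
 * entries are appended in increasing tid order: if the oldest pending
 * tid is already past flush_tid, everything up to it has flushed.
 */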
/*
 * wait for any unsafe requests to complete.
 */
static int unsafe_request_wait(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_request *req1 = NULL, *req2 = NULL;
	int ret, err = 0;

	spin_lock(&ci->i_unsafe_lock);
	if (S_ISDIR(inode->i_mode) && !list_empty(&ci->i_unsafe_dirops)) {
		req1 = list_last_entry(&ci->i_unsafe_dirops,
					struct ceph_mds_request,
					r_unsafe_dir_item);
		ceph_mdsc_get_request(req1);
	}
	if (!list_empty(&ci->i_unsafe_iops)) {
		req2 = list_last_entry(&ci->i_unsafe_iops,
					struct ceph_mds_request,
					r_unsafe_target_item);
		ceph_mdsc_get_request(req2);
	}
	spin_unlock(&ci->i_unsafe_lock);

	dout("unsafe_request_wait %p wait on tid %llu %llu\n",
	     inode, req1 ? req1->r_tid : 0ULL, req2 ? req2->r_tid : 0ULL);
	if (req1) {
		ret = !wait_for_completion_timeout(&req1->r_safe_completion,
					ceph_timeout_jiffies(req1->r_timeout));
		if (ret)
			err = -EIO;
		ceph_mdsc_put_request(req1);
	}
	if (req2) {
		ret = !wait_for_completion_timeout(&req2->r_safe_completion,
					ceph_timeout_jiffies(req2->r_timeout));
		if (ret)
			err = -EIO;
		ceph_mdsc_put_request(req2);
	}
	return err;
}
int ceph_fsync(struct file *file, loff_t start, loff_t end, int datasync)
{
	struct inode *inode = file->f_mapping->host;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 flush_tid;
	int ret;
	int dirty;

	dout("fsync %p%s\n", inode, datasync ? " datasync" : "");

	ret = file_write_and_wait_range(file, start, end);
	if (ret < 0)
		goto out;

	if (datasync)
		goto out;

	inode_lock(inode);

	dirty = try_flush_caps(inode, &flush_tid);
	dout("fsync dirty caps are %s\n", ceph_cap_string(dirty));

	ret = unsafe_request_wait(inode);

	/*
	 * only wait on non-file metadata writeback (the mds
	 * can recover size and mtime, so we don't need to
	 * wait for that)
	 */
	if (!ret && (dirty & ~CEPH_CAP_ANY_FILE_WR)) {
		ret = wait_event_interruptible(ci->i_cap_wq,
					caps_are_flushed(inode, flush_tid));
	}
	inode_unlock(inode);
out:
	dout("fsync %p%s result=%d\n", inode, datasync ? " datasync" : "", ret);
	return ret;
}
/*
 * Flush any dirty caps back to the mds.  If we aren't asked to wait,
 * queue inode for flush but don't do so immediately, because we can
 * get by with fewer MDS messages if we wait for data writeback to
 * complete first.
 */
int ceph_write_inode(struct inode *inode, struct writeback_control *wbc)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 flush_tid;
	int err = 0;
	int dirty;
	int wait = wbc->sync_mode == WB_SYNC_ALL;

	dout("write_inode %p wait=%d\n", inode, wait);
	if (wait) {
		dirty = try_flush_caps(inode, &flush_tid);
		if (dirty)
			err = wait_event_interruptible(ci->i_cap_wq,
				       caps_are_flushed(inode, flush_tid));
	} else {
		struct ceph_mds_client *mdsc =
			ceph_sb_to_client(inode->i_sb)->mdsc;

		spin_lock(&ci->i_ceph_lock);
		if (__ceph_caps_dirty(ci))
			__cap_delay_requeue_front(mdsc, ci);
		spin_unlock(&ci->i_ceph_lock);
	}
	return err;
}
static void __kick_flushing_caps(struct ceph_mds_client *mdsc,
				 struct ceph_mds_session *session,
				 struct ceph_inode_info *ci,
				 u64 oldest_flush_tid)
	__releases(ci->i_ceph_lock)
	__acquires(ci->i_ceph_lock)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap *cap;
	struct ceph_cap_flush *cf;
	int ret;
	u64 first_tid = 0;

	list_for_each_entry(cf, &ci->i_cap_flush_list, i_list) {
		if (cf->tid < first_tid)
			continue;

		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
			       inode, cap, session->s_mds);
			break;
		}

		first_tid = cf->tid + 1;

		if (cf->caps) {
			dout("kick_flushing_caps %p cap %p tid %llu %s\n",
			     inode, cap, cf->tid, ceph_cap_string(cf->caps));
			ci->i_ceph_flags |= CEPH_I_NODELAY;
			ret = __send_cap(mdsc, cap, CEPH_CAP_OP_FLUSH,
					  false, __ceph_caps_used(ci),
					  __ceph_caps_wanted(ci),
					  cap->issued | cap->implemented,
					  cf->caps, cf->tid, oldest_flush_tid);
			if (ret) {
				pr_err("kick_flushing_caps: error sending "
					"cap flush, ino (%llx.%llx) "
					"tid %llu flushing %s\n",
					ceph_vinop(inode), cf->tid,
					ceph_cap_string(cf->caps));
			}
		} else {
			struct ceph_cap_snap *capsnap =
					container_of(cf, struct ceph_cap_snap,
						    cap_flush);
			dout("kick_flushing_caps %p capsnap %p tid %llu %s\n",
			     inode, capsnap, cf->tid,
			     ceph_cap_string(capsnap->dirty));

			refcount_inc(&capsnap->nref);
			spin_unlock(&ci->i_ceph_lock);

			ret = __send_flush_snap(inode, session, capsnap,
						cap->mseq, oldest_flush_tid);
			if (ret < 0) {
				pr_err("kick_flushing_caps: error sending "
					"cap flushsnap, ino (%llx.%llx) "
					"tid %llu follows %llu\n",
					ceph_vinop(inode), cf->tid,
					capsnap->follows);
			}

			ceph_put_cap_snap(capsnap);
		}

		spin_lock(&ci->i_ceph_lock);
	}
}
void ceph_early_kick_flushing_caps(struct ceph_mds_client *mdsc,
				   struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	u64 oldest_flush_tid;

	dout("early_kick_flushing_caps mds%d\n", session->s_mds);

	spin_lock(&mdsc->cap_dirty_lock);
	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
	spin_unlock(&mdsc->cap_dirty_lock);

	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
				&ci->vfs_inode, cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
			continue;
		}

		/*
		 * if flushing caps were revoked, we re-send the cap flush
		 * in client reconnect stage. This guarantees the MDS
		 * processes the cap flush message before issuing the
		 * flushing caps to another client.
		 */
		if ((cap->issued & ci->i_flushing_caps) !=
		    ci->i_flushing_caps) {
			ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			__kick_flushing_caps(mdsc, session, ci,
					     oldest_flush_tid);
		} else {
			ci->i_ceph_flags |= CEPH_I_KICK_FLUSH;
		}

		spin_unlock(&ci->i_ceph_lock);
	}
}
void ceph_kick_flushing_caps(struct ceph_mds_client *mdsc,
			     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	u64 oldest_flush_tid;

	dout("kick_flushing_caps mds%d\n", session->s_mds);

	spin_lock(&mdsc->cap_dirty_lock);
	oldest_flush_tid = __get_oldest_flush_tid(mdsc);
	spin_unlock(&mdsc->cap_dirty_lock);

	list_for_each_entry(ci, &session->s_cap_flushing, i_flushing_item) {
		spin_lock(&ci->i_ceph_lock);
		cap = ci->i_auth_cap;
		if (!(cap && cap->session == session)) {
			pr_err("%p auth cap %p not mds%d ???\n",
				&ci->vfs_inode, cap, session->s_mds);
			spin_unlock(&ci->i_ceph_lock);
			continue;
		}
		if (ci->i_ceph_flags & CEPH_I_KICK_FLUSH) {
			ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
			__kick_flushing_caps(mdsc, session, ci,
					     oldest_flush_tid);
		}
		spin_unlock(&ci->i_ceph_lock);
	}
}
static void kick_flushing_inode_caps(struct ceph_mds_client *mdsc,
				     struct ceph_mds_session *session,
				     struct inode *inode)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;

	cap = ci->i_auth_cap;
	dout("kick_flushing_inode_caps %p flushing %s\n", inode,
	     ceph_cap_string(ci->i_flushing_caps));

	if (!list_empty(&ci->i_cap_flush_list)) {
		u64 oldest_flush_tid;
		spin_lock(&mdsc->cap_dirty_lock);
		list_move_tail(&ci->i_flushing_item,
			       &cap->session->s_cap_flushing);
		oldest_flush_tid = __get_oldest_flush_tid(mdsc);
		spin_unlock(&mdsc->cap_dirty_lock);

		ci->i_ceph_flags &= ~CEPH_I_KICK_FLUSH;
		__kick_flushing_caps(mdsc, session, ci, oldest_flush_tid);
		spin_unlock(&ci->i_ceph_lock);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}
}
/*
 * Take references to capabilities we hold, so that we don't release
 * them to the MDS prematurely.
 *
 * Protected by i_ceph_lock.
 */
static void __take_cap_refs(struct ceph_inode_info *ci, int got,
			    bool snap_rwsem_locked)
{
	if (got & CEPH_CAP_PIN)
		ci->i_pin_ref++;
	if (got & CEPH_CAP_FILE_RD)
		ci->i_rd_ref++;
	if (got & CEPH_CAP_FILE_CACHE)
		ci->i_rdcache_ref++;
	if (got & CEPH_CAP_FILE_WR) {
		if (ci->i_wr_ref == 0 && !ci->i_head_snapc) {
			BUG_ON(!snap_rwsem_locked);
			ci->i_head_snapc = ceph_get_snap_context(
					ci->i_snap_realm->cached_context);
		}
		ci->i_wr_ref++;
	}
	if (got & CEPH_CAP_FILE_BUFFER) {
		if (ci->i_wb_ref == 0)
			ihold(&ci->vfs_inode);
		ci->i_wb_ref++;
		dout("__take_cap_refs %p wb %d -> %d (?)\n",
		     &ci->vfs_inode, ci->i_wb_ref-1, ci->i_wb_ref);
	}
}
/*
 * Try to grab cap references.  Specify those refs we @want, and the
 * minimal set we @need.  Also include the larger offset we are writing
 * to (when applicable), and check against max_size here as well.
 * Note that caller is responsible for ensuring max_size increases are
 * requested from the MDS.
 */
static int try_get_cap_refs(struct ceph_inode_info *ci, int need, int want,
			    loff_t endoff, bool nonblock, int *got, int *err)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	int ret = 0;
	int have, implemented;
	int file_wanted;
	bool snap_rwsem_locked = false;

	dout("get_cap_refs %p need %s want %s\n", inode,
	     ceph_cap_string(need), ceph_cap_string(want));

again:
	spin_lock(&ci->i_ceph_lock);

	/* make sure file is actually open */
	file_wanted = __ceph_caps_file_wanted(ci);
	if ((file_wanted & need) != need) {
		dout("try_get_cap_refs need %s file_wanted %s, EBADF\n",
		     ceph_cap_string(need), ceph_cap_string(file_wanted));
		*err = -EBADF;
		ret = 1;
		goto out_unlock;
	}

	/* finish pending truncate */
	while (ci->i_truncate_pending) {
		spin_unlock(&ci->i_ceph_lock);
		if (snap_rwsem_locked) {
			up_read(&mdsc->snap_rwsem);
			snap_rwsem_locked = false;
		}
		__ceph_do_pending_vmtruncate(inode);
		spin_lock(&ci->i_ceph_lock);
	}

	have = __ceph_caps_issued(ci, &implemented);

	if (have & need & CEPH_CAP_FILE_WR) {
		if (endoff >= 0 && endoff > (loff_t)ci->i_max_size) {
			dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
			     inode, endoff, ci->i_max_size);
			if (endoff > ci->i_requested_max_size) {
				*err = -EAGAIN;
				ret = 1;
			}
			goto out_unlock;
		}
		/*
		 * If a sync write is in progress, we must wait, so that we
		 * can get a final snapshot value for size+mtime.
		 */
		if (__ceph_have_pending_cap_snap(ci)) {
			dout("get_cap_refs %p cap_snap_pending\n", inode);
			goto out_unlock;
		}
	}

	if ((have & need) == need) {
		/*
		 * Look at (implemented & ~have & not) so that we keep waiting
		 * on transition from wanted -> needed caps.  This is needed
		 * for WRBUFFER|WR -> WR to avoid a new WR sync write from
		 * going before a prior buffered writeback happens.
		 */
		int not = want & ~(have & need);
		int revoking = implemented & ~have;
		dout("get_cap_refs %p have %s but not %s (revoking %s)\n",
		     inode, ceph_cap_string(have), ceph_cap_string(not),
		     ceph_cap_string(revoking));
		if ((revoking & not) == 0) {
			if (!snap_rwsem_locked &&
			    !ci->i_head_snapc &&
			    (need & CEPH_CAP_FILE_WR)) {
				if (!down_read_trylock(&mdsc->snap_rwsem)) {
					/*
					 * we can not call down_read() when
					 * task isn't in TASK_RUNNING state:
					 * fail nonblocking callers with
					 * -EAGAIN, otherwise drop i_ceph_lock,
					 * take snap_rwsem and retry.
					 */
					if (nonblock) {
						*err = -EAGAIN;
						ret = 1;
						goto out_unlock;
					}

					spin_unlock(&ci->i_ceph_lock);
					down_read(&mdsc->snap_rwsem);
					snap_rwsem_locked = true;
					goto again;
				}
				snap_rwsem_locked = true;
			}
			*got = need | (have & want);
			if ((need & CEPH_CAP_FILE_RD) &&
			    !(*got & CEPH_CAP_FILE_CACHE))
				ceph_disable_fscache_readpage(ci);
			__take_cap_refs(ci, *got, true);
			ret = 1;
		}
	} else {
		int session_readonly = false;
		if ((need & CEPH_CAP_FILE_WR) && ci->i_auth_cap) {
			struct ceph_mds_session *s = ci->i_auth_cap->session;
			spin_lock(&s->s_cap_lock);
			session_readonly = s->s_readonly;
			spin_unlock(&s->s_cap_lock);
		}
		if (session_readonly) {
			dout("get_cap_refs %p needed %s but mds%d readonly\n",
			     inode, ceph_cap_string(need), ci->i_auth_cap->mds);
			*err = -EROFS;
			ret = 1;
			goto out_unlock;
		}

		if (ci->i_ceph_flags & CEPH_I_CAP_DROPPED) {
			int mds_wanted;
			if (READ_ONCE(mdsc->fsc->mount_state) ==
			    CEPH_MOUNT_SHUTDOWN) {
				dout("get_cap_refs %p forced umount\n", inode);
				*err = -EIO;
				ret = 1;
				goto out_unlock;
			}
			mds_wanted = __ceph_caps_mds_wanted(ci, false);
			if (need & ~(mds_wanted & need)) {
				dout("get_cap_refs %p caps were dropped"
				     " (session killed?)\n", inode);
				*err = -ESTALE;
				ret = 1;
				goto out_unlock;
			}
			if (!(file_wanted & ~mds_wanted))
				ci->i_ceph_flags &= ~CEPH_I_CAP_DROPPED;
		}

		dout("get_cap_refs %p have %s needed %s\n", inode,
		     ceph_cap_string(have), ceph_cap_string(need));
	}
out_unlock:
	spin_unlock(&ci->i_ceph_lock);
	if (snap_rwsem_locked)
		up_read(&mdsc->snap_rwsem);

	dout("get_cap_refs %p ret %d got %s\n", inode,
	     ret, ceph_cap_string(*got));
	return ret;
}
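/*
 * Worked example for the "(revoking & not)" test above (illustrative
 * bit values only): a writer asks for need = Fw and want = Fb while
 * the MDS is revoking Fb, i.e. have = Fw and implemented = Fw|Fb:
 *
 *	not      = want & ~(have & need) = Fb
 *	revoking = implemented & ~have   = Fb
 *	revoking & not = Fb != 0  =>  keep waiting
 *
 * so a new WR sync write cannot start until the prior buffered
 * writeback is acked -- the WRBUFFER|WR -> WR transition described
 * in the comment above.
 */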
/*
 * Check the offset we are writing up to against our current
 * max_size.  If necessary, tell the MDS we want to write to
 * a larger offset.
 */
static void check_max_size(struct inode *inode, loff_t endoff)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int check = 0;

	/* do we need to explicitly request a larger max_size? */
	spin_lock(&ci->i_ceph_lock);
	if (endoff >= ci->i_max_size && endoff > ci->i_wanted_max_size) {
		dout("write %p at large endoff %llu, req max_size\n",
		     inode, endoff);
		ci->i_wanted_max_size = endoff;
	}
	/* duplicate ceph_check_caps()'s logic */
	if (ci->i_auth_cap &&
	    (ci->i_auth_cap->issued & CEPH_CAP_FILE_WR) &&
	    ci->i_wanted_max_size > ci->i_max_size &&
	    ci->i_wanted_max_size > ci->i_requested_max_size)
		check = 1;
	spin_unlock(&ci->i_ceph_lock);
	if (check)
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
}
int ceph_try_get_caps(struct ceph_inode_info *ci, int need, int want, int *got)
{
	int ret, err = 0;

	BUG_ON(need & ~CEPH_CAP_FILE_RD);
	BUG_ON(want & ~(CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO));
	ret = ceph_pool_perm_check(ci, need);
	if (ret < 0)
		return ret;

	ret = try_get_cap_refs(ci, need, want, 0, true, got, &err);
	if (ret) {
		if (err == -EAGAIN) {
			ret = 0;
		} else if (err < 0) {
			ret = err;
		}
	}
	return ret;
}
/*
 * Wait for caps, and take cap references.  If we can't get a WR cap
 * due to a small max_size, make sure we check_max_size (and possibly
 * ask the mds) so we don't get hung up indefinitely.
 */
int ceph_get_caps(struct ceph_inode_info *ci, int need, int want,
		  loff_t endoff, int *got, struct page **pinned_page)
{
	int _got, ret, err = 0;

	ret = ceph_pool_perm_check(ci, need);
	if (ret < 0)
		return ret;

	while (true) {
		if (endoff > 0)
			check_max_size(&ci->vfs_inode, endoff);

		err = 0;
		_got = 0;
		ret = try_get_cap_refs(ci, need, want, endoff,
				       false, &_got, &err);
		if (ret) {
			if (err == -EAGAIN)
				continue;
			if (err < 0)
				ret = err;
		} else {
			DEFINE_WAIT_FUNC(wait, woken_wake_function);
			add_wait_queue(&ci->i_cap_wq, &wait);

			while (!try_get_cap_refs(ci, need, want, endoff,
						 true, &_got, &err)) {
				if (signal_pending(current)) {
					ret = -ERESTARTSYS;
					break;
				}
				wait_woken(&wait, TASK_INTERRUPTIBLE, MAX_SCHEDULE_TIMEOUT);
			}

			remove_wait_queue(&ci->i_cap_wq, &wait);

			if (err == -EAGAIN)
				continue;
			if (err < 0)
				ret = err;
		}
		if (ret < 0) {
			if (err == -ESTALE) {
				/* session was killed, try renew caps */
				ret = ceph_renew_caps(&ci->vfs_inode);
				if (ret == 0)
					continue;
			}
			return ret;
		}

		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (_got & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)) &&
		    i_size_read(&ci->vfs_inode) > 0) {
			struct page *page =
				find_get_page(ci->vfs_inode.i_mapping, 0);
			if (page) {
				if (PageUptodate(page)) {
					*pinned_page = page;
					break;
				}
				put_page(page);
			}
			/*
			 * drop cap refs first because getattr while
			 * holding caps refs can cause deadlock.
			 */
			ceph_put_cap_refs(ci, _got);
			_got = 0;

			/*
			 * getattr request will bring inline data into
			 * page cache
			 */
			ret = __ceph_do_getattr(&ci->vfs_inode, NULL,
						CEPH_STAT_CAP_INLINE_DATA,
						true);
			if (ret < 0)
				return ret;
			continue;
		}
		break;
	}

	if ((_got & CEPH_CAP_FILE_RD) && (_got & CEPH_CAP_FILE_CACHE))
		ceph_fscache_revalidate_cookie(ci);

	*got = _got;
	return 0;
}
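/*
 * A minimal caller sketch (hypothetical helper, loosely mirroring what
 * the read path in file.c does): take caps, perform the I/O while
 * holding them, then drop the refs and any pinned inline page.
 */
#if 0
static int example_read_with_caps(struct ceph_inode_info *ci)
{
	struct page *pinned_page = NULL;
	int got = 0, ret;

	/* endoff = -1: reads don't need a max_size check */
	ret = ceph_get_caps(ci, CEPH_CAP_FILE_RD, CEPH_CAP_FILE_CACHE,
			    -1, &got, &pinned_page);
	if (ret < 0)
		return ret;

	/* ... perform the read while holding 'got' ... */

	if (pinned_page)
		put_page(pinned_page);
	ceph_put_cap_refs(ci, got);
	return 0;
}
#endif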
/*
 * Take cap refs.  Caller must already know we hold at least one ref
 * on the caps in question or we don't know this is safe.
 */
void ceph_get_cap_refs(struct ceph_inode_info *ci, int caps)
{
	spin_lock(&ci->i_ceph_lock);
	__take_cap_refs(ci, caps, false);
	spin_unlock(&ci->i_ceph_lock);
}
/*
 * drop cap_snap that is not associated with any snapshot.
 * we don't need to send FLUSHSNAP message for it.
 */
static int ceph_try_drop_cap_snap(struct ceph_inode_info *ci,
				  struct ceph_cap_snap *capsnap)
{
	if (!capsnap->need_flush &&
	    !capsnap->writing && !capsnap->dirty_pages) {
		dout("dropping cap_snap %p follows %llu\n",
		     capsnap, capsnap->follows);
		BUG_ON(capsnap->cap_flush.tid > 0);
		ceph_put_snap_context(capsnap->context);
		if (!list_is_last(&capsnap->ci_item, &ci->i_cap_snaps))
			ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;

		list_del(&capsnap->ci_item);
		ceph_put_cap_snap(capsnap);
		return 1;
	}
	return 0;
}
/*
 * Release cap refs.
 *
 * If we released the last ref on any given cap, call ceph_check_caps
 * to release (or schedule a release).
 *
 * If we are releasing a WR cap (from a sync write), finalize any affected
 * cap_snap, and wake up any waiters.
 */
void ceph_put_cap_refs(struct ceph_inode_info *ci, int had)
{
	struct inode *inode = &ci->vfs_inode;
	int last = 0, put = 0, flushsnaps = 0, wake = 0;

	spin_lock(&ci->i_ceph_lock);
	if (had & CEPH_CAP_PIN)
		--ci->i_pin_ref;
	if (had & CEPH_CAP_FILE_RD)
		if (--ci->i_rd_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_CACHE)
		if (--ci->i_rdcache_ref == 0)
			last++;
	if (had & CEPH_CAP_FILE_BUFFER) {
		if (--ci->i_wb_ref == 0) {
			last++;
			put++;
		}
		dout("put_cap_refs %p wb %d -> %d (?)\n",
		     inode, ci->i_wb_ref+1, ci->i_wb_ref);
	}
	if (had & CEPH_CAP_FILE_WR)
		if (--ci->i_wr_ref == 0) {
			last++;
			if (__ceph_have_pending_cap_snap(ci)) {
				struct ceph_cap_snap *capsnap =
					list_last_entry(&ci->i_cap_snaps,
							struct ceph_cap_snap,
							ci_item);
				capsnap->writing = 0;
				if (ceph_try_drop_cap_snap(ci, capsnap))
					put++;
				else if (__ceph_finish_cap_snap(ci, capsnap))
					flushsnaps = 1;
				wake = 1;
			}
			if (ci->i_wrbuffer_ref_head == 0 &&
			    ci->i_dirty_caps == 0 &&
			    ci->i_flushing_caps == 0) {
				BUG_ON(!ci->i_head_snapc);
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
			/* see comment in __ceph_remove_cap() */
			if (!__ceph_is_any_caps(ci) && ci->i_snap_realm)
				drop_inode_snap_realm(ci);
		}
	spin_unlock(&ci->i_ceph_lock);

	dout("put_cap_refs %p had %s%s%s\n", inode, ceph_cap_string(had),
	     last ? " last" : "", put ? " put" : "");

	if (last && !flushsnaps)
		ceph_check_caps(ci, 0, NULL);
	else if (flushsnaps)
		ceph_flush_snaps(ci, NULL);
	if (wake)
		wake_up_all(&ci->i_cap_wq);
	while (put-- > 0)
		iput(inode);
}
/*
 * Release @nr WRBUFFER refs on dirty pages for the given @snapc snap
 * context.  Adjust per-snap dirty page accounting as appropriate.
 * Once all dirty data for a cap_snap is flushed, flush snapped file
 * metadata back to the MDS.  If we dropped the last ref, call
 * ceph_check_caps.
 */
void ceph_put_wrbuffer_cap_refs(struct ceph_inode_info *ci, int nr,
				struct ceph_snap_context *snapc)
{
	struct inode *inode = &ci->vfs_inode;
	struct ceph_cap_snap *capsnap = NULL;
	int put = 0;
	bool last = false;
	bool found = false;
	bool flush_snaps = false;
	bool complete_capsnap = false;

	spin_lock(&ci->i_ceph_lock);
	ci->i_wrbuffer_ref -= nr;
	if (ci->i_wrbuffer_ref == 0) {
		last = true;
		put++;
	}

	if (ci->i_head_snapc == snapc) {
		ci->i_wrbuffer_ref_head -= nr;
		if (ci->i_wrbuffer_ref_head == 0 &&
		    ci->i_wr_ref == 0 &&
		    ci->i_dirty_caps == 0 &&
		    ci->i_flushing_caps == 0) {
			BUG_ON(!ci->i_head_snapc);
			ceph_put_snap_context(ci->i_head_snapc);
			ci->i_head_snapc = NULL;
		}
		dout("put_wrbuffer_cap_refs on %p head %d/%d -> %d/%d %s\n",
		     inode,
		     ci->i_wrbuffer_ref+nr, ci->i_wrbuffer_ref_head+nr,
		     ci->i_wrbuffer_ref, ci->i_wrbuffer_ref_head,
		     last ? " LAST" : "");
	} else {
		list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
			if (capsnap->context == snapc) {
				found = true;
				break;
			}
		}
		BUG_ON(!found);
		capsnap->dirty_pages -= nr;
		if (capsnap->dirty_pages == 0) {
			complete_capsnap = true;
			if (!capsnap->writing) {
				if (ceph_try_drop_cap_snap(ci, capsnap)) {
					put++;
				} else {
					ci->i_ceph_flags |= CEPH_I_FLUSH_SNAPS;
					flush_snaps = true;
				}
			}
		}
		dout("put_wrbuffer_cap_refs on %p cap_snap %p "
		     " snap %lld %d/%d -> %d/%d %s%s\n",
		     inode, capsnap, capsnap->context->seq,
		     ci->i_wrbuffer_ref+nr, capsnap->dirty_pages + nr,
		     ci->i_wrbuffer_ref, capsnap->dirty_pages,
		     last ? " (wrbuffer last)" : "",
		     complete_capsnap ? " (complete capsnap)" : "");
	}

	spin_unlock(&ci->i_ceph_lock);

	if (last) {
		ceph_check_caps(ci, CHECK_CAPS_AUTHONLY, NULL);
	} else if (flush_snaps) {
		ceph_flush_snaps(ci, NULL);
	}
	if (complete_capsnap)
		wake_up_all(&ci->i_cap_wq);
	while (put-- > 0)
		iput(inode);
}
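/*
 * Lifecycle sketch for WRBUFFER refs (illustrative; in the real client
 * the per-page accounting lives in the addr_ops writeback paths): a ref
 * is taken per dirty page against the snap context the page was dirtied
 * under, and dropped here when writeback of those pages completes.
 */
#if 0
/* hypothetical writeback-completion helper */
static void example_writeback_done(struct ceph_inode_info *ci,
				   struct ceph_snap_context *snapc,
				   int nr_pages)
{
	/* drop the per-page WRBUFFER refs taken when pages were dirtied */
	ceph_put_wrbuffer_cap_refs(ci, nr_pages, snapc);
}
#endif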
/*
 * Invalidate unlinked inode's aliases, so we can drop the inode ASAP.
 */
static void invalidate_aliases(struct inode *inode)
{
	struct dentry *dn, *prev = NULL;

	dout("invalidate_aliases inode %p\n", inode);
	d_prune_aliases(inode);
	/*
	 * For non-directory inode, d_find_alias() only returns
	 * hashed dentry. After calling d_invalidate(), the
	 * dentry becomes unhashed.
	 *
	 * For directory inode, d_find_alias() can return
	 * unhashed dentry. But directory inode should have
	 * one alias at most.
	 */
	while ((dn = d_find_alias(inode))) {
		if (dn == prev) {
			dput(dn);
			break;
		}
		d_invalidate(dn);
		if (prev)
			dput(prev);
		prev = dn;
	}
	if (prev)
		dput(prev);
}
/*
 * Handle a cap GRANT message from the MDS.  (Note that a GRANT may
 * actually be a revocation if it specifies a smaller cap set.)
 *
 * caller holds s_mutex and i_ceph_lock, we drop both.
 */
static void handle_cap_grant(struct ceph_mds_client *mdsc,
			     struct inode *inode, struct ceph_mds_caps *grant,
			     struct ceph_string **pns, u64 inline_version,
			     void *inline_data, u32 inline_len,
			     struct ceph_buffer *xattr_buf,
			     struct ceph_mds_session *session,
			     struct ceph_cap *cap, int issued)
	__releases(ci->i_ceph_lock)
	__releases(mdsc->snap_rwsem)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(grant->seq);
	int newcaps = le32_to_cpu(grant->caps);
	int used, wanted, dirty;
	u64 size = le64_to_cpu(grant->size);
	u64 max_size = le64_to_cpu(grant->max_size);
	struct timespec mtime, atime, ctime;
	int check_caps = 0;
	bool wake = false;
	bool writeback = false;
	bool queue_trunc = false;
	bool queue_invalidate = false;
	bool deleted_inode = false;
	bool fill_inline = false;

	dout("handle_cap_grant inode %p cap %p mds%d seq %d %s\n",
	     inode, cap, mds, seq, ceph_cap_string(newcaps));
	dout(" size %llu max_size %llu, i_size %llu\n", size, max_size,
	     inode->i_size);

	/*
	 * auth mds of the inode changed. we received the cap export message,
	 * but still haven't received the cap import message. handle_cap_export
	 * updated the new auth MDS' cap.
	 *
	 * "ceph_seq_cmp(seq, cap->seq) <= 0" means we are processing a message
	 * that was sent before the cap import message. So don't remove caps.
	 */
	if (ceph_seq_cmp(seq, cap->seq) <= 0) {
		WARN_ON(cap != ci->i_auth_cap);
		WARN_ON(cap->cap_id != le64_to_cpu(grant->cap_id));
		seq = cap->seq;
		newcaps |= cap->issued;
	}

	/*
	 * If CACHE is being revoked, and we have no dirty buffers,
	 * try to invalidate (once).  (If there are dirty buffers, we
	 * will invalidate _after_ writeback.)
	 */
	if (!S_ISDIR(inode->i_mode) && /* don't invalidate readdir cache */
	    ((cap->issued & ~newcaps) & CEPH_CAP_FILE_CACHE) &&
	    (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
	    !(ci->i_wrbuffer_ref || ci->i_wb_ref)) {
		if (try_nonblocking_invalidate(inode)) {
			/* there were locked pages.. invalidate later
			   in a separate thread. */
			if (ci->i_rdcache_revoking != ci->i_rdcache_gen) {
				queue_invalidate = true;
				ci->i_rdcache_revoking = ci->i_rdcache_gen;
			}
		}
	}

	/* side effects now are allowed */
	cap->cap_gen = session->s_cap_gen;
	cap->seq = seq;

	__check_cap_issue(ci, cap, newcaps);

	if ((newcaps & CEPH_CAP_AUTH_SHARED) &&
	    (issued & CEPH_CAP_AUTH_EXCL) == 0) {
		inode->i_mode = le32_to_cpu(grant->mode);
		inode->i_uid = make_kuid(&init_user_ns, le32_to_cpu(grant->uid));
		inode->i_gid = make_kgid(&init_user_ns, le32_to_cpu(grant->gid));
		dout("%p mode 0%o uid.gid %d.%d\n", inode, inode->i_mode,
		     from_kuid(&init_user_ns, inode->i_uid),
		     from_kgid(&init_user_ns, inode->i_gid));
	}

	if ((newcaps & CEPH_CAP_LINK_SHARED) &&
	    (issued & CEPH_CAP_LINK_EXCL) == 0) {
		set_nlink(inode, le32_to_cpu(grant->nlink));
		if (inode->i_nlink == 0 &&
		    (newcaps & (CEPH_CAP_LINK_SHARED | CEPH_CAP_LINK_EXCL)))
			deleted_inode = true;
	}

	if ((issued & CEPH_CAP_XATTR_EXCL) == 0 && grant->xattr_len) {
		int len = le32_to_cpu(grant->xattr_len);
		u64 version = le64_to_cpu(grant->xattr_version);

		if (version > ci->i_xattrs.version) {
			dout(" got new xattrs v%llu on %p len %d\n",
			     version, inode, len);
			if (ci->i_xattrs.blob)
				ceph_buffer_put(ci->i_xattrs.blob);
			ci->i_xattrs.blob = ceph_buffer_get(xattr_buf);
			ci->i_xattrs.version = version;
			ceph_forget_all_cached_acls(inode);
		}
	}

	if (newcaps & CEPH_CAP_ANY_RD) {
		/* ctime/mtime/atime? */
		ceph_decode_timespec(&mtime, &grant->mtime);
		ceph_decode_timespec(&atime, &grant->atime);
		ceph_decode_timespec(&ctime, &grant->ctime);
		ceph_fill_file_time(inode, issued,
				    le32_to_cpu(grant->time_warp_seq),
				    &ctime, &mtime, &atime);
	}

	if (newcaps & (CEPH_CAP_ANY_FILE_RD | CEPH_CAP_ANY_FILE_WR)) {
		/* file layout may have changed */
		s64 old_pool = ci->i_layout.pool_id;
		struct ceph_string *old_ns;

		ceph_file_layout_from_legacy(&ci->i_layout, &grant->layout);
		old_ns = rcu_dereference_protected(ci->i_layout.pool_ns,
					lockdep_is_held(&ci->i_ceph_lock));
		rcu_assign_pointer(ci->i_layout.pool_ns, *pns);

		if (ci->i_layout.pool_id != old_pool || *pns != old_ns)
			ci->i_ceph_flags &= ~CEPH_I_POOL_PERM;

		*pns = old_ns;

		/* size/truncate_seq? */
		queue_trunc = ceph_fill_file_size(inode, issued,
					le32_to_cpu(grant->truncate_seq),
					le64_to_cpu(grant->truncate_size),
					size);
	}

	if (ci->i_auth_cap == cap && (newcaps & CEPH_CAP_ANY_FILE_WR)) {
		if (max_size != ci->i_max_size) {
			dout("max_size %lld -> %llu\n",
			     ci->i_max_size, max_size);
			ci->i_max_size = max_size;
			if (max_size >= ci->i_wanted_max_size) {
				ci->i_wanted_max_size = 0;  /* reset */
				ci->i_requested_max_size = 0;
			}
			wake = true;
		} else if (ci->i_wanted_max_size > ci->i_max_size &&
			   ci->i_wanted_max_size > ci->i_requested_max_size) {
			/* CEPH_CAP_OP_IMPORT */
			wake = true;
		}
	}

	/* check cap bits */
	wanted = __ceph_caps_wanted(ci);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);
	dout(" my wanted = %s, used = %s, dirty %s\n",
	     ceph_cap_string(wanted),
	     ceph_cap_string(used),
	     ceph_cap_string(dirty));
	if (wanted != le32_to_cpu(grant->wanted)) {
		dout("mds wanted %s -> %s\n",
		     ceph_cap_string(le32_to_cpu(grant->wanted)),
		     ceph_cap_string(wanted));
		/* imported cap may not have correct mds_wanted */
		if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT)
			check_caps = 1;
	}

	/* revocation, grant, or no-op? */
	if (cap->issued & ~newcaps) {
		int revoking = cap->issued & ~newcaps;

		dout("revocation: %s -> %s (revoking %s)\n",
		     ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps),
		     ceph_cap_string(revoking));
		if (revoking & used & CEPH_CAP_FILE_BUFFER)
			writeback = true;  /* initiate writeback; will delay ack */
		else if (revoking == CEPH_CAP_FILE_CACHE &&
			 (newcaps & CEPH_CAP_FILE_LAZYIO) == 0 &&
			 queue_invalidate)
			;  /* do nothing yet, invalidation will be queued */
		else if (cap == ci->i_auth_cap)
			check_caps = 1;  /* check auth cap only */
		else
			check_caps = 2;  /* check all caps */
		cap->issued = newcaps;
		cap->implemented |= newcaps;
	} else if (cap->issued == newcaps) {
		dout("caps unchanged: %s -> %s\n",
		     ceph_cap_string(cap->issued), ceph_cap_string(newcaps));
	} else {
		dout("grant: %s -> %s\n", ceph_cap_string(cap->issued),
		     ceph_cap_string(newcaps));
		/* non-auth MDS is revoking the newly granted caps? */
		if (cap == ci->i_auth_cap &&
		    __ceph_caps_revoking_other(ci, cap, newcaps))
			check_caps = 2;

		cap->issued = newcaps;
		cap->implemented |= newcaps; /* add bits only, to
					      * avoid stepping on a
					      * pending revocation */
		wake = true;
	}
	BUG_ON(cap->issued & ~cap->implemented);

	if (inline_version > 0 && inline_version >= ci->i_inline_version) {
		ci->i_inline_version = inline_version;
		if (ci->i_inline_version != CEPH_INLINE_NONE &&
		    (newcaps & (CEPH_CAP_FILE_CACHE|CEPH_CAP_FILE_LAZYIO)))
			fill_inline = true;
	}

	if (le32_to_cpu(grant->op) == CEPH_CAP_OP_IMPORT) {
		if (newcaps & ~issued)
			wake = true;
		kick_flushing_inode_caps(mdsc, session, inode);
		up_read(&mdsc->snap_rwsem);
	} else {
		spin_unlock(&ci->i_ceph_lock);
	}

	if (fill_inline)
		ceph_fill_inline_data(inode, NULL, inline_data, inline_len);

	if (queue_trunc)
		ceph_queue_vmtruncate(inode);

	if (writeback)
		/*
		 * queue inode for writeback: we can't actually call
		 * filemap_write_and_wait, etc. from message handler
		 * context.
		 */
		ceph_queue_writeback(inode);
	if (queue_invalidate)
		ceph_queue_invalidate(inode);
	if (deleted_inode)
		invalidate_aliases(inode);
	if (wake)
		wake_up_all(&ci->i_cap_wq);

	if (check_caps == 1)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_AUTHONLY,
				session);
	else if (check_caps == 2)
		ceph_check_caps(ci, CHECK_CAPS_NODELAY, session);
	else
		mutex_unlock(&session->s_mutex);
}
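/*
 * Worked example of the revocation triage above (illustrative bits
 * only): with cap->issued = Fc|Fr|Fw|Fb and newcaps = Fc|Fr,
 * revoking = issued & ~newcaps = Fw|Fb.  If buffered writes are in
 * flight (used & Fb), writeback is queued and the ack is delayed
 * until the dirty data is flushed; otherwise ceph_check_caps() can
 * ack the revocation immediately.
 */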
/*
 * Handle FLUSH_ACK from MDS, indicating that metadata we sent to the
 * MDS has been safely committed.
 */
static void handle_cap_flush_ack(struct inode *inode, u64 flush_tid,
				 struct ceph_mds_caps *m,
				 struct ceph_mds_session *session,
				 struct ceph_cap *cap)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	struct ceph_cap_flush *cf, *tmp_cf;
	LIST_HEAD(to_remove);
	unsigned seq = le32_to_cpu(m->seq);
	int dirty = le32_to_cpu(m->dirty);
	int cleaned = 0;
	bool drop = false;
	bool wake_ci = false;
	bool wake_mdsc = false;

	list_for_each_entry_safe(cf, tmp_cf, &ci->i_cap_flush_list, i_list) {
		if (cf->tid == flush_tid)
			cleaned = cf->caps;
		if (cf->caps == 0) /* capsnap */
			continue;
		if (cf->tid <= flush_tid) {
			if (__finish_cap_flush(NULL, ci, cf))
				wake_ci = true;
			list_add_tail(&cf->i_list, &to_remove);
		} else {
			cleaned &= ~cf->caps;
			if (!cleaned)
				break;
		}
	}

	dout("handle_cap_flush_ack inode %p mds%d seq %d on %s cleaned %s,"
	     " flushing %s -> %s\n",
	     inode, session->s_mds, seq, ceph_cap_string(dirty),
	     ceph_cap_string(cleaned), ceph_cap_string(ci->i_flushing_caps),
	     ceph_cap_string(ci->i_flushing_caps & ~cleaned));

	if (list_empty(&to_remove) && !cleaned)
		goto out;

	ci->i_flushing_caps &= ~cleaned;

	spin_lock(&mdsc->cap_dirty_lock);

	list_for_each_entry(cf, &to_remove, i_list) {
		if (__finish_cap_flush(mdsc, NULL, cf))
			wake_mdsc = true;
	}

	if (ci->i_flushing_caps == 0) {
		if (list_empty(&ci->i_cap_flush_list)) {
			list_del_init(&ci->i_flushing_item);
			if (!list_empty(&session->s_cap_flushing)) {
				dout(" mds%d still flushing cap on %p\n",
				     session->s_mds,
				     &list_first_entry(&session->s_cap_flushing,
						struct ceph_inode_info,
						i_flushing_item)->vfs_inode);
			}
		}
		mdsc->num_cap_flushing--;
		dout(" inode %p now !flushing\n", inode);

		if (ci->i_dirty_caps == 0) {
			dout(" inode %p now clean\n", inode);
			BUG_ON(!list_empty(&ci->i_dirty_item));
			drop = true;
			if (ci->i_wr_ref == 0 &&
			    ci->i_wrbuffer_ref_head == 0) {
				BUG_ON(!ci->i_head_snapc);
				ceph_put_snap_context(ci->i_head_snapc);
				ci->i_head_snapc = NULL;
			}
		} else {
			BUG_ON(list_empty(&ci->i_dirty_item));
		}
	}
	spin_unlock(&mdsc->cap_dirty_lock);

out:
	spin_unlock(&ci->i_ceph_lock);

	while (!list_empty(&to_remove)) {
		cf = list_first_entry(&to_remove,
				      struct ceph_cap_flush, i_list);
		list_del(&cf->i_list);
		ceph_free_cap_flush(cf);
	}

	if (wake_ci)
		wake_up_all(&ci->i_cap_wq);
	if (wake_mdsc)
		wake_up_all(&mdsc->cap_flushing_wq);
	if (drop)
		iput(inode);
}
/*
 * Handle FLUSHSNAP_ACK.  MDS has flushed snap data to disk and we can
 * throw away our cap_snap.
 *
 * Caller hold s_mutex.
 */
static void handle_cap_flushsnap_ack(struct inode *inode, u64 flush_tid,
				     struct ceph_mds_caps *m,
				     struct ceph_mds_session *session)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_mds_client *mdsc = ceph_sb_to_client(inode->i_sb)->mdsc;
	u64 follows = le64_to_cpu(m->snap_follows);
	struct ceph_cap_snap *capsnap;
	bool flushed = false;
	bool wake_ci = false;
	bool wake_mdsc = false;

	dout("handle_cap_flushsnap_ack inode %p ci %p mds%d follows %lld\n",
	     inode, ci, session->s_mds, follows);

	spin_lock(&ci->i_ceph_lock);
	list_for_each_entry(capsnap, &ci->i_cap_snaps, ci_item) {
		if (capsnap->follows == follows) {
			if (capsnap->cap_flush.tid != flush_tid) {
				dout(" cap_snap %p follows %lld tid %lld !="
				     " %lld\n", capsnap, follows,
				     flush_tid, capsnap->cap_flush.tid);
				break;
			}
			flushed = true;
			break;
		} else {
			dout(" skipping cap_snap %p follows %lld\n",
			     capsnap, capsnap->follows);
		}
	}
	if (flushed) {
		WARN_ON(capsnap->dirty_pages || capsnap->writing);
		dout(" removing %p cap_snap %p follows %lld\n",
		     inode, capsnap, follows);
		list_del(&capsnap->ci_item);
		if (__finish_cap_flush(NULL, ci, &capsnap->cap_flush))
			wake_ci = true;

		spin_lock(&mdsc->cap_dirty_lock);

		if (list_empty(&ci->i_cap_flush_list))
			list_del_init(&ci->i_flushing_item);

		if (__finish_cap_flush(mdsc, NULL, &capsnap->cap_flush))
			wake_mdsc = true;

		spin_unlock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&ci->i_ceph_lock);
	if (flushed) {
		ceph_put_snap_context(capsnap->context);
		ceph_put_cap_snap(capsnap);
		if (wake_ci)
			wake_up_all(&ci->i_cap_wq);
		if (wake_mdsc)
			wake_up_all(&mdsc->cap_flushing_wq);
		iput(inode);
	}
}
/*
 * Handle TRUNC from MDS, indicating file truncation.
 *
 * caller hold s_mutex.
 */
static void handle_cap_trunc(struct inode *inode,
			     struct ceph_mds_caps *trunc,
			     struct ceph_mds_session *session)
	__releases(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	int mds = session->s_mds;
	int seq = le32_to_cpu(trunc->seq);
	u32 truncate_seq = le32_to_cpu(trunc->truncate_seq);
	u64 truncate_size = le64_to_cpu(trunc->truncate_size);
	u64 size = le64_to_cpu(trunc->size);
	int implemented = 0;
	int dirty = __ceph_caps_dirty(ci);
	int issued = __ceph_caps_issued(ceph_inode(inode), &implemented);
	int queue_trunc = 0;

	issued |= implemented | dirty;

	dout("handle_cap_trunc inode %p mds%d seq %d to %lld seq %d\n",
	     inode, mds, seq, truncate_size, truncate_seq);
	queue_trunc = ceph_fill_file_size(inode, issued,
					  truncate_seq, truncate_size, size);
	spin_unlock(&ci->i_ceph_lock);

	if (queue_trunc)
		ceph_queue_vmtruncate(inode);
}
/*
 * Handle EXPORT from MDS.  Cap is being migrated _from_ this mds to a
 * different one.  If we are the most recent migration we've seen (as
 * indicated by mseq), make note of the migrating cap bits for the
 * duration (until we see the corresponding IMPORT).
 *
 * caller holds s_mutex
 */
static void handle_cap_export(struct inode *inode, struct ceph_mds_caps *ex,
			      struct ceph_mds_cap_peer *ph,
			      struct ceph_mds_session *session)
{
	struct ceph_mds_client *mdsc = ceph_inode_to_client(inode)->mdsc;
	struct ceph_mds_session *tsession = NULL;
	struct ceph_cap *cap, *tcap, *new_cap = NULL;
	struct ceph_inode_info *ci = ceph_inode(inode);
	u64 t_cap_id;
	unsigned mseq = le32_to_cpu(ex->migrate_seq);
	unsigned t_seq, t_mseq;
	int target, issued;
	int mds = session->s_mds;

	if (ph) {
		t_cap_id = le64_to_cpu(ph->cap_id);
		t_seq = le32_to_cpu(ph->seq);
		t_mseq = le32_to_cpu(ph->mseq);
		target = le32_to_cpu(ph->mds);
	} else {
		t_cap_id = t_seq = t_mseq = 0;
		target = -1;
	}

	dout("handle_cap_export inode %p ci %p mds%d mseq %d target %d\n",
	     inode, ci, mds, mseq, target);
retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap || cap->cap_id != le64_to_cpu(ex->cap_id))
		goto out_unlock;

	if (target < 0) {
		__ceph_remove_cap(cap, false);
		if (!ci->i_auth_cap)
			ci->i_ceph_flags |= CEPH_I_CAP_DROPPED;
		goto out_unlock;
	}

	/*
	 * now we know we haven't received the cap import message yet
	 * because the exported cap still exist.
	 */

	issued = cap->issued;
	WARN_ON(issued != cap->implemented);

	tcap = __get_cap_for_mds(ci, target);
	if (tcap) {
		/* already have caps from the target */
		if (tcap->cap_id == t_cap_id &&
		    ceph_seq_cmp(tcap->seq, t_seq) < 0) {
			dout(" updating import cap %p mds%d\n", tcap, target);
			tcap->cap_id = t_cap_id;
			tcap->seq = t_seq - 1;
			tcap->issue_seq = t_seq - 1;
			tcap->mseq = t_mseq;
			tcap->issued |= issued;
			tcap->implemented |= issued;
			if (cap == ci->i_auth_cap)
				ci->i_auth_cap = tcap;

			if (!list_empty(&ci->i_cap_flush_list) &&
			    ci->i_auth_cap == tcap) {
				spin_lock(&mdsc->cap_dirty_lock);
				list_move_tail(&ci->i_flushing_item,
					       &tcap->session->s_cap_flushing);
				spin_unlock(&mdsc->cap_dirty_lock);
			}
		}
		__ceph_remove_cap(cap, false);
		goto out_unlock;
	} else if (tsession) {
		/* add placeholder for the export target */
		int flag = (cap == ci->i_auth_cap) ? CEPH_CAP_FLAG_AUTH : 0;
		tcap = new_cap;
		ceph_add_cap(inode, tsession, t_cap_id, -1, issued, 0,
			     t_seq - 1, t_mseq, (u64)-1, flag, &new_cap);

		if (!list_empty(&ci->i_cap_flush_list) &&
		    ci->i_auth_cap == tcap) {
			spin_lock(&mdsc->cap_dirty_lock);
			list_move_tail(&ci->i_flushing_item,
				       &tcap->session->s_cap_flushing);
			spin_unlock(&mdsc->cap_dirty_lock);
		}

		__ceph_remove_cap(cap, false);
		goto out_unlock;
	}

	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&session->s_mutex);

	/* open target session */
	tsession = ceph_mdsc_open_export_target_session(mdsc, target);
	if (!IS_ERR(tsession)) {
		if (mds > target) {
			mutex_lock(&session->s_mutex);
			mutex_lock_nested(&tsession->s_mutex,
					  SINGLE_DEPTH_NESTING);
		} else {
			mutex_lock(&tsession->s_mutex);
			mutex_lock_nested(&session->s_mutex,
					  SINGLE_DEPTH_NESTING);
		}
		new_cap = ceph_get_cap(mdsc, NULL);
	} else {
		WARN_ON(1);
		tsession = NULL;
		target = -1;
	}
	goto retry;

out_unlock:
	spin_unlock(&ci->i_ceph_lock);
	mutex_unlock(&session->s_mutex);
	if (tsession) {
		mutex_unlock(&tsession->s_mutex);
		ceph_put_mds_session(tsession);
	}
	if (new_cap)
		ceph_put_cap(mdsc, new_cap);
}
/*
 * Handle cap IMPORT.
 *
 * caller holds s_mutex. acquires i_ceph_lock
 */
static void handle_cap_import(struct ceph_mds_client *mdsc,
			      struct inode *inode, struct ceph_mds_caps *im,
			      struct ceph_mds_cap_peer *ph,
			      struct ceph_mds_session *session,
			      struct ceph_cap **target_cap, int *old_issued)
	__acquires(ci->i_ceph_lock)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap, *ocap, *new_cap = NULL;
	int mds = session->s_mds;
	int issued;
	unsigned caps = le32_to_cpu(im->caps);
	unsigned wanted = le32_to_cpu(im->wanted);
	unsigned seq = le32_to_cpu(im->seq);
	unsigned mseq = le32_to_cpu(im->migrate_seq);
	u64 realmino = le64_to_cpu(im->realm);
	u64 cap_id = le64_to_cpu(im->cap_id);
	u64 p_cap_id;
	int peer;

	if (ph) {
		p_cap_id = le64_to_cpu(ph->cap_id);
		peer = le32_to_cpu(ph->mds);
	} else {
		p_cap_id = 0;
		peer = -1;
	}

	dout("handle_cap_import inode %p ci %p mds%d mseq %d peer %d\n",
	     inode, ci, mds, mseq, peer);

retry:
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ci, mds);
	if (!cap) {
		if (!new_cap) {
			spin_unlock(&ci->i_ceph_lock);
			new_cap = ceph_get_cap(mdsc, NULL);
			goto retry;
		}
		cap = new_cap;
	} else {
		if (new_cap) {
			ceph_put_cap(mdsc, new_cap);
			new_cap = NULL;
		}
	}

	__ceph_caps_issued(ci, &issued);
	issued |= __ceph_caps_dirty(ci);

	ceph_add_cap(inode, session, cap_id, -1, caps, wanted, seq, mseq,
		     realmino, CEPH_CAP_FLAG_AUTH, &new_cap);

	ocap = peer >= 0 ? __get_cap_for_mds(ci, peer) : NULL;
	if (ocap && ocap->cap_id == p_cap_id) {
		dout(" remove export cap %p mds%d flags %d\n",
		     ocap, peer, ph->flags);
		if ((ph->flags & CEPH_CAP_FLAG_AUTH) &&
		    (ocap->seq != le32_to_cpu(ph->seq) ||
		     ocap->mseq != le32_to_cpu(ph->mseq))) {
			pr_err("handle_cap_import: mismatched seq/mseq: "
			       "ino (%llx.%llx) mds%d seq %d mseq %d "
			       "importer mds%d has peer seq %d mseq %d\n",
			       ceph_vinop(inode), peer, ocap->seq,
			       ocap->mseq, mds, le32_to_cpu(ph->seq),
			       le32_to_cpu(ph->mseq));
		}
		__ceph_remove_cap(ocap, (ph->flags & CEPH_CAP_FLAG_RELEASE));
	}

	/* make sure we re-request max_size, if necessary */
	ci->i_requested_max_size = 0;

	*old_issued = issued;
	*target_cap = cap;
}
/*
 * Handle a caps message from the MDS.
 *
 * Identify the appropriate session, inode, and call the right handler
 * based on the cap op.
 */
void ceph_handle_caps(struct ceph_mds_session *session,
		      struct ceph_msg *msg)
{
	struct ceph_mds_client *mdsc = session->s_mdsc;
	struct super_block *sb = mdsc->fsc->sb;
	struct inode *inode;
	struct ceph_inode_info *ci;
	struct ceph_cap *cap;
	struct ceph_mds_caps *h;
	struct ceph_mds_cap_peer *peer = NULL;
	struct ceph_snap_realm *realm = NULL;
	struct ceph_string *pool_ns = NULL;
	int mds = session->s_mds;
	int op, issued;
	u32 seq, mseq;
	struct ceph_vino vino;
	u64 tid;
	u64 inline_version = 0;
	void *inline_data = NULL;
	u32  inline_len = 0;
	void *snaptrace;
	size_t snaptrace_len;
	void *p, *end;

	dout("handle_caps from mds%d\n", mds);

	/* decode */
	end = msg->front.iov_base + msg->front.iov_len;
	tid = le64_to_cpu(msg->hdr.tid);
	if (msg->front.iov_len < sizeof(*h))
		goto bad;
	h = msg->front.iov_base;
	op = le32_to_cpu(h->op);
	vino.ino = le64_to_cpu(h->ino);
	vino.snap = CEPH_NOSNAP;
	seq = le32_to_cpu(h->seq);
	mseq = le32_to_cpu(h->migrate_seq);

	snaptrace = h + 1;
	snaptrace_len = le32_to_cpu(h->snap_trace_len);
	p = snaptrace + snaptrace_len;

	if (le16_to_cpu(msg->hdr.version) >= 2) {
		u32 flock_len;
		ceph_decode_32_safe(&p, end, flock_len, bad);
		if (p + flock_len > end)
			goto bad;
		p += flock_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 3) {
		if (op == CEPH_CAP_OP_IMPORT) {
			if (p + sizeof(*peer) > end)
				goto bad;
			peer = p;
			p += sizeof(*peer);
		} else if (op == CEPH_CAP_OP_EXPORT) {
			/* recorded in unused fields */
			peer = (void *)&h->size;
		}
	}

	if (le16_to_cpu(msg->hdr.version) >= 4) {
		ceph_decode_64_safe(&p, end, inline_version, bad);
		ceph_decode_32_safe(&p, end, inline_len, bad);
		if (p + inline_len > end)
			goto bad;
		inline_data = p;
		p += inline_len;
	}

	if (le16_to_cpu(msg->hdr.version) >= 5) {
		struct ceph_osd_client *osdc = &mdsc->fsc->client->osdc;
		u32 epoch_barrier;

		ceph_decode_32_safe(&p, end, epoch_barrier, bad);
		ceph_osdc_update_epoch_barrier(osdc, epoch_barrier);
	}

	if (le16_to_cpu(msg->hdr.version) >= 8) {
		u64 flush_tid;
		u32 caller_uid, caller_gid;
		u32 pool_ns_len;

		/* version >= 6 */
		ceph_decode_64_safe(&p, end, flush_tid, bad);
		/* version >= 7 */
		ceph_decode_32_safe(&p, end, caller_uid, bad);
		ceph_decode_32_safe(&p, end, caller_gid, bad);
		/* version >= 8 */
		ceph_decode_32_safe(&p, end, pool_ns_len, bad);
		if (pool_ns_len > 0) {
			ceph_decode_need(&p, end, pool_ns_len, bad);
			pool_ns = ceph_find_or_create_string(p, pool_ns_len);
			p += pool_ns_len;
		}
	}

	/* lookup ino */
	inode = ceph_find_inode(sb, vino);
	ci = ceph_inode(inode);
	dout(" op %s ino %llx.%llx inode %p\n", ceph_cap_op_name(op), vino.ino,
	     vino.snap, inode);

	mutex_lock(&session->s_mutex);
	session->s_seq++;
	dout(" mds%d seq %lld cap seq %u\n", session->s_mds, session->s_seq,
	     (unsigned)seq);

	if (!inode) {
		dout(" i don't have ino %llx\n", vino.ino);

		if (op == CEPH_CAP_OP_IMPORT) {
			cap = ceph_get_cap(mdsc, NULL);
			cap->cap_ino = vino.ino;
			cap->queue_release = 1;
			cap->cap_id = le64_to_cpu(h->cap_id);
			cap->mseq = mseq;
			cap->seq = seq;
			cap->issue_seq = seq;
			spin_lock(&session->s_cap_lock);
			list_add_tail(&cap->session_caps,
					&session->s_cap_releases);
			session->s_num_cap_releases++;
			spin_unlock(&session->s_cap_lock);
		}
		goto flush_cap_releases;
	}

	/* these will work even if we don't have a cap yet */
	switch (op) {
	case CEPH_CAP_OP_FLUSHSNAP_ACK:
		handle_cap_flushsnap_ack(inode, tid, h, session);
		goto done;

	case CEPH_CAP_OP_EXPORT:
		handle_cap_export(inode, h, peer, session);
		goto done_unlocked;

	case CEPH_CAP_OP_IMPORT:
		realm = NULL;
		if (snaptrace_len) {
			down_write(&mdsc->snap_rwsem);
			ceph_update_snap_trace(mdsc, snaptrace,
					       snaptrace + snaptrace_len,
					       false, &realm);
			downgrade_write(&mdsc->snap_rwsem);
		} else {
			down_read(&mdsc->snap_rwsem);
		}
		handle_cap_import(mdsc, inode, h, peer, session,
				  &cap, &issued);
		handle_cap_grant(mdsc, inode, h, &pool_ns,
				 inline_version, inline_data, inline_len,
				 msg->middle, session, cap, issued);
		if (realm)
			ceph_put_snap_realm(mdsc, realm);
		goto done_unlocked;
	}

	/* the rest require a cap */
	spin_lock(&ci->i_ceph_lock);
	cap = __get_cap_for_mds(ceph_inode(inode), mds);
	if (!cap) {
		dout(" no cap on %p ino %llx.%llx from mds%d\n",
		     inode, ceph_ino(inode), ceph_snap(inode), mds);
		spin_unlock(&ci->i_ceph_lock);
		goto flush_cap_releases;
	}

	/* note that each of these drops i_ceph_lock for us */
	switch (op) {
	case CEPH_CAP_OP_REVOKE:
	case CEPH_CAP_OP_GRANT:
		__ceph_caps_issued(ci, &issued);
		issued |= __ceph_caps_dirty(ci);
		handle_cap_grant(mdsc, inode, h, &pool_ns,
				 inline_version, inline_data, inline_len,
				 msg->middle, session, cap, issued);
		goto done_unlocked;

	case CEPH_CAP_OP_FLUSH_ACK:
		handle_cap_flush_ack(inode, tid, h, session, cap);
		break;

	case CEPH_CAP_OP_TRUNC:
		handle_cap_trunc(inode, h, session);
		break;

	default:
		spin_unlock(&ci->i_ceph_lock);
		pr_err("ceph_handle_caps: unknown cap op %d %s\n", op,
		       ceph_cap_op_name(op));
	}

	goto done;

flush_cap_releases:
	/*
	 * send any cap release message to try to move things
	 * along for the mds (who clearly thinks we still have this
	 * cap).
	 */
	ceph_send_cap_releases(mdsc, session);

done:
	mutex_unlock(&session->s_mutex);
done_unlocked:
	iput(inode);
	ceph_put_string(pool_ns);
	return;

bad:
	pr_err("ceph_handle_caps: corrupt message\n");
	ceph_msg_dump(msg);
	return;
}
/*
 * Delayed work handler to process end of delayed cap release LRU list.
 */
void ceph_check_delayed_caps(struct ceph_mds_client *mdsc)
{
	struct inode *inode;
	struct ceph_inode_info *ci;
	int flags = CHECK_CAPS_NODELAY;

	dout("check_delayed_caps\n");
	while (1) {
		spin_lock(&mdsc->cap_delay_lock);
		if (list_empty(&mdsc->cap_delay_list))
			break;
		ci = list_first_entry(&mdsc->cap_delay_list,
				      struct ceph_inode_info,
				      i_cap_delay_list);
		if ((ci->i_ceph_flags & CEPH_I_FLUSH) == 0 &&
		    time_before(jiffies, ci->i_hold_caps_max))
			break;
		list_del_init(&ci->i_cap_delay_list);

		inode = igrab(&ci->vfs_inode);
		spin_unlock(&mdsc->cap_delay_lock);

		if (inode) {
			dout("check_delayed_caps on %p\n", inode);
			ceph_check_caps(ci, flags, NULL);
			iput(inode);
		}
	}
	spin_unlock(&mdsc->cap_delay_lock);
}
/*
 * Flush all dirty caps to the mds
 */
void ceph_flush_dirty_caps(struct ceph_mds_client *mdsc)
{
	struct ceph_inode_info *ci;
	struct inode *inode;

	dout("flush_dirty_caps\n");
	spin_lock(&mdsc->cap_dirty_lock);
	while (!list_empty(&mdsc->cap_dirty)) {
		ci = list_first_entry(&mdsc->cap_dirty, struct ceph_inode_info,
				      i_dirty_item);
		inode = &ci->vfs_inode;
		ihold(inode);
		dout("flush_dirty_caps %p\n", inode);
		spin_unlock(&mdsc->cap_dirty_lock);
		ceph_check_caps(ci, CHECK_CAPS_NODELAY|CHECK_CAPS_FLUSH, NULL);
		iput(inode);
		spin_lock(&mdsc->cap_dirty_lock);
	}
	spin_unlock(&mdsc->cap_dirty_lock);
	dout("flush_dirty_caps done\n");
}
void __ceph_get_fmode(struct ceph_inode_info *ci, int fmode)
{
	int i;
	int bits = (fmode << 1) | 1;
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (bits & (1 << i))
			ci->i_nr_by_mode[i]++;
	}
}
/*
 * Drop open file reference.  If we were the last open file,
 * we may need to release capabilities to the MDS (or schedule
 * their delayed release).
 */
void ceph_put_fmode(struct ceph_inode_info *ci, int fmode)
{
	int i, last = 0;
	int bits = (fmode << 1) | 1;
	spin_lock(&ci->i_ceph_lock);
	for (i = 0; i < CEPH_FILE_MODE_BITS; i++) {
		if (bits & (1 << i)) {
			BUG_ON(ci->i_nr_by_mode[i] == 0);
			if (--ci->i_nr_by_mode[i] == 0)
				last++;
		}
	}
	dout("put_fmode %p fmode %d {%d,%d,%d,%d}\n",
	     &ci->vfs_inode, fmode,
	     ci->i_nr_by_mode[0], ci->i_nr_by_mode[1],
	     ci->i_nr_by_mode[2], ci->i_nr_by_mode[3]);
	spin_unlock(&ci->i_ceph_lock);

	if (last && ci->i_vino.snap == CEPH_NOSNAP)
		ceph_check_caps(ci, 0, NULL);
}
/*
 * Helpers for embedding cap and dentry lease releases into mds
 * requests.
 *
 * @force is used by dentry_release (below) to force inclusion of a
 * record for the directory inode, even when there aren't any caps to
 * drop.
 */
int ceph_encode_inode_release(void **p, struct inode *inode,
			      int mds, int drop, int unless, int force)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_cap *cap;
	struct ceph_mds_request_release *rel = *p;
	int used, dirty;
	int ret = 0;

	spin_lock(&ci->i_ceph_lock);
	used = __ceph_caps_used(ci);
	dirty = __ceph_caps_dirty(ci);

	dout("encode_inode_release %p mds%d used|dirty %s drop %s unless %s\n",
	     inode, mds, ceph_cap_string(used|dirty), ceph_cap_string(drop),
	     ceph_cap_string(unless));

	/* only drop unused, clean caps */
	drop &= ~(used | dirty);

	cap = __get_cap_for_mds(ci, mds);
	if (cap && __cap_is_valid(cap)) {
		if (force ||
		    ((cap->issued & drop) &&
		     (cap->issued & unless) == 0)) {
			if ((cap->issued & drop) &&
			    (cap->issued & unless) == 0) {
				int wanted = __ceph_caps_wanted(ci);
				if ((ci->i_ceph_flags & CEPH_I_NODELAY) == 0)
					wanted |= cap->mds_wanted;
				dout("encode_inode_release %p cap %p "
				     "%s -> %s, wanted %s -> %s\n", inode, cap,
				     ceph_cap_string(cap->issued),
				     ceph_cap_string(cap->issued & ~drop),
				     ceph_cap_string(cap->mds_wanted),
				     ceph_cap_string(wanted));

				cap->issued &= ~drop;
				cap->implemented &= ~drop;
				cap->mds_wanted = wanted;
			} else {
				dout("encode_inode_release %p cap %p %s"
				     " (force)\n", inode, cap,
				     ceph_cap_string(cap->issued));
			}

			rel->ino = cpu_to_le64(ceph_ino(inode));
			rel->cap_id = cpu_to_le64(cap->cap_id);
			rel->seq = cpu_to_le32(cap->seq);
			rel->issue_seq = cpu_to_le32(cap->issue_seq);
			rel->mseq = cpu_to_le32(cap->mseq);
			rel->caps = cpu_to_le32(cap->implemented);
			rel->wanted = cpu_to_le32(cap->mds_wanted);
			rel->dname_len = 0;
			rel->dname_seq = 0;
			*p += sizeof(*rel);
			ret = 1;
		} else {
			dout("encode_inode_release %p cap %p %s\n",
			     inode, cap, ceph_cap_string(cap->issued));
		}
	}
	spin_unlock(&ci->i_ceph_lock);
	return ret;
}
int ceph_encode_dentry_release(void **p, struct dentry *dentry,
			       struct inode *dir,
			       int mds, int drop, int unless)
{
	struct dentry *parent = NULL;
	struct ceph_mds_request_release *rel = *p;
	struct ceph_dentry_info *di = ceph_dentry(dentry);
	int force = 0;
	int ret;

	/*
	 * force a record for the directory caps if we have a dentry lease.
	 * this is racy (can't take i_ceph_lock and d_lock together), but it
	 * doesn't have to be perfect; the mds will revoke anything we don't
	 * release.
	 */
	spin_lock(&dentry->d_lock);
	if (di->lease_session && di->lease_session->s_mds == mds)
		force = 1;
	if (!dir) {
		parent = dget(dentry->d_parent);
		dir = d_inode(parent);
	}
	spin_unlock(&dentry->d_lock);

	ret = ceph_encode_inode_release(p, dir, mds, drop, unless, force);
	dput(parent);

	spin_lock(&dentry->d_lock);
	if (ret && di->lease_session && di->lease_session->s_mds == mds) {
		dout("encode_dentry_release %p mds%d seq %d\n",
		     dentry, mds, (int)di->lease_seq);
		rel->dname_len = cpu_to_le32(dentry->d_name.len);
		memcpy(*p, dentry->d_name.name, dentry->d_name.len);
		*p += dentry->d_name.len;
		rel->dname_seq = cpu_to_le32(di->lease_seq);
		__ceph_mdsc_drop_dentry_lease(dentry);
	}
	spin_unlock(&dentry->d_lock);
	return ret;
}
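/*
 * A minimal sketch of how a request builder might use the release
 * encoders above (hypothetical helper; real callers live in the MDS
 * request paths and reserve buffer space beforehand). The drop/unless
 * pair here mimics unlink-style callers: drop the directory's FILE
 * shared cap unless FILE exclusive is issued.
 */
#if 0
static int example_encode_releases(void **p, struct dentry *dentry,
				   struct inode *dir, int mds)
{
	return ceph_encode_dentry_release(p, dentry, dir, mds,
					  CEPH_CAP_FILE_SHARED,
					  CEPH_CAP_FILE_EXCL);
}
#endif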