/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY	NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS	12
#define NFSD_FILE_HASH_SIZE	(1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY	(2 * HZ)

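/*
 * Example (illustrative note, summarizing the lookups below, not original
 * commentary): every caller derives its bucket the same way, so with
 * NFSD_FILE_HASH_BITS == 12 there are 4096 hash chains:
 *
 *	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
 *	nfb = &nfsd_file_hashtbl[hashval];
 */
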
#define NFSD_FILE_SHUTDOWN	(1)
#define NFSD_FILE_LRU_THRESHOLD	(4096UL)
#define NFSD_FILE_LRU_LIMIT	(NFSD_FILE_LRU_THRESHOLD << 2)

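/*
 * How the two watermarks are used (summary of the code below, not original
 * commentary): once nfsd_filecache_count passes NFSD_FILE_LRU_THRESHOLD,
 * inserting a new entry in nfsd_file_acquire() triggers a garbage-collection
 * pass; once it passes NFSD_FILE_LRU_LIMIT (4 * threshold), nfsd_file_put()
 * also triggers one, so heavy put traffic helps shrink the cache.
 */
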
/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK	(NFSD_MAY_READ|NFSD_MAY_WRITE)

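/*
 * Example (illustrative): an entry opened for read+write has
 * nf_may == (NFSD_MAY_READ|NFSD_MAY_WRITE) and can also satisfy a later
 * read-only request, since nfsd_file_find_locked() only requires
 *
 *	(need & nf->nf_may) == need
 *
 * where "need" is the caller's may_flags masked with NFSD_FILE_MAY_MASK.
 */
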
struct nfsd_fcache_bucket {
	struct hlist_head	nfb_head;
	spinlock_t		nfb_lock;
	unsigned int		nfb_count;
	unsigned int		nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

struct nfsd_fcache_disposal {
	struct list_head list;
	struct work_struct work;
	struct net *net;
	spinlock_t lock;
	struct list_head freeme;
	struct rcu_head rcu;
};

struct workqueue_struct *nfsd_filecache_wq __read_mostly;

static struct kmem_cache		*nfsd_file_slab;
static struct kmem_cache		*nfsd_file_mark_slab;
static struct nfsd_fcache_bucket	*nfsd_file_hashtbl;
static struct list_lru			nfsd_file_lru;
static long				nfsd_file_lru_flags;
static struct fsnotify_group		*nfsd_file_fsnotify_group;
static atomic_long_t			nfsd_filecache_count;
static struct delayed_work		nfsd_filecache_laundrette;
static DEFINE_SPINLOCK(laundrette_lock);
static LIST_HEAD(laundrettes);

static void nfsd_file_gc(void);

static void
nfsd_file_schedule_laundrette(void)
{
	long count = atomic_long_read(&nfsd_filecache_count);

	if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
		return;

	queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
			   NFSD_LAUNDRETTE_DELAY);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
	struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

	put_cred(nf->nf_cred);
	kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
	struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
						  nfm_mark);

	kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
	if (!atomic_inc_not_zero(&nfm->nfm_ref))
		return NULL;
	return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
	if (atomic_dec_and_test(&nfm->nfm_ref)) {
		fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
		fsnotify_put_mark(&nfm->nfm_mark);
	}
}

static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
	int			err;
	struct fsnotify_mark	*mark;
	struct nfsd_file_mark	*nfm = NULL, *new;
	struct inode *inode = nf->nf_inode;

	do {
		mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
		mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
					  nfsd_file_fsnotify_group);
		if (mark) {
			nfm = nfsd_file_mark_get(container_of(mark,
						 struct nfsd_file_mark,
						 nfm_mark));
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
			fsnotify_put_mark(mark);
			if (likely(nfm))
				break;
		} else
			mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

		/* allocate a new nfm */
		new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
		if (!new)
			return NULL;
		fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
		new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
		atomic_set(&new->nfm_ref, 1);

		err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

		/*
		 * If the add was successful, then return the object.
		 * Otherwise, we need to put the reference we hold on the
		 * nfm_mark. The fsnotify code will take a reference and put
		 * it on failure, so we can't just free it directly. It's also
		 * not safe to call fsnotify_destroy_mark on it as the
		 * mark->group will be NULL. Thus, we can't let the nfm_ref
		 * counter drive the destruction at this point.
		 */
		if (likely(!err))
			nfm = new;
		else
			fsnotify_put_mark(&new->nfm_mark);
	} while (unlikely(err == -EEXIST));

	return nfm;
}

static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
		struct net *net)
{
	struct nfsd_file *nf;

	nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
	if (nf) {
		INIT_HLIST_NODE(&nf->nf_node);
		INIT_LIST_HEAD(&nf->nf_lru);
		nf->nf_file = NULL;
		nf->nf_cred = get_current_cred();
		nf->nf_net = net;
		nf->nf_flags = 0;
		nf->nf_inode = inode;
		nf->nf_hashval = hashval;
		atomic_set(&nf->nf_ref, 1);
		nf->nf_may = may & NFSD_FILE_MAY_MASK;
		if (may & NFSD_MAY_NOT_BREAK_LEASE) {
			if (may & NFSD_MAY_WRITE)
				__set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
			if (may & NFSD_MAY_READ)
				__set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
		}
		nf->nf_mark = NULL;
		trace_nfsd_file_alloc(nf);
	}
	return nf;
}

static bool
nfsd_file_free(struct nfsd_file *nf)
{
	bool flush = false;

	trace_nfsd_file_put_final(nf);
	if (nf->nf_mark)
		nfsd_file_mark_put(nf->nf_mark);
	if (nf->nf_file) {
		get_file(nf->nf_file);
		filp_close(nf->nf_file, NULL);
		fput(nf->nf_file);
		flush = true;
	}
	call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
	return flush;
}

static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;
	struct address_space *mapping;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return false;
	mapping = file->f_mapping;
	return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
		mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}

static int
nfsd_file_check_write_error(struct nfsd_file *nf)
{
	struct file *file = nf->nf_file;

	if (!file || !(file->f_mode & FMODE_WRITE))
		return 0;
	return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}

static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash(nf);

	if (nfsd_file_check_write_error(nf))
		nfsd_reset_boot_verifier(net_generic(nf->nf_net, nfsd_net_id));
	--nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
	hlist_del_rcu(&nf->nf_node);
	atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
	if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		nfsd_file_do_unhash(nf);
		if (!list_empty(&nf->nf_lru))
			list_lru_del(&nfsd_file_lru, &nf->nf_lru);
		return true;
	}
	return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
	lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

	trace_nfsd_file_unhash_and_release_locked(nf);
	if (!nfsd_file_unhash(nf))
		return false;
	/* keep final reference for nfsd_file_lru_dispose */
	if (atomic_add_unless(&nf->nf_ref, -1, 1))
		return true;

	list_add(&nf->nf_lru, dispose);
	return true;
}

static int
nfsd_file_put_noref(struct nfsd_file *nf)
{
	int count;

	trace_nfsd_file_put(nf);

	count = atomic_dec_return(&nf->nf_ref);
	if (!count) {
		WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
		nfsd_file_free(nf);
	}
	return count;
}

void
nfsd_file_put(struct nfsd_file *nf)
{
	bool is_hashed = test_bit(NFSD_FILE_HASHED, &nf->nf_flags) != 0;

	set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
	if (nfsd_file_put_noref(nf) == 1 && is_hashed)
		nfsd_file_schedule_laundrette();
	if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
		nfsd_file_gc();
}

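/*
 * Summary (illustrative, not original commentary): a put that leaves only
 * the hashtable's reference (nf_ref drops to 1 on a hashed file) schedules
 * the laundrette so an idle entry can eventually be reaped, while the last
 * put of an already-unhashed file frees it via nfsd_file_put_noref().
 */
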
struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
	if (likely(atomic_inc_not_zero(&nf->nf_ref)))
		return nf;
	return NULL;
}

static void
nfsd_file_dispose_list(struct list_head *dispose)
{
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		nfsd_file_put_noref(nf);
	}
}

static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
	bool flush = false;
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		list_del(&nf->nf_lru);
		if (!atomic_dec_and_test(&nf->nf_ref))
			continue;
		if (nfsd_file_free(nf))
			flush = true;
	}
	if (flush)
		flush_delayed_fput();
}

static void
nfsd_file_list_remove_disposal(struct list_head *dst,
			       struct nfsd_fcache_disposal *l)
{
	spin_lock(&l->lock);
	list_splice_init(&l->freeme, dst);
	spin_unlock(&l->lock);
}

static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
	struct nfsd_fcache_disposal *l;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &laundrettes, list) {
		if (l->net == net) {
			spin_lock(&l->lock);
			list_splice_tail_init(files, &l->freeme);
			spin_unlock(&l->lock);
			queue_work(nfsd_filecache_wq, &l->work);
			break;
		}
	}
	rcu_read_unlock();
}

static void
nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
			  struct net *net)
{
	struct nfsd_file *nf, *tmp;

	list_for_each_entry_safe(nf, tmp, src, nf_lru) {
		if (nf->nf_net == net)
			list_move_tail(&nf->nf_lru, dst);
	}
}

static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
	LIST_HEAD(list);
	struct nfsd_file *nf;

	while (!list_empty(dispose)) {
		nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
		nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
		nfsd_file_list_add_disposal(&list, nf->nf_net);
	}
}

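/*
 * How delayed disposal batches (summary, not original commentary): each pass
 * of the loop above peels off every entry belonging to the same net namespace
 * as the current list head, then hands that batch to the matching per-net
 * laundrette, which frees the files from workqueue context in
 * nfsd_file_delayed_close().
 */
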
/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
		 spinlock_t *lock, void *arg)
{
	struct list_head *head = arg;
	struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

	/*
	 * Do a lockless refcount check. The hashtable holds one reference, so
	 * we look to see if anything else has a reference, or if any have
	 * been put since the shrinker last ran. Those don't get unhashed and
	 * released.
	 *
	 * Note that in the put path, we set the flag and then decrement the
	 * counter. Here we check the counter and then test and clear the flag.
	 * That order is deliberate to ensure that we can do this locklessly.
	 */
	if (atomic_read(&nf->nf_ref) > 1)
		goto out_skip;

	/*
	 * Don't throw out files that are still undergoing I/O or
	 * that have uncleared errors pending.
	 */
	if (nfsd_file_check_writeback(nf))
		goto out_skip;
	if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
		goto out_skip;
	if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
		goto out_skip;

	list_lru_isolate_move(lru, &nf->nf_lru, head);
	return LRU_REMOVED;
out_skip:
	return LRU_SKIP;
}

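/*
 * Expanded illustration of the ordering argument above (not original text):
 * nfsd_file_put() sets NFSD_FILE_REFERENCED and only then drops nf_ref,
 * while this scan reads nf_ref first and clears the flag second. So if the
 * scan sees nf_ref == 1, any put still in flight keeps the counter above 1
 * (skip via the counter), and any put that already dropped the counter must
 * have set the flag beforehand (skip via the flag). A recently-used file is
 * therefore never reaped by a racing scan.
 */
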
static unsigned long
nfsd_file_lru_walk_list(struct shrink_control *sc)
{
	LIST_HEAD(head);
	struct nfsd_file *nf;
	unsigned long ret;

	if (sc)
		ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
				nfsd_file_lru_cb, &head);
	else
		ret = list_lru_walk(&nfsd_file_lru,
				nfsd_file_lru_cb,
				&head, LONG_MAX);
	list_for_each_entry(nf, &head, nf_lru) {
		spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
		nfsd_file_do_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
	}
	nfsd_file_dispose_list_delayed(&head);
	return ret;
}

static void
nfsd_file_gc(void)
{
	nfsd_file_lru_walk_list(NULL);
}

static void
nfsd_file_gc_worker(struct work_struct *work)
{
	nfsd_file_gc();
	nfsd_file_schedule_laundrette();
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
	return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
	return nfsd_file_lru_walk_list(sc);
}

static struct shrinker	nfsd_file_shrinker = {
	.scan_objects = nfsd_file_lru_scan,
	.count_objects = nfsd_file_lru_count,
	.seeks = 1,
};

static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
			struct list_head *dispose)
{
	struct nfsd_file	*nf;
	struct hlist_node	*tmp;

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
		if (inode == nf->nf_inode)
			nfsd_file_unhash_and_release_locked(nf, dispose);
	}
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
	unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt a delayed close of a nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
	unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
						NFSD_FILE_HASH_BITS);
	LIST_HEAD(dispose);

	__nfsd_file_close_inode(inode, hashval, &dispose);
	trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
	nfsd_file_dispose_list_delayed(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: dummy
 *
 * Walk the LRU list and close any entries that have not been used since
 * the last scan.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
	LIST_HEAD(head);
	struct nfsd_fcache_disposal *l = container_of(work,
			struct nfsd_fcache_disposal, work);

	nfsd_file_list_remove_disposal(&head, l);
	nfsd_file_dispose_list(&head);
}

static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
			      void *data)
{
	struct file_lock *fl = data;

	/* Only close files for F_SETLEASE leases */
	if (fl->fl_flags & FL_LEASE)
		nfsd_file_close_inode_sync(file_inode(fl->fl_file));
	return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
	.notifier_call = nfsd_file_lease_notifier_call,
};

static int
nfsd_file_fsnotify_handle_event(struct fsnotify_group *group,
				struct inode *inode,
				u32 mask, const void *data, int data_type,
				const struct qstr *file_name, u32 cookie,
				struct fsnotify_iter_info *iter_info)
{
	trace_nfsd_file_fsnotify_handle_event(inode, mask);

	/* Should be no marks on non-regular files */
	if (!S_ISREG(inode->i_mode)) {
		WARN_ON_ONCE(1);
		return 0;
	}

	/* don't close files if this was not the last link */
	if (mask & FS_ATTRIB) {
		if (inode->i_nlink)
			return 0;
	}

	nfsd_file_close_inode(inode);
	return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
	.handle_event = nfsd_file_fsnotify_handle_event,
	.free_mark = nfsd_file_mark_free,
};

int
nfsd_file_cache_init(void)
{
	int		ret = -ENOMEM;
	unsigned int	i;

	clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	if (nfsd_file_hashtbl)
		return 0;

	nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
	if (!nfsd_filecache_wq)
		goto out;

	nfsd_file_hashtbl = kcalloc(NFSD_FILE_HASH_SIZE,
				sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
	if (!nfsd_file_hashtbl) {
		pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
		goto out_err;
	}

	nfsd_file_slab = kmem_cache_create("nfsd_file",
				sizeof(struct nfsd_file), 0, 0, NULL);
	if (!nfsd_file_slab) {
		pr_err("nfsd: unable to create nfsd_file_slab\n");
		goto out_err;
	}

	nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
					sizeof(struct nfsd_file_mark), 0, 0, NULL);
	if (!nfsd_file_mark_slab) {
		pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
		goto out_err;
	}

	ret = list_lru_init(&nfsd_file_lru);
	if (ret) {
		pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
		goto out_err;
	}

	ret = register_shrinker(&nfsd_file_shrinker);
	if (ret) {
		pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
		goto out_lru;
	}

	ret = lease_register_notifier(&nfsd_file_lease_notifier);
	if (ret) {
		pr_err("nfsd: unable to register lease notifier: %d\n", ret);
		goto out_shrinker;
	}

	nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
	if (IS_ERR(nfsd_file_fsnotify_group)) {
		pr_err("nfsd: unable to create fsnotify group: %ld\n",
			PTR_ERR(nfsd_file_fsnotify_group));
		ret = PTR_ERR(nfsd_file_fsnotify_group);
		nfsd_file_fsnotify_group = NULL;
		goto out_notifier;
	}

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
		spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
	}

	INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
	return ret;
out_notifier:
	lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
	unregister_shrinker(&nfsd_file_shrinker);
out_lru:
	list_lru_destroy(&nfsd_file_lru);
out_err:
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	destroy_workqueue(nfsd_filecache_wq);
	nfsd_filecache_wq = NULL;
	goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
	unsigned int		i;
	struct nfsd_file	*nf;
	struct hlist_node	*next;
	LIST_HEAD(dispose);
	bool del;

	if (!nfsd_file_hashtbl)
		return;

	for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
		struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

		spin_lock(&nfb->nfb_lock);
		hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
			if (net && nf->nf_net != net)
				continue;
			del = nfsd_file_unhash_and_release_locked(nf, &dispose);
			/*
			 * Deadlock detected! Something marked this entry as
			 * unhashed, but hasn't removed it from the hash list.
			 */
			WARN_ON_ONCE(!del);
		}
		spin_unlock(&nfb->nfb_lock);
		nfsd_file_dispose_list(&dispose);
	}
}

static struct nfsd_fcache_disposal *
nfsd_alloc_fcache_disposal(struct net *net)
{
	struct nfsd_fcache_disposal *l;

	l = kmalloc(sizeof(*l), GFP_KERNEL);
	if (!l)
		return NULL;
	INIT_WORK(&l->work, nfsd_file_delayed_close);
	l->net = net;
	spin_lock_init(&l->lock);
	INIT_LIST_HEAD(&l->freeme);
	return l;
}

static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	rcu_assign_pointer(l->net, NULL);
	cancel_work_sync(&l->work);
	nfsd_file_dispose_list(&l->freeme);
	kfree_rcu(l, rcu);
}

static void
nfsd_add_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	spin_lock(&laundrette_lock);
	list_add_tail_rcu(&l->list, &laundrettes);
	spin_unlock(&laundrette_lock);
}

static void
nfsd_del_fcache_disposal(struct nfsd_fcache_disposal *l)
{
	spin_lock(&laundrette_lock);
	list_del_rcu(&l->list);
	spin_unlock(&laundrette_lock);
}

static int
nfsd_alloc_fcache_disposal_net(struct net *net)
{
	struct nfsd_fcache_disposal *l;

	l = nfsd_alloc_fcache_disposal(net);
	if (!l)
		return -ENOMEM;
	nfsd_add_fcache_disposal(l);
	return 0;
}

static void
nfsd_free_fcache_disposal_net(struct net *net)
{
	struct nfsd_fcache_disposal *l;

	rcu_read_lock();
	list_for_each_entry_rcu(l, &laundrettes, list) {
		if (l->net != net)
			continue;
		nfsd_del_fcache_disposal(l);
		rcu_read_unlock();
		nfsd_free_fcache_disposal(l);
		return;
	}
	rcu_read_unlock();
}

int
nfsd_file_cache_start_net(struct net *net)
{
	return nfsd_alloc_fcache_disposal_net(net);
}

void
nfsd_file_cache_shutdown_net(struct net *net)
{
	nfsd_file_cache_purge(net);
	nfsd_free_fcache_disposal_net(net);
}

void
nfsd_file_cache_shutdown(void)
{
	set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

	lease_unregister_notifier(&nfsd_file_lease_notifier);
	unregister_shrinker(&nfsd_file_shrinker);
	/*
	 * make sure all callers of nfsd_file_lru_cb are done before
	 * calling nfsd_file_cache_purge
	 */
	cancel_delayed_work_sync(&nfsd_filecache_laundrette);
	nfsd_file_cache_purge(NULL);
	list_lru_destroy(&nfsd_file_lru);
	rcu_barrier();
	fsnotify_put_group(nfsd_file_fsnotify_group);
	nfsd_file_fsnotify_group = NULL;
	kmem_cache_destroy(nfsd_file_slab);
	nfsd_file_slab = NULL;
	fsnotify_wait_marks_destroyed();
	kmem_cache_destroy(nfsd_file_mark_slab);
	nfsd_file_mark_slab = NULL;
	kfree(nfsd_file_hashtbl);
	nfsd_file_hashtbl = NULL;
	destroy_workqueue(nfsd_filecache_wq);
	nfsd_filecache_wq = NULL;
}

static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
	int i;

	if (!uid_eq(c1->fsuid, c2->fsuid))
		return false;
	if (!gid_eq(c1->fsgid, c2->fsgid))
		return false;
	if (c1->group_info == NULL || c2->group_info == NULL)
		return c1->group_info == c2->group_info;
	if (c1->group_info->ngroups != c2->group_info->ngroups)
		return false;
	for (i = 0; i < c1->group_info->ngroups; i++) {
		if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
			return false;
	}
	return true;
}

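/*
 * Illustrative consequence (not original commentary): two requests for the
 * same inode made under different credentials never share a cache entry.
 * A lookup running with fsuid 1000 skips an entry created under fsuid 0,
 * so nfsd_file_acquire() opens and hashes a second nfsd_file for the inode.
 */
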
static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
		      unsigned int hashval, struct net *net)
{
	struct nfsd_file *nf;
	unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if ((need & nf->nf_may) != need)
			continue;
		if (nf->nf_inode != inode)
			continue;
		if (nf->nf_net != net)
			continue;
		if (!nfsd_match_cred(nf->nf_cred, current_cred()))
			continue;
		if (nfsd_file_get(nf) != NULL)
			return nf;
	}
	return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this fh?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this fh. Returns true if there
 * are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
	bool			ret = false;
	struct nfsd_file	*nf;
	unsigned int		hashval;

	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

	rcu_read_lock();
	hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
				 nf_node) {
		if (inode == nf->nf_inode) {
			ret = true;
			break;
		}
	}
	rcu_read_unlock();
	trace_nfsd_file_is_cached(inode, hashval, (int)ret);
	return ret;
}

__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
		  unsigned int may_flags, struct nfsd_file **pnf)
{
	__be32	status;
	struct net *net = SVC_NET(rqstp);
	struct nfsd_file *nf, *new;
	struct inode *inode;
	unsigned int hashval;
	bool retry = true;

	/* FIXME: skip this if fh_dentry is already set? */
	status = fh_verify(rqstp, fhp, S_IFREG,
				may_flags|NFSD_MAY_OWNER_OVERRIDE);
	if (status != nfs_ok)
		return status;

	inode = d_inode(fhp->fh_dentry);
	hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
	rcu_read_lock();
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	rcu_read_unlock();
	if (nf)
		goto wait_for_construction;

	new = nfsd_file_alloc(inode, may_flags, hashval, net);
	if (!new) {
		trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
					NULL, nfserr_jukebox);
		return nfserr_jukebox;
	}

	spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
	if (nf == NULL)
		goto open_file;
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	nfsd_file_slab_free(&new->nf_rcu);

wait_for_construction:
	wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

	/* Did construction of this file fail? */
	if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
		if (!retry) {
			status = nfserr_jukebox;
			goto out;
		}
		retry = false;
		nfsd_file_put_noref(nf);
		goto retry;
	}

	this_cpu_inc(nfsd_file_cache_hits);

	if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
		bool write = (may_flags & NFSD_MAY_WRITE);

		if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
		    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
			status = nfserrno(nfsd_open_break_lease(
					file_inode(nf->nf_file), may_flags));
			if (status == nfs_ok) {
				clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
				if (write)
					clear_bit(NFSD_FILE_BREAK_WRITE,
						  &nf->nf_flags);
			}
		}
	}
out:
	if (status == nfs_ok) {
		*pnf = nf;
	} else {
		nfsd_file_put(nf);
		nf = NULL;
	}

	trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
	return status;
open_file:
	nf = new;
	/* Take reference for the hashtable */
	atomic_inc(&nf->nf_ref);
	__set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
	__set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
	list_lru_add(&nfsd_file_lru, &nf->nf_lru);
	hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
	++nfsd_file_hashtbl[hashval].nfb_count;
	nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
			nfsd_file_hashtbl[hashval].nfb_count);
	spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
	if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
		nfsd_file_gc();

	nf->nf_mark = nfsd_file_mark_find_or_create(nf);
	if (nf->nf_mark)
		status = nfsd_open_verified(rqstp, fhp, S_IFREG,
				may_flags, &nf->nf_file);
	else
		status = nfserr_jukebox;
	/*
	 * If construction failed, or we raced with a call to unlink()
	 * then unhash.
	 */
	if (status != nfs_ok || inode->i_nlink == 0) {
		bool do_free;

		spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
		do_free = nfsd_file_unhash(nf);
		spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
		if (do_free)
			nfsd_file_put_noref(nf);
	}
	clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
	smp_mb__after_atomic();
	wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
	goto out;
}

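/*
 * Typical usage (illustrative sketch, not part of this file): an NFSD
 * operation acquires a cached open file for the duration of one request,
 * then drops its reference so the entry can age out via the LRU:
 *
 *	struct nfsd_file *nf;
 *	__be32 status;
 *
 *	status = nfsd_file_acquire(rqstp, fhp, NFSD_MAY_READ, &nf);
 *	if (status != nfs_ok)
 *		return status;
 *	... use nf->nf_file for the actual I/O ...
 *	nfsd_file_put(nf);
 */
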
/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
	unsigned int i, count = 0, longest = 0;
	unsigned long hits = 0;

	/*
	 * No need for spinlocks here since we're not terribly interested in
	 * accuracy. We do take the nfsd_mutex simply to ensure that we
	 * don't end up racing with server shutdown.
	 */
	mutex_lock(&nfsd_mutex);
	if (nfsd_file_hashtbl) {
		for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
			count += nfsd_file_hashtbl[i].nfb_count;
			longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
		}
	}
	mutex_unlock(&nfsd_mutex);

	for_each_possible_cpu(i)
		hits += per_cpu(nfsd_file_cache_hits, i);

	seq_printf(m, "total entries: %u\n", count);
	seq_printf(m, "longest chain: %u\n", longest);
	seq_printf(m, "cache hits: %lu\n", hits);
	return 0;
}

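/*
 * Example output (illustrative values only):
 *
 *	total entries: 975
 *	longest chain: 2
 *	cache hits: 125461
 */
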
int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, nfsd_file_cache_stats_show, NULL);
}