// SPDX-License-Identifier: GPL-2.0
/*
 * Open file cache.
 *
 * (c) 2015 - Jeff Layton <jeff.layton@primarydata.com>
 */

#include <linux/hash.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/sched.h>
#include <linux/list_lru.h>
#include <linux/fsnotify_backend.h>
#include <linux/fsnotify.h>
#include <linux/seq_file.h>

#include "vfs.h"
#include "nfsd.h"
#include "nfsfh.h"
#include "netns.h"
#include "filecache.h"
#include "trace.h"

#define NFSDDBG_FACILITY NFSDDBG_FH

/* FIXME: dynamically size this for the machine somehow? */
#define NFSD_FILE_HASH_BITS 12
#define NFSD_FILE_HASH_SIZE (1 << NFSD_FILE_HASH_BITS)
#define NFSD_LAUNDRETTE_DELAY (2 * HZ)

#define NFSD_FILE_SHUTDOWN (1)
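
/*
 * Once the cache grows past NFSD_FILE_LRU_THRESHOLD entries, inserting a
 * new entry kicks off a garbage-collection pass; past NFSD_FILE_LRU_LIMIT,
 * nfsd_file_put() triggers one as well.
 */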
#define NFSD_FILE_LRU_THRESHOLD (4096UL)
#define NFSD_FILE_LRU_LIMIT (NFSD_FILE_LRU_THRESHOLD << 2)

/* We only care about NFSD_MAY_READ/WRITE for this cache */
#define NFSD_FILE_MAY_MASK (NFSD_MAY_READ|NFSD_MAY_WRITE)

struct nfsd_fcache_bucket {
        struct hlist_head nfb_head;
        spinlock_t nfb_lock;
        unsigned int nfb_count;
        unsigned int nfb_maxcount;
};

static DEFINE_PER_CPU(unsigned long, nfsd_file_cache_hits);

struct nfsd_fcache_disposal {
        struct work_struct work;
        spinlock_t lock;
        struct list_head freeme;
};

static struct workqueue_struct *nfsd_filecache_wq __read_mostly;

static struct kmem_cache *nfsd_file_slab;
static struct kmem_cache *nfsd_file_mark_slab;
static struct nfsd_fcache_bucket *nfsd_file_hashtbl;
static struct list_lru nfsd_file_lru;
static long nfsd_file_lru_flags;
static struct fsnotify_group *nfsd_file_fsnotify_group;
static atomic_long_t nfsd_filecache_count;
static struct delayed_work nfsd_filecache_laundrette;

static void nfsd_file_gc(void);

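/*
 * The "laundrette" is the delayed work that periodically walks the LRU and
 * disposes of entries that have gone unused since the last pass.
 */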
static void
nfsd_file_schedule_laundrette(void)
{
        long count = atomic_long_read(&nfsd_filecache_count);

        if (count == 0 || test_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags))
                return;

        queue_delayed_work(system_wq, &nfsd_filecache_laundrette,
                        NFSD_LAUNDRETTE_DELAY);
}

static void
nfsd_file_slab_free(struct rcu_head *rcu)
{
        struct nfsd_file *nf = container_of(rcu, struct nfsd_file, nf_rcu);

        put_cred(nf->nf_cred);
        kmem_cache_free(nfsd_file_slab, nf);
}

static void
nfsd_file_mark_free(struct fsnotify_mark *mark)
{
        struct nfsd_file_mark *nfm = container_of(mark, struct nfsd_file_mark,
                                                  nfm_mark);

        kmem_cache_free(nfsd_file_mark_slab, nfm);
}

static struct nfsd_file_mark *
nfsd_file_mark_get(struct nfsd_file_mark *nfm)
{
        if (!refcount_inc_not_zero(&nfm->nfm_ref))
                return NULL;
        return nfm;
}

static void
nfsd_file_mark_put(struct nfsd_file_mark *nfm)
{
        if (refcount_dec_and_test(&nfm->nfm_ref)) {
                fsnotify_destroy_mark(&nfm->nfm_mark, nfsd_file_fsnotify_group);
                fsnotify_put_mark(&nfm->nfm_mark);
        }
}

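/*
 * Each cached open file holds an fsnotify mark on its inode, so the cache
 * can watch for FS_ATTRIB and FS_DELETE_SELF events and close files whose
 * last link has gone away.
 */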
static struct nfsd_file_mark *
nfsd_file_mark_find_or_create(struct nfsd_file *nf)
{
        int err;
        struct fsnotify_mark *mark;
        struct nfsd_file_mark *nfm = NULL, *new;
        struct inode *inode = nf->nf_inode;

        do {
                mutex_lock(&nfsd_file_fsnotify_group->mark_mutex);
                mark = fsnotify_find_mark(&inode->i_fsnotify_marks,
                                nfsd_file_fsnotify_group);
                if (mark) {
                        nfm = nfsd_file_mark_get(container_of(mark,
                                                 struct nfsd_file_mark,
                                                 nfm_mark));
                        mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);
                        if (nfm) {
                                fsnotify_put_mark(mark);
                                break;
                        }
                        /* Avoid soft lockup race with nfsd_file_mark_put() */
                        fsnotify_destroy_mark(mark, nfsd_file_fsnotify_group);
                        fsnotify_put_mark(mark);
                } else
                        mutex_unlock(&nfsd_file_fsnotify_group->mark_mutex);

                /* allocate a new nfm */
                new = kmem_cache_alloc(nfsd_file_mark_slab, GFP_KERNEL);
                if (!new)
                        return NULL;
                fsnotify_init_mark(&new->nfm_mark, nfsd_file_fsnotify_group);
                new->nfm_mark.mask = FS_ATTRIB|FS_DELETE_SELF;
                refcount_set(&new->nfm_ref, 1);

                err = fsnotify_add_inode_mark(&new->nfm_mark, inode, 0);

                /*
                 * If the add was successful, then return the object.
                 * Otherwise, we need to put the reference we hold on the
                 * nfm_mark. The fsnotify code will take a reference and put
                 * it on failure, so we can't just free it directly. It's also
                 * not safe to call fsnotify_destroy_mark on it as the
                 * mark->group will be NULL. Thus, we can't let the nfm_ref
                 * counter drive the destruction at this point.
                 */
                if (likely(!err))
                        nfm = new;
                else
                        fsnotify_put_mark(&new->nfm_mark);
        } while (unlikely(err == -EEXIST));

        return nfm;
}

static struct nfsd_file *
nfsd_file_alloc(struct inode *inode, unsigned int may, unsigned int hashval,
                struct net *net)
{
        struct nfsd_file *nf;

        nf = kmem_cache_alloc(nfsd_file_slab, GFP_KERNEL);
        if (nf) {
                INIT_HLIST_NODE(&nf->nf_node);
                INIT_LIST_HEAD(&nf->nf_lru);
                nf->nf_file = NULL;
                nf->nf_cred = get_current_cred();
                nf->nf_net = net;
                nf->nf_flags = 0;
                nf->nf_inode = inode;
                nf->nf_hashval = hashval;
                refcount_set(&nf->nf_ref, 1);
                nf->nf_may = may & NFSD_FILE_MAY_MASK;
                if (may & NFSD_MAY_NOT_BREAK_LEASE) {
                        if (may & NFSD_MAY_WRITE)
                                __set_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags);
                        if (may & NFSD_MAY_READ)
                                __set_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
                }
                nf->nf_mark = NULL;
                trace_nfsd_file_alloc(nf);
        }
        return nf;
}

static bool
nfsd_file_free(struct nfsd_file *nf)
{
        bool flush = false;

        trace_nfsd_file_put_final(nf);
        if (nf->nf_mark)
                nfsd_file_mark_put(nf->nf_mark);
        if (nf->nf_file) {
                get_file(nf->nf_file);
                filp_close(nf->nf_file, NULL);
                fput(nf->nf_file);
                flush = true;
        }
        call_rcu(&nf->nf_rcu, nfsd_file_slab_free);
        return flush;
}

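/*
 * Files with dirty pages or pages under writeback are not eligible for
 * eviction; the LRU scanner skips them until the I/O completes.
 */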
static bool
nfsd_file_check_writeback(struct nfsd_file *nf)
{
        struct file *file = nf->nf_file;
        struct address_space *mapping;

        if (!file || !(file->f_mode & FMODE_WRITE))
                return false;
        mapping = file->f_mapping;
        return mapping_tagged(mapping, PAGECACHE_TAG_DIRTY) ||
                mapping_tagged(mapping, PAGECACHE_TAG_WRITEBACK);
}

static bool
nfsd_file_check_write_error(struct nfsd_file *nf)
{
        struct file *file = nf->nf_file;

        if (!file || !(file->f_mode & FMODE_WRITE))
                return false;
        return filemap_check_wb_err(file->f_mapping, READ_ONCE(file->f_wb_err));
}

static void
nfsd_file_flush(struct nfsd_file *nf)
{
        if (nf->nf_file && vfs_fsync(nf->nf_file, 1) != 0)
                nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
}

static void
nfsd_file_do_unhash(struct nfsd_file *nf)
{
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

        trace_nfsd_file_unhash(nf);

        if (nfsd_file_check_write_error(nf))
                nfsd_reset_write_verifier(net_generic(nf->nf_net, nfsd_net_id));
        --nfsd_file_hashtbl[nf->nf_hashval].nfb_count;
        hlist_del_rcu(&nf->nf_node);
        atomic_long_dec(&nfsd_filecache_count);
}

static bool
nfsd_file_unhash(struct nfsd_file *nf)
{
        if (test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
                nfsd_file_do_unhash(nf);
                if (!list_empty(&nf->nf_lru))
                        list_lru_del(&nfsd_file_lru, &nf->nf_lru);
                return true;
        }
        return false;
}

/*
 * Return true if the file was unhashed.
 */
static bool
nfsd_file_unhash_and_release_locked(struct nfsd_file *nf, struct list_head *dispose)
{
        lockdep_assert_held(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);

        trace_nfsd_file_unhash_and_release_locked(nf);
        if (!nfsd_file_unhash(nf))
                return false;
        /* keep final reference for nfsd_file_lru_dispose */
        if (refcount_dec_not_one(&nf->nf_ref))
                return true;

        list_add(&nf->nf_lru, dispose);
        return true;
}

static void
nfsd_file_put_noref(struct nfsd_file *nf)
{
        trace_nfsd_file_put(nf);

        if (refcount_dec_and_test(&nf->nf_ref)) {
                WARN_ON(test_bit(NFSD_FILE_HASHED, &nf->nf_flags));
                nfsd_file_free(nf);
        }
}

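/*
 * An unhashed file is flushed and released synchronously; a hashed one is
 * left to the laundrette. If the cache has grown past NFSD_FILE_LRU_LIMIT,
 * putting a reference also triggers an immediate GC pass.
 */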
void
nfsd_file_put(struct nfsd_file *nf)
{
        set_bit(NFSD_FILE_REFERENCED, &nf->nf_flags);
        if (test_bit(NFSD_FILE_HASHED, &nf->nf_flags) == 0) {
                nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
        } else {
                nfsd_file_put_noref(nf);
                if (nf->nf_file)
                        nfsd_file_schedule_laundrette();
        }
        if (atomic_long_read(&nfsd_filecache_count) >= NFSD_FILE_LRU_LIMIT)
                nfsd_file_gc();
}

struct nfsd_file *
nfsd_file_get(struct nfsd_file *nf)
{
        if (likely(refcount_inc_not_zero(&nf->nf_ref)))
                return nf;
        return NULL;
}

static void
nfsd_file_dispose_list(struct list_head *dispose)
{
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
                nfsd_file_flush(nf);
                nfsd_file_put_noref(nf);
        }
}

static void
nfsd_file_dispose_list_sync(struct list_head *dispose)
{
        bool flush = false;
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                list_del(&nf->nf_lru);
                nfsd_file_flush(nf);
                if (!refcount_dec_and_test(&nf->nf_ref))
                        continue;
                if (nfsd_file_free(nf))
                        flush = true;
        }
        if (flush)
                flush_delayed_fput();
}

static void
nfsd_file_list_remove_disposal(struct list_head *dst,
                struct nfsd_fcache_disposal *l)
{
        spin_lock(&l->lock);
        list_splice_init(&l->freeme, dst);
        spin_unlock(&l->lock);
}

static void
nfsd_file_list_add_disposal(struct list_head *files, struct net *net)
{
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct nfsd_fcache_disposal *l = nn->fcache_disposal;

        spin_lock(&l->lock);
        list_splice_tail_init(files, &l->freeme);
        spin_unlock(&l->lock);
        queue_work(nfsd_filecache_wq, &l->work);
}

static void
nfsd_file_list_add_pernet(struct list_head *dst, struct list_head *src,
                struct net *net)
{
        struct nfsd_file *nf, *tmp;

        list_for_each_entry_safe(nf, tmp, src, nf_lru) {
                if (nf->nf_net == net)
                        list_move_tail(&nf->nf_lru, dst);
        }
}

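/*
 * Group the files to be disposed by network namespace, and hand each batch
 * to that namespace's disposal work so the actual closes happen on the
 * nfsd_filecache workqueue.
 */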
static void
nfsd_file_dispose_list_delayed(struct list_head *dispose)
{
        LIST_HEAD(list);
        struct nfsd_file *nf;

        while (!list_empty(dispose)) {
                nf = list_first_entry(dispose, struct nfsd_file, nf_lru);
                nfsd_file_list_add_pernet(&list, dispose, nf->nf_net);
                nfsd_file_list_add_disposal(&list, nf->nf_net);
        }
}

/*
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static enum lru_status
nfsd_file_lru_cb(struct list_head *item, struct list_lru_one *lru,
                 spinlock_t *lock, void *arg)
        __releases(lock)
        __acquires(lock)
{
        struct list_head *head = arg;
        struct nfsd_file *nf = list_entry(item, struct nfsd_file, nf_lru);

        /*
         * Do a lockless refcount check. The hashtable holds one reference, so
         * we look to see if anything else has a reference, or if any have
         * been put since the shrinker last ran. Those don't get unhashed and
         * released.
         *
         * Note that in the put path, we set the flag and then decrement the
         * counter. Here we check the counter and then test and clear the flag.
         * That order is deliberate to ensure that we can do this locklessly.
         */
        if (refcount_read(&nf->nf_ref) > 1)
                goto out_skip;

        /*
         * Don't throw out files that are still undergoing I/O or
         * that have uncleared errors pending.
         */
        if (nfsd_file_check_writeback(nf))
                goto out_skip;

        if (test_and_clear_bit(NFSD_FILE_REFERENCED, &nf->nf_flags))
                goto out_skip;

        if (!test_and_clear_bit(NFSD_FILE_HASHED, &nf->nf_flags))
                goto out_skip;

        list_lru_isolate_move(lru, &nf->nf_lru, head);
        return LRU_REMOVED;
out_skip:
        return LRU_SKIP;
}

static unsigned long
nfsd_file_lru_walk_list(struct shrink_control *sc)
{
        LIST_HEAD(head);
        struct nfsd_file *nf;
        unsigned long ret;

        if (sc)
                ret = list_lru_shrink_walk(&nfsd_file_lru, sc,
                                nfsd_file_lru_cb, &head);
        else
                ret = list_lru_walk(&nfsd_file_lru,
                                nfsd_file_lru_cb,
                                &head, LONG_MAX);
        list_for_each_entry(nf, &head, nf_lru) {
                spin_lock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
                nfsd_file_do_unhash(nf);
                spin_unlock(&nfsd_file_hashtbl[nf->nf_hashval].nfb_lock);
        }
        nfsd_file_dispose_list_delayed(&head);
        return ret;
}

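/*
 * With no shrink_control, walk the entire LRU and dispose of every entry
 * that is eligible for eviction.
 */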
static void
nfsd_file_gc(void)
{
        nfsd_file_lru_walk_list(NULL);
}

static void
nfsd_file_gc_worker(struct work_struct *work)
{
        nfsd_file_gc();
        nfsd_file_schedule_laundrette();
}

static unsigned long
nfsd_file_lru_count(struct shrinker *s, struct shrink_control *sc)
{
        return list_lru_count(&nfsd_file_lru);
}

static unsigned long
nfsd_file_lru_scan(struct shrinker *s, struct shrink_control *sc)
{
        return nfsd_file_lru_walk_list(sc);
}

static struct shrinker nfsd_file_shrinker = {
        .scan_objects = nfsd_file_lru_scan,
        .count_objects = nfsd_file_lru_count,
        .seeks = 1,
};

static void
__nfsd_file_close_inode(struct inode *inode, unsigned int hashval,
                        struct list_head *dispose)
{
        struct nfsd_file *nf;
        struct hlist_node *tmp;

        spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
        hlist_for_each_entry_safe(nf, tmp, &nfsd_file_hashtbl[hashval].nfb_head, nf_node) {
                if (inode == nf->nf_inode)
                        nfsd_file_unhash_and_release_locked(nf, dispose);
        }
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
}

/**
 * nfsd_file_close_inode_sync - attempt to forcibly close an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put. Also ensure that any of the
 * fputs also have their final __fput done as well.
 */
void
nfsd_file_close_inode_sync(struct inode *inode)
{
        unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
                                                NFSD_FILE_HASH_BITS);
        LIST_HEAD(dispose);

        __nfsd_file_close_inode(inode, hashval, &dispose);
        trace_nfsd_file_close_inode_sync(inode, hashval, !list_empty(&dispose));
        nfsd_file_dispose_list_sync(&dispose);
}

/**
 * nfsd_file_close_inode - attempt a delayed close of an nfsd_file
 * @inode: inode of the file to attempt to remove
 *
 * Walk the whole hash bucket, looking for any files that correspond to "inode".
 * If any do, then unhash them and put the hashtable reference to them and
 * destroy any that had their last reference put.
 */
static void
nfsd_file_close_inode(struct inode *inode)
{
        unsigned int hashval = (unsigned int)hash_long(inode->i_ino,
                                                NFSD_FILE_HASH_BITS);
        LIST_HEAD(dispose);

        __nfsd_file_close_inode(inode, hashval, &dispose);
        trace_nfsd_file_close_inode(inode, hashval, !list_empty(&dispose));
        nfsd_file_dispose_list_delayed(&dispose);
}

/**
 * nfsd_file_delayed_close - close unused nfsd_files
 * @work: the work_struct embedded in the per-net disposal structure
 *
 * Close any entries that have been queued on this namespace's disposal
 * list since the work was last run.
 *
 * Note this can deadlock with nfsd_file_cache_purge.
 */
static void
nfsd_file_delayed_close(struct work_struct *work)
{
        LIST_HEAD(head);
        struct nfsd_fcache_disposal *l = container_of(work,
                        struct nfsd_fcache_disposal, work);

        nfsd_file_list_remove_disposal(&head, l);
        nfsd_file_dispose_list(&head);
}

static int
nfsd_file_lease_notifier_call(struct notifier_block *nb, unsigned long arg,
                            void *data)
{
        struct file_lock *fl = data;

        /* Only close files for F_SETLEASE leases */
        if (fl->fl_flags & FL_LEASE)
                nfsd_file_close_inode_sync(file_inode(fl->fl_file));
        return 0;
}

static struct notifier_block nfsd_file_lease_notifier = {
        .notifier_call = nfsd_file_lease_notifier_call,
};

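/*
 * Called for FS_ATTRIB and FS_DELETE_SELF events on a marked inode: close
 * any cached files for an inode whose last link is gone.
 */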
static int
nfsd_file_fsnotify_handle_event(struct fsnotify_mark *mark, u32 mask,
                                struct inode *inode, struct inode *dir,
                                const struct qstr *name, u32 cookie)
{
        if (WARN_ON_ONCE(!inode))
                return 0;

        trace_nfsd_file_fsnotify_handle_event(inode, mask);

        /* Should be no marks on non-regular files */
        if (!S_ISREG(inode->i_mode)) {
                WARN_ON_ONCE(1);
                return 0;
        }

        /* don't close files if this was not the last link */
        if (mask & FS_ATTRIB) {
                if (inode->i_nlink)
                        return 0;
        }

        nfsd_file_close_inode(inode);
        return 0;
}

static const struct fsnotify_ops nfsd_file_fsnotify_ops = {
        .handle_inode_event = nfsd_file_fsnotify_handle_event,
        .free_mark = nfsd_file_mark_free,
};

int
nfsd_file_cache_init(void)
{
        int ret = -ENOMEM;
        unsigned int i;

        clear_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

        if (nfsd_file_hashtbl)
                return 0;

        nfsd_filecache_wq = alloc_workqueue("nfsd_filecache", 0, 0);
        if (!nfsd_filecache_wq)
                goto out;

        nfsd_file_hashtbl = kvcalloc(NFSD_FILE_HASH_SIZE,
                                sizeof(*nfsd_file_hashtbl), GFP_KERNEL);
        if (!nfsd_file_hashtbl) {
                pr_err("nfsd: unable to allocate nfsd_file_hashtbl\n");
                goto out_err;
        }

        nfsd_file_slab = kmem_cache_create("nfsd_file",
                                sizeof(struct nfsd_file), 0, 0, NULL);
        if (!nfsd_file_slab) {
                pr_err("nfsd: unable to create nfsd_file_slab\n");
                goto out_err;
        }

        nfsd_file_mark_slab = kmem_cache_create("nfsd_file_mark",
                                sizeof(struct nfsd_file_mark), 0, 0, NULL);
        if (!nfsd_file_mark_slab) {
                pr_err("nfsd: unable to create nfsd_file_mark_slab\n");
                goto out_err;
        }

        ret = list_lru_init(&nfsd_file_lru);
        if (ret) {
                pr_err("nfsd: failed to init nfsd_file_lru: %d\n", ret);
                goto out_err;
        }

        ret = register_shrinker(&nfsd_file_shrinker);
        if (ret) {
                pr_err("nfsd: failed to register nfsd_file_shrinker: %d\n", ret);
                goto out_lru;
        }

        ret = lease_register_notifier(&nfsd_file_lease_notifier);
        if (ret) {
                pr_err("nfsd: unable to register lease notifier: %d\n", ret);
                goto out_shrinker;
        }

        nfsd_file_fsnotify_group = fsnotify_alloc_group(&nfsd_file_fsnotify_ops);
        if (IS_ERR(nfsd_file_fsnotify_group)) {
                pr_err("nfsd: unable to create fsnotify group: %ld\n",
                        PTR_ERR(nfsd_file_fsnotify_group));
                ret = PTR_ERR(nfsd_file_fsnotify_group);
                nfsd_file_fsnotify_group = NULL;
                goto out_notifier;
        }

        for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                INIT_HLIST_HEAD(&nfsd_file_hashtbl[i].nfb_head);
                spin_lock_init(&nfsd_file_hashtbl[i].nfb_lock);
        }

        INIT_DELAYED_WORK(&nfsd_filecache_laundrette, nfsd_file_gc_worker);
out:
        return ret;
out_notifier:
        lease_unregister_notifier(&nfsd_file_lease_notifier);
out_shrinker:
        unregister_shrinker(&nfsd_file_shrinker);
out_lru:
        list_lru_destroy(&nfsd_file_lru);
out_err:
        kmem_cache_destroy(nfsd_file_slab);
        nfsd_file_slab = NULL;
        kmem_cache_destroy(nfsd_file_mark_slab);
        nfsd_file_mark_slab = NULL;
        kvfree(nfsd_file_hashtbl);
        nfsd_file_hashtbl = NULL;
        destroy_workqueue(nfsd_filecache_wq);
        nfsd_filecache_wq = NULL;
        goto out;
}

/*
 * Note this can deadlock with nfsd_file_lru_cb.
 */
void
nfsd_file_cache_purge(struct net *net)
{
        unsigned int i;
        struct nfsd_file *nf;
        struct hlist_node *next;
        LIST_HEAD(dispose);
        bool del;

        if (!nfsd_file_hashtbl)
                return;

        for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                struct nfsd_fcache_bucket *nfb = &nfsd_file_hashtbl[i];

                spin_lock(&nfb->nfb_lock);
                hlist_for_each_entry_safe(nf, next, &nfb->nfb_head, nf_node) {
                        if (net && nf->nf_net != net)
                                continue;
                        del = nfsd_file_unhash_and_release_locked(nf, &dispose);

                        /*
                         * Deadlock detected! Something marked this entry as
                         * unhashed, but hasn't removed it from the hash list.
                         */
                        WARN_ON_ONCE(!del);
                }
                spin_unlock(&nfb->nfb_lock);
                nfsd_file_dispose_list(&dispose);
        }
}

static struct nfsd_fcache_disposal *
nfsd_alloc_fcache_disposal(void)
{
        struct nfsd_fcache_disposal *l;

        l = kmalloc(sizeof(*l), GFP_KERNEL);
        if (!l)
                return NULL;
        INIT_WORK(&l->work, nfsd_file_delayed_close);
        spin_lock_init(&l->lock);
        INIT_LIST_HEAD(&l->freeme);
        return l;
}

static void
nfsd_free_fcache_disposal(struct nfsd_fcache_disposal *l)
{
        cancel_work_sync(&l->work);
        nfsd_file_dispose_list(&l->freeme);
        kfree(l);
}

static void
nfsd_free_fcache_disposal_net(struct net *net)
{
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);
        struct nfsd_fcache_disposal *l = nn->fcache_disposal;

        nfsd_free_fcache_disposal(l);
}

int
nfsd_file_cache_start_net(struct net *net)
{
        struct nfsd_net *nn = net_generic(net, nfsd_net_id);

        nn->fcache_disposal = nfsd_alloc_fcache_disposal();
        return nn->fcache_disposal ? 0 : -ENOMEM;
}

void
nfsd_file_cache_shutdown_net(struct net *net)
{
        nfsd_file_cache_purge(net);
        nfsd_free_fcache_disposal_net(net);
}

void
nfsd_file_cache_shutdown(void)
{
        set_bit(NFSD_FILE_SHUTDOWN, &nfsd_file_lru_flags);

        lease_unregister_notifier(&nfsd_file_lease_notifier);
        unregister_shrinker(&nfsd_file_shrinker);
        /*
         * make sure all callers of nfsd_file_lru_cb are done before
         * calling nfsd_file_cache_purge
         */
        cancel_delayed_work_sync(&nfsd_filecache_laundrette);
        nfsd_file_cache_purge(NULL);
        list_lru_destroy(&nfsd_file_lru);
        rcu_barrier();
        fsnotify_put_group(nfsd_file_fsnotify_group);
        nfsd_file_fsnotify_group = NULL;
        kmem_cache_destroy(nfsd_file_slab);
        nfsd_file_slab = NULL;
        fsnotify_wait_marks_destroyed();
        kmem_cache_destroy(nfsd_file_mark_slab);
        nfsd_file_mark_slab = NULL;
        kvfree(nfsd_file_hashtbl);
        nfsd_file_hashtbl = NULL;
        destroy_workqueue(nfsd_filecache_wq);
        nfsd_filecache_wq = NULL;
}

static bool
nfsd_match_cred(const struct cred *c1, const struct cred *c2)
{
        int i;

        if (!uid_eq(c1->fsuid, c2->fsuid))
                return false;
        if (!gid_eq(c1->fsgid, c2->fsgid))
                return false;
        if (c1->group_info == NULL || c2->group_info == NULL)
                return c1->group_info == c2->group_info;
        if (c1->group_info->ngroups != c2->group_info->ngroups)
                return false;
        for (i = 0; i < c1->group_info->ngroups; i++) {
                if (!gid_eq(c1->group_info->gid[i], c2->group_info->gid[i]))
                        return false;
        }
        return true;
}

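/*
 * Look up a cache entry in the bucket under RCU. A match must agree on the
 * inode, the access mode, the network namespace and the credentials, must
 * still be hashed, and must yield a reference we can take.
 */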
static struct nfsd_file *
nfsd_file_find_locked(struct inode *inode, unsigned int may_flags,
                        unsigned int hashval, struct net *net)
{
        struct nfsd_file *nf;
        unsigned char need = may_flags & NFSD_FILE_MAY_MASK;

        hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
                                 nf_node, lockdep_is_held(&nfsd_file_hashtbl[hashval].nfb_lock)) {
                if (nf->nf_may != need)
                        continue;
                if (nf->nf_inode != inode)
                        continue;
                if (nf->nf_net != net)
                        continue;
                if (!nfsd_match_cred(nf->nf_cred, current_cred()))
                        continue;
                if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags))
                        continue;
                if (nfsd_file_get(nf) != NULL)
                        return nf;
        }
        return NULL;
}

/**
 * nfsd_file_is_cached - are there any cached open files for this inode?
 * @inode: inode of the file to check
 *
 * Scan the hashtable for open files that match this inode. Returns true if
 * there are any, and false if not.
 */
bool
nfsd_file_is_cached(struct inode *inode)
{
        bool ret = false;
        struct nfsd_file *nf;
        unsigned int hashval;

        hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);

        rcu_read_lock();
        hlist_for_each_entry_rcu(nf, &nfsd_file_hashtbl[hashval].nfb_head,
                                 nf_node) {
                if (inode == nf->nf_inode) {
                        ret = true;
                        break;
                }
        }
        rcu_read_unlock();
        trace_nfsd_file_is_cached(inode, hashval, (int)ret);
        return ret;
}

__be32
nfsd_file_acquire(struct svc_rqst *rqstp, struct svc_fh *fhp,
                  unsigned int may_flags, struct nfsd_file **pnf)
{
        __be32 status;
        struct net *net = SVC_NET(rqstp);
        struct nfsd_file *nf, *new;
        struct inode *inode;
        unsigned int hashval;
        bool retry = true;

        /* FIXME: skip this if fh_dentry is already set? */
        status = fh_verify(rqstp, fhp, S_IFREG,
                        may_flags|NFSD_MAY_OWNER_OVERRIDE);
        if (status != nfs_ok)
                return status;

        inode = d_inode(fhp->fh_dentry);
        hashval = (unsigned int)hash_long(inode->i_ino, NFSD_FILE_HASH_BITS);
retry:
        rcu_read_lock();
        nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
        rcu_read_unlock();
        if (nf)
                goto wait_for_construction;

        new = nfsd_file_alloc(inode, may_flags, hashval, net);
        if (!new) {
                trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags,
                                        NULL, nfserr_jukebox);
                return nfserr_jukebox;
        }

        spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
        nf = nfsd_file_find_locked(inode, may_flags, hashval, net);
        if (nf == NULL)
                goto open_file;
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
        nfsd_file_slab_free(&new->nf_rcu);
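
        /*
         * An existing entry may still be mid-construction, with nf_file not
         * yet opened. Wait for NFSD_FILE_PENDING to clear; if construction
         * failed, retry the lookup once before returning jukebox.
         */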
wait_for_construction:
        wait_on_bit(&nf->nf_flags, NFSD_FILE_PENDING, TASK_UNINTERRUPTIBLE);

        /* Did construction of this file fail? */
        if (!test_bit(NFSD_FILE_HASHED, &nf->nf_flags)) {
                if (!retry) {
                        status = nfserr_jukebox;
                        goto out;
                }
                retry = false;
                nfsd_file_put_noref(nf);
                goto retry;
        }

        this_cpu_inc(nfsd_file_cache_hits);

        if (!(may_flags & NFSD_MAY_NOT_BREAK_LEASE)) {
                bool write = (may_flags & NFSD_MAY_WRITE);

                if (test_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags) ||
                    (test_bit(NFSD_FILE_BREAK_WRITE, &nf->nf_flags) && write)) {
                        status = nfserrno(nfsd_open_break_lease(
                                        file_inode(nf->nf_file), may_flags));
                        if (status == nfs_ok) {
                                clear_bit(NFSD_FILE_BREAK_READ, &nf->nf_flags);
                                if (write)
                                        clear_bit(NFSD_FILE_BREAK_WRITE,
                                                  &nf->nf_flags);
                        }
                }
        }
out:
        if (status == nfs_ok) {
                *pnf = nf;
        } else {
                nfsd_file_put(nf);
                nf = NULL;
        }

        trace_nfsd_file_acquire(rqstp, hashval, inode, may_flags, nf, status);
        return status;
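
        /*
         * Construction path: insert the new entry into the hash bucket while
         * still holding the bucket lock, then open the struct file. Waiters
         * are woken once NFSD_FILE_PENDING is cleared.
         */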
open_file:
        nf = new;
        /* Take reference for the hashtable */
        refcount_inc(&nf->nf_ref);
        __set_bit(NFSD_FILE_HASHED, &nf->nf_flags);
        __set_bit(NFSD_FILE_PENDING, &nf->nf_flags);
        list_lru_add(&nfsd_file_lru, &nf->nf_lru);
        hlist_add_head_rcu(&nf->nf_node, &nfsd_file_hashtbl[hashval].nfb_head);
        ++nfsd_file_hashtbl[hashval].nfb_count;
        nfsd_file_hashtbl[hashval].nfb_maxcount = max(nfsd_file_hashtbl[hashval].nfb_maxcount,
                        nfsd_file_hashtbl[hashval].nfb_count);
        spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
        if (atomic_long_inc_return(&nfsd_filecache_count) >= NFSD_FILE_LRU_THRESHOLD)
                nfsd_file_gc();

        nf->nf_mark = nfsd_file_mark_find_or_create(nf);
        if (nf->nf_mark)
                status = nfsd_open_verified(rqstp, fhp, S_IFREG,
                                may_flags, &nf->nf_file);
        else
                status = nfserr_jukebox;
        /*
         * If construction failed, or we raced with a call to unlink()
         * then unhash.
         */
        if (status != nfs_ok || inode->i_nlink == 0) {
                bool do_free;

                spin_lock(&nfsd_file_hashtbl[hashval].nfb_lock);
                do_free = nfsd_file_unhash(nf);
                spin_unlock(&nfsd_file_hashtbl[hashval].nfb_lock);
                if (do_free)
                        nfsd_file_put_noref(nf);
        }
        clear_bit_unlock(NFSD_FILE_PENDING, &nf->nf_flags);
        smp_mb__after_atomic();
        wake_up_bit(&nf->nf_flags, NFSD_FILE_PENDING);
        goto out;
}

/*
 * Note that fields may be added, removed or reordered in the future. Programs
 * scraping this file for info should test the labels to ensure they're
 * getting the correct field.
 */
static int nfsd_file_cache_stats_show(struct seq_file *m, void *v)
{
        unsigned int i, count = 0, longest = 0;
        unsigned long hits = 0;

        /*
         * No need for spinlocks here since we're not terribly interested in
         * accuracy. We do take the nfsd_mutex simply to ensure that we
         * don't end up racing with server shutdown.
         */
        mutex_lock(&nfsd_mutex);
        if (nfsd_file_hashtbl) {
                for (i = 0; i < NFSD_FILE_HASH_SIZE; i++) {
                        count += nfsd_file_hashtbl[i].nfb_count;
                        longest = max(longest, nfsd_file_hashtbl[i].nfb_count);
                }
        }
        mutex_unlock(&nfsd_mutex);

        for_each_possible_cpu(i)
                hits += per_cpu(nfsd_file_cache_hits, i);

        seq_printf(m, "total entries: %u\n", count);
        seq_printf(m, "longest chain: %u\n", longest);
        seq_printf(m, "cache hits:    %lu\n", hits);
        return 0;
}

int nfsd_file_cache_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, nfsd_file_cache_stats_show, NULL);
}