// SPDX-License-Identifier: GPL-2.0-only
/*
 * net/sunrpc/cache.c
 *
 * Generic code for various authentication-related caches
 * used by sunrpc clients and servers.
 *
 * Copyright (C) 2002 Neil Brown <neilb@cse.unsw.edu.au>
 */

#include <linux/types.h>
#include <linux/fs.h>
#include <linux/file.h>
#include <linux/slab.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kmod.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/ctype.h>
#include <linux/string_helpers.h>
#include <linux/uaccess.h>
#include <linux/poll.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/net.h>
#include <linux/workqueue.h>
#include <linux/mutex.h>
#include <linux/pagemap.h>
#include <asm/ioctls.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/cache.h>
#include <linux/sunrpc/stats.h>
#include <linux/sunrpc/rpc_pipe_fs.h>
#include <trace/events/sunrpc.h>

#include "netns.h"

#define RPCDBG_FACILITY RPCDBG_CACHE

static bool cache_defer_req(struct cache_req *req, struct cache_head *item);
static void cache_revisit_request(struct cache_head *item);

static void cache_init(struct cache_head *h, struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();

	INIT_HLIST_NODE(&h->cache_list);
	h->flags = 0;
	kref_init(&h->ref);
	h->expiry_time = now + CACHE_NEW_EXPIRY;
	if (now <= detail->flush_time)
		/* ensure it isn't already expired */
		now = detail->flush_time + 1;
	h->last_refresh = now;
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail);

static struct cache_head *sunrpc_cache_find_rcu(struct cache_detail *detail,
						struct cache_head *key,
						int hash)
{
	struct hlist_head *head = &detail->hash_table[hash];
	struct cache_head *tmp;

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp, head, cache_list) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp))
			continue;
		tmp = cache_get_rcu(tmp);
		rcu_read_unlock();
		return tmp;
	}
	rcu_read_unlock();
	return NULL;
}

static void sunrpc_begin_cache_remove_entry(struct cache_head *ch,
					    struct cache_detail *cd)
{
	/* Must be called under cd->hash_lock */
	hlist_del_init_rcu(&ch->cache_list);
	set_bit(CACHE_CLEANED, &ch->flags);
	cd->entries--;
}

static void sunrpc_end_cache_remove_entry(struct cache_head *ch,
					  struct cache_detail *cd)
{
	cache_fresh_unlocked(ch, cd);
	cache_put(ch, cd);
}

static struct cache_head *sunrpc_cache_add_entry(struct cache_detail *detail,
						 struct cache_head *key,
						 int hash)
{
	struct cache_head *new, *tmp, *freeme = NULL;
	struct hlist_head *head = &detail->hash_table[hash];

	new = detail->alloc();
	if (!new)
		return NULL;
	/* must fully initialise 'new', else
	 * we might lose it if we need to
	 * cache_put it soon.
	 */
	cache_init(new, detail);
	detail->init(new, key);

	spin_lock(&detail->hash_lock);

	/* check if entry appeared while we slept */
	hlist_for_each_entry_rcu(tmp, head, cache_list,
				 lockdep_is_held(&detail->hash_lock)) {
		if (!detail->match(tmp, key))
			continue;
		if (test_bit(CACHE_VALID, &tmp->flags) &&
		    cache_is_expired(detail, tmp)) {
			sunrpc_begin_cache_remove_entry(tmp, detail);
			trace_cache_entry_expired(detail, tmp);
			freeme = tmp;
			break;
		}
		cache_get(tmp);
		spin_unlock(&detail->hash_lock);
		cache_put(new, detail);
		return tmp;
	}

	hlist_add_head_rcu(&new->cache_list, head);
	detail->entries++;
	cache_get(new);
	spin_unlock(&detail->hash_lock);

	if (freeme)
		sunrpc_end_cache_remove_entry(freeme, detail);
	return new;
}

struct cache_head *sunrpc_cache_lookup_rcu(struct cache_detail *detail,
					   struct cache_head *key, int hash)
{
	struct cache_head *ret;

	ret = sunrpc_cache_find_rcu(detail, key, hash);
	if (ret)
		return ret;
	/* Didn't find anything, insert an empty entry */
	return sunrpc_cache_add_entry(detail, key, hash);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_lookup_rcu);

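/*
 * Illustrative sketch (not part of this file): a cache implementation
 * typically embeds a cache_head at the start of its own entry type and
 * wraps sunrpc_cache_lookup_rcu() with container_of().  The "my_map"
 * names below are hypothetical; in-tree caches such as ip_map in
 * svcauth_unix.c follow the same pattern:
 *
 *	struct my_map {
 *		struct cache_head h;	// must be first
 *		char class[8];		// key
 *		struct in6_addr addr;	// key
 *	};
 *
 *	static struct my_map *my_map_lookup(struct cache_detail *cd,
 *					    struct my_map *key, int hash)
 *	{
 *		struct cache_head *ch;
 *
 *		ch = sunrpc_cache_lookup_rcu(cd, &key->h, hash);
 *		if (ch)
 *			return container_of(ch, struct my_map, h);
 *		return NULL;
 *	}
 */
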
static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch);

static void cache_fresh_locked(struct cache_head *head, time64_t expiry,
			       struct cache_detail *detail)
{
	time64_t now = seconds_since_boot();

	if (now <= detail->flush_time)
		/* ensure it isn't immediately treated as expired */
		now = detail->flush_time + 1;
	head->expiry_time = expiry;
	head->last_refresh = now;
	smp_wmb(); /* paired with smp_rmb() in cache_is_valid() */
	set_bit(CACHE_VALID, &head->flags);
}

static void cache_fresh_unlocked(struct cache_head *head,
				 struct cache_detail *detail)
{
	if (test_and_clear_bit(CACHE_PENDING, &head->flags)) {
		cache_revisit_request(head);
		cache_dequeue(detail, head);
	}
}

static void cache_make_negative(struct cache_detail *detail,
				struct cache_head *h)
{
	set_bit(CACHE_NEGATIVE, &h->flags);
	trace_cache_entry_make_negative(detail, h);
}

static void cache_entry_update(struct cache_detail *detail,
			       struct cache_head *h,
			       struct cache_head *new)
{
	if (!test_bit(CACHE_NEGATIVE, &new->flags)) {
		detail->update(h, new);
		trace_cache_entry_update(detail, h);
	} else {
		cache_make_negative(detail, h);
	}
}

struct cache_head *sunrpc_cache_update(struct cache_detail *detail,
				       struct cache_head *new, struct cache_head *old, int hash)
{
	/* The 'old' entry is to be replaced by 'new'.
	 * If 'old' is not VALID, we update it directly,
	 * otherwise we need to replace it
	 */
	struct cache_head *tmp;

	if (!test_bit(CACHE_VALID, &old->flags)) {
		spin_lock(&detail->hash_lock);
		if (!test_bit(CACHE_VALID, &old->flags)) {
			cache_entry_update(detail, old, new);
			cache_fresh_locked(old, new->expiry_time, detail);
			spin_unlock(&detail->hash_lock);
			cache_fresh_unlocked(old, detail);
			return old;
		}
		spin_unlock(&detail->hash_lock);
	}
	/* We need to insert a new entry */
	tmp = detail->alloc();
	if (!tmp) {
		cache_put(old, detail);
		return NULL;
	}
	cache_init(tmp, detail);
	detail->init(tmp, old);

	spin_lock(&detail->hash_lock);
	cache_entry_update(detail, tmp, new);
	hlist_add_head(&tmp->cache_list, &detail->hash_table[hash]);
	detail->entries++;
	cache_get(tmp);
	cache_fresh_locked(tmp, new->expiry_time, detail);
	cache_fresh_locked(old, 0, detail);
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(tmp, detail);
	cache_fresh_unlocked(old, detail);
	cache_put(old, detail);
	return tmp;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_update);

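/*
 * Illustrative sketch of the update pattern (hypothetical names): the
 * caller fills a throw-away entry and lets sunrpc_cache_update() either
 * update 'old' in place or swap in a replacement.  The reference on the
 * old entry is always consumed, and the returned entry carries a
 * reference the caller must put:
 *
 *	struct my_map tmp;		// filled in from the downcall
 *	struct cache_head *ch;
 *
 *	tmp.h.expiry_time = expiry;	// plus content fields
 *	ch = sunrpc_cache_update(cd, &tmp.h, &ipm->h, hash);
 *	if (!ch)
 *		return -ENOMEM;		// ref on 'ipm' already dropped
 *	cache_put(ch, cd);
 */
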
static inline int cache_is_valid(struct cache_head *h)
{
	if (!test_bit(CACHE_VALID, &h->flags))
		return -EAGAIN;
	/* entry is valid */
	if (test_bit(CACHE_NEGATIVE, &h->flags))
		return -ENOENT;
	/*
	 * In combination with write barrier in
	 * sunrpc_cache_update, ensures that anyone
	 * using the cache entry after this sees the
	 * updated contents:
	 */
	smp_rmb();
	return 0;
}

static int try_to_negate_entry(struct cache_detail *detail, struct cache_head *h)
{
	int rv;

	spin_lock(&detail->hash_lock);
	rv = cache_is_valid(h);
	if (rv == -EAGAIN) {
		cache_make_negative(detail, h);
		cache_fresh_locked(h, seconds_since_boot()+CACHE_NEW_EXPIRY,
				   detail);
		rv = -ENOENT;
	}
	spin_unlock(&detail->hash_lock);
	cache_fresh_unlocked(h, detail);
	return rv;
}

/*
 * This is the generic cache management routine for all
 * the authentication caches.
 * It checks the currency of a cache item and will (later)
 * initiate an upcall to fill it if needed.
 *
 * Returns 0 if the cache_head can be used, or cache_puts it and returns
 *	-EAGAIN if upcall is pending and request has been queued
 *	-ETIMEDOUT if upcall failed or request could not be queued or
 *	  upcall completed but item is still invalid (implying that
 *	  the cache item has been replaced with a newer one).
 *	-ENOENT if cache entry was negative
 */
int cache_check(struct cache_detail *detail,
		struct cache_head *h, struct cache_req *rqstp)
{
	int rv;
	time64_t refresh_age, age;

	/* First decide return status as best we can */
	rv = cache_is_valid(h);

	/* now see if we want to start an upcall */
	refresh_age = (h->expiry_time - h->last_refresh);
	age = seconds_since_boot() - h->last_refresh;

	if (rqstp == NULL) {
		if (rv == -EAGAIN)
			rv = -ENOENT;
	} else if (rv == -EAGAIN ||
		   (h->expiry_time != 0 && age > refresh_age/2)) {
		dprintk("RPC: Want update, refage=%lld, age=%lld\n",
			refresh_age, age);
		switch (detail->cache_upcall(detail, h)) {
		case -EINVAL:
			rv = try_to_negate_entry(detail, h);
			break;
		case -EAGAIN:
			cache_fresh_unlocked(h, detail);
			break;
		}
	}

	if (rv == -EAGAIN) {
		if (!cache_defer_req(rqstp, h)) {
			/*
			 * Request was not deferred; handle it as best
			 * we can ourselves:
			 */
			rv = cache_is_valid(h);
			if (rv == -EAGAIN)
				rv = -ETIMEDOUT;
		}
	}
	if (rv)
		cache_put(h, detail);
	return rv;
}
EXPORT_SYMBOL_GPL(cache_check);

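/*
 * Illustrative sketch of a caller (hypothetical names): cache_check()
 * consumes the caller's reference on every non-zero return, so only
 * the success path may keep using the entry:
 *
 *	switch (cache_check(cd, &ipm->h, &rqstp->rq_chandle)) {
 *	case 0:			// valid; we still hold our reference
 *		use(ipm);
 *		cache_put(&ipm->h, cd);
 *		break;
 *	case -EAGAIN:		// upcall sent, request deferred
 *	case -ETIMEDOUT:	// upcall failed; our reference is gone
 *	case -ENOENT:		// negative entry; our reference is gone
 *	default:
 *		break;		// report the error to the caller
 *	}
 */
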
/*
 * caches need to be periodically cleaned.
 * For this we maintain a list of cache_detail and
 * a current pointer into that list and into the table
 * for that entry.
 *
 * Each time cache_clean is called it finds the next non-empty entry
 * in the current table and walks the list in that entry
 * looking for entries that can be removed.
 *
 * An entry gets removed if:
 * - The expiry is before current time
 * - The last_refresh time is before the flush_time for that cache
 *
 * later we might drop old entries with non-NEVER expiry if that table
 * is getting 'full' for some definition of 'full'
 *
 * The question of "how often to scan a table" is an interesting one
 * and is answered in part by the use of the "nextcheck" field in the
 * cache_detail.
 * When a scan of a table begins, the nextcheck field is set to a time
 * that is well into the future.
 * While scanning, if an expiry time is found that is earlier than the
 * current nextcheck time, nextcheck is set to that expiry time.
 * If the flush_time is ever set to a time earlier than the nextcheck
 * time, the nextcheck time is then set to that flush_time.
 *
 * A table is then only scanned if the current time is at least
 * the nextcheck time.
 */

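/*
 * For example: a scan starting at time T sets nextcheck to T+1800.
 * If it walks past an entry expiring at T+60, nextcheck is pulled
 * down to T+61, so the cleaner revisits this table shortly after
 * that entry is due.  If userspace meanwhile writes to 'flush',
 * write_flush() also resets nextcheck so the next cleaner run
 * rescans the table immediately.
 */
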
static LIST_HEAD(cache_list);
static DEFINE_SPINLOCK(cache_list_lock);
static struct cache_detail *current_detail;
static int current_index;

static void do_cache_clean(struct work_struct *work);
static struct delayed_work cache_cleaner;

void sunrpc_init_cache_detail(struct cache_detail *cd)
{
	spin_lock_init(&cd->hash_lock);
	INIT_LIST_HEAD(&cd->queue);
	spin_lock(&cache_list_lock);
	cd->nextcheck = 0;
	cd->entries = 0;
	atomic_set(&cd->writers, 0);
	cd->last_close = 0;
	cd->last_warn = -1;
	list_add(&cd->others, &cache_list);
	spin_unlock(&cache_list_lock);

	/* start the cleaning process */
	queue_delayed_work(system_power_efficient_wq, &cache_cleaner, 0);
}
EXPORT_SYMBOL_GPL(sunrpc_init_cache_detail);

void sunrpc_destroy_cache_detail(struct cache_detail *cd)
{
	cache_purge(cd);
	spin_lock(&cache_list_lock);
	spin_lock(&cd->hash_lock);
	if (current_detail == cd)
		current_detail = NULL;
	list_del_init(&cd->others);
	spin_unlock(&cd->hash_lock);
	spin_unlock(&cache_list_lock);
	if (list_empty(&cache_list)) {
		/* module must be being unloaded so it's safe to kill the worker */
		cancel_delayed_work_sync(&cache_cleaner);
	}
}
EXPORT_SYMBOL_GPL(sunrpc_destroy_cache_detail);

/* clean cache tries to find something to clean
 * and cleans it.
 * It returns 1 if it cleaned something,
 *            0 if it didn't find anything this time
 *           -1 if it fell off the end of the list.
 */
static int cache_clean(void)
{
	int rv = 0;
	struct list_head *next;

	spin_lock(&cache_list_lock);

	/* find a suitable table if we don't already have one */
	while (current_detail == NULL ||
	       current_index >= current_detail->hash_size) {
		if (current_detail)
			next = current_detail->others.next;
		else
			next = cache_list.next;
		if (next == &cache_list) {
			current_detail = NULL;
			spin_unlock(&cache_list_lock);
			return -1;
		}
		current_detail = list_entry(next, struct cache_detail, others);
		if (current_detail->nextcheck > seconds_since_boot())
			current_index = current_detail->hash_size;
		else {
			current_index = 0;
			current_detail->nextcheck = seconds_since_boot()+30*60;
		}
	}

	/* find a non-empty bucket in the table */
	while (current_detail &&
	       current_index < current_detail->hash_size &&
	       hlist_empty(&current_detail->hash_table[current_index]))
		current_index++;

	/* find a cleanable entry in the bucket and clean it, or set to next bucket */
	if (current_detail && current_index < current_detail->hash_size) {
		struct cache_head *ch = NULL;
		struct cache_detail *d;
		struct hlist_head *head;
		struct hlist_node *tmp;

		spin_lock(&current_detail->hash_lock);

		/* Ok, now to clean this strand */
		head = &current_detail->hash_table[current_index];
		hlist_for_each_entry_safe(ch, tmp, head, cache_list) {
			if (current_detail->nextcheck > ch->expiry_time)
				current_detail->nextcheck = ch->expiry_time+1;
			if (!cache_is_expired(current_detail, ch))
				continue;

			sunrpc_begin_cache_remove_entry(ch, current_detail);
			trace_cache_entry_expired(current_detail, ch);
			rv = 1;
			break;
		}

		spin_unlock(&current_detail->hash_lock);
		d = current_detail;
		if (!ch)
			current_index++;
		spin_unlock(&cache_list_lock);
		if (ch)
			sunrpc_end_cache_remove_entry(ch, d);
	} else
		spin_unlock(&cache_list_lock);

	return rv;
}

/*
 * We want to regularly clean the cache, so we need to schedule some work ...
 */
static void do_cache_clean(struct work_struct *work)
{
	int delay = 5;

	if (cache_clean() == -1)
		delay = round_jiffies_relative(30*HZ);

	if (list_empty(&cache_list))
		delay = 0;

	if (delay)
		queue_delayed_work(system_power_efficient_wq,
				   &cache_cleaner, delay);
}

/*
 * Clean all caches promptly.  This just calls cache_clean
 * repeatedly until we are sure that every cache has had a chance to
 * be fully cleaned
 */
void cache_flush(void)
{
	while (cache_clean() != -1)
		cond_resched();
	while (cache_clean() != -1)
		cond_resched();
}
EXPORT_SYMBOL_GPL(cache_flush);

void cache_purge(struct cache_detail *detail)
{
	struct cache_head *ch = NULL;
	struct hlist_head *head = NULL;
	int i = 0;

	spin_lock(&detail->hash_lock);
	if (!detail->entries) {
		spin_unlock(&detail->hash_lock);
		return;
	}

	dprintk("RPC: %d entries in %s cache\n", detail->entries, detail->name);
	for (i = 0; i < detail->hash_size; i++) {
		head = &detail->hash_table[i];
		while (!hlist_empty(head)) {
			ch = hlist_entry(head->first, struct cache_head,
					 cache_list);
			sunrpc_begin_cache_remove_entry(ch, detail);
			spin_unlock(&detail->hash_lock);
			sunrpc_end_cache_remove_entry(ch, detail);
			spin_lock(&detail->hash_lock);
		}
	}
	spin_unlock(&detail->hash_lock);
}
EXPORT_SYMBOL_GPL(cache_purge);

/*
 * Deferral and Revisiting of Requests.
 *
 * If a cache lookup finds a pending entry, we
 * need to defer the request and revisit it later.
 * All deferred requests are stored in a hash table,
 * indexed by "struct cache_head *".
 * As it may be wasteful to store a whole request
 * structure, we allow the request to provide a
 * deferred form, which must contain a
 * 'struct cache_deferred_req'
 * This cache_deferred_req contains a method to allow
 * it to be revisited when cache info is available
 */

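/*
 * Illustrative sketch (hypothetical names): a deferrable request
 * embeds a cache_deferred_req in its saved form and provides the
 * ->revisit method; svc_defer()/svc_revisit() in svc_xprt.c are the
 * in-tree example:
 *
 *	struct my_deferred_req {
 *		struct cache_deferred_req handle;
 *		// ...saved request state...
 *	};
 *
 *	static void my_revisit(struct cache_deferred_req *dreq, int too_many)
 *	{
 *		struct my_deferred_req *dr =
 *			container_of(dreq, struct my_deferred_req, handle);
 *
 *		if (too_many)
 *			my_discard(dr);		// hypothetical cleanup
 *		else
 *			my_requeue(dr);		// hypothetical requeue
 *	}
 */
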
#define DFR_HASHSIZE	(PAGE_SIZE/sizeof(struct list_head))
#define DFR_HASH(item)	((((long)item)>>4 ^ (((long)item)>>13)) % DFR_HASHSIZE)

#define DFR_MAX	300	/* ??? */

static DEFINE_SPINLOCK(cache_defer_lock);
static LIST_HEAD(cache_defer_list);
static struct hlist_head cache_defer_hash[DFR_HASHSIZE];
static int cache_defer_cnt;

static void __unhash_deferred_req(struct cache_deferred_req *dreq)
{
	hlist_del_init(&dreq->hash);
	if (!list_empty(&dreq->recent)) {
		list_del_init(&dreq->recent);
		cache_defer_cnt--;
	}
}

static void __hash_deferred_req(struct cache_deferred_req *dreq, struct cache_head *item)
{
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&dreq->recent);
	hlist_add_head(&dreq->hash, &cache_defer_hash[hash]);
}

static void setup_deferral(struct cache_deferred_req *dreq,
			   struct cache_head *item,
			   int count_me)
{
	dreq->item = item;

	spin_lock(&cache_defer_lock);

	__hash_deferred_req(dreq, item);

	if (count_me) {
		cache_defer_cnt++;
		list_add(&dreq->recent, &cache_defer_list);
	}

	spin_unlock(&cache_defer_lock);
}

struct thread_deferred_req {
	struct cache_deferred_req handle;
	struct completion completion;
};

static void cache_restart_thread(struct cache_deferred_req *dreq, int too_many)
{
	struct thread_deferred_req *dr =
		container_of(dreq, struct thread_deferred_req, handle);
	complete(&dr->completion);
}

static void cache_wait_req(struct cache_req *req, struct cache_head *item)
{
	struct thread_deferred_req sleeper;
	struct cache_deferred_req *dreq = &sleeper.handle;

	sleeper.completion = COMPLETION_INITIALIZER_ONSTACK(sleeper.completion);
	dreq->revisit = cache_restart_thread;

	setup_deferral(dreq, item, 0);

	if (!test_bit(CACHE_PENDING, &item->flags) ||
	    wait_for_completion_interruptible_timeout(
		    &sleeper.completion, req->thread_wait) <= 0) {
		/* The completion wasn't completed, so we need
		 * to clean up
		 */
		spin_lock(&cache_defer_lock);
		if (!hlist_unhashed(&sleeper.handle.hash)) {
			__unhash_deferred_req(&sleeper.handle);
			spin_unlock(&cache_defer_lock);
		} else {
			/* cache_revisit_request already removed
			 * this from the hash table, but hasn't
			 * called ->revisit yet.  It will very soon
			 * and we need to wait for it.
			 */
			spin_unlock(&cache_defer_lock);
			wait_for_completion(&sleeper.completion);
		}
	}
}

static void cache_limit_defers(void)
{
	/* Make sure we haven't exceeded the limit of allowed deferred
	 * requests.
	 */
	struct cache_deferred_req *discard = NULL;

	if (cache_defer_cnt <= DFR_MAX)
		return;

	spin_lock(&cache_defer_lock);

	/* Consider removing either the first or the last */
	if (cache_defer_cnt > DFR_MAX) {
		if (prandom_u32() & 1)
			discard = list_entry(cache_defer_list.next,
					     struct cache_deferred_req, recent);
		else
			discard = list_entry(cache_defer_list.prev,
					     struct cache_deferred_req, recent);
		__unhash_deferred_req(discard);
	}
	spin_unlock(&cache_defer_lock);
	if (discard)
		discard->revisit(discard, 1);
}

/* Return true if and only if a deferred request is queued. */
static bool cache_defer_req(struct cache_req *req, struct cache_head *item)
{
	struct cache_deferred_req *dreq;

	if (req->thread_wait) {
		cache_wait_req(req, item);
		if (!test_bit(CACHE_PENDING, &item->flags))
			return false;
	}
	dreq = req->defer(req);
	if (dreq == NULL)
		return false;
	setup_deferral(dreq, item, 1);
	if (!test_bit(CACHE_PENDING, &item->flags))
		/* Bit could have been cleared before we managed to
		 * set up the deferral, so need to revisit just in case
		 */
		cache_revisit_request(item);

	cache_limit_defers();
	return true;
}

static void cache_revisit_request(struct cache_head *item)
{
	struct cache_deferred_req *dreq;
	struct list_head pending;
	struct hlist_node *tmp;
	int hash = DFR_HASH(item);

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	hlist_for_each_entry_safe(dreq, tmp, &cache_defer_hash[hash], hash)
		if (dreq->item == item) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}

	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 0);
	}
}

void cache_clean_deferred(void *owner)
{
	struct cache_deferred_req *dreq, *tmp;
	struct list_head pending;

	INIT_LIST_HEAD(&pending);
	spin_lock(&cache_defer_lock);

	list_for_each_entry_safe(dreq, tmp, &cache_defer_list, recent) {
		if (dreq->owner == owner) {
			__unhash_deferred_req(dreq);
			list_add(&dreq->recent, &pending);
		}
	}
	spin_unlock(&cache_defer_lock);

	while (!list_empty(&pending)) {
		dreq = list_entry(pending.next, struct cache_deferred_req, recent);
		list_del_init(&dreq->recent);
		dreq->revisit(dreq, 1);
	}
}

/*
 * communicate with user-space
 *
 * We have a magic /proc file - /proc/net/rpc/<cachename>/channel.
 * On read, you get a full request, or block.
 * On write, an update request is processed.
 * Poll works if anything to read, and always allows write.
 *
 * Implemented by linked list of requests.  Each open file has
 * a ->private that also exists in this list.  New requests are added
 * to the end and may wake up any preceding readers.
 * New readers are added to the head.  If, on read, an item is found with
 * CACHE_UPCALLING clear, we free it from the list.
 */

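/*
 * For example, with the auth.unix.ip cache the daemon might read an
 * upcall line like
 *
 *	nfsd 192.168.0.1
 *
 * and answer it by writing back something like
 *
 *	nfsd 192.168.0.1 1500000000 mydomain
 *
 * (class, address, expiry, domain).  The exact field layout is owned
 * by each cache's ->cache_request and ->cache_parse methods, not by
 * this file.
 */
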
static DEFINE_SPINLOCK(queue_lock);
static DEFINE_MUTEX(queue_io_mutex);

struct cache_queue {
	struct list_head	list;
	int			reader;	/* if 0, then request */
};
struct cache_request {
	struct cache_queue	q;
	struct cache_head	*item;
	char			*buf;
	int			len;
	int			readers;
};
struct cache_reader {
	struct cache_queue	q;
	int			offset;	/* if non-0, we have a refcnt on next request */
};

static int cache_request(struct cache_detail *detail,
			 struct cache_request *crq)
{
	char *bp = crq->buf;
	int len = PAGE_SIZE;

	detail->cache_request(detail, crq->item, &bp, &len);
	if (len < 0)
		return -EAGAIN;
	return PAGE_SIZE - len;
}

static ssize_t cache_read(struct file *filp, char __user *buf, size_t count,
			  loff_t *ppos, struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;
	struct cache_request *rq;
	struct inode *inode = file_inode(filp);
	int err;

	if (count == 0)
		return 0;

	inode_lock(inode); /* protect against multiple concurrent
			    * readers on this file */
 again:
	spin_lock(&queue_lock);
	/* need to find next request */
	while (rp->q.list.next != &cd->queue &&
	       list_entry(rp->q.list.next, struct cache_queue, list)
	       ->reader) {
		struct list_head *next = rp->q.list.next;
		list_move(&rp->q.list, next);
	}
	if (rp->q.list.next == &cd->queue) {
		spin_unlock(&queue_lock);
		inode_unlock(inode);
		WARN_ON_ONCE(rp->offset);
		return 0;
	}
	rq = container_of(rp->q.list.next, struct cache_request, q.list);
	WARN_ON_ONCE(rq->q.reader);
	if (rp->offset == 0)
		rq->readers++;
	spin_unlock(&queue_lock);

	if (rq->len == 0) {
		err = cache_request(cd, rq);
		if (err < 0)
			goto out;
		rq->len = err;
	}

	if (rp->offset == 0 && !test_bit(CACHE_PENDING, &rq->item->flags)) {
		err = -EAGAIN;
		spin_lock(&queue_lock);
		list_move(&rp->q.list, &rq->q.list);
		spin_unlock(&queue_lock);
	} else {
		if (rp->offset + count > rq->len)
			count = rq->len - rp->offset;
		err = -EFAULT;
		if (copy_to_user(buf, rq->buf + rp->offset, count))
			goto out;
		rp->offset += count;
		if (rp->offset >= rq->len) {
			rp->offset = 0;
			spin_lock(&queue_lock);
			list_move(&rp->q.list, &rq->q.list);
			spin_unlock(&queue_lock);
		}
		err = 0;
	}
 out:
	if (rp->offset == 0) {
		/* need to release rq */
		spin_lock(&queue_lock);
		rq->readers--;
		if (rq->readers == 0 &&
		    !test_bit(CACHE_PENDING, &rq->item->flags)) {
			list_del(&rq->q.list);
			spin_unlock(&queue_lock);
			cache_put(rq->item, cd);
			kfree(rq->buf);
			kfree(rq);
		} else
			spin_unlock(&queue_lock);
	}
	if (err == -EAGAIN)
		goto again;
	inode_unlock(inode);
	return err ? err : count;
}

static ssize_t cache_do_downcall(char *kaddr, const char __user *buf,
				 size_t count, struct cache_detail *cd)
{
	ssize_t ret;

	if (count == 0)
		return -EINVAL;
	if (copy_from_user(kaddr, buf, count))
		return -EFAULT;
	kaddr[count] = '\0';
	ret = cd->cache_parse(cd, kaddr, count);
	if (!ret)
		ret = count;
	return ret;
}

static ssize_t cache_slow_downcall(const char __user *buf,
				   size_t count, struct cache_detail *cd)
{
	static char write_buf[8192]; /* protected by queue_io_mutex */
	ssize_t ret = -EINVAL;

	if (count >= sizeof(write_buf))
		goto out;
	mutex_lock(&queue_io_mutex);
	ret = cache_do_downcall(write_buf, buf, count, cd);
	mutex_unlock(&queue_io_mutex);
out:
	return ret;
}

static ssize_t cache_downcall(struct address_space *mapping,
			      const char __user *buf,
			      size_t count, struct cache_detail *cd)
{
	struct page *page;
	char *kaddr;
	ssize_t ret = -ENOMEM;

	if (count >= PAGE_SIZE)
		goto out_slow;

	page = find_or_create_page(mapping, 0, GFP_KERNEL);
	if (!page)
		goto out_slow;

	kaddr = kmap(page);
	ret = cache_do_downcall(kaddr, buf, count, cd);
	kunmap(page);
	unlock_page(page);
	put_page(page);
	return ret;
out_slow:
	return cache_slow_downcall(buf, count, cd);
}

static ssize_t cache_write(struct file *filp, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	struct address_space *mapping = filp->f_mapping;
	struct inode *inode = file_inode(filp);
	ssize_t ret = -EINVAL;

	if (!cd->cache_parse)
		goto out;

	inode_lock(inode);
	ret = cache_downcall(mapping, buf, count, cd);
	inode_unlock(inode);
out:
	return ret;
}

static DECLARE_WAIT_QUEUE_HEAD(queue_wait);

static __poll_t cache_poll(struct file *filp, poll_table *wait,
			   struct cache_detail *cd)
{
	__poll_t mask;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	poll_wait(filp, &queue_wait, wait);

	/* always allow write */
	mask = EPOLLOUT | EPOLLWRNORM;

	if (!rp)
		return mask;

	spin_lock(&queue_lock);

	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			mask |= EPOLLIN | EPOLLRDNORM;
			break;
		}
	spin_unlock(&queue_lock);
	return mask;
}

static int cache_ioctl(struct inode *ino, struct file *filp,
		       unsigned int cmd, unsigned long arg,
		       struct cache_detail *cd)
{
	int len = 0;
	struct cache_reader *rp = filp->private_data;
	struct cache_queue *cq;

	if (cmd != FIONREAD || !rp)
		return -EINVAL;

	spin_lock(&queue_lock);

	/* only find the length remaining in current request,
	 * or the length of the next request
	 */
	for (cq = &rp->q; &cq->list != &cd->queue;
	     cq = list_entry(cq->list.next, struct cache_queue, list))
		if (!cq->reader) {
			struct cache_request *cr =
				container_of(cq, struct cache_request, q);
			len = cr->len - rp->offset;
			break;
		}
	spin_unlock(&queue_lock);

	return put_user(len, (int __user *)arg);
}

static int cache_open(struct inode *inode, struct file *filp,
		      struct cache_detail *cd)
{
	struct cache_reader *rp = NULL;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	nonseekable_open(inode, filp);
	if (filp->f_mode & FMODE_READ) {
		rp = kmalloc(sizeof(*rp), GFP_KERNEL);
		if (!rp) {
			module_put(cd->owner);
			return -ENOMEM;
		}
		rp->offset = 0;
		rp->q.reader = 1;

		spin_lock(&queue_lock);
		list_add(&rp->q.list, &cd->queue);
		spin_unlock(&queue_lock);
	}
	if (filp->f_mode & FMODE_WRITE)
		atomic_inc(&cd->writers);
	filp->private_data = rp;
	return 0;
}

static int cache_release(struct inode *inode, struct file *filp,
			 struct cache_detail *cd)
{
	struct cache_reader *rp = filp->private_data;

	if (rp) {
		spin_lock(&queue_lock);
		if (rp->offset) {
			struct cache_queue *cq;
			for (cq = &rp->q; &cq->list != &cd->queue;
			     cq = list_entry(cq->list.next, struct cache_queue, list))
				if (!cq->reader) {
					container_of(cq, struct cache_request, q)
						->readers--;
					break;
				}
			rp->offset = 0;
		}
		list_del(&rp->q.list);
		spin_unlock(&queue_lock);

		filp->private_data = NULL;
		kfree(rp);
	}
	if (filp->f_mode & FMODE_WRITE) {
		atomic_dec(&cd->writers);
		cd->last_close = seconds_since_boot();
	}
	module_put(cd->owner);
	return 0;
}

static void cache_dequeue(struct cache_detail *detail, struct cache_head *ch)
{
	struct cache_queue *cq, *tmp;
	struct cache_request *cr;
	struct list_head dequeued;

	INIT_LIST_HEAD(&dequeued);
	spin_lock(&queue_lock);
	list_for_each_entry_safe(cq, tmp, &detail->queue, list)
		if (!cq->reader) {
			cr = container_of(cq, struct cache_request, q);
			if (cr->item != ch)
				continue;
			if (test_bit(CACHE_PENDING, &ch->flags))
				/* Lost a race and it is pending again */
				break;
			if (cr->readers != 0)
				continue;
			list_move(&cr->q.list, &dequeued);
		}
	spin_unlock(&queue_lock);
	while (!list_empty(&dequeued)) {
		cr = list_entry(dequeued.next, struct cache_request, q.list);
		list_del(&cr->q.list);
		cache_put(cr->item, detail);
		kfree(cr->buf);
		kfree(cr);
	}
}

/*
 * Support routines for text-based upcalls.
 * Fields are separated by spaces.
 * Fields are either mangled to quote space tab newline slosh with slosh
 * or hexified with a leading \x
 * Record is terminated with newline.
 */

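/*
 * For example, qword_add() writes the field "a b" as "a\040b " (octal
 * quoting for the embedded space, plus the trailing field separator),
 * while qword_addhex() would write the same three bytes as "\x612062 ".
 * A two-field record might therefore look like
 *
 *	nfsd a\040b\n
 */
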
void qword_add(char **bpp, int *lp, char *str)
{
	char *bp = *bpp;
	int len = *lp;
	int ret;

	if (len < 0) return;

	ret = string_escape_str(str, bp, len, ESCAPE_OCTAL, "\\ \n\t");
	if (ret >= len) {
		bp += len;
		len = -1;
	} else {
		bp += ret;
		len -= ret;
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_add);

void qword_addhex(char **bpp, int *lp, char *buf, int blen)
{
	char *bp = *bpp;
	int len = *lp;

	if (len < 0) return;

	if (len > 2) {
		*bp++ = '\\';
		*bp++ = 'x';
		len -= 2;
		while (blen && len >= 2) {
			bp = hex_byte_pack(bp, *buf++);
			len -= 2;
			blen--;
		}
	}
	if (blen || len < 1) len = -1;
	else {
		*bp++ = ' ';
		len--;
	}
	*bpp = bp;
	*lp = len;
}
EXPORT_SYMBOL_GPL(qword_addhex);

static void warn_no_listener(struct cache_detail *detail)
{
	if (detail->last_warn != detail->last_close) {
		detail->last_warn = detail->last_close;
		if (detail->warn_no_listener)
			detail->warn_no_listener(detail, detail->last_close != 0);
	}
}

static bool cache_listeners_exist(struct cache_detail *detail)
{
	if (atomic_read(&detail->writers))
		return true;
	if (detail->last_close == 0)
		/* This cache was never opened */
		return false;
	if (detail->last_close < seconds_since_boot() - 30)
		/*
		 * We allow for the possibility that someone might
		 * restart a userspace daemon without restarting the
		 * server; but after 30 seconds, we give up.
		 */
		return false;
	return true;
}

/*
 * register an upcall request to user-space and queue it up for read() by the
 * upcall daemon.
 *
 * Each request is at most one page long.
 */
static int cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	char *buf;
	struct cache_request *crq;
	int ret = 0;

	if (test_bit(CACHE_CLEANED, &h->flags))
		/* Too late to make an upcall */
		return -EAGAIN;

	buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
	if (!buf)
		return -EAGAIN;

	crq = kmalloc(sizeof(*crq), GFP_KERNEL);
	if (!crq) {
		kfree(buf);
		return -EAGAIN;
	}

	crq->q.reader = 0;
	crq->buf = buf;
	crq->len = 0;
	crq->readers = 0;
	spin_lock(&queue_lock);
	if (test_bit(CACHE_PENDING, &h->flags)) {
		crq->item = cache_get(h);
		list_add_tail(&crq->q.list, &detail->queue);
		trace_cache_entry_upcall(detail, h);
	} else
		/* Lost a race, no longer PENDING, so don't enqueue */
		ret = -EAGAIN;
	spin_unlock(&queue_lock);
	wake_up(&queue_wait);
	if (ret == -EAGAIN) {
		kfree(buf);
		kfree(crq);
	}
	return ret;
}

int sunrpc_cache_pipe_upcall(struct cache_detail *detail, struct cache_head *h)
{
	if (test_and_set_bit(CACHE_PENDING, &h->flags))
		return 0;
	return cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall);

int sunrpc_cache_pipe_upcall_timeout(struct cache_detail *detail,
				     struct cache_head *h)
{
	if (!cache_listeners_exist(detail)) {
		warn_no_listener(detail);
		trace_cache_entry_no_listener(detail, h);
		return -EINVAL;
	}
	return sunrpc_cache_pipe_upcall(detail, h);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_pipe_upcall_timeout);

/*
 * parse a message from user-space and pass it
 * to an appropriate cache
 * Messages are, like requests, separated into fields by
 * spaces and dequoted as \xHEXSTRING or embedded \nnn octal
 *
 * Message is
 *   reply cachename expiry key ... content....
 *
 * key and content are both parsed by cache
 */

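/*
 * For example, the field \x6e6673 decodes to the three bytes "nfs",
 * and \156\146\163 (octal quoting) decodes to the same thing; plain
 * unquoted text is taken as-is up to the next space or newline.
 */
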
int qword_get(char **bpp, char *dest, int bufsize)
{
	/* return bytes copied, or -1 on error */
	char *bp = *bpp;
	int len = 0;

	while (*bp == ' ') bp++;

	if (bp[0] == '\\' && bp[1] == 'x') {
		/* HEX STRING */
		bp += 2;
		while (len < bufsize - 1) {
			int h, l;

			h = hex_to_bin(bp[0]);
			if (h < 0)
				break;

			l = hex_to_bin(bp[1]);
			if (l < 0)
				break;

			*dest++ = (h << 4) | l;
			bp += 2;
			len++;
		}
	} else {
		/* text with \nnn octal quoting */
		while (*bp != ' ' && *bp != '\n' && *bp && len < bufsize-1) {
			if (*bp == '\\' &&
			    isodigit(bp[1]) && (bp[1] <= '3') &&
			    isodigit(bp[2]) &&
			    isodigit(bp[3])) {
				int byte = (*++bp - '0');
				bp++;
				byte = (byte << 3) | (*bp++ - '0');
				byte = (byte << 3) | (*bp++ - '0');
				*dest++ = byte;
				len++;
			} else {
				*dest++ = *bp++;
				len++;
			}
		}
	}

	if (*bp != ' ' && *bp != '\n' && *bp != '\0')
		return -1;
	while (*bp == ' ') bp++;
	*bpp = bp;
	*dest = '\0';
	return len;
}
EXPORT_SYMBOL_GPL(qword_get);

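/*
 * Illustrative sketch of a ->cache_parse() method built on qword_get()
 * (hypothetical names):
 *
 *	static int my_parse(struct cache_detail *cd, char *mesg, int mlen)
 *	{
 *		char class[8];
 *		char addr[INET6_ADDRSTRLEN];
 *
 *		if (qword_get(&mesg, class, sizeof(class)) <= 0)
 *			return -EINVAL;
 *		if (qword_get(&mesg, addr, sizeof(addr)) <= 0)
 *			return -EINVAL;
 *		// ...parse expiry, look up and update the entry...
 *		return 0;
 *	}
 */
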
/*
 * support /proc/net/rpc/$CACHENAME/content
 * as a seqfile.
 * We call ->cache_show passing NULL for the item to
 * get a header, then pass each real item in the cache
 */

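/*
 * The iterator packs its position into the 64-bit *pos cookie: after
 * subtracting 1 for the header slot (SEQ_START_TOKEN is position 0),
 * the upper 32 bits select the hash bucket and the lower 32 bits the
 * entry within that bucket.  So *pos == (2LL << 32) + 1 names the
 * first entry of bucket 2.
 */
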
static void *__cache_seq_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;
	unsigned int hash, entry;
	struct cache_head *ch;
	struct cache_detail *cd = m->private;

	if (!n--)
		return SEQ_START_TOKEN;
	hash = n >> 32;
	entry = n & ((1LL<<32) - 1);

	hlist_for_each_entry_rcu(ch, &cd->hash_table[hash], cache_list)
		if (!entry--)
			return ch;
	n &= ~((1LL<<32) - 1);
	do {
		hash++;
		n += 1LL<<32;
	} while (hash < cd->hash_size &&
		 hlist_empty(&cd->hash_table[hash]));
	if (hash >= cd->hash_size)
		return NULL;
	*pos = n+1;
	return hlist_entry_safe(rcu_dereference_raw(
				hlist_first_rcu(&cd->hash_table[hash])),
				struct cache_head, cache_list);
}

static void *cache_seq_next(struct seq_file *m, void *p, loff_t *pos)
{
	struct cache_head *ch = p;
	int hash = (*pos >> 32);
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		hash = 0;
	else if (ch->cache_list.next == NULL) {
		hash++;
		*pos += 1LL<<32;
	} else {
		++*pos;
		return hlist_entry_safe(rcu_dereference_raw(
				hlist_next_rcu(&ch->cache_list)),
				struct cache_head, cache_list);
	}
	*pos &= ~((1LL<<32) - 1);
	while (hash < cd->hash_size &&
	       hlist_empty(&cd->hash_table[hash])) {
		hash++;
		*pos += 1LL<<32;
	}
	if (hash >= cd->hash_size)
		return NULL;
	++*pos;
	return hlist_entry_safe(rcu_dereference_raw(
			hlist_first_rcu(&cd->hash_table[hash])),
			struct cache_head, cache_list);
}

void *cache_seq_start_rcu(struct seq_file *m, loff_t *pos)
	__acquires(RCU)
{
	rcu_read_lock();
	return __cache_seq_start(m, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_start_rcu);

void *cache_seq_next_rcu(struct seq_file *file, void *p, loff_t *pos)
{
	return cache_seq_next(file, p, pos);
}
EXPORT_SYMBOL_GPL(cache_seq_next_rcu);

void cache_seq_stop_rcu(struct seq_file *m, void *p)
	__releases(RCU)
{
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(cache_seq_stop_rcu);

static int c_show(struct seq_file *m, void *p)
{
	struct cache_head *cp = p;
	struct cache_detail *cd = m->private;

	if (p == SEQ_START_TOKEN)
		return cd->cache_show(m, cd, NULL);

	ifdebug(CACHE)
		seq_printf(m, "# expiry=%lld refcnt=%d flags=%lx\n",
			   convert_to_wallclock(cp->expiry_time),
			   kref_read(&cp->ref), cp->flags);
	cache_get(cp);
	if (cache_check(cd, cp, NULL))
		/* cache_check does a cache_put on failure */
		seq_printf(m, "# ");
	else {
		if (cache_is_expired(cd, cp))
			seq_printf(m, "# ");
		cache_put(cp, cd);
	}

	return cd->cache_show(m, cd, cp);
}

static const struct seq_operations cache_content_op = {
	.start	= cache_seq_start_rcu,
	.next	= cache_seq_next_rcu,
	.stop	= cache_seq_stop_rcu,
	.show	= c_show,
};

static int content_open(struct inode *inode, struct file *file,
			struct cache_detail *cd)
{
	struct seq_file *seq;
	int err;

	if (!cd || !try_module_get(cd->owner))
		return -EACCES;

	err = seq_open(file, &cache_content_op);
	if (err) {
		module_put(cd->owner);
		return err;
	}

	seq = file->private_data;
	seq->private = cd;
	return 0;
}

static int content_release(struct inode *inode, struct file *file,
			   struct cache_detail *cd)
{
	int ret = seq_release(inode, file);
	module_put(cd->owner);
	return ret;
}

static int open_flush(struct inode *inode, struct file *file,
		      struct cache_detail *cd)
{
	if (!cd || !try_module_get(cd->owner))
		return -EACCES;
	return nonseekable_open(inode, file);
}

static int release_flush(struct inode *inode, struct file *file,
			 struct cache_detail *cd)
{
	module_put(cd->owner);
	return 0;
}

static ssize_t read_flush(struct file *file, char __user *buf,
			  size_t count, loff_t *ppos,
			  struct cache_detail *cd)
{
	char tbuf[22];
	size_t len;

	len = snprintf(tbuf, sizeof(tbuf), "%llu\n",
		       convert_to_wallclock(cd->flush_time));
	return simple_read_from_buffer(buf, count, ppos, tbuf, len);
}

static ssize_t write_flush(struct file *file, const char __user *buf,
			   size_t count, loff_t *ppos,
			   struct cache_detail *cd)
{
	char tbuf[20];
	char *ep;
	time64_t now;

	if (*ppos || count > sizeof(tbuf)-1)
		return -EINVAL;
	if (copy_from_user(tbuf, buf, count))
		return -EFAULT;
	tbuf[count] = 0;
	simple_strtoul(tbuf, &ep, 0);
	if (*ep && *ep != '\n')
		return -EINVAL;
	/* Note that while we check that 'buf' holds a valid number,
	 * we always ignore the value and just flush everything.
	 * Making use of the number leads to races.
	 */

	now = seconds_since_boot();
	/* Always flush everything, so behave like cache_purge()
	 * Do this by advancing flush_time to the current time,
	 * or by one second if it has already reached the current time.
	 * Newly added cache entries will always have ->last_refresh greater
	 * than ->flush_time, so they don't get flushed prematurely.
	 */

	if (cd->flush_time >= now)
		now = cd->flush_time + 1;

	cd->flush_time = now;
	cd->nextcheck = now;
	cache_flush();

	if (cd->flush)
		cd->flush();

	*ppos += count;
	return count;
}

static ssize_t cache_read_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_procfs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_procfs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_procfs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_open(inode, filp, cd);
}

static int cache_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return cache_release(inode, filp, cd);
}

static const struct proc_ops cache_channel_proc_ops = {
	.proc_lseek	= no_llseek,
	.proc_read	= cache_read_procfs,
	.proc_write	= cache_write_procfs,
	.proc_poll	= cache_poll_procfs,
	.proc_ioctl	= cache_ioctl_procfs, /* for FIONREAD */
	.proc_open	= cache_open_procfs,
	.proc_release	= cache_release_procfs,
};

static int content_open_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_open(inode, filp, cd);
}

static int content_release_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return content_release(inode, filp, cd);
}

static const struct proc_ops content_proc_ops = {
	.proc_open	= content_open_procfs,
	.proc_read	= seq_read,
	.proc_lseek	= seq_lseek,
	.proc_release	= content_release_procfs,
};

static int open_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return open_flush(inode, filp, cd);
}

static int release_flush_procfs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = PDE_DATA(inode);

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_procfs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_procfs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = PDE_DATA(file_inode(filp));

	return write_flush(filp, buf, count, ppos, cd);
}

static const struct proc_ops cache_flush_proc_ops = {
	.proc_open	= open_flush_procfs,
	.proc_read	= read_flush_procfs,
	.proc_write	= write_flush_procfs,
	.proc_release	= release_flush_procfs,
	.proc_lseek	= no_llseek,
};

static void remove_cache_proc_entries(struct cache_detail *cd)
{
	if (cd->procfs) {
		proc_remove(cd->procfs);
		cd->procfs = NULL;
	}
}

#ifdef CONFIG_PROC_FS
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	struct proc_dir_entry *p;
	struct sunrpc_net *sn;

	sn = net_generic(net, sunrpc_net_id);
	cd->procfs = proc_mkdir(cd->name, sn->proc_net_rpc);
	if (cd->procfs == NULL)
		goto out_nomem;

	p = proc_create_data("flush", S_IFREG | 0600,
			     cd->procfs, &cache_flush_proc_ops, cd);
	if (p == NULL)
		goto out_nomem;

	if (cd->cache_request || cd->cache_parse) {
		p = proc_create_data("channel", S_IFREG | 0600, cd->procfs,
				     &cache_channel_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	if (cd->cache_show) {
		p = proc_create_data("content", S_IFREG | 0400, cd->procfs,
				     &content_proc_ops, cd);
		if (p == NULL)
			goto out_nomem;
	}
	return 0;
out_nomem:
	remove_cache_proc_entries(cd);
	return -ENOMEM;
}
#else /* CONFIG_PROC_FS */
static int create_cache_proc_entries(struct cache_detail *cd, struct net *net)
{
	return 0;
}
#endif

void __init cache_initialize(void)
{
	INIT_DEFERRABLE_WORK(&cache_cleaner, do_cache_clean);
}

int cache_register_net(struct cache_detail *cd, struct net *net)
{
	int ret;

	sunrpc_init_cache_detail(cd);
	ret = create_cache_proc_entries(cd, net);
	if (ret)
		sunrpc_destroy_cache_detail(cd);
	return ret;
}
EXPORT_SYMBOL_GPL(cache_register_net);

void cache_unregister_net(struct cache_detail *cd, struct net *net)
{
	remove_cache_proc_entries(cd);
	sunrpc_destroy_cache_detail(cd);
}
EXPORT_SYMBOL_GPL(cache_unregister_net);

struct cache_detail *cache_create_net(const struct cache_detail *tmpl, struct net *net)
{
	struct cache_detail *cd;
	int i;

	cd = kmemdup(tmpl, sizeof(struct cache_detail), GFP_KERNEL);
	if (cd == NULL)
		return ERR_PTR(-ENOMEM);

	cd->hash_table = kcalloc(cd->hash_size, sizeof(struct hlist_head),
				 GFP_KERNEL);
	if (cd->hash_table == NULL) {
		kfree(cd);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < cd->hash_size; i++)
		INIT_HLIST_HEAD(&cd->hash_table[i]);
	cd->net = net;
	return cd;
}
EXPORT_SYMBOL_GPL(cache_create_net);

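/*
 * Illustrative sketch of the usual per-net lifecycle (hypothetical
 * template name; see e.g. ip_map_cache_create() in svcauth_unix.c):
 *
 *	struct cache_detail *cd = cache_create_net(&my_cache_template, net);
 *	int err;
 *
 *	if (IS_ERR(cd))
 *		return PTR_ERR(cd);
 *	err = cache_register_net(cd, net);
 *	if (err) {
 *		cache_destroy_net(cd, net);
 *		return err;
 *	}
 *	// ...and on net exit:
 *	cache_unregister_net(cd, net);
 *	cache_destroy_net(cd, net);
 */
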
void cache_destroy_net(struct cache_detail *cd, struct net *net)
{
	kfree(cd->hash_table);
	kfree(cd);
}
EXPORT_SYMBOL_GPL(cache_destroy_net);

static ssize_t cache_read_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_read(filp, buf, count, ppos, cd);
}

static ssize_t cache_write_pipefs(struct file *filp, const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_write(filp, buf, count, ppos, cd);
}

static __poll_t cache_poll_pipefs(struct file *filp, poll_table *wait)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return cache_poll(filp, wait, cd);
}

static long cache_ioctl_pipefs(struct file *filp,
			       unsigned int cmd, unsigned long arg)
{
	struct inode *inode = file_inode(filp);
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_ioctl(inode, filp, cmd, arg, cd);
}

static int cache_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_open(inode, filp, cd);
}

static int cache_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return cache_release(inode, filp, cd);
}

const struct file_operations cache_file_operations_pipefs = {
	.owner		= THIS_MODULE,
	.llseek		= no_llseek,
	.read		= cache_read_pipefs,
	.write		= cache_write_pipefs,
	.poll		= cache_poll_pipefs,
	.unlocked_ioctl	= cache_ioctl_pipefs, /* for FIONREAD */
	.open		= cache_open_pipefs,
	.release	= cache_release_pipefs,
};

static int content_open_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_open(inode, filp, cd);
}

static int content_release_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return content_release(inode, filp, cd);
}

const struct file_operations content_file_operations_pipefs = {
	.open		= content_open_pipefs,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= content_release_pipefs,
};

static int open_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return open_flush(inode, filp, cd);
}

static int release_flush_pipefs(struct inode *inode, struct file *filp)
{
	struct cache_detail *cd = RPC_I(inode)->private;

	return release_flush(inode, filp, cd);
}

static ssize_t read_flush_pipefs(struct file *filp, char __user *buf,
				 size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return read_flush(filp, buf, count, ppos, cd);
}

static ssize_t write_flush_pipefs(struct file *filp,
				  const char __user *buf,
				  size_t count, loff_t *ppos)
{
	struct cache_detail *cd = RPC_I(file_inode(filp))->private;

	return write_flush(filp, buf, count, ppos, cd);
}

const struct file_operations cache_flush_operations_pipefs = {
	.open		= open_flush_pipefs,
	.read		= read_flush_pipefs,
	.write		= write_flush_pipefs,
	.release	= release_flush_pipefs,
	.llseek		= no_llseek,
};

int sunrpc_cache_register_pipefs(struct dentry *parent,
				 const char *name, umode_t umode,
				 struct cache_detail *cd)
{
	struct dentry *dir = rpc_create_cache_dir(parent, name, umode, cd);
	if (IS_ERR(dir))
		return PTR_ERR(dir);
	cd->pipefs = dir;
	return 0;
}
EXPORT_SYMBOL_GPL(sunrpc_cache_register_pipefs);

void sunrpc_cache_unregister_pipefs(struct cache_detail *cd)
{
	if (cd->pipefs) {
		rpc_remove_cache_dir(cd->pipefs);
		cd->pipefs = NULL;
	}
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unregister_pipefs);

void sunrpc_cache_unhash(struct cache_detail *cd, struct cache_head *h)
{
	spin_lock(&cd->hash_lock);
	if (!hlist_unhashed(&h->cache_list)) {
		sunrpc_begin_cache_remove_entry(h, cd);
		spin_unlock(&cd->hash_lock);
		sunrpc_end_cache_remove_entry(h, cd);
	} else
		spin_unlock(&cd->hash_lock);
}
EXPORT_SYMBOL_GPL(sunrpc_cache_unhash);