// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2019, 2020 Amazon.com, Inc. or its affiliates. All rights reserved.
 *
 * User extended attribute client side cache functions.
 *
 * Author: Frank van der Linden <fllinden@amazon.com>
 */
#include <linux/errno.h>
#include <linux/nfs_fs.h>
#include <linux/hashtable.h>
#include <linux/refcount.h>
#include <uapi/linux/xattr.h>
/*
 * User extended attributes client side caching is implemented by having
 * a cache structure attached to NFS inodes. This structure is allocated
 * when needed, and freed when the cache is zapped.
 *
 * The cache structure contains a hash table of entries, and a pointer
 * to a special-cased entry for the listxattr cache.
 *
 * Accessing and allocating / freeing the caches is done via reference
 * counting. The cache entries use a similar refcounting scheme.
 *
 * This makes freeing a cache, both from the shrinker and from the
 * zap cache path, easy. It also means that, in current use cases,
 * the large majority of inodes will not waste any memory, as they
 * will never have any user extended attributes assigned to them.
 *
 * Attribute entries are hashed into a simple hash table. They are
 * also part of an LRU.
 *
 * There are three shrinkers.
 *
 * Two shrinkers deal with the cache entries themselves: one for
 * large entries (> PAGE_SIZE), and one for smaller entries. The
 * shrinker for the larger entries works more aggressively than
 * the one for the smaller entries.
 *
 * The other shrinker frees the cache structures themselves.
 */
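/*
 * A rough sketch of the expected call flow (assumption: the call sites
 * live in the NFSv4.2 proc code, nfs42proc.c): a GETXATTR reply is
 * inserted with nfs4_xattr_cache_add(), later lookups are answered by
 * nfs4_xattr_cache_get(), SETXATTR/REMOVEXATTR update the cache via
 * nfs4_xattr_cache_add()/nfs4_xattr_cache_remove(), and a LISTXATTRS
 * reply is stored with nfs4_xattr_cache_set_list().
 */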
/*
 * 64 buckets is a good default. There is likely no reasonable
 * workload that uses more than even 64 user extended attributes.
 * You can certainly add a lot more - but you get what you ask for
 * in those circumstances.
 */
#define NFS4_XATTR_HASH_SIZE	64

#define NFSDBG_FACILITY	NFSDBG_XATTRCACHE
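/*
 * Note: NFS4_XATTR_HASH_SIZE must remain a power of two, since
 * nfs4_xattr_hash_bucket() below selects a bucket by masking the
 * jhash value with (ARRAY_SIZE(cache->buckets) - 1).
 */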
struct nfs4_xattr_cache;
struct nfs4_xattr_entry;

struct nfs4_xattr_bucket {
	spinlock_t lock;
	struct hlist_head hlist;
	struct nfs4_xattr_cache *cache;
	bool draining;
};
struct nfs4_xattr_cache {
	struct kref ref;
	spinlock_t hash_lock;	/* protects hashtable and lru */
	struct nfs4_xattr_bucket buckets[NFS4_XATTR_HASH_SIZE];
	struct list_head lru;
	struct list_head dispose;
	atomic_long_t nent;
	spinlock_t listxattr_lock;
	struct inode *inode;
	struct nfs4_xattr_entry *listxattr;
};
struct nfs4_xattr_entry {
	struct kref ref;
	struct hlist_node hnode;
	struct list_head lru;
	struct list_head dispose;
	char *xattr_name;
	void *xattr_value;
	size_t xattr_size;
	struct nfs4_xattr_bucket *bucket;
	uint32_t flags;
};

#define	NFS4_XATTR_ENTRY_EXTVAL	0x0001
/*
 * LRU list of NFS inodes that have xattr caches.
 */
static struct list_lru nfs4_xattr_cache_lru;
static struct list_lru nfs4_xattr_entry_lru;
static struct list_lru nfs4_xattr_large_entry_lru;

static struct kmem_cache *nfs4_xattr_cache_cachep;
/*
 * Hashing helper functions.
 */
static void
nfs4_xattr_hash_init(struct nfs4_xattr_cache *cache)
{
	unsigned int i;

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		INIT_HLIST_HEAD(&cache->buckets[i].hlist);
		spin_lock_init(&cache->buckets[i].lock);
		cache->buckets[i].cache = cache;
		cache->buckets[i].draining = false;
	}
}
/*
 * Locking order:
 * 1. inode i_lock or bucket lock
 * 2. list_lru lock (taken by list_lru_* functions)
 */
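/*
 * The shrinker isolate callbacks (cache_lru_isolate and
 * entry_lru_isolate below) run with the list_lru lock held and thus
 * take the locks in the opposite order; they use trylock and skip
 * the item when the lock cannot be taken.
 */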
/*
 * Wrapper functions to add a cache entry to the right LRU.
 */
static bool
nfs4_xattr_entry_lru_add(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_add(lru, &entry->lru);
}

static bool
nfs4_xattr_entry_lru_del(struct nfs4_xattr_entry *entry)
{
	struct list_lru *lru;

	lru = (entry->flags & NFS4_XATTR_ENTRY_EXTVAL) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	return list_lru_del(lru, &entry->lru);
}
/*
 * This function allocates cache entries. They are the normal
 * extended attribute name/value pairs, but may also be a listxattr
 * cache. Those allocations use the same entry so that they can be
 * treated as one by the memory shrinker.
 *
 * xattr cache entries are allocated together with names. If the
 * value fits into one page with the entry structure and the name,
 * it will also be part of the same allocation (kmalloc). This is
 * expected to be the vast majority of cases. Larger allocations
 * have a value pointer that is allocated separately by kvmalloc.
 *
 * Parameters:
 *
 * @name:  Name of the extended attribute. NULL for listxattr cache
 *         entry.
 * @value: Value of attribute, or listxattr cache. NULL if the
 *         value is to be copied from pages instead.
 * @pages: Pages to copy the value from, if not NULL. Passed in to
 *         make it easier to copy the value after an RPC, even if
 *         the value will not be passed up to the application (e.g.
 *         for a 'query' getxattr with NULL buffer).
 * @len:   Length of the value. Can be 0 for zero-length attributes.
 *         @value and @pages will be NULL if @len is 0.
 */
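/*
 * Illustration of the two resulting layouts (a sketch, not a wire or
 * on-disk format):
 *
 *   inline value (common case, one kmalloc buffer <= PAGE_SIZE):
 *     [ struct nfs4_xattr_entry | name\0 | value ]
 *
 *   large value (NFS4_XATTR_ENTRY_EXTVAL set):
 *     [ struct nfs4_xattr_entry | name\0 ]  xattr_value points to a
 *                                           separate kvmalloc'ed
 *                                           buffer of @len bytes
 */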
static struct nfs4_xattr_entry *
nfs4_xattr_alloc_entry(const char *name, const void *value,
		       struct page **pages, size_t len)
{
	struct nfs4_xattr_entry *entry;
	void *valp;
	char *namep;
	size_t alloclen, slen;
	char *buf;
	uint32_t flags;

	BUILD_BUG_ON(sizeof(struct nfs4_xattr_entry) +
	    XATTR_NAME_MAX + 1 > PAGE_SIZE);

	alloclen = sizeof(struct nfs4_xattr_entry);
	if (name != NULL) {
		slen = strlen(name) + 1;
		alloclen += slen;
	} else
		slen = 0;

	if (alloclen + len <= PAGE_SIZE) {
		alloclen += len;
		flags = 0;
	} else {
		flags = NFS4_XATTR_ENTRY_EXTVAL;
	}

	buf = kmalloc(alloclen, GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (buf == NULL)
		return NULL;
	entry = (struct nfs4_xattr_entry *)buf;

	if (name != NULL) {
		namep = buf + sizeof(struct nfs4_xattr_entry);
		memcpy(namep, name, slen);
	} else {
		namep = NULL;
	}

	if (flags & NFS4_XATTR_ENTRY_EXTVAL) {
		valp = kvmalloc(len, GFP_KERNEL_ACCOUNT | GFP_NOFS);
		if (valp == NULL) {
			kfree(buf);
			return NULL;
		}
	} else if (len != 0) {
		valp = buf + sizeof(struct nfs4_xattr_entry) + slen;
	} else
		valp = NULL;

	if (valp != NULL) {
		if (value != NULL)
			memcpy(valp, value, len);
		else
			_copy_from_pages(valp, pages, 0, len);
	}

	entry->flags = flags;
	entry->xattr_value = valp;
	kref_init(&entry->ref);
	entry->xattr_name = namep;
	entry->xattr_size = len;
	entry->bucket = NULL;
	INIT_LIST_HEAD(&entry->lru);
	INIT_LIST_HEAD(&entry->dispose);
	INIT_HLIST_NODE(&entry->hnode);

	return entry;
}
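/*
 * Example uses: nfs4_xattr_cache_add() below allocates a regular
 * name/value entry with nfs4_xattr_alloc_entry(name, buf, pages,
 * buflen), while nfs4_xattr_cache_set_list() passes a NULL name to
 * get a listxattr entry: nfs4_xattr_alloc_entry(NULL, buf, NULL,
 * buflen).
 */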
static void
nfs4_xattr_free_entry(struct nfs4_xattr_entry *entry)
{
	if (entry->flags & NFS4_XATTR_ENTRY_EXTVAL)
		kvfree(entry->xattr_value);
	kfree(entry);
}

static void
nfs4_xattr_free_entry_cb(struct kref *kref)
{
	struct nfs4_xattr_entry *entry;

	entry = container_of(kref, struct nfs4_xattr_entry, ref);

	if (WARN_ON(!list_empty(&entry->lru)))
		return;

	nfs4_xattr_free_entry(entry);
}
static void
nfs4_xattr_free_cache_cb(struct kref *kref)
{
	struct nfs4_xattr_cache *cache;
	int i;

	cache = container_of(kref, struct nfs4_xattr_cache, ref);

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		if (WARN_ON(!hlist_empty(&cache->buckets[i].hlist)))
			return;
		cache->buckets[i].draining = false;
	}

	cache->listxattr = NULL;

	kmem_cache_free(nfs4_xattr_cache_cachep, cache);
}
static struct nfs4_xattr_cache *
nfs4_xattr_alloc_cache(void)
{
	struct nfs4_xattr_cache *cache;

	cache = kmem_cache_alloc(nfs4_xattr_cache_cachep,
	    GFP_KERNEL_ACCOUNT | GFP_NOFS);
	if (cache == NULL)
		return NULL;

	kref_init(&cache->ref);
	atomic_long_set(&cache->nent, 0);

	return cache;
}
/*
 * Set the listxattr cache, which is a special-cased cache entry.
 * The special value ERR_PTR(-ESTALE) is used to indicate that
 * the cache is being drained - this prevents a new listxattr
 * cache from being added to what is now a stale cache.
 */
static int
nfs4_xattr_set_listcache(struct nfs4_xattr_cache *cache,
			 struct nfs4_xattr_entry *new)
{
	struct nfs4_xattr_entry *old;
	int ret = 1;

	spin_lock(&cache->listxattr_lock);

	old = cache->listxattr;

	if (old == ERR_PTR(-ESTALE)) {
		ret = 0;
		goto out;
	}

	cache->listxattr = new;
	if (new != NULL && new != ERR_PTR(-ESTALE))
		nfs4_xattr_entry_lru_add(new);

	if (old != NULL) {
		nfs4_xattr_entry_lru_del(old);
		kref_put(&old->ref, nfs4_xattr_free_entry_cb);
	}
out:
	spin_unlock(&cache->listxattr_lock);

	return ret;
}
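/*
 * nfs4_xattr_set_listcache() returns 1 if the new value was
 * installed, and 0 if the cache was stale (being drained); in the
 * latter case the caller drops its reference to the new entry, as
 * done in nfs4_xattr_cache_set_list() below.
 */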
/*
 * Unlink a cache from its parent inode, clearing out an invalid
 * cache. Must be called with i_lock held.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_cache_unlink(struct inode *inode)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *oldcache;

	nfsi = NFS_I(inode);

	oldcache = nfsi->xattr_cache;
	if (oldcache != NULL) {
		list_lru_del(&nfs4_xattr_cache_lru, &oldcache->lru);
		oldcache->inode = NULL;
	}
	nfsi->xattr_cache = NULL;
	nfsi->cache_validity &= ~NFS_INO_INVALID_XATTR;

	return oldcache;
}
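/*
 * Note that the unlinked cache is returned with its reference still
 * held; callers drop it via nfs4_xattr_discard_cache() after
 * releasing i_lock, as done in nfs4_xattr_get_cache() and
 * nfs4_xattr_cache_zap() below.
 */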
/*
 * Discard a cache. Called by get_cache() if there was an old,
 * invalid cache. Can also be called from a shrinker callback.
 *
 * The cache is dead, it has already been unlinked from its inode,
 * and no longer appears on the cache LRU list.
 *
 * Mark all buckets as draining, so that no new entries are added. This
 * could still happen in the unlikely, but possible case that another
 * thread had grabbed a reference before it was unlinked from the inode,
 * and is still holding it for an add operation.
 *
 * Remove all entries from the LRU lists, so that there is no longer
 * any way to 'find' this cache. Then, remove the entries from the hash
 * table.
 *
 * At that point, the cache will remain empty and can be freed when the final
 * reference drops, which is very likely the kref_put at the end of
 * this function, or the one called immediately afterwards in the
 * shrinker callback.
 */
static void
nfs4_xattr_discard_cache(struct nfs4_xattr_cache *cache)
{
	unsigned int i;
	struct nfs4_xattr_entry *entry;
	struct nfs4_xattr_bucket *bucket;
	struct hlist_node *n;

	nfs4_xattr_set_listcache(cache, ERR_PTR(-ESTALE));

	for (i = 0; i < NFS4_XATTR_HASH_SIZE; i++) {
		bucket = &cache->buckets[i];

		spin_lock(&bucket->lock);
		bucket->draining = true;
		hlist_for_each_entry_safe(entry, n, &bucket->hlist, hnode) {
			nfs4_xattr_entry_lru_del(entry);
			hlist_del_init(&entry->hnode);
			kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		}
		spin_unlock(&bucket->lock);
	}

	atomic_long_set(&cache->nent, 0);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
/*
 * Get a referenced copy of the cache structure. Avoid doing allocs
 * while holding i_lock. Which means that we do some optimistic allocation,
 * and might have to free the result in rare cases.
 *
 * This function only checks the NFS_INO_INVALID_XATTR cache validity bit
 * and acts accordingly, replacing the cache when needed. For the read case
 * (!add), this means that the caller must make sure that the cache
 * is valid before calling this function. getxattr and listxattr call
 * revalidate_inode to do this. The attribute cache timeout (for the
 * non-delegated case) is expected to be dealt with in the revalidate
 * code.
 */
static struct nfs4_xattr_cache *
nfs4_xattr_get_cache(struct inode *inode, int add)
{
	struct nfs_inode *nfsi;
	struct nfs4_xattr_cache *cache, *oldcache, *newcache;

	nfsi = NFS_I(inode);

	cache = oldcache = NULL;

	spin_lock(&inode->i_lock);

	if (nfsi->cache_validity & NFS_INO_INVALID_XATTR)
		oldcache = nfs4_xattr_cache_unlink(inode);
	else
		cache = nfsi->xattr_cache;

	if (cache != NULL)
		kref_get(&cache->ref);

	spin_unlock(&inode->i_lock);

	if (add && cache == NULL) {
		newcache = NULL;

		cache = nfs4_xattr_alloc_cache();
		if (cache == NULL)
			goto out;

		spin_lock(&inode->i_lock);
		if (nfsi->cache_validity & NFS_INO_INVALID_XATTR) {
			/*
			 * The cache was invalidated again. Give up,
			 * since what we want to enter is now likely
			 * outdated anyway.
			 */
			spin_unlock(&inode->i_lock);
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = NULL;
			goto out;
		}

		/*
		 * Check if someone beat us to it.
		 */
		if (nfsi->xattr_cache != NULL) {
			newcache = nfsi->xattr_cache;
			kref_get(&newcache->ref);
		} else {
			kref_get(&cache->ref);
			nfsi->xattr_cache = cache;
			cache->inode = inode;
			list_lru_add(&nfs4_xattr_cache_lru, &cache->lru);
		}

		spin_unlock(&inode->i_lock);

		/*
		 * If there was a race, throw away the cache we just
		 * allocated, and use the new one allocated by someone
		 * else.
		 */
		if (newcache != NULL) {
			kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
			cache = newcache;
		}
	}

out:
	/*
	 * Discard the now orphaned old cache.
	 */
	if (oldcache != NULL)
		nfs4_xattr_discard_cache(oldcache);

	return cache;
}
static inline struct nfs4_xattr_bucket *
nfs4_xattr_hash_bucket(struct nfs4_xattr_cache *cache, const char *name)
{
	return &cache->buckets[jhash(name, strlen(name), 0) &
	    (ARRAY_SIZE(cache->buckets) - 1)];
}

static struct nfs4_xattr_entry *
nfs4_xattr_get_entry(struct nfs4_xattr_bucket *bucket, const char *name)
{
	struct nfs4_xattr_entry *entry;

	entry = NULL;

	hlist_for_each_entry(entry, &bucket->hlist, hnode) {
		if (!strcmp(entry->xattr_name, name))
			break;
	}

	return entry;
}
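/*
 * nfs4_xattr_get_entry() does no locking of its own; every caller
 * (hash_add, hash_remove and hash_find below) holds bucket->lock
 * around the lookup.
 */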
static bool
nfs4_xattr_hash_add(struct nfs4_xattr_cache *cache,
		    struct nfs4_xattr_entry *entry)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *oldentry = NULL;
	bool ret = true;

	bucket = nfs4_xattr_hash_bucket(cache, entry->xattr_name);
	entry->bucket = bucket;

	spin_lock(&bucket->lock);

	if (bucket->draining) {
		ret = false;
		goto out;
	}

	oldentry = nfs4_xattr_get_entry(bucket, entry->xattr_name);
	if (oldentry != NULL) {
		hlist_del_init(&oldentry->hnode);
		nfs4_xattr_entry_lru_del(oldentry);
	} else {
		atomic_long_inc(&cache->nent);
	}

	hlist_add_head(&entry->hnode, &bucket->hlist);
	nfs4_xattr_entry_lru_add(entry);

out:
	spin_unlock(&bucket->lock);

	if (oldentry != NULL)
		kref_put(&oldentry->ref, nfs4_xattr_free_entry_cb);

	return ret;
}
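/*
 * The kref_put of a replaced entry deliberately happens after
 * bucket->lock has been dropped, so that the (possibly final) free
 * of the old entry never runs under the spinlock.
 */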
static void
nfs4_xattr_hash_remove(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL) {
		hlist_del_init(&entry->hnode);
		nfs4_xattr_entry_lru_del(entry);
		atomic_long_dec(&cache->nent);
	}

	spin_unlock(&bucket->lock);

	if (entry != NULL)
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
}
static struct nfs4_xattr_entry *
nfs4_xattr_hash_find(struct nfs4_xattr_cache *cache, const char *name)
{
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_entry *entry;

	bucket = nfs4_xattr_hash_bucket(cache, name);

	spin_lock(&bucket->lock);

	entry = nfs4_xattr_get_entry(bucket, name);
	if (entry != NULL)
		kref_get(&entry->ref);

	spin_unlock(&bucket->lock);

	return entry;
}
/*
 * Entry point to retrieve an entry from the cache.
 */
ssize_t nfs4_xattr_cache_get(struct inode *inode, const char *name, char *buf,
			     ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	ret = 0;
	entry = nfs4_xattr_hash_find(cache, name);

	if (entry != NULL) {
		dprintk("%s: cache hit '%s', len %lu\n", __func__,
		    entry->xattr_name, (unsigned long)entry->xattr_size);
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (buflen < entry->xattr_size)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	} else {
		dprintk("%s: cache miss '%s'\n", __func__, name);
		ret = -ENOENT;
	}

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}
/*
 * Retrieve a cached list of xattrs from the cache.
 */
ssize_t nfs4_xattr_cache_list(struct inode *inode, char *buf, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;
	ssize_t ret;

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return -ENOENT;

	spin_lock(&cache->listxattr_lock);

	entry = cache->listxattr;

	if (entry != NULL && entry != ERR_PTR(-ESTALE)) {
		if (buflen == 0) {
			/* Length probe only */
			ret = entry->xattr_size;
		} else if (entry->xattr_size > buflen)
			ret = -ERANGE;
		else {
			memcpy(buf, entry->xattr_value, entry->xattr_size);
			ret = entry->xattr_size;
		}
	} else {
		ret = -ENOENT;
	}

	spin_unlock(&cache->listxattr_lock);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);

	return ret;
}
/*
 * Add an xattr to the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_add(struct inode *inode, const char *name,
			  const char *buf, struct page **pages, ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	dprintk("%s: add '%s' len %lu\n", __func__,
	    name, (unsigned long)buflen);

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(name, buf, pages, buflen);
	if (entry == NULL)
		goto out;

	(void)nfs4_xattr_set_listcache(cache, NULL);

	if (!nfs4_xattr_hash_add(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
/*
 * Remove an xattr from the cache.
 *
 * This also invalidates the xattr list cache.
 */
void nfs4_xattr_cache_remove(struct inode *inode, const char *name)
{
	struct nfs4_xattr_cache *cache;

	dprintk("%s: remove '%s'\n", __func__, name);

	cache = nfs4_xattr_get_cache(inode, 0);
	if (cache == NULL)
		return;

	(void)nfs4_xattr_set_listcache(cache, NULL);
	nfs4_xattr_hash_remove(cache, name);

	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
/*
 * Cache listxattr output, replacing any possible old one.
 */
void nfs4_xattr_cache_set_list(struct inode *inode, const char *buf,
			       ssize_t buflen)
{
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry;

	cache = nfs4_xattr_get_cache(inode, 1);
	if (cache == NULL)
		return;

	entry = nfs4_xattr_alloc_entry(NULL, buf, NULL, buflen);
	if (entry == NULL)
		goto out;

	/*
	 * This is just there to be able to get to bucket->cache,
	 * which is obviously the same for all buckets, so just
	 * use bucket 0.
	 */
	entry->bucket = &cache->buckets[0];

	if (!nfs4_xattr_set_listcache(cache, entry))
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);

out:
	kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
}
/*
 * Zap the entire cache. Called when an inode is evicted.
 */
void nfs4_xattr_cache_zap(struct inode *inode)
{
	struct nfs4_xattr_cache *oldcache;

	spin_lock(&inode->i_lock);
	oldcache = nfs4_xattr_cache_unlink(inode);
	spin_unlock(&inode->i_lock);

	if (oldcache)
		nfs4_xattr_discard_cache(oldcache);
}
/*
 * The entry LRU is shrunk more aggressively than the cache LRU,
 * by setting @seeks to 1.
 *
 * Cache structures are freed only when they've become empty, after
 * pruning all but one entry.
 */

static unsigned long nfs4_xattr_cache_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_count(struct shrinker *shrink,
					    struct shrink_control *sc);
static unsigned long nfs4_xattr_cache_scan(struct shrinker *shrink,
					   struct shrink_control *sc);
static unsigned long nfs4_xattr_entry_scan(struct shrinker *shrink,
					   struct shrink_control *sc);

static struct shrinker nfs4_xattr_cache_shrinker = {
	.count_objects	= nfs4_xattr_cache_count,
	.scan_objects	= nfs4_xattr_cache_scan,
	.seeks		= DEFAULT_SEEKS,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_entry_shrinker = {
	.count_objects	= nfs4_xattr_entry_count,
	.scan_objects	= nfs4_xattr_entry_scan,
	.seeks		= DEFAULT_SEEKS,
	.batch		= 512,
	.flags		= SHRINKER_MEMCG_AWARE,
};

static struct shrinker nfs4_xattr_large_entry_shrinker = {
	.count_objects	= nfs4_xattr_entry_count,
	.scan_objects	= nfs4_xattr_entry_scan,
	.seeks		= 1,
	.batch		= 512,
	.flags		= SHRINKER_MEMCG_AWARE,
};
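/*
 * As described above, the large-entry shrinker is the aggressive
 * one: it uses .seeks = 1 instead of DEFAULT_SEEKS, making its
 * objects look cheaper to recreate and thus reclaimed sooner. All
 * three shrinkers are SHRINKER_MEMCG_AWARE, so they operate on
 * per-memcg LRU lists (the LRUs are set up with
 * list_lru_init_memcg() in nfs4_xattr_cache_init() below).
 */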
static enum lru_status
cache_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct inode *inode;
	struct nfs4_xattr_cache *cache = container_of(item,
	    struct nfs4_xattr_cache, lru);

	if (atomic_long_read(&cache->nent) > 1)
		return LRU_SKIP;

	/*
	 * If a cache structure is on the LRU list, we know that
	 * its inode is valid. Try to lock it to break the link.
	 * Since we're inverting the lock order here, only try.
	 */
	inode = cache->inode;

	if (!spin_trylock(&inode->i_lock))
		return LRU_SKIP;

	kref_get(&cache->ref);

	cache->inode = NULL;
	NFS_I(inode)->xattr_cache = NULL;
	NFS_I(inode)->cache_validity &= ~NFS_INO_INVALID_XATTR;
	list_lru_isolate(lru, &cache->lru);

	spin_unlock(&inode->i_lock);

	list_add_tail(&cache->dispose, dispose);
	return LRU_REMOVED;
}
static unsigned long
nfs4_xattr_cache_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_cache *cache;

	freed = list_lru_shrink_walk(&nfs4_xattr_cache_lru, sc,
	    cache_lru_isolate, &dispose);
	while (!list_empty(&dispose)) {
		cache = list_first_entry(&dispose, struct nfs4_xattr_cache,
		    dispose);
		list_del_init(&cache->dispose);
		nfs4_xattr_discard_cache(cache);
		kref_put(&cache->ref, nfs4_xattr_free_cache_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_cache_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;

	count = list_lru_shrink_count(&nfs4_xattr_cache_lru, sc);
	return vfs_pressure_ratio(count);
}
static enum lru_status
entry_lru_isolate(struct list_head *item,
	struct list_lru_one *lru, spinlock_t *lru_lock, void *arg)
{
	struct list_head *dispose = arg;
	struct nfs4_xattr_bucket *bucket;
	struct nfs4_xattr_cache *cache;
	struct nfs4_xattr_entry *entry = container_of(item,
	    struct nfs4_xattr_entry, lru);

	bucket = entry->bucket;
	cache = bucket->cache;

	/*
	 * Unhook the entry from its parent (either a cache bucket
	 * or a cache structure if it's a listxattr buf), so that
	 * it's no longer found. Then add it to the isolate list,
	 * to be freed later.
	 *
	 * In both cases, we're inverting the lock order, so use
	 * trylock and skip the entry if we can't get the lock.
	 */
	if (entry->xattr_name != NULL) {
		/* Regular cache entry */
		if (!spin_trylock(&bucket->lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		hlist_del_init(&entry->hnode);
		atomic_long_dec(&cache->nent);
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&bucket->lock);
	} else {
		/* Listxattr cache entry */
		if (!spin_trylock(&cache->listxattr_lock))
			return LRU_SKIP;

		kref_get(&entry->ref);

		cache->listxattr = NULL;
		list_lru_isolate(lru, &entry->lru);

		spin_unlock(&cache->listxattr_lock);
	}

	list_add_tail(&entry->dispose, dispose);
	return LRU_REMOVED;
}
static unsigned long
nfs4_xattr_entry_scan(struct shrinker *shrink, struct shrink_control *sc)
{
	LIST_HEAD(dispose);
	unsigned long freed;
	struct nfs4_xattr_entry *entry;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	freed = list_lru_shrink_walk(lru, sc, entry_lru_isolate, &dispose);

	while (!list_empty(&dispose)) {
		entry = list_first_entry(&dispose, struct nfs4_xattr_entry,
		    dispose);
		list_del_init(&entry->dispose);

		/*
		 * Drop two references: the one that we just grabbed
		 * in entry_lru_isolate, and the one that was set
		 * when the entry was first allocated.
		 */
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
		kref_put(&entry->ref, nfs4_xattr_free_entry_cb);
	}

	return freed;
}

static unsigned long
nfs4_xattr_entry_count(struct shrinker *shrink, struct shrink_control *sc)
{
	unsigned long count;
	struct list_lru *lru;

	lru = (shrink == &nfs4_xattr_large_entry_shrinker) ?
	    &nfs4_xattr_large_entry_lru : &nfs4_xattr_entry_lru;

	count = list_lru_shrink_count(lru, sc);
	return vfs_pressure_ratio(count);
}
static void nfs4_xattr_cache_init_once(void *p)
{
	struct nfs4_xattr_cache *cache = (struct nfs4_xattr_cache *)p;

	spin_lock_init(&cache->listxattr_lock);
	atomic_long_set(&cache->nent, 0);
	nfs4_xattr_hash_init(cache);
	cache->listxattr = NULL;
	INIT_LIST_HEAD(&cache->lru);
	INIT_LIST_HEAD(&cache->dispose);
}
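/*
 * A slab constructor like init_once runs only when an object is
 * first created, not on every allocation, which is why per-instance
 * state such as the kref is (re)initialized in
 * nfs4_xattr_alloc_cache() instead.
 */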
int __init nfs4_xattr_cache_init(void)
{
	int ret = 0;

	nfs4_xattr_cache_cachep = kmem_cache_create("nfs4_xattr_cache_cache",
	    sizeof(struct nfs4_xattr_cache), 0,
	    (SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD|SLAB_ACCOUNT),
	    nfs4_xattr_cache_init_once);
	if (nfs4_xattr_cache_cachep == NULL)
		return -ENOMEM;

	ret = list_lru_init_memcg(&nfs4_xattr_large_entry_lru,
	    &nfs4_xattr_large_entry_shrinker);
	if (ret)
		goto out4;

	ret = list_lru_init_memcg(&nfs4_xattr_entry_lru,
	    &nfs4_xattr_entry_shrinker);
	if (ret)
		goto out3;

	ret = list_lru_init_memcg(&nfs4_xattr_cache_lru,
	    &nfs4_xattr_cache_shrinker);
	if (ret)
		goto out2;

	ret = register_shrinker(&nfs4_xattr_cache_shrinker);
	if (ret)
		goto out1;

	ret = register_shrinker(&nfs4_xattr_entry_shrinker);
	if (ret)
		goto out;

	ret = register_shrinker(&nfs4_xattr_large_entry_shrinker);
	if (!ret)
		return 0;

	unregister_shrinker(&nfs4_xattr_entry_shrinker);
out:
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
out1:
	list_lru_destroy(&nfs4_xattr_cache_lru);
out2:
	list_lru_destroy(&nfs4_xattr_entry_lru);
out3:
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
out4:
	kmem_cache_destroy(nfs4_xattr_cache_cachep);

	return ret;
}
void nfs4_xattr_cache_exit(void)
{
	unregister_shrinker(&nfs4_xattr_large_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_entry_shrinker);
	unregister_shrinker(&nfs4_xattr_cache_shrinker);
	list_lru_destroy(&nfs4_xattr_large_entry_lru);
	list_lru_destroy(&nfs4_xattr_entry_lru);
	list_lru_destroy(&nfs4_xattr_cache_lru);
	kmem_cache_destroy(nfs4_xattr_cache_cachep);
}