// SPDX-License-Identifier: GPL-2.0-only
#include <linux/spinlock.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/list_bl.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/hash.h>		/* hash_32() */
#include <linux/mbcache.h>

/*
 * Mbcache is a simple key-value store. Keys need not be unique, however
 * key-value pairs are expected to be unique (we use this fact in
 * mb_cache_entry_delete()).
 *
 * Ext2 and ext4 use this cache for deduplication of extended attribute blocks.
 * Ext4 also uses it for deduplication of xattr values stored in inodes.
 * They use the hash of the data as a key and provide a value that may
 * represent a block or inode number. That's why keys need not be unique (the
 * hash of different data may be the same). However, the user-provided value
 * always uniquely identifies a cache entry.
 *
 * We provide functions for creation and removal of entries, search by key,
 * and a special "delete entry with given key-value pair" operation. A
 * fixed-size hash table is used for fast key lookups.
 */
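
/*
 * Illustrative usage sketch (an assumption for this comment, not code from
 * the kernel tree): an ext2-like caller deduplicating xattr blocks might do
 * roughly the following, where 'hash' is the hash of a block's contents and
 * 'blocknr' its block number (both hypothetical names):
 *
 *	struct mb_cache *cache = mb_cache_create(10);	(1024 hash buckets)
 *
 *	mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
 *	...
 *	mb_cache_entry_delete(cache, hash, blocknr);
 *	mb_cache_destroy(cache);
 */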

struct mb_cache {
        /* Hash table of entries */
        struct hlist_bl_head *c_hash;
        /* log2 of hash table size */
        int c_bucket_bits;
        /* Maximum entries in cache to avoid degrading hash too much */
        unsigned long c_max_entries;
        /* Protects c_list, c_entry_count */
        spinlock_t c_list_lock;
        struct list_head c_list;
        /* Number of entries in cache */
        unsigned long c_entry_count;
        struct shrinker c_shrink;
        /* Work for shrinking when the cache has too many entries */
        struct work_struct c_shrink_work;
};

static struct kmem_cache *mb_entry_cache;

static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan);

static inline struct hlist_bl_head *mb_cache_entry_head(struct mb_cache *cache,
                                                        u32 key)
{
        return &cache->c_hash[hash_32(key, cache->c_bucket_bits)];
}

/*
 * Number of entries to reclaim synchronously when there are too many entries
 * in the cache.
 */
#define SYNC_SHRINK_BATCH 64

/*
 * mb_cache_entry_create - create entry in cache
 * @cache - cache where the entry should be created
 * @mask - gfp mask with which the entry should be allocated
 * @key - key of the entry
 * @value - value of the entry
 * @reusable - is the entry reusable by others?
 *
 * Creates an entry in @cache with key @key and value @value. The function
 * returns -EBUSY if an entry with the same key and value already exists in
 * the cache. Otherwise 0 is returned.
 */
int mb_cache_entry_create(struct mb_cache *cache, gfp_t mask, u32 key,
                          u64 value, bool reusable)
{
        struct mb_cache_entry *entry, *dup;
        struct hlist_bl_node *dup_node;
        struct hlist_bl_head *head;

        /* Schedule background reclaim if there are too many entries */
        if (cache->c_entry_count >= cache->c_max_entries)
                schedule_work(&cache->c_shrink_work);
        /* Do some sync reclaim if background reclaim cannot keep up */
        if (cache->c_entry_count >= 2 * cache->c_max_entries)
                mb_cache_shrink(cache, SYNC_SHRINK_BATCH);

        entry = kmem_cache_alloc(mb_entry_cache, mask);
        if (!entry)
                return -ENOMEM;

        INIT_LIST_HEAD(&entry->e_list);
        /* One ref for hash, one ref returned */
        atomic_set(&entry->e_refcnt, 1);
        entry->e_key = key;
        entry->e_value = value;
        entry->e_reusable = reusable;
        entry->e_referenced = 0;
        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(dup, dup_node, head, e_hash_list) {
                if (dup->e_key == key && dup->e_value == value) {
                        hlist_bl_unlock(head);
                        kmem_cache_free(mb_entry_cache, entry);
                        return -EBUSY;
                }
        }
        hlist_bl_add_head(&entry->e_hash_list, head);
        hlist_bl_unlock(head);

        spin_lock(&cache->c_list_lock);
        list_add_tail(&entry->e_list, &cache->c_list);
        /* Grab ref for LRU list */
        atomic_inc(&entry->e_refcnt);
        cache->c_entry_count++;
        spin_unlock(&cache->c_list_lock);

        return 0;
}
EXPORT_SYMBOL(mb_cache_entry_create);
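
/*
 * Caller-side sketch (illustrative assumption, not kernel-tree code): since
 * -EBUSY only signals that an identical key-value pair is already cached,
 * callers can typically treat it as success:
 *
 *	int err = mb_cache_entry_create(cache, GFP_NOFS, hash, blocknr, true);
 *
 *	if (err && err != -EBUSY)
 *		return err;	(only a real failure such as -ENOMEM propagates)
 */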

void __mb_cache_entry_free(struct mb_cache_entry *entry)
{
        kmem_cache_free(mb_entry_cache, entry);
}
EXPORT_SYMBOL(__mb_cache_entry_free);

static struct mb_cache_entry *__entry_find(struct mb_cache *cache,
                                           struct mb_cache_entry *entry,
                                           u32 key)
{
        struct mb_cache_entry *old_entry = entry;
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        if (entry && !hlist_bl_unhashed(&entry->e_hash_list))
                node = entry->e_hash_list.next;
        else
                node = hlist_bl_first(head);
        while (node) {
                entry = hlist_bl_entry(node, struct mb_cache_entry,
                                       e_hash_list);
                if (entry->e_key == key && entry->e_reusable) {
                        atomic_inc(&entry->e_refcnt);
                        goto out;
                }
                node = node->next;
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        if (old_entry)
                mb_cache_entry_put(cache, old_entry);

        return entry;
}

/*
 * mb_cache_entry_find_first - find the first reusable entry with the given key
 * @cache: cache where we should search
 * @key: key to look for
 *
 * Search in @cache for a reusable entry with key @key. Grabs a reference to
 * the first reusable entry found and returns the entry.
 */
struct mb_cache_entry *mb_cache_entry_find_first(struct mb_cache *cache,
                                                 u32 key)
{
        return __entry_find(cache, NULL, key);
}
EXPORT_SYMBOL(mb_cache_entry_find_first);

/*
 * mb_cache_entry_find_next - find next reusable entry with the same key
 * @cache: cache where we should search
 * @entry: entry to start search from
 *
 * Finds the next reusable entry in the hash chain which has the same key as
 * @entry. If @entry is unhashed (which can happen when deletion of the entry
 * races with the search), finds the first reusable entry in the hash chain.
 * The function drops the reference to @entry and returns with a reference to
 * the found entry.
 */
struct mb_cache_entry *mb_cache_entry_find_next(struct mb_cache *cache,
                                                struct mb_cache_entry *entry)
{
        return __entry_find(cache, entry, entry->e_key);
}
EXPORT_SYMBOL(mb_cache_entry_find_next);
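
/*
 * Iteration sketch (illustrative assumption): a caller scanning all reusable
 * entries in a hash collision chain combines the two helpers; the reference
 * handed out by each call is either passed back into
 * mb_cache_entry_find_next() or dropped with mb_cache_entry_put():
 *
 *	struct mb_cache_entry *ce = mb_cache_entry_find_first(cache, hash);
 *
 *	while (ce) {
 *		if (candidate_matches(ce)) {	(hypothetical predicate)
 *			mb_cache_entry_put(cache, ce);
 *			break;
 *		}
 *		ce = mb_cache_entry_find_next(cache, ce);
 *	}
 */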

/*
 * mb_cache_entry_get - get a cache entry by value (and key)
 * @cache - cache we work with
 * @key - key
 * @value - value
 */
struct mb_cache_entry *mb_cache_entry_get(struct mb_cache *cache, u32 key,
                                          u64 value)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_value == value) {
                        atomic_inc(&entry->e_refcnt);
                        goto out;
                }
        }
        entry = NULL;
out:
        hlist_bl_unlock(head);
        return entry;
}
EXPORT_SYMBOL(mb_cache_entry_get);

/*
 * mb_cache_entry_delete - remove a cache entry
 * @cache - cache we work with
 * @key - key
 * @value - value
 *
 * Remove the entry from cache @cache with key @key and value @value.
 */
void mb_cache_entry_delete(struct mb_cache *cache, u32 key, u64 value)
{
        struct hlist_bl_node *node;
        struct hlist_bl_head *head;
        struct mb_cache_entry *entry;

        head = mb_cache_entry_head(cache, key);
        hlist_bl_lock(head);
        hlist_bl_for_each_entry(entry, node, head, e_hash_list) {
                if (entry->e_key == key && entry->e_value == value) {
                        /* We keep the hash list reference to keep the entry alive */
                        hlist_bl_del_init(&entry->e_hash_list);
                        hlist_bl_unlock(head);
                        spin_lock(&cache->c_list_lock);
                        if (!list_empty(&entry->e_list)) {
                                list_del_init(&entry->e_list);
                                if (!WARN_ONCE(cache->c_entry_count == 0,
                "mbcache: attempt to decrement c_entry_count past zero"))
                                        cache->c_entry_count--;
                                atomic_dec(&entry->e_refcnt);
                        }
                        spin_unlock(&cache->c_list_lock);
                        mb_cache_entry_put(cache, entry);
                        return;
                }
        }
        hlist_bl_unlock(head);
}
EXPORT_SYMBOL(mb_cache_entry_delete);

/*
 * mb_cache_entry_touch - cache entry got used
 * @cache - cache the entry belongs to
 * @entry - entry that got used
 *
 * Marks the entry as used to give it a higher chance of surviving in the
 * cache.
 */
void mb_cache_entry_touch(struct mb_cache *cache,
                          struct mb_cache_entry *entry)
{
        entry->e_referenced = 1;
}
EXPORT_SYMBOL(mb_cache_entry_touch);

static unsigned long mb_cache_count(struct shrinker *shrink,
                                    struct shrink_control *sc)
{
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);

        return cache->c_entry_count;
}

/* Shrink the number of entries in the cache */
static unsigned long mb_cache_shrink(struct mb_cache *cache,
                                     unsigned long nr_to_scan)
{
        struct mb_cache_entry *entry;
        struct hlist_bl_head *head;
        unsigned long shrunk = 0;

        spin_lock(&cache->c_list_lock);
        while (nr_to_scan-- && !list_empty(&cache->c_list)) {
                entry = list_first_entry(&cache->c_list,
                                         struct mb_cache_entry, e_list);
                if (entry->e_referenced) {
                        entry->e_referenced = 0;
                        list_move_tail(&entry->e_list, &cache->c_list);
                        continue;
                }
                list_del_init(&entry->e_list);
                cache->c_entry_count--;
                /*
                 * We keep the LRU list reference so that the entry doesn't go
                 * away from under us.
                 */
                spin_unlock(&cache->c_list_lock);
                head = mb_cache_entry_head(cache, entry->e_key);
                hlist_bl_lock(head);
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
                }
                hlist_bl_unlock(head);
                if (mb_cache_entry_put(cache, entry))
                        shrunk++;
                cond_resched();
                spin_lock(&cache->c_list_lock);
        }
        spin_unlock(&cache->c_list_lock);

        return shrunk;
}

static unsigned long mb_cache_scan(struct shrinker *shrink,
                                   struct shrink_control *sc)
{
        struct mb_cache *cache = container_of(shrink, struct mb_cache,
                                              c_shrink);

        return mb_cache_shrink(cache, sc->nr_to_scan);
}

/* We shrink 1/X of the cache when we have too many entries in it */
#define SHRINK_DIVISOR 16

static void mb_cache_shrink_worker(struct work_struct *work)
{
        struct mb_cache *cache = container_of(work, struct mb_cache,
                                              c_shrink_work);

        mb_cache_shrink(cache, cache->c_max_entries / SHRINK_DIVISOR);
}
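
/*
 * Worked example (the numbers follow from the constants above): for a cache
 * created with bucket_bits == 10 there are 1024 buckets and c_max_entries ==
 * 1024 << 4 == 16384. Background reclaim is scheduled once the entry count
 * reaches 16384, and each worker run frees up to 16384 / SHRINK_DIVISOR ==
 * 1024 entries; if the count still climbs to 2 * 16384 == 32768, creators
 * also reclaim SYNC_SHRINK_BATCH == 64 entries synchronously.
 */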

/*
 * mb_cache_create - create cache
 * @bucket_bits: log2 of the hash table size
 *
 * Create a cache for keys with 2^bucket_bits hash entries.
 */
struct mb_cache *mb_cache_create(int bucket_bits)
{
        struct mb_cache *cache;
        unsigned long bucket_count = 1UL << bucket_bits;
        unsigned long i;

        cache = kzalloc(sizeof(struct mb_cache), GFP_KERNEL);
        if (!cache)
                goto err_out;
        cache->c_bucket_bits = bucket_bits;
        cache->c_max_entries = bucket_count << 4;
        INIT_LIST_HEAD(&cache->c_list);
        spin_lock_init(&cache->c_list_lock);
        cache->c_hash = kmalloc_array(bucket_count,
                                      sizeof(struct hlist_bl_head),
                                      GFP_KERNEL);
        if (!cache->c_hash) {
                kfree(cache);
                goto err_out;
        }
        for (i = 0; i < bucket_count; i++)
                INIT_HLIST_BL_HEAD(&cache->c_hash[i]);

        cache->c_shrink.count_objects = mb_cache_count;
        cache->c_shrink.scan_objects = mb_cache_scan;
        cache->c_shrink.seeks = DEFAULT_SEEKS;
        if (register_shrinker(&cache->c_shrink)) {
                kfree(cache->c_hash);
                kfree(cache);
                goto err_out;
        }

        INIT_WORK(&cache->c_shrink_work, mb_cache_shrink_worker);

        return cache;

err_out:
        return NULL;
}
EXPORT_SYMBOL(mb_cache_create);

/*
 * mb_cache_destroy - destroy cache
 * @cache: the cache to destroy
 *
 * Free all entries in the cache and the cache itself. The caller must make
 * sure nobody (except the shrinker) can reach @cache when calling this.
 */
void mb_cache_destroy(struct mb_cache *cache)
{
        struct mb_cache_entry *entry, *next;

        unregister_shrinker(&cache->c_shrink);

        /*
         * We don't bother with any locking. The cache must not be used at
         * this point.
         */
        list_for_each_entry_safe(entry, next, &cache->c_list, e_list) {
                if (!hlist_bl_unhashed(&entry->e_hash_list)) {
                        hlist_bl_del_init(&entry->e_hash_list);
                        atomic_dec(&entry->e_refcnt);
                } else
                        WARN_ON(1);
                list_del(&entry->e_list);
                WARN_ON(atomic_read(&entry->e_refcnt) != 1);
                mb_cache_entry_put(cache, entry);
        }
        kfree(cache->c_hash);
        kfree(cache);
}
EXPORT_SYMBOL(mb_cache_destroy);

static int __init mbcache_init(void)
{
        mb_entry_cache = kmem_cache_create("mbcache",
                                sizeof(struct mb_cache_entry), 0,
                                SLAB_RECLAIM_ACCOUNT|SLAB_MEM_SPREAD, NULL);
        if (!mb_entry_cache)
                return -ENOMEM;
        return 0;
}

static void __exit mbcache_exit(void)
{
        kmem_cache_destroy(mb_entry_cache);
}

module_init(mbcache_init)
module_exit(mbcache_exit)

MODULE_AUTHOR("Jan Kara <jack@suse.cz>");
MODULE_DESCRIPTION("Meta block cache (for extended attributes)");
MODULE_LICENSE("GPL");