kernel/bpf/bpf_lru_list.c (Linux 6.9-rc1)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 Facebook
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

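/* In the common LRU, a CPU refills its local free list from the global
 * LRU with up to LOCAL_FREE_TARGET nodes at a time, and each
 * rotation/shrink pass scans at most LOCAL_NR_SCANS nodes.  The per-cpu
 * LRU uses a much smaller target and scan budget since every CPU owns
 * its own list.
 */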
#define LOCAL_FREE_TARGET               (128)
#define LOCAL_NR_SCANS                  LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET              (4)
#define PERCPU_NR_SCANS                 PERCPU_FREE_TARGET

/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)       ((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX     LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX  LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)   ((t) >= BPF_LOCAL_LIST_T_OFFSET)

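/* Return the next possible CPU after @cpu, wrapping around to the first
 * possible CPU.  Used to walk the CPUs in round-robin order when
 * stealing free nodes from other CPUs' local lists.
 */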
static int get_next_cpu(int cpu)
{
        cpu = cpumask_next(cpu, cpu_possible_mask);
        if (cpu >= nr_cpu_ids)
                cpu = cpumask_first(cpu_possible_mask);
        return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
        return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
        return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
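/* The ref bit is set locklessly by the map lookup path
 * (bpf_lru_node_set_ref() in bpf_lru_list.h); the READ_ONCE()/WRITE_ONCE()
 * here pair with that lockless update.
 */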
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
        return READ_ONCE(node->ref);
}

static void bpf_lru_node_clear_ref(struct bpf_lru_node *node)
{
        WRITE_ONCE(node->ref, 0);
}

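/* Only the active and inactive lists are length-counted
 * (NR_BPF_LRU_LIST_COUNT in bpf_lru_list.h); the free lists are not.
 */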
static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
                                   enum bpf_lru_list_type type)
{
        if (type < NR_BPF_LRU_LIST_COUNT)
                l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
                                   enum bpf_lru_list_type type)
{
        if (type < NR_BPF_LRU_LIST_COUNT)
                l->counts[type]--;
}

static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
                                        struct bpf_lru_node *node,
                                        struct list_head *free_list,
                                        enum bpf_lru_list_type tgt_free_type)
{
        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
                return;

        /* If the node being removed is the next_inactive_rotation candidate,
         * move the next_inactive_rotation pointer as well.
         */
        if (&node->list == l->next_inactive_rotation)
                l->next_inactive_rotation = l->next_inactive_rotation->prev;

        bpf_lru_list_count_dec(l, node->type);

        node->type = tgt_free_type;
        list_move(&node->list, free_list);
}

/* Move a node from the local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
                                   struct bpf_lru_node *node,
                                   enum bpf_lru_list_type tgt_type)
{
        if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
            WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
                return;

        bpf_lru_list_count_inc(l, tgt_type);
        node->type = tgt_type;
        bpf_lru_node_clear_ref(node);
        list_move(&node->list, &l->lists[tgt_type]);
}

/* Move a node between or within the active and inactive lists (e.g.
 * active to inactive, inactive to active, or from the tail of the
 * active list back to its head).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
                                struct bpf_lru_node *node,
                                enum bpf_lru_list_type tgt_type)
{
        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
            WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
                return;

        if (node->type != tgt_type) {
                bpf_lru_list_count_dec(l, node->type);
                bpf_lru_list_count_inc(l, tgt_type);
                node->type = tgt_type;
        }
        bpf_lru_node_clear_ref(node);

        /* If the node being moved is the next_inactive_rotation candidate,
         * move the next_inactive_rotation pointer as well.
         */
        if (&node->list == l->next_inactive_rotation)
                l->next_inactive_rotation = l->next_inactive_rotation->prev;

        list_move(&node->list, &l->lists[tgt_type]);
}

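/* The inactive list is "low" when it is shorter than the active list.
 * In that case the active list is rotated to refill it before shrinking.
 */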
static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
        return l->counts[BPF_LRU_LIST_T_INACTIVE] <
                l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from the tail.
 * 2. If the node has the ref bit set, rotate it back to the head of
 *    the active list with the ref bit cleared, giving it one more
 *    chance to survive in the active list.
 * 3. If the ref bit is not set, move the node to the head of the
 *    inactive list.
 * 4. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
                                         struct bpf_lru_list *l)
{
        struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
        struct bpf_lru_node *node, *tmp_node, *first_node;
        unsigned int i = 0;

        first_node = list_first_entry(active, struct bpf_lru_node, list);
        list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
                if (bpf_lru_node_is_ref(node))
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                else
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

                if (++i == lru->nr_scans || node == first_node)
                        break;
        }
}

/* Rotate the inactive list.  It starts from next_inactive_rotation.
 * 1. If the node has the ref bit set, move it to the head of the
 *    active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, leave it at its
 *    current location (i.e. do nothing) so that it can be considered
 *    during the next inactive shrink.
 * 3. At most nr_scans nodes are scanned.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
                                           struct bpf_lru_list *l)
{
        struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
        struct list_head *cur, *last, *next = inactive;
        struct bpf_lru_node *node;
        unsigned int i = 0;

        if (list_empty(inactive))
                return;

        last = l->next_inactive_rotation->next;
        if (last == inactive)
                last = last->next;

        cur = l->next_inactive_rotation;
        while (i < lru->nr_scans) {
                if (cur == inactive) {
                        cur = cur->prev;
                        continue;
                }

                node = list_entry(cur, struct bpf_lru_node, list);
                next = cur->prev;
                if (bpf_lru_node_is_ref(node))
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                if (cur == last)
                        break;
                cur = next;
                i++;
        }

        l->next_inactive_rotation = next;
}

/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only moves nodes without the ref bit set to
 * the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
                               struct bpf_lru_list *l,
                               unsigned int tgt_nshrink,
                               struct list_head *free_list,
                               enum bpf_lru_list_type tgt_free_type)
{
        struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
        struct bpf_lru_node *node, *tmp_node;
        unsigned int nshrinked = 0;
        unsigned int i = 0;

        list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
                if (bpf_lru_node_is_ref(node)) {
                        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
                } else if (lru->del_from_htab(lru->del_arg, node)) {
                        __bpf_lru_node_move_to_free(l, node, free_list,
                                                    tgt_free_type);
                        if (++nshrinked == tgt_nshrink)
                                break;
                }

                if (++i == lru->nr_scans)
                        break;
        }

        return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
        if (bpf_lru_list_inactive_low(l))
                __bpf_lru_list_rotate_active(lru, l);

        __bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(), it will just remove
 * one node from either the inactive or the active list without
 * honoring the ref bit.  It prefers the inactive list to the
 * active list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
                                          struct bpf_lru_list *l,
                                          unsigned int tgt_nshrink,
                                          struct list_head *free_list,
                                          enum bpf_lru_list_type tgt_free_type)
{
        struct bpf_lru_node *node, *tmp_node;
        struct list_head *force_shrink_list;
        unsigned int nshrinked;

        nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
                                                   free_list, tgt_free_type);
        if (nshrinked)
                return nshrinked;

        /* Do a force shrink by ignoring the reference bit */
        if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
                force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
        else
                force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

        list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
                                         list) {
                if (lru->del_from_htab(lru->del_arg, node)) {
                        __bpf_lru_node_move_to_free(l, node, free_list,
                                                    tgt_free_type);
                        return 1;
                }
        }

        return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
                               struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_node *node, *tmp_node;

        list_for_each_entry_safe_reverse(node, tmp_node,
                                         local_pending_list(loc_l), list) {
                if (bpf_lru_node_is_ref(node))
                        __bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
                else
                        __bpf_lru_node_move_in(l, node,
                                               BPF_LRU_LIST_T_INACTIVE);
        }
}

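/* Return a node that lives on the global active/inactive list back to
 * the global free list, under the list lock.
 */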
static void bpf_lru_list_push_free(struct bpf_lru_list *l,
                                   struct bpf_lru_node *node)
{
        unsigned long flags;

        if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
                return;

        raw_spin_lock_irqsave(&l->lock, flags);
        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
        raw_spin_unlock_irqrestore(&l->lock, flags);
}

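/* Refill the caller's local free list from the global LRU list: flush
 * the local pending list back into the global list, rotate the global
 * list, then grab up to LOCAL_FREE_TARGET nodes from the global free
 * list, shrinking the inactive/active lists if the free list does not
 * have enough nodes.
 */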
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
                                           struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_list *l = &lru->common_lru.lru_list;
        struct bpf_lru_node *node, *tmp_node;
        unsigned int nfree = 0;

        raw_spin_lock(&l->lock);

        __local_list_flush(l, loc_l);

        __bpf_lru_list_rotate(lru, l);

        list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
                                 list) {
                __bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
                                            BPF_LRU_LOCAL_LIST_T_FREE);
                if (++nfree == LOCAL_FREE_TARGET)
                        break;
        }

        if (nfree < LOCAL_FREE_TARGET)
                __bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
                                      local_free_list(loc_l),
                                      BPF_LRU_LOCAL_LIST_T_FREE);

        raw_spin_unlock(&l->lock);
}

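/* Record the hash and the owning CPU in the node and park it on the
 * local pending list.  Pending nodes are flushed back to the global
 * LRU list by __local_list_flush() on the next refill.
 */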
static void __local_list_add_pending(struct bpf_lru *lru,
                                     struct bpf_lru_locallist *loc_l,
                                     int cpu,
                                     struct bpf_lru_node *node,
                                     u32 hash)
{
        *(u32 *)((void *)node + lru->hash_offset) = hash;
        node->cpu = cpu;
        node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
        bpf_lru_node_clear_ref(node);
        list_add(&node->list, local_pending_list(loc_l));
}

static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_node *node;

        node = list_first_entry_or_null(local_free_list(loc_l),
                                        struct bpf_lru_node,
                                        list);
        if (node)
                list_del(&node->list);

        return node;
}

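/* Steal a node from the pending list: prefer nodes whose ref bit is
 * clear; if none of those can be removed from the hash table, retry
 * while ignoring the ref bit.
 */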
static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
        struct bpf_lru_node *node;
        bool force = false;

ignore_ref:
        /* Get from the tail (i.e. the older elements) of the pending list. */
        list_for_each_entry_reverse(node, local_pending_list(loc_l),
                                    list) {
                if ((!bpf_lru_node_is_ref(node) || force) &&
                    lru->del_from_htab(lru->del_arg, node)) {
                        list_del(&node->list);
                        return node;
                }
        }

        if (!force) {
                force = true;
                goto ignore_ref;
        }

        return NULL;
}

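/* Per-cpu LRU: each CPU owns a complete bpf_lru_list, so allocation only
 * touches the local CPU's list under its lock: rotate, shrink when the
 * free list is empty, then take the first free node and move it to the
 * inactive list.
 */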
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
                                                    u32 hash)
{
        struct list_head *free_list;
        struct bpf_lru_node *node = NULL;
        struct bpf_lru_list *l;
        unsigned long flags;
        int cpu = raw_smp_processor_id();

        l = per_cpu_ptr(lru->percpu_lru, cpu);

        raw_spin_lock_irqsave(&l->lock, flags);

        __bpf_lru_list_rotate(lru, l);

        free_list = &l->lists[BPF_LRU_LIST_T_FREE];
        if (list_empty(free_list))
                __bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
                                      BPF_LRU_LIST_T_FREE);

        if (!list_empty(free_list)) {
                node = list_first_entry(free_list, struct bpf_lru_node, list);
                *(u32 *)((void *)node + lru->hash_offset) = hash;
                bpf_lru_node_clear_ref(node);
                __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
        }

        raw_spin_unlock_irqrestore(&l->lock, flags);

        return node;
}

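/* Common LRU: try the local free list first; refill it from the global
 * LRU list if it is empty; and as a last resort steal a free or pending
 * node from the other CPUs' local lists in round-robin order.
 */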
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
                                                    u32 hash)
{
        struct bpf_lru_locallist *loc_l, *steal_loc_l;
        struct bpf_common_lru *clru = &lru->common_lru;
        struct bpf_lru_node *node;
        int steal, first_steal;
        unsigned long flags;
        int cpu = raw_smp_processor_id();

        loc_l = per_cpu_ptr(clru->local_list, cpu);

        raw_spin_lock_irqsave(&loc_l->lock, flags);

        node = __local_list_pop_free(loc_l);
        if (!node) {
                bpf_lru_list_pop_free_to_local(lru, loc_l);
                node = __local_list_pop_free(loc_l);
        }

        if (node)
                __local_list_add_pending(lru, loc_l, cpu, node, hash);

        raw_spin_unlock_irqrestore(&loc_l->lock, flags);

        if (node)
                return node;

        /* No free node was found on the local free list or
         * the global LRU list.
         *
         * Steal from the local free/pending lists of the
         * current CPU and of the remote CPUs in round-robin
         * order, starting with the loc_l->next_steal CPU.
         */

        first_steal = loc_l->next_steal;
        steal = first_steal;
        do {
                steal_loc_l = per_cpu_ptr(clru->local_list, steal);

                raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

                node = __local_list_pop_free(steal_loc_l);
                if (!node)
                        node = __local_list_pop_pending(lru, steal_loc_l);

                raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

                steal = get_next_cpu(steal);
        } while (!node && steal != first_steal);

        loc_l->next_steal = steal;

        if (node) {
                raw_spin_lock_irqsave(&loc_l->lock, flags);
                __local_list_add_pending(lru, loc_l, cpu, node, hash);
                raw_spin_unlock_irqrestore(&loc_l->lock, flags);
        }

        return node;
}

struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
        if (lru->percpu)
                return bpf_percpu_lru_pop_free(lru, hash);
        else
                return bpf_common_lru_pop_free(lru, hash);
}

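/* A node freed while it is still on its owner CPU's pending list can be
 * returned directly to that CPU's local free list; otherwise it goes
 * back to the global free list.
 */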
static void bpf_common_lru_push_free(struct bpf_lru *lru,
                                     struct bpf_lru_node *node)
{
        u8 node_type = READ_ONCE(node->type);
        unsigned long flags;

        if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
            WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
                return;

        if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
                struct bpf_lru_locallist *loc_l;

                loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

                raw_spin_lock_irqsave(&loc_l->lock, flags);

                if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
                        raw_spin_unlock_irqrestore(&loc_l->lock, flags);
                        goto check_lru_list;
                }

                node->type = BPF_LRU_LOCAL_LIST_T_FREE;
                bpf_lru_node_clear_ref(node);
                list_move(&node->list, local_free_list(loc_l));

                raw_spin_unlock_irqrestore(&loc_l->lock, flags);
                return;
        }

check_lru_list:
        bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
                                     struct bpf_lru_node *node)
{
        struct bpf_lru_list *l;
        unsigned long flags;

        l = per_cpu_ptr(lru->percpu_lru, node->cpu);

        raw_spin_lock_irqsave(&l->lock, flags);

        __bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

        raw_spin_unlock_irqrestore(&l->lock, flags);
}

void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
        if (lru->percpu)
                bpf_percpu_lru_push_free(lru, node);
        else
                bpf_common_lru_push_free(lru, node);
}

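/* Carve the flat element buffer into nodes and put every node on the
 * global free list.  node_offset is the offset of the embedded
 * struct bpf_lru_node within each elem_size-sized element.
 */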
static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
                                    u32 node_offset, u32 elem_size,
                                    u32 nr_elems)
{
        struct bpf_lru_list *l = &lru->common_lru.lru_list;
        u32 i;

        for (i = 0; i < nr_elems; i++) {
                struct bpf_lru_node *node;

                node = (struct bpf_lru_node *)(buf + node_offset);
                node->type = BPF_LRU_LIST_T_FREE;
                bpf_lru_node_clear_ref(node);
                list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                buf += elem_size;
        }
}

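/* Spread the elements across the per-cpu free lists; each possible CPU
 * receives nr_elems / num_possible_cpus() nodes.
 */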
static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
                                    u32 node_offset, u32 elem_size,
                                    u32 nr_elems)
{
        u32 i, pcpu_entries;
        int cpu;
        struct bpf_lru_list *l;

        pcpu_entries = nr_elems / num_possible_cpus();

        i = 0;

        for_each_possible_cpu(cpu) {
                struct bpf_lru_node *node;

                l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
                node = (struct bpf_lru_node *)(buf + node_offset);
                node->cpu = cpu;
                node->type = BPF_LRU_LIST_T_FREE;
                bpf_lru_node_clear_ref(node);
                list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
                i++;
                buf += elem_size;
                if (i == nr_elems)
                        break;
                if (i % pcpu_entries)
                        goto again;
        }
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
                      u32 elem_size, u32 nr_elems)
{
        if (lru->percpu)
                bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
                                        nr_elems);
        else
                bpf_common_lru_populate(lru, buf, node_offset, elem_size,
                                        nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
        int i;

        for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
                INIT_LIST_HEAD(&loc_l->lists[i]);

        loc_l->next_steal = cpu;

        raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
        int i;

        for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
                INIT_LIST_HEAD(&l->lists[i]);

        for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
                l->counts[i] = 0;

        l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

        raw_spin_lock_init(&l->lock);
}

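/* Initialize the LRU.  hash_offset is the byte offset of the u32 hash
 * that pop_free stamps into each newly handed-out element;
 * del_from_htab()/del_arg let the LRU evict a victim from the owning
 * hash table.
 *
 * A rough usage sketch (the LRU hash map in kernel/bpf/hashtab.c is the
 * in-tree user; the callback and buffer names below are illustrative
 * only):
 *
 *	bpf_lru_init(&lru, percpu, hash_offset, delete_elem_cb, htab);
 *	bpf_lru_populate(&lru, elem_buf, node_offset, elem_size, nr_elems);
 *	...
 *	node = bpf_lru_pop_free(&lru, hash);	// allocate on map update
 *	...
 *	bpf_lru_push_free(&lru, node);		// give back on map delete
 */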
int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
                 del_from_htab_func del_from_htab, void *del_arg)
{
        int cpu;

        if (percpu) {
                lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
                if (!lru->percpu_lru)
                        return -ENOMEM;

                for_each_possible_cpu(cpu) {
                        struct bpf_lru_list *l;

                        l = per_cpu_ptr(lru->percpu_lru, cpu);
                        bpf_lru_list_init(l);
                }
                lru->nr_scans = PERCPU_NR_SCANS;
        } else {
                struct bpf_common_lru *clru = &lru->common_lru;

                clru->local_list = alloc_percpu(struct bpf_lru_locallist);
                if (!clru->local_list)
                        return -ENOMEM;

                for_each_possible_cpu(cpu) {
                        struct bpf_lru_locallist *loc_l;

                        loc_l = per_cpu_ptr(clru->local_list, cpu);
                        bpf_lru_locallist_init(loc_l, cpu);
                }

                bpf_lru_list_init(&clru->lru_list);
                lru->nr_scans = LOCAL_NR_SCANS;
        }

        lru->percpu = percpu;
        lru->del_from_htab = del_from_htab;
        lru->del_arg = del_arg;
        lru->hash_offset = hash_offset;

        return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
        if (lru->percpu)
                free_percpu(lru->percpu_lru);
        else
                free_percpu(lru->common_lru.local_list);
}