Merge tag 'for-5.15-tag' of git://git.kernel.org/pub/scm/linux/kernel/git/kdave/linux
[linux-2.6-microblaze.git] / kernel / bpf / percpu_freelist.c
index b367430..3d897de 100644 (file)
@@ -17,6 +17,8 @@ int pcpu_freelist_init(struct pcpu_freelist *s)
                raw_spin_lock_init(&head->lock);
                head->first = NULL;
        }
+       raw_spin_lock_init(&s->extralist.lock);
+       s->extralist.first = NULL;
        return 0;
 }
 
@@ -40,12 +42,50 @@ static inline void ___pcpu_freelist_push(struct pcpu_freelist_head *head,
        raw_spin_unlock(&head->lock);
 }
 
+/* Attempt to push @node onto the shared extralist without ever spinning:
+ * take the extralist lock only if it is immediately available.  Returns
+ * true if the node was queued, false if the lock was contended and the
+ * caller must retry (e.g. another round over the per-cpu lists).
+ */
+static inline bool pcpu_freelist_try_push_extra(struct pcpu_freelist *s,
+                                               struct pcpu_freelist_node *node)
+{
+       if (!raw_spin_trylock(&s->extralist.lock))
+               return false;
+
+       pcpu_freelist_push_node(&s->extralist, node);
+       raw_spin_unlock(&s->extralist.lock);
+       return true;
+}
+
+/* NMI-safe push.  In NMI context we may have interrupted a context that
+ * already holds one of the freelist locks, so spinning on a lock could
+ * deadlock.  Instead, walk the per-cpu lists starting from the current
+ * CPU using trylock only; after a full round trip over all possible CPUs
+ * fails, fall back to the shared extralist (also via trylock).  The loop
+ * repeats until the node has been queued somewhere.
+ */
+static inline void ___pcpu_freelist_push_nmi(struct pcpu_freelist *s,
+                                            struct pcpu_freelist_node *node)
+{
+       int cpu, orig_cpu;
+
+       orig_cpu = cpu = raw_smp_processor_id();
+       while (1) {
+               struct pcpu_freelist_head *head;
+
+               head = per_cpu_ptr(s->freelist, cpu);
+               if (raw_spin_trylock(&head->lock)) {
+                       pcpu_freelist_push_node(head, node);
+                       raw_spin_unlock(&head->lock);
+                       return;
+               }
+               /* advance to the next possible CPU, wrapping around */
+               cpu = cpumask_next(cpu, cpu_possible_mask);
+               if (cpu >= nr_cpu_ids)
+                       cpu = 0;
+
+               /* cannot lock any per cpu lock, try extralist */
+               if (cpu == orig_cpu &&
+                   pcpu_freelist_try_push_extra(s, node))
+                       return;
+       }
+}
+
 void __pcpu_freelist_push(struct pcpu_freelist *s,
                         struct pcpu_freelist_node *node)
 {
-       struct pcpu_freelist_head *head = this_cpu_ptr(s->freelist);
-
-       ___pcpu_freelist_push(head, node);
+       /* NMI context must use the non-spinning trylock path to avoid
+        * deadlocking against a lock held by the interrupted context.
+        */
+       if (in_nmi())
+               ___pcpu_freelist_push_nmi(s, node);
+       else
+               ___pcpu_freelist_push(this_cpu_ptr(s->freelist), node);
 }
 
 void pcpu_freelist_push(struct pcpu_freelist *s,
@@ -81,7 +121,7 @@ again:
        }
 }
 
-struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
+static struct pcpu_freelist_node *___pcpu_freelist_pop(struct pcpu_freelist *s)
 {
        struct pcpu_freelist_head *head;
        struct pcpu_freelist_node *node;
@@ -102,8 +142,59 @@ struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
                if (cpu >= nr_cpu_ids)
                        cpu = 0;
                if (cpu == orig_cpu)
-                       return NULL;
+                       break;
+       }
+
+       /* per cpu lists are all empty, try extralist */
+       raw_spin_lock(&s->extralist.lock);
+       node = s->extralist.first;
+       if (node)
+               s->extralist.first = node->next;
+       raw_spin_unlock(&s->extralist.lock);
+       return node;
+}
+
+/* NMI-safe pop.  Mirrors ___pcpu_freelist_pop() but uses trylock
+ * everywhere: an NMI may have interrupted a context that holds one of
+ * these locks, so spinning could deadlock.  Skips any contended per-cpu
+ * list; after a full round trip over all possible CPUs, falls back to
+ * the shared extralist.  Returns the popped node, or NULL if every list
+ * was empty or its lock contended.
+ */
+static struct pcpu_freelist_node *
+___pcpu_freelist_pop_nmi(struct pcpu_freelist *s)
+{
+       struct pcpu_freelist_head *head;
+       struct pcpu_freelist_node *node;
+       int orig_cpu, cpu;
+
+       orig_cpu = cpu = raw_smp_processor_id();
+       while (1) {
+               head = per_cpu_ptr(s->freelist, cpu);
+               if (raw_spin_trylock(&head->lock)) {
+                       node = head->first;
+                       if (node) {
+                               head->first = node->next;
+                               raw_spin_unlock(&head->lock);
+                               return node;
+                       }
+                       raw_spin_unlock(&head->lock);
+               }
+               /* advance to the next possible CPU, wrapping around */
+               cpu = cpumask_next(cpu, cpu_possible_mask);
+               if (cpu >= nr_cpu_ids)
+                       cpu = 0;
+               if (cpu == orig_cpu)
+                       break;
+       }
+
+       /* cannot pop from per cpu lists, try extralist */
+       if (!raw_spin_trylock(&s->extralist.lock))
+               return NULL;
+       node = s->extralist.first;
+       if (node)
+               s->extralist.first = node->next;
+       raw_spin_unlock(&s->extralist.lock);
+       return node;
+}
+
+/* Pop a node, dispatching to the non-spinning variant when called from
+ * NMI context (same deadlock-avoidance rationale as __pcpu_freelist_push).
+ */
+struct pcpu_freelist_node *__pcpu_freelist_pop(struct pcpu_freelist *s)
+{
+       if (in_nmi())
+               return ___pcpu_freelist_pop_nmi(s);
+       return ___pcpu_freelist_pop(s);
 }
 
 struct pcpu_freelist_node *pcpu_freelist_pop(struct pcpu_freelist *s)