2db90e7706165c2d18775efa010964e3ff232b55
[linux-2.6-microblaze.git] drivers/gpu/drm/i915/i915_globals.c
/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include <linux/slab.h>
#include <linux/workqueue.h>

#include "i915_active.h"
#include "gem/i915_gem_context.h"
#include "gem/i915_gem_object.h"
#include "i915_globals.h"
#include "i915_request.h"
#include "i915_scheduler.h"
#include "i915_vma.h"

static LIST_HEAD(globals);

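/*
 * The global slab caches are only shrunk while the driver is idle: @active
 * counts the current users (see i915_globals_unpark/park), @epoch advances
 * on every wakeup so that the deferred worker can tell whether the driver
 * was reawoken while it slept, and @park holds the deferred shrink work.
 */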
static atomic_t active;
static atomic_t epoch;
static struct park_work {
        struct delayed_work work;
        struct rcu_head rcu;
        unsigned long flags;
#define PENDING 0
        int epoch;
} park;

static void i915_globals_shrink(void)
{
        struct i915_global *global;

        /*
         * kmem_cache_shrink() discards empty slabs and reorders partially
         * filled slabs to prioritise allocating from the mostly full slabs,
         * with the aim of reducing fragmentation.
         */
        list_for_each_entry(global, &globals, link)
                global->shrink();
}

static void __i915_globals_grace(struct rcu_head *rcu)
{
        /* Ratelimit parking as shrinking is quite slow */
        schedule_delayed_work(&park.work, round_jiffies_up_relative(2 * HZ));
}

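/*
 * Note the current epoch and, if we are still idle, arm an RCU callback to
 * schedule the parking work once a grace period has elapsed.
 */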
static void __i915_globals_queue_rcu(void)
{
        park.epoch = atomic_inc_return(&epoch);
        if (!atomic_read(&active)) {
                init_rcu_head(&park.rcu);
                call_rcu(&park.rcu, __i915_globals_grace);
        }
}

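/*
 * Deferred worker: if the driver woke up since we were queued (the epoch
 * has moved on), go back to sleep for another grace period; otherwise drop
 * the PENDING flag and shrink the registered caches.
 */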
static void __i915_globals_park(struct work_struct *work)
{
        destroy_rcu_head(&park.rcu);

        /* Confirm nothing woke up in the last grace period */
        if (park.epoch != atomic_read(&epoch)) {
                __i915_globals_queue_rcu();
                return;
        }

        clear_bit(PENDING, &park.flags);
        i915_globals_shrink();
}

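/*
 * i915_global_register - add a global slab cache to the park/exit machinery
 *
 * Each global must supply a shrink() and an exit() callback; registration
 * order is preserved so that exit() runs in reverse order at unload.
 *
 * Typical usage, as an illustrative sketch only (the "foo" cache, struct
 * and init function below are hypothetical, not part of this file):
 *
 *	static struct kmem_cache *slab_foo;
 *
 *	static void i915_global_foo_shrink(void)
 *	{
 *		kmem_cache_shrink(slab_foo);
 *	}
 *
 *	static void i915_global_foo_exit(void)
 *	{
 *		kmem_cache_destroy(slab_foo);
 *	}
 *
 *	static struct i915_global global = {
 *		.shrink = i915_global_foo_shrink,
 *		.exit = i915_global_foo_exit,
 *	};
 *
 *	int __init i915_global_foo_init(void)
 *	{
 *		slab_foo = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN);
 *		if (!slab_foo)
 *			return -ENOMEM;
 *
 *		i915_global_register(&global);
 *		return 0;
 *	}
 */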
void __init i915_global_register(struct i915_global *global)
{
        GEM_BUG_ON(!global->shrink);
        GEM_BUG_ON(!global->exit);

        list_add_tail(&global->link, &globals);
}

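/* Tear down the registered globals in reverse order of registration. */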
static void __i915_globals_cleanup(void)
{
        struct i915_global *global, *next;

        list_for_each_entry_safe_reverse(global, next, &globals, link)
                global->exit();
}

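/* Constructors for each global cache, invoked in order by i915_globals_init(). */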
static __initconst int (* const initfn[])(void) = {
        i915_global_active_init,
        i915_global_context_init,
        i915_global_gem_context_init,
        i915_global_objects_init,
        i915_global_request_init,
        i915_global_scheduler_init,
        i915_global_vma_init,
};

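/*
 * Create every global slab cache; on failure, unwind the ones already
 * registered before reporting the error to the caller.
 */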
int __init i915_globals_init(void)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(initfn); i++) {
                int err;

                err = initfn[i]();
                if (err) {
                        __i915_globals_cleanup();
                        return err;
                }
        }

        INIT_DELAYED_WORK(&park.work, __i915_globals_park);
        return 0;
}

void i915_globals_park(void)
{
        /*
         * Defer shrinking the global slab caches (and other work) until
         * after an RCU grace period has completed with no activity. This
         * is to try to reduce the latency impact on the consumers caused
         * by us shrinking the caches at the same time as they are trying
         * to allocate, with the assumption being that if we idle long
         * enough for an RCU grace period to elapse since the last use, it
         * is likely to be longer until we need the caches again.
         */
        if (!atomic_dec_and_test(&active))
                return;

        /* Queue cleanup after the next RCU grace period has freed slabs */
        if (!test_and_set_bit(PENDING, &park.flags))
                __i915_globals_queue_rcu();
}

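/*
 * Mark the driver as active again; advancing the epoch lets a parking worker
 * that was already queued notice the activity and back off instead of
 * shrinking the caches underneath us.
 */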
void i915_globals_unpark(void)
{
        atomic_inc(&epoch);
        atomic_inc(&active);
}

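/*
 * Drain any parking work still queued behind an RCU grace period so that
 * nothing touches the caches after they have been destroyed.
 */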
static void __exit __i915_globals_flush(void)
{
        atomic_inc(&active); /* skip shrinking */

        rcu_barrier(); /* wait for the work to be queued */
        flush_delayed_work(&park.work);

        atomic_dec(&active);
}

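/* Final teardown: flush the deferred work, then run every registered exit(). */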
void i915_globals_exit(void)
{
        GEM_BUG_ON(atomic_read(&active));

        __i915_globals_flush();
        __i915_globals_cleanup();

        /* And ensure that our DESTROY_BY_RCU slabs are truly destroyed */
        rcu_barrier();
}