drivers/gpu/drm/msm/msm_gem_shrinker.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */

#include <linux/vmalloc.h>
#include <linux/sched/mm.h>

#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_gpu_trace.h"

/* Eviction (swapping out GEM buffers) is enabled by default, but can
 * be disabled via this module parameter on iommu combinations where
 * it has seen less testing:
 */
static bool enable_eviction = true;
MODULE_PARM_DESC(enable_eviction, "Enable swappable GEM buffers");
module_param(enable_eviction, bool, 0600);

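/* Swapping out buffers is only useful if eviction is enabled and
 * there is actually free swap space to receive the pages:
 */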
static bool can_swap(void)
{
        return enable_eviction && get_nr_swap_pages() > 0;
}

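/* We can only block waiting for GPU idle if the caller allows direct
 * reclaim, and we are either kswapd or the allocation allows both
 * direct and kswapd reclaim:
 */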
static bool can_block(struct shrink_control *sc)
{
        if (!(sc->gfp_mask & __GFP_DIRECT_RECLAIM))
                return false;
        return current_is_kswapd() || (sc->gfp_mask & __GFP_RECLAIM) == __GFP_RECLAIM;
}

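/* Shrinker ->count_objects callback: report how many objects could
 * potentially be reclaimed.  WILLNEED objects are only counted when
 * they can be swapped out:
 */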
static unsigned long
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv = shrinker->private_data;
        unsigned count = priv->lru.dontneed.count;

        if (can_swap())
                count += priv->lru.willneed.count;

        return count;
}

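/* Purge the backing pages of a GEM object that userspace has marked
 * as DONTNEED, provided it is not active on the GPU:
 */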
static bool
purge(struct drm_gem_object *obj)
{
        if (!is_purgeable(to_msm_bo(obj)))
                return false;

        if (msm_gem_active(obj))
                return false;

        msm_gem_purge(obj);

        return true;
}

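/* Evict the backing pages of an evictable GEM object to swap,
 * provided it is not active on the GPU:
 */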
static bool
evict(struct drm_gem_object *obj)
{
        if (is_unevictable(to_msm_bo(obj)))
                return false;

        if (msm_gem_active(obj))
                return false;

        msm_gem_evict(obj);

        return true;
}

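/* Wait briefly (up to 10 jiffies) for pending GPU access to the
 * object to complete; returns true if the object became idle:
 */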
static bool
wait_for_idle(struct drm_gem_object *obj)
{
        enum dma_resv_usage usage = dma_resv_usage_rw(true);
        return dma_resv_wait_timeout(obj->resv, usage, false, 10) > 0;
}

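/* As purge(), but first wait for the object to become idle: */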
static bool
active_purge(struct drm_gem_object *obj)
{
        if (!wait_for_idle(obj))
                return false;

        return purge(obj);
}

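/* As evict(), but first wait for the object to become idle: */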
static bool
active_evict(struct drm_gem_object *obj)
{
        if (!wait_for_idle(obj))
                return false;

        return evict(obj);
}

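/* Shrinker ->scan_objects callback: try each reclaim stage in turn
 * until the requested number of objects has been freed or the stages
 * are exhausted:
 */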
static unsigned long
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
{
        struct msm_drm_private *priv = shrinker->private_data;
        struct {
                struct drm_gem_lru *lru;
                bool (*shrink)(struct drm_gem_object *obj);
                bool cond;
                unsigned long freed;
                unsigned long remaining;
        } stages[] = {
                /* Stages of progressively more aggressive/expensive reclaim: */
                { &priv->lru.dontneed, purge,        true },
                { &priv->lru.willneed, evict,        can_swap() },
                { &priv->lru.dontneed, active_purge, can_block(sc) },
                { &priv->lru.willneed, active_evict, can_swap() && can_block(sc) },
        };
        long nr = sc->nr_to_scan;
        unsigned long freed = 0;
        unsigned long remaining = 0;

        for (unsigned i = 0; (nr > 0) && (i < ARRAY_SIZE(stages)); i++) {
                if (!stages[i].cond)
                        continue;
                stages[i].freed =
                        drm_gem_lru_scan(stages[i].lru, nr,
                                         &stages[i].remaining,
                                         stages[i].shrink);
                nr -= stages[i].freed;
                freed += stages[i].freed;
                remaining += stages[i].remaining;
        }

        if (freed) {
                trace_msm_gem_shrink(sc->nr_to_scan, stages[0].freed,
                                     stages[1].freed, stages[2].freed,
                                     stages[3].freed);
        }

        return (freed > 0 && remaining > 0) ? freed : SHRINK_STOP;
}

#ifdef CONFIG_DEBUG_FS
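/* Debugfs interface to manually trigger the shrinker, bypassing the
 * core-mm reclaim paths:
 */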
unsigned long
msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan)
{
        struct msm_drm_private *priv = dev->dev_private;
        struct shrink_control sc = {
                .nr_to_scan = nr_to_scan,
        };
        unsigned long ret = SHRINK_STOP;

        fs_reclaim_acquire(GFP_KERNEL);
        if (priv->shrinker)
                ret = msm_gem_shrinker_scan(priv->shrinker, &sc);
        fs_reclaim_release(GFP_KERNEL);

        return ret;
}
#endif

/* Since we don't know any better, let's bail after unmapping a few
 * buffers; if necessary the shrinker will be invoked again.  That
 * seems better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;

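/* Remove the kernel vmap of a GEM object, if it has one: */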
static bool
vmap_shrink(struct drm_gem_object *obj)
{
        if (!is_vunmapable(to_msm_bo(obj)))
                return false;

        msm_gem_vunmap(obj);

        return true;
}

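/* Notifier callback run when the vmalloc subsystem needs to reclaim
 * kernel virtual address space; unmaps up to vmap_shrink_limit
 * buffers and reports the count back via @ptr:
 */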
static int
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
{
        struct msm_drm_private *priv =
                container_of(nb, struct msm_drm_private, vmap_notifier);
        struct drm_gem_lru *lrus[] = {
                &priv->lru.dontneed,
                &priv->lru.willneed,
                &priv->lru.pinned,
                NULL,
        };
        unsigned idx, unmapped = 0;
        unsigned long remaining = 0;

        for (idx = 0; lrus[idx] && unmapped < vmap_shrink_limit; idx++) {
                unmapped += drm_gem_lru_scan(lrus[idx],
                                             vmap_shrink_limit - unmapped,
                                             &remaining,
                                             vmap_shrink);
        }

        *(unsigned long *)ptr += unmapped;

        if (unmapped > 0)
                trace_msm_gem_purge_vmaps(unmapped);

        return NOTIFY_DONE;
}

/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function sets up and registers the msm shrinker.
 *
 * Return: 0 on success, or -ENOMEM if the shrinker cannot be allocated.
 */
int msm_gem_shrinker_init(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        priv->shrinker = shrinker_alloc(0, "drm-msm_gem");
        if (!priv->shrinker)
                return -ENOMEM;

        priv->shrinker->count_objects = msm_gem_shrinker_count;
        priv->shrinker->scan_objects = msm_gem_shrinker_scan;
        priv->shrinker->private_data = priv;

        shrinker_register(priv->shrinker);

        priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
        WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));

        return 0;
}

/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
{
        struct msm_drm_private *priv = dev->dev_private;

        if (priv->shrinker) {
                WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
                shrinker_free(priv->shrinker);
        }
}