// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 */
10 #include "msm_gpu_trace.h"
/*
 * Shrinker "count" callback: report how many pages could be reclaimed.
 *
 * Returns the cached shrinkable_count instead of walking the object
 * lists, so this path stays cheap for the shrinker core.
 * NOTE(review): shrinkable_count is read here without taking mm_lock —
 * presumably the count is maintained elsewhere and an approximate value
 * is acceptable for shrinker heuristics; confirm against the updater.
 */
msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc)
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	return priv->shrinkable_count;
/*
 * Shrinker "scan" callback: try to purge up to sc->nr_to_scan pages worth
 * of objects from the inactive_dontneed (MADV_DONTNEED) list.
 *
 * Returns the number of pages freed (accumulated in @freed).
 */
msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc)
	struct msm_drm_private *priv =
		container_of(shrinker, struct msm_drm_private, shrinker);
	/* Objects visited this pass; spliced back onto the list at the end */
	struct list_head still_in_list;
	unsigned long freed = 0;

	INIT_LIST_HEAD(&still_in_list);

	mutex_lock(&priv->mm_lock);

	while (freed < sc->nr_to_scan) {
		/* Pull objects off the head of the dontneed list, if any remain */
		struct msm_gem_object *msm_obj = list_first_entry_or_null(
			&priv->inactive_dontneed, typeof(*msm_obj), mm_list);
		/*
		 * Park the object on the local list so the loop makes forward
		 * progress even when the object cannot be purged this pass.
		 */
		list_move_tail(&msm_obj->mm_list, &still_in_list);
		/*
		 * If it is in the process of being freed, msm_gem_free_object
		 * can be blocked on mm_lock waiting to remove it.  So just
		 * skip it.
		 */
		if (!kref_get_unless_zero(&msm_obj->base.refcount))
		/*
		 * Now that we own a reference, we can drop mm_lock for the
		 * rest of the loop body, to reduce contention with the
		 * retire_submit path (which could make more objects purgeable).
		 */
		mutex_unlock(&priv->mm_lock);
		/*
		 * Note that this still needs to be trylock, since we can
		 * hit shrinker in response to trying to get backing pages
		 * for this obj (ie. while its lock is already held).
		 */
		if (!msm_gem_trylock(&msm_obj->base))
		if (is_purgeable(msm_obj)) {
			/*
			 * This will move the obj out of still_in_list to
			 * the purged state.
			 */
			msm_gem_purge(&msm_obj->base);
			/* Credit the whole backing size, in pages */
			freed += msm_obj->base.size >> PAGE_SHIFT;
		msm_gem_unlock(&msm_obj->base);
		/* Drop our temporary ref, then re-take mm_lock for the next pass */
		drm_gem_object_put(&msm_obj->base);
		mutex_lock(&priv->mm_lock);
	/* Return survivors to the tail of the dontneed list */
	list_splice_tail(&still_in_list, &priv->inactive_dontneed);
	mutex_unlock(&priv->mm_lock);

	trace_msm_gem_purge(freed << PAGE_SHIFT);
/* Since we don't know any better, let's bail after a few
 * and if necessary the shrinker will be invoked again.
 * Seems better than unmapping *everything*.
 */
static const int vmap_shrink_limit = 15;
/*
 * Walk @mm_list and vunmap objects that still hold a kernel vmap,
 * stopping after vmap_shrink_limit objects.  Returns the count of
 * objects unmapped.
 * NOTE(review): caller appears to hold priv->mm_lock while the list is
 * walked (see msm_gem_shrinker_vmap) — confirm before adding callers.
 */
vmap_shrink(struct list_head *mm_list)
	struct msm_gem_object *msm_obj;
	unsigned unmapped = 0;

	list_for_each_entry(msm_obj, mm_list, mm_list) {
		/*
		 * Use trylock, because we cannot block on an obj that
		 * might be trying to acquire mm_lock.
		 */
		if (!msm_gem_trylock(&msm_obj->base))
		/* Only touch objects whose vmap is actually droppable */
		if (is_vunmapable(msm_obj)) {
			msm_gem_vunmap(&msm_obj->base);
		msm_gem_unlock(&msm_obj->base);
		/* Bail early once we hit the per-invocation limit */
		if (++unmapped >= vmap_shrink_limit)
/*
 * vmap purge notifier callback: invoked when the kernel is running low
 * on vmalloc space.  Walks the inactive (dontneed, then willneed) lists
 * and, when a GPU is present, the active list, vunmapping objects until
 * vmap_shrink_limit is reached.  The unmapped count is accumulated into
 * the notifier's unsigned long at @ptr.
 */
msm_gem_shrinker_vmap(struct notifier_block *nb, unsigned long event, void *ptr)
	struct msm_drm_private *priv =
		container_of(nb, struct msm_drm_private, vmap_notifier);
	/* NULL-terminated list of lists to scan, cheapest victims first */
	struct list_head *mm_lists[] = {
		&priv->inactive_dontneed,
		&priv->inactive_willneed,
		priv->gpu ? &priv->gpu->active_list : NULL,
	unsigned idx, unmapped = 0;

	mutex_lock(&priv->mm_lock);

	for (idx = 0; mm_lists[idx]; idx++) {
		unmapped += vmap_shrink(mm_lists[idx]);
		/* Stop once the overall per-invocation budget is exhausted */
		if (unmapped >= vmap_shrink_limit)
	mutex_unlock(&priv->mm_lock);

	/* Report progress back to the vmap purge machinery via @ptr */
	*(unsigned long *)ptr += unmapped;

	trace_msm_gem_purge_vmaps(unmapped);
/**
 * msm_gem_shrinker_init - Initialize msm shrinker
 * @dev: drm device
 *
 * This function registers and sets up the msm shrinker, and hooks the
 * vmap purge notifier.
 */
void msm_gem_shrinker_init(struct drm_device *dev)
	struct msm_drm_private *priv = dev->dev_private;
	priv->shrinker.count_objects = msm_gem_shrinker_count;
	priv->shrinker.scan_objects = msm_gem_shrinker_scan;
	priv->shrinker.seeks = DEFAULT_SEEKS;
	/* Registration failure is unexpected but non-fatal; just warn */
	WARN_ON(register_shrinker(&priv->shrinker));
	priv->vmap_notifier.notifier_call = msm_gem_shrinker_vmap;
	WARN_ON(register_vmap_purge_notifier(&priv->vmap_notifier));
/**
 * msm_gem_shrinker_cleanup - Clean up msm shrinker
 * @dev: drm device
 *
 * This function unregisters the msm shrinker and vmap purge notifier.
 */
void msm_gem_shrinker_cleanup(struct drm_device *dev)
	struct msm_drm_private *priv = dev->dev_private;
	/*
	 * nr_deferred acts as a "was register_shrinker() successful?" flag:
	 * it is presumably allocated only on successful registration, so
	 * skip unregistering if init never completed — TODO confirm.
	 */
	if (priv->shrinker.nr_deferred) {
		WARN_ON(unregister_vmap_purge_notifier(&priv->vmap_notifier));
		unregister_shrinker(&priv->shrinker);