/*
 * SPDX-License-Identifier: MIT
 *
 * Copyright © 2019 Intel Corporation
 */

#include "i915_drv.h"
#include "i915_gem_context.h"
#include "i915_globals.h"
#include "intel_context.h"
#include "intel_ringbuffer.h"

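/*
 * All intel_context objects come from a single slab cache; the embedded
 * i915_global hooks that cache into the driver-wide shrink/exit
 * callbacks registered at the bottom of this file.
 */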
static struct i915_global_context {
        struct i915_global base;
        struct kmem_cache *slab_ce;
} global;

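/* Thin wrappers around the slab cache; note allocations are zeroed. */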
struct intel_context *intel_context_alloc(void)
{
        return kmem_cache_zalloc(global.slab_ce, GFP_KERNEL);
}

void intel_context_free(struct intel_context *ce)
{
        kmem_cache_free(global.slab_ce, ce);
}

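/**
 * intel_context_lookup - find the HW context for an engine
 * @ctx: the GEM context owning the rbtree of HW contexts
 * @engine: the engine to look up
 *
 * Walks @ctx->hw_contexts, an rbtree keyed by engine pointer, under
 * @ctx->hw_contexts_lock. Returns the matching intel_context, or NULL
 * if none has been created for @engine yet.
 */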
struct intel_context *
intel_context_lookup(struct i915_gem_context *ctx,
                     struct intel_engine_cs *engine)
{
        struct intel_context *ce = NULL;
        struct rb_node *p;

        spin_lock(&ctx->hw_contexts_lock);
        p = ctx->hw_contexts.rb_node;
        while (p) {
                struct intel_context *this =
                        rb_entry(p, struct intel_context, node);

                if (this->engine == engine) {
                        GEM_BUG_ON(this->gem_context != ctx);
                        ce = this;
                        break;
                }

                if (this->engine < engine)
                        p = p->rb_right;
                else
                        p = p->rb_left;
        }
        spin_unlock(&ctx->hw_contexts_lock);

        return ce;
}

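/**
 * __intel_context_insert - add a HW context to a GEM context's rbtree
 * @ctx: the GEM context to insert into
 * @engine: the engine owning the HW context
 * @ce: the HW context to insert
 *
 * Attempts to insert @ce into @ctx->hw_contexts. If another thread
 * raced us and already inserted a HW context for @engine, the existing
 * context is returned instead of @ce and the caller must dispose of
 * its own copy (see intel_context_instance()).
 */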
struct intel_context *
__intel_context_insert(struct i915_gem_context *ctx,
                       struct intel_engine_cs *engine,
                       struct intel_context *ce)
{
        struct rb_node **p, *parent;
        int err = 0;

        spin_lock(&ctx->hw_contexts_lock);

        parent = NULL;
        p = &ctx->hw_contexts.rb_node;
        while (*p) {
                struct intel_context *this;

                parent = *p;
                this = rb_entry(parent, struct intel_context, node);

                if (this->engine == engine) {
                        err = -EEXIST;
                        ce = this;
                        break;
                }

                if (this->engine < engine)
                        p = &parent->rb_right;
                else
                        p = &parent->rb_left;
        }
        if (!err) {
                rb_link_node(&ce->node, parent, p);
                rb_insert_color(&ce->node, &ctx->hw_contexts);
        }

        spin_unlock(&ctx->hw_contexts_lock);

        return ce;
}

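/* Erase @ce from its GEM context's rbtree of HW contexts. */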
void __intel_context_remove(struct intel_context *ce)
{
        struct i915_gem_context *ctx = ce->gem_context;

        spin_lock(&ctx->hw_contexts_lock);
        rb_erase(&ce->node, &ctx->hw_contexts);
        spin_unlock(&ctx->hw_contexts_lock);
}

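/*
 * Lookup-or-create: return the HW context for (ctx, engine), creating
 * and inserting one if it does not exist yet. Two threads may race to
 * create the same context; the loser of __intel_context_insert() frees
 * its allocation and adopts the winner's context instead.
 */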
static struct intel_context *
intel_context_instance(struct i915_gem_context *ctx,
                       struct intel_engine_cs *engine)
{
        struct intel_context *ce, *pos;

        ce = intel_context_lookup(ctx, engine);
        if (likely(ce))
                return ce;

        ce = intel_context_alloc();
        if (!ce)
                return ERR_PTR(-ENOMEM);

        intel_context_init(ce, ctx, engine);

        pos = __intel_context_insert(ctx, engine, ce);
        if (unlikely(pos != ce)) /* Beaten! Use their HW context instead */
                intel_context_free(ce);

        GEM_BUG_ON(intel_context_lookup(ctx, engine) != pos);
        return pos;
}

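/**
 * intel_context_pin_lock - lookup the HW context and lock its pin_mutex
 * @ctx: the GEM context
 * @engine: the engine to lookup (or create) the HW context for
 *
 * Returns the HW context with ce->pin_mutex held (or an ERR_PTR on
 * failure), so the caller can safely adjust state that must not change
 * while the context is pinned. The caller must drop ce->pin_mutex when
 * done.
 */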
struct intel_context *
intel_context_pin_lock(struct i915_gem_context *ctx,
                       struct intel_engine_cs *engine)
        __acquires(ce->pin_mutex)
{
        struct intel_context *ce;

        ce = intel_context_instance(ctx, engine);
        if (IS_ERR(ce))
                return ce;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return ERR_PTR(-EINTR);

        return ce;
}

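/**
 * intel_context_pin - pin the HW context for use on the GPU
 * @ctx: the GEM context
 * @engine: the engine to pin the HW context for
 *
 * The first pin sets up the backing state via ce->ops->pin() and takes
 * references on both the HW context and its GEM context; subsequent
 * pins are a single lockless atomic increment. Balance every
 * successful call with intel_context_unpin().
 *
 * A sketch of the expected caller pattern (error handling abbreviated):
 *
 *      ce = intel_context_pin(ctx, engine);
 *      if (IS_ERR(ce))
 *              return PTR_ERR(ce);
 *      ... build and submit requests using ce ...
 *      intel_context_unpin(ce);
 */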
struct intel_context *
intel_context_pin(struct i915_gem_context *ctx,
                  struct intel_engine_cs *engine)
{
        struct intel_context *ce;
        int err;

        ce = intel_context_instance(ctx, engine);
        if (IS_ERR(ce))
                return ce;

        /* Fast path: already pinned, just bump the pin count. */
        if (likely(atomic_inc_not_zero(&ce->pin_count)))
                return ce;

        if (mutex_lock_interruptible(&ce->pin_mutex))
                return ERR_PTR(-EINTR);

        /* Recheck under the mutex; we may have raced with another pin. */
        if (likely(!atomic_read(&ce->pin_count))) {
                err = ce->ops->pin(ce);
                if (err)
                        goto err;

                i915_gem_context_get(ctx);
                GEM_BUG_ON(ce->gem_context != ctx);

                mutex_lock(&ctx->mutex);
                list_add(&ce->active_link, &ctx->active_engines);
                mutex_unlock(&ctx->mutex);

                intel_context_get(ce);
                smp_mb__before_atomic(); /* flush pin before it is visible */
        }

        atomic_inc(&ce->pin_count);
        GEM_BUG_ON(!intel_context_is_pinned(ce)); /* no overflow! */

        mutex_unlock(&ce->pin_mutex);
        return ce;

err:
        mutex_unlock(&ce->pin_mutex);
        return ERR_PTR(err);
}

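/**
 * intel_context_unpin - release a pin on the HW context
 * @ce: the HW context to unpin
 *
 * Drops the pin count locklessly unless this may be the final unpin,
 * in which case the pin_mutex is taken and, once the count reaches
 * zero, the backing state is released via ce->ops->unpin() along with
 * the references taken by the first pin.
 */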
void intel_context_unpin(struct intel_context *ce)
{
        if (likely(atomic_add_unless(&ce->pin_count, -1, 1)))
                return;

        /* We may be called from inside intel_context_pin() to evict another */
        intel_context_get(ce);
        mutex_lock_nested(&ce->pin_mutex, SINGLE_DEPTH_NESTING);

        if (likely(atomic_dec_and_test(&ce->pin_count))) {
                ce->ops->unpin(ce);

                mutex_lock(&ce->gem_context->mutex);
                list_del(&ce->active_link);
                mutex_unlock(&ce->gem_context->mutex);

                i915_gem_context_put(ce->gem_context);
                intel_context_put(ce);
        }

        mutex_unlock(&ce->pin_mutex);
        intel_context_put(ce);
}

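/*
 * Retirement callback for ce->active_tracker (installed by
 * intel_context_init()): once the last tracked request is retired,
 * drop the pin that kept the HW context alive while it was in flight.
 */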
static void intel_context_retire(struct i915_active_request *active,
                                 struct i915_request *rq)
{
        struct intel_context *ce =
                container_of(active, typeof(*ce), active_tracker);

        intel_context_unpin(ce);
}

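/*
 * One-time initialisation of a freshly allocated HW context: take the
 * first reference, link it to its GEM context and engine, and install
 * the retirement callback above.
 */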
void
intel_context_init(struct intel_context *ce,
                   struct i915_gem_context *ctx,
                   struct intel_engine_cs *engine)
{
        kref_init(&ce->ref);

        ce->gem_context = ctx;
        ce->engine = engine;
        ce->ops = engine->cops;
        ce->saturated = 0;

        INIT_LIST_HEAD(&ce->signal_link);
        INIT_LIST_HEAD(&ce->signals);

        mutex_init(&ce->pin_mutex);

        /* Use the whole device by default */
        ce->sseu = intel_device_default_sseu(ctx->i915);

        i915_active_request_init(&ce->active_tracker,
                                 NULL, intel_context_retire);
}

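/*
 * i915_globals plumbing: shrink trims the slab under memory pressure,
 * exit destroys it when the driver is unloaded.
 */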
static void i915_global_context_shrink(void)
{
        kmem_cache_shrink(global.slab_ce);
}

static void i915_global_context_exit(void)
{
        kmem_cache_destroy(global.slab_ce);
}

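/*
 * This completes the tentative definition of 'global' at the top of
 * the file, now that the callbacks it points to are defined.
 */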
static struct i915_global_context global = { {
        .shrink = i915_global_context_shrink,
        .exit = i915_global_context_exit,
} };

int __init i915_global_context_init(void)
{
        global.slab_ce = KMEM_CACHE(intel_context, SLAB_HWCACHE_ALIGN);
        if (!global.slab_ce)
                return -ENOMEM;

        i915_global_register(&global.base);
        return 0;
}