11661de8c40b1cc515288ae2cf5f36a804f306d8
[linux-2.6-microblaze.git] / drivers / gpu / drm / i915 / gt / intel_gt_pm.c
1 /*
2  * SPDX-License-Identifier: MIT
3  *
4  * Copyright © 2019 Intel Corporation
5  */
6
7 #include "i915_drv.h"
8 #include "i915_globals.h"
9 #include "i915_params.h"
10 #include "intel_context.h"
11 #include "intel_engine_pm.h"
12 #include "intel_gt.h"
13 #include "intel_gt_pm.h"
14 #include "intel_gt_requests.h"
15 #include "intel_llc.h"
16 #include "intel_pm.h"
17 #include "intel_rc6.h"
18 #include "intel_rps.h"
19 #include "intel_wakeref.h"
20
21 static void user_forcewake(struct intel_gt *gt, bool suspend)
22 {
23         int count = atomic_read(&gt->user_wakeref);
24
25         /* Inside suspend/resume so single threaded, no races to worry about. */
26         if (likely(!count))
27                 return;
28
29         intel_gt_pm_get(gt);
30         if (suspend) {
31                 GEM_BUG_ON(count > atomic_read(&gt->wakeref.count));
32                 atomic_sub(count, &gt->wakeref.count);
33         } else {
34                 atomic_add(count, &gt->wakeref.count);
35         }
36         intel_gt_pm_put(gt);
37 }
38
/*
 * Wakeref "get" callback (see wf_ops): bring the GT out of its parked
 * state when the first wakeref is taken. The ordering here is mirrored
 * in reverse by __gt_park(). Always returns 0.
 */
static int __gt_unpark(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	struct drm_i915_private *i915 = gt->i915;

	GEM_TRACE("\n");

	i915_globals_unpark();

	/*
	 * It seems that the DMC likes to transition between the DC states a lot
	 * when there are no connected displays (no active power domains) during
	 * command submission.
	 *
	 * This activity has negative impact on the performance of the chip with
	 * huge latencies observed in the interrupt handler and elsewhere.
	 *
	 * Work around it by grabbing a GT IRQ power domain whilst there is any
	 * GT activity, preventing any DC state transitions.
	 */
	gt->awake = intel_display_power_get(i915, POWER_DOMAIN_GT_IRQ);
	GEM_BUG_ON(!gt->awake);

	intel_rps_unpark(&gt->rps);
	i915_pmu_gt_unparked(i915);

	/* Re-arm request/retirement handling now that we are awake */
	intel_gt_unpark_requests(gt);

	return 0;
}
69
/*
 * Wakeref "put" callback (see wf_ops): park the GT when the last wakeref
 * is released, undoing __gt_unpark() in reverse order. Always returns 0.
 */
static int __gt_park(struct intel_wakeref *wf)
{
	struct intel_gt *gt = container_of(wf, typeof(*gt), wakeref);
	/* Claim the display power reference taken in __gt_unpark() */
	intel_wakeref_t wakeref = fetch_and_zero(&gt->awake);
	struct drm_i915_private *i915 = gt->i915;

	GEM_TRACE("\n");

	intel_gt_park_requests(gt);

	i915_vma_parked(gt);
	i915_pmu_gt_parked(i915);
	intel_rps_park(&gt->rps);

	/* Everything switched off, flush any residual interrupt just in case */
	intel_synchronize_irq(i915);

	/* Unpark must have set gt->awake; release that display power ref */
	GEM_BUG_ON(!wakeref);
	intel_display_power_put(i915, POWER_DOMAIN_GT_IRQ, wakeref);

	i915_globals_park();

	return 0;
}
94
/*
 * Callbacks for gt->wakeref: unpark on first get, park on final put.
 * PUT_ASYNC defers the park so the final put may happen from atomic
 * context -- NOTE(review): inferred from the flag name, confirm against
 * intel_wakeref.h.
 */
static const struct intel_wakeref_ops wf_ops = {
	.get = __gt_unpark,
	.put = __gt_park,
	.flags = INTEL_WAKEREF_PUT_ASYNC,
};
100
/* Hook up the GT wakeref to runtime pm; safe before hardware probing. */
void intel_gt_pm_init_early(struct intel_gt *gt)
{
	intel_wakeref_init(&gt->wakeref, gt->uncore->rpm, &wf_ops);
}
105
/* Initialise the GT power-saving features (RC6 and RPS). */
void intel_gt_pm_init(struct intel_gt *gt)
{
	/*
	 * Enabling power-management should be "self-healing". If we cannot
	 * enable a feature, simply leave it disabled with a notice to the
	 * user.
	 */
	intel_rc6_init(&gt->rc6);
	intel_rps_init(&gt->rps);
}
116
117 static bool reset_engines(struct intel_gt *gt)
118 {
119         if (INTEL_INFO(gt->i915)->gpu_reset_clobbers_display)
120                 return false;
121
122         return __intel_gt_reset(gt, ALL_ENGINES) == 0;
123 }
124
/**
 * intel_gt_sanitize - called after the GPU has lost power
 * @gt: the i915 GT container
 * @force: ignore a failed reset and sanitize engine state anyway
 *
 * Anytime we reset the GPU, either with an explicit GPU reset or through a
 * PCI power cycle, the GPU loses state and we must reset our state tracking
 * to match. Note that calling intel_gt_sanitize() if the GPU has not
 * been reset results in much confusion!
 */
void intel_gt_sanitize(struct intel_gt *gt, bool force)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	intel_wakeref_t wakeref;

	GEM_TRACE("force:%s\n", yesno(force));

	/* Use a raw wakeref to avoid calling intel_display_power_get early */
	wakeref = intel_runtime_pm_get(gt->uncore->rpm);
	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);

	/*
	 * As we have just resumed the machine and woken the device up from
	 * deep PCI sleep (presumably D3_cold), assume the HW has been reset
	 * back to defaults, recovering from whatever wedged state we left it
	 * in and so worth trying to use the device once more.
	 */
	if (intel_gt_is_wedged(gt))
		intel_gt_unset_wedged(gt);

	intel_uc_sanitize(&gt->uc);

	/* Give each engine a chance to quiesce before we reset its state */
	for_each_engine(engine, gt, id)
		if (engine->reset.prepare)
			engine->reset.prepare(engine);

	intel_uc_reset_prepare(&gt->uc);

	/*
	 * Scrub the engine state tracking after a successful reset, or
	 * unconditionally when @force says our tracking is stale anyway.
	 */
	if (reset_engines(gt) || force) {
		for_each_engine(engine, gt, id)
			__intel_engine_reset(engine, false);
	}

	/* Pair with reset.prepare above, even if the reset was skipped */
	for_each_engine(engine, gt, id)
		if (engine->reset.finish)
			engine->reset.finish(engine);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_runtime_pm_put(gt->uncore->rpm, wakeref);
}
176
/* Release GT power-management state; counterpart to intel_gt_pm_init(). */
void intel_gt_pm_fini(struct intel_gt *gt)
{
	intel_rc6_fini(&gt->rc6);
}
181
/*
 * Restart the GT after suspend/resume: re-enable power management,
 * reset and restart every engine's kernel context, and resume the
 * microcontrollers.
 *
 * Returns 0 on success, or the first engine->resume() error (remaining
 * engines are skipped on failure, but PM state is still restored).
 */
int intel_gt_resume(struct intel_gt *gt)
{
	struct intel_engine_cs *engine;
	enum intel_engine_id id;
	int err = 0;

	GEM_TRACE("\n");

	/*
	 * After resume, we may need to poke into the pinned kernel
	 * contexts to paper over any damage caused by the sudden suspend.
	 * Only the kernel contexts should remain pinned over suspend,
	 * allowing us to fixup the user contexts on their first pin.
	 */
	intel_gt_pm_get(gt);

	intel_uncore_forcewake_get(gt->uncore, FORCEWAKE_ALL);
	intel_rc6_sanitize(&gt->rc6);

	intel_rps_enable(&gt->rps);
	intel_llc_enable(&gt->llc);

	for_each_engine(engine, gt, id) {
		struct intel_context *ce;

		intel_engine_pm_get(engine);

		/* Scrub the stale kernel context state left by suspend */
		ce = engine->kernel_context;
		if (ce) {
			GEM_BUG_ON(!intel_context_is_pinned(ce));
			ce->ops->reset(ce);
		}

		engine->serial++; /* kernel context lost */
		err = engine->resume(engine);

		intel_engine_pm_put(engine);
		if (err) {
			dev_err(gt->i915->drm.dev,
				"Failed to restart %s (%d)\n",
				engine->name, err);
			break;
		}
	}

	/* Enable RC6 only after the engines are known to be up again */
	intel_rc6_enable(&gt->rc6);

	intel_uc_resume(&gt->uc);

	/* Restore any user forcewake hidden by intel_gt_suspend() */
	user_forcewake(gt, false);

	intel_uncore_forcewake_put(gt->uncore, FORCEWAKE_ALL);
	intel_gt_pm_put(gt);

	return err;
}
238
239 static void wait_for_idle(struct intel_gt *gt)
240 {
241         if (intel_gt_wait_for_idle(gt, I915_GEM_IDLE_TIMEOUT) == -ETIME) {
242                 /*
243                  * Forcibly cancel outstanding work and leave
244                  * the gpu quiet.
245                  */
246                 intel_gt_set_wedged(gt);
247         }
248
249         intel_gt_pm_wait_for_idle(gt);
250 }
251
/*
 * Quiesce and power down the GT ahead of system suspend: hide user
 * forcewake, wait for idle (wedging on timeout), suspend the uc, disable
 * power management and sanitize the engine state.
 */
void intel_gt_suspend(struct intel_gt *gt)
{
	intel_wakeref_t wakeref;

	/* Hide user wakerefs so the GT can be treated as idle below */
	user_forcewake(gt, true);

	/* We expect to be idle already; but also want to be independent */
	wait_for_idle(gt);

	intel_uc_suspend(&gt->uc);

	/* Hold the device awake while we shut down its PM features */
	with_intel_runtime_pm(gt->uncore->rpm, wakeref) {
		intel_rps_disable(&gt->rps);
		intel_rc6_disable(&gt->rc6);
		intel_llc_disable(&gt->llc);
	}

	/* Reset our state tracking to match the about-to-be-off hardware */
	intel_gt_sanitize(gt, false);

	GEM_TRACE("\n");
}
273
/* Runtime-pm suspend hook: only the microcontrollers need attention. */
void intel_gt_runtime_suspend(struct intel_gt *gt)
{
	intel_uc_runtime_suspend(&gt->uc);

	GEM_TRACE("\n");
}
280
281 int intel_gt_runtime_resume(struct intel_gt *gt)
282 {
283         GEM_TRACE("\n");
284
285         intel_gt_init_swizzling(gt);
286
287         return intel_uc_runtime_resume(&gt->uc);
288 }
289
290 #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
291 #include "selftest_gt_pm.c"
292 #endif