drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c
/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 * Copyright 2008 Red Hat Inc.
 * Copyright 2009 Jerome Glisse.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "amdgpu_gfx.h"
#include "amdgpu_rlc.h"
#include "amdgpu_ras.h"

/* delay 0.1 second to enable gfx off feature */
#define GFX_OFF_DELAY_ENABLE         msecs_to_jiffies(100)

/*
 * GPU GFX IP block helper functions.
 */

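/*
 * Map a (mec, pipe, queue) triple to a flat bit index, laid out
 * mec-major, then pipe, then queue:
 *
 *   bit = (mec * num_pipe_per_mec + pipe) * num_queue_per_pipe + queue
 *
 * e.g. with 4 pipes per MEC and 8 queues per pipe, MEC 1 pipe 2 queue 3
 * maps to bit 1 * 32 + 2 * 8 + 3 = 51.
 */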
int amdgpu_gfx_mec_queue_to_bit(struct amdgpu_device *adev, int mec,
                                int pipe, int queue)
{
        int bit = 0;

        bit += mec * adev->gfx.mec.num_pipe_per_mec
                * adev->gfx.mec.num_queue_per_pipe;
        bit += pipe * adev->gfx.mec.num_queue_per_pipe;
        bit += queue;

        return bit;
}

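/*
 * Inverse of amdgpu_gfx_mec_queue_to_bit(): recover (mec, pipe, queue)
 * from a flat bit index. Continuing the example above, bit 51 with
 * 4 pipes per MEC and 8 queues per pipe gives queue = 51 % 8 = 3,
 * pipe = (51 / 8) % 4 = 2 and mec = (51 / 8) / 4 = 1.
 */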
void amdgpu_queue_mask_bit_to_mec_queue(struct amdgpu_device *adev, int bit,
                                 int *mec, int *pipe, int *queue)
{
        *queue = bit % adev->gfx.mec.num_queue_per_pipe;
        *pipe = (bit / adev->gfx.mec.num_queue_per_pipe)
                % adev->gfx.mec.num_pipe_per_mec;
        *mec = (bit / adev->gfx.mec.num_queue_per_pipe)
               / adev->gfx.mec.num_pipe_per_mec;
}

bool amdgpu_gfx_is_mec_queue_enabled(struct amdgpu_device *adev,
                                     int mec, int pipe, int queue)
{
        return test_bit(amdgpu_gfx_mec_queue_to_bit(adev, mec, pipe, queue),
                        adev->gfx.mec.queue_bitmap);
}

int amdgpu_gfx_me_queue_to_bit(struct amdgpu_device *adev,
                               int me, int pipe, int queue)
{
        int bit = 0;

        bit += me * adev->gfx.me.num_pipe_per_me
                * adev->gfx.me.num_queue_per_pipe;
        bit += pipe * adev->gfx.me.num_queue_per_pipe;
        bit += queue;

        return bit;
}

void amdgpu_gfx_bit_to_me_queue(struct amdgpu_device *adev, int bit,
                                int *me, int *pipe, int *queue)
{
        *queue = bit % adev->gfx.me.num_queue_per_pipe;
        *pipe = (bit / adev->gfx.me.num_queue_per_pipe)
                % adev->gfx.me.num_pipe_per_me;
        *me = (bit / adev->gfx.me.num_queue_per_pipe)
                / adev->gfx.me.num_pipe_per_me;
}

bool amdgpu_gfx_is_me_queue_enabled(struct amdgpu_device *adev,
                                    int me, int pipe, int queue)
{
        return test_bit(amdgpu_gfx_me_queue_to_bit(adev, me, pipe, queue),
                        adev->gfx.me.queue_bitmap);
}

/**
 * amdgpu_gfx_scratch_get - Allocate a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Allocate a CP scratch register for use by the driver (all asics).
 * Returns 0 on success or -EINVAL on failure.
 */
int amdgpu_gfx_scratch_get(struct amdgpu_device *adev, uint32_t *reg)
{
        int i;

        i = ffs(adev->gfx.scratch.free_mask);
        if (i != 0 && i <= adev->gfx.scratch.num_reg) {
                i--;
                adev->gfx.scratch.free_mask &= ~(1u << i);
                *reg = adev->gfx.scratch.reg_base + i;
                return 0;
        }
        return -EINVAL;
}

/**
 * amdgpu_gfx_scratch_free - Free a scratch register
 *
 * @adev: amdgpu_device pointer
 * @reg: scratch register mmio offset
 *
 * Free a CP scratch register allocated for use by the driver (all asics)
 */
void amdgpu_gfx_scratch_free(struct amdgpu_device *adev, uint32_t reg)
{
        adev->gfx.scratch.free_mask |= 1u << (reg - adev->gfx.scratch.reg_base);
}

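/*
 * A sketch of typical scratch register usage, as in the ASIC-specific
 * ring tests (the exact packet sequence is up to the ASIC code):
 *
 *   uint32_t scratch;
 *   int r = amdgpu_gfx_scratch_get(adev, &scratch);
 *
 *   if (r)
 *           return r;
 *   WREG32(scratch, 0xCAFEDEAD);
 *   ... emit a packet that writes a magic value to the register,
 *       then poll RREG32(scratch) until it changes ...
 *   amdgpu_gfx_scratch_free(adev, scratch);
 */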
/**
 * amdgpu_gfx_parse_disable_cu - Parse the disable_cu module parameter
 *
 * @mask: array in which the per-shader array disable masks will be stored
 * @max_se: number of SEs
 * @max_sh: number of SHs
 *
 * The bitmask of CUs to be disabled in the shader array determined by se and
 * sh is stored in mask[se * max_sh + sh].
 */
void amdgpu_gfx_parse_disable_cu(unsigned *mask, unsigned max_se, unsigned max_sh)
{
        unsigned se, sh, cu;
        const char *p;

        memset(mask, 0, sizeof(*mask) * max_se * max_sh);

        if (!amdgpu_disable_cu || !*amdgpu_disable_cu)
                return;

        p = amdgpu_disable_cu;
        for (;;) {
                char *next;
                int ret = sscanf(p, "%u.%u.%u", &se, &sh, &cu);

                if (ret < 3) {
                        DRM_ERROR("amdgpu: could not parse disable_cu\n");
                        return;
                }

                if (se < max_se && sh < max_sh && cu < 16) {
                        DRM_INFO("amdgpu: disabling CU %u.%u.%u\n", se, sh, cu);
                        mask[se * max_sh + sh] |= 1u << cu;
                } else {
                        DRM_ERROR("amdgpu: disable_cu %u.%u.%u is out of range\n",
                                  se, sh, cu);
                }

                next = strchr(p, ',');
                if (!next)
                        break;
                p = next + 1;
        }
}

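/*
 * Example: booting with amdgpu.disable_cu=0.0.4,0.1.7 disables CU 4 in
 * SE 0 / SH 0 and CU 7 in SE 0 / SH 1, i.e. for max_sh = 2 it sets
 * mask[0] |= 1u << 4 and mask[1] |= 1u << 7.
 */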
static bool amdgpu_gfx_is_multipipe_capable(struct amdgpu_device *adev)
{
        if (amdgpu_compute_multipipe != -1) {
                DRM_INFO("amdgpu: forcing compute pipe policy %d\n",
                         amdgpu_compute_multipipe);
                return amdgpu_compute_multipipe == 1;
        }

        /* FIXME: spreading the queues across pipes causes perf regressions
         * on POLARIS11 compute workloads */
        if (adev->asic_type == CHIP_POLARIS11)
                return false;

        return adev->gfx.mec.num_mec > 1;
}

bool amdgpu_gfx_is_high_priority_compute_queue(struct amdgpu_device *adev,
                                               struct amdgpu_ring *ring)
{
        /* Policy: use 1st queue as high priority compute queue if we
         * have more than one compute queue.
         */
        if (adev->gfx.num_compute_rings > 1 &&
            ring == &adev->gfx.compute_ring[0])
                return true;

        return false;
}

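/*
 * Pick which MEC queues the driver will own. Under the multipipe
 * policy the rings are spread round-robin across the pipes of MEC 1,
 * e.g. 8 rings on 4 pipes come out as pipe0.q0, pipe1.q0, pipe2.q0,
 * pipe3.q0, pipe0.q1, ...; otherwise the first
 * min(pipes * queues_per_pipe, num_compute_rings) bits are taken in
 * order, which packs all rings onto the lowest-numbered pipes.
 */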
void amdgpu_gfx_compute_queue_acquire(struct amdgpu_device *adev)
{
        int i, queue, pipe;
        bool multipipe_policy = amdgpu_gfx_is_multipipe_capable(adev);
        int max_queues_per_mec = min(adev->gfx.mec.num_pipe_per_mec *
                                     adev->gfx.mec.num_queue_per_pipe,
                                     adev->gfx.num_compute_rings);

        if (multipipe_policy) {
                /* policy: spread queues evenly across all pipes on MEC 1 only */
                for (i = 0; i < max_queues_per_mec; i++) {
                        pipe = i % adev->gfx.mec.num_pipe_per_mec;
                        queue = (i / adev->gfx.mec.num_pipe_per_mec) %
                                adev->gfx.mec.num_queue_per_pipe;

                        set_bit(pipe * adev->gfx.mec.num_queue_per_pipe + queue,
                                        adev->gfx.mec.queue_bitmap);
                }
        } else {
                /* policy: amdgpu owns all queues in the given pipe */
                for (i = 0; i < max_queues_per_mec; ++i)
                        set_bit(i, adev->gfx.mec.queue_bitmap);
        }

        dev_dbg(adev->dev, "mec queue bitmap weight=%d\n", bitmap_weight(adev->gfx.mec.queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES));
}

void amdgpu_gfx_graphics_queue_acquire(struct amdgpu_device *adev)
{
        int i, queue, me;

        for (i = 0; i < AMDGPU_MAX_GFX_QUEUES; ++i) {
                queue = i % adev->gfx.me.num_queue_per_pipe;
                me = (i / adev->gfx.me.num_queue_per_pipe)
                      / adev->gfx.me.num_pipe_per_me;

                if (me >= adev->gfx.me.num_me)
                        break;
                /* policy: amdgpu owns the first queue per pipe at this stage;
                 * this will be extended to multiple queues per pipe later */
                if (me == 0 && queue < 1)
                        set_bit(i, adev->gfx.me.queue_bitmap);
        }

        /* update the number of active graphics rings */
        adev->gfx.num_gfx_rings =
                bitmap_weight(adev->gfx.me.queue_bitmap, AMDGPU_MAX_GFX_QUEUES);
}

static int amdgpu_gfx_kiq_acquire(struct amdgpu_device *adev,
                                  struct amdgpu_ring *ring)
{
        int queue_bit;
        int mec, pipe, queue;

        queue_bit = adev->gfx.mec.num_mec
                    * adev->gfx.mec.num_pipe_per_mec
                    * adev->gfx.mec.num_queue_per_pipe;

        while (--queue_bit >= 0) {
                if (test_bit(queue_bit, adev->gfx.mec.queue_bitmap))
                        continue;

                amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

                /*
                 * 1. Using pipes 2/3 from MEC 2 seems to cause problems.
                 * 2. It must use queue id 0, because CGPG_IDLE/SAVE/LOAD/RUN
                 *    can only be issued on queue 0.
                 */
                if ((mec == 1 && pipe > 1) || queue != 0)
                        continue;

                ring->me = mec + 1;
                ring->pipe = pipe;
                ring->queue = queue;

                return 0;
        }

        dev_err(adev->dev, "Failed to find a queue for KIQ\n");
        return -EINVAL;
}

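/*
 * The KIQ (Kernel Interface Queue) is the privileged compute queue the
 * driver uses to talk to the CP: mapping/unmapping the other queues and,
 * under SR-IOV, reading/writing registers. It gets a ring, a doorbell
 * and the EOP buffer allocated in amdgpu_gfx_kiq_init() below, but no
 * GPU scheduler entity (note ring->no_scheduler).
 */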
int amdgpu_gfx_kiq_init_ring(struct amdgpu_device *adev,
                             struct amdgpu_ring *ring,
                             struct amdgpu_irq_src *irq)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        int r = 0;

        spin_lock_init(&kiq->ring_lock);

        ring->adev = NULL;
        ring->ring_obj = NULL;
        ring->use_doorbell = true;
        ring->doorbell_index = adev->doorbell_index.kiq;

        r = amdgpu_gfx_kiq_acquire(adev, ring);
        if (r)
                return r;

        ring->eop_gpu_addr = kiq->eop_gpu_addr;
        ring->no_scheduler = true;
        sprintf(ring->name, "kiq_%d.%d.%d", ring->me, ring->pipe, ring->queue);
        r = amdgpu_ring_init(adev, ring, 1024,
                             irq, AMDGPU_CP_KIQ_IRQ_DRIVER0,
                             AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                dev_warn(adev->dev, "(%d) failed to init kiq ring\n", r);

        return r;
}

void amdgpu_gfx_kiq_free_ring(struct amdgpu_ring *ring)
{
        amdgpu_ring_fini(ring);
}

void amdgpu_gfx_kiq_fini(struct amdgpu_device *adev)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        amdgpu_bo_free_kernel(&kiq->eop_obj, &kiq->eop_gpu_addr, NULL);
}

int amdgpu_gfx_kiq_init(struct amdgpu_device *adev,
                        unsigned hpd_size)
{
        int r;
        u32 *hpd;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;

        r = amdgpu_bo_create_kernel(adev, hpd_size, PAGE_SIZE,
                                    AMDGPU_GEM_DOMAIN_GTT, &kiq->eop_obj,
                                    &kiq->eop_gpu_addr, (void **)&hpd);
        if (r) {
                dev_warn(adev->dev, "failed to create KIQ bo (%d).\n", r);
                return r;
        }

        memset(hpd, 0, hpd_size);

        r = amdgpu_bo_reserve(kiq->eop_obj, true);
        if (unlikely(r != 0))
                dev_warn(adev->dev, "(%d) reserve kiq eop bo failed\n", r);
        amdgpu_bo_kunmap(kiq->eop_obj);
        amdgpu_bo_unreserve(kiq->eop_obj);

        return 0;
}

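/*
 * An MQD (memory queue descriptor) holds the persistent hardware state
 * of one queue: ring buffer address, doorbell, priority and so on. The
 * CP reads it when the queue is mapped, so every ring gets its own MQD
 * buffer object here, plus a CPU-side backup that is used to re-program
 * the queue on resume/reset.
 */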
/* create MQD for each compute/gfx queue */
int amdgpu_gfx_mqd_sw_init(struct amdgpu_device *adev,
                           unsigned mqd_size)
{
        struct amdgpu_ring *ring = NULL;
        int r, i;

        /* create MQD for KIQ */
        ring = &adev->gfx.kiq.ring;
        if (!ring->mqd_obj) {
                /* Originally the KIQ MQD was placed in the GTT domain, but
                 * under SR-IOV the VRAM domain is a must; otherwise the
                 * hypervisor's SAVE_VF fails after the driver is unloaded,
                 * because by then the MQD has been deallocated and the GART
                 * unbound. To avoid divergence, use the VRAM domain for the
                 * KIQ MQD on both SR-IOV and bare-metal.
                 */
                r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
                                            AMDGPU_GEM_DOMAIN_VRAM, &ring->mqd_obj,
                                            &ring->mqd_gpu_addr, &ring->mqd_ptr);
                if (r) {
                        dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
                        return r;
                }

                /* prepare MQD backup */
                adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS] = kmalloc(mqd_size, GFP_KERNEL);
                if (!adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS])
                        dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
        }

        if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
                /* create MQD for each KGQ */
                for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                        ring = &adev->gfx.gfx_ring[i];
                        if (!ring->mqd_obj) {
                                r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
                                                            AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
                                                            &ring->mqd_gpu_addr, &ring->mqd_ptr);
                                if (r) {
                                        dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
                                        return r;
                                }

                                /* prepare MQD backup */
                                adev->gfx.me.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
                                if (!adev->gfx.me.mqd_backup[i])
                                        dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
                        }
                }
        }

        /* create MQD for each KCQ */
        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
                if (!ring->mqd_obj) {
                        r = amdgpu_bo_create_kernel(adev, mqd_size, PAGE_SIZE,
                                                    AMDGPU_GEM_DOMAIN_GTT, &ring->mqd_obj,
                                                    &ring->mqd_gpu_addr, &ring->mqd_ptr);
                        if (r) {
                                dev_warn(adev->dev, "failed to create ring mqd bo (%d)", r);
                                return r;
                        }

                        /* prepare MQD backup */
                        adev->gfx.mec.mqd_backup[i] = kmalloc(mqd_size, GFP_KERNEL);
                        if (!adev->gfx.mec.mqd_backup[i])
                                dev_warn(adev->dev, "no memory to create MQD backup for ring %s\n", ring->name);
                }
        }

        return 0;
}

void amdgpu_gfx_mqd_sw_fini(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = NULL;
        int i;

        if (adev->asic_type >= CHIP_NAVI10 && amdgpu_async_gfx_ring) {
                for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
                        ring = &adev->gfx.gfx_ring[i];
                        kfree(adev->gfx.me.mqd_backup[i]);
                        amdgpu_bo_free_kernel(&ring->mqd_obj,
                                              &ring->mqd_gpu_addr,
                                              &ring->mqd_ptr);
                }
        }

        for (i = 0; i < adev->gfx.num_compute_rings; i++) {
                ring = &adev->gfx.compute_ring[i];
                kfree(adev->gfx.mec.mqd_backup[i]);
                amdgpu_bo_free_kernel(&ring->mqd_obj,
                                      &ring->mqd_gpu_addr,
                                      &ring->mqd_ptr);
        }

        ring = &adev->gfx.kiq.ring;
        kfree(adev->gfx.mec.mqd_backup[AMDGPU_MAX_COMPUTE_RINGS]);
        amdgpu_bo_free_kernel(&ring->mqd_obj,
                              &ring->mqd_gpu_addr,
                              &ring->mqd_ptr);
}

int amdgpu_gfx_disable_kcq(struct amdgpu_device *adev)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *kiq_ring = &kiq->ring;
        int i;

        if (!kiq->pmf || !kiq->pmf->kiq_unmap_queues)
                return -EINVAL;

        if (amdgpu_ring_alloc(kiq_ring, kiq->pmf->unmap_queues_size *
                                        adev->gfx.num_compute_rings))
                return -ENOMEM;

        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                kiq->pmf->kiq_unmap_queues(kiq_ring, &adev->gfx.compute_ring[i],
                                           RESET_QUEUES, 0, 0);

        return amdgpu_ring_test_helper(kiq_ring);
}

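/*
 * Note: the KIQ SET_RESOURCES packet appears to use a fixed queue-mask
 * layout of 4 pipes x 8 queues per MEC, regardless of how many pipes
 * and queues the ASIC actually exposes, hence the hard-coded strides
 * below rather than the adev->gfx.mec counts.
 */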
int amdgpu_queue_mask_bit_to_set_resource_bit(struct amdgpu_device *adev,
                                        int queue_bit)
{
        int mec, pipe, queue;
        int set_resource_bit = 0;

        amdgpu_queue_mask_bit_to_mec_queue(adev, queue_bit, &mec, &pipe, &queue);

        set_resource_bit = mec * 4 * 8 + pipe * 8 + queue;

        return set_resource_bit;
}

int amdgpu_gfx_enable_kcq(struct amdgpu_device *adev)
{
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *kiq_ring = &adev->gfx.kiq.ring;
        uint64_t queue_mask = 0;
        int r, i;

        if (!kiq->pmf || !kiq->pmf->kiq_map_queues || !kiq->pmf->kiq_set_resources)
                return -EINVAL;

        for (i = 0; i < AMDGPU_MAX_COMPUTE_QUEUES; ++i) {
                if (!test_bit(i, adev->gfx.mec.queue_bitmap))
                        continue;

                /* This situation may be hit in the future if a new HW
                 * generation exposes more than 64 queues. If so, the
                 * definition of queue_mask needs updating */
                if (WARN_ON(i >= (sizeof(queue_mask)*8))) {
                        DRM_ERROR("Invalid KCQ enabled: %d\n", i);
                        break;
                }

                queue_mask |= (1ull << amdgpu_queue_mask_bit_to_set_resource_bit(adev, i));
        }

        DRM_INFO("kiq ring mec %d pipe %d q %d\n", kiq_ring->me, kiq_ring->pipe,
                                                        kiq_ring->queue);

        r = amdgpu_ring_alloc(kiq_ring, kiq->pmf->map_queues_size *
                                        adev->gfx.num_compute_rings +
                                        kiq->pmf->set_resources_size);
        if (r) {
                DRM_ERROR("Failed to lock KIQ (%d).\n", r);
                return r;
        }

        kiq->pmf->kiq_set_resources(kiq_ring, queue_mask);
        for (i = 0; i < adev->gfx.num_compute_rings; i++)
                kiq->pmf->kiq_map_queues(kiq_ring, &adev->gfx.compute_ring[i]);

        r = amdgpu_ring_test_helper(kiq_ring);
        if (r)
                DRM_ERROR("KCQ enable failed\n");

        return r;
}

/**
 * amdgpu_gfx_off_ctrl - Handle gfx off feature enable/disable
 *
 * @adev: amdgpu_device pointer
 * @enable: true to enable the gfx off feature, false to disable it
 *
 * 1. The gfx off feature will be enabled by the gfx ip after gfx cg/pg is enabled.
 * 2. Other clients can send a request to disable the gfx off feature; the request should be honored.
 * 3. Other clients can cancel their request to disable the gfx off feature.
 * 4. Other clients should not request enabling the gfx off feature before having requested to disable it.
 */
void amdgpu_gfx_off_ctrl(struct amdgpu_device *adev, bool enable)
{
        if (!(adev->pm.pp_feature & PP_GFXOFF_MASK))
                return;

        mutex_lock(&adev->gfx.gfx_off_mutex);

        if (!enable)
                adev->gfx.gfx_off_req_count++;
        else if (adev->gfx.gfx_off_req_count > 0)
                adev->gfx.gfx_off_req_count--;

        if (enable && !adev->gfx.gfx_off_state && !adev->gfx.gfx_off_req_count) {
                schedule_delayed_work(&adev->gfx.gfx_off_delay_work, GFX_OFF_DELAY_ENABLE);
        } else if (!enable && adev->gfx.gfx_off_state) {
                if (!amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_GFX, false)) {
                        adev->gfx.gfx_off_state = false;

                        if (adev->gfx.funcs->init_spm_golden) {
                                dev_dbg(adev->dev, "GFXOFF is disabled, re-init SPM golden settings\n");
                                amdgpu_gfx_init_spm_golden(adev);
                        }
                }
        }

        mutex_unlock(&adev->gfx.gfx_off_mutex);
}

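/*
 * Callers must balance every disable request with a matching enable,
 * e.g. to keep the GFX block powered while touching its registers:
 *
 *   amdgpu_gfx_off_ctrl(adev, false);
 *   ... access GFX registers ...
 *   amdgpu_gfx_off_ctrl(adev, true);
 */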
int amdgpu_get_gfx_off_status(struct amdgpu_device *adev, uint32_t *value)
{
        int r = 0;

        mutex_lock(&adev->gfx.gfx_off_mutex);

        r = smu_get_status_gfxoff(adev, value);

        mutex_unlock(&adev->gfx.gfx_off_mutex);

        return r;
}

int amdgpu_gfx_ras_late_init(struct amdgpu_device *adev)
{
        int r;
        struct ras_fs_if fs_info = {
                .sysfs_name = "gfx_err_count",
        };
        struct ras_ih_if ih_info = {
                .cb = amdgpu_gfx_process_ras_data_cb,
        };
        struct ras_query_if info = { 0 };

        if (!adev->gfx.ras_if) {
                adev->gfx.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
                if (!adev->gfx.ras_if)
                        return -ENOMEM;
                adev->gfx.ras_if->block = AMDGPU_RAS_BLOCK__GFX;
                adev->gfx.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
                adev->gfx.ras_if->sub_block_index = 0;
                strcpy(adev->gfx.ras_if->name, "gfx");
        }
        fs_info.head = ih_info.head = *adev->gfx.ras_if;
        r = amdgpu_ras_late_init(adev, adev->gfx.ras_if,
                                 &fs_info, &ih_info);
        if (r)
                goto free;

        if (amdgpu_ras_is_supported(adev, adev->gfx.ras_if->block)) {
                if (adev->gmc.xgmi.connected_to_cpu) {
                        info.head = *adev->gfx.ras_if;
                        amdgpu_ras_query_error_status(adev, &info);
                } else {
                        amdgpu_ras_reset_error_status(adev, AMDGPU_RAS_BLOCK__GFX);
                }

                r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
                if (r)
                        goto late_fini;
        } else {
                /* free gfx ras_if if ras is not supported */
                r = 0;
                goto free;
        }

        return 0;
late_fini:
        amdgpu_ras_late_fini(adev, adev->gfx.ras_if, &ih_info);
free:
        kfree(adev->gfx.ras_if);
        adev->gfx.ras_if = NULL;
        return r;
}

void amdgpu_gfx_ras_fini(struct amdgpu_device *adev)
{
        if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX) &&
                        adev->gfx.ras_if) {
                struct ras_common_if *ras_if = adev->gfx.ras_if;
                struct ras_ih_if ih_info = {
                        .head = *ras_if,
                        .cb = amdgpu_gfx_process_ras_data_cb,
                };

                amdgpu_ras_late_fini(adev, ras_if, &ih_info);
                kfree(ras_if);
        }
}

int amdgpu_gfx_process_ras_data_cb(struct amdgpu_device *adev,
                void *err_data,
                struct amdgpu_iv_entry *entry)
{
        /* TODO: a UE will trigger an interrupt.
         *
         * When "Full RAS" is enabled, the per-IP interrupt sources should
         * be disabled and the driver should only look for the aggregated
         * interrupt via sync flood
         */
        if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__GFX)) {
                kgd2kfd_set_sram_ecc_flag(adev->kfd.dev);
                if (adev->gfx.funcs->query_ras_error_count)
                        adev->gfx.funcs->query_ras_error_count(adev, err_data);
                amdgpu_ras_reset_gpu(adev);
        }
        return AMDGPU_RAS_SUCCESS;
}

int amdgpu_gfx_cp_ecc_error_irq(struct amdgpu_device *adev,
                                  struct amdgpu_irq_src *source,
                                  struct amdgpu_iv_entry *entry)
{
        struct ras_common_if *ras_if = adev->gfx.ras_if;
        struct ras_dispatch_if ih_data = {
                .entry = entry,
        };

        if (!ras_if)
                return 0;

        ih_data.head = *ras_if;

        DRM_ERROR("CP ECC ERROR IRQ\n");
        amdgpu_ras_interrupt_dispatch(adev, &ih_data);
        return 0;
}

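/*
 * Read a register by going through the KIQ: emit a read packet that
 * makes the CP write the register value into a writeback slot
 * (reg_val_offs), then poll the fence and fetch the value from the
 * slot. Mainly used under SR-IOV, where the guest cannot access some
 * registers through MMIO directly.
 */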
uint32_t amdgpu_kiq_rreg(struct amdgpu_device *adev, uint32_t reg)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq, reg_val_offs = 0, value = 0;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        if (adev->in_pci_err_recovery)
                return 0;

        BUG_ON(!ring->funcs->emit_rreg);

        spin_lock_irqsave(&kiq->ring_lock, flags);
        if (amdgpu_device_wb_get(adev, &reg_val_offs)) {
                pr_err("critical bug! too many kiq readers\n");
                goto failed_unlock;
        }
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_rreg(ring, reg, reg_val_offs);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for gpu reset case because this way may
         * block gpu_recover() routine forever, e.g. this virt_kiq_rreg
         * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
         * never return if we keep waiting in virt_kiq_rreg, which causes
         * gpu_recover() to hang there.
         *
         * also don't wait anymore for IRQ context
         */
        if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
                goto failed_kiq_read;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_read;

        mb();
        value = adev->wb.wb[reg_val_offs];
        amdgpu_device_wb_free(adev, reg_val_offs);
        return value;

failed_undo:
        amdgpu_ring_undo(ring);
failed_unlock:
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_read:
        if (reg_val_offs)
                amdgpu_device_wb_free(adev, reg_val_offs);
        dev_err(adev->dev, "failed to read reg:%x\n", reg);
        return ~0;
}

void amdgpu_kiq_wreg(struct amdgpu_device *adev, uint32_t reg, uint32_t v)
{
        signed long r, cnt = 0;
        unsigned long flags;
        uint32_t seq;
        struct amdgpu_kiq *kiq = &adev->gfx.kiq;
        struct amdgpu_ring *ring = &kiq->ring;

        BUG_ON(!ring->funcs->emit_wreg);

        if (adev->in_pci_err_recovery)
                return;

        spin_lock_irqsave(&kiq->ring_lock, flags);
        amdgpu_ring_alloc(ring, 32);
        amdgpu_ring_emit_wreg(ring, reg, v);
        r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
        if (r)
                goto failed_undo;

        amdgpu_ring_commit(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);

        r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

        /* don't wait anymore for gpu reset case because this way may
         * block gpu_recover() routine forever, e.g. this virt_kiq_wreg
         * is triggered in TTM and ttm_bo_lock_delayed_workqueue() will
         * never return if we keep waiting in virt_kiq_wreg, which causes
         * gpu_recover() to hang there.
         *
         * also don't wait anymore for IRQ context
         */
        if (r < 1 && (amdgpu_in_reset(adev) || in_interrupt()))
                goto failed_kiq_write;

        might_sleep();
        while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {
                msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
                r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
        }

        if (cnt > MAX_KIQ_REG_TRY)
                goto failed_kiq_write;

        return;

failed_undo:
        amdgpu_ring_undo(ring);
        spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq_write:
        dev_err(adev->dev, "failed to write reg:%x\n", reg);
}

int amdgpu_gfx_get_num_kcq(struct amdgpu_device *adev)
{
        if (amdgpu_num_kcq == -1) {
                return 8;
        } else if (amdgpu_num_kcq > 8 || amdgpu_num_kcq < 0) {
                dev_warn(adev->dev, "set kernel compute queue number to 8 due to invalid parameter provided by user\n");
                return 8;
        }
        return amdgpu_num_kcq;
}

/**
 * amdgpu_gfx_state_change_set - Handle gfx power state change set
 * @adev: amdgpu_device pointer
 * @state: gfx power state (1 = sGpuChangeState_D0Entry, 2 = sGpuChangeState_D3Entry)
 */
void amdgpu_gfx_state_change_set(struct amdgpu_device *adev, enum gfx_change_state state)
{
        if (is_support_sw_smu(adev)) {
                smu_gfx_state_change_set(&adev->smu, state);
        } else {
                mutex_lock(&adev->pm.mutex);
                if (adev->powerplay.pp_funcs &&
                    adev->powerplay.pp_funcs->gfx_state_change_set)
                        adev->powerplay.pp_funcs->gfx_state_change_set(
                                adev->powerplay.pp_handle, state);
                mutex_unlock(&adev->pm.mutex);
        }
}