/*
 * Copyright 2020 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 * Authors: Sonny Jiang <sonny.jiang@amd.com>
 */

#include <linux/firmware.h>

#include "amdgpu.h"
#include "amdgpu_uvd.h"
#include "sid.h"

#include "uvd/uvd_3_1_d.h"
#include "uvd/uvd_3_1_sh_mask.h"

#include "oss/oss_1_0_d.h"
#include "oss/oss_1_0_sh_mask.h"

/**
 * uvd_v3_1_ring_get_rptr - get read pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware read pointer
 */
static uint64_t uvd_v3_1_ring_get_rptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_RPTR);
}

/**
 * uvd_v3_1_ring_get_wptr - get write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Returns the current hardware write pointer
 */
static uint64_t uvd_v3_1_ring_get_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        return RREG32(mmUVD_RBC_RB_WPTR);
}

/**
 * uvd_v3_1_ring_set_wptr - set write pointer
 *
 * @ring: amdgpu_ring pointer
 *
 * Commits the write pointer to the hardware
 */
static void uvd_v3_1_ring_set_wptr(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;

        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));
}

/**
 * uvd_v3_1_ring_emit_ib - execute indirect buffer
 *
 * @ring: amdgpu_ring pointer
 * @job: job associated with the indirect buffer (unused here)
 * @ib: indirect buffer to execute
 * @flags: flags associated with the indirect buffer (unused here)
 *
 * Write ring commands to execute the indirect buffer
 */
static void uvd_v3_1_ring_emit_ib(struct amdgpu_ring *ring,
                                  struct amdgpu_job *job,
                                  struct amdgpu_ib *ib,
                                  uint32_t flags)
{
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_BASE, 0));
        amdgpu_ring_write(ring, ib->gpu_addr);
        amdgpu_ring_write(ring, PACKET0(mmUVD_RBC_IB_SIZE, 0));
        amdgpu_ring_write(ring, ib->length_dw);
}

/**
 * uvd_v3_1_ring_emit_fence - emit a fence & trap command
 *
 * @ring: amdgpu_ring pointer
 * @addr: GPU address at which to write the fence value
 * @seq: sequence number to write
 * @flags: fence flags
 *
 * Write a fence and a trap command to the ring.
 */
static void uvd_v3_1_ring_emit_fence(struct amdgpu_ring *ring, u64 addr, u64 seq,
                                 unsigned flags)
{
        WARN_ON(flags & AMDGPU_FENCE_FLAG_64BIT);

        /* write the sequence number and fence address, then trigger
         * the fence write with VCPU command 0
         */
        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, seq);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, addr & 0xffffffff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, upper_32_bits(addr) & 0xff);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 0);

        /* emit a trap (VCPU command 2) to raise an interrupt */
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA0, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_DATA1, 0));
        amdgpu_ring_write(ring, 0);
        amdgpu_ring_write(ring, PACKET0(mmUVD_GPCOM_VCPU_CMD, 0));
        amdgpu_ring_write(ring, 2);
}

/**
 * uvd_v3_1_ring_test_ring - register write test
 *
 * @ring: amdgpu_ring pointer
 *
 * Test if we can successfully write to the context register
 */
static int uvd_v3_1_ring_test_ring(struct amdgpu_ring *ring)
{
        struct amdgpu_device *adev = ring->adev;
        uint32_t tmp = 0;
        unsigned i;
        int r;

        WREG32(mmUVD_CONTEXT_ID, 0xCAFEDEAD);
        r = amdgpu_ring_alloc(ring, 3);
        if (r)
                return r;

        amdgpu_ring_write(ring, PACKET0(mmUVD_CONTEXT_ID, 0));
        amdgpu_ring_write(ring, 0xDEADBEEF);
        amdgpu_ring_commit(ring);
        for (i = 0; i < adev->usec_timeout; i++) {
                tmp = RREG32(mmUVD_CONTEXT_ID);
                if (tmp == 0xDEADBEEF)
                        break;
                udelay(1);
        }

        if (i >= adev->usec_timeout)
                r = -ETIMEDOUT;

        return r;
}

static void uvd_v3_1_ring_insert_nop(struct amdgpu_ring *ring, uint32_t count)
{
        int i;

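        /* a UVD NOP is two dwords (a PACKET0 write of mmUVD_NO_OP plus
         * its value), so the ring position and the requested count must
         * stay even, hence the WARN_ON below
         */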
        WARN_ON(ring->wptr % 2 || count % 2);

        for (i = 0; i < count / 2; i++) {
                amdgpu_ring_write(ring, PACKET0(mmUVD_NO_OP, 0));
                amdgpu_ring_write(ring, 0);
        }
}

static const struct amdgpu_ring_funcs uvd_v3_1_ring_funcs = {
        .type = AMDGPU_RING_TYPE_UVD,
        .align_mask = 0xf,
        .support_64bit_ptrs = false,
        .no_user_fence = true,
        .get_rptr = uvd_v3_1_ring_get_rptr,
        .get_wptr = uvd_v3_1_ring_get_wptr,
        .set_wptr = uvd_v3_1_ring_set_wptr,
        .parse_cs = amdgpu_uvd_ring_parse_cs,
        .emit_frame_size =
                14, /* uvd_v3_1_ring_emit_fence x1 no user fence */
        .emit_ib_size = 4, /* uvd_v3_1_ring_emit_ib */
        .emit_ib = uvd_v3_1_ring_emit_ib,
        .emit_fence = uvd_v3_1_ring_emit_fence,
        .test_ring = uvd_v3_1_ring_test_ring,
        .test_ib = amdgpu_uvd_ring_test_ib,
        .insert_nop = uvd_v3_1_ring_insert_nop,
        .pad_ib = amdgpu_ring_generic_pad_ib,
        .begin_use = amdgpu_uvd_ring_begin_use,
        .end_use = amdgpu_uvd_ring_end_use,
};

static void uvd_v3_1_set_ring_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->ring.funcs = &uvd_v3_1_ring_funcs;
}

static void uvd_v3_1_set_dcm(struct amdgpu_device *adev,
                             bool sw_mode)
{
        u32 tmp, tmp2;

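        /* reconfigure dynamic clocking: sw_mode apparently selects
         * software-controlled clock ramping through UVD_CGC_CTRL2
         * (DYN_OCLK/DYN_RCLK ramp enables) instead of fully
         * hardware-managed gating
         */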
        WREG32_FIELD(UVD_CGC_GATE, REGS, 0);

        tmp = RREG32(mmUVD_CGC_CTRL);
        tmp &= ~(UVD_CGC_CTRL__CLK_OFF_DELAY_MASK | UVD_CGC_CTRL__CLK_GATE_DLY_TIMER_MASK);
        tmp |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK |
                (1 << UVD_CGC_CTRL__CLK_GATE_DLY_TIMER__SHIFT) |
                (4 << UVD_CGC_CTRL__CLK_OFF_DELAY__SHIFT);

        if (sw_mode) {
                tmp &= ~0x7ffff800;
                tmp2 = UVD_CGC_CTRL2__DYN_OCLK_RAMP_EN_MASK |
                        UVD_CGC_CTRL2__DYN_RCLK_RAMP_EN_MASK |
                        (7 << UVD_CGC_CTRL2__GATER_DIV_ID__SHIFT);
        } else {
                tmp |= 0x7ffff800;
                tmp2 = 0;
        }

        WREG32(mmUVD_CGC_CTRL, tmp);
        WREG32_UVD_CTX(ixUVD_CGC_CTRL2, tmp2);
}


/**
 * uvd_v3_1_mc_resume - memory controller programming
 *
 * @adev: amdgpu_device pointer
 *
 * Let the UVD memory controller know its offsets
 */
static void uvd_v3_1_mc_resume(struct amdgpu_device *adev)
{
        uint64_t addr;
        uint32_t size;

        /* program the VCPU memory controller bits 0-27 */
        addr = (adev->uvd.inst->gpu_addr + AMDGPU_UVD_FIRMWARE_OFFSET) >> 3;
        size = AMDGPU_UVD_FIRMWARE_SIZE(adev) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET0, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE0, size);

        addr += size;
        size = AMDGPU_UVD_HEAP_SIZE >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET1, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE1, size);

        addr += size;
        size = (AMDGPU_UVD_STACK_SIZE +
                (AMDGPU_UVD_SESSION_SIZE * adev->uvd.max_handles)) >> 3;
        WREG32(mmUVD_VCPU_CACHE_OFFSET2, addr);
        WREG32(mmUVD_VCPU_CACHE_SIZE2, size);

        /* bits 28-31 */
        addr = (adev->uvd.inst->gpu_addr >> 28) & 0xF;
        WREG32(mmUVD_LMI_ADDR_EXT, (addr << 12) | (addr << 0));

        /* bits 32-39 */
        addr = (adev->uvd.inst->gpu_addr >> 32) & 0xFF;
        WREG32(mmUVD_LMI_EXT40_ADDR, addr | (0x9 << 16) | (0x1 << 31));

        WREG32(mmUVD_UDEC_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
        WREG32(mmUVD_UDEC_DBW_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
}

/**
 * uvd_v3_1_fw_validate - FW validation operation
 *
 * @adev: amdgpu_device pointer
 *
 * Start UVD firmware validation and check the result.
 */
static int uvd_v3_1_fw_validate(struct amdgpu_device *adev)
{
        void *ptr;
        uint32_t ucode_len, i;
        uint32_t keysel;

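        /* the firmware image stores a 4-byte ucode length at a fixed
         * offset past the header, with the key selector placed right
         * after the ucode body (layout inferred from this parsing code)
         */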
        ptr = adev->uvd.inst[0].cpu_addr;
        ptr += 192 + 16;
        memcpy(&ucode_len, ptr, 4);
        ptr += ucode_len;
        memcpy(&keysel, ptr, 4);

        WREG32(mmUVD_FW_START, keysel);

        for (i = 0; i < 10; ++i) {
                mdelay(10);
                if (RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__DONE_MASK)
                        break;
        }

        if (i == 10)
                return -ETIMEDOUT;

        if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__PASS_MASK))
                return -EINVAL;

        for (i = 0; i < 10; ++i) {
                mdelay(10);
                if (!(RREG32(mmUVD_FW_STATUS) & UVD_FW_STATUS__BUSY_MASK))
                        break;
        }

        if (i == 10)
                return -ETIMEDOUT;

        return 0;
}

/**
 * uvd_v3_1_start - start UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Set up and start the UVD block
 */
static int uvd_v3_1_start(struct amdgpu_device *adev)
{
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t rb_bufsz;
        int i, j, r;
        u32 tmp;
        /* disable byte swapping */
        u32 lmi_swap_cntl = 0;
        u32 mp_swap_cntl = 0;

        /* set uvd busy */
        WREG32_P(mmUVD_STATUS, 1<<2, ~(1<<2));

        uvd_v3_1_set_dcm(adev, true);
        WREG32(mmUVD_CGC_GATE, 0);

        /* take UVD block out of reset */
        WREG32_P(mmSRBM_SOFT_RESET, 0, ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        /* enable VCPU clock */
        WREG32(mmUVD_VCPU_CNTL, 1 << 9);

        /* disable interrupts */
        WREG32_P(mmUVD_MASTINT_EN, 0, ~(1 << 1));

#ifdef __BIG_ENDIAN
        /* swap (8 in 32) RB and IB */
        lmi_swap_cntl = 0xa;
        mp_swap_cntl = 0;
#endif
        WREG32(mmUVD_LMI_SWAP_CNTL, lmi_swap_cntl);
        WREG32(mmUVD_MP_SWAP_CNTL, mp_swap_cntl);

        /* initialize UVD memory controller */
        WREG32(mmUVD_LMI_CTRL, 0x40 | (1 << 8) | (1 << 13) |
                (1 << 21) | (1 << 9) | (1 << 20));

        tmp = RREG32(mmUVD_MPC_CNTL);
        WREG32(mmUVD_MPC_CNTL, tmp | 0x10);

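        /* set up the MPC mux routing; the magic values below are kept
         * from the original programming sequence and are not documented
         * in this file
         */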
        WREG32(mmUVD_MPC_SET_MUXA0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXA1, 0x0);
        WREG32(mmUVD_MPC_SET_MUXB0, 0x40c2040);
        WREG32(mmUVD_MPC_SET_MUXB1, 0x0);
        WREG32(mmUVD_MPC_SET_ALU, 0);
        WREG32(mmUVD_MPC_SET_MUX, 0x88);

        tmp = RREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL);
        WREG32_UVD_CTX(ixUVD_LMI_CACHE_CTRL, tmp & (~0x10));

        /* enable UMC */
        WREG32_P(mmUVD_LMI_CTRL2, 0, ~(1 << 8));

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_SOFT_RESET_MASK);

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);

        mdelay(10);

        for (i = 0; i < 10; ++i) {
                uint32_t status;

                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(10);
                }
                r = 0;
                if (status & 2)
                        break;

                DRM_ERROR("UVD not responding, trying to reset the VCPU!!!\n");
                WREG32_P(mmUVD_SOFT_RESET, UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK,
                         ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                WREG32_P(mmUVD_SOFT_RESET, 0, ~UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK);
                mdelay(10);
                r = -1;
        }

        if (r) {
                DRM_ERROR("UVD not responding, giving up!!!\n");
                return r;
        }

        /* enable interrupts */
        WREG32_P(mmUVD_MASTINT_EN, 3<<1, ~(3 << 1));

        /* clear the uvd busy flag set above */
        WREG32_P(mmUVD_STATUS, 0, ~(1<<2));

        /* force RBC into idle state */
        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

        /* Set the write pointer delay */
        WREG32(mmUVD_RBC_RB_WPTR_CNTL, 0);

        /* program the 4GB memory segment for rptr and ring buffer */
        WREG32(mmUVD_LMI_EXT40_ADDR, upper_32_bits(ring->gpu_addr) |
               (0x7 << 16) | (0x1 << 31));

        /* Initialize the ring buffer's read and write pointers */
        WREG32(mmUVD_RBC_RB_RPTR, 0x0);

        ring->wptr = RREG32(mmUVD_RBC_RB_RPTR);
        WREG32(mmUVD_RBC_RB_WPTR, lower_32_bits(ring->wptr));

        /* set the ring address */
        WREG32(mmUVD_RBC_RB_BASE, ring->gpu_addr);

        /* Set ring buffer size */
        rb_bufsz = order_base_2(ring->ring_size);
        rb_bufsz = (0x1 << 8) | rb_bufsz;
        WREG32_P(mmUVD_RBC_RB_CNTL, rb_bufsz, ~0x11f1f);

        return 0;
}

/**
 * uvd_v3_1_stop - stop UVD block
 *
 * @adev: amdgpu_device pointer
 *
 * Stop the UVD block
 */
static void uvd_v3_1_stop(struct amdgpu_device *adev)
{
        uint32_t i, j;
        uint32_t status;

        WREG32(mmUVD_RBC_RB_CNTL, 0x11010101);

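        /* wait for the VCPU and memory interface to drain; the UVD_STATUS
         * poll mirrors the uvd_v3_1_start() handshake, while the exact
         * UVD_LMI_STATUS bit meanings are not documented in this file
         */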
        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_STATUS);
                        if (status & 2)
                                break;
                        mdelay(1);
                }
                if (status & 2)
                        break;
        }

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_LMI_STATUS);
                        if (status & 0xf)
                                break;
                        mdelay(1);
                }
                if (status & 0xf)
                        break;
        }

        /* Stall UMC and register bus before resetting VCPU */
        WREG32_P(mmUVD_LMI_CTRL2, 1 << 8, ~(1 << 8));

        for (i = 0; i < 10; ++i) {
                for (j = 0; j < 100; ++j) {
                        status = RREG32(mmUVD_LMI_STATUS);
                        if (status & 0x240)
                                break;
                        mdelay(1);
                }
                if (status & 0x240)
                        break;
        }

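        /* clear bit 2 of a register the original code leaves as the raw
         * offset 0x3D49; it is not named in the SI headers used here
         */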
        WREG32_P(0x3D49, 0, ~(1 << 2));

        /* disable the VCPU clock */
        WREG32_P(mmUVD_VCPU_CNTL, 0, ~(1 << 9));

        /* put LMI, VCPU, RBC etc... into reset */
        WREG32(mmUVD_SOFT_RESET, UVD_SOFT_RESET__LMI_SOFT_RESET_MASK |
                UVD_SOFT_RESET__VCPU_SOFT_RESET_MASK |
                UVD_SOFT_RESET__LMI_UMC_SOFT_RESET_MASK);

        WREG32(mmUVD_STATUS, 0);

        uvd_v3_1_set_dcm(adev, false);
}

static int uvd_v3_1_set_interrupt_state(struct amdgpu_device *adev,
                                        struct amdgpu_irq_src *source,
                                        unsigned type,
                                        enum amdgpu_interrupt_state state)
{
        return 0;
}

static int uvd_v3_1_process_interrupt(struct amdgpu_device *adev,
                                      struct amdgpu_irq_src *source,
                                      struct amdgpu_iv_entry *entry)
{
        DRM_DEBUG("IH: UVD TRAP\n");
        amdgpu_fence_process(&adev->uvd.inst->ring);
        return 0;
}

static const struct amdgpu_irq_src_funcs uvd_v3_1_irq_funcs = {
        .set = uvd_v3_1_set_interrupt_state,
        .process = uvd_v3_1_process_interrupt,
};

static void uvd_v3_1_set_irq_funcs(struct amdgpu_device *adev)
{
        adev->uvd.inst->irq.num_types = 1;
        adev->uvd.inst->irq.funcs = &uvd_v3_1_irq_funcs;
}

static int uvd_v3_1_early_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        adev->uvd.num_uvd_inst = 1;

        uvd_v3_1_set_ring_funcs(adev);
        uvd_v3_1_set_irq_funcs(adev);

        return 0;
}

static int uvd_v3_1_sw_init(void *handle)
{
        struct amdgpu_ring *ring;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        int r;

        /* UVD TRAP */
        r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 124, &adev->uvd.inst->irq);
        if (r)
                return r;

        r = amdgpu_uvd_sw_init(adev);
        if (r)
                return r;

        ring = &adev->uvd.inst->ring;
        sprintf(ring->name, "uvd");
        r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0,
                             AMDGPU_RING_PRIO_DEFAULT);
        if (r)
                return r;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        r = amdgpu_uvd_entity_init(adev);

        return r;
}

static int uvd_v3_1_sw_fini(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_suspend(adev);
        if (r)
                return r;

        return amdgpu_uvd_sw_fini(adev);
}

static void uvd_v3_1_enable_mgcg(struct amdgpu_device *adev,
                                 bool enable)
{
        u32 orig, data;

        if (enable && (adev->cg_flags & AMD_CG_SUPPORT_UVD_MGCG)) {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data |= 0x3fff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data |= UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        } else {
                data = RREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL);
                data &= ~0x3fff;
                WREG32_UVD_CTX(ixUVD_CGC_MEM_CTRL, data);

                orig = data = RREG32(mmUVD_CGC_CTRL);
                data &= ~UVD_CGC_CTRL__DYN_CLOCK_MODE_MASK;
                if (orig != data)
                        WREG32(mmUVD_CGC_CTRL, data);
        }
}

/**
 * uvd_v3_1_hw_init - start and test UVD block
 *
 * @handle: amdgpu_device pointer
 *
 * Initialize the hardware, boot up the VCPU and do some testing
 */
static int uvd_v3_1_hw_init(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;
        struct amdgpu_ring *ring = &adev->uvd.inst->ring;
        uint32_t tmp;
        int r;

        uvd_v3_1_mc_resume(adev);

        r = uvd_v3_1_fw_validate(adev);
        if (r) {
                DRM_ERROR("amdgpu: UVD Firmware validate fail (%d).\n", r);
                return r;
        }

        uvd_v3_1_enable_mgcg(adev, true);
        amdgpu_asic_set_uvd_clocks(adev, 53300, 40000);

        uvd_v3_1_start(adev);

        r = amdgpu_ring_test_helper(ring);
        if (r) {
                DRM_ERROR("amdgpu: UVD ring test fail (%d).\n", r);
                goto done;
        }

        r = amdgpu_ring_alloc(ring, 10);
        if (r) {
                DRM_ERROR("amdgpu: ring failed to lock UVD ring (%d).\n", r);
                goto done;
        }
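
        /* raise the semaphore wait/signal timeout limits to their
         * maximum (0xFFFFF) before enabling the semaphore block
         */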
        tmp = PACKET0(mmUVD_SEMA_WAIT_FAULT_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_WAIT_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        tmp = PACKET0(mmUVD_SEMA_SIGNAL_INCOMPLETE_TIMEOUT_CNTL, 0);
        amdgpu_ring_write(ring, tmp);
        amdgpu_ring_write(ring, 0xFFFFF);

        /* Clear timeout status bits */
        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_TIMEOUT_STATUS, 0));
        amdgpu_ring_write(ring, 0x8);

        amdgpu_ring_write(ring, PACKET0(mmUVD_SEMA_CNTL, 0));
        amdgpu_ring_write(ring, 3);

        amdgpu_ring_commit(ring);

done:
        if (!r)
                DRM_INFO("UVD initialized successfully.\n");

        return r;
}

/**
 * uvd_v3_1_hw_fini - stop the hardware block
 *
 * @handle: amdgpu_device pointer
 *
 * Stop the UVD block if it is still running
 */
static int uvd_v3_1_hw_fini(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        if (RREG32(mmUVD_STATUS) != 0)
                uvd_v3_1_stop(adev);

        return 0;
}

static int uvd_v3_1_suspend(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = uvd_v3_1_hw_fini(adev);
        if (r)
                return r;

        return amdgpu_uvd_suspend(adev);
}

static int uvd_v3_1_resume(void *handle)
{
        int r;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        r = amdgpu_uvd_resume(adev);
        if (r)
                return r;

        return uvd_v3_1_hw_init(adev);
}

static bool uvd_v3_1_is_idle(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        return !(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK);
}

static int uvd_v3_1_wait_for_idle(void *handle)
{
        unsigned i;
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        for (i = 0; i < adev->usec_timeout; i++) {
                if (!(RREG32(mmSRBM_STATUS) & SRBM_STATUS__UVD_BUSY_MASK))
                        return 0;
        }
        return -ETIMEDOUT;
}

static int uvd_v3_1_soft_reset(void *handle)
{
        struct amdgpu_device *adev = (struct amdgpu_device *)handle;

        uvd_v3_1_stop(adev);

        WREG32_P(mmSRBM_SOFT_RESET, SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK,
                 ~SRBM_SOFT_RESET__SOFT_RESET_UVD_MASK);
        mdelay(5);

        return uvd_v3_1_start(adev);
}

static int uvd_v3_1_set_clockgating_state(void *handle,
                                          enum amd_clockgating_state state)
{
        return 0;
}

static int uvd_v3_1_set_powergating_state(void *handle,
                                          enum amd_powergating_state state)
{
        return 0;
}

static const struct amd_ip_funcs uvd_v3_1_ip_funcs = {
        .name = "uvd_v3_1",
        .early_init = uvd_v3_1_early_init,
        .late_init = NULL,
        .sw_init = uvd_v3_1_sw_init,
        .sw_fini = uvd_v3_1_sw_fini,
        .hw_init = uvd_v3_1_hw_init,
        .hw_fini = uvd_v3_1_hw_fini,
        .suspend = uvd_v3_1_suspend,
        .resume = uvd_v3_1_resume,
        .is_idle = uvd_v3_1_is_idle,
        .wait_for_idle = uvd_v3_1_wait_for_idle,
        .soft_reset = uvd_v3_1_soft_reset,
        .set_clockgating_state = uvd_v3_1_set_clockgating_state,
        .set_powergating_state = uvd_v3_1_set_powergating_state,
};

const struct amdgpu_ip_block_version uvd_v3_1_ip_block = {
        .type = AMD_IP_BLOCK_TYPE_UVD,
        .major = 3,
        .minor = 1,
        .rev = 0,
        .funcs = &uvd_v3_1_ip_funcs,
};