/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"

enum hqd_dequeue_request_type {
        NO_ACTION = 0,
        DRAIN_PIPE,
        RESET_WAVES
};

enum {
        MAX_TRAPID = 8,         /* 3 bits in the bitfield. */
        MAX_WATCH_ADDRESSES = 4
};

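/* Point the SRBM-indexed (per-queue) registers at the given MEC/pipe/queue/VMID.
 * srbm_mutex is held from lock_srbm() until the matching unlock_srbm().
 */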
static void lock_srbm(struct amdgpu_device *adev, uint32_t mec, uint32_t pipe,
                        uint32_t queue, uint32_t vmid)
{
        uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

        mutex_lock(&adev->srbm_mutex);
        WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct amdgpu_device *adev)
{
        WREG32(mmSRBM_GFX_CNTL, 0);
        mutex_unlock(&adev->srbm_mutex);
}

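/* Translate a flat KFD pipe_id into an MEC/pipe pair and select that queue's
 * registers via the SRBM index; release_queue() undoes the selection.
 */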
static void acquire_queue(struct amdgpu_device *adev, uint32_t pipe_id,
                                uint32_t queue_id)
{
        uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(adev, mec, pipe, queue_id, 0);
}

static void release_queue(struct amdgpu_device *adev)
{
        unlock_srbm(adev);
}

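/* Program the per-VMID shader memory aperture configuration (SH_MEM_*)
 * used by compute waves running under this VMID.
 */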
static void kgd_program_sh_mem_settings(struct amdgpu_device *adev, uint32_t vmid,
                                        uint32_t sh_mem_config,
                                        uint32_t sh_mem_ape1_base,
                                        uint32_t sh_mem_ape1_limit,
                                        uint32_t sh_mem_bases, uint32_t inst)
{
        lock_srbm(adev, 0, 0, 0, vmid);

        WREG32(mmSH_MEM_CONFIG, sh_mem_config);
        WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
        WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
        WREG32(mmSH_MEM_BASES, sh_mem_bases);

        unlock_srbm(adev);
}

static int kgd_set_pasid_vmid_mapping(struct amdgpu_device *adev, u32 pasid,
                                        unsigned int vmid, uint32_t inst)
{
        /*
         * We have to assume that there is no outstanding mapping.
         * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
         * a mapping is in progress or because a mapping finished and the
         * SW cleared it. So the protocol is to always wait & clear.
         */
        uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
                        ATC_VMID0_PASID_MAPPING__VALID_MASK;

        WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

        while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
                cpu_relax();
        WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

        /* Mapping vmid to pasid also for IH block */
        WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

        return 0;
}

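/* Enable time-stamp and opcode-error interrupts (CPC_INT_CNTL) on the
 * compute pipe that backs this pipe_id.
 */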
static int kgd_init_interrupts(struct amdgpu_device *adev, uint32_t pipe_id,
                                uint32_t inst)
{
        uint32_t mec;
        uint32_t pipe;

        mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
        pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

        lock_srbm(adev, mec, pipe, 0, 0);

        WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
                        CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

        unlock_srbm(adev);

        return 0;
}

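/* Compute the register offset of the SDMA RLC queue described by the MQD,
 * based on its engine and queue IDs.
 */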
static inline uint32_t get_sdma_rlc_reg_offset(struct cik_sdma_rlc_registers *m)
{
        uint32_t retval;

        retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
                        m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

        pr_debug("RLC register offset for SDMA%d RLC%d: 0x%x\n",
                        m->sdma_engine_id, m->sdma_queue_id, retval);

        return retval;
}

static inline struct cik_mqd *get_mqd(void *mqd)
{
        return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
        return (struct cik_sdma_rlc_registers *)mqd;
}

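/* Load a compute MQD into the HQD registers of the given pipe/queue,
 * optionally program the user-space write pointer, enable the doorbell and
 * activate the queue.
 */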
static int kgd_hqd_load(struct amdgpu_device *adev, void *mqd,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t __user *wptr, uint32_t wptr_shift,
                        uint32_t wptr_mask, struct mm_struct *mm, uint32_t inst)
{
        struct cik_mqd *m;
        uint32_t *mqd_hqd;
        uint32_t reg, wptr_val, data;
        bool valid_wptr = false;

        m = get_mqd(mqd);

        acquire_queue(adev, pipe_id, queue_id);

        /* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
        mqd_hqd = &m->cp_mqd_base_addr_lo;

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
                WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

        /* Copy userspace write pointer value to register.
         * Activate doorbell logic to monitor subsequent changes.
         */
        data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
                             CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

        /* read_user_wptr() may take the mm->mmap_lock.
         * Release srbm_mutex to avoid circular dependency between
         * srbm_mutex->mmap_lock->reservation_ww_class_mutex->srbm_mutex.
         */
        release_queue(adev);
        valid_wptr = read_user_wptr(mm, wptr, wptr_val);
        acquire_queue(adev, pipe_id, queue_id);
        if (valid_wptr)
                WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

        data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
        WREG32(mmCP_HQD_ACTIVE, data);

        release_queue(adev);

        return 0;
}

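/* Copy the HQD register block of a pipe/queue into a freshly allocated
 * (register offset, value) array; the caller is responsible for freeing it.
 */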
static int kgd_hqd_dump(struct amdgpu_device *adev,
                        uint32_t pipe_id, uint32_t queue_id,
                        uint32_t (**dump)[2], uint32_t *n_regs, uint32_t inst)
{
        uint32_t i = 0, reg;
#define HQD_N_REGS (35+4)
#define DUMP_REG(addr) do {                             \
                if (WARN_ON_ONCE(i >= HQD_N_REGS))      \
                        break;                          \
                (*dump)[i][0] = (addr) << 2;            \
                (*dump)[i++][1] = RREG32(addr);         \
        } while (0)

        *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        acquire_queue(adev, pipe_id, queue_id);

        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE0);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE1);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE2);
        DUMP_REG(mmCOMPUTE_STATIC_THREAD_MGMT_SE3);

        for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
                DUMP_REG(reg);

        release_queue(adev);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

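/* Load an SDMA RLC queue from its MQD: disable the ring buffer, wait for the
 * engine to go idle, then restore doorbell, pointers and ring-buffer state
 * and re-enable the ring.
 */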
static int kgd_hqd_sdma_load(struct amdgpu_device *adev, void *mqd,
                             uint32_t __user *wptr, struct mm_struct *mm)
{
        struct cik_sdma_rlc_registers *m;
        unsigned long end_jiffies;
        uint32_t sdma_rlc_reg_offset;
        uint32_t data;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                m->sdma_rlc_rb_cntl & (~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK));

        end_jiffies = msecs_to_jiffies(2000) + jiffies;
        while (true) {
                data = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (data & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        data = REG_SET_FIELD(m->sdma_rlc_doorbell, SDMA0_RLC0_DOORBELL,
                             ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, data);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR,
                                m->sdma_rlc_rb_rptr);

        if (read_user_wptr(mm, wptr, data))
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR, data);
        else
                WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_WPTR,
                       m->sdma_rlc_rb_rptr);

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_VIRTUAL_ADDR,
                                m->sdma_rlc_virtual_addr);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE, m->sdma_rlc_rb_base);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_BASE_HI,
                        m->sdma_rlc_rb_base_hi);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
                        m->sdma_rlc_rb_rptr_addr_lo);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
                        m->sdma_rlc_rb_rptr_addr_hi);

        data = REG_SET_FIELD(m->sdma_rlc_rb_cntl, SDMA0_RLC0_RB_CNTL,
                             RB_ENABLE, 1);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, data);

        return 0;
}

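/* Dump the SDMA RLC registers of an engine/queue pair into a caller-freed
 * (register offset, value) array.
 */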
static int kgd_hqd_sdma_dump(struct amdgpu_device *adev,
                             uint32_t engine_id, uint32_t queue_id,
                             uint32_t (**dump)[2], uint32_t *n_regs)
{
        uint32_t sdma_offset = engine_id * SDMA1_REGISTER_OFFSET +
                queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;
        uint32_t i = 0, reg;
#undef HQD_N_REGS
#define HQD_N_REGS (19+4)

        *dump = kmalloc_array(HQD_N_REGS, sizeof(**dump), GFP_KERNEL);
        if (*dump == NULL)
                return -ENOMEM;

        for (reg = mmSDMA0_RLC0_RB_CNTL; reg <= mmSDMA0_RLC0_DOORBELL; reg++)
                DUMP_REG(sdma_offset + reg);
        for (reg = mmSDMA0_RLC0_VIRTUAL_ADDR; reg <= mmSDMA0_RLC0_WATERMARK;
             reg++)
                DUMP_REG(sdma_offset + reg);

        WARN_ON_ONCE(i != HQD_N_REGS);
        *n_regs = i;

        return 0;
}

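/* Check whether the HQD on this pipe/queue is active and currently bound to
 * the given ring-buffer address.
 */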
static bool kgd_hqd_is_occupied(struct amdgpu_device *adev,
                                uint64_t queue_address, uint32_t pipe_id,
                                uint32_t queue_id, uint32_t inst)
{
        uint32_t act;
        bool retval = false;
        uint32_t low, high;

        acquire_queue(adev, pipe_id, queue_id);
        act = RREG32(mmCP_HQD_ACTIVE);
        if (act) {
                low = lower_32_bits(queue_address >> 8);
                high = upper_32_bits(queue_address >> 8);

                if (low == RREG32(mmCP_HQD_PQ_BASE) &&
                                high == RREG32(mmCP_HQD_PQ_BASE_HI))
                        retval = true;
        }
        release_queue(adev);
        return retval;
}

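/* An SDMA RLC queue is considered occupied while its ring buffer is enabled. */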
static bool kgd_hqd_sdma_is_occupied(struct amdgpu_device *adev, void *mqd)
{
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t sdma_rlc_rb_cntl;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        sdma_rlc_rb_cntl = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);

        if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
                return true;

        return false;
}

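/* Preempt (drain or reset) the HQD on the given pipe/queue and wait up to
 * utimeout milliseconds for it to become inactive. Returns -ETIME if the
 * queue does not deactivate in time and -EIO during a GPU reset.
 */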
static int kgd_hqd_destroy(struct amdgpu_device *adev, void *mqd,
                                enum kfd_preempt_type reset_type,
                                unsigned int utimeout, uint32_t pipe_id,
                                uint32_t queue_id, uint32_t inst)
{
        uint32_t temp;
        enum hqd_dequeue_request_type type;
        unsigned long flags, end_jiffies;
        int retry;

        if (amdgpu_in_reset(adev))
                return -EIO;

        acquire_queue(adev, pipe_id, queue_id);
        WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

        switch (reset_type) {
        case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
                type = DRAIN_PIPE;
                break;
        case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
                type = RESET_WAVES;
                break;
        default:
                type = DRAIN_PIPE;
                break;
        }

        /* Workaround: If IQ timer is active and the wait time is close to or
         * equal to 0, dequeueing is not safe. Wait until either the wait time
         * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
         * cleared before continuing. Also, ensure wait times are set to at
         * least 0x3.
         */
        local_irq_save(flags);
        preempt_disable();
        retry = 5000; /* wait for 500 usecs at maximum */
        while (true) {
                temp = RREG32(mmCP_HQD_IQ_TIMER);
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
                        pr_debug("HW is processing IQ\n");
                        goto loop;
                }
                if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
                                        == 3) /* SEM-rearm is safe */
                                break;
                        /* Wait time 3 is safe for CP, but our MMIO read/write
                         * time is close to 1 microsecond, so check for 10 to
                         * leave more buffer room
                         */
                        if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
                                        >= 10)
                                break;
                        pr_debug("IQ timer is active\n");
                } else
                        break;
loop:
                if (!retry) {
                        pr_err("CP HQD IQ timer status time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        retry = 1000;
        while (true) {
                temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
                if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
                        break;
                pr_debug("Dequeue request is pending\n");

                if (!retry) {
                        pr_err("CP HQD dequeue request time out\n");
                        break;
                }
                ndelay(100);
                --retry;
        }
        local_irq_restore(flags);
        preempt_enable();

        WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

        end_jiffies = (utimeout * HZ / 1000) + jiffies;
        while (true) {
                temp = RREG32(mmCP_HQD_ACTIVE);
                if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("cp queue preemption time out\n");
                        release_queue(adev);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        release_queue(adev);
        return 0;
}

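/* Stop an SDMA RLC queue and save its read pointer back into the MQD;
 * returns -ETIME if the engine does not go idle within utimeout milliseconds.
 */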
static int kgd_hqd_sdma_destroy(struct amdgpu_device *adev, void *mqd,
                                unsigned int utimeout)
{
        struct cik_sdma_rlc_registers *m;
        uint32_t sdma_rlc_reg_offset;
        uint32_t temp;
        unsigned long end_jiffies = (utimeout * HZ / 1000) + jiffies;

        m = get_sdma_mqd(mqd);
        sdma_rlc_reg_offset = get_sdma_rlc_reg_offset(m);

        temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL);
        temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL, temp);

        while (true) {
                temp = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_CONTEXT_STATUS);
                if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
                        break;
                if (time_after(jiffies, end_jiffies)) {
                        pr_err("SDMA RLC not idle in %s\n", __func__);
                        return -ETIME;
                }
                usleep_range(500, 1000);
        }

        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_DOORBELL, 0);
        WREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL,
                RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_CNTL) |
                SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK);

        m->sdma_rlc_rb_rptr = RREG32(sdma_rlc_reg_offset + mmSDMA0_RLC0_RB_RPTR);

        return 0;
}

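/* Issue an SQ_CMD wave-control command to the shader engines selected by
 * gfx_index_val, then restore GRBM_GFX_INDEX to broadcast mode.
 */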
static int kgd_wave_control_execute(struct amdgpu_device *adev,
                                        uint32_t gfx_index_val,
                                        uint32_t sq_cmd, uint32_t inst)
{
        uint32_t data;

        mutex_lock(&adev->grbm_idx_mutex);

        WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
        WREG32(mmSQ_CMD, sq_cmd);

        /* Restore the GRBM_GFX_INDEX register */

        data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
                GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

        WREG32(mmGRBM_GFX_INDEX, data);

        mutex_unlock(&adev->grbm_idx_mutex);

        return 0;
}

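/* Read the ATC VMID->PASID mapping for a VMID; returns true if the mapping
 * is valid and stores the PASID in *p_pasid.
 */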
static bool get_atc_vmid_pasid_mapping_info(struct amdgpu_device *adev,
                                        uint8_t vmid, uint16_t *p_pasid)
{
        uint32_t value;

        value = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
        *p_pasid = value & ATC_VMID0_PASID_MAPPING__PASID_MASK;

        return !!(value & ATC_VMID0_PASID_MAPPING__VALID_MASK);
}

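/* Program the scratch (private memory) backing virtual address for a VMID. */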
static void set_scratch_backing_va(struct amdgpu_device *adev,
                                        uint64_t va, uint32_t vmid)
{
        lock_srbm(adev, 0, 0, 0, vmid);
        WREG32(mmSH_HIDDEN_PRIVATE_BASE_VMID, va);
        unlock_srbm(adev);
}

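/* Set the page table base address for a KFD-owned VMID; on CIK the KFD VMIDs
 * map onto the VM_CONTEXT8..15 page table base registers.
 */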
static void set_vm_context_page_table_base(struct amdgpu_device *adev,
                        uint32_t vmid, uint64_t page_table_base)
{
        if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid)) {
                pr_err("trying to set page table base for wrong VMID\n");
                return;
        }
        WREG32(mmVM_CONTEXT8_PAGE_TABLE_BASE_ADDR + vmid - 8,
                lower_32_bits(page_table_base));
}

/**
 * read_vmid_from_vmfault_reg - read the VMID from the VM fault status register
 *
 * @adev: amdgpu_device pointer
 *
 * Returns the VMID field of VM_CONTEXT1_PROTECTION_FAULT_STATUS (CIK).
 */
static uint32_t read_vmid_from_vmfault_reg(struct amdgpu_device *adev)
{
        uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);

        return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
}

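/* KFD->KGD callback table for GFX7 (CIK) devices. */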
const struct kfd2kgd_calls gfx_v7_kfd2kgd = {
        .program_sh_mem_settings = kgd_program_sh_mem_settings,
        .set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
        .init_interrupts = kgd_init_interrupts,
        .hqd_load = kgd_hqd_load,
        .hqd_sdma_load = kgd_hqd_sdma_load,
        .hqd_dump = kgd_hqd_dump,
        .hqd_sdma_dump = kgd_hqd_sdma_dump,
        .hqd_is_occupied = kgd_hqd_is_occupied,
        .hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
        .hqd_destroy = kgd_hqd_destroy,
        .hqd_sdma_destroy = kgd_hqd_sdma_destroy,
        .wave_control_execute = kgd_wave_control_execute,
        .get_atc_vmid_pasid_mapping_info = get_atc_vmid_pasid_mapping_info,
        .set_scratch_backing_va = set_scratch_backing_va,
        .set_vm_context_page_table_base = set_vm_context_page_table_base,
        .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
};