/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/fdtable.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include <drm/drmP.h>
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"
#include "cikd.h"
#include "cik_sdma.h"
#include "amdgpu_ucode.h"
#include "gfx_v7_0.h"
#include "gca/gfx_7_2_d.h"
#include "gca/gfx_7_2_enum.h"
#include "gca/gfx_7_2_sh_mask.h"
#include "oss/oss_2_0_d.h"
#include "oss/oss_2_0_sh_mask.h"
#include "gmc/gmc_7_1_d.h"
#include "gmc/gmc_7_1_sh_mask.h"
#include "cik_structs.h"
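
/* Dequeue request codes; the chosen value is written verbatim to
 * CP_HQD_DEQUEUE_REQUEST by kgd_hqd_destroy() below.
 */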
enum hqd_dequeue_request_type {
	NO_ACTION = 0,
	DRAIN_PIPE,
	RESET_WAVES
};

enum {
	MAX_TRAPID = 8,		/* 3 bits in the bitfield. */
	MAX_WATCH_ADDRESSES = 4
};
enum {
	ADDRESS_WATCH_REG_ADDR_HI = 0,
	ADDRESS_WATCH_REG_ADDR_LO,
	ADDRESS_WATCH_REG_CNTL,
	ADDRESS_WATCH_REG_MAX
};
/* not defined in the CI/KV reg file */
enum {
	ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
	ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
	ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
	/* extend the mask to 26 bits to match the low address field */
	ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
	ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
};
static const uint32_t watchRegs[MAX_WATCH_ADDRESSES * ADDRESS_WATCH_REG_MAX] = {
	mmTCP_WATCH0_ADDR_H, mmTCP_WATCH0_ADDR_L, mmTCP_WATCH0_CNTL,
	mmTCP_WATCH1_ADDR_H, mmTCP_WATCH1_ADDR_L, mmTCP_WATCH1_CNTL,
	mmTCP_WATCH2_ADDR_H, mmTCP_WATCH2_ADDR_L, mmTCP_WATCH2_CNTL,
	mmTCP_WATCH3_ADDR_H, mmTCP_WATCH3_ADDR_L, mmTCP_WATCH3_CNTL
};
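
/* Host-side view of a TCP_WATCHx_CNTL register. The bitfield layout below
 * mirrors the hardware fields used by the address-watch functions: a 24-bit
 * address mask, the target VMID, the ATC bit, the watch mode and a valid bit.
 */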
union TCP_WATCH_CNTL_BITS {
	struct {
		uint32_t mask:24;
		uint32_t vmid:4;
		uint32_t atc:1;
		uint32_t mode:2;
		uint32_t valid:1;
	} bitfields, bits;
	uint32_t u32All;
	signed int i32All;
	float f32All;
};
/*
 * Register access functions
 */
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
		uint32_t sh_mem_config, uint32_t sh_mem_ape1_base,
		uint32_t sh_mem_ape1_limit, uint32_t sh_mem_bases);

static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
		unsigned int vmid);

static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
		uint32_t hpd_size, uint64_t hpd_gpu_addr);
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id);
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm);
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd);
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id);

static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id);
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd);
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout);
static int kgd_address_watch_disable(struct kgd_dev *kgd);
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo);
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd);
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset);

static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd, uint8_t vmid);
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
		uint8_t vmid);
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid);

static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type);
static const struct kfd2kgd_calls kfd2kgd = {
	.init_gtt_mem_allocation = alloc_gtt_mem,
	.free_gtt_mem = free_gtt_mem,
	.get_vmem_size = get_vmem_size,
	.get_gpu_clock_counter = get_gpu_clock_counter,
	.get_max_engine_clock_in_mhz = get_max_engine_clock_in_mhz,
	.program_sh_mem_settings = kgd_program_sh_mem_settings,
	.set_pasid_vmid_mapping = kgd_set_pasid_vmid_mapping,
	.init_pipeline = kgd_init_pipeline,
	.init_interrupts = kgd_init_interrupts,
	.hqd_load = kgd_hqd_load,
	.hqd_sdma_load = kgd_hqd_sdma_load,
	.hqd_is_occupied = kgd_hqd_is_occupied,
	.hqd_sdma_is_occupied = kgd_hqd_sdma_is_occupied,
	.hqd_destroy = kgd_hqd_destroy,
	.hqd_sdma_destroy = kgd_hqd_sdma_destroy,
	.address_watch_disable = kgd_address_watch_disable,
	.address_watch_execute = kgd_address_watch_execute,
	.wave_control_execute = kgd_wave_control_execute,
	.address_watch_get_offset = kgd_address_watch_get_offset,
	.get_atc_vmid_pasid_mapping_pasid = get_atc_vmid_pasid_mapping_pasid,
	.get_atc_vmid_pasid_mapping_valid = get_atc_vmid_pasid_mapping_valid,
	.write_vmid_invalidate_request = write_vmid_invalidate_request,
	.get_fw_version = get_fw_version
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
{
	return (struct kfd2kgd_calls *)&kfd2kgd;
}
static inline struct amdgpu_device *get_amdgpu_device(struct kgd_dev *kgd)
{
	return (struct amdgpu_device *)kgd;
}
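
/* SRBM_GFX_CNTL banks the CP queue registers by ME, pipe, queue and VMID.
 * lock_srbm()/unlock_srbm() select a bank and serialize all banked accesses
 * through adev->srbm_mutex.
 */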
static void lock_srbm(struct kgd_dev *kgd, uint32_t mec, uint32_t pipe,
			uint32_t queue, uint32_t vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t value = PIPEID(pipe) | MEID(mec) | VMID(vmid) | QUEUEID(queue);

	mutex_lock(&adev->srbm_mutex);
	WREG32(mmSRBM_GFX_CNTL, value);
}

static void unlock_srbm(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	WREG32(mmSRBM_GFX_CNTL, 0);
	mutex_unlock(&adev->srbm_mutex);
}
static void acquire_queue(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	uint32_t mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	uint32_t pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, queue_id, 0);
}

static void release_queue(struct kgd_dev *kgd)
{
	unlock_srbm(kgd);
}
static void kgd_program_sh_mem_settings(struct kgd_dev *kgd, uint32_t vmid,
					uint32_t sh_mem_config,
					uint32_t sh_mem_ape1_base,
					uint32_t sh_mem_ape1_limit,
					uint32_t sh_mem_bases)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	lock_srbm(kgd, 0, 0, 0, vmid);

	WREG32(mmSH_MEM_CONFIG, sh_mem_config);
	WREG32(mmSH_MEM_APE1_BASE, sh_mem_ape1_base);
	WREG32(mmSH_MEM_APE1_LIMIT, sh_mem_ape1_limit);
	WREG32(mmSH_MEM_BASES, sh_mem_bases);

	unlock_srbm(kgd);
}
static int kgd_set_pasid_vmid_mapping(struct kgd_dev *kgd, unsigned int pasid,
					unsigned int vmid)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);

	/*
	 * We have to assume that there is no outstanding mapping.
	 * The ATC_VMID_PASID_MAPPING_UPDATE_STATUS bit could be 0 because
	 * a mapping is in progress or because a mapping finished and the
	 * SW cleared it. So the protocol is to always wait & clear.
	 */
	uint32_t pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID0_PASID_MAPPING__VALID_MASK;

	WREG32(mmATC_VMID0_PASID_MAPPING + vmid, pasid_mapping);

	while (!(RREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS) & (1U << vmid)))
		cpu_relax();
	WREG32(mmATC_VMID_PASID_MAPPING_UPDATE_STATUS, 1U << vmid);

	/* Mapping vmid to pasid also for IH block */
	WREG32(mmIH_VMID_0_LUT + vmid, pasid_mapping);

	return 0;
}
static int kgd_init_pipeline(struct kgd_dev *kgd, uint32_t pipe_id,
				uint32_t hpd_size, uint64_t hpd_gpu_addr)
{
	/* amdgpu owns the per-pipe state */
	return 0;
}
static int kgd_init_interrupts(struct kgd_dev *kgd, uint32_t pipe_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t mec;
	uint32_t pipe;

	mec = (pipe_id / adev->gfx.mec.num_pipe_per_mec) + 1;
	pipe = (pipe_id % adev->gfx.mec.num_pipe_per_mec);

	lock_srbm(kgd, mec, pipe, 0, 0);

	WREG32(mmCPC_INT_CNTL, CP_INT_CNTL_RING0__TIME_STAMP_INT_ENABLE_MASK |
			CP_INT_CNTL_RING0__OPCODE_ERROR_INT_ENABLE_MASK);

	unlock_srbm(kgd);

	return 0;
}
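
/* Each SDMA engine exposes one register block per RLC queue; the MMIO base
 * of a queue is computed from the engine and queue IDs stored in the MQD.
 */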
static inline uint32_t get_sdma_base_addr(struct cik_sdma_rlc_registers *m)
{
	uint32_t retval;

	retval = m->sdma_engine_id * SDMA1_REGISTER_OFFSET +
			m->sdma_queue_id * KFD_CIK_SDMA_QUEUE_OFFSET;

	pr_debug("kfd: sdma base address: 0x%x\n", retval);

	return retval;
}
static inline struct cik_mqd *get_mqd(void *mqd)
{
	return (struct cik_mqd *)mqd;
}

static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
{
	return (struct cik_sdma_rlc_registers *)mqd;
}
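
/* Load a compute queue: copy the MQD (memory queue descriptor) saved by KFD
 * into the HQD (hardware queue descriptor) registers of the selected
 * pipe/queue, re-arm the doorbell and write pointer, then activate the HQD.
 */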
static int kgd_hqd_load(struct kgd_dev *kgd, void *mqd, uint32_t pipe_id,
			uint32_t queue_id, uint32_t __user *wptr,
			uint32_t wptr_shift, uint32_t wptr_mask,
			struct mm_struct *mm)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_mqd *m;
	uint32_t *mqd_hqd;
	uint32_t reg, wptr_val, data;

	m = get_mqd(mqd);

	acquire_queue(kgd, pipe_id, queue_id);

	/* HQD registers extend from CP_MQD_BASE_ADDR to CP_MQD_CONTROL. */
	mqd_hqd = &m->cp_mqd_base_addr_lo;

	for (reg = mmCP_MQD_BASE_ADDR; reg <= mmCP_MQD_CONTROL; reg++)
		WREG32(reg, mqd_hqd[reg - mmCP_MQD_BASE_ADDR]);

	/* Copy userspace write pointer value to register.
	 * Activate doorbell logic to monitor subsequent changes.
	 */
	data = REG_SET_FIELD(m->cp_hqd_pq_doorbell_control,
			     CP_HQD_PQ_DOORBELL_CONTROL, DOORBELL_EN, 1);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, data);

	if (read_user_wptr(mm, wptr, wptr_val))
		WREG32(mmCP_HQD_PQ_WPTR, (wptr_val << wptr_shift) & wptr_mask);

	data = REG_SET_FIELD(m->cp_hqd_active, CP_HQD_ACTIVE, ACTIVE, 1);
	WREG32(mmCP_HQD_ACTIVE, data);

	release_queue(kgd);

	return 0;
}
static int kgd_hqd_sdma_load(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_VIRTUAL_ADDR,
			m->sdma_rlc_virtual_addr);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE,
			m->sdma_rlc_rb_base);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE_HI,
			m->sdma_rlc_rb_base_hi);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_LO,
			m->sdma_rlc_rb_rptr_addr_lo);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR_ADDR_HI,
			m->sdma_rlc_rb_rptr_addr_hi);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL,
			m->sdma_rlc_doorbell);

	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL,
			m->sdma_rlc_rb_cntl);

	return 0;
}
static bool kgd_hqd_is_occupied(struct kgd_dev *kgd, uint64_t queue_address,
				uint32_t pipe_id, uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t act;
	bool retval = false;
	uint32_t low, high;

	acquire_queue(kgd, pipe_id, queue_id);
	act = RREG32(mmCP_HQD_ACTIVE);
	if (act) {
		low = lower_32_bits(queue_address >> 8);
		high = upper_32_bits(queue_address >> 8);

		if (low == RREG32(mmCP_HQD_PQ_BASE) &&
				high == RREG32(mmCP_HQD_PQ_BASE_HI))
			retval = true;
	}
	release_queue(kgd);
	return retval;
}
static bool kgd_hqd_sdma_is_occupied(struct kgd_dev *kgd, void *mqd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t sdma_rlc_rb_cntl;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	sdma_rlc_rb_cntl = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);

	if (sdma_rlc_rb_cntl & SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK)
		return true;

	return false;
}
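
/* Preempt a compute queue: issue a dequeue request to the CP and poll
 * CP_HQD_ACTIVE until the HQD drains, returning -ETIME if the queue is
 * still active after the caller-supplied timeout (in ms).
 */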
static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
			enum kfd_preempt_type reset_type,
			unsigned int utimeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t temp;
	enum hqd_dequeue_request_type type;
	unsigned long flags, end_jiffies;
	int retry;

	acquire_queue(kgd, pipe_id, queue_id);
	WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);

	switch (reset_type) {
	case KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN:
		type = DRAIN_PIPE;
		break;
	case KFD_PREEMPT_TYPE_WAVEFRONT_RESET:
		type = RESET_WAVES;
		break;
	default:
		type = DRAIN_PIPE;
		break;
	}

	/* Workaround: If IQ timer is active and the wait time is close to or
	 * equal to 0, dequeueing is not safe. Wait until either the wait time
	 * is larger or timer is cleared. Also, ensure that IQ_REQ_PEND is
	 * cleared before continuing. Also, ensure wait times are set to at
	 * least 0x3.
	 */
	local_irq_save(flags);
	preempt_disable();
	retry = 5000; /* wait for 500 usecs at maximum */
	while (true) {
		temp = RREG32(mmCP_HQD_IQ_TIMER);
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, PROCESSING_IQ)) {
			pr_debug("HW is processing IQ\n");
			goto loop;
		}
		if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, ACTIVE)) {
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, RETRY_TYPE)
					== 3) /* SEM-rearm is safe */
				break;
			/* Wait time 3 is safe for CP, but our MMIO read/write
			 * time is close to 1 microsecond, so check for 10 to
			 * leave more buffer room
			 */
			if (REG_GET_FIELD(temp, CP_HQD_IQ_TIMER, WAIT_TIME)
					>= 10)
				break;
			pr_debug("IQ timer is active\n");
		} else
			break;
loop:
		if (!retry) {
			pr_err("CP HQD IQ timer status time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	retry = 1000;
	while (true) {
		temp = RREG32(mmCP_HQD_DEQUEUE_REQUEST);
		if (!(temp & CP_HQD_DEQUEUE_REQUEST__IQ_REQ_PEND_MASK))
			break;
		pr_debug("Dequeue request is pending\n");
		if (!retry) {
			pr_err("CP HQD dequeue request time out\n");
			break;
		}
		ndelay(100);
		--retry;
	}
	local_irq_restore(flags);
	preempt_enable();

	WREG32(mmCP_HQD_DEQUEUE_REQUEST, type);

	end_jiffies = (utimeout * HZ / 1000) + jiffies;
	while (true) {
		temp = RREG32(mmCP_HQD_ACTIVE);
		if (!(temp & CP_HQD_ACTIVE__ACTIVE_MASK))
			break;
		if (time_after(jiffies, end_jiffies)) {
			pr_err("cp queue preemption time out\n");
			release_queue(kgd);
			return -ETIME;
		}
		usleep_range(500, 1000);
	}

	release_queue(kgd);
	return 0;
}
static int kgd_hqd_sdma_destroy(struct kgd_dev *kgd, void *mqd,
				unsigned int utimeout)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	struct cik_sdma_rlc_registers *m;
	uint32_t sdma_base_addr;
	uint32_t temp;
	int timeout = utimeout;

	m = get_sdma_mqd(mqd);
	sdma_base_addr = get_sdma_base_addr(m);

	temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL);
	temp = temp & ~SDMA0_RLC0_RB_CNTL__RB_ENABLE_MASK;
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_CNTL, temp);

	while (true) {
		temp = RREG32(sdma_base_addr + mmSDMA0_RLC0_CONTEXT_STATUS);
		/* test the idle bit with its _MASK, not its __SHIFT value */
		if (temp & SDMA0_RLC0_CONTEXT_STATUS__IDLE_MASK)
			break;
		if (timeout <= 0)
			return -ETIME;
		msleep(20);
		timeout -= 20;
	}

	WREG32(sdma_base_addr + mmSDMA0_RLC0_DOORBELL, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_RPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_WPTR, 0);
	WREG32(sdma_base_addr + mmSDMA0_RLC0_RB_BASE, 0);

	return 0;
}
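
/* Address-watch points: each of the MAX_WATCH_ADDRESSES watch points is
 * programmed through its ADDR_H/ADDR_L/CNTL register triple taken from the
 * watchRegs table; a watch point only fires while its CNTL valid bit is set.
 */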
static int kgd_address_watch_disable(struct kgd_dev *kgd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;
	unsigned int i;

	cntl.u32All = 0;

	cntl.bitfields.valid = 0;
	cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
	cntl.bitfields.atc = 1;

	/* Turning off this address until we set all the registers */
	for (i = 0; i < MAX_WATCH_ADDRESSES; i++)
		WREG32(watchRegs[i * ADDRESS_WATCH_REG_MAX +
			ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}
static int kgd_address_watch_execute(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					uint32_t cntl_val,
					uint32_t addr_hi,
					uint32_t addr_lo)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	union TCP_WATCH_CNTL_BITS cntl;

	cntl.u32All = cntl_val;

	/* Turning off this watch point until we set all the registers */
	cntl.bitfields.valid = 0;
	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_HI], addr_hi);

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_ADDR_LO], addr_lo);

	/* Enable the watch point */
	cntl.bitfields.valid = 1;

	WREG32(watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX +
		ADDRESS_WATCH_REG_CNTL], cntl.u32All);

	return 0;
}
static int kgd_wave_control_execute(struct kgd_dev *kgd,
					uint32_t gfx_index_val,
					uint32_t sq_cmd)
{
	struct amdgpu_device *adev = get_amdgpu_device(kgd);
	uint32_t data;

	mutex_lock(&adev->grbm_idx_mutex);

	WREG32(mmGRBM_GFX_INDEX, gfx_index_val);
	WREG32(mmSQ_CMD, sq_cmd);

	/* Restore the GRBM_GFX_INDEX register */
	data = GRBM_GFX_INDEX__INSTANCE_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SH_BROADCAST_WRITES_MASK |
		GRBM_GFX_INDEX__SE_BROADCAST_WRITES_MASK;

	WREG32(mmGRBM_GFX_INDEX, data);

	mutex_unlock(&adev->grbm_idx_mutex);

	return 0;
}
static uint32_t kgd_address_watch_get_offset(struct kgd_dev *kgd,
					unsigned int watch_point_id,
					unsigned int reg_offset)
{
	return watchRegs[watch_point_id * ADDRESS_WATCH_REG_MAX + reg_offset];
}
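
/* ATC_VMID0_PASID_MAPPING + vmid holds the PASID mapped to that VMID in its
 * low 16 bits plus a valid bit; the two helpers below decode those fields.
 */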
static bool get_atc_vmid_pasid_mapping_valid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	return reg & ATC_VMID0_PASID_MAPPING__VALID_MASK;
}
static uint16_t get_atc_vmid_pasid_mapping_pasid(struct kgd_dev *kgd,
							uint8_t vmid)
{
	uint32_t reg;
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	reg = RREG32(mmATC_VMID0_PASID_MAPPING + vmid);
	/* Return the PASID field, not the valid bit */
	return reg & ATC_VMID0_PASID_MAPPING__PASID_MASK;
}
static void write_vmid_invalidate_request(struct kgd_dev *kgd, uint8_t vmid)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;

	WREG32(mmVM_INVALIDATE_REQUEST, 1 << vmid);
}
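
/* Report the ucode version of one engine's firmware image, as loaded by
 * amdgpu at init time.
 */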
static uint16_t get_fw_version(struct kgd_dev *kgd, enum kgd_engine_type type)
{
	struct amdgpu_device *adev = (struct amdgpu_device *) kgd;
	const union amdgpu_firmware_header *hdr;

	BUG_ON(kgd == NULL);

	switch (type) {
	case KGD_ENGINE_PFP:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.pfp_fw->data;
		break;
	case KGD_ENGINE_ME:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.me_fw->data;
		break;
	case KGD_ENGINE_CE:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.ce_fw->data;
		break;
	case KGD_ENGINE_MEC1:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec_fw->data;
		break;
	case KGD_ENGINE_MEC2:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.mec2_fw->data;
		break;
	case KGD_ENGINE_RLC:
		hdr = (const union amdgpu_firmware_header *)
						adev->gfx.rlc_fw->data;
		break;
	case KGD_ENGINE_SDMA1:
		hdr = (const union amdgpu_firmware_header *)
					adev->sdma.instance[0].fw->data;
		break;
	case KGD_ENGINE_SDMA2:
		hdr = (const union amdgpu_firmware_header *)
					adev->sdma.instance[1].fw->data;
		break;
	default:
		return 0;
	}

	if (hdr == NULL)
		return 0;

	/* Only 12 bits in use */
	return hdr->common.ucode_version;
}