/*
 * Copyright 2016-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#include "v9_structs.h"
#include "gc/gc_9_0_offset.h"
#include "gc/gc_9_0_sh_mask.h"
#include "sdma0/sdma0_4_0_sh_mask.h"
#include "amdgpu_amdkfd.h"
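
/* v9_structs.h defines the struct v9_mqd and struct v9_sdma_mqd layouts;
 * the gc/ and sdma0/ headers supply the register bit-field shifts and
 * masks used throughout this file.
 */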

static inline struct v9_mqd *get_mqd(void *mqd)
{
	return (struct v9_mqd *)mqd;
}

static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
{
	return (struct v9_sdma_mqd *)mqd;
}

static void update_cu_mask(struct mqd_manager *mm, void *mqd,
			struct mqd_update_info *minfo)
{
	struct v9_mqd *m;
	uint32_t se_mask[KFD_MAX_NUM_SE] = {0};

	if (!minfo || (minfo->update_flag != UPDATE_FLAG_CU_MASK) ||
	    !minfo->cu_mask.ptr)
		return;

	mqd_symmetrically_map_cu_mask(mm,
		minfo->cu_mask.ptr, minfo->cu_mask.count, se_mask);

	m = get_mqd(mqd);
	m->compute_static_thread_mgmt_se0 = se_mask[0];
	m->compute_static_thread_mgmt_se1 = se_mask[1];
	m->compute_static_thread_mgmt_se2 = se_mask[2];
	m->compute_static_thread_mgmt_se3 = se_mask[3];
	m->compute_static_thread_mgmt_se4 = se_mask[4];
	m->compute_static_thread_mgmt_se5 = se_mask[5];
	m->compute_static_thread_mgmt_se6 = se_mask[6];
	m->compute_static_thread_mgmt_se7 = se_mask[7];

	pr_debug("update cu mask to %#x %#x %#x %#x %#x %#x %#x %#x\n",
		m->compute_static_thread_mgmt_se0,
		m->compute_static_thread_mgmt_se1,
		m->compute_static_thread_mgmt_se2,
		m->compute_static_thread_mgmt_se3,
		m->compute_static_thread_mgmt_se4,
		m->compute_static_thread_mgmt_se5,
		m->compute_static_thread_mgmt_se6,
		m->compute_static_thread_mgmt_se7);
}

static void set_priority(struct v9_mqd *m, struct queue_properties *q)
{
	m->cp_hqd_pipe_priority = pipe_priority_map[q->priority];
	m->cp_hqd_queue_priority = q->priority;
}

static struct kfd_mem_obj *allocate_mqd(struct kfd_dev *kfd,
		struct queue_properties *q)
{
	int retval;
	struct kfd_mem_obj *mqd_mem_obj = NULL;

	/* For V9 only, due to a HW bug, the control stack of a user mode
	 * compute queue needs to be allocated just behind the page boundary
	 * of its regular MQD buffer. So we allocate an enlarged MQD buffer:
	 * the first page of the buffer serves the regular MQD buffer
	 * purpose and the remainder is for the control stack. Although the
	 * two parts are in the same buffer object, they need different
	 * memory types: the MQD part needs UC (uncached) as usual, while
	 * the control stack needs NC (non coherent), which differs from the
	 * UC type used when the control stack is allocated in user space.
	 *
	 * Because of all this, we use the gtt allocation function instead
	 * of the sub-allocation function for this enlarged MQD buffer.
	 * Moreover, to achieve two memory types in a single buffer object,
	 * we pass the special bo flag AMDGPU_GEM_CREATE_CP_MQD_GFX9 to
	 * instruct the amdgpu memory functions to do so.
	 */
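	/* An illustrative sketch of the layout set up below (editorial,
	 * not taken from the HW docs); both parts are page-aligned:
	 *
	 *   gpu_addr -> +------------------------------+
	 *               | MQD (struct v9_mqd), UC      |
	 *               +------------------------------+ <- page boundary
	 *               | control stack, NC            |
	 *               +------------------------------+
	 */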
	if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
		mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
		if (!mqd_mem_obj)
			return NULL;
		retval = amdgpu_amdkfd_alloc_gtt_mem(kfd->adev,
			ALIGN(q->ctl_stack_size, PAGE_SIZE) +
				ALIGN(sizeof(struct v9_mqd), PAGE_SIZE),
			&(mqd_mem_obj->gtt_mem),
			&(mqd_mem_obj->gpu_addr),
			(void *)&(mqd_mem_obj->cpu_ptr), true);
	} else {
		retval = kfd_gtt_sa_allocate(kfd, sizeof(struct v9_mqd),
				&mqd_mem_obj);
	}

	if (retval) {
		kfree(mqd_mem_obj);
		return NULL;
	}

	return mqd_mem_obj;
}

static void init_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	uint64_t addr;
	struct v9_mqd *m;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memset(m, 0, sizeof(struct v9_mqd));

	m->header = 0xC0310800;
	m->compute_pipelinestat_enable = 1;
	m->compute_static_thread_mgmt_se0 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se1 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se2 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se3 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se4 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se5 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se6 = 0xFFFFFFFF;
	m->compute_static_thread_mgmt_se7 = 0xFFFFFFFF;

	m->cp_hqd_persistent_state = CP_HQD_PERSISTENT_STATE__PRELOAD_REQ_MASK |
			0x53 << CP_HQD_PERSISTENT_STATE__PRELOAD_SIZE__SHIFT;

	m->cp_mqd_control = 1 << CP_MQD_CONTROL__PRIV_STATE__SHIFT;

	m->cp_mqd_base_addr_lo = lower_32_bits(addr);
	m->cp_mqd_base_addr_hi = upper_32_bits(addr);

	m->cp_hqd_quantum = 1 << CP_HQD_QUANTUM__QUANTUM_EN__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_SCALE__SHIFT |
			1 << CP_HQD_QUANTUM__QUANTUM_DURATION__SHIFT;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_aql_control =
			1 << CP_HQD_AQL_CONTROL__CONTROL0__SHIFT;
	}

	if (q->tba_addr) {
		m->compute_pgm_rsrc2 |=
			(1 << COMPUTE_PGM_RSRC2__TRAP_PRESENT__SHIFT);
	}

	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address) {
		m->cp_hqd_persistent_state |=
			(1 << CP_HQD_PERSISTENT_STATE__QSWITCH_MODE__SHIFT);
		m->cp_hqd_ctx_save_base_addr_lo =
			lower_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_base_addr_hi =
			upper_32_bits(q->ctx_save_restore_area_address);
		m->cp_hqd_ctx_save_size = q->ctx_save_restore_area_size;
		m->cp_hqd_cntl_stack_size = q->ctl_stack_size;
		m->cp_hqd_cntl_stack_offset = q->ctl_stack_size;
		m->cp_hqd_wg_state_offset = q->ctl_stack_size;
	}

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;
	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	/* AQL write pointer counts in 64B packets, PM4/CP counts in dwords. */
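	/* A shift of 4 converts a packet count to dwords: one 64-byte AQL
	 * packet is 64 / 4 = 16 = 2^4 dwords. PM4 queues already count in
	 * dwords, so they need no shift.
	 */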
	uint32_t wptr_shift = (p->format == KFD_QUEUE_FORMAT_AQL ? 4 : 0);

	return mm->dev->kfd2kgd->hqd_load(mm->dev->adev, mqd, pipe_id, queue_id,
					  (uint32_t __user *)p->write_ptr,
					  wptr_shift, 0, mms);
}

static int hiq_load_mqd_kiq(struct mqd_manager *mm, void *mqd,
			uint32_t pipe_id, uint32_t queue_id,
			struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hiq_mqd_load(mm->dev->adev, mqd, pipe_id,
					      queue_id, p->doorbell_off);
}

static void update_mqd(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_mqd *m;

	m = get_mqd(mqd);

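	/* q->queue_size is in bytes, so queue_size / 4 is the ring size in
	 * dwords; the low bits of CP_HQD_PQ_CONTROL take its log2 minus 1
	 * (for example, a 4 KiB ring gives order_base_2(1024) - 1 = 9).
	 */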
	m->cp_hqd_pq_control = 5 << CP_HQD_PQ_CONTROL__RPTR_BLOCK_SIZE__SHIFT;
	m->cp_hqd_pq_control |= order_base_2(q->queue_size / 4) - 1;
	pr_debug("cp_hqd_pq_control 0x%x\n", m->cp_hqd_pq_control);

	m->cp_hqd_pq_base_lo = lower_32_bits((uint64_t)q->queue_address >> 8);
	m->cp_hqd_pq_base_hi = upper_32_bits((uint64_t)q->queue_address >> 8);

	m->cp_hqd_pq_rptr_report_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_rptr_report_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->cp_hqd_pq_wptr_poll_addr_lo = lower_32_bits((uint64_t)q->write_ptr);
	m->cp_hqd_pq_wptr_poll_addr_hi = upper_32_bits((uint64_t)q->write_ptr);

	m->cp_hqd_pq_doorbell_control =
		q->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	m->cp_hqd_ib_control =
		3 << CP_HQD_IB_CONTROL__MIN_IB_AVAIL_SIZE__SHIFT |
		1 << CP_HQD_IB_CONTROL__IB_EXE_DISABLE__SHIFT;

	/*
	 * HW does not clamp this field correctly. Maximum EOP queue size
	 * is constrained by per-SE EOP done signal count, which is 8-bit.
	 * Limit is 0xFF EOP entries (= 0x7F8 dwords). CP will not submit
	 * more than (EOP entry count - 1) so a queue size of 0x800 dwords
	 * is safe, giving a maximum field value of 0xA.
	 */
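	/* Worked example: 0x800 dwords is an 8 KiB EOP buffer, for which
	 * order_base_2(0x800) - 1 = 11 - 1 = 0xA, exactly the clamp
	 * applied below.
	 */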
	m->cp_hqd_eop_control = min(0xA,
		order_base_2(q->eop_ring_buffer_size / 4) - 1);
	m->cp_hqd_eop_base_addr_lo =
			lower_32_bits(q->eop_ring_buffer_address >> 8);
	m->cp_hqd_eop_base_addr_hi =
			upper_32_bits(q->eop_ring_buffer_address >> 8);

	m->cp_hqd_iq_timer = 0;

	m->cp_hqd_vmid = q->vmid;

	if (q->format == KFD_QUEUE_FORMAT_AQL) {
		m->cp_hqd_pq_control |= CP_HQD_PQ_CONTROL__NO_UPDATE_RPTR_MASK |
				2 << CP_HQD_PQ_CONTROL__SLOT_BASED_WPTR__SHIFT |
				1 << CP_HQD_PQ_CONTROL__QUEUE_FULL_EN__SHIFT |
				1 << CP_HQD_PQ_CONTROL__WPP_CLAMP_EN__SHIFT;
		m->cp_hqd_pq_doorbell_control |= 1 <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_BIF_DROP__SHIFT;
	}

	if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
		m->cp_hqd_ctx_save_control = 0;

	update_cu_mask(mm, mqd, minfo);
	set_priority(m, q);

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

static uint32_t read_doorbell_id(void *mqd)
{
	struct v9_mqd *m = (struct v9_mqd *)mqd;

	return m->queue_doorbell_id0;
}

static int destroy_mqd(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_destroy
		(mm->dev->adev, mqd, type, timeout,
		pipe_id, queue_id);
}

static void free_mqd(struct mqd_manager *mm, void *mqd,
			struct kfd_mem_obj *mqd_mem_obj)
{
	struct kfd_dev *kfd = mm->dev;

	/* Enlarged MQDs from allocate_mqd() own a GTT allocation; plain
	 * MQDs came from the sub-allocator.
	 */
	if (mqd_mem_obj->gtt_mem) {
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, mqd_mem_obj->gtt_mem);
		kfree(mqd_mem_obj);
	} else {
		kfd_gtt_sa_free(mm->dev, mqd_mem_obj);
	}
}

static bool is_occupied(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_is_occupied(
			mm->dev->adev, queue_address,
			pipe_id, queue_id);
}

static int get_wave_state(struct mqd_manager *mm, void *mqd,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct v9_mqd *m;

	/* Control stack is located one page after MQD. */
	void *mqd_ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

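	/* An editorial reading of the arithmetic below: the control stack
	 * grows downward, so cp_hqd_cntl_stack_offset marks the lowest
	 * entry in use (init_mqd() starts it at cp_hqd_cntl_stack_size, an
	 * empty stack) and the used size is the distance between the two.
	 * The workgroup save area follows the control stack buffer, ending
	 * at cp_hqd_wg_state_offset.
	 */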
	*ctl_stack_used_size = m->cp_hqd_cntl_stack_size -
		m->cp_hqd_cntl_stack_offset;
	*save_area_used_size = m->cp_hqd_wg_state_offset -
		m->cp_hqd_cntl_stack_size;

	if (copy_to_user(ctl_stack, mqd_ctl_stack, m->cp_hqd_cntl_stack_size))
		return -EFAULT;

	return 0;
}

static void get_checkpoint_info(struct mqd_manager *mm, void *mqd, u32 *ctl_stack_size)
{
	struct v9_mqd *m = get_mqd(mqd);

	*ctl_stack_size = m->cp_hqd_cntl_stack_size;
}

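/* get_checkpoint_info() above and the two functions below form the queue
 * save/restore path used for process checkpointing: the caller sizes its
 * buffers from get_checkpoint_info(), snapshots the MQD and control stack
 * with checkpoint_mqd(), and later replays both into a freshly allocated
 * MQD buffer with restore_mqd().
 */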
static void checkpoint_mqd(struct mqd_manager *mm, void *mqd, void *mqd_dst, void *ctl_stack_dst)
{
	struct v9_mqd *m;
	/* Control stack is located one page after MQD. */
	void *ctl_stack = (void *)((uintptr_t)mqd + PAGE_SIZE);

	m = get_mqd(mqd);

	memcpy(mqd_dst, m, sizeof(struct v9_mqd));
	memcpy(ctl_stack_dst, ctl_stack, m->cp_hqd_cntl_stack_size);
}

static void restore_mqd(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_mqd *m;
	void *ctl_stack;

	m = (struct v9_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	/* Control stack is located one page after MQD. */
	ctl_stack = (void *)((uintptr_t)*mqd + PAGE_SIZE);
	memcpy(ctl_stack, ctl_stack_src, ctl_stack_size);

	/* The doorbell may have moved, so rewrite it from the restored
	 * queue properties rather than trusting the checkpointed value.
	 */
	m->cp_hqd_pq_doorbell_control =
		qp->doorbell_off <<
			CP_HQD_PQ_DOORBELL_CONTROL__DOORBELL_OFFSET__SHIFT;
	pr_debug("cp_hqd_pq_doorbell_control 0x%x\n",
			m->cp_hqd_pq_doorbell_control);

	qp->is_active = 0;
}

static void init_mqd_hiq(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *q)
{
	struct v9_mqd *m;

	init_mqd(mm, mqd, mqd_mem_obj, gart_addr, q);

	m = get_mqd(*mqd);

	/* The HIQ is a kernel-owned queue: mark it privileged. */
	m->cp_hqd_pq_control |= 1 << CP_HQD_PQ_CONTROL__PRIV_STATE__SHIFT |
			1 << CP_HQD_PQ_CONTROL__KMD_QUEUE__SHIFT;
}

static void init_mqd_sdma(struct mqd_manager *mm, void **mqd,
		struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
		struct queue_properties *q)
{
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;

	memset(m, 0, sizeof(struct v9_sdma_mqd));

	*mqd = m;
	if (gart_addr)
		*gart_addr = mqd_mem_obj->gpu_addr;

	mm->update_mqd(mm, m, q, NULL);
}

static int load_mqd_sdma(struct mqd_manager *mm, void *mqd,
		uint32_t pipe_id, uint32_t queue_id,
		struct queue_properties *p, struct mm_struct *mms)
{
	return mm->dev->kfd2kgd->hqd_sdma_load(mm->dev->adev, mqd,
					       (uint32_t __user *)p->write_ptr,
					       mms);
}

#define SDMA_RLC_DUMMY_DEFAULT 0xf

static void update_mqd_sdma(struct mqd_manager *mm, void *mqd,
			struct queue_properties *q,
			struct mqd_update_info *minfo)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);
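	/* As for the CP ring in update_mqd(), q->queue_size is in bytes and
	 * RB_SIZE takes the ring size as a power of two in dwords, though
	 * here without the "- 1" bias used in CP_HQD_PQ_CONTROL.
	 */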
	m->sdmax_rlcx_rb_cntl = order_base_2(q->queue_size / 4)
			<< SDMA0_RLC0_RB_CNTL__RB_SIZE__SHIFT |
			q->vmid << SDMA0_RLC0_RB_CNTL__RB_VMID__SHIFT |
			1 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_ENABLE__SHIFT |
			6 << SDMA0_RLC0_RB_CNTL__RPTR_WRITEBACK_TIMER__SHIFT;

	m->sdmax_rlcx_rb_base = lower_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_base_hi = upper_32_bits(q->queue_address >> 8);
	m->sdmax_rlcx_rb_rptr_addr_lo = lower_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_rb_rptr_addr_hi = upper_32_bits((uint64_t)q->read_ptr);
	m->sdmax_rlcx_doorbell_offset =
		q->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	m->sdma_engine_id = q->sdma_engine_id;
	m->sdma_queue_id = q->sdma_queue_id;
	m->sdmax_rlcx_dummy_reg = SDMA_RLC_DUMMY_DEFAULT;

	q->is_active = QUEUE_IS_ACTIVE(*q);
}

/*
 * The preempt type here is ignored because there is only one way
 * to preempt an SDMA queue.
 */
static int destroy_mqd_sdma(struct mqd_manager *mm, void *mqd,
			enum kfd_preempt_type type,
			unsigned int timeout, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_destroy(mm->dev->adev, mqd, timeout);
}

static bool is_occupied_sdma(struct mqd_manager *mm, void *mqd,
			uint64_t queue_address, uint32_t pipe_id,
			uint32_t queue_id)
{
	return mm->dev->kfd2kgd->hqd_sdma_is_occupied(mm->dev->adev, mqd);
}

static void checkpoint_mqd_sdma(struct mqd_manager *mm,
				void *mqd,
				void *mqd_dst,
				void *ctl_stack_dst)
{
	struct v9_sdma_mqd *m;

	m = get_sdma_mqd(mqd);

	/* SDMA queues have no control stack, so ctl_stack_dst is unused. */
	memcpy(mqd_dst, m, sizeof(struct v9_sdma_mqd));
}

static void restore_mqd_sdma(struct mqd_manager *mm, void **mqd,
			struct kfd_mem_obj *mqd_mem_obj, uint64_t *gart_addr,
			struct queue_properties *qp,
			const void *mqd_src,
			const void *ctl_stack_src, const u32 ctl_stack_size)
{
	uint64_t addr;
	struct v9_sdma_mqd *m;

	m = (struct v9_sdma_mqd *) mqd_mem_obj->cpu_ptr;
	addr = mqd_mem_obj->gpu_addr;

	memcpy(m, mqd_src, sizeof(*m));

	m->sdmax_rlcx_doorbell_offset =
		qp->doorbell_off << SDMA0_RLC0_DOORBELL_OFFSET__OFFSET__SHIFT;

	*mqd = m;
	if (gart_addr)
		*gart_addr = addr;

	qp->is_active = 0;
}

#if defined(CONFIG_DEBUG_FS)

static int debugfs_show_mqd(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_mqd), false);
	return 0;
}

static int debugfs_show_mqd_sdma(struct seq_file *m, void *data)
{
	seq_hex_dump(m, "    ", DUMP_PREFIX_OFFSET, 32, 4,
		     data, sizeof(struct v9_sdma_mqd), false);
	return 0;
}

#endif

struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
		struct kfd_dev *dev)
{
	struct mqd_manager *mqd;

	if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
		return NULL;

	mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
	if (!mqd)
		return NULL;

	mqd->dev = dev;

	switch (type) {
	case KFD_MQD_TYPE_CP:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd;
		mqd->free_mqd = free_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
		mqd->get_wave_state = get_wave_state;
		mqd->get_checkpoint_info = get_checkpoint_info;
		mqd->checkpoint_mqd = checkpoint_mqd;
		mqd->restore_mqd = restore_mqd;
		mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_HIQ:
		mqd->allocate_mqd = allocate_hiq_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = hiq_load_mqd_kiq;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
		mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		mqd->read_doorbell_id = read_doorbell_id;
		break;
	case KFD_MQD_TYPE_DIQ:
		mqd->allocate_mqd = allocate_mqd;
		mqd->init_mqd = init_mqd_hiq;
		mqd->free_mqd = free_mqd;
		mqd->load_mqd = load_mqd;
		mqd->update_mqd = update_mqd;
		mqd->destroy_mqd = destroy_mqd;
		mqd->is_occupied = is_occupied;
		mqd->mqd_size = sizeof(struct v9_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd;
#endif
		break;
	case KFD_MQD_TYPE_SDMA:
		mqd->allocate_mqd = allocate_sdma_mqd;
		mqd->init_mqd = init_mqd_sdma;
		mqd->free_mqd = free_mqd_hiq_sdma;
		mqd->load_mqd = load_mqd_sdma;
		mqd->update_mqd = update_mqd_sdma;
		mqd->destroy_mqd = destroy_mqd_sdma;
		mqd->is_occupied = is_occupied_sdma;
		mqd->checkpoint_mqd = checkpoint_mqd_sdma;
		mqd->restore_mqd = restore_mqd_sdma;
		mqd->mqd_size = sizeof(struct v9_sdma_mqd);
#if defined(CONFIG_DEBUG_FS)
		mqd->debugfs_show_mqd = debugfs_show_mqd_sdma;
#endif
		break;
	default:
		kfree(mqd);
		return NULL;
	}

	return mqd;
}