/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include "kfd_device_queue_manager.h"
#include "kfd_priv.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"

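/*
 * User queues (pqn->q) and kernel queues such as the DIQ (pqn->kq) share
 * the same qid space, so a lookup has to check both.
 */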
static inline struct process_queue_node *get_queue_by_qid(
			struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if ((pqn->q && pqn->q->properties.queue_id == qid) ||
		    (pqn->kq && pqn->kq->queue->properties.queue_id == qid))
			return pqn;
	}

	return NULL;
}
static int assign_queue_slot_by_qid(struct process_queue_manager *pqm,
				    unsigned int qid)
{
	if (qid >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS)
		return -EINVAL;

	if (__test_and_set_bit(qid, pqm->queue_slot_bitmap)) {
		pr_err("Cannot create new queue because requested qid(%u) is in use\n", qid);
		return -ENOSPC;
	}

	return 0;
}
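/*
 * Find the first free qid in the process's queue-slot bitmap and claim it.
 * On success the chosen slot is returned through *qid.
 */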
static int find_available_queue_slot(struct process_queue_manager *pqm,
				     unsigned int *qid)
{
	unsigned long found;

	found = find_first_zero_bit(pqm->queue_slot_bitmap,
				    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);

	pr_debug("The new slot id %lu\n", found);

	if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
		pr_info("Cannot open more queues for process with pasid 0x%x\n",
			pqm->process->pasid);
		return -ENOMEM;
	}

	set_bit(found, pqm->queue_slot_bitmap);
	*qid = found;

	return 0;
}
void kfd_process_dequeue_from_device(struct kfd_process_device *pdd)
{
	struct kfd_dev *dev = pdd->dev;

	if (pdd->already_dequeued)
		return;

	dev->dqm->ops.process_termination(dev->dqm, &pdd->qpd);
	pdd->already_dequeued = true;
}
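/*
 * Attach GWS (global wave sync) resources to the queue identified by qid,
 * or detach them when gws is NULL. Only one queue per process may hold a
 * GWS allocation at a time.
 */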
int pqm_set_gws(struct process_queue_manager *pqm, unsigned int qid,
		void *gws)
{
	struct kfd_dev *dev = NULL;
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct kgd_mem *mem = NULL;
	int ret;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}

	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;

	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -EINVAL;
	}

	/* Only one queue per process can have GWS assigned */
	if (gws && pdd->qpd.num_gws)
		return -EBUSY;

	if (!gws && pdd->qpd.num_gws == 0)
		return -EINVAL;

	if (gws)
		ret = amdgpu_amdkfd_add_gws_to_process(pdd->process->kgd_process_info,
			gws, &mem);
	else
		ret = amdgpu_amdkfd_remove_gws_from_process(pdd->process->kgd_process_info,
			pqn->q->gws);
	if (unlikely(ret))
		return ret;

	pqn->q->gws = mem;
	pdd->qpd.num_gws = gws ? dev->adev->gds.gws_size : 0;

	return pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
						     pqn->q, NULL);
}
void kfd_process_dequeue_from_all_devices(struct kfd_process *p)
{
	int i;

	for (i = 0; i < p->n_pdds; i++)
		kfd_process_dequeue_from_device(p->pdds[i]);
}
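/* Set up the per-process queue list and qid allocation bitmap. */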
int pqm_init(struct process_queue_manager *pqm, struct kfd_process *p)
{
	INIT_LIST_HEAD(&pqm->queues);
	pqm->queue_slot_bitmap = bitmap_zalloc(KFD_MAX_NUM_OF_QUEUES_PER_PROCESS,
					       GFP_KERNEL);
	if (!pqm->queue_slot_bitmap)
		return -ENOMEM;
	pqm->process = p;

	return 0;
}
void pqm_uninit(struct process_queue_manager *pqm)
{
	struct process_queue_node *pqn, *next;

	list_for_each_entry_safe(pqn, next, &pqm->queues, process_queue_list) {
		if (pqn->q && pqn->q->gws)
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
		kfd_procfs_del_queue(pqn->q);
		uninit_queue(pqn->q);
		list_del(&pqn->process_queue_list);
		kfree(pqn);
	}

	bitmap_free(pqm->queue_slot_bitmap);
	pqm->queue_slot_bitmap = NULL;
}
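/*
 * Common initialization for user-mode queues: the doorbell is mapped by
 * user space and the VMID is assigned later by the DQM.
 */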
static int init_user_queue(struct process_queue_manager *pqm,
			   struct kfd_dev *dev, struct queue **q,
			   struct queue_properties *q_properties,
			   struct file *f, unsigned int qid)
{
	int retval;

	/* Doorbell initialized in user space */
	q_properties->doorbell_ptr = NULL;

	/* Let DQM handle it */
	q_properties->vmid = 0;
	q_properties->queue_id = qid;

	retval = init_queue(q, q_properties);
	if (retval != 0)
		return retval;

	(*q)->device = dev;
	(*q)->process = pqm->process;

	pr_debug("PQM After init queue\n");

	return retval;
}
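/*
 * Create a queue of the given type for this process. q_data, restore_mqd
 * and restore_ctl_stack are only non-NULL on the CRIU restore path, where
 * the queue must be recreated with its checkpointed qid, MQD and control
 * stack. On success the doorbell offset within the process doorbell page
 * is optionally returned through p_doorbell_offset_in_process.
 */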
int pqm_create_queue(struct process_queue_manager *pqm,
		     struct kfd_dev *dev,
		     struct file *f,
		     struct queue_properties *properties,
		     unsigned int *qid,
		     const struct kfd_criu_queue_priv_data *q_data,
		     const void *restore_mqd,
		     const void *restore_ctl_stack,
		     uint32_t *p_doorbell_offset_in_process)
{
	int retval;
	struct kfd_process_device *pdd;
	struct queue *q;
	struct process_queue_node *pqn;
	struct kernel_queue *kq;
	enum kfd_queue_type type = properties->type;
	unsigned int max_queues = 127; /* HWS limit */

	q = NULL;
	kq = NULL;
	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}
	/*
	 * For a debug process, verify that it is within the static queues
	 * limit; currently the limit is set to half of the total available
	 * HQD slots. If we are just about to create a DIQ, the is_debug flag
	 * is not set yet, hence we also check the type.
	 */
	if ((pdd->qpd.is_debug) || (type == KFD_QUEUE_TYPE_DIQ))
		max_queues = dev->device_info.max_no_of_hqd/2;

	if (pdd->qpd.queue_count >= max_queues)
		return -ENOSPC;
	if (q_data) {
		retval = assign_queue_slot_by_qid(pqm, q_data->q_id);
		*qid = q_data->q_id;
	} else
		retval = find_available_queue_slot(pqm, qid);

	if (retval != 0)
		return retval;
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.register_process(dev->dqm, &pdd->qpd);

	pqn = kzalloc(sizeof(*pqn), GFP_KERNEL);
	if (!pqn) {
		retval = -ENOMEM;
		goto err_allocate_pqn;
	}
	switch (type) {
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		/* SDMA queues are always allocated statically no matter
		 * which scheduler mode is used. We also do not need to
		 * check whether an SDMA queue can be allocated here, because
		 * allocate_sdma_queue() in create_queue() has the
		 * corresponding check logic.
		 */
		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_COMPUTE:
		/* check if there is over subscription */
		if ((dev->dqm->sched_policy ==
		     KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
		    ((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
		     (dev->dqm->active_queue_count >= get_cp_queues_num(dev->dqm)))) {
			pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
			retval = -EPERM;
			goto err_create_queue;
		}

		retval = init_user_queue(pqm, dev, &q, properties, f, *qid);
		if (retval != 0)
			goto err_create_queue;
		pqn->q = q;
		pqn->kq = NULL;
		retval = dev->dqm->ops.create_queue(dev->dqm, q, &pdd->qpd, q_data,
						    restore_mqd, restore_ctl_stack);
		print_queue(q);
		break;
	case KFD_QUEUE_TYPE_DIQ:
		kq = kernel_queue_init(dev, KFD_QUEUE_TYPE_DIQ);
		if (!kq) {
			retval = -ENOMEM;
			goto err_create_queue;
		}
		kq->queue->properties.queue_id = *qid;
		pqn->kq = kq;
		pqn->q = NULL;
		retval = dev->dqm->ops.create_kernel_queue(dev->dqm,
							   kq, &pdd->qpd);
		break;
	default:
		WARN(1, "Invalid queue type %d", type);
		retval = -EINVAL;
	}
	if (retval != 0) {
		pr_err("Pasid 0x%x DQM create queue type %d failed. ret %d\n",
		       pqm->process->pasid, type, retval);
		goto err_create_queue;
	}
	if (q && p_doorbell_offset_in_process)
		/* Return the doorbell offset within the doorbell page
		 * to the caller so it can be passed up to user mode
		 * (in bytes).
		 * There are always 1024 doorbells per process, so in case
		 * of 8-byte doorbells, there are two doorbell pages per
		 * process.
		 */
		*p_doorbell_offset_in_process =
			(q->properties.doorbell_off * sizeof(uint32_t)) &
			(kfd_doorbell_process_slice(dev) - 1);
	pr_debug("PQM After DQM create queue\n");

	list_add(&pqn->process_queue_list, &pqm->queues);

	if (q) {
		pr_debug("PQM done creating queue\n");
		kfd_procfs_add_queue(q);
		print_queue_properties(&q->properties);
	}

	return retval;
err_create_queue:
	uninit_queue(q);
	if (kq)
		kernel_queue_uninit(kq, false);
	kfree(pqn);
err_allocate_pqn:
	/* If the queues list is empty, unregister the process from the device */
	clear_bit(*qid, pqm->queue_slot_bitmap);
	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dev->dqm->ops.unregister_process(dev->dqm, &pdd->qpd);
	return retval;
}
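/*
 * Tear down the queue identified by qid, release its qid slot and, if this
 * was the last queue of the process on the device, unregister the process
 * from the DQM.
 */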
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
{
	struct process_queue_node *pqn;
	struct kfd_process_device *pdd;
	struct device_queue_manager *dqm;
	struct kfd_dev *dev;
	int retval;

	dqm = NULL;
	retval = 0;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_err("Queue id does not match any known queue\n");
		return -EINVAL;
	}
	dev = NULL;
	if (pqn->kq)
		dev = pqn->kq->dev;
	if (pqn->q)
		dev = pqn->q->device;
	if (WARN_ON(!dev))
		return -ENODEV;
	pdd = kfd_get_process_device_data(dev, pqm->process);
	if (!pdd) {
		pr_err("Process device data doesn't exist\n");
		return -1;
	}
	if (pqn->kq) {
		/* destroy kernel queue (DIQ) */
		dqm = pqn->kq->dev->dqm;
		dqm->ops.destroy_kernel_queue(dqm, pqn->kq, &pdd->qpd);
		kernel_queue_uninit(pqn->kq, false);
	}
	if (pqn->q) {
		kfd_procfs_del_queue(pqn->q);
		dqm = pqn->q->device->dqm;
		retval = dqm->ops.destroy_queue(dqm, &pdd->qpd, pqn->q);
		if (retval) {
			pr_err("Pasid 0x%x destroy queue %d failed, ret %d\n",
			       pqm->process->pasid,
			       pqn->q->properties.queue_id, retval);
			if (retval != -ETIME)
				goto err_destroy_queue;
		}

		if (pqn->q->gws) {
			amdgpu_amdkfd_remove_gws_from_process(pqm->process->kgd_process_info,
				pqn->q->gws);
			pdd->qpd.num_gws = 0;
		}

		uninit_queue(pqn->q);
	}
	list_del(&pqn->process_queue_list);
	kfree(pqn);
	clear_bit(qid, pqm->queue_slot_bitmap);

	if (list_empty(&pdd->qpd.queues_list) &&
	    list_empty(&pdd->qpd.priv_queue_list))
		dqm->ops.unregister_process(dqm, &pdd->qpd);

err_destroy_queue:
	return retval;
}
int pqm_update_queue_properties(struct process_queue_manager *pqm,
				unsigned int qid, struct queue_properties *p)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	pqn->q->properties.queue_address = p->queue_address;
	pqn->q->properties.queue_size = p->queue_size;
	pqn->q->properties.queue_percent = p->queue_percent;
	pqn->q->properties.priority = p->priority;

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
						       pqn->q, NULL);
	if (retval != 0)
		return retval;

	return 0;
}
int pqm_update_mqd(struct process_queue_manager *pqm,
		   unsigned int qid, struct mqd_update_info *minfo)
{
	int retval;
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("No queue %d exists for update operation\n", qid);
		return -EFAULT;
	}

	retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
						       pqn->q, minfo);
	if (retval != 0)
		return retval;

	return 0;
}
struct kernel_queue *pqm_get_kernel_queue(
					struct process_queue_manager *pqm,
					unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (pqn && pqn->kq)
		return pqn->kq;

	return NULL;
}
struct queue *pqm_get_user_queue(struct process_queue_manager *pqm,
				 unsigned int qid)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	return pqn ? pqn->q : NULL;
}
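/*
 * Return the queue's saved wave state: the control stack is copied to the
 * user-mode buffer and the used control-stack and save-area sizes are
 * reported back to the caller.
 */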
int pqm_get_wave_state(struct process_queue_manager *pqm,
		       unsigned int qid,
		       void __user *ctl_stack,
		       u32 *ctl_stack_used_size,
		       u32 *save_area_used_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n",
			 qid);
		return -EFAULT;
	}

	return pqn->q->device->dqm->ops.get_wave_state(pqn->q->device->dqm,
						       pqn->q,
						       ctl_stack,
						       ctl_stack_used_size,
						       save_area_used_size);
}
static int get_queue_data_sizes(struct kfd_process_device *pdd,
				struct queue *q,
				uint32_t *mqd_size,
				uint32_t *ctl_stack_size)
{
	int ret;

	ret = pqm_get_queue_checkpoint_info(&pdd->process->pqm,
					    q->properties.queue_id,
					    mqd_size,
					    ctl_stack_size);
	if (ret)
		pr_err("Failed to get queue dump info (%d)\n", ret);

	return ret;
}
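/*
 * Count the process's checkpointable queues and compute the total private
 * data size (one kfd_criu_queue_priv_data header per queue plus its MQD
 * and control stack) that a CRIU checkpoint will need.
 */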
int kfd_process_get_queue_info(struct kfd_process *p,
			       uint32_t *num_queues,
			       uint64_t *priv_data_sizes)
{
	uint32_t extra_data_sizes = 0;
	struct queue *q;
	int i;
	int ret;

	*num_queues = 0;

	/* Run over all PDDs of the process */
	for (i = 0; i < p->n_pdds; i++) {
		struct kfd_process_device *pdd = p->pdds[i];

		list_for_each_entry(q, &pdd->qpd.queues_list, list) {
			if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
			    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
				uint32_t mqd_size, ctl_stack_size;

				*num_queues = *num_queues + 1;

				ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
				if (ret)
					return ret;

				extra_data_sizes += mqd_size + ctl_stack_size;
			} else {
				pr_err("Unsupported queue type (%d)\n", q->properties.type);
				return -EOPNOTSUPP;
			}
		}
	}
	*priv_data_sizes = extra_data_sizes +
			   (*num_queues * sizeof(struct kfd_criu_queue_priv_data));

	return 0;
}
static int pqm_checkpoint_mqd(struct process_queue_manager *pqm,
			      unsigned int qid,
			      void *mqd,
			      void *ctl_stack)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.checkpoint_mqd) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	return pqn->q->device->dqm->ops.checkpoint_mqd(pqn->q->device->dqm,
						       pqn->q, mqd, ctl_stack);
}
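/*
 * Fill one kfd_criu_queue_priv_data record from the queue properties; the
 * MQD and control stack are appended directly after the header.
 */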
static int criu_checkpoint_queue(struct kfd_process_device *pdd,
				 struct queue *q,
				 struct kfd_criu_queue_priv_data *q_data)
{
	uint8_t *mqd, *ctl_stack;
	int ret;

	mqd = (void *)(q_data + 1);
	ctl_stack = mqd + q_data->mqd_size;

	q_data->gpu_id = pdd->dev->id;
	q_data->type = q->properties.type;
	q_data->format = q->properties.format;
	q_data->q_id = q->properties.queue_id;
	q_data->q_address = q->properties.queue_address;
	q_data->q_size = q->properties.queue_size;
	q_data->priority = q->properties.priority;
	q_data->q_percent = q->properties.queue_percent;
	q_data->read_ptr_addr = (uint64_t)q->properties.read_ptr;
	q_data->write_ptr_addr = (uint64_t)q->properties.write_ptr;
	q_data->doorbell_id = q->doorbell_id;

	q_data->sdma_id = q->sdma_id;

	q_data->eop_ring_buffer_address =
		q->properties.eop_ring_buffer_address;

	q_data->eop_ring_buffer_size = q->properties.eop_ring_buffer_size;

	q_data->ctx_save_restore_area_address =
		q->properties.ctx_save_restore_area_address;

	q_data->ctx_save_restore_area_size =
		q->properties.ctx_save_restore_area_size;

	ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
	if (ret) {
		pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
		return ret;
	}

	pr_debug("Dumping Queue: gpu_id:%x queue_id:%u\n", q_data->gpu_id, q_data->q_id);
	return ret;
}
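/*
 * Checkpoint all queues of one process device: each queue is serialized
 * into a reusable local buffer and then copied out to the user-mode CRIU
 * buffer at *queues_priv_data_offset.
 */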
static int criu_checkpoint_queues_device(struct kfd_process_device *pdd,
					 uint8_t __user *user_priv,
					 unsigned int *q_index,
					 uint64_t *queues_priv_data_offset)
{
	unsigned int q_private_data_size = 0;
	uint8_t *q_private_data = NULL; /* Local buffer to store individual queue private data */
	struct queue *q;
	int ret = 0;

	list_for_each_entry(q, &pdd->qpd.queues_list, list) {
		struct kfd_criu_queue_priv_data *q_data;
		uint64_t q_data_size;
		uint32_t mqd_size;
		uint32_t ctl_stack_size;

		if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE &&
		    q->properties.type != KFD_QUEUE_TYPE_SDMA &&
		    q->properties.type != KFD_QUEUE_TYPE_SDMA_XGMI) {
			pr_err("Unsupported queue type (%d)\n", q->properties.type);
			ret = -EOPNOTSUPP;
			break;
		}

		ret = get_queue_data_sizes(pdd, q, &mqd_size, &ctl_stack_size);
		if (ret)
			break;

		q_data_size = sizeof(*q_data) + mqd_size + ctl_stack_size;

		/* Increase local buffer space if needed */
		if (q_private_data_size < q_data_size) {
			kfree(q_private_data);

			q_private_data = kzalloc(q_data_size, GFP_KERNEL);
			if (!q_private_data) {
				ret = -ENOMEM;
				break;
			}
			q_private_data_size = q_data_size;
		}

		q_data = (struct kfd_criu_queue_priv_data *)q_private_data;

		/* data stored in this order: priv_data, mqd, ctl_stack */
		q_data->mqd_size = mqd_size;
		q_data->ctl_stack_size = ctl_stack_size;

		ret = criu_checkpoint_queue(pdd, q, q_data);
		if (ret)
			break;

		q_data->object_type = KFD_CRIU_OBJECT_TYPE_QUEUE;

		ret = copy_to_user(user_priv + *queues_priv_data_offset,
				   q_data, q_data_size);
		if (ret) {
			ret = -EFAULT;
			break;
		}
		*queues_priv_data_offset += q_data_size;
		*q_index = *q_index + 1;
	}

	kfree(q_private_data);

	return ret;
}
int kfd_criu_checkpoint_queues(struct kfd_process *p,
			       uint8_t __user *user_priv_data,
			       uint64_t *priv_data_offset)
{
	int ret = 0, pdd_index, q_index = 0;

	for (pdd_index = 0; pdd_index < p->n_pdds; pdd_index++) {
		struct kfd_process_device *pdd = p->pdds[pdd_index];

		/*
		 * criu_checkpoint_queues_device will copy data to user and update q_index and
		 * queues_priv_data_offset
		 */
		ret = criu_checkpoint_queues_device(pdd, user_priv_data, &q_index,
						    priv_data_offset);
		if (ret)
			break;
	}

	return ret;
}
static void set_queue_properties_from_criu(struct queue_properties *qp,
					   struct kfd_criu_queue_priv_data *q_data)
{
	qp->is_interop = false;
	qp->is_gws = q_data->is_gws;
	qp->queue_percent = q_data->q_percent;
	qp->priority = q_data->priority;
	qp->queue_address = q_data->q_address;
	qp->queue_size = q_data->q_size;
	qp->read_ptr = (uint32_t *) q_data->read_ptr_addr;
	qp->write_ptr = (uint32_t *) q_data->write_ptr_addr;
	qp->eop_ring_buffer_address = q_data->eop_ring_buffer_address;
	qp->eop_ring_buffer_size = q_data->eop_ring_buffer_size;
	qp->ctx_save_restore_area_address = q_data->ctx_save_restore_area_address;
	qp->ctx_save_restore_area_size = q_data->ctx_save_restore_area_size;
	qp->ctl_stack_size = q_data->ctl_stack_size;
	qp->type = q_data->type;
	qp->format = q_data->format;
}
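/*
 * Restore a single queue from CRIU private data: validate and copy in the
 * header, MQD and control stack, then recreate the queue on the original
 * GPU via pqm_create_queue().
 */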
int kfd_criu_restore_queue(struct kfd_process *p,
			   uint8_t __user *user_priv_ptr,
			   uint64_t *priv_data_offset,
			   uint64_t max_priv_data_size)
{
	uint8_t *mqd, *ctl_stack, *q_extra_data = NULL;
	struct kfd_criu_queue_priv_data *q_data;
	struct kfd_process_device *pdd;
	uint64_t q_extra_data_size;
	struct queue_properties qp;
	unsigned int queue_id;
	struct kfd_dev *dev;
	int ret = 0;

	if (*priv_data_offset + sizeof(*q_data) > max_priv_data_size)
		return -EINVAL;

	q_data = kmalloc(sizeof(*q_data), GFP_KERNEL);
	if (!q_data)
		return -ENOMEM;

	ret = copy_from_user(q_data, user_priv_ptr + *priv_data_offset, sizeof(*q_data));
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += sizeof(*q_data);
	q_extra_data_size = q_data->ctl_stack_size + q_data->mqd_size;

	if (*priv_data_offset + q_extra_data_size > max_priv_data_size) {
		ret = -EINVAL;
		goto exit;
	}

	q_extra_data = kmalloc(q_extra_data_size, GFP_KERNEL);
	if (!q_extra_data) {
		ret = -ENOMEM;
		goto exit;
	}

	ret = copy_from_user(q_extra_data, user_priv_ptr + *priv_data_offset, q_extra_data_size);
	if (ret) {
		ret = -EFAULT;
		goto exit;
	}

	*priv_data_offset += q_extra_data_size;

	dev = kfd_device_by_id(q_data->gpu_id);
	if (!dev) {
		pr_err("Could not get kfd_dev from gpu_id = 0x%x\n",
		       q_data->gpu_id);
		ret = -EINVAL;
		goto exit;
	}

	pdd = kfd_get_process_device_data(dev, p);
	if (!pdd) {
		pr_err("Failed to get pdd\n");
		ret = -EINVAL;
		goto exit;
	}

	/* data stored in this order: mqd, ctl_stack */
	mqd = q_extra_data;
	ctl_stack = mqd + q_data->mqd_size;

	memset(&qp, 0, sizeof(qp));
	set_queue_properties_from_criu(&qp, q_data);

	print_queue_properties(&qp);

	ret = pqm_create_queue(&p->pqm, pdd->dev, NULL, &qp, &queue_id, q_data, mqd, ctl_stack,
			       NULL);
	if (ret) {
		pr_err("Failed to create new queue err:%d\n", ret);
		goto exit;
	}

exit:
	if (ret)
		pr_err("Failed to create queue (%d)\n", ret);
	else
		pr_debug("Queue id %d was restored successfully\n", queue_id);

	kfree(q_data);
	kfree(q_extra_data);

	return ret;
}
int pqm_get_queue_checkpoint_info(struct process_queue_manager *pqm,
				  unsigned int qid,
				  uint32_t *mqd_size,
				  uint32_t *ctl_stack_size)
{
	struct process_queue_node *pqn;

	pqn = get_queue_by_qid(pqm, qid);
	if (!pqn) {
		pr_debug("amdkfd: No queue %d exists for operation\n", qid);
		return -EFAULT;
	}

	if (!pqn->q->device->dqm->ops.get_queue_checkpoint_info) {
		pr_err("amdkfd: queue dumping not supported on this device\n");
		return -EOPNOTSUPP;
	}

	pqn->q->device->dqm->ops.get_queue_checkpoint_info(pqn->q->device->dqm,
							   pqn->q, mqd_size,
							   ctl_stack_size);
	return 0;
}
#if defined(CONFIG_DEBUG_FS)

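/*
 * Dump the MQD of every queue owned by this process to the debugfs file.
 */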
int pqm_debugfs_mqds(struct seq_file *m, void *data)
{
	struct process_queue_manager *pqm = data;
	struct process_queue_node *pqn;
	struct queue *q;
	enum KFD_MQD_TYPE mqd_type;
	struct mqd_manager *mqd_mgr;
	int r = 0;

	list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
		if (pqn->q) {
			q = pqn->q;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_SDMA:
			case KFD_QUEUE_TYPE_SDMA_XGMI:
				seq_printf(m, "  SDMA queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_SDMA;
				break;
			case KFD_QUEUE_TYPE_COMPUTE:
				seq_printf(m, "  Compute queue on device %x\n",
					   q->device->id);
				mqd_type = KFD_MQD_TYPE_CP;
				break;
			default:
				seq_printf(m,
				"  Bad user queue type %d on device %x\n",
					   q->properties.type, q->device->id);
				continue;
			}
			mqd_mgr = q->device->dqm->mqd_mgrs[mqd_type];
		} else if (pqn->kq) {
			q = pqn->kq->queue;
			mqd_mgr = pqn->kq->mqd_mgr;
			switch (q->properties.type) {
			case KFD_QUEUE_TYPE_DIQ:
				seq_printf(m, "  DIQ on device %x\n",
					   pqn->kq->dev->id);
				break;
			default:
				seq_printf(m,
				"  Bad kernel queue type %d on device %x\n",
					   q->properties.type,
					   pqn->kq->dev->id);
				continue;
			}
		} else {
			seq_printf(m,
			"  Weird: Queue node with neither kernel nor user queue\n");
			continue;
		}

		r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
		if (r != 0)
			break;
	}

	return r;
}

#endif