/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
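/* That is, 1 << 11 = 2048 bytes of End Of Pipe packet space per pipe. */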
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param);

static int map_queues_cpsch(struct device_queue_manager *dqm);

static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q);

static void kfd_process_hw_exception(struct work_struct *work);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
	if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return KFD_MQD_TYPE_SDMA;
	return KFD_MQD_TYPE_CP;
}
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
{
	int i;
	int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
		+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;

	/* queue is available for KFD usage if bit is 1 */
	for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
		if (test_bit(pipe_offset + i,
			     dqm->dev->shared_resources.queue_bitmap))
			return true;
	return false;
}
unsigned int get_queues_num(struct device_queue_manager *dqm)
{
	return bitmap_weight(dqm->dev->shared_resources.queue_bitmap,
				KGD_MAX_QUEUES);
}
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_queue_per_pipe;
}

unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.num_pipe_per_mec;
}

static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines;
}

static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines;
}

unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}

unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
{
	return dqm->dev->device_info->num_xgmi_sdma_engines
			* dqm->dev->device_info->num_sdma_queues_per_engine;
}
void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return dqm->dev->kfd2kgd->program_sh_mem_settings(
						dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
{
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
		/* On pre-SOC15 chips we need to use the queue ID to
		 * preserve the user mode ABI.
		 */
		q->doorbell_id = q->properties.queue_id;
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
			q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		/* For SDMA queues on SOC15 with 8-byte doorbells, use static
		 * doorbell assignments based on the engine and queue id.
		 * The doorbell index distance between RLC (2*i) and (2*i+1)
		 * for an SDMA engine is 512.
		 */
		uint32_t *idx_offset =
				dev->shared_resources.sdma_doorbell_idx;

		q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
			+ (q->properties.sdma_queue_id & 1)
			* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
			+ (q->properties.sdma_queue_id >> 1);
	} else {
		/* For CP queues on SOC15 reserve a free doorbell ID */
		unsigned int found;

		found = find_first_zero_bit(qpd->doorbell_bitmap,
					    KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
		if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
			pr_debug("No doorbells available\n");
			return -EBUSY;
		}
		set_bit(found, qpd->doorbell_bitmap);
		q->doorbell_id = found;
	}

	q->properties.doorbell_off =
		kfd_doorbell_id_to_offset(dev, q->process,
					  q->doorbell_id);

	return 0;
}
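/* Worked example for the SDMA doorbell math above (the idx_offset[]
 * value is illustrative; the real table is provided by amdgpu):
 * assuming idx_offset[1] = 0x18, sdma_engine_id = 1 and
 * sdma_queue_id = 3 yield
 *   0x18 + (3 & 1) * KFD_QUEUE_DOORBELL_MIRROR_OFFSET + (3 >> 1)
 * = 0x18 + 512 + 1,
 * i.e. odd RLC queues land 512 doorbell indices above the even ones.
 */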
static void deallocate_doorbell(struct qcm_process_device *qpd,
				struct queue *q)
{
	unsigned int old;
	struct kfd_dev *dev = qpd->dqm->dev;

	if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
	    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		return;

	old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
	WARN_ON(!old);
}
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = ffs(dqm->vmid_bitmap) - 1;
	dqm->vmid_bitmap &= ~(1 << bit);

	allocated_vmid = bit + dqm->dev->vm_info.first_vmid_kfd;
	pr_debug("vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	/* qpd->page_table_base is set earlier when register_process()
	 * is called, i.e. when the first queue is created.
	 */
	dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
			qpd->vmid,
			qpd->page_table_base);
	/* invalidate the VM context after pasid and vmid mapping is set up */
	kfd_flush_tlb(qpd_to_pdd(qpd));

	if (dqm->dev->kfd2kgd->set_scratch_backing_va)
		dqm->dev->kfd2kgd->set_scratch_backing_va(
			dqm->dev->kgd, qpd->sh_hidden_private_base, qpd->vmid);

	return 0;
}
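/* dqm->vmid_bitmap holds one bit per KFD-owned VMID, so bit positions
 * are relative to first_vmid_kfd. E.g. if KFD owns VMIDs 8-15, bit 0
 * of the bitmap corresponds to hardware VMID 8.
 */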
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
				struct qcm_process_device *qpd)
{
	const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
	int ret;

	if (!qpd->ib_kaddr)
		return -ENOMEM;

	ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
	if (ret)
		return ret;

	return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
				qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
				pmf->release_mem_size / sizeof(uint32_t));
}
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - dqm->dev->vm_info.first_vmid_kfd;

	/* On GFX v7, CP doesn't flush TC at dequeue */
	if (q->device->device_info->asic_family == CHIP_HAWAII)
		if (flush_texture_cache_nocpsch(q->device, qpd))
			pr_err("Failed to flush TC\n");

	kfd_flush_tlb(qpd_to_pdd(qpd));

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	dqm->vmid_bitmap |= (1 << bit);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd)
{
	struct mqd_manager *mqd_mgr;
	int retval;

	print_queue(q);

	dqm_lock(dqm);

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out_unlock;
	}

	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval)
			goto out_unlock;
	}
	q->properties.vmid = qpd->vmid;
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;

	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		retval = allocate_hqd(dqm, q);
		if (retval)
			goto deallocate_vmid;
		pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
			q->pipe, q->queue);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		retval = allocate_sdma_queue(dqm, q);
		if (retval)
			goto deallocate_vmid;
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_hqd;

	/* Temporarily release dqm lock to avoid a circular lock dependency */
	dqm_unlock(dqm);
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	dqm_lock(dqm);

	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (q->properties.is_active) {
		if (WARN(q->process->mm != current->mm,
					"should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
					q->queue, &q->properties, current->mm);
		if (retval)
			goto out_free_mqd;
	}

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active)
		dqm->queue_count++;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->xgmi_sdma_queue_count++;

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	goto out_unlock;

out_free_mqd:
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_hqd:
	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
		deallocate_hqd(dqm, q);
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		deallocate_sdma_queue(dqm, q);
deallocate_vmid:
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
out_unlock:
	dqm_unlock(dqm);
	return retval;
}
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set;
	int pipe, bit, i;

	set = false;

	for (pipe = dqm->next_pipe_to_allocate, i = 0;
			i < get_pipes_per_mec(dqm);
			pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {

		if (!is_pipe_enabled(dqm, 0, pipe))
			continue;

		if (dqm->allocated_queues[pipe] != 0) {
			bit = ffs(dqm->allocated_queues[pipe]) - 1;
			dqm->allocated_queues[pipe] &= ~(1 << bit);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);

	return 0;
}
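/* "Horizontal" here means next_pipe_to_allocate advances round-robin,
 * so consecutive allocations spread across all enabled pipes before a
 * second HQD slot on the same pipe is reused.
 */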
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	dqm->allocated_queues[q->pipe] |= (1 << q->queue);
}
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
 */
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
		deallocate_hqd(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm->xgmi_sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else {
		pr_debug("q->properties.type %d is invalid\n",
				q->properties.type);
		return -EINVAL;
	}
	dqm->total_queue_count--;

	deallocate_doorbell(qpd, q);

	retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
				KFD_UNMAP_LATENCY_MS,
				q->pipe, q->queue);
	if (retval == -ETIME)
		qpd->reset_wavefronts = true;

	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list)) {
		if (qpd->reset_wavefronts) {
			pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
					dqm->dev);
			/* dbgdev_wave_reset_wavefronts has to be called before
			 * deallocate_vmid(), i.e. when vmid is still in use.
			 */
			dbgdev_wave_reset_wavefronts(dqm->dev,
					qpd->pqm->process);
			qpd->reset_wavefronts = false;
		}

		deallocate_vmid(dqm, qpd, q);
	}
	qpd->queue_count--;
	if (q->properties.is_active)
		dqm->queue_count--;

	return retval;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;

	dqm_lock(dqm);
	retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
	dqm_unlock(dqm);

	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval = 0;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	bool prev_active = false;

	dqm_lock(dqm);
	pdd = kfd_get_process_device_data(q->device, q->process);
	if (!pdd) {
		retval = -ENODEV;
		goto out_unlock;
	}
	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	/* Save previous activity state for counters */
	prev_active = q->properties.is_active;

	/* Make sure the queue is unmapped before updating the MQD */
	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
		retval = unmap_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval) {
			pr_err("unmap queue failed\n");
			goto out_unlock;
		}
	} else if (prev_active &&
		   (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		    q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval) {
			pr_err("destroy mqd failed\n");
			goto out_unlock;
		}
	}

	mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);

	/*
	 * check active state vs. the previous state and modify
	 * counter accordingly. map_queues_cpsch uses the
	 * dqm->queue_count to determine whether a new runlist must be
	 * uploaded.
	 */
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = map_queues_cpsch(dqm);
	else if (q->properties.is_active &&
		 (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		  q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
		if (WARN(q->process->mm != current->mm,
			 "should only run in user thread"))
			retval = -EFAULT;
		else
			retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
						   q->pipe, q->queue,
						   &q->properties, current->mm);
	}

out_unlock:
	dqm_unlock(dqm);
	return retval;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	int retval, ret = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = false;
		retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
				KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
		dqm->queue_count--;
	}

out:
	dqm_unlock(dqm);
	return ret;
}
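/* qpd->evicted acts as a reference count: only the 0 -> 1 transition
 * actually deactivates queues; nested eviction requests just bump the
 * count, and restore_process_queues_*() below reactivates queues only
 * once the count drops back to zero.
 */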
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
				      struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	int retval = 0;

	dqm_lock(dqm);
	if (qpd->evicted++ > 0) /* already evicted, do nothing */
		goto out;

	pdd = qpd_to_pdd(qpd);
	pr_info_ratelimited("Evicting PASID %u queues\n",
			    pdd->process->pasid);

	/* Mark all queues as evicted. Deactivate all active queues on
	 * the qpd.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = true;
		if (!q->properties.is_active)
			continue;

		q->properties.is_active = false;
		dqm->queue_count--;
	}
	retval = execute_queues_cpsch(dqm,
				qpd->is_debug ?
				KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);

out:
	dqm_unlock(dqm);
	return retval;
}
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
					  struct qcm_process_device *qpd)
{
	struct mm_struct *mm = NULL;
	struct queue *q;
	struct mqd_manager *mqd_mgr;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval, ret = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	if (!list_empty(&qpd->queues_list)) {
		dqm->dev->kfd2kgd->set_vm_context_page_table_base(
				dqm->dev->kgd,
				qpd->vmid,
				qpd->page_table_base);
		kfd_flush_tlb(pdd);
	}

	/* Take a safe reference to the mm_struct, which may otherwise
	 * disappear even while the kfd_process is still referenced.
	 */
	mm = get_task_mm(pdd->process->lead_thread);
	if (!mm) {
		ret = -EFAULT;
		goto out;
	}

	/* Remove the eviction flags. Activate queues that are not
	 * inactive for other reasons.
	 */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		q->properties.is_active = true;
		retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
				q->queue, &q->properties, mm);
		if (retval && !ret)
			/* Return the first error, but keep going to
			 * maintain a consistent eviction state
			 */
			ret = retval;
		dqm->queue_count++;
	}
	qpd->evicted = 0;
out:
	if (mm)
		mmput(mm);
	dqm_unlock(dqm);
	return ret;
}
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct queue *q;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval = 0;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
		goto out;
	if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
		qpd->evicted--;
		goto out;
	}

	pr_info_ratelimited("Restoring PASID %u queues\n",
			    pdd->process->pasid);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	/* activate all active queues on the qpd */
	list_for_each_entry(q, &qpd->queues_list, list) {
		q->properties.is_evicted = false;
		if (!QUEUE_IS_ACTIVE(q->properties))
			continue;

		q->properties.is_active = true;
		dqm->queue_count++;
	}
	retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	qpd->evicted = 0;
out:
	dqm_unlock(dqm);
	return retval;
}
static int register_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;
	struct kfd_process_device *pdd;
	uint64_t pd_base;
	int retval;

	n = kzalloc(sizeof(*n), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	pdd = qpd_to_pdd(qpd);
	/* Retrieve PD base */
	pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);

	dqm_lock(dqm);
	list_add(&n->list, &dqm->queues);

	/* Update PD Base in QPD */
	qpd->page_table_base = pd_base;
	pr_debug("Updated PD address to 0x%llx\n", pd_base);

	retval = dqm->asic_ops.update_qpd(dqm, qpd);

	dqm->processes_count++;

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	kfd_inc_compute_active(dqm->dev);

	return retval;
}
static int unregister_process(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	pr_debug("qpd->queues_list is %s\n",
			list_empty(&qpd->queues_list) ? "empty" : "not empty");

	retval = 0;
	dqm_lock(dqm);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (!retval)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
						dqm->dev->kgd, pasid, vmid);
}

static void init_interrupts(struct device_queue_manager *dqm)
{
	unsigned int i;

	for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
		if (is_pipe_enabled(dqm, 0, i))
			dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int pipe, queue;

	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues)
		return -ENOMEM;

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->sdma_queue_count = 0;
	dqm->xgmi_sdma_queue_count = 0;

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
			if (test_bit(pipe_offset + queue,
				     dqm->dev->shared_resources.queue_bitmap))
				dqm->allocated_queues[pipe] |= 1 << queue;
	}

	dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	return 0;
}
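/* ~0ULL >> (64 - n) yields a mask with the n low bits set. E.g. with
 * 2 SDMA engines * 8 queues per engine, n = 16 and the SDMA bitmap
 * starts out as 0xFFFF: all 16 SDMA queues free.
 */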
static void uninitialize(struct device_queue_manager *dqm)
{
	int i;

	WARN_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqd_mgrs[i]);
	mutex_destroy(&dqm->lock_hidden);
	kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	init_interrupts(dqm);
	return pm_init(&dqm->packets, dqm);
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	pm_uninit(&dqm->packets);
	return 0;
}
static int allocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	int bit;

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (dqm->sdma_bitmap == 0)
			return -ENOMEM;
		bit = __ffs64(dqm->sdma_bitmap);
		dqm->sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		q->properties.sdma_engine_id = q->sdma_id %
				get_num_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_sdma_engines(dqm);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (dqm->xgmi_sdma_bitmap == 0)
			return -ENOMEM;
		bit = __ffs64(dqm->xgmi_sdma_bitmap);
		dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
		q->sdma_id = bit;
		/* sdma_engine_id is a global engine id that counts both
		 * PCIe-optimized SDMAs and XGMI-optimized SDMAs. The
		 * calculation below assumes the first N engines are
		 * always the PCIe-optimized ones.
		 */
		q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
				q->sdma_id % get_num_xgmi_sdma_engines(dqm);
		q->properties.sdma_queue_id = q->sdma_id /
				get_num_xgmi_sdma_engines(dqm);
	}

	pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
	pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);

	return 0;
}
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
				struct queue *q)
{
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		if (q->sdma_id >= get_num_sdma_queues(dqm))
			return;
		dqm->sdma_bitmap |= (1ULL << q->sdma_id);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
			return;
		dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
	}
}
/*
 * Device Queue Manager implementation for cp scheduler
 */
static int set_sched_resources(struct device_queue_manager *dqm)
{
	int i, mec;
	struct scheduling_resources res;

	res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;

	res.queue_mask = 0;
	for (i = 0; i < KGD_MAX_QUEUES; ++i) {
		mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
			/ dqm->dev->shared_resources.num_pipe_per_mec;

		if (!test_bit(i, dqm->dev->shared_resources.queue_bitmap))
			continue;

		/* only acquire queues from the first MEC */
		if (mec > 0)
			continue;

		/* This situation may be hit in the future if a new HW
		 * generation exposes more than 64 queues. If so, the
		 * definition of res.queue_mask needs updating
		 */
		if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
			pr_err("Invalid queue enabled by amdgpu: %d\n", i);
			break;
		}

		res.queue_mask |= (1ull << i);
	}
	res.gws_mask = ~0ull;
	res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;

	pr_debug("Scheduling resources:\n"
			"vmid mask: 0x%8X\n"
			"queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
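/* res.queue_mask is a flat 64-bit HQD mask indexed in (pipe, queue)
 * order. E.g. assuming 4 pipes * 8 queues per MEC, bits 0-31 cover
 * MEC1, which is why only queues with mec == 0 can be handed to the
 * HW scheduler here.
 */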
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));

	mutex_init(&dqm->lock_hidden);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->sdma_queue_count = 0;
	dqm->xgmi_sdma_queue_count = 0;
	dqm->active_runlist = false;
	dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
	dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));

	INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);

	return 0;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	retval = 0;

	retval = pm_init(&dqm->packets, dqm);
	if (retval)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval)
		goto fail_set_sched_resources;

	pr_debug("Allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
					&dqm->fence_mem);

	if (retval)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	init_interrupts(dqm);

	dqm_lock(dqm);
	/* clear hang status when the driver tries to start the hw scheduler */
	dqm->is_hws_hang = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	dqm_lock(dqm);
	unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new kernel queue because %d queues were already created\n",
				dqm->total_queue_count);
		dqm_unlock(dqm);
		return -EPERM;
	}

	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	dqm_unlock(dqm);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	dqm_lock(dqm);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);
	dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	if (dqm->total_queue_count >= max_num_of_queues_per_device) {
		pr_warn("Can't create new usermode queue because %d queues were already created\n",
				dqm->total_queue_count);
		retval = -EPERM;
		goto out;
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		retval = allocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
		if (retval)
			goto out;
	}

	retval = allocate_doorbell(qpd, q);
	if (retval)
		goto out_deallocate_sdma_queue;

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
	q->properties.tba_addr = qpd->tba_addr;
	q->properties.tma_addr = qpd->tma_addr;
	q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
	if (!q->mqd_mem_obj) {
		retval = -ENOMEM;
		goto out_deallocate_doorbell;
	}

	dqm_lock(dqm);
	/*
	 * Eviction state logic: mark all queues as evicted, even ones
	 * not currently active. Restoring inactive queues later only
	 * updates the is_evicted flag but is a no-op otherwise.
	 */
	q->properties.is_evicted = !!qpd->evicted;
	mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);

	list_add(&q->list, &qpd->queues_list);
	qpd->queue_count++;
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
	}

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
		dqm->sdma_queue_count++;
	else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
		dqm->xgmi_sdma_queue_count++;
	/*
	 * Unconditionally increment this counter, regardless of the queue's
	 * type or whether the queue is active.
	 */
	dqm->total_queue_count++;

	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);
	return retval;

out_deallocate_doorbell:
	deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
	if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
		q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm_lock(dqm);
		deallocate_sdma_queue(dqm, q);
		dqm_unlock(dqm);
	}
out:
	return retval;
}
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned int timeout_ms)
{
	unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, end_jiffies)) {
			pr_err("qcm fence wait loop timeout expired\n");
			/* In HWS case, this is used to halt the driver thread
			 * in order not to mess up CP states before doing
			 * scandumps for FW debugging.
			 */
			while (halt_if_hws_hang)
				schedule();

			return -ETIME;
		}
		schedule();
	}

	return 0;
}
static int unmap_sdma_queues(struct device_queue_manager *dqm)
{
	int i, retval = 0;

	for (i = 0; i < dqm->dev->device_info->num_sdma_engines +
		dqm->dev->device_info->num_xgmi_sdma_engines; i++) {
		retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_SDMA,
			KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0, false, i);
		if (retval)
			return retval;
	}
	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0)
		return 0;

	if (dqm->active_runlist)
		return 0;

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	pr_debug("%s sent runlist\n", __func__);
	if (retval) {
		pr_err("failed to execute runlist\n");
		return retval;
	}
	dqm->active_runlist = true;

	return retval;
}
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval = 0;

	if (dqm->is_hws_hang)
		return -EIO;
	if (!dqm->active_runlist)
		return retval;

	pr_debug("Before destroying queues, sdma queue count: %u, xgmi sdma queue count: %u\n",
		dqm->sdma_queue_count, dqm->xgmi_sdma_queue_count);

	if (dqm->sdma_queue_count > 0 || dqm->xgmi_sdma_queue_count)
		unmap_sdma_queues(dqm);

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			filter, filter_param, false, 0);
	if (retval)
		return retval;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* timed wait for the fence to be signaled */
	retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				queue_preemption_timeout_ms);
	if (retval)
		return retval;

	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

	return retval;
}
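/* The preemption handshake above: the driver arms *fence_addr with
 * KFD_FENCE_INIT, the QUERY_STATUS packet makes the scheduler write
 * KFD_FENCE_COMPLETED once the unmap has drained, and
 * amdkfd_fence_wait_timeout() polls for that value. A timeout means
 * the CP failed to preempt the queues in time.
 */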
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
				enum kfd_unmap_queues_filter filter,
				uint32_t filter_param)
{
	int retval;

	if (dqm->is_hws_hang)
		return -EIO;
	retval = unmap_queues_cpsch(dqm, filter, filter_param);
	if (retval) {
		pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		dqm->is_hws_hang = true;
		schedule_work(&dqm->hw_exception_work);
		return retval;
	}

	return map_queues_cpsch(dqm);
}
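/* A failed preemption is treated as a HWS hang: is_hws_hang makes
 * subsequent map/unmap calls bail out with -EIO, while
 * hw_exception_work (kfd_process_hw_exception below) escalates to a
 * full GPU reset.
 */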
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd_mgr;

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	dqm_lock(dqm);

	if (qpd->is_debug) {
		/*
		 * error: we currently do not allow destroying a queue
		 * of a process that is being debugged
		 */
		retval = -EBUSY;
		goto failed_try_destroy_debugged_queue;
	}

	mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
			q->properties.type)];

	deallocate_doorbell(qpd, q);

	if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
		dqm->sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
		dqm->xgmi_sdma_queue_count--;
		deallocate_sdma_queue(dqm, q);
	}

	list_del(&q->list);
	qpd->queue_count--;
	if (q->properties.is_active) {
		dqm->queue_count--;
		retval = execute_queues_cpsch(dqm,
				KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
		if (retval == -ETIME)
			qpd->reset_wavefronts = true;
	}

	/*
	 * Unconditionally decrement this counter, regardless of the queue's
	 * type.
	 */
	dqm->total_queue_count--;
	pr_debug("Total of %d queues are accountable so far\n",
			dqm->total_queue_count);

	dqm_unlock(dqm);

	/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
	mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);

	return retval;

failed_try_destroy_debugged_queue:

	dqm_unlock(dqm);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
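/* Worked example: base = 0x10000 and size = 0x20000 give
 * limit = 0x2FFFF. base & APE1_FIXED_BITS_MASK == 0 (64K aligned,
 * user-mode range) and limit & APE1_FIXED_BITS_MASK == 0xFFFF ==
 * APE1_LIMIT_ALIGNMENT, so the pair is accepted and programmed as
 * sh_mem_ape1_base = 0x1, sh_mem_ape1_limit = 0x2 (both shifted >> 16).
 */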
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	bool retval = true;

	if (!dqm->asic_ops.set_cache_memory_policy)
		return retval;

	dqm_lock(dqm);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
		   (limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
			retval = false;
			goto out;
		}

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	retval = dqm->asic_ops.set_cache_memory_policy(
			dqm,
			qpd,
			default_policy,
			alternate_policy,
			alternate_aperture_base,
			alternate_aperture_size);

	if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

out:
	dqm_unlock(dqm);
	return retval;
}
static int set_trap_handler(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				uint64_t tba_addr,
				uint64_t tma_addr)
{
	uint64_t *tma;

	if (dqm->dev->cwsr_enabled) {
		/* Jump from CWSR trap handler to user trap */
		tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
		tma[0] = tba_addr;
		tma[1] = tma_addr;
	} else {
		qpd->tba_addr = tba_addr;
		qpd->tma_addr = tma_addr;
	}

	return 0;
}
static int process_termination_nocpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	struct queue *q, *next;
	struct device_process_node *cur, *next_dpn;
	int retval = 0;
	bool found = false;

	dqm_lock(dqm);

	/* Clear all user mode queues */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		int ret;

		ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
		if (ret)
			retval = ret;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	return retval;
}
static int get_wave_state(struct device_queue_manager *dqm,
			  struct queue *q,
			  void __user *ctl_stack,
			  u32 *ctl_stack_used_size,
			  u32 *save_area_used_size)
{
	struct mqd_manager *mqd_mgr;
	int r;

	dqm_lock(dqm);

	if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
	    q->properties.is_active || !q->device->cwsr_enabled) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_COMPUTE];

	if (!mqd_mgr->get_wave_state) {
		r = -EINVAL;
		goto dqm_unlock;
	}

	r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
			ctl_stack_used_size, save_area_used_size);

dqm_unlock:
	dqm_unlock(dqm);
	return r;
}
static int process_termination_cpsch(struct device_queue_manager *dqm,
		struct qcm_process_device *qpd)
{
	int retval;
	struct queue *q, *next;
	struct kernel_queue *kq, *kq_next;
	struct mqd_manager *mqd_mgr;
	struct device_process_node *cur, *next_dpn;
	enum kfd_unmap_queues_filter filter =
		KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
	bool found = false;

	retval = 0;

	dqm_lock(dqm);

	/* Clean all kernel queues */
	list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
		list_del(&kq->list);
		dqm->queue_count--;
		qpd->is_debug = false;
		dqm->total_queue_count--;
		filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
	}

	/* Clear all user mode queues */
	list_for_each_entry(q, &qpd->queues_list, list) {
		if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
			dqm->sdma_queue_count--;
			deallocate_sdma_queue(dqm, q);
		} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
			dqm->xgmi_sdma_queue_count--;
			deallocate_sdma_queue(dqm, q);
		}

		if (q->properties.is_active)
			dqm->queue_count--;

		dqm->total_queue_count--;
	}

	/* Unregister process */
	list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			found = true;
			break;
		}
	}

	retval = execute_queues_cpsch(dqm, filter, 0);
	if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
		pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
		dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
		qpd->reset_wavefronts = false;
	}

	dqm_unlock(dqm);

	/* Outside the DQM lock because under the DQM lock we can't do
	 * reclaim or take other locks that others hold while reclaiming.
	 */
	if (found)
		kfd_dec_compute_active(dqm->dev);

	/* Lastly, free mqd resources.
	 * Do free_mqd() after dqm_unlock to avoid circular locking.
	 */
	list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
		mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
				q->properties.type)];
		list_del(&q->list);
		qpd->queue_count--;
		mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
	}

	return retval;
}
static int init_mqd_managers(struct device_queue_manager *dqm)
{
	int i, j;
	struct mqd_manager *mqd_mgr;

	for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
		mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
		if (!mqd_mgr) {
			pr_err("mqd manager [%d] initialization failed\n", i);
			goto out_free;
		}
		dqm->mqd_mgrs[i] = mqd_mgr;
	}

	return 0;

out_free:
	for (j = 0; j < i; j++) {
		kfree(dqm->mqd_mgrs[j]);
		dqm->mqd_mgrs[j] = NULL;
	}

	return -ENOMEM;
}
/* Allocate one HIQ MQD (for HWS) and all SDMA MQDs in one contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
{
	int retval;
	struct kfd_dev *dev = dqm->dev;
	struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
	uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
		dev->device_info->num_sdma_engines *
		dev->device_info->num_sdma_queues_per_engine +
		dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;

	retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
		&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
		(void *)&(mem_obj->cpu_ptr), true);

	return retval;
}
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	pr_debug("Loading device queue manager\n");

	dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
	if (!dqm)
		return NULL;

	switch (dev->device_info->asic_family) {
	/* HWS is not available on Hawaii. */
	case CHIP_HAWAII:
	/* HWS depends on CWSR for timely dequeue. CWSR is not
	 * available on Tonga.
	 *
	 * FIXME: This argument also applies to Kaveri.
	 */
	case CHIP_TONGA:
		dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
		break;
	default:
		dqm->sched_policy = sched_policy;
		break;
	}

	dqm->dev = dev;
	switch (dqm->sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->ops.create_queue = create_queue_cpsch;
		dqm->ops.initialize = initialize_cpsch;
		dqm->ops.start = start_cpsch;
		dqm->ops.stop = stop_cpsch;
		dqm->ops.destroy_queue = destroy_queue_cpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
		dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_cpsch;
		dqm->ops.evict_process_queues = evict_process_queues_cpsch;
		dqm->ops.restore_process_queues = restore_process_queues_cpsch;
		dqm->ops.get_wave_state = get_wave_state;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->ops.start = start_nocpsch;
		dqm->ops.stop = stop_nocpsch;
		dqm->ops.create_queue = create_queue_nocpsch;
		dqm->ops.destroy_queue = destroy_queue_nocpsch;
		dqm->ops.update_queue = update_queue;
		dqm->ops.register_process = register_process;
		dqm->ops.unregister_process = unregister_process;
		dqm->ops.initialize = initialize_nocpsch;
		dqm->ops.uninitialize = uninitialize;
		dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
		dqm->ops.set_trap_handler = set_trap_handler;
		dqm->ops.process_termination = process_termination_nocpsch;
		dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
		dqm->ops.restore_process_queues =
			restore_process_queues_nocpsch;
		dqm->ops.get_wave_state = get_wave_state;
		break;
	default:
		pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
		goto out_free;
	}

	switch (dev->device_info->asic_family) {
	case CHIP_CARRIZO:
		device_queue_manager_init_vi(&dqm->asic_ops);
		break;

	case CHIP_KAVERI:
		device_queue_manager_init_cik(&dqm->asic_ops);
		break;

	case CHIP_HAWAII:
		device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
		break;

	case CHIP_TONGA:
	case CHIP_FIJI:
	case CHIP_POLARIS10:
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		device_queue_manager_init_vi_tonga(&dqm->asic_ops);
		break;

	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_RAVEN:
	case CHIP_ARCTURUS:
		device_queue_manager_init_v9(&dqm->asic_ops);
		break;
	case CHIP_NAVI10:
		device_queue_manager_init_v10_navi10(&dqm->asic_ops);
		break;
	default:
		WARN(1, "Unexpected ASIC family %u",
		     dev->device_info->asic_family);
		goto out_free;
	}

	if (init_mqd_managers(dqm))
		goto out_free;

	if (allocate_hiq_sdma_mqd(dqm)) {
		pr_err("Failed to allocate hiq sdma mqd chunk buffer\n");
		goto out_free;
	}

	if (!dqm->ops.initialize(dqm))
		return dqm;

out_free:
	kfree(dqm);
	return NULL;
}
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
				    struct kfd_mem_obj *mqd)
{
	WARN(!mqd, "No hiq sdma mqd chunk to free");

	amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	dqm->ops.uninitialize(dqm);
	deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
	kfree(dqm);
}
int kfd_process_vm_fault(struct device_queue_manager *dqm,
			 unsigned int pasid)
{
	struct kfd_process_device *pdd;
	struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
	int ret = 0;

	if (!p)
		return -EINVAL;
	pdd = kfd_get_process_device_data(dqm->dev, p);
	if (pdd)
		ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
	kfd_unref_process(p);

	return ret;
}

static void kfd_process_hw_exception(struct work_struct *work)
{
	struct device_queue_manager *dqm = container_of(work,
			struct device_queue_manager, hw_exception_work);
	amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
}
#if defined(CONFIG_DEBUG_FS)

static void seq_reg_dump(struct seq_file *m,
			 uint32_t (*dump)[2], uint32_t n_regs)
{
	uint32_t i, count;

	for (i = 0, count = 0; i < n_regs; i++) {
		if (count == 0 ||
		    dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
			seq_printf(m, "%s    %08x: %08x",
				   i ? "\n" : "",
				   dump[i][0], dump[i][1]);
			count = 7;
		} else {
			seq_printf(m, " %08x", dump[i][1]);
			count--;
		}
	}

	seq_puts(m, "\n");
}
int dqm_debugfs_hqds(struct seq_file *m, void *data)
{
	struct device_queue_manager *dqm = data;
	uint32_t (*dump)[2], n_regs;
	int pipe, queue;
	int r = 0;

	r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
		KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE, &dump, &n_regs);
	if (!r) {
		seq_printf(m, "  HIQ on MEC %d Pipe %d Queue %d\n",
				KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
				KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
				KFD_CIK_HIQ_QUEUE);
		seq_reg_dump(m, dump, n_regs);

		kfree(dump);
	}

	for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
		int pipe_offset = pipe * get_queues_per_pipe(dqm);

		for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
			if (!test_bit(pipe_offset + queue,
				      dqm->dev->shared_resources.queue_bitmap))
				continue;

			r = dqm->dev->kfd2kgd->hqd_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  CP Pipe %d, Queue %d\n",
					pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
		for (queue = 0;
		     queue < dqm->dev->device_info->num_sdma_queues_per_engine;
		     queue++) {
			r = dqm->dev->kfd2kgd->hqd_sdma_dump(
				dqm->dev->kgd, pipe, queue, &dump, &n_regs);
			if (r)
				break;

			seq_printf(m, "  SDMA Engine %d, RLC %d\n",
					pipe, queue);
			seq_reg_dump(m, dump, n_regs);

			kfree(dump);
		}
	}

	return r;
}
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
{
	int r = 0;

	dqm_lock(dqm);
	dqm->active_runlist = true;
	r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
	dqm_unlock(dqm);

	return r;
}

#endif