* Copyright 2014 Advanced Micro Devices, Inc.
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
* OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
* ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
* OTHER DEALINGS IN THE SOFTWARE.
#include <linux/ratelimit.h>
#include <linux/printk.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/sched.h>
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "kfd_kernel_queue.h"
#include "amdgpu_amdkfd.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
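/* Note: with CIK_HPD_EOP_BYTES_LOG2 == 11, each pipe gets a
 * 1 << 11 = 2048-byte HPD EOP buffer.
 */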
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
unsigned int pasid, unsigned int vmid);
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param);
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param);
static int map_queues_cpsch(struct device_queue_manager *dqm);
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
static inline void deallocate_hqd(struct device_queue_manager *dqm,
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q);
static int allocate_sdma_queue(struct device_queue_manager *dqm,
static void kfd_process_hw_exception(struct work_struct *work);
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
if (type == KFD_QUEUE_TYPE_SDMA || type == KFD_QUEUE_TYPE_SDMA_XGMI)
return KFD_MQD_TYPE_SDMA;
return KFD_MQD_TYPE_CP;
static bool is_pipe_enabled(struct device_queue_manager *dqm, int mec, int pipe)
int pipe_offset = mec * dqm->dev->shared_resources.num_pipe_per_mec
+ pipe * dqm->dev->shared_resources.num_queue_per_pipe;
/* queue is available for KFD usage if bit is 1 */
for (i = 0; i < dqm->dev->shared_resources.num_queue_per_pipe; ++i)
if (test_bit(pipe_offset + i,
dqm->dev->shared_resources.cp_queue_bitmap))
unsigned int get_cp_queues_num(struct device_queue_manager *dqm)
return bitmap_weight(dqm->dev->shared_resources.cp_queue_bitmap,
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_queue_per_pipe;
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_sdma_engines;
static unsigned int get_num_xgmi_sdma_engines(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines;
static unsigned int get_num_all_sdma_engines(struct device_queue_manager *dqm)
return get_num_sdma_engines(dqm) + get_num_xgmi_sdma_engines(dqm);
unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_sdma_engines
* dqm->dev->device_info->num_sdma_queues_per_engine;
unsigned int get_num_xgmi_sdma_queues(struct device_queue_manager *dqm)
return dqm->dev->device_info->num_xgmi_sdma_engines
* dqm->dev->device_info->num_sdma_queues_per_engine;
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
return dqm->dev->kfd2kgd->program_sh_mem_settings(
dqm->dev->kgd, qpd->vmid,
qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit,
void increment_queue_count(struct device_queue_manager *dqm,
enum kfd_queue_type type)
dqm->active_queue_count++;
if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count++;
void decrement_queue_count(struct device_queue_manager *dqm,
enum kfd_queue_type type)
dqm->active_queue_count--;
if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
dqm->active_cp_queue_count--;
static int allocate_doorbell(struct qcm_process_device *qpd, struct queue *q)
struct kfd_dev *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev->device_info->asic_family)) {
/* On pre-SOC15 chips we need to use the queue ID to
* preserve the user mode ABI.
q->doorbell_id = q->properties.queue_id;
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
/* For SDMA queues on SOC15 with 8-byte doorbell, use static
* doorbell assignments based on the engine and queue id.
* The doorbell index distance between RLC (2*i) and (2*i+1)
* for an SDMA engine is 512.
uint32_t *idx_offset =
dev->shared_resources.sdma_doorbell_idx;
q->doorbell_id = idx_offset[q->properties.sdma_engine_id]
+ (q->properties.sdma_queue_id & 1)
* KFD_QUEUE_DOORBELL_MIRROR_OFFSET
+ (q->properties.sdma_queue_id >> 1);
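/* Illustration only (the idx_offset values come from amdgpu and differ
 * per ASIC): with a hypothetical idx_offset[engine] == 0x10 and
 * sdma_queue_id == 3, the result is
 * doorbell_id = 0x10 + 1 * KFD_QUEUE_DOORBELL_MIRROR_OFFSET + 1,
 * i.e. odd RLC queues sit 512 doorbell slots above the even ones.
 */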
/* For CP queues on SOC15 reserve a free doorbell ID */
found = find_first_zero_bit(qpd->doorbell_bitmap,
KFD_MAX_NUM_OF_QUEUES_PER_PROCESS);
if (found >= KFD_MAX_NUM_OF_QUEUES_PER_PROCESS) {
pr_debug("No doorbells available");
set_bit(found, qpd->doorbell_bitmap);
q->doorbell_id = found;
q->properties.doorbell_off =
kfd_get_doorbell_dw_offset_in_bar(dev, q->process,
static void deallocate_doorbell(struct qcm_process_device *qpd,
struct kfd_dev *dev = qpd->dqm->dev;
if (!KFD_IS_SOC15(dev->device_info->asic_family) ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
old = test_and_clear_bit(q->doorbell_id, qpd->doorbell_bitmap);
static int allocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
int allocated_vmid = -1, i;
for (i = dqm->dev->vm_info.first_vmid_kfd;
i <= dqm->dev->vm_info.last_vmid_kfd; i++) {
if (!dqm->vmid_pasid[i]) {
if (allocated_vmid < 0) {
pr_err("no more vmid to allocate\n");
pr_debug("vmid allocated: %d\n", allocated_vmid);
dqm->vmid_pasid[allocated_vmid] = q->process->pasid;
set_pasid_vmid_mapping(dqm, q->process->pasid, allocated_vmid);
qpd->vmid = allocated_vmid;
q->properties.vmid = allocated_vmid;
program_sh_mem_settings(dqm, qpd);
/* qpd->page_table_base is set earlier when register_process()
* is called, i.e. when the first queue is created.
dqm->dev->kfd2kgd->set_vm_context_page_table_base(dqm->dev->kgd,
qpd->page_table_base);
/* invalidate the VM context after pasid and vmid mapping is set up */
kfd_flush_tlb(qpd_to_pdd(qpd));
if (dqm->dev->kfd2kgd->set_scratch_backing_va)
dqm->dev->kfd2kgd->set_scratch_backing_va(dqm->dev->kgd,
qpd->sh_hidden_private_base, qpd->vmid);
static int flush_texture_cache_nocpsch(struct kfd_dev *kdev,
struct qcm_process_device *qpd)
const struct packet_manager_funcs *pmf = qpd->dqm->packets.pmf;
ret = pmf->release_mem(qpd->ib_base, (uint32_t *)qpd->ib_kaddr);
return amdgpu_amdkfd_submit_ib(kdev->kgd, KGD_ENGINE_MEC1, qpd->vmid,
qpd->ib_base, (uint32_t *)qpd->ib_kaddr,
pmf->release_mem_size / sizeof(uint32_t));
static void deallocate_vmid(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
/* On GFX v7, CP doesn't flush TC at dequeue */
if (q->device->device_info->asic_family == CHIP_HAWAII)
if (flush_texture_cache_nocpsch(q->device, qpd))
pr_err("Failed to flush TC\n");
kfd_flush_tlb(qpd_to_pdd(qpd));
/* Release the vmid mapping */
set_pasid_vmid_mapping(dqm, 0, qpd->vmid);
dqm->vmid_pasid[qpd->vmid] = 0;
q->properties.vmid = 0;
static int create_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct mqd_manager *mqd_mgr;
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
if (list_empty(&qpd->queues_list)) {
retval = allocate_vmid(dqm, qpd, q);
q->properties.vmid = qpd->vmid;
* Eviction state logic: mark all queues as evicted, even ones
* not currently active. Restoring inactive queues later only
* updates the is_evicted flag but is a no-op otherwise.
q->properties.is_evicted = !!qpd->evicted;
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
retval = allocate_hqd(dqm, q);
goto deallocate_vmid;
pr_debug("Loading mqd to hqd on pipe %d, queue %d\n",
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
retval = allocate_sdma_queue(dqm, q);
goto deallocate_vmid;
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
retval = allocate_doorbell(qpd, q);
goto out_deallocate_hqd;
/* Temporarily release dqm lock to avoid a circular lock dependency */
q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
if (!q->mqd_mem_obj) {
goto out_deallocate_doorbell;
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (q->properties.is_active) {
if (!dqm->sched_running) {
WARN_ONCE(1, "Load non-HWS mqd while stopped\n");
goto add_queue_to_list;
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, current->mm);
list_add(&q->list, &qpd->queues_list);
if (q->properties.is_active)
increment_queue_count(dqm, q->properties.type);
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
if (list_empty(&qpd->queues_list))
deallocate_vmid(dqm, qpd, q);
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
for (pipe = dqm->next_pipe_to_allocate, i = 0;
i < get_pipes_per_mec(dqm);
pipe = ((pipe + 1) % get_pipes_per_mec(dqm)), ++i) {
if (!is_pipe_enabled(dqm, 0, pipe))
if (dqm->allocated_queues[pipe] != 0) {
bit = ffs(dqm->allocated_queues[pipe]) - 1;
dqm->allocated_queues[pipe] &= ~(1 << bit);
pr_debug("hqd slot - pipe %d, queue %d\n", q->pipe, q->queue);
/* horizontal hqd allocation */
dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_per_mec(dqm);
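/* Sketch of the round-robin above (assuming 4 pipes per MEC, all of
 * them enabled and each allocation succeeding): successive calls start
 * their scan at pipes 0, 1, 2, 3, 0, ... so HQDs are spread
 * "horizontally" across pipes before any pipe receives a second queue.
 */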
static inline void deallocate_hqd(struct device_queue_manager *dqm,
dqm->allocated_queues[q->pipe] |= (1 << q->queue);
/* Access to DQM has to be locked before calling destroy_queue_nocpsch_locked
 * to avoid unsynchronized access
static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct mqd_manager *mqd_mgr;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE)
deallocate_hqd(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
pr_debug("q->properties.type %d is invalid\n",
dqm->total_queue_count--;
deallocate_doorbell(qpd, q);
if (!dqm->sched_running) {
WARN_ONCE(1, "Destroy non-HWS queue while stopped\n");
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
if (retval == -ETIME)
qpd->reset_wavefronts = true;
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
if (list_empty(&qpd->queues_list)) {
if (qpd->reset_wavefronts) {
pr_warn("Resetting wave fronts (nocpsch) on dev %p\n",
/* dbgdev_wave_reset_wavefronts has to be called before
* deallocate_vmid(), i.e. when vmid is still in use.
dbgdev_wave_reset_wavefronts(dqm->dev,
qpd->reset_wavefronts = false;
deallocate_vmid(dqm, qpd, q);
if (q->properties.is_active)
decrement_queue_count(dqm, q->properties.type);
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
pdd = kfd_get_process_device_data(q->device, q->process);
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
/* Save previous activity state for counters */
prev_active = q->properties.is_active;
/* Make sure the queue is unmapped before updating the MQD */
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS) {
retval = unmap_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
pr_err("unmap queue failed\n");
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
if (!dqm->sched_running) {
WARN_ONCE(1, "Update non-HWS queue while stopped\n");
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
pr_err("destroy mqd failed\n");
mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
* check active state vs. the previous state and modify
* counter accordingly. map_queues_cpsch uses the
* dqm->active_queue_count to determine whether a new runlist must be
if (q->properties.is_active && !prev_active)
increment_queue_count(dqm, q->properties.type);
else if (!q->properties.is_active && prev_active)
decrement_queue_count(dqm, q->properties.type);
if (dqm->sched_policy != KFD_SCHED_POLICY_NO_HWS)
retval = map_queues_cpsch(dqm);
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)) {
if (WARN(q->process->mm != current->mm,
"should only run in user thread"))
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd,
&q->properties, current->mm);
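/* Summary of the update path above: quiesce the queue first (unmap the
 * runlist under HWS, or destroy_mqd for an active non-HWS queue), call
 * update_mqd with the new properties, adjust the active-queue counters
 * for any activity change, then re-map the runlist (HWS) or load_mqd
 * again in the owning user thread (non-HWS).
 */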
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
if (qpd->evicted++ > 0) /* already evicted, do nothing */
pdd = qpd_to_pdd(qpd);
pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = true;
if (!q->properties.is_active)
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
/* Return the first error, but keep going to
* maintain a consistent eviction state
static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct kfd_process_device *pdd;
if (qpd->evicted++ > 0) /* already evicted, do nothing */
pdd = qpd_to_pdd(qpd);
pr_info_ratelimited("Evicting PASID 0x%x queues\n",
pdd->process->pasid);
/* Mark all queues as evicted. Deactivate all active queues on
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = true;
if (!q->properties.is_active)
q->properties.is_active = false;
decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES :
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
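/* Note: qpd->evicted acts as a reference count; only the 0 -> 1
 * transition actually deactivates queues, and a later restore only
 * reactivates them once the count drops back to 0. Under HWS the
 * deactivation takes effect when execute_queues_cpsch() rebuilds the
 * runlist with the unmap filter selected above.
 */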
static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct mm_struct *mm = NULL;
struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
pr_debug("Updated PD address to 0x%llx\n", pd_base);
if (!list_empty(&qpd->queues_list)) {
dqm->dev->kfd2kgd->set_vm_context_page_table_base(
qpd->page_table_base);
/* Take a safe reference to the mm_struct, which may otherwise
* disappear even while the kfd_process is still referenced.
mm = get_task_mm(pdd->process->lead_thread);
/* Remove the eviction flags. Activate queues that are not
* inactive for other reasons.
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = false;
if (!QUEUE_IS_ACTIVE(q->properties))
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties, mm);
/* Return the first error, but keep going to
* maintain a consistent eviction state
static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct kfd_process_device *pdd;
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
pr_info_ratelimited("Restoring PASID 0x%x queues\n",
pdd->process->pasid);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
pr_debug("Updated PD address to 0x%llx\n", pd_base);
/* activate all active queues on the qpd */
list_for_each_entry(q, &qpd->queues_list, list) {
q->properties.is_evicted = false;
if (!QUEUE_IS_ACTIVE(q->properties))
q->properties.is_active = true;
increment_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
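/* Restore is the mirror of evict: it only acts once qpd->evicted drops
 * back to 0, refreshes the page-directory base cached in the QPD,
 * clears is_evicted on every queue, bumps the active counters for
 * queues that are otherwise runnable, and finally rebuilds the runlist.
 */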
static int register_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct device_process_node *n;
struct kfd_process_device *pdd;
n = kzalloc(sizeof(*n), GFP_KERNEL);
pdd = qpd_to_pdd(qpd);
/* Retrieve PD base */
pd_base = amdgpu_amdkfd_gpuvm_get_process_page_dir(pdd->vm);
list_add(&n->list, &dqm->queues);
/* Update PD Base in QPD */
qpd->page_table_base = pd_base;
pr_debug("Updated PD address to 0x%llx\n", pd_base);
retval = dqm->asic_ops.update_qpd(dqm, qpd);
dqm->processes_count++;
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
kfd_inc_compute_active(dqm->dev);
static int unregister_process(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct device_process_node *cur, *next;
pr_debug("qpd->queues_list is %s\n",
list_empty(&qpd->queues_list) ? "empty" : "not empty");
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
dqm->processes_count--;
/* qpd not found in dqm list */
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
kfd_dec_compute_active(dqm->dev);
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
return dqm->dev->kfd2kgd->set_pasid_vmid_mapping(
dqm->dev->kgd, pasid, vmid);
static void init_interrupts(struct device_queue_manager *dqm)
for (i = 0 ; i < get_pipes_per_mec(dqm) ; i++)
if (is_pipe_enabled(dqm, 0, i))
dqm->dev->kfd2kgd->init_interrupts(dqm->dev->kgd, i);
static int initialize_nocpsch(struct device_queue_manager *dqm)
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
dqm->allocated_queues = kcalloc(get_pipes_per_mec(dqm),
sizeof(unsigned int), GFP_KERNEL);
if (!dqm->allocated_queues)
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->next_pipe_to_allocate = 0;
dqm->active_cp_queue_count = 0;
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++)
if (test_bit(pipe_offset + queue,
dqm->dev->shared_resources.cp_queue_bitmap))
dqm->allocated_queues[pipe] |= 1 << queue;
memset(dqm->vmid_pasid, 0, sizeof(dqm->vmid_pasid));
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
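/* The SDMA bitmaps above hold one bit per queue. For illustration, a
 * device with 2 SDMA engines and 8 queues per engine has
 * get_num_sdma_queues() == 16, so sdma_bitmap = ~0ULL >> 48 = 0xFFFF,
 * i.e. queues 0..15 start out free.
 */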
static void uninitialize(struct device_queue_manager *dqm)
WARN_ON(dqm->active_queue_count > 0 || dqm->processes_count > 0);
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
kfree(dqm->mqd_mgrs[i]);
mutex_destroy(&dqm->lock_hidden);
static int start_nocpsch(struct device_queue_manager *dqm)
pr_info("SW scheduler is used");
init_interrupts(dqm);
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
return pm_init(&dqm->packets, dqm);
dqm->sched_running = true;
static int stop_nocpsch(struct device_queue_manager *dqm)
if (dqm->dev->device_info->asic_family == CHIP_HAWAII)
pm_uninit(&dqm->packets, false);
dqm->sched_running = false;
static void pre_reset(struct device_queue_manager *dqm)
dqm->is_resetting = true;
static int allocate_sdma_queue(struct device_queue_manager *dqm,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (dqm->sdma_bitmap == 0) {
pr_err("No more SDMA queue to allocate\n");
bit = __ffs64(dqm->sdma_bitmap);
dqm->sdma_bitmap &= ~(1ULL << bit);
q->properties.sdma_engine_id = q->sdma_id %
get_num_sdma_engines(dqm);
q->properties.sdma_queue_id = q->sdma_id /
get_num_sdma_engines(dqm);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (dqm->xgmi_sdma_bitmap == 0) {
pr_err("No more XGMI SDMA queue to allocate\n");
bit = __ffs64(dqm->xgmi_sdma_bitmap);
dqm->xgmi_sdma_bitmap &= ~(1ULL << bit);
/* sdma_engine_id is sdma id including
* both PCIe-optimized SDMAs and XGMI-
* optimized SDMAs. The calculation below
* assumes the first N engines are always
* PCIe-optimized ones
q->properties.sdma_engine_id = get_num_sdma_engines(dqm) +
q->sdma_id % get_num_xgmi_sdma_engines(dqm);
q->properties.sdma_queue_id = q->sdma_id /
get_num_xgmi_sdma_engines(dqm);
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
pr_debug("SDMA queue id: %d\n", q->properties.sdma_queue_id);
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
if (q->properties.type == KFD_QUEUE_TYPE_SDMA) {
if (q->sdma_id >= get_num_sdma_queues(dqm))
dqm->sdma_bitmap |= (1ULL << q->sdma_id);
} else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
if (q->sdma_id >= get_num_xgmi_sdma_queues(dqm))
dqm->xgmi_sdma_bitmap |= (1ULL << q->sdma_id);
* Device Queue Manager implementation for cp scheduler
static int set_sched_resources(struct device_queue_manager *dqm)
struct scheduling_resources res;
res.vmid_mask = dqm->dev->shared_resources.compute_vmid_bitmap;
for (i = 0; i < KGD_MAX_QUEUES; ++i) {
mec = (i / dqm->dev->shared_resources.num_queue_per_pipe)
/ dqm->dev->shared_resources.num_pipe_per_mec;
if (!test_bit(i, dqm->dev->shared_resources.cp_queue_bitmap))
/* only acquire queues from the first MEC */
/* This situation may be hit in the future if a new HW
* generation exposes more than 64 queues. If so, the
* definition of res.queue_mask needs updating
if (WARN_ON(i >= (sizeof(res.queue_mask)*8))) {
pr_err("Invalid queue enabled by amdgpu: %d\n", i);
res.queue_mask |= (1ull << i);
res.gws_mask = ~0ull;
res.oac_mask = res.gds_heap_base = res.gds_heap_size = 0;
pr_debug("Scheduling resources:\n"
"vmid mask: 0x%8X\n"
"queue mask: 0x%8llX\n",
res.vmid_mask, res.queue_mask);
return pm_send_set_resources(&dqm->packets, &res);
static int initialize_cpsch(struct device_queue_manager *dqm)
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->active_queue_count = dqm->processes_count = 0;
dqm->active_cp_queue_count = 0;
dqm->active_runlist = false;
dqm->sdma_bitmap = ~0ULL >> (64 - get_num_sdma_queues(dqm));
dqm->xgmi_sdma_bitmap = ~0ULL >> (64 - get_num_xgmi_sdma_queues(dqm));
INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
static int start_cpsch(struct device_queue_manager *dqm)
retval = pm_init(&dqm->packets, dqm);
goto fail_packet_manager_init;
retval = set_sched_resources(dqm);
goto fail_set_sched_resources;
pr_debug("Allocating fence memory\n");
/* allocate fence memory on the gart */
retval = kfd_gtt_sa_allocate(dqm->dev, sizeof(*dqm->fence_addr),
goto fail_allocate_vidmem;
dqm->fence_addr = dqm->fence_mem->cpu_ptr;
dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;
init_interrupts(dqm);
/* clear hang status when the driver tries to start the hw scheduler */
dqm->is_hws_hang = false;
dqm->is_resetting = false;
dqm->sched_running = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
fail_allocate_vidmem:
fail_set_sched_resources:
pm_uninit(&dqm->packets, false);
fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
if (!dqm->is_hws_hang)
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
hanging = dqm->is_hws_hang || dqm->is_resetting;
dqm->sched_running = false;
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets, hanging);
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
list_add(&kq->list, &qpd->priv_queue_list);
increment_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
list_del(&kq->list);
decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
* Unconditionally decrement this counter, regardless of the queue's
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
struct mqd_manager *mqd_mgr;
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
dqm->total_queue_count);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
retval = allocate_sdma_queue(dqm, q);
retval = allocate_doorbell(qpd, q);
goto out_deallocate_sdma_queue;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
q->mqd_mem_obj = mqd_mgr->allocate_mqd(mqd_mgr->dev, &q->properties);
if (!q->mqd_mem_obj) {
goto out_deallocate_doorbell;
* Eviction state logic: mark all queues as evicted, even ones
* not currently active. Restoring inactive queues later only
* updates the is_evicted flag but is a no-op otherwise.
q->properties.is_evicted = !!qpd->evicted;
mqd_mgr->init_mqd(mqd_mgr, &q->mqd, q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
list_add(&q->list, &qpd->queues_list);
if (q->properties.is_active) {
increment_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
* Unconditionally increment this counter, regardless of the queue's
* type or whether the queue is active.
dqm->total_queue_count++;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA ||
q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI) {
deallocate_sdma_queue(dqm, q);
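/* Typical use of the fence wait below (see unmap_queues_cpsch()): the
 * caller writes KFD_FENCE_INIT to the fence slot, asks the HWS firmware
 * to write KFD_FENCE_COMPLETED via pm_send_query_status(), then
 * busy-waits here for up to queue_preemption_timeout_ms.
 */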
int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
unsigned int fence_value,
unsigned int timeout_ms)
unsigned long end_jiffies = msecs_to_jiffies(timeout_ms) + jiffies;
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
/* In HWS case, this is used to halt the driver thread
* in order not to mess up CP states before doing
* scandumps for FW debugging.
while (halt_if_hws_hang)
/* dqm->lock mutex has to be locked before calling this function */
static int map_queues_cpsch(struct device_queue_manager *dqm)
if (!dqm->sched_running)
if (dqm->active_queue_count <= 0 || dqm->processes_count <= 0)
if (dqm->active_runlist)
retval = pm_send_runlist(&dqm->packets, &dqm->queues);
pr_debug("%s sent runlist\n", __func__);
pr_err("failed to execute runlist\n");
dqm->active_runlist = true;
/* dqm->lock mutex has to be locked before calling this function */
static int unmap_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param)
if (!dqm->sched_running)
if (dqm->is_hws_hang)
if (!dqm->active_runlist)
retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
filter, filter_param, false, 0);
*dqm->fence_addr = KFD_FENCE_INIT;
pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
KFD_FENCE_COMPLETED);
/* should be timed out */
retval = amdkfd_fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
queue_preemption_timeout_ms);
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
dqm->is_hws_hang = true;
/* It's possible we're detecting a HWS hang in the
* middle of a GPU reset. No need to schedule another
* reset in this case.
if (!dqm->is_resetting)
schedule_work(&dqm->hw_exception_work);
pm_release_ib(&dqm->packets);
dqm->active_runlist = false;
/* dqm->lock mutex has to be locked before calling this function */
static int execute_queues_cpsch(struct device_queue_manager *dqm,
enum kfd_unmap_queues_filter filter,
uint32_t filter_param)
if (dqm->is_hws_hang)
retval = unmap_queues_cpsch(dqm, filter, filter_param);
return map_queues_cpsch(dqm);
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
struct mqd_manager *mqd_mgr;
/* remove queue from list to prevent rescheduling after preemption */
if (qpd->is_debug) {
* error: currently we do not allow destroying a queue
* of a process that is being debugged
goto failed_try_destroy_debugged_queue;
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
deallocate_doorbell(qpd, q);
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
if (q->properties.is_active) {
decrement_queue_count(dqm, q->properties.type);
retval = execute_queues_cpsch(dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
* Unconditionally decrement this counter, regardless of the queue's
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
/* Do free_mqd after dqm_unlock(dqm) to avoid circular locking */
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
failed_try_destroy_debugged_queue:
* Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
* stay in user mode.
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
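/* Worked example (hypothetical addresses): base = 0x100000000 and
 * size = 0x10000 give limit = 0x10000FFFF. Both satisfy
 * APE1_FIXED_BITS_MASK (low 16 bits of base are 0, low 16 bits of the
 * inclusive limit are 0xFFFF, bits 63..47 are clear), so
 * set_cache_memory_policy() below would program
 * sh_mem_ape1_base = 0x10000 and sh_mem_ape1_limit = 0x10000
 * after the >> 16 conversion.
 */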
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
enum cache_policy default_policy,
enum cache_policy alternate_policy,
void __user *alternate_aperture_base,
uint64_t alternate_aperture_size)
if (!dqm->asic_ops.set_cache_memory_policy)
if (alternate_aperture_size == 0) {
/* base > limit disables APE1 */
qpd->sh_mem_ape1_base = 1;
qpd->sh_mem_ape1_limit = 0;
* In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
* SH_MEM_APE1_BASE[31:0], 0x0000 }
* APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
* SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
* Verify that the base and size parameters can be
* represented in this format and convert them.
* Additionally restrict APE1 to user-mode addresses.
uint64_t base = (uintptr_t)alternate_aperture_base;
uint64_t limit = base + alternate_aperture_size - 1;
if (limit <= base || (base & APE1_FIXED_BITS_MASK) != 0 ||
(limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT) {
qpd->sh_mem_ape1_base = base >> 16;
qpd->sh_mem_ape1_limit = limit >> 16;
retval = dqm->asic_ops.set_cache_memory_policy(
alternate_aperture_base,
alternate_aperture_size);
if ((dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
program_sh_mem_settings(dqm, qpd);
pr_debug("sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
qpd->sh_mem_config, qpd->sh_mem_ape1_base,
qpd->sh_mem_ape1_limit);
static int set_trap_handler(struct device_queue_manager *dqm,
struct qcm_process_device *qpd,
if (dqm->dev->cwsr_enabled) {
/* Jump from CWSR trap handler to user trap */
tma = (uint64_t *)(qpd->cwsr_kaddr + KFD_CWSR_TMA_OFFSET);
qpd->tba_addr = tba_addr;
qpd->tma_addr = tma_addr;
static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct queue *q, *next;
struct device_process_node *cur, *next_dpn;
/* Clear all user mode queues */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
ret = destroy_queue_nocpsch_locked(dqm, qpd, q);
/* Unregister process */
list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
dqm->processes_count--;
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
kfd_dec_compute_active(dqm->dev);
static int get_wave_state(struct device_queue_manager *dqm,
void __user *ctl_stack,
u32 *ctl_stack_used_size,
u32 *save_area_used_size)
struct mqd_manager *mqd_mgr;
if (q->properties.type != KFD_QUEUE_TYPE_COMPUTE ||
q->properties.is_active || !q->device->cwsr_enabled) {
mqd_mgr = dqm->mqd_mgrs[KFD_MQD_TYPE_CP];
if (!mqd_mgr->get_wave_state) {
r = mqd_mgr->get_wave_state(mqd_mgr, q->mqd, ctl_stack,
ctl_stack_used_size, save_area_used_size);
static int process_termination_cpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
struct queue *q, *next;
struct kernel_queue *kq, *kq_next;
struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
list_del(&kq->list);
decrement_queue_count(dqm, kq->queue->properties.type);
qpd->is_debug = false;
dqm->total_queue_count--;
filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
/* Clear all user mode queues */
list_for_each_entry(q, &qpd->queues_list, list) {
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q);
else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
deallocate_sdma_queue(dqm, q);
if (q->properties.is_active)
decrement_queue_count(dqm, q->properties.type);
dqm->total_queue_count--;
/* Unregister process */
list_for_each_entry_safe(cur, next_dpn, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
dqm->processes_count--;
retval = execute_queues_cpsch(dqm, filter, 0);
if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
/* Outside the DQM lock because under the DQM lock we can't do
* reclaim or take other locks that others hold while reclaiming.
kfd_dec_compute_active(dqm->dev);
/* Lastly, free mqd resources.
* Do free_mqd() after dqm_unlock to avoid circular locking.
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
q->properties.type)];
mqd_mgr->free_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
static int init_mqd_managers(struct device_queue_manager *dqm)
struct mqd_manager *mqd_mgr;
for (i = 0; i < KFD_MQD_TYPE_MAX; i++) {
mqd_mgr = dqm->asic_ops.mqd_manager_init(i, dqm->dev);
pr_err("mqd manager [%d] initialization failed\n", i);
dqm->mqd_mgrs[i] = mqd_mgr;
for (j = 0; j < i; j++) {
kfree(dqm->mqd_mgrs[j]);
dqm->mqd_mgrs[j] = NULL;
/* Allocate one hiq mqd (HWS) and all SDMA mqds in one contiguous chunk */
static int allocate_hiq_sdma_mqd(struct device_queue_manager *dqm)
struct kfd_dev *dev = dqm->dev;
struct kfd_mem_obj *mem_obj = &dqm->hiq_sdma_mqd;
uint32_t size = dqm->mqd_mgrs[KFD_MQD_TYPE_SDMA]->mqd_size *
get_num_all_sdma_engines(dqm) *
dev->device_info->num_sdma_queues_per_engine +
dqm->mqd_mgrs[KFD_MQD_TYPE_HIQ]->mqd_size;
retval = amdgpu_amdkfd_alloc_gtt_mem(dev->kgd, size,
&(mem_obj->gtt_mem), &(mem_obj->gpu_addr),
(void *)&(mem_obj->cpu_ptr), false);
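/* Sizing note (illustrative numbers): with 2 SDMA engines, 8 queues per
 * engine and no XGMI engines, the GTT buffer above holds the HIQ MQD
 * plus 16 SDMA MQDs in a single contiguous allocation.
 */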
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
struct device_queue_manager *dqm;
pr_debug("Loading device queue manager\n");
dqm = kzalloc(sizeof(*dqm), GFP_KERNEL);
switch (dev->device_info->asic_family) {
/* HWS is not available on Hawaii. */
/* HWS depends on CWSR for timely dequeue. CWSR is not
* available on Tonga.
* FIXME: This argument also applies to Kaveri.
dqm->sched_policy = KFD_SCHED_POLICY_NO_HWS;
dqm->sched_policy = sched_policy;
switch (dqm->sched_policy) {
case KFD_SCHED_POLICY_HWS:
case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
/* initialize dqm for cp scheduling */
dqm->ops.create_queue = create_queue_cpsch;
dqm->ops.initialize = initialize_cpsch;
dqm->ops.start = start_cpsch;
dqm->ops.stop = stop_cpsch;
dqm->ops.pre_reset = pre_reset;
dqm->ops.destroy_queue = destroy_queue_cpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
dqm->ops.unregister_process = unregister_process;
dqm->ops.uninitialize = uninitialize;
dqm->ops.create_kernel_queue = create_kernel_queue_cpsch;
dqm->ops.destroy_kernel_queue = destroy_kernel_queue_cpsch;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
dqm->ops.set_trap_handler = set_trap_handler;
dqm->ops.process_termination = process_termination_cpsch;
dqm->ops.evict_process_queues = evict_process_queues_cpsch;
dqm->ops.restore_process_queues = restore_process_queues_cpsch;
dqm->ops.get_wave_state = get_wave_state;
case KFD_SCHED_POLICY_NO_HWS:
/* initialize dqm for no cp scheduling */
dqm->ops.start = start_nocpsch;
dqm->ops.stop = stop_nocpsch;
dqm->ops.pre_reset = pre_reset;
dqm->ops.create_queue = create_queue_nocpsch;
dqm->ops.destroy_queue = destroy_queue_nocpsch;
dqm->ops.update_queue = update_queue;
dqm->ops.register_process = register_process;
dqm->ops.unregister_process = unregister_process;
dqm->ops.initialize = initialize_nocpsch;
dqm->ops.uninitialize = uninitialize;
dqm->ops.set_cache_memory_policy = set_cache_memory_policy;
dqm->ops.set_trap_handler = set_trap_handler;
dqm->ops.process_termination = process_termination_nocpsch;
dqm->ops.evict_process_queues = evict_process_queues_nocpsch;
dqm->ops.restore_process_queues =
restore_process_queues_nocpsch;
dqm->ops.get_wave_state = get_wave_state;
pr_err("Invalid scheduling policy %d\n", dqm->sched_policy);
switch (dev->device_info->asic_family) {
device_queue_manager_init_vi(&dqm->asic_ops);
device_queue_manager_init_cik(&dqm->asic_ops);
device_queue_manager_init_cik_hawaii(&dqm->asic_ops);
case CHIP_POLARIS10:
case CHIP_POLARIS11:
case CHIP_POLARIS12:
device_queue_manager_init_vi_tonga(&dqm->asic_ops);
device_queue_manager_init_v9(&dqm->asic_ops);
device_queue_manager_init_v10_navi10(&dqm->asic_ops);
WARN(1, "Unexpected ASIC family %u",
dev->device_info->asic_family);
if (init_mqd_managers(dqm))
if (allocate_hiq_sdma_mqd(dqm)) {
pr_err("Failed to allocate hiq sdma mqd trunk buffer\n");
if (!dqm->ops.initialize(dqm))
static void deallocate_hiq_sdma_mqd(struct kfd_dev *dev,
struct kfd_mem_obj *mqd)
WARN(!mqd, "No hiq sdma mqd trunk to free");
amdgpu_amdkfd_free_gtt_mem(dev->kgd, mqd->gtt_mem);
void device_queue_manager_uninit(struct device_queue_manager *dqm)
dqm->ops.uninitialize(dqm);
deallocate_hiq_sdma_mqd(dqm->dev, &dqm->hiq_sdma_mqd);
int kfd_process_vm_fault(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
pdd = kfd_get_process_device_data(dqm->dev, p);
ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
kfd_unref_process(p);
static void kfd_process_hw_exception(struct work_struct *work)
struct device_queue_manager *dqm = container_of(work,
struct device_queue_manager, hw_exception_work);
amdgpu_amdkfd_gpu_reset(dqm->dev->kgd);
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
uint32_t (*dump)[2], uint32_t n_regs)
for (i = 0, count = 0; i < n_regs; i++) {
dump[i-1][0] + sizeof(uint32_t) != dump[i][0]) {
seq_printf(m, "%s %08x: %08x",
dump[i][0], dump[i][1]);
seq_printf(m, " %08x", dump[i][1]);
int dqm_debugfs_hqds(struct seq_file *m, void *data)
struct device_queue_manager *dqm = data;
uint32_t (*dump)[2], n_regs;
if (!dqm->sched_running) {
seq_printf(m, " Device is stopped\n");
r = dqm->dev->kfd2kgd->hqd_dump(dqm->dev->kgd,
KFD_CIK_HIQ_PIPE, KFD_CIK_HIQ_QUEUE,
seq_printf(m, " HIQ on MEC %d Pipe %d Queue %d\n",
KFD_CIK_HIQ_PIPE/get_pipes_per_mec(dqm)+1,
KFD_CIK_HIQ_PIPE%get_pipes_per_mec(dqm),
seq_reg_dump(m, dump, n_regs);
for (pipe = 0; pipe < get_pipes_per_mec(dqm); pipe++) {
int pipe_offset = pipe * get_queues_per_pipe(dqm);
for (queue = 0; queue < get_queues_per_pipe(dqm); queue++) {
if (!test_bit(pipe_offset + queue,
dqm->dev->shared_resources.cp_queue_bitmap))
r = dqm->dev->kfd2kgd->hqd_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
seq_printf(m, " CP Pipe %d, Queue %d\n",
seq_reg_dump(m, dump, n_regs);
for (pipe = 0; pipe < get_num_all_sdma_engines(dqm); pipe++) {
queue < dqm->dev->device_info->num_sdma_queues_per_engine;
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
seq_printf(m, " SDMA Engine %d, RLC %d\n",
seq_reg_dump(m, dump, n_regs);
int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
dqm->active_runlist = true;
r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);