/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>

#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "cik_regs.h"
#include "kfd_kernel_queue.h"
#include "../../radeon/cik_reg.h"

/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)

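/*
 * For illustration: CIK_HPD_EOP_BYTES = 1 << 11 = 2048, so every compute
 * pipe gets a 2 KB hardware EOP buffer; init_pipelines() below allocates
 * pipes_num of these back to back in a single allocation.
 */
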
static bool is_mem_initialized;

static int init_memory(struct device_queue_manager *dqm);
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);

static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.compute_pipe_count;
}

static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);
	return dqm->dev->shared_resources.first_compute_pipe;
}

static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}

static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	uint32_t nybble;

	nybble = (pdd->lds_base >> 60) & 0x0E;

	return nybble;
}

static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	unsigned int shared_base;

	shared_base = (pdd->lds_base >> 16) & 0xFF;

	return shared_base;
}

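/*
 * Worked example with illustrative aperture values (not taken from a real
 * layout): for pdd->lds_base == 0x6100000000000000ULL the 64-bit helper
 * returns (0x61... >> 60) & 0x0E = 0x6, the even top nybble. For a 32-bit
 * process with pdd->lds_base == 0x00840000 the 32-bit helper returns
 * (0x00840000 >> 16) & 0xFF = 0x84, i.e. bits [23:16] of the LDS base.
 */
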
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);

static void init_process_memory(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	BUG_ON(!dqm || !qpd);

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
			DEFAULT_MTYPE(MTYPE_NONCACHED) |
			APE1_MTYPE(MTYPE_NONCACHED);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (qpd->pqm->process->is_32bit_user_mode) {
		temp = get_sh_mem_bases_32(pdd);
		qpd->sh_mem_bases = SHARED_BASE(temp);
		qpd->sh_mem_config |= PTR32;
	} else {
		temp = get_sh_mem_bases_nybble_64(pdd);
		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
	}

	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
}

static void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}

static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* On Kaveri, KFD VMIDs start at 8, so bit 0 maps to VMID 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}

static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	/* Release the vmid mapping */
	set_pasid_vmid_mapping(dqm, 0, qpd->vmid);

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}

static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	/* the first queue of a process also claims a VMID for it */
	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (retval != 0) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	dqm->queue_count++;
	mutex_unlock(&dqm->lock);
	return 0;
}

static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set = false;
	int pipe, bit, i;

	/* round-robin scan, visiting each pipe at most once */
	for (i = 0, pipe = dqm->next_pipe_to_allocate; i < get_pipes_num(dqm);
			pipe = (pipe + 1) % get_pipes_num(dqm), ++i) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}

	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
				__func__, q->pipe, q->queue);
	/* horizontal hqd allocation: start the next search on the next pipe */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);
	return 0;
}

static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	pr_debug("kfd: loading mqd to hqd on pipe (%d) queue (%d)\n",
			q->pipe, q->queue);

	retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
			q->queue, q->properties.write_ptr);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
		return retval;
	}

	return 0;
}

static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		retval = -ENOMEM;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);
	if (retval != 0)
		goto out;

	deallocate_hqd(dqm, q);
	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	dqm->queue_count--;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;
	bool prev_active = false;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	if (q->properties.is_active)
		prev_active = true;

	/*
	 * check active state vs. the previous state
	 * and modify counter accordingly
	 */
	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if (q->properties.is_active && !prev_active)
		dqm->queue_count++;
	else if (!q->properties.is_active && prev_active)
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}

static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (mqd == NULL) {
		/* instantiate lazily and cache for later calls */
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}

static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	init_process_memory(dqm, qpd);
	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return 0;
}

static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);
	/* a process may unregister only after all its queues are destroyed */
	BUG_ON(!list_empty(&qpd->queues_list));

	pr_debug("kfd: In func %s\n", __func__);

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID_PASID_MAPPING_VALID;
	return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
						vmid);
}

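/*
 * Note the pasid == 0 convention: a zero pasid yields a zero mapping value
 * with the VALID bit clear, which is how deallocate_vmid() and init_memory()
 * revoke a VMID-to-PASID binding.
 */
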
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return PRIVATE_BASE(top_address_nybble << 12) |
			SHARED_BASE(top_address_nybble << 12);
}

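/*
 * Worked example: top_address_nybble == 0x6 gives 0x6 << 12 = 0x6000, so
 * both the PRIVATE_BASE and SHARED_BASE fields receive 0x6000, placing the
 * nybble in the top four bits of each 16-bit base field (with its low bit
 * zero, as the BUG_ON above guarantees).
 */
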
static int init_memory(struct device_queue_manager *dqm)
{
	int i, retval;

	/* clear the PASID mapping for each of the eight KFD VMIDs (8..15) */
	for (i = 8; i < 16; i++)
		set_pasid_vmid_mapping(dqm, 0, i);

	retval = kfd2kgd->init_memory(dqm->dev->kgd);
	if (retval == 0)
		is_mem_initialized = true;
	return retval;
}

static int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */
	err = kfd2kgd->allocate_mem(dqm->dev->kgd,
				CIK_HPD_EOP_BYTES * pipes_num,
				PAGE_SIZE,
				KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
				(struct kgd_mem **) &dqm->pipeline_mem);
	if (err) {
		pr_err("kfd: error allocating vidmem, num pipes: %d\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		kfd2kgd->free_mem(dqm->dev->kgd,
				(struct kgd_mem *) dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		/* program each pipe at its absolute index on the device */
		inx = i + first_pipe;
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* EOP queue size encoding: log2(bytes/4) - 1 = 11 - 2 - 1 = 8 */
		kfd2kgd->init_pipeline(dqm->dev->kgd, inx,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}

static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
	if (retval != 0)
		return retval;

	retval = init_memory(dqm);

	return retval;
}

static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

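	/*
	 * Illustration (assuming QUEUES_PER_PIPE is 8 on CIK): each pipe
	 * starts as 0xFF, i.e. all eight HQD slots free. allocate_hqd()
	 * clears a bit to claim a slot, deallocate_hqd() sets it back, and
	 * the VMID bitmap below works the same way for the KFD VMIDs.
	 */
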
	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;

	init_scheduler(dqm);
	return 0;
}

static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);
	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd2kgd->free_mem(dqm->dev->kgd,
			(struct kgd_mem *) dqm->pipeline_mem);
}

static int start_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

/*
 * Device Queue Manager implementation for cp scheduler
 */

static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

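	/*
	 * Illustration (assuming eight KFD VMIDs starting at VMID 8):
	 * vmid_mask = ((1 << 8) - 1) << 8 = 0xFF00. queue_mask is built the
	 * same way, one bit per HQD slot, shifted so bit 0 lines up with the
	 * first queue of the first compute pipe owned by the scheduler.
	 */
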
	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"      queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}

static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->active_runlist = false;
	retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}

static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
					sizeof(*dqm->fence_addr),
					32,
					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
					(struct kgd_mem **) &dqm->fence_mem);
	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;
fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}

static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd2kgd->free_mem(dqm->dev->kgd,
			(struct kgd_mem *) dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}

static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}

static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	destroy_queues_cpsch(dqm, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);
}

static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	retval = 0;

	/* the HW scheduler assigns VMIDs itself, so none is handed out here */
	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}

static int fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	/* the caller passes the timeout in milliseconds */
	timeout = jiffies + msecs_to_jiffies(timeout);

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		cpu_relax();
	}

	return 0;
}

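/*
 * A sketch of how this is used by destroy_queues_cpsch() below: the driver
 * writes KFD_FENCE_INIT to the fence slot, asks the CP to write
 * KFD_FENCE_COMPLETED there once preemption has finished
 * (pm_send_query_status), then polls here until the value flips or the
 * preemption timeout expires.
 */
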
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (dqm->active_runlist == false)
		goto out;
	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* bounded wait: gives up after the preemption timeout expires */
	fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist\n");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}

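/*
 * In short: execute_queues_cpsch() always preempts whatever runlist is
 * active first, and only builds and submits a fresh runlist when there is
 * at least one queue and one process left to schedule.
 */
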
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !qpd || !q);

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
	if (mqd == NULL) {
		retval = -ENOMEM;
		goto failed;
	}

	list_del(&q->list);
	dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
	mutex_unlock(&dqm->lock);
	return retval;
}

/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF

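/*
 * Worked example: base = 0x100000000 (4 GB: 64K-aligned and below bit 47)
 * with size = 0x10000 gives limit = 0x10000FFFF. Then
 * base & APE1_FIXED_BITS_MASK == 0 and
 * limit & APE1_FIXED_BITS_MASK == 0xFFFF == APE1_LIMIT_ALIGNMENT, so the
 * pair is accepted and programmed as sh_mem_ape1_base = 0x10000,
 * sh_mem_ape1_limit = 0x10000.
 */
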
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */

		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
			| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
			| DEFAULT_MTYPE(default_mtype)
			| APE1_MTYPE(ape1_mtype);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return true;

out:
	mutex_unlock(&dqm->lock);
	return false;
}

struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->create_queue = create_queue_cpsch;
		dqm->initialize = initialize_cpsch;
		dqm->start = start_cpsch;
		dqm->stop = stop_cpsch;
		dqm->destroy_queue = destroy_queue_cpsch;
		dqm->update_queue = update_queue;
		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->register_process = register_process_nocpsch;
		dqm->unregister_process = unregister_process_nocpsch;
		dqm->uninitialize = uninitialize_nocpsch;
		dqm->create_kernel_queue = create_kernel_queue_cpsch;
		dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->start = start_nocpsch;
		dqm->stop = stop_nocpsch;
		dqm->create_queue = create_queue_nocpsch;
		dqm->destroy_queue = destroy_queue_nocpsch;
		dqm->update_queue = update_queue;
		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->register_process = register_process_nocpsch;
		dqm->unregister_process = unregister_process_nocpsch;
		dqm->initialize = initialize_nocpsch;
		dqm->uninitialize = uninitialize_nocpsch;
		dqm->set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	if (dqm->initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}

void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->uninitialize(dqm);
	kfree(dqm);
}