/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/types.h>
#include <linux/printk.h>
#include <linux/bitops.h>

#include "kfd_device_queue_manager.h"
#include "kfd_mqd_manager.h"
#include "kfd_kernel_queue.h"
#include "../../radeon/cik_reg.h"
/* Size of the per-pipe EOP queue */
#define CIK_HPD_EOP_BYTES_LOG2 11
#define CIK_HPD_EOP_BYTES (1U << CIK_HPD_EOP_BYTES_LOG2)
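
/*
 * Illustrative note: with CIK_HPD_EOP_BYTES_LOG2 == 11 this works out to
 * 1 << 11 = 2048 bytes of end-of-pipe buffer per compute pipe.
 */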
static bool is_mem_initialized;

static int init_memory(struct device_queue_manager *dqm);
static int set_pasid_vmid_mapping(struct device_queue_manager *dqm,
					unsigned int pasid, unsigned int vmid);

static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd);
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock);
static inline unsigned int get_pipes_num(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm || !dqm->dev);
	return dqm->dev->shared_resources.compute_pipe_count;
}
static inline unsigned int get_first_pipe(struct device_queue_manager *dqm)
{
	return dqm->dev->shared_resources.first_compute_pipe;
}
static inline unsigned int get_pipes_num_cpsch(void)
{
	return PIPE_PER_ME_CP_SCHEDULING;
}
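
/*
 * Illustrative note on the helper below: it keeps only the top nybble of
 * the 64-bit LDS aperture base and clears its lowest bit (mask 0x0E), e.g.
 * an lds_base of 0x7000000000000000 yields nybble 0x6. An even nybble is
 * what compute_sh_mem_bases_64bit() expects.
 */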
static inline unsigned int
get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
{
	unsigned int nybble;

	nybble = (pdd->lds_base >> 60) & 0x0E;

	return nybble;
}
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
	unsigned int shared_base;

	shared_base = (pdd->lds_base >> 16) & 0xFF;

	return shared_base;
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble);
static void init_process_memory(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd)
{
	struct kfd_process_device *pdd;
	unsigned int temp;

	pdd = qpd_to_pdd(qpd);

	/* check if sh_mem_config register already configured */
	if (qpd->sh_mem_config == 0) {
		qpd->sh_mem_config =
			ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED) |
			DEFAULT_MTYPE(MTYPE_NONCACHED) |
			APE1_MTYPE(MTYPE_NONCACHED);
		qpd->sh_mem_ape1_limit = 0;
		qpd->sh_mem_ape1_base = 0;
	}

	if (qpd->pqm->process->is_32bit_user_mode) {
		temp = get_sh_mem_bases_32(pdd);
		qpd->sh_mem_bases = SHARED_BASE(temp);
		qpd->sh_mem_config |= PTR32;
	} else {
		temp = get_sh_mem_bases_nybble_64(pdd);
		qpd->sh_mem_bases = compute_sh_mem_bases_64bit(temp);
	}

	pr_debug("kfd: is32bit process: %d sh_mem_bases nybble: 0x%X and register 0x%X\n",
		qpd->pqm->process->is_32bit_user_mode, temp, qpd->sh_mem_bases);
}
static void program_sh_mem_settings(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	return kfd2kgd->program_sh_mem_settings(dqm->dev->kgd, qpd->vmid,
						qpd->sh_mem_config,
						qpd->sh_mem_ape1_base,
						qpd->sh_mem_ape1_limit,
						qpd->sh_mem_bases);
}
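
/*
 * VMIDs are handed out from a small bitmap; bit 0 corresponds to
 * KFD_VMID_START_OFFSET (8 on Kaveri, per the comment below), so bit 1 is
 * VMID 9 and so on. VMIDs below that offset are not managed by this code.
 */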
static int allocate_vmid(struct device_queue_manager *dqm,
			struct qcm_process_device *qpd,
			struct queue *q)
{
	int bit, allocated_vmid;

	if (dqm->vmid_bitmap == 0)
		return -ENOMEM;

	bit = find_first_bit((unsigned long *)&dqm->vmid_bitmap, CIK_VMID_NUM);
	clear_bit(bit, (unsigned long *)&dqm->vmid_bitmap);

	/* Kaveri kfd vmid's starts from vmid 8 */
	allocated_vmid = bit + KFD_VMID_START_OFFSET;
	pr_debug("kfd: vmid allocation %d\n", allocated_vmid);
	qpd->vmid = allocated_vmid;
	q->properties.vmid = allocated_vmid;

	set_pasid_vmid_mapping(dqm, q->process->pasid, q->properties.vmid);
	program_sh_mem_settings(dqm, qpd);

	return 0;
}
static void deallocate_vmid(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int bit = qpd->vmid - KFD_VMID_START_OFFSET;

	set_bit(bit, (unsigned long *)&dqm->vmid_bitmap);
	qpd->vmid = 0;
	q->properties.vmid = 0;
}
static int create_queue_nocpsch(struct device_queue_manager *dqm,
				struct queue *q,
				struct qcm_process_device *qpd,
				int *allocated_vmid)
{
	int retval;

	BUG_ON(!dqm || !q || !qpd || !allocated_vmid);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	if (list_empty(&qpd->queues_list)) {
		retval = allocate_vmid(dqm, qpd, q);
		if (retval != 0) {
			mutex_unlock(&dqm->lock);
			return retval;
		}
	}
	*allocated_vmid = qpd->vmid;
	q->properties.vmid = qpd->vmid;

	retval = create_compute_queue_nocpsch(dqm, q, qpd);
	if (retval != 0) {
		if (list_empty(&qpd->queues_list))
			deallocate_vmid(dqm, qpd, q);
		mutex_unlock(&dqm->lock);
		return retval;
	}

	list_add(&q->list, &qpd->queues_list);
	dqm->queue_count++;

	mutex_unlock(&dqm->lock);
	return 0;
}
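
/*
 * allocate_hqd() picks a free hardware queue slot: each entry of
 * dqm->allocated_queues is a bitmap of free HQD slots on one pipe, and the
 * search starts at next_pipe_to_allocate so new queues are spread
 * round-robin across pipes ("horizontal" allocation).
 */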
static int allocate_hqd(struct device_queue_manager *dqm, struct queue *q)
{
	bool set = false;
	int pipe, bit;

	for (pipe = dqm->next_pipe_to_allocate; pipe < get_pipes_num(dqm);
			pipe = (pipe + 1) % get_pipes_num(dqm)) {
		if (dqm->allocated_queues[pipe] != 0) {
			bit = find_first_bit(
				(unsigned long *)&dqm->allocated_queues[pipe],
				QUEUES_PER_PIPE);
			clear_bit(bit,
				(unsigned long *)&dqm->allocated_queues[pipe]);
			q->pipe = pipe;
			q->queue = bit;
			set = true;
			break;
		}
	}
	if (!set)
		return -EBUSY;

	pr_debug("kfd: DQM %s hqd slot - pipe (%d) queue(%d)\n",
			__func__, q->pipe, q->queue);
	/* horizontal hqd allocation */
	dqm->next_pipe_to_allocate = (pipe + 1) % get_pipes_num(dqm);

	return 0;
}
static inline void deallocate_hqd(struct device_queue_manager *dqm,
				struct queue *q)
{
	set_bit(q->queue, (unsigned long *)&dqm->allocated_queues[q->pipe]);
}
static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
					struct queue *q,
					struct qcm_process_device *qpd)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL)
		return -ENOMEM;

	retval = allocate_hqd(dqm, q);
	if (retval != 0)
		return retval;

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0) {
		deallocate_hqd(dqm, q);
		return retval;
	}

	return 0;
}
static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd || !qpd);

	retval = 0;

	pr_debug("kfd: In Func %s\n", __func__);

	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		retval = -ENOMEM;
		goto out;
	}

	retval = mqd->destroy_mqd(mqd, q->mqd,
				KFD_PREEMPT_TYPE_WAVEFRONT,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS,
				q->pipe, q->queue);
	if (retval != 0)
		goto out;

	deallocate_hqd(dqm, q);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	list_del(&q->list);
	if (list_empty(&qpd->queues_list))
		deallocate_vmid(dqm, qpd, q);
	dqm->queue_count--;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !q->mqd);

	mutex_lock(&dqm->lock);
	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
	if (q->properties.is_active)
		dqm->queue_count++;
	else
		dqm->queue_count--;

	if (sched_policy != KFD_SCHED_POLICY_NO_HWS)
		retval = execute_queues_cpsch(dqm, false);

	mutex_unlock(&dqm->lock);
	return retval;
}
static struct mqd_manager *get_mqd_manager_nocpsch(
		struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
	struct mqd_manager *mqd;

	BUG_ON(!dqm || type >= KFD_MQD_TYPE_MAX);

	pr_debug("kfd: In func %s mqd type %d\n", __func__, type);

	mqd = dqm->mqds[type];
	if (!mqd) {
		mqd = mqd_manager_init(type, dqm->dev);
		if (mqd == NULL)
			pr_err("kfd: mqd manager is NULL\n");
		dqm->mqds[type] = mqd;
	}

	return mqd;
}
static int register_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	struct device_process_node *n;

	BUG_ON(!dqm || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	n = kzalloc(sizeof(struct device_process_node), GFP_KERNEL);
	if (!n)
		return -ENOMEM;

	n->qpd = qpd;

	mutex_lock(&dqm->lock);
	list_add(&n->list, &dqm->queues);

	init_process_memory(dqm, qpd);
	dqm->processes_count++;

	mutex_unlock(&dqm->lock);

	return 0;
}
static int unregister_process_nocpsch(struct device_queue_manager *dqm,
					struct qcm_process_device *qpd)
{
	int retval;
	struct device_process_node *cur, *next;

	BUG_ON(!dqm || !qpd);
	BUG_ON(!list_empty(&qpd->queues_list));

	pr_debug("kfd: In func %s\n", __func__);

	retval = 0;
	mutex_lock(&dqm->lock);

	list_for_each_entry_safe(cur, next, &dqm->queues, list) {
		if (qpd == cur->qpd) {
			list_del(&cur->list);
			kfree(cur);
			dqm->processes_count--;
			goto out;
		}
	}
	/* qpd not found in dqm list */
	retval = 1;
out:
	mutex_unlock(&dqm->lock);
	return retval;
}
static int
set_pasid_vmid_mapping(struct device_queue_manager *dqm, unsigned int pasid,
			unsigned int vmid)
{
	uint32_t pasid_mapping;

	pasid_mapping = (pasid == 0) ? 0 : (uint32_t)pasid |
					ATC_VMID_PASID_MAPPING_VALID;
	return kfd2kgd->set_pasid_vmid_mapping(dqm->dev->kgd, pasid_mapping,
						vmid);
}
static uint32_t compute_sh_mem_bases_64bit(unsigned int top_address_nybble)
{
	/* In 64-bit mode, we can only control the top 3 bits of the LDS,
	 * scratch and GPUVM apertures.
	 * The hardware fills in the remaining 59 bits according to the
	 * following pattern:
	 * LDS:		X0000000'00000000 - X0000001'00000000 (4GB)
	 * Scratch:	X0000001'00000000 - X0000002'00000000 (4GB)
	 * GPUVM:	Y0010000'00000000 - Y0020000'00000000 (1TB)
	 *
	 * (where X/Y is the configurable nybble with the low bit 0)
	 *
	 * LDS and scratch will have the same top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.PRIVATE_BASE.
	 * GPUVM can have a different top nybble programmed in the
	 * top 3 bits of SH_MEM_BASES.SHARED_BASE.
	 * We don't bother to support different top nybbles
	 * for LDS/Scratch and GPUVM.
	 */

	BUG_ON((top_address_nybble & 1) || top_address_nybble > 0xE ||
		top_address_nybble == 0);

	return PRIVATE_BASE(top_address_nybble << 12) |
		SHARED_BASE(top_address_nybble << 12);
}
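
/*
 * Worked example for the function above (illustrative): a
 * top_address_nybble of 0x8 gives PRIVATE_BASE(0x8000) | SHARED_BASE(0x8000),
 * i.e. the top nybble of both base fields is 0x8, so the LDS aperture starts
 * at 0x80000000'00000000 per the pattern described in the comment.
 */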
static int init_memory(struct device_queue_manager *dqm)
{
	int i, retval;

	for (i = 8; i < 16; i++)
		set_pasid_vmid_mapping(dqm, 0, i);

	retval = kfd2kgd->init_memory(dqm->dev->kgd);
	if (retval == 0)
		is_mem_initialized = true;
	return retval;
}
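
/*
 * Worked example for init_pipelines() below (illustrative): with
 * CIK_HPD_EOP_BYTES_LOG2 == 11, each pipe gets a 2048-byte EOP buffer and
 * the size encoding handed to init_pipeline() is log2(2048 / 4) - 1 = 8,
 * which is exactly CIK_HPD_EOP_BYTES_LOG2 - 3.
 */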
static int init_pipelines(struct device_queue_manager *dqm,
			unsigned int pipes_num, unsigned int first_pipe)
{
	void *hpdptr;
	struct mqd_manager *mqd;
	unsigned int i, err, inx;
	uint64_t pipe_hpd_addr;

	BUG_ON(!dqm || !dqm->dev);

	pr_debug("kfd: In func %s\n", __func__);

	/*
	 * Allocate memory for the HPDs. This is hardware-owned per-pipe data.
	 * The driver never accesses this memory after zeroing it.
	 * It doesn't even have to be saved/restored on suspend/resume
	 * because it contains no data when there are no active queues.
	 */
	err = kfd2kgd->allocate_mem(dqm->dev->kgd,
				CIK_HPD_EOP_BYTES * pipes_num,
				PAGE_SIZE,
				KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
				(struct kgd_mem **) &dqm->pipeline_mem);
	if (err) {
		pr_err("kfd: error allocate vidmem num pipes: %d\n",
			pipes_num);
		return -ENOMEM;
	}

	hpdptr = dqm->pipeline_mem->cpu_ptr;
	dqm->pipelines_addr = dqm->pipeline_mem->gpu_addr;

	memset(hpdptr, 0, CIK_HPD_EOP_BYTES * pipes_num);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_COMPUTE);
	if (mqd == NULL) {
		kfd2kgd->free_mem(dqm->dev->kgd,
				(struct kgd_mem *) dqm->pipeline_mem);
		return -ENOMEM;
	}

	for (i = 0; i < pipes_num; i++) {
		inx = i + first_pipe;
		pipe_hpd_addr = dqm->pipelines_addr + i * CIK_HPD_EOP_BYTES;
		pr_debug("kfd: pipeline address %llX\n", pipe_hpd_addr);
		/* = log2(bytes/4)-1 */
		kfd2kgd->init_pipeline(dqm->dev->kgd, i,
				CIK_HPD_EOP_BYTES_LOG2 - 3, pipe_hpd_addr);
	}

	return 0;
}
static int init_scheduler(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In %s\n", __func__);

	retval = init_pipelines(dqm, get_pipes_num(dqm), KFD_DQM_FIRST_PIPE);
	if (retval != 0)
		return retval;

	retval = init_memory(dqm);

	return retval;
}
static int initialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num(dqm));

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->next_pipe_to_allocate = 0;
	dqm->allocated_queues = kcalloc(get_pipes_num(dqm),
					sizeof(unsigned int), GFP_KERNEL);
	if (!dqm->allocated_queues) {
		mutex_destroy(&dqm->lock);
		return -ENOMEM;
	}

	for (i = 0; i < get_pipes_num(dqm); i++)
		dqm->allocated_queues[i] = (1 << QUEUES_PER_PIPE) - 1;

	dqm->vmid_bitmap = (1 << VMID_PER_DEVICE) - 1;

	init_scheduler(dqm);
	return 0;
}
static void uninitialize_nocpsch(struct device_queue_manager *dqm)
{
	int i;

	BUG_ON(!dqm);
	BUG_ON(dqm->queue_count > 0 || dqm->processes_count > 0);

	kfree(dqm->allocated_queues);
	for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
		kfree(dqm->mqds[i]);
	mutex_destroy(&dqm->lock);
	kfd2kgd->free_mem(dqm->dev->kgd,
			(struct kgd_mem *) dqm->pipeline_mem);
}
static int start_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}

static int stop_nocpsch(struct device_queue_manager *dqm)
{
	return 0;
}
/*
 * Device Queue Manager implementation for cp scheduler
 */
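
/*
 * Example of the mask arithmetic in set_sched_resources() (illustrative,
 * assuming PIPE_PER_ME_CP_SCHEDULING == 3 and QUEUES_PER_PIPE == 8):
 * queue_num is 24, queue_mask is 0xFFFFFF, and the mask is then shifted
 * left by first_compute_pipe * 8 so the HW scheduler only owns the queue
 * slots on the compute pipes reserved for KFD.
 */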
static int set_sched_resources(struct device_queue_manager *dqm)
{
	struct scheduling_resources res;
	unsigned int queue_num, queue_mask;

	pr_debug("kfd: In func %s\n", __func__);

	queue_num = get_pipes_num_cpsch() * QUEUES_PER_PIPE;
	queue_mask = (1 << queue_num) - 1;
	res.vmid_mask = (1 << VMID_PER_DEVICE) - 1;
	res.vmid_mask <<= KFD_VMID_START_OFFSET;
	res.queue_mask = queue_mask << (get_first_pipe(dqm) * QUEUES_PER_PIPE);
	res.gws_mask = res.oac_mask = res.gds_heap_base =
			res.gds_heap_size = 0;

	pr_debug("kfd: scheduling resources:\n"
			"      vmid mask: 0x%8X\n"
			"      queue mask: 0x%8llX\n",
			res.vmid_mask, res.queue_mask);

	return pm_send_set_resources(&dqm->packets, &res);
}
static int initialize_cpsch(struct device_queue_manager *dqm)
{
	int retval;

	BUG_ON(!dqm);

	pr_debug("kfd: In func %s num of pipes: %d\n",
			__func__, get_pipes_num_cpsch());

	mutex_init(&dqm->lock);
	INIT_LIST_HEAD(&dqm->queues);
	dqm->queue_count = dqm->processes_count = 0;
	dqm->active_runlist = false;
	retval = init_pipelines(dqm, get_pipes_num(dqm), 0);
	if (retval != 0)
		goto fail_init_pipelines;

	return 0;

fail_init_pipelines:
	mutex_destroy(&dqm->lock);
	return retval;
}
static int start_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	int retval;

	BUG_ON(!dqm);

	retval = pm_init(&dqm->packets, dqm);
	if (retval != 0)
		goto fail_packet_manager_init;

	retval = set_sched_resources(dqm);
	if (retval != 0)
		goto fail_set_sched_resources;

	pr_debug("kfd: allocating fence memory\n");

	/* allocate fence memory on the gart */
	retval = kfd2kgd->allocate_mem(dqm->dev->kgd,
					sizeof(*dqm->fence_addr),
					32,
					KFD_MEMPOOL_SYSTEM_WRITECOMBINE,
					(struct kgd_mem **) &dqm->fence_mem);
	if (retval != 0)
		goto fail_allocate_vidmem;

	dqm->fence_addr = dqm->fence_mem->cpu_ptr;
	dqm->fence_gpu_addr = dqm->fence_mem->gpu_addr;

	list_for_each_entry(node, &dqm->queues, list)
		if (node->qpd->pqm->process && dqm->dev)
			kfd_bind_process_to_device(dqm->dev,
						node->qpd->pqm->process);

	execute_queues_cpsch(dqm, true);

	return 0;

fail_allocate_vidmem:
fail_set_sched_resources:
	pm_uninit(&dqm->packets);
fail_packet_manager_init:
	return retval;
}
static int stop_cpsch(struct device_queue_manager *dqm)
{
	struct device_process_node *node;
	struct kfd_process_device *pdd;

	BUG_ON(!dqm);

	destroy_queues_cpsch(dqm, true);

	list_for_each_entry(node, &dqm->queues, list) {
		pdd = qpd_to_pdd(node->qpd);
		pdd->bound = false;
	}
	kfd2kgd->free_mem(dqm->dev->kgd,
			(struct kgd_mem *) dqm->fence_mem);
	pm_uninit(&dqm->packets);

	return 0;
}
static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq || !qpd);

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);
	list_add(&kq->list, &qpd->priv_queue_list);
	dqm->queue_count++;
	qpd->is_debug = true;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);

	return 0;
}
static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
					struct kernel_queue *kq,
					struct qcm_process_device *qpd)
{
	BUG_ON(!dqm || !kq);

	pr_debug("kfd: In %s\n", __func__);

	mutex_lock(&dqm->lock);
	destroy_queues_cpsch(dqm, false);
	list_del(&kq->list);
	dqm->queue_count--;
	qpd->is_debug = false;
	execute_queues_cpsch(dqm, false);
	mutex_unlock(&dqm->lock);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
			struct qcm_process_device *qpd, int *allocate_vmid)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !q || !qpd);

	if (allocate_vmid)
		*allocate_vmid = 0;

	mutex_lock(&dqm->lock);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
	if (mqd == NULL) {
		mutex_unlock(&dqm->lock);
		return -ENOMEM;
	}

	retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
				&q->gart_mqd_addr, &q->properties);
	if (retval != 0)
		goto out;

	list_add(&q->list, &qpd->queues_list);
	if (q->properties.is_active) {
		dqm->queue_count++;
		retval = execute_queues_cpsch(dqm, false);
	}

out:
	mutex_unlock(&dqm->lock);
	return retval;
}
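
/*
 * fence_wait_timeout() busy-polls a fence word that lives in GART memory.
 * The preemption path in destroy_queues_cpsch() first writes KFD_FENCE_INIT
 * to that word, asks the CP to write KFD_FENCE_COMPLETED back via
 * pm_send_query_status(), and then spins here until the value changes or
 * the timeout expires.
 */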
static int fence_wait_timeout(unsigned int *fence_addr,
				unsigned int fence_value,
				unsigned long timeout)
{
	BUG_ON(!fence_addr);
	timeout += jiffies;

	while (*fence_addr != fence_value) {
		if (time_after(jiffies, timeout)) {
			pr_err("kfd: qcm fence wait loop timeout expired\n");
			return -ETIME;
		}
		cpu_relax();
	}

	return 0;
}
static int destroy_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	retval = 0;

	if (lock)
		mutex_lock(&dqm->lock);
	if (!dqm->active_runlist)
		goto out;

	retval = pm_send_unmap_queue(&dqm->packets, KFD_QUEUE_TYPE_COMPUTE,
			KFD_PREEMPT_TYPE_FILTER_ALL_QUEUES, 0, false, 0);
	if (retval != 0)
		goto out;

	*dqm->fence_addr = KFD_FENCE_INIT;
	pm_send_query_status(&dqm->packets, dqm->fence_gpu_addr,
				KFD_FENCE_COMPLETED);
	/* should be timed out */
	fence_wait_timeout(dqm->fence_addr, KFD_FENCE_COMPLETED,
				QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS);
	pm_release_ib(&dqm->packets);
	dqm->active_runlist = false;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int execute_queues_cpsch(struct device_queue_manager *dqm, bool lock)
{
	int retval;

	BUG_ON(!dqm);

	if (lock)
		mutex_lock(&dqm->lock);

	retval = destroy_queues_cpsch(dqm, false);
	if (retval != 0) {
		pr_err("kfd: the cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
		goto out;
	}

	if (dqm->queue_count <= 0 || dqm->processes_count <= 0) {
		retval = 0;
		goto out;
	}

	if (dqm->active_runlist) {
		retval = 0;
		goto out;
	}

	retval = pm_send_runlist(&dqm->packets, &dqm->queues);
	if (retval != 0) {
		pr_err("kfd: failed to execute runlist\n");
		goto out;
	}
	dqm->active_runlist = true;

out:
	if (lock)
		mutex_unlock(&dqm->lock);
	return retval;
}
static int destroy_queue_cpsch(struct device_queue_manager *dqm,
				struct qcm_process_device *qpd,
				struct queue *q)
{
	int retval;
	struct mqd_manager *mqd;

	BUG_ON(!dqm || !qpd || !q);

	retval = 0;

	/* remove queue from list to prevent rescheduling after preemption */
	mutex_lock(&dqm->lock);

	mqd = dqm->get_mqd_manager(dqm, KFD_MQD_TYPE_CIK_CP);
	if (!mqd) {
		retval = -ENOMEM;
		goto failed;
	}

	list_del(&q->list);
	dqm->queue_count--;

	execute_queues_cpsch(dqm, false);

	mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);

	mutex_unlock(&dqm->lock);

	return 0;

failed:
	mutex_unlock(&dqm->lock);
	return retval;
}
/*
 * Low bits must be 0000/FFFF as required by HW, high bits must be 0 to
 * stay in user mode.
 */
#define APE1_FIXED_BITS_MASK 0xFFFF80000000FFFFULL
/* APE1 limit is inclusive and 64K aligned. */
#define APE1_LIMIT_ALIGNMENT 0xFFFF
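
/*
 * Worked example (illustrative): an APE1 base of 0x100000000 with a size of
 * 0x200000 gives limit = 0x1001FFFFF; base & APE1_FIXED_BITS_MASK == 0 and
 * limit & APE1_FIXED_BITS_MASK == APE1_LIMIT_ALIGNMENT, so the pair passes
 * the checks below and is programmed as sh_mem_ape1_base = 0x10000,
 * sh_mem_ape1_limit = 0x1001F.
 */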
static bool set_cache_memory_policy(struct device_queue_manager *dqm,
				   struct qcm_process_device *qpd,
				   enum cache_policy default_policy,
				   enum cache_policy alternate_policy,
				   void __user *alternate_aperture_base,
				   uint64_t alternate_aperture_size)
{
	uint32_t default_mtype;
	uint32_t ape1_mtype;

	pr_debug("kfd: In func %s\n", __func__);

	mutex_lock(&dqm->lock);

	if (alternate_aperture_size == 0) {
		/* base > limit disables APE1 */
		qpd->sh_mem_ape1_base = 1;
		qpd->sh_mem_ape1_limit = 0;
	} else {
		/*
		 * In FSA64, APE1_Base[63:0] = { 16{SH_MEM_APE1_BASE[31]},
		 *			SH_MEM_APE1_BASE[31:0], 0x0000 }
		 * APE1_Limit[63:0] = { 16{SH_MEM_APE1_LIMIT[31]},
		 *			SH_MEM_APE1_LIMIT[31:0], 0xFFFF }
		 * Verify that the base and size parameters can be
		 * represented in this format and convert them.
		 * Additionally restrict APE1 to user-mode addresses.
		 */
		uint64_t base = (uintptr_t)alternate_aperture_base;
		uint64_t limit = base + alternate_aperture_size - 1;

		if (limit <= base)
			goto out;

		if ((base & APE1_FIXED_BITS_MASK) != 0)
			goto out;

		if ((limit & APE1_FIXED_BITS_MASK) != APE1_LIMIT_ALIGNMENT)
			goto out;

		qpd->sh_mem_ape1_base = base >> 16;
		qpd->sh_mem_ape1_limit = limit >> 16;
	}

	default_mtype = (default_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	ape1_mtype = (alternate_policy == cache_policy_coherent) ?
			MTYPE_NONCACHED :
			MTYPE_CACHED;

	qpd->sh_mem_config = (qpd->sh_mem_config & PTR32)
			| ALIGNMENT_MODE(SH_MEM_ALIGNMENT_MODE_UNALIGNED)
			| DEFAULT_MTYPE(default_mtype)
			| APE1_MTYPE(ape1_mtype);

	if ((sched_policy == KFD_SCHED_POLICY_NO_HWS) && (qpd->vmid != 0))
		program_sh_mem_settings(dqm, qpd);

	pr_debug("kfd: sh_mem_config: 0x%x, ape1_base: 0x%x, ape1_limit: 0x%x\n",
		qpd->sh_mem_config, qpd->sh_mem_ape1_base,
		qpd->sh_mem_ape1_limit);

	mutex_unlock(&dqm->lock);
	return true;

out:
	mutex_unlock(&dqm->lock);
	return false;
}
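
/*
 * device_queue_manager_init() wires up one of two function tables depending
 * on the sched_policy setting: the HWS variants hand queue scheduling to the
 * CP firmware through the packet manager, while KFD_SCHED_POLICY_NO_HWS
 * programs the hardware queue descriptors directly from the driver.
 */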
struct device_queue_manager *device_queue_manager_init(struct kfd_dev *dev)
{
	struct device_queue_manager *dqm;

	BUG_ON(!dev);

	dqm = kzalloc(sizeof(struct device_queue_manager), GFP_KERNEL);
	if (!dqm)
		return NULL;

	dqm->dev = dev;
	switch (sched_policy) {
	case KFD_SCHED_POLICY_HWS:
	case KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION:
		/* initialize dqm for cp scheduling */
		dqm->create_queue = create_queue_cpsch;
		dqm->initialize = initialize_cpsch;
		dqm->start = start_cpsch;
		dqm->stop = stop_cpsch;
		dqm->destroy_queue = destroy_queue_cpsch;
		dqm->update_queue = update_queue;
		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->register_process = register_process_nocpsch;
		dqm->unregister_process = unregister_process_nocpsch;
		dqm->uninitialize = uninitialize_nocpsch;
		dqm->create_kernel_queue = create_kernel_queue_cpsch;
		dqm->destroy_kernel_queue = destroy_kernel_queue_cpsch;
		dqm->set_cache_memory_policy = set_cache_memory_policy;
		break;
	case KFD_SCHED_POLICY_NO_HWS:
		/* initialize dqm for no cp scheduling */
		dqm->start = start_nocpsch;
		dqm->stop = stop_nocpsch;
		dqm->create_queue = create_queue_nocpsch;
		dqm->destroy_queue = destroy_queue_nocpsch;
		dqm->update_queue = update_queue;
		dqm->get_mqd_manager = get_mqd_manager_nocpsch;
		dqm->register_process = register_process_nocpsch;
		dqm->unregister_process = unregister_process_nocpsch;
		dqm->initialize = initialize_nocpsch;
		dqm->uninitialize = uninitialize_nocpsch;
		dqm->set_cache_memory_policy = set_cache_memory_policy;
		break;
	default:
		BUG();
		break;
	}

	if (dqm->initialize(dqm) != 0) {
		kfree(dqm);
		return NULL;
	}

	return dqm;
}
void device_queue_manager_uninit(struct device_queue_manager *dqm)
{
	BUG_ON(!dqm);

	dqm->uninitialize(dqm);
	kfree(dqm);
}