/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_opcodes.h"
static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size);
static void uninitialize_vi(struct kernel_queue *kq);
static void submit_packet_vi(struct kernel_queue *kq);
void kernel_queue_init_vi(struct kernel_queue_ops *ops)
{
	ops->initialize = initialize_vi;
	ops->uninitialize = uninitialize_vi;
	ops->submit_packet = submit_packet_vi;
}
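/*
 * Usage sketch (illustrative, not the exact call site): the generic
 * kernel-queue code fills an ops table this way at queue creation and
 * then works through it without knowing the ASIC generation:
 *
 *	struct kernel_queue_ops ops;
 *
 *	kernel_queue_init_vi(&ops);
 *	if (!ops.initialize(kq, dev, KFD_QUEUE_TYPE_HIQ, queue_size))
 *		return NULL;
 */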
static bool initialize_vi(struct kernel_queue *kq, struct kfd_dev *dev,
			enum kfd_queue_type type, unsigned int queue_size)
{
	int retval;

	/* For CIK family asics, kq->eop_mem is not needed */
	if (dev->device_info->asic_family <= CHIP_MULLINS)
		return true;

	retval = kfd_gtt_sa_allocate(dev, PAGE_SIZE, &kq->eop_mem);
	if (retval != 0)
		return false;

	kq->eop_gpu_addr = kq->eop_mem->gpu_addr;
	kq->eop_kernel_addr = kq->eop_mem->cpu_ptr;
	memset(kq->eop_kernel_addr, 0, PAGE_SIZE);

	return true;
}
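/*
 * Note: the VI path differs from CIK only in allocating and zeroing a
 * one-page end-of-pipe (EOP) buffer out of GTT for kq->eop_mem, which
 * newer CP firmware expects per queue; hence the early return above for
 * asic_family <= CHIP_MULLINS, where no EOP buffer is used.
 */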
static void uninitialize_vi(struct kernel_queue *kq)
{
	/* For CIK family asics, kq->eop_mem is NULL and kfd_gtt_sa_free()
	 * is able to handle NULL properly.
	 */
	kfd_gtt_sa_free(kq->dev, kq->eop_mem);
}
static void submit_packet_vi(struct kernel_queue *kq)
{
	*kq->wptr_kernel = kq->pending_wptr;
	write_kernel_doorbell(kq->queue->properties.doorbell_ptr,
				kq->pending_wptr);
}
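/*
 * Ordering note: the shadow write pointer is updated before the
 * doorbell is rung with the same pending_wptr value, so by the time the
 * CP reacts to the doorbell it can read a write pointer that is at
 * least as new as the one the doorbell announced.
 */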
unsigned int pm_build_pm4_header(unsigned int opcode, size_t packet_size)
{
	union PM4_MES_TYPE_3_HEADER header;

	header.u32All = 0;
	header.opcode = opcode;
	header.count = packet_size / 4 - 2;
	header.type = PM4_TYPE_3;

	return header.u32All;
}
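/*
 * Worked example of the count encoding above: the PM4 type-3 count
 * field holds the number of packet DWORDs following the first two.
 * For a hypothetical 24-byte packet:
 *
 *	24 / 4 = 6 DWORDs total, so count = 6 - 2 = 4
 *
 * which is exactly packet_size / 4 - 2 as computed above.
 */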
static int pm_map_process_vi(struct packet_manager *pm, uint32_t *buffer,
				struct qcm_process_device *qpd)
{
	struct pm4_mes_map_process *packet;

	packet = (struct pm4_mes_map_process *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_map_process));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
					sizeof(struct pm4_mes_map_process));
	packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
	packet->bitfields2.process_quantum = 1;
	packet->bitfields2.pasid = qpd->pqm->process->pasid;
	packet->bitfields3.page_table_base = qpd->page_table_base;
	packet->bitfields10.gds_size = qpd->gds_size;
	packet->bitfields10.num_gws = qpd->num_gws;
	packet->bitfields10.num_oac = qpd->num_oac;
	packet->bitfields10.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

	packet->sh_mem_config = qpd->sh_mem_config;
	packet->sh_mem_bases = qpd->sh_mem_bases;
	packet->sh_mem_ape1_base = qpd->sh_mem_ape1_base;
	packet->sh_mem_ape1_limit = qpd->sh_mem_ape1_limit;

	packet->sh_hidden_private_base_vmid = qpd->sh_hidden_private_base;

	packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
	packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

	return 0;
}
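/*
 * Debug-process note: for a process in debug mode the packet enables
 * the debug interface queue (diq_enable) and reports num_queues as 0;
 * the DIQ itself is mapped separately as a KFD_QUEUE_TYPE_DIQ queue in
 * pm_map_queues_vi() below.
 */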
static int pm_runlist_vi(struct packet_manager *pm, uint32_t *buffer,
			uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
	struct pm4_mes_runlist *packet;
	int concurrent_proc_cnt = 0;
	struct kfd_dev *kfd = pm->dqm->dev;

	if (WARN_ON(!ib))
		return -EFAULT;

	/* Determine the number of processes to map together to HW:
	 * it cannot exceed the number of VMIDs available to the
	 * scheduler, and it is determined by the smaller of the number
	 * of processes in the runlist and kfd module parameter
	 * hws_max_conc_proc.
	 * Note: the arbitration between the number of VMIDs and
	 * hws_max_conc_proc has been done in
	 * kgd2kfd_device_init().
	 */
	concurrent_proc_cnt = min(pm->dqm->processes_count,
			kfd->max_proc_per_quantum);

	packet = (struct pm4_mes_runlist *)buffer;

	memset(buffer, 0, sizeof(struct pm4_mes_runlist));
	packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
					sizeof(struct pm4_mes_runlist));

	packet->bitfields4.ib_size = ib_size_in_dwords;
	packet->bitfields4.chain = chain ? 1 : 0;
	packet->bitfields4.offload_polling = 0;
	packet->bitfields4.valid = 1;
	packet->bitfields4.process_cnt = concurrent_proc_cnt;
	packet->ordinal2 = lower_32_bits(ib);
	packet->bitfields3.ib_base_hi = upper_32_bits(ib);

	return 0;
}
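/*
 * Note on the ordinal2 write above: ordinal2 is the raw-DWORD view of
 * the union that also holds the 30-bit ib_base_lo bitfield (its two low
 * bits are reserved), so storing lower_32_bits(ib) directly assumes the
 * indirect buffer address is at least 4-byte aligned.
 */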
int pm_set_resources_vi(struct packet_manager *pm, uint32_t *buffer,
			struct scheduling_resources *res)
{
	struct pm4_mes_set_resources *packet;

	packet = (struct pm4_mes_set_resources *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

	packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
					sizeof(struct pm4_mes_set_resources));

	packet->bitfields2.queue_type =
			queue_type__mes_set_resources__hsa_interface_queue_hiq;
	packet->bitfields2.vmid_mask = res->vmid_mask;
	packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
	packet->bitfields7.oac_mask = res->oac_mask;
	packet->bitfields8.gds_heap_base = res->gds_heap_base;
	packet->bitfields8.gds_heap_size = res->gds_heap_size;

	packet->gws_mask_lo = lower_32_bits(res->gws_mask);
	packet->gws_mask_hi = upper_32_bits(res->gws_mask);

	packet->queue_mask_lo = lower_32_bits(res->queue_mask);
	packet->queue_mask_hi = upper_32_bits(res->queue_mask);

	return 0;
}
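/*
 * Units note: the division by 100 implies the packet's unmap_latency
 * field counts in units of 100 ms, so KFD_UNMAP_LATENCY_MS is encoded
 * as the number of 100 ms ticks the HWS may take to unmap queues.
 */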
static int pm_map_queues_vi(struct packet_manager *pm, uint32_t *buffer,
			struct queue *q, bool is_static)
{
	struct pm4_mes_map_queues *packet;
	bool use_static = is_static;

	packet = (struct pm4_mes_map_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

	packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
					sizeof(struct pm4_mes_map_queues));
	packet->bitfields2.num_queues = 1;
	packet->bitfields2.queue_sel =
		queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

	packet->bitfields2.engine_sel =
		engine_sel__mes_map_queues__compute_vi;
	packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_compute_vi;

	switch (q->properties.type) {
	case KFD_QUEUE_TYPE_COMPUTE:
		if (use_static)
			packet->bitfields2.queue_type =
		queue_type__mes_map_queues__normal_latency_static_queue_vi;
		break;
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.queue_type =
			queue_type__mes_map_queues__debug_interface_queue_vi;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
				engine_sel__mes_map_queues__sdma0_vi;
		use_static = false; /* no static queues under SDMA */
		break;
	default:
		WARN(1, "queue type %d", q->properties.type);
		return -EINVAL;
	}
	packet->bitfields3.doorbell_offset =
			q->properties.doorbell_off;

	packet->mqd_addr_lo =
			lower_32_bits(q->gart_mqd_addr);

	packet->mqd_addr_hi =
			upper_32_bits(q->gart_mqd_addr);

	packet->wptr_addr_lo =
			lower_32_bits((uint64_t)q->properties.write_ptr);

	packet->wptr_addr_hi =
			upper_32_bits((uint64_t)q->properties.write_ptr);

	return 0;
}
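/*
 * SDMA engine_sel example: the engine encodings are consecutive from
 * engine_sel__mes_map_queues__sdma0_vi, so sdma_engine_id == 1 selects
 * the SDMA1 encoding. pm_unmap_queues_vi() below uses the same
 * base-plus-index pattern with engine_sel__mes_unmap_queues__sdma0.
 */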
static int pm_unmap_queues_vi(struct packet_manager *pm, uint32_t *buffer,
			enum kfd_queue_type type,
			enum kfd_unmap_queues_filter filter,
			uint32_t filter_param, bool reset,
			unsigned int sdma_engine)
{
	struct pm4_mes_unmap_queues *packet;

	packet = (struct pm4_mes_unmap_queues *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

	packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
					sizeof(struct pm4_mes_unmap_queues));
	switch (type) {
	case KFD_QUEUE_TYPE_COMPUTE:
	case KFD_QUEUE_TYPE_DIQ:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__compute;
		break;
	case KFD_QUEUE_TYPE_SDMA:
	case KFD_QUEUE_TYPE_SDMA_XGMI:
		packet->bitfields2.engine_sel =
			engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
		break;
	default:
		WARN(1, "queue type %d", type);
		return -EINVAL;
	}

	if (reset)
		packet->bitfields2.action =
			action__mes_unmap_queues__reset_queues;
	else
		packet->bitfields2.action =
			action__mes_unmap_queues__preempt_queues;

	switch (filter) {
	case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
		packet->bitfields2.num_queues = 1;
		packet->bitfields3b.doorbell_offset0 = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
		packet->bitfields3a.pasid = filter_param;
		break;
	case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_queues;
		break;
	case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
		/* in this case, we do not preempt static queues */
		packet->bitfields2.queue_sel =
			queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
		break;
	default:
		WARN(1, "filter %d", filter);
		return -EINVAL;
	}

	return 0;
}
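/*
 * filter_param is overloaded by the switch above: it carries a doorbell
 * offset for FILTER_SINGLE_QUEUE (bitfields3b), a PASID for
 * FILTER_BY_PASID (bitfields3a), and is unused by the two "all queues"
 * filters. Callers must pass the value matching the chosen filter.
 */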
static int pm_query_status_vi(struct packet_manager *pm, uint32_t *buffer,
			uint64_t fence_address, uint32_t fence_value)
{
	struct pm4_mes_query_status *packet;

	packet = (struct pm4_mes_query_status *)buffer;
	memset(buffer, 0, sizeof(struct pm4_mes_query_status));

	packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
					sizeof(struct pm4_mes_query_status));

	packet->bitfields2.context_id = 0;
	packet->bitfields2.interrupt_sel =
			interrupt_sel__mes_query_status__completion_status;
	packet->bitfields2.command =
			command__mes_query_status__fence_only_after_write_ack;

	packet->addr_hi = upper_32_bits((uint64_t)fence_address);
	packet->addr_lo = lower_32_bits((uint64_t)fence_address);
	packet->data_hi = upper_32_bits((uint64_t)fence_value);
	packet->data_lo = lower_32_bits((uint64_t)fence_value);

	return 0;
}
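/*
 * Observation: fence_value is 32 bits wide, so data_hi above is always
 * written as 0; the (uint64_t) cast only satisfies upper_32_bits().
 * The scheduler later writes fence_value back to fence_address, which
 * is what the caller polls to detect completion.
 */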
static int pm_release_mem_vi(uint64_t gpu_addr, uint32_t *buffer)
{
	struct pm4_mec_release_mem *packet;

	packet = (struct pm4_mec_release_mem *)buffer;
	memset(buffer, 0, sizeof(*packet));

	packet->header.u32All = pm_build_pm4_header(IT_RELEASE_MEM,
						sizeof(*packet));

	packet->bitfields2.event_type = CACHE_FLUSH_AND_INV_TS_EVENT;
	packet->bitfields2.event_index = event_index___release_mem__end_of_pipe;
	packet->bitfields2.tcl1_action_ena = 1;
	packet->bitfields2.tc_action_ena = 1;
	packet->bitfields2.cache_policy = cache_policy___release_mem__lru;
	packet->bitfields2.atc = 0;

	packet->bitfields3.data_sel = data_sel___release_mem__send_32_bit_low;
	packet->bitfields3.int_sel =
			int_sel___release_mem__send_interrupt_after_write_confirm;

	packet->bitfields4.address_lo_32b = (gpu_addr & 0xffffffff) >> 2;
	packet->address_hi = upper_32_bits(gpu_addr);

	packet->data_lo = 0;

	return 0;
}
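/*
 * Address encoding note: address_lo_32b carries bits [31:2] of the
 * destination, so the ">> 2" above implicitly requires gpu_addr to be
 * 4-byte aligned. With data_sel "send_32_bit_low", the CP writes the
 * 32-bit data_lo value (0 here) to that address at end-of-pipe, after
 * the cache flushes requested by the event/action bits.
 */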
const struct packet_manager_funcs kfd_vi_pm_funcs = {
	.map_process = pm_map_process_vi,
	.runlist = pm_runlist_vi,
	.set_resources = pm_set_resources_vi,
	.map_queues = pm_map_queues_vi,
	.unmap_queues = pm_unmap_queues_vi,
	.query_status = pm_query_status_vi,
	.release_mem = pm_release_mem_vi,
	.map_process_size = sizeof(struct pm4_mes_map_process),
	.runlist_size = sizeof(struct pm4_mes_runlist),
	.set_resources_size = sizeof(struct pm4_mes_set_resources),
	.map_queues_size = sizeof(struct pm4_mes_map_queues),
	.unmap_queues_size = sizeof(struct pm4_mes_unmap_queues),
	.query_status_size = sizeof(struct pm4_mes_query_status),
	.release_mem_size = sizeof(struct pm4_mec_release_mem)
};
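/*
 * Usage sketch (illustrative, not the exact caller): the generic packet
 * manager selects this table for VI-family ASICs and sizes each packet
 * buffer from the corresponding *_size member, along the lines of:
 *
 *	const struct packet_manager_funcs *pmf = &kfd_vi_pm_funcs;
 *
 *	retval = pmf->map_queues(pm, buffer, q, is_static);
 *
 * where "buffer" provides pmf->map_queues_size bytes of packet space.
 */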