drivers/gpu/drm/amd/amdkfd/kfd_packet_manager_v9.c
/*
 * Copyright 2016-2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "kfd_kernel_queue.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_ai.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "kfd_pm4_opcodes.h"
#include "gc/gc_10_1_0_sh_mask.h"

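/*
 * pm_map_process_v9 - build a MAP_PROCESS packet for GFX9.
 *
 * The packet carries the per-process state the hardware scheduler needs:
 * PASID, SH_MEM config and apertures, trap handler (TBA/TMA) addresses,
 * GDS allocation and the VM page table base.
 */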
static int pm_map_process_v9(struct packet_manager *pm,
                uint32_t *buffer, struct qcm_process_device *qpd)
{
        struct pm4_mes_map_process *packet;
        uint64_t vm_page_table_base_addr = qpd->page_table_base;

        packet = (struct pm4_mes_map_process *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_process));
        packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
                                        sizeof(struct pm4_mes_map_process));
        packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
        packet->bitfields2.process_quantum = 10;
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
        packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
        packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
        packet->bitfields14.num_oac = qpd->num_oac;
        packet->bitfields14.sdma_enable = 1;
        packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

        packet->sh_mem_config = qpd->sh_mem_config;
        packet->sh_mem_bases = qpd->sh_mem_bases;
        if (qpd->tba_addr) {
                packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
                /* On GFX9, unlike GFX10, bit TRAP_EN of SQ_SHADER_TBA_HI is
                 * not defined, so setting it won't do any harm.
                 */
                packet->sq_shader_tba_hi = upper_32_bits(qpd->tba_addr >> 8)
                                | 1 << SQ_SHADER_TBA_HI__TRAP_EN__SHIFT;

                packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
                packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
        }

        packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
        packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

        packet->vm_context_page_table_base_addr_lo32 =
                        lower_32_bits(vm_page_table_base_addr);
        packet->vm_context_page_table_base_addr_hi32 =
                        upper_32_bits(vm_page_table_base_addr);

        return 0;
}

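/*
 * pm_map_process_aldebaran - build a MAP_PROCESS packet for Aldebaran.
 *
 * Programs the same process state as the GFX9 builder, using the
 * Aldebaran packet layout; only the low half of the trap base address
 * is written here.
 */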
static int pm_map_process_aldebaran(struct packet_manager *pm,
                uint32_t *buffer, struct qcm_process_device *qpd)
{
        struct pm4_mes_map_process_aldebaran *packet;
        uint64_t vm_page_table_base_addr = qpd->page_table_base;

        packet = (struct pm4_mes_map_process_aldebaran *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_process_aldebaran));
        packet->header.u32All = pm_build_pm4_header(IT_MAP_PROCESS,
                        sizeof(struct pm4_mes_map_process_aldebaran));
        packet->bitfields2.diq_enable = (qpd->is_debug) ? 1 : 0;
        packet->bitfields2.process_quantum = 10;
        packet->bitfields2.pasid = qpd->pqm->process->pasid;
        packet->bitfields14.gds_size = qpd->gds_size & 0x3F;
        packet->bitfields14.gds_size_hi = (qpd->gds_size >> 6) & 0xF;
        packet->bitfields14.num_gws = (qpd->mapped_gws_queue) ? qpd->num_gws : 0;
        packet->bitfields14.num_oac = qpd->num_oac;
        packet->bitfields14.sdma_enable = 1;
        packet->bitfields14.num_queues = (qpd->is_debug) ? 0 : qpd->queue_count;

        packet->sh_mem_config = qpd->sh_mem_config;
        packet->sh_mem_bases = qpd->sh_mem_bases;
        if (qpd->tba_addr) {
                packet->sq_shader_tba_lo = lower_32_bits(qpd->tba_addr >> 8);
                packet->sq_shader_tma_lo = lower_32_bits(qpd->tma_addr >> 8);
                packet->sq_shader_tma_hi = upper_32_bits(qpd->tma_addr >> 8);
        }

        packet->gds_addr_lo = lower_32_bits(qpd->gds_context_area);
        packet->gds_addr_hi = upper_32_bits(qpd->gds_context_area);

        packet->vm_context_page_table_base_addr_lo32 =
                        lower_32_bits(vm_page_table_base_addr);
        packet->vm_context_page_table_base_addr_hi32 =
                        upper_32_bits(vm_page_table_base_addr);

        return 0;
}

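/*
 * pm_runlist_v9 - build a RUN_LIST packet.
 *
 * @ib is the GPU address of the runlist indirect buffer and
 * @ib_size_in_dwords its length; @chain selects whether this runlist
 * chains to a following one.
 */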
static int pm_runlist_v9(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t ib, size_t ib_size_in_dwords, bool chain)
{
        struct pm4_mes_runlist *packet;

        int concurrent_proc_cnt = 0;
        struct kfd_dev *kfd = pm->dqm->dev;

        /* Determine the number of processes to map together to HW:
         * it cannot exceed the number of VMIDs available to the
         * scheduler, and is the smaller of the number of processes
         * in the runlist and the kfd module parameter
         * hws_max_conc_proc.
         * Note: the arbitration between the number of VMIDs and
         * hws_max_conc_proc was already done in kgd2kfd_device_init().
         */
        concurrent_proc_cnt = min(pm->dqm->processes_count,
                        kfd->max_proc_per_quantum);

        packet = (struct pm4_mes_runlist *)buffer;

        memset(buffer, 0, sizeof(struct pm4_mes_runlist));
        packet->header.u32All = pm_build_pm4_header(IT_RUN_LIST,
                                                sizeof(struct pm4_mes_runlist));

        packet->bitfields4.ib_size = ib_size_in_dwords;
        packet->bitfields4.chain = chain ? 1 : 0;
        packet->bitfields4.offload_polling = 0;
        packet->bitfields4.chained_runlist_idle_disable = chain ? 1 : 0;
        packet->bitfields4.valid = 1;
        packet->bitfields4.process_cnt = concurrent_proc_cnt;
        packet->ordinal2 = lower_32_bits(ib);
        packet->ib_base_hi = upper_32_bits(ib);

        return 0;
}

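/*
 * pm_set_resources_v9 - build a SET_RESOURCES packet.
 *
 * Describes the resources the hardware scheduler may use: VMID mask,
 * unmap latency, OAC mask, GDS heap, GWS mask and queue slot mask.
 */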
static int pm_set_resources_v9(struct packet_manager *pm, uint32_t *buffer,
                                struct scheduling_resources *res)
{
        struct pm4_mes_set_resources *packet;

        packet = (struct pm4_mes_set_resources *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_set_resources));

        packet->header.u32All = pm_build_pm4_header(IT_SET_RESOURCES,
                                        sizeof(struct pm4_mes_set_resources));

        packet->bitfields2.queue_type =
                        queue_type__mes_set_resources__hsa_interface_queue_hiq;
        packet->bitfields2.vmid_mask = res->vmid_mask;
        packet->bitfields2.unmap_latency = KFD_UNMAP_LATENCY_MS / 100;
        packet->bitfields7.oac_mask = res->oac_mask;
        packet->bitfields8.gds_heap_base = res->gds_heap_base;
        packet->bitfields8.gds_heap_size = res->gds_heap_size;

        packet->gws_mask_lo = lower_32_bits(res->gws_mask);
        packet->gws_mask_hi = upper_32_bits(res->gws_mask);

        packet->queue_mask_lo = lower_32_bits(res->queue_mask);
        packet->queue_mask_hi = upper_32_bits(res->queue_mask);

        return 0;
}

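/*
 * pm_map_queues_v9 - build a MAP_QUEUES packet for a single queue.
 *
 * The engine selection follows the queue type (compute, DIQ, SDMA or
 * SDMA XGMI); the packet points the scheduler at the queue's MQD and
 * write pointer.
 */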
static int pm_map_queues_v9(struct packet_manager *pm, uint32_t *buffer,
                struct queue *q, bool is_static)
{
        struct pm4_mes_map_queues *packet;
        bool use_static = is_static;

        packet = (struct pm4_mes_map_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_map_queues));

        packet->header.u32All = pm_build_pm4_header(IT_MAP_QUEUES,
                                        sizeof(struct pm4_mes_map_queues));
        packet->bitfields2.num_queues = 1;
        packet->bitfields2.queue_sel =
                queue_sel__mes_map_queues__map_to_hws_determined_queue_slots_vi;

        packet->bitfields2.engine_sel =
                engine_sel__mes_map_queues__compute_vi;
        packet->bitfields2.gws_control_queue = q->gws ? 1 : 0;
        packet->bitfields2.extended_engine_sel =
                extended_engine_sel__mes_map_queues__legacy_engine_sel;
        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_compute_vi;

        switch (q->properties.type) {
        case KFD_QUEUE_TYPE_COMPUTE:
                if (use_static)
                        packet->bitfields2.queue_type =
                queue_type__mes_map_queues__normal_latency_static_queue_vi;
                break;
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.queue_type =
                        queue_type__mes_map_queues__debug_interface_queue_vi;
                break;
        case KFD_QUEUE_TYPE_SDMA:
        case KFD_QUEUE_TYPE_SDMA_XGMI:
                use_static = false; /* no static queues under SDMA */
                if (q->properties.sdma_engine_id < 2) {
                        packet->bitfields2.engine_sel = q->properties.sdma_engine_id +
                                engine_sel__mes_map_queues__sdma0_vi;
                } else {
                        packet->bitfields2.extended_engine_sel =
                                extended_engine_sel__mes_map_queues__sdma0_to_7_sel;
                        packet->bitfields2.engine_sel = q->properties.sdma_engine_id;
                }
                break;
        default:
                WARN(1, "queue type %d", q->properties.type);
                return -EINVAL;
        }
        packet->bitfields3.doorbell_offset =
                        q->properties.doorbell_off;

        packet->mqd_addr_lo =
                        lower_32_bits(q->gart_mqd_addr);

        packet->mqd_addr_hi =
                        upper_32_bits(q->gart_mqd_addr);

        packet->wptr_addr_lo =
                        lower_32_bits((uint64_t)q->properties.write_ptr);

        packet->wptr_addr_hi =
                        upper_32_bits((uint64_t)q->properties.write_ptr);

        return 0;
}

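/*
 * pm_unmap_queues_v9 - build an UNMAP_QUEUES packet.
 *
 * Depending on @reset the selected queues are preempted or reset.
 * @filter and @filter_param select a single queue by doorbell offset,
 * all queues of a PASID, all queues, or all non-static queues; for SDMA
 * queues @sdma_engine picks the engine.
 */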
static int pm_unmap_queues_v9(struct packet_manager *pm, uint32_t *buffer,
                        enum kfd_queue_type type,
                        enum kfd_unmap_queues_filter filter,
                        uint32_t filter_param, bool reset,
                        unsigned int sdma_engine)
{
        struct pm4_mes_unmap_queues *packet;

        packet = (struct pm4_mes_unmap_queues *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_unmap_queues));

        packet->header.u32All = pm_build_pm4_header(IT_UNMAP_QUEUES,
                                        sizeof(struct pm4_mes_unmap_queues));
        switch (type) {
        case KFD_QUEUE_TYPE_COMPUTE:
        case KFD_QUEUE_TYPE_DIQ:
                packet->bitfields2.extended_engine_sel =
                        extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
                packet->bitfields2.engine_sel =
                        engine_sel__mes_unmap_queues__compute;
                break;
        case KFD_QUEUE_TYPE_SDMA:
        case KFD_QUEUE_TYPE_SDMA_XGMI:
                if (sdma_engine < 2) {
                        packet->bitfields2.extended_engine_sel =
                                extended_engine_sel__mes_unmap_queues__legacy_engine_sel;
                        packet->bitfields2.engine_sel =
                                engine_sel__mes_unmap_queues__sdma0 + sdma_engine;
                } else {
                        packet->bitfields2.extended_engine_sel =
                                extended_engine_sel__mes_unmap_queues__sdma0_to_7_sel;
                        packet->bitfields2.engine_sel = sdma_engine;
                }
                break;
        default:
                WARN(1, "queue type %d", type);
                return -EINVAL;
        }

        if (reset)
                packet->bitfields2.action =
                        action__mes_unmap_queues__reset_queues;
        else
                packet->bitfields2.action =
                        action__mes_unmap_queues__preempt_queues;

        switch (filter) {
        case KFD_UNMAP_QUEUES_FILTER_SINGLE_QUEUE:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_specified_queues;
                packet->bitfields2.num_queues = 1;
                packet->bitfields3b.doorbell_offset0 = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_BY_PASID:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__perform_request_on_pasid_queues;
                packet->bitfields3a.pasid = filter_param;
                break;
        case KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES:
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__unmap_all_queues;
                break;
        case KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES:
                /* in this case, we do not preempt static queues */
                packet->bitfields2.queue_sel =
                        queue_sel__mes_unmap_queues__unmap_all_non_static_queues;
                break;
        default:
                WARN(1, "filter %d", filter);
                return -EINVAL;
        }

        return 0;
}

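/*
 * pm_query_status_v9 - build a QUERY_STATUS packet.
 *
 * Makes the firmware write @fence_value to @fence_address once the
 * preceding packets have been processed.
 */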
static int pm_query_status_v9(struct packet_manager *pm, uint32_t *buffer,
                        uint64_t fence_address, uint64_t fence_value)
{
        struct pm4_mes_query_status *packet;

        packet = (struct pm4_mes_query_status *)buffer;
        memset(buffer, 0, sizeof(struct pm4_mes_query_status));

        packet->header.u32All = pm_build_pm4_header(IT_QUERY_STATUS,
                                        sizeof(struct pm4_mes_query_status));

        packet->bitfields2.context_id = 0;
        packet->bitfields2.interrupt_sel =
                        interrupt_sel__mes_query_status__completion_status;
        packet->bitfields2.command =
                        command__mes_query_status__fence_only_after_write_ack;

        packet->addr_hi = upper_32_bits((uint64_t)fence_address);
        packet->addr_lo = lower_32_bits((uint64_t)fence_address);
        packet->data_hi = upper_32_bits((uint64_t)fence_value);
        packet->data_lo = lower_32_bits((uint64_t)fence_value);

        return 0;
}

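/* Packet manager backend for GFX9-based ASICs. RELEASE_MEM is not used,
 * so release_mem stays NULL.
 */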
const struct packet_manager_funcs kfd_v9_pm_funcs = {
        .map_process            = pm_map_process_v9,
        .runlist                = pm_runlist_v9,
        .set_resources          = pm_set_resources_v9,
        .map_queues             = pm_map_queues_v9,
        .unmap_queues           = pm_unmap_queues_v9,
        .query_status           = pm_query_status_v9,
        .release_mem            = NULL,
        .map_process_size       = sizeof(struct pm4_mes_map_process),
        .runlist_size           = sizeof(struct pm4_mes_runlist),
        .set_resources_size     = sizeof(struct pm4_mes_set_resources),
        .map_queues_size        = sizeof(struct pm4_mes_map_queues),
        .unmap_queues_size      = sizeof(struct pm4_mes_unmap_queues),
        .query_status_size      = sizeof(struct pm4_mes_query_status),
        .release_mem_size       = 0,
};

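/* Aldebaran reuses the GFX9 builders for everything except MAP_PROCESS,
 * which uses the Aldebaran-specific packet format.
 */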
const struct packet_manager_funcs kfd_aldebaran_pm_funcs = {
        .map_process            = pm_map_process_aldebaran,
        .runlist                = pm_runlist_v9,
        .set_resources          = pm_set_resources_v9,
        .map_queues             = pm_map_queues_v9,
        .unmap_queues           = pm_unmap_queues_v9,
        .query_status           = pm_query_status_v9,
        .release_mem            = NULL,
        .map_process_size       = sizeof(struct pm4_mes_map_process_aldebaran),
        .runlist_size           = sizeof(struct pm4_mes_runlist),
        .set_resources_size     = sizeof(struct pm4_mes_set_resources),
        .map_queues_size        = sizeof(struct pm4_mes_map_queues),
        .unmap_queues_size      = sizeof(struct pm4_mes_unmap_queues),
        .query_status_size      = sizeof(struct pm4_mes_query_status),
        .release_mem_size       = 0,
};