/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
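
/*
 * Bytes reserved per queue for its MQD (memory queue descriptor); used
 * below when sizing the GTT buffer: one aligned MQD slot per queue.
 */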
#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset;
 * once locked, the kfd driver will stop any further GPU execution and
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
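
/*
 * Per-ASIC kfd2kgd dispatch tables, indexed by amdgpu asic_type; a
 * missing (NULL) entry means the ASIC is not supported by KFD in this
 * kernel configuration (see the checks in kgd2kfd_probe()).
 */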
static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	[CHIP_KAVERI] = &gfx_v7_kfd2kgd,
#endif
	[CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
	[CHIP_RAVEN] = &gfx_v9_kfd2kgd,
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	[CHIP_HAWAII] = &gfx_v7_kfd2kgd,
#endif
	[CHIP_TONGA] = &gfx_v8_kfd2kgd,
	[CHIP_FIJI] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
	[CHIP_VEGAM] = &gfx_v8_kfd2kgd,
	[CHIP_VEGA10] = &gfx_v9_kfd2kgd,
	[CHIP_VEGA12] = &gfx_v9_kfd2kgd,
	[CHIP_VEGA20] = &gfx_v9_kfd2kgd,
	[CHIP_RENOIR] = &gfx_v9_kfd2kgd,
	[CHIP_ARCTURUS] = &arcturus_kfd2kgd,
	[CHIP_NAVI10] = &gfx_v10_kfd2kgd,
	[CHIP_NAVI12] = &gfx_v10_kfd2kgd,
	[CHIP_NAVI14] = &gfx_v10_kfd2kgd,
	[CHIP_SIENNA_CICHLID] = &gfx_v10_3_kfd2kgd,
	[CHIP_NAVY_FLOUNDER] = &gfx_v10_3_kfd2kgd,
};

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.asic_name = "kaveri",
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.asic_name = "carrizo",
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.asic_name = "raven",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};
#endif

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.asic_name = "hawaii",
	.max_pasid_bits = 16,
	/* max num of queues for Hawaii. TODO: should be a dynamic value */
	.max_no_of_hqd = 24,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.asic_name = "tonga",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};
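
/*
 * VF (virtualisation) variants mirror their bare-metal entries; where
 * they differ (e.g. Fiji, Polaris10) it is only in not requiring PCIe
 * atomics.
 */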

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.asic_name = "polaris11",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris12_device_info = {
	.asic_family = CHIP_POLARIS12,
	.asic_name = "polaris12",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vegam_device_info = {
	.asic_family = CHIP_VEGAM,
	.asic_name = "vegam",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega12_device_info = {
	.asic_family = CHIP_VEGA12,
	.asic_name = "vega12",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega20_device_info = {
	.asic_family = CHIP_VEGA20,
	.asic_name = "vega20",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info arcturus_device_info = {
	.asic_family = CHIP_ARCTURUS,
	.asic_name = "arcturus",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 6,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info renoir_device_info = {
	.asic_family = CHIP_RENOIR,
	.asic_name = "renoir",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info navi10_device_info = {
	.asic_family = CHIP_NAVI10,
	.asic_name = "navi10",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi12_device_info = {
	.asic_family = CHIP_NAVI12,
	.asic_name = "navi12",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi14_device_info = {
	.asic_family = CHIP_NAVI14,
	.asic_name = "navi14",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info sienna_cichlid_device_info = {
	.asic_family = CHIP_SIENNA_CICHLID,
	.asic_name = "sienna_cichlid",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 4,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navy_flounder_device_info = {
	.asic_family = CHIP_NAVY_FLOUNDER,
	.asic_name = "navy_flounder",
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

/* For each entry, [0] is regular and [1] is virtualisation device. */
static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	[CHIP_KAVERI] = {&kaveri_device_info, NULL},
	[CHIP_CARRIZO] = {&carrizo_device_info, NULL},
	[CHIP_RAVEN] = {&raven_device_info, NULL},
#endif
	[CHIP_HAWAII] = {&hawaii_device_info, NULL},
	[CHIP_TONGA] = {&tonga_device_info, NULL},
	[CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
	[CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
	[CHIP_POLARIS11] = {&polaris11_device_info, NULL},
	[CHIP_POLARIS12] = {&polaris12_device_info, NULL},
	[CHIP_VEGAM] = {&vegam_device_info, NULL},
	[CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
	[CHIP_VEGA12] = {&vega12_device_info, NULL},
	[CHIP_VEGA20] = {&vega20_device_info, NULL},
	[CHIP_RENOIR] = {&renoir_device_info, NULL},
	[CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
	[CHIP_NAVI10] = {&navi10_device_info, NULL},
	[CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
	[CHIP_NAVI14] = {&navi14_device_info, NULL},
	[CHIP_SIENNA_CICHLID] = {&sienna_cichlid_device_info, &sienna_cichlid_device_info},
	[CHIP_NAVY_FLOUNDER] = {&navy_flounder_device_info, &navy_flounder_device_info},
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
	struct kfd_dev *kfd;
	const struct kfd_device_info *device_info;
	const struct kfd2kgd_calls *f2g;

	if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
		|| asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
		dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
		return NULL; /* asic_type out of range */
	}

	device_info = kfd_supported_devices[asic_type][vf];
	f2g = kfd2kgd_funcs[asic_type];

	if (!device_info || !f2g) {
		dev_err(kfd_device, "%s %s not supported in kfd\n",
			amdgpu_asic_name[asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
	if (device_info->needs_pci_atomics &&
	    !kfd->pci_atomic_requested) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		kfree(kfd);
		return NULL;
	}

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	return kfd;
}
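
/*
 * Pick the CWSR (compute wave save/restore) trap handler binary that
 * matches the GPU generation. Each binary must fit in a single page,
 * which the BUILD_BUG_ON checks below enforce at compile time.
 */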
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (kfd->device_info->asic_family < CHIP_NAVI10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		}

		kfd->cwsr_enabled = true;
	}
}
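
/*
 * GWS (global wave sync) requires HWS scheduling and a minimum MEC2
 * firmware level that differs per ASIC family, hence the per-family
 * version checks below.
 */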
static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support
		|| (kfd->device_info->asic_family == CHIP_VEGA10
			&& kfd->mec2_fw_version >= 0x81b3)
		|| (kfd->device_info->asic_family >= CHIP_VEGA12
			&& kfd->device_info->asic_family <= CHIP_RAVEN
			&& kfd->mec2_fw_version >= 0x1b3)
		|| (kfd->device_info->asic_family == CHIP_ARCTURUS
			&& kfd->mec2_fw_version >= 0x30))
		ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
				amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}
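
/*
 * Main per-device init path: snapshot firmware versions and shared
 * resources, carve the KFD VMID range out of the compute VMID bitmap,
 * size and allocate the GTT buffer, then bring up doorbells, interrupts,
 * the device queue manager, GWS, IOMMU, CWSR and topology in order; the
 * error labels at the bottom unwind in reverse.
 */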
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	size += (KFD_MAX_NUM_OF_PROCESSES * sizeof(struct pm4_mes_map_process) +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (kfd->kfd2kgd->get_hive_id)
		kfd->hive_id = kfd->kfd2kgd->get_hive_id(kfd->kgd);

	kfd->unique_id = amdgpu_amdkfd_get_unique_id(kfd->kgd);

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			amdgpu_amdkfd_get_num_gws(kfd->kgd));
		goto gws_error;
	}

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		kgd2kfd_suspend(kfd, false);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * FIXME: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}
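
/*
 * Suspend/resume keep a global device count in kfd_locked: the first
 * device to suspend evicts all KFD processes and the last device to
 * resume restores them. Runtime PM (run_pm) skips this global lock.
 */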
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* For first KFD device suspend all the KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}
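
/*
 * Queue interrupt work on a CPU in the current NUMA node so the worker
 * shares a cache domain with the interrupt handler; falls back to the
 * current CPU if no other online CPU on this node is found.
 */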
static inline void kfd_queue_work(struct workqueue_struct *wq,
				struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again.
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker.
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
		p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
				unsigned int bit_num,
				unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
				unsigned int bit_num,
				unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
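
/*
 * Usage sketch for the GTT sub-allocator below (illustrative only, not
 * part of the driver): allocate a buffer, use its CPU and GPU addresses,
 * then release it.
 *
 *	struct kfd_mem_obj *mem_obj;
 *
 *	if (!kfd_gtt_sa_allocate(kfd, 512, &mem_obj)) {
 *		// write through mem_obj->cpu_ptr, hand mem_obj->gpu_addr
 *		// to the hardware, then:
 *		kfd_gtt_sa_free(kfd, mem_obj);
 *	}
 */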
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, restart the
		 * search from the last free chunk we found (which wasn't
		 * contiguous with the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}
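
/*
 * compute_profile reference-counts busy compute queues: the first
 * active user switches the GPU out of the idle power profile and the
 * last one to drop switches it back.
 */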
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint32_t throttle_bitmask)
{
	if (kfd)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state.
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	int r = 0;

	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled\n");
		return -EINVAL;
	}

	r = pm_debugfs_hang_hws(&dev->dqm->packets);
	if (!r)
		r = dqm_debugfs_execute_queues(dev->dqm);

	return r;
}

#endif