// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"
#include "amdgpu.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset
 * once locked, kfd driver will stop any further GPU execution.
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);

static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	default:
		dev_warn(kfd_device,
			"Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
			sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}

	switch (sdma_version) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0xFULL;
		break;
	case IP_VERSION(6, 0, 1):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-0 queue-1; ... */
		kfd->device_info.reserved_sdma_queues_bitmap = 0x3ULL;
		break;
	default:
		break;
	}
}
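
/*
 * Illustration of the reserved_sdma_queues_bitmap encodings above: in the
 * interleaved layout used on SDMA 6.0.0/2/3, bit n maps to queue
 * (n / num_engines) on engine (n % num_engines), so 0xF covers queues 0-1
 * on engines 0-1 (assuming two engines). SDMA 6.0.1 uses a linear layout,
 * so 0x3 covers queues 0-1 of engine 0.
 */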

static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	default:
		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
			"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}

static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.supports_cwsr = true;

		kfd_device_info_set_sdma_info(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||
		    gc_version == IP_VERSION(9, 2, 2))
			kfd->device_info.needs_iommu_device = true;

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version == IP_VERSION(10, 3, 6))
				kfd->device_info.no_atomic_fw_version = 14;
			else if (gc_version == IP_VERSION(10, 3, 7))
				kfd->device_info.no_atomic_fw_version = 3;
			else if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type == CHIP_KAVERI ||
		    asic_type == CHIP_CARRIZO)
			kfd->device_info.needs_iommu_device = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}

struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info("KFD support on Hawaii is experimental. See modparam exp_hw_support\n");
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS12:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		/* Vega 10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 6):
		case IP_VERSION(10, 3, 7):
			gfx_target_version = 100306;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 0):
			gfx_target_version = 110000;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 1):
			gfx_target_version = 110003;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 2):
			gfx_target_version = 110002;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 3):
			/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
			gfx_target_version = 110001;
			f2g = &gfx_v11_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}

static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
		}

		kfd->cwsr_enabled = true;
	}
}

static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(kfd) &&
		((KFD_GC_VERSION(kfd) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(kfd) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3)  ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30)   ||
		(KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28)   ||
		(KFD_GC_VERSION(kfd) >= IP_VERSION(10, 3, 0)
			&& KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)
			&& kfd->mec2_fw_version >= 0x6b))))
		ret = amdgpu_amdkfd_alloc_gws(kfd->adev,
				kfd->adev->gds.gws_size, &kfd->gws);

	return ret;
}

static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}

bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;
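
	/*
	 * Example: a compute_vmid_bitmap of 0xFF00 (an illustrative value)
	 * gives first_vmid_kfd = ffs(0xFF00) - 1 = 8 and last_vmid_kfd =
	 * fls(0xFF00) - 1 = 15, so KFD owns the 8 VMIDs 8..15.
	 */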

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	/* Verify module parameters regarding mapped process number */
	if (hws_max_conc_proc >= 0)
		kfd->max_proc_per_quantum = min((u32)hws_max_conc_proc, kfd->vm_info.vmid_num_kfd);
	else
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;
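
	/*
	 * That is: one runlist's worth of packets is one MAP_PROCESS per
	 * possible process plus one MAP_QUEUES per possible queue plus the
	 * RUN_LIST packet itself, and the budget is doubled because two
	 * runlist packets can be in flight at once.
	 */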

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (amdgpu_use_xgmi_p2p)
		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	kfd->noretry = kfd->adev->gmc.noretry;

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			kfd->adev->gds.gws_size);
		goto gws_error;
	}

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init(kfd->adev);

	if (kgd2kfd_resume_iommu(kfd))
		goto device_iommu_error;

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	amdgpu_amdkfd_get_local_mem_info(kfd->adev, &kfd->local_mem_info);

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
		 kfd->adev->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->adev, kfd->gws);
	}

	kfree(kfd);
}

int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * Fix me. KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}

void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* For first KFD device suspend all the KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}

int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
	return err;
}

static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->adev->pdev->vendor, kfd->adev->pdev->device);

	return err;
}
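
/*
 * The helper below spreads interrupt handling across CPUs while staying on
 * the local NUMA node: starting from the current CPU, it walks the online
 * mask round-robin and queues the work on the first CPU that belongs to
 * the current node, falling back to the starting CPU if the walk wraps
 * all the way around.
 */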

static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}

int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p, trigger);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
					   GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	bitmap_free(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
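
/*
 * Example of the chunk addressing above: with the 512-byte chunk size used
 * by kgd2kfd_device_init(), chunk (bit) number 3 resolves to byte offset
 * 3 * 512 = 1536 from the start of the sub-allocator region, applied
 * identically to the GPU and CPU base addresses.
 */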

int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		__set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous, then restart our
		 * search from the last free chunk we found (which wasn't
		 * contiguous with the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached the end of the buffer, bail out with an error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}
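
/*
 * Allocation example for the sub-allocator above: a request for 1300 bytes
 * with the 512-byte chunk size needs three contiguous chunks
 * (512 + 512 + 276), so range_end ends up two chunks past range_start and
 * three bits are set in gtt_sa_bitmap.
 */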

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
		     mem_obj->range_end - mem_obj->range_start + 1);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}

void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA.
 * When the device has more than two engines, we reserve two for PCIe to enable
 * full-duplex and the rest are used as XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_dev *kdev)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!kdev->adev->gmc.xgmi.supported)
		return kdev->adev->sdma.num_instances;

	return min(kdev->adev->sdma.num_instances, 2);
}
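
/*
 * For example, a GPU with 8 SDMA instances and XGMI support reports 2
 * PCIe-optimized engines from kfd_get_num_sdma_engines() and the remaining
 * 6 as XGMI engines from the helper below; without XGMI support, all 8 are
 * reported as PCIe engines.
 */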

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_dev *kdev)
{
	/* After reserved for PCIe, the rest of engines are XGMI */
	return kdev->adev->sdma.num_instances - kfd_get_num_sdma_engines(kdev);
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a package to HIQ to hang the HWS
 * which will trigger a GPU reset and bring the HWS back to normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif