// SPDX-License-Identifier: GPL-2.0 OR MIT
/*
 * Copyright 2014-2022 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_svm.h"
#include "kfd_migrate.h"
#include "amdgpu.h"
#include "amdgpu_xcp.h"
#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset
 * once locked, kfd driver will stop any further GPU execution.
 * create process (open) will return -EAGAIN.
 */
static int kfd_locked;
#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gc_9_4_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v11_kfd2kgd;
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
			   unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume_iommu(struct kfd_dev *kfd);
static int kfd_resume(struct kfd_node *kfd);
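/* Per-SDMA-IP configuration: the number of user-mode SDMA queues exposed per
 * engine and, on SDMA 6.x, the queues reserved for kernel use (paging/gfx).
 */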
static void kfd_device_info_set_sdma_info(struct kfd_dev *kfd)
{
	uint32_t sdma_version = kfd->adev->ip_versions[SDMA0_HWIP][0];

	switch (sdma_version) {
	case IP_VERSION(4, 0, 0):/* VEGA10 */
	case IP_VERSION(4, 0, 1):/* VEGA12 */
	case IP_VERSION(4, 1, 0):/* RAVEN */
	case IP_VERSION(4, 1, 1):/* RAVEN */
	case IP_VERSION(4, 1, 2):/* RENOIR */
	case IP_VERSION(5, 2, 1):/* VANGOGH */
	case IP_VERSION(5, 2, 3):/* YELLOW_CARP */
	case IP_VERSION(5, 2, 6):/* GC 10.3.6 */
	case IP_VERSION(5, 2, 7):/* GC 10.3.7 */
		kfd->device_info.num_sdma_queues_per_engine = 2;
		break;
	case IP_VERSION(4, 2, 0):/* VEGA20 */
	case IP_VERSION(4, 2, 2):/* ARCTURUS */
	case IP_VERSION(4, 4, 0):/* ALDEBARAN */
	case IP_VERSION(4, 4, 2):
	case IP_VERSION(5, 0, 0):/* NAVI10 */
	case IP_VERSION(5, 0, 1):/* CYAN_SKILLFISH */
	case IP_VERSION(5, 0, 2):/* NAVI14 */
	case IP_VERSION(5, 0, 5):/* NAVI12 */
	case IP_VERSION(5, 2, 0):/* SIENNA_CICHLID */
	case IP_VERSION(5, 2, 2):/* NAVY_FLOUNDER */
	case IP_VERSION(5, 2, 4):/* DIMGREY_CAVEFISH */
	case IP_VERSION(5, 2, 5):/* BEIGE_GOBY */
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		kfd->device_info.num_sdma_queues_per_engine = 8;
		break;
	default:
		dev_warn(kfd_device,
			 "Default sdma queue per engine(8) is set due to mismatch of sdma ip block(SDMA_HWIP:0x%x).\n",
			 sdma_version);
		kfd->device_info.num_sdma_queues_per_engine = 8;
	}

	bitmap_zero(kfd->device_info.reserved_sdma_queues_bitmap, KFD_MAX_SDMA_QUEUES);

	switch (sdma_version) {
	case IP_VERSION(6, 0, 0):
	case IP_VERSION(6, 0, 1):
	case IP_VERSION(6, 0, 2):
	case IP_VERSION(6, 0, 3):
		/* Reserve 1 for paging and 1 for gfx */
		kfd->device_info.num_reserved_sdma_queues_per_engine = 2;
		/* BIT(0)=engine-0 queue-0; BIT(1)=engine-1 queue-0; BIT(2)=engine-0 queue-1; ... */
		bitmap_set(kfd->device_info.reserved_sdma_queues_bitmap, 0,
			   kfd->adev->sdma.num_instances *
			   kfd->device_info.num_reserved_sdma_queues_per_engine);
		break;
	default:
		break;
	}
}
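/* Select the interrupt (event) handling callbacks that match the GC IP
 * generation; unknown versions fall back to the v9 handler with a warning.
 */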
static void kfd_device_info_set_event_interrupt_class(struct kfd_dev *kfd)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);

	switch (gc_version) {
	case IP_VERSION(9, 0, 1): /* VEGA10 */
	case IP_VERSION(9, 1, 0): /* RAVEN */
	case IP_VERSION(9, 2, 1): /* VEGA12 */
	case IP_VERSION(9, 2, 2): /* RAVEN */
	case IP_VERSION(9, 3, 0): /* RENOIR */
	case IP_VERSION(9, 4, 0): /* VEGA20 */
	case IP_VERSION(9, 4, 1): /* ARCTURUS */
	case IP_VERSION(9, 4, 2): /* ALDEBARAN */
	case IP_VERSION(9, 4, 3): /* GC 9.4.3 */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
		break;
	case IP_VERSION(10, 3, 1): /* VANGOGH */
	case IP_VERSION(10, 3, 3): /* YELLOW_CARP */
	case IP_VERSION(10, 3, 6): /* GC 10.3.6 */
	case IP_VERSION(10, 3, 7): /* GC 10.3.7 */
	case IP_VERSION(10, 1, 3): /* CYAN_SKILLFISH */
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 1, 10): /* NAVI10 */
	case IP_VERSION(10, 1, 2): /* NAVI12 */
	case IP_VERSION(10, 1, 1): /* NAVI14 */
	case IP_VERSION(10, 3, 0): /* SIENNA_CICHLID */
	case IP_VERSION(10, 3, 2): /* NAVY_FLOUNDER */
	case IP_VERSION(10, 3, 4): /* DIMGREY_CAVEFISH */
	case IP_VERSION(10, 3, 5): /* BEIGE_GOBY */
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v10;
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 1):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(11, 0, 4):
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v11;
		break;
	default:
		dev_warn(kfd_device, "v9 event interrupt handler is set due to "
			"mismatch of gc ip block(GC_HWIP:0x%x).\n", gc_version);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_v9;
	}
}
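/* Fill in the per-ASIC device_info fields (doorbell and IH ring entry sizes,
 * CWSR support, IOMMUv2 and PCIe-atomics requirements) for both SOC15 and
 * pre-SOC15 parts.
 */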
static void kfd_device_info_init(struct kfd_dev *kfd,
				 bool vf, uint32_t gfx_target_version)
{
	uint32_t gc_version = KFD_GC_VERSION(kfd);
	uint32_t asic_type = kfd->adev->asic_type;

	kfd->device_info.max_pasid_bits = 16;
	kfd->device_info.max_no_of_hqd = 24;
	kfd->device_info.num_of_watch_points = 4;
	kfd->device_info.mqd_size_aligned = MQD_SIZE_ALIGNED;
	kfd->device_info.gfx_target_version = gfx_target_version;

	if (KFD_IS_SOC15(kfd)) {
		kfd->device_info.doorbell_size = 8;
		kfd->device_info.ih_ring_entry_size = 8 * sizeof(uint32_t);
		kfd->device_info.supports_cwsr = true;

		kfd_device_info_set_sdma_info(kfd);

		kfd_device_info_set_event_interrupt_class(kfd);

		/* Raven */
		if (gc_version == IP_VERSION(9, 1, 0) ||
		    gc_version == IP_VERSION(9, 2, 2))
			kfd->device_info.needs_iommu_device = true;

		if (gc_version < IP_VERSION(11, 0, 0)) {
			/* Navi2x+, Navi1x+ */
			if (gc_version == IP_VERSION(10, 3, 6))
				kfd->device_info.no_atomic_fw_version = 14;
			else if (gc_version == IP_VERSION(10, 3, 7))
				kfd->device_info.no_atomic_fw_version = 3;
			else if (gc_version >= IP_VERSION(10, 3, 0))
				kfd->device_info.no_atomic_fw_version = 92;
			else if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.no_atomic_fw_version = 145;

			/* Navi1x+ */
			if (gc_version >= IP_VERSION(10, 1, 1))
				kfd->device_info.needs_pci_atomics = true;
		} else if (gc_version < IP_VERSION(12, 0, 0)) {
			/*
			 * PCIe atomics support acknowledgment in GFX11 RS64 CPFW requires
			 * MEC version >= 509. Prior RS64 CPFW versions (and all F32) require
			 * PCIe atomics support.
			 */
			kfd->device_info.needs_pci_atomics = true;
			kfd->device_info.no_atomic_fw_version = kfd->adev->gfx.rs64_enable ? 509 : 0;
		}
	} else {
		kfd->device_info.doorbell_size = 4;
		kfd->device_info.ih_ring_entry_size = 4 * sizeof(uint32_t);
		kfd->device_info.event_interrupt_class = &event_interrupt_class_cik;
		kfd->device_info.num_sdma_queues_per_engine = 2;

		if (asic_type != CHIP_KAVERI &&
		    asic_type != CHIP_HAWAII &&
		    asic_type != CHIP_TONGA)
			kfd->device_info.supports_cwsr = true;

		if (asic_type == CHIP_KAVERI ||
		    asic_type == CHIP_CARRIZO)
			kfd->device_info.needs_iommu_device = true;

		if (asic_type != CHIP_HAWAII && !vf)
			kfd->device_info.needs_pci_atomics = true;
	}
}
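/* kgd2kfd_probe - called by amdgpu to create a kfd_dev for a GPU.
 *
 * Maps the ASIC (or its GC IP version) to a gfx_target_version and a set of
 * kfd2kgd callbacks; returns NULL if the device is not supported by KFD.
 */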
struct kfd_dev *kgd2kfd_probe(struct amdgpu_device *adev, bool vf)
{
	struct kfd_dev *kfd = NULL;
	const struct kfd2kgd_calls *f2g = NULL;
	uint32_t gfx_target_version = 0;

	switch (adev->asic_type) {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_KAVERI:
		gfx_target_version = 70000;
		if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_CARRIZO:
		gfx_target_version = 80001;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	case CHIP_HAWAII:
		gfx_target_version = 70001;
		if (!amdgpu_exp_hw_support)
			pr_info(
	"KFD support on Hawaii is experimental. See modparam exp_hw_support\n"
				);
		else if (!vf)
			f2g = &gfx_v7_kfd2kgd;
		break;
#endif
	case CHIP_TONGA:
		gfx_target_version = 80002;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_FIJI:
	case CHIP_POLARIS10:
		gfx_target_version = 80003;
		f2g = &gfx_v8_kfd2kgd;
		break;
	case CHIP_POLARIS11:
	case CHIP_POLARIS12:
	case CHIP_VEGAM:
		gfx_target_version = 80003;
		if (!vf)
			f2g = &gfx_v8_kfd2kgd;
		break;
	default:
		switch (adev->ip_versions[GC_HWIP][0]) {
		/* Vega 10 */
		case IP_VERSION(9, 0, 1):
			gfx_target_version = 90000;
			f2g = &gfx_v9_kfd2kgd;
			break;
#ifdef KFD_SUPPORT_IOMMU_V2
		/* Raven */
		case IP_VERSION(9, 1, 0):
		case IP_VERSION(9, 2, 2):
			gfx_target_version = 90002;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
#endif
		/* Vega12 */
		case IP_VERSION(9, 2, 1):
			gfx_target_version = 90004;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Renoir */
		case IP_VERSION(9, 3, 0):
			gfx_target_version = 90012;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Vega20 */
		case IP_VERSION(9, 4, 0):
			gfx_target_version = 90006;
			if (!vf)
				f2g = &gfx_v9_kfd2kgd;
			break;
		/* Arcturus */
		case IP_VERSION(9, 4, 1):
			gfx_target_version = 90008;
			f2g = &arcturus_kfd2kgd;
			break;
		/* Aldebaran */
		case IP_VERSION(9, 4, 2):
			gfx_target_version = 90010;
			f2g = &aldebaran_kfd2kgd;
			break;
		case IP_VERSION(9, 4, 3):
			gfx_target_version = adev->rev_id >= 1 ? 90402
					   : adev->flags & AMD_IS_APU ? 90400
					   : 90401;
			f2g = &gc_9_4_3_kfd2kgd;
			break;
		/* Navi10 */
		case IP_VERSION(10, 1, 10):
			gfx_target_version = 100100;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi12 */
		case IP_VERSION(10, 1, 2):
			gfx_target_version = 100101;
			f2g = &gfx_v10_kfd2kgd;
			break;
		/* Navi14 */
		case IP_VERSION(10, 1, 1):
			gfx_target_version = 100102;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Cyan Skillfish */
		case IP_VERSION(10, 1, 3):
		case IP_VERSION(10, 1, 4):
			gfx_target_version = 100103;
			if (!vf)
				f2g = &gfx_v10_kfd2kgd;
			break;
		/* Sienna Cichlid */
		case IP_VERSION(10, 3, 0):
			gfx_target_version = 100300;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Navy Flounder */
		case IP_VERSION(10, 3, 2):
			gfx_target_version = 100301;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Van Gogh */
		case IP_VERSION(10, 3, 1):
			gfx_target_version = 100303;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Dimgrey Cavefish */
		case IP_VERSION(10, 3, 4):
			gfx_target_version = 100302;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Beige Goby */
		case IP_VERSION(10, 3, 5):
			gfx_target_version = 100304;
			f2g = &gfx_v10_3_kfd2kgd;
			break;
		/* Yellow Carp */
		case IP_VERSION(10, 3, 3):
			gfx_target_version = 100305;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(10, 3, 6):
		case IP_VERSION(10, 3, 7):
			gfx_target_version = 100306;
			if (!vf)
				f2g = &gfx_v10_3_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 0):
			gfx_target_version = 110000;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 1):
		case IP_VERSION(11, 0, 4):
			gfx_target_version = 110003;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 2):
			gfx_target_version = 110002;
			f2g = &gfx_v11_kfd2kgd;
			break;
		case IP_VERSION(11, 0, 3):
			if ((adev->pdev->device == 0x7460 &&
			     adev->pdev->revision == 0x00) ||
			    (adev->pdev->device == 0x7461 &&
			     adev->pdev->revision == 0x00))
				/* Note: Compiler version is 11.0.5 while HW version is 11.0.3 */
				gfx_target_version = 110005;
			else
				/* Note: Compiler version is 11.0.1 while HW version is 11.0.3 */
				gfx_target_version = 110001;
			f2g = &gfx_v11_kfd2kgd;
			break;
		default:
			break;
		}
		break;
	}

	if (!f2g) {
		if (adev->ip_versions[GC_HWIP][0])
			dev_err(kfd_device, "GC IP %06x %s not supported in kfd\n",
				adev->ip_versions[GC_HWIP][0], vf ? "VF" : "");
		else
			dev_err(kfd_device, "%s %s not supported in kfd\n",
				amdgpu_asic_name[adev->asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	kfd->adev = adev;
	kfd_device_info_init(kfd, vf, gfx_target_version);
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	ida_init(&kfd->doorbell_ida);

	return kfd;
}
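/* Select the CWSR (compute wave save/restore) trap handler that matches the
 * GC IP version. Each handler binary must fit in a single page, which the
 * BUILD_BUG_ON checks below enforce at compile time.
 */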
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info.supports_cwsr) {
		if (KFD_GC_VERSION(kfd) < IP_VERSION(9, 0, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_4_3_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_4_3_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_4_3_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 1, 1)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(10, 3, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else if (KFD_GC_VERSION(kfd) < IP_VERSION(11, 0, 0)) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx11_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx11_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx11_hex);
		}

		kfd->cwsr_enabled = true;
	}
}
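/* Allocate global wave sync (GWS) resources when the scheduler runs in HWS
 * mode and the MEC2 firmware on this GC IP version is new enough to support
 * GWS, or when support is forced via the hws_gws_support module parameter.
 */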
static int kfd_gws_init(struct kfd_node *node)
{
	int ret = 0;
	struct kfd_dev *kfd = node->kfd;

	if (node->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support || (KFD_IS_SOC15(node) &&
		((KFD_GC_VERSION(node) == IP_VERSION(9, 0, 1)
			&& kfd->mec2_fw_version >= 0x81b3) ||
		(KFD_GC_VERSION(node) <= IP_VERSION(9, 4, 0)
			&& kfd->mec2_fw_version >= 0x1b3)  ||
		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 1)
			&& kfd->mec2_fw_version >= 0x30)   ||
		(KFD_GC_VERSION(node) == IP_VERSION(9, 4, 2)
			&& kfd->mec2_fw_version >= 0x28) ||
		(KFD_GC_VERSION(node) >= IP_VERSION(10, 3, 0)
			&& KFD_GC_VERSION(node) < IP_VERSION(11, 0, 0)
			&& kfd->mec2_fw_version >= 0x6b))))
		ret = amdgpu_amdkfd_alloc_gws(node->adev,
				node->adev->gds.gws_size, &node->gws);

	return ret;
}
static void kfd_smi_init(struct kfd_node *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}
static int kfd_init_node(struct kfd_node *node)
{
	int err = -1;

	if (kfd_interrupt_init(node)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	node->dqm = device_queue_manager_init(node);
	if (!node->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	if (kfd_gws_init(node)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			node->adev->gds.gws_size);
		goto gws_error;
	}

	if (kfd_resume(node))
		goto kfd_resume_error;

	if (kfd_topology_add_device(node)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(node);

	return 0;

kfd_topology_add_device_error:
kfd_resume_error:
gws_error:
	device_queue_manager_uninit(node->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(node);
kfd_interrupt_error:
	if (node->gws)
		amdgpu_amdkfd_free_gws(node->adev, node->gws);

	/* Cleanup the node memory here */
	kfree(node);
	return err;
}
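/* Undo kfd_init_node() for the first @num_nodes nodes of @kfd: tear down the
 * queue manager, interrupts and topology entry, release GWS and free the node.
 */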
static void kfd_cleanup_nodes(struct kfd_dev *kfd, unsigned int num_nodes)
{
	struct kfd_node *knode;
	unsigned int i;

	for (i = 0; i < num_nodes; i++) {
		knode = kfd->nodes[i];
		device_queue_manager_uninit(knode->dqm);
		kfd_interrupt_exit(knode);
		kfd_topology_remove_device(knode);
		if (knode->gws)
			amdgpu_amdkfd_free_gws(knode->adev, knode->gws);
		kfree(knode);
		kfd->nodes[i] = NULL;
	}
}
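/* kgd2kfd_device_init - second-stage init called by amdgpu once firmware
 * versions and shared resources are known.
 *
 * Sizes and allocates the GTT carve-out used for MQDs, runlist packets and
 * kernel queues, initializes doorbells and the IOMMU, then creates and
 * resumes one KFD node per partition. Returns true on success.
 */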
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size, i;
	struct kfd_node *node;
	uint32_t first_vmid_kfd, last_vmid_kfd, vmid_num_kfd;
	unsigned int max_proc_per_quantum;
	int partition_mode;
	int xcp_idx;

	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->adev,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->num_nodes = amdgpu_xcp_get_num_xcp(kfd->adev->xcp_mgr);

	if (kfd->num_nodes == 0) {
		dev_err(kfd_device,
			"KFD num nodes cannot be 0, num_xcc_in_node: %d\n",
			kfd->adev->gfx.num_xcc_per_xcp);
		goto out;
	}

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kfd->adev);
	if (!kfd->pci_atomic_requested &&
	    kfd->device_info.needs_pci_atomics &&
	    (!kfd->device_info.no_atomic_fw_version ||
	     kfd->mec_fw_version < kfd->device_info.no_atomic_fw_version)) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics %d<%d\n",
			 kfd->adev->pdev->vendor, kfd->adev->pdev->device,
			 kfd->mec_fw_version,
			 kfd->device_info.no_atomic_fw_version);
		return false;
	}

	first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	vmid_num_kfd = last_vmid_kfd - first_vmid_kfd + 1;

	/* For GFX9.4.3, we need special handling for VMIDs depending on
	 * partition mode.
	 * In CPX mode, the VMID range needs to be shared between XCDs.
	 * Additionally, there are 13 VMIDs (3-15) available for KFD. To
	 * divide them equally, we change starting VMID to 4 and not use
	 * VMID 3.
	 * If the VMID range changes for GFX9.4.3, then this code MUST be
	 * revisited.
	 */
	if (kfd->adev->xcp_mgr) {
		partition_mode = amdgpu_xcp_query_partition_mode(kfd->adev->xcp_mgr,
								 AMDGPU_XCP_FL_LOCKED);
		if (partition_mode == AMDGPU_CPX_PARTITION_MODE &&
		    kfd->num_nodes != 1) {
			vmid_num_kfd /= 2;
			first_vmid_kfd = last_vmid_kfd + 1 - vmid_num_kfd*2;
		}
	}

	/* Verify module parameters regarding mapped process number */
	if (hws_max_conc_proc >= 0)
		max_proc_per_quantum = min((u32)hws_max_conc_proc, vmid_num_kfd);
	else
		max_proc_per_quantum = vmid_num_kfd;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info.mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size = KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 2) ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->adev, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	if (amdgpu_use_xgmi_p2p)
		kfd->hive_id = kfd->adev->gmc.xgmi.hive_id;

	/*
	 * For GFX9.4.3, the KFD abstracts all partitions within a socket as
	 * xGMI connected in the topology so assign a unique hive id per
	 * device based on the pci device location if device is in PCIe mode.
	 */
	if (!kfd->hive_id && (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3)) && kfd->num_nodes > 1)
		kfd->hive_id = pci_dev_id(kfd->adev->pdev);

	kfd->noretry = kfd->adev->gmc.noretry;

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		kfd->use_iommu_v2 = false;
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	dev_info(kfd_device, "Total number of KFD nodes to be created: %d\n",
		 kfd->num_nodes);

	/* Allocate the KFD nodes */
	for (i = 0, xcp_idx = 0; i < kfd->num_nodes; i++) {
		node = kzalloc(sizeof(struct kfd_node), GFP_KERNEL);
		if (!node)
			goto node_alloc_error;

		node->node_id = i;
		node->adev = kfd->adev;
		node->kfd = kfd;
		node->kfd2kgd = kfd->kfd2kgd;
		node->vm_info.vmid_num_kfd = vmid_num_kfd;
		node->xcp = amdgpu_get_next_xcp(kfd->adev->xcp_mgr, &xcp_idx);
		/* TODO : Check if error handling is needed */
		if (node->xcp) {
			amdgpu_xcp_get_inst_details(node->xcp, AMDGPU_XCP_GFX,
						    &node->xcc_mask);
			++xcp_idx;
		} else {
			node->xcc_mask =
				(1U << NUM_XCC(kfd->adev->gfx.xcc_mask)) - 1;
		}

		if (node->xcp) {
			dev_info(kfd_device, "KFD node %d partition %d size %lldM\n",
				 node->node_id, node->xcp->mem_id,
				 KFD_XCP_MEMORY_SIZE(node->adev, node->node_id) >> 20);
		}

		if (KFD_GC_VERSION(kfd) == IP_VERSION(9, 4, 3) &&
		    partition_mode == AMDGPU_CPX_PARTITION_MODE &&
		    kfd->num_nodes != 1) {
			/* For GFX9.4.3 and CPX mode, first XCD gets VMID range
			 * 4-9 and second XCD gets VMID range 10-15.
			 */
			node->vm_info.first_vmid_kfd = (i%2 == 0) ?
						first_vmid_kfd :
						first_vmid_kfd+vmid_num_kfd;
			node->vm_info.last_vmid_kfd = (i%2 == 0) ?
						last_vmid_kfd-vmid_num_kfd :
						last_vmid_kfd;
			node->compute_vmid_bitmap =
				((0x1 << (node->vm_info.last_vmid_kfd + 1)) - 1) -
				((0x1 << (node->vm_info.first_vmid_kfd)) - 1);
		} else {
			node->vm_info.first_vmid_kfd = first_vmid_kfd;
			node->vm_info.last_vmid_kfd = last_vmid_kfd;
			node->compute_vmid_bitmap =
				gpu_resources->compute_vmid_bitmap;
		}
		node->max_proc_per_quantum = max_proc_per_quantum;
		atomic_set(&node->sram_ecc_flag, 0);

		amdgpu_amdkfd_get_local_mem_info(kfd->adev,
					&node->local_mem_info, node->xcp);

		/* Initialize the KFD node */
		if (kfd_init_node(node)) {
			dev_err(kfd_device, "Error initializing KFD node\n");
			goto node_init_error;
		}
		kfd->nodes[i] = node;
	}

	svm_range_set_max_pages(kfd->adev);

	if (kfd_resume_iommu(kfd))
		goto kfd_resume_iommu_error;

	spin_lock_init(&kfd->watch_points_lock);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->adev->pdev->vendor,
		 kfd->adev->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		node->dqm->sched_policy);

	goto out;

kfd_resume_iommu_error:
node_init_error:
node_alloc_error:
	kfd_cleanup_nodes(kfd, i);
device_iommu_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
alloc_gtt_mem_failure:
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->adev->pdev->vendor, kfd->adev->pdev->device);
out:
	return kfd->init_complete;
}
void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		/* Cleanup KFD nodes */
		kfd_cleanup_nodes(kfd, kfd->num_nodes);
		/* Cleanup common/shared resources */
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->adev, kfd->gtt_mem);
	}

	kfree(kfd);
}
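/* Quiesce the device ahead of a GPU reset: notify SMI clients, let each
 * node's queue manager run its pre-reset hook, suspend all KFD processes
 * and signal reset events to user mode.
 */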
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	struct kfd_node *node;
	int i;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		kfd_smi_event_update_gpu_reset(node, false);
		node->dqm->ops.pre_reset(node->dqm);
	}

	kgd2kfd_suspend(kfd, false);

	for (i = 0; i < kfd->num_nodes; i++)
		kfd_signal_reset_event(kfd->nodes[i]);

	return 0;
}
/*
 * Fix me. KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;
	struct kfd_node *node;
	int i;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		ret = kfd_resume(kfd->nodes[i]);
		if (ret)
			return ret;
	}

	mutex_lock(&kfd_processes_mutex);
	--kfd_locked;
	mutex_unlock(&kfd_processes_mutex);

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		atomic_set(&node->sram_ecc_flag, 0);
		kfd_smi_event_update_gpu_reset(node, true);
	}

	return 0;
}
bool kfd_is_locked(void)
{
	lockdep_assert_held(&kfd_processes_mutex);
	return (kfd_locked > 0);
}
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	struct kfd_node *node;
	int i;
	int count;

	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		mutex_lock(&kfd_processes_mutex);
		count = ++kfd_locked;
		mutex_unlock(&kfd_processes_mutex);

		/* For first KFD device suspend all the KFD processes */
		if (count == 1)
			kfd_suspend_all_processes();
	}

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		node->dqm->ops.stop(node->dqm);
	}
	kfd_iommu_suspend(kfd);
}
int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count, i;

	if (!kfd->init_complete)
		return 0;

	for (i = 0; i < kfd->num_nodes; i++) {
		ret = kfd_resume(kfd->nodes[i]);
		if (ret)
			return ret;
	}

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		mutex_lock(&kfd_processes_mutex);
		count = --kfd_locked;
		mutex_unlock(&kfd_processes_mutex);

		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}
int kgd2kfd_resume_iommu(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	return kfd_resume_iommu(kfd);
}

static int kfd_resume_iommu(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err)
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->adev->pdev->vendor, kfd->adev->pdev->device);
	return err;
}
static int kfd_resume(struct kfd_node *node)
{
	int err = 0;

	err = node->dqm->ops.start(node->dqm);
	if (err)
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			node->adev->pdev->vendor, node->adev->pdev->device);

	return err;
}
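/* Queue interrupt work on the next online CPU that shares the current CPU's
 * NUMA node, so the bottom half runs close to where the interrupt was taken;
 * after a full wrap of the online mask it falls back to the current CPU.
 */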
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE], i;
	bool is_patched = false;
	unsigned long flags;
	struct kfd_node *node;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info.ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	for (i = 0; i < kfd->num_nodes; i++) {
		node = kfd->nodes[i];
		spin_lock_irqsave(&node->interrupt_lock, flags);

		if (node->interrupts_active
		    && interrupt_is_wanted(node, ih_ring_entry,
					   patched_ihre, &is_patched)
		    && enqueue_ih_ring_entry(node,
					     is_patched ? patched_ihre : ih_ring_entry)) {
			kfd_queue_work(node->ih_wq, &node->interrupt_work);
			spin_unlock_irqrestore(&node->interrupt_lock, flags);
			return;
		}
		spin_unlock_irqrestore(&node->interrupt_lock, flags);
	}
}
int kgd2kfd_quiesce_mm(struct mm_struct *mm, uint32_t trigger)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p, trigger);

	kfd_unref_process(p);
	return r;
}
int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}
/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}
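/* Simple sub-allocator for the GTT carve-out: the buffer is split into
 * fixed-size chunks tracked by one bit each, and an allocation takes the
 * first run of contiguous free chunks large enough for the request. For
 * example, with 512-byte chunks a 768-byte MQD request occupies two
 * adjacent chunks.
 */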
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
			   unsigned int chunk_size)
{
	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	kfd->gtt_sa_bitmap = bitmap_zalloc(kfd->gtt_sa_num_of_chunks,
					   GFP_KERNEL);
	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}
static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	bitmap_free(kfd->gtt_sa_bitmap);
}
static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						 unsigned int bit_num,
						 unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
int kfd_gtt_sa_allocate(struct kfd_node *node, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;
	struct kfd_dev *kfd = node->kfd;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		__set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones).
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	bitmap_set(kfd->gtt_sa_bitmap, (*mem_obj)->range_start,
		   (*mem_obj)->range_end - (*mem_obj)->range_start + 1);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}
int kfd_gtt_sa_free(struct kfd_node *node, struct kfd_mem_obj *mem_obj)
{
	struct kfd_dev *kfd = node->kfd;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	bitmap_clear(kfd->gtt_sa_bitmap, mem_obj->range_start,
		     mem_obj->range_end - mem_obj->range_start + 1);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}
void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	/*
	 * TODO: Currently update SRAM ECC flag for first node.
	 * This needs to be updated later when we can
	 * identify SRAM ECC error on other nodes also.
	 */
	if (kfd)
		atomic_inc(&kfd->nodes[0]->sram_ecc_flag);
}
void kfd_inc_compute_active(struct kfd_node *node)
{
	if (atomic_inc_return(&node->kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(node->adev, false);
}
void kfd_dec_compute_active(struct kfd_node *node)
{
	int count = atomic_dec_return(&node->kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(node->adev, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}
void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	/*
	 * TODO: For now, raise the throttling event only on first node.
	 * This will need to change after we are able to determine
	 * which node raised the throttling event.
	 */
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd->nodes[0],
							throttle_bitmask);
}
/* kfd_get_num_sdma_engines returns the number of PCIe optimized SDMA and
 * kfd_get_num_xgmi_sdma_engines returns the number of XGMI SDMA.
 * When the device has more than two engines, we reserve two for PCIe to enable
 * full-duplex and the rest are used as XGMI.
 */
unsigned int kfd_get_num_sdma_engines(struct kfd_node *node)
{
	/* If XGMI is not supported, all SDMA engines are PCIe */
	if (!node->adev->gmc.xgmi.supported)
		return node->adev->sdma.num_instances/(int)node->kfd->num_nodes;

	return min(node->adev->sdma.num_instances/(int)node->kfd->num_nodes, 2);
}

unsigned int kfd_get_num_xgmi_sdma_engines(struct kfd_node *node)
{
	/* After reserved for PCIe, the rest of engines are XGMI */
	return node->adev->sdma.num_instances/(int)node->kfd->num_nodes -
		kfd_get_num_sdma_engines(node);
}
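/* Take the global KFD lock on behalf of the amdgpu driver: fail with -EBUSY
 * if any KFD process exists or the driver is already locked, otherwise bump
 * kfd_locked until kgd2kfd_unlock_kfd() drops it again.
 */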
int kgd2kfd_check_and_lock_kfd(void)
{
	mutex_lock(&kfd_processes_mutex);
	if (!hash_empty(kfd_processes_table) || kfd_is_locked()) {
		mutex_unlock(&kfd_processes_mutex);
		return -EBUSY;
	}

	++kfd_locked;
	mutex_unlock(&kfd_processes_mutex);

	return 0;
}
void kgd2kfd_unlock_kfd(void)
{
	mutex_lock(&kfd_processes_mutex);
	--kfd_locked;
	mutex_unlock(&kfd_processes_mutex);
}
#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_node *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif