/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/bsearch.h>
#include <linux/pci.h>
#include <linux/slab.h>
#include "kfd_priv.h"
#include "kfd_device_queue_manager.h"
#include "kfd_pm4_headers_vi.h"
#include "kfd_pm4_headers_aldebaran.h"
#include "cwsr_trap_handler.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
#include "kfd_smi_events.h"
#include "kfd_migrate.h"

#define MQD_SIZE_ALIGNED 768

/*
 * kfd_locked is used to lock the kfd driver during suspend or reset.
 * Once locked, the kfd driver will stop any further GPU execution.
 * create process (open) will return -EAGAIN.
 */
static atomic_t kfd_locked = ATOMIC_INIT(0);

#ifdef CONFIG_DRM_AMDGPU_CIK
extern const struct kfd2kgd_calls gfx_v7_kfd2kgd;
#endif
extern const struct kfd2kgd_calls gfx_v8_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v9_kfd2kgd;
extern const struct kfd2kgd_calls arcturus_kfd2kgd;
extern const struct kfd2kgd_calls aldebaran_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_kfd2kgd;
extern const struct kfd2kgd_calls gfx_v10_3_kfd2kgd;

static const struct kfd2kgd_calls *kfd2kgd_funcs[] = {
#ifdef KFD_SUPPORT_IOMMU_V2
#ifdef CONFIG_DRM_AMDGPU_CIK
	[CHIP_KAVERI] = &gfx_v7_kfd2kgd,
#endif
	[CHIP_CARRIZO] = &gfx_v8_kfd2kgd,
	[CHIP_RAVEN] = &gfx_v9_kfd2kgd,
#endif
#ifdef CONFIG_DRM_AMDGPU_CIK
	[CHIP_HAWAII] = &gfx_v7_kfd2kgd,
#endif
	[CHIP_TONGA] = &gfx_v8_kfd2kgd,
	[CHIP_FIJI] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS10] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS11] = &gfx_v8_kfd2kgd,
	[CHIP_POLARIS12] = &gfx_v8_kfd2kgd,
	[CHIP_VEGAM] = &gfx_v8_kfd2kgd,
	[CHIP_VEGA10] = &gfx_v9_kfd2kgd,
	[CHIP_VEGA12] = &gfx_v9_kfd2kgd,
	[CHIP_VEGA20] = &gfx_v9_kfd2kgd,
	[CHIP_RENOIR] = &gfx_v9_kfd2kgd,
	[CHIP_ARCTURUS] = &arcturus_kfd2kgd,
	[CHIP_ALDEBARAN] = &aldebaran_kfd2kgd,
	[CHIP_NAVI10] = &gfx_v10_kfd2kgd,
	[CHIP_NAVI12] = &gfx_v10_kfd2kgd,
	[CHIP_NAVI14] = &gfx_v10_kfd2kgd,
	[CHIP_SIENNA_CICHLID] = &gfx_v10_3_kfd2kgd,
	[CHIP_NAVY_FLOUNDER] = &gfx_v10_3_kfd2kgd,
	[CHIP_VANGOGH] = &gfx_v10_3_kfd2kgd,
	[CHIP_DIMGREY_CAVEFISH] = &gfx_v10_3_kfd2kgd,
	[CHIP_BEIGE_GOBY] = &gfx_v10_3_kfd2kgd,
	[CHIP_YELLOW_CARP] = &gfx_v10_3_kfd2kgd,
	[CHIP_CYAN_SKILLFISH] = &gfx_v10_kfd2kgd,
};

#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
	.asic_family = CHIP_KAVERI,
	.asic_name = "kaveri",
	.gfx_target_version = 70000,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info carrizo_device_info = {
	.asic_family = CHIP_CARRIZO,
	.asic_name = "carrizo",
	.gfx_target_version = 80001,
	.max_pasid_bits = 16,
	/* max num of queues for CZ. TODO: should be a dynamic value */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};
#endif

static const struct kfd_device_info raven_device_info = {
	.asic_family = CHIP_RAVEN,
	.asic_name = "raven",
	.gfx_target_version = 90002,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info hawaii_device_info = {
	.asic_family = CHIP_HAWAII,
	.asic_name = "hawaii",
	.gfx_target_version = 70001,
	.max_pasid_bits = 16,
	/* max num of queues for KV. TODO: should be a dynamic value */
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info tonga_device_info = {
	.asic_family = CHIP_TONGA,
	.asic_name = "tonga",
	.gfx_target_version = 80002,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = false,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info fiji_vf_device_info = {
	.asic_family = CHIP_FIJI,
	.asic_name = "fiji",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris10_vf_device_info = {
	.asic_family = CHIP_POLARIS10,
	.asic_name = "polaris10",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris11_device_info = {
	.asic_family = CHIP_POLARIS11,
	.asic_name = "polaris11",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info polaris12_device_info = {
	.asic_family = CHIP_POLARIS12,
	.asic_name = "polaris12",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vegam_device_info = {
	.asic_family = CHIP_VEGAM,
	.asic_name = "vegam",
	.gfx_target_version = 80003,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 4 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_cik,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.gfx_target_version = 90000,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega10_vf_device_info = {
	.asic_family = CHIP_VEGA10,
	.asic_name = "vega10",
	.gfx_target_version = 90000,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega12_device_info = {
	.asic_family = CHIP_VEGA12,
	.asic_name = "vega12",
	.gfx_target_version = 90004,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info vega20_device_info = {
	.asic_family = CHIP_VEGA20,
	.asic_name = "vega20",
	.gfx_target_version = 90006,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info arcturus_device_info = {
	.asic_family = CHIP_ARCTURUS,
	.asic_name = "arcturus",
	.gfx_target_version = 90008,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 6,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info aldebaran_device_info = {
	.asic_family = CHIP_ALDEBARAN,
	.asic_name = "aldebaran",
	.gfx_target_version = 90010,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 3,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info renoir_device_info = {
	.asic_family = CHIP_RENOIR,
	.asic_name = "renoir",
	.gfx_target_version = 90002,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.supports_cwsr = true,
	.needs_iommu_device = false,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info navi10_device_info = {
	.asic_family = CHIP_NAVI10,
	.asic_name = "navi10",
	.gfx_target_version = 100100,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi12_device_info = {
	.asic_family = CHIP_NAVI12,
	.asic_name = "navi12",
	.gfx_target_version = 100101,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navi14_device_info = {
	.asic_family = CHIP_NAVI14,
	.asic_name = "navi14",
	.gfx_target_version = 100102,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info sienna_cichlid_device_info = {
	.asic_family = CHIP_SIENNA_CICHLID,
	.asic_name = "sienna_cichlid",
	.gfx_target_version = 100300,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 4,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info navy_flounder_device_info = {
	.asic_family = CHIP_NAVY_FLOUNDER,
	.asic_name = "navy_flounder",
	.gfx_target_version = 100301,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info vangogh_device_info = {
	.asic_family = CHIP_VANGOGH,
	.asic_name = "vangogh",
	.gfx_target_version = 100303,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info dimgrey_cavefish_device_info = {
	.asic_family = CHIP_DIMGREY_CAVEFISH,
	.asic_name = "dimgrey_cavefish",
	.gfx_target_version = 100302,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info beige_goby_device_info = {
	.asic_family = CHIP_BEIGE_GOBY,
	.asic_name = "beige_goby",
	.gfx_target_version = 100304,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

static const struct kfd_device_info yellow_carp_device_info = {
	.asic_family = CHIP_YELLOW_CARP,
	.asic_name = "yellow_carp",
	.gfx_target_version = 100305,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = false,
	.num_sdma_engines = 1,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 2,
};

static const struct kfd_device_info cyan_skillfish_device_info = {
	.asic_family = CHIP_CYAN_SKILLFISH,
	.asic_name = "cyan_skillfish",
	.gfx_target_version = 100103,
	.max_pasid_bits = 16,
	.ih_ring_entry_size = 8 * sizeof(uint32_t),
	.event_interrupt_class = &event_interrupt_class_v9,
	.num_of_watch_points = 4,
	.mqd_size_aligned = MQD_SIZE_ALIGNED,
	.needs_iommu_device = false,
	.supports_cwsr = true,
	.needs_pci_atomics = true,
	.num_sdma_engines = 2,
	.num_xgmi_sdma_engines = 0,
	.num_sdma_queues_per_engine = 8,
};

/* For each entry, [0] is regular and [1] is virtualisation device. */
static const struct kfd_device_info *kfd_supported_devices[][2] = {
#ifdef KFD_SUPPORT_IOMMU_V2
	[CHIP_KAVERI] = {&kaveri_device_info, NULL},
	[CHIP_CARRIZO] = {&carrizo_device_info, NULL},
#endif
	[CHIP_RAVEN] = {&raven_device_info, NULL},
	[CHIP_HAWAII] = {&hawaii_device_info, NULL},
	[CHIP_TONGA] = {&tonga_device_info, NULL},
	[CHIP_FIJI] = {&fiji_device_info, &fiji_vf_device_info},
	[CHIP_POLARIS10] = {&polaris10_device_info, &polaris10_vf_device_info},
	[CHIP_POLARIS11] = {&polaris11_device_info, NULL},
	[CHIP_POLARIS12] = {&polaris12_device_info, NULL},
	[CHIP_VEGAM] = {&vegam_device_info, NULL},
	[CHIP_VEGA10] = {&vega10_device_info, &vega10_vf_device_info},
	[CHIP_VEGA12] = {&vega12_device_info, NULL},
	[CHIP_VEGA20] = {&vega20_device_info, NULL},
	[CHIP_RENOIR] = {&renoir_device_info, NULL},
	[CHIP_ARCTURUS] = {&arcturus_device_info, &arcturus_device_info},
	[CHIP_ALDEBARAN] = {&aldebaran_device_info, &aldebaran_device_info},
	[CHIP_NAVI10] = {&navi10_device_info, NULL},
	[CHIP_NAVI12] = {&navi12_device_info, &navi12_device_info},
	[CHIP_NAVI14] = {&navi14_device_info, NULL},
	[CHIP_SIENNA_CICHLID] = {&sienna_cichlid_device_info, &sienna_cichlid_device_info},
	[CHIP_NAVY_FLOUNDER] = {&navy_flounder_device_info, &navy_flounder_device_info},
	[CHIP_VANGOGH] = {&vangogh_device_info, NULL},
	[CHIP_DIMGREY_CAVEFISH] = {&dimgrey_cavefish_device_info, &dimgrey_cavefish_device_info},
	[CHIP_BEIGE_GOBY] = {&beige_goby_device_info, &beige_goby_device_info},
	[CHIP_YELLOW_CARP] = {&yellow_carp_device_info, NULL},
	[CHIP_CYAN_SKILLFISH] = {&cyan_skillfish_device_info, NULL},
};

static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size);
static void kfd_gtt_sa_fini(struct kfd_dev *kfd);

static int kfd_resume(struct kfd_dev *kfd);
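
/*
 * kgd2kfd_probe - Allocate and minimally initialize a kfd_dev for one GPU.
 *
 * Looks up the per-ASIC device info and kfd2kgd function table and rejects
 * devices that need PCIe atomics on a bus that does not support them.
 * Returns NULL if the ASIC is out of range or unsupported; full
 * initialization happens later in kgd2kfd_device_init().
 */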
struct kfd_dev *kgd2kfd_probe(struct kgd_dev *kgd,
	struct pci_dev *pdev, unsigned int asic_type, bool vf)
{
	struct kfd_dev *kfd;
	const struct kfd_device_info *device_info;
	const struct kfd2kgd_calls *f2g;

	if (asic_type >= sizeof(kfd_supported_devices) / (sizeof(void *) * 2)
		|| asic_type >= sizeof(kfd2kgd_funcs) / sizeof(void *)) {
		dev_err(kfd_device, "asic_type %d out of range\n", asic_type);
		return NULL; /* asic_type out of range */
	}

	device_info = kfd_supported_devices[asic_type][vf];
	f2g = kfd2kgd_funcs[asic_type];

	if (!device_info || !f2g) {
		dev_err(kfd_device, "%s %s not supported in kfd\n",
			amdgpu_asic_name[asic_type], vf ? "VF" : "");
		return NULL;
	}

	kfd = kzalloc(sizeof(*kfd), GFP_KERNEL);
	if (!kfd)
		return NULL;

	/* Allow BIF to recode atomics to PCIe 3.0 AtomicOps.
	 * 32 and 64-bit requests are possible and must be
	 * supported.
	 */
	kfd->pci_atomic_requested = amdgpu_amdkfd_have_atomics_support(kgd);
	if (device_info->needs_pci_atomics &&
	    !kfd->pci_atomic_requested) {
		dev_info(kfd_device,
			 "skipped device %x:%x, PCI rejects atomics\n",
			 pdev->vendor, pdev->device);
		kfree(kfd);
		return NULL;
	}

	kfd->kgd = kgd;
	kfd->device_info = device_info;
	kfd->pdev = pdev;
	kfd->init_complete = false;
	kfd->kfd2kgd = f2g;
	atomic_set(&kfd->compute_profile, 0);

	mutex_init(&kfd->doorbell_mutex);
	memset(&kfd->doorbell_available_index, 0,
		sizeof(kfd->doorbell_available_index));

	atomic_set(&kfd->sram_ecc_flag, 0);

	ida_init(&kfd->doorbell_ida);

	return kfd;
}
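
/*
 * kfd_cwsr_init - Pick the CWSR (compute wave save/restore) trap handler
 * binary that matches this ASIC generation. Each handler must fit in one
 * page, which the BUILD_BUG_ON checks below enforce at compile time.
 */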
static void kfd_cwsr_init(struct kfd_dev *kfd)
{
	if (cwsr_enable && kfd->device_info->supports_cwsr) {
		if (kfd->device_info->asic_family < CHIP_VEGA10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx8_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx8_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx8_hex);
		} else if (kfd->device_info->asic_family == CHIP_ARCTURUS) {
			BUILD_BUG_ON(sizeof(cwsr_trap_arcturus_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_arcturus_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_arcturus_hex);
		} else if (kfd->device_info->asic_family == CHIP_ALDEBARAN) {
			BUILD_BUG_ON(sizeof(cwsr_trap_aldebaran_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_aldebaran_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_aldebaran_hex);
		} else if (kfd->device_info->asic_family < CHIP_NAVI10) {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx9_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx9_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx9_hex);
		} else if (kfd->device_info->asic_family < CHIP_SIENNA_CICHLID) {
			BUILD_BUG_ON(sizeof(cwsr_trap_nv1x_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_nv1x_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_nv1x_hex);
		} else {
			BUILD_BUG_ON(sizeof(cwsr_trap_gfx10_hex) > PAGE_SIZE);
			kfd->cwsr_isa = cwsr_trap_gfx10_hex;
			kfd->cwsr_isa_size = sizeof(cwsr_trap_gfx10_hex);
		}

		kfd->cwsr_enabled = true;
	}
}
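
/*
 * kfd_gws_init - Allocate the global wave sync (GWS) resource, but only
 * when the HW scheduler is in use and the MEC2 firmware on this ASIC is
 * recent enough to support GWS.
 */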
static int kfd_gws_init(struct kfd_dev *kfd)
{
	int ret = 0;

	if (kfd->dqm->sched_policy == KFD_SCHED_POLICY_NO_HWS)
		return 0;

	if (hws_gws_support
		|| (kfd->device_info->asic_family == CHIP_VEGA10
			&& kfd->mec2_fw_version >= 0x81b3)
		|| (kfd->device_info->asic_family >= CHIP_VEGA12
			&& kfd->device_info->asic_family <= CHIP_RAVEN
			&& kfd->mec2_fw_version >= 0x1b3)
		|| (kfd->device_info->asic_family == CHIP_ARCTURUS
			&& kfd->mec2_fw_version >= 0x30)
		|| (kfd->device_info->asic_family == CHIP_ALDEBARAN
			&& kfd->mec2_fw_version >= 0x28))
		ret = amdgpu_amdkfd_alloc_gws(kfd->kgd,
				amdgpu_amdkfd_get_num_gws(kfd->kgd), &kfd->gws);

	return ret;
}
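
/* Set up the SMI event client list and its lock for this device. */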
static void kfd_smi_init(struct kfd_dev *dev)
{
	INIT_LIST_HEAD(&dev->smi_clients);
	spin_lock_init(&dev->smi_lock);
}
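
/*
 * kgd2kfd_device_init - Complete device initialization: query firmware
 * versions, size and allocate the GTT buffer used for MQDs, runlist
 * packets and kernel queues, then bring up the doorbells, interrupts,
 * device queue manager, GWS, IOMMU, CWSR and topology. On any failure
 * the already-initialized pieces are torn down in reverse order.
 */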
bool kgd2kfd_device_init(struct kfd_dev *kfd,
			 struct drm_device *ddev,
			 const struct kgd2kfd_shared_resources *gpu_resources)
{
	unsigned int size, map_process_packet_size;

	kfd->ddev = ddev;
	kfd->mec_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC1);
	kfd->mec2_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_MEC2);
	kfd->sdma_fw_version = amdgpu_amdkfd_get_fw_version(kfd->kgd,
			KGD_ENGINE_SDMA1);
	kfd->shared_resources = *gpu_resources;

	kfd->vm_info.first_vmid_kfd = ffs(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.last_vmid_kfd = fls(gpu_resources->compute_vmid_bitmap)-1;
	kfd->vm_info.vmid_num_kfd = kfd->vm_info.last_vmid_kfd
			- kfd->vm_info.first_vmid_kfd + 1;

	/* Verify module parameters regarding mapped process number */
	if ((hws_max_conc_proc < 0)
			|| (hws_max_conc_proc > kfd->vm_info.vmid_num_kfd)) {
		dev_err(kfd_device,
			"hws_max_conc_proc %d must be between 0 and %d, use %d instead\n",
			hws_max_conc_proc, kfd->vm_info.vmid_num_kfd,
			kfd->vm_info.vmid_num_kfd);
		kfd->max_proc_per_quantum = kfd->vm_info.vmid_num_kfd;
	} else
		kfd->max_proc_per_quantum = hws_max_conc_proc;

	/* calculate max size of mqds needed for queues */
	size = max_num_of_queues_per_device *
			kfd->device_info->mqd_size_aligned;

	/*
	 * calculate max size of runlist packet.
	 * There can be only 2 packets at once
	 */
	map_process_packet_size =
			kfd->device_info->asic_family == CHIP_ALDEBARAN ?
				sizeof(struct pm4_mes_map_process_aldebaran) :
				sizeof(struct pm4_mes_map_process);
	size += (KFD_MAX_NUM_OF_PROCESSES * map_process_packet_size +
		max_num_of_queues_per_device * sizeof(struct pm4_mes_map_queues)
		+ sizeof(struct pm4_mes_runlist)) * 2;

	/* Add size of HIQ & DIQ */
	size += KFD_KERNEL_QUEUE_SIZE * 2;

	/* add another 512KB for all other allocations on gart (HPD, fences) */
	size += 512 * 1024;

	if (amdgpu_amdkfd_alloc_gtt_mem(
			kfd->kgd, size, &kfd->gtt_mem,
			&kfd->gtt_start_gpu_addr, &kfd->gtt_start_cpu_ptr,
			false)) {
		dev_err(kfd_device, "Could not allocate %d bytes\n", size);
		goto alloc_gtt_mem_failure;
	}

	dev_info(kfd_device, "Allocated %d bytes on gart\n", size);

	/* Initialize GTT sa with 512 byte chunk size */
	if (kfd_gtt_sa_init(kfd, size, 512) != 0) {
		dev_err(kfd_device, "Error initializing gtt sub-allocator\n");
		goto kfd_gtt_sa_init_error;
	}

	if (kfd_doorbell_init(kfd)) {
		dev_err(kfd_device,
			"Error initializing doorbell aperture\n");
		goto kfd_doorbell_error;
	}

	kfd->hive_id = amdgpu_amdkfd_get_hive_id(kfd->kgd);

	kfd->noretry = amdgpu_amdkfd_get_noretry(kfd->kgd);

	if (kfd_interrupt_init(kfd)) {
		dev_err(kfd_device, "Error initializing interrupts\n");
		goto kfd_interrupt_error;
	}

	kfd->dqm = device_queue_manager_init(kfd);
	if (!kfd->dqm) {
		dev_err(kfd_device, "Error initializing queue manager\n");
		goto device_queue_manager_error;
	}

	/* If supported on this device, allocate global GWS that is shared
	 * by all KFD processes
	 */
	if (kfd_gws_init(kfd)) {
		dev_err(kfd_device, "Could not allocate %d gws\n",
			amdgpu_amdkfd_get_num_gws(kfd->kgd));
		goto gws_error;
	}

	/* If CRAT is broken, won't set iommu enabled */
	kfd_double_confirm_iommu_support(kfd);

	if (kfd_iommu_device_init(kfd)) {
		dev_err(kfd_device, "Error initializing iommuv2\n");
		goto device_iommu_error;
	}

	kfd_cwsr_init(kfd);

	svm_migrate_init((struct amdgpu_device *)kfd->kgd);

	if (kfd_resume(kfd))
		goto kfd_resume_error;

	if (kfd_topology_add_device(kfd)) {
		dev_err(kfd_device, "Error adding device to topology\n");
		goto kfd_topology_add_device_error;
	}

	kfd_smi_init(kfd);

	kfd->init_complete = true;
	dev_info(kfd_device, "added device %x:%x\n", kfd->pdev->vendor,
		 kfd->pdev->device);

	pr_debug("Starting kfd with the following scheduling policy %d\n",
		kfd->dqm->sched_policy);

	goto out;

kfd_topology_add_device_error:
kfd_resume_error:
device_iommu_error:
gws_error:
	device_queue_manager_uninit(kfd->dqm);
device_queue_manager_error:
	kfd_interrupt_exit(kfd);
kfd_interrupt_error:
	kfd_doorbell_fini(kfd);
kfd_doorbell_error:
	kfd_gtt_sa_fini(kfd);
kfd_gtt_sa_init_error:
	amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
alloc_gtt_mem_failure:
	if (kfd->gws)
		amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
	dev_err(kfd_device,
		"device %x:%x NOT added due to errors\n",
		kfd->pdev->vendor, kfd->pdev->device);
out:
	return kfd->init_complete;
}

void kgd2kfd_device_exit(struct kfd_dev *kfd)
{
	if (kfd->init_complete) {
		svm_migrate_fini((struct amdgpu_device *)kfd->kgd);
		device_queue_manager_uninit(kfd->dqm);
		kfd_interrupt_exit(kfd);
		kfd_topology_remove_device(kfd);
		kfd_doorbell_fini(kfd);
		ida_destroy(&kfd->doorbell_ida);
		kfd_gtt_sa_fini(kfd);
		amdgpu_amdkfd_free_gtt_mem(kfd->kgd, kfd->gtt_mem);
		if (kfd->gws)
			amdgpu_amdkfd_free_gws(kfd->kgd, kfd->gws);
	}

	kfree(kfd);
}
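
/*
 * kgd2kfd_pre_reset - Quiesce the device before a GPU reset: report the
 * reset to SMI listeners, let the queue manager save its state, suspend
 * all processes and signal reset events to user mode.
 */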
int kgd2kfd_pre_reset(struct kfd_dev *kfd)
{
	if (!kfd->init_complete)
		return 0;

	kfd_smi_event_update_gpu_reset(kfd, false);

	kfd->dqm->ops.pre_reset(kfd->dqm);

	kgd2kfd_suspend(kfd, false);

	kfd_signal_reset_event(kfd);
	return 0;
}

/*
 * Fixme: KFD won't be able to resume existing processes for now.
 * We will keep all existing processes in an evicted state and
 * wait for them to be terminated.
 */
int kgd2kfd_post_reset(struct kfd_dev *kfd)
{
	int ret;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;
	atomic_dec(&kfd_locked);

	atomic_set(&kfd->sram_ecc_flag, 0);

	kfd_smi_event_update_gpu_reset(kfd, true);

	return 0;
}

bool kfd_is_locked(void)
{
	return (atomic_read(&kfd_locked) > 0);
}
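
/*
 * kgd2kfd_suspend - Stop GPU execution on this device. Outside of runtime
 * PM (run_pm == false), the first device suspended also evicts all KFD
 * processes by taking the global kfd_locked count.
 */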
void kgd2kfd_suspend(struct kfd_dev *kfd, bool run_pm)
{
	if (!kfd->init_complete)
		return;

	/* for runtime suspend, skip locking kfd */
	if (!run_pm) {
		/* For first KFD device suspend all the KFD processes */
		if (atomic_inc_return(&kfd_locked) == 1)
			kfd_suspend_all_processes();
	}

	kfd->dqm->ops.stop(kfd->dqm);
	kfd_iommu_suspend(kfd);
}

int kgd2kfd_resume(struct kfd_dev *kfd, bool run_pm)
{
	int ret, count;

	if (!kfd->init_complete)
		return 0;

	ret = kfd_resume(kfd);
	if (ret)
		return ret;

	/* for runtime resume, skip unlocking kfd */
	if (!run_pm) {
		count = atomic_dec_return(&kfd_locked);
		WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
		if (count == 0)
			ret = kfd_resume_all_processes();
	}

	return ret;
}
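
/*
 * kfd_resume - Bring the IOMMU and the device queue manager back up; on a
 * queue manager failure the IOMMU is suspended again so no state leaks.
 */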
static int kfd_resume(struct kfd_dev *kfd)
{
	int err = 0;

	err = kfd_iommu_resume(kfd);
	if (err) {
		dev_err(kfd_device,
			"Failed to resume IOMMU for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		return err;
	}

	err = kfd->dqm->ops.start(kfd->dqm);
	if (err) {
		dev_err(kfd_device,
			"Error starting queue manager for device %x:%x\n",
			kfd->pdev->vendor, kfd->pdev->device);
		goto dqm_start_error;
	}

	return err;

dqm_start_error:
	kfd_iommu_suspend(kfd);
	return err;
}
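
/*
 * kfd_queue_work - Schedule interrupt bottom-half work on another online
 * CPU in the current NUMA node when one exists, falling back to the
 * current CPU otherwise.
 */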
static inline void kfd_queue_work(struct workqueue_struct *wq,
				  struct work_struct *work)
{
	int cpu, new_cpu;

	cpu = new_cpu = smp_processor_id();
	do {
		new_cpu = cpumask_next(new_cpu, cpu_online_mask) % nr_cpu_ids;
		if (cpu_to_node(new_cpu) == numa_node_id())
			break;
	} while (cpu != new_cpu);

	queue_work_on(new_cpu, wq, work);
}

/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
	uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
	bool is_patched = false;
	unsigned long flags;

	if (!kfd->init_complete)
		return;

	if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
		dev_err_once(kfd_device, "Ring entry too small\n");
		return;
	}

	spin_lock_irqsave(&kfd->interrupt_lock, flags);

	if (kfd->interrupts_active
	    && interrupt_is_wanted(kfd, ih_ring_entry,
				   patched_ihre, &is_patched)
	    && enqueue_ih_ring_entry(kfd,
				     is_patched ? patched_ihre : ih_ring_entry))
		kfd_queue_work(kfd->ih_wq, &kfd->interrupt_work);

	spin_unlock_irqrestore(&kfd->interrupt_lock, flags);
}
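
/* Evict all queues of the process owning @mm; see kgd2kfd_resume_mm(). */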
int kgd2kfd_quiesce_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	WARN(debug_evictions, "Evicting pid %d", p->lead_thread->pid);
	r = kfd_process_evict_queues(p);

	kfd_unref_process(p);
	return r;
}

int kgd2kfd_resume_mm(struct mm_struct *mm)
{
	struct kfd_process *p;
	int r;

	/* Because we are called from arbitrary context (workqueue) as opposed
	 * to process context, kfd_process could attempt to exit while we are
	 * running so the lookup function increments the process ref count.
	 */
	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ESRCH;

	r = kfd_process_restore_queues(p);

	kfd_unref_process(p);
	return r;
}

/** kgd2kfd_schedule_evict_and_restore_process - Schedules work queue that will
 *   prepare for safe eviction of KFD BOs that belong to the specified
 *   process.
 *
 * @mm: mm_struct that identifies the specified KFD process
 * @fence: eviction fence attached to KFD process BOs
 *
 */
int kgd2kfd_schedule_evict_and_restore_process(struct mm_struct *mm,
					       struct dma_fence *fence)
{
	struct kfd_process *p;
	unsigned long active_time;
	unsigned long delay_jiffies = msecs_to_jiffies(PROCESS_ACTIVE_TIME_MS);

	if (!fence)
		return -EINVAL;

	if (dma_fence_is_signaled(fence))
		return 0;

	p = kfd_lookup_process_by_mm(mm);
	if (!p)
		return -ENODEV;

	if (fence->seqno == p->last_eviction_seqno)
		goto out;

	p->last_eviction_seqno = fence->seqno;

	/* Avoid KFD process starvation. Wait for at least
	 * PROCESS_ACTIVE_TIME_MS before evicting the process again
	 */
	active_time = get_jiffies_64() - p->last_restore_timestamp;
	if (delay_jiffies > active_time)
		delay_jiffies -= active_time;
	else
		delay_jiffies = 0;

	/* During process initialization eviction_work.dwork is initialized
	 * to kfd_evict_bo_worker
	 */
	WARN(debug_evictions, "Scheduling eviction of pid %d in %ld jiffies",
	     p->lead_thread->pid, delay_jiffies);
	schedule_delayed_work(&p->eviction_work, delay_jiffies);
out:
	kfd_unref_process(p);
	return 0;
}
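
/*
 * kfd_gtt_sa_init - Set up the GTT sub-allocator: split the GTT buffer
 * into chunk_size pieces and track their allocation state in a bitmap
 * protected by gtt_sa_lock.
 */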
static int kfd_gtt_sa_init(struct kfd_dev *kfd, unsigned int buf_size,
				unsigned int chunk_size)
{
	unsigned int num_of_longs;

	if (WARN_ON(buf_size < chunk_size))
		return -EINVAL;
	if (WARN_ON(buf_size == 0))
		return -EINVAL;
	if (WARN_ON(chunk_size == 0))
		return -EINVAL;

	kfd->gtt_sa_chunk_size = chunk_size;
	kfd->gtt_sa_num_of_chunks = buf_size / chunk_size;

	num_of_longs = (kfd->gtt_sa_num_of_chunks + BITS_PER_LONG - 1) /
		BITS_PER_LONG;

	kfd->gtt_sa_bitmap = kcalloc(num_of_longs, sizeof(long), GFP_KERNEL);

	if (!kfd->gtt_sa_bitmap)
		return -ENOMEM;

	pr_debug("gtt_sa_num_of_chunks = %d, gtt_sa_bitmap = %p\n",
			kfd->gtt_sa_num_of_chunks, kfd->gtt_sa_bitmap);

	mutex_init(&kfd->gtt_sa_lock);

	return 0;
}

static void kfd_gtt_sa_fini(struct kfd_dev *kfd)
{
	mutex_destroy(&kfd->gtt_sa_lock);
	kfree(kfd->gtt_sa_bitmap);
}

static inline uint64_t kfd_gtt_sa_calc_gpu_addr(uint64_t start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return start_addr + bit_num * chunk_size;
}

static inline uint32_t *kfd_gtt_sa_calc_cpu_addr(void *start_addr,
						unsigned int bit_num,
						unsigned int chunk_size)
{
	return (uint32_t *) ((uint64_t) start_addr + bit_num * chunk_size);
}
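
/*
 * kfd_gtt_sa_allocate - First-fit search of the chunk bitmap for a
 * contiguous range large enough for @size bytes. If a run of free chunks
 * turns out to be too short, the search restarts at the next free chunk.
 */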
int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
			struct kfd_mem_obj **mem_obj)
{
	unsigned int found, start_search, cur_size;

	if (size == 0)
		return -EINVAL;

	if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
		return -ENOMEM;

	*mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
	if (!(*mem_obj))
		return -ENOMEM;

	pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);

	start_search = 0;

	mutex_lock(&kfd->gtt_sa_lock);

kfd_gtt_restart_search:
	/* Find the first chunk that is free */
	found = find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks,
					start_search);

	pr_debug("Found = %d\n", found);

	/* If there wasn't any free chunk, bail out */
	if (found == kfd->gtt_sa_num_of_chunks)
		goto kfd_gtt_no_free_chunk;

	/* Update fields of mem_obj */
	(*mem_obj)->range_start = found;
	(*mem_obj)->range_end = found;
	(*mem_obj)->gpu_addr = kfd_gtt_sa_calc_gpu_addr(
					kfd->gtt_start_gpu_addr,
					found,
					kfd->gtt_sa_chunk_size);
	(*mem_obj)->cpu_ptr = kfd_gtt_sa_calc_cpu_addr(
					kfd->gtt_start_cpu_ptr,
					found,
					kfd->gtt_sa_chunk_size);

	pr_debug("gpu_addr = %p, cpu_addr = %p\n",
			(uint64_t *) (*mem_obj)->gpu_addr, (*mem_obj)->cpu_ptr);

	/* If we need only one chunk, mark it as allocated and get out */
	if (size <= kfd->gtt_sa_chunk_size) {
		pr_debug("Single bit\n");
		set_bit(found, kfd->gtt_sa_bitmap);
		goto kfd_gtt_out;
	}

	/* Otherwise, try to see if we have enough contiguous chunks */
	cur_size = size - kfd->gtt_sa_chunk_size;
	do {
		(*mem_obj)->range_end =
			find_next_zero_bit(kfd->gtt_sa_bitmap,
					kfd->gtt_sa_num_of_chunks, ++found);
		/*
		 * If the next free chunk is not contiguous then we need to
		 * restart our search from the last free chunk we found (which
		 * wasn't contiguous to the previous ones)
		 */
		if ((*mem_obj)->range_end != found) {
			start_search = found;
			goto kfd_gtt_restart_search;
		}

		/*
		 * If we reached end of buffer, bail out with error
		 */
		if (found == kfd->gtt_sa_num_of_chunks)
			goto kfd_gtt_no_free_chunk;

		/* Check if we don't need another chunk */
		if (cur_size <= kfd->gtt_sa_chunk_size)
			cur_size = 0;
		else
			cur_size -= kfd->gtt_sa_chunk_size;

	} while (cur_size > 0);

	pr_debug("range_start = %d, range_end = %d\n",
		(*mem_obj)->range_start, (*mem_obj)->range_end);

	/* Mark the chunks as allocated */
	for (found = (*mem_obj)->range_start;
		found <= (*mem_obj)->range_end;
		found++)
		set_bit(found, kfd->gtt_sa_bitmap);

kfd_gtt_out:
	mutex_unlock(&kfd->gtt_sa_lock);
	return 0;

kfd_gtt_no_free_chunk:
	pr_debug("Allocation failed with mem_obj = %p\n", *mem_obj);
	mutex_unlock(&kfd->gtt_sa_lock);
	kfree(*mem_obj);
	return -ENOMEM;
}

int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
{
	unsigned int bit;

	/* Act like kfree when trying to free a NULL object */
	if (!mem_obj)
		return 0;

	pr_debug("Free mem_obj = %p, range_start = %d, range_end = %d\n",
			mem_obj, mem_obj->range_start, mem_obj->range_end);

	mutex_lock(&kfd->gtt_sa_lock);

	/* Mark the chunks as free */
	for (bit = mem_obj->range_start;
		bit <= mem_obj->range_end;
		bit++)
		clear_bit(bit, kfd->gtt_sa_bitmap);

	mutex_unlock(&kfd->gtt_sa_lock);

	kfree(mem_obj);
	return 0;
}

void kgd2kfd_set_sram_ecc_flag(struct kfd_dev *kfd)
{
	if (kfd)
		atomic_inc(&kfd->sram_ecc_flag);
}
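
/*
 * kfd_inc_compute_active / kfd_dec_compute_active - Reference-count active
 * compute work and toggle the KGD compute-idle hint on the 0 <-> 1
 * transitions.
 */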
void kfd_inc_compute_active(struct kfd_dev *kfd)
{
	if (atomic_inc_return(&kfd->compute_profile) == 1)
		amdgpu_amdkfd_set_compute_idle(kfd->kgd, false);
}

void kfd_dec_compute_active(struct kfd_dev *kfd)
{
	int count = atomic_dec_return(&kfd->compute_profile);

	if (count == 0)
		amdgpu_amdkfd_set_compute_idle(kfd->kgd, true);
	WARN_ONCE(count < 0, "Compute profile ref. count error");
}

void kgd2kfd_smi_event_throttle(struct kfd_dev *kfd, uint64_t throttle_bitmask)
{
	if (kfd && kfd->init_complete)
		kfd_smi_event_update_thermal_throttling(kfd, throttle_bitmask);
}

#if defined(CONFIG_DEBUG_FS)

/* This function will send a packet to the HIQ to hang the HWS,
 * which will trigger a GPU reset and bring the HWS back to a normal state
 */
int kfd_debugfs_hang_hws(struct kfd_dev *dev)
{
	if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
		pr_err("HWS is not enabled");
		return -EINVAL;
	}

	return dqm_debugfs_hang_hws(dev->dqm);
}

#endif