/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu.h"
#include "amdgpu_amdkfd.h"

/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31] = 1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * of APUs.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the next
 * GPU
 * @total_cu_count - Total CUs present in the GPU including ones
 *		     masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
				unsigned int total_cu_count)
{
	int current_id = gpu_processor_id_low;

	gpu_processor_id_low += total_cu_count;
	return current_id;
}

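/* For example, with the default base the first dGPU with 64 CUs is
 * assigned processor IDs 0x80001000..0x8000103f, and the next dGPU
 * starts at 0x80001040.
 */
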
/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
	uint32_t	cache_size;
	uint32_t	cache_level;
	uint32_t	flags;
	/* Indicates how many Compute Units share this cache
	 * within a SA. Value = 1 indicates the cache is not shared
	 */
	uint32_t	num_cu_shared;
};

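/* Example entry (illustrative values only, not taken from a real ASIC
 * table): a 16 KiB L1 data cache private to each CU would be described as
 *	{
 *		.cache_size = 16,	// in KiB
 *		.cache_level = 1,
 *		.flags = (CRAT_CACHE_FLAGS_ENABLED |
 *				CRAT_CACHE_FLAGS_DATA_CACHE |
 *				CRAT_CACHE_FLAGS_SIMD_CACHE),
 *		.num_cu_shared = 1,	// not shared between CUs
 *	}
 */
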
static struct kfd_gpu_cache_info kaveri_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},

	/* TODO: Add L2 Cache information */
};

static struct kfd_gpu_cache_info carrizo_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},

	/* TODO: Add L2 Cache information */
};

#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info

/* NOTE: L1 cache information has been updated and L2/L3
 * cache information has been added for Vega10 and
 * newer ASICs. The unit for cache_size is KiB.
 * In the future, cache details must be checked and
 * updated for every new ASIC.
 */
static struct kfd_gpu_cache_info vega10_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info raven_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info renoir_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info vega12_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info vega20_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info navi10_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info vangogh_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info navi14_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 128*1024,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 96*1024,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 32*1024,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L3 Data Cache per GPU */
		.cache_size = 16*1024,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Instruction Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* Scalar L1 Data Cache per SQC */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* GL1 Data Cache per SA */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
	{
		/* L2 Data Cache per GPU (Total Tex Cache) */
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
	},
};

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
			cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;

	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
				struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
			cu->proximity_domain, cu->hsa_capability);
	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);

			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
		struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *props;

	list_for_each_entry(props, &dev->mem_props, list) {
		if (props->heap_type == heap_type
				&& props->flags == flags
				&& props->width == width)
			return props;
	}

	return NULL;
}

/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
				struct list_head *device_list)
{
	struct kfd_mem_properties *props;
	struct kfd_topology_device *dev;
	uint32_t heap_type;
	uint64_t size_in_bytes;
	uint32_t flags = 0;
	uint32_t width;

	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
			mem->proximity_domain);
	list_for_each_entry(dev, device_list, list) {
		if (mem->proximity_domain == dev->proximity_domain) {
			/* We're on GPU node */
			if (dev->node_props.cpu_cores_count == 0) {
				/* APU */
				if (mem->visibility_type == 0)
					heap_type =
						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
				/* dGPU */
				else
					heap_type = mem->visibility_type;
			} else
				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
				flags |= HSA_MEM_FLAGS_NON_VOLATILE;

			size_in_bytes =
				((uint64_t)mem->length_high << 32) +
				mem->length_low;
			width = mem->width;

			/* Multiple banks of the same type are aggregated into
			 * one. User mode doesn't care about multiple physical
			 * memory segments. It's managed as a single virtual
			 * heap for user mode.
			 */
			props = find_subtype_mem(heap_type, flags, width, dev);
			if (props) {
				props->size_in_bytes += size_in_bytes;
				break;
			}

			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->heap_type = heap_type;
			props->flags = flags;
			props->size_in_bytes = size_in_bytes;
			props->width = width;

			dev->node_props.mem_banks_count++;
			list_add_tail(&props->list, &dev->mem_props);

			break;
		}
	}

	return 0;
}

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
			struct list_head *device_list)
{
	struct kfd_cache_properties *props;
	struct kfd_topology_device *dev;
	uint32_t id;
	uint32_t total_num_of_cu;

	id = cache->processor_id_low;

	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
	list_for_each_entry(dev, device_list, list) {
		total_num_of_cu = (dev->node_props.array_count *
					dev->node_props.cu_per_simd_array);

		/* Cache information in CRAT doesn't have proximity_domain
		 * information as it is associated with a CPU core or GPU
		 * Compute Unit. So map the cache using CPU core Id or SIMD
		 * ID.
		 * TODO: This works because currently we can safely assume that
		 *  Compute Units are parsed before caches are parsed. In
		 *  future, remove this dependency
		 */
		if ((id >= dev->node_props.cpu_core_id_base &&
			id <= dev->node_props.cpu_core_id_base +
				dev->node_props.cpu_cores_count) ||
			(id >= dev->node_props.simd_id_base &&
			id < dev->node_props.simd_id_base +
				total_num_of_cu)) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->processor_id_low = id;
			props->cache_level = cache->cache_level;
			props->cache_size = cache->cache_size;
			props->cacheline_size = cache->cache_line_size;
			props->cachelines_per_tag = cache->lines_per_tag;
			props->cache_assoc = cache->associativity;
			props->cache_latency = cache->cache_latency;
			memcpy(props->sibling_map, cache->sibling_map,
					sizeof(props->sibling_map));

			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_DATA;
			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_CPU;
			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_HSACU;

			dev->node_props.caches_count++;
			list_add_tail(&props->list, &dev->cache_props);

			break;
		}
	}

	return 0;
}

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
					struct list_head *device_list)
{
	struct kfd_iolink_properties *props = NULL, *props2;
	struct kfd_topology_device *dev, *to_dev;
	uint32_t id_from;
	uint32_t id_to;

	id_from = iolink->proximity_domain_from;
	id_to = iolink->proximity_domain_to;

	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
			id_from, id_to);
	list_for_each_entry(dev, device_list, list) {
		if (id_from == dev->proximity_domain) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->node_from = id_from;
			props->node_to = id_to;
			props->ver_maj = iolink->version_major;
			props->ver_min = iolink->version_minor;
			props->iolink_type = iolink->io_interface_type;

			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
				props->weight = 20;
			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
				props->weight = 15 * iolink->num_hops_xgmi;
			else
				props->weight = node_distance(id_from, id_to);

			props->min_latency = iolink->minimum_latency;
			props->max_latency = iolink->maximum_latency;
			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
			props->rec_transfer_size =
					iolink->recommended_transfer_size;

			dev->io_link_count++;
			dev->node_props.io_links_count++;
			list_add_tail(&props->list, &dev->io_link_props);
			break;
		}
	}

	/* CPU topology is created before GPUs are detected, so CPU->GPU
	 * links are not built at that time. If a PCIe type is discovered, it
	 * means a GPU is detected and we are adding GPU->CPU to the topology.
	 * At this time, also add the corresponding CPU->GPU link if the GPU
	 * is large bar.
	 * For xGMI, only one direction of the link is added in the CRAT
	 * table; add the corresponding reverse-direction link now.
	 */
	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
		to_dev = kfd_topology_device_by_proximity_domain(id_to);
		if (!to_dev)
			return -ENODEV;
		/* same everything but the other direction */
		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
		if (!props2)
			return -ENOMEM;

		props2->node_from = id_to;
		props2->node_to = id_from;
		props2->kobj = NULL;
		to_dev->io_link_count++;
		to_dev->node_props.io_links_count++;
		list_add_tail(&props2->list, &to_dev->io_link_props);
	}

	return 0;
}

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 * @sub_type_hdr - subtype section of crat_image
 * @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
				struct list_head *device_list)
{
	struct crat_subtype_computeunit *cu;
	struct crat_subtype_memory *mem;
	struct crat_subtype_cache *cache;
	struct crat_subtype_iolink *iolink;
	int ret = 0;

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	case CRAT_SUBTYPE_MEMORY_AFFINITY:
		mem = (struct crat_subtype_memory *)sub_type_hdr;
		ret = kfd_parse_subtype_mem(mem, device_list);
		break;
	case CRAT_SUBTYPE_CACHE_AFFINITY:
		cache = (struct crat_subtype_cache *)sub_type_hdr;
		ret = kfd_parse_subtype_cache(cache, device_list);
		break;
	case CRAT_SUBTYPE_TLB_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found TLB entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_IOLINK_AFFINITY:
		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
		ret = kfd_parse_subtype_iolink(iolink, device_list);
		break;
	default:
		pr_warn("Unknown subtype %d in CRAT\n",
				sub_type_hdr->type);
	}

	return ret;
}

/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
 * create a kfd_topology_device and add it to device_list. Also parse
 * CRAT subtypes and attach them to the appropriate kfd_topology_device
 * @crat_image - input image containing CRAT
 * @device_list - [OUT] list of kfd_topology_device generated after
 *		  parsing crat_image
 * @proximity_domain - Proximity domain of the first device in the table
 *
 * Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
			 uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev = NULL;
	struct crat_subtype_generic *sub_type_hdr;
	uint16_t node_id;
	int ret = 0;
	struct crat_header *crat_table = (struct crat_header *)crat_image;
	uint16_t num_nodes;
	uint32_t image_len;

	if (!crat_image)
		return -EINVAL;

	if (!list_empty(device_list)) {
		pr_warn("Error device list should be empty\n");
		return -EINVAL;
	}

	num_nodes = crat_table->num_domains;
	image_len = crat_table->length;

	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);

	for (node_id = 0; node_id < num_nodes; node_id++) {
		top_dev = kfd_create_topology_device(device_list);
		if (!top_dev)
			break;
		top_dev->proximity_domain = proximity_domain++;
	}

	if (!top_dev) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
			CRAT_OEMTABLEID_LENGTH);
	top_dev->oem_revision = crat_table->oem_revision;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
			((char *)crat_image) + image_len) {
		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
			ret = kfd_parse_subtype(sub_type_hdr, device_list);
			if (ret)
				break;
		}

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);
	}

err:
	if (ret)
		kfd_release_topology_device_list(device_list);

	return ret;
}

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cu_bitmask,
				int cache_type, unsigned int cu_processor_id,
				int cu_block)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_bitmask;
	cu_sibling_map_mask >>= cu_block;
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. For a shared cache, find the first active
	 * CU; for a non-shared cache, check whether the CU is inactive and,
	 * if so, skip it.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CU
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);

		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
		pcache->sibling_map[1] =
				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
		pcache->sibling_map[2] =
				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
		pcache->sibling_map[3] =
				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
		return 0;
	}
	return 1;
}

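/* Worked example (illustrative numbers): with cu_bitmask = 0xf0,
 * cu_block = 4 and num_cu_shared = 2, the mask becomes
 * (0xf0 >> 4) & 0x3 = 0x3 and ffs() = 1, so the cache entry starts at
 * cu_processor_id and sibling_map[0] = 0x03 marks the two CUs that
 * share the cache.
 */
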
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cache_type, unsigned int cu_processor_id)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;
	int i, j, k;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. For a shared cache, find the first active
	 * CU; for a non-shared cache, check whether the CU is inactive and,
	 * if so, skip it.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CU
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);
		k = 0;
		for (i = 0; i < cu_info->num_shader_engines; i++) {
			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
				j++) {
				pcache->sibling_map[k] =
				 (uint8_t)(cu_sibling_map_mask & 0xFF);
				pcache->sibling_map[k+1] =
				 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
				pcache->sibling_map[k+2] =
				 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
				pcache->sibling_map[k+3] =
				 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
				k += 4;
				cu_sibling_map_mask =
					cu_info->cu_bitmap[i % 4][j + i / 4];
				cu_sibling_map_mask &= (
				 (1 << pcache_info[cache_type].num_cu_shared)
				 - 1);
			}
		}
		return 0;
	}
	return 1;
}

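/* A note on the indexing above: cu_bitmap is a 4x4 array of per-SA masks,
 * so ASICs with more than four shader engines wrap around it. For example
 * (illustrative), engine i = 4, array j = 0 reads cu_bitmap[4 % 4][0 + 4 / 4],
 * i.e. cu_bitmap[0][1].
 */
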
/* kfd_fill_gpu_cache_info - Fill GPU cache info using the kfd_gpu_cache_info
 * tables
 *
 * @kdev - [IN] GPU device
 * @gpu_processor_id - [IN] GPU processor ID to which these caches
 *		       associate
 * @available_size - [IN] Amount of memory available in pcache
 * @cu_info - [IN] Compute Unit info obtained from KGD
 * @pcache - [OUT] memory into which cache data is to be filled in.
 * @size_filled - [OUT] amount of data used up in pcache.
 * @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
			int gpu_processor_id,
			int available_size,
			struct kfd_cu_info *cu_info,
			struct crat_subtype_cache *pcache,
			int *size_filled,
			int *num_of_entries)
{
	struct kfd_gpu_cache_info *pcache_info;
	int num_of_cache_types = 0;
	int i, j, k;
	int ct = 0;
	int mem_available = available_size;
	unsigned int cu_processor_id;
	int ret;
	unsigned int num_cu_shared;

	switch (kdev->device_info->asic_family) {
	case CHIP_KAVERI:
		pcache_info = kaveri_cache_info;
		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
		break;
	case CHIP_HAWAII:
		pcache_info = hawaii_cache_info;
		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
		break;
	case CHIP_CARRIZO:
		pcache_info = carrizo_cache_info;
		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
		break;
	case CHIP_TONGA:
		pcache_info = tonga_cache_info;
		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
		break;
	case CHIP_FIJI:
		pcache_info = fiji_cache_info;
		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
		break;
	case CHIP_POLARIS10:
		pcache_info = polaris10_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
		break;
	case CHIP_POLARIS11:
		pcache_info = polaris11_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
		break;
	case CHIP_POLARIS12:
		pcache_info = polaris12_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
		break;
	case CHIP_VEGAM:
		pcache_info = vegam_cache_info;
		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
		break;
	case CHIP_VEGA10:
		pcache_info = vega10_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
		break;
	case CHIP_VEGA12:
		pcache_info = vega12_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
		break;
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		pcache_info = vega20_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
		break;
	case CHIP_ALDEBARAN:
		pcache_info = aldebaran_cache_info;
		num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
		break;
	case CHIP_RAVEN:
		pcache_info = raven_cache_info;
		num_of_cache_types = ARRAY_SIZE(raven_cache_info);
		break;
	case CHIP_RENOIR:
		pcache_info = renoir_cache_info;
		num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
		pcache_info = navi10_cache_info;
		num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
		break;
	case CHIP_NAVI14:
		pcache_info = navi14_cache_info;
		num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
		break;
	case CHIP_SIENNA_CICHLID:
		pcache_info = sienna_cichlid_cache_info;
		num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
		break;
	case CHIP_NAVY_FLOUNDER:
		pcache_info = navy_flounder_cache_info;
		num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
		break;
	case CHIP_DIMGREY_CAVEFISH:
		pcache_info = dimgrey_cavefish_cache_info;
		num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
		break;
	case CHIP_VANGOGH:
		pcache_info = vangogh_cache_info;
		num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
		break;
	case CHIP_BEIGE_GOBY:
		pcache_info = beige_goby_cache_info;
		num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
		break;
	case CHIP_YELLOW_CARP:
		pcache_info = yellow_carp_cache_info;
		num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
		break;
	default:
		return -EINVAL;
	}

	*size_filled = 0;
	*num_of_entries = 0;

	/* For each type of cache listed in the kfd_gpu_cache_info table,
	 * go through all available Compute Units.
	 * The [i,j,k] loop will
	 *		if kfd_gpu_cache_info.num_cu_shared = 1
	 *			will parse through all available CU
	 *		If (kfd_gpu_cache_info.num_cu_shared != 1)
	 *			then it will consider only one CU from
	 *			the shared unit
	 */

	for (ct = 0; ct < num_of_cache_types; ct++) {
		cu_processor_id = gpu_processor_id;
		if (pcache_info[ct].cache_level == 1) {
			for (i = 0; i < cu_info->num_shader_engines; i++) {
				for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
					for (k = 0; k < cu_info->num_cu_per_sh;
						k += pcache_info[ct].num_cu_shared) {
						ret = fill_in_l1_pcache(pcache,
							pcache_info, cu_info,
							mem_available,
							cu_info->cu_bitmap[i % 4][j + i / 4],
							ct, cu_processor_id,
							k);

						if (ret < 0)
							break;

						if (!ret) {
							pcache++;
							(*num_of_entries)++;
							mem_available -= sizeof(*pcache);
							(*size_filled) += sizeof(*pcache);
						}

						/* Move to next CU block */
						num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
							cu_info->num_cu_per_sh) ?
							pcache_info[ct].num_cu_shared :
							(cu_info->num_cu_per_sh - k);
						cu_processor_id += num_cu_shared;
					}
				}
			}
		} else {
			ret = fill_in_l2_l3_pcache(pcache,
				pcache_info, cu_info, mem_available,
				ct, cu_processor_id);

			if (ret < 0)
				break;

			if (!ret) {
				pcache++;
				(*num_of_entries)++;
				mem_available -= sizeof(*pcache);
				(*size_filled) += sizeof(*pcache);
			}
		}
	}

	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

	return 0;
}

static bool kfd_ignore_crat(void)
{
	bool ret;

	if (ignore_crat)
		return true;

#ifndef KFD_SUPPORT_IOMMU_V2
	ret = true;
#else
	ret = false;
#endif

	return ret;
}

/* kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 * copies CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *	       crat_image will be NULL
 * @size: [OUT] size of crat_image
 *
 * Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
	struct acpi_table_header *crat_table;
	acpi_status status;
	void *pcrat_image;
	int rc = 0;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	if (kfd_ignore_crat()) {
		pr_info("CRAT table disabled by module option\n");
		return -ENODATA;
	}

	/* Fetch the CRAT table from ACPI */
	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
	if (status == AE_NOT_FOUND) {
		pr_warn("CRAT table not found\n");
		return -ENODATA;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("CRAT table error: %s\n", err);
		return -EINVAL;
	}

	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
	if (!pcrat_image) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy(pcrat_image, crat_table, crat_table->length);
	*crat_image = pcrat_image;
	*size = crat_table->length;
out:
	acpi_put_table(crat_table);
	return rc;
}

/* Memory required to create Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amount is allocated for the GPU Virtual CRAT. This is
 * expected to cover all known conditions. To be safe, an additional check
 * is put in the code to ensure we don't write beyond it.
 */
#define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)

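/* On systems with 4 KiB pages this works out to 16 KiB per GPU VCRAT. */
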
/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which compute info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
				int proximity_domain,
				struct crat_subtype_computeunit *sub_type_hdr)
{
	const struct cpumask *cpumask;

	*avail_size -= sizeof(struct crat_subtype_computeunit);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	cpumask = cpumask_of_node(numa_node_id);

	/* Fill in CU data */
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
	if (sub_type_hdr->processor_id_low == -1)
		return -EINVAL;

	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

	return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which memory info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
			int proximity_domain,
			struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported. So
	 * the following lines are duplicated from si_meminfo_node
	 * function
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
				uint32_t *num_entries,
				struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif

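/* For example, on a 4-node system each node emits 3 links here, so the
 * CPU virtual CRAT ends up with 4 * 3 = 12 CPU-to-CPU io link entries.
 */
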
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 * @pcrat_image: Fill in VCRAT for CPU
 * @size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image)
		return -EINVAL;

	/* Fill in CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK)
		pr_warn("DSDT table not found for OEM information\n");
	else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
		acpi_put_table(acpi_table);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;

		if (entries) {
			crat_table->length += (sub_type_hdr->length * entries);
			crat_table->total_entries += entries;

			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
					sub_type_hdr->length * entries);
		}
#else
		pr_info("IO link not available for non x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add cache Subtype for CPU.
	 * Currently, CPU cache information is available in function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported and to get the same information the code needs to be
	 * duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return ret;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}

#ifdef CONFIG_ACPI_NUMA
static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
{
	struct acpi_table_header *table_header = NULL;
	struct acpi_subtable_header *sub_header = NULL;
	unsigned long table_end, subtable_len;
	u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
			pci_dev_id(kdev->pdev);
	u32 bdf;
	acpi_status status;
	struct acpi_srat_cpu_affinity *cpu;
	struct acpi_srat_generic_affinity *gpu;
	int pxm = 0, max_pxm = 0;
	int numa_node = NUMA_NO_NODE;
	bool found = false;

	/* Fetch the SRAT table from ACPI */
	status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
	if (status == AE_NOT_FOUND) {
		pr_warn("SRAT table not found\n");
		return;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("SRAT table error: %s\n", err);
		return;
	}

	table_end = (unsigned long)table_header + table_header->length;

	/* Parse all entries looking for a match. */
	sub_header = (struct acpi_subtable_header *)
			((unsigned long)table_header +
			sizeof(struct acpi_table_srat));
	subtable_len = sub_header->length;

	while (((unsigned long)sub_header) + subtable_len < table_end) {
		/*
		 * If length is 0, break from this loop to avoid
		 * infinite loop.
		 */
		if (subtable_len == 0) {
			pr_err("SRAT invalid zero length\n");
			break;
		}

		switch (sub_header->type) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY:
			cpu = (struct acpi_srat_cpu_affinity *)sub_header;
			pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
					cpu->proximity_domain_lo;
			if (pxm > max_pxm)
				max_pxm = pxm;
			break;
		case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
			gpu = (struct acpi_srat_generic_affinity *)sub_header;
			bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
					*((u16 *)(&gpu->device_handle[2]));
			if (bdf == pci_id) {
				found = true;
				numa_node = pxm_to_node(gpu->proximity_domain);
			}
			break;
		default:
			break;
		}

		if (found)
			break;

		sub_header = (struct acpi_subtable_header *)
				((unsigned long)sub_header + subtable_len);
		subtable_len = sub_header->length;
	}

	acpi_put_table(table_header);

	/* Workaround bad cpu-gpu binding case */
	if (found && (numa_node < 0 ||
			numa_node > pxm_to_node(max_pxm)))
		numa_node = 0;

	if (numa_node != NUMA_NO_NODE)
		set_dev_node(&kdev->pdev->dev, numa_node);
}
#endif

/* kfd_fill_gpu_direct_io_link_to_cpu - Fill in direct io link from GPU
 * to its NUMA node
 * @avail_size: Available size in the memory
 * @kdev - [IN] GPU device
 * @sub_type_hdr: Memory into which io link info will be filled in
 * @proximity_domain - proximity domain of the GPU node
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
			struct kfd_dev *kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain)
{
	struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;

	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
	if (kfd_dev_is_large_bar(kdev))
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	/* Fill in IOLINK subtype.
	 * TODO: Fill-in other fields of iolink subtype
	 */
	if (adev->gmc.xgmi.connected_to_cpu) {
		/*
		 * with host gpu xgmi link, host can access gpu memory whether
		 * or not pcie bar type is large, so always create bidirectional
		 * io link.
		 */
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
		sub_type_hdr->num_hops_xgmi = 1;
	} else {
		sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
	}

	sub_type_hdr->proximity_domain_from = proximity_domain;

#ifdef CONFIG_ACPI_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		kfd_find_numa_node_in_srat(kdev);
#endif
#ifdef CONFIG_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		sub_type_hdr->proximity_domain_to = 0;
	else
		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
	sub_type_hdr->proximity_domain_to = 0;
#endif
	return 0;
}

static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
			struct kfd_dev *kdev,
			struct kfd_dev *peer_kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain_from,
			uint32_t proximity_domain_to)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
			       CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
	sub_type_hdr->proximity_domain_from = proximity_domain_from;
	sub_type_hdr->proximity_domain_to = proximity_domain_to;
	sub_type_hdr->num_hops_xgmi =
		amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
	return 0;
}

/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
 *
 * @pcrat_image: Fill in VCRAT for GPU
 * @size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
				      size_t *size, struct kfd_dev *kdev,
				      uint32_t proximity_domain)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct crat_subtype_generic *sub_type_hdr;
	struct kfd_local_mem_info local_mem_info;
	struct kfd_topology_device *peer_dev;
	struct crat_subtype_computeunit *cu;
	struct kfd_cu_info cu_info;
	int avail_size = *size;
	uint32_t total_num_of_cu;
	int num_of_cache_entries = 0;
	int cache_mem_filled = 0;
	uint32_t nid = 0;
	int ret = 0;

	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
		return -EINVAL;

	/* Fill the CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));

	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	/* Change length as we add more subtypes*/
	crat_table->length = sizeof(struct crat_header);
	crat_table->num_domains = 1;
	crat_table->total_entries = 0;

	/* Fill in Subtype: Compute Unit
	 * First fill in the sub type header and then sub type data
	 */
	avail_size -= sizeof(struct crat_subtype_computeunit);
	if (avail_size < 0)
		return -ENOMEM;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill CU subtype data */
	cu = (struct crat_subtype_computeunit *)sub_type_hdr;
	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
	cu->proximity_domain = proximity_domain;

	amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
	cu->num_simd_per_cu = cu_info.simd_per_cu;
	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
	cu->max_waves_simd = cu_info.max_waves_per_simd;

	cu->wave_front_size = cu_info.wave_front_size;
	cu->array_count = cu_info.num_shader_arrays_per_engine *
		cu_info.num_shader_engines;
	total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
	cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
	cu->num_cu_per_array = cu_info.num_cu_per_sh;
	cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
	cu->num_banks = cu_info.num_shader_engines;
	cu->lds_size_in_kb = cu_info.lds_size;

	cu->hsa_capability = 0;

	/* Check if this node supports IOMMU. During parsing this flag will
	 * translate to HSA_CAP_ATS_PRESENT
	 */
	if (!kfd_iommu_check_device(kdev))
		cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;

	/* Fill in Subtype: Memory. Only on systems with large BAR (no
	 * private FB), report memory as public. On other systems
	 * report the total FB size (public+private) as a single
	 * private heap.
	 */
	amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

	if (debug_largebar)
		local_mem_info.local_mem_size_private = 0;

	if (local_mem_info.local_mem_size_private == 0)
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
				local_mem_info.local_mem_size_public,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	else
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
				local_mem_info.local_mem_size_public +
				local_mem_info.local_mem_size_private,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	if (ret < 0)
		return ret;

	crat_table->length += sizeof(struct crat_subtype_memory);
	crat_table->total_entries++;

	/* TODO: Fill in cache information. This information is NOT readily
	 * available in KGD
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
		sub_type_hdr->length);
	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
				avail_size,
				&cu_info,
				(struct crat_subtype_cache *)sub_type_hdr,
				&cache_mem_filled,
				&num_of_cache_entries);

	if (ret < 0)
		return ret;

	crat_table->length += cache_mem_filled;
	crat_table->total_entries += num_of_cache_entries;
	avail_size -= cache_mem_filled;

	/* Fill in Subtype: IO_LINKS
	 * Only direct links are added here, i.e. the link from the GPU
	 * to its NUMA node. Indirect links are added by userspace.
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
		cache_mem_filled);
	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

	if (ret < 0)
		return ret;
	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;

	/* Fill in Subtype: IO_LINKS
	 * Direct links from GPU to other GPUs through xGMI.
	 * We loop over the GPUs that have already been processed (those
	 * with lower values of proximity_domain) and add a link from this
	 * GPU to each GPU in the same hive. The reversed iolink (from the
	 * other GPU to this one) is added in kfd_parse_subtype_iolink.
	 */
	if (kdev->hive_id) {
		for (nid = 0; nid < proximity_domain; ++nid) {
			peer_dev = kfd_topology_device_by_proximity_domain(nid);
			if (!peer_dev->gpu)
				continue;
			if (peer_dev->gpu->hive_id != kdev->hive_id)
				continue;
			sub_type_hdr = (typeof(sub_type_hdr))(
				(char *)sub_type_hdr +
				sizeof(struct crat_subtype_iolink));
			ret = kfd_fill_gpu_xgmi_link_to_gpu(
				&avail_size, kdev, peer_dev->gpu,
				(struct crat_subtype_iolink *)sub_type_hdr,
				proximity_domain, nid);
			if (ret < 0)
				return ret;
			crat_table->length += sub_type_hdr->length;
			crat_table->total_entries++;
		}
	}
	*size = crat_table->length;
	pr_info("Virtual CRAT table created for GPU\n");

	return ret;
}

/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
 * creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: VCRAT image created because ACPI does not have a
 *		CRAT for this device
 * @size: [OUT] size of virtual crat_image
 * @flags:	COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *		COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *		(COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *			-- this option is not currently implemented.
 *			The assumption is that all AMD APUs will have CRAT
 * @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *
 * Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
				  int flags, struct kfd_dev *kdev,
				  uint32_t proximity_domain)
{
	void *pcrat_image = NULL;
	int ret = 0, num_nodes;
	size_t dyn_size;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Allocate the CPU Virtual CRAT size based on the number of online
	 * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image.
	 * This should cover all the current conditions. A check is put in
	 * place not to write beyond the allocated size for GPUs.
	 */
	switch (flags) {
	case COMPUTE_UNIT_CPU:
		num_nodes = num_online_nodes();
		dyn_size = sizeof(struct crat_header) +
			num_nodes * (sizeof(struct crat_subtype_computeunit) +
			sizeof(struct crat_subtype_memory) +
			(num_nodes - 1) * sizeof(struct crat_subtype_iolink));
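		/* Illustrative sizing: with two online NUMA nodes this is
		 * one CRAT header plus, per node, one CU entry, one memory
		 * entry and one io link (to the one other node).
		 */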
		pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = dyn_size;
		pr_debug("CRAT size is %zu", dyn_size);
		ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
		break;
	case COMPUTE_UNIT_GPU:
		if (!kdev)
			return -EINVAL;
		pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = VCRAT_SIZE_FOR_GPU;
		ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
						 proximity_domain);
		break;
	case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
		/* TODO: */
		ret = -EINVAL;
		pr_err("VCRAT not implemented for APU\n");
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		*crat_image = pcrat_image;
	else
		kvfree(pcrat_image);

	return ret;
}

/* kfd_destroy_crat_image
 *
 * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 *
 */
void kfd_destroy_crat_image(void *crat_image)
{
	kvfree(crat_image);
}