/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */
#include <linux/kernel.h>
#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"
/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * of APUs.
 */
static uint32_t gpu_processor_id_low = 0x80001000;
/* Return the next available gpu_processor_id and increment it for the next
 * GPU
 *	@total_cu_count - Total CUs present in the GPU, including masked CUs
 */
static inline unsigned int get_and_inc_gpu_processor_id(
				unsigned int total_cu_count)
{
	int current_id = gpu_processor_id_low;

	gpu_processor_id_low += total_cu_count;
	return current_id;
}
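
/*
 * Example (illustrative, not from the original source): with the base
 * above, the first dGPU probed with 64 total CUs receives processor IDs
 * 0x80001000..0x8000103f, and the next GPU starts at 0x80001040. The
 * counter only ever grows; IDs are not reclaimed when a GPU is removed.
 */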
/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
	uint32_t	cache_size;
	uint32_t	cache_level;
	uint32_t	flags;
	/* Indicates how many Compute Units share this cache
	 * Value = 1 indicates the cache is not shared
	 */
	uint32_t	num_cu_shared;
};
static struct kfd_gpu_cache_info kaveri_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},

	/* TODO: Add L2 Cache information */
};
static struct kfd_gpu_cache_info carrizo_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank. */
		.cache_size = 4,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},

	/* TODO: Add L2 Cache information */
};
/* NOTE: In future if more information is added to struct kfd_gpu_cache_info
 * the following ASICs may need a separate table.
 */
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
			cu->processor_id_low);
}
static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}
/* kfd_parse_subtype_cu - parse compute unit subtypes and attach it to correct
 * topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
				struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
			cu->proximity_domain, cu->hsa_capability);
	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);

			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}
static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
		struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *props;

	list_for_each_entry(props, &dev->mem_props, list) {
		if (props->heap_type == heap_type
				&& props->flags == flags
				&& props->width == width)
			return props;
	}

	return NULL;
}
/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
 * topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
		struct list_head *device_list)
{
	struct kfd_mem_properties *props;
	struct kfd_topology_device *dev;
	uint32_t heap_type;
	uint64_t size_in_bytes;
	uint32_t flags = 0;
	uint32_t width;

	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
			mem->proximity_domain);
	list_for_each_entry(dev, device_list, list) {
		if (mem->proximity_domain == dev->proximity_domain) {
			/* We're on GPU node */
			if (dev->node_props.cpu_cores_count == 0) {
				/* APU */
				if (mem->visibility_type == 0)
					heap_type =
						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
				/* dGPU */
				else
					heap_type = mem->visibility_type;
			} else
				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
				flags |= HSA_MEM_FLAGS_NON_VOLATILE;

			size_in_bytes =
				((uint64_t)mem->length_high << 32) +
				mem->length_low;
			width = mem->width;

			/* Multiple banks of the same type are aggregated into
			 * one. User mode doesn't care about multiple physical
			 * memory segments. It's managed as a single virtual
			 * heap for user mode.
			 */
			props = find_subtype_mem(heap_type, flags, width, dev);
			if (props) {
				props->size_in_bytes += size_in_bytes;
				break;
			}

			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->heap_type = heap_type;
			props->flags = flags;
			props->size_in_bytes = size_in_bytes;
			props->width = width;

			dev->node_props.mem_banks_count++;
			list_add_tail(&props->list, &dev->mem_props);

			break;
		}
	}

	return 0;
}
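
/*
 * Worked example (illustrative): a bank reported with length_high = 0x1
 * and length_low = 0x40000000 yields size_in_bytes =
 * (0x1ULL << 32) + 0x40000000 = 5 GiB. A second bank with the same
 * heap_type, flags and width is folded into the existing
 * kfd_mem_properties entry rather than creating a new one.
 */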
/* kfd_parse_subtype_cache - parse cache subtypes and attach it to correct
 * topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
			struct list_head *device_list)
{
	struct kfd_cache_properties *props;
	struct kfd_topology_device *dev;
	uint32_t id;
	uint32_t total_num_of_cu;

	id = cache->processor_id_low;

	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
	list_for_each_entry(dev, device_list, list) {
		total_num_of_cu = (dev->node_props.array_count *
					dev->node_props.cu_per_simd_array);

		/* Cache information in CRAT doesn't have proximity_domain
		 * information as it is associated with a CPU core or GPU
		 * Compute Unit. So map the cache using CPU core Id or SIMD
		 * (GPU) ID.
		 * TODO: This works because currently we can safely assume that
		 * Compute Units are parsed before caches are parsed. In the
		 * future, remove this dependency.
		 */
		if ((id >= dev->node_props.cpu_core_id_base &&
			id <= dev->node_props.cpu_core_id_base +
				dev->node_props.cpu_cores_count) ||
			(id >= dev->node_props.simd_id_base &&
			id < dev->node_props.simd_id_base +
				total_num_of_cu)) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->processor_id_low = id;
			props->cache_level = cache->cache_level;
			props->cache_size = cache->cache_size;
			props->cacheline_size = cache->cache_line_size;
			props->cachelines_per_tag = cache->lines_per_tag;
			props->cache_assoc = cache->associativity;
			props->cache_latency = cache->cache_latency;
			memcpy(props->sibling_map, cache->sibling_map,
					sizeof(props->sibling_map));

			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_DATA;
			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_CPU;
			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_HSACU;

			dev->cache_count++;
			dev->node_props.caches_count++;
			list_add_tail(&props->list, &dev->cache_props);

			break;
		}
	}

	return 0;
}
/* kfd_parse_subtype_iolink - parse iolink subtypes and attach it to correct
 * topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
					struct list_head *device_list)
{
	struct kfd_iolink_properties *props = NULL, *props2;
	struct kfd_topology_device *dev, *to_dev;
	uint32_t id_from;
	uint32_t id_to;

	id_from = iolink->proximity_domain_from;
	id_to = iolink->proximity_domain_to;

	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
			id_from, id_to);
	list_for_each_entry(dev, device_list, list) {
		if (id_from == dev->proximity_domain) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->node_from = id_from;
			props->node_to = id_to;
			props->ver_maj = iolink->version_major;
			props->ver_min = iolink->version_minor;
			props->iolink_type = iolink->io_interface_type;

			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
				props->weight = 20;
			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
				props->weight = 15 * iolink->num_hops_xgmi;
			else
				props->weight = node_distance(id_from, id_to);

			props->min_latency = iolink->minimum_latency;
			props->max_latency = iolink->maximum_latency;
			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
			props->rec_transfer_size =
					iolink->recommended_transfer_size;

			dev->io_link_count++;
			dev->node_props.io_links_count++;
			list_add_tail(&props->list, &dev->io_link_props);
			break;
		}
	}

	/* CPU topology is created before GPUs are detected, so CPU->GPU
	 * links are not built at that time. If a PCIe type is discovered, it
	 * means a GPU is detected and we are adding GPU->CPU to the topology.
	 * At this time, also add the corresponding CPU->GPU link if the GPU
	 * has a large BAR.
	 * For xGMI, the CRAT table only carries the link in one direction, so
	 * add the corresponding reversed-direction link now.
	 */
	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
		to_dev = kfd_topology_device_by_proximity_domain(id_to);
		if (!to_dev)
			return -ENODEV;
		/* same everything but the other direction */
		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
		if (!props2)
			return -ENOMEM;
		props2->node_from = id_to;
		props2->node_to = id_from;
		props2->kobj = NULL;
		to_dev->io_link_count++;
		to_dev->node_props.io_links_count++;
		list_add_tail(&props2->list, &to_dev->io_link_props);
	}

	return 0;
}
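
/*
 * Example link weights produced above (illustrative): a PCIe link gets a
 * fixed weight of 20, a two-hop xGMI link gets 15 * 2 = 30, and CPU-only
 * links fall back to the NUMA distance from node_distance(). Lower weight
 * means a closer (preferred) link in the topology.
 */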
/* kfd_parse_subtype - parse subtypes and attach it to correct topology device
 * present in the device_list
 * @sub_type_hdr - subtype section of crat_image
 * @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
				struct list_head *device_list)
{
	struct crat_subtype_computeunit *cu;
	struct crat_subtype_memory *mem;
	struct crat_subtype_cache *cache;
	struct crat_subtype_iolink *iolink;
	int ret = 0;

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	case CRAT_SUBTYPE_MEMORY_AFFINITY:
		mem = (struct crat_subtype_memory *)sub_type_hdr;
		ret = kfd_parse_subtype_mem(mem, device_list);
		break;
	case CRAT_SUBTYPE_CACHE_AFFINITY:
		cache = (struct crat_subtype_cache *)sub_type_hdr;
		ret = kfd_parse_subtype_cache(cache, device_list);
		break;
	case CRAT_SUBTYPE_TLB_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found TLB entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_IOLINK_AFFINITY:
		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
		ret = kfd_parse_subtype_iolink(iolink, device_list);
		break;
	default:
		pr_warn("Unknown subtype %d in CRAT\n",
				sub_type_hdr->type);
	}

	return ret;
}
/* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT
 * create a kfd_topology_device and add in to device_list. Also parse
 * CRAT subtypes and attach it to appropriate kfd_topology_device
 * @crat_image - input image containing CRAT
 * @device_list - [OUT] list of kfd_topology_device generated after
 *		  parsing crat_image
 * @proximity_domain - Proximity domain of the first device in the table
 *
 * Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
			 uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev = NULL;
	struct crat_subtype_generic *sub_type_hdr;
	uint16_t node_id;
	int ret = 0;
	struct crat_header *crat_table = (struct crat_header *)crat_image;
	uint16_t num_nodes;
	uint32_t image_len;

	if (!crat_image)
		return -EINVAL;

	if (!list_empty(device_list)) {
		pr_warn("Error: device list should be empty\n");
		return -EINVAL;
	}

	num_nodes = crat_table->num_domains;
	image_len = crat_table->length;

	pr_info("Parsing CRAT table with %d nodes\n", num_nodes);

	for (node_id = 0; node_id < num_nodes; node_id++) {
		top_dev = kfd_create_topology_device(device_list);
		if (!top_dev)
			break;
		top_dev->proximity_domain = proximity_domain++;
	}

	if (!top_dev) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
			CRAT_OEMTABLEID_LENGTH);
	top_dev->oem_revision = crat_table->oem_revision;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
			((char *)crat_image) + image_len) {
		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
			ret = kfd_parse_subtype(sub_type_hdr, device_list);
			if (ret)
				break;
		}

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);
	}

err:
	if (ret)
		kfd_release_topology_device_list(device_list);

	return ret;
}
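
/* Layout of a CRAT image as consumed by the parser above (sketch):
 *
 *	+----------------------+ <- crat_image
 *	| struct crat_header   |    ->length covers the whole image,
 *	+----------------------+    ->num_domains counts the nodes
 *	| subtype (CU)         |    each subtype begins with a
 *	+----------------------+    crat_subtype_generic header whose
 *	| subtype (memory)     |    ->length advances the cursor
 *	+----------------------+
 *	| ...                  |
 *	+----------------------+ <- crat_image + ->length
 */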
/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_pcache(struct crat_subtype_cache *pcache,
				struct kfd_gpu_cache_info *pcache_info,
				struct kfd_cu_info *cu_info,
				int mem_available,
				int cu_bitmask,
				int cache_type, unsigned int cu_processor_id,
				int cu_block)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_bitmask;
	cu_sibling_map_mask >>= cu_block;
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* CU could be inactive. In case of a shared cache, find the first
	 * active CU; in case of a non-shared cache, check whether the CU is
	 * inactive and skip it if so.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CUs
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);

		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
		pcache->sibling_map[1] =
				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
		pcache->sibling_map[2] =
				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
		pcache->sibling_map[3] =
				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
		return 0;
	}
	return 1;
}
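
/*
 * Worked example (illustrative): cu_bitmask = 0b1110, cu_block = 0 and
 * num_cu_shared = 4 gives:
 *	cu_sibling_map_mask = (0b1110 >> 0) & 0b1111 = 0b1110
 *	first_active_cu     = ffs(0b1110) = 2	(CU 0 is harvested)
 *	processor_id_low    = cu_processor_id + 1
 *	sibling_map[0]      = 0b0111	(mask shifted past the inactive CU)
 */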
/* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
 * tables
 *
 * @kdev - [IN] GPU device
 * @gpu_processor_id - [IN] GPU processor ID to which these caches
 *		       associate
 * @available_size - [IN] Amount of memory available in pcache
 * @cu_info - [IN] Compute Unit info obtained from KGD
 * @pcache - [OUT] memory into which cache data is to be filled in.
 * @size_filled - [OUT] amount of data used up in pcache.
 * @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
			int gpu_processor_id,
			int available_size,
			struct kfd_cu_info *cu_info,
			struct crat_subtype_cache *pcache,
			int *size_filled,
			int *num_of_entries)
{
	struct kfd_gpu_cache_info *pcache_info;
	int num_of_cache_types = 0;
	int i, j, k;
	int ct = 0;
	int mem_available = available_size;
	unsigned int cu_processor_id;
	int ret;

	switch (kdev->device_info->asic_family) {
	case CHIP_KAVERI:
		pcache_info = kaveri_cache_info;
		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
		break;
	case CHIP_HAWAII:
		pcache_info = hawaii_cache_info;
		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
		break;
	case CHIP_CARRIZO:
		pcache_info = carrizo_cache_info;
		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
		break;
	case CHIP_TONGA:
		pcache_info = tonga_cache_info;
		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
		break;
	case CHIP_FIJI:
		pcache_info = fiji_cache_info;
		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
		break;
	case CHIP_POLARIS10:
		pcache_info = polaris10_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
		break;
	case CHIP_POLARIS11:
		pcache_info = polaris11_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
		break;
	case CHIP_POLARIS12:
		pcache_info = polaris12_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
		break;
	case CHIP_VEGAM:
		pcache_info = vegam_cache_info;
		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
		pcache_info = vega10_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
		break;
	case CHIP_RAVEN:
		pcache_info = raven_cache_info;
		num_of_cache_types = ARRAY_SIZE(raven_cache_info);
		break;
	case CHIP_NAVI10:
		pcache_info = navi10_cache_info;
		num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
		break;
	default:
		return -EINVAL;
	}

	*size_filled = 0;
	*num_of_entries = 0;

	/* For each type of cache listed in the kfd_gpu_cache_info table,
	 * go through all available Compute Units.
	 * The [i,j,k] loop:
	 *	if kfd_gpu_cache_info.num_cu_shared = 1,
	 *		visits every available CU individually;
	 *	if kfd_gpu_cache_info.num_cu_shared != 1,
	 *		considers only one CU from each shared block.
	 */
	for (ct = 0; ct < num_of_cache_types; ct++) {
		cu_processor_id = gpu_processor_id;
		for (i = 0; i < cu_info->num_shader_engines; i++) {
			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
				j++) {
				for (k = 0; k < cu_info->num_cu_per_sh;
					k += pcache_info[ct].num_cu_shared) {

					ret = fill_in_pcache(pcache,
						pcache_info,
						cu_info,
						mem_available,
						cu_info->cu_bitmap[i][j],
						ct,
						cu_processor_id,
						k);

					if (ret < 0)
						break;

					if (!ret) {
						pcache++;
						(*num_of_entries)++;
						mem_available -=
							sizeof(*pcache);
						(*size_filled) +=
							sizeof(*pcache);
					}

					/* Move to next CU block */
					cu_processor_id +=
						pcache_info[ct].num_cu_shared;
				}
			}
		}
	}

	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

	return 0;
}
/*
 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 * copies CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *		crat_image will be NULL
 * @size: [OUT] size of crat_image
 *
 * Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
	struct acpi_table_header *crat_table;
	acpi_status status;
	void *pcrat_image;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Fetch the CRAT table from ACPI */
	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
	if (status == AE_NOT_FOUND) {
		pr_warn("CRAT table not found\n");
		return -ENODATA;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("CRAT table error: %s\n", err);
		return -EINVAL;
	}

	if (ignore_crat) {
		pr_info("CRAT table disabled by module option\n");
		return -ENODATA;
	}

	pcrat_image = kmemdup(crat_table, crat_table->length, GFP_KERNEL);
	if (!pcrat_image)
		return -ENOMEM;

	*crat_image = pcrat_image;
	*size = crat_table->length;

	return 0;
}
/* Memory required to create Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amounts are allocated for the CPU and GPU Virtual CRAT. This is
 * expected to cover all known conditions. But to be safe, additional checks
 * are put in the code to ensure we don't overwrite.
 */
#define VCRAT_SIZE_FOR_CPU	(2 * PAGE_SIZE)
#define VCRAT_SIZE_FOR_GPU	(3 * PAGE_SIZE)
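
/* With 4 KiB pages this reserves 8 KiB per CPU VCRAT and 12 KiB per GPU
 * VCRAT. The avail_size bookkeeping in the fill helpers below returns
 * -ENOMEM instead of writing past the allocation if an image ever
 * outgrows these sizes.
 */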
/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 *	@numa_node_id: CPU NUMA node id
 *	@avail_size: Available size in the memory
 *	@sub_type_hdr: Memory into which compute info will be filled in
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
				int proximity_domain,
				struct crat_subtype_computeunit *sub_type_hdr)
{
	const struct cpumask *cpumask;

	*avail_size -= sizeof(struct crat_subtype_computeunit);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	cpumask = cpumask_of_node(numa_node_id);

	/* Fill in CU data */
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
	if (sub_type_hdr->processor_id_low == -1)
		return -EINVAL;

	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

	return 0;
}
/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 *	@numa_node_id: CPU NUMA node id
 *	@avail_size: Available size in the memory
 *	@sub_type_hdr: Memory into which memory info will be filled in
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
			int proximity_domain,
			struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported. So
	 * the following lines are duplicated from the si_meminfo_node
	 * function.
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}
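
/*
 * Worked example (illustrative): a node managing 16 GiB on a 4 KiB page
 * kernel accumulates 4194304 pages; shifting by PAGE_SHIFT (12) gives
 * 0x400000000 bytes, reported as length_high = 0x4, length_low = 0x0.
 */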
#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
				uint32_t *num_entries,
				struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif
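
/*
 * Example (illustrative): on a four-node AMD system, each online NUMA
 * node emits three HYPERTRANSPORT iolink entries (one to every other
 * node), so the caller sees *num_entries = 3 and advances sub_type_hdr
 * by three subtype lengths.
 */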
/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 *	@pcrat_image: Fill in VCRAT for CPU
 *	@size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
		return -EINVAL;

	/* Fill in CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK)
		pr_warn("DSDT table not found for OEM information\n");
	else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += (sub_type_hdr->length * entries);
		crat_table->total_entries += entries;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length * entries);
#else
		pr_info("IO link not available for non x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add cache Subtype for CPU.
	 * Currently, CPU cache information is available in function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported and to get the same information the code needs to be
	 * duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return 0;
}
static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}
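
/*
 * Example (illustrative): a dGPU with 8 GiB of VRAM behind a small BAR
 * reports a single FB_PRIVATE bank of 8 GiB, while the same board with a
 * large BAR (all VRAM CPU-visible) reports one FB_PUBLIC bank instead;
 * see the two call sites in kfd_create_vcrat_image_gpu() below.
 */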
/* kfd_fill_gpu_direct_io_link_to_cpu - Fill in direct io link from GPU
 * to its NUMA node
 *	@avail_size: Available size in the memory
 *	@kdev - [IN] GPU device
 *	@sub_type_hdr: Memory into which io link info will be filled in
 *	@proximity_domain - proximity domain of the GPU node
 *
 *	Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
			struct kfd_dev *kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
	if (kfd_dev_is_large_bar(kdev))
		sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	/* Fill in IOLINK subtype.
	 * TODO: Fill-in other fields of iolink subtype
	 */
	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
	sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_NUMA
	if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
		sub_type_hdr->proximity_domain_to = 0;
	else
		sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
	sub_type_hdr->proximity_domain_to = 0;
#endif
	return 0;
}
static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
			struct kfd_dev *kdev,
			struct kfd_dev *peer_kdev,
			struct crat_subtype_iolink *sub_type_hdr,
			uint32_t proximity_domain_from,
			uint32_t proximity_domain_to)
{
	*avail_size -= sizeof(struct crat_subtype_iolink);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

	sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
			       CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

	sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
	sub_type_hdr->proximity_domain_from = proximity_domain_from;
	sub_type_hdr->proximity_domain_to = proximity_domain_to;
	sub_type_hdr->num_hops_xgmi =
		amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);

	return 0;
}
/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
 *
 *	@pcrat_image: Fill in VCRAT for GPU
 *	@size:	[IN] allocated size of crat_image.
 *		[OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
				      size_t *size, struct kfd_dev *kdev,
				      uint32_t proximity_domain)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct crat_subtype_generic *sub_type_hdr;
	struct kfd_local_mem_info local_mem_info;
	struct kfd_topology_device *peer_dev;
	struct crat_subtype_computeunit *cu;
	struct kfd_cu_info cu_info;
	int avail_size = *size;
	uint32_t total_num_of_cu;
	int num_of_cache_entries = 0;
	int cache_mem_filled = 0;
	uint32_t nid = 0;
	int ret = 0;

	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
		return -EINVAL;

	/* Fill the CRAT Header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));

	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	/* Change length as we add more subtypes */
	crat_table->length = sizeof(struct crat_header);
	crat_table->num_domains = 1;
	crat_table->total_entries = 0;
	/* Fill in Subtype: Compute Unit
	 * First fill in the sub type header and then sub type data
	 */
	avail_size -= sizeof(struct crat_subtype_computeunit);
	if (avail_size < 0)
		return -ENOMEM;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill CU subtype data */
	cu = (struct crat_subtype_computeunit *)sub_type_hdr;
	cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
	cu->proximity_domain = proximity_domain;

	amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
	cu->num_simd_per_cu = cu_info.simd_per_cu;
	cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
	cu->max_waves_simd = cu_info.max_waves_per_simd;

	cu->wave_front_size = cu_info.wave_front_size;
	cu->array_count = cu_info.num_shader_arrays_per_engine *
			cu_info.num_shader_engines;
	total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
	cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
	cu->num_cu_per_array = cu_info.num_cu_per_sh;
	cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
	cu->num_banks = cu_info.num_shader_engines;
	cu->lds_size_in_kb = cu_info.lds_size;

	cu->hsa_capability = 0;

	/* Check if this node supports IOMMU. During parsing this flag will
	 * translate to HSA_CAP_ATS_PRESENT
	 */
	if (!kfd_iommu_check_device(kdev))
		cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;
	/* Fill in Subtype: Memory. Only on systems with large BAR (no
	 * private FB), report memory as public. On other systems
	 * report the total FB size (public+private) as a single
	 * private heap.
	 */
	amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);

	if (debug_largebar)
		local_mem_info.local_mem_size_private = 0;

	if (local_mem_info.local_mem_size_private == 0)
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
				local_mem_info.local_mem_size_public,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	else
		ret = kfd_fill_gpu_memory_affinity(&avail_size,
				kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
				local_mem_info.local_mem_size_public +
				local_mem_info.local_mem_size_private,
				(struct crat_subtype_memory *)sub_type_hdr,
				proximity_domain,
				&local_mem_info);
	if (ret < 0)
		return ret;

	crat_table->length += sizeof(struct crat_subtype_memory);
	crat_table->total_entries++;

	/* TODO: Fill in cache information. This information is NOT readily
	 * available in KGD
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			sub_type_hdr->length);
	ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
				avail_size,
				&cu_info,
				(struct crat_subtype_cache *)sub_type_hdr,
				&cache_mem_filled,
				&num_of_cache_entries);

	if (ret < 0)
		return ret;

	crat_table->length += cache_mem_filled;
	crat_table->total_entries += num_of_cache_entries;
	avail_size -= cache_mem_filled;
	/* Fill in Subtype: IO_LINKS
	 * Only direct links are added here, i.e. the link from the GPU to
	 * its NUMA node. Indirect links are added by userspace.
	 */
	sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
			cache_mem_filled);
	ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
		(struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

	if (ret < 0)
		return ret;

	crat_table->length += sub_type_hdr->length;
	crat_table->total_entries++;
	/* Fill in Subtype: IO_LINKS
	 * Direct links from this GPU to other GPUs through xGMI.
	 * We loop over the GPUs that have already been processed (those with
	 * a lower proximity_domain value) and add a link for every GPU that
	 * shares this GPU's hive id (from this GPU to the other GPU). The
	 * reversed iolink (from the other GPU to this GPU) is added in
	 * kfd_parse_subtype_iolink.
	 */
	if (kdev->hive_id) {
		for (nid = 0; nid < proximity_domain; ++nid) {
			peer_dev = kfd_topology_device_by_proximity_domain(nid);
			if (!peer_dev->gpu)
				continue;
			if (peer_dev->gpu->hive_id != kdev->hive_id)
				continue;
			sub_type_hdr = (typeof(sub_type_hdr))(
				(char *)sub_type_hdr +
				sizeof(struct crat_subtype_iolink));
			ret = kfd_fill_gpu_xgmi_link_to_gpu(
				&avail_size, kdev, peer_dev->gpu,
				(struct crat_subtype_iolink *)sub_type_hdr,
				proximity_domain, nid);
			if (ret < 0)
				return ret;
			crat_table->length += sub_type_hdr->length;
			crat_table->total_entries++;
		}
	}

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for GPU\n");

	return ret;
}
/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
 *		creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *	@crat_image: VCRAT image created because ACPI does not have a
 *		     CRAT for this device
 *	@size: [OUT] size of virtual crat_image
 *	@flags:	COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *		COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *		(COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *			-- this option is not currently implemented.
 *			The assumption is that all AMD APUs will have CRAT
 *	@kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *
 *	Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
				  int flags, struct kfd_dev *kdev,
				  uint32_t proximity_domain)
{
	void *pcrat_image = NULL;
	int ret = 0;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Allocate one VCRAT_SIZE_FOR_CPU for CPU virtual CRAT image and
	 * VCRAT_SIZE_FOR_GPU for GPU virtual CRAT image. This should cover
	 * all the current conditions. A check is put not to overwrite beyond
	 * allocated size
	 */
	switch (flags) {
	case COMPUTE_UNIT_CPU:
		pcrat_image = kmalloc(VCRAT_SIZE_FOR_CPU, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = VCRAT_SIZE_FOR_CPU;
		ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
		break;
	case COMPUTE_UNIT_GPU:
		if (!kdev)
			return -EINVAL;
		pcrat_image = kmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
		if (!pcrat_image)
			return -ENOMEM;
		*size = VCRAT_SIZE_FOR_GPU;
		ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
						 proximity_domain);
		break;
	case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
		/* TODO: */
		ret = -EINVAL;
		pr_err("VCRAT not implemented for APU\n");
		break;
	default:
		ret = -EINVAL;
	}

	if (!ret)
		*crat_image = pcrat_image;
	else
		kfree(pcrat_image);

	return ret;
}
/* kfd_destroy_crat_image
 * @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 */
void kfd_destroy_crat_image(void *crat_image)
{
	kfree(crat_image);
}
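
/*
 * Typical call sequence (sketch, not verbatim from kfd_topology.c):
 * prefer the firmware CRAT from ACPI and fall back to a virtual image.
 *
 *	void *image = NULL;
 *	size_t size = 0;
 *	LIST_HEAD(devices);
 *
 *	if (kfd_create_crat_image_acpi(&image, &size))
 *		kfd_create_crat_image_virtual(&image, &size,
 *					      COMPUTE_UNIT_CPU, NULL, 0);
 *	if (image)
 *		kfd_parse_crat_table(image, &devices, 0);
 *	kfd_destroy_crat_image(image);
 *
 * After parsing, the device list holds one kfd_topology_device per
 * proximity domain described by the table.
 */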