drivers/gpu/drm/amd/amdkfd/kfd_crat.c
/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"

/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the next
 * GPU
 *      @total_cu_count - Total CUs present in the GPU including ones
 *                        masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
                                unsigned int total_cu_count)
{
        int current_id = gpu_processor_id_low;

        gpu_processor_id_low += total_cu_count;
        return current_id;
}
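
/* Example: with the base above, the first GPU registered with 64 total CUs
 * gets processor_id_low 0x80001000 and advances the base to 0x80001040,
 * where the next GPU's IDs will start.
 */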

/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
        uint32_t        cache_size;
        uint32_t        cache_level;
        uint32_t        flags;
        /* Indicates how many Compute Units share this cache
         * Value = 1 indicates the cache is not shared
         */
        uint32_t        num_cu_shared;
};

static struct kfd_gpu_cache_info kaveri_cache_info[] = {
        {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 1,
        },
        {
                /* Scalar L1 Instruction Cache (in SQC module) per bank */
                .cache_size = 16,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 2,
        },
        {
                /* Scalar L1 Data Cache (in SQC module) per bank */
                .cache_size = 8,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 2,
        },

        /* TODO: Add L2 Cache information */
};

static struct kfd_gpu_cache_info carrizo_cache_info[] = {
        {
                /* TCP L1 Cache per CU */
                .cache_size = 16,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 1,
        },
        {
                /* Scalar L1 Instruction Cache (in SQC module) per bank */
                .cache_size = 8,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_INST_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 4,
        },
        {
                /* Scalar L1 Data Cache (in SQC module) per bank */
                .cache_size = 4,
                .cache_level = 1,
                .flags = (CRAT_CACHE_FLAGS_ENABLED |
                                CRAT_CACHE_FLAGS_DATA_CACHE |
                                CRAT_CACHE_FLAGS_SIMD_CACHE),
                .num_cu_shared = 4,
        },

        /* TODO: Add L2 Cache information */
};

/* NOTE: In the future, if more information is added to struct
 * kfd_gpu_cache_info, the following ASICs may need a separate table.
 */
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info  carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info
#define vangogh_cache_info carrizo_cache_info

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
                struct crat_subtype_computeunit *cu)
{
        dev->node_props.cpu_cores_count = cu->num_cpu_cores;
        dev->node_props.cpu_core_id_base = cu->processor_id_low;
        if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
                dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

        pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
                        cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
                struct crat_subtype_computeunit *cu)
{
        dev->node_props.simd_id_base = cu->processor_id_low;
        dev->node_props.simd_count = cu->num_simd_cores;
        dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
        dev->node_props.max_waves_per_simd = cu->max_waves_simd;
        dev->node_props.wave_front_size = cu->wave_front_size;
        dev->node_props.array_count = cu->array_count;
        dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
        dev->node_props.simd_per_cu = cu->num_simd_per_cu;
        dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
        if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
                dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
        pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
                                struct list_head *device_list)
{
        struct kfd_topology_device *dev;

        pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
                        cu->proximity_domain, cu->hsa_capability);
        list_for_each_entry(dev, device_list, list) {
                if (cu->proximity_domain == dev->proximity_domain) {
                        if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
                                kfd_populated_cu_info_cpu(dev, cu);

                        if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
                                kfd_populated_cu_info_gpu(dev, cu);
                        break;
                }
        }

        return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
                struct kfd_topology_device *dev)
{
        struct kfd_mem_properties *props;

        list_for_each_entry(props, &dev->mem_props, list) {
                if (props->heap_type == heap_type
                                && props->flags == flags
                                && props->width == width)
                        return props;
        }

        return NULL;
}

/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
                                struct list_head *device_list)
{
        struct kfd_mem_properties *props;
        struct kfd_topology_device *dev;
        uint32_t heap_type;
        uint64_t size_in_bytes;
        uint32_t flags = 0;
        uint32_t width;

        pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
                        mem->proximity_domain);
        list_for_each_entry(dev, device_list, list) {
                if (mem->proximity_domain == dev->proximity_domain) {
                        /* We're on GPU node */
                        if (dev->node_props.cpu_cores_count == 0) {
                                /* APU */
                                if (mem->visibility_type == 0)
                                        heap_type =
                                                HSA_MEM_HEAP_TYPE_FB_PRIVATE;
                                /* dGPU */
                                else
                                        heap_type = mem->visibility_type;
                        } else
                                heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

                        if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
                                flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
                        if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
                                flags |= HSA_MEM_FLAGS_NON_VOLATILE;

                        size_in_bytes =
                                ((uint64_t)mem->length_high << 32) +
                                                        mem->length_low;
                        width = mem->width;

                        /* Multiple banks of the same type are aggregated into
                         * one. User mode doesn't care about multiple physical
                         * memory segments. It's managed as a single virtual
                         * heap for user mode.
                         */
                        props = find_subtype_mem(heap_type, flags, width, dev);
                        if (props) {
                                props->size_in_bytes += size_in_bytes;
                                break;
                        }

                        props = kfd_alloc_struct(props);
                        if (!props)
                                return -ENOMEM;

                        props->heap_type = heap_type;
                        props->flags = flags;
                        props->size_in_bytes = size_in_bytes;
                        props->width = width;

                        dev->node_props.mem_banks_count++;
                        list_add_tail(&props->list, &dev->mem_props);

                        break;
                }
        }

        return 0;
}

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
                        struct list_head *device_list)
{
        struct kfd_cache_properties *props;
        struct kfd_topology_device *dev;
        uint32_t id;
        uint32_t total_num_of_cu;

        id = cache->processor_id_low;

        pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
        list_for_each_entry(dev, device_list, list) {
                total_num_of_cu = (dev->node_props.array_count *
                                        dev->node_props.cu_per_simd_array);

                /* Cache information in CRAT doesn't have proximity_domain
                 * information as it is associated with a CPU core or GPU
                 * Compute Unit. So map the cache using the CPU core ID or
                 * SIMD (GPU) ID.
                 * TODO: This works because currently we can safely assume
                 *  that Compute Units are parsed before caches. In the
                 *  future, remove this dependency.
                 */
                if ((id >= dev->node_props.cpu_core_id_base &&
                        id <= dev->node_props.cpu_core_id_base +
                                dev->node_props.cpu_cores_count) ||
                        (id >= dev->node_props.simd_id_base &&
                        id < dev->node_props.simd_id_base +
                                total_num_of_cu)) {
                        props = kfd_alloc_struct(props);
                        if (!props)
                                return -ENOMEM;

                        props->processor_id_low = id;
                        props->cache_level = cache->cache_level;
                        props->cache_size = cache->cache_size;
                        props->cacheline_size = cache->cache_line_size;
                        props->cachelines_per_tag = cache->lines_per_tag;
                        props->cache_assoc = cache->associativity;
                        props->cache_latency = cache->cache_latency;
                        memcpy(props->sibling_map, cache->sibling_map,
                                        sizeof(props->sibling_map));

                        if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_DATA;
                        if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
                        if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_CPU;
                        if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
                                props->cache_type |= HSA_CACHE_TYPE_HSACU;

                        dev->cache_count++;
                        dev->node_props.caches_count++;
                        list_add_tail(&props->list, &dev->cache_props);

                        break;
                }
        }

        return 0;
}

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
                                        struct list_head *device_list)
{
        struct kfd_iolink_properties *props = NULL, *props2;
        struct kfd_topology_device *dev, *to_dev;
        uint32_t id_from;
        uint32_t id_to;

        id_from = iolink->proximity_domain_from;
        id_to = iolink->proximity_domain_to;

        pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
                        id_from, id_to);
        list_for_each_entry(dev, device_list, list) {
                if (id_from == dev->proximity_domain) {
                        props = kfd_alloc_struct(props);
                        if (!props)
                                return -ENOMEM;

                        props->node_from = id_from;
                        props->node_to = id_to;
                        props->ver_maj = iolink->version_major;
                        props->ver_min = iolink->version_minor;
                        props->iolink_type = iolink->io_interface_type;

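                        /* Pick a relative link weight: a fixed cost for
                         * PCIe, a per-hop cost for xGMI, and the NUMA
                         * distance otherwise.
                         */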
                        if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
                                props->weight = 20;
                        else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
                                props->weight = 15 * iolink->num_hops_xgmi;
                        else
                                props->weight = node_distance(id_from, id_to);

                        props->min_latency = iolink->minimum_latency;
                        props->max_latency = iolink->maximum_latency;
                        props->min_bandwidth = iolink->minimum_bandwidth_mbs;
                        props->max_bandwidth = iolink->maximum_bandwidth_mbs;
                        props->rec_transfer_size =
                                        iolink->recommended_transfer_size;

                        dev->io_link_count++;
                        dev->node_props.io_links_count++;
                        list_add_tail(&props->list, &dev->io_link_props);
                        break;
                }
        }

        /* CPU topology is created before GPUs are detected, so CPU->GPU
         * links are not built at that time. If a PCIe type is discovered, it
         * means a GPU is detected and we are adding the GPU->CPU link to the
         * topology. At this time, also add the corresponding CPU->GPU link
         * if the GPU has a large BAR.
         * For xGMI, only one direction of the link is present in the CRAT
         * table; add the corresponding reverse-direction link now.
         */
        if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
                to_dev = kfd_topology_device_by_proximity_domain(id_to);
                if (!to_dev)
                        return -ENODEV;
                /* same everything but the other direction */
                props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
                if (!props2)
                        return -ENOMEM;
                props2->node_from = id_to;
                props2->node_to = id_from;
                props2->kobj = NULL;
                to_dev->io_link_count++;
                to_dev->node_props.io_links_count++;
                list_add_tail(&props2->list, &to_dev->io_link_props);
        }

        return 0;
}

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 *      @sub_type_hdr - subtype section of crat_image
 *      @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
                                struct list_head *device_list)
{
        struct crat_subtype_computeunit *cu;
        struct crat_subtype_memory *mem;
        struct crat_subtype_cache *cache;
        struct crat_subtype_iolink *iolink;
        int ret = 0;

        switch (sub_type_hdr->type) {
        case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
                cu = (struct crat_subtype_computeunit *)sub_type_hdr;
                ret = kfd_parse_subtype_cu(cu, device_list);
                break;
        case CRAT_SUBTYPE_MEMORY_AFFINITY:
                mem = (struct crat_subtype_memory *)sub_type_hdr;
                ret = kfd_parse_subtype_mem(mem, device_list);
                break;
        case CRAT_SUBTYPE_CACHE_AFFINITY:
                cache = (struct crat_subtype_cache *)sub_type_hdr;
                ret = kfd_parse_subtype_cache(cache, device_list);
                break;
        case CRAT_SUBTYPE_TLB_AFFINITY:
                /*
                 * For now, nothing to do here
                 */
                pr_debug("Found TLB entry in CRAT table (not processing)\n");
                break;
        case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
                /*
                 * For now, nothing to do here
                 */
                pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
                break;
        case CRAT_SUBTYPE_IOLINK_AFFINITY:
                iolink = (struct crat_subtype_iolink *)sub_type_hdr;
                ret = kfd_parse_subtype_iolink(iolink, device_list);
                break;
        default:
                pr_warn("Unknown subtype %d in CRAT\n",
                                sub_type_hdr->type);
        }

        return ret;
}

/* kfd_parse_crat_table - parse the CRAT table. For each node present in the
 * CRAT, create a kfd_topology_device and add it to device_list. Also parse
 * the CRAT subtypes and attach them to the appropriate kfd_topology_device
 *      @crat_image - input image containing CRAT
 *      @device_list - [OUT] list of kfd_topology_device generated after
 *                     parsing crat_image
 *      @proximity_domain - Proximity domain of the first device in the table
 *
 *      Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
                         uint32_t proximity_domain)
{
        struct kfd_topology_device *top_dev = NULL;
        struct crat_subtype_generic *sub_type_hdr;
        uint16_t node_id;
        int ret = 0;
        struct crat_header *crat_table = (struct crat_header *)crat_image;
        uint16_t num_nodes;
        uint32_t image_len;

        if (!crat_image)
                return -EINVAL;

        if (!list_empty(device_list)) {
                pr_warn("Error: device list should be empty\n");
                return -EINVAL;
        }

        num_nodes = crat_table->num_domains;
        image_len = crat_table->length;

        pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);

        for (node_id = 0; node_id < num_nodes; node_id++) {
                top_dev = kfd_create_topology_device(device_list);
                if (!top_dev)
                        break;
                top_dev->proximity_domain = proximity_domain++;
        }

        if (!top_dev) {
                ret = -ENOMEM;
                goto err;
        }

        memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
        memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
                        CRAT_OEMTABLEID_LENGTH);
        top_dev->oem_revision = crat_table->oem_revision;

        sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
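        /* Subtypes are laid out back to back right after the CRAT header;
         * walk them using each subtype's self-reported length until the end
         * of the image.
         */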
        while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
                        ((char *)crat_image) + image_len) {
                if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
                        ret = kfd_parse_subtype(sub_type_hdr, device_list);
                        if (ret)
                                break;
                }

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                                sub_type_hdr->length);
        }

err:
        if (ret)
                kfd_release_topology_device_list(device_list);

        return ret;
}

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
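/* Returns 0 if an entry was filled in, 1 if the CU block is inactive and the
 * entry was skipped, or -ENOMEM if no room is left in pcache.
 */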
static int fill_in_pcache(struct crat_subtype_cache *pcache,
                                struct kfd_gpu_cache_info *pcache_info,
                                struct kfd_cu_info *cu_info,
                                int mem_available,
                                int cu_bitmask,
                                int cache_type, unsigned int cu_processor_id,
                                int cu_block)
{
        unsigned int cu_sibling_map_mask;
        int first_active_cu;

        /* First check if enough memory is available */
        if (sizeof(struct crat_subtype_cache) > mem_available)
                return -ENOMEM;

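        /* Example: with cu_bitmask = 0xF0, cu_block = 4 and
         * num_cu_shared = 4, the mask below becomes (0xF0 >> 4) & 0xF = 0xF
         * and ffs() returns 1, i.e. the first CU of the block is active.
         */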
        cu_sibling_map_mask = cu_bitmask;
        cu_sibling_map_mask >>= cu_block;
        cu_sibling_map_mask &=
                ((1 << pcache_info[cache_type].num_cu_shared) - 1);
        first_active_cu = ffs(cu_sibling_map_mask);

        /* A CU could be inactive. In case of a shared cache, find the first
         * active CU; in case of a non-shared cache, check whether the CU is
         * inactive and, if so, skip it.
         */
        if (first_active_cu) {
                memset(pcache, 0, sizeof(struct crat_subtype_cache));
                pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
                pcache->length = sizeof(struct crat_subtype_cache);
                pcache->flags = pcache_info[cache_type].flags;
                pcache->processor_id_low = cu_processor_id
                                         + (first_active_cu - 1);
                pcache->cache_level = pcache_info[cache_type].cache_level;
                pcache->cache_size = pcache_info[cache_type].cache_size;

                /* Sibling map is w.r.t processor_id_low, so shift out
                 * inactive CU
                 */
                cu_sibling_map_mask =
                        cu_sibling_map_mask >> (first_active_cu - 1);

                pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
                pcache->sibling_map[1] =
                                (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
                pcache->sibling_map[2] =
                                (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
                pcache->sibling_map[3] =
                                (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
                return 0;
        }
        return 1;
}

/* kfd_fill_gpu_cache_info - Fill GPU cache info using the kfd_gpu_cache_info
 * tables
 *
 *      @kdev - [IN] GPU device
 *      @gpu_processor_id - [IN] GPU processor ID with which these caches
 *                          are associated
 *      @available_size - [IN] Amount of memory available in pcache
 *      @cu_info - [IN] Compute Unit info obtained from KGD
 *      @pcache - [OUT] memory into which cache data is to be filled in.
 *      @size_filled - [OUT] amount of data used up in pcache.
 *      @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
                        int gpu_processor_id,
                        int available_size,
                        struct kfd_cu_info *cu_info,
                        struct crat_subtype_cache *pcache,
                        int *size_filled,
                        int *num_of_entries)
{
        struct kfd_gpu_cache_info *pcache_info;
        int num_of_cache_types = 0;
        int i, j, k;
        int ct = 0;
        int mem_available = available_size;
        unsigned int cu_processor_id;
        int ret;

        switch (kdev->device_info->asic_family) {
        case CHIP_KAVERI:
                pcache_info = kaveri_cache_info;
                num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
                break;
        case CHIP_HAWAII:
                pcache_info = hawaii_cache_info;
                num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
                break;
        case CHIP_CARRIZO:
                pcache_info = carrizo_cache_info;
                num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
                break;
        case CHIP_TONGA:
                pcache_info = tonga_cache_info;
                num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
                break;
        case CHIP_FIJI:
                pcache_info = fiji_cache_info;
                num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
                break;
        case CHIP_POLARIS10:
                pcache_info = polaris10_cache_info;
                num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
                break;
        case CHIP_POLARIS11:
                pcache_info = polaris11_cache_info;
                num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
                break;
        case CHIP_POLARIS12:
                pcache_info = polaris12_cache_info;
                num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
                break;
        case CHIP_VEGAM:
                pcache_info = vegam_cache_info;
                num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
                break;
        case CHIP_VEGA10:
        case CHIP_VEGA12:
        case CHIP_VEGA20:
        case CHIP_ARCTURUS:
                pcache_info = vega10_cache_info;
                num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
                break;
        case CHIP_RAVEN:
                pcache_info = raven_cache_info;
                num_of_cache_types = ARRAY_SIZE(raven_cache_info);
                break;
        case CHIP_RENOIR:
                pcache_info = renoir_cache_info;
                num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
                break;
        case CHIP_NAVI10:
        case CHIP_NAVI12:
        case CHIP_NAVI14:
        case CHIP_SIENNA_CICHLID:
        case CHIP_NAVY_FLOUNDER:
        case CHIP_DIMGREY_CAVEFISH:
                pcache_info = navi10_cache_info;
                num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
                break;
        case CHIP_VANGOGH:
                pcache_info = vangogh_cache_info;
                num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
                break;
        default:
                return -EINVAL;
        }

        *size_filled = 0;
        *num_of_entries = 0;

        /* For each cache type listed in the kfd_gpu_cache_info table, go
         * through all available Compute Units.
         * If kfd_gpu_cache_info.num_cu_shared == 1, the [i,j,k] loop below
         * visits every CU individually; otherwise it considers only one CU
         * per shared unit.
         */

        for (ct = 0; ct < num_of_cache_types; ct++) {
                cu_processor_id = gpu_processor_id;
                for (i = 0; i < cu_info->num_shader_engines; i++) {
                        for (j = 0; j < cu_info->num_shader_arrays_per_engine;
                                j++) {
                                for (k = 0; k < cu_info->num_cu_per_sh;
                                        k += pcache_info[ct].num_cu_shared) {

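                                        /* cu_bitmap is a 4x4 array of
                                         * per-SH masks; shader engines
                                         * beyond the fourth wrap onto the
                                         * next column, hence the
                                         * [i % 4][j + i / 4] indexing.
                                         */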
                                        ret = fill_in_pcache(pcache,
                                                pcache_info,
                                                cu_info,
                                                mem_available,
                                                cu_info->cu_bitmap[i % 4][j + i / 4],
                                                ct,
                                                cu_processor_id,
                                                k);

                                        if (ret < 0)
                                                break;

                                        if (!ret) {
                                                pcache++;
                                                (*num_of_entries)++;
                                                mem_available -=
                                                        sizeof(*pcache);
                                                (*size_filled) +=
                                                        sizeof(*pcache);
                                        }

                                        /* Move to next CU block */
                                        cu_processor_id +=
                                                pcache_info[ct].num_cu_shared;
                                }
                        }
                }
        }

        pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

        return 0;
}

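/* Ignore the ACPI CRAT when the ignore_crat module option is set, and
 * always when IOMMUv2 support is compiled out; a virtual CRAT is created
 * instead.
 */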
static bool kfd_ignore_crat(void)
{
        bool ret;

        if (ignore_crat)
                return true;

#ifndef KFD_SUPPORT_IOMMU_V2
        ret = true;
#else
        ret = false;
#endif

        return ret;
}

/*
 * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
 * copies CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *      @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
 *                   crat_image will be NULL
 *      @size: [OUT] size of crat_image
 *
 *      Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
        struct acpi_table_header *crat_table;
        acpi_status status;
        void *pcrat_image;
        int rc = 0;

        if (!crat_image)
                return -EINVAL;

        *crat_image = NULL;

        if (kfd_ignore_crat()) {
                pr_info("CRAT table disabled by module option\n");
                return -ENODATA;
        }

        /* Fetch the CRAT table from ACPI */
        status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
        if (status == AE_NOT_FOUND) {
                pr_warn("CRAT table not found\n");
                return -ENODATA;
        } else if (ACPI_FAILURE(status)) {
                const char *err = acpi_format_exception(status);

                pr_err("CRAT table error: %s\n", err);
                return -EINVAL;
        }

        pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
        if (!pcrat_image) {
                rc = -ENOMEM;
                goto out;
        }

        memcpy(pcrat_image, crat_table, crat_table->length);
        *crat_image = pcrat_image;
        *size = crat_table->length;
out:
        acpi_put_table(crat_table);
        return rc;
}

/* Memory required to create the Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amount is allocated for the GPU Virtual CRAT. This is
 * expected to cover all known conditions. To be safe, an additional check is
 * put in the code to ensure we don't overwrite beyond the allocation.
 */
#define VCRAT_SIZE_FOR_GPU      (4 * PAGE_SIZE)

/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 *      @numa_node_id: CPU NUMA node id
 *      @avail_size: Available size in the memory
 *      @sub_type_hdr: Memory into which compute info will be filled in
 *
 *      Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
                                int proximity_domain,
                                struct crat_subtype_computeunit *sub_type_hdr)
{
        const struct cpumask *cpumask;

        *avail_size -= sizeof(struct crat_subtype_computeunit);
        if (*avail_size < 0)
                return -ENOMEM;

        memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

        /* Fill in subtype header data */
        sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
        sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

        cpumask = cpumask_of_node(numa_node_id);

        /* Fill in CU data */
        sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
        sub_type_hdr->proximity_domain = proximity_domain;
        sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
        if (sub_type_hdr->processor_id_low == -1)
                return -EINVAL;

        sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

        return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 *      @numa_node_id: CPU NUMA node id
 *      @avail_size: Available size in the memory
 *      @sub_type_hdr: Memory into which memory info will be filled in
 *
 *      Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
                        int proximity_domain,
                        struct crat_subtype_memory *sub_type_hdr)
{
        uint64_t mem_in_bytes = 0;
        pg_data_t *pgdat;
        int zone_type;

        *avail_size -= sizeof(struct crat_subtype_memory);
        if (*avail_size < 0)
                return -ENOMEM;

        memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

        /* Fill in subtype header data */
        sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_memory);
        sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

        /* Fill in Memory Subunit data */

        /* Unlike si_meminfo, si_meminfo_node is not exported. So
         * the following lines are duplicated from si_meminfo_node
         * function
         */
        pgdat = NODE_DATA(numa_node_id);
        for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
                mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
        mem_in_bytes <<= PAGE_SHIFT;

        sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
        sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
        sub_type_hdr->proximity_domain = proximity_domain;

        return 0;
}

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
                                uint32_t *num_entries,
                                struct crat_subtype_iolink *sub_type_hdr)
{
        int nid;
        struct cpuinfo_x86 *c = &cpu_data(0);
        uint8_t link_type;

        if (c->x86_vendor == X86_VENDOR_AMD)
                link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
        else
                link_type = CRAT_IOLINK_TYPE_QPI_1_1;

        *num_entries = 0;

        /* Create IO links from this node to other CPU nodes */
        for_each_online_node(nid) {
                if (nid == numa_node_id) /* node itself */
                        continue;

                *avail_size -= sizeof(struct crat_subtype_iolink);
                if (*avail_size < 0)
                        return -ENOMEM;

                memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

                /* Fill in subtype header data */
                sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
                sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
                sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

                /* Fill in IO link data */
                sub_type_hdr->proximity_domain_from = numa_node_id;
                sub_type_hdr->proximity_domain_to = nid;
                sub_type_hdr->io_interface_type = link_type;

                (*num_entries)++;
                sub_type_hdr++;
        }

        return 0;
}
#endif

/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 *      @pcrat_image: Fill in VCRAT for CPU
 *      @size:  [IN] allocated size of crat_image.
 *              [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
        struct crat_header *crat_table = (struct crat_header *)pcrat_image;
        struct acpi_table_header *acpi_table;
        acpi_status status;
        struct crat_subtype_generic *sub_type_hdr;
        int avail_size = *size;
        int numa_node_id;
#ifdef CONFIG_X86_64
        uint32_t entries = 0;
#endif
        int ret = 0;

        if (!pcrat_image)
                return -EINVAL;

        /* Fill in CRAT Header.
         * Modify length and total_entries as subunits are added.
         */
        avail_size -= sizeof(struct crat_header);
        if (avail_size < 0)
                return -ENOMEM;

        memset(crat_table, 0, sizeof(struct crat_header));
        memcpy(&crat_table->signature, CRAT_SIGNATURE,
                        sizeof(crat_table->signature));
        crat_table->length = sizeof(struct crat_header);

        status = acpi_get_table("DSDT", 0, &acpi_table);
        if (status != AE_OK)
                pr_warn("DSDT table not found for OEM information\n");
        else {
                crat_table->oem_revision = acpi_table->revision;
                memcpy(crat_table->oem_id, acpi_table->oem_id,
                                CRAT_OEMID_LENGTH);
                memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
                                CRAT_OEMTABLEID_LENGTH);
                acpi_put_table(acpi_table);
        }
        crat_table->total_entries = 0;
        crat_table->num_domains = 0;

        sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

        for_each_online_node(numa_node_id) {
                if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
                        continue;

                /* Fill in Subtype: Compute Unit */
                ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
                        crat_table->num_domains,
                        (struct crat_subtype_computeunit *)sub_type_hdr);
                if (ret < 0)
                        return ret;
                crat_table->length += sub_type_hdr->length;
                crat_table->total_entries++;

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);

                /* Fill in Subtype: Memory */
                ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
                        crat_table->num_domains,
                        (struct crat_subtype_memory *)sub_type_hdr);
                if (ret < 0)
                        return ret;
                crat_table->length += sub_type_hdr->length;
                crat_table->total_entries++;

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);

                /* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
                ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
                                &entries,
                                (struct crat_subtype_iolink *)sub_type_hdr);
                if (ret < 0)
                        return ret;
                crat_table->length += (sub_type_hdr->length * entries);
                crat_table->total_entries += entries;

                sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                                sub_type_hdr->length * entries);
#else
                pr_info("IO link not available for non-x86 platforms\n");
#endif

                crat_table->num_domains++;
        }

        /* TODO: Add cache Subtype for CPU.
         * Currently, CPU cache information is available in function
         * detect_cache_attributes(cpu) defined in the file
         * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
         * exported and to get the same information the code needs to be
         * duplicated.
         */

        *size = crat_table->length;
        pr_info("Virtual CRAT table created for CPU\n");

        return 0;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
                struct kfd_dev *kdev, uint8_t type, uint64_t size,
                struct crat_subtype_memory *sub_type_hdr,
                uint32_t proximity_domain,
                const struct kfd_local_mem_info *local_mem_info)
{
        *avail_size -= sizeof(struct crat_subtype_memory);
        if (*avail_size < 0)
                return -ENOMEM;

        memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
        sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_memory);
        sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

        sub_type_hdr->proximity_domain = proximity_domain;

        pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
                        type, size);

        sub_type_hdr->length_low = lower_32_bits(size);
        sub_type_hdr->length_high = upper_32_bits(size);

        sub_type_hdr->width = local_mem_info->vram_width;
        sub_type_hdr->visibility_type = type;

        return 0;
}

/* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link from a GPU
 * to its NUMA node
 *      @avail_size: Available size in the memory
 *      @kdev - [IN] GPU device
 *      @sub_type_hdr: Memory into which io link info will be filled in
 *      @proximity_domain - proximity domain of the GPU node
 *
 *      Return 0 if successful else return -ve value
 */
static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
                        struct kfd_dev *kdev,
                        struct crat_subtype_iolink *sub_type_hdr,
                        uint32_t proximity_domain)
{
        *avail_size -= sizeof(struct crat_subtype_iolink);
        if (*avail_size < 0)
                return -ENOMEM;

        memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

        /* Fill in subtype header data */
        sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
        sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
        if (kfd_dev_is_large_bar(kdev))
                sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

        /* Fill in IOLINK subtype.
         * TODO: Fill-in other fields of iolink subtype
         */
        sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
        sub_type_hdr->proximity_domain_from = proximity_domain;
#ifdef CONFIG_NUMA
        if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
                sub_type_hdr->proximity_domain_to = 0;
        else
                sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
#else
        sub_type_hdr->proximity_domain_to = 0;
#endif
        return 0;
}

static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
                        struct kfd_dev *kdev,
                        struct kfd_dev *peer_kdev,
                        struct crat_subtype_iolink *sub_type_hdr,
                        uint32_t proximity_domain_from,
                        uint32_t proximity_domain_to)
{
        *avail_size -= sizeof(struct crat_subtype_iolink);
        if (*avail_size < 0)
                return -ENOMEM;

        memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

        sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
        sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
                               CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;

        sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
        sub_type_hdr->proximity_domain_from = proximity_domain_from;
        sub_type_hdr->proximity_domain_to = proximity_domain_to;
        sub_type_hdr->num_hops_xgmi =
                amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
        return 0;
}

/* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
 *
 *      @pcrat_image: Fill in VCRAT for GPU
 *      @size:  [IN] allocated size of crat_image.
 *              [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_gpu(void *pcrat_image,
                                      size_t *size, struct kfd_dev *kdev,
                                      uint32_t proximity_domain)
{
        struct crat_header *crat_table = (struct crat_header *)pcrat_image;
        struct crat_subtype_generic *sub_type_hdr;
        struct kfd_local_mem_info local_mem_info;
        struct kfd_topology_device *peer_dev;
        struct crat_subtype_computeunit *cu;
        struct kfd_cu_info cu_info;
        int avail_size = *size;
        uint32_t total_num_of_cu;
        int num_of_cache_entries = 0;
        int cache_mem_filled = 0;
        uint32_t nid = 0;
        int ret = 0;

        if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
                return -EINVAL;

        /* Fill the CRAT Header.
         * Modify length and total_entries as subunits are added.
         */
        avail_size -= sizeof(struct crat_header);
        if (avail_size < 0)
                return -ENOMEM;

        memset(crat_table, 0, sizeof(struct crat_header));

        memcpy(&crat_table->signature, CRAT_SIGNATURE,
                        sizeof(crat_table->signature));
        /* Change length as we add more subtypes*/
        crat_table->length = sizeof(struct crat_header);
        crat_table->num_domains = 1;
        crat_table->total_entries = 0;

        /* Fill in Subtype: Compute Unit
         * First fill in the sub type header and then sub type data
         */
        avail_size -= sizeof(struct crat_subtype_computeunit);
        if (avail_size < 0)
                return -ENOMEM;

        sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
        memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

        sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
        sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
        sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

        /* Fill CU subtype data */
        cu = (struct crat_subtype_computeunit *)sub_type_hdr;
        cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
        cu->proximity_domain = proximity_domain;

        amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
        cu->num_simd_per_cu = cu_info.simd_per_cu;
        cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
        cu->max_waves_simd = cu_info.max_waves_per_simd;

        cu->wave_front_size = cu_info.wave_front_size;
        cu->array_count = cu_info.num_shader_arrays_per_engine *
                cu_info.num_shader_engines;
        total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
        cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
        cu->num_cu_per_array = cu_info.num_cu_per_sh;
        cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
        cu->num_banks = cu_info.num_shader_engines;
        cu->lds_size_in_kb = cu_info.lds_size;

        cu->hsa_capability = 0;

        /* Check if this node supports IOMMU. During parsing this flag will
         * translate to HSA_CAP_ATS_PRESENT
         */
        if (!kfd_iommu_check_device(kdev))
                cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;

        crat_table->length += sub_type_hdr->length;
        crat_table->total_entries++;

        /* Fill in Subtype: Memory. Only on systems with large BAR (no
         * private FB), report memory as public. On other systems
         * report the total FB size (public+private) as a single
         * private heap.
         */
        amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                        sub_type_hdr->length);

        if (debug_largebar)
                local_mem_info.local_mem_size_private = 0;

        if (local_mem_info.local_mem_size_private == 0)
                ret = kfd_fill_gpu_memory_affinity(&avail_size,
                                kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
                                local_mem_info.local_mem_size_public,
                                (struct crat_subtype_memory *)sub_type_hdr,
                                proximity_domain,
                                &local_mem_info);
        else
                ret = kfd_fill_gpu_memory_affinity(&avail_size,
                                kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
                                local_mem_info.local_mem_size_public +
                                local_mem_info.local_mem_size_private,
                                (struct crat_subtype_memory *)sub_type_hdr,
                                proximity_domain,
                                &local_mem_info);
        if (ret < 0)
                return ret;

        crat_table->length += sizeof(struct crat_subtype_memory);
        crat_table->total_entries++;

        /* TODO: Fill in cache information. This information is NOT readily
         * available in KGD
         */
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                sub_type_hdr->length);
        ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
                                avail_size,
                                &cu_info,
                                (struct crat_subtype_cache *)sub_type_hdr,
                                &cache_mem_filled,
                                &num_of_cache_entries);

        if (ret < 0)
                return ret;

        crat_table->length += cache_mem_filled;
        crat_table->total_entries += num_of_cache_entries;
        avail_size -= cache_mem_filled;

        /* Fill in Subtype: IO_LINKS
         * Only direct links are added here, i.e. the link from the GPU
         * to its NUMA node. Indirect links are added by userspace.
         */
        sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
                cache_mem_filled);
        ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
                (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);

        if (ret < 0)
                return ret;

        crat_table->length += sub_type_hdr->length;
        crat_table->total_entries++;

        /* Fill in Subtype: IO_LINKS
         * Direct links from this GPU to other GPUs through xGMI.
         * We loop over GPUs that have already been processed (with a lower
         * value of proximity_domain) and add a link for each GPU in the
         * same hive (from this GPU to the other GPU). The reversed iolink
         * (from the other GPU to this GPU) will be added
         * in kfd_parse_subtype_iolink.
         */
        if (kdev->hive_id) {
                for (nid = 0; nid < proximity_domain; ++nid) {
                        peer_dev = kfd_topology_device_by_proximity_domain(nid);
                        if (!peer_dev || !peer_dev->gpu)
                                continue;
                        if (peer_dev->gpu->hive_id != kdev->hive_id)
                                continue;
                        sub_type_hdr = (typeof(sub_type_hdr))(
                                (char *)sub_type_hdr +
                                sizeof(struct crat_subtype_iolink));
                        ret = kfd_fill_gpu_xgmi_link_to_gpu(
                                &avail_size, kdev, peer_dev->gpu,
                                (struct crat_subtype_iolink *)sub_type_hdr,
                                proximity_domain, nid);
                        if (ret < 0)
                                return ret;
                        crat_table->length += sub_type_hdr->length;
                        crat_table->total_entries++;
                }
        }
        *size = crat_table->length;
        pr_info("Virtual CRAT table created for GPU\n");

        return ret;
}

/* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
 *              creates a Virtual CRAT (VCRAT) image
 *
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 *      @crat_image: VCRAT image created because ACPI does not have a
 *                   CRAT for this device
 *      @size: [OUT] size of virtual crat_image
 *      @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
 *              COMPUTE_UNIT_GPU - Create VCRAT for GPU
 *              (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
 *                      -- this option is not currently implemented.
 *                      The assumption is that all AMD APUs will have CRAT
 *      @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
 *
 *      Return 0 if successful else return -ve value
 */
int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
                                  int flags, struct kfd_dev *kdev,
                                  uint32_t proximity_domain)
{
        void *pcrat_image = NULL;
        int ret = 0, num_nodes;
        size_t dyn_size;

        if (!crat_image)
                return -EINVAL;

        *crat_image = NULL;

        /* Allocate the CPU Virtual CRAT size based on the number of online
         * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT image.
         * This should cover all the current conditions. A check is put in
         * place so we do not overwrite beyond the allocated size for GPUs.
         */
        switch (flags) {
        case COMPUTE_UNIT_CPU:
                num_nodes = num_online_nodes();
                dyn_size = sizeof(struct crat_header) +
                        num_nodes * (sizeof(struct crat_subtype_computeunit) +
                        sizeof(struct crat_subtype_memory) +
                        (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
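                /* e.g. a 2-node system needs the header plus, per node, one
                 * CU subtype, one memory subtype and one iolink subtype to
                 * the other node.
                 */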
                pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
                *size = dyn_size;
                pr_debug("CRAT size is %zu", dyn_size);
                ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
                break;
        case COMPUTE_UNIT_GPU:
                if (!kdev)
                        return -EINVAL;
                pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
                if (!pcrat_image)
                        return -ENOMEM;
                *size = VCRAT_SIZE_FOR_GPU;
                ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
                                                 proximity_domain);
                break;
        case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
                /* TODO: */
                ret = -EINVAL;
                pr_err("VCRAT not implemented for APU\n");
                break;
        default:
                ret = -EINVAL;
        }

        if (!ret)
                *crat_image = pcrat_image;
        else
                kvfree(pcrat_image);

        return ret;
}

/* kfd_destroy_crat_image
 *
 *      @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
 *
 */
void kfd_destroy_crat_image(void *crat_image)
{
        kvfree(crat_image);
}