drivers/gpu/drm/amd/amdkfd/kfd_crat.c
1 /*
2  * Copyright 2015-2017 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  */
22
23 #include <linux/pci.h>
24 #include <linux/acpi.h>
25 #include "kfd_crat.h"
26 #include "kfd_priv.h"
27 #include "kfd_topology.h"
28 #include "kfd_iommu.h"
29 #include "amdgpu.h"
30 #include "amdgpu_amdkfd.h"
31
32 /* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
33  * GPU processor IDs are expressed with Bit[31]=1.
34  * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
35  * used in the CRAT.
36  */
37 static uint32_t gpu_processor_id_low = 0x80001000;
38
39 /* Return the next available gpu_processor_id and increment it for the next GPU
40  *      @total_cu_count - Total CUs present in the GPU including ones
41  *                        masked off
42  */
43 static inline unsigned int get_and_inc_gpu_processor_id(
44                                 unsigned int total_cu_count)
45 {
46         int current_id = gpu_processor_id_low;
47
48         gpu_processor_id_low += total_cu_count;
49         return current_id;
50 }
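
/*
 * Example (editorial sketch with hypothetical CU counts): two dGPUs with
 * 64 CUs each receive contiguous, non-overlapping ID ranges:
 *
 *      id0 = get_and_inc_gpu_processor_id(64);  // returns 0x80001000
 *      id1 = get_and_inc_gpu_processor_id(64);  // returns 0x80001040
 *
 * Each GPU thus owns one processor ID per CU, starting at its base.
 */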
51
52 /* Static table to describe GPU Cache information */
53 struct kfd_gpu_cache_info {
54         uint32_t        cache_size;
55         uint32_t        cache_level;
56         uint32_t        flags;
57         /* Indicates how many Compute Units share this cache
58          * within a SA. Value = 1 indicates the cache is not shared
59          */
60         uint32_t        num_cu_shared;
61 };
62
63 static struct kfd_gpu_cache_info kaveri_cache_info[] = {
64         {
65                 /* TCP L1 Cache per CU */
66                 .cache_size = 16,
67                 .cache_level = 1,
68                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
69                                 CRAT_CACHE_FLAGS_DATA_CACHE |
70                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
71                 .num_cu_shared = 1,
72         },
73         {
74                 /* Scalar L1 Instruction Cache (in SQC module) per bank */
75                 .cache_size = 16,
76                 .cache_level = 1,
77                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
78                                 CRAT_CACHE_FLAGS_INST_CACHE |
79                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
80                 .num_cu_shared = 2,
81         },
82         {
83                 /* Scalar L1 Data Cache (in SQC module) per bank */
84                 .cache_size = 8,
85                 .cache_level = 1,
86                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
87                                 CRAT_CACHE_FLAGS_DATA_CACHE |
88                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
89                 .num_cu_shared = 2,
90         },
91
92         /* TODO: Add L2 Cache information */
93 };
94
95
96 static struct kfd_gpu_cache_info carrizo_cache_info[] = {
97         {
98                 /* TCP L1 Cache per CU */
99                 .cache_size = 16,
100                 .cache_level = 1,
101                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
102                                 CRAT_CACHE_FLAGS_DATA_CACHE |
103                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
104                 .num_cu_shared = 1,
105         },
106         {
107                 /* Scalar L1 Instruction Cache (in SQC module) per bank */
108                 .cache_size = 8,
109                 .cache_level = 1,
110                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
111                                 CRAT_CACHE_FLAGS_INST_CACHE |
112                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
113                 .num_cu_shared = 4,
114         },
115         {
116                 /* Scalar L1 Data Cache (in SQC module) per bank. */
117                 .cache_size = 4,
118                 .cache_level = 1,
119                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
120                                 CRAT_CACHE_FLAGS_DATA_CACHE |
121                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
122                 .num_cu_shared = 4,
123         },
124
125         /* TODO: Add L2 Cache information */
126 };
127
128 #define hawaii_cache_info kaveri_cache_info
129 #define tonga_cache_info carrizo_cache_info
130 #define fiji_cache_info  carrizo_cache_info
131 #define polaris10_cache_info carrizo_cache_info
132 #define polaris11_cache_info carrizo_cache_info
133 #define polaris12_cache_info carrizo_cache_info
134 #define vegam_cache_info carrizo_cache_info
135
136 /* NOTE: L1 cache information has been updated and L2/L3
137  * cache information has been added for Vega10 and
138  * newer ASICs. The unit for cache_size is KiB.
139  * Going forward, cache details must be checked and
140  * updated for every new ASIC.
141  */
142
143 static struct kfd_gpu_cache_info vega10_cache_info[] = {
144         {
145                 /* TCP L1 Cache per CU */
146                 .cache_size = 16,
147                 .cache_level = 1,
148                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
149                                 CRAT_CACHE_FLAGS_DATA_CACHE |
150                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
151                 .num_cu_shared = 1,
152         },
153         {
154                 /* Scalar L1 Instruction Cache per SQC */
155                 .cache_size = 32,
156                 .cache_level = 1,
157                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
158                                 CRAT_CACHE_FLAGS_INST_CACHE |
159                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
160                 .num_cu_shared = 3,
161         },
162         {
163                 /* Scalar L1 Data Cache per SQC */
164                 .cache_size = 16,
165                 .cache_level = 1,
166                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
167                                 CRAT_CACHE_FLAGS_DATA_CACHE |
168                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
169                 .num_cu_shared = 3,
170         },
171         {
172                 /* L2 Data Cache per GPU (Total Tex Cache) */
173                 .cache_size = 4096,
174                 .cache_level = 2,
175                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
176                                 CRAT_CACHE_FLAGS_DATA_CACHE |
177                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
178                 .num_cu_shared = 16,
179         },
180 };
181
182 static struct kfd_gpu_cache_info raven_cache_info[] = {
183         {
184                 /* TCP L1 Cache per CU */
185                 .cache_size = 16,
186                 .cache_level = 1,
187                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
188                                 CRAT_CACHE_FLAGS_DATA_CACHE |
189                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
190                 .num_cu_shared = 1,
191         },
192         {
193                 /* Scalar L1 Instruction Cache per SQC */
194                 .cache_size = 32,
195                 .cache_level = 1,
196                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
197                                 CRAT_CACHE_FLAGS_INST_CACHE |
198                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
199                 .num_cu_shared = 3,
200         },
201         {
202                 /* Scalar L1 Data Cache per SQC */
203                 .cache_size = 16,
204                 .cache_level = 1,
205                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
206                                 CRAT_CACHE_FLAGS_DATA_CACHE |
207                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
208                 .num_cu_shared = 3,
209         },
210         {
211                 /* L2 Data Cache per GPU (Total Tex Cache) */
212                 .cache_size = 1024,
213                 .cache_level = 2,
214                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
215                                 CRAT_CACHE_FLAGS_DATA_CACHE |
216                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
217                 .num_cu_shared = 11,
218         },
219 };
220
221 static struct kfd_gpu_cache_info renoir_cache_info[] = {
222         {
223                 /* TCP L1 Cache per CU */
224                 .cache_size = 16,
225                 .cache_level = 1,
226                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
227                                 CRAT_CACHE_FLAGS_DATA_CACHE |
228                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
229                 .num_cu_shared = 1,
230         },
231         {
232                 /* Scalar L1 Instruction Cache per SQC */
233                 .cache_size = 32,
234                 .cache_level = 1,
235                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
236                                 CRAT_CACHE_FLAGS_INST_CACHE |
237                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
238                 .num_cu_shared = 3,
239         },
240         {
241                 /* Scalar L1 Data Cache per SQC */
242                 .cache_size = 16,
243                 .cache_level = 1,
244                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
245                                 CRAT_CACHE_FLAGS_DATA_CACHE |
246                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
247                 .num_cu_shared = 3,
248         },
249         {
250                 /* L2 Data Cache per GPU (Total Tex Cache) */
251                 .cache_size = 1024,
252                 .cache_level = 2,
253                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
254                                 CRAT_CACHE_FLAGS_DATA_CACHE |
255                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
256                 .num_cu_shared = 8,
257         },
258 };
259
260 static struct kfd_gpu_cache_info vega12_cache_info[] = {
261         {
262                 /* TCP L1 Cache per CU */
263                 .cache_size = 16,
264                 .cache_level = 1,
265                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
266                                 CRAT_CACHE_FLAGS_DATA_CACHE |
267                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
268                 .num_cu_shared = 1,
269         },
270         {
271                 /* Scalar L1 Instruction Cache per SQC */
272                 .cache_size = 32,
273                 .cache_level = 1,
274                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
275                                 CRAT_CACHE_FLAGS_INST_CACHE |
276                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
277                 .num_cu_shared = 3,
278         },
279         {
280                 /* Scalar L1 Data Cache per SQC */
281                 .cache_size = 16,
282                 .cache_level = 1,
283                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
284                                 CRAT_CACHE_FLAGS_DATA_CACHE |
285                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
286                 .num_cu_shared = 3,
287         },
288         {
289                 /* L2 Data Cache per GPU (Total Tex Cache) */
290                 .cache_size = 2048,
291                 .cache_level = 2,
292                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
293                                 CRAT_CACHE_FLAGS_DATA_CACHE |
294                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
295                 .num_cu_shared = 5,
296         },
297 };
298
299 static struct kfd_gpu_cache_info vega20_cache_info[] = {
300         {
301                 /* TCP L1 Cache per CU */
302                 .cache_size = 16,
303                 .cache_level = 1,
304                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
305                                 CRAT_CACHE_FLAGS_DATA_CACHE |
306                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
307                 .num_cu_shared = 1,
308         },
309         {
310                 /* Scalar L1 Instruction Cache per SQC */
311                 .cache_size = 32,
312                 .cache_level = 1,
313                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
314                                 CRAT_CACHE_FLAGS_INST_CACHE |
315                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
316                 .num_cu_shared = 3,
317         },
318         {
319                 /* Scalar L1 Data Cache per SQC */
320                 .cache_size = 16,
321                 .cache_level = 1,
322                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
323                                 CRAT_CACHE_FLAGS_DATA_CACHE |
324                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
325                 .num_cu_shared = 3,
326         },
327         {
328                 /* L2 Data Cache per GPU (Total Tex Cache) */
329                 .cache_size = 8192,
330                 .cache_level = 2,
331                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
332                                 CRAT_CACHE_FLAGS_DATA_CACHE |
333                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
334                 .num_cu_shared = 16,
335         },
336 };
337
338 static struct kfd_gpu_cache_info aldebaran_cache_info[] = {
339         {
340                 /* TCP L1 Cache per CU */
341                 .cache_size = 16,
342                 .cache_level = 1,
343                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
344                                 CRAT_CACHE_FLAGS_DATA_CACHE |
345                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
346                 .num_cu_shared = 1,
347         },
348         {
349                 /* Scalar L1 Instruction Cache per SQC */
350                 .cache_size = 32,
351                 .cache_level = 1,
352                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
353                                 CRAT_CACHE_FLAGS_INST_CACHE |
354                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
355                 .num_cu_shared = 2,
356         },
357         {
358                 /* Scalar L1 Data Cache per SQC */
359                 .cache_size = 16,
360                 .cache_level = 1,
361                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
362                                 CRAT_CACHE_FLAGS_DATA_CACHE |
363                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
364                 .num_cu_shared = 2,
365         },
366         {
367                 /* L2 Data Cache per GPU (Total Tex Cache) */
368                 .cache_size = 8192,
369                 .cache_level = 2,
370                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
371                                 CRAT_CACHE_FLAGS_DATA_CACHE |
372                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
373                 .num_cu_shared = 14,
374         },
375 };
376
377 static struct kfd_gpu_cache_info navi10_cache_info[] = {
378         {
379                 /* TCP L1 Cache per CU */
380                 .cache_size = 16,
381                 .cache_level = 1,
382                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
383                                 CRAT_CACHE_FLAGS_DATA_CACHE |
384                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
385                 .num_cu_shared = 1,
386         },
387         {
388                 /* Scalar L1 Instruction Cache per SQC */
389                 .cache_size = 32,
390                 .cache_level = 1,
391                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
392                                 CRAT_CACHE_FLAGS_INST_CACHE |
393                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
394                 .num_cu_shared = 2,
395         },
396         {
397                 /* Scalar L1 Data Cache per SQC */
398                 .cache_size = 16,
399                 .cache_level = 1,
400                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
401                                 CRAT_CACHE_FLAGS_DATA_CACHE |
402                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
403                 .num_cu_shared = 2,
404         },
405         {
406                 /* GL1 Data Cache per SA */
407                 .cache_size = 128,
408                 .cache_level = 1,
409                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
410                                 CRAT_CACHE_FLAGS_DATA_CACHE |
411                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
412                 .num_cu_shared = 10,
413         },
414         {
415                 /* L2 Data Cache per GPU (Total Tex Cache) */
416                 .cache_size = 4096,
417                 .cache_level = 2,
418                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
419                                 CRAT_CACHE_FLAGS_DATA_CACHE |
420                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
421                 .num_cu_shared = 10,
422         },
423 };
424
425 static struct kfd_gpu_cache_info vangogh_cache_info[] = {
426         {
427                 /* TCP L1 Cache per CU */
428                 .cache_size = 16,
429                 .cache_level = 1,
430                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
431                                 CRAT_CACHE_FLAGS_DATA_CACHE |
432                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
433                 .num_cu_shared = 1,
434         },
435         {
436                 /* Scalar L1 Instruction Cache per SQC */
437                 .cache_size = 32,
438                 .cache_level = 1,
439                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
440                                 CRAT_CACHE_FLAGS_INST_CACHE |
441                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
442                 .num_cu_shared = 2,
443         },
444         {
445                 /* Scalar L1 Data Cache per SQC */
446                 .cache_size = 16,
447                 .cache_level = 1,
448                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
449                                 CRAT_CACHE_FLAGS_DATA_CACHE |
450                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
451                 .num_cu_shared = 2,
452         },
453         {
454                 /* GL1 Data Cache per SA */
455                 .cache_size = 128,
456                 .cache_level = 1,
457                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
458                                 CRAT_CACHE_FLAGS_DATA_CACHE |
459                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
460                 .num_cu_shared = 8,
461         },
462         {
463                 /* L2 Data Cache per GPU (Total Tex Cache) */
464                 .cache_size = 1024,
465                 .cache_level = 2,
466                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
467                                 CRAT_CACHE_FLAGS_DATA_CACHE |
468                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
469                 .num_cu_shared = 8,
470         },
471 };
472
473 static struct kfd_gpu_cache_info navi14_cache_info[] = {
474         {
475                 /* TCP L1 Cache per CU */
476                 .cache_size = 16,
477                 .cache_level = 1,
478                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
479                                 CRAT_CACHE_FLAGS_DATA_CACHE |
480                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
481                 .num_cu_shared = 1,
482         },
483         {
484                 /* Scalar L1 Instruction Cache per SQC */
485                 .cache_size = 32,
486                 .cache_level = 1,
487                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
488                                 CRAT_CACHE_FLAGS_INST_CACHE |
489                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
490                 .num_cu_shared = 2,
491         },
492         {
493                 /* Scalar L1 Data Cache per SQC */
494                 .cache_size = 16,
495                 .cache_level = 1,
496                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
497                                 CRAT_CACHE_FLAGS_DATA_CACHE |
498                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
499                 .num_cu_shared = 2,
500         },
501         {
502                 /* GL1 Data Cache per SA */
503                 .cache_size = 128,
504                 .cache_level = 1,
505                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
506                                 CRAT_CACHE_FLAGS_DATA_CACHE |
507                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
508                 .num_cu_shared = 12,
509         },
510         {
511                 /* L2 Data Cache per GPU (Total Tex Cache) */
512                 .cache_size = 2048,
513                 .cache_level = 2,
514                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
515                                 CRAT_CACHE_FLAGS_DATA_CACHE |
516                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
517                 .num_cu_shared = 12,
518         },
519 };
520
521 static struct kfd_gpu_cache_info sienna_cichlid_cache_info[] = {
522         {
523                 /* TCP L1 Cache per CU */
524                 .cache_size = 16,
525                 .cache_level = 1,
526                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
527                                 CRAT_CACHE_FLAGS_DATA_CACHE |
528                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
529                 .num_cu_shared = 1,
530         },
531         {
532                 /* Scalar L1 Instruction Cache per SQC */
533                 .cache_size = 32,
534                 .cache_level = 1,
535                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
536                                 CRAT_CACHE_FLAGS_INST_CACHE |
537                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
538                 .num_cu_shared = 2,
539         },
540         {
541                 /* Scalar L1 Data Cache per SQC */
542                 .cache_size = 16,
543                 .cache_level = 1,
544                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
545                                 CRAT_CACHE_FLAGS_DATA_CACHE |
546                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
547                 .num_cu_shared = 2,
548         },
549         {
550                 /* GL1 Data Cache per SA */
551                 .cache_size = 128,
552                 .cache_level = 1,
553                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
554                                 CRAT_CACHE_FLAGS_DATA_CACHE |
555                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
556                 .num_cu_shared = 10,
557         },
558         {
559                 /* L2 Data Cache per GPU (Total Tex Cache) */
560                 .cache_size = 4096,
561                 .cache_level = 2,
562                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
563                                 CRAT_CACHE_FLAGS_DATA_CACHE |
564                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
565                 .num_cu_shared = 10,
566         },
567         {
568                 /* L3 Data Cache per GPU */
569                 .cache_size = 128*1024,
570                 .cache_level = 3,
571                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
572                                 CRAT_CACHE_FLAGS_DATA_CACHE |
573                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
574                 .num_cu_shared = 10,
575         },
576 };
577
578 static struct kfd_gpu_cache_info navy_flounder_cache_info[] = {
579         {
580                 /* TCP L1 Cache per CU */
581                 .cache_size = 16,
582                 .cache_level = 1,
583                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
584                                 CRAT_CACHE_FLAGS_DATA_CACHE |
585                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
586                 .num_cu_shared = 1,
587         },
588         {
589                 /* Scalar L1 Instruction Cache per SQC */
590                 .cache_size = 32,
591                 .cache_level = 1,
592                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
593                                 CRAT_CACHE_FLAGS_INST_CACHE |
594                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
595                 .num_cu_shared = 2,
596         },
597         {
598                 /* Scalar L1 Data Cache per SQC */
599                 .cache_size = 16,
600                 .cache_level = 1,
601                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
602                                 CRAT_CACHE_FLAGS_DATA_CACHE |
603                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
604                 .num_cu_shared = 2,
605         },
606         {
607                 /* GL1 Data Cache per SA */
608                 .cache_size = 128,
609                 .cache_level = 1,
610                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
611                                 CRAT_CACHE_FLAGS_DATA_CACHE |
612                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
613                 .num_cu_shared = 10,
614         },
615         {
616                 /* L2 Data Cache per GPU (Total Tex Cache) */
617                 .cache_size = 3072,
618                 .cache_level = 2,
619                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
620                                 CRAT_CACHE_FLAGS_DATA_CACHE |
621                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
622                 .num_cu_shared = 10,
623         },
624         {
625                 /* L3 Data Cache per GPU */
626                 .cache_size = 96*1024,
627                 .cache_level = 3,
628                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
629                                 CRAT_CACHE_FLAGS_DATA_CACHE |
630                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
631                 .num_cu_shared = 10,
632         },
633 };
634
635 static struct kfd_gpu_cache_info dimgrey_cavefish_cache_info[] = {
636         {
637                 /* TCP L1 Cache per CU */
638                 .cache_size = 16,
639                 .cache_level = 1,
640                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
641                                 CRAT_CACHE_FLAGS_DATA_CACHE |
642                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
643                 .num_cu_shared = 1,
644         },
645         {
646                 /* Scalar L1 Instruction Cache per SQC */
647                 .cache_size = 32,
648                 .cache_level = 1,
649                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
650                                 CRAT_CACHE_FLAGS_INST_CACHE |
651                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
652                 .num_cu_shared = 2,
653         },
654         {
655                 /* Scalar L1 Data Cache per SQC */
656                 .cache_size = 16,
657                 .cache_level = 1,
658                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
659                                 CRAT_CACHE_FLAGS_DATA_CACHE |
660                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
661                 .num_cu_shared = 2,
662         },
663         {
664                 /* GL1 Data Cache per SA */
665                 .cache_size = 128,
666                 .cache_level = 1,
667                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
668                                 CRAT_CACHE_FLAGS_DATA_CACHE |
669                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
670                 .num_cu_shared = 8,
671         },
672         {
673                 /* L2 Data Cache per GPU (Total Tex Cache) */
674                 .cache_size = 2048,
675                 .cache_level = 2,
676                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
677                                 CRAT_CACHE_FLAGS_DATA_CACHE |
678                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
679                 .num_cu_shared = 8,
680         },
681         {
682                 /* L3 Data Cache per GPU */
683                 .cache_size = 32*1024,
684                 .cache_level = 3,
685                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
686                                 CRAT_CACHE_FLAGS_DATA_CACHE |
687                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
688                 .num_cu_shared = 8,
689         },
690 };
691
692 static struct kfd_gpu_cache_info beige_goby_cache_info[] = {
693         {
694                 /* TCP L1 Cache per CU */
695                 .cache_size = 16,
696                 .cache_level = 1,
697                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
698                                 CRAT_CACHE_FLAGS_DATA_CACHE |
699                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
700                 .num_cu_shared = 1,
701         },
702         {
703                 /* Scalar L1 Instruction Cache per SQC */
704                 .cache_size = 32,
705                 .cache_level = 1,
706                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
707                                 CRAT_CACHE_FLAGS_INST_CACHE |
708                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
709                 .num_cu_shared = 2,
710         },
711         {
712                 /* Scalar L1 Data Cache per SQC */
713                 .cache_size = 16,
714                 .cache_level = 1,
715                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
716                                 CRAT_CACHE_FLAGS_DATA_CACHE |
717                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
718                 .num_cu_shared = 2,
719         },
720         {
721                 /* GL1 Data Cache per SA */
722                 .cache_size = 128,
723                 .cache_level = 1,
724                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
725                                 CRAT_CACHE_FLAGS_DATA_CACHE |
726                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
727                 .num_cu_shared = 8,
728         },
729         {
730                 /* L2 Data Cache per GPU (Total Tex Cache) */
731                 .cache_size = 1024,
732                 .cache_level = 2,
733                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
734                                 CRAT_CACHE_FLAGS_DATA_CACHE |
735                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
736                 .num_cu_shared = 8,
737         },
738         {
739                 /* L3 Data Cache per GPU */
740                 .cache_size = 16*1024,
741                 .cache_level = 3,
742                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
743                                 CRAT_CACHE_FLAGS_DATA_CACHE |
744                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
745                 .num_cu_shared = 8,
746         },
747 };
748
749 static struct kfd_gpu_cache_info yellow_carp_cache_info[] = {
750         {
751                 /* TCP L1 Cache per CU */
752                 .cache_size = 16,
753                 .cache_level = 1,
754                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
755                                 CRAT_CACHE_FLAGS_DATA_CACHE |
756                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
757                 .num_cu_shared = 1,
758         },
759         {
760                 /* Scalar L1 Instruction Cache per SQC */
761                 .cache_size = 32,
762                 .cache_level = 1,
763                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
764                                 CRAT_CACHE_FLAGS_INST_CACHE |
765                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
766                 .num_cu_shared = 2,
767         },
768         {
769                 /* Scalar L1 Data Cache per SQC */
770                 .cache_size = 16,
771                 .cache_level = 1,
772                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
773                                 CRAT_CACHE_FLAGS_DATA_CACHE |
774                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
775                 .num_cu_shared = 2,
776         },
777         {
778                 /* GL1 Data Cache per SA */
779                 .cache_size = 128,
780                 .cache_level = 1,
781                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
782                                 CRAT_CACHE_FLAGS_DATA_CACHE |
783                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
784                 .num_cu_shared = 6,
785         },
786         {
787                 /* L2 Data Cache per GPU (Total Tex Cache) */
788                 .cache_size = 2048,
789                 .cache_level = 2,
790                 .flags = (CRAT_CACHE_FLAGS_ENABLED |
791                                 CRAT_CACHE_FLAGS_DATA_CACHE |
792                                 CRAT_CACHE_FLAGS_SIMD_CACHE),
793                 .num_cu_shared = 6,
794         },
795 };
796
797 static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
798                 struct crat_subtype_computeunit *cu)
799 {
800         dev->node_props.cpu_cores_count = cu->num_cpu_cores;
801         dev->node_props.cpu_core_id_base = cu->processor_id_low;
802         if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
803                 dev->node_props.capability |= HSA_CAP_ATS_PRESENT;
804
805         pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
806                         cu->processor_id_low);
807 }
808
809 static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
810                 struct crat_subtype_computeunit *cu)
811 {
812         dev->node_props.simd_id_base = cu->processor_id_low;
813         dev->node_props.simd_count = cu->num_simd_cores;
814         dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
815         dev->node_props.max_waves_per_simd = cu->max_waves_simd;
816         dev->node_props.wave_front_size = cu->wave_front_size;
817         dev->node_props.array_count = cu->array_count;
818         dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
819         dev->node_props.simd_per_cu = cu->num_simd_per_cu;
820         dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
821         if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
822                 dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
823         pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
824 }
825
826 /* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
827  * correct topology device present in the device_list
828  */
829 static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
830                                 struct list_head *device_list)
831 {
832         struct kfd_topology_device *dev;
833
834         pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
835                         cu->proximity_domain, cu->hsa_capability);
836         list_for_each_entry(dev, device_list, list) {
837                 if (cu->proximity_domain == dev->proximity_domain) {
838                         if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
839                                 kfd_populated_cu_info_cpu(dev, cu);
840
841                         if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
842                                 kfd_populated_cu_info_gpu(dev, cu);
843                         break;
844                 }
845         }
846
847         return 0;
848 }
849
850 static struct kfd_mem_properties *
851 find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
852                 struct kfd_topology_device *dev)
853 {
854         struct kfd_mem_properties *props;
855
856         list_for_each_entry(props, &dev->mem_props, list) {
857                 if (props->heap_type == heap_type
858                                 && props->flags == flags
859                                 && props->width == width)
860                         return props;
861         }
862
863         return NULL;
864 }
865 /* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
866  * correct topology device present in the device_list
867  */
868 static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
869                                 struct list_head *device_list)
870 {
871         struct kfd_mem_properties *props;
872         struct kfd_topology_device *dev;
873         uint32_t heap_type;
874         uint64_t size_in_bytes;
875         uint32_t flags = 0;
876         uint32_t width;
877
878         pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
879                         mem->proximity_domain);
880         list_for_each_entry(dev, device_list, list) {
881                 if (mem->proximity_domain == dev->proximity_domain) {
882                         /* We're on GPU node */
883                         if (dev->node_props.cpu_cores_count == 0) {
884                                 /* APU */
885                                 if (mem->visibility_type == 0)
886                                         heap_type =
887                                                 HSA_MEM_HEAP_TYPE_FB_PRIVATE;
888                                 /* dGPU */
889                                 else
890                                         heap_type = mem->visibility_type;
891                         } else
892                                 heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
893
894                         if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
895                                 flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
896                         if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
897                                 flags |= HSA_MEM_FLAGS_NON_VOLATILE;
898
899                         size_in_bytes =
900                                 ((uint64_t)mem->length_high << 32) +
901                                                         mem->length_low;
902                         width = mem->width;
903
904                         /* Multiple banks of the same type are aggregated into
905                          * one. User mode doesn't care about multiple physical
906                          * memory segments. It's managed as a single virtual
907                          * heap for user mode.
908                          */
909                         props = find_subtype_mem(heap_type, flags, width, dev);
910                         if (props) {
911                                 props->size_in_bytes += size_in_bytes;
912                                 break;
913                         }
914
915                         props = kfd_alloc_struct(props);
916                         if (!props)
917                                 return -ENOMEM;
918
919                         props->heap_type = heap_type;
920                         props->flags = flags;
921                         props->size_in_bytes = size_in_bytes;
922                         props->width = width;
923
924                         dev->node_props.mem_banks_count++;
925                         list_add_tail(&props->list, &dev->mem_props);
926
927                         break;
928                 }
929         }
930
931         return 0;
932 }
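
/*
 * Worked example (hypothetical values) for the size computation above: a
 * 6 GiB bank reported as length_high = 0x1, length_low = 0x80000000 is
 * recombined as
 *
 *      size_in_bytes = ((uint64_t)0x1 << 32) + 0x80000000
 *                    = 0x180000000 = 6442450944 bytes (6 GiB).
 */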
933
934 /* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
935  * correct topology device present in the device_list
936  */
937 static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
938                         struct list_head *device_list)
939 {
940         struct kfd_cache_properties *props;
941         struct kfd_topology_device *dev;
942         uint32_t id;
943         uint32_t total_num_of_cu;
944
945         id = cache->processor_id_low;
946
947         pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
948         list_for_each_entry(dev, device_list, list) {
949                 total_num_of_cu = (dev->node_props.array_count *
950                                         dev->node_props.cu_per_simd_array);
951
952                 /* Cache information in CRAT doesn't have proximity_domain
953                  * information as it is associated with a CPU core or GPU
954                  * Compute Unit. So map the cache using the CPU core ID or
955                  * SIMD (GPU) ID.
956                  * TODO: This works because currently we can safely assume that
957                  *  Compute Units are parsed before caches are parsed. In the
958                  *  future, remove this dependency.
959                  */
960                 if ((id >= dev->node_props.cpu_core_id_base &&
961                         id <= dev->node_props.cpu_core_id_base +
962                                 dev->node_props.cpu_cores_count) ||
963                         (id >= dev->node_props.simd_id_base &&
964                         id < dev->node_props.simd_id_base +
965                                 total_num_of_cu)) {
966                         props = kfd_alloc_struct(props);
967                         if (!props)
968                                 return -ENOMEM;
969
970                         props->processor_id_low = id;
971                         props->cache_level = cache->cache_level;
972                         props->cache_size = cache->cache_size;
973                         props->cacheline_size = cache->cache_line_size;
974                         props->cachelines_per_tag = cache->lines_per_tag;
975                         props->cache_assoc = cache->associativity;
976                         props->cache_latency = cache->cache_latency;
977                         memcpy(props->sibling_map, cache->sibling_map,
978                                         sizeof(props->sibling_map));
979
980                         if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
981                                 props->cache_type |= HSA_CACHE_TYPE_DATA;
982                         if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
983                                 props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
984                         if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
985                                 props->cache_type |= HSA_CACHE_TYPE_CPU;
986                         if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
987                                 props->cache_type |= HSA_CACHE_TYPE_HSACU;
988
989                         dev->cache_count++;
990                         dev->node_props.caches_count++;
991                         list_add_tail(&props->list, &dev->cache_props);
992
993                         break;
994                 }
995         }
996
997         return 0;
998 }
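
/*
 * Worked example (hypothetical values) for the ID-range match above: a
 * device with simd_id_base = 0x80001000, array_count = 4 and
 * cu_per_simd_array = 16 owns SIMD IDs [0x80001000, 0x80001040), so a
 * CRAT cache entry with processor_id_low = 0x80001010 attaches to it.
 */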
999
1000 /* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
1001  * correct topology device present in the device_list
1002  */
1003 static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
1004                                         struct list_head *device_list)
1005 {
1006         struct kfd_iolink_properties *props = NULL, *props2;
1007         struct kfd_topology_device *dev, *to_dev;
1008         uint32_t id_from;
1009         uint32_t id_to;
1010
1011         id_from = iolink->proximity_domain_from;
1012         id_to = iolink->proximity_domain_to;
1013
1014         pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to=%d\n",
1015                         id_from, id_to);
1016         list_for_each_entry(dev, device_list, list) {
1017                 if (id_from == dev->proximity_domain) {
1018                         props = kfd_alloc_struct(props);
1019                         if (!props)
1020                                 return -ENOMEM;
1021
1022                         props->node_from = id_from;
1023                         props->node_to = id_to;
1024                         props->ver_maj = iolink->version_major;
1025                         props->ver_min = iolink->version_minor;
1026                         props->iolink_type = iolink->io_interface_type;
1027
1028                         if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
1029                                 props->weight = 20;
1030                         else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
1031                                 props->weight = 15 * iolink->num_hops_xgmi;
1032                         else
1033                                 props->weight = node_distance(id_from, id_to);
1034
1035                         props->min_latency = iolink->minimum_latency;
1036                         props->max_latency = iolink->maximum_latency;
1037                         props->min_bandwidth = iolink->minimum_bandwidth_mbs;
1038                         props->max_bandwidth = iolink->maximum_bandwidth_mbs;
1039                         props->rec_transfer_size =
1040                                         iolink->recommended_transfer_size;
1041
1042                         dev->io_link_count++;
1043                         dev->node_props.io_links_count++;
1044                         list_add_tail(&props->list, &dev->io_link_props);
1045                         break;
1046                 }
1047         }
1048
1049         /* CPU topology is created before GPUs are detected, so CPU->GPU
1050          * links are not built at that time. If a PCIe type is discovered, it
1051          * means a GPU is detected and we are adding GPU->CPU to the topology.
1052          * At this time, also add the corresponding CPU->GPU link if the GPU
1053          * is large BAR.
1054          * For xGMI, only one direction of the link is added in the CRAT
1055          * table; add the corresponding reverse link now.
1056          */
1057         if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
1058                 to_dev = kfd_topology_device_by_proximity_domain(id_to);
1059                 if (!to_dev)
1060                         return -ENODEV;
1061                 /* same everything but the other direction */
1062                 props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
                     if (!props2)
                             return -ENOMEM;
1063                 props2->node_from = id_to;
1064                 props2->node_to = id_from;
1065                 props2->kobj = NULL;
1066                 to_dev->io_link_count++;
1067                 to_dev->node_props.io_links_count++;
1068                 list_add_tail(&props2->list, &to_dev->io_link_props);
1069         }
1070
1071         return 0;
1072 }
1073
1074 /* kfd_parse_subtype - parse subtypes and attach them to the correct topology
1075  * device present in the device_list
1076  *      @sub_type_hdr - subtype section of crat_image
1077  *      @device_list - list of topology devices present in this crat_image
1078  */
1079 static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
1080                                 struct list_head *device_list)
1081 {
1082         struct crat_subtype_computeunit *cu;
1083         struct crat_subtype_memory *mem;
1084         struct crat_subtype_cache *cache;
1085         struct crat_subtype_iolink *iolink;
1086         int ret = 0;
1087
1088         switch (sub_type_hdr->type) {
1089         case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
1090                 cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1091                 ret = kfd_parse_subtype_cu(cu, device_list);
1092                 break;
1093         case CRAT_SUBTYPE_MEMORY_AFFINITY:
1094                 mem = (struct crat_subtype_memory *)sub_type_hdr;
1095                 ret = kfd_parse_subtype_mem(mem, device_list);
1096                 break;
1097         case CRAT_SUBTYPE_CACHE_AFFINITY:
1098                 cache = (struct crat_subtype_cache *)sub_type_hdr;
1099                 ret = kfd_parse_subtype_cache(cache, device_list);
1100                 break;
1101         case CRAT_SUBTYPE_TLB_AFFINITY:
1102                 /*
1103                  * For now, nothing to do here
1104                  */
1105                 pr_debug("Found TLB entry in CRAT table (not processing)\n");
1106                 break;
1107         case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
1108                 /*
1109                  * For now, nothing to do here
1110                  */
1111                 pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
1112                 break;
1113         case CRAT_SUBTYPE_IOLINK_AFFINITY:
1114                 iolink = (struct crat_subtype_iolink *)sub_type_hdr;
1115                 ret = kfd_parse_subtype_iolink(iolink, device_list);
1116                 break;
1117         default:
1118                 pr_warn("Unknown subtype %d in CRAT\n",
1119                                 sub_type_hdr->type);
1120         }
1121
1122         return ret;
1123 }
1124
1125 /* kfd_parse_crat_table - parse CRAT table. For each node present in CRAT,
1126  * create a kfd_topology_device and add it to device_list. Also parse
1127  * CRAT subtypes and attach them to the appropriate kfd_topology_device.
1128  *      @crat_image - input image containing CRAT
1129  *      @device_list - [OUT] list of kfd_topology_device generated after
1130  *                     parsing crat_image
1131  *      @proximity_domain - Proximity domain of the first device in the table
1132  *
1133  *      Return - 0 if successful, else a negative value
1134  */
1135 int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
1136                          uint32_t proximity_domain)
1137 {
1138         struct kfd_topology_device *top_dev = NULL;
1139         struct crat_subtype_generic *sub_type_hdr;
1140         uint16_t node_id;
1141         int ret = 0;
1142         struct crat_header *crat_table = (struct crat_header *)crat_image;
1143         uint16_t num_nodes;
1144         uint32_t image_len;
1145
1146         if (!crat_image)
1147                 return -EINVAL;
1148
1149         if (!list_empty(device_list)) {
1150                 pr_warn("Error: device list should be empty\n");
1151                 return -EINVAL;
1152         }
1153
1154         num_nodes = crat_table->num_domains;
1155         image_len = crat_table->length;
1156
1157         pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);
1158
1159         for (node_id = 0; node_id < num_nodes; node_id++) {
1160                 top_dev = kfd_create_topology_device(device_list);
1161                 if (!top_dev)
1162                         break;
1163                 top_dev->proximity_domain = proximity_domain++;
1164         }
1165
1166         if (!top_dev) {
1167                 ret = -ENOMEM;
1168                 goto err;
1169         }
1170
1171         memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
1172         memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
1173                         CRAT_OEMTABLEID_LENGTH);
1174         top_dev->oem_revision = crat_table->oem_revision;
1175
1176         sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1177         while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
1178                         ((char *)crat_image) + image_len) {
1179                 if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
1180                         ret = kfd_parse_subtype(sub_type_hdr, device_list);
1181                         if (ret)
1182                                 break;
1183                 }
1184
1185                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1186                                 sub_type_hdr->length);
1187         }
1188
1189 err:
1190         if (ret)
1191                 kfd_release_topology_device_list(device_list);
1192
1193         return ret;
1194 }
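
/*
 * Typical use (editorial sketch; mirrors how the topology code drives this
 * parser, with error handling abbreviated): create a CRAT image from ACPI
 * and parse it into a temporary topology device list.
 *
 *      struct list_head temp_list;
 *      void *crat_image = NULL;
 *      size_t image_size = 0;
 *
 *      INIT_LIST_HEAD(&temp_list);
 *      if (!kfd_create_crat_image_acpi(&crat_image, &image_size))
 *              kfd_parse_crat_table(crat_image, &temp_list, 0);
 *      kfd_destroy_crat_image(crat_image);
 */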
1195
1196 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1197 static int fill_in_l1_pcache(struct crat_subtype_cache *pcache,
1198                                 struct kfd_gpu_cache_info *pcache_info,
1199                                 struct kfd_cu_info *cu_info,
1200                                 int mem_available,
1201                                 int cu_bitmask,
1202                                 int cache_type, unsigned int cu_processor_id,
1203                                 int cu_block)
1204 {
1205         unsigned int cu_sibling_map_mask;
1206         int first_active_cu;
1207
1208         /* First check if enough memory is available */
1209         if (sizeof(struct crat_subtype_cache) > mem_available)
1210                 return -ENOMEM;
1211
1212         cu_sibling_map_mask = cu_bitmask;
1213         cu_sibling_map_mask >>= cu_block;
1214         cu_sibling_map_mask &=
1215                 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1216         first_active_cu = ffs(cu_sibling_map_mask);
1217
1218         /* A CU could be inactive. In case of a shared cache, find the first
1219          * active CU. In case of a non-shared cache, check if the CU is
1220          * inactive; if so, skip it.
1221          */
1222         if (first_active_cu) {
1223                 memset(pcache, 0, sizeof(struct crat_subtype_cache));
1224                 pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1225                 pcache->length = sizeof(struct crat_subtype_cache);
1226                 pcache->flags = pcache_info[cache_type].flags;
1227                 pcache->processor_id_low = cu_processor_id
1228                                          + (first_active_cu - 1);
1229                 pcache->cache_level = pcache_info[cache_type].cache_level;
1230                 pcache->cache_size = pcache_info[cache_type].cache_size;
1231
1232                 /* Sibling map is w.r.t processor_id_low, so shift out
1233                  * inactive CU
1234                  */
1235                 cu_sibling_map_mask =
1236                         cu_sibling_map_mask >> (first_active_cu - 1);
1237
1238                 pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
1239                 pcache->sibling_map[1] =
1240                                 (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1241                 pcache->sibling_map[2] =
1242                                 (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1243                 pcache->sibling_map[3] =
1244                                 (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1245                 return 0;
1246         }
1247         return 1;
1248 }
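
/*
 * Worked example (hypothetical values) for the mask arithmetic above:
 * with cu_bitmask = 0xF0, cu_block = 4 and num_cu_shared = 2,
 *
 *      cu_sibling_map_mask = (0xF0 >> 4) & ((1 << 2) - 1) = 0x3;
 *      first_active_cu = ffs(0x3) = 1;
 *
 * so one cache entry is emitted at cu_processor_id + 0, and
 * sibling_map[0] = 0x3 marks both CUs sharing the cache.
 */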
1249
1250 /* Helper function. See kfd_fill_gpu_cache_info for parameter description */
1251 static int fill_in_l2_l3_pcache(struct crat_subtype_cache *pcache,
1252                                 struct kfd_gpu_cache_info *pcache_info,
1253                                 struct kfd_cu_info *cu_info,
1254                                 int mem_available,
1255                                 int cache_type, unsigned int cu_processor_id)
1256 {
1257         unsigned int cu_sibling_map_mask;
1258         int first_active_cu;
1259         int i, j, k;
1260
1261         /* First check if enough memory is available */
1262         if (sizeof(struct crat_subtype_cache) > mem_available)
1263                 return -ENOMEM;
1264
1265         cu_sibling_map_mask = cu_info->cu_bitmap[0][0];
1266         cu_sibling_map_mask &=
1267                 ((1 << pcache_info[cache_type].num_cu_shared) - 1);
1268         first_active_cu = ffs(cu_sibling_map_mask);
1269
1270         /* A CU could be inactive. In case of a shared cache, find the first
1271          * active CU. In case of a non-shared cache, check if the CU is
1272          * inactive; if so, skip it.
1273          */
1274         if (first_active_cu) {
1275                 memset(pcache, 0, sizeof(struct crat_subtype_cache));
1276                 pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
1277                 pcache->length = sizeof(struct crat_subtype_cache);
1278                 pcache->flags = pcache_info[cache_type].flags;
1279                 pcache->processor_id_low = cu_processor_id
1280                                          + (first_active_cu - 1);
1281                 pcache->cache_level = pcache_info[cache_type].cache_level;
1282                 pcache->cache_size = pcache_info[cache_type].cache_size;
1283
1284                 /* Sibling map is w.r.t processor_id_low, so shift out
1285                  * inactive CU
1286                  */
1287                 cu_sibling_map_mask =
1288                         cu_sibling_map_mask >> (first_active_cu - 1);
1289                 k = 0;
1290                 for (i = 0; i < cu_info->num_shader_engines; i++) {
1291                         for (j = 0; j < cu_info->num_shader_arrays_per_engine;
1292                                 j++) {
1293                                 pcache->sibling_map[k] =
1294                                  (uint8_t)(cu_sibling_map_mask & 0xFF);
1295                                 pcache->sibling_map[k+1] =
1296                                  (uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
1297                                 pcache->sibling_map[k+2] =
1298                                  (uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
1299                                 pcache->sibling_map[k+3] =
1300                                  (uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
1301                                 k += 4;
1302                                 cu_sibling_map_mask =
1303                                         cu_info->cu_bitmap[i % 4][j + i / 4];
1304                                 cu_sibling_map_mask &= (
1305                                  (1 << pcache_info[cache_type].num_cu_shared)
1306                                  - 1);
1307                         }
1308                 }
1309                 return 0;
1310         }
1311         return 1;
1312 }
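
/* Note: unlike fill_in_l1_pcache, which reports one cache entry per block
 * of num_cu_shared CUs, the L2/L3 helper above emits a single entry whose
 * sibling_map walks every shader engine and shader array, 4 bytes (32 CU
 * bits) at a time.
 */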
1313
1314 /* kfd_fill_gpu_cache_info - Fill GPU cache info using kfd_gpu_cache_info
1315  * tables
1316  *
1317  *      @kdev - [IN] GPU device
1318  *      @gpu_processor_id - [IN] GPU processor ID with which these caches
1319  *                          are associated
1320  *      @available_size - [IN] Amount of memory available in pcache
1321  *      @cu_info - [IN] Compute Unit info obtained from KGD
1322  *      @pcache - [OUT] memory into which cache data is to be filled in.
1323  *      @size_filled - [OUT] amount of data used up in pcache.
1324  *      @num_of_entries - [OUT] number of caches added
1325  */
1326 static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
1327                         int gpu_processor_id,
1328                         int available_size,
1329                         struct kfd_cu_info *cu_info,
1330                         struct crat_subtype_cache *pcache,
1331                         int *size_filled,
1332                         int *num_of_entries)
1333 {
1334         struct kfd_gpu_cache_info *pcache_info;
1335         int num_of_cache_types = 0;
1336         int i, j, k;
1337         int ct = 0;
1338         int mem_available = available_size;
1339         unsigned int cu_processor_id;
1340         int ret;
1341         unsigned int num_cu_shared;
1342
1343         switch (kdev->device_info->asic_family) {
1344         case CHIP_KAVERI:
1345                 pcache_info = kaveri_cache_info;
1346                 num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
1347                 break;
1348         case CHIP_HAWAII:
1349                 pcache_info = hawaii_cache_info;
1350                 num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
1351                 break;
1352         case CHIP_CARRIZO:
1353                 pcache_info = carrizo_cache_info;
1354                 num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
1355                 break;
1356         case CHIP_TONGA:
1357                 pcache_info = tonga_cache_info;
1358                 num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
1359                 break;
1360         case CHIP_FIJI:
1361                 pcache_info = fiji_cache_info;
1362                 num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
1363                 break;
1364         case CHIP_POLARIS10:
1365                 pcache_info = polaris10_cache_info;
1366                 num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
1367                 break;
1368         case CHIP_POLARIS11:
1369                 pcache_info = polaris11_cache_info;
1370                 num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
1371                 break;
1372         case CHIP_POLARIS12:
1373                 pcache_info = polaris12_cache_info;
1374                 num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
1375                 break;
1376         case CHIP_VEGAM:
1377                 pcache_info = vegam_cache_info;
1378                 num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
1379                 break;
1380         case CHIP_VEGA10:
1381                 pcache_info = vega10_cache_info;
1382                 num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
1383                 break;
1384         case CHIP_VEGA12:
1385                 pcache_info = vega12_cache_info;
1386                 num_of_cache_types = ARRAY_SIZE(vega12_cache_info);
1387                 break;
1388         case CHIP_VEGA20:
1389         case CHIP_ARCTURUS:
1390                 pcache_info = vega20_cache_info;
1391                 num_of_cache_types = ARRAY_SIZE(vega20_cache_info);
1392                 break;
1393         case CHIP_ALDEBARAN:
1394                 pcache_info = aldebaran_cache_info;
1395                 num_of_cache_types = ARRAY_SIZE(aldebaran_cache_info);
1396                 break;
1397         case CHIP_RAVEN:
1398                 pcache_info = raven_cache_info;
1399                 num_of_cache_types = ARRAY_SIZE(raven_cache_info);
1400                 break;
1401         case CHIP_RENOIR:
1402                 pcache_info = renoir_cache_info;
1403                 num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
1404                 break;
1405         case CHIP_NAVI10:
1406         case CHIP_NAVI12:
1407         case CHIP_CYAN_SKILLFISH:
1408                 pcache_info = navi10_cache_info;
1409                 num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
1410                 break;
1411         case CHIP_NAVI14:
1412                 pcache_info = navi14_cache_info;
1413                 num_of_cache_types = ARRAY_SIZE(navi14_cache_info);
1414                 break;
1415         case CHIP_SIENNA_CICHLID:
1416                 pcache_info = sienna_cichlid_cache_info;
1417                 num_of_cache_types = ARRAY_SIZE(sienna_cichlid_cache_info);
1418                 break;
1419         case CHIP_NAVY_FLOUNDER:
1420                 pcache_info = navy_flounder_cache_info;
1421                 num_of_cache_types = ARRAY_SIZE(navy_flounder_cache_info);
1422                 break;
1423         case CHIP_DIMGREY_CAVEFISH:
1424                 pcache_info = dimgrey_cavefish_cache_info;
1425                 num_of_cache_types = ARRAY_SIZE(dimgrey_cavefish_cache_info);
1426                 break;
1427         case CHIP_VANGOGH:
1428                 pcache_info = vangogh_cache_info;
1429                 num_of_cache_types = ARRAY_SIZE(vangogh_cache_info);
1430                 break;
1431         case CHIP_BEIGE_GOBY:
1432                 pcache_info = beige_goby_cache_info;
1433                 num_of_cache_types = ARRAY_SIZE(beige_goby_cache_info);
1434                 break;
1435         case CHIP_YELLOW_CARP:
1436                 pcache_info = yellow_carp_cache_info;
1437                 num_of_cache_types = ARRAY_SIZE(yellow_carp_cache_info);
1438                 break;
1439         default:
1440                 return -EINVAL;
1441         }
1442
1443         *size_filled = 0;
1444         *num_of_entries = 0;
1445
1446         /* For each type of cache listed in the kfd_gpu_cache_info table,
1447          * go through all available Compute Units.
1448          * The [i,j,k] loop:
1449          *              if kfd_gpu_cache_info.num_cu_shared == 1,
1450          *                      parses through all available CUs;
1451          *              if kfd_gpu_cache_info.num_cu_shared != 1,
1452          *                      considers only one CU from each
1453          *                      shared unit.
1454          */
1455
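        /* Worked example (illustrative): with num_cu_per_sh = 10 and
         * num_cu_shared = 4, the k loop below visits CU blocks [0-3],
         * [4-7] and [8-9]; cu_processor_id advances by 4, 4 and then 2,
         * so the trailing partial block is not over-counted.
         */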
1456         for (ct = 0; ct < num_of_cache_types; ct++) {
1457           cu_processor_id = gpu_processor_id;
1458           if (pcache_info[ct].cache_level == 1) {
1459             for (i = 0; i < cu_info->num_shader_engines; i++) {
1460               for (j = 0; j < cu_info->num_shader_arrays_per_engine; j++) {
1461                 for (k = 0; k < cu_info->num_cu_per_sh;
1462                   k += pcache_info[ct].num_cu_shared) {
1463                   ret = fill_in_l1_pcache(pcache,
1464                                         pcache_info,
1465                                         cu_info,
1466                                         mem_available,
1467                                         cu_info->cu_bitmap[i % 4][j + i / 4],
1468                                         ct,
1469                                         cu_processor_id,
1470                                         k);
1471
1472                   if (ret < 0)
1473                         break;
1474
1475                   if (!ret) {
1476                                 pcache++;
1477                                 (*num_of_entries)++;
1478                                 mem_available -= sizeof(*pcache);
1479                                 (*size_filled) += sizeof(*pcache);
1480                   }
1481
1482                   /* Move to next CU block */
1483                   num_cu_shared = ((k + pcache_info[ct].num_cu_shared) <=
1484                                         cu_info->num_cu_per_sh) ?
1485                                         pcache_info[ct].num_cu_shared :
1486                                         (cu_info->num_cu_per_sh - k);
1487                   cu_processor_id += num_cu_shared;
1488                 }
1489               }
1490             }
1491           } else {
1492                         ret = fill_in_l2_l3_pcache(pcache,
1493                                 pcache_info,
1494                                 cu_info,
1495                                 mem_available,
1496                                 ct,
1497                                 cu_processor_id);
1498
1499                         if (ret < 0)
1500                                 break;
1501
1502                         if (!ret) {
1503                                 pcache++;
1504                                 (*num_of_entries)++;
1505                                 mem_available -= sizeof(*pcache);
1506                                 (*size_filled) += sizeof(*pcache);
1507                         }
1508           }
1509         }
1510
1511         pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);
1512
1513         return 0;
1514 }
1515
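/* kfd_ignore_crat - Return true if the ACPI CRAT should be ignored.
 *
 * Besides the ignore_crat module parameter, the CRAT is also ignored
 * whenever KFD_SUPPORT_IOMMU_V2 is not configured; the caller then falls
 * back to the virtual CRAT path below.
 */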
1516 static bool kfd_ignore_crat(void)
1517 {
1518         bool ret;
1519
1520         if (ignore_crat)
1521                 return true;
1522
1523 #ifndef KFD_SUPPORT_IOMMU_V2
1524         ret = true;
1525 #else
1526         ret = false;
1527 #endif
1528
1529         return ret;
1530 }
1531
1532 /*
1533  * kfd_create_crat_image_acpi - Allocates memory for CRAT image and
1534  * copies CRAT from ACPI (if available).
1535  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1536  *
1537  *      @crat_image: CRAT read from ACPI. If no CRAT in ACPI then
1538  *                   crat_image will be NULL
1539  *      @size: [OUT] size of crat_image
1540  *
1541  *      Return 0 if successful else return error code
1542  */
1543 int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
1544 {
1545         struct acpi_table_header *crat_table;
1546         acpi_status status;
1547         void *pcrat_image;
1548         int rc = 0;
1549
1550         if (!crat_image)
1551                 return -EINVAL;
1552
1553         *crat_image = NULL;
1554
1555         if (kfd_ignore_crat()) {
1556                 pr_info("CRAT table ignored (module option or no IOMMUv2 support)\n");
1557                 return -ENODATA;
1558         }
1559
1560         /* Fetch the CRAT table from ACPI */
1561         status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
1562         if (status == AE_NOT_FOUND) {
1563                 pr_warn("CRAT table not found\n");
1564                 return -ENODATA;
1565         } else if (ACPI_FAILURE(status)) {
1566                 const char *err = acpi_format_exception(status);
1567
1568                 pr_err("CRAT table error: %s\n", err);
1569                 return -EINVAL;
1570         }
1571
1572         pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
1573         if (!pcrat_image) {
1574                 rc = -ENOMEM;
1575                 goto out;
1576         }
1577
1578         memcpy(pcrat_image, crat_table, crat_table->length);
1579         *crat_image = pcrat_image;
1580         *size = crat_table->length;
1581 out:
1582         acpi_put_table(crat_table);
1583         return rc;
1584 }
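
/* Typical usage (illustrative sketch; the real caller lives in the
 * topology code):
 *
 *      void *crat_image = NULL;
 *      size_t image_size = 0;
 *
 *      if (!kfd_create_crat_image_acpi(&crat_image, &image_size))
 *              ...parse crat_image...
 *      kfd_destroy_crat_image(crat_image);
 */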
1585
1586 /* Memory required to create Virtual CRAT.
1587  * Since there is no easy way to predict the amount of memory required, the
1588  * following amount is allocated for the GPU Virtual CRAT. This is
1589  * expected to cover all known conditions, but to be safe an additional
1590  * check is put in the code to ensure we don't write past the allocation.
1591  */
1592 #define VCRAT_SIZE_FOR_GPU      (4 * PAGE_SIZE)
1593
1594 /* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
1595  *
1596  *      @numa_node_id: CPU NUMA node id
1597  *      @avail_size: Available size in the memory
      *      @proximity_domain: Proximity domain to report for this node
1598  *      @sub_type_hdr: Memory into which compute info will be filled in
1599  *
1600  *      Return 0 if successful else return -ve value
1601  */
1602 static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
1603                                 int proximity_domain,
1604                                 struct crat_subtype_computeunit *sub_type_hdr)
1605 {
1606         const struct cpumask *cpumask;
1607
1608         *avail_size -= sizeof(struct crat_subtype_computeunit);
1609         if (*avail_size < 0)
1610                 return -ENOMEM;
1611
1612         memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1613
1614         /* Fill in subtype header data */
1615         sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1616         sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1617         sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1618
1619         cpumask = cpumask_of_node(numa_node_id);
1620
1621         /* Fill in CU data */
1622         sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
1623         sub_type_hdr->proximity_domain = proximity_domain;
1624         sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
1625         if (sub_type_hdr->processor_id_low == -1)
1626                 return -EINVAL;
1627
1628         sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);
1629
1630         return 0;
1631 }
1632
1633 /* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
1634  *
1635  *      @numa_node_id: CPU NUMA node id
1636  *      @avail_size: Available size in the memory
      *      @proximity_domain: Proximity domain to report for this node
1637  *      @sub_type_hdr: Memory into which memory info will be filled in
1638  *
1639  *      Return 0 if successful else return -ve value
1640  */
1641 static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
1642                         int proximity_domain,
1643                         struct crat_subtype_memory *sub_type_hdr)
1644 {
1645         uint64_t mem_in_bytes = 0;
1646         pg_data_t *pgdat;
1647         int zone_type;
1648
1649         *avail_size -= sizeof(struct crat_subtype_memory);
1650         if (*avail_size < 0)
1651                 return -ENOMEM;
1652
1653         memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1654
1655         /* Fill in subtype header data */
1656         sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1657         sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1658         sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1659
1660         /* Fill in Memory Subunit data */
1661
1662         /* Unlike si_meminfo, si_meminfo_node is not exported. So
1663          * the following lines are duplicated from si_meminfo_node
1664          * function
1665          */
1666         pgdat = NODE_DATA(numa_node_id);
1667         for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
1668                 mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
1669         mem_in_bytes <<= PAGE_SHIFT;
1670
1671         sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
1672         sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
1673         sub_type_hdr->proximity_domain = proximity_domain;
1674
1675         return 0;
1676 }
1677
1678 #ifdef CONFIG_X86_64
1679 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
1680                                 uint32_t *num_entries,
1681                                 struct crat_subtype_iolink *sub_type_hdr)
1682 {
1683         int nid;
1684         struct cpuinfo_x86 *c = &cpu_data(0);
1685         uint8_t link_type;
1686
1687         if (c->x86_vendor == X86_VENDOR_AMD)
1688                 link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
1689         else
1690                 link_type = CRAT_IOLINK_TYPE_QPI_1_1;
1691
1692         *num_entries = 0;
1693
1694         /* Create IO links from this node to other CPU nodes */
1695         for_each_online_node(nid) {
1696                 if (nid == numa_node_id) /* node itself */
1697                         continue;
1698
1699                 *avail_size -= sizeof(struct crat_subtype_iolink);
1700                 if (*avail_size < 0)
1701                         return -ENOMEM;
1702
1703                 memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1704
1705                 /* Fill in subtype header data */
1706                 sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1707                 sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1708                 sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1709
1710                 /* Fill in IO link data */
1711                 sub_type_hdr->proximity_domain_from = numa_node_id;
1712                 sub_type_hdr->proximity_domain_to = nid;
1713                 sub_type_hdr->io_interface_type = link_type;
1714
1715                 (*num_entries)++;
1716                 sub_type_hdr++;
1717         }
1718
1719         return 0;
1720 }
1721 #endif
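
/* Note: for N online NUMA nodes, kfd_fill_iolink_info_for_cpu() emits
 * N - 1 iolink entries per node, so the CPU VCRAT ends up describing a
 * fully connected mesh of N * (N - 1) directed CPU-to-CPU links.
 */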
1722
1723 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
1724  *
1725  *      @pcrat_image: Fill in VCRAT for CPU
1726  *      @size:  [IN] allocated size of crat_image.
1727  *              [OUT] actual size of data filled in crat_image
1728  */
1729 static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
1730 {
1731         struct crat_header *crat_table = (struct crat_header *)pcrat_image;
1732         struct acpi_table_header *acpi_table;
1733         acpi_status status;
1734         struct crat_subtype_generic *sub_type_hdr;
1735         int avail_size = *size;
1736         int numa_node_id;
1737 #ifdef CONFIG_X86_64
1738         uint32_t entries = 0;
1739 #endif
1740         int ret = 0;
1741
1742         if (!pcrat_image)
1743                 return -EINVAL;
1744
1745         /* Fill in CRAT Header.
1746          * Modify length and total_entries as subunits are added.
1747          */
1748         avail_size -= sizeof(struct crat_header);
1749         if (avail_size < 0)
1750                 return -ENOMEM;
1751
1752         memset(crat_table, 0, sizeof(struct crat_header));
1753         memcpy(&crat_table->signature, CRAT_SIGNATURE,
1754                         sizeof(crat_table->signature));
1755         crat_table->length = sizeof(struct crat_header);
1756
1757         status = acpi_get_table("DSDT", 0, &acpi_table);
1758         if (status != AE_OK)
1759                 pr_warn("DSDT table not found for OEM information\n");
1760         else {
1761                 crat_table->oem_revision = acpi_table->revision;
1762                 memcpy(crat_table->oem_id, acpi_table->oem_id,
1763                                 CRAT_OEMID_LENGTH);
1764                 memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
1765                                 CRAT_OEMTABLEID_LENGTH);
1766                 acpi_put_table(acpi_table);
1767         }
1768         crat_table->total_entries = 0;
1769         crat_table->num_domains = 0;
1770
1771         sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
1772
1773         for_each_online_node(numa_node_id) {
1774                 if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
1775                         continue;
1776
1777                 /* Fill in Subtype: Compute Unit */
1778                 ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
1779                         crat_table->num_domains,
1780                         (struct crat_subtype_computeunit *)sub_type_hdr);
1781                 if (ret < 0)
1782                         return ret;
1783                 crat_table->length += sub_type_hdr->length;
1784                 crat_table->total_entries++;
1785
1786                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1787                         sub_type_hdr->length);
1788
1789                 /* Fill in Subtype: Memory */
1790                 ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
1791                         crat_table->num_domains,
1792                         (struct crat_subtype_memory *)sub_type_hdr);
1793                 if (ret < 0)
1794                         return ret;
1795                 crat_table->length += sub_type_hdr->length;
1796                 crat_table->total_entries++;
1797
1798                 sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1799                         sub_type_hdr->length);
1800
1801                 /* Fill in Subtype: IO Link */
1802 #ifdef CONFIG_X86_64
1803                 ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
1804                                 &entries,
1805                                 (struct crat_subtype_iolink *)sub_type_hdr);
1806                 if (ret < 0)
1807                         return ret;
1808
1809                 if (entries) {
1810                         crat_table->length += (sub_type_hdr->length * entries);
1811                         crat_table->total_entries += entries;
1812
1813                         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1814                                         sub_type_hdr->length * entries);
1815                 }
1816 #else
1817                 pr_info("IO link not available for non-x86 platforms\n");
1818 #endif
1819
1820                 crat_table->num_domains++;
1821         }
1822
1823         /* TODO: Add cache Subtype for CPU.
1824          * Currently, CPU cache information is available in function
1825          * detect_cache_attributes(cpu) defined in the file
1826          * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
1827          * exported and to get the same information the code needs to be
1828          * duplicated.
1829          */
1830
1831         *size = crat_table->length;
1832         pr_info("Virtual CRAT table created for CPU\n");
1833
1834         return 0;
1835 }
1836
1837 static int kfd_fill_gpu_memory_affinity(int *avail_size,
1838                 struct kfd_dev *kdev, uint8_t type, uint64_t size,
1839                 struct crat_subtype_memory *sub_type_hdr,
1840                 uint32_t proximity_domain,
1841                 const struct kfd_local_mem_info *local_mem_info)
1842 {
1843         *avail_size -= sizeof(struct crat_subtype_memory);
1844         if (*avail_size < 0)
1845                 return -ENOMEM;
1846
1847         memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
1848         sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
1849         sub_type_hdr->length = sizeof(struct crat_subtype_memory);
1850         sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1851
1852         sub_type_hdr->proximity_domain = proximity_domain;
1853
1854         pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
1855                         type, size);
1856
1857         sub_type_hdr->length_low = lower_32_bits(size);
1858         sub_type_hdr->length_high = upper_32_bits(size);
1859
1860         sub_type_hdr->width = local_mem_info->vram_width;
1861         sub_type_hdr->visibility_type = type;
1862
1863         return 0;
1864 }
1865
1866 #ifdef CONFIG_ACPI_NUMA
1867 static void kfd_find_numa_node_in_srat(struct kfd_dev *kdev)
1868 {
1869         struct acpi_table_header *table_header = NULL;
1870         struct acpi_subtable_header *sub_header = NULL;
1871         unsigned long table_end, subtable_len;
1872         u32 pci_id = pci_domain_nr(kdev->pdev->bus) << 16 |
1873                         pci_dev_id(kdev->pdev);
1874         u32 bdf;
1875         acpi_status status;
1876         struct acpi_srat_cpu_affinity *cpu;
1877         struct acpi_srat_generic_affinity *gpu;
1878         int pxm = 0, max_pxm = 0;
1879         int numa_node = NUMA_NO_NODE;
1880         bool found = false;
1881
1882         /* Fetch the SRAT table from ACPI */
1883         status = acpi_get_table(ACPI_SIG_SRAT, 0, &table_header);
1884         if (status == AE_NOT_FOUND) {
1885                 pr_warn("SRAT table not found\n");
1886                 return;
1887         } else if (ACPI_FAILURE(status)) {
1888                 const char *err = acpi_format_exception(status);
1889                 pr_err("SRAT table error: %s\n", err);
1890                 return;
1891         }
1892
1893         table_end = (unsigned long)table_header + table_header->length;
1894
1895         /* Parse all entries looking for a match. */
1896         sub_header = (struct acpi_subtable_header *)
1897                         ((unsigned long)table_header +
1898                         sizeof(struct acpi_table_srat));
1899         subtable_len = sub_header->length;
1900
1901         while (((unsigned long)sub_header) + subtable_len < table_end) {
1902                 /*
1903                  * If length is 0, break from this loop to avoid
1904                  * infinite loop.
1905                  */
1906                 if (subtable_len == 0) {
1907                         pr_err("SRAT invalid zero length\n");
1908                         break;
1909                 }
1910
1911                 switch (sub_header->type) {
1912                 case ACPI_SRAT_TYPE_CPU_AFFINITY:
1913                         cpu = (struct acpi_srat_cpu_affinity *)sub_header;
1914                         pxm = *((u32 *)cpu->proximity_domain_hi) << 8 |
1915                                         cpu->proximity_domain_lo;
1916                         if (pxm > max_pxm)
1917                                 max_pxm = pxm;
1918                         break;
1919                 case ACPI_SRAT_TYPE_GENERIC_AFFINITY:
1920                         gpu = (struct acpi_srat_generic_affinity *)sub_header;
1921                         bdf = *((u16 *)(&gpu->device_handle[0])) << 16 |
1922                                         *((u16 *)(&gpu->device_handle[2]));
1923                         if (bdf == pci_id) {
1924                                 found = true;
1925                                 numa_node = pxm_to_node(gpu->proximity_domain);
1926                         }
1927                         break;
1928                 default:
1929                         break;
1930                 }
1931
1932                 if (found)
1933                         break;
1934
1935                 sub_header = (struct acpi_subtable_header *)
1936                                 ((unsigned long)sub_header + subtable_len);
1937                 subtable_len = sub_header->length;
1938         }
1939
1940         acpi_put_table(table_header);
1941
1942         /* Workaround bad cpu-gpu binding case */
1943         if (found && (numa_node < 0 ||
1944                         numa_node > pxm_to_node(max_pxm)))
1945                 numa_node = 0;
1946
1947         if (numa_node != NUMA_NO_NODE)
1948                 set_dev_node(&kdev->pdev->dev, numa_node);
1949 }
1950 #endif
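
/* Note: pci_id in kfd_find_numa_node_in_srat() packs the PCI location the
 * same way the SRAT Generic Affinity device handle does. For example
 * (hypothetical device), domain 0, bus 0x21, devfn 0 gives
 * pci_id = (0 << 16) | (0x21 << 8) | 0x00 = 0x2100.
 */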
1951
1952 /* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link from a GPU
1953  * to its NUMA node
1954  *      @avail_size: Available size in the memory
1955  *      @kdev: [IN] GPU device
1956  *      @sub_type_hdr: Memory into which io link info will be filled in
1957  *      @proximity_domain: Proximity domain of the GPU node
1958  *
1959  *      Return 0 if successful else return -ve value
1960  */
1961 static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1962                         struct kfd_dev *kdev,
1963                         struct crat_subtype_iolink *sub_type_hdr,
1964                         uint32_t proximity_domain)
1965 {
1966         struct amdgpu_device *adev = (struct amdgpu_device *)kdev->kgd;
1967
1968         *avail_size -= sizeof(struct crat_subtype_iolink);
1969         if (*avail_size < 0)
1970                 return -ENOMEM;
1971
1972         memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1973
1974         /* Fill in subtype header data */
1975         sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1976         sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1977         sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1978         if (kfd_dev_is_large_bar(kdev))
1979                 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1980
1981         /* Fill in IOLINK subtype.
1982          * TODO: Fill-in other fields of iolink subtype
1983          */
1984         if (adev->gmc.xgmi.connected_to_cpu) {
1985                 /*
1986                  * with host gpu xgmi link, host can access gpu memory whether
1987                  * or not pcie bar type is large, so always create bidirectional
1988                  * io link.
1989                  */
1990                 sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1991                 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1992                 sub_type_hdr->num_hops_xgmi = 1;
1993                 if (adev->asic_type == CHIP_ALDEBARAN) {
1994                         sub_type_hdr->minimum_bandwidth_mbs =
1995                                         amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(
1996                                                         kdev->kgd, NULL, true);
1997                         sub_type_hdr->maximum_bandwidth_mbs =
1998                                         sub_type_hdr->minimum_bandwidth_mbs;
1999                 }
2000         } else {
2001                 sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
2002                 sub_type_hdr->minimum_bandwidth_mbs =
2003                                 amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, true);
2004                 sub_type_hdr->maximum_bandwidth_mbs =
2005                                 amdgpu_amdkfd_get_pcie_bandwidth_mbytes(kdev->kgd, false);
2006         }
2007
2008         sub_type_hdr->proximity_domain_from = proximity_domain;
2009
2010 #ifdef CONFIG_ACPI_NUMA
2011         if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2012                 kfd_find_numa_node_in_srat(kdev);
2013 #endif
2014 #ifdef CONFIG_NUMA
2015         if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
2016                 sub_type_hdr->proximity_domain_to = 0;
2017         else
2018                 sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
2019 #else
2020         sub_type_hdr->proximity_domain_to = 0;
2021 #endif
2022         return 0;
2023 }
2024
2025 static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
2026                         struct kfd_dev *kdev,
2027                         struct kfd_dev *peer_kdev,
2028                         struct crat_subtype_iolink *sub_type_hdr,
2029                         uint32_t proximity_domain_from,
2030                         uint32_t proximity_domain_to)
2031 {
2032         *avail_size -= sizeof(struct crat_subtype_iolink);
2033         if (*avail_size < 0)
2034                 return -ENOMEM;
2035
2036         memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
2037
2038         sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
2039         sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
2040         sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
2041                                CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
2042
2043         sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
2044         sub_type_hdr->proximity_domain_from = proximity_domain_from;
2045         sub_type_hdr->proximity_domain_to = proximity_domain_to;
2046         sub_type_hdr->num_hops_xgmi =
2047                 amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
2048         sub_type_hdr->maximum_bandwidth_mbs =
2049                 amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, peer_kdev->kgd, false);
2050         sub_type_hdr->minimum_bandwidth_mbs = sub_type_hdr->maximum_bandwidth_mbs ?
2051                 amdgpu_amdkfd_get_xgmi_bandwidth_mbytes(kdev->kgd, NULL, true) : 0;
2052
2053         return 0;
2054 }
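
/* Note (assumption): amdgpu_amdkfd_get_xgmi_bandwidth_mbytes() with a NULL
 * peer and is_min == true appears to report the adapter-wide per-link
 * minimum, which is why the minimum is only filled in when this specific
 * link reports a non-zero maximum.
 */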
2055
2056 /* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
2057  *
2058  *      @pcrat_image: Fill in VCRAT for GPU
2059  *      @size:  [IN] allocated size of crat_image.
2060  *              [OUT] actual size of data filled in crat_image
2061  */
2062 static int kfd_create_vcrat_image_gpu(void *pcrat_image,
2063                                       size_t *size, struct kfd_dev *kdev,
2064                                       uint32_t proximity_domain)
2065 {
2066         struct crat_header *crat_table = (struct crat_header *)pcrat_image;
2067         struct crat_subtype_generic *sub_type_hdr;
2068         struct kfd_local_mem_info local_mem_info;
2069         struct kfd_topology_device *peer_dev;
2070         struct crat_subtype_computeunit *cu;
2071         struct kfd_cu_info cu_info;
2072         int avail_size = *size;
2073         uint32_t total_num_of_cu;
2074         int num_of_cache_entries = 0;
2075         int cache_mem_filled = 0;
2076         uint32_t nid = 0;
2077         int ret = 0;
2078
2079         if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
2080                 return -EINVAL;
2081
2082         /* Fill the CRAT Header.
2083          * Modify length and total_entries as subunits are added.
2084          */
2085         avail_size -= sizeof(struct crat_header);
2086         if (avail_size < 0)
2087                 return -ENOMEM;
2088
2089         memset(crat_table, 0, sizeof(struct crat_header));
2090
2091         memcpy(&crat_table->signature, CRAT_SIGNATURE,
2092                         sizeof(crat_table->signature));
2093         /* Change length as we add more subtypes*/
2094         crat_table->length = sizeof(struct crat_header);
2095         crat_table->num_domains = 1;
2096         crat_table->total_entries = 0;
2097
2098         /* Fill in Subtype: Compute Unit
2099          * First fill in the sub type header and then sub type data
2100          */
2101         avail_size -= sizeof(struct crat_subtype_computeunit);
2102         if (avail_size < 0)
2103                 return -ENOMEM;
2104
2105         sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
2106         memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
2107
2108         sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
2109         sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
2110         sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
2111
2112         /* Fill CU subtype data */
2113         cu = (struct crat_subtype_computeunit *)sub_type_hdr;
2114         cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
2115         cu->proximity_domain = proximity_domain;
2116
2117         amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
2118         cu->num_simd_per_cu = cu_info.simd_per_cu;
2119         cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
2120         cu->max_waves_simd = cu_info.max_waves_per_simd;
2121
2122         cu->wave_front_size = cu_info.wave_front_size;
2123         cu->array_count = cu_info.num_shader_arrays_per_engine *
2124                 cu_info.num_shader_engines;
2125         total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
2126         cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
2127         cu->num_cu_per_array = cu_info.num_cu_per_sh;
2128         cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
2129         cu->num_banks = cu_info.num_shader_engines;
2130         cu->lds_size_in_kb = cu_info.lds_size;
2131
2132         cu->hsa_capability = 0;
2133
2134         /* Check if this node supports IOMMU. During parsing this flag will
2135          * translate to HSA_CAP_ATS_PRESENT
2136          */
2137         if (!kfd_iommu_check_device(kdev))
2138                 cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
2139
2140         crat_table->length += sub_type_hdr->length;
2141         crat_table->total_entries++;
2142
2143         /* Fill in Subtype: Memory. Only on systems with large BAR (no
2144          * private FB), report memory as public. On other systems
2145          * report the total FB size (public+private) as a single
2146          * private heap.
2147          */
2148         amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
2149         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2150                         sub_type_hdr->length);
2151
2152         if (debug_largebar)
2153                 local_mem_info.local_mem_size_private = 0;
2154
2155         if (local_mem_info.local_mem_size_private == 0)
2156                 ret = kfd_fill_gpu_memory_affinity(&avail_size,
2157                                 kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
2158                                 local_mem_info.local_mem_size_public,
2159                                 (struct crat_subtype_memory *)sub_type_hdr,
2160                                 proximity_domain,
2161                                 &local_mem_info);
2162         else
2163                 ret = kfd_fill_gpu_memory_affinity(&avail_size,
2164                                 kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
2165                                 local_mem_info.local_mem_size_public +
2166                                 local_mem_info.local_mem_size_private,
2167                                 (struct crat_subtype_memory *)sub_type_hdr,
2168                                 proximity_domain,
2169                                 &local_mem_info);
2170         if (ret < 0)
2171                 return ret;
2172
2173         crat_table->length += sizeof(struct crat_subtype_memory);
2174         crat_table->total_entries++;
2175
2176         /* TODO: Fill in cache information. This information is NOT readily
2177          * available in KGD
2178          */
2179         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2180                 sub_type_hdr->length);
2181         ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
2182                                 avail_size,
2183                                 &cu_info,
2184                                 (struct crat_subtype_cache *)sub_type_hdr,
2185                                 &cache_mem_filled,
2186                                 &num_of_cache_entries);
2187
2188         if (ret < 0)
2189                 return ret;
2190
2191         crat_table->length += cache_mem_filled;
2192         crat_table->total_entries += num_of_cache_entries;
2193         avail_size -= cache_mem_filled;
2194
2195         /* Fill in Subtype: IO_LINKS
2196          * Only direct links are added here, i.e. the link from the GPU
2197          * to its NUMA node. Indirect links are added by userspace.
2198          */
2199         sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
2200                 cache_mem_filled);
2201         ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
2202                 (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
2203
2204         if (ret < 0)
2205                 return ret;
2206
2207         crat_table->length += sub_type_hdr->length;
2208         crat_table->total_entries++;
2209
2210
2211         /* Fill in Subtype: IO_LINKS
2212          * Direct links from this GPU to other GPUs through xGMI.
2213          * We will loop over the GPUs that have already been processed
2214          * (those with a lower proximity_domain value) and add a link for
2215          * every GPU in the same hive (from this GPU to the other GPU).
2216          * The reversed iolink (from the other GPU to this GPU) is added
2217          * in kfd_parse_subtype_iolink.
2218          */
2219         if (kdev->hive_id) {
2220                 for (nid = 0; nid < proximity_domain; ++nid) {
2221                         peer_dev = kfd_topology_device_by_proximity_domain(nid);
2222                         if (!peer_dev || !peer_dev->gpu)
2223                                 continue;
2224                         if (peer_dev->gpu->hive_id != kdev->hive_id)
2225                                 continue;
2226                         sub_type_hdr = (typeof(sub_type_hdr))(
2227                                 (char *)sub_type_hdr +
2228                                 sizeof(struct crat_subtype_iolink));
2229                         ret = kfd_fill_gpu_xgmi_link_to_gpu(
2230                                 &avail_size, kdev, peer_dev->gpu,
2231                                 (struct crat_subtype_iolink *)sub_type_hdr,
2232                                 proximity_domain, nid);
2233                         if (ret < 0)
2234                                 return ret;
2235                         crat_table->length += sub_type_hdr->length;
2236                         crat_table->total_entries++;
2237                 }
2238         }
2239         *size = crat_table->length;
2240         pr_info("Virtual CRAT table created for GPU\n");
2241
2242         return ret;
2243 }
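
/* Resulting GPU VCRAT layout (illustrative):
 *
 *      +------------------------------+
 *      | crat_header                  |
 *      | computeunit subtype          |
 *      | memory subtype               |
 *      | cache subtypes (0..n)        |
 *      | iolink subtype (GPU to CPU)  |
 *      | iolink subtypes (xGMI peers) |
 *      +------------------------------+
 */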
2244
2245 /* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
2246  *              creates a Virtual CRAT (VCRAT) image
2247  *
2248  * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
2249  *
2250  *      @crat_image: VCRAT image created because ACPI does not have a
2251  *                   CRAT for this device
2252  *      @size: [OUT] size of virtual crat_image
2253  *      @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
2254  *              COMPUTE_UNIT_GPU - Create VCRAT for GPU
2255  *              (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
2256  *                      -- this option is not currently implemented.
2257  *                      The assumption is that all AMD APUs will have CRAT
2258  *      @kdev: Valid kfd_device required if flags contain COMPUTE_UNIT_GPU
2259  *
2260  *      Return 0 if successful else return -ve value
2261  */
2262 int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
2263                                   int flags, struct kfd_dev *kdev,
2264                                   uint32_t proximity_domain)
2265 {
2266         void *pcrat_image = NULL;
2267         int ret = 0, num_nodes;
2268         size_t dyn_size;
2269
2270         if (!crat_image)
2271                 return -EINVAL;
2272
2273         *crat_image = NULL;
2274
2275         /* Size the CPU Virtual CRAT based on the number of online NUMA
2276          * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT
2277          * image. This should cover all current conditions; a check is in
2278          * place so we never write beyond the allocated size for GPUs.
2279          */
2280         switch (flags) {
2281         case COMPUTE_UNIT_CPU:
2282                 num_nodes = num_online_nodes();
2283                 dyn_size = sizeof(struct crat_header) +
2284                         num_nodes * (sizeof(struct crat_subtype_computeunit) +
2285                         sizeof(struct crat_subtype_memory) +
2286                         (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
2287                 pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
2288                 if (!pcrat_image)
2289                         return -ENOMEM;
2290                 *size = dyn_size;
2291                 pr_debug("CRAT size is %zu\n", dyn_size);
2292                 ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
2293                 break;
2294         case COMPUTE_UNIT_GPU:
2295                 if (!kdev)
2296                         return -EINVAL;
2297                 pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
2298                 if (!pcrat_image)
2299                         return -ENOMEM;
2300                 *size = VCRAT_SIZE_FOR_GPU;
2301                 ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
2302                                                  proximity_domain);
2303                 break;
2304         case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
2305                 /* TODO: */
2306                 ret = -EINVAL;
2307                 pr_err("VCRAT not implemented for APU\n");
2308                 break;
2309         default:
2310                 ret = -EINVAL;
2311         }
2312
2313         if (!ret)
2314                 *crat_image = pcrat_image;
2315         else
2316                 kvfree(pcrat_image);
2317
2318         return ret;
2319 }
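
/* Typical usage (illustrative sketch; arguments are assumptions for the
 * CPU case):
 *
 *      void *crat_image = NULL;
 *      size_t image_size = 0;
 *
 *      if (!kfd_create_crat_image_virtual(&crat_image, &image_size,
 *                                         COMPUTE_UNIT_CPU, NULL, 0))
 *              ...parse crat_image into topology devices...
 *      kfd_destroy_crat_image(crat_image);
 */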
2320
2321
2322 /* kfd_destroy_crat_image
2323  *
2324  *      @crat_image: [IN] - crat_image from kfd_create_crat_image_xxx(..)
2325  *
2326  */
2327 void kfd_destroy_crat_image(void *crat_image)
2328 {
2329         kvfree(crat_image);
2330 }