drm/amdgpu/discovery: add additional validation
[linux-2.6-microblaze.git] / drivers / gpu / drm / amd / amdgpu / amdgpu_discovery.c
1 /*
2  * Copyright 2018 Advanced Micro Devices, Inc.
3  *
4  * Permission is hereby granted, free of charge, to any person obtaining a
5  * copy of this software and associated documentation files (the "Software"),
6  * to deal in the Software without restriction, including without limitation
7  * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8  * and/or sell copies of the Software, and to permit persons to whom the
9  * Software is furnished to do so, subject to the following conditions:
10  *
11  * The above copyright notice and this permission notice shall be included in
12  * all copies or substantial portions of the Software.
13  *
14  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
15  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
17  * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
18  * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
19  * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
20  * OTHER DEALINGS IN THE SOFTWARE.
21  *
22  */
23
24 #include <linux/firmware.h>
25
26 #include "amdgpu.h"
27 #include "amdgpu_discovery.h"
28 #include "soc15_hw_ip.h"
29 #include "discovery.h"
30
31 #include "soc15.h"
32 #include "gfx_v9_0.h"
33 #include "gmc_v9_0.h"
34 #include "df_v1_7.h"
35 #include "df_v3_6.h"
36 #include "nbio_v6_1.h"
37 #include "nbio_v7_0.h"
38 #include "nbio_v7_4.h"
39 #include "hdp_v4_0.h"
40 #include "vega10_ih.h"
41 #include "vega20_ih.h"
42 #include "sdma_v4_0.h"
43 #include "uvd_v7_0.h"
44 #include "vce_v4_0.h"
45 #include "vcn_v1_0.h"
46 #include "vcn_v2_5.h"
47 #include "jpeg_v2_5.h"
48 #include "smuio_v9_0.h"
49 #include "gmc_v10_0.h"
50 #include "gfxhub_v2_0.h"
51 #include "mmhub_v2_0.h"
52 #include "nbio_v2_3.h"
53 #include "nbio_v7_2.h"
54 #include "hdp_v5_0.h"
55 #include "nv.h"
56 #include "navi10_ih.h"
57 #include "gfx_v10_0.h"
58 #include "sdma_v5_0.h"
59 #include "sdma_v5_2.h"
60 #include "vcn_v2_0.h"
61 #include "jpeg_v2_0.h"
62 #include "vcn_v3_0.h"
63 #include "jpeg_v3_0.h"
64 #include "amdgpu_vkms.h"
65 #include "mes_v10_1.h"
66 #include "smuio_v11_0.h"
67 #include "smuio_v11_0_6.h"
68 #include "smuio_v13_0.h"
69
70 #define FIRMWARE_IP_DISCOVERY "amdgpu/ip_discovery.bin"
71 MODULE_FIRMWARE(FIRMWARE_IP_DISCOVERY);
72
73 #define mmRCC_CONFIG_MEMSIZE    0xde3
74 #define mmMM_INDEX              0x0
75 #define mmMM_INDEX_HI           0x6
76 #define mmMM_DATA               0x1
77
/* Human-readable names for the discovery-table hardware IDs, indexed by
 * *_HWID.  IDs without an entry remain NULL.
 */
static const char *hw_id_names[HW_ID_MAX] = {
	[MP1_HWID]		= "MP1",
	[MP2_HWID]		= "MP2",
	[THM_HWID]		= "THM",
	[SMUIO_HWID]		= "SMUIO",
	[FUSE_HWID]		= "FUSE",
	[CLKA_HWID]		= "CLKA",
	[PWR_HWID]		= "PWR",
	[GC_HWID]		= "GC",
	[UVD_HWID]		= "UVD",
	[AUDIO_AZ_HWID]		= "AUDIO_AZ",
	[ACP_HWID]		= "ACP",
	[DCI_HWID]		= "DCI",
	[DMU_HWID]		= "DMU",
	[DCO_HWID]		= "DCO",
	[DIO_HWID]		= "DIO",
	[XDMA_HWID]		= "XDMA",
	[DCEAZ_HWID]		= "DCEAZ",
	[DAZ_HWID]		= "DAZ",
	[SDPMUX_HWID]		= "SDPMUX",
	[NTB_HWID]		= "NTB",
	[IOHC_HWID]		= "IOHC",
	[L2IMU_HWID]		= "L2IMU",
	[VCE_HWID]		= "VCE",
	[MMHUB_HWID]		= "MMHUB",
	[ATHUB_HWID]		= "ATHUB",
	[DBGU_NBIO_HWID]	= "DBGU_NBIO",
	[DFX_HWID]		= "DFX",
	[DBGU0_HWID]		= "DBGU0",
	[DBGU1_HWID]		= "DBGU1",
	[OSSSYS_HWID]		= "OSSSYS",
	[HDP_HWID]		= "HDP",
	[SDMA0_HWID]		= "SDMA0",
	[SDMA1_HWID]		= "SDMA1",
	[SDMA2_HWID]		= "SDMA2",
	[SDMA3_HWID]		= "SDMA3",
	[ISP_HWID]		= "ISP",
	[DBGU_IO_HWID]		= "DBGU_IO",
	[DF_HWID]		= "DF",
	[CLKB_HWID]		= "CLKB",
	[FCH_HWID]		= "FCH",
	[DFX_DAP_HWID]		= "DFX_DAP",
	[L1IMU_PCIE_HWID]	= "L1IMU_PCIE",
	[L1IMU_NBIF_HWID]	= "L1IMU_NBIF",
	[L1IMU_IOAGR_HWID]	= "L1IMU_IOAGR",
	[L1IMU3_HWID]		= "L1IMU3",
	[L1IMU4_HWID]		= "L1IMU4",
	[L1IMU5_HWID]		= "L1IMU5",
	[L1IMU6_HWID]		= "L1IMU6",
	[L1IMU7_HWID]		= "L1IMU7",
	[L1IMU8_HWID]		= "L1IMU8",
	[L1IMU9_HWID]		= "L1IMU9",
	[L1IMU10_HWID]		= "L1IMU10",
	[L1IMU11_HWID]		= "L1IMU11",
	[L1IMU12_HWID]		= "L1IMU12",
	[L1IMU13_HWID]		= "L1IMU13",
	[L1IMU14_HWID]		= "L1IMU14",
	[L1IMU15_HWID]		= "L1IMU15",
	[WAFLC_HWID]		= "WAFLC",
	[FCH_USB_PD_HWID]	= "FCH_USB_PD",
	[PCIE_HWID]		= "PCIE",
	[PCS_HWID]		= "PCS",
	[DDCL_HWID]		= "DDCL",
	[SST_HWID]		= "SST",
	[IOAGR_HWID]		= "IOAGR",
	[NBIF_HWID]		= "NBIF",
	[IOAPIC_HWID]		= "IOAPIC",
	[SYSTEMHUB_HWID]	= "SYSTEMHUB",
	[NTBCCP_HWID]		= "NTBCCP",
	[UMC_HWID]		= "UMC",
	[SATA_HWID]		= "SATA",
	[USB_HWID]		= "USB",
	[CCXSEC_HWID]		= "CCXSEC",
	[XGMI_HWID]		= "XGMI",
	[XGBE_HWID]		= "XGBE",
	[MP0_HWID]		= "MP0",
};
155
/* Translation from the driver's internal HWIP enumeration to the hardware
 * IDs used in the discovery table.  Note NBIO_HWIP and NBIF_HWIP both map
 * to NBIF_HWID.
 */
static int hw_id_map[MAX_HWIP] = {
	[GC_HWIP]	= GC_HWID,
	[HDP_HWIP]	= HDP_HWID,
	[SDMA0_HWIP]	= SDMA0_HWID,
	[SDMA1_HWIP]	= SDMA1_HWID,
	[SDMA2_HWIP]	= SDMA2_HWID,
	[SDMA3_HWIP]	= SDMA3_HWID,
	[MMHUB_HWIP]	= MMHUB_HWID,
	[ATHUB_HWIP]	= ATHUB_HWID,
	[NBIO_HWIP]	= NBIF_HWID,
	[MP0_HWIP]	= MP0_HWID,
	[MP1_HWIP]	= MP1_HWID,
	[UVD_HWIP]	= UVD_HWID,
	[VCE_HWIP]	= VCE_HWID,
	[DF_HWIP]	= DF_HWID,
	[DCE_HWIP]	= DMU_HWID,
	[OSSSYS_HWIP]	= OSSSYS_HWID,
	[SMUIO_HWIP]	= SMUIO_HWID,
	[PWR_HWIP]	= PWR_HWID,
	[NBIF_HWIP]	= NBIF_HWID,
	[THM_HWIP]	= THM_HWID,
	[CLK_HWIP]	= CLKA_HWID,
	[UMC_HWIP]	= UMC_HWID,
	[XGMI_HWIP]	= XGMI_HWID,
	[DCI_HWIP]	= DCI_HWID,
};
182
183 static int amdgpu_discovery_read_binary_from_vram(struct amdgpu_device *adev, uint8_t *binary)
184 {
185         uint64_t vram_size = (uint64_t)RREG32(mmRCC_CONFIG_MEMSIZE) << 20;
186         uint64_t pos = vram_size - DISCOVERY_TMR_OFFSET;
187
188         amdgpu_device_vram_access(adev, pos, (uint32_t *)binary,
189                                   adev->mman.discovery_tmr_size, false);
190         return 0;
191 }
192
193 static int amdgpu_discovery_read_binary_from_file(struct amdgpu_device *adev, uint8_t *binary)
194 {
195         const struct firmware *fw;
196         const char *fw_name;
197         int r;
198
199         switch (amdgpu_discovery) {
200         case 2:
201                 fw_name = FIRMWARE_IP_DISCOVERY;
202                 break;
203         default:
204                 dev_warn(adev->dev, "amdgpu_discovery is not set properly\n");
205                 return -EINVAL;
206         }
207
208         r = request_firmware(&fw, fw_name, adev->dev);
209         if (r) {
210                 dev_err(adev->dev, "can't load firmware \"%s\"\n",
211                         fw_name);
212                 return r;
213         }
214
215         memcpy((u8 *)binary, (u8 *)fw->data, adev->mman.discovery_tmr_size);
216         release_firmware(fw);
217
218         return 0;
219 }
220
/* Simple additive checksum over @size bytes; the sum naturally wraps in the
 * 16-bit accumulator, matching the format used by the discovery tables.
 */
static uint16_t amdgpu_discovery_calculate_checksum(uint8_t *data, uint32_t size)
{
	uint16_t sum = 0;
	uint32_t idx;

	for (idx = 0; idx < size; idx++)
		sum += data[idx];

	return sum;
}
231
/* True when the additive checksum of @data matches @expected. */
static inline bool amdgpu_discovery_verify_checksum(uint8_t *data, uint32_t size,
						    uint16_t expected)
{
	return amdgpu_discovery_calculate_checksum(data, size) == expected;
}
237
238 static inline bool amdgpu_discovery_verify_binary_signature(uint8_t *binary)
239 {
240         struct binary_header *bhdr;
241         bhdr = (struct binary_header *)binary;
242
243         return (le32_to_cpu(bhdr->binary_signature) == BINARY_SIGNATURE);
244 }
245
246 static void amdgpu_discovery_harvest_config_quirk(struct amdgpu_device *adev)
247 {
248         /*
249          * So far, apply this quirk only on those Navy Flounder boards which
250          * have a bad harvest table of VCN config.
251          */
252         if ((adev->ip_versions[UVD_HWIP][1] == IP_VERSION(3, 0, 1)) &&
253                 (adev->ip_versions[GC_HWIP][0] == IP_VERSION(10, 3, 2))) {
254                 switch (adev->pdev->revision) {
255                 case 0xC1:
256                 case 0xC2:
257                 case 0xC3:
258                 case 0xC5:
259                 case 0xC7:
260                 case 0xCF:
261                 case 0xDF:
262                         adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
263                         break;
264                 default:
265                         break;
266                 }
267         }
268 }
269
270 static int amdgpu_discovery_init(struct amdgpu_device *adev)
271 {
272         struct table_info *info;
273         struct binary_header *bhdr;
274         uint16_t offset;
275         uint16_t size;
276         uint16_t checksum;
277         int r;
278
279         adev->mman.discovery_tmr_size = DISCOVERY_TMR_SIZE;
280         adev->mman.discovery_bin = kzalloc(adev->mman.discovery_tmr_size, GFP_KERNEL);
281         if (!adev->mman.discovery_bin)
282                 return -ENOMEM;
283
284         r = amdgpu_discovery_read_binary_from_vram(adev, adev->mman.discovery_bin);
285         if (r) {
286                 dev_err(adev->dev, "failed to read ip discovery binary from vram\n");
287                 r = -EINVAL;
288                 goto out;
289         }
290
291         if (!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
292                 dev_warn(adev->dev, "get invalid ip discovery binary signature from vram\n");
293                 /* retry read ip discovery binary from file */
294                 r = amdgpu_discovery_read_binary_from_file(adev, adev->mman.discovery_bin);
295                 if (r) {
296                         dev_err(adev->dev, "failed to read ip discovery binary from file\n");
297                         r = -EINVAL;
298                         goto out;
299                 }
300                 /* check the ip discovery binary signature */
301                 if(!amdgpu_discovery_verify_binary_signature(adev->mman.discovery_bin)) {
302                         dev_warn(adev->dev, "get invalid ip discovery binary signature from file\n");
303                         r = -EINVAL;
304                         goto out;
305                 }
306         }
307
308         bhdr = (struct binary_header *)adev->mman.discovery_bin;
309
310         offset = offsetof(struct binary_header, binary_checksum) +
311                 sizeof(bhdr->binary_checksum);
312         size = le16_to_cpu(bhdr->binary_size) - offset;
313         checksum = le16_to_cpu(bhdr->binary_checksum);
314
315         if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
316                                               size, checksum)) {
317                 dev_err(adev->dev, "invalid ip discovery binary checksum\n");
318                 r = -EINVAL;
319                 goto out;
320         }
321
322         info = &bhdr->table_list[IP_DISCOVERY];
323         offset = le16_to_cpu(info->offset);
324         checksum = le16_to_cpu(info->checksum);
325
326         if (offset) {
327                 struct ip_discovery_header *ihdr =
328                         (struct ip_discovery_header *)(adev->mman.discovery_bin + offset);
329                 if (le32_to_cpu(ihdr->signature) != DISCOVERY_TABLE_SIGNATURE) {
330                         dev_err(adev->dev, "invalid ip discovery data table signature\n");
331                         r = -EINVAL;
332                         goto out;
333                 }
334
335                 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
336                                                       le16_to_cpu(ihdr->size), checksum)) {
337                         dev_err(adev->dev, "invalid ip discovery data table checksum\n");
338                         r = -EINVAL;
339                         goto out;
340                 }
341         }
342
343         info = &bhdr->table_list[GC];
344         offset = le16_to_cpu(info->offset);
345         checksum = le16_to_cpu(info->checksum);
346
347         if (offset) {
348                 struct gpu_info_header *ghdr =
349                         (struct gpu_info_header *)(adev->mman.discovery_bin + offset);
350
351                 if (le32_to_cpu(ghdr->table_id) != GC_TABLE_ID) {
352                         dev_err(adev->dev, "invalid ip discovery gc table id\n");
353                         r = -EINVAL;
354                         goto out;
355                 }
356
357                 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
358                                                       le32_to_cpu(ghdr->size), checksum)) {
359                         dev_err(adev->dev, "invalid gc data table checksum\n");
360                         r = -EINVAL;
361                         goto out;
362                 }
363         }
364
365         info = &bhdr->table_list[HARVEST_INFO];
366         offset = le16_to_cpu(info->offset);
367         checksum = le16_to_cpu(info->checksum);
368
369         if (offset) {
370                 struct harvest_info_header *hhdr =
371                         (struct harvest_info_header *)(adev->mman.discovery_bin + offset);
372
373                 if (le32_to_cpu(hhdr->signature) != HARVEST_TABLE_SIGNATURE) {
374                         dev_err(adev->dev, "invalid ip discovery harvest table signature\n");
375                         r = -EINVAL;
376                         goto out;
377                 }
378
379                 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
380                                                       sizeof(struct harvest_table), checksum)) {
381                         dev_err(adev->dev, "invalid harvest data table checksum\n");
382                         r = -EINVAL;
383                         goto out;
384                 }
385         }
386
387         info = &bhdr->table_list[VCN_INFO];
388         offset = le16_to_cpu(info->offset);
389         checksum = le16_to_cpu(info->checksum);
390
391         if (offset) {
392                 struct vcn_info_header *vhdr =
393                         (struct vcn_info_header *)(adev->mman.discovery_bin + offset);
394
395                 if (le32_to_cpu(vhdr->table_id) != VCN_INFO_TABLE_ID) {
396                         dev_err(adev->dev, "invalid ip discovery vcn table id\n");
397                         r = -EINVAL;
398                         goto out;
399                 }
400
401                 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
402                                                       le32_to_cpu(vhdr->size_bytes), checksum)) {
403                         dev_err(adev->dev, "invalid vcn data table checksum\n");
404                         r = -EINVAL;
405                         goto out;
406                 }
407         }
408
409         info = &bhdr->table_list[MALL_INFO];
410         offset = le16_to_cpu(info->offset);
411         checksum = le16_to_cpu(info->checksum);
412
413         if (0 && offset) {
414                 struct mall_info_header *mhdr =
415                         (struct mall_info_header *)(adev->mman.discovery_bin + offset);
416
417                 if (le32_to_cpu(mhdr->table_id) != MALL_INFO_TABLE_ID) {
418                         dev_err(adev->dev, "invalid ip discovery mall table id\n");
419                         r = -EINVAL;
420                         goto out;
421                 }
422
423                 if (!amdgpu_discovery_verify_checksum(adev->mman.discovery_bin + offset,
424                                                       le32_to_cpu(mhdr->size_bytes), checksum)) {
425                         dev_err(adev->dev, "invalid mall data table checksum\n");
426                         r = -EINVAL;
427                         goto out;
428                 }
429         }
430
431         return 0;
432
433 out:
434         kfree(adev->mman.discovery_bin);
435         adev->mman.discovery_bin = NULL;
436
437         return r;
438 }
439
440 static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev);
441
/* Tear down the discovery sysfs hierarchy and free the cached discovery
 * blob; safe to call even if init failed (kfree(NULL) is a no-op).
 */
void amdgpu_discovery_fini(struct amdgpu_device *adev)
{
	amdgpu_discovery_sysfs_fini(adev);
	kfree(adev->mman.discovery_bin);
	adev->mman.discovery_bin = NULL;
}
448
449 static int amdgpu_discovery_validate_ip(const struct ip *ip)
450 {
451         if (ip->number_instance >= HWIP_MAX_INSTANCE) {
452                 DRM_ERROR("Unexpected number_instance (%d) from ip discovery blob\n",
453                           ip->number_instance);
454                 return -EINVAL;
455         }
456         if (le16_to_cpu(ip->hw_id) >= HW_ID_MAX) {
457                 DRM_ERROR("Unexpected hw_id (%d) from ip discovery blob\n",
458                           le16_to_cpu(ip->hw_id));
459                 return -EINVAL;
460         }
461
462         return 0;
463 }
464
/* Walk every IP entry on every die in the discovery table and apply the
 * per-IP harvest bits: count harvested VCN instances (recording which one
 * in adev->vcn.harvest_config) and mark DMU as harvested in
 * adev->harvest_ip_mask.
 */
static void amdgpu_discovery_read_harvest_bit_per_ip(struct amdgpu_device *adev,
						uint32_t *vcn_harvest_count)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset, ip_offset, num_dies, num_ips;
	int i, j;

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	/* scan harvest bit of all IP data structures */
	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		/* IP entries start immediately after the die header. */
		ip_offset = die_offset + sizeof(*dhdr);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			/* skip malformed entries rather than trusting them */
			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			if (le16_to_cpu(ip->harvest) == 1) {
				switch (le16_to_cpu(ip->hw_id)) {
				case VCN_HWID:
					(*vcn_harvest_count)++;
					if (ip->number_instance == 0)
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
					else
						adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
					break;
				case DMU_HWID:
					adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
					break;
				default:
					break;
				}
			}
next_ip:
			/* entries are variable length: header + base-address array */
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}
}
514
515 static void amdgpu_discovery_read_from_harvest_table(struct amdgpu_device *adev,
516                                                      uint32_t *vcn_harvest_count,
517                                                      uint32_t *umc_harvest_count)
518 {
519         struct binary_header *bhdr;
520         struct harvest_table *harvest_info;
521         u16 offset;
522         int i;
523
524         bhdr = (struct binary_header *)adev->mman.discovery_bin;
525         offset = le16_to_cpu(bhdr->table_list[HARVEST_INFO].offset);
526
527         if (!offset) {
528                 dev_err(adev->dev, "invalid harvest table offset\n");
529                 return;
530         }
531
532         harvest_info = (struct harvest_table *)(adev->mman.discovery_bin + offset);
533
534         for (i = 0; i < 32; i++) {
535                 if (le16_to_cpu(harvest_info->list[i].hw_id) == 0)
536                         break;
537
538                 switch (le16_to_cpu(harvest_info->list[i].hw_id)) {
539                 case VCN_HWID:
540                         (*vcn_harvest_count)++;
541                         if (harvest_info->list[i].number_instance == 0)
542                                 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN0;
543                         else
544                                 adev->vcn.harvest_config |= AMDGPU_VCN_HARVEST_VCN1;
545                         break;
546                 case DMU_HWID:
547                         adev->harvest_ip_mask |= AMD_HARVEST_IP_DMU_MASK;
548                         break;
549                 case UMC_HWID:
550                         (*umc_harvest_count)++;
551                         break;
552                 default:
553                         break;
554                 }
555         }
556 }
557
558 /* ================================================== */
559
/* One IP instance, exposed as a sysfs directory with one file per field. */
struct ip_hw_instance {
	struct kobject kobj; /* ip_discovery/die/#die/#hw_id/#instance/<attrs...> */

	int hw_id;
	u8  num_instance;
	u8  major, minor, revision;
	u8  harvest;

	int num_base_addresses;
	u32 base_addr[];	/* flexible array, num_base_addresses entries */
};

/* One hardware ID: a kset grouping all instances of that ID on a die. */
struct ip_hw_id {
	struct kset hw_id_kset;  /* ip_discovery/die/#die/#hw_id/, contains ip_hw_instance */
	int hw_id;
};

/* One die: a kset grouping all hw-id directories on that die. */
struct ip_die_entry {
	struct kset ip_kset;     /* ip_discovery/die/#die/, contains ip_hw_id  */
	u16 num_ips;
};

/* -------------------------------------------------- */

/* sysfs attribute type carrying a typed show() callback for ip_hw_instance */
struct ip_hw_instance_attr {
	struct attribute attr;
	ssize_t (*show)(struct ip_hw_instance *ip_hw_instance, char *buf);
};
588
/* sysfs: #instance/hw_id */
static ssize_t hw_id_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->hw_id);
}

/* sysfs: #instance/num_instance */
static ssize_t num_instance_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_instance);
}

/* sysfs: #instance/major */
static ssize_t major_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->major);
}

/* sysfs: #instance/minor */
static ssize_t minor_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->minor);
}

/* sysfs: #instance/revision */
static ssize_t revision_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->revision);
}

/* sysfs: #instance/harvest — printed as a single hex digit */
static ssize_t harvest_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "0x%01X\n", ip_hw_instance->harvest);
}

/* sysfs: #instance/num_base_addresses */
static ssize_t num_base_addresses_show(struct ip_hw_instance *ip_hw_instance, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_hw_instance->num_base_addresses);
}
623
624 static ssize_t base_addr_show(struct ip_hw_instance *ip_hw_instance, char *buf)
625 {
626         ssize_t res, at;
627         int ii;
628
629         for (res = at = ii = 0; ii < ip_hw_instance->num_base_addresses; ii++) {
630                 /* Here we satisfy the condition that, at + size <= PAGE_SIZE.
631                  */
632                 if (at + 12 > PAGE_SIZE)
633                         break;
634                 res = sysfs_emit_at(buf, at, "0x%08X\n",
635                                     ip_hw_instance->base_addr[ii]);
636                 if (res <= 0)
637                         break;
638                 at += res;
639         }
640
641         return res < 0 ? res : at;
642 }
643
/* Read-only attributes exposed for every IP instance directory. */
static struct ip_hw_instance_attr ip_hw_attr[] = {
	__ATTR_RO(hw_id),
	__ATTR_RO(num_instance),
	__ATTR_RO(major),
	__ATTR_RO(minor),
	__ATTR_RO(revision),
	__ATTR_RO(harvest),
	__ATTR_RO(num_base_addresses),
	__ATTR_RO(base_addr),
};

/* +1 leaves room for the NULL terminator; presumably populated from
 * ip_hw_attr elsewhere in this file (zero-initialized here).
 */
static struct attribute *ip_hw_instance_attrs[ARRAY_SIZE(ip_hw_attr) + 1];
ATTRIBUTE_GROUPS(ip_hw_instance);

#define to_ip_hw_instance(x) container_of(x, struct ip_hw_instance, kobj)
#define to_ip_hw_instance_attr(x) container_of(x, struct ip_hw_instance_attr, attr)
660
661 static ssize_t ip_hw_instance_attr_show(struct kobject *kobj,
662                                         struct attribute *attr,
663                                         char *buf)
664 {
665         struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);
666         struct ip_hw_instance_attr *ip_hw_attr = to_ip_hw_instance_attr(attr);
667
668         if (!ip_hw_attr->show)
669                 return -EIO;
670
671         return ip_hw_attr->show(ip_hw_instance, buf);
672 }
673
static const struct sysfs_ops ip_hw_instance_sysfs_ops = {
	.show = ip_hw_instance_attr_show,
};

/* kobject release: frees the ip_hw_instance that embeds the kobject */
static void ip_hw_instance_release(struct kobject *kobj)
{
	struct ip_hw_instance *ip_hw_instance = to_ip_hw_instance(kobj);

	kfree(ip_hw_instance);
}

static struct kobj_type ip_hw_instance_ktype = {
	.release = ip_hw_instance_release,
	.sysfs_ops = &ip_hw_instance_sysfs_ops,
	.default_groups = ip_hw_instance_groups,
};
690
691 /* -------------------------------------------------- */
692
#define to_ip_hw_id(x)  container_of(to_kset(x), struct ip_hw_id, hw_id_kset)

/* kset release: the instance list should already be empty at this point */
static void ip_hw_id_release(struct kobject *kobj)
{
	struct ip_hw_id *ip_hw_id = to_ip_hw_id(kobj);

	if (!list_empty(&ip_hw_id->hw_id_kset.list))
		DRM_ERROR("ip_hw_id->hw_id_kset is not empty");
	kfree(ip_hw_id);
}

static struct kobj_type ip_hw_id_ktype = {
	.release = ip_hw_id_release,
	.sysfs_ops = &kobj_sysfs_ops,
};
708
709 /* -------------------------------------------------- */
710
static void die_kobj_release(struct kobject *kobj);
static void ip_disc_release(struct kobject *kobj);

/* sysfs attribute type carrying a typed show() callback for ip_die_entry */
struct ip_die_entry_attribute {
	struct attribute attr;
	ssize_t (*show)(struct ip_die_entry *ip_die_entry, char *buf);
};

#define to_ip_die_entry_attr(x)  container_of(x, struct ip_die_entry_attribute, attr)

/* sysfs: die/#die/num_ips */
static ssize_t num_ips_show(struct ip_die_entry *ip_die_entry, char *buf)
{
	return sysfs_emit(buf, "%d\n", ip_die_entry->num_ips);
}

/* If there are more ip_die_entry attrs, other than the number of IPs,
 * we can make this into an array of attrs, and then initialize
 * ip_die_entry_attrs in a loop.
 */
static struct ip_die_entry_attribute num_ips_attr =
	__ATTR_RO(num_ips);

static struct attribute *ip_die_entry_attrs[] = {
	&num_ips_attr.attr,
	NULL,
};
ATTRIBUTE_GROUPS(ip_die_entry); /* ip_die_entry_groups */

#define to_ip_die_entry(x) container_of(to_kset(x), struct ip_die_entry, ip_kset)
740
741 static ssize_t ip_die_entry_attr_show(struct kobject *kobj,
742                                       struct attribute *attr,
743                                       char *buf)
744 {
745         struct ip_die_entry_attribute *ip_die_entry_attr = to_ip_die_entry_attr(attr);
746         struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
747
748         if (!ip_die_entry_attr->show)
749                 return -EIO;
750
751         return ip_die_entry_attr->show(ip_die_entry, buf);
752 }
753
754 static void ip_die_entry_release(struct kobject *kobj)
755 {
756         struct ip_die_entry *ip_die_entry = to_ip_die_entry(kobj);
757
758         if (!list_empty(&ip_die_entry->ip_kset.list))
759                 DRM_ERROR("ip_die_entry->ip_kset is not empty");
760         kfree(ip_die_entry);
761 }
762
static const struct sysfs_ops ip_die_entry_sysfs_ops = {
	.show = ip_die_entry_attr_show,
};

static struct kobj_type ip_die_entry_ktype = {
	.release = ip_die_entry_release,
	.sysfs_ops = &ip_die_entry_sysfs_ops,
	.default_groups = ip_die_entry_groups,
};

/* kobject type for the "die/" kset; released via die_kobj_release() */
static struct kobj_type die_kobj_ktype = {
	.release = die_kobj_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* kobject type for the ip_discovery root; released via ip_disc_release() */
static struct kobj_type ip_discovery_ktype = {
	.release = ip_disc_release,
	.sysfs_ops = &kobj_sysfs_ops,
};

/* Root of the ip_discovery sysfs hierarchy for one device. */
struct ip_discovery_top {
	struct kobject kobj;    /* ip_discovery/ */
	struct kset die_kset;   /* ip_discovery/die/, contains ip_die_entry */
	struct amdgpu_device *adev;
};
788
789 static void die_kobj_release(struct kobject *kobj)
790 {
791         struct ip_discovery_top *ip_top = container_of(to_kset(kobj),
792                                                        struct ip_discovery_top,
793                                                        die_kset);
794         if (!list_empty(&ip_top->die_kset.list))
795                 DRM_ERROR("ip_top->die_kset is not empty");
796 }
797
798 static void ip_disc_release(struct kobject *kobj)
799 {
800         struct ip_discovery_top *ip_top = container_of(kobj, struct ip_discovery_top,
801                                                        kobj);
802         struct amdgpu_device *adev = ip_top->adev;
803
804         adev->ip_top = NULL;
805         kfree(ip_top);
806 }
807
808 static int amdgpu_discovery_sysfs_ips(struct amdgpu_device *adev,
809                                       struct ip_die_entry *ip_die_entry,
810                                       const size_t _ip_offset, const int num_ips)
811 {
812         int ii, jj, kk, res;
813
814         DRM_DEBUG("num_ips:%d", num_ips);
815
816         /* Find all IPs of a given HW ID, and add their instance to
817          * #die/#hw_id/#instance/<attributes>
818          */
819         for (ii = 0; ii < HW_ID_MAX; ii++) {
820                 struct ip_hw_id *ip_hw_id = NULL;
821                 size_t ip_offset = _ip_offset;
822
823                 for (jj = 0; jj < num_ips; jj++) {
824                         struct ip *ip;
825                         struct ip_hw_instance *ip_hw_instance;
826
827                         ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
828                         if (amdgpu_discovery_validate_ip(ip) ||
829                             le16_to_cpu(ip->hw_id) != ii)
830                                 goto next_ip;
831
832                         DRM_DEBUG("match:%d @ ip_offset:%zu", ii, ip_offset);
833
834                         /* We have a hw_id match; register the hw
835                          * block if not yet registered.
836                          */
837                         if (!ip_hw_id) {
838                                 ip_hw_id = kzalloc(sizeof(*ip_hw_id), GFP_KERNEL);
839                                 if (!ip_hw_id)
840                                         return -ENOMEM;
841                                 ip_hw_id->hw_id = ii;
842
843                                 kobject_set_name(&ip_hw_id->hw_id_kset.kobj, "%d", ii);
844                                 ip_hw_id->hw_id_kset.kobj.kset = &ip_die_entry->ip_kset;
845                                 ip_hw_id->hw_id_kset.kobj.ktype = &ip_hw_id_ktype;
846                                 res = kset_register(&ip_hw_id->hw_id_kset);
847                                 if (res) {
848                                         DRM_ERROR("Couldn't register ip_hw_id kset");
849                                         kfree(ip_hw_id);
850                                         return res;
851                                 }
852                                 if (hw_id_names[ii]) {
853                                         res = sysfs_create_link(&ip_die_entry->ip_kset.kobj,
854                                                                 &ip_hw_id->hw_id_kset.kobj,
855                                                                 hw_id_names[ii]);
856                                         if (res) {
857                                                 DRM_ERROR("Couldn't create IP link %s in IP Die:%s\n",
858                                                           hw_id_names[ii],
859                                                           kobject_name(&ip_die_entry->ip_kset.kobj));
860                                         }
861                                 }
862                         }
863
864                         /* Now register its instance.
865                          */
866                         ip_hw_instance = kzalloc(struct_size(ip_hw_instance,
867                                                              base_addr,
868                                                              ip->num_base_address),
869                                                  GFP_KERNEL);
870                         if (!ip_hw_instance) {
871                                 DRM_ERROR("no memory for ip_hw_instance");
872                                 return -ENOMEM;
873                         }
874                         ip_hw_instance->hw_id = le16_to_cpu(ip->hw_id); /* == ii */
875                         ip_hw_instance->num_instance = ip->number_instance;
876                         ip_hw_instance->major = ip->major;
877                         ip_hw_instance->minor = ip->minor;
878                         ip_hw_instance->revision = ip->revision;
879                         ip_hw_instance->harvest = ip->harvest;
880                         ip_hw_instance->num_base_addresses = ip->num_base_address;
881
882                         for (kk = 0; kk < ip_hw_instance->num_base_addresses; kk++)
883                                 ip_hw_instance->base_addr[kk] = ip->base_address[kk];
884
885                         kobject_init(&ip_hw_instance->kobj, &ip_hw_instance_ktype);
886                         ip_hw_instance->kobj.kset = &ip_hw_id->hw_id_kset;
887                         res = kobject_add(&ip_hw_instance->kobj, NULL,
888                                           "%d", ip_hw_instance->num_instance);
889 next_ip:
890                         ip_offset += struct_size(ip, base_address, ip->num_base_address);
891                 }
892         }
893
894         return 0;
895 }
896
897 static int amdgpu_discovery_sysfs_recurse(struct amdgpu_device *adev)
898 {
899         struct binary_header *bhdr;
900         struct ip_discovery_header *ihdr;
901         struct die_header *dhdr;
902         struct kset *die_kset = &adev->ip_top->die_kset;
903         u16 num_dies, die_offset, num_ips;
904         size_t ip_offset;
905         int ii, res;
906
907         bhdr = (struct binary_header *)adev->mman.discovery_bin;
908         ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
909                                               le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
910         num_dies = le16_to_cpu(ihdr->num_dies);
911
912         DRM_DEBUG("number of dies: %d\n", num_dies);
913
914         for (ii = 0; ii < num_dies; ii++) {
915                 struct ip_die_entry *ip_die_entry;
916
917                 die_offset = le16_to_cpu(ihdr->die_info[ii].die_offset);
918                 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
919                 num_ips = le16_to_cpu(dhdr->num_ips);
920                 ip_offset = die_offset + sizeof(*dhdr);
921
922                 /* Add the die to the kset.
923                  *
924                  * dhdr->die_id == ii, which was checked in
925                  * amdgpu_discovery_reg_base_init().
926                  */
927
928                 ip_die_entry = kzalloc(sizeof(*ip_die_entry), GFP_KERNEL);
929                 if (!ip_die_entry)
930                         return -ENOMEM;
931
932                 ip_die_entry->num_ips = num_ips;
933
934                 kobject_set_name(&ip_die_entry->ip_kset.kobj, "%d", le16_to_cpu(dhdr->die_id));
935                 ip_die_entry->ip_kset.kobj.kset = die_kset;
936                 ip_die_entry->ip_kset.kobj.ktype = &ip_die_entry_ktype;
937                 res = kset_register(&ip_die_entry->ip_kset);
938                 if (res) {
939                         DRM_ERROR("Couldn't register ip_die_entry kset");
940                         kfree(ip_die_entry);
941                         return res;
942                 }
943
944                 amdgpu_discovery_sysfs_ips(adev, ip_die_entry, ip_offset, num_ips);
945         }
946
947         return 0;
948 }
949
/* Create the ip_discovery sysfs hierarchy for the device:
 * ip_discovery/die/#die/#hw_id/#instance/<attributes>.
 *
 * Returns 0 on success or a negative error code.  On early failure the
 * top-level kobject reference is dropped, which frees adev->ip_top via
 * ip_disc_release().
 */
static int amdgpu_discovery_sysfs_init(struct amdgpu_device *adev)
{
	struct kset *die_kset;
	int res, ii;

	adev->ip_top = kzalloc(sizeof(*adev->ip_top), GFP_KERNEL);
	if (!adev->ip_top)
		return -ENOMEM;

	adev->ip_top->adev = adev;

	res = kobject_init_and_add(&adev->ip_top->kobj, &ip_discovery_ktype,
				   &adev->dev->kobj, "ip_discovery");
	if (res) {
		DRM_ERROR("Couldn't init and add ip_discovery/");
		goto Err;
	}

	die_kset = &adev->ip_top->die_kset;
	kobject_set_name(&die_kset->kobj, "%s", "die");
	die_kset->kobj.parent = &adev->ip_top->kobj;
	die_kset->kobj.ktype = &die_kobj_ktype;
	res = kset_register(&adev->ip_top->die_kset);
	if (res) {
		DRM_ERROR("Couldn't register die_kset");
		goto Err;
	}

	/* Build the NULL-terminated attribute pointer array once. */
	for (ii = 0; ii < ARRAY_SIZE(ip_hw_attr); ii++)
		ip_hw_instance_attrs[ii] = &ip_hw_attr[ii].attr;
	ip_hw_instance_attrs[ii] = NULL;

	/* NOTE(review): a failure inside the recursion is returned without
	 * unwinding the kobjects registered so far -- confirm the caller is
	 * expected to run amdgpu_discovery_sysfs_fini() on error.
	 */
	res = amdgpu_discovery_sysfs_recurse(adev);

	return res;
Err:
	kobject_put(&adev->ip_top->kobj);
	return res;
}
989
990 /* -------------------------------------------------- */
991
992 #define list_to_kobj(el) container_of(el, struct kobject, entry)
993
/* Free all ip_hw_instance kobjects queued on @ip_hw_id's kset, then drop
 * the kset's own reference.  The list lock is released around each
 * kobject_put() because the put may run the instance release callback;
 * each entry is unlinked first so the backwards-safe walk stays valid.
 */
static void amdgpu_discovery_sysfs_ip_hw_free(struct ip_hw_id *ip_hw_id)
{
	struct list_head *el, *tmp;
	struct kset *hw_id_kset;

	hw_id_kset = &ip_hw_id->hw_id_kset;
	spin_lock(&hw_id_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &hw_id_kset->list) {
		list_del_init(el);
		spin_unlock(&hw_id_kset->list_lock);
		/* kobject is embedded in ip_hw_instance */
		kobject_put(list_to_kobj(el));
		spin_lock(&hw_id_kset->list_lock);
	}
	spin_unlock(&hw_id_kset->list_lock);
	kobject_put(&ip_hw_id->hw_id_kset.kobj);
}
1011
/* Free every ip_hw_id hanging off one die's ip_kset (recursing into the
 * per-instance teardown), then drop the die kset's own reference.  Uses
 * the same unlink-unlock-put-relock pattern as the instance-level free.
 */
static void amdgpu_discovery_sysfs_die_free(struct ip_die_entry *ip_die_entry)
{
	struct list_head *el, *tmp;
	struct kset *ip_kset;

	ip_kset = &ip_die_entry->ip_kset;
	spin_lock(&ip_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &ip_kset->list) {
		list_del_init(el);
		spin_unlock(&ip_kset->list_lock);
		amdgpu_discovery_sysfs_ip_hw_free(to_ip_hw_id(list_to_kobj(el)));
		spin_lock(&ip_kset->list_lock);
	}
	spin_unlock(&ip_kset->list_lock);
	kobject_put(&ip_die_entry->ip_kset.kobj);
}
1028
/* Tear down the whole ip_discovery sysfs hierarchy: free every die entry
 * (which recursively frees hw_ids and instances), then drop the die kset
 * and the top-level kobject.  The final put triggers ip_disc_release(),
 * which frees adev->ip_top and clears the pointer.
 */
static void amdgpu_discovery_sysfs_fini(struct amdgpu_device *adev)
{
	struct list_head *el, *tmp;
	struct kset *die_kset;

	die_kset = &adev->ip_top->die_kset;
	spin_lock(&die_kset->list_lock);
	list_for_each_prev_safe(el, tmp, &die_kset->list) {
		list_del_init(el);
		spin_unlock(&die_kset->list_lock);
		amdgpu_discovery_sysfs_die_free(to_ip_die_entry(list_to_kobj(el)));
		spin_lock(&die_kset->list_lock);
	}
	spin_unlock(&die_kset->list_lock);
	kobject_put(&adev->ip_top->die_kset.kobj);
	kobject_put(&adev->ip_top->kobj);
}
1046
1047 /* ================================================== */
1048
/**
 * amdgpu_discovery_reg_base_init - parse the IP discovery binary and
 * record per-IP register base offsets and IP versions in @adev.
 *
 * Walks every die listed in the discovery header and, for each IP entry
 * that passes amdgpu_discovery_validate_ip(), converts its base-address
 * list to CPU endianness in place, records the addresses in
 * adev->reg_offset[] and the version in adev->ip_versions[].  VCN, SDMA
 * and UMC instances are counted as a side effect.
 *
 * Returns 0 on success, or a negative error code when discovery init
 * fails or a die header carries an unexpected die_id.
 */
int amdgpu_discovery_reg_base_init(struct amdgpu_device *adev)
{
	struct binary_header *bhdr;
	struct ip_discovery_header *ihdr;
	struct die_header *dhdr;
	struct ip *ip;
	uint16_t die_offset;
	uint16_t ip_offset;
	uint16_t num_dies;
	uint16_t num_ips;
	uint8_t num_base_address;
	int hw_ip;
	int i, j, k;
	int r;

	r = amdgpu_discovery_init(adev);
	if (r) {
		DRM_ERROR("amdgpu_discovery_init failed\n");
		return r;
	}

	bhdr = (struct binary_header *)adev->mman.discovery_bin;
	ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
			le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
	num_dies = le16_to_cpu(ihdr->num_dies);

	DRM_DEBUG("number of dies: %d\n", num_dies);

	for (i = 0; i < num_dies; i++) {
		die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
		dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
		num_ips = le16_to_cpu(dhdr->num_ips);
		ip_offset = die_offset + sizeof(*dhdr);

		/* Dies must appear in order; a mismatch means the binary
		 * is malformed.
		 */
		if (le16_to_cpu(dhdr->die_id) != i) {
			DRM_ERROR("invalid die id %d, expected %d\n",
					le16_to_cpu(dhdr->die_id), i);
			return -EINVAL;
		}

		DRM_DEBUG("number of hardware IPs on die%d: %d\n",
				le16_to_cpu(dhdr->die_id), num_ips);

		for (j = 0; j < num_ips; j++) {
			ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);

			/* Entries that fail validation are skipped but still
			 * advance ip_offset by their declared size.
			 */
			if (amdgpu_discovery_validate_ip(ip))
				goto next_ip;

			num_base_address = ip->num_base_address;

			DRM_DEBUG("%s(%d) #%d v%d.%d.%d:\n",
				  hw_id_names[le16_to_cpu(ip->hw_id)],
				  le16_to_cpu(ip->hw_id),
				  ip->number_instance,
				  ip->major, ip->minor,
				  ip->revision);

			if (le16_to_cpu(ip->hw_id) == VCN_HWID) {
				/* Bit [5:0]: original revision value
				 * Bit [7:6]: en/decode capability:
				 *     0b00 : VCN function normally
				 *     0b10 : encode is disabled
				 *     0b01 : decode is disabled
				 */
				/* NOTE(review): num_vcn_inst indexes vcn_config
				 * without a bounds check -- confirm the table
				 * cannot list more VCN IPs than the array holds.
				 */
				adev->vcn.vcn_config[adev->vcn.num_vcn_inst] =
					ip->revision & 0xc0;
				ip->revision &= ~0xc0;
				adev->vcn.num_vcn_inst++;
			}
			if (le16_to_cpu(ip->hw_id) == SDMA0_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA1_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA2_HWID ||
			    le16_to_cpu(ip->hw_id) == SDMA3_HWID)
				adev->sdma.num_instances++;

			if (le16_to_cpu(ip->hw_id) == UMC_HWID)
				adev->gmc.num_umc++;

			for (k = 0; k < num_base_address; k++) {
				/*
				 * convert the endianness of base addresses in place,
				 * so that we don't need to convert them when accessing adev->reg_offset.
				 */
				ip->base_address[k] = le32_to_cpu(ip->base_address[k]);
				DRM_DEBUG("\t0x%08x\n", ip->base_address[k]);
			}

			for (hw_ip = 0; hw_ip < MAX_HWIP; hw_ip++) {
				if (hw_id_map[hw_ip] == le16_to_cpu(ip->hw_id)) {
					DRM_DEBUG("set register base offset for %s\n",
							hw_id_names[le16_to_cpu(ip->hw_id)]);
					adev->reg_offset[hw_ip][ip->number_instance] =
						ip->base_address;
					/* Instance support is somewhat inconsistent.
					 * SDMA is a good example.  Sienna cichlid has 4 total
					 * SDMA instances, each enumerated separately (HWIDs
					 * 42, 43, 68, 69).  Arcturus has 8 total SDMA instances,
					 * but they are enumerated as multiple instances of the
					 * same HWIDs (4x HWID 42, 4x HWID 43).  UMC is another
					 * example.  On most chips there are multiple instances
					 * with the same HWID.
					 */
					adev->ip_versions[hw_ip][ip->number_instance] =
						IP_VERSION(ip->major, ip->minor, ip->revision);
				}
			}

next_ip:
			ip_offset += struct_size(ip, base_address, ip->num_base_address);
		}
	}

	/* NOTE(review): sysfs setup result is ignored here -- presumably
	 * best-effort, since a sysfs failure should not fail device init.
	 */
	amdgpu_discovery_sysfs_init(adev);

	return 0;
}
1166
1167 int amdgpu_discovery_get_ip_version(struct amdgpu_device *adev, int hw_id, int number_instance,
1168                                     int *major, int *minor, int *revision)
1169 {
1170         struct binary_header *bhdr;
1171         struct ip_discovery_header *ihdr;
1172         struct die_header *dhdr;
1173         struct ip *ip;
1174         uint16_t die_offset;
1175         uint16_t ip_offset;
1176         uint16_t num_dies;
1177         uint16_t num_ips;
1178         int i, j;
1179
1180         if (!adev->mman.discovery_bin) {
1181                 DRM_ERROR("ip discovery uninitialized\n");
1182                 return -EINVAL;
1183         }
1184
1185         bhdr = (struct binary_header *)adev->mman.discovery_bin;
1186         ihdr = (struct ip_discovery_header *)(adev->mman.discovery_bin +
1187                         le16_to_cpu(bhdr->table_list[IP_DISCOVERY].offset));
1188         num_dies = le16_to_cpu(ihdr->num_dies);
1189
1190         for (i = 0; i < num_dies; i++) {
1191                 die_offset = le16_to_cpu(ihdr->die_info[i].die_offset);
1192                 dhdr = (struct die_header *)(adev->mman.discovery_bin + die_offset);
1193                 num_ips = le16_to_cpu(dhdr->num_ips);
1194                 ip_offset = die_offset + sizeof(*dhdr);
1195
1196                 for (j = 0; j < num_ips; j++) {
1197                         ip = (struct ip *)(adev->mman.discovery_bin + ip_offset);
1198
1199                         if ((le16_to_cpu(ip->hw_id) == hw_id) && (ip->number_instance == number_instance)) {
1200                                 if (major)
1201                                         *major = ip->major;
1202                                 if (minor)
1203                                         *minor = ip->minor;
1204                                 if (revision)
1205                                         *revision = ip->revision;
1206                                 return 0;
1207                         }
1208                         ip_offset += struct_size(ip, base_address, ip->num_base_address);
1209                 }
1210         }
1211
1212         return -EINVAL;
1213 }
1214
1215 void amdgpu_discovery_harvest_ip(struct amdgpu_device *adev)
1216 {
1217         int vcn_harvest_count = 0;
1218         int umc_harvest_count = 0;
1219
1220         /*
1221          * Harvest table does not fit Navi1x and legacy GPUs,
1222          * so read harvest bit per IP data structure to set
1223          * harvest configuration.
1224          */
1225         if (adev->ip_versions[GC_HWIP][0] < IP_VERSION(10, 2, 0)) {
1226                 if ((adev->pdev->device == 0x731E &&
1227                         (adev->pdev->revision == 0xC6 ||
1228                          adev->pdev->revision == 0xC7)) ||
1229                         (adev->pdev->device == 0x7340 &&
1230                          adev->pdev->revision == 0xC9) ||
1231                         (adev->pdev->device == 0x7360 &&
1232                          adev->pdev->revision == 0xC7))
1233                         amdgpu_discovery_read_harvest_bit_per_ip(adev,
1234                                 &vcn_harvest_count);
1235         } else {
1236                 amdgpu_discovery_read_from_harvest_table(adev,
1237                                                          &vcn_harvest_count,
1238                                                          &umc_harvest_count);
1239         }
1240
1241         amdgpu_discovery_harvest_config_quirk(adev);
1242
1243         if (vcn_harvest_count == adev->vcn.num_vcn_inst) {
1244                 adev->harvest_ip_mask |= AMD_HARVEST_IP_VCN_MASK;
1245                 adev->harvest_ip_mask |= AMD_HARVEST_IP_JPEG_MASK;
1246         }
1247
1248         if (umc_harvest_count < adev->gmc.num_umc) {
1249                 adev->gmc.num_umc -= umc_harvest_count;
1250         }
1251 }
1252
/* Overlay of every supported GC info table layout; the table header's
 * version_major/minor selects which view is valid.
 */
union gc_info {
	struct gc_info_v1_0 v1;
	struct gc_info_v1_1 v1_1;
	struct gc_info_v1_2 v1_2;
	struct gc_info_v2_0 v2;
};
1259
1260 int amdgpu_discovery_get_gfx_info(struct amdgpu_device *adev)
1261 {
1262         struct binary_header *bhdr;
1263         union gc_info *gc_info;
1264         u16 offset;
1265
1266         if (!adev->mman.discovery_bin) {
1267                 DRM_ERROR("ip discovery uninitialized\n");
1268                 return -EINVAL;
1269         }
1270
1271         bhdr = (struct binary_header *)adev->mman.discovery_bin;
1272         offset = le16_to_cpu(bhdr->table_list[GC].offset);
1273
1274         if (!offset) {
1275                 dev_err(adev->dev, "invalid GC table offset\n");
1276                 return -EINVAL;
1277         }
1278
1279         gc_info = (union gc_info *)(adev->mman.discovery_bin + offset);
1280
1281         switch (le16_to_cpu(gc_info->v1.header.version_major)) {
1282         case 1:
1283                 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v1.gc_num_se);
1284                 adev->gfx.config.max_cu_per_sh = 2 * (le32_to_cpu(gc_info->v1.gc_num_wgp0_per_sa) +
1285                                                       le32_to_cpu(gc_info->v1.gc_num_wgp1_per_sa));
1286                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1287                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v1.gc_num_rb_per_se);
1288                 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v1.gc_num_gl2c);
1289                 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v1.gc_num_gprs);
1290                 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v1.gc_num_max_gs_thds);
1291                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v1.gc_gs_table_depth);
1292                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v1.gc_gsprim_buff_depth);
1293                 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v1.gc_double_offchip_lds_buffer);
1294                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v1.gc_wave_size);
1295                 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v1.gc_max_waves_per_simd);
1296                 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v1.gc_max_scratch_slots_per_cu);
1297                 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v1.gc_lds_size);
1298                 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v1.gc_num_sc_per_se) /
1299                         le32_to_cpu(gc_info->v1.gc_num_sa_per_se);
1300                 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v1.gc_num_packer_per_sc);
1301                 if (gc_info->v1.header.version_minor >= 1) {
1302                         adev->gfx.config.gc_num_tcp_per_sa = le32_to_cpu(gc_info->v1_1.gc_num_tcp_per_sa);
1303                         adev->gfx.config.gc_num_sdp_interface = le32_to_cpu(gc_info->v1_1.gc_num_sdp_interface);
1304                         adev->gfx.config.gc_num_tcps = le32_to_cpu(gc_info->v1_1.gc_num_tcps);
1305                 }
1306                 if (gc_info->v1.header.version_minor >= 2) {
1307                         adev->gfx.config.gc_num_tcp_per_wpg = le32_to_cpu(gc_info->v1_2.gc_num_tcp_per_wpg);
1308                         adev->gfx.config.gc_tcp_l1_size = le32_to_cpu(gc_info->v1_2.gc_tcp_l1_size);
1309                         adev->gfx.config.gc_num_sqc_per_wgp = le32_to_cpu(gc_info->v1_2.gc_num_sqc_per_wgp);
1310                         adev->gfx.config.gc_l1_instruction_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_instruction_cache_size_per_sqc);
1311                         adev->gfx.config.gc_l1_data_cache_size_per_sqc = le32_to_cpu(gc_info->v1_2.gc_l1_data_cache_size_per_sqc);
1312                         adev->gfx.config.gc_gl1c_per_sa = le32_to_cpu(gc_info->v1_2.gc_gl1c_per_sa);
1313                         adev->gfx.config.gc_gl1c_size_per_instance = le32_to_cpu(gc_info->v1_2.gc_gl1c_size_per_instance);
1314                         adev->gfx.config.gc_gl2c_per_gpu = le32_to_cpu(gc_info->v1_2.gc_gl2c_per_gpu);
1315                 }
1316                 break;
1317         case 2:
1318                 adev->gfx.config.max_shader_engines = le32_to_cpu(gc_info->v2.gc_num_se);
1319                 adev->gfx.config.max_cu_per_sh = le32_to_cpu(gc_info->v2.gc_num_cu_per_sh);
1320                 adev->gfx.config.max_sh_per_se = le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1321                 adev->gfx.config.max_backends_per_se = le32_to_cpu(gc_info->v2.gc_num_rb_per_se);
1322                 adev->gfx.config.max_texture_channel_caches = le32_to_cpu(gc_info->v2.gc_num_tccs);
1323                 adev->gfx.config.max_gprs = le32_to_cpu(gc_info->v2.gc_num_gprs);
1324                 adev->gfx.config.max_gs_threads = le32_to_cpu(gc_info->v2.gc_num_max_gs_thds);
1325                 adev->gfx.config.gs_vgt_table_depth = le32_to_cpu(gc_info->v2.gc_gs_table_depth);
1326                 adev->gfx.config.gs_prim_buffer_depth = le32_to_cpu(gc_info->v2.gc_gsprim_buff_depth);
1327                 adev->gfx.config.double_offchip_lds_buf = le32_to_cpu(gc_info->v2.gc_double_offchip_lds_buffer);
1328                 adev->gfx.cu_info.wave_front_size = le32_to_cpu(gc_info->v2.gc_wave_size);
1329                 adev->gfx.cu_info.max_waves_per_simd = le32_to_cpu(gc_info->v2.gc_max_waves_per_simd);
1330                 adev->gfx.cu_info.max_scratch_slots_per_cu = le32_to_cpu(gc_info->v2.gc_max_scratch_slots_per_cu);
1331                 adev->gfx.cu_info.lds_size = le32_to_cpu(gc_info->v2.gc_lds_size);
1332                 adev->gfx.config.num_sc_per_sh = le32_to_cpu(gc_info->v2.gc_num_sc_per_se) /
1333                         le32_to_cpu(gc_info->v2.gc_num_sh_per_se);
1334                 adev->gfx.config.num_packer_per_sc = le32_to_cpu(gc_info->v2.gc_num_packer_per_sc);
1335                 break;
1336         default:
1337                 dev_err(adev->dev,
1338                         "Unhandled GC info table %d.%d\n",
1339                         le16_to_cpu(gc_info->v1.header.version_major),
1340                         le16_to_cpu(gc_info->v1.header.version_minor));
1341                 return -EINVAL;
1342         }
1343         return 0;
1344 }
1345
/* Overlay of supported MALL (memory attached last level cache) info
 * table layouts, selected by the table header version.
 */
union mall_info {
	struct mall_info_v1_0 v1;
};
1349
1350 int amdgpu_discovery_get_mall_info(struct amdgpu_device *adev)
1351 {
1352         struct binary_header *bhdr;
1353         union mall_info *mall_info;
1354         u32 u, mall_size_per_umc, m_s_present, half_use;
1355         u64 mall_size;
1356         u16 offset;
1357
1358         if (!adev->mman.discovery_bin) {
1359                 DRM_ERROR("ip discovery uninitialized\n");
1360                 return -EINVAL;
1361         }
1362
1363         bhdr = (struct binary_header *)adev->mman.discovery_bin;
1364         offset = le16_to_cpu(bhdr->table_list[MALL_INFO].offset);
1365
1366         if (!offset) {
1367                 dev_err(adev->dev, "invalid mall table offset\n");
1368                 return -EINVAL;
1369         }
1370
1371         mall_info = (union mall_info *)(adev->mman.discovery_bin + offset);
1372
1373         switch (le16_to_cpu(mall_info->v1.header.version_major)) {
1374         case 1:
1375                 mall_size = 0;
1376                 mall_size_per_umc = le32_to_cpu(mall_info->v1.mall_size_per_m);
1377                 m_s_present = le32_to_cpu(mall_info->v1.m_s_present);
1378                 half_use = le32_to_cpu(mall_info->v1.m_half_use);
1379                 for (u = 0; u < adev->gmc.num_umc; u++) {
1380                         if (m_s_present & (1 << u))
1381                                 mall_size += mall_size_per_umc * 2;
1382                         else if (half_use & (1 << u))
1383                                 mall_size += mall_size_per_umc / 2;
1384                         else
1385                                 mall_size += mall_size_per_umc;
1386                 }
1387                 adev->gmc.mall_size = mall_size;
1388                 break;
1389         default:
1390                 dev_err(adev->dev,
1391                         "Unhandled MALL info table %d.%d\n",
1392                         le16_to_cpu(mall_info->v1.header.version_major),
1393                         le16_to_cpu(mall_info->v1.header.version_minor));
1394                 return -EINVAL;
1395         }
1396         return 0;
1397 }
1398
1399 static int amdgpu_discovery_set_common_ip_blocks(struct amdgpu_device *adev)
1400 {
1401         /* what IP to use for this? */
1402         switch (adev->ip_versions[GC_HWIP][0]) {
1403         case IP_VERSION(9, 0, 1):
1404         case IP_VERSION(9, 1, 0):
1405         case IP_VERSION(9, 2, 1):
1406         case IP_VERSION(9, 2, 2):
1407         case IP_VERSION(9, 3, 0):
1408         case IP_VERSION(9, 4, 0):
1409         case IP_VERSION(9, 4, 1):
1410         case IP_VERSION(9, 4, 2):
1411                 amdgpu_device_ip_block_add(adev, &vega10_common_ip_block);
1412                 break;
1413         case IP_VERSION(10, 1, 10):
1414         case IP_VERSION(10, 1, 1):
1415         case IP_VERSION(10, 1, 2):
1416         case IP_VERSION(10, 1, 3):
1417         case IP_VERSION(10, 1, 4):
1418         case IP_VERSION(10, 3, 0):
1419         case IP_VERSION(10, 3, 1):
1420         case IP_VERSION(10, 3, 2):
1421         case IP_VERSION(10, 3, 3):
1422         case IP_VERSION(10, 3, 4):
1423         case IP_VERSION(10, 3, 5):
1424         case IP_VERSION(10, 3, 6):
1425         case IP_VERSION(10, 3, 7):
1426                 amdgpu_device_ip_block_add(adev, &nv_common_ip_block);
1427                 break;
1428         default:
1429                 dev_err(adev->dev,
1430                         "Failed to add common ip block(GC_HWIP:0x%x)\n",
1431                         adev->ip_versions[GC_HWIP][0]);
1432                 return -EINVAL;
1433         }
1434         return 0;
1435 }
1436
1437 static int amdgpu_discovery_set_gmc_ip_blocks(struct amdgpu_device *adev)
1438 {
1439         /* use GC or MMHUB IP version */
1440         switch (adev->ip_versions[GC_HWIP][0]) {
1441         case IP_VERSION(9, 0, 1):
1442         case IP_VERSION(9, 1, 0):
1443         case IP_VERSION(9, 2, 1):
1444         case IP_VERSION(9, 2, 2):
1445         case IP_VERSION(9, 3, 0):
1446         case IP_VERSION(9, 4, 0):
1447         case IP_VERSION(9, 4, 1):
1448         case IP_VERSION(9, 4, 2):
1449                 amdgpu_device_ip_block_add(adev, &gmc_v9_0_ip_block);
1450                 break;
1451         case IP_VERSION(10, 1, 10):
1452         case IP_VERSION(10, 1, 1):
1453         case IP_VERSION(10, 1, 2):
1454         case IP_VERSION(10, 1, 3):
1455         case IP_VERSION(10, 1, 4):
1456         case IP_VERSION(10, 3, 0):
1457         case IP_VERSION(10, 3, 1):
1458         case IP_VERSION(10, 3, 2):
1459         case IP_VERSION(10, 3, 3):
1460         case IP_VERSION(10, 3, 4):
1461         case IP_VERSION(10, 3, 5):
1462         case IP_VERSION(10, 3, 6):
1463         case IP_VERSION(10, 3, 7):
1464                 amdgpu_device_ip_block_add(adev, &gmc_v10_0_ip_block);
1465                 break;
1466         default:
1467                 dev_err(adev->dev,
1468                         "Failed to add gmc ip block(GC_HWIP:0x%x)\n",
1469                         adev->ip_versions[GC_HWIP][0]);
1470                 return -EINVAL;
1471         }
1472         return 0;
1473 }
1474
/*
 * Register the interrupt handler (IH) IP block matching the discovered
 * OSSSYS hardware IP version.
 *
 * Returns 0 on success, -EINVAL if the OSSSYS version is not recognized.
 */
static int amdgpu_discovery_set_ih_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[OSSSYS_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 3, 0):
		amdgpu_device_ip_block_add(adev, &vega10_ih_ip_block);
		break;
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 1):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &vega20_ih_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 3):
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 1):
		amdgpu_device_ip_block_add(adev, &navi10_ih_ip_block);
		break;
	default:
		/* Unknown OSSSYS version: fail device init rather than run
		 * without a working interrupt handler. */
		dev_err(adev->dev,
			"Failed to add ih ip block(OSSSYS_HWIP:0x%x)\n",
			adev->ip_versions[OSSSYS_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
1506
/*
 * Register the PSP IP block matching the discovered MP0 hardware IP
 * version.
 *
 * Returns 0 on success, -EINVAL if the MP0 version is not recognized.
 */
static int amdgpu_discovery_set_psp_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP0_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
		amdgpu_device_ip_block_add(adev, &psp_v3_1_ip_block);
		break;
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v10_0_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 2):
	case IP_VERSION(11, 0, 4):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &psp_v11_0_ip_block);
		break;
	case IP_VERSION(11, 0, 8):
		/* 11.0.8 has its own dedicated PSP implementation. */
		amdgpu_device_ip_block_add(adev, &psp_v11_0_8_ip_block);
		break;
	case IP_VERSION(11, 0, 3):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &psp_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
		amdgpu_device_ip_block_add(adev, &psp_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add psp ip block(MP0_HWIP:0x%x)\n",
			adev->ip_versions[MP0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
1551
/*
 * Register the SMU/powerplay IP block matching the discovered MP1
 * hardware IP version.
 *
 * Returns 0 on success, -EINVAL if the MP1 version is not recognized.
 */
static int amdgpu_discovery_set_smu_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[MP1_HWIP][0]) {
	case IP_VERSION(9, 0, 0):
	case IP_VERSION(10, 0, 0):
	case IP_VERSION(10, 0, 1):
	case IP_VERSION(11, 0, 2):
		/* MP1 11.0.2 is shared by Arcturus and older parts: only
		 * Arcturus takes the smu v11 path here; everything else in
		 * this group uses the pp_smu (powerplay) block. */
		if (adev->asic_type == CHIP_ARCTURUS)
			amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		else
			amdgpu_device_ip_block_add(adev, &pp_smu_ip_block);
		break;
	case IP_VERSION(11, 0, 0):
	case IP_VERSION(11, 0, 5):
	case IP_VERSION(11, 0, 9):
	case IP_VERSION(11, 0, 7):
	case IP_VERSION(11, 0, 8):
	case IP_VERSION(11, 0, 11):
	case IP_VERSION(11, 0, 12):
	case IP_VERSION(11, 0, 13):
	case IP_VERSION(11, 5, 0):
		amdgpu_device_ip_block_add(adev, &smu_v11_0_ip_block);
		break;
	case IP_VERSION(12, 0, 0):
	case IP_VERSION(12, 0, 1):
		amdgpu_device_ip_block_add(adev, &smu_v12_0_ip_block);
		break;
	case IP_VERSION(13, 0, 1):
	case IP_VERSION(13, 0, 2):
	case IP_VERSION(13, 0, 3):
	case IP_VERSION(13, 0, 5):
	case IP_VERSION(13, 0, 8):
		amdgpu_device_ip_block_add(adev, &smu_v13_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add smu ip block(MP1_HWIP:0x%x)\n",
			adev->ip_versions[MP1_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
1594
/*
 * Register the display IP block for this device.
 *
 * Virtual display / SR-IOV always uses the vkms block; otherwise the DM
 * block is selected from the DCE (APU/newer dGPU) or DCI (older dGPU)
 * hardware IP version, and only when DC support is compiled in and
 * enabled for this device.
 *
 * Returns 0 on success (including the no-display-block cases),
 * -EINVAL if a DCE/DCI version is present but not recognized.
 */
static int amdgpu_discovery_set_display_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->enable_virtual_display || amdgpu_sriov_vf(adev)) {
		amdgpu_device_ip_block_add(adev, &amdgpu_vkms_ip_block);
		return 0;
	}

	if (!amdgpu_device_has_dc_support(adev))
		return 0;

#if defined(CONFIG_DRM_AMD_DC)
	if (adev->ip_versions[DCE_HWIP][0]) {
		switch (adev->ip_versions[DCE_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 3):
		case IP_VERSION(2, 1, 0):
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 3):
		case IP_VERSION(3, 0, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 1, 3):
		case IP_VERSION(3, 1, 5):
		case IP_VERSION(3, 1, 6):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCE_HWIP:0x%x)\n",
				adev->ip_versions[DCE_HWIP][0]);
			return -EINVAL;
		}
	} else if (adev->ip_versions[DCI_HWIP][0]) {
		switch (adev->ip_versions[DCI_HWIP][0]) {
		case IP_VERSION(12, 0, 0):
		case IP_VERSION(12, 0, 1):
		case IP_VERSION(12, 1, 0):
			amdgpu_device_ip_block_add(adev, &dm_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add dm ip block(DCI_HWIP:0x%x)\n",
				adev->ip_versions[DCI_HWIP][0]);
			return -EINVAL;
		}
	}
#endif
	/* No DCE/DCI version discovered (or DC not built in): headless. */
	return 0;
}
1647
/*
 * Register the GFX IP block matching the discovered GC hardware IP
 * version.
 *
 * Returns 0 on success, -EINVAL if the GC version is not recognized.
 */
static int amdgpu_discovery_set_gc_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[GC_HWIP][0]) {
	case IP_VERSION(9, 0, 1):
	case IP_VERSION(9, 1, 0):
	case IP_VERSION(9, 2, 1):
	case IP_VERSION(9, 2, 2):
	case IP_VERSION(9, 3, 0):
	case IP_VERSION(9, 4, 0):
	case IP_VERSION(9, 4, 1):
	case IP_VERSION(9, 4, 2):
		amdgpu_device_ip_block_add(adev, &gfx_v9_0_ip_block);
		break;
	case IP_VERSION(10, 1, 10):
	case IP_VERSION(10, 1, 2):
	case IP_VERSION(10, 1, 1):
	case IP_VERSION(10, 1, 3):
	case IP_VERSION(10, 1, 4):
	case IP_VERSION(10, 3, 0):
	case IP_VERSION(10, 3, 2):
	case IP_VERSION(10, 3, 1):
	case IP_VERSION(10, 3, 4):
	case IP_VERSION(10, 3, 5):
	case IP_VERSION(10, 3, 6):
	case IP_VERSION(10, 3, 3):
	case IP_VERSION(10, 3, 7):
		amdgpu_device_ip_block_add(adev, &gfx_v10_0_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add gfx ip block(GC_HWIP:0x%x)\n",
			adev->ip_versions[GC_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
1684
/*
 * Register the SDMA IP block matching the discovered SDMA0 hardware IP
 * version (instance 0 is representative for all instances).
 *
 * Returns 0 on success, -EINVAL if the SDMA version is not recognized.
 */
static int amdgpu_discovery_set_sdma_ip_blocks(struct amdgpu_device *adev)
{
	switch (adev->ip_versions[SDMA0_HWIP][0]) {
	case IP_VERSION(4, 0, 0):
	case IP_VERSION(4, 0, 1):
	case IP_VERSION(4, 1, 0):
	case IP_VERSION(4, 1, 1):
	case IP_VERSION(4, 1, 2):
	case IP_VERSION(4, 2, 0):
	case IP_VERSION(4, 2, 2):
	case IP_VERSION(4, 4, 0):
		amdgpu_device_ip_block_add(adev, &sdma_v4_0_ip_block);
		break;
	case IP_VERSION(5, 0, 0):
	case IP_VERSION(5, 0, 1):
	case IP_VERSION(5, 0, 2):
	case IP_VERSION(5, 0, 5):
		amdgpu_device_ip_block_add(adev, &sdma_v5_0_ip_block);
		break;
	case IP_VERSION(5, 2, 0):
	case IP_VERSION(5, 2, 2):
	case IP_VERSION(5, 2, 4):
	case IP_VERSION(5, 2, 5):
	case IP_VERSION(5, 2, 6):
	case IP_VERSION(5, 2, 3):
	case IP_VERSION(5, 2, 1):
	case IP_VERSION(5, 2, 7):
		amdgpu_device_ip_block_add(adev, &sdma_v5_2_ip_block);
		break;
	default:
		dev_err(adev->dev,
			"Failed to add sdma ip block(SDMA0_HWIP:0x%x)\n",
			adev->ip_versions[SDMA0_HWIP][0]);
		return -EINVAL;
	}
	return 0;
}
1722
/*
 * Register the multimedia IP blocks for this device.
 *
 * Parts that discovered a VCE version take the legacy UVD + VCE path;
 * everything else takes the VCN (and, where present, JPEG) path keyed
 * off the UVD/VCN hardware IP version.
 *
 * Returns 0 on success, -EINVAL if a relevant version is not recognized.
 */
static int amdgpu_discovery_set_mm_ip_blocks(struct amdgpu_device *adev)
{
	if (adev->ip_versions[VCE_HWIP][0]) {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(7, 0, 0):
		case IP_VERSION(7, 2, 0):
			/* UVD is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &uvd_v7_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add uvd v7 ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
		switch (adev->ip_versions[VCE_HWIP][0]) {
		case IP_VERSION(4, 0, 0):
		case IP_VERSION(4, 1, 0):
			/* VCE is not supported on vega20 SR-IOV */
			if (!(adev->asic_type == CHIP_VEGA20 && amdgpu_sriov_vf(adev)))
				amdgpu_device_ip_block_add(adev, &vce_v4_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add VCE v4 ip block(VCE_HWIP:0x%x)\n",
				adev->ip_versions[VCE_HWIP][0]);
			return -EINVAL;
		}
	} else {
		switch (adev->ip_versions[UVD_HWIP][0]) {
		case IP_VERSION(1, 0, 0):
		case IP_VERSION(1, 0, 1):
			amdgpu_device_ip_block_add(adev, &vcn_v1_0_ip_block);
			break;
		case IP_VERSION(2, 0, 0):
		case IP_VERSION(2, 0, 2):
		case IP_VERSION(2, 2, 0):
			/* JPEG is handled by the PF on SR-IOV here. */
			amdgpu_device_ip_block_add(adev, &vcn_v2_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v2_0_ip_block);
			break;
		case IP_VERSION(2, 0, 3):
			/* Deliberately no VCN/JPEG block for 2.0.3. */
			break;
		case IP_VERSION(2, 5, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_5_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_5_ip_block);
			break;
		case IP_VERSION(2, 6, 0):
			amdgpu_device_ip_block_add(adev, &vcn_v2_6_ip_block);
			amdgpu_device_ip_block_add(adev, &jpeg_v2_6_ip_block);
			break;
		case IP_VERSION(3, 0, 0):
		case IP_VERSION(3, 0, 16):
		case IP_VERSION(3, 1, 1):
		case IP_VERSION(3, 1, 2):
		case IP_VERSION(3, 0, 2):
		case IP_VERSION(3, 0, 192):
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			if (!amdgpu_sriov_vf(adev))
				amdgpu_device_ip_block_add(adev, &jpeg_v3_0_ip_block);
			break;
		case IP_VERSION(3, 0, 33):
			/* 3.0.33 gets VCN only — no JPEG block. */
			amdgpu_device_ip_block_add(adev, &vcn_v3_0_ip_block);
			break;
		default:
			dev_err(adev->dev,
				"Failed to add vcn/jpeg ip block(UVD_HWIP:0x%x)\n",
				adev->ip_versions[UVD_HWIP][0]);
			return -EINVAL;
		}
	}
	return 0;
}
1797
1798 static int amdgpu_discovery_set_mes_ip_blocks(struct amdgpu_device *adev)
1799 {
1800         switch (adev->ip_versions[GC_HWIP][0]) {
1801         case IP_VERSION(10, 1, 10):
1802         case IP_VERSION(10, 1, 1):
1803         case IP_VERSION(10, 1, 2):
1804         case IP_VERSION(10, 1, 3):
1805         case IP_VERSION(10, 1, 4):
1806         case IP_VERSION(10, 3, 0):
1807         case IP_VERSION(10, 3, 1):
1808         case IP_VERSION(10, 3, 2):
1809         case IP_VERSION(10, 3, 3):
1810         case IP_VERSION(10, 3, 4):
1811         case IP_VERSION(10, 3, 5):
1812         case IP_VERSION(10, 3, 6):
1813                 amdgpu_device_ip_block_add(adev, &mes_v10_1_ip_block);
1814                 break;
1815         default:
1816                 break;
1817         }
1818         return 0;
1819 }
1820
1821 int amdgpu_discovery_set_ip_blocks(struct amdgpu_device *adev)
1822 {
1823         int r;
1824
1825         switch (adev->asic_type) {
1826         case CHIP_VEGA10:
1827                 vega10_reg_base_init(adev);
1828                 adev->sdma.num_instances = 2;
1829                 adev->gmc.num_umc = 4;
1830                 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 0, 0);
1831                 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 0, 0);
1832                 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 0);
1833                 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 0);
1834                 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 0);
1835                 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 0);
1836                 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
1837                 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 1, 0);
1838                 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 0, 0);
1839                 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
1840                 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
1841                 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
1842                 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 0);
1843                 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 0, 1);
1844                 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
1845                 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
1846                 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 0);
1847                 break;
1848         case CHIP_VEGA12:
1849                 vega10_reg_base_init(adev);
1850                 adev->sdma.num_instances = 2;
1851                 adev->gmc.num_umc = 4;
1852                 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 3, 0);
1853                 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 3, 0);
1854                 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 0, 1);
1855                 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 0, 1);
1856                 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 0, 1);
1857                 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 0, 1);
1858                 adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 5, 0);
1859                 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(6, 2, 0);
1860                 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 0);
1861                 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(9, 0, 0);
1862                 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(9, 0, 0);
1863                 adev->ip_versions[THM_HWIP][0] = IP_VERSION(9, 0, 0);
1864                 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(9, 0, 1);
1865                 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 1);
1866                 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 0, 0);
1867                 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 0, 0);
1868                 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 0, 1);
1869                 break;
1870         case CHIP_RAVEN:
1871                 vega10_reg_base_init(adev);
1872                 adev->sdma.num_instances = 1;
1873                 adev->vcn.num_vcn_inst = 1;
1874                 adev->gmc.num_umc = 2;
1875                 if (adev->apu_flags & AMD_APU_IS_RAVEN2) {
1876                         adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 2, 0);
1877                         adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 2, 0);
1878                         adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 1);
1879                         adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 1);
1880                         adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 1);
1881                         adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 1);
1882                         adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 1);
1883                         adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 5, 0);
1884                         adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 1);
1885                         adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 1);
1886                         adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 1, 0);
1887                         adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 1);
1888                         adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 2, 2);
1889                         adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 1);
1890                         adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 1);
1891                 } else {
1892                         adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 1, 0);
1893                         adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 1, 0);
1894                         adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 1, 0);
1895                         adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 1, 0);
1896                         adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 1, 0);
1897                         adev->ip_versions[DF_HWIP][0] = IP_VERSION(2, 1, 0);
1898                         adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 0, 0);
1899                         adev->ip_versions[UMC_HWIP][0] = IP_VERSION(7, 0, 0);
1900                         adev->ip_versions[MP0_HWIP][0] = IP_VERSION(10, 0, 0);
1901                         adev->ip_versions[MP1_HWIP][0] = IP_VERSION(10, 0, 0);
1902                         adev->ip_versions[THM_HWIP][0] = IP_VERSION(10, 0, 0);
1903                         adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(10, 0, 0);
1904                         adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 1, 0);
1905                         adev->ip_versions[UVD_HWIP][0] = IP_VERSION(1, 0, 0);
1906                         adev->ip_versions[DCE_HWIP][0] = IP_VERSION(1, 0, 0);
1907                 }
1908                 break;
1909         case CHIP_VEGA20:
1910                 vega20_reg_base_init(adev);
1911                 adev->sdma.num_instances = 2;
1912                 adev->gmc.num_umc = 8;
1913                 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 0);
1914                 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 0);
1915                 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 0);
1916                 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 0);
1917                 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 0);
1918                 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 0);
1919                 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 0);
1920                 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 0);
1921                 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 1);
1922                 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 2);
1923                 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
1924                 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 2);
1925                 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 2);
1926                 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 0);
1927                 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(7, 2, 0);
1928                 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(7, 2, 0);
1929                 adev->ip_versions[VCE_HWIP][0] = IP_VERSION(4, 1, 0);
1930                 adev->ip_versions[DCI_HWIP][0] = IP_VERSION(12, 1, 0);
1931                 break;
1932         case CHIP_ARCTURUS:
1933                 arct_reg_base_init(adev);
1934                 adev->sdma.num_instances = 8;
1935                 adev->vcn.num_vcn_inst = 2;
1936                 adev->gmc.num_umc = 8;
1937                 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 1);
1938                 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 1);
1939                 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 2, 1);
1940                 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 2, 1);
1941                 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 2, 2);
1942                 adev->ip_versions[SDMA1_HWIP][0] = IP_VERSION(4, 2, 2);
1943                 adev->ip_versions[SDMA1_HWIP][1] = IP_VERSION(4, 2, 2);
1944                 adev->ip_versions[SDMA1_HWIP][2] = IP_VERSION(4, 2, 2);
1945                 adev->ip_versions[SDMA1_HWIP][3] = IP_VERSION(4, 2, 2);
1946                 adev->ip_versions[SDMA1_HWIP][4] = IP_VERSION(4, 2, 2);
1947                 adev->ip_versions[SDMA1_HWIP][5] = IP_VERSION(4, 2, 2);
1948                 adev->ip_versions[SDMA1_HWIP][6] = IP_VERSION(4, 2, 2);
1949                 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 1);
1950                 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 1);
1951                 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 1, 2);
1952                 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(11, 0, 4);
1953                 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(11, 0, 2);
1954                 adev->ip_versions[THM_HWIP][0] = IP_VERSION(11, 0, 3);
1955                 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(11, 0, 3);
1956                 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 1);
1957                 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 5, 0);
1958                 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 5, 0);
1959                 break;
1960         case CHIP_ALDEBARAN:
1961                 aldebaran_reg_base_init(adev);
1962                 adev->sdma.num_instances = 5;
1963                 adev->vcn.num_vcn_inst = 2;
1964                 adev->gmc.num_umc = 4;
1965                 adev->ip_versions[MMHUB_HWIP][0] = IP_VERSION(9, 4, 2);
1966                 adev->ip_versions[ATHUB_HWIP][0] = IP_VERSION(9, 4, 2);
1967                 adev->ip_versions[OSSSYS_HWIP][0] = IP_VERSION(4, 4, 0);
1968                 adev->ip_versions[HDP_HWIP][0] = IP_VERSION(4, 4, 0);
1969                 adev->ip_versions[SDMA0_HWIP][0] = IP_VERSION(4, 4, 0);
1970                 adev->ip_versions[SDMA0_HWIP][1] = IP_VERSION(4, 4, 0);
1971                 adev->ip_versions[SDMA0_HWIP][2] = IP_VERSION(4, 4, 0);
1972                 adev->ip_versions[SDMA0_HWIP][3] = IP_VERSION(4, 4, 0);
1973                 adev->ip_versions[SDMA0_HWIP][4] = IP_VERSION(4, 4, 0);
1974                 adev->ip_versions[DF_HWIP][0] = IP_VERSION(3, 6, 2);
1975                 adev->ip_versions[NBIO_HWIP][0] = IP_VERSION(7, 4, 4);
1976                 adev->ip_versions[UMC_HWIP][0] = IP_VERSION(6, 7, 0);
1977                 adev->ip_versions[MP0_HWIP][0] = IP_VERSION(13, 0, 2);
1978                 adev->ip_versions[MP1_HWIP][0] = IP_VERSION(13, 0, 2);
1979                 adev->ip_versions[THM_HWIP][0] = IP_VERSION(13, 0, 2);
1980                 adev->ip_versions[SMUIO_HWIP][0] = IP_VERSION(13, 0, 2);
1981                 adev->ip_versions[GC_HWIP][0] = IP_VERSION(9, 4, 2);
1982                 adev->ip_versions[UVD_HWIP][0] = IP_VERSION(2, 6, 0);
1983                 adev->ip_versions[UVD_HWIP][1] = IP_VERSION(2, 6, 0);
1984                 adev->ip_versions[XGMI_HWIP][0] = IP_VERSION(6, 1, 0);
1985                 break;
1986         default:
1987                 r = amdgpu_discovery_reg_base_init(adev);
1988                 if (r)
1989                         return -EINVAL;
1990
1991                 amdgpu_discovery_harvest_ip(adev);
1992                 break;
1993         }
1994
1995         switch (adev->ip_versions[GC_HWIP][0]) {
1996         case IP_VERSION(9, 0, 1):
1997         case IP_VERSION(9, 2, 1):
1998         case IP_VERSION(9, 4, 0):
1999         case IP_VERSION(9, 4, 1):
2000         case IP_VERSION(9, 4, 2):
2001                 adev->family = AMDGPU_FAMILY_AI;
2002                 break;
2003         case IP_VERSION(9, 1, 0):
2004         case IP_VERSION(9, 2, 2):
2005         case IP_VERSION(9, 3, 0):
2006                 adev->family = AMDGPU_FAMILY_RV;
2007                 break;
2008         case IP_VERSION(10, 1, 10):
2009         case IP_VERSION(10, 1, 1):
2010         case IP_VERSION(10, 1, 2):
2011         case IP_VERSION(10, 1, 3):
2012         case IP_VERSION(10, 1, 4):
2013         case IP_VERSION(10, 3, 0):
2014         case IP_VERSION(10, 3, 2):
2015         case IP_VERSION(10, 3, 4):
2016         case IP_VERSION(10, 3, 5):
2017                 adev->family = AMDGPU_FAMILY_NV;
2018                 break;
2019         case IP_VERSION(10, 3, 1):
2020                 adev->family = AMDGPU_FAMILY_VGH;
2021                 break;
2022         case IP_VERSION(10, 3, 3):
2023                 adev->family = AMDGPU_FAMILY_YC;
2024                 break;
2025         case IP_VERSION(10, 3, 6):
2026                 adev->family = AMDGPU_FAMILY_GC_10_3_6;
2027                 break;
2028         case IP_VERSION(10, 3, 7):
2029                 adev->family = AMDGPU_FAMILY_GC_10_3_7;
2030                 break;
2031         default:
2032                 return -EINVAL;
2033         }
2034
2035         switch (adev->ip_versions[GC_HWIP][0]) {
2036         case IP_VERSION(9, 1, 0):
2037         case IP_VERSION(9, 2, 2):
2038         case IP_VERSION(9, 3, 0):
2039         case IP_VERSION(10, 1, 3):
2040         case IP_VERSION(10, 1, 4):
2041         case IP_VERSION(10, 3, 1):
2042         case IP_VERSION(10, 3, 3):
2043         case IP_VERSION(10, 3, 6):
2044         case IP_VERSION(10, 3, 7):
2045                 adev->flags |= AMD_IS_APU;
2046                 break;
2047         default:
2048                 break;
2049         }
2050
2051         if (adev->ip_versions[XGMI_HWIP][0] == IP_VERSION(4, 8, 0))
2052                 adev->gmc.xgmi.supported = true;
2053
2054         /* set NBIO version */
2055         switch (adev->ip_versions[NBIO_HWIP][0]) {
2056         case IP_VERSION(6, 1, 0):
2057         case IP_VERSION(6, 2, 0):
2058                 adev->nbio.funcs = &nbio_v6_1_funcs;
2059                 adev->nbio.hdp_flush_reg = &nbio_v6_1_hdp_flush_reg;
2060                 break;
2061         case IP_VERSION(7, 0, 0):
2062         case IP_VERSION(7, 0, 1):
2063         case IP_VERSION(2, 5, 0):
2064                 adev->nbio.funcs = &nbio_v7_0_funcs;
2065                 adev->nbio.hdp_flush_reg = &nbio_v7_0_hdp_flush_reg;
2066                 break;
2067         case IP_VERSION(7, 4, 0):
2068         case IP_VERSION(7, 4, 1):
2069                 adev->nbio.funcs = &nbio_v7_4_funcs;
2070                 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
2071                 break;
2072         case IP_VERSION(7, 4, 4):
2073                 adev->nbio.funcs = &nbio_v7_4_funcs;
2074                 adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg_ald;
2075                 break;
2076         case IP_VERSION(7, 2, 0):
2077         case IP_VERSION(7, 2, 1):
2078         case IP_VERSION(7, 3, 0):
2079         case IP_VERSION(7, 5, 0):
2080         case IP_VERSION(7, 5, 1):
2081                 adev->nbio.funcs = &nbio_v7_2_funcs;
2082                 adev->nbio.hdp_flush_reg = &nbio_v7_2_hdp_flush_reg;
2083                 break;
2084         case IP_VERSION(2, 1, 1):
2085         case IP_VERSION(2, 3, 0):
2086         case IP_VERSION(2, 3, 1):
2087         case IP_VERSION(2, 3, 2):
2088                 adev->nbio.funcs = &nbio_v2_3_funcs;
2089                 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg;
2090                 break;
2091         case IP_VERSION(3, 3, 0):
2092         case IP_VERSION(3, 3, 1):
2093         case IP_VERSION(3, 3, 2):
2094         case IP_VERSION(3, 3, 3):
2095                 adev->nbio.funcs = &nbio_v2_3_funcs;
2096                 adev->nbio.hdp_flush_reg = &nbio_v2_3_hdp_flush_reg_sc;
2097                 break;
2098         default:
2099                 break;
2100         }
2101
2102         switch (adev->ip_versions[HDP_HWIP][0]) {
2103         case IP_VERSION(4, 0, 0):
2104         case IP_VERSION(4, 0, 1):
2105         case IP_VERSION(4, 1, 0):
2106         case IP_VERSION(4, 1, 1):
2107         case IP_VERSION(4, 1, 2):
2108         case IP_VERSION(4, 2, 0):
2109         case IP_VERSION(4, 2, 1):
2110         case IP_VERSION(4, 4, 0):
2111                 adev->hdp.funcs = &hdp_v4_0_funcs;
2112                 break;
2113         case IP_VERSION(5, 0, 0):
2114         case IP_VERSION(5, 0, 1):
2115         case IP_VERSION(5, 0, 2):
2116         case IP_VERSION(5, 0, 3):
2117         case IP_VERSION(5, 0, 4):
2118         case IP_VERSION(5, 2, 0):
2119                 adev->hdp.funcs = &hdp_v5_0_funcs;
2120                 break;
2121         default:
2122                 break;
2123         }
2124
2125         switch (adev->ip_versions[DF_HWIP][0]) {
2126         case IP_VERSION(3, 6, 0):
2127         case IP_VERSION(3, 6, 1):
2128         case IP_VERSION(3, 6, 2):
2129                 adev->df.funcs = &df_v3_6_funcs;
2130                 break;
2131         case IP_VERSION(2, 1, 0):
2132         case IP_VERSION(2, 1, 1):
2133         case IP_VERSION(2, 5, 0):
2134         case IP_VERSION(3, 5, 1):
2135         case IP_VERSION(3, 5, 2):
2136                 adev->df.funcs = &df_v1_7_funcs;
2137                 break;
2138         default:
2139                 break;
2140         }
2141
2142         switch (adev->ip_versions[SMUIO_HWIP][0]) {
2143         case IP_VERSION(9, 0, 0):
2144         case IP_VERSION(9, 0, 1):
2145         case IP_VERSION(10, 0, 0):
2146         case IP_VERSION(10, 0, 1):
2147         case IP_VERSION(10, 0, 2):
2148                 adev->smuio.funcs = &smuio_v9_0_funcs;
2149                 break;
2150         case IP_VERSION(11, 0, 0):
2151         case IP_VERSION(11, 0, 2):
2152         case IP_VERSION(11, 0, 3):
2153         case IP_VERSION(11, 0, 4):
2154         case IP_VERSION(11, 0, 7):
2155         case IP_VERSION(11, 0, 8):
2156                 adev->smuio.funcs = &smuio_v11_0_funcs;
2157                 break;
2158         case IP_VERSION(11, 0, 6):
2159         case IP_VERSION(11, 0, 10):
2160         case IP_VERSION(11, 0, 11):
2161         case IP_VERSION(11, 5, 0):
2162         case IP_VERSION(13, 0, 1):
2163         case IP_VERSION(13, 0, 9):
2164         case IP_VERSION(13, 0, 10):
2165                 adev->smuio.funcs = &smuio_v11_0_6_funcs;
2166                 break;
2167         case IP_VERSION(13, 0, 2):
2168                 adev->smuio.funcs = &smuio_v13_0_funcs;
2169                 break;
2170         default:
2171                 break;
2172         }
2173
2174         r = amdgpu_discovery_set_common_ip_blocks(adev);
2175         if (r)
2176                 return r;
2177
2178         r = amdgpu_discovery_set_gmc_ip_blocks(adev);
2179         if (r)
2180                 return r;
2181
2182         /* For SR-IOV, PSP needs to be initialized before IH */
2183         if (amdgpu_sriov_vf(adev)) {
2184                 r = amdgpu_discovery_set_psp_ip_blocks(adev);
2185                 if (r)
2186                         return r;
2187                 r = amdgpu_discovery_set_ih_ip_blocks(adev);
2188                 if (r)
2189                         return r;
2190         } else {
2191                 r = amdgpu_discovery_set_ih_ip_blocks(adev);
2192                 if (r)
2193                         return r;
2194
2195                 if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2196                         r = amdgpu_discovery_set_psp_ip_blocks(adev);
2197                         if (r)
2198                                 return r;
2199                 }
2200         }
2201
2202         if (likely(adev->firmware.load_type == AMDGPU_FW_LOAD_PSP)) {
2203                 r = amdgpu_discovery_set_smu_ip_blocks(adev);
2204                 if (r)
2205                         return r;
2206         }
2207
2208         r = amdgpu_discovery_set_display_ip_blocks(adev);
2209         if (r)
2210                 return r;
2211
2212         r = amdgpu_discovery_set_gc_ip_blocks(adev);
2213         if (r)
2214                 return r;
2215
2216         r = amdgpu_discovery_set_sdma_ip_blocks(adev);
2217         if (r)
2218                 return r;
2219
2220         if (adev->firmware.load_type == AMDGPU_FW_LOAD_DIRECT &&
2221             !amdgpu_sriov_vf(adev)) {
2222                 r = amdgpu_discovery_set_smu_ip_blocks(adev);
2223                 if (r)
2224                         return r;
2225         }
2226
2227         r = amdgpu_discovery_set_mm_ip_blocks(adev);
2228         if (r)
2229                 return r;
2230
2231         if (adev->enable_mes) {
2232                 r = amdgpu_discovery_set_mes_ip_blocks(adev);
2233                 if (r)
2234                         return r;
2235         }
2236
2237         return 0;
2238 }
2239