drivers/gpu/drm/amd/amdgpu/amdgpu_xgmi.c
/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 *
 */
#include <linux/list.h>
#include "amdgpu.h"
#include "amdgpu_xgmi.h"
#include "amdgpu_ras.h"
#include "soc15.h"
#include "df/df_3_6_offset.h"
#include "xgmi/xgmi_4_0_0_smn.h"
#include "xgmi/xgmi_4_0_0_sh_mask.h"
#include "wafl/wafl2_4_0_0_smn.h"
#include "wafl/wafl2_4_0_0_sh_mask.h"

static DEFINE_MUTEX(xgmi_mutex);

#define AMDGPU_MAX_XGMI_DEVICE_PER_HIVE		4

static LIST_HEAD(xgmi_hive_list);

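/*
 * PCS error-status registers, one entry per xGMI/WAFL link instance.
 * Judging by the offsets below, consecutive link instances appear to sit
 * 0x100000 apart in SMN address space (inferred from the tables, not
 * from documentation).
 */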
static const int xgmi_pcs_err_status_reg_vg20[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
};

static const int wafl_pcs_err_status_reg_vg20[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const int xgmi_pcs_err_status_reg_arct[] = {
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x100000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x500000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x600000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x700000,
	smnXGMI0_PCS_GOPX16_PCS_ERROR_STATUS + 0x800000,
};

/* same as vg20 */
static const int wafl_pcs_err_status_reg_arct[] = {
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS,
	smnPCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS + 0x100000,
};

static const struct amdgpu_pcs_ras_field xgmi_pcs_ras_fields[] = {
	{"XGMI PCS DataLossErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataLossErr)},
	{"XGMI PCS TrainingErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TrainingErr)},
	{"XGMI PCS CRCErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, CRCErr)},
	{"XGMI PCS BERExceededErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, BERExceededErr)},
	{"XGMI PCS TxMetaDataErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"XGMI PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"XGMI PCS DataParityErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataParityErr)},
	{"XGMI PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"XGMI PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"XGMI PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"XGMI PCS DeskewErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DeskewErr)},
	{"XGMI PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"XGMI PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"XGMI PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"XGMI PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"XGMI PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"XGMI PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"XGMI PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(XGMI0_PCS_GOPX16_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

static const struct amdgpu_pcs_ras_field wafl_pcs_ras_fields[] = {
	{"WAFL PCS DataLossErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataLossErr)},
	{"WAFL PCS TrainingErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TrainingErr)},
	{"WAFL PCS CRCErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, CRCErr)},
	{"WAFL PCS BERExceededErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, BERExceededErr)},
	{"WAFL PCS TxMetaDataErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, TxMetaDataErr)},
	{"WAFL PCS ReplayBufParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayBufParityErr)},
	{"WAFL PCS DataParityErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataParityErr)},
	{"WAFL PCS ReplayFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoOverflowErr)},
	{"WAFL PCS ReplayFifoUnderflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReplayFifoUnderflowErr)},
	{"WAFL PCS ElasticFifoOverflowErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ElasticFifoOverflowErr)},
	{"WAFL PCS DeskewErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DeskewErr)},
	{"WAFL PCS DataStartupLimitErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, DataStartupLimitErr)},
	{"WAFL PCS FCInitTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, FCInitTimeoutErr)},
	{"WAFL PCS RecoveryTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryTimeoutErr)},
	{"WAFL PCS ReadySerialTimeoutErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialTimeoutErr)},
	{"WAFL PCS ReadySerialAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, ReadySerialAttemptErr)},
	{"WAFL PCS RecoveryAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryAttemptErr)},
	{"WAFL PCS RecoveryRelockAttemptErr",
	 SOC15_REG_FIELD(PCS_GOPX1_0_PCS_GOPX1_PCS_ERROR_STATUS, RecoveryRelockAttemptErr)},
};

/**
 * DOC: AMDGPU XGMI Support
 *
 * XGMI is a high speed interconnect that joins multiple GPU cards
 * into a homogeneous memory space that is organized by a collective
 * hive ID and individual node IDs, both of which are 64-bit numbers.
 *
 * The file xgmi_device_id contains the unique per GPU device ID and
 * is stored in the /sys/class/drm/card${cardno}/device/ directory.
 *
 * Inside the device directory a sub-directory 'xgmi_hive_info' is
 * created which contains the hive ID and the list of nodes.
 *
 * The hive ID is stored in:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/xgmi_hive_id
 *
 * The node information is stored in numbered directories:
 *   /sys/class/drm/card${cardno}/device/xgmi_hive_info/node${nodeno}/xgmi_device_id
 *
 * Each device has its own xgmi_hive_info directory with a mirrored
 * set of node sub-directories.
 *
 * The XGMI memory space is built by contiguously appending the
 * power-of-two padded VRAM segment of each node to the previous one.
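 *
 * For example (the IDs below are illustrative only):
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/xgmi_hive_id
 *   1157902943146520590
 *   $ cat /sys/class/drm/card0/device/xgmi_hive_info/node1/xgmi_device_id
 *   3254022153418582768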
 *
 */

static struct attribute amdgpu_xgmi_hive_id = {
	.name = "xgmi_hive_id",
	.mode = S_IRUGO
};

static struct attribute *amdgpu_xgmi_hive_attrs[] = {
	&amdgpu_xgmi_hive_id,
	NULL
};

static ssize_t amdgpu_xgmi_show_attrs(struct kobject *kobj,
	struct attribute *attr, char *buf)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	if (attr == &amdgpu_xgmi_hive_id)
		return snprintf(buf, PAGE_SIZE, "%llu\n", hive->hive_id);

	return 0;
}

static void amdgpu_xgmi_hive_release(struct kobject *kobj)
{
	struct amdgpu_hive_info *hive = container_of(
		kobj, struct amdgpu_hive_info, kobj);

	mutex_destroy(&hive->hive_lock);
	kfree(hive);
}

static const struct sysfs_ops amdgpu_xgmi_hive_ops = {
	.show = amdgpu_xgmi_show_attrs,
};

struct kobj_type amdgpu_xgmi_hive_type = {
	.release = amdgpu_xgmi_hive_release,
	.sysfs_ops = &amdgpu_xgmi_hive_ops,
	.default_attrs = amdgpu_xgmi_hive_attrs,
};

static ssize_t amdgpu_xgmi_show_device_id(struct device *dev,
				     struct device_attribute *attr,
				     char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);

	return sysfs_emit(buf, "%llu\n", adev->gmc.xgmi.node_id);
}

#define AMDGPU_XGMI_SET_FICAA(o)	((o) | 0x456801)
static ssize_t amdgpu_xgmi_show_error(struct device *dev,
				      struct device_attribute *attr,
				      char *buf)
{
	struct drm_device *ddev = dev_get_drvdata(dev);
	struct amdgpu_device *adev = drm_to_adev(ddev);
	uint32_t ficaa_pie_ctl_in, ficaa_pie_status_in;
	uint64_t fica_out;
	unsigned int error_count = 0;

	ficaa_pie_ctl_in = AMDGPU_XGMI_SET_FICAA(0x200);
	ficaa_pie_status_in = AMDGPU_XGMI_SET_FICAA(0x208);

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_ctl_in);
	if (fica_out != 0x1f)
		pr_err("xGMI error counters not enabled!\n");

	fica_out = adev->df.funcs->get_fica(adev, ficaa_pie_status_in);

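	/*
	 * Bits 62 and 63 of the FICA status word appear to hold the two
	 * per-link error flags; summing them gives the reported error
	 * count (inferred from the mask arithmetic below, not from docs).
	 */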
	if ((fica_out & 0xffff) == 2)
		error_count = ((fica_out >> 62) & 0x1) + (fica_out >> 63);

	adev->df.funcs->set_fica(adev, ficaa_pie_status_in, 0, 0);

	return sysfs_emit(buf, "%u\n", error_count);
}

static DEVICE_ATTR(xgmi_device_id, S_IRUGO, amdgpu_xgmi_show_device_id, NULL);
static DEVICE_ATTR(xgmi_error, S_IRUGO, amdgpu_xgmi_show_error, NULL);

static int amdgpu_xgmi_sysfs_add_dev_info(struct amdgpu_device *adev,
					 struct amdgpu_hive_info *hive)
{
	int ret = 0;
	char node[10] = { 0 };

	/* Create xgmi device id file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_device_id);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create device file xgmi_device_id\n");
		return ret;
	}

	/* Create xgmi error file */
	ret = device_create_file(adev->dev, &dev_attr_xgmi_error);
	if (ret)
		pr_err("failed to create xgmi_error\n");

	/* Create sysfs link to hive info folder on the first device */
	if (hive->kobj.parent != (&adev->dev->kobj)) {
		ret = sysfs_create_link(&adev->dev->kobj, &hive->kobj,
					"xgmi_hive_info");
		if (ret) {
			dev_err(adev->dev, "XGMI: Failed to create link to hive info");
			goto remove_file;
		}
	}

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	/* Create sysfs link from the hive folder back to this device */
	ret = sysfs_create_link(&hive->kobj, &adev->dev->kobj, node);
	if (ret) {
		dev_err(adev->dev, "XGMI: Failed to create link from hive info");
		goto remove_link;
	}

	goto success;

remove_link:
	/* undo the "xgmi_hive_info" link created above */
	sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

remove_file:
	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);

success:
	return ret;
}

static void amdgpu_xgmi_sysfs_rem_dev_info(struct amdgpu_device *adev,
					  struct amdgpu_hive_info *hive)
{
	char node[10];
	memset(node, 0, sizeof(node));

	device_remove_file(adev->dev, &dev_attr_xgmi_device_id);
	device_remove_file(adev->dev, &dev_attr_xgmi_error);

	if (hive->kobj.parent != (&adev->dev->kobj))
		sysfs_remove_link(&adev->dev->kobj, "xgmi_hive_info");

	sprintf(node, "node%d", atomic_read(&hive->number_devices));
	sysfs_remove_link(&hive->kobj, node);
}

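/*
 * Look up the hive this device belongs to, lazily creating it on first
 * use.  Every successful return takes a kobject reference on the hive;
 * callers drop it again with amdgpu_put_xgmi_hive().
 */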
struct amdgpu_hive_info *amdgpu_get_xgmi_hive(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = NULL;
	int ret;

	if (!adev->gmc.xgmi.hive_id)
		return NULL;

	if (adev->hive) {
		kobject_get(&adev->hive->kobj);
		return adev->hive;
	}

	mutex_lock(&xgmi_mutex);

	list_for_each_entry(hive, &xgmi_hive_list, node) {
		if (hive->hive_id == adev->gmc.xgmi.hive_id)
			goto pro_end;
	}

	hive = kzalloc(sizeof(*hive), GFP_KERNEL);
	if (!hive) {
		dev_err(adev->dev, "XGMI: allocation failed\n");
		goto pro_end;
	}

	/* initialize a new hive if one doesn't exist yet */
	ret = kobject_init_and_add(&hive->kobj,
			&amdgpu_xgmi_hive_type,
			&adev->dev->kobj,
			"%s", "xgmi_hive_info");
	if (ret) {
		dev_err(adev->dev, "XGMI: failed initializing kobject for xgmi hive\n");
		kfree(hive);
		hive = NULL;
		goto pro_end;
	}

	hive->hive_id = adev->gmc.xgmi.hive_id;
	INIT_LIST_HEAD(&hive->device_list);
	INIT_LIST_HEAD(&hive->node);
	mutex_init(&hive->hive_lock);
	atomic_set(&hive->in_reset, 0);
	atomic_set(&hive->number_devices, 0);
	task_barrier_init(&hive->tb);
	hive->pstate = AMDGPU_XGMI_PSTATE_UNKNOWN;
	hive->hi_req_gpu = NULL;
	/*
	 * The hive pstate is high on boot for vega20, so we have to go
	 * back to low pstate after boot.
	 */
	hive->hi_req_count = AMDGPU_MAX_XGMI_DEVICE_PER_HIVE;
	list_add_tail(&hive->node, &xgmi_hive_list);

pro_end:
	if (hive)
		kobject_get(&hive->kobj);
	mutex_unlock(&xgmi_mutex);
	return hive;
}

void amdgpu_put_xgmi_hive(struct amdgpu_hive_info *hive)
{
	if (hive)
		kobject_put(&hive->kobj);
}

int amdgpu_xgmi_set_pstate(struct amdgpu_device *adev, int pstate)
{
	int ret = 0;
	struct amdgpu_hive_info *hive;
	struct amdgpu_device *request_adev;
	bool is_hi_req = pstate == AMDGPU_XGMI_PSTATE_MAX_VEGA20;
	bool init_low;

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive)
		return 0;

	request_adev = hive->hi_req_gpu ? hive->hi_req_gpu : adev;
	init_low = hive->pstate == AMDGPU_XGMI_PSTATE_UNKNOWN;
	amdgpu_put_xgmi_hive(hive);
	/*
	 * Temporarily disable pstate switching because of a fw bug; this
	 * early return leaves the code below unreachable until it is
	 * removed again.
	 */
	return 0;

	if (!hive || adev->asic_type != CHIP_VEGA20)
		return 0;

	mutex_lock(&hive->hive_lock);

	if (is_hi_req)
		hive->hi_req_count++;
	else
		hive->hi_req_count--;

	/*
	 * Vega20 only needs a single peer to request pstate high for the
	 * hive to go high, but all peers must request pstate low for the
	 * hive to go low.
	 */
	if (hive->pstate == pstate ||
			(!is_hi_req && hive->hi_req_count && !init_low))
		goto out;

	dev_dbg(request_adev->dev, "Set xgmi pstate %d.\n", pstate);

	ret = amdgpu_dpm_set_xgmi_pstate(request_adev, pstate);
	if (ret) {
		dev_err(request_adev->dev,
			"XGMI: Set pstate failure on device %llx, hive %llx, ret %d",
			request_adev->gmc.xgmi.node_id,
			request_adev->gmc.xgmi.hive_id, ret);
		goto out;
	}

	if (init_low) {
		hive->pstate = hive->hi_req_count ?
					hive->pstate : AMDGPU_XGMI_PSTATE_MIN;
	} else {
		hive->pstate = pstate;
		hive->hi_req_gpu = pstate != AMDGPU_XGMI_PSTATE_MIN ?
							adev : NULL;
	}
out:
	mutex_unlock(&hive->hive_lock);
	return ret;
}

int amdgpu_xgmi_update_topology(struct amdgpu_hive_info *hive, struct amdgpu_device *adev)
{
	int ret;

	/* Each PSP needs to be given the latest topology */
	ret = psp_xgmi_set_topology_info(&adev->psp,
					 atomic_read(&hive->number_devices),
					 &adev->psp.xgmi_context.top_info);
	if (ret)
		dev_err(adev->dev,
			"XGMI: Set topology failure on device %llx, hive %llx, ret %d",
			adev->gmc.xgmi.node_id,
			adev->gmc.xgmi.hive_id, ret);

	return ret;
}

/*
 * NOTE psp_xgmi_node_info.num_hops layout is as follows:
 * num_hops[7:6] = link type (0 = xGMI2, 1 = xGMI3, 2/3 = reserved)
 * num_hops[5:3] = reserved
 * num_hops[2:0] = number of hops
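 *
 * e.g. num_hops = 0x41 (0b01000001) decodes to link type 1 (xGMI3)
 * with 1 hop between the two nodes.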
 */
int amdgpu_xgmi_get_hops_count(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	uint8_t num_hops_mask = 0x7;
	int i;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_hops & num_hops_mask;
	return -EINVAL;
}

int amdgpu_xgmi_get_num_links(struct amdgpu_device *adev,
		struct amdgpu_device *peer_adev)
{
	struct psp_xgmi_topology_info *top = &adev->psp.xgmi_context.top_info;
	int i;

	for (i = 0; i < top->num_nodes; ++i)
		if (top->nodes[i].node_id == peer_adev->gmc.xgmi.node_id)
			return top->nodes[i].num_links;
	return -EINVAL;
}

int amdgpu_xgmi_add_device(struct amdgpu_device *adev)
{
	struct psp_xgmi_topology_info *top_info;
	struct amdgpu_hive_info *hive;
	struct amdgpu_xgmi *entry;
	struct amdgpu_device *tmp_adev = NULL;

	int count = 0, ret = 0;

	if (!adev->gmc.xgmi.supported)
		return 0;

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		ret = psp_xgmi_initialize(&adev->psp);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to initialize xgmi session\n");
			return ret;
		}

		ret = psp_xgmi_get_hive_id(&adev->psp, &adev->gmc.xgmi.hive_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get hive id\n");
			return ret;
		}

		ret = psp_xgmi_get_node_id(&adev->psp, &adev->gmc.xgmi.node_id);
		if (ret) {
			dev_err(adev->dev,
				"XGMI: Failed to get node id\n");
			return ret;
		}
	} else {
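		/*
		 * PSP is not usable here (e.g. a reset is pending), so
		 * fall back to fixed, emulated hive/node ids.
		 */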
		adev->gmc.xgmi.hive_id = 16;
		adev->gmc.xgmi.node_id = adev->gmc.xgmi.physical_node_id + 16;
	}

	hive = amdgpu_get_xgmi_hive(adev);
	if (!hive) {
		ret = -EINVAL;
		dev_err(adev->dev,
			"XGMI: node 0x%llx, can not match hive 0x%llx in the hive list.\n",
			adev->gmc.xgmi.node_id, adev->gmc.xgmi.hive_id);
		goto exit;
	}
	mutex_lock(&hive->hive_lock);

	top_info = &adev->psp.xgmi_context.top_info;

	list_add_tail(&adev->gmc.xgmi.head, &hive->device_list);
	list_for_each_entry(entry, &hive->device_list, head)
		top_info->nodes[count++].node_id = entry->node_id;
	top_info->num_nodes = count;
	atomic_set(&hive->number_devices, count);

	task_barrier_add_task(&hive->tb);

	if (!adev->gmc.xgmi.pending_reset &&
	    amdgpu_device_ip_get_ip_block(adev, AMD_IP_BLOCK_TYPE_PSP)) {
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			/*
			 * Update the node list of every other device in the
			 * hive; the new device was appended last, so it sits
			 * at index count - 1.
			 */
			if (tmp_adev != adev) {
				top_info = &tmp_adev->psp.xgmi_context.top_info;
				top_info->nodes[count - 1].node_id =
					adev->gmc.xgmi.node_id;
				top_info->num_nodes = count;
			}
			ret = amdgpu_xgmi_update_topology(hive, tmp_adev);
			if (ret)
				goto exit_unlock;
		}

		/* get latest topology info for each device from psp */
		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
			ret = psp_xgmi_get_topology_info(&tmp_adev->psp, count,
					&tmp_adev->psp.xgmi_context.top_info);
			if (ret) {
				dev_err(tmp_adev->dev,
					"XGMI: Get topology failure on device %llx, hive %llx, ret %d",
					tmp_adev->gmc.xgmi.node_id,
					tmp_adev->gmc.xgmi.hive_id, ret);
				/* TODO: continue with some nodes failed, or disable the whole hive */
				goto exit_unlock;
			}
		}
	}

	if (!ret && !adev->gmc.xgmi.pending_reset)
		ret = amdgpu_xgmi_sysfs_add_dev_info(adev, hive);

exit_unlock:
	mutex_unlock(&hive->hive_lock);
exit:
	if (!ret) {
		adev->hive = hive;
		dev_info(adev->dev, "XGMI: Add node %d, hive 0x%llx.\n",
			 adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id);
	} else {
		amdgpu_put_xgmi_hive(hive);
		dev_err(adev->dev, "XGMI: Failed to add node %d, hive 0x%llx ret: %d\n",
			adev->gmc.xgmi.physical_node_id, adev->gmc.xgmi.hive_id,
			ret);
	}

	return ret;
}

int amdgpu_xgmi_remove_device(struct amdgpu_device *adev)
{
	struct amdgpu_hive_info *hive = adev->hive;

	if (!adev->gmc.xgmi.supported)
		return -EINVAL;

	if (!hive)
		return -EINVAL;

	mutex_lock(&hive->hive_lock);
	task_barrier_rem_task(&hive->tb);
	amdgpu_xgmi_sysfs_rem_dev_info(adev, hive);
	if (hive->hi_req_gpu == adev)
		hive->hi_req_gpu = NULL;
	list_del(&adev->gmc.xgmi.head);
	mutex_unlock(&hive->hive_lock);

	amdgpu_put_xgmi_hive(hive);
	adev->hive = NULL;

	if (atomic_dec_return(&hive->number_devices) == 0) {
		/* Remove the hive from global hive list */
		mutex_lock(&xgmi_mutex);
		list_del(&hive->node);
		mutex_unlock(&xgmi_mutex);

		amdgpu_put_xgmi_hive(hive);
	}

	return psp_xgmi_terminate(&adev->psp);
}

static int amdgpu_xgmi_ras_late_init(struct amdgpu_device *adev)
{
	int r;
	struct ras_ih_if ih_info = {
		.cb = NULL,
	};
	struct ras_fs_if fs_info = {
		.sysfs_name = "xgmi_wafl_err_count",
	};

	if (!adev->gmc.xgmi.supported ||
	    adev->gmc.xgmi.num_physical_nodes == 0)
		return 0;

	adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);

	if (!adev->gmc.xgmi.ras_if) {
		adev->gmc.xgmi.ras_if = kmalloc(sizeof(struct ras_common_if), GFP_KERNEL);
		if (!adev->gmc.xgmi.ras_if)
			return -ENOMEM;
		adev->gmc.xgmi.ras_if->block = AMDGPU_RAS_BLOCK__XGMI_WAFL;
		adev->gmc.xgmi.ras_if->type = AMDGPU_RAS_ERROR__MULTI_UNCORRECTABLE;
		adev->gmc.xgmi.ras_if->sub_block_index = 0;
		strcpy(adev->gmc.xgmi.ras_if->name, "xgmi_wafl");
	}
	ih_info.head = fs_info.head = *adev->gmc.xgmi.ras_if;
	r = amdgpu_ras_late_init(adev, adev->gmc.xgmi.ras_if,
				 &fs_info, &ih_info);
	if (r || !amdgpu_ras_is_supported(adev, adev->gmc.xgmi.ras_if->block)) {
		kfree(adev->gmc.xgmi.ras_if);
		adev->gmc.xgmi.ras_if = NULL;
	}

	return r;
}

static void amdgpu_xgmi_ras_fini(struct amdgpu_device *adev)
{
	if (amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL) &&
			adev->gmc.xgmi.ras_if) {
		struct ras_common_if *ras_if = adev->gmc.xgmi.ras_if;
		struct ras_ih_if ih_info = {
			.cb = NULL,
		};

		amdgpu_ras_late_fini(adev, ras_if, &ih_info);
		kfree(ras_if);
	}
}

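/*
 * Convert a device-local physical address into the hive-wide address
 * space: each node's VRAM occupies one node_segment_size window, so the
 * address is offset by physical_node_id whole segments.
 */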
uint64_t amdgpu_xgmi_get_relative_phy_addr(struct amdgpu_device *adev,
					   uint64_t addr)
{
	struct amdgpu_xgmi *xgmi = &adev->gmc.xgmi;

	return (addr + xgmi->physical_node_id * xgmi->node_segment_size);
}

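/*
 * The PCS status registers appear to be write-1-to-clear: write all 1s
 * to clear the latched error bits, then write 0 to return the register
 * to its idle state (inferred from the sequence below).
 */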
static void pcs_clear_status(struct amdgpu_device *adev, uint32_t pcs_status_reg)
{
	WREG32_PCIE(pcs_status_reg, 0xFFFFFFFF);
	WREG32_PCIE(pcs_status_reg, 0);
}

static void amdgpu_xgmi_reset_ras_error_count(struct amdgpu_device *adev)
{
	uint32_t i;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_arct[i]);
		break;
	case CHIP_VEGA20:
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++)
			pcs_clear_status(adev,
					 xgmi_pcs_err_status_reg_vg20[i]);
		break;
	default:
		break;
	}
}

static int amdgpu_xgmi_query_pcs_error_status(struct amdgpu_device *adev,
					      uint32_t value,
					      uint32_t *ue_count,
					      uint32_t *ce_count,
					      bool is_xgmi_pcs)
{
	int i;
	int ue_cnt;

	if (is_xgmi_pcs) {
		/* query xgmi pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  xgmi_pcs_ras_fields[i].pcs_err_mask) >>
				  xgmi_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 xgmi_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	} else {
		/* query wafl pcs error status,
		 * only ue is supported */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_ras_fields); i++) {
			ue_cnt = (value &
				  wafl_pcs_ras_fields[i].pcs_err_mask) >>
				  wafl_pcs_ras_fields[i].pcs_err_shift;
			if (ue_cnt) {
				dev_info(adev->dev, "%s detected\n",
					 wafl_pcs_ras_fields[i].err_name);
				*ue_count += ue_cnt;
			}
		}
	}

	return 0;
}

static int amdgpu_xgmi_query_ras_error_count(struct amdgpu_device *adev,
					     void *ras_error_status)
{
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;
	int i;
	uint32_t data;
	uint32_t ue_cnt = 0, ce_cnt = 0;

	if (!amdgpu_ras_is_supported(adev, AMDGPU_RAS_BLOCK__XGMI_WAFL))
		return -EINVAL;

	err_data->ue_count = 0;
	err_data->ce_count = 0;

	switch (adev->asic_type) {
	case CHIP_ARCTURUS:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_arct); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_arct[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	case CHIP_VEGA20:
	default:
		/* check xgmi pcs error */
		for (i = 0; i < ARRAY_SIZE(xgmi_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(xgmi_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, true);
		}
		/* check wafl pcs error */
		for (i = 0; i < ARRAY_SIZE(wafl_pcs_err_status_reg_vg20); i++) {
			data = RREG32_PCIE(wafl_pcs_err_status_reg_vg20[i]);
			if (data)
				amdgpu_xgmi_query_pcs_error_status(adev,
						data, &ue_cnt, &ce_cnt, false);
		}
		break;
	}

	adev->gmc.xgmi.ras_funcs->reset_ras_error_count(adev);

	err_data->ue_count += ue_cnt;
	err_data->ce_count += ce_cnt;

	return 0;
}

const struct amdgpu_xgmi_ras_funcs xgmi_ras_funcs = {
	.ras_late_init = amdgpu_xgmi_ras_late_init,
	.ras_fini = amdgpu_xgmi_ras_fini,
	.query_ras_error_count = amdgpu_xgmi_query_ras_error_count,
	.reset_ras_error_count = amdgpu_xgmi_reset_ras_error_count,
};