arch/x86/kernel/amd_nb.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT	0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT	0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT	0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT	0x1630
#define PCI_DEVICE_ID_AMD_17H_DF_F4	0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_19H_DF_F4	0x1654

/* Protect the PCI config register pairs used for SMN and DF indirect access. */
static DEFINE_MUTEX(smn_mutex);

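/* Cached GART flush words, one per northbridge; set up in amd_cache_gart(). */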
static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
	{}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4	0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
	{}
};

static const struct pci_device_id amd_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
	{ PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
	{}
};

static const struct pci_device_id hygon_root_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
	{}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
	{}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
	{}
};

const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
	{ 0x00, 0x18, 0x20 },
	{ 0xff, 0x00, 0x20 },
	{ 0xfe, 0x00, 0x20 },
	{ }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
	return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
	return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
	return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

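/*
 * Walk the global PCI device list starting at @dev and return the next
 * device matching one of the entries in @ids, or NULL when the list is
 * exhausted. Passing NULL for @dev starts a new walk.
 */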
static struct pci_dev *next_northbridge(struct pci_dev *dev,
					const struct pci_device_id *ids)
{
	do {
		dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
		if (!dev)
			break;
	} while (!pci_match_id(ids, dev));
	return dev;
}

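/*
 * SMN accesses go through an index/data register pair in PCI config space
 * on the node's root device: the SMN address is written to offset 0x60,
 * then the data is read or written at offset 0x64. smn_mutex serializes
 * all users of the pair.
 */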
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
	struct pci_dev *root;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	root = node_to_amd_nb(node)->root;
	if (!root)
		goto out;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(root, 0x60, address);
	if (err) {
		pr_warn("Error programming SMN address 0x%x.\n", address);
		goto out_unlock;
	}

	err = (write ? pci_write_config_dword(root, 0x64, *value)
		     : pci_read_config_dword(root, 0x64, value));
	if (err)
		pr_warn("Error %s SMN address 0x%x.\n",
			(write ? "writing to" : "reading from"), address);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}

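/* Read or write a 32-bit SMN register on the given node. */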
int amd_smn_read(u16 node, u32 address, u32 *value)
{
	return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
	return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);

/*
 * Data Fabric Indirect Access uses FICAA/FICAD.
 *
 * Fabric Indirect Configuration Access Address (FICAA): Constructed based
 * on the device's Instance Id and the PCI function and register offset of
 * the desired register.
 *
 * Fabric Indirect Configuration Access Data (FICAD): There are FICAD LO
 * and FICAD HI registers but so far we only need the LO register.
 */
int amd_df_indirect_read(u16 node, u8 func, u16 reg, u8 instance_id, u32 *lo)
{
	struct pci_dev *F4;
	u32 ficaa;
	int err = -ENODEV;

	if (node >= amd_northbridges.num)
		goto out;

	F4 = node_to_amd_nb(node)->link;
	if (!F4)
		goto out;

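	/*
	 * FICAA layout as constructed below: bit 0 is set, bits [9:2]
	 * carry the register offset, bits [13:11] the PCI function, and
	 * bits [23:16] the instance ID.
	 */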
	ficaa  = 1;
	ficaa |= reg & 0x3FC;
	ficaa |= (func & 0x7) << 11;
	ficaa |= instance_id << 16;

	mutex_lock(&smn_mutex);

	err = pci_write_config_dword(F4, 0x5C, ficaa);
	if (err) {
		pr_warn("Error writing DF Indirect FICAA, FICAA=0x%x\n", ficaa);
		goto out_unlock;
	}

	err = pci_read_config_dword(F4, 0x98, lo);
	if (err)
		pr_warn("Error reading DF Indirect FICAD LO, FICAA=0x%x.\n", ficaa);

out_unlock:
	mutex_unlock(&smn_mutex);

out:
	return err;
}
EXPORT_SYMBOL_GPL(amd_df_indirect_read);

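/*
 * Enumerate the northbridge devices once and cache the root/misc/link
 * PCI devices for each node in amd_northbridges. Subsequent calls are
 * no-ops. Returns 0 on success or a negative errno.
 */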
int amd_cache_northbridges(void)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *link_ids = amd_nb_link_ids;
	const struct pci_device_id *root_ids = amd_root_ids;
	struct pci_dev *root, *misc, *link;
	struct amd_northbridge *nb;
	u16 roots_per_misc = 0;
	u16 misc_count = 0;
	u16 root_count = 0;
	u16 i, j;

	if (amd_northbridges.num)
		return 0;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
		root_ids = hygon_root_ids;
		misc_ids = hygon_nb_misc_ids;
		link_ids = hygon_nb_link_ids;
	}

	misc = NULL;
	while ((misc = next_northbridge(misc, misc_ids)) != NULL)
		misc_count++;

	if (!misc_count)
		return -ENODEV;

	root = NULL;
	while ((root = next_northbridge(root, root_ids)) != NULL)
		root_count++;

	if (root_count) {
		roots_per_misc = root_count / misc_count;

		/*
		 * There should be _exactly_ N roots for each DF/SMN
		 * interface.
		 */
		if (!roots_per_misc || (root_count % roots_per_misc)) {
			pr_info("Unsupported AMD DF/PCI configuration found\n");
			return -ENODEV;
		}
	}

	nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
	if (!nb)
		return -ENOMEM;

	amd_northbridges.nb = nb;
	amd_northbridges.num = misc_count;

	link = misc = root = NULL;
	for (i = 0; i < amd_northbridges.num; i++) {
		node_to_amd_nb(i)->root = root =
			next_northbridge(root, root_ids);
		node_to_amd_nb(i)->misc = misc =
			next_northbridge(misc, misc_ids);
		node_to_amd_nb(i)->link = link =
			next_northbridge(link, link_ids);

		/*
		 * If there are more PCI root devices than data fabric/
		 * system management network interfaces, then the (N)
		 * PCI roots per DF/SMN interface are functionally the
		 * same (for DF/SMN access) and N-1 are redundant.  N-1
		 * PCI roots should be skipped per DF/SMN interface so
		 * the following DF/SMN interfaces get mapped to
		 * correct PCI roots.
		 */
		for (j = 1; j < roots_per_misc; j++)
			root = next_northbridge(root, root_ids);
	}

	if (amd_gart_present())
		amd_northbridges.flags |= AMD_NB_GART;

	/*
	 * Check for L3 cache presence.
	 */
	if (!cpuid_edx(0x80000006))
		return 0;

	/*
	 * Some CPU families support L3 Cache Index Disable. There are some
	 * limitations because of errata E382 and E388 on family 0x10.
	 */
	if (boot_cpu_data.x86 == 0x10 &&
	    boot_cpu_data.x86_model >= 0x8 &&
	    (boot_cpu_data.x86_model > 0x9 ||
	     boot_cpu_data.x86_stepping >= 0x1))
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

	/* L3 cache partitioning is supported on family 0x15 */
	if (boot_cpu_data.x86 == 0x15)
		amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

	return 0;
}
EXPORT_SYMBOL_GPL(amd_cache_northbridges);

/*
 * Match a packed vendor/device dword (vendor in the low 16 bits, device ID
 * in the high 16 bits) against the northbridge misc IDs. Subvendor and
 * subdevice IDs are ignored; as far as we can tell they are not needed here.
 */
bool __init early_is_amd_nb(u32 device)
{
	const struct pci_device_id *misc_ids = amd_nb_misc_ids;
	const struct pci_device_id *id;
	u32 vendor = device & 0xffff;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
		misc_ids = hygon_nb_misc_ids;

	device >>= 16;
	for (id = misc_ids; id->vendor; id++)
		if (vendor == id->vendor && device == id->device)
			return true;
	return false;
}

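/*
 * Derive the MMCONFIG aperture from MSR_FAM10H_MMIO_CONF_BASE and fill in
 * @res with its physical address range. Returns NULL if MMCONFIG is not
 * enabled or the CPU is not a family 0x10+ AMD/Hygon part.
 */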
struct resource *amd_get_mmconfig_range(struct resource *res)
{
	u32 address;
	u64 base, msr;
	unsigned int segn_busn_bits;

	if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
	    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
		return NULL;

	/* Assume all CPUs from family 0x10 onward have MMCONFIG. */
	if (boot_cpu_data.x86 < 0x10)
		return NULL;

	address = MSR_FAM10H_MMIO_CONF_BASE;
	rdmsrl(address, msr);

	/* mmconfig is not enabled */
	if (!(msr & FAM10H_MMIO_CONF_ENABLE))
		return NULL;

	base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

	segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
			 FAM10H_MMIO_CONF_BUSRANGE_MASK;

	res->flags = IORESOURCE_MEM;
	res->start = base;
	res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
	return res;
}

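/*
 * Return the 4-bit subcache mask for @cpu, read from register 0x1d4 of
 * the node's link (function 4) device; 0 if L3 partitioning is absent.
 */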
int amd_get_subcaches(int cpu)
{
	struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
	unsigned int mask;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return 0;

	pci_read_config_dword(link, 0x1d4, &mask);

	return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

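/*
 * Program the subcache mask for @cpu's compute unit, deactivating BAN mode
 * while any subcache is disabled and restoring it once L3 partitioning is
 * back in its reset state.
 */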
int amd_set_subcaches(int cpu, unsigned long mask)
{
	static unsigned int reset, ban;
	struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
	unsigned int reg;
	int cuid;

	if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
		return -EINVAL;

	/* if necessary, collect reset state of L3 partitioning and BAN mode */
	if (reset == 0) {
		pci_read_config_dword(nb->link, 0x1d4, &reset);
		pci_read_config_dword(nb->misc, 0x1b8, &ban);
		ban &= 0x180000;
	}

	/* deactivate BAN mode if any subcaches are to be disabled */
	if (mask != 0xf) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
	}

	cuid = cpu_data(cpu).cpu_core_id;
	mask <<= 4 * cuid;
	mask |= (0xf ^ (1 << cuid)) << 26;

	pci_write_config_dword(nb->link, 0x1d4, mask);

	/* reset BAN mode if L3 partitioning returned to reset state */
	pci_read_config_dword(nb->link, 0x1d4, &reg);
	if (reg == reset) {
		pci_read_config_dword(nb->misc, 0x1b8, &reg);
		reg &= ~0x180000;
		pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
	}

	return 0;
}

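/*
 * Cache the GART flush word (register 0x9c of each misc device) for every
 * northbridge so amd_flush_garts() can rewrite it with the flush bit set.
 */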
static void amd_cache_gart(void)
{
	u16 i;

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
	if (!flush_words) {
		amd_northbridges.flags &= ~AMD_NB_GART;
		pr_notice("Cannot initialize GART flush words, GART support disabled\n");
		return;
	}

	for (i = 0; i != amd_northbridges.num; i++)
		pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

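/* Flush the GART TLB on every northbridge and wait for completion. */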
void amd_flush_garts(void)
{
	int flushed, i;
	unsigned long flags;
	static DEFINE_SPINLOCK(gart_lock);

	if (!amd_nb_has_feature(AMD_NB_GART))
		return;

	/*
	 * Avoid races between AGP and IOMMU. In theory this isn't needed,
	 * but I'm not sure whether the hardware might lose flush requests
	 * while another one is pending. The whole operation is expensive
	 * enough anyway that extra serialization doesn't matter. -AK
	 */
	spin_lock_irqsave(&gart_lock, flags);
	flushed = 0;
	for (i = 0; i < amd_northbridges.num; i++) {
		pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
				       flush_words[i] | 1);
		flushed++;
	}
	for (i = 0; i < amd_northbridges.num; i++) {
		u32 w;
		/* Make sure the hardware actually executed the flush. */
		for (;;) {
			pci_read_config_dword(node_to_amd_nb(i)->misc,
					      0x9c, &w);
			if (!(w & 1))
				break;
			cpu_relax();
		}
	}
	spin_unlock_irqrestore(&gart_lock, flags);
	if (!flushed)
		pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

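/* Per-CPU part of the erratum 688 workaround: set bits 3 and 14 of IC_CFG. */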
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

	msr_set_bit(MSR_AMD64_IC_CFG, 3);
	msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
	struct pci_dev *F4;
	u32 val;

	if (boot_cpu_data.x86 != 0x14)
		return;

	if (!amd_northbridges.num)
		return;

	F4 = node_to_amd_nb(0)->link;
	if (!F4)
		return;

	if (pci_read_config_dword(F4, 0x164, &val))
		return;

	if (val & BIT(2))
		return;

	on_each_cpu(__fix_erratum_688, NULL, 0);

	pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
	amd_cache_northbridges();
	amd_cache_gart();

	fix_erratum_688();

	return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);