// SPDX-License-Identifier: GPL-2.0-only
/*
 * Shared support code for AMD K8 northbridges and derivatives.
 * Copyright 2006 Andi Kleen, SUSE Labs.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/pci_ids.h>
#include <asm/amd_nb.h>

#define PCI_DEVICE_ID_AMD_17H_ROOT      0x1450
#define PCI_DEVICE_ID_AMD_17H_M10H_ROOT 0x15d0
#define PCI_DEVICE_ID_AMD_17H_M30H_ROOT 0x1480
#define PCI_DEVICE_ID_AMD_17H_M60H_ROOT 0x1630
#define PCI_DEVICE_ID_AMD_17H_MA0H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M10H_ROOT 0x14a4
#define PCI_DEVICE_ID_AMD_19H_M60H_ROOT 0x14d8
#define PCI_DEVICE_ID_AMD_19H_M70H_ROOT 0x14e8
#define PCI_DEVICE_ID_AMD_17H_DF_F4     0x1464
#define PCI_DEVICE_ID_AMD_17H_M10H_DF_F4 0x15ec
#define PCI_DEVICE_ID_AMD_17H_M30H_DF_F4 0x1494
#define PCI_DEVICE_ID_AMD_17H_M60H_DF_F4 0x144c
#define PCI_DEVICE_ID_AMD_17H_M70H_DF_F4 0x1444
#define PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4 0x1728
#define PCI_DEVICE_ID_AMD_19H_DF_F4     0x1654
#define PCI_DEVICE_ID_AMD_19H_M10H_DF_F4 0x14b1
#define PCI_DEVICE_ID_AMD_19H_M40H_ROOT 0x14b5
#define PCI_DEVICE_ID_AMD_19H_M40H_DF_F4 0x167d
#define PCI_DEVICE_ID_AMD_19H_M50H_DF_F4 0x166e
#define PCI_DEVICE_ID_AMD_19H_M60H_DF_F4 0x14e4
#define PCI_DEVICE_ID_AMD_19H_M70H_DF_F4 0x14f4

/* Protect the PCI config register pairs used for SMN. */
static DEFINE_MUTEX(smn_mutex);

static u32 *flush_words;

static const struct pci_device_id amd_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_ROOT) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_ROOT) },
        {}
};

#define PCI_DEVICE_ID_AMD_CNB17H_F4     0x1704

static const struct pci_device_id amd_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_K8_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_10H_NB_MISC) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M10H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M60H_DF_F3) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M70H_DF_F3) },
        {}
};

static const struct pci_device_id amd_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_15H_M60H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_16H_M30H_NB_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M30H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M60H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_M70H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_17H_MA0H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M10H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M40H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_19H_M50H_DF_F4) },
        { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_CNB17H_F4) },
        {}
};

static const struct pci_device_id hygon_root_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_ROOT) },
        {}
};

static const struct pci_device_id hygon_nb_misc_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F3) },
        {}
};

static const struct pci_device_id hygon_nb_link_ids[] = {
        { PCI_DEVICE(PCI_VENDOR_ID_HYGON, PCI_DEVICE_ID_AMD_17H_DF_F4) },
        {}
};

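/*
 * Each entry below is a {bus, device base, device limit} range that may
 * contain AMD northbridge devices (description inferred from the field
 * names of struct amd_nb_bus_dev_range in <asm/amd_nb.h>).
 */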
const struct amd_nb_bus_dev_range amd_nb_bus_dev_ranges[] __initconst = {
        { 0x00, 0x18, 0x20 },
        { 0xff, 0x00, 0x20 },
        { 0xfe, 0x00, 0x20 },
        { }
};

static struct amd_northbridge_info amd_northbridges;

u16 amd_nb_num(void)
{
        return amd_northbridges.num;
}
EXPORT_SYMBOL_GPL(amd_nb_num);

bool amd_nb_has_feature(unsigned int feature)
{
        return ((amd_northbridges.flags & feature) == feature);
}
EXPORT_SYMBOL_GPL(amd_nb_has_feature);

struct amd_northbridge *node_to_amd_nb(int node)
{
        return (node < amd_northbridges.num) ? &amd_northbridges.nb[node] : NULL;
}
EXPORT_SYMBOL_GPL(node_to_amd_nb);

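/*
 * Return the next PCI device after @dev that matches one of the IDs in
 * @ids, or NULL when no further match exists.  Pass @dev = NULL to
 * start from the beginning of the PCI device list.
 */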
static struct pci_dev *next_northbridge(struct pci_dev *dev,
                                        const struct pci_device_id *ids)
{
        do {
                dev = pci_get_device(PCI_ANY_ID, PCI_ANY_ID, dev);
                if (!dev)
                        break;
        } while (!pci_match_id(ids, dev));
        return dev;
}

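/*
 * SMN accesses go through an index/data register pair in the PCI config
 * space of the node's root device: the target SMN address is written to
 * the index register at offset 0x60 and the data is then transferred
 * through the data register at offset 0x64.  smn_mutex serializes users
 * of the shared register pair.
 */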
static int __amd_smn_rw(u16 node, u32 address, u32 *value, bool write)
{
        struct pci_dev *root;
        int err = -ENODEV;

        if (node >= amd_northbridges.num)
                goto out;

        root = node_to_amd_nb(node)->root;
        if (!root)
                goto out;

        mutex_lock(&smn_mutex);

        err = pci_write_config_dword(root, 0x60, address);
        if (err) {
                pr_warn("Error programming SMN address 0x%x.\n", address);
                goto out_unlock;
        }

        err = (write ? pci_write_config_dword(root, 0x64, *value)
                     : pci_read_config_dword(root, 0x64, value));
        if (err)
                pr_warn("Error %s SMN address 0x%x.\n",
                        (write ? "writing to" : "reading from"), address);

out_unlock:
        mutex_unlock(&smn_mutex);

out:
        return err;
}

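/*
 * Read/write a System Management Network register on a given node.
 * Sketch of a caller (the SMN address 0x50000 below is hypothetical,
 * purely for illustration) setting a bit on node 0:
 *
 *	u32 val;
 *
 *	if (!amd_smn_read(0, 0x50000, &val))
 *		amd_smn_write(0, 0x50000, val | BIT(0));
 */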
int amd_smn_read(u16 node, u32 address, u32 *value)
{
        return __amd_smn_rw(node, address, value, false);
}
EXPORT_SYMBOL_GPL(amd_smn_read);

int amd_smn_write(u16 node, u32 address, u32 value)
{
        return __amd_smn_rw(node, address, &value, true);
}
EXPORT_SYMBOL_GPL(amd_smn_write);
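/*
 * Enumerate the northbridges once: count the DF/SMN "misc" devices,
 * allocate one struct amd_northbridge per misc device and pair each
 * with its matching root and link devices.  Subsequent calls return
 * early once amd_northbridges.num is set.
 */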
static int amd_cache_northbridges(void)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *link_ids = amd_nb_link_ids;
        const struct pci_device_id *root_ids = amd_root_ids;
        struct pci_dev *root, *misc, *link;
        struct amd_northbridge *nb;
        u16 roots_per_misc = 0;
        u16 misc_count = 0;
        u16 root_count = 0;
        u16 i, j;

        if (amd_northbridges.num)
                return 0;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
                root_ids = hygon_root_ids;
                misc_ids = hygon_nb_misc_ids;
                link_ids = hygon_nb_link_ids;
        }

        misc = NULL;
        while ((misc = next_northbridge(misc, misc_ids)))
                misc_count++;

        if (!misc_count)
                return -ENODEV;

        root = NULL;
        while ((root = next_northbridge(root, root_ids)))
                root_count++;

        if (root_count) {
                roots_per_misc = root_count / misc_count;

                /*
                 * There should be _exactly_ N roots for each DF/SMN
                 * interface.
                 */
                if (!roots_per_misc || (root_count % roots_per_misc)) {
                        pr_info("Unsupported AMD DF/PCI configuration found\n");
                        return -ENODEV;
                }
        }

        nb = kcalloc(misc_count, sizeof(struct amd_northbridge), GFP_KERNEL);
        if (!nb)
                return -ENOMEM;

        amd_northbridges.nb = nb;
        amd_northbridges.num = misc_count;

        link = misc = root = NULL;
        for (i = 0; i < amd_northbridges.num; i++) {
                node_to_amd_nb(i)->root = root =
                        next_northbridge(root, root_ids);
                node_to_amd_nb(i)->misc = misc =
                        next_northbridge(misc, misc_ids);
                node_to_amd_nb(i)->link = link =
                        next_northbridge(link, link_ids);

                /*
                 * If there are more PCI root devices than data fabric/
                 * system management network interfaces, then the (N)
                 * PCI roots per DF/SMN interface are functionally the
                 * same (for DF/SMN access) and N-1 are redundant.  N-1
                 * PCI roots should be skipped per DF/SMN interface so
                 * the following DF/SMN interfaces get mapped to
                 * correct PCI roots.
                 */
                for (j = 1; j < roots_per_misc; j++)
                        root = next_northbridge(root, root_ids);
        }

        if (amd_gart_present())
                amd_northbridges.flags |= AMD_NB_GART;

        /*
         * Check for L3 cache presence.
         */
        if (!cpuid_edx(0x80000006))
                return 0;

        /*
         * Some CPU families support L3 Cache Index Disable. There are some
         * limitations because of E382 and E388 on family 0x10.
         */
        if (boot_cpu_data.x86 == 0x10 &&
            boot_cpu_data.x86_model >= 0x8 &&
            (boot_cpu_data.x86_model > 0x9 ||
             boot_cpu_data.x86_stepping >= 0x1))
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_INDEX_DISABLE;

        /* L3 cache partitioning is supported on family 0x15 */
        if (boot_cpu_data.x86 == 0x15)
                amd_northbridges.flags |= AMD_NB_L3_PARTITIONING;

        return 0;
}

/*
 * Ignores subdevice/subvendor, but as far as I can figure out
 * they're useless anyway.
 */
bool __init early_is_amd_nb(u32 device)
{
        const struct pci_device_id *misc_ids = amd_nb_misc_ids;
        const struct pci_device_id *id;
        u32 vendor = device & 0xffff;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return false;

        if (boot_cpu_data.x86_vendor == X86_VENDOR_HYGON)
                misc_ids = hygon_nb_misc_ids;

        device >>= 16;
        for (id = misc_ids; id->vendor; id++)
                if (vendor == id->vendor && device == id->device)
                        return true;
        return false;
}

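/*
 * Decode MSR_FAM10H_MMIO_CONF_BASE into the MMCONFIG aperture: the base
 * address, with 1 MiB (20 bits) of config space per bus across the
 * configured segment/bus range.  Returns NULL when MMCONFIG is absent
 * or disabled.
 */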
struct resource *amd_get_mmconfig_range(struct resource *res)
{
        u32 address;
        u64 base, msr;
        unsigned int segn_busn_bits;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
            boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
                return NULL;

        /* Assume all CPUs from family 0x10 onward have MMCONFIG. */
        if (boot_cpu_data.x86 < 0x10)
                return NULL;

        address = MSR_FAM10H_MMIO_CONF_BASE;
        rdmsrl(address, msr);

        /* mmconfig is not enabled */
        if (!(msr & FAM10H_MMIO_CONF_ENABLE))
                return NULL;

        base = msr & (FAM10H_MMIO_CONF_BASE_MASK<<FAM10H_MMIO_CONF_BASE_SHIFT);

        segn_busn_bits = (msr >> FAM10H_MMIO_CONF_BUSRANGE_SHIFT) &
                         FAM10H_MMIO_CONF_BUSRANGE_MASK;

        res->flags = IORESOURCE_MEM;
        res->start = base;
        res->end = base + (1ULL<<(segn_busn_bits + 20)) - 1;
        return res;
}

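/*
 * Register 0x1d4 on the link device appears to hold a 4-bit
 * subcache-enable field per compute unit (layout inferred from the
 * code below; see the family 0x15 BKDG for the authoritative
 * description).  Return the field for @cpu's compute unit.
 */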
int amd_get_subcaches(int cpu)
{
        struct pci_dev *link = node_to_amd_nb(topology_die_id(cpu))->link;
        unsigned int mask;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
                return 0;

        pci_read_config_dword(link, 0x1d4, &mask);

        return (mask >> (4 * cpu_data(cpu).cpu_core_id)) & 0xf;
}

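/*
 * Update the subcache-enable field for @cpu's compute unit.  BAN mode
 * (the 0x180000 bits of misc register 0x1b8) is switched off while any
 * subcache is disabled and restored once the L3 partitioning returns
 * to its reset state.
 */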
int amd_set_subcaches(int cpu, unsigned long mask)
{
        static unsigned int reset, ban;
        struct amd_northbridge *nb = node_to_amd_nb(topology_die_id(cpu));
        unsigned int reg;
        int cuid;

        if (!amd_nb_has_feature(AMD_NB_L3_PARTITIONING) || mask > 0xf)
                return -EINVAL;

        /* if necessary, collect reset state of L3 partitioning and BAN mode */
        if (reset == 0) {
                pci_read_config_dword(nb->link, 0x1d4, &reset);
                pci_read_config_dword(nb->misc, 0x1b8, &ban);
                ban &= 0x180000;
        }

        /* deactivate BAN mode if any subcaches are to be disabled */
        if (mask != 0xf) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                pci_write_config_dword(nb->misc, 0x1b8, reg & ~0x180000);
        }

        cuid = cpu_data(cpu).cpu_core_id;
        mask <<= 4 * cuid;
        mask |= (0xf ^ (1 << cuid)) << 26;

        pci_write_config_dword(nb->link, 0x1d4, mask);

        /* reset BAN mode if L3 partitioning returned to reset state */
        pci_read_config_dword(nb->link, 0x1d4, &reg);
        if (reg == reset) {
                pci_read_config_dword(nb->misc, 0x1b8, &reg);
                reg &= ~0x180000;
                pci_write_config_dword(nb->misc, 0x1b8, reg | ban);
        }

        return 0;
}

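/*
 * Cache the GART flush words (misc device register 0x9c) of each
 * northbridge so amd_flush_garts() need not re-read them on every
 * flush.  GART support is disabled when the allocation fails.
 */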
static void amd_cache_gart(void)
{
        u16 i;

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        flush_words = kmalloc_array(amd_northbridges.num, sizeof(u32), GFP_KERNEL);
        if (!flush_words) {
                amd_northbridges.flags &= ~AMD_NB_GART;
                pr_notice("Cannot initialize GART flush words, GART support disabled\n");
                return;
        }

        for (i = 0; i != amd_northbridges.num; i++)
                pci_read_config_dword(node_to_amd_nb(i)->misc, 0x9c, &flush_words[i]);
}

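/*
 * Flush the GART TLBs of all northbridges by setting bit 0 of the
 * cached flush word in register 0x9c, then poll until the hardware
 * clears the bit again.
 */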
void amd_flush_garts(void)
{
        int flushed, i;
        unsigned long flags;
        static DEFINE_SPINLOCK(gart_lock);

        if (!amd_nb_has_feature(AMD_NB_GART))
                return;

        /*
         * Avoid races between AGP and IOMMU. In theory it's not needed
         * but I'm not sure if the hardware won't lose flush requests
         * when another is pending. This whole thing is so expensive anyway
         * that it doesn't matter to serialize more. -AK
         */
        spin_lock_irqsave(&gart_lock, flags);
        flushed = 0;
        for (i = 0; i < amd_northbridges.num; i++) {
                pci_write_config_dword(node_to_amd_nb(i)->misc, 0x9c,
                                       flush_words[i] | 1);
                flushed++;
        }
        for (i = 0; i < amd_northbridges.num; i++) {
                u32 w;
                /* Make sure the hardware actually executed the flush. */
                for (;;) {
                        pci_read_config_dword(node_to_amd_nb(i)->misc,
                                              0x9c, &w);
                        if (!(w & 1))
                                break;
                        cpu_relax();
                }
        }
        spin_unlock_irqrestore(&gart_lock, flags);
        if (!flushed)
                pr_notice("nothing to flush?\n");
}
EXPORT_SYMBOL_GPL(amd_flush_garts);

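/*
 * Erratum 688 workaround for family 0x14: set bits 3 and 14 of the
 * IC_CFG MSR on every CPU, unless bit 2 of F4 register 0x164 indicates
 * that the BIOS has already applied the fix.
 */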
static void __fix_erratum_688(void *info)
{
#define MSR_AMD64_IC_CFG 0xC0011021

        msr_set_bit(MSR_AMD64_IC_CFG, 3);
        msr_set_bit(MSR_AMD64_IC_CFG, 14);
}

/* Apply erratum 688 fix so machines without a BIOS fix work. */
static __init void fix_erratum_688(void)
{
        struct pci_dev *F4;
        u32 val;

        if (boot_cpu_data.x86 != 0x14)
                return;

        if (!amd_northbridges.num)
                return;

        F4 = node_to_amd_nb(0)->link;
        if (!F4)
                return;

        if (pci_read_config_dword(F4, 0x164, &val))
                return;

        if (val & BIT(2))
                return;

        on_each_cpu(__fix_erratum_688, NULL, 0);

        pr_info("x86/cpu/AMD: CPU erratum 688 worked around\n");
}

static __init int init_amd_nbs(void)
{
        amd_cache_northbridges();
        amd_cache_gart();

        fix_erratum_688();

        return 0;
}

/* This has to go after the PCI subsystem */
fs_initcall(init_amd_nbs);