// SPDX-License-Identifier: GPL-2.0
/*
 * Routines to identify caches on Intel CPU.
 *
 * Venkatesh Pallipadi : Adding cache identification through cpuid(4)
 * Ashok Raj <ashok.raj@intel.com>: Work with CPU hotplug infrastructure.
 * Andi Kleen / Andreas Herrmann : CPUID4 emulation on AMD.
 */

#include <linux/slab.h>
#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/sched.h>
#include <linux/capability.h>
#include <linux/sysfs.h>
#include <linux/pci.h>

#include <asm/cpufeature.h>
#include <asm/amd_nb.h>

#define LVL_1_INST	1
#define LVL_1_DATA	2
#define LVL_2		3
#define LVL_3		4
#define LVL_TRACE	5

struct _cache_table {
	unsigned char descriptor;
	char cache_type;
	short size;
};

#define MB(x)	((x) * 1024)

/* All the cache descriptor types we care about (no TLB or
   trace cache entries) */

static const struct _cache_table cache_table[] =
{
	{ 0x06, LVL_1_INST, 8 },	/* 4-way set assoc, 32 byte line size */
	{ 0x08, LVL_1_INST, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x09, LVL_1_INST, 32 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0a, LVL_1_DATA, 8 },	/* 2 way set assoc, 32 byte line size */
	{ 0x0c, LVL_1_DATA, 16 },	/* 4-way set assoc, 32 byte line size */
	{ 0x0d, LVL_1_DATA, 16 },	/* 4-way set assoc, 64 byte line size */
	{ 0x0e, LVL_1_DATA, 24 },	/* 6-way set assoc, 64 byte line size */
	{ 0x21, LVL_2, 256 },	/* 8-way set assoc, 64 byte line size */
	{ 0x22, LVL_3, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x23, LVL_3, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x25, LVL_3, MB(2) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x29, LVL_3, MB(4) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x2c, LVL_1_DATA, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x30, LVL_1_INST, 32 },	/* 8-way set assoc, 64 byte line size */
	{ 0x39, LVL_2, 128 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3a, LVL_2, 192 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3b, LVL_2, 128 },	/* 2-way set assoc, sectored cache, 64 byte line size */
	{ 0x3c, LVL_2, 256 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3d, LVL_2, 384 },	/* 6-way set assoc, sectored cache, 64 byte line size */
	{ 0x3e, LVL_2, 512 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x3f, LVL_2, 256 },	/* 2-way set assoc, 64 byte line size */
	{ 0x41, LVL_2, 128 },	/* 4-way set assoc, 32 byte line size */
	{ 0x42, LVL_2, 256 },	/* 4-way set assoc, 32 byte line size */
	{ 0x43, LVL_2, 512 },	/* 4-way set assoc, 32 byte line size */
	{ 0x44, LVL_2, MB(1) },	/* 4-way set assoc, 32 byte line size */
	{ 0x45, LVL_2, MB(2) },	/* 4-way set assoc, 32 byte line size */
	{ 0x46, LVL_3, MB(4) },	/* 4-way set assoc, 64 byte line size */
	{ 0x47, LVL_3, MB(8) },	/* 8-way set assoc, 64 byte line size */
	{ 0x48, LVL_2, MB(3) },	/* 12-way set assoc, 64 byte line size */
	{ 0x49, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4a, LVL_3, MB(6) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4b, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4c, LVL_3, MB(12) },	/* 12-way set assoc, 64 byte line size */
	{ 0x4d, LVL_3, MB(16) },	/* 16-way set assoc, 64 byte line size */
	{ 0x4e, LVL_2, MB(6) },	/* 24-way set assoc, 64 byte line size */
	{ 0x60, LVL_1_DATA, 16 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x66, LVL_1_DATA, 8 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x67, LVL_1_DATA, 16 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x68, LVL_1_DATA, 32 },	/* 4-way set assoc, sectored cache, 64 byte line size */
	{ 0x70, LVL_TRACE, 12 },	/* 8-way set assoc */
	{ 0x71, LVL_TRACE, 16 },	/* 8-way set assoc */
	{ 0x72, LVL_TRACE, 32 },	/* 8-way set assoc */
	{ 0x73, LVL_TRACE, 64 },	/* 8-way set assoc */
	{ 0x78, LVL_2, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0x79, LVL_2, 128 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7a, LVL_2, 256 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7b, LVL_2, 512 },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7c, LVL_2, MB(1) },	/* 8-way set assoc, sectored cache, 64 byte line size */
	{ 0x7d, LVL_2, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0x7f, LVL_2, 512 },	/* 2-way set assoc, 64 byte line size */
	{ 0x80, LVL_2, 512 },	/* 8-way set assoc, 64 byte line size */
	{ 0x82, LVL_2, 256 },	/* 8-way set assoc, 32 byte line size */
	{ 0x83, LVL_2, 512 },	/* 8-way set assoc, 32 byte line size */
	{ 0x84, LVL_2, MB(1) },	/* 8-way set assoc, 32 byte line size */
	{ 0x85, LVL_2, MB(2) },	/* 8-way set assoc, 32 byte line size */
	{ 0x86, LVL_2, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0x87, LVL_2, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd0, LVL_3, 512 },	/* 4-way set assoc, 64 byte line size */
	{ 0xd1, LVL_3, MB(1) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd2, LVL_3, MB(2) },	/* 4-way set assoc, 64 byte line size */
	{ 0xd6, LVL_3, MB(1) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd7, LVL_3, MB(2) },	/* 8-way set assoc, 64 byte line size */
	{ 0xd8, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdc, LVL_3, MB(2) },	/* 12-way set assoc, 64 byte line size */
	{ 0xdd, LVL_3, MB(4) },	/* 12-way set assoc, 64 byte line size */
	{ 0xde, LVL_3, MB(8) },	/* 12-way set assoc, 64 byte line size */
	{ 0xe2, LVL_3, MB(2) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe3, LVL_3, MB(4) },	/* 16-way set assoc, 64 byte line size */
	{ 0xe4, LVL_3, MB(8) },	/* 16-way set assoc, 64 byte line size */
	{ 0xea, LVL_3, MB(12) },	/* 24-way set assoc, 64 byte line size */
	{ 0xeb, LVL_3, MB(18) },	/* 24-way set assoc, 64 byte line size */
	{ 0xec, LVL_3, MB(24) },	/* 24-way set assoc, 64 byte line size */
	{ 0x00, 0, 0}
};
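
/*
 * Cache type as reported in CPUID leaf 4 (and AMD leaf 0x8000001d) EAX[4:0]:
 * 0 means no more caches, 1 data, 2 instruction, 3 unified.
 */
enum _cache_type {
	CTYPE_NULL = 0,
	CTYPE_DATA = 1,
	CTYPE_INST = 2,
	CTYPE_UNIFIED = 3
};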

union _cpuid4_leaf_eax {
	struct {
		enum _cache_type	type:5;
		unsigned int		level:3;
		unsigned int		is_self_initializing:1;
		unsigned int		is_fully_associative:1;
		unsigned int		reserved:4;
		unsigned int		num_threads_sharing:12;
		unsigned int		num_cores_on_die:6;
	} split;
	u32 full;
};

union _cpuid4_leaf_ebx {
	struct {
		unsigned int		coherency_line_size:12;
		unsigned int		physical_line_partition:10;
		unsigned int		ways_of_associativity:10;
	} split;
	u32 full;
};

union _cpuid4_leaf_ecx {
	struct {
		unsigned int		number_of_sets:32;
	} split;
	u32 full;
};

struct _cpuid4_info_regs {
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned int id;
	unsigned long size;
	struct amd_northbridge *nb;
};

static unsigned short num_cache_leaves;

/* AMD doesn't have CPUID4. Emulate it here to report the same
   information to the user. This makes some assumptions about the machine:
   L2 not shared, no SMT etc. that is currently true on AMD CPUs.

   In theory the TLBs could be reported as fake type (they are in "dummy").
   Maybe later */

union l1_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:8;
		unsigned assoc:8;
		unsigned size_in_kb:8;
	};
	unsigned val;
};

union l2_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned size_in_kb:16;
	};
	unsigned val;
};

union l3_cache {
	struct {
		unsigned line_size:8;
		unsigned lines_per_tag:4;
		unsigned assoc:4;
		unsigned res:2;
		unsigned size_encoded:14;
	};
	unsigned val;
};

static const unsigned short assocs[] = {
	[1] = 1,
	[2] = 2,
	[4] = 4,
	[6] = 8,
	[8] = 16,
	[0xa] = 32,
	[0xb] = 48,
	[0xc] = 64,
	[0xd] = 96,
	[0xe] = 128,
	[0xf] = 0xffff /* fully associative - no way to show this currently */
};

static const unsigned char levels[] = { 1, 1, 2, 3 };
static const unsigned char types[] = { 1, 2, 3, 3 };

static const enum cache_type cache_type_map[] = {
	[CTYPE_NULL] = CACHE_TYPE_NOCACHE,
	[CTYPE_DATA] = CACHE_TYPE_DATA,
	[CTYPE_INST] = CACHE_TYPE_INST,
	[CTYPE_UNIFIED] = CACHE_TYPE_UNIFIED,
};

static void
amd_cpuid4(int leaf, union _cpuid4_leaf_eax *eax,
		     union _cpuid4_leaf_ebx *ebx,
		     union _cpuid4_leaf_ecx *ecx)
{
	unsigned dummy;
	unsigned line_size, lines_per_tag, assoc, size_in_kb;
	union l1_cache l1i, l1d;
	union l2_cache l2;
	union l3_cache l3;
	union l1_cache *l1 = &l1d;

	eax->full = 0;
	ebx->full = 0;
	ecx->full = 0;

	cpuid(0x80000005, &dummy, &dummy, &l1d.val, &l1i.val);
	cpuid(0x80000006, &dummy, &dummy, &l2.val, &l3.val);

	switch (leaf) {
	case 1:
		l1 = &l1i;
		/* fall through */
	case 0:
		if (!l1->val)
			return;
		assoc = assocs[l1->assoc];
		line_size = l1->line_size;
		lines_per_tag = l1->lines_per_tag;
		size_in_kb = l1->size_in_kb;
		break;
	case 2:
		if (!l2.val)
			return;
		assoc = assocs[l2.assoc];
		line_size = l2.line_size;
		lines_per_tag = l2.lines_per_tag;
		/* cpu_data has errata corrections for K7 applied */
		size_in_kb = __this_cpu_read(cpu_info.x86_cache_size);
		break;
	case 3:
		if (!l3.val)
			return;
		assoc = assocs[l3.assoc];
		line_size = l3.line_size;
		lines_per_tag = l3.lines_per_tag;
		size_in_kb = l3.size_encoded * 512;
		if (boot_cpu_has(X86_FEATURE_AMD_DCM)) {
			size_in_kb = size_in_kb >> 1;
			assoc = assoc >> 1;
		}
		break;
	default:
		return;
	}

	eax->split.is_self_initializing = 1;
	eax->split.type = types[leaf];
	eax->split.level = levels[leaf];
	eax->split.num_threads_sharing = 0;
	eax->split.num_cores_on_die = __this_cpu_read(cpu_info.x86_max_cores) - 1;

	if (assoc == 0xffff)
		eax->split.is_fully_associative = 1;
	ebx->split.coherency_line_size = line_size - 1;
	ebx->split.ways_of_associativity = assoc - 1;
	ebx->split.physical_line_partition = lines_per_tag - 1;
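
	/*
	 * Leaf 4 stores the set count minus one; derive it as
	 * total size / (line size * associativity).
	 */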
	ecx->split.number_of_sets = (size_in_kb * 1024) / line_size /
		(ebx->split.ways_of_associativity + 1) - 1;
}

#if defined(CONFIG_AMD_NB) && defined(CONFIG_SYSFS)

/*
 * L3 cache descriptors
 */
static void amd_calc_l3_indices(struct amd_northbridge *nb)
{
	struct amd_l3_cache *l3 = &nb->l3_cache;
	unsigned int sc0, sc1, sc2, sc3;
	u32 val = 0;

	pci_read_config_dword(nb->misc, 0x1C4, &val);

	/* calculate subcache sizes */
	l3->subcaches[0] = sc0 = !(val & BIT(0));
	l3->subcaches[1] = sc1 = !(val & BIT(4));

	if (boot_cpu_data.x86 == 0x15) {
		l3->subcaches[0] = sc0 += !(val & BIT(1));
		l3->subcaches[1] = sc1 += !(val & BIT(5));
	}

	l3->subcaches[2] = sc2 = !(val & BIT(8))  + !(val & BIT(9));
	l3->subcaches[3] = sc3 = !(val & BIT(12)) + !(val & BIT(13));
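
	/*
	 * The largest subcache determines how many L3 indices exist: each
	 * one covers 1024 indices, so the highest usable index is that
	 * count shifted left by 10, minus one.
	 */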
	l3->indices = (max(max3(sc0, sc1, sc2), sc3) << 10) - 1;
}

/*
 * check whether a slot used for disabling an L3 index is occupied.
 * @l3: L3 cache descriptor
 * @slot: slot number (0..1)
 *
 * @returns: the disabled index if used or negative value if slot free.
 */
static int amd_get_l3_disable_slot(struct amd_northbridge *nb, unsigned slot)
{
	unsigned int reg = 0;

	pci_read_config_dword(nb->misc, 0x1BC + slot * 4, &reg);

	/* check whether this slot is activated already */
	if (reg & (3UL << 30))
		return reg & 0xfff;

	return -1;
}

static ssize_t show_cache_disable(struct cacheinfo *this_leaf, char *buf,
				  unsigned int slot)
{
	int index;
	struct amd_northbridge *nb = this_leaf->priv;

	index = amd_get_l3_disable_slot(nb, slot);
	if (index >= 0)
		return sprintf(buf, "%d\n", index);

	return sprintf(buf, "FREE\n");
}
#define SHOW_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_show(struct device *dev,				\
			    struct device_attribute *attr, char *buf)	\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return show_cache_disable(this_leaf, buf, slot);		\
}
SHOW_CACHE_DISABLE(0)
SHOW_CACHE_DISABLE(1)

static void amd_l3_disable_index(struct amd_northbridge *nb, int cpu,
				 unsigned slot, unsigned long idx)
{
	int i;

	idx |= BIT(30);

	/*
	 * disable index in all 4 subcaches
	 */
	for (i = 0; i < 4; i++) {
		u32 reg = idx | (i << 20);

		if (!nb->l3_cache.subcaches[i])
			continue;

		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);

		/*
		 * We need to WBINVD on a core on the node containing the L3
		 * cache which indices we disable, therefore a simple wbinvd()
		 * is not enough.
		 */
		wbinvd_on_cpu(cpu);

		reg |= BIT(31);
		pci_write_config_dword(nb->misc, 0x1BC + slot * 4, reg);
	}
}

/*
 * disable an L3 cache index by using a disable-slot
 *
 * @l3:    L3 cache descriptor
 * @cpu:   A CPU on the node containing the L3 cache
 * @slot:  slot number (0..1)
 * @index: index to disable
 *
 * @return: 0 on success, error status on failure
 */
static int amd_set_l3_disable_slot(struct amd_northbridge *nb, int cpu,
				   unsigned slot, unsigned long index)
{
	int ret = 0;

	/* check if @slot is already used or the index is already disabled */
	ret = amd_get_l3_disable_slot(nb, slot);
	if (ret >= 0)
		return -EEXIST;

	if (index > nb->l3_cache.indices)
		return -EINVAL;

	/* check whether the other slot has disabled the same index already */
	if (index == amd_get_l3_disable_slot(nb, !slot))
		return -EEXIST;

	amd_l3_disable_index(nb, cpu, slot, index);

	return 0;
}

static ssize_t store_cache_disable(struct cacheinfo *this_leaf,
				   const char *buf, size_t count,
				   unsigned int slot)
{
	unsigned long val = 0;
	int cpu, err = 0;
	struct amd_northbridge *nb = this_leaf->priv;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	cpu = cpumask_first(&this_leaf->shared_cpu_map);

	if (kstrtoul(buf, 10, &val) < 0)
		return -EINVAL;

	err = amd_set_l3_disable_slot(nb, cpu, slot, val);
	if (err) {
		if (err == -EEXIST)
			pr_warn("L3 slot %d in use/index already disabled!\n",
				slot);
		return err;
	}
	return count;
}

#define STORE_CACHE_DISABLE(slot)					\
static ssize_t								\
cache_disable_##slot##_store(struct device *dev,			\
			     struct device_attribute *attr,		\
			     const char *buf, size_t count)		\
{									\
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);		\
	return store_cache_disable(this_leaf, buf, count, slot);	\
}
STORE_CACHE_DISABLE(0)
STORE_CACHE_DISABLE(1)

static ssize_t subcaches_show(struct device *dev,
			      struct device_attribute *attr, char *buf)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);

	return sprintf(buf, "%x\n", amd_get_subcaches(cpu));
}

static ssize_t subcaches_store(struct device *dev,
			       struct device_attribute *attr,
			       const char *buf, size_t count)
{
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	int cpu = cpumask_first(&this_leaf->shared_cpu_map);
	unsigned long val;

	if (!capable(CAP_SYS_ADMIN))
		return -EPERM;

	if (kstrtoul(buf, 16, &val) < 0)
		return -EINVAL;

	if (amd_set_subcaches(cpu, val))
		return -EINVAL;

	return count;
}

static DEVICE_ATTR_RW(cache_disable_0);
static DEVICE_ATTR_RW(cache_disable_1);
static DEVICE_ATTR_RW(subcaches);

static umode_t
cache_private_attrs_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct cacheinfo *this_leaf = dev_get_drvdata(dev);
	umode_t mode = attr->mode;

	if (!this_leaf->priv)
		return 0;

	if ((attr == &dev_attr_subcaches.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		return mode;

	if ((attr == &dev_attr_cache_disable_0.attr ||
	     attr == &dev_attr_cache_disable_1.attr) &&
	    amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		return mode;

	return 0;
}

static struct attribute_group cache_private_group = {
	.is_visible = cache_private_attrs_is_visible,
};

static void init_amd_l3_attrs(void)
{
	int n = 1;
	static struct attribute **amd_l3_attrs;

	if (amd_l3_attrs) /* already initialized */
		return;

	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE))
		n += 2;
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		n += 1;

	amd_l3_attrs = kcalloc(n, sizeof(*amd_l3_attrs), GFP_KERNEL);
	if (!amd_l3_attrs)
		return;

	n = 0;
	if (amd_nb_has_feature(AMD_NB_L3_INDEX_DISABLE)) {
		amd_l3_attrs[n++] = &dev_attr_cache_disable_0.attr;
		amd_l3_attrs[n++] = &dev_attr_cache_disable_1.attr;
	}
	if (amd_nb_has_feature(AMD_NB_L3_PARTITIONING))
		amd_l3_attrs[n++] = &dev_attr_subcaches.attr;

	cache_private_group.attrs = amd_l3_attrs;
}

const struct attribute_group *
cache_get_priv_group(struct cacheinfo *this_leaf)
{
	struct amd_northbridge *nb = this_leaf->priv;

	if (this_leaf->level < 3 || !nb)
		return NULL;

	if (nb && nb->l3_cache.indices)
		init_amd_l3_attrs();

	return &cache_private_group;
}

static void amd_init_l3_cache(struct _cpuid4_info_regs *this_leaf, int index)
{
	int node;

	/* only for L3, and not in virtualized environments */
	if (index < 3)
		return;

	node = amd_get_nb_id(smp_processor_id());
	this_leaf->nb = node_to_amd_nb(node);
	if (this_leaf->nb && !this_leaf->nb->l3_cache.indices)
		amd_calc_l3_indices(this_leaf->nb);
}
#else
#define amd_init_l3_cache(x, y)
#endif /* CONFIG_AMD_NB && CONFIG_SYSFS */

static int
cpuid4_cache_lookup_regs(int index, struct _cpuid4_info_regs *this_leaf)
{
	union _cpuid4_leaf_eax eax;
	union _cpuid4_leaf_ebx ebx;
	union _cpuid4_leaf_ecx ecx;
	unsigned edx;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD) {
		if (boot_cpu_has(X86_FEATURE_TOPOEXT))
			cpuid_count(0x8000001d, index, &eax.full,
				    &ebx.full, &ecx.full, &edx);
		else
			amd_cpuid4(index, &eax, &ebx, &ecx);
		amd_init_l3_cache(this_leaf, index);
	} else {
		cpuid_count(4, index, &eax.full, &ebx.full, &ecx.full, &edx);
	}

	if (eax.split.type == CTYPE_NULL)
		return -EIO; /* better error ? */

	this_leaf->eax = eax;
	this_leaf->ebx = ebx;
	this_leaf->ecx = ecx;
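	/*
	 * All leaf 4 fields are stored minus one, so the total size in
	 * bytes is the product of the four decoded (incremented) values.
	 */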
	this_leaf->size = (ecx.split.number_of_sets          + 1) *
			  (ebx.split.coherency_line_size     + 1) *
			  (ebx.split.physical_line_partition + 1) *
			  (ebx.split.ways_of_associativity   + 1);
	return 0;
}
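
/*
 * Count the cache leaves by querying the deterministic cache parameters
 * leaf (4 on Intel, 0x8000001d on AMD) with increasing index until a
 * CTYPE_NULL entry comes back.
 */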
static int find_num_cache_leaves(struct cpuinfo_x86 *c)
{
	unsigned int		eax, ebx, ecx, edx, op;
	union _cpuid4_leaf_eax	cache_eax;
	int			i = -1;

	if (c->x86_vendor == X86_VENDOR_AMD)
		op = 0x8000001d;
	else
		op = 4;

	do {
		++i;
		/* Do cpuid(op) loop to find out num_cache_leaves */
		cpuid_count(op, i, &eax, &ebx, &ecx, &edx);
		cache_eax.full = eax;
	} while (cache_eax.split.type != CTYPE_NULL);
	return i;
}

void cacheinfo_amd_init_llc_id(struct cpuinfo_x86 *c, int cpu, u8 node_id)
{
	/*
	 * We may have multiple LLCs if L3 caches exist, so check if we
	 * have an L3 cache by looking at the L3 cache CPUID leaf.
	 */
	if (!cpuid_edx(0x80000006))
		return;

	if (c->x86 < 0x17) {
		/* LLC is at the node level. */
		per_cpu(cpu_llc_id, cpu) = node_id;
	} else if (c->x86 == 0x17 &&
		   c->x86_model >= 0 && c->x86_model <= 0x1F) {
		/*
		 * LLC is at the core complex level.
		 * Core complex ID is ApicId[3] for these processors.
		 */
		per_cpu(cpu_llc_id, cpu) = c->apicid >> 3;
	} else {
		/*
		 * LLC ID is calculated from the number of threads sharing the
		 * cache.
		 */
		u32 eax, ebx, ecx, edx, num_sharing_cache = 0;
		u32 llc_index = find_num_cache_leaves(c) - 1;

		cpuid_count(0x8000001d, llc_index, &eax, &ebx, &ecx, &edx);
		if (eax)
			num_sharing_cache = ((eax >> 14) & 0xfff) + 1;

		if (num_sharing_cache) {
			int bits = get_count_order(num_sharing_cache);

			per_cpu(cpu_llc_id, cpu) = c->apicid >> bits;
		}
	}
}
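
/*
 * Without TOPOEXT the number of cache leaves is inferred from CPUID
 * 0x80000006: a non-zero L3 associativity field (EDX[15:12]) means an L3
 * exists, giving four leaves (L1d, L1i, L2, L3) instead of three.
 */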
void init_amd_cacheinfo(struct cpuinfo_x86 *c)
{
	if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		num_cache_leaves = find_num_cache_leaves(c);
	} else if (c->extended_cpuid_level >= 0x80000006) {
		if (cpuid_edx(0x80000006) & 0xf000)
			num_cache_leaves = 4;
		else
			num_cache_leaves = 3;
	}
}

unsigned int init_intel_cacheinfo(struct cpuinfo_x86 *c)
{
	/* Cache sizes */
	unsigned int trace = 0, l1i = 0, l1d = 0, l2 = 0, l3 = 0;
	unsigned int new_l1d = 0, new_l1i = 0; /* Cache sizes from cpuid(4) */
	unsigned int new_l2 = 0, new_l3 = 0, i; /* Cache sizes from cpuid(4) */
	unsigned int l2_id = 0, l3_id = 0, num_threads_sharing, index_msb;
#ifdef CONFIG_SMP
	unsigned int cpu = c->cpu_index;
#endif

	if (c->cpuid_level > 3) {
		static int is_initialized;

		if (is_initialized == 0) {
			/* Init num_cache_leaves from boot CPU */
			num_cache_leaves = find_num_cache_leaves(c);
			is_initialized++;
		}

		/*
		 * Whenever possible use cpuid(4), deterministic cache
		 * parameters cpuid leaf to find the cache details
		 */
		for (i = 0; i < num_cache_leaves; i++) {
			struct _cpuid4_info_regs this_leaf = {};
			int retval;

			retval = cpuid4_cache_lookup_regs(i, &this_leaf);
			if (retval < 0)
				continue;
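
			/*
			 * num_threads_sharing below is reported minus one;
			 * rounding it up to a power of two and clearing that
			 * many low APIC ID bits gives every thread sharing
			 * the L2/L3 the same l2_id/l3_id.
			 */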
			switch (this_leaf.eax.split.level) {
			case 1:
				if (this_leaf.eax.split.type == CTYPE_DATA)
					new_l1d = this_leaf.size/1024;
				else if (this_leaf.eax.split.type == CTYPE_INST)
					new_l1i = this_leaf.size/1024;
				break;
			case 2:
				new_l2 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l2_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			case 3:
				new_l3 = this_leaf.size/1024;
				num_threads_sharing = 1 + this_leaf.eax.split.num_threads_sharing;
				index_msb = get_count_order(num_threads_sharing);
				l3_id = c->apicid & ~((1 << index_msb) - 1);
				break;
			default:
				break;
			}
		}
	}

	/*
	 * Don't use cpuid2 if cpuid4 is supported. For P4, we use cpuid2 for
	 * trace cache
	 */
	if ((num_cache_leaves == 0 || c->x86 == 15) && c->cpuid_level > 1) {
		/* supports eax=2 call */
		int j, n;
		unsigned int regs[4];
		unsigned char *dp = (unsigned char *)regs;
		int only_trace = 0;

		if (num_cache_leaves != 0 && c->x86 == 15)
			only_trace = 1;

		/* Number of times to iterate */
		n = cpuid_eax(2) & 0xFF;

		for (i = 0 ; i < n ; i++) {
			cpuid(2, &regs[0], &regs[1], &regs[2], &regs[3]);

			/* If bit 31 is set, this is an unknown format */
			for (j = 0 ; j < 3 ; j++)
				if (regs[j] & (1 << 31))
					regs[j] = 0;

			/* Byte 0 is level count, not a descriptor */
			for (j = 1 ; j < 16 ; j++) {
				unsigned char des = dp[j];
				unsigned char k = 0;

				/* look up this descriptor in the table */
				while (cache_table[k].descriptor != 0) {
					if (cache_table[k].descriptor == des) {
						if (only_trace && cache_table[k].cache_type != LVL_TRACE)
							break;

						switch (cache_table[k].cache_type) {
						case LVL_1_INST:
							l1i += cache_table[k].size;
							break;
						case LVL_1_DATA:
							l1d += cache_table[k].size;
							break;
						case LVL_2:
							l2 += cache_table[k].size;
							break;
						case LVL_3:
							l3 += cache_table[k].size;
							break;
						case LVL_TRACE:
							trace += cache_table[k].size;
							break;
						}

						break;
					}

					k++;
				}
			}
		}
	}

	if (new_l1d)
		l1d = new_l1d;

	if (new_l1i)
		l1i = new_l1i;

	if (new_l2) {
		l2 = new_l2;
#ifdef CONFIG_SMP
		per_cpu(cpu_llc_id, cpu) = l2_id;
#endif
	}

	if (new_l3) {
		l3 = new_l3;
#ifdef CONFIG_SMP
		per_cpu(cpu_llc_id, cpu) = l3_id;
#endif
	}

#ifdef CONFIG_SMP
	/*
	 * If cpu_llc_id is not yet set, this means cpuid_level < 4 which in
	 * turn means that the only possibility is SMT (as indicated in
	 * cpuid1). Since cpuid2 doesn't specify shared caches, and we know
	 * that SMT shares all caches, we can unconditionally set cpu_llc_id to
	 * c->phys_proc_id.
	 */
	if (per_cpu(cpu_llc_id, cpu) == BAD_APICID)
		per_cpu(cpu_llc_id, cpu) = c->phys_proc_id;
#endif

	c->x86_cache_size = l3 ? l3 : (l2 ? l2 : (l1i+l1d));

	return l2;
}

static int __cache_amd_cpumap_setup(unsigned int cpu, int index,
				    struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf;
	int i, sibling;

	/*
	 * For L3, always use the pre-calculated cpu_llc_shared_mask
	 * to derive shared_cpu_map.
	 */
	if (index == 3) {
		for_each_cpu(i, cpu_llc_shared_mask(cpu)) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;
			this_leaf = this_cpu_ci->info_list + index;
			for_each_cpu(sibling, cpu_llc_shared_mask(cpu)) {
				if (!cpu_online(sibling))
					continue;
				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else if (boot_cpu_has(X86_FEATURE_TOPOEXT)) {
		unsigned int apicid, nshared, first, last;
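
		/*
		 * This path assumes that threads sharing a cache occupy a
		 * contiguous, naturally aligned APIC ID range, so
		 * [first, last] below is the nshared-wide block containing
		 * this CPU's APIC ID.
		 */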
		nshared = base->eax.split.num_threads_sharing + 1;
		apicid = cpu_data(cpu).apicid;
		first = apicid - (apicid % nshared);
		last = first + nshared - 1;

		for_each_online_cpu(i) {
			this_cpu_ci = get_cpu_cacheinfo(i);
			if (!this_cpu_ci->info_list)
				continue;

			apicid = cpu_data(i).apicid;
			if ((apicid < first) || (apicid > last))
				continue;

			this_leaf = this_cpu_ci->info_list + index;

			for_each_online_cpu(sibling) {
				apicid = cpu_data(sibling).apicid;
				if ((apicid < first) || (apicid > last))
					continue;

				cpumask_set_cpu(sibling,
						&this_leaf->shared_cpu_map);
			}
		}
	} else
		return 0;

	return 1;
}

static void __cache_cpumap_setup(unsigned int cpu, int index,
				 struct _cpuid4_info_regs *base)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf, *sibling_leaf;
	unsigned long num_threads_sharing;
	int index_msb, i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (__cache_amd_cpumap_setup(cpu, index, base))
			return;
	}

	this_leaf = this_cpu_ci->info_list + index;
	num_threads_sharing = 1 + base->eax.split.num_threads_sharing;

	cpumask_set_cpu(cpu, &this_leaf->shared_cpu_map);
	if (num_threads_sharing == 1)
		return;

	index_msb = get_count_order(num_threads_sharing);
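
	/*
	 * CPUs whose APIC IDs agree in all but the low index_msb bits
	 * share this cache with this CPU.
	 */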
	for_each_online_cpu(i)
		if (cpu_data(i).apicid >> index_msb == c->apicid >> index_msb) {
			struct cpu_cacheinfo *sib_cpu_ci = get_cpu_cacheinfo(i);

			if (i == cpu || !sib_cpu_ci->info_list)
				continue; /* skip if itself or no cacheinfo */

			sibling_leaf = sib_cpu_ci->info_list + index;
			cpumask_set_cpu(i, &this_leaf->shared_cpu_map);
			cpumask_set_cpu(cpu, &sibling_leaf->shared_cpu_map);
		}
}
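
/*
 * CPUID leaf 4 encodes line size, associativity, partitions and set count
 * minus one, hence the "+ 1" adjustments when filling the generic
 * cacheinfo fields below.
 */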
static void ci_leaf_init(struct cacheinfo *this_leaf,
			 struct _cpuid4_info_regs *base)
{
	this_leaf->id = base->id;
	this_leaf->attributes = CACHE_ID;
	this_leaf->level = base->eax.split.level;
	this_leaf->type = cache_type_map[base->eax.split.type];
	this_leaf->coherency_line_size =
				base->ebx.split.coherency_line_size + 1;
	this_leaf->ways_of_associativity =
				base->ebx.split.ways_of_associativity + 1;
	this_leaf->size = base->size;
	this_leaf->number_of_sets = base->ecx.split.number_of_sets + 1;
	this_leaf->physical_line_partition =
				base->ebx.split.physical_line_partition + 1;
	this_leaf->priv = base->nb;
}

static int __init_cache_level(unsigned int cpu)
{
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);

	if (!num_cache_leaves)
		return -ENOENT;
	if (!this_cpu_ci)
		return -EINVAL;
	this_cpu_ci->num_levels = 3;
	this_cpu_ci->num_leaves = num_cache_leaves;
	return 0;
}

/*
 * The max shared threads number comes from CPUID.4:EAX[25:14] with input
 * ECX as cache index. Then right shift apicid by the number's order to get
 * cache id for this cache node.
 */
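/*
 * For example, if 16 threads share the cache the field reads 15,
 * num_threads_sharing becomes 16, index_msb is 4, and the cache id is
 * apicid >> 4.
 */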
static void get_cache_id(int cpu, struct _cpuid4_info_regs *id4_regs)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	unsigned long num_threads_sharing;
	int index_msb;

	num_threads_sharing = 1 + id4_regs->eax.split.num_threads_sharing;
	index_msb = get_count_order(num_threads_sharing);
	id4_regs->id = c->apicid >> index_msb;
}

static int __populate_cache_leaves(unsigned int cpu)
{
	unsigned int idx, ret;
	struct cpu_cacheinfo *this_cpu_ci = get_cpu_cacheinfo(cpu);
	struct cacheinfo *this_leaf = this_cpu_ci->info_list;
	struct _cpuid4_info_regs id4_regs = {};

	for (idx = 0; idx < this_cpu_ci->num_leaves; idx++) {
		ret = cpuid4_cache_lookup_regs(idx, &id4_regs);
		if (ret)
			return ret;
		get_cache_id(cpu, &id4_regs);
		ci_leaf_init(this_leaf++, &id4_regs);
		__cache_cpumap_setup(cpu, idx, &id4_regs);
	}
	this_cpu_ci->cpu_map_populated = true;

	return 0;
}
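
/*
 * DEFINE_SMP_CALL_CACHE_FUNCTION() generates the init_cache_level() and
 * populate_cache_leaves() entry points used by the generic cacheinfo code;
 * they run the __ prefixed helpers above on the target CPU via an SMP
 * cross-call.
 */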
DEFINE_SMP_CALL_CACHE_FUNCTION(init_cache_level)
DEFINE_SMP_CALL_CACHE_FUNCTION(populate_cache_leaves)