// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology(RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *    Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual June 2016, volume 3, section 17.17.
 */
#define pr_fmt(fmt)	"resctrl: " fmt

#include <linux/slab.h>
#include <linux/err.h>
#include <linux/cacheinfo.h>
#include <linux/cpuhotplug.h>

#include <asm/intel-family.h>
#include <asm/resctrl_sched.h>
#include "internal.h"
/* Mutex to protect rdtgroup access. */
DEFINE_MUTEX(rdtgroup_mutex);

/*
 * The cached resctrl_pqr_state is strictly per CPU and can never be
 * updated from a remote CPU. Functions which modify the state
 * are called with interrupts disabled and no preemption, which
 * is sufficient for the protection.
 */
DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);

/*
 * Used to store the max resource name width and max resource data width
 * to display the schemata in a tabular format.
 */
int max_name_width, max_data_width;

/*
 * Global boolean for rdt_alloc which is true if any
 * resource allocation is enabled.
 */
bool rdt_alloc_capable;
static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r);
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
	      struct rdt_resource *r);

#define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].domains)
struct rdt_resource rdt_resources_all[] = {
	[RDT_RESOURCE_L3] =
	{
		.rid = RDT_RESOURCE_L3,
		.name = "L3",
		.domains = domain_init(RDT_RESOURCE_L3),
		.msr_base = MSR_IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
		.cache_level = 3,
		.cache = {
			.min_cbm_bits = 1,
			.cbm_idx_mult = 1,
			.cbm_idx_offset = 0,
		},
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3DATA] =
	{
		.rid = RDT_RESOURCE_L3DATA,
		.name = "L3DATA",
		.domains = domain_init(RDT_RESOURCE_L3DATA),
		.msr_base = MSR_IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
		.cache_level = 3,
		.cache = {
			.min_cbm_bits = 1,
			.cbm_idx_mult = 2,
			.cbm_idx_offset = 0,
		},
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L3CODE] =
	{
		.rid = RDT_RESOURCE_L3CODE,
		.name = "L3CODE",
		.domains = domain_init(RDT_RESOURCE_L3CODE),
		.msr_base = MSR_IA32_L3_CBM_BASE,
		.msr_update = cat_wrmsr,
		.cache_level = 3,
		.cache = {
			.min_cbm_bits = 1,
			.cbm_idx_mult = 2,
			.cbm_idx_offset = 1,
		},
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2] =
	{
		.rid = RDT_RESOURCE_L2,
		.name = "L2",
		.domains = domain_init(RDT_RESOURCE_L2),
		.msr_base = MSR_IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
		.cache_level = 2,
		.cache = {
			.min_cbm_bits = 1,
			.cbm_idx_mult = 1,
			.cbm_idx_offset = 0,
		},
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2DATA] =
	{
		.rid = RDT_RESOURCE_L2DATA,
		.name = "L2DATA",
		.domains = domain_init(RDT_RESOURCE_L2DATA),
		.msr_base = MSR_IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
		.cache_level = 2,
		.cache = {
			.min_cbm_bits = 1,
			.cbm_idx_mult = 2,
			.cbm_idx_offset = 0,
		},
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_L2CODE] =
	{
		.rid = RDT_RESOURCE_L2CODE,
		.name = "L2CODE",
		.domains = domain_init(RDT_RESOURCE_L2CODE),
		.msr_base = MSR_IA32_L2_CBM_BASE,
		.msr_update = cat_wrmsr,
		.cache_level = 2,
		.cache = {
			.min_cbm_bits = 1,
			.cbm_idx_mult = 2,
			.cbm_idx_offset = 1,
		},
		.parse_ctrlval = parse_cbm,
		.format_str = "%d=%0*x",
		.fflags = RFTYPE_RES_CACHE,
	},
	[RDT_RESOURCE_MBA] =
	{
		.rid = RDT_RESOURCE_MBA,
		.name = "MB",
		.domains = domain_init(RDT_RESOURCE_MBA),
		.cache_level = 3,
		.format_str = "%d=%*u",
		.fflags = RFTYPE_RES_MB,
	},
};
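/*
 * cbm_idx() - Map a CLOSID to the index of the cache bitmask MSR that
 * holds its mask. With CDP, the data and code masks of one CLOSID occupy
 * adjacent MSRs, which is what cbm_idx_mult and cbm_idx_offset encode:
 * e.g. with cbm_idx_mult = 2 and cbm_idx_offset = 1, CLOSID n maps to
 * MSR index 2 * n + 1.
 */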
static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
{
	return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
}
/*
 * cache_alloc_hsw_probe() - Have to probe for Intel Haswell server CPUs
 * as they do not have CPUID enumeration support for Cache allocation.
 * The check for Vendor/Family/Model is not enough to guarantee that
 * the MSRs won't #GP fault because only the following SKUs support
 * CAT:
 *	Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
 *	Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
 *	Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
 *	Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
 *	Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
 *
 * Probe by trying to write the first of the L3 cache mask registers
 * and checking that the bits stick. Max CLOSids is always 4 and max cbm length
 * is always 20 on hsw server parts. The minimum cache bitmask length
 * allowed for HSW server is always 2 bits. Hardcode all of them.
 */
static inline void cache_alloc_hsw_probe(void)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3];
	u32 l, h, max_cbm = BIT_MASK(20) - 1;

	if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
		return;

	rdmsr(MSR_IA32_L3_CBM_BASE, l, h);

	/* If all the bits were set in MSR, return success */
	if (l != max_cbm)
		return;

	r->num_closid = 4;
	r->default_ctrl = max_cbm;
	r->cache.cbm_len = 20;
	r->cache.shareable_bits = 0xc0000;
	r->cache.min_cbm_bits = 2;
	r->alloc_capable = true;
	r->alloc_enabled = true;

	rdt_alloc_capable = true;
}
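/*
 * is_mba_sc() - Report whether the MBA software controller (mba_sc) mode
 * is enabled for @r; a NULL @r queries the global MBA resource instead.
 */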
bool is_mba_sc(struct rdt_resource *r)
{
	if (!r)
		return rdt_resources_all[RDT_RESOURCE_MBA].membw.mba_sc;

	return r->membw.mba_sc;
}
/*
 * rdt_get_mb_table() - get a mapping of bandwidth(b/w) percentage values
 * exposed to user interface and the h/w understandable delay values.
 *
 * The non-linear delay values have the granularity of power of two
 * and also the h/w does not guarantee a curve for configured delay
 * values vs. actual b/w enforced.
 * Hence we need a mapping that is pre calibrated so the user can
 * express the memory b/w as a percentage value.
 */
static inline bool rdt_get_mb_table(struct rdt_resource *r)
{
	/*
	 * There are currently no Intel SKUs that support non-linear delay.
	 */
	pr_info("MBA b/w map not implemented for cpu:%d, model:%d",
		boot_cpu_data.x86, boot_cpu_data.x86_model);

	return false;
}
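/*
 * Enumerate Intel MBA from CPUID leaf 0x10, subleaf 3: number of CLOSIDs,
 * the maximum throttling delay, and whether the delay scale is linear.
 * Non-linear scales would require the (unimplemented) b/w table above.
 */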
static bool __get_mem_config_intel(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->membw.max_delay = eax.split.max_delay + 1;
	r->default_ctrl = MAX_MBA_BW;
	if (ecx & MBA_IS_LINEAR) {
		r->membw.delay_linear = true;
		r->membw.min_bw = MAX_MBA_BW - r->membw.max_delay;
		r->membw.bw_gran = MAX_MBA_BW - r->membw.max_delay;
	} else {
		if (!rdt_get_mb_table(r))
			return false;
	}
	r->data_width = 3;

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}
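/*
 * Enumerate AMD memory bandwidth allocation from CPUID leaf 0x80000020.
 * The control value is written to the MSRs as-is (see mba_wrmsr_amd()),
 * so no percentage-to-delay mapping is needed.
 */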
static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
{
	union cpuid_0x10_3_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->default_ctrl = MAX_MBA_BW_AMD;

	/* AMD does not use delay */
	r->membw.delay_linear = false;

	r->membw.min_bw = 0;
	r->membw.bw_gran = 1;
	/* Max value is 2048, Data width should be 4 in decimal */
	r->data_width = 4;

	r->alloc_capable = true;
	r->alloc_enabled = true;

	return true;
}
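/*
 * Enumerate cache allocation from CPUID leaf 0x10; @idx selects the
 * subleaf (1 for L3, 2 for L2). Fills in the number of CLOSIDs, the cache
 * bitmask length and the bits shared with other agents (shareable_bits).
 */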
static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
{
	union cpuid_0x10_1_eax eax;
	union cpuid_0x10_x_edx edx;
	u32 ebx, ecx;

	cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
	r->num_closid = edx.split.cos_max + 1;
	r->cache.cbm_len = eax.split.cbm_len + 1;
	r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
	r->cache.shareable_bits = ebx & r->default_ctrl;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	r->alloc_enabled = true;
}
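/*
 * With Code/Data Prioritization (CDP), the CODE and DATA resources inherit
 * the cache properties of the underlying L2/L3 resource but get only half
 * of its CLOSIDs, since each CLOSID then needs two mask MSRs.
 */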
static void rdt_get_cdp_config(int level, int type)
{
	struct rdt_resource *r_l = &rdt_resources_all[level];
	struct rdt_resource *r = &rdt_resources_all[type];

	r->num_closid = r_l->num_closid / 2;
	r->cache.cbm_len = r_l->cache.cbm_len;
	r->default_ctrl = r_l->default_ctrl;
	r->cache.shareable_bits = r_l->cache.shareable_bits;
	r->data_width = (r->cache.cbm_len + 3) / 4;
	r->alloc_capable = true;
	/*
	 * By default, CDP is disabled. CDP can be enabled by mount parameter
	 * "cdp" during resctrl file system mount time.
	 */
	r->alloc_enabled = false;
}

static void rdt_get_cdp_l3_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
}

static void rdt_get_cdp_l2_config(void)
{
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
	rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
}
static int get_cache_id(int cpu, int level)
{
	struct cpu_cacheinfo *ci = get_cpu_cacheinfo(cpu);
	int i;

	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == level)
			return ci->info_list[i].id;
	}

	return -1;
}
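/*
 * AMD MBA MSRs take the bandwidth value from the schemata directly, so the
 * control values are written without translation.
 */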
static void
mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, d->ctrl_val[i]);
}
/*
 * Map the memory b/w percentage value to delay values
 * that can be written to QOS_MSRs.
 * There are currently no SKUs which support non linear delay values.
 */
u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
{
	if (r->membw.delay_linear)
		return MAX_MBA_BW - bw;

	pr_warn_once("Non Linear delay-bw map not supported but queried\n");
	return r->default_ctrl;
}
static void
mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
		struct rdt_resource *r)
{
	unsigned int i;

	/* Write the delay values for mba. */
	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + i, delay_bw_map(d->ctrl_val[i], r));
}
static void
cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
{
	unsigned int i;

	for (i = m->low; i < m->high; i++)
		wrmsrl(r->msr_base + cbm_idx(r, i), d->ctrl_val[i]);
}
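/*
 * get_domain_from_cpu() - Return the domain of resource @r whose CPU mask
 * contains @cpu, or NULL if the CPU belongs to none of the domains.
 */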
struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		/* Find the domain that contains this CPU */
		if (cpumask_test_cpu(cpu, &d->cpu_mask))
			return d;
	}

	return NULL;
}
void rdt_ctrl_update(void *arg)
{
	struct msr_param *m = arg;
	struct rdt_resource *r = m->res;
	int cpu = smp_processor_id();
	struct rdt_domain *d;

	d = get_domain_from_cpu(cpu, r);
	if (d) {
		r->msr_update(d, m, r);
		return;
	}
	pr_warn_once("cpu %d not found in any domain for resource %s\n",
		     cpu, r->name);
}
/*
 * rdt_find_domain - Find a domain in a resource that matches input resource id
 *
 * Search resource r's domain list to find the resource id. If the resource
 * id is found in a domain, return the domain. Otherwise, if requested by
 * caller, return the first domain whose id is bigger than the input id.
 * The domain list is sorted by id in ascending order.
 */
struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
				   struct list_head **pos)
{
	struct rdt_domain *d;
	struct list_head *l;

	if (id < 0)
		return ERR_PTR(-ENODEV);

	list_for_each(l, &r->domains) {
		d = list_entry(l, struct rdt_domain, list);
		/* When id is found, return its domain. */
		if (id == d->id)
			return d;
		/* Stop searching when finding id's position in sorted list. */
		if (id < d->id)
			break;
	}

	if (pos)
		*pos = l;

	return NULL;
}
void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
{
	int i;

	/*
	 * Initialize the Control MSRs to having no control.
	 * For Cache Allocation: Set all bits in cbm
	 * For Memory Allocation: Set b/w requested to 100%
	 * and the bandwidth in MBps to U32_MAX
	 */
	for (i = 0; i < r->num_closid; i++, dc++, dm++) {
		*dc = r->default_ctrl;
		*dm = MBA_MAX_MBPS;
	}
}
static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
{
	struct msr_param m;
	u32 *dc, *dm;

	dc = kmalloc_array(r->num_closid, sizeof(*d->ctrl_val), GFP_KERNEL);
	if (!dc)
		return -ENOMEM;

	dm = kmalloc_array(r->num_closid, sizeof(*d->mbps_val), GFP_KERNEL);
	if (!dm) {
		kfree(dc);
		return -ENOMEM;
	}

	d->ctrl_val = dc;
	d->mbps_val = dm;
	setup_default_ctrlval(r, dc, dm);

	m.low = 0;
	m.high = r->num_closid;
	r->msr_update(d, &m, r);

	return 0;
}
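/*
 * domain_setup_mon_state() - Allocate the per-domain monitoring state:
 * the limbo bitmap for LLC occupancy and the MBM counter arrays, all sized
 * by the number of RMIDs, and arm the MBM overflow worker.
 */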
static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
{
	size_t tsize;

	if (is_llc_occupancy_enabled()) {
		d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
		if (!d->rmid_busy_llc)
			return -ENOMEM;
		INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
	}
	if (is_mbm_total_enabled()) {
		tsize = sizeof(*d->mbm_total);
		d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_total) {
			bitmap_free(d->rmid_busy_llc);
			return -ENOMEM;
		}
	}
	if (is_mbm_local_enabled()) {
		tsize = sizeof(*d->mbm_local);
		d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
		if (!d->mbm_local) {
			bitmap_free(d->rmid_busy_llc);
			kfree(d->mbm_total);
			return -ENOMEM;
		}
	}

	if (is_mbm_enabled()) {
		INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
		mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
	}

	return 0;
}
/*
 * domain_add_cpu - Add a cpu to a resource's domain list.
 *
 * If an existing domain in the resource r's domain list matches the cpu's
 * resource id, add the cpu in the domain.
 *
 * Otherwise, a new domain is allocated and inserted into the right position
 * in the domain list sorted by id in ascending order.
 *
 * The order in the domain list is visible to users when we print entries
 * in the schemata file and schemata input is validated to have the same order
 * as this list.
 */
static void domain_add_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct list_head *add_pos = NULL;
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, &add_pos);
	if (IS_ERR(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	if (d) {
		cpumask_set_cpu(cpu, &d->cpu_mask);
		return;
	}

	d = kzalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
	if (!d)
		return;

	d->id = id;
	cpumask_set_cpu(cpu, &d->cpu_mask);

	if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
		kfree(d);
		return;
	}

	if (r->mon_capable && domain_setup_mon_state(r, d)) {
		kfree(d);
		return;
	}

	list_add_tail(&d->list, add_pos);

	/*
	 * If resctrl is mounted, add
	 * per domain monitor data directories.
	 */
	if (static_branch_unlikely(&rdt_mon_enable_key))
		mkdir_mondata_subdir_allrdtgrp(r, d);
}
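/*
 * domain_remove_cpu() - Remove a cpu from its resource domain. When the
 * last CPU leaves, the domain and its control/monitoring state are torn
 * down and freed.
 */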
static void domain_remove_cpu(int cpu, struct rdt_resource *r)
{
	int id = get_cache_id(cpu, r->cache_level);
	struct rdt_domain *d;

	d = rdt_find_domain(r, id, NULL);
	if (IS_ERR_OR_NULL(d)) {
		pr_warn("Couldn't find cache id for cpu %d\n", cpu);
		return;
	}

	cpumask_clear_cpu(cpu, &d->cpu_mask);
	if (cpumask_empty(&d->cpu_mask)) {
		/*
		 * If resctrl is mounted, remove all the
		 * per domain monitor data directories.
		 */
		if (static_branch_unlikely(&rdt_mon_enable_key))
			rmdir_mondata_subdir_allrdtgrp(r, d->id);
		list_del(&d->list);
		if (r->mon_capable && is_mbm_enabled())
			cancel_delayed_work(&d->mbm_over);
		if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
			/*
			 * When a package is going down, forcefully
			 * decrement rmid->ebusy. There is no way to know
			 * that the L3 was flushed and hence may lead to
			 * incorrect counts in rare scenarios, but leaving
			 * the RMID as busy creates RMID leaks if the
			 * package never comes back.
			 */
			__check_limbo(d, true);
			cancel_delayed_work(&d->cqm_limbo);
		}

		/*
		 * rdt_domain "d" is going to be freed below, so clear
		 * its pointer from pseudo_lock_region struct.
		 */
		if (d->plr)
			d->plr->d = NULL;

		kfree(d->ctrl_val);
		kfree(d->mbps_val);
		bitmap_free(d->rmid_busy_llc);
		kfree(d->mbm_total);
		kfree(d->mbm_local);
		kfree(d);
		return;
	}

	if (r == &rdt_resources_all[RDT_RESOURCE_L3]) {
		if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
			cancel_delayed_work(&d->mbm_over);
			mbm_setup_overflow_handler(d, 0);
		}
		if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
		    has_busy_rmid(r, d)) {
			cancel_delayed_work(&d->cqm_limbo);
			cqm_setup_limbo_handler(d, 0);
		}
	}
}
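/*
 * Reset this CPU's PQR state so it runs with CLOSID 0 and RMID 0, both in
 * the cached per-CPU copy and in the IA32_PQR_ASSOC MSR.
 */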
static void clear_closid_rmid(int cpu)
{
	struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);

	state->default_closid = 0;
	state->default_rmid = 0;
	state->cur_closid = 0;
	state->cur_rmid = 0;
	wrmsr(IA32_PQR_ASSOC, 0, 0);
}
static int resctrl_online_cpu(unsigned int cpu)
{
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_add_cpu(cpu, r);
	/* The cpu is set in default rdtgroup after online. */
	cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
{
	struct rdtgroup *cr;

	list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
		if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
			break;
		}
	}
}
static int resctrl_offline_cpu(unsigned int cpu)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;

	mutex_lock(&rdtgroup_mutex);
	for_each_capable_rdt_resource(r)
		domain_remove_cpu(cpu, r);
	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
			clear_childcpus(rdtgrp, cpu);
			break;
		}
	}
	clear_closid_rmid(cpu);
	mutex_unlock(&rdtgroup_mutex);

	return 0;
}
/*
 * Choose a width for the resource name and resource data based on the
 * resource that has widest name and cbm.
 */
static __init void rdt_init_padding(void)
{
	struct rdt_resource *r;
	int cl;

	for_each_alloc_capable_rdt_resource(r) {
		cl = strlen(r->name);
		if (cl > max_name_width)
			max_name_width = cl;

		if (r->data_width > max_data_width)
			max_data_width = r->data_width;
	}
}
enum {
	RDT_FLAG_CMT,
	RDT_FLAG_MBM_TOTAL,
	RDT_FLAG_MBM_LOCAL,
	RDT_FLAG_L3_CAT,
	RDT_FLAG_L3_CDP,
	RDT_FLAG_L2_CAT,
	RDT_FLAG_L2_CDP,
	RDT_FLAG_MBA,
};

#define RDT_OPT(idx, n, f)	\
[idx] = {			\
	.name = n,		\
	.flag = f		\
}

struct rdt_options {
	char	*name;
	int	flag;
	bool	force_off, force_on;
};

static struct rdt_options rdt_options[] __initdata = {
	RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
	RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
	RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
	RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
	RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
	RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
	RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
	RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
};
#define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
static int __init set_rdt_options(char *str)
{
	struct rdt_options *o;
	bool force_off;
	char *tok;

	if (*str == '=')
		str++;
	while ((tok = strsep(&str, ",")) != NULL) {
		force_off = *tok == '!';
		if (force_off)
			tok++;
		for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
			if (strcmp(tok, o->name) == 0) {
				if (force_off)
					o->force_off = true;
				else
					o->force_on = true;
				break;
			}
		}
	}
	return 1;
}
__setup("rdt", set_rdt_options);
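/*
 * rdt_cpu_has() - Like boot_cpu_has(), but also honours any "rdt="
 * command line option that forces the feature on or off.
 */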
static bool __init rdt_cpu_has(int flag)
{
	bool ret = boot_cpu_has(flag);
	struct rdt_options *o;

	if (!ret)
		return ret;

	for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
		if (flag == o->flag) {
			if (o->force_off)
				ret = false;
			if (o->force_on)
				ret = true;
			break;
		}
	}
	return ret;
}
static __init bool get_mem_config(void)
{
	if (!rdt_cpu_has(X86_FEATURE_MBA))
		return false;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		return __get_mem_config_intel(&rdt_resources_all[RDT_RESOURCE_MBA]);
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return __rdt_get_mem_config_amd(&rdt_resources_all[RDT_RESOURCE_MBA]);

	return false;
}
static __init bool get_rdt_alloc_resources(void)
{
	bool ret = false;

	if (rdt_alloc_capable)
		return true;

	if (!boot_cpu_has(X86_FEATURE_RDT_A))
		return false;

	if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
		rdt_get_cache_alloc_cfg(1, &rdt_resources_all[RDT_RESOURCE_L3]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L3))
			rdt_get_cdp_l3_config();
		ret = true;
	}
	if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
		/* CPUID 0x10.2 fields are same format as 0x10.1 */
		rdt_get_cache_alloc_cfg(2, &rdt_resources_all[RDT_RESOURCE_L2]);
		if (rdt_cpu_has(X86_FEATURE_CDP_L2))
			rdt_get_cdp_l2_config();
		ret = true;
	}

	if (get_mem_config())
		ret = true;

	return ret;
}
static __init bool get_rdt_mon_resources(void)
{
	if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
		rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
	if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
		rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);

	if (!rdt_mon_features)
		return false;

	return !rdt_get_mon_l3_config(&rdt_resources_all[RDT_RESOURCE_L3]);
}
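/*
 * Model-specific quirks: Haswell servers lack CPUID enumeration for CAT
 * and are probed via the CBM MSRs instead; early Skylake-X steppings run
 * with several RDT features disabled by default.
 */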
static __init void __check_quirks_intel(void)
{
	switch (boot_cpu_data.x86_model) {
	case INTEL_FAM6_HASWELL_X:
		if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
			cache_alloc_hsw_probe();
		break;
	case INTEL_FAM6_SKYLAKE_X:
		if (boot_cpu_data.x86_stepping <= 4)
			set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
		else
			set_rdt_options("!l3cat");
	}
}

static __init void check_quirks(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		__check_quirks_intel();
}
static __init bool get_rdt_resources(void)
{
	rdt_alloc_capable = get_rdt_alloc_resources();
	rdt_mon_capable = get_rdt_mon_resources();

	return (rdt_mon_capable || rdt_alloc_capable);
}
static __init void rdt_init_res_defs_intel(void)
{
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L3DATA ||
		    r->rid == RDT_RESOURCE_L3CODE ||
		    r->rid == RDT_RESOURCE_L2 ||
		    r->rid == RDT_RESOURCE_L2DATA ||
		    r->rid == RDT_RESOURCE_L2CODE)
			r->cbm_validate = cbm_validate_intel;
		else if (r->rid == RDT_RESOURCE_MBA) {
			r->msr_base = MSR_IA32_MBA_THRTL_BASE;
			r->msr_update = mba_wrmsr_intel;
			r->parse_ctrlval = parse_bw_intel;
		}
	}
}
static __init void rdt_init_res_defs_amd(void)
{
	struct rdt_resource *r;

	for_each_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_L3 ||
		    r->rid == RDT_RESOURCE_L3DATA ||
		    r->rid == RDT_RESOURCE_L3CODE ||
		    r->rid == RDT_RESOURCE_L2 ||
		    r->rid == RDT_RESOURCE_L2DATA ||
		    r->rid == RDT_RESOURCE_L2CODE)
			r->cbm_validate = cbm_validate_amd;
		else if (r->rid == RDT_RESOURCE_MBA) {
			r->msr_base = MSR_IA32_MBA_BW_BASE;
			r->msr_update = mba_wrmsr_amd;
			r->parse_ctrlval = parse_bw_amd;
		}
	}
}
static __init void rdt_init_res_defs(void)
{
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		rdt_init_res_defs_intel();
	else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		rdt_init_res_defs_amd();
}
static enum cpuhp_state rdt_online;

static int __init resctrl_late_init(void)
{
	struct rdt_resource *r;
	int state, ret;

	/*
	 * Initialize functions (or definitions) that are different
	 * between vendors here.
	 */
	rdt_init_res_defs();

	check_quirks();

	if (!get_rdt_resources())
		return -ENODEV;

	rdt_init_padding();

	state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
				  "x86/resctrl/cat:online:",
				  resctrl_online_cpu, resctrl_offline_cpu);
	if (state < 0)
		return state;

	ret = rdtgroup_init();
	if (ret) {
		cpuhp_remove_state(state);
		return ret;
	}
	rdt_online = state;

	for_each_alloc_capable_rdt_resource(r)
		pr_info("%s allocation detected\n", r->name);

	for_each_mon_capable_rdt_resource(r)
		pr_info("%s monitoring detected\n", r->name);

	return 0;
}

late_initcall(resctrl_late_init);
static void __exit resctrl_exit(void)
{
	cpuhp_remove_state(rdt_online);
	rdtgroup_exit();
}

__exitcall(resctrl_exit);