1 // SPDX-License-Identifier: GPL-2.0-only
3 * Resource Director Technology (RDT)
4 * - Cache Allocation code.
6 * Copyright (C) 2016 Intel Corporation
9 * Fenghua Yu <fenghua.yu@intel.com>
10 * Tony Luck <tony.luck@intel.com>
11 * Vikas Shivappa <vikas.shivappa@intel.com>
13 * More information about RDT can be found in the Intel (R) x86 Architecture
14 * Software Developer Manual June 2016, volume 3, section 17.17.
17 #define pr_fmt(fmt) "resctrl: " fmt
19 #include <linux/slab.h>
20 #include <linux/err.h>
21 #include <linux/cacheinfo.h>
22 #include <linux/cpuhotplug.h>
24 #include <asm/intel-family.h>
25 #include <asm/resctrl.h>
28 /* Mutex to protect rdtgroup access. */
29 DEFINE_MUTEX(rdtgroup_mutex);
32 * The cached resctrl_pqr_state is strictly per CPU and can never be
33 * updated from a remote CPU. Functions which modify the state
34 * are called with interrupts disabled and no preemption, which
35 * is sufficient for the protection.
37 DEFINE_PER_CPU(struct resctrl_pqr_state, pqr_state);
40 * Used to store the max resource name width and max resource data width
41 * to display the schemata in a tabular format
43 int max_name_width, max_data_width;
46 * Global boolean for rdt_alloc which is true if any
47 * resource allocation is enabled.
49 bool rdt_alloc_capable;
52 mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
53 struct rdt_resource *r);
55 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r);
57 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m,
58 struct rdt_resource *r);
60 #define domain_init(id) LIST_HEAD_INIT(rdt_resources_all[id].r_resctrl.domains)
62 struct rdt_hw_resource rdt_resources_all[] = {
65 .conf_type = CDP_NONE,
67 .rid = RDT_RESOURCE_L3,
75 .domains = domain_init(RDT_RESOURCE_L3),
76 .parse_ctrlval = parse_cbm,
77 .format_str = "%d=%0*x",
78 .fflags = RFTYPE_RES_CACHE,
80 .msr_base = MSR_IA32_L3_CBM_BASE,
81 .msr_update = cat_wrmsr,
83 [RDT_RESOURCE_L3DATA] =
85 .conf_type = CDP_DATA,
87 .rid = RDT_RESOURCE_L3DATA,
95 .domains = domain_init(RDT_RESOURCE_L3DATA),
96 .parse_ctrlval = parse_cbm,
97 .format_str = "%d=%0*x",
98 .fflags = RFTYPE_RES_CACHE,
100 .msr_base = MSR_IA32_L3_CBM_BASE,
101 .msr_update = cat_wrmsr,
103 [RDT_RESOURCE_L3CODE] =
105 .conf_type = CDP_CODE,
107 .rid = RDT_RESOURCE_L3CODE,
115 .domains = domain_init(RDT_RESOURCE_L3CODE),
116 .parse_ctrlval = parse_cbm,
117 .format_str = "%d=%0*x",
118 .fflags = RFTYPE_RES_CACHE,
120 .msr_base = MSR_IA32_L3_CBM_BASE,
121 .msr_update = cat_wrmsr,
125 .conf_type = CDP_NONE,
127 .rid = RDT_RESOURCE_L2,
135 .domains = domain_init(RDT_RESOURCE_L2),
136 .parse_ctrlval = parse_cbm,
137 .format_str = "%d=%0*x",
138 .fflags = RFTYPE_RES_CACHE,
140 .msr_base = MSR_IA32_L2_CBM_BASE,
141 .msr_update = cat_wrmsr,
143 [RDT_RESOURCE_L2DATA] =
145 .conf_type = CDP_DATA,
147 .rid = RDT_RESOURCE_L2DATA,
155 .domains = domain_init(RDT_RESOURCE_L2DATA),
156 .parse_ctrlval = parse_cbm,
157 .format_str = "%d=%0*x",
158 .fflags = RFTYPE_RES_CACHE,
160 .msr_base = MSR_IA32_L2_CBM_BASE,
161 .msr_update = cat_wrmsr,
163 [RDT_RESOURCE_L2CODE] =
165 .conf_type = CDP_CODE,
167 .rid = RDT_RESOURCE_L2CODE,
175 .domains = domain_init(RDT_RESOURCE_L2CODE),
176 .parse_ctrlval = parse_cbm,
177 .format_str = "%d=%0*x",
178 .fflags = RFTYPE_RES_CACHE,
180 .msr_base = MSR_IA32_L2_CBM_BASE,
181 .msr_update = cat_wrmsr,
185 .conf_type = CDP_NONE,
187 .rid = RDT_RESOURCE_MBA,
190 .domains = domain_init(RDT_RESOURCE_MBA),
191 .parse_ctrlval = parse_bw,
192 .format_str = "%d=%*u",
193 .fflags = RFTYPE_RES_MB,
198 static unsigned int cbm_idx(struct rdt_resource *r, unsigned int closid)
200 return closid * r->cache.cbm_idx_mult + r->cache.cbm_idx_offset;
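/*
 * E.g. plain CAT uses the identity mapping (mult 1, offset 0), while with CDP
 * enabled the data and code masks interleave: the data mask of CLOSID n lives
 * at MSR index 2n and the code mask at 2n + 1, which the *DATA/*CODE resources
 * express via cbm_idx_mult = 2 and cbm_idx_offset = 0 or 1.
 */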
204 * cache_alloc_hsw_probe() - Probe for Intel Haswell server CPUs, which do
205 * not have CPUID enumeration support for Cache Allocation.
206 * The check for Vendor/Family/Model is not enough to guarantee that
207 * the MSRs won't #GP fault because only the following SKUs support CAT:
209 * Intel(R) Xeon(R) CPU E5-2658 v3 @ 2.20GHz
210 * Intel(R) Xeon(R) CPU E5-2648L v3 @ 1.80GHz
211 * Intel(R) Xeon(R) CPU E5-2628L v3 @ 2.00GHz
212 * Intel(R) Xeon(R) CPU E5-2618L v3 @ 2.30GHz
213 * Intel(R) Xeon(R) CPU E5-2608L v3 @ 2.00GHz
214 * Intel(R) Xeon(R) CPU E5-2658A v3 @ 2.20GHz
216 * Probe by trying to write the first of the L3 cache mask registers
217 * and checking that the bits stick. The maximum number of CLOSids is always 4
218 * and the maximum cbm length is always 20 on HSW server parts. The minimum
219 * cache bitmask length allowed for HSW servers is always 2 bits. Hardcode all of them.
221 static inline void cache_alloc_hsw_probe(void)
223 struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_L3];
224 struct rdt_resource *r = &hw_res->r_resctrl;
225 u32 l, h, max_cbm = BIT_MASK(20) - 1;
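/* max_cbm is 0xfffff: all 20 CBM bits set. */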
227 if (wrmsr_safe(MSR_IA32_L3_CBM_BASE, max_cbm, 0))
230 rdmsr(MSR_IA32_L3_CBM_BASE, l, h);
232 /* If all the bits were set in MSR, return success */
236 hw_res->num_closid = 4;
237 r->default_ctrl = max_cbm;
238 r->cache.cbm_len = 20;
239 r->cache.shareable_bits = 0xc0000;
240 r->cache.min_cbm_bits = 2;
241 r->alloc_capable = true;
242 r->alloc_enabled = true;
244 rdt_alloc_capable = true;
247 bool is_mba_sc(struct rdt_resource *r)
250 return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.mba_sc;
252 return r->membw.mba_sc;
256 * rdt_get_mb_table() - Get a mapping between the bandwidth (b/w) percentage
257 * values exposed to the user interface and the h/w understandable delay values.
259 * The non-linear delay values have power-of-two granularity, and the h/w
260 * does not guarantee a curve of configured delay values vs. the actual
261 * b/w enforced.
262 * Hence we need a pre-calibrated mapping so that the user can express
263 * the memory b/w as a percentage value.
265 static inline bool rdt_get_mb_table(struct rdt_resource *r)
268 * There are no Intel SKUs as of now that support non-linear delay.
270 pr_info("MBA b/w map not implemented for cpu:%d, model:%d\n",
271 boot_cpu_data.x86, boot_cpu_data.x86_model);
276 static bool __get_mem_config_intel(struct rdt_resource *r)
278 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
279 union cpuid_0x10_3_eax eax;
280 union cpuid_0x10_x_edx edx;
281 u32 ebx, ecx, max_delay;
283 cpuid_count(0x00000010, 3, &eax.full, &ebx, &ecx, &edx.full);
284 hw_res->num_closid = edx.split.cos_max + 1;
285 max_delay = eax.split.max_delay + 1;
286 r->default_ctrl = MAX_MBA_BW;
287 r->membw.arch_needs_linear = true;
288 if (ecx & MBA_IS_LINEAR) {
289 r->membw.delay_linear = true;
290 r->membw.min_bw = MAX_MBA_BW - max_delay;
291 r->membw.bw_gran = MAX_MBA_BW - max_delay;
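/*
 * E.g. if the enumerated maximum delay is 90 (with MAX_MBA_BW == 100),
 * bandwidth can be requested from 10% up to 100% in steps of 10%.
 */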
293 if (!rdt_get_mb_table(r))
295 r->membw.arch_needs_linear = false;
299 if (boot_cpu_has(X86_FEATURE_PER_THREAD_MBA))
300 r->membw.throttle_mode = THREAD_THROTTLE_PER_THREAD;
302 r->membw.throttle_mode = THREAD_THROTTLE_MAX;
303 thread_throttle_mode_init();
305 r->alloc_capable = true;
306 r->alloc_enabled = true;
311 static bool __rdt_get_mem_config_amd(struct rdt_resource *r)
313 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
314 union cpuid_0x10_3_eax eax;
315 union cpuid_0x10_x_edx edx;
318 cpuid_count(0x80000020, 1, &eax.full, &ebx, &ecx, &edx.full);
319 hw_res->num_closid = edx.split.cos_max + 1;
320 r->default_ctrl = MAX_MBA_BW_AMD;
322 /* AMD does not use delay */
323 r->membw.delay_linear = false;
324 r->membw.arch_needs_linear = false;
327 * AMD does not use the memory delay throttle model to control
328 * allocation the way Intel does.
330 r->membw.throttle_mode = THREAD_THROTTLE_UNDEFINED;
332 r->membw.bw_gran = 1;
333 /* Max value is 2048, so the data width should be 4 (decimal digits) */
336 r->alloc_capable = true;
337 r->alloc_enabled = true;
342 static void rdt_get_cache_alloc_cfg(int idx, struct rdt_resource *r)
344 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
345 union cpuid_0x10_1_eax eax;
346 union cpuid_0x10_x_edx edx;
349 cpuid_count(0x00000010, idx, &eax.full, &ebx, &ecx, &edx.full);
350 hw_res->num_closid = edx.split.cos_max + 1;
351 r->cache.cbm_len = eax.split.cbm_len + 1;
352 r->default_ctrl = BIT_MASK(eax.split.cbm_len + 1) - 1;
353 r->cache.shareable_bits = ebx & r->default_ctrl;
354 r->data_width = (r->cache.cbm_len + 3) / 4;
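/*
 * E.g. a 20-bit CBM yields default_ctrl = 0xfffff and a data_width of
 * (20 + 3) / 4 = 5 hex digits for schemata formatting.
 */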
355 r->alloc_capable = true;
356 r->alloc_enabled = true;
359 static void rdt_get_cdp_config(int level, int type)
361 struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
362 struct rdt_hw_resource *hw_res_l = resctrl_to_arch_res(r_l);
363 struct rdt_resource *r = &rdt_resources_all[type].r_resctrl;
364 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
366 hw_res->num_closid = hw_res_l->num_closid / 2;
367 r->cache.cbm_len = r_l->cache.cbm_len;
368 r->default_ctrl = r_l->default_ctrl;
369 r->cache.shareable_bits = r_l->cache.shareable_bits;
370 r->data_width = (r->cache.cbm_len + 3) / 4;
371 r->alloc_capable = true;
373 * By default, CDP is disabled. CDP can be enabled by the "cdp" mount
374 * parameter when the resctrl filesystem is mounted.
376 r->alloc_enabled = false;
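/*
 * E.g. if L3 CAT enumerates 16 CLOSIDs, enabling CDP leaves 8 CLOSIDs, each
 * backed by a separate data mask and code mask.
 */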
379 static void rdt_get_cdp_l3_config(void)
381 rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA);
382 rdt_get_cdp_config(RDT_RESOURCE_L3, RDT_RESOURCE_L3CODE);
385 static void rdt_get_cdp_l2_config(void)
387 rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA);
388 rdt_get_cdp_config(RDT_RESOURCE_L2, RDT_RESOURCE_L2CODE);
392 mba_wrmsr_amd(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
395 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
396 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
398 for (i = m->low; i < m->high; i++)
399 wrmsrl(hw_res->msr_base + i, hw_dom->ctrl_val[i]);
403 * Map the memory b/w percentage value to delay values
404 * that can be written to QOS_MSRs.
405 * There are currently no SKUs which support non-linear delay values.
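 * E.g. with linear delay and MAX_MBA_BW == 100, a request for 90% bandwidth
 * is programmed as a delay value of 100 - 90 = 10.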
407 u32 delay_bw_map(unsigned long bw, struct rdt_resource *r)
409 if (r->membw.delay_linear)
410 return MAX_MBA_BW - bw;
412 pr_warn_once("Non Linear delay-bw map not supported but queried\n");
413 return r->default_ctrl;
417 mba_wrmsr_intel(struct rdt_domain *d, struct msr_param *m,
418 struct rdt_resource *r)
421 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
422 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
424 /* Write the delay values for mba. */
425 for (i = m->low; i < m->high; i++)
426 wrmsrl(hw_res->msr_base + i, delay_bw_map(hw_dom->ctrl_val[i], r));
430 cat_wrmsr(struct rdt_domain *d, struct msr_param *m, struct rdt_resource *r)
433 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
434 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
436 for (i = m->low; i < m->high; i++)
437 wrmsrl(hw_res->msr_base + cbm_idx(r, i), hw_dom->ctrl_val[i]);
440 struct rdt_domain *get_domain_from_cpu(int cpu, struct rdt_resource *r)
442 struct rdt_domain *d;
444 list_for_each_entry(d, &r->domains, list) {
445 /* Find the domain that contains this CPU */
446 if (cpumask_test_cpu(cpu, &d->cpu_mask))
453 u32 resctrl_arch_get_num_closid(struct rdt_resource *r)
455 return resctrl_to_arch_res(r)->num_closid;
458 void rdt_ctrl_update(void *arg)
460 struct msr_param *m = arg;
461 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(m->res);
462 struct rdt_resource *r = m->res;
463 int cpu = smp_processor_id();
464 struct rdt_domain *d;
466 d = get_domain_from_cpu(cpu, r);
468 hw_res->msr_update(d, m, r);
471 pr_warn_once("cpu %d not found in any domain for resource %s\n",
476 * rdt_find_domain - Find a domain in a resource that matches input resource id
478 * Search resource r's domain list to find the resource id. If the resource
479 * id is found in a domain, return the domain. Otherwise, if requested by
480 * caller, return the first domain whose id is bigger than the input id.
481 * The domain list is sorted by id in ascending order.
483 struct rdt_domain *rdt_find_domain(struct rdt_resource *r, int id,
484 struct list_head **pos)
486 struct rdt_domain *d;
490 return ERR_PTR(-ENODEV);
492 list_for_each(l, &r->domains) {
493 d = list_entry(l, struct rdt_domain, list);
494 /* When id is found, return its domain. */
497 /* Stop searching when id's position in the sorted list is found. */
508 void setup_default_ctrlval(struct rdt_resource *r, u32 *dc, u32 *dm)
510 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
514 * Initialize the Control MSRs to having no control.
515 * For Cache Allocation: Set all bits in cbm
516 * For Memory Allocation: Set b/w requested to 100%
517 * and the bandwidth in MBps to U32_MAX
519 for (i = 0; i < hw_res->num_closid; i++, dc++, dm++) {
520 *dc = r->default_ctrl;
525 static int domain_setup_ctrlval(struct rdt_resource *r, struct rdt_domain *d)
527 struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
528 struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
532 dc = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->ctrl_val), GFP_KERNEL);
536 dm = kmalloc_array(hw_res->num_closid, sizeof(*hw_dom->mbps_val), GFP_KERNEL);
542 hw_dom->ctrl_val = dc;
543 hw_dom->mbps_val = dm;
544 setup_default_ctrlval(r, dc, dm);
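/* Program every CLOSID's control MSR in the new domain with the defaults. */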
547 m.high = hw_res->num_closid;
548 hw_res->msr_update(d, &m, r);
552 static int domain_setup_mon_state(struct rdt_resource *r, struct rdt_domain *d)
556 if (is_llc_occupancy_enabled()) {
557 d->rmid_busy_llc = bitmap_zalloc(r->num_rmid, GFP_KERNEL);
558 if (!d->rmid_busy_llc)
560 INIT_DELAYED_WORK(&d->cqm_limbo, cqm_handle_limbo);
562 if (is_mbm_total_enabled()) {
563 tsize = sizeof(*d->mbm_total);
564 d->mbm_total = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
566 bitmap_free(d->rmid_busy_llc);
570 if (is_mbm_local_enabled()) {
571 tsize = sizeof(*d->mbm_local);
572 d->mbm_local = kcalloc(r->num_rmid, tsize, GFP_KERNEL);
574 bitmap_free(d->rmid_busy_llc);
580 if (is_mbm_enabled()) {
581 INIT_DELAYED_WORK(&d->mbm_over, mbm_handle_overflow);
582 mbm_setup_overflow_handler(d, MBM_OVERFLOW_INTERVAL);
589 * domain_add_cpu - Add a cpu to a resource's domain list.
591 * If an existing domain in the resource r's domain list matches the cpu's
592 * resource id, add the cpu to the domain.
594 * Otherwise, a new domain is allocated and inserted into the right position
595 * in the domain list sorted by id in ascending order.
597 * The order in the domain list is visible to users when we print entries
598 * in the schemata file and schemata input is validated to have the same order as this list.
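 *
 * rdt_find_domain() reports the insertion point through its "pos" argument
 * when no matching domain exists yet, so the new domain can simply be
 * list_add_tail()'d at that position to keep the list sorted.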
601 static void domain_add_cpu(int cpu, struct rdt_resource *r)
603 int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
604 struct list_head *add_pos = NULL;
605 struct rdt_hw_domain *hw_dom;
606 struct rdt_domain *d;
608 d = rdt_find_domain(r, id, &add_pos);
610 pr_warn("Couldn't find cache id for CPU %d\n", cpu);
615 cpumask_set_cpu(cpu, &d->cpu_mask);
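/*
 * If the configuration registers have CPU scope (arch_has_per_cpu_cfg, as on
 * AMD), the newly onlined CPU still needs its CDP configuration refreshed
 * even though the domain already exists.
 */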
616 if (r->cache.arch_has_per_cpu_cfg)
617 rdt_domain_reconfigure_cdp(r);
621 hw_dom = kzalloc_node(sizeof(*hw_dom), GFP_KERNEL, cpu_to_node(cpu));
625 d = &hw_dom->d_resctrl;
627 cpumask_set_cpu(cpu, &d->cpu_mask);
629 rdt_domain_reconfigure_cdp(r);
631 if (r->alloc_capable && domain_setup_ctrlval(r, d)) {
636 if (r->mon_capable && domain_setup_mon_state(r, d)) {
641 list_add_tail(&d->list, add_pos);
644 * If resctrl is mounted, add
645 * per domain monitor data directories.
647 if (static_branch_unlikely(&rdt_mon_enable_key))
648 mkdir_mondata_subdir_allrdtgrp(r, d);
651 static void domain_remove_cpu(int cpu, struct rdt_resource *r)
653 int id = get_cpu_cacheinfo_id(cpu, r->cache_level);
654 struct rdt_hw_domain *hw_dom;
655 struct rdt_domain *d;
657 d = rdt_find_domain(r, id, NULL);
658 if (IS_ERR_OR_NULL(d)) {
659 pr_warn("Couldn't find cache id for CPU %d\n", cpu);
662 hw_dom = resctrl_to_arch_dom(d);
664 cpumask_clear_cpu(cpu, &d->cpu_mask);
665 if (cpumask_empty(&d->cpu_mask)) {
667 * If resctrl is mounted, remove all the
668 * per domain monitor data directories.
670 if (static_branch_unlikely(&rdt_mon_enable_key))
671 rmdir_mondata_subdir_allrdtgrp(r, d->id);
673 if (r->mon_capable && is_mbm_enabled())
674 cancel_delayed_work(&d->mbm_over);
675 if (is_llc_occupancy_enabled() && has_busy_rmid(r, d)) {
677 * When a package is going down, forcefully
678 * decrement rmid->ebusy. There is no way to know
679 * that the L3 was flushed and hence may lead to
680 * incorrect counts in rare scenarios, but leaving
681 * the RMID as busy creates RMID leaks if the
682 * package never comes back.
684 __check_limbo(d, true);
685 cancel_delayed_work(&d->cqm_limbo);
689 * rdt_domain "d" is going to be freed below, so clear
690 * its pointer from pseudo_lock_region struct.
695 kfree(hw_dom->ctrl_val);
696 kfree(hw_dom->mbps_val);
697 bitmap_free(d->rmid_busy_llc);
704 if (r == &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl) {
705 if (is_mbm_enabled() && cpu == d->mbm_work_cpu) {
706 cancel_delayed_work(&d->mbm_over);
707 mbm_setup_overflow_handler(d, 0);
709 if (is_llc_occupancy_enabled() && cpu == d->cqm_work_cpu &&
710 has_busy_rmid(r, d)) {
711 cancel_delayed_work(&d->cqm_limbo);
712 cqm_setup_limbo_handler(d, 0);
717 static void clear_closid_rmid(int cpu)
719 struct resctrl_pqr_state *state = this_cpu_ptr(&pqr_state);
721 state->default_closid = 0;
722 state->default_rmid = 0;
723 state->cur_closid = 0;
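/*
 * The low half of PQR_ASSOC carries the RMID and the high half the CLOSID,
 * so writing 0/0 returns this CPU to the default monitoring and control group.
 */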
725 wrmsr(IA32_PQR_ASSOC, 0, 0);
728 static int resctrl_online_cpu(unsigned int cpu)
730 struct rdt_resource *r;
732 mutex_lock(&rdtgroup_mutex);
733 for_each_capable_rdt_resource(r)
734 domain_add_cpu(cpu, r);
735 /* The CPU is placed in the default rdtgroup after coming online. */
736 cpumask_set_cpu(cpu, &rdtgroup_default.cpu_mask);
737 clear_closid_rmid(cpu);
738 mutex_unlock(&rdtgroup_mutex);
743 static void clear_childcpus(struct rdtgroup *r, unsigned int cpu)
747 list_for_each_entry(cr, &r->mon.crdtgrp_list, mon.crdtgrp_list) {
748 if (cpumask_test_and_clear_cpu(cpu, &cr->cpu_mask)) {
754 static int resctrl_offline_cpu(unsigned int cpu)
756 struct rdtgroup *rdtgrp;
757 struct rdt_resource *r;
759 mutex_lock(&rdtgroup_mutex);
760 for_each_capable_rdt_resource(r)
761 domain_remove_cpu(cpu, r);
762 list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
763 if (cpumask_test_and_clear_cpu(cpu, &rdtgrp->cpu_mask)) {
764 clear_childcpus(rdtgrp, cpu);
768 clear_closid_rmid(cpu);
769 mutex_unlock(&rdtgroup_mutex);
775 * Choose a width for the resource name and resource data based on the
776 * resource that has the widest name and cbm.
778 static __init void rdt_init_padding(void)
780 struct rdt_resource *r;
783 for_each_alloc_capable_rdt_resource(r) {
784 cl = strlen(r->name);
785 if (cl > max_name_width)
788 if (r->data_width > max_data_width)
789 max_data_width = r->data_width;
804 #define RDT_OPT(idx, n, f) \
813 bool force_off, force_on;
816 static struct rdt_options rdt_options[] __initdata = {
817 RDT_OPT(RDT_FLAG_CMT, "cmt", X86_FEATURE_CQM_OCCUP_LLC),
818 RDT_OPT(RDT_FLAG_MBM_TOTAL, "mbmtotal", X86_FEATURE_CQM_MBM_TOTAL),
819 RDT_OPT(RDT_FLAG_MBM_LOCAL, "mbmlocal", X86_FEATURE_CQM_MBM_LOCAL),
820 RDT_OPT(RDT_FLAG_L3_CAT, "l3cat", X86_FEATURE_CAT_L3),
821 RDT_OPT(RDT_FLAG_L3_CDP, "l3cdp", X86_FEATURE_CDP_L3),
822 RDT_OPT(RDT_FLAG_L2_CAT, "l2cat", X86_FEATURE_CAT_L2),
823 RDT_OPT(RDT_FLAG_L2_CDP, "l2cdp", X86_FEATURE_CDP_L2),
824 RDT_OPT(RDT_FLAG_MBA, "mba", X86_FEATURE_MBA),
826 #define NUM_RDT_OPTIONS ARRAY_SIZE(rdt_options)
828 static int __init set_rdt_options(char *str)
830 struct rdt_options *o;
836 while ((tok = strsep(&str, ",")) != NULL) {
837 force_off = *tok == '!';
840 for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
841 if (strcmp(tok, o->name) == 0) {
852 __setup("rdt", set_rdt_options);
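/*
 * E.g. booting with "rdt=!l3cat,cmt" asks to force L3 CAT off and CMT on,
 * regardless of what CPUID enumerates for those features.
 */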
854 static bool __init rdt_cpu_has(int flag)
856 bool ret = boot_cpu_has(flag);
857 struct rdt_options *o;
862 for (o = rdt_options; o < &rdt_options[NUM_RDT_OPTIONS]; o++) {
863 if (flag == o->flag) {
874 static __init bool get_mem_config(void)
876 struct rdt_hw_resource *hw_res = &rdt_resources_all[RDT_RESOURCE_MBA];
878 if (!rdt_cpu_has(X86_FEATURE_MBA))
881 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
882 return __get_mem_config_intel(&hw_res->r_resctrl);
883 else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
884 return __rdt_get_mem_config_amd(&hw_res->r_resctrl);
889 static __init bool get_rdt_alloc_resources(void)
891 struct rdt_resource *r;
894 if (rdt_alloc_capable)
897 if (!boot_cpu_has(X86_FEATURE_RDT_A))
900 if (rdt_cpu_has(X86_FEATURE_CAT_L3)) {
901 r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
902 rdt_get_cache_alloc_cfg(1, r);
903 if (rdt_cpu_has(X86_FEATURE_CDP_L3))
904 rdt_get_cdp_l3_config();
907 if (rdt_cpu_has(X86_FEATURE_CAT_L2)) {
908 /* CPUID 0x10.2 fields have the same format as 0x10.1 */
909 r = &rdt_resources_all[RDT_RESOURCE_L2].r_resctrl;
910 rdt_get_cache_alloc_cfg(2, r);
911 if (rdt_cpu_has(X86_FEATURE_CDP_L2))
912 rdt_get_cdp_l2_config();
916 if (get_mem_config())
922 static __init bool get_rdt_mon_resources(void)
924 struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
926 if (rdt_cpu_has(X86_FEATURE_CQM_OCCUP_LLC))
927 rdt_mon_features |= (1 << QOS_L3_OCCUP_EVENT_ID);
928 if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL))
929 rdt_mon_features |= (1 << QOS_L3_MBM_TOTAL_EVENT_ID);
930 if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL))
931 rdt_mon_features |= (1 << QOS_L3_MBM_LOCAL_EVENT_ID);
933 if (!rdt_mon_features)
936 return !rdt_get_mon_l3_config(r);
939 static __init void __check_quirks_intel(void)
941 switch (boot_cpu_data.x86_model) {
942 case INTEL_FAM6_HASWELL_X:
943 if (!rdt_options[RDT_FLAG_L3_CAT].force_off)
944 cache_alloc_hsw_probe();
946 case INTEL_FAM6_SKYLAKE_X:
947 if (boot_cpu_data.x86_stepping <= 4)
948 set_rdt_options("!cmt,!mbmtotal,!mbmlocal,!l3cat");
950 set_rdt_options("!l3cat");
952 case INTEL_FAM6_BROADWELL_X:
953 intel_rdt_mbm_apply_quirk();
958 static __init void check_quirks(void)
960 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
961 __check_quirks_intel();
964 static __init bool get_rdt_resources(void)
966 rdt_alloc_capable = get_rdt_alloc_resources();
967 rdt_mon_capable = get_rdt_mon_resources();
969 return (rdt_mon_capable || rdt_alloc_capable);
972 static __init void rdt_init_res_defs_intel(void)
974 struct rdt_hw_resource *hw_res;
975 struct rdt_resource *r;
977 for_each_rdt_resource(r) {
978 hw_res = resctrl_to_arch_res(r);
980 if (r->rid == RDT_RESOURCE_L3 ||
981 r->rid == RDT_RESOURCE_L3DATA ||
982 r->rid == RDT_RESOURCE_L3CODE ||
983 r->rid == RDT_RESOURCE_L2 ||
984 r->rid == RDT_RESOURCE_L2DATA ||
985 r->rid == RDT_RESOURCE_L2CODE) {
986 r->cache.arch_has_sparse_bitmaps = false;
987 r->cache.arch_has_empty_bitmaps = false;
988 r->cache.arch_has_per_cpu_cfg = false;
989 } else if (r->rid == RDT_RESOURCE_MBA) {
990 hw_res->msr_base = MSR_IA32_MBA_THRTL_BASE;
991 hw_res->msr_update = mba_wrmsr_intel;
996 static __init void rdt_init_res_defs_amd(void)
998 struct rdt_hw_resource *hw_res;
999 struct rdt_resource *r;
1001 for_each_rdt_resource(r) {
1002 hw_res = resctrl_to_arch_res(r);
1004 if (r->rid == RDT_RESOURCE_L3 ||
1005 r->rid == RDT_RESOURCE_L3DATA ||
1006 r->rid == RDT_RESOURCE_L3CODE ||
1007 r->rid == RDT_RESOURCE_L2 ||
1008 r->rid == RDT_RESOURCE_L2DATA ||
1009 r->rid == RDT_RESOURCE_L2CODE) {
1010 r->cache.arch_has_sparse_bitmaps = true;
1011 r->cache.arch_has_empty_bitmaps = true;
1012 r->cache.arch_has_per_cpu_cfg = true;
1013 } else if (r->rid == RDT_RESOURCE_MBA) {
1014 hw_res->msr_base = MSR_IA32_MBA_BW_BASE;
1015 hw_res->msr_update = mba_wrmsr_amd;
1020 static __init void rdt_init_res_defs(void)
1022 if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
1023 rdt_init_res_defs_intel();
1024 else if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
1025 rdt_init_res_defs_amd();
1028 static enum cpuhp_state rdt_online;
1030 /* Runs once on the BSP during boot. */
1031 void resctrl_cpu_detect(struct cpuinfo_x86 *c)
1033 if (!cpu_has(c, X86_FEATURE_CQM_LLC)) {
1034 c->x86_cache_max_rmid = -1;
1035 c->x86_cache_occ_scale = -1;
1036 c->x86_cache_mbm_width_offset = -1;
1040 /* will be overridden if occupancy monitoring exists */
1041 c->x86_cache_max_rmid = cpuid_ebx(0xf);
1043 if (cpu_has(c, X86_FEATURE_CQM_OCCUP_LLC) ||
1044 cpu_has(c, X86_FEATURE_CQM_MBM_TOTAL) ||
1045 cpu_has(c, X86_FEATURE_CQM_MBM_LOCAL)) {
1046 u32 eax, ebx, ecx, edx;
1048 /* QoS sub-leaf, EAX=0Fh, ECX=1 */
1049 cpuid_count(0xf, 1, &eax, &ebx, &ecx, &edx);
1051 c->x86_cache_max_rmid = ecx;
1052 c->x86_cache_occ_scale = ebx;
1053 c->x86_cache_mbm_width_offset = eax & 0xff;
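/*
 * The width offset is added to the architectural 24-bit base width when the
 * MBM counter width is computed by the monitoring code.
 */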
1055 if (c->x86_vendor == X86_VENDOR_AMD && !c->x86_cache_mbm_width_offset)
1056 c->x86_cache_mbm_width_offset = MBM_CNTR_WIDTH_OFFSET_AMD;
1060 static int __init resctrl_late_init(void)
1062 struct rdt_resource *r;
1066 * Initialize functions (or definitions) that differ
1067 * between vendors here.
1069 rdt_init_res_defs();
1073 if (!get_rdt_resources())
1078 state = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
1079 "x86/resctrl/cat:online:",
1080 resctrl_online_cpu, resctrl_offline_cpu);
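/*
 * CPUHP_AP_ONLINE_DYN hands back a dynamically allocated hotplug state
 * (> 0) on success, which resctrl_exit() later uses to remove the callbacks.
 */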
1084 ret = rdtgroup_init();
1086 cpuhp_remove_state(state);
1091 for_each_alloc_capable_rdt_resource(r)
1092 pr_info("%s allocation detected\n", r->name);
1094 for_each_mon_capable_rdt_resource(r)
1095 pr_info("%s monitoring detected\n", r->name);
1100 late_initcall(resctrl_late_init);
1102 static void __exit resctrl_exit(void)
1104 cpuhp_remove_state(rdt_online);
1108 __exitcall(resctrl_exit);