// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 *
 * Copyright (C) 2017 Intel Corporation
 *
 * Vikas Shivappa <vikas.shivappa@intel.com>
 *
 * This replaces the perf-based cqm.c, but we reuse a lot of
 * code and data structures originally from Peter Zijlstra and Matt Fleming.
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */
#include <linux/module.h>
#include <linux/sizes.h>
#include <linux/slab.h>

#include <asm/cpu_device_id.h>
#include <asm/resctrl.h>

#include "internal.h"
/*
 * struct rmid_entry - dirty tracking for all RMIDs.
 * @closid:	The CLOSID for this entry.
 * @rmid:	The RMID for this entry.
 * @busy:	The number of domains with cached data using this RMID.
 * @list:	Member of the rmid_free_lru list when busy == 0.
 *
 * Depending on the architecture, the correct monitor is accessed using
 * both @closid and @rmid, or @rmid only.
 *
 * Take the rdtgroup_mutex when accessing.
 */
struct rmid_entry {
	u32			closid;
	u32			rmid;
	int			busy;
	struct list_head	list;
};
/*
 * @rmid_free_lru - A least recently used list of free RMIDs
 *     These RMIDs are guaranteed to have an occupancy less than the
 *     threshold occupancy.
 */
static LIST_HEAD(rmid_free_lru);
/*
 * @closid_num_dirty_rmid    The number of dirty RMIDs each CLOSID has.
 *     Only allocated when CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID is defined.
 *     Indexed by CLOSID. Protected by rdtgroup_mutex.
 */
static u32 *closid_num_dirty_rmid;
/*
 * @rmid_limbo_count - count of currently unused, but (potentially)
 *     dirty RMIDs.
 *     This counts RMIDs that no one is currently using but that
 *     may have an occupancy value > resctrl_rmid_realloc_threshold. The user
 *     can change the threshold occupancy value.
 */
static unsigned int rmid_limbo_count;
/*
 * @rmid_entry - The entry in the limbo and free lists.
 */
static struct rmid_entry *rmid_ptrs;
/*
 * Global boolean for rdt_monitor which is true if any
 * resource monitoring is enabled.
 */
bool rdt_mon_capable;
/*
 * Global to indicate which monitoring events are enabled.
 */
unsigned int rdt_mon_features;
/*
 * This is the threshold cache occupancy in bytes at which we will consider an
 * RMID available for re-allocation.
 */
unsigned int resctrl_rmid_realloc_threshold;
/*
 * This is the maximum value for the reallocation threshold, in bytes.
 */
unsigned int resctrl_rmid_realloc_limit;
/* Scale a correction factor to 2^20 fixed point, rounding to nearest. */
#define CF(cf)	((unsigned long)(1048576 * (cf) + 0.5))
/*
 * The correction factor table is documented in Documentation/arch/x86/resctrl.rst.
 * If rmid > rmid threshold, MBM total and local values should be multiplied
 * by the correction factor.
 *
 * The original table is modified for better code:
 *
 * 1. The threshold 0 is changed to rmid count - 1 so no correction
 *    is done for that case.
 * 2. The MBM total and local correction table is indexed by core count,
 *    which is equal to (x86_cache_max_rmid + 1) / 8 - 1 and ranges from
 *    0 up to 27.
 * 3. The correction factor is normalized to 2^20 (1048576) so it's faster
 *    to calculate the corrected value by shifting:
 *    corrected_value = (original_value * correction_factor) >> 20
 */
static const struct mbm_correction_factor_table {
	u32 rmidthreshold;
	u64 cf;
} mbm_cf_table[] __initconst = {
static u32 mbm_cf_rmidthreshold __read_mostly = UINT_MAX;
static u64 mbm_cf __read_mostly;
static inline u64 get_corrected_mbm_count(u32 rmid, unsigned long val)
{
	/* Correct MBM value. */
	if (rmid > mbm_cf_rmidthreshold)
		val = (val * mbm_cf) >> 20;

	return val;
}
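
/*
 * Worked example of the 2^20 fixed-point correction above, using an
 * illustrative factor (not necessarily an entry of mbm_cf_table):
 * CF(1.000244) stores 1048832, so a raw count of 1000000 chunks becomes
 * (1000000 * 1048832) >> 20 = 1000244, i.e. the value is scaled up by
 * roughly 0.0244%.
 */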
/*
 * x86 and arm64 differ in their handling of monitoring.
 * x86's RMIDs are independent numbers: there is only one source of traffic
 * with an RMID value of '1'.
 * arm64's PMG extends the PARTID/CLOSID space: there are multiple sources of
 * traffic with a PMG value of '1', one for each CLOSID, meaning the RMID
 * value is no longer unique.
 * To account for this, resctrl uses an index. On x86 this is just the RMID;
 * on arm64 it encodes the CLOSID and RMID. This gives a unique number.
 *
 * The domain's rmid_busy_llc and rmid_ptrs[] are sized by index. The arch code
 * must accept an attempt to read every index.
 */
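
/*
 * Illustrative sketch of the two encodings described above; the real
 * helpers are resctrl_arch_rmid_idx_encode()/_decode() in the arch
 * headers, and "example_num_rmid" here is a hypothetical per-CLOSID
 * RMID count, not a resctrl symbol.
 */
static inline u32 example_rmid_idx_encode(u32 closid, u32 rmid,
					  u32 example_num_rmid)
{
	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
		return closid * example_num_rmid + rmid; /* MPAM-style */

	return rmid; /* x86: the CLOSID plays no part in the index */
}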
static inline struct rmid_entry *__rmid_entry(u32 idx)
{
	struct rmid_entry *entry;
	u32 closid, rmid;

	entry = &rmid_ptrs[idx];
	resctrl_arch_rmid_idx_decode(idx, &closid, &rmid);

	WARN_ON_ONCE(entry->closid != closid);
	WARN_ON_ONCE(entry->rmid != rmid);

	return entry;
}
static int __rmid_read(u32 rmid, enum resctrl_event_id eventid, u64 *val)
{
	u64 msr_val;

	/*
	 * As per the SDM, when IA32_QM_EVTSEL.EvtID (bits 7:0) is configured
	 * with a valid event code for a supported resource type, and the bits
	 * IA32_QM_EVTSEL.RMID (bits 41:32) are configured with a valid RMID,
	 * IA32_QM_CTR.data (bits 61:0) reports the monitored data.
	 * IA32_QM_CTR.Error (bit 63) and IA32_QM_CTR.Unavailable (bit 62)
	 * are error bits.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, eventid, rmid);
	rdmsrl(MSR_IA32_QM_CTR, msr_val);

	if (msr_val & RMID_VAL_ERROR)
		return -EIO;
	if (msr_val & RMID_VAL_UNAVAIL)
		return -EINVAL;

	*val = msr_val;
	return 0;
}
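
/*
 * Note on the wrmsr() above (illustrative, follows from the SDM layout
 * quoted in __rmid_read()): wrmsr(msr, lo, hi) writes EDX:EAX, so the
 * event ID lands in IA32_QM_EVTSEL bits 7:0 and the RMID in bits 41:32.
 * For example, eventid = 0x1 (LLC occupancy) with rmid = 5 programs the
 * MSR with the value (5ULL << 32) | 0x1.
 */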
static struct arch_mbm_state *get_arch_mbm_state(struct rdt_hw_domain *hw_dom,
						 u32 rmid,
						 enum resctrl_event_id eventid)
{
	switch (eventid) {
	case QOS_L3_OCCUP_EVENT_ID:
		return NULL;
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return &hw_dom->arch_mbm_total[rmid];
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return &hw_dom->arch_mbm_local[rmid];
	}

	/* Never expect to get here */
	WARN_ON_ONCE(1);

	return NULL;
}
void resctrl_arch_reset_rmid(struct rdt_resource *r, struct rdt_domain *d,
			     u32 unused, u32 rmid,
			     enum resctrl_event_id eventid)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct arch_mbm_state *am;

	am = get_arch_mbm_state(hw_dom, rmid, eventid);
	if (am) {
		memset(am, 0, sizeof(*am));

		/* Record any initial, non-zero count value. */
		__rmid_read(rmid, eventid, &am->prev_msr);
	}
}
/*
 * Assumes that hardware counters are also reset and thus that there is
 * no need to record initial non-zero counts.
 */
void resctrl_arch_reset_rmid_all(struct rdt_resource *r, struct rdt_domain *d)
{
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);

	if (is_mbm_total_enabled())
		memset(hw_dom->arch_mbm_total, 0,
		       sizeof(*hw_dom->arch_mbm_total) * r->num_rmid);

	if (is_mbm_local_enabled())
		memset(hw_dom->arch_mbm_local, 0,
		       sizeof(*hw_dom->arch_mbm_local) * r->num_rmid);
}
static u64 mbm_overflow_count(u64 prev_msr, u64 cur_msr, unsigned int width)
{
	u64 shift = 64 - width, chunks;

	chunks = (cur_msr << shift) - (prev_msr << shift);
	return chunks >> shift;
}
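
/*
 * Worked example of the width trick above, assuming a 24-bit counter:
 * with prev_msr = 0xfffffe and cur_msr = 0x000001, shift is 40, the
 * left-aligned subtraction wraps modulo 2^64, and shifting back down
 * yields 3 chunks, which is the correct delta across the wraparound.
 */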
int resctrl_arch_rmid_read(struct rdt_resource *r, struct rdt_domain *d,
			   u32 unused, u32 rmid, enum resctrl_event_id eventid,
			   u64 *val)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
	struct arch_mbm_state *am;
	u64 msr_val, chunks;
	int ret;

	if (!cpumask_test_cpu(smp_processor_id(), &d->cpu_mask))
		return -EINVAL;

	ret = __rmid_read(rmid, eventid, &msr_val);
	if (ret)
		return ret;

	am = get_arch_mbm_state(hw_dom, rmid, eventid);
	if (am) {
		am->chunks += mbm_overflow_count(am->prev_msr, msr_val,
						 hw_res->mbm_width);
		chunks = get_corrected_mbm_count(rmid, am->chunks);
		am->prev_msr = msr_val;
	} else {
		chunks = msr_val;
	}

	*val = chunks * hw_res->mon_scale;

	return 0;
}
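
/*
 * Example of the scaling above: hardware counts in "chunks" of
 * mon_scale bytes (boot_cpu_data.x86_cache_occ_scale). Assuming a
 * typical upscaling factor of 65536, a corrected count of 100 chunks
 * is reported to resctrl as 100 * 65536 = 6553600 bytes.
 */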
static void limbo_release_entry(struct rmid_entry *entry)
{
	lockdep_assert_held(&rdtgroup_mutex);

	rmid_limbo_count--;
	list_add_tail(&entry->list, &rmid_free_lru);

	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
		closid_num_dirty_rmid[entry->closid]--;
}
/*
 * Check the RMIDs that are marked as busy for this domain. If the
 * reported LLC occupancy is below the threshold, clear the busy bit and
 * decrement the count. If the busy count gets to zero on an RMID, we
 * free the RMID.
 */
void __check_limbo(struct rdt_domain *d, bool force_free)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
	struct rmid_entry *entry;
	u32 idx, cur_idx = 1;
	bool rmid_dirty;
	u64 val = 0;

	/*
	 * Skip RMID 0 and start from RMID 1, and check all the RMIDs that
	 * are marked as busy for occupancy < threshold. If the occupancy
	 * is less than the threshold, decrement the busy counter of the
	 * RMID and move it to the free list when the counter reaches 0.
	 */
	for (;;) {
		idx = find_next_bit(d->rmid_busy_llc, idx_limit, cur_idx);
		if (idx >= idx_limit)
			break;

		entry = __rmid_entry(idx);
		if (resctrl_arch_rmid_read(r, d, entry->closid, entry->rmid,
					   QOS_L3_OCCUP_EVENT_ID, &val)) {
			rmid_dirty = true;
		} else {
			rmid_dirty = (val >= resctrl_rmid_realloc_threshold);
		}

		if (force_free || !rmid_dirty) {
			clear_bit(idx, d->rmid_busy_llc);
			if (!--entry->busy)
				limbo_release_entry(entry);
		}

		cur_idx = idx + 1;
	}
}
bool has_busy_rmid(struct rdt_domain *d)
{
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();

	return find_first_bit(d->rmid_busy_llc, idx_limit) != idx_limit;
}
static struct rmid_entry *resctrl_find_free_rmid(u32 closid)
{
	struct rmid_entry *itr;
	u32 itr_idx, cmp_idx;

	if (list_empty(&rmid_free_lru))
		return rmid_limbo_count ? ERR_PTR(-EBUSY) : ERR_PTR(-ENOSPC);

	list_for_each_entry(itr, &rmid_free_lru, list) {
		/*
		 * Get the index of this free RMID, and the index it would need
		 * to be if it were used with this CLOSID.
		 * If the CLOSID is irrelevant on this architecture, the two
		 * index values are always the same on every entry and thus the
		 * very first entry will be returned.
		 */
		itr_idx = resctrl_arch_rmid_idx_encode(itr->closid, itr->rmid);
		cmp_idx = resctrl_arch_rmid_idx_encode(closid, itr->rmid);

		if (itr_idx == cmp_idx)
			return itr;
	}

	return ERR_PTR(-ENOSPC);
}
/**
 * resctrl_find_cleanest_closid() - Find a CLOSID where all the associated
 *                                  RMID are clean, or the CLOSID that has
 *                                  the most clean RMID.
 *
 * MPAM's equivalent of RMID are per-CLOSID, meaning a freshly allocated CLOSID
 * may not be able to allocate clean RMID. To avoid this the allocator will
 * choose the CLOSID with the most clean RMID.
 *
 * When the CLOSID and RMID are independent numbers, the first free CLOSID will
 * be returned.
 */
int resctrl_find_cleanest_closid(void)
{
	u32 cleanest_closid = ~0;
	int i = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	if (!IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
		return -EIO;

	for (i = 0; i < closids_supported(); i++) {
		int num_dirty;

		if (closid_allocated(i))
			continue;

		num_dirty = closid_num_dirty_rmid[i];
		if (num_dirty == 0)
			return i;

		if (cleanest_closid == ~0)
			cleanest_closid = i;

		if (num_dirty < closid_num_dirty_rmid[cleanest_closid])
			cleanest_closid = i;
	}

	if (cleanest_closid == ~0)
		return -ENOSPC;

	return cleanest_closid;
}
/*
 * For MPAM the RMID value is not unique, and has to be considered with
 * the CLOSID. The (CLOSID, RMID) pair is allocated on all domains, which
 * allows all domains to be managed by a single free list.
 * Each domain also has a rmid_busy_llc to reduce the work of the limbo handler.
 */
int alloc_rmid(u32 closid)
{
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	entry = resctrl_find_free_rmid(closid);
	if (IS_ERR(entry))
		return PTR_ERR(entry);

	list_del(&entry->list);
	return entry->rmid;
}
static void add_rmid_to_limbo(struct rmid_entry *entry)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	struct rdt_domain *d;
	u32 idx;
	int cpu, err;
	u64 val = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	idx = resctrl_arch_rmid_idx_encode(entry->closid, entry->rmid);

	entry->busy = 0;
	cpu = get_cpu();
	list_for_each_entry(d, &r->domains, list) {
		if (cpumask_test_cpu(cpu, &d->cpu_mask)) {
			err = resctrl_arch_rmid_read(r, d, entry->closid,
						     entry->rmid,
						     QOS_L3_OCCUP_EVENT_ID,
						     &val);
			if (err || val <= resctrl_rmid_realloc_threshold)
				continue;
		}

		/*
		 * For the first limbo RMID in the domain,
		 * set up the limbo worker.
		 */
		if (!has_busy_rmid(d))
			cqm_setup_limbo_handler(d, CQM_LIMBOCHECK_INTERVAL);
		set_bit(idx, d->rmid_busy_llc);
		entry->busy++;
	}
	put_cpu();

	if (entry->busy) {
		rmid_limbo_count++;
		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID))
			closid_num_dirty_rmid[entry->closid]++;
	} else {
		list_add_tail(&entry->list, &rmid_free_lru);
	}
}
void free_rmid(u32 closid, u32 rmid)
{
	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
	struct rmid_entry *entry;

	lockdep_assert_held(&rdtgroup_mutex);

	/*
	 * Do not allow the default rmid to be freed. Comparing by index
	 * allows architectures that ignore the closid parameter to avoid an
	 * unnecessary check.
	 */
	if (idx == resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
						RESCTRL_RESERVED_RMID))
		return;

	entry = __rmid_entry(idx);

	if (is_llc_occupancy_enabled())
		add_rmid_to_limbo(entry);
	else
		list_add_tail(&entry->list, &rmid_free_lru);
}
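
/*
 * Summary of the RMID lifecycle implemented above: alloc_rmid() hands out
 * an entry from rmid_free_lru; free_rmid() either returns it directly to
 * the free list, or parks it in limbo via add_rmid_to_limbo() when LLC
 * occupancy monitoring is enabled; the limbo handler then calls
 * __check_limbo() until the cached occupancy drops below
 * resctrl_rmid_realloc_threshold in every domain, at which point
 * limbo_release_entry() puts the RMID back on the free list.
 */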
static struct mbm_state *get_mbm_state(struct rdt_domain *d, u32 closid,
				       u32 rmid, enum resctrl_event_id evtid)
{
	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);

	switch (evtid) {
	case QOS_L3_MBM_TOTAL_EVENT_ID:
		return &d->mbm_total[idx];
	case QOS_L3_MBM_LOCAL_EVENT_ID:
		return &d->mbm_local[idx];
	default:
		return NULL;
	}
}
static int __mon_event_count(u32 closid, u32 rmid, struct rmid_read *rr)
{
	struct mbm_state *m;
	u64 tval = 0;

	if (rr->first) {
		resctrl_arch_reset_rmid(rr->r, rr->d, closid, rmid, rr->evtid);
		m = get_mbm_state(rr->d, closid, rmid, rr->evtid);
		if (m)
			memset(m, 0, sizeof(struct mbm_state));
		return 0;
	}

	rr->err = resctrl_arch_rmid_read(rr->r, rr->d, closid, rmid, rr->evtid,
					 &tval);
	if (rr->err)
		return rr->err;

	rr->val += tval;

	return 0;
}
/**
 * mbm_bw_count() - Update bw count from values previously read by
 *		    __mon_event_count().
 * @closid:	The closid used to identify the cached mbm_state.
 * @rmid:	The rmid used to identify the cached mbm_state.
 * @rr:		The struct rmid_read populated by __mon_event_count().
 *
 * Supporting function to calculate the memory bandwidth
 * and delta bandwidth in MBps. The chunks value previously read by
 * __mon_event_count() is compared with the chunks value from the previous
 * invocation. This must be called once per second to maintain values in MBps.
 */
static void mbm_bw_count(u32 closid, u32 rmid, struct rmid_read *rr)
{
	u32 idx = resctrl_arch_rmid_idx_encode(closid, rmid);
	struct mbm_state *m = &rr->d->mbm_local[idx];
	u64 cur_bw, bytes, cur_bytes;

	cur_bytes = rr->val;
	bytes = cur_bytes - m->prev_bw_bytes;
	m->prev_bw_bytes = cur_bytes;

	cur_bw = bytes / SZ_1M;

	m->prev_bw = cur_bw;
}
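
/*
 * Worked example for the calculation above: with one call per second,
 * a delta of 524288000 bytes between two reads gives
 * 524288000 / SZ_1M (1048576) = 500, i.e. 500 MBps.
 */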
/*
 * This is called via IPI to read the CQM/MBM counters
 * on a domain.
 */
void mon_event_count(void *info)
{
	struct rdtgroup *rdtgrp, *entry;
	struct rmid_read *rr = info;
	struct list_head *head;
	int ret;

	rdtgrp = rr->rgrp;

	ret = __mon_event_count(rdtgrp->closid, rdtgrp->mon.rmid, rr);

	/*
	 * For Ctrl groups read data from child monitor groups and
	 * add them together. Count events which are read successfully.
	 * Discard the rmid_read's reporting errors.
	 */
	head = &rdtgrp->mon.crdtgrp_list;

	if (rdtgrp->type == RDTCTRL_GROUP) {
		list_for_each_entry(entry, head, mon.crdtgrp_list) {
			if (__mon_event_count(entry->closid, entry->mon.rmid,
					      rr) == 0)
				ret = 0;
		}
	}

	/*
	 * __mon_event_count() calls for newly created monitor groups may
	 * report -EINVAL/Unavailable if the monitor hasn't seen any traffic.
	 * Discard error if any of the monitor event reads succeeded.
	 */
	if (ret == 0)
		rr->err = 0;
}
/*
 * Feedback loop for MBA software controller (mba_sc)
 *
 * mba_sc is a feedback loop where we periodically read MBM counters and
 * adjust the bandwidth percentage values via the IA32_MBA_THRTL_MSRs so
 * that:
 *
 *   current bandwidth (cur_bw) < user specified bandwidth (user_bw)
 *
 * This uses the MBM counters to measure the bandwidth and MBA throttle
 * MSRs to control the bandwidth for a particular rdtgrp. It builds on the
 * fact that resctrl rdtgroups have both monitoring and control.
 *
 * The frequency of the checks is 1s and we just tag along the MBM overflow
 * timer. Having a 1s interval makes the calculation of bandwidth simpler.
 *
 * Although MBA's goal is to restrict the bandwidth to a maximum, there may
 * be a need to increase the bandwidth to avoid unnecessarily restricting
 * the L2 <-> L3 traffic.
 *
 * Since MBA controls the L2 external bandwidth whereas MBM measures the
 * L3 external bandwidth, the following sequence could lead to such a
 * situation.
 *
 * Consider an rdtgroup which had high L3 <-> memory traffic in its initial
 * phases -> mba_sc kicks in and reduces the bandwidth percentage values ->
 * but after some time the rdtgroup has mostly L2 <-> L3 traffic.
 *
 * In this case we may restrict the rdtgroup's L2 <-> L3 traffic as its
 * throttle MSRs already have low percentage values. To avoid
 * unnecessarily restricting such rdtgroups, we also increase the bandwidth.
 */
static void update_mba_bw(struct rdtgroup *rgrp, struct rdt_domain *dom_mbm)
{
	u32 closid, rmid, cur_msr_val, new_msr_val;
	struct mbm_state *pmbm_data, *cmbm_data;
	struct rdt_resource *r_mba;
	struct rdt_domain *dom_mba;
	u32 cur_bw, user_bw, idx;
	struct list_head *head;
	struct rdtgroup *entry;

	if (!is_mbm_local_enabled())
		return;

	r_mba = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;

	closid = rgrp->closid;
	rmid = rgrp->mon.rmid;
	idx = resctrl_arch_rmid_idx_encode(closid, rmid);
	pmbm_data = &dom_mbm->mbm_local[idx];

	dom_mba = get_domain_from_cpu(smp_processor_id(), r_mba);
	if (!dom_mba) {
		pr_warn_once("Failure to get domain for MBA update\n");
		return;
	}

	cur_bw = pmbm_data->prev_bw;
	user_bw = dom_mba->mbps_val[closid];

	/* MBA resource doesn't support CDP */
	cur_msr_val = resctrl_arch_get_config(r_mba, dom_mba, closid, CDP_NONE);

	/*
	 * For Ctrl groups read data from child monitor groups.
	 */
	head = &rgrp->mon.crdtgrp_list;
	list_for_each_entry(entry, head, mon.crdtgrp_list) {
		cmbm_data = &dom_mbm->mbm_local[entry->mon.rmid];
		cur_bw += cmbm_data->prev_bw;
	}

	/*
	 * Scale up/down the bandwidth linearly for the ctrl group. The
	 * bandwidth step is the bandwidth granularity specified by the
	 * hardware.
	 * Always increase throttling if current bandwidth is above the
	 * target set by the user.
	 * But avoid thrashing up and down on every poll by checking
	 * whether a decrease in throttling is likely to push the group
	 * back over target. E.g. if currently throttling to 30% of bandwidth
	 * on a system with 10% granularity steps, check whether moving to
	 * 40% would go past the limit by multiplying current bandwidth by
	 * "(30 + 10) / 30".
	 */
	if (cur_msr_val > r_mba->membw.min_bw && user_bw < cur_bw) {
		new_msr_val = cur_msr_val - r_mba->membw.bw_gran;
	} else if (cur_msr_val < MAX_MBA_BW &&
		   (user_bw > (cur_bw * (cur_msr_val + r_mba->membw.min_bw) / cur_msr_val))) {
		new_msr_val = cur_msr_val + r_mba->membw.bw_gran;
	} else {
		return;
	}

	resctrl_arch_update_one(r_mba, dom_mba, closid, CDP_NONE, new_msr_val);
}
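
/*
 * Worked example of the "avoid thrashing" check above (illustrative
 * numbers): with cur_msr_val = 30, min_bw = 10, cur_bw = 900 MBps and
 * user_bw = 1000 MBps, unthrottling is only done if
 * user_bw > 900 * (30 + 10) / 30 = 1200. Since 1000 < 1200, stepping up
 * would likely overshoot the target, so the throttle value is left alone.
 */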
static void mbm_update(struct rdt_resource *r, struct rdt_domain *d,
		       u32 closid, u32 rmid)
{
	struct rmid_read rr;

	rr.first = false;
	rr.r = r;
	rr.d = d;

	/*
	 * This is protected from concurrent reads from user
	 * as both the user and we hold the global mutex.
	 */
	if (is_mbm_total_enabled()) {
		rr.evtid = QOS_L3_MBM_TOTAL_EVENT_ID;
		rr.val = 0;
		__mon_event_count(closid, rmid, &rr);
	}
	if (is_mbm_local_enabled()) {
		rr.evtid = QOS_L3_MBM_LOCAL_EVENT_ID;
		rr.val = 0;
		__mon_event_count(closid, rmid, &rr);

		/*
		 * Call the MBA software controller only for the
		 * control groups and when user has enabled
		 * the software controller explicitly.
		 */
		if (is_mba_sc(NULL))
			mbm_bw_count(closid, rmid, &rr);
	}
}
/*
 * Handler to scan the limbo list and move to the free list the RMIDs
 * whose occupancy is below the threshold occupancy.
 */
void cqm_handle_limbo(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(CQM_LIMBOCHECK_INTERVAL);
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	d = container_of(work, struct rdt_domain, cqm_limbo.work);

	__check_limbo(d, false);

	if (has_busy_rmid(d)) {
		d->cqm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask);
		schedule_delayed_work_on(d->cqm_work_cpu, &d->cqm_limbo,
					 delay);
	}

	mutex_unlock(&rdtgroup_mutex);
}
void cqm_setup_limbo_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	cpu = cpumask_any_housekeeping(&dom->cpu_mask);
	dom->cqm_work_cpu = cpu;

	schedule_delayed_work_on(cpu, &dom->cqm_limbo, delay);
}
void mbm_handle_overflow(struct work_struct *work)
{
	unsigned long delay = msecs_to_jiffies(MBM_OVERFLOW_INTERVAL);
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;
	struct rdt_resource *r;
	struct rdt_domain *d;

	mutex_lock(&rdtgroup_mutex);

	if (!static_branch_likely(&rdt_mon_enable_key))
		goto out_unlock;

	r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
	d = container_of(work, struct rdt_domain, mbm_over.work);

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		mbm_update(r, d, prgrp->closid, prgrp->mon.rmid);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list)
			mbm_update(r, d, crgrp->closid, crgrp->mon.rmid);

		if (is_mba_sc(NULL))
			update_mba_bw(prgrp, d);
	}

	/*
	 * Re-check for housekeeping CPUs. This allows the overflow handler to
	 * move off a nohz_full CPU quickly.
	 */
	d->mbm_work_cpu = cpumask_any_housekeeping(&d->cpu_mask);
	schedule_delayed_work_on(d->mbm_work_cpu, &d->mbm_over, delay);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);
}
void mbm_setup_overflow_handler(struct rdt_domain *dom, unsigned long delay_ms)
{
	unsigned long delay = msecs_to_jiffies(delay_ms);
	int cpu;

	if (!static_branch_likely(&rdt_mon_enable_key))
		return;

	cpu = cpumask_any_housekeeping(&dom->cpu_mask);
	dom->mbm_work_cpu = cpu;
	schedule_delayed_work_on(cpu, &dom->mbm_over, delay);
}
static int dom_data_init(struct rdt_resource *r)
{
	u32 idx_limit = resctrl_arch_system_num_rmid_idx();
	u32 num_closid = resctrl_arch_get_num_closid(r);
	struct rmid_entry *entry = NULL;
	int err = 0, i;
	u32 idx;

	mutex_lock(&rdtgroup_mutex);
	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
		u32 *tmp;

		/*
		 * If the architecture hasn't provided a sanitised value here,
		 * this may result in larger arrays than necessary. Resctrl will
		 * use a smaller system wide value based on the resources in
		 * use.
		 */
		tmp = kcalloc(num_closid, sizeof(*tmp), GFP_KERNEL);
		if (!tmp) {
			err = -ENOMEM;
			goto out_unlock;
		}

		closid_num_dirty_rmid = tmp;
	}

	rmid_ptrs = kcalloc(idx_limit, sizeof(struct rmid_entry), GFP_KERNEL);
	if (!rmid_ptrs) {
		if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
			kfree(closid_num_dirty_rmid);
			closid_num_dirty_rmid = NULL;
		}
		err = -ENOMEM;
		goto out_unlock;
	}

	for (i = 0; i < idx_limit; i++) {
		entry = &rmid_ptrs[i];
		INIT_LIST_HEAD(&entry->list);

		resctrl_arch_rmid_idx_decode(i, &entry->closid, &entry->rmid);
		list_add_tail(&entry->list, &rmid_free_lru);
	}

	/*
	 * RESCTRL_RESERVED_CLOSID and RESCTRL_RESERVED_RMID are special and
	 * are always allocated. These are used for the rdtgroup_default
	 * control group, which will be set up later in rdtgroup_init().
	 */
	idx = resctrl_arch_rmid_idx_encode(RESCTRL_RESERVED_CLOSID,
					   RESCTRL_RESERVED_RMID);
	entry = __rmid_entry(idx);
	list_del(&entry->list);

out_unlock:
	mutex_unlock(&rdtgroup_mutex);

	return err;
}
static void __exit dom_data_exit(void)
{
	mutex_lock(&rdtgroup_mutex);

	if (IS_ENABLED(CONFIG_RESCTRL_RMID_DEPENDS_ON_CLOSID)) {
		kfree(closid_num_dirty_rmid);
		closid_num_dirty_rmid = NULL;
	}

	kfree(rmid_ptrs);
	rmid_ptrs = NULL;

	mutex_unlock(&rdtgroup_mutex);
}
static struct mon_evt llc_occupancy_event = {
	.name		= "llc_occupancy",
	.evtid		= QOS_L3_OCCUP_EVENT_ID,
};

static struct mon_evt mbm_total_event = {
	.name		= "mbm_total_bytes",
	.evtid		= QOS_L3_MBM_TOTAL_EVENT_ID,
};

static struct mon_evt mbm_local_event = {
	.name		= "mbm_local_bytes",
	.evtid		= QOS_L3_MBM_LOCAL_EVENT_ID,
};
/*
 * Initialize the event list for the resource.
 *
 * Note that MBM events are also part of the RDT_RESOURCE_L3 resource
 * because, as per the SDM, the total and local memory bandwidth
 * are enumerated as part of L3 monitoring.
 */
static void l3_mon_evt_init(struct rdt_resource *r)
{
	INIT_LIST_HEAD(&r->evt_list);

	if (is_llc_occupancy_enabled())
		list_add_tail(&llc_occupancy_event.list, &r->evt_list);
	if (is_mbm_total_enabled())
		list_add_tail(&mbm_total_event.list, &r->evt_list);
	if (is_mbm_local_enabled())
		list_add_tail(&mbm_local_event.list, &r->evt_list);
}
int __init rdt_get_mon_l3_config(struct rdt_resource *r)
{
	unsigned int mbm_offset = boot_cpu_data.x86_cache_mbm_width_offset;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	unsigned int threshold;
	int ret;

	resctrl_rmid_realloc_limit = boot_cpu_data.x86_cache_size * 1024;
	hw_res->mon_scale = boot_cpu_data.x86_cache_occ_scale;
	r->num_rmid = boot_cpu_data.x86_cache_max_rmid + 1;
	hw_res->mbm_width = MBM_CNTR_WIDTH_BASE;

	if (mbm_offset > 0 && mbm_offset <= MBM_CNTR_WIDTH_OFFSET_MAX)
		hw_res->mbm_width += mbm_offset;
	else if (mbm_offset > MBM_CNTR_WIDTH_OFFSET_MAX)
		pr_warn("Ignoring impossible MBM counter offset\n");

	/*
	 * A reasonable upper limit on the max threshold is the number
	 * of lines tagged per RMID if all RMIDs have the same number of
	 * lines tagged in the LLC.
	 *
	 * For a 35MB LLC and 56 RMIDs, this is ~1.8% of the LLC.
	 */
	threshold = resctrl_rmid_realloc_limit / r->num_rmid;

	/*
	 * Because num_rmid may not be a power of two, round the value
	 * to the nearest multiple of hw_res->mon_scale so it matches a
	 * value the hardware will measure. mon_scale may not be a power of 2.
	 */
	resctrl_rmid_realloc_threshold = resctrl_arch_round_mon_val(threshold);
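
	/*
	 * Worked example of the two steps above: for a 35MB (36700160 byte)
	 * LLC and 56 RMIDs, threshold = 36700160 / 56 = 655360 bytes, about
	 * 1.8% of the LLC. resctrl_arch_round_mon_val() then snaps this to a
	 * multiple of mon_scale so it matches a value the hardware can report.
	 */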
	ret = dom_data_init(r);
	if (ret)
		return ret;

	if (rdt_cpu_has(X86_FEATURE_BMEC)) {
		u32 eax, ebx, ecx, edx;

		/* Detect list of bandwidth sources that can be tracked */
		cpuid_count(0x80000020, 3, &eax, &ebx, &ecx, &edx);
		hw_res->mbm_cfg_mask = ecx & MAX_EVT_CONFIG_BITS;

		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_TOTAL)) {
			mbm_total_event.configurable = true;
			mbm_config_rftype_init("mbm_total_bytes_config");
		}
		if (rdt_cpu_has(X86_FEATURE_CQM_MBM_LOCAL)) {
			mbm_local_event.configurable = true;
			mbm_config_rftype_init("mbm_local_bytes_config");
		}
	}

	l3_mon_evt_init(r);

	r->mon_capable = true;

	return 0;
}
void __exit rdt_put_mon_l3_config(void)
{
	dom_data_exit();
}
void __init intel_rdt_mbm_apply_quirk(void)
{
	int cf_index;

	cf_index = (boot_cpu_data.x86_cache_max_rmid + 1) / 8 - 1;
	if (cf_index >= ARRAY_SIZE(mbm_cf_table)) {
		pr_info("No MBM correction factor available\n");
		return;
	}

	mbm_cf_rmidthreshold = mbm_cf_table[cf_index].rmidthreshold;
	mbm_cf = mbm_cf_table[cf_index].cf;
}
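
/*
 * Worked example of the index math above: on a part with
 * x86_cache_max_rmid = 175, cf_index = (175 + 1) / 8 - 1 = 21, which is
 * within the 28-entry table (indices 0 up to 27), so the quirk applies
 * the threshold and factor from mbm_cf_table[21].
 */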