// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"

DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* list of entries for the schemata file */
LIST_HEAD(resctrl_schema_all);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;

void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}

/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}

static void closid_init(void)
{
	struct resctrl_schema *s;
	u32 rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	list_for_each_entry(s, &resctrl_schema_all, list)
		rdt_min_closid = min(rdt_min_closid, s->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

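/*
 * Worked example (illustrative numbers): if the smallest resource
 * supports four CLOSIDs, closid_free_map starts as 0b1111 and becomes
 * 0b1110 after reserving CLOSID 0, leaving three CLOSIDs allocatable
 * for new resource groups.
 */
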
static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}

/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}

/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}

static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgroup_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}

/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}

static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static const struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static const struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}

static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask.
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}

static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}

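/*
 * Example usage from user space (illustrative path and values):
 *
 *   echo 2-3 > /sys/fs/resctrl/grp0/mon_groups/m1/cpus_list
 *
 * succeeds only if CPUs 2-3 already belong to the parent group grp0;
 * any CPUs dropped from m1 are handed back to grp0.
 */
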
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}

static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}

static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	rdt_last_cmd_clear();
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}

/**
 * rdtgroup_remove - the helper to remove resource group safely
 * @rdtgrp: resource group to remove
 *
 * On resource group creation via a mkdir, an extra kernfs_node reference is
 * taken to ensure that the rdtgroup structure remains accessible for the
 * rdtgroup_kn_unlock() calls where it is removed.
 *
 * Drop the extra reference here, then free the rdtgroup structure.
 */
static void rdtgroup_remove(struct rdtgroup *rdtgrp)
{
	kernfs_put(rdtgrp->kn);
	kfree(rdtgrp);
}

static void _update_task_closid_rmid(void *task)
{
	/*
	 * If the task is still current on this CPU, update PQR_ASSOC MSR.
	 * Otherwise, the MSR is updated when the task is scheduled in.
	 */
	if (task == current)
		resctrl_sched_in();
}

static void update_task_closid_rmid(struct task_struct *t)
{
	if (IS_ENABLED(CONFIG_SMP) && task_curr(t))
		smp_call_function_single(task_cpu(t), _update_task_closid_rmid, t, 1);
	else
		_update_task_closid_rmid(t);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	/* If the task is already in rdtgrp, no need to move the task. */
	if ((rdtgrp->type == RDTCTRL_GROUP && tsk->closid == rdtgrp->closid &&
	     tsk->rmid == rdtgrp->mon.rmid) ||
	    (rdtgrp->type == RDTMON_GROUP && tsk->rmid == rdtgrp->mon.rmid &&
	     tsk->closid == rdtgrp->mon.parent->closid))
		return 0;

	/*
	 * Set the task's closid/rmid before the PQR_ASSOC MSR can be
	 * updated by them.
	 *
	 * For ctrl_mon groups, move both closid and rmid.
	 * For monitor groups, can move the tasks only from
	 * their parent CTRL group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP) {
		WRITE_ONCE(tsk->closid, rdtgrp->closid);
		WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
	} else if (rdtgrp->type == RDTMON_GROUP) {
		if (rdtgrp->mon.parent->closid == tsk->closid) {
			WRITE_ONCE(tsk->rmid, rdtgrp->mon.rmid);
		} else {
			rdt_last_cmd_puts("Can't move task to different control group\n");
			return -EINVAL;
		}
	}

	/*
	 * Ensure the task's closid and rmid are written before determining if
	 * the task is current that will decide if it will be interrupted.
	 */
	barrier();

	/*
	 * By now, the task's closid and rmid are set. If the task is current
	 * on a CPU, the PQR_ASSOC MSR needs to be updated to make the resource
	 * group go into effect. If the task is not current, the MSR will be
	 * updated when the task is scheduled in.
	 */
	update_task_closid_rmid(tsk);

	return 0;
}

static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
		(r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
		(r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}

static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}

static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}

static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 * Task is part of the root resctrl control group, and it is not associated
 * to any monitor group.
 *
 * 3)  res:/
 *     mon:mon0
 *
 * Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)  res:group0
 *     mon:
 *
 * Task is part of resctrl control group group0, and it is not associated
 * to any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 * Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}
	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif

static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;

	seq_printf(seq, "%u\n", s->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}

/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
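/*
 * For example (illustrative output for a 16-bit CBM on domain 0):
 *
 *   0=SSSSEEEE0000XXHH
 *
 * Bits are printed from the highest to the lowest: the top four bits are
 * shareable by software only, the next four belong exclusively to one
 * group, four are unused, two are shared by hardware and software, and
 * the lowest two are used by hardware only.
 */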
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_resource *r = s->res;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 ctrl_val;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++) {
			if (!closid_allocated(i))
				continue;
			ctrl_val = resctrl_arch_get_config(r, dom, i,
							   s->conf_type);
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= ctrl_val;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= ctrl_val;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
			/*
			 * RDT_MODE_PSEUDO_LOCKSETUP is possible
			 * here but not included since the CBM
			 * associated with this CLOSID in this mode
			 * is not initialized and no task or cpu can be
			 * assigned this CLOSID.
			 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
	}
	seq_putc(seq, '\n');
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}

static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}

static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * hw_res->mon_scale);

	return 0;
}

static int rdt_thread_throttle_mode_show(struct kernfs_open_file *of,
					 struct seq_file *seq, void *v)
{
	struct resctrl_schema *s = of->kn->parent->priv;
	struct rdt_resource *r = s->res;

	if (r->membw.throttle_mode == THREAD_THROTTLE_PER_THREAD)
		seq_puts(seq, "per-thread\n");
	else
		seq_puts(seq, "max\n");

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_hw_resource *hw_res;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	hw_res = resctrl_to_arch_res(of->kn->parent->priv);
	resctrl_cqm_threshold = bytes / hw_res->mon_scale;

	return nbytes;
}

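/*
 * Example usage (illustrative value): writing 1048576 to
 * info/L3_MON/max_threshold_occupancy means an RMID is only considered
 * free for reuse once its LLC occupancy drops below 1 MiB; the integer
 * division above rounds the threshold down to a multiple of mon_scale.
 */
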
/**
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}

static enum resctrl_conf_type resctrl_peer_type(enum resctrl_conf_type my_type)
{
	switch (my_type) {
	case CDP_CODE:
		return CDP_DATA;
	case CDP_DATA:
		return CDP_CODE;
	default:
	case CDP_NONE:
		return CDP_NONE;
	}
}

/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @type: CDP type of @r.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid,
				    enum resctrl_conf_type type, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	for (i = 0; i < closids_supported(); i++) {
		ctrl_b = resctrl_arch_get_config(r, d, i, type);
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}

/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @s: Schema for the resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. The overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct resctrl_schema *s, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
	struct rdt_resource *r = s->res;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, s->conf_type,
				    exclusive))
		return true;

	if (!resctrl_arch_get_cdp_enabled(r->rid))
		return false;

	return __rdtgroup_cbm_overlaps(r, d, cbm, closid, peer_type, exclusive);
}

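/*
 * Example (illustrative): with CDP enabled on L3, a CBM proposed for the
 * L3CODE schema is also checked against the L3DATA configurations,
 * because both schemata program portions of the same physical cache.
 */
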
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct resctrl_schema *s;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;
	u32 ctrl;

	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			ctrl = resctrl_arch_get_config(r, d, closid,
						       s->conf_type);
			if (rdtgroup_cbm_overlaps(s, d, ctrl, closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}

/**
 * rdtgroup_mode_write - Modify the resource group's mode
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}

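/*
 * Example usage (illustrative):
 *
 *   echo exclusive > /sys/fs/resctrl/grp0/mode
 *
 * succeeds only while grp0's current schemata does not overlap any other
 * resource group; otherwise -EINVAL is returned and last_cmd_status
 * reports "Schemata overlaps".
 */
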
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied by the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used to make the
 * bitmap functions work correctly.
 */
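/*
 * Worked example (illustrative numbers): a 16 MB L3 cache with a 16 bit
 * CBM yields 16 MB / 16 = 1 MB per bit, so a CBM of 0xf (four bits set)
 * reports 4 MB.
 */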
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}

/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct resctrl_schema *schema;
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->s->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->s->res,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	list_for_each_entry(schema, &resctrl_schema_all, list) {
		r = schema->res;
		sep = false;
		seq_printf(s, "%*s:", max_name_width, schema->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = resctrl_arch_get_config(r, d,
							       rdtgrp->closid,
							       schema->conf_type);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}

/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	/*
	 * Platform specific which (if any) capabilities are provided by
	 * thread_throttle_mode. Defer "fflags" initialization to platform
	 * discovery.
	 */
	{
		.name		= "thread_throttle_mode",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_thread_throttle_mode_show,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};

static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (rft->fflags && ((fflags & rft->fflags) == rft->fflags)) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}

static struct rftype *rdtgroup_get_rftype_by_name(const char *name)
{
	struct rftype *rfts, *rft;
	int len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			return rft;
	}

	return NULL;
}

void __init thread_throttle_mode_init(void)
{
	struct rftype *rft;

	rft = rdtgroup_get_rftype_by_name("thread_throttle_mode");
	if (!rft)
		return;

	rft->fflags = RF_CTRL_INFO | RFTYPE_RES_MB;
}

/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}

static int rdtgroup_mkdir_info_resdir(void *priv, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, priv);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}

static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct resctrl_schema *s;
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	/* loop over enabled controls, these are all alloc_enabled */
	list_for_each_entry(s, &resctrl_schema_all, list) {
		r = s->res;
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(s, s->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}

static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl.membw.delay_linear;
}

static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level].r_resctrl;
	list_for_each_entry(d, &r_l->domains, list) {
		if (r_l->cache.arch_has_per_cpu_cfg)
			/* Pick all the CPUs in the domain instance */
			for_each_cpu(cpu, &d->cpu_mask)
				cpumask_set_cpu(cpu, cpu_mask);
		else
			/* Pick one CPU from each domain instance to update MSR */
			cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);

	if (!r->cdp_capable)
		return;

	if (r->rid == RDT_RESOURCE_L2)
		l2_qos_cfg_update(&hw_res->cdp_enabled);

	if (r->rid == RDT_RESOURCE_L3)
		l3_qos_cfg_update(&hw_res->cdp_enabled);
}

/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl;
	struct rdt_hw_domain *hw_dom;
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		setup_default_ctrlval(r, hw_dom->ctrl_val, hw_dom->mbps_val);
	}

	return 0;
}

static int cdp_enable(int level)
{
	struct rdt_resource *r_l = &rdt_resources_all[level].r_resctrl;
	int ret;

	if (!r_l->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret)
		rdt_resources_all[level].cdp_enabled = true;

	return ret;
}

static void cdp_disable(int level)
{
	struct rdt_hw_resource *r_hw = &rdt_resources_all[level];

	if (r_hw->cdp_enabled) {
		set_cache_qos_cfg(level, false);
		r_hw->cdp_enabled = false;
	}
}

int resctrl_arch_set_cdp_enabled(enum resctrl_res_level l, bool enable)
{
	struct rdt_hw_resource *hw_res = &rdt_resources_all[l];

	if (!hw_res->r_resctrl.cdp_capable)
		return -EINVAL;

	if (enable)
		return cdp_enable(l);

	cdp_disable(l);

	return 0;
}

static void cdp_disable_all(void)
{
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, false);
	if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
		resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, false);
}

/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}

void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		rdtgroup_remove(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}

static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L2, true);

	if (!ret && ctx->enable_cdpl3)
		ret = resctrl_arch_set_cdp_enabled(RDT_RESOURCE_L3, true);

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}

static int schemata_list_add(struct rdt_resource *r, enum resctrl_conf_type type)
{
	struct resctrl_schema *s;
	const char *suffix = "";
	int ret, cl;

	s = kzalloc(sizeof(*s), GFP_KERNEL);
	if (!s)
		return -ENOMEM;

	s->res = r;
	s->num_closid = resctrl_arch_get_num_closid(r);
	if (resctrl_arch_get_cdp_enabled(r->rid))
		s->num_closid /= 2;

	s->conf_type = type;
	switch (type) {
	case CDP_CODE:
		suffix = "CODE";
		break;
	case CDP_DATA:
		suffix = "DATA";
		break;
	case CDP_NONE:
		suffix = "";
		break;
	}

	ret = snprintf(s->name, sizeof(s->name), "%s%s", r->name, suffix);
	if (ret >= sizeof(s->name)) {
		kfree(s);
		return -EINVAL;
	}

	cl = strlen(s->name);

	/*
	 * If CDP is supported by this resource, but not enabled,
	 * include the suffix. This ensures the tabular format of the
	 * schemata file does not change between mounts of the filesystem.
	 */
	if (r->cdp_capable && !resctrl_arch_get_cdp_enabled(r->rid))
		cl += 4;

	if (cl > max_name_width)
		max_name_width = cl;

	INIT_LIST_HEAD(&s->list);
	list_add(&s->list, &resctrl_schema_all);

	return 0;
}

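/*
 * Example (illustrative): for the L3 resource with CDP enabled this adds
 * two schemata named "L3CODE" and "L3DATA", each advertising half of the
 * hardware CLOSIDs.
 */
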
static int schemata_list_create(void)
{
	struct rdt_resource *r;
	int ret = 0;

	for_each_alloc_enabled_rdt_resource(r) {
		if (resctrl_arch_get_cdp_enabled(r->rid)) {
			ret = schemata_list_add(r, CDP_CODE);
			if (ret)
				break;

			ret = schemata_list_add(r, CDP_DATA);
		} else {
			ret = schemata_list_add(r, CDP_NONE);
		}

		if (ret)
			break;
	}

	return ret;
}

static void schemata_list_destroy(void)
{
	struct resctrl_schema *s, *tmp;

	list_for_each_entry_safe(s, tmp, &resctrl_schema_all, list) {
		list_del(&s->list);
		kfree(s);
	}
}

static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	ret = schemata_list_create();
	if (ret) {
		schemata_list_destroy();
		goto out_mba;
	}

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_schemata_free;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  &rdtgroup_default, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3].r_resctrl;
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_schemata_free:
	schemata_list_destroy();
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}

enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}

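/*
 * Example usage (illustrative): "mount -t resctrl -o cdp,mba_MBps resctrl
 * /sys/fs/resctrl" requests L3 CDP plus the MBA software controller;
 * "cdpl2" selects CDP on the L2 resource instead.
 */
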
static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};

static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}

static int reset_all_ctrls(struct rdt_resource *r)
{
	struct rdt_hw_resource *hw_res = resctrl_to_arch_res(r);
	struct rdt_hw_domain *hw_dom;
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = hw_res->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		hw_dom = resctrl_to_arch_dom(d);
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < hw_res->num_closid; i++)
			hw_dom->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}

/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * CPUs.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			WRITE_ONCE(t->closid, to->closid);
			WRITE_ONCE(t->rmid, to->mon.rmid);

			/*
			 * If the task is on a CPU, set the CPU in the mask.
			 * The detection is inaccurate as tasks might move or
			 * schedule before the smp function call takes place.
			 * In such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (IS_ENABLED(CONFIG_SMP) && mask && task_curr(t))
				cpumask_set_cpu(task_cpu(t), mask);
		}
	}
	read_unlock(&tasklist_lock);
}

static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);

		if (atomic_read(&sentry->waitcount) != 0)
			sentry->flags = RDT_DELETED;
		else
			rdtgroup_remove(sentry);
	}
}

/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);

		if (atomic_read(&rdtgrp->waitcount) != 0)
			rdtgrp->flags = RDT_DELETED;
		else
			rdtgroup_remove(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}

static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	schemata_list_destroy();
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}

static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};

static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}

/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}

static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

2565 * Add all subdirectories of mon_data for "ctrl_mon" groups
2566 * and "monitor" groups with given domain id.
2568 void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
2569 struct rdt_domain *d)
2571 struct kernfs_node *parent_kn;
2572 struct rdtgroup *prgrp, *crgrp;
2573 struct list_head *head;
2575 if (!r->mon_enabled)
2576 return;
2578 list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
2579 parent_kn = prgrp->mon.mon_data_kn;
2580 mkdir_mondata_subdir(parent_kn, d, r, prgrp);
2582 head = &prgrp->mon.crdtgrp_list;
2583 list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
2584 parent_kn = crgrp->mon.mon_data_kn;
2585 mkdir_mondata_subdir(parent_kn, d, r, crgrp);
2590 static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
2591 struct rdt_resource *r,
2592 struct rdtgroup *prgrp)
2594 struct rdt_domain *dom;
2597 list_for_each_entry(dom, &r->domains, list) {
2598 ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
2599 if (ret)
2600 return ret;
2603 return 0;
2607 * This creates a directory mon_data which contains the monitored data.
2609 * mon_data has one directory for each domain, named in the format
2610 * mon_<domain_name>_<domain_id>. For example, a mon_data directory
2611 * with an L3 domain looks as below:
2612 * ./mon_data:
2613 * mon_L3_00
2614 * mon_L3_01
2615 * mon_L3_02
2616 * ...
2618 * Each domain directory has one file per event:
2619 * ./mon_L3_00/:
2620 * llc_occupancy
2623 static int mkdir_mondata_all(struct kernfs_node *parent_kn,
2624 struct rdtgroup *prgrp,
2625 struct kernfs_node **dest_kn)
2627 struct rdt_resource *r;
2628 struct kernfs_node *kn;
2632 * Create the mon_data directory first.
2634 ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
2642 * Create the subdirectories for each domain. Note that all events
2643 * in a domain like L3 are grouped into a resource whose domain is L3
2645 for_each_mon_enabled_rdt_resource(r) {
2646 ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
2647 if (ret)
2648 goto out_destroy;
2649 }
2651 return 0;
2653 out_destroy:
2654 kernfs_remove(kn);
2655 return ret;
2659 * cbm_ensure_valid - Enforce validity on provided CBM
2660 * @_val: Candidate CBM
2661 * @r: RDT resource to which the CBM belongs
2663 * The provided CBM represents all cache portions available for use. This
2664 * may be represented by a bitmap that does not consist of contiguous ones
2665 * and thus be an invalid CBM.
2666 * Here the provided CBM is forced to be a valid CBM by only considering
2667 * the first set of contiguous bits as valid and clearing all other bits.
2668 * The intention here is to provide a valid default CBM with which a new
2669 * resource group is initialized. The user can follow this with a
2670 * modification to the CBM if the default does not satisfy the
2671 * requirements.
2673 static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
2675 unsigned int cbm_len = r->cache.cbm_len;
2676 unsigned long first_bit, zero_bit;
2677 unsigned long val = _val;
2679 if (!val)
2680 return 0;
2682 first_bit = find_first_bit(&val, cbm_len);
2683 zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);
2685 /* Clear any remaining bits to ensure contiguous region */
2686 bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
2687 return (u32)val;
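/*
 * Worked example (illustrative, assuming cbm_len = 5): a candidate CBM
 * of 0b11011 gives first_bit = 0 and zero_bit = 2, so bits 2..4 are
 * cleared and the function returns 0b00011 - only the first contiguous
 * run of ones survives.
 */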
2691 * Initialize cache resources per RDT domain
2693 * Set the RDT domain up to start off with all usable allocations. That is,
2694 * all shareable and unused bits. All-zero CBM is invalid.
2696 static int __init_one_rdt_domain(struct rdt_domain *d, struct resctrl_schema *s,
2697 u32 closid)
2699 enum resctrl_conf_type peer_type = resctrl_peer_type(s->conf_type);
2700 enum resctrl_conf_type t = s->conf_type;
2701 struct resctrl_staged_config *cfg;
2702 struct rdt_resource *r = s->res;
2703 u32 used_b = 0, unused_b = 0;
2704 unsigned long tmp_cbm;
2705 enum rdtgrp_mode mode;
2706 u32 peer_ctl, ctrl_val;
2709 cfg = &d->staged_config[t];
2710 cfg->have_new_ctrl = false;
2711 cfg->new_ctrl = r->cache.shareable_bits;
2712 used_b = r->cache.shareable_bits;
2713 for (i = 0; i < closids_supported(); i++) {
2714 if (closid_allocated(i) && i != closid) {
2715 mode = rdtgroup_mode_by_closid(i);
2716 if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
2718 * ctrl values for locksetup aren't relevant
2719 * until the schemata is written, and the mode
2720 * becomes RDT_MODE_PSEUDO_LOCKED.
2722 continue;
2723 }
2724 * If CDP is active include peer domain's
2725 * usage to ensure there is no overlap
2726 * with an exclusive group.
2728 if (resctrl_arch_get_cdp_enabled(r->rid))
2729 peer_ctl = resctrl_arch_get_config(r, d, i,
2730 peer_type);
2731 else
2732 peer_ctl = 0;
2733 ctrl_val = resctrl_arch_get_config(r, d, i,
2734 s->conf_type);
2735 used_b |= ctrl_val | peer_ctl;
2736 if (mode == RDT_MODE_SHAREABLE)
2737 cfg->new_ctrl |= ctrl_val | peer_ctl;
2740 if (d->plr && d->plr->cbm > 0)
2741 used_b |= d->plr->cbm;
2742 unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
2743 unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
2744 cfg->new_ctrl |= unused_b;
2746 * Force the initial CBM to be valid; the user can
2747 * modify the CBM based on system availability.
2749 cfg->new_ctrl = cbm_ensure_valid(cfg->new_ctrl, r);
2751 * Assign the u32 CBM to an unsigned long to ensure that
2752 * bitmap_weight() does not access out-of-bound memory.
2754 tmp_cbm = cfg->new_ctrl;
2755 if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
2756 rdt_last_cmd_printf("No space on %s:%d\n", s->name, d->id);
2759 cfg->have_new_ctrl = true;
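/*
 * Worked example (illustrative, assuming cbm_len = 8 and
 * shareable_bits = 0x03): if the only other allocated CLOSID is an
 * exclusive group with a CBM of 0xf0, then used_b = 0x03 | 0xf0 = 0xf3
 * and unused_b = 0xf3 ^ 0xff = 0x0c. The new group starts with
 * new_ctrl = 0x03 | 0x0c = 0x0f, which cbm_ensure_valid() leaves
 * unchanged because it is already contiguous.
 */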
2765 * Initialize cache resources with default values.
2767 * A new RDT group is being created on an allocation capable (CAT)
2768 * supporting system. Set this group up to start off with all usable
2769 * allocations.
2771 * If there are no more shareable bits available on any domain then
2772 * the entire allocation will fail.
2774 static int rdtgroup_init_cat(struct resctrl_schema *s, u32 closid)
2776 struct rdt_domain *d;
2779 list_for_each_entry(d, &s->res->domains, list) {
2780 ret = __init_one_rdt_domain(d, s, closid);
2781 if (ret < 0)
2782 return ret;
2785 return 0;
2788 /* Initialize MBA resource with default values. */
2789 static void rdtgroup_init_mba(struct rdt_resource *r)
2791 struct resctrl_staged_config *cfg;
2792 struct rdt_domain *d;
2794 list_for_each_entry(d, &r->domains, list) {
2795 cfg = &d->staged_config[CDP_NONE];
2796 cfg->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
2797 cfg->have_new_ctrl = true;
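/*
 * For example (illustrative): with linear MBA delay values the default
 * control is the maximum bandwidth percentage, so a new group starts
 * out unthrottled; under the "mba_MBps" software controller the staged
 * value is instead MBA_MAX_MBPS (U32_MAX), i.e. effectively unlimited.
 */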
2801 /* Initialize the RDT group's allocations. */
2802 static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
2804 struct resctrl_schema *s;
2805 struct rdt_resource *r;
2808 list_for_each_entry(s, &resctrl_schema_all, list) {
2809 r = s->res;
2810 if (r->rid == RDT_RESOURCE_MBA) {
2811 rdtgroup_init_mba(r);
2812 } else {
2813 ret = rdtgroup_init_cat(s, rdtgrp->closid);
2814 if (ret < 0)
2815 return ret;
2816 }
2818 ret = resctrl_arch_update_domains(r, rdtgrp->closid);
2819 if (ret < 0) {
2820 rdt_last_cmd_puts("Failed to initialize allocations\n");
2821 return ret;
2822 }
2826 rdtgrp->mode = RDT_MODE_SHAREABLE;
2831 static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
2832 const char *name, umode_t mode,
2833 enum rdt_group_type rtype, struct rdtgroup **r)
2835 struct rdtgroup *prdtgrp, *rdtgrp;
2836 struct kernfs_node *kn;
2840 prdtgrp = rdtgroup_kn_lock_live(parent_kn);
2841 if (!prdtgrp) {
2842 ret = -ENODEV;
2843 goto out_unlock;
2844 }
2846 if (rtype == RDTMON_GROUP &&
2847 (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
2848 prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
2850 rdt_last_cmd_puts("Pseudo-locking in progress\n");
2854 /* allocate the rdtgroup. */
2855 rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
2856 if (!rdtgrp) {
2857 ret = -ENOSPC;
2858 rdt_last_cmd_puts("Kernel out of memory\n");
2859 goto out_unlock;
2860 }
2861 *r = rdtgrp;
2862 rdtgrp->mon.parent = prdtgrp;
2863 rdtgrp->type = rtype;
2864 INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);
2866 /* kernfs creates the directory for rdtgrp */
2867 kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
2868 if (IS_ERR(kn)) {
2869 ret = PTR_ERR(kn);
2870 rdt_last_cmd_puts("kernfs create error\n");
2871 goto out_free_rgrp;
2872 }
2873 rdtgrp->kn = kn;
2876 * kernfs_remove() will drop the reference count on "kn" which
2877 * will free it. But we still need it to stick around for the
2878 * rdtgroup_kn_unlock(kn) call. Take one extra reference here,
2879 * which will be dropped by kernfs_put() in rdtgroup_remove().
2881 kernfs_get(kn);
2883 ret = rdtgroup_kn_set_ugid(kn);
2884 if (ret) {
2885 rdt_last_cmd_puts("kernfs perm error\n");
2886 goto out_destroy;
2887 }
2889 files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
2890 ret = rdtgroup_add_files(kn, files);
2891 if (ret) {
2892 rdt_last_cmd_puts("kernfs fill error\n");
2893 goto out_destroy;
2894 }
2896 if (rdt_mon_capable) {
2897 ret = alloc_rmid();
2898 if (ret < 0) {
2899 rdt_last_cmd_puts("Out of RMIDs\n");
2900 goto out_destroy;
2901 }
2902 rdtgrp->mon.rmid = ret;
2904 ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
2905 if (ret) {
2906 rdt_last_cmd_puts("kernfs subdir error\n");
2907 goto out_idfree;
2908 }
2909 }
2910 kernfs_activate(kn);
2913 * The caller unlocks the parent_kn upon success.
2915 return 0;
2917 out_idfree:
2918 free_rmid(rdtgrp->mon.rmid);
2919 out_destroy:
2920 kernfs_put(rdtgrp->kn);
2921 kernfs_remove(rdtgrp->kn);
2922 out_free_rgrp:
2923 kfree(rdtgrp);
2924 out_unlock:
2925 rdtgroup_kn_unlock(parent_kn);
2926 return ret;
2929 static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
2931 kernfs_remove(rgrp->kn);
2932 free_rmid(rgrp->mon.rmid);
2933 rdtgroup_remove(rgrp);
2937 * Create a monitor group under the "mon_groups" directory of a control
2938 * and monitor group (ctrl_mon). This is a resource group used
2939 * to monitor a subset of tasks and CPUs in its parent ctrl_mon group.
2941 static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
2942 const char *name, umode_t mode)
2944 struct rdtgroup *rdtgrp, *prgrp;
2947 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
2948 if (ret)
2949 return ret;
2951 prgrp = rdtgrp->mon.parent;
2952 rdtgrp->closid = prgrp->closid;
2955 * Add the rdtgrp to the list of rdtgrps the parent
2956 * ctrl_mon group has to track.
2958 list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);
2960 rdtgroup_kn_unlock(parent_kn);
2961 return ret;
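/*
 * Note (illustrative): a mon group shares its parent's CLOSID, and with
 * it the parent's allocations, but it received its own RMID in
 * mkdir_rdt_prepare(), so its subset of tasks and CPUs can be monitored
 * independently of the parent ctrl_mon group.
 */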
2965 * These are rdtgroups created under the root directory. They can be
2966 * used to allocate and monitor resources.
2968 static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
2969 const char *name, umode_t mode)
2971 struct rdtgroup *rdtgrp;
2972 struct kernfs_node *kn;
2976 ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
2977 if (ret)
2978 return ret;
2980 kn = rdtgrp->kn;
2981 ret = closid_alloc();
2983 rdt_last_cmd_puts("Out of CLOSIDs\n");
2984 goto out_common_fail;
2985 }
2986 closid = ret;
2987 ret = 0;
2989 rdtgrp->closid = closid;
2990 ret = rdtgroup_init_alloc(rdtgrp);
2991 if (ret < 0)
2992 goto out_id_free;
2994 list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);
2996 if (rdt_mon_capable) {
2998 * Create an empty mon_groups directory to hold the subset
2999 * of tasks and cpus to monitor.
3001 ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
3003 rdt_last_cmd_puts("kernfs subdir error\n");
3010 out_del_list:
3011 list_del(&rdtgrp->rdtgroup_list);
3012 out_id_free:
3013 closid_free(closid);
3014 out_common_fail:
3015 mkdir_rdt_prepare_clean(rdtgrp);
3016 out_unlock:
3017 rdtgroup_kn_unlock(parent_kn);
3018 return ret;
3022 * We allow creating mon groups only within a directory called "mon_groups"
3023 * which is present in every ctrl_mon group. Check if this is a valid
3024 * "mon_groups" directory.
3026 * 1. The directory should be named "mon_groups".
3027 * 2. The mon group itself should "not" be named "mon_groups".
3028 * This makes sure the "mon_groups" directory always has a ctrl_mon group
3029 * as its parent.
3031 static bool is_mon_groups(struct kernfs_node *kn, const char *name)
3033 return (!strcmp(kn->name, "mon_groups") &&
3034 strcmp(name, "mon_groups"));
3037 static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
3038 umode_t mode)
3040 /* Do not accept '\n' to avoid an unparsable situation. */
3041 if (strchr(name, '\n'))
3042 return -EINVAL;
3045 * If the parent directory is the root directory and RDT
3046 * allocation is supported, add a control and monitoring
3047 * directory.
3049 if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
3050 return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);
3053 * If RDT monitoring is supported and the parent directory is a valid
3054 * "mon_groups" directory, add a monitoring subdirectory.
3056 if (rdt_mon_capable && is_mon_groups(parent_kn, name))
3057 return rdtgroup_mkdir_mon(parent_kn, name, mode);
3059 return -EPERM;
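/*
 * From user space this dispatch looks like (illustrative, assuming
 * resctrl is mounted at /sys/fs/resctrl):
 *
 *	mkdir /sys/fs/resctrl/grp1                  <- ctrl_mon group
 *	mkdir /sys/fs/resctrl/grp1/mon_groups/m1    <- mon group
 *
 * A mkdir anywhere else fails with -EPERM per the fall-through above.
 */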
3062 static int rdtgroup_rmdir_mon(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3064 struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
3067 /* Give any tasks back to the parent group */
3068 rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);
3070 /* Update per cpu rmid of the moved CPUs first */
3071 for_each_cpu(cpu, &rdtgrp->cpu_mask)
3072 per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
3074 * Update the MSR on the moved CPUs and on CPUs on which a moved
3075 * task is running.
3077 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3078 update_closid_rmid(tmpmask, NULL);
3080 rdtgrp->flags = RDT_DELETED;
3081 free_rmid(rdtgrp->mon.rmid);
3084 * Remove the rdtgrp from the parent ctrl_mon group's list
3086 WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
3087 list_del(&rdtgrp->mon.crdtgrp_list);
3089 kernfs_remove(rdtgrp->kn);
3094 static int rdtgroup_ctrl_remove(struct rdtgroup *rdtgrp)
3096 rdtgrp->flags = RDT_DELETED;
3097 list_del(&rdtgrp->rdtgroup_list);
3099 kernfs_remove(rdtgrp->kn);
3103 static int rdtgroup_rmdir_ctrl(struct rdtgroup *rdtgrp, cpumask_var_t tmpmask)
3107 /* Give any tasks back to the default group */
3108 rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);
3110 /* Give any CPUs back to the default group */
3111 cpumask_or(&rdtgroup_default.cpu_mask,
3112 &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);
3114 /* Update per cpu closid and rmid of the moved CPUs first */
3115 for_each_cpu(cpu, &rdtgrp->cpu_mask) {
3116 per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
3117 per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
3121 * Update the MSR on the moved CPUs and on CPUs on which a moved
3122 * task is running.
3124 cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
3125 update_closid_rmid(tmpmask, NULL);
3127 closid_free(rdtgrp->closid);
3128 free_rmid(rdtgrp->mon.rmid);
3130 rdtgroup_ctrl_remove(rdtgrp);
3133 * Free all the child monitor group rmids.
3135 free_all_child_rdtgrp(rdtgrp);
3140 static int rdtgroup_rmdir(struct kernfs_node *kn)
3142 struct kernfs_node *parent_kn = kn->parent;
3143 struct rdtgroup *rdtgrp;
3144 cpumask_var_t tmpmask;
3147 if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
3148 return -ENOMEM;
3150 rdtgrp = rdtgroup_kn_lock_live(kn);
3151 if (!rdtgrp) {
3152 ret = -EPERM;
3153 goto out;
3154 }
3157 * If the rdtgroup is a ctrl_mon group and parent directory
3158 * is the root directory, remove the ctrl_mon group.
3160 * If the rdtgroup is a mon group and parent directory
3161 * is a valid "mon_groups" directory, remove the mon group.
3163 if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
3164 rdtgrp != &rdtgroup_default) {
3165 if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
3166 rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
3167 ret = rdtgroup_ctrl_remove(rdtgrp);
3168 } else {
3169 ret = rdtgroup_rmdir_ctrl(rdtgrp, tmpmask);
3170 }
3171 } else if (rdtgrp->type == RDTMON_GROUP &&
3172 is_mon_groups(parent_kn, kn->name)) {
3173 ret = rdtgroup_rmdir_mon(rdtgrp, tmpmask);
3174 } else {
3175 ret = -EPERM;
3176 }
3178 out:
3179 rdtgroup_kn_unlock(kn);
3180 free_cpumask_var(tmpmask);
3181 return ret;
3184 static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
3186 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L3))
3187 seq_puts(seq, ",cdp");
3189 if (resctrl_arch_get_cdp_enabled(RDT_RESOURCE_L2))
3190 seq_puts(seq, ",cdpl2");
3192 if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA].r_resctrl))
3193 seq_puts(seq, ",mba_MBps");
3198 static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
3199 .mkdir = rdtgroup_mkdir,
3200 .rmdir = rdtgroup_rmdir,
3201 .show_options = rdtgroup_show_options,
3204 static int __init rdtgroup_setup_root(void)
3208 rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
3209 KERNFS_ROOT_CREATE_DEACTIVATED |
3210 KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
3211 &rdtgroup_default);
3212 if (IS_ERR(rdt_root))
3213 return PTR_ERR(rdt_root);
3215 mutex_lock(&rdtgroup_mutex);
3217 rdtgroup_default.closid = 0;
3218 rdtgroup_default.mon.rmid = 0;
3219 rdtgroup_default.type = RDTCTRL_GROUP;
3220 INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);
3222 list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);
3224 ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
3225 if (ret) {
3226 kernfs_destroy_root(rdt_root);
3227 goto out;
3228 }
3230 rdtgroup_default.kn = rdt_root->kn;
3231 kernfs_activate(rdtgroup_default.kn);
3233 out:
3234 mutex_unlock(&rdtgroup_mutex);
3236 return ret;
3240 * rdtgroup_init - rdtgroup initialization
3242 * Setup resctrl file system including set up root, create mount point,
3243 * register rdtgroup filesystem, and initialize files under root directory.
3245 * Return: 0 on success or -errno
3247 int __init rdtgroup_init(void)
3251 seq_buf_init(&last_cmd_status, last_cmd_status_buf,
3252 sizeof(last_cmd_status_buf));
3254 ret = rdtgroup_setup_root();
3255 if (ret)
3256 return ret;
3258 ret = sysfs_create_mount_point(fs_kobj, "resctrl");
3262 ret = register_filesystem(&rdt_fs_type);
3263 if (ret)
3264 goto cleanup_mountpoint;
3267 * Adding the resctrl debugfs directory here may not be ideal since
3268 * it would let the resctrl debugfs directory appear on the debugfs
3269 * filesystem before the resctrl filesystem is mounted.
3270 * It may also be ok since that would enable debugging of RDT before
3271 * resctrl is mounted.
3272 * The reason why the debugfs directory is created here and not in
3273 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
3274 * during the debugfs directory creation also &sb->s_type->i_mutex_key
3275 * (the lockdep class of inode->i_rwsem). Other filesystem
3276 * interactions (eg. SyS_getdents) have the lock ordering:
3277 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
3278 * During mmap(), called with &mm->mmap_lock, the rdtgroup_mutex
3279 * is taken, thus creating dependency:
3280 * &mm->mmap_lock --> rdtgroup_mutex. Together with the two orderings
3281 * above this would close a lock dependency cycle.
3282 * By creating the debugfs directory here we avoid the dependency
3283 * that could cause a deadlock (file operations cannot occur until
3284 * the filesystem is mounted, but there is no known way to tell
3285 * lockdep that).
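/*
 * The potential cycle, summarized (illustrative):
 *
 *	i_mutex_key    --> mmap_lock       (e.g. getdents)
 *	mmap_lock      --> rdtgroup_mutex  (resctrl mmap path)
 *	rdtgroup_mutex --> i_mutex_key     (if the directory were
 *					    created in rdt_get_tree())
 */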
3287 debugfs_resctrl = debugfs_create_dir("resctrl", NULL);
3292 sysfs_remove_mount_point(fs_kobj, "resctrl");
3294 kernfs_destroy_root(rdt_root);
3299 void __exit rdtgroup_exit(void)
3301 debugfs_remove_recursive(debugfs_resctrl);
3302 unregister_filesystem(&rdt_fs_type);
3303 sysfs_remove_mount_point(fs_kobj, "resctrl");
3304 kernfs_destroy_root(rdt_root);