// SPDX-License-Identifier: GPL-2.0-only
/*
 * User interface for Resource Allocation in Resource Director Technology (RDT)
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Author: Fenghua Yu <fenghua.yu@intel.com>
 *
 * More information about RDT can be found in the Intel(R) x86 Architecture
 * Software Developer Manual.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/cacheinfo.h>
#include <linux/cpu.h>
#include <linux/debugfs.h>
#include <linux/fs.h>
#include <linux/fs_parser.h>
#include <linux/sysfs.h>
#include <linux/kernfs.h>
#include <linux/seq_buf.h>
#include <linux/seq_file.h>
#include <linux/sched/signal.h>
#include <linux/sched/task.h>
#include <linux/slab.h>
#include <linux/task_work.h>
#include <linux/user_namespace.h>

#include <uapi/linux/magic.h>

#include <asm/resctrl.h>
#include "internal.h"
DEFINE_STATIC_KEY_FALSE(rdt_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_mon_enable_key);
DEFINE_STATIC_KEY_FALSE(rdt_alloc_enable_key);
static struct kernfs_root *rdt_root;
struct rdtgroup rdtgroup_default;
LIST_HEAD(rdt_all_groups);

/* Kernel fs node for "info" directory under root */
static struct kernfs_node *kn_info;

/* Kernel fs node for "mon_groups" directory under root */
static struct kernfs_node *kn_mongrp;

/* Kernel fs node for "mon_data" directory under root */
static struct kernfs_node *kn_mondata;

static struct seq_buf last_cmd_status;
static char last_cmd_status_buf[512];

struct dentry *debugfs_resctrl;
void rdt_last_cmd_clear(void)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_clear(&last_cmd_status);
}

void rdt_last_cmd_puts(const char *s)
{
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_puts(&last_cmd_status, s);
}

void rdt_last_cmd_printf(const char *fmt, ...)
{
	va_list ap;

	va_start(ap, fmt);
	lockdep_assert_held(&rdtgroup_mutex);
	seq_buf_vprintf(&last_cmd_status, fmt, ap);
	va_end(ap);
}
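
/*
 * Illustrative note: the last_cmd_status buffer implements a simple
 * error-reporting protocol. A write handler that fails typically pairs an
 * errno return with a message recorded via rdt_last_cmd_puts()/_printf(),
 * and the user can then read /sys/fs/resctrl/info/last_cmd_status to learn
 * why the last command failed (it reads "ok" when nothing was recorded).
 */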
/*
 * Trivial allocator for CLOSIDs. Since h/w only supports a small number,
 * we can keep a bitmap of free CLOSIDs in a single integer.
 *
 * Using a global CLOSID across all resources has some advantages and
 * some drawbacks:
 * + We can simply set "current->closid" to assign a task to a resource
 *   group.
 * + Context switch code can avoid extra memory references deciding which
 *   CLOSID to load into the PQR_ASSOC MSR
 * - We give up some options in configuring resource groups across multi-socket
 *   systems.
 * - Our choices on how to configure each resource become progressively more
 *   limited as the number of resources grows.
 */
static int closid_free_map;
static int closid_free_map_len;

int closids_supported(void)
{
	return closid_free_map_len;
}
static void closid_init(void)
{
	struct rdt_resource *r;
	int rdt_min_closid = 32;

	/* Compute rdt_min_closid across all resources */
	for_each_alloc_enabled_rdt_resource(r)
		rdt_min_closid = min(rdt_min_closid, r->num_closid);

	closid_free_map = BIT_MASK(rdt_min_closid) - 1;

	/* CLOSID 0 is always reserved for the default group */
	closid_free_map &= ~1;
	closid_free_map_len = rdt_min_closid;
}

static int closid_alloc(void)
{
	u32 closid = ffs(closid_free_map);

	if (closid == 0)
		return -ENOSPC;
	closid--;
	closid_free_map &= ~(1 << closid);

	return closid;
}

void closid_free(int closid)
{
	closid_free_map |= 1 << closid;
}
/**
 * closid_allocated - test if provided closid is in use
 * @closid: closid to be tested
 *
 * Return: true if @closid is currently associated with a resource group,
 * false if @closid is free
 */
static bool closid_allocated(unsigned int closid)
{
	return (closid_free_map & (1 << closid)) == 0;
}
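
/*
 * Illustrative sketch (hypothetical, not built): how the free map behaves
 * on a system with four CLOSIDs. After closid_init(), closid_free_map is
 * 0b1110 because CLOSID 0 is reserved for the default group.
 */
#if 0
static void closid_example(void)
{
	int closid;

	closid_init();			/* free map: 0b1110 on a 4-CLOSID system */
	closid = closid_alloc();	/* returns 1, free map becomes 0b1100 */
	WARN_ON(!closid_allocated(closid));
	closid_free(closid);		/* free map back to 0b1110 */
}
#endif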
/**
 * rdtgroup_mode_by_closid - Return mode of resource group with closid
 * @closid: closid of the resource group
 *
 * Each resource group is associated with a @closid. Here the mode
 * of a resource group can be queried by searching for it using its closid.
 *
 * Return: mode as &enum rdtgrp_mode of resource group with closid @closid
 */
enum rdtgrp_mode rdtgroup_mode_by_closid(int closid)
{
	struct rdtgroup *rdtgrp;

	list_for_each_entry(rdtgrp, &rdt_all_groups, rdtgroup_list) {
		if (rdtgrp->closid == closid)
			return rdtgrp->mode;
	}

	return RDT_NUM_MODES;
}
static const char * const rdt_mode_str[] = {
	[RDT_MODE_SHAREABLE]		= "shareable",
	[RDT_MODE_EXCLUSIVE]		= "exclusive",
	[RDT_MODE_PSEUDO_LOCKSETUP]	= "pseudo-locksetup",
	[RDT_MODE_PSEUDO_LOCKED]	= "pseudo-locked",
};

/**
 * rdtgroup_mode_str - Return the string representation of mode
 * @mode: the resource group mode as &enum rdtgrp_mode
 *
 * Return: string representation of valid mode, "unknown" otherwise
 */
static const char *rdtgroup_mode_str(enum rdtgrp_mode mode)
{
	if (mode < RDT_MODE_SHAREABLE || mode >= RDT_NUM_MODES)
		return "unknown";

	return rdt_mode_str[mode];
}
/* set uid and gid of rdtgroup dirs and files to that of the creator */
static int rdtgroup_kn_set_ugid(struct kernfs_node *kn)
{
	struct iattr iattr = { .ia_valid = ATTR_UID | ATTR_GID,
				.ia_uid = current_fsuid(),
				.ia_gid = current_fsgid(), };

	if (uid_eq(iattr.ia_uid, GLOBAL_ROOT_UID) &&
	    gid_eq(iattr.ia_gid, GLOBAL_ROOT_GID))
		return 0;

	return kernfs_setattr(kn, &iattr);
}

static int rdtgroup_add_file(struct kernfs_node *parent_kn, struct rftype *rft)
{
	struct kernfs_node *kn;
	int ret;

	kn = __kernfs_create_file(parent_kn, rft->name, rft->mode,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID,
				  0, rft->kf_ops, rft, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return 0;
}
static int rdtgroup_seqfile_show(struct seq_file *m, void *arg)
{
	struct kernfs_open_file *of = m->private;
	struct rftype *rft = of->kn->priv;

	if (rft->seq_show)
		return rft->seq_show(of, m, arg);
	return 0;
}

static ssize_t rdtgroup_file_write(struct kernfs_open_file *of, char *buf,
				   size_t nbytes, loff_t off)
{
	struct rftype *rft = of->kn->priv;

	if (rft->write)
		return rft->write(of, buf, nbytes, off);

	return -EINVAL;
}

static struct kernfs_ops rdtgroup_kf_single_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.write			= rdtgroup_file_write,
	.seq_show		= rdtgroup_seqfile_show,
};

static struct kernfs_ops kf_mondata_ops = {
	.atomic_write_len	= PAGE_SIZE,
	.seq_show		= rdtgroup_mondata_show,
};

static bool is_cpu_list(struct kernfs_open_file *of)
{
	struct rftype *rft = of->kn->priv;

	return rft->flags & RFTYPE_FLAGS_CPUS_LIST;
}
static int rdtgroup_cpus_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct cpumask *mask;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);

	if (rdtgrp) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			if (!rdtgrp->plr->d) {
				rdt_last_cmd_clear();
				rdt_last_cmd_puts("Cache domain offline\n");
				ret = -ENODEV;
			} else {
				mask = &rdtgrp->plr->d->cpu_mask;
				seq_printf(s, is_cpu_list(of) ?
					   "%*pbl\n" : "%*pb\n",
					   cpumask_pr_args(mask));
			}
		} else {
			seq_printf(s, is_cpu_list(of) ? "%*pbl\n" : "%*pb\n",
				   cpumask_pr_args(&rdtgrp->cpu_mask));
		}
	} else {
		ret = -ENOENT;
	}
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
/*
 * This is safe against resctrl_sched_in() called from __switch_to()
 * because __switch_to() is executed with interrupts disabled. A local call
 * from update_closid_rmid() is protected against __switch_to() because
 * preemption is disabled.
 */
static void update_cpu_closid_rmid(void *info)
{
	struct rdtgroup *r = info;

	if (r) {
		this_cpu_write(pqr_state.default_closid, r->closid);
		this_cpu_write(pqr_state.default_rmid, r->mon.rmid);
	}

	/*
	 * We cannot unconditionally write the MSR because the current
	 * executing task might have its own closid selected. Just reuse
	 * the context switch code.
	 */
	resctrl_sched_in();
}

/*
 * Update the PQR_ASSOC MSR on all cpus in @cpu_mask,
 *
 * Per task closids/rmids must have been set up before calling this function.
 */
static void
update_closid_rmid(const struct cpumask *cpu_mask, struct rdtgroup *r)
{
	int cpu = get_cpu();

	if (cpumask_test_cpu(cpu, cpu_mask))
		update_cpu_closid_rmid(r);
	smp_call_function_many(cpu_mask, update_cpu_closid_rmid, r, 1);
	put_cpu();
}
static int cpus_mon_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			  cpumask_var_t tmpmask)
{
	struct rdtgroup *prgrp = rdtgrp->mon.parent, *crgrp;
	struct list_head *head;

	/* Check whether cpus belong to parent ctrl group */
	cpumask_andnot(tmpmask, newmask, &prgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		rdt_last_cmd_puts("Can only add CPUs to mongroup that belong to parent\n");
		return -EINVAL;
	}

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Give any dropped cpus to parent rdtgroup */
		cpumask_or(&prgrp->cpu_mask, &prgrp->cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, prgrp);
	}

	/*
	 * If we added cpus, remove them from previous group that owned them
	 * and update per-cpu rmid
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			if (crgrp == rdtgrp)
				continue;
			cpumask_andnot(&crgrp->cpu_mask, &crgrp->cpu_mask,
				       tmpmask);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	return 0;
}
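
/*
 * Illustrative example (hypothetical groups): control group "grp0" owns CPUs
 * 0-7 and has child monitor groups "mon0" (CPU 2) and "mon1" (CPU 3).
 * Writing "2-3" to mon0/cpus_list pulls CPU 3 out of mon1 into mon0;
 * writing "2" afterwards drops CPU 3 back to the parent's default
 * monitoring.
 */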
static void cpumask_rdtgrp_clear(struct rdtgroup *r, struct cpumask *m)
{
	struct rdtgroup *crgrp;

	cpumask_andnot(&r->cpu_mask, &r->cpu_mask, m);
	/* update the child mon group masks as well */
	list_for_each_entry(crgrp, &r->mon.crdtgrp_list, mon.crdtgrp_list)
		cpumask_and(&crgrp->cpu_mask, &r->cpu_mask, &crgrp->cpu_mask);
}
static int cpus_ctrl_write(struct rdtgroup *rdtgrp, cpumask_var_t newmask,
			   cpumask_var_t tmpmask, cpumask_var_t tmpmask1)
{
	struct rdtgroup *r, *crgrp;
	struct list_head *head;

	/* Check whether cpus are dropped from this group */
	cpumask_andnot(tmpmask, &rdtgrp->cpu_mask, newmask);
	if (cpumask_weight(tmpmask)) {
		/* Can't drop from default group */
		if (rdtgrp == &rdtgroup_default) {
			rdt_last_cmd_puts("Can't drop CPUs from default group\n");
			return -EINVAL;
		}

		/* Give any dropped cpus to rdtgroup_default */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, tmpmask);
		update_closid_rmid(tmpmask, &rdtgroup_default);
	}

	/*
	 * If we added cpus, remove them from previous group and
	 * the prev group's child groups that owned them
	 * and update per-cpu closid/rmid.
	 */
	cpumask_andnot(tmpmask, newmask, &rdtgrp->cpu_mask);
	if (cpumask_weight(tmpmask)) {
		list_for_each_entry(r, &rdt_all_groups, rdtgroup_list) {
			if (r == rdtgrp)
				continue;
			cpumask_and(tmpmask1, &r->cpu_mask, tmpmask);
			if (cpumask_weight(tmpmask1))
				cpumask_rdtgrp_clear(r, tmpmask1);
		}
		update_closid_rmid(tmpmask, rdtgrp);
	}

	/* Done pushing/pulling - update this group with new mask */
	cpumask_copy(&rdtgrp->cpu_mask, newmask);

	/*
	 * Clear child mon group masks since there is a new parent mask
	 * now and update the rmid for the cpus the child lost.
	 */
	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
		cpumask_and(tmpmask, &rdtgrp->cpu_mask, &crgrp->cpu_mask);
		update_closid_rmid(tmpmask, rdtgrp);
		cpumask_clear(&crgrp->cpu_mask);
	}

	return 0;
}
static ssize_t rdtgroup_cpus_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	cpumask_var_t tmpmask, newmask, tmpmask1;
	struct rdtgroup *rdtgrp;
	int ret;

	if (!buf)
		return -EINVAL;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;
	if (!zalloc_cpumask_var(&newmask, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		return -ENOMEM;
	}
	if (!zalloc_cpumask_var(&tmpmask1, GFP_KERNEL)) {
		free_cpumask_var(tmpmask);
		free_cpumask_var(newmask);
		return -ENOMEM;
	}

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		ret = -ENOENT;
		goto unlock;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	if (is_cpu_list(of))
		ret = cpulist_parse(buf, newmask);
	else
		ret = cpumask_parse(buf, newmask);

	if (ret) {
		rdt_last_cmd_puts("Bad CPU list/mask\n");
		goto unlock;
	}

	/* check that user didn't specify any offline cpus */
	cpumask_andnot(tmpmask, newmask, cpu_online_mask);
	if (cpumask_weight(tmpmask)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Can only assign online CPUs\n");
		goto unlock;
	}

	if (rdtgrp->type == RDTCTRL_GROUP)
		ret = cpus_ctrl_write(rdtgrp, newmask, tmpmask, tmpmask1);
	else if (rdtgrp->type == RDTMON_GROUP)
		ret = cpus_mon_write(rdtgrp, newmask, tmpmask);
	else
		ret = -EINVAL;

unlock:
	rdtgroup_kn_unlock(of->kn);
	free_cpumask_var(tmpmask);
	free_cpumask_var(newmask);
	free_cpumask_var(tmpmask1);

	return ret ?: nbytes;
}
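
/*
 * Illustrative user-space sketch (hypothetical group name, not part of the
 * kernel): assigning CPUs 0-3 to a resource group by writing its cpus_list
 * file, the interface served by rdtgroup_cpus_write() above.
 */
#if 0
#include <fcntl.h>
#include <unistd.h>

static int assign_cpus_example(void)
{
	int fd = open("/sys/fs/resctrl/grp0/cpus_list", O_WRONLY);

	if (fd < 0)
		return -1;
	/* On failure, a reason is left in info/last_cmd_status. */
	if (write(fd, "0-3", 3) < 0) {
		close(fd);
		return -1;
	}
	return close(fd);
}
#endif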
struct task_move_callback {
	struct callback_head	work;
	struct rdtgroup		*rdtgrp;
};

static void move_myself(struct callback_head *head)
{
	struct task_move_callback *callback;
	struct rdtgroup *rdtgrp;

	callback = container_of(head, struct task_move_callback, work);
	rdtgrp = callback->rdtgrp;

	/*
	 * If resource group was deleted before this task work callback
	 * was invoked, then assign the task to root group and free the
	 * resource group.
	 */
	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		current->closid = 0;
		current->rmid = 0;
		kfree(rdtgrp);
	}

	if (unlikely(current->flags & PF_EXITING))
		goto out;

	preempt_disable();
	/* update PQR_ASSOC MSR to make resource group go into effect */
	resctrl_sched_in();
	preempt_enable();

out:
	kfree(callback);
}

static int __rdtgroup_move_task(struct task_struct *tsk,
				struct rdtgroup *rdtgrp)
{
	struct task_move_callback *callback;
	int ret;

	callback = kzalloc(sizeof(*callback), GFP_KERNEL);
	if (!callback)
		return -ENOMEM;
	callback->work.func = move_myself;
	callback->rdtgrp = rdtgrp;

	/*
	 * Take a refcount, so rdtgrp cannot be freed before the
	 * callback has been invoked.
	 */
	atomic_inc(&rdtgrp->waitcount);
	ret = task_work_add(tsk, &callback->work, true);
	if (ret) {
		/*
		 * Task is exiting. Drop the refcount and free the callback.
		 * No need to check the refcount as the group cannot be
		 * deleted before the write function unlocks rdtgroup_mutex.
		 */
		atomic_dec(&rdtgrp->waitcount);
		kfree(callback);
		rdt_last_cmd_puts("Task exited\n");
	} else {
		/*
		 * For ctrl_mon groups move both closid and rmid.
		 * For monitor groups, can move the tasks only from
		 * their parent CTRL group.
		 */
		if (rdtgrp->type == RDTCTRL_GROUP) {
			tsk->closid = rdtgrp->closid;
			tsk->rmid = rdtgrp->mon.rmid;
		} else if (rdtgrp->type == RDTMON_GROUP) {
			if (rdtgrp->mon.parent->closid == tsk->closid) {
				tsk->rmid = rdtgrp->mon.rmid;
			} else {
				rdt_last_cmd_puts("Can't move task to different control group\n");
				ret = -EINVAL;
			}
		}
	}
	return ret;
}
static bool is_closid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_alloc_capable &&
	       (r->type == RDTCTRL_GROUP) && (t->closid == r->closid));
}

static bool is_rmid_match(struct task_struct *t, struct rdtgroup *r)
{
	return (rdt_mon_capable &&
	       (r->type == RDTMON_GROUP) && (t->rmid == r->mon.rmid));
}

/**
 * rdtgroup_tasks_assigned - Test if tasks have been assigned to resource group
 * @r: Resource group
 *
 * Return: 1 if tasks have been assigned to @r, 0 otherwise
 */
int rdtgroup_tasks_assigned(struct rdtgroup *r)
{
	struct task_struct *p, *t;
	int ret = 0;

	lockdep_assert_held(&rdtgroup_mutex);

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r)) {
			ret = 1;
			break;
		}
	}
	rcu_read_unlock();

	return ret;
}
static int rdtgroup_task_write_permission(struct task_struct *task,
					  struct kernfs_open_file *of)
{
	const struct cred *tcred = get_task_cred(task);
	const struct cred *cred = current_cred();
	int ret = 0;

	/*
	 * Even if we're attaching all tasks in the thread group, we only
	 * need to check permissions on one of them.
	 */
	if (!uid_eq(cred->euid, GLOBAL_ROOT_UID) &&
	    !uid_eq(cred->euid, tcred->uid) &&
	    !uid_eq(cred->euid, tcred->suid)) {
		rdt_last_cmd_printf("No permission to move task %d\n", task->pid);
		ret = -EPERM;
	}

	put_cred(tcred);
	return ret;
}

static int rdtgroup_move_task(pid_t pid, struct rdtgroup *rdtgrp,
			      struct kernfs_open_file *of)
{
	struct task_struct *tsk;
	int ret;

	rcu_read_lock();
	if (pid) {
		tsk = find_task_by_vpid(pid);
		if (!tsk) {
			rcu_read_unlock();
			rdt_last_cmd_printf("No task %d\n", pid);
			return -ESRCH;
		}
	} else {
		tsk = current;
	}

	get_task_struct(tsk);
	rcu_read_unlock();

	ret = rdtgroup_task_write_permission(tsk, of);
	if (!ret)
		ret = __rdtgroup_move_task(tsk, rdtgrp);

	put_task_struct(tsk);
	return ret;
}
static ssize_t rdtgroup_tasks_write(struct kernfs_open_file *of,
				    char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;
	pid_t pid;

	if (kstrtoint(strstrip(buf), 0, &pid) || pid < 0)
		return -EINVAL;
	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}
	rdt_last_cmd_clear();

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED ||
	    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto unlock;
	}

	ret = rdtgroup_move_task(pid, rdtgrp, of);

unlock:
	rdtgroup_kn_unlock(of->kn);

	return ret ?: nbytes;
}
static void show_rdt_tasks(struct rdtgroup *r, struct seq_file *s)
{
	struct task_struct *p, *t;

	rcu_read_lock();
	for_each_process_thread(p, t) {
		if (is_closid_match(t, r) || is_rmid_match(t, r))
			seq_printf(s, "%d\n", t->pid);
	}
	rcu_read_unlock();
}

static int rdtgroup_tasks_show(struct kernfs_open_file *of,
			       struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	int ret = 0;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (rdtgrp)
		show_rdt_tasks(rdtgrp, s);
	else
		ret = -ENOENT;
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
#ifdef CONFIG_PROC_CPU_RESCTRL

/*
 * A task can only be part of one resctrl control group and of one monitor
 * group which is associated to that control group.
 *
 * 1)   res:
 *      mon:
 *
 *    resctrl is not available.
 *
 * 2)   res:/
 *      mon:
 *
 *    Task is part of the root resctrl control group, and it is not associated
 *    to any monitor group.
 *
 * 3)  res:/
 *     mon:mon0
 *
 *    Task is part of the root resctrl control group and monitor group mon0.
 *
 * 4)  res:group0
 *     mon:
 *
 *    Task is part of resctrl control group group0, and it is not associated
 *    to any monitor group.
 *
 * 5) res:group0
 *    mon:mon1
 *
 *    Task is part of resctrl control group group0 and monitor group mon1.
 */
int proc_resctrl_show(struct seq_file *s, struct pid_namespace *ns,
		      struct pid *pid, struct task_struct *tsk)
{
	struct rdtgroup *rdtg;
	int ret = 0;

	mutex_lock(&rdtgroup_mutex);

	/* Return empty if resctrl has not been mounted. */
	if (!static_branch_unlikely(&rdt_enable_key)) {
		seq_puts(s, "res:\nmon:\n");
		goto unlock;
	}

	list_for_each_entry(rdtg, &rdt_all_groups, rdtgroup_list) {
		struct rdtgroup *crg;

		/*
		 * Task information is only relevant for shareable
		 * and exclusive groups.
		 */
		if (rdtg->mode != RDT_MODE_SHAREABLE &&
		    rdtg->mode != RDT_MODE_EXCLUSIVE)
			continue;

		if (rdtg->closid != tsk->closid)
			continue;

		seq_printf(s, "res:%s%s\n", (rdtg == &rdtgroup_default) ? "/" : "",
			   rdtg->kn->name);
		seq_puts(s, "mon:");
		list_for_each_entry(crg, &rdtg->mon.crdtgrp_list,
				    mon.crdtgrp_list) {
			if (tsk->rmid != crg->mon.rmid)
				continue;
			seq_printf(s, "%s", crg->kn->name);
			break;
		}
		seq_putc(s, '\n');
		goto unlock;
	}

	/*
	 * The above search should succeed. Otherwise return
	 * with an error.
	 */
	ret = -ENOENT;
unlock:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}
#endif
static int rdt_last_cmd_status_show(struct kernfs_open_file *of,
				    struct seq_file *seq, void *v)
{
	int len;

	mutex_lock(&rdtgroup_mutex);
	len = seq_buf_used(&last_cmd_status);
	if (len)
		seq_printf(seq, "%.*s", len, last_cmd_status_buf);
	else
		seq_puts(seq, "ok\n");
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
static int rdt_num_closids_show(struct kernfs_open_file *of,
				struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_closid);
	return 0;
}

static int rdt_default_ctrl_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->default_ctrl);
	return 0;
}

static int rdt_min_cbm_bits_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->cache.min_cbm_bits);
	return 0;
}

static int rdt_shareable_bits_show(struct kernfs_open_file *of,
				   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%x\n", r->cache.shareable_bits);
	return 0;
}
/**
 * rdt_bit_usage_show - Display current usage of resources
 *
 * A domain is a shared resource that can now be allocated differently. Here
 * we display the current regions of the domain as an annotated bitmask.
 * For each domain of this resource its allocation bitmask
 * is annotated as below to indicate the current usage of the corresponding bit:
 *   0 - currently unused
 *   X - currently available for sharing and used by software and hardware
 *   H - currently used by hardware only but available for software use
 *   S - currently used and shareable by software only
 *   E - currently used exclusively by one resource group
 *   P - currently pseudo-locked by one resource group
 */
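/*
 * Example (hypothetical L3 resource with an 8-bit CBM): bit_usage could
 * read "0=XXXXXSSS;1=SSSSSSEE" - on domain 0 the three lowest bits are used
 * and shareable by software only while the upper bits are shared with
 * hardware, and on domain 1 an exclusive group owns the two lowest bits.
 */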
static int rdt_bit_usage_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	/*
	 * Use unsigned long even though only 32 bits are used to ensure
	 * test_bit() is used safely.
	 */
	unsigned long sw_shareable = 0, hw_shareable = 0;
	unsigned long exclusive = 0, pseudo_locked = 0;
	struct rdt_domain *dom;
	int i, hwb, swb, excl, psl;
	enum rdtgrp_mode mode;
	bool sep = false;
	u32 *ctrl;

	mutex_lock(&rdtgroup_mutex);
	hw_shareable = r->cache.shareable_bits;
	list_for_each_entry(dom, &r->domains, list) {
		if (sep)
			seq_putc(seq, ';');
		ctrl = dom->ctrl_val;
		sw_shareable = 0;
		exclusive = 0;
		seq_printf(seq, "%d=", dom->id);
		for (i = 0; i < closids_supported(); i++, ctrl++) {
			if (!closid_allocated(i))
				continue;
			mode = rdtgroup_mode_by_closid(i);
			switch (mode) {
			case RDT_MODE_SHAREABLE:
				sw_shareable |= *ctrl;
				break;
			case RDT_MODE_EXCLUSIVE:
				exclusive |= *ctrl;
				break;
			case RDT_MODE_PSEUDO_LOCKSETUP:
				/*
				 * RDT_MODE_PSEUDO_LOCKSETUP is possible
				 * here but not included since the CBM
				 * associated with this CLOSID in this mode
				 * is not initialized and no task or cpu can be
				 * assigned this CLOSID.
				 */
				break;
			case RDT_MODE_PSEUDO_LOCKED:
			case RDT_NUM_MODES:
				WARN(1,
				     "invalid mode for closid %d\n", i);
				break;
			}
		}
		for (i = r->cache.cbm_len - 1; i >= 0; i--) {
			pseudo_locked = dom->plr ? dom->plr->cbm : 0;
			hwb = test_bit(i, &hw_shareable);
			swb = test_bit(i, &sw_shareable);
			excl = test_bit(i, &exclusive);
			psl = test_bit(i, &pseudo_locked);
			if (hwb && swb)
				seq_putc(seq, 'X');
			else if (hwb && !swb)
				seq_putc(seq, 'H');
			else if (!hwb && swb)
				seq_putc(seq, 'S');
			else if (excl)
				seq_putc(seq, 'E');
			else if (psl)
				seq_putc(seq, 'P');
			else /* Unused bits remain */
				seq_putc(seq, '0');
		}
		sep = true;
		seq_putc(seq, '\n');
	}
	mutex_unlock(&rdtgroup_mutex);
	return 0;
}
static int rdt_min_bw_show(struct kernfs_open_file *of,
			   struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.min_bw);
	return 0;
}

static int rdt_num_rmids_show(struct kernfs_open_file *of,
			      struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%d\n", r->num_rmid);

	return 0;
}

static int rdt_mon_features_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;
	struct mon_evt *mevt;

	list_for_each_entry(mevt, &r->evt_list, list)
		seq_printf(seq, "%s\n", mevt->name);

	return 0;
}

static int rdt_bw_gran_show(struct kernfs_open_file *of,
			    struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.bw_gran);
	return 0;
}

static int rdt_delay_linear_show(struct kernfs_open_file *of,
				 struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", r->membw.delay_linear);
	return 0;
}
static int max_threshold_occ_show(struct kernfs_open_file *of,
				  struct seq_file *seq, void *v)
{
	struct rdt_resource *r = of->kn->parent->priv;

	seq_printf(seq, "%u\n", resctrl_cqm_threshold * r->mon_scale);

	return 0;
}

static ssize_t max_threshold_occ_write(struct kernfs_open_file *of,
				       char *buf, size_t nbytes, loff_t off)
{
	struct rdt_resource *r = of->kn->parent->priv;
	unsigned int bytes;
	int ret;

	ret = kstrtouint(buf, 0, &bytes);
	if (ret)
		return ret;

	if (bytes > (boot_cpu_data.x86_cache_size * 1024))
		return -EINVAL;

	resctrl_cqm_threshold = bytes / r->mon_scale;

	return nbytes;
}
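
/*
 * Worked example (hypothetical values): with a mon_scale of 32768 bytes per
 * counter unit, writing "131072" sets resctrl_cqm_threshold to
 * 131072 / 32768 = 4; a subsequent read reports 4 * 32768 = 131072 bytes.
 */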
/**
 * rdtgroup_mode_show - Display mode of this resource group
 */
static int rdtgroup_mode_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	seq_printf(s, "%s\n", rdtgroup_mode_str(rdtgrp->mode));

	rdtgroup_kn_unlock(of->kn);
	return 0;
}
/**
 * rdt_cdp_peer_get - Retrieve CDP peer if it exists
 * @r: RDT resource to which RDT domain @d belongs
 * @d: Cache instance for which a CDP peer is requested
 * @r_cdp: RDT resource that shares hardware with @r (RDT resource peer)
 *         Used to return the result.
 * @d_cdp: RDT domain that shares hardware with @d (RDT domain peer)
 *         Used to return the result.
 *
 * RDT resources are managed independently and by extension the RDT domains
 * (RDT resource instances) are managed independently also. The Code and
 * Data Prioritization (CDP) RDT resources, while managed independently,
 * could refer to the same underlying hardware. For example,
 * RDT_RESOURCE_L2CODE and RDT_RESOURCE_L2DATA both refer to the L2 cache.
 *
 * When provided with an RDT resource @r and an instance of that RDT
 * resource @d rdt_cdp_peer_get() will return if there is a peer RDT
 * resource and the exact instance that shares the same hardware.
 *
 * Return: 0 if a CDP peer was found, <0 on error or if no CDP peer exists.
 *         If a CDP peer was found, @r_cdp will point to the peer RDT resource
 *         and @d_cdp will point to the peer RDT domain.
 */
static int rdt_cdp_peer_get(struct rdt_resource *r, struct rdt_domain *d,
			    struct rdt_resource **r_cdp,
			    struct rdt_domain **d_cdp)
{
	struct rdt_resource *_r_cdp = NULL;
	struct rdt_domain *_d_cdp = NULL;
	int ret = 0;

	switch (r->rid) {
	case RDT_RESOURCE_L3DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3CODE];
		break;
	case RDT_RESOURCE_L3CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L3DATA];
		break;
	case RDT_RESOURCE_L2DATA:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2CODE];
		break;
	case RDT_RESOURCE_L2CODE:
		_r_cdp = &rdt_resources_all[RDT_RESOURCE_L2DATA];
		break;
	default:
		ret = -ENOENT;
		goto out;
	}

	/*
	 * When a new CPU comes online and CDP is enabled then the new
	 * RDT domains (if any) associated with both CDP RDT resources
	 * are added in the same CPU online routine while the
	 * rdtgroup_mutex is held. It should thus not happen for one
	 * RDT domain to exist and be associated with its RDT CDP
	 * resource but there is no RDT domain associated with the
	 * peer RDT CDP resource. Hence the WARN.
	 */
	_d_cdp = rdt_find_domain(_r_cdp, d->id, NULL);
	if (WARN_ON(IS_ERR_OR_NULL(_d_cdp))) {
		_r_cdp = NULL;
		_d_cdp = NULL;
		ret = -EINVAL;
	}

out:
	*r_cdp = _r_cdp;
	*d_cdp = _d_cdp;

	return ret;
}
/**
 * __rdtgroup_cbm_overlaps - Does CBM for intended closid overlap with other
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Checks if provided @cbm intended to be used for @closid on domain
 * @d overlaps with any other closids or other hardware usage associated
 * with this domain. If @exclusive is true then only overlaps with
 * resource groups in exclusive mode will be considered. If @exclusive
 * is false then overlaps with any resource group or hardware entities
 * will be considered.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 *
 * Return: false if CBM does not overlap, true if it does.
 */
static bool __rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
				    unsigned long cbm, int closid, bool exclusive)
{
	enum rdtgrp_mode mode;
	unsigned long ctrl_b;
	u32 *ctrl;
	int i;

	/* Check for any overlap with regions used by hardware directly */
	if (!exclusive) {
		ctrl_b = r->cache.shareable_bits;
		if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len))
			return true;
	}

	/* Check for overlap with other resource groups */
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		ctrl_b = *ctrl;
		mode = rdtgroup_mode_by_closid(i);
		if (closid_allocated(i) && i != closid &&
		    mode != RDT_MODE_PSEUDO_LOCKSETUP) {
			if (bitmap_intersects(&cbm, &ctrl_b, r->cache.cbm_len)) {
				if (exclusive) {
					if (mode == RDT_MODE_EXCLUSIVE)
						return true;
					continue;
				}
				return true;
			}
		}
	}

	return false;
}
/**
 * rdtgroup_cbm_overlaps - Does CBM overlap with other use of hardware
 * @r: Resource to which domain instance @d belongs.
 * @d: The domain instance for which @closid is being tested.
 * @cbm: Capacity bitmask being tested.
 * @closid: Intended closid for @cbm.
 * @exclusive: Only check if overlaps with exclusive resource groups
 *
 * Resources that can be allocated using a CBM can use the CBM to control
 * the overlap of these allocations. rdtgroup_cbm_overlaps() is the test
 * for overlap. Overlap test is not limited to the specific resource for
 * which the CBM is intended though - when dealing with CDP resources that
 * share the underlying hardware the overlap check should be performed on
 * the CDP resource sharing the hardware also.
 *
 * Refer to description of __rdtgroup_cbm_overlaps() for the details of the
 * overlap test.
 *
 * Return: true if CBM overlap detected, false if there is no overlap
 */
bool rdtgroup_cbm_overlaps(struct rdt_resource *r, struct rdt_domain *d,
			   unsigned long cbm, int closid, bool exclusive)
{
	struct rdt_resource *r_cdp;
	struct rdt_domain *d_cdp;

	if (__rdtgroup_cbm_overlaps(r, d, cbm, closid, exclusive))
		return true;

	if (rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp) < 0)
		return false;

	return __rdtgroup_cbm_overlaps(r_cdp, d_cdp, cbm, closid, exclusive);
}
/**
 * rdtgroup_mode_test_exclusive - Test if this resource group can be exclusive
 *
 * An exclusive resource group implies that there should be no sharing of
 * its allocated resources. At the time this group is considered to be
 * exclusive this test can determine if its current schemata supports this
 * setting by testing for overlap with all other resource groups.
 *
 * Return: true if resource group can be exclusive, false if there is overlap
 * with allocations of other resource groups and thus this resource group
 * cannot be exclusive.
 */
static bool rdtgroup_mode_test_exclusive(struct rdtgroup *rdtgrp)
{
	int closid = rdtgrp->closid;
	struct rdt_resource *r;
	bool has_cache = false;
	struct rdt_domain *d;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA)
			continue;
		has_cache = true;
		list_for_each_entry(d, &r->domains, list) {
			if (rdtgroup_cbm_overlaps(r, d, d->ctrl_val[closid],
						  rdtgrp->closid, false)) {
				rdt_last_cmd_puts("Schemata overlaps\n");
				return false;
			}
		}
	}

	if (!has_cache) {
		rdt_last_cmd_puts("Cannot be exclusive without CAT/CDP\n");
		return false;
	}

	return true;
}
/**
 * rdtgroup_mode_write - Modify the resource group's mode
 *
 */
static ssize_t rdtgroup_mode_write(struct kernfs_open_file *of,
				   char *buf, size_t nbytes, loff_t off)
{
	struct rdtgroup *rdtgrp;
	enum rdtgrp_mode mode;
	int ret = 0;

	/* Valid input requires a trailing newline */
	if (nbytes == 0 || buf[nbytes - 1] != '\n')
		return -EINVAL;
	buf[nbytes - 1] = '\0';

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	rdt_last_cmd_clear();

	mode = rdtgrp->mode;

	if ((!strcmp(buf, "shareable") && mode == RDT_MODE_SHAREABLE) ||
	    (!strcmp(buf, "exclusive") && mode == RDT_MODE_EXCLUSIVE) ||
	    (!strcmp(buf, "pseudo-locksetup") &&
	     mode == RDT_MODE_PSEUDO_LOCKSETUP) ||
	    (!strcmp(buf, "pseudo-locked") && mode == RDT_MODE_PSEUDO_LOCKED))
		goto out;

	if (mode == RDT_MODE_PSEUDO_LOCKED) {
		rdt_last_cmd_puts("Cannot change pseudo-locked group\n");
		ret = -EINVAL;
		goto out;
	}

	if (!strcmp(buf, "shareable")) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_SHAREABLE;
	} else if (!strcmp(buf, "exclusive")) {
		if (!rdtgroup_mode_test_exclusive(rdtgrp)) {
			ret = -EINVAL;
			goto out;
		}
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
			ret = rdtgroup_locksetup_exit(rdtgrp);
			if (ret)
				goto out;
		}
		rdtgrp->mode = RDT_MODE_EXCLUSIVE;
	} else if (!strcmp(buf, "pseudo-locksetup")) {
		ret = rdtgroup_locksetup_enter(rdtgrp);
		if (ret)
			goto out;
		rdtgrp->mode = RDT_MODE_PSEUDO_LOCKSETUP;
	} else {
		rdt_last_cmd_puts("Unknown or unsupported mode\n");
		ret = -EINVAL;
	}

out:
	rdtgroup_kn_unlock(of->kn);
	return ret ?: nbytes;
}
/**
 * rdtgroup_cbm_to_size - Translate CBM to size in bytes
 * @r: RDT resource to which @d belongs.
 * @d: RDT domain instance.
 * @cbm: bitmask for which the size should be computed.
 *
 * The bitmask provided associated with the RDT domain instance @d will be
 * translated into how many bytes it represents. The size in bytes is
 * computed by first dividing the total cache size by the CBM length to
 * determine how many bytes each bit in the bitmask represents. The result
 * is multiplied with the number of bits set in the bitmask.
 *
 * @cbm is unsigned long, even if only 32 bits are used, to make the
 * bitmap functions work correctly.
 */
unsigned int rdtgroup_cbm_to_size(struct rdt_resource *r,
				  struct rdt_domain *d, unsigned long cbm)
{
	struct cpu_cacheinfo *ci;
	unsigned int size = 0;
	int num_b, i;

	num_b = bitmap_weight(&cbm, r->cache.cbm_len);
	ci = get_cpu_cacheinfo(cpumask_any(&d->cpu_mask));
	for (i = 0; i < ci->num_leaves; i++) {
		if (ci->info_list[i].level == r->cache_level) {
			size = ci->info_list[i].size / r->cache.cbm_len * num_b;
			break;
		}
	}

	return size;
}
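
/*
 * Worked example (hypothetical cache): for a 16 MB L3 with a 16-bit CBM,
 * each bit represents 16777216 / 16 = 1048576 bytes. A @cbm of 0x00ff has
 * eight bits set, so the region size reported is 8 * 1048576 = 8388608
 * bytes (8 MB).
 */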
/**
 * rdtgroup_size_show - Display size in bytes of allocated regions
 *
 * The "size" file mirrors the layout of the "schemata" file, printing the
 * size in bytes of each region instead of the capacity bitmask.
 *
 */
static int rdtgroup_size_show(struct kernfs_open_file *of,
			      struct seq_file *s, void *v)
{
	struct rdtgroup *rdtgrp;
	struct rdt_resource *r;
	struct rdt_domain *d;
	unsigned int size;
	int ret = 0;
	bool sep;
	u32 ctrl;

	rdtgrp = rdtgroup_kn_lock_live(of->kn);
	if (!rdtgrp) {
		rdtgroup_kn_unlock(of->kn);
		return -ENOENT;
	}

	if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
		if (!rdtgrp->plr->d) {
			rdt_last_cmd_clear();
			rdt_last_cmd_puts("Cache domain offline\n");
			ret = -ENODEV;
		} else {
			seq_printf(s, "%*s:", max_name_width,
				   rdtgrp->plr->r->name);
			size = rdtgroup_cbm_to_size(rdtgrp->plr->r,
						    rdtgrp->plr->d,
						    rdtgrp->plr->cbm);
			seq_printf(s, "%d=%u\n", rdtgrp->plr->d->id, size);
		}
		goto out;
	}

	for_each_alloc_enabled_rdt_resource(r) {
		sep = false;
		seq_printf(s, "%*s:", max_name_width, r->name);
		list_for_each_entry(d, &r->domains, list) {
			if (sep)
				seq_putc(s, ';');
			if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
				size = 0;
			} else {
				ctrl = (!is_mba_sc(r) ?
						d->ctrl_val[rdtgrp->closid] :
						d->mbps_val[rdtgrp->closid]);
				if (r->rid == RDT_RESOURCE_MBA)
					size = ctrl;
				else
					size = rdtgroup_cbm_to_size(r, d, ctrl);
			}
			seq_printf(s, "%d=%u", d->id, size);
			sep = true;
		}
		seq_putc(s, '\n');
	}

out:
	rdtgroup_kn_unlock(of->kn);

	return ret;
}
/* rdtgroup information files for one cache resource. */
static struct rftype res_common_files[] = {
	{
		.name		= "last_cmd_status",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_last_cmd_status_show,
		.fflags		= RF_TOP_INFO,
	},
	{
		.name		= "num_closids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_closids_show,
		.fflags		= RF_CTRL_INFO,
	},
	{
		.name		= "mon_features",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_mon_features_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "num_rmids",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_num_rmids_show,
		.fflags		= RF_MON_INFO,
	},
	{
		.name		= "cbm_mask",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_default_ctrl_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_cbm_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_cbm_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "shareable_bits",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_shareable_bits_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "bit_usage",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bit_usage_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "min_bandwidth",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_min_bw_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "bandwidth_gran",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_bw_gran_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "delay_linear",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdt_delay_linear_show,
		.fflags		= RF_CTRL_INFO | RFTYPE_RES_MB,
	},
	{
		.name		= "max_threshold_occupancy",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= max_threshold_occ_write,
		.seq_show	= max_threshold_occ_show,
		.fflags		= RF_MON_INFO | RFTYPE_RES_CACHE,
	},
	{
		.name		= "cpus",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "cpus_list",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_cpus_write,
		.seq_show	= rdtgroup_cpus_show,
		.flags		= RFTYPE_FLAGS_CPUS_LIST,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "tasks",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_tasks_write,
		.seq_show	= rdtgroup_tasks_show,
		.fflags		= RFTYPE_BASE,
	},
	{
		.name		= "schemata",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_schemata_write,
		.seq_show	= rdtgroup_schemata_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "mode",
		.mode		= 0644,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.write		= rdtgroup_mode_write,
		.seq_show	= rdtgroup_mode_show,
		.fflags		= RF_CTRL_BASE,
	},
	{
		.name		= "size",
		.mode		= 0444,
		.kf_ops		= &rdtgroup_kf_single_ops,
		.seq_show	= rdtgroup_size_show,
		.fflags		= RF_CTRL_BASE,
	},
};
static int rdtgroup_add_files(struct kernfs_node *kn, unsigned long fflags)
{
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	lockdep_assert_held(&rdtgroup_mutex);

	for (rft = rfts; rft < rfts + len; rft++) {
		if ((fflags & rft->fflags) == rft->fflags) {
			ret = rdtgroup_add_file(kn, rft);
			if (ret)
				goto error;
		}
	}

	return 0;
error:
	pr_warn("Failed to add %s, err=%d\n", rft->name, ret);
	while (--rft >= rfts) {
		if ((fflags & rft->fflags) == rft->fflags)
			kernfs_remove_by_name(kn, rft->name);
	}
	return ret;
}
/**
 * rdtgroup_kn_mode_restrict - Restrict user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 *
 * The permissions of named resctrl file, directory, or link are modified
 * to not allow read, write, or execute by any user.
 *
 * WARNING: This function is intended to communicate to the user that the
 * resctrl file has been locked down - that it is not relevant to the
 * particular state the system finds itself in. It should not be relied
 * on to protect from user access because after the file's permissions
 * are restricted the user can still change the permissions using chmod
 * from the command line.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restrict(struct rdtgroup *r, const char *name)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn;
	int ret = 0;

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		iattr.ia_mode = S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode = S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode = S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
/**
 * rdtgroup_kn_mode_restore - Restore user access to named resctrl file
 * @r: The resource group with which the file is associated.
 * @name: Name of the file
 * @mask: Mask of permissions that should be restored
 *
 * Restore the permissions of the named file. If @name is a directory the
 * permissions of its parent will be used.
 *
 * Return: 0 on success, <0 on failure.
 */
int rdtgroup_kn_mode_restore(struct rdtgroup *r, const char *name,
			     umode_t mask)
{
	struct iattr iattr = {.ia_valid = ATTR_MODE,};
	struct kernfs_node *kn, *parent;
	struct rftype *rfts, *rft;
	int ret, len;

	rfts = res_common_files;
	len = ARRAY_SIZE(res_common_files);

	for (rft = rfts; rft < rfts + len; rft++) {
		if (!strcmp(rft->name, name))
			iattr.ia_mode = rft->mode & mask;
	}

	kn = kernfs_find_and_get_ns(r->kn, name, NULL);
	if (!kn)
		return -ENOENT;

	switch (kernfs_type(kn)) {
	case KERNFS_DIR:
		parent = kernfs_get_parent(kn);
		if (parent) {
			iattr.ia_mode |= parent->mode;
			kernfs_put(parent);
		}
		iattr.ia_mode |= S_IFDIR;
		break;
	case KERNFS_FILE:
		iattr.ia_mode |= S_IFREG;
		break;
	case KERNFS_LINK:
		iattr.ia_mode |= S_IFLNK;
		break;
	}

	ret = kernfs_setattr(kn, &iattr);
	kernfs_put(kn);
	return ret;
}
static int rdtgroup_mkdir_info_resdir(struct rdt_resource *r, char *name,
				      unsigned long fflags)
{
	struct kernfs_node *kn_subdir;
	int ret;

	kn_subdir = kernfs_create_dir(kn_info, name,
				      kn_info->mode, r);
	if (IS_ERR(kn_subdir))
		return PTR_ERR(kn_subdir);

	kernfs_get(kn_subdir);
	ret = rdtgroup_kn_set_ugid(kn_subdir);
	if (ret)
		return ret;

	ret = rdtgroup_add_files(kn_subdir, fflags);
	if (!ret)
		kernfs_activate(kn_subdir);

	return ret;
}
static int rdtgroup_create_info_dir(struct kernfs_node *parent_kn)
{
	struct rdt_resource *r;
	unsigned long fflags;
	char name[32];
	int ret;

	/* create the directory */
	kn_info = kernfs_create_dir(parent_kn, "info", parent_kn->mode, NULL);
	if (IS_ERR(kn_info))
		return PTR_ERR(kn_info);
	kernfs_get(kn_info);

	ret = rdtgroup_add_files(kn_info, RF_TOP_INFO);
	if (ret)
		goto out_destroy;

	for_each_alloc_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_CTRL_INFO;
		ret = rdtgroup_mkdir_info_resdir(r, r->name, fflags);
		if (ret)
			goto out_destroy;
	}

	for_each_mon_enabled_rdt_resource(r) {
		fflags = r->fflags | RF_MON_INFO;
		sprintf(name, "%s_MON", r->name);
		ret = rdtgroup_mkdir_info_resdir(r, name, fflags);
		if (ret)
			goto out_destroy;
	}

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn_info);

	ret = rdtgroup_kn_set_ugid(kn_info);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn_info);

	return 0;

out_destroy:
	kernfs_remove(kn_info);
	return ret;
}
static int
mongroup_create_dir(struct kernfs_node *parent_kn, struct rdtgroup *prgrp,
		    char *name, struct kernfs_node **dest_kn)
{
	struct kernfs_node *kn;
	int ret;

	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that @rdtgrp->kn is always accessible.
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	kernfs_activate(kn);

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
static void l3_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L3_QOS_CFG, *enable ? L3_QOS_CDP_ENABLE : 0ULL);
}

static void l2_qos_cfg_update(void *arg)
{
	bool *enable = arg;

	wrmsrl(MSR_IA32_L2_QOS_CFG, *enable ? L2_QOS_CDP_ENABLE : 0ULL);
}

static inline bool is_mba_linear(void)
{
	return rdt_resources_all[RDT_RESOURCE_MBA].membw.delay_linear;
}
static int set_cache_qos_cfg(int level, bool enable)
{
	void (*update)(void *arg);
	struct rdt_resource *r_l;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int cpu;

	if (level == RDT_RESOURCE_L3)
		update = l3_qos_cfg_update;
	else if (level == RDT_RESOURCE_L2)
		update = l2_qos_cfg_update;
	else
		return -EINVAL;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	r_l = &rdt_resources_all[level];
	list_for_each_entry(d, &r_l->domains, list) {
		/* Pick one CPU from each domain instance to update MSR */
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);
	}
	cpu = get_cpu();
	/* Update QOS_CFG MSR on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		update(&enable);
	/* Update QOS_CFG MSR on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, update, &enable, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
/* Restore the qos cfg state when a domain comes online */
void rdt_domain_reconfigure_cdp(struct rdt_resource *r)
{
	if (!r->alloc_capable)
		return;

	if (r == &rdt_resources_all[RDT_RESOURCE_L2DATA])
		l2_qos_cfg_update(&r->alloc_enabled);

	if (r == &rdt_resources_all[RDT_RESOURCE_L3DATA])
		l3_qos_cfg_update(&r->alloc_enabled);
}
/*
 * Enable or disable the MBA software controller
 * which helps user specify bandwidth in MBps.
 * MBA software controller is supported only if
 * MBM is supported and MBA is in linear scale.
 */
static int set_mba_sc(bool mba_sc)
{
	struct rdt_resource *r = &rdt_resources_all[RDT_RESOURCE_MBA];
	struct rdt_domain *d;

	if (!is_mbm_enabled() || !is_mba_linear() ||
	    mba_sc == is_mba_sc(r))
		return -EINVAL;

	r->membw.mba_sc = mba_sc;
	list_for_each_entry(d, &r->domains, list)
		setup_default_ctrlval(r, d->ctrl_val, d->mbps_val);

	return 0;
}
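
/*
 * Illustrative note: with the software controller enabled (the "mba_MBps"
 * mount option), the MB lines in the schemata file are interpreted as
 * bandwidth in MBps instead of the hardware's delay percentage, and the
 * kernel adjusts the underlying percentage using MBM measurements.
 */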
static int cdp_enable(int level, int data_type, int code_type)
{
	struct rdt_resource *r_ldata = &rdt_resources_all[data_type];
	struct rdt_resource *r_lcode = &rdt_resources_all[code_type];
	struct rdt_resource *r_l = &rdt_resources_all[level];
	int ret;

	if (!r_l->alloc_capable || !r_ldata->alloc_capable ||
	    !r_lcode->alloc_capable)
		return -EINVAL;

	ret = set_cache_qos_cfg(level, true);
	if (!ret) {
		r_l->alloc_enabled = false;
		r_ldata->alloc_enabled = true;
		r_lcode->alloc_enabled = true;
	}
	return ret;
}

static int cdpl3_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA,
			  RDT_RESOURCE_L3CODE);
}

static int cdpl2_enable(void)
{
	return cdp_enable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA,
			  RDT_RESOURCE_L2CODE);
}

static void cdp_disable(int level, int data_type, int code_type)
{
	struct rdt_resource *r = &rdt_resources_all[level];

	r->alloc_enabled = r->alloc_capable;

	if (rdt_resources_all[data_type].alloc_enabled) {
		rdt_resources_all[data_type].alloc_enabled = false;
		rdt_resources_all[code_type].alloc_enabled = false;
		set_cache_qos_cfg(level, false);
	}
}

static void cdpl3_disable(void)
{
	cdp_disable(RDT_RESOURCE_L3, RDT_RESOURCE_L3DATA, RDT_RESOURCE_L3CODE);
}

static void cdpl2_disable(void)
{
	cdp_disable(RDT_RESOURCE_L2, RDT_RESOURCE_L2DATA, RDT_RESOURCE_L2CODE);
}

static void cdp_disable_all(void)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		cdpl3_disable();
	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		cdpl2_disable();
}
/*
 * We don't allow rdtgroup directories to be created anywhere
 * except the root directory. Thus when looking for the rdtgroup
 * structure for a kernfs node we are either looking at a directory,
 * in which case the rdtgroup structure is pointed at by the "priv"
 * field, otherwise we have a file, and need only look to the parent
 * to find the rdtgroup.
 */
static struct rdtgroup *kernfs_to_rdtgroup(struct kernfs_node *kn)
{
	if (kernfs_type(kn) == KERNFS_DIR) {
		/*
		 * All the resource directories use "kn->priv"
		 * to point to the "struct rdtgroup" for the
		 * resource. "info" and its subdirectories don't
		 * have rdtgroup structures, so return NULL here.
		 */
		if (kn == kn_info || kn->parent == kn_info)
			return NULL;
		else
			return kn->priv;
	} else {
		return kn->parent->priv;
	}
}

struct rdtgroup *rdtgroup_kn_lock_live(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return NULL;

	atomic_inc(&rdtgrp->waitcount);
	kernfs_break_active_protection(kn);

	mutex_lock(&rdtgroup_mutex);

	/* Was this group deleted while we waited? */
	if (rdtgrp->flags & RDT_DELETED)
		return NULL;

	return rdtgrp;
}
void rdtgroup_kn_unlock(struct kernfs_node *kn)
{
	struct rdtgroup *rdtgrp = kernfs_to_rdtgroup(kn);

	if (!rdtgrp)
		return;

	mutex_unlock(&rdtgroup_mutex);

	if (atomic_dec_and_test(&rdtgrp->waitcount) &&
	    (rdtgrp->flags & RDT_DELETED)) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);
		kernfs_unbreak_active_protection(kn);
		kernfs_put(rdtgrp->kn);
		kfree(rdtgrp);
	} else {
		kernfs_unbreak_active_protection(kn);
	}
}
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **mon_data_kn);

static int rdt_enable_ctx(struct rdt_fs_context *ctx)
{
	int ret = 0;

	if (ctx->enable_cdpl2)
		ret = cdpl2_enable();

	if (!ret && ctx->enable_cdpl3)
		ret = cdpl3_enable();

	if (!ret && ctx->enable_mba_mbps)
		ret = set_mba_sc(true);

	return ret;
}
static int rdt_get_tree(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct rdt_domain *dom;
	struct rdt_resource *r;
	int ret;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);
	/*
	 * resctrl file system can only be mounted once.
	 */
	if (static_branch_unlikely(&rdt_enable_key)) {
		ret = -EBUSY;
		goto out;
	}

	ret = rdt_enable_ctx(ctx);
	if (ret < 0)
		goto out_cdp;

	closid_init();

	ret = rdtgroup_create_info_dir(rdtgroup_default.kn);
	if (ret < 0)
		goto out_mba;

	if (rdt_mon_capable) {
		ret = mongroup_create_dir(rdtgroup_default.kn,
					  &rdtgroup_default, "mon_groups",
					  &kn_mongrp);
		if (ret < 0)
			goto out_info;
		kernfs_get(kn_mongrp);

		ret = mkdir_mondata_all(rdtgroup_default.kn,
					&rdtgroup_default, &kn_mondata);
		if (ret < 0)
			goto out_mongrp;
		kernfs_get(kn_mondata);
		rdtgroup_default.mon.mon_data_kn = kn_mondata;
	}

	ret = rdt_pseudo_lock_init();
	if (ret)
		goto out_mondata;

	ret = kernfs_get_tree(fc);
	if (ret < 0)
		goto out_psl;

	if (rdt_alloc_capable)
		static_branch_enable_cpuslocked(&rdt_alloc_enable_key);
	if (rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_mon_enable_key);

	if (rdt_alloc_capable || rdt_mon_capable)
		static_branch_enable_cpuslocked(&rdt_enable_key);

	if (is_mbm_enabled()) {
		r = &rdt_resources_all[RDT_RESOURCE_L3];
		list_for_each_entry(dom, &r->domains, list)
			mbm_setup_overflow_handler(dom, MBM_OVERFLOW_INTERVAL);
	}

	goto out;

out_psl:
	rdt_pseudo_lock_release();
out_mondata:
	if (rdt_mon_capable)
		kernfs_remove(kn_mondata);
out_mongrp:
	if (rdt_mon_capable)
		kernfs_remove(kn_mongrp);
out_info:
	kernfs_remove(kn_info);
out_mba:
	if (ctx->enable_mba_mbps)
		set_mba_sc(false);
out_cdp:
	cdp_disable_all();
out:
	rdt_last_cmd_clear();
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
	return ret;
}
enum rdt_param {
	Opt_cdp,
	Opt_cdpl2,
	Opt_mba_mbps,
	nr__rdt_params
};

static const struct fs_parameter_spec rdt_fs_parameters[] = {
	fsparam_flag("cdp",		Opt_cdp),
	fsparam_flag("cdpl2",		Opt_cdpl2),
	fsparam_flag("mba_MBps",	Opt_mba_mbps),
	{}
};

static int rdt_parse_param(struct fs_context *fc, struct fs_parameter *param)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);
	struct fs_parse_result result;
	int opt;

	opt = fs_parse(fc, rdt_fs_parameters, param, &result);
	if (opt < 0)
		return opt;

	switch (opt) {
	case Opt_cdp:
		ctx->enable_cdpl3 = true;
		return 0;
	case Opt_cdpl2:
		ctx->enable_cdpl2 = true;
		return 0;
	case Opt_mba_mbps:
		if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
			return -EINVAL;
		ctx->enable_mba_mbps = true;
		return 0;
	}

	return -EINVAL;
}
static void rdt_fs_context_free(struct fs_context *fc)
{
	struct rdt_fs_context *ctx = rdt_fc2context(fc);

	kernfs_free_fs_context(fc);
	kfree(ctx);
}

static const struct fs_context_operations rdt_fs_context_ops = {
	.free		= rdt_fs_context_free,
	.parse_param	= rdt_parse_param,
	.get_tree	= rdt_get_tree,
};

static int rdt_init_fs_context(struct fs_context *fc)
{
	struct rdt_fs_context *ctx;

	ctx = kzalloc(sizeof(struct rdt_fs_context), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;

	ctx->kfc.root = rdt_root;
	ctx->kfc.magic = RDTGROUP_SUPER_MAGIC;
	fc->fs_private = &ctx->kfc;
	fc->ops = &rdt_fs_context_ops;
	put_user_ns(fc->user_ns);
	fc->user_ns = get_user_ns(&init_user_ns);
	fc->global = true;
	return 0;
}
static int reset_all_ctrls(struct rdt_resource *r)
{
	struct msr_param msr_param;
	cpumask_var_t cpu_mask;
	struct rdt_domain *d;
	int i, cpu;

	if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
		return -ENOMEM;

	msr_param.res = r;
	msr_param.low = 0;
	msr_param.high = r->num_closid;

	/*
	 * Disable resource control for this resource by setting all
	 * CBMs in all domains to the maximum mask value. Pick one CPU
	 * from each domain to update the MSRs below.
	 */
	list_for_each_entry(d, &r->domains, list) {
		cpumask_set_cpu(cpumask_any(&d->cpu_mask), cpu_mask);

		for (i = 0; i < r->num_closid; i++)
			d->ctrl_val[i] = r->default_ctrl;
	}
	cpu = get_cpu();
	/* Update CBM on this cpu if it's in cpu_mask. */
	if (cpumask_test_cpu(cpu, cpu_mask))
		rdt_ctrl_update(&msr_param);
	/* Update CBM on all other cpus in cpu_mask. */
	smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
	put_cpu();

	free_cpumask_var(cpu_mask);

	return 0;
}
/*
 * Move tasks from one to the other group. If @from is NULL, then all tasks
 * in the system are moved unconditionally (used for teardown).
 *
 * If @mask is not NULL the cpus on which moved tasks are running are set
 * in that mask so the update smp function call is restricted to affected
 * cpus.
 */
static void rdt_move_group_tasks(struct rdtgroup *from, struct rdtgroup *to,
				 struct cpumask *mask)
{
	struct task_struct *p, *t;

	read_lock(&tasklist_lock);
	for_each_process_thread(p, t) {
		if (!from || is_closid_match(t, from) ||
		    is_rmid_match(t, from)) {
			t->closid = to->closid;
			t->rmid = to->mon.rmid;

#ifdef CONFIG_SMP
			/*
			 * This is safe on x86 w/o barriers as the ordering
			 * of writing to task_cpu() and t->on_cpu is
			 * reverse to the reading here. The detection is
			 * inaccurate as tasks might move or schedule
			 * before the smp function call takes place. In
			 * such a case the function call is pointless, but
			 * there is no other side effect.
			 */
			if (mask && t->on_cpu)
				cpumask_set_cpu(task_cpu(t), mask);
#endif
		}
	}
	read_unlock(&tasklist_lock);
}
static void free_all_child_rdtgrp(struct rdtgroup *rdtgrp)
{
	struct rdtgroup *sentry, *stmp;
	struct list_head *head;

	head = &rdtgrp->mon.crdtgrp_list;
	list_for_each_entry_safe(sentry, stmp, head, mon.crdtgrp_list) {
		free_rmid(sentry->mon.rmid);
		list_del(&sentry->mon.crdtgrp_list);

		if (atomic_read(&sentry->waitcount) != 0)
			sentry->flags = RDT_DELETED;
		else
			kfree(sentry);
	}
}
/*
 * Forcibly remove all subdirectories under root.
 */
static void rmdir_all_sub(void)
{
	struct rdtgroup *rdtgrp, *tmp;

	/* Move all tasks to the default resource group */
	rdt_move_group_tasks(NULL, &rdtgroup_default, NULL);

	list_for_each_entry_safe(rdtgrp, tmp, &rdt_all_groups, rdtgroup_list) {
		/* Free any child rmids */
		free_all_child_rdtgrp(rdtgrp);

		/* Remove each rdtgroup other than root */
		if (rdtgrp == &rdtgroup_default)
			continue;

		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)
			rdtgroup_pseudo_lock_remove(rdtgrp);

		/*
		 * Give any CPUs back to the default group. We cannot copy
		 * cpu_online_mask because a CPU might have executed the
		 * offline callback already, but is still marked online.
		 */
		cpumask_or(&rdtgroup_default.cpu_mask,
			   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

		free_rmid(rdtgrp->mon.rmid);

		kernfs_remove(rdtgrp->kn);
		list_del(&rdtgrp->rdtgroup_list);

		if (atomic_read(&rdtgrp->waitcount) != 0)
			rdtgrp->flags = RDT_DELETED;
		else
			kfree(rdtgrp);
	}
	/* Notify online CPUs to update per cpu storage and PQR_ASSOC MSR */
	update_closid_rmid(cpu_online_mask, &rdtgroup_default);

	kernfs_remove(kn_info);
	kernfs_remove(kn_mongrp);
	kernfs_remove(kn_mondata);
}
static void rdt_kill_sb(struct super_block *sb)
{
	struct rdt_resource *r;

	cpus_read_lock();
	mutex_lock(&rdtgroup_mutex);

	set_mba_sc(false);

	/* Put everything back to default values. */
	for_each_alloc_enabled_rdt_resource(r)
		reset_all_ctrls(r);
	cdp_disable_all();
	rmdir_all_sub();
	rdt_pseudo_lock_release();
	rdtgroup_default.mode = RDT_MODE_SHAREABLE;
	static_branch_disable_cpuslocked(&rdt_alloc_enable_key);
	static_branch_disable_cpuslocked(&rdt_mon_enable_key);
	static_branch_disable_cpuslocked(&rdt_enable_key);
	kernfs_kill_sb(sb);
	mutex_unlock(&rdtgroup_mutex);
	cpus_read_unlock();
}
static struct file_system_type rdt_fs_type = {
	.name			= "resctrl",
	.init_fs_context	= rdt_init_fs_context,
	.parameters		= rdt_fs_parameters,
	.kill_sb		= rdt_kill_sb,
};
static int mon_addfile(struct kernfs_node *parent_kn, const char *name,
		       void *priv)
{
	struct kernfs_node *kn;
	int ret = 0;

	kn = __kernfs_create_file(parent_kn, name, 0444,
				  GLOBAL_ROOT_UID, GLOBAL_ROOT_GID, 0,
				  &kf_mondata_ops, priv, NULL, NULL);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		kernfs_remove(kn);
		return ret;
	}

	return ret;
}
/*
 * Remove all subdirectories of mon_data of ctrl_mon groups
 * and monitor groups with given domain id.
 */
void rmdir_mondata_subdir_allrdtgrp(struct rdt_resource *r, unsigned int dom_id)
{
	struct rdtgroup *prgrp, *crgrp;
	char name[32];

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		sprintf(name, "mon_%s_%02d", r->name, dom_id);
		kernfs_remove_by_name(prgrp->mon.mon_data_kn, name);

		list_for_each_entry(crgrp, &prgrp->mon.crdtgrp_list, mon.crdtgrp_list)
			kernfs_remove_by_name(crgrp->mon.mon_data_kn, name);
	}
}
static int mkdir_mondata_subdir(struct kernfs_node *parent_kn,
				struct rdt_domain *d,
				struct rdt_resource *r, struct rdtgroup *prgrp)
{
	union mon_data_bits priv;
	struct kernfs_node *kn;
	struct mon_evt *mevt;
	struct rmid_read rr;
	char name[32];
	int ret;

	sprintf(name, "mon_%s_%02d", r->name, d->id);
	/* create the directory */
	kn = kernfs_create_dir(parent_kn, name, parent_kn->mode, prgrp);
	if (IS_ERR(kn))
		return PTR_ERR(kn);

	/*
	 * This extra ref will be put in kernfs_remove() and guarantees
	 * that kn is always accessible.
	 */
	kernfs_get(kn);
	ret = rdtgroup_kn_set_ugid(kn);
	if (ret)
		goto out_destroy;

	if (WARN_ON(list_empty(&r->evt_list))) {
		ret = -EPERM;
		goto out_destroy;
	}

	priv.u.rid = r->rid;
	priv.u.domid = d->id;
	list_for_each_entry(mevt, &r->evt_list, list) {
		priv.u.evtid = mevt->evtid;
		ret = mon_addfile(kn, mevt->name, priv.priv);
		if (ret)
			goto out_destroy;

		if (is_mbm_event(mevt->evtid))
			mon_event_read(&rr, r, d, prgrp, mevt->evtid, true);
	}
	kernfs_activate(kn);
	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}
/*
 * Add all subdirectories of mon_data for "ctrl_mon" groups
 * and "monitor" groups with given domain id.
 */
void mkdir_mondata_subdir_allrdtgrp(struct rdt_resource *r,
				    struct rdt_domain *d)
{
	struct kernfs_node *parent_kn;
	struct rdtgroup *prgrp, *crgrp;
	struct list_head *head;

	if (!r->mon_enabled)
		return;

	list_for_each_entry(prgrp, &rdt_all_groups, rdtgroup_list) {
		parent_kn = prgrp->mon.mon_data_kn;
		mkdir_mondata_subdir(parent_kn, d, r, prgrp);

		head = &prgrp->mon.crdtgrp_list;
		list_for_each_entry(crgrp, head, mon.crdtgrp_list) {
			parent_kn = crgrp->mon.mon_data_kn;
			mkdir_mondata_subdir(parent_kn, d, r, crgrp);
		}
	}
}
static int mkdir_mondata_subdir_alldom(struct kernfs_node *parent_kn,
				       struct rdt_resource *r,
				       struct rdtgroup *prgrp)
{
	struct rdt_domain *dom;
	int ret;

	list_for_each_entry(dom, &r->domains, list) {
		ret = mkdir_mondata_subdir(parent_kn, dom, r, prgrp);
		if (ret)
			return ret;
	}

	return 0;
}
/*
 * This creates a directory mon_data which contains the monitored data.
 *
 * mon_data has one directory for each domain which are named
 * in the format mon_<domain_name>_<domain_id>. For ex: A mon_data
 * with L3 domain looks as below:
 * ./mon_data:
 * mon_L3_00
 * mon_L3_01
 * mon_L3_02
 * ...
 *
 * Each domain directory has one file per event:
 * ./mon_L3_00/:
 *	llc_occupancy
 *
 */
static int mkdir_mondata_all(struct kernfs_node *parent_kn,
			     struct rdtgroup *prgrp,
			     struct kernfs_node **dest_kn)
{
	struct rdt_resource *r;
	struct kernfs_node *kn;
	int ret;

	/*
	 * Create the mon_data directory first.
	 */
	ret = mongroup_create_dir(parent_kn, prgrp, "mon_data", &kn);
	if (ret)
		return ret;

	if (dest_kn)
		*dest_kn = kn;

	/*
	 * Create the subdirectories for each domain. Note that all events
	 * in a domain like L3 are grouped into a resource whose domain is L3
	 */
	for_each_mon_enabled_rdt_resource(r) {
		ret = mkdir_mondata_subdir_alldom(kn, r, prgrp);
		if (ret)
			goto out_destroy;
	}

	return 0;

out_destroy:
	kernfs_remove(kn);
	return ret;
}

/**
 * cbm_ensure_valid - Enforce validity on provided CBM
 * @_val:	Candidate CBM
 * @r:		RDT resource to which the CBM belongs
 *
 * The provided CBM represents all cache portions available for use. This
 * may be represented by a bitmap that does not consist of contiguous ones
 * and thus be an invalid CBM.
 * Here the provided CBM is forced to be a valid CBM by only considering
 * the first set of contiguous bits as valid and clearing all other bits.
 * The intention here is to provide a valid default CBM with which a new
 * resource group is initialized. The user can follow this with a
 * modification to the CBM if the default does not satisfy the
 * requirements.
 */
static u32 cbm_ensure_valid(u32 _val, struct rdt_resource *r)
{
	unsigned int cbm_len = r->cache.cbm_len;
	unsigned long first_bit, zero_bit;
	unsigned long val = _val;

	if (!val)
		return 0;

	first_bit = find_first_bit(&val, cbm_len);
	zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

	/* Clear any remaining bits to ensure contiguous region */
	bitmap_clear(&val, zero_bit, cbm_len - zero_bit);
	return (u32)val;
}
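
/*
 * Worked example (illustrative values): with cbm_len = 8 and _val = 0xb3
 * (10110011b), find_first_bit() returns 0 and find_next_zero_bit() returns
 * 2, so bitmap_clear() clears bits 2..7 and the function returns 0x3, the
 * first contiguous run of set bits.
 */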

/*
 * Initialize cache resources per RDT domain
 *
 * Set the RDT domain up to start off with all usable allocations. That is,
 * all shareable and unused bits. All-zero CBM is invalid.
 */
static int __init_one_rdt_domain(struct rdt_domain *d, struct rdt_resource *r,
				 u32 closid)
{
	struct rdt_resource *r_cdp = NULL;
	struct rdt_domain *d_cdp = NULL;
	u32 used_b = 0, unused_b = 0;
	unsigned long tmp_cbm;
	enum rdtgrp_mode mode;
	u32 peer_ctl, *ctrl;
	int i;

	rdt_cdp_peer_get(r, d, &r_cdp, &d_cdp);
	d->have_new_ctrl = false;
	d->new_ctrl = r->cache.shareable_bits;
	used_b = r->cache.shareable_bits;
	ctrl = d->ctrl_val;
	for (i = 0; i < closids_supported(); i++, ctrl++) {
		if (closid_allocated(i) && i != closid) {
			mode = rdtgroup_mode_by_closid(i);
			if (mode == RDT_MODE_PSEUDO_LOCKSETUP)
				/*
				 * ctrl values for locksetup aren't relevant
				 * until the schemata is written, and the mode
				 * becomes RDT_MODE_PSEUDO_LOCKED.
				 */
				break;
			/*
			 * If CDP is active include peer domain's
			 * usage to ensure there is no overlap
			 * with an exclusive group.
			 */
			if (d_cdp)
				peer_ctl = d_cdp->ctrl_val[i];
			else
				peer_ctl = 0;
			used_b |= *ctrl | peer_ctl;
			if (mode == RDT_MODE_SHAREABLE)
				d->new_ctrl |= *ctrl | peer_ctl;
		}
	}
	if (d->plr && d->plr->cbm > 0)
		used_b |= d->plr->cbm;
	unused_b = used_b ^ (BIT_MASK(r->cache.cbm_len) - 1);
	unused_b &= BIT_MASK(r->cache.cbm_len) - 1;
	d->new_ctrl |= unused_b;
	/*
	 * Force the initial CBM to be valid, user can
	 * modify the CBM based on system availability.
	 */
	d->new_ctrl = cbm_ensure_valid(d->new_ctrl, r);
	/*
	 * Assign the u32 CBM to an unsigned long to ensure that
	 * bitmap_weight() does not access out-of-bound memory.
	 */
	tmp_cbm = d->new_ctrl;
	if (bitmap_weight(&tmp_cbm, r->cache.cbm_len) < r->cache.min_cbm_bits) {
		rdt_last_cmd_printf("No space on %s:%d\n", r->name, d->id);
		return -ENOSPC;
	}
	d->have_new_ctrl = true;

	return 0;
}
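
/*
 * Worked example (assumed values): cbm_len = 8, shareable_bits = 0xc0, and
 * one other allocated closid in exclusive mode owning CBM 0x0f. Then
 * used_b = 0xcf, unused_b = 0x30 and new_ctrl = 0xc0 | 0x30 = 0xf0, which
 * cbm_ensure_valid() leaves intact because bits 4..7 are contiguous.
 */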

/*
 * Initialize cache resources with default values.
 *
 * A new RDT group is being created on an allocation capable (CAT)
 * supporting system. Set this group up to start off with all usable
 * allocations.
 *
 * If there are no more shareable bits available on any domain then
 * the entire allocation will fail.
 */
static int rdtgroup_init_cat(struct rdt_resource *r, u32 closid)
{
	struct rdt_domain *d;
	int ret;

	list_for_each_entry(d, &r->domains, list) {
		ret = __init_one_rdt_domain(d, r, closid);
		if (ret < 0)
			return ret;
	}

	return 0;
}

/* Initialize MBA resource with default values. */
static void rdtgroup_init_mba(struct rdt_resource *r)
{
	struct rdt_domain *d;

	list_for_each_entry(d, &r->domains, list) {
		d->new_ctrl = is_mba_sc(r) ? MBA_MAX_MBPS : r->default_ctrl;
		d->have_new_ctrl = true;
	}
}
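
/*
 * Note (assumption, see the MBA resource setup code for the authoritative
 * values): with the "mba_MBps" software controller active, MBA_MAX_MBPS
 * selects effectively unthrottled bandwidth as the default; otherwise the
 * hardware default r->default_ctrl applies, typically 100 percent on a
 * linear-scale MBA.
 */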

/* Initialize the RDT group's allocations. */
static int rdtgroup_init_alloc(struct rdtgroup *rdtgrp)
{
	struct rdt_resource *r;
	int ret;

	for_each_alloc_enabled_rdt_resource(r) {
		if (r->rid == RDT_RESOURCE_MBA) {
			rdtgroup_init_mba(r);
		} else {
			ret = rdtgroup_init_cat(r, rdtgrp->closid);
			if (ret < 0)
				return ret;
		}

		ret = update_domains(r, rdtgrp->closid);
		if (ret < 0) {
			rdt_last_cmd_puts("Failed to initialize allocations\n");
			return ret;
		}
	}

	rdtgrp->mode = RDT_MODE_SHAREABLE;

	return 0;
}

static int mkdir_rdt_prepare(struct kernfs_node *parent_kn,
			     const char *name, umode_t mode,
			     enum rdt_group_type rtype, struct rdtgroup **r)
{
	struct rdtgroup *prdtgrp, *rdtgrp;
	struct kernfs_node *kn;
	uint files = 0;
	int ret;

	prdtgrp = rdtgroup_kn_lock_live(parent_kn);
	if (!prdtgrp) {
		ret = -ENODEV;
		goto out_unlock;
	}

	if (rtype == RDTMON_GROUP &&
	    (prdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
	     prdtgrp->mode == RDT_MODE_PSEUDO_LOCKED)) {
		ret = -EINVAL;
		rdt_last_cmd_puts("Pseudo-locking in progress\n");
		goto out_unlock;
	}

	/* allocate the rdtgroup. */
	rdtgrp = kzalloc(sizeof(*rdtgrp), GFP_KERNEL);
	if (!rdtgrp) {
		ret = -ENOSPC;
		rdt_last_cmd_puts("Kernel out of memory\n");
		goto out_unlock;
	}
	*r = rdtgrp;
	rdtgrp->mon.parent = prdtgrp;
	rdtgrp->type = rtype;
	INIT_LIST_HEAD(&rdtgrp->mon.crdtgrp_list);

	/* kernfs creates the directory for rdtgrp */
	kn = kernfs_create_dir(parent_kn, name, mode, rdtgrp);
	if (IS_ERR(kn)) {
		ret = PTR_ERR(kn);
		rdt_last_cmd_puts("kernfs create error\n");
		goto out_free_rgrp;
	}
	rdtgrp->kn = kn;

	/*
	 * kernfs_remove() will drop the reference count on "kn" which
	 * will free it. But we still need it to stick around for the
	 * rdtgroup_kn_unlock(kn) call below. Take one extra reference
	 * here, which will be dropped inside rdtgroup_kn_unlock().
	 */
	kernfs_get(kn);

	ret = rdtgroup_kn_set_ugid(kn);
	if (ret) {
		rdt_last_cmd_puts("kernfs perm error\n");
		goto out_destroy;
	}

	files = RFTYPE_BASE | BIT(RF_CTRLSHIFT + rtype);
	ret = rdtgroup_add_files(kn, files);
	if (ret) {
		rdt_last_cmd_puts("kernfs fill error\n");
		goto out_destroy;
	}

	if (rdt_mon_capable) {
		ret = alloc_rmid();
		if (ret < 0) {
			rdt_last_cmd_puts("Out of RMIDs\n");
			goto out_destroy;
		}
		rdtgrp->mon.rmid = ret;

		ret = mkdir_mondata_all(kn, rdtgrp, &rdtgrp->mon.mon_data_kn);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_idfree;
		}
	}
	kernfs_activate(kn);

	/*
	 * The caller unlocks the parent_kn upon success.
	 */
	return 0;

out_idfree:
	free_rmid(rdtgrp->mon.rmid);
out_destroy:
	kernfs_remove(rdtgrp->kn);
out_free_rgrp:
	kfree(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

static void mkdir_rdt_prepare_clean(struct rdtgroup *rgrp)
{
	kernfs_remove(rgrp->kn);
	free_rmid(rgrp->mon.rmid);
	kfree(rgrp);
}

/*
 * Create a monitor group under "mon_groups" directory of a control
 * and monitor group (ctrl_mon). This is a resource group
 * to monitor a subset of tasks and cpus in its parent ctrl_mon group.
 */
static int rdtgroup_mkdir_mon(struct kernfs_node *parent_kn,
			      const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp, *prgrp;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTMON_GROUP, &rdtgrp);
	if (ret)
		return ret;

	prgrp = rdtgrp->mon.parent;
	rdtgrp->closid = prgrp->closid;

	/*
	 * Add the rdtgrp to the list of rdtgrps the parent
	 * ctrl_mon group has to track.
	 */
	list_add_tail(&rdtgrp->mon.crdtgrp_list, &prgrp->mon.crdtgrp_list);

	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * These are rdtgroups created under the root directory. Can be used
 * to allocate and monitor resources.
 */
static int rdtgroup_mkdir_ctrl_mon(struct kernfs_node *parent_kn,
				   const char *name, umode_t mode)
{
	struct rdtgroup *rdtgrp;
	struct kernfs_node *kn;
	u32 closid;
	int ret;

	ret = mkdir_rdt_prepare(parent_kn, name, mode, RDTCTRL_GROUP, &rdtgrp);
	if (ret)
		return ret;

	kn = rdtgrp->kn;
	ret = closid_alloc();
	if (ret < 0) {
		rdt_last_cmd_puts("Out of CLOSIDs\n");
		goto out_common_fail;
	}
	closid = ret;
	ret = 0;

	rdtgrp->closid = closid;
	ret = rdtgroup_init_alloc(rdtgrp);
	if (ret < 0)
		goto out_id_free;

	list_add(&rdtgrp->rdtgroup_list, &rdt_all_groups);

	if (rdt_mon_capable) {
		/*
		 * Create an empty mon_groups directory to hold the subset
		 * of tasks and cpus to monitor.
		 */
		ret = mongroup_create_dir(kn, rdtgrp, "mon_groups", NULL);
		if (ret) {
			rdt_last_cmd_puts("kernfs subdir error\n");
			goto out_del_list;
		}
	}

	goto out_unlock;

out_del_list:
	list_del(&rdtgrp->rdtgroup_list);
out_id_free:
	closid_free(closid);
out_common_fail:
	mkdir_rdt_prepare_clean(rdtgrp);
out_unlock:
	rdtgroup_kn_unlock(parent_kn);
	return ret;
}

/*
 * We allow creating mon groups only within a directory called "mon_groups",
 * which is present in every ctrl_mon group. Check if this is a valid
 * "mon_groups" directory.
 *
 * 1. The directory should be named "mon_groups".
 * 2. The mon group itself should "not" be named "mon_groups".
 *    This makes sure the "mon_groups" directory always has a ctrl_mon group
 *    as parent.
 */
static bool is_mon_groups(struct kernfs_node *kn, const char *name)
{
	return (!strcmp(kn->name, "mon_groups") &&
		strcmp(name, "mon_groups"));
}
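
/*
 * Examples (illustrative paths): "mkdir /sys/fs/resctrl/g0/mon_groups/m0"
 * satisfies both rules, "mkdir /sys/fs/resctrl/g0/mon_groups/mon_groups"
 * fails rule 2, and a mkdir outside any "mon_groups" directory fails rule 1.
 */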

static int rdtgroup_mkdir(struct kernfs_node *parent_kn, const char *name,
			  umode_t mode)
{
	/* Do not accept '\n' to avoid an unparsable situation. */
	if (strchr(name, '\n'))
		return -EINVAL;

	/*
	 * If the parent directory is the root directory and RDT
	 * allocation is supported, add a control and monitoring
	 * subdirectory.
	 */
	if (rdt_alloc_capable && parent_kn == rdtgroup_default.kn)
		return rdtgroup_mkdir_ctrl_mon(parent_kn, name, mode);

	/*
	 * If RDT monitoring is supported and the parent directory is a valid
	 * "mon_groups" directory, add a monitoring subdirectory.
	 */
	if (rdt_mon_capable && is_mon_groups(parent_kn, name))
		return rdtgroup_mkdir_mon(parent_kn, name, mode);

	return -EPERM;
}
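
/*
 * Usage note (illustrative): "mkdir /sys/fs/resctrl/p0" creates a ctrl_mon
 * group and "mkdir /sys/fs/resctrl/p0/mon_groups/m0" a mon group under it;
 * mkdir anywhere else in the hierarchy fails with -EPERM.
 */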

static int rdtgroup_rmdir_mon(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			      cpumask_var_t tmpmask)
{
	struct rdtgroup *prdtgrp = rdtgrp->mon.parent;
	int cpu;

	/* Give any tasks back to the parent group */
	rdt_move_group_tasks(rdtgrp, prdtgrp, tmpmask);

	/* Update per cpu rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask)
		per_cpu(pqr_state.default_rmid, cpu) = prdtgrp->mon.rmid;
	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	rdtgrp->flags = RDT_DELETED;
	free_rmid(rdtgrp->mon.rmid);

	/*
	 * Remove the rdtgrp from the parent ctrl_mon group's list
	 */
	WARN_ON(list_empty(&prdtgrp->mon.crdtgrp_list));
	list_del(&rdtgrp->mon.crdtgrp_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);

	return 0;
}

static int rdtgroup_ctrl_remove(struct kernfs_node *kn,
				struct rdtgroup *rdtgrp)
{
	rdtgrp->flags = RDT_DELETED;
	list_del(&rdtgrp->rdtgroup_list);

	/*
	 * one extra hold on this, will drop when we kfree(rdtgrp)
	 * in rdtgroup_kn_unlock()
	 */
	kernfs_get(kn);
	kernfs_remove(rdtgrp->kn);
	return 0;
}

static int rdtgroup_rmdir_ctrl(struct kernfs_node *kn, struct rdtgroup *rdtgrp,
			       cpumask_var_t tmpmask)
{
	int cpu;

	/* Give any tasks back to the default group */
	rdt_move_group_tasks(rdtgrp, &rdtgroup_default, tmpmask);

	/* Give any CPUs back to the default group */
	cpumask_or(&rdtgroup_default.cpu_mask,
		   &rdtgroup_default.cpu_mask, &rdtgrp->cpu_mask);

	/* Update per cpu closid and rmid of the moved CPUs first */
	for_each_cpu(cpu, &rdtgrp->cpu_mask) {
		per_cpu(pqr_state.default_closid, cpu) = rdtgroup_default.closid;
		per_cpu(pqr_state.default_rmid, cpu) = rdtgroup_default.mon.rmid;
	}

	/*
	 * Update the MSR on moved CPUs and CPUs which have moved
	 * task running on them.
	 */
	cpumask_or(tmpmask, tmpmask, &rdtgrp->cpu_mask);
	update_closid_rmid(tmpmask, NULL);

	closid_free(rdtgrp->closid);
	free_rmid(rdtgrp->mon.rmid);

	rdtgroup_ctrl_remove(kn, rdtgrp);

	/*
	 * Free all the child monitor group rmids.
	 */
	free_all_child_rdtgrp(rdtgrp);

	return 0;
}

static int rdtgroup_rmdir(struct kernfs_node *kn)
{
	struct kernfs_node *parent_kn = kn->parent;
	struct rdtgroup *rdtgrp;
	cpumask_var_t tmpmask;
	int ret = 0;

	if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
		return -ENOMEM;

	rdtgrp = rdtgroup_kn_lock_live(kn);
	if (!rdtgrp) {
		ret = -EPERM;
		goto out;
	}

	/*
	 * If the rdtgroup is a ctrl_mon group and parent directory
	 * is the root directory, remove the ctrl_mon group.
	 *
	 * If the rdtgroup is a mon group and parent directory
	 * is a valid "mon_groups" directory, remove the mon group.
	 */
	if (rdtgrp->type == RDTCTRL_GROUP && parent_kn == rdtgroup_default.kn &&
	    rdtgrp != &rdtgroup_default) {
		if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP ||
		    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
			ret = rdtgroup_ctrl_remove(kn, rdtgrp);
		} else {
			ret = rdtgroup_rmdir_ctrl(kn, rdtgrp, tmpmask);
		}
	} else if (rdtgrp->type == RDTMON_GROUP &&
		   is_mon_groups(parent_kn, kn->name)) {
		ret = rdtgroup_rmdir_mon(kn, rdtgrp, tmpmask);
	} else {
		ret = -EPERM;
	}

out:
	rdtgroup_kn_unlock(kn);
	free_cpumask_var(tmpmask);
	return ret;
}

static int rdtgroup_show_options(struct seq_file *seq, struct kernfs_root *kf)
{
	if (rdt_resources_all[RDT_RESOURCE_L3DATA].alloc_enabled)
		seq_puts(seq, ",cdp");

	if (rdt_resources_all[RDT_RESOURCE_L2DATA].alloc_enabled)
		seq_puts(seq, ",cdpl2");

	if (is_mba_sc(&rdt_resources_all[RDT_RESOURCE_MBA]))
		seq_puts(seq, ",mba_MBps");

	return 0;
}
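
/*
 * Example (illustrative): with L3 CDP enabled, the resctrl entry in
 * /proc/mounts ends up looking something like
 *	resctrl /sys/fs/resctrl resctrl rw,relatime,cdp 0 0
 */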

static struct kernfs_syscall_ops rdtgroup_kf_syscall_ops = {
	.mkdir		= rdtgroup_mkdir,
	.rmdir		= rdtgroup_rmdir,
	.show_options	= rdtgroup_show_options,
};

static int __init rdtgroup_setup_root(void)
{
	int ret;

	rdt_root = kernfs_create_root(&rdtgroup_kf_syscall_ops,
				      KERNFS_ROOT_CREATE_DEACTIVATED |
				      KERNFS_ROOT_EXTRA_OPEN_PERM_CHECK,
				      &rdtgroup_default);
	if (IS_ERR(rdt_root))
		return PTR_ERR(rdt_root);

	mutex_lock(&rdtgroup_mutex);

	rdtgroup_default.closid = 0;
	rdtgroup_default.mon.rmid = 0;
	rdtgroup_default.type = RDTCTRL_GROUP;
	INIT_LIST_HEAD(&rdtgroup_default.mon.crdtgrp_list);

	list_add(&rdtgroup_default.rdtgroup_list, &rdt_all_groups);

	ret = rdtgroup_add_files(rdt_root->kn, RF_CTRL_BASE);
	if (ret) {
		kernfs_destroy_root(rdt_root);
		goto out;
	}

	rdtgroup_default.kn = rdt_root->kn;
	kernfs_activate(rdtgroup_default.kn);

out:
	mutex_unlock(&rdtgroup_mutex);

	return ret;
}

/*
 * rdtgroup_init - rdtgroup initialization
 *
 * Sets up the resctrl file system: sets up the root, creates the mount
 * point, registers the rdtgroup filesystem, and initializes files under
 * the root directory.
 *
 * Return: 0 on success or -errno
 */
int __init rdtgroup_init(void)
{
	int ret = 0;

	seq_buf_init(&last_cmd_status, last_cmd_status_buf,
		     sizeof(last_cmd_status_buf));

	ret = rdtgroup_setup_root();
	if (ret)
		return ret;

	ret = sysfs_create_mount_point(fs_kobj, "resctrl");
	if (ret)
		goto cleanup_root;

	ret = register_filesystem(&rdt_fs_type);
	if (ret)
		goto cleanup_mountpoint;

	/*
	 * Adding the resctrl debugfs directory here may not be ideal since
	 * it would let the resctrl debugfs directory appear on the debugfs
	 * filesystem before the resctrl filesystem is mounted.
	 * It may also be ok since that would enable debugging of RDT before
	 * resctrl is mounted.
	 * The reason why the debugfs directory is created here and not in
	 * rdt_get_tree() is because rdt_get_tree() takes rdtgroup_mutex and
	 * during the debugfs directory creation also &sb->s_type->i_mutex_key
	 * (the lockdep class of inode->i_rwsem). Other filesystem
	 * interactions (eg. SyS_getdents) have the lock ordering:
	 * &sb->s_type->i_mutex_key --> &mm->mmap_lock
	 * During mmap(), called with &mm->mmap_lock, rdtgroup_mutex is
	 * taken, creating the ordering:
	 * &mm->mmap_lock --> rdtgroup_mutex
	 * Together with the two orderings above this closes a cycle that
	 * lockdep would report as a possible deadlock. Creating the debugfs
	 * directory here avoids that dependency (file operations cannot
	 * actually occur until the filesystem is mounted, but there is no
	 * way to tell lockdep that).
	 */
	debugfs_resctrl = debugfs_create_dir("resctrl", NULL);

	return 0;

cleanup_mountpoint:
	sysfs_remove_mount_point(fs_kobj, "resctrl");
cleanup_root:
	kernfs_destroy_root(rdt_root);

	return ret;
}

void __exit rdtgroup_exit(void)
{
	debugfs_remove_recursive(debugfs_resctrl);
	unregister_filesystem(&rdt_fs_type);
	sysfs_remove_mount_point(fs_kobj, "resctrl");
	kernfs_destroy_root(rdt_root);
}