arch/x86/kernel/cpu/resctrl/ctrlmondata.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Resource Director Technology (RDT)
 * - Cache Allocation code.
 *
 * Copyright (C) 2016 Intel Corporation
 *
 * Authors:
 *    Fenghua Yu <fenghua.yu@intel.com>
 *    Tony Luck <tony.luck@intel.com>
 *
 * More information about RDT can be found in the Intel (R) x86 Architecture
 * Software Developer Manual, June 2016, volume 3, section 17.17.
 */

#define pr_fmt(fmt)     KBUILD_MODNAME ": " fmt

#include <linux/cpu.h>
#include <linux/kernfs.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include "internal.h"

/*
 * Check whether the MBA bandwidth percentage value is correct. The value is
 * checked against the minimum and maximum bandwidth values specified by the
 * hardware. The allocated bandwidth percentage is rounded up to the next
 * control step available on the hardware.
 */
static bool bw_validate(char *buf, unsigned long *data, struct rdt_resource *r)
{
        unsigned long bw;
        int ret;

        /*
         * Only linear delay values are supported for current Intel SKUs.
         */
        if (!r->membw.delay_linear && r->membw.arch_needs_linear) {
                rdt_last_cmd_puts("No support for non-linear MB domains\n");
                return false;
        }

        ret = kstrtoul(buf, 10, &bw);
        if (ret) {
                rdt_last_cmd_printf("Non-decimal digit in MB value %s\n", buf);
                return false;
        }

        if ((bw < r->membw.min_bw || bw > r->default_ctrl) &&
            !is_mba_sc(r)) {
                rdt_last_cmd_printf("MB value %ld out of range [%d,%d]\n", bw,
                                    r->membw.min_bw, r->default_ctrl);
                return false;
        }

        *data = roundup(bw, (unsigned long)r->membw.bw_gran);
        return true;
}

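/*
 * parse_bw - Stage a validated memory bandwidth value for one domain.
 *
 * Rejects a duplicate setting for a domain that already has a staged
 * value, then validates the bandwidth string via bw_validate() and
 * records the result in the domain's staged configuration.
 */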
int parse_bw(struct rdt_parse_data *data, struct resctrl_schema *s,
             struct rdt_domain *d)
{
        struct resctrl_staged_config *cfg;
        struct rdt_resource *r = s->res;
        unsigned long bw_val;

        cfg = &d->staged_config[s->conf_type];
        if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }

        if (!bw_validate(data->buf, &bw_val, r))
                return -EINVAL;
        cfg->new_ctrl = bw_val;
        cfg->have_new_ctrl = true;

        return 0;
}

/*
 * Check whether a cache bit mask is valid.
 * For Intel the SDM says:
 *      Please note that all (and only) contiguous '1' combinations
 *      are allowed (e.g. FFFFH, 0FF0H, 003CH, etc.).
 * Additionally Haswell requires at least two bits set.
 * AMD allows non-contiguous bitmasks.
 */
static bool cbm_validate(char *buf, u32 *data, struct rdt_resource *r)
{
        unsigned long first_bit, zero_bit, val;
        unsigned int cbm_len = r->cache.cbm_len;
        int ret;

        ret = kstrtoul(buf, 16, &val);
        if (ret) {
                rdt_last_cmd_printf("Non-hex character in the mask %s\n", buf);
                return false;
        }

        if ((!r->cache.arch_has_empty_bitmaps && val == 0) ||
            val > r->default_ctrl) {
                rdt_last_cmd_puts("Mask out of range\n");
                return false;
        }

        first_bit = find_first_bit(&val, cbm_len);
        zero_bit = find_next_zero_bit(&val, cbm_len, first_bit);

        /* Are non-contiguous bitmaps allowed? */
        if (!r->cache.arch_has_sparse_bitmaps &&
            (find_next_bit(&val, cbm_len, zero_bit) < cbm_len)) {
                rdt_last_cmd_printf("The mask %lx has non-consecutive 1-bits\n", val);
                return false;
        }

        if ((zero_bit - first_bit) < r->cache.min_cbm_bits) {
                rdt_last_cmd_printf("Need at least %d bits in the mask\n",
                                    r->cache.min_cbm_bits);
                return false;
        }

        *data = val;
        return true;
}

/*
 * Read one cache bit mask (hex). Check that it is valid for the current
 * resource type.
 */
int parse_cbm(struct rdt_parse_data *data, struct resctrl_schema *s,
              struct rdt_domain *d)
{
        struct rdtgroup *rdtgrp = data->rdtgrp;
        struct resctrl_staged_config *cfg;
        struct rdt_resource *r = s->res;
        u32 cbm_val;

        cfg = &d->staged_config[s->conf_type];
        if (cfg->have_new_ctrl) {
                rdt_last_cmd_printf("Duplicate domain %d\n", d->id);
                return -EINVAL;
        }

        /*
         * Cannot set up more than one pseudo-locked region in a cache
         * hierarchy.
         */
        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
            rdtgroup_pseudo_locked_in_hierarchy(d)) {
                rdt_last_cmd_puts("Pseudo-locked region in hierarchy\n");
                return -EINVAL;
        }

        if (!cbm_validate(data->buf, &cbm_val, r))
                return -EINVAL;

        if ((rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
             rdtgrp->mode == RDT_MODE_SHAREABLE) &&
            rdtgroup_cbm_overlaps_pseudo_locked(d, cbm_val)) {
                rdt_last_cmd_puts("CBM overlaps with pseudo-locked region\n");
                return -EINVAL;
        }

        /*
         * The CBM may not overlap with the CBM of another closid if
         * either is exclusive.
         */
        if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, true)) {
                rdt_last_cmd_puts("Overlaps with exclusive group\n");
                return -EINVAL;
        }

        if (rdtgroup_cbm_overlaps(s, d, cbm_val, rdtgrp->closid, false)) {
                if (rdtgrp->mode == RDT_MODE_EXCLUSIVE ||
                    rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                        rdt_last_cmd_puts("Overlaps with other group\n");
                        return -EINVAL;
                }
        }

        cfg->new_ctrl = cbm_val;
        cfg->have_new_ctrl = true;

        return 0;
}

/*
 * For each domain in this resource we expect to find a series of:
 *      id=mask
 * separated by ";". The "id" is in decimal, and must match one of
 * the "id"s for this resource.
 */
static int parse_line(char *line, struct resctrl_schema *s,
                      struct rdtgroup *rdtgrp)
{
        enum resctrl_conf_type t = s->conf_type;
        struct resctrl_staged_config *cfg;
        struct rdt_resource *r = s->res;
        struct rdt_parse_data data;
        char *dom = NULL, *id;
        struct rdt_domain *d;
        unsigned long dom_id;

        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP &&
            r->rid == RDT_RESOURCE_MBA) {
                rdt_last_cmd_puts("Cannot pseudo-lock MBA resource\n");
                return -EINVAL;
        }

next:
        if (!line || line[0] == '\0')
                return 0;
        dom = strsep(&line, ";");
        id = strsep(&dom, "=");
        if (!dom || kstrtoul(id, 10, &dom_id)) {
                rdt_last_cmd_puts("Missing '=' or non-numeric domain\n");
                return -EINVAL;
        }
        dom = strim(dom);
        list_for_each_entry(d, &r->domains, list) {
                if (d->id == dom_id) {
                        data.buf = dom;
                        data.rdtgrp = rdtgrp;
                        if (r->parse_ctrlval(&data, s, d))
                                return -EINVAL;
                        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                                cfg = &d->staged_config[t];
                                /*
                                 * We are in pseudo-locking setup mode and
                                 * just parsed a valid CBM that should be
                                 * pseudo-locked. Only one pseudo-locked
                                 * region is allowed per resource group and
                                 * domain, so do the required initialization
                                 * for a single region and return.
                                 */
                                rdtgrp->plr->s = s;
                                rdtgrp->plr->d = d;
                                rdtgrp->plr->cbm = cfg->new_ctrl;
                                d->plr = rdtgrp->plr;
                                return 0;
                        }
                        goto next;
                }
        }
        return -EINVAL;
}

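/*
 * Map a CLOSID to its index in the control array. With CDP enabled each
 * CLOSID occupies two consecutive slots: data at the even index and code
 * at the odd index.
 */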
static u32 get_config_index(u32 closid, enum resctrl_conf_type type)
{
        switch (type) {
        default:
        case CDP_NONE:
                return closid;
        case CDP_CODE:
                return closid * 2 + 1;
        case CDP_DATA:
                return closid * 2;
        }
}

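/*
 * Apply one staged control value: if it differs from the currently
 * cached value, update the cache and mark one CPU from the domain in
 * @cpu_mask for a later update. Returns true if a change was made.
 */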
static bool apply_config(struct rdt_hw_domain *hw_dom,
                         struct resctrl_staged_config *cfg, u32 idx,
                         cpumask_var_t cpu_mask, bool mba_sc)
{
        struct rdt_domain *dom = &hw_dom->d_resctrl;
        u32 *dc = !mba_sc ? hw_dom->ctrl_val : hw_dom->mbps_val;

        if (cfg->new_ctrl != dc[idx]) {
                cpumask_set_cpu(cpumask_any(&dom->cpu_mask), cpu_mask);
                dc[idx] = cfg->new_ctrl;

                return true;
        }

        return false;
}

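/*
 * Walk every domain of @r and apply any configuration staged for
 * @closid, then kick the affected CPUs to update their control MSRs.
 * The MSR index range in msr_param is widened to cover all changed
 * indices so a single IPI handler can write them in one pass.
 */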
int resctrl_arch_update_domains(struct rdt_resource *r, u32 closid)
{
        struct resctrl_staged_config *cfg;
        struct rdt_hw_domain *hw_dom;
        struct msr_param msr_param;
        enum resctrl_conf_type t;
        cpumask_var_t cpu_mask;
        struct rdt_domain *d;
        bool mba_sc;
        int cpu;
        u32 idx;

        if (!zalloc_cpumask_var(&cpu_mask, GFP_KERNEL))
                return -ENOMEM;

        mba_sc = is_mba_sc(r);
        msr_param.res = NULL;
        list_for_each_entry(d, &r->domains, list) {
                hw_dom = resctrl_to_arch_dom(d);
                for (t = 0; t < CDP_NUM_TYPES; t++) {
                        cfg = &hw_dom->d_resctrl.staged_config[t];
                        if (!cfg->have_new_ctrl)
                                continue;

                        idx = get_config_index(closid, t);
                        if (!apply_config(hw_dom, cfg, idx, cpu_mask, mba_sc))
                                continue;

                        if (!msr_param.res) {
                                msr_param.low = idx;
                                msr_param.high = msr_param.low + 1;
                                msr_param.res = r;
                        } else {
                                msr_param.low = min(msr_param.low, idx);
                                msr_param.high = max(msr_param.high, idx + 1);
                        }
                }
        }

        /*
         * Avoid writing the control MSR with control values when the MBA
         * software controller is enabled.
         */
        if (cpumask_empty(cpu_mask) || mba_sc)
                goto done;
        cpu = get_cpu();
        /* Update resource control MSR on this CPU if it's in cpu_mask. */
        if (cpumask_test_cpu(cpu, cpu_mask))
                rdt_ctrl_update(&msr_param);
        /* Update resource control MSR on other CPUs. */
        smp_call_function_many(cpu_mask, rdt_ctrl_update, &msr_param, 1);
        put_cpu();

done:
        free_cpumask_var(cpu_mask);

        return 0;
}

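/*
 * Look up the schema matching @resname and hand the rest of the line to
 * parse_line(). A resource only matches if the group's CLOSID is valid
 * for it.
 */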
static int rdtgroup_parse_resource(char *resname, char *tok,
                                   struct rdtgroup *rdtgrp)
{
        struct resctrl_schema *s;

        list_for_each_entry(s, &resctrl_schema_all, list) {
                if (!strcmp(resname, s->name) && rdtgrp->closid < s->num_closid)
                        return parse_line(tok, s, rdtgrp);
        }
        rdt_last_cmd_printf("Unknown or unsupported resource name '%s'\n", resname);
        return -EINVAL;
}

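/*
 * Handle a write to the "schemata" file. The input is one line per
 * resource, each of the form:
 *      resource:id=val;id=val;...
 * All values are staged first; the hardware is only updated once every
 * line has parsed successfully.
 */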
ssize_t rdtgroup_schemata_write(struct kernfs_open_file *of,
                                char *buf, size_t nbytes, loff_t off)
{
        struct resctrl_schema *s;
        struct rdtgroup *rdtgrp;
        struct rdt_domain *dom;
        struct rdt_resource *r;
        char *tok, *resname;
        int ret = 0;

        /* Valid input requires a trailing newline */
        if (nbytes == 0 || buf[nbytes - 1] != '\n')
                return -EINVAL;
        buf[nbytes - 1] = '\0';

        cpus_read_lock();
        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
                rdtgroup_kn_unlock(of->kn);
                cpus_read_unlock();
                return -ENOENT;
        }
        rdt_last_cmd_clear();

        /*
         * No changes to pseudo-locked region allowed. It has to be removed
         * and re-created instead.
         */
        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                ret = -EINVAL;
                rdt_last_cmd_puts("Resource group is pseudo-locked\n");
                goto out;
        }

        list_for_each_entry(s, &resctrl_schema_all, list) {
                list_for_each_entry(dom, &s->res->domains, list)
                        memset(dom->staged_config, 0, sizeof(dom->staged_config));
        }

        while ((tok = strsep(&buf, "\n")) != NULL) {
                resname = strim(strsep(&tok, ":"));
                if (!tok) {
                        rdt_last_cmd_puts("Missing ':'\n");
                        ret = -EINVAL;
                        goto out;
                }
                if (tok[0] == '\0') {
                        rdt_last_cmd_printf("Missing '%s' value\n", resname);
                        ret = -EINVAL;
                        goto out;
                }
                ret = rdtgroup_parse_resource(resname, tok, rdtgrp);
                if (ret)
                        goto out;
        }

        list_for_each_entry(s, &resctrl_schema_all, list) {
                r = s->res;
                ret = resctrl_arch_update_domains(r, rdtgrp->closid);
                if (ret)
                        goto out;
        }

        if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                /*
                 * If pseudo-locking fails we keep the resource group in
                 * mode RDT_MODE_PSEUDO_LOCKSETUP with its class of service
                 * active and updated for just the domain the pseudo-locked
                 * region was requested for.
                 */
                ret = rdtgroup_pseudo_lock_create(rdtgrp);
        }

out:
        rdtgroup_kn_unlock(of->kn);
        cpus_read_unlock();
        return ret ?: nbytes;
}

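/*
 * Return the current control value for (@closid, @type) in domain @d.
 * When the MBA software controller is active the value is read from the
 * bandwidth-in-MBps cache instead of the raw control values.
 */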
u32 resctrl_arch_get_config(struct rdt_resource *r, struct rdt_domain *d,
                            u32 closid, enum resctrl_conf_type type)
{
        struct rdt_hw_domain *hw_dom = resctrl_to_arch_dom(d);
        u32 idx = get_config_index(closid, type);

        if (!is_mba_sc(r))
                return hw_dom->ctrl_val[idx];
        return hw_dom->mbps_val[idx];
}

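/*
 * Print one schemata line for @schema: the resource name followed by a
 * ";"-separated "id=value" entry for each domain.
 */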
static void show_doms(struct seq_file *s, struct resctrl_schema *schema, int closid)
{
        struct rdt_resource *r = schema->res;
        struct rdt_domain *dom;
        bool sep = false;
        u32 ctrl_val;

        seq_printf(s, "%*s:", max_name_width, schema->name);
        list_for_each_entry(dom, &r->domains, list) {
                if (sep)
                        seq_puts(s, ";");

                ctrl_val = resctrl_arch_get_config(r, dom, closid,
                                                   schema->conf_type);
                seq_printf(s, r->format_str, dom->id, max_data_width,
                           ctrl_val);
                sep = true;
        }
        seq_puts(s, "\n");
}

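/*
 * Handle a read of the "schemata" file. Groups in pseudo-locking setup
 * mode report "uninitialized"; pseudo-locked groups report only the
 * locked region; all other groups report every supported schema.
 */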
int rdtgroup_schemata_show(struct kernfs_open_file *of,
                           struct seq_file *s, void *v)
{
        struct resctrl_schema *schema;
        struct rdtgroup *rdtgrp;
        int ret = 0;
        u32 closid;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (rdtgrp) {
                if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKSETUP) {
                        list_for_each_entry(schema, &resctrl_schema_all, list) {
                                seq_printf(s, "%s:uninitialized\n", schema->name);
                        }
                } else if (rdtgrp->mode == RDT_MODE_PSEUDO_LOCKED) {
                        if (!rdtgrp->plr->d) {
                                rdt_last_cmd_clear();
                                rdt_last_cmd_puts("Cache domain offline\n");
                                ret = -ENODEV;
                        } else {
                                seq_printf(s, "%s:%d=%x\n",
                                           rdtgrp->plr->s->res->name,
                                           rdtgrp->plr->d->id,
                                           rdtgrp->plr->cbm);
                        }
                } else {
                        closid = rdtgrp->closid;
                        list_for_each_entry(schema, &resctrl_schema_all, list) {
                                if (closid < schema->num_closid)
                                        show_doms(s, schema, closid);
                        }
                }
        } else {
                ret = -ENOENT;
        }
        rdtgroup_kn_unlock(of->kn);
        return ret;
}

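/*
 * Read a monitoring event counter: fill in the rmid_read request and
 * run mon_event_count() on one of the CPUs in @d->cpu_mask, since the
 * counters must be read from a CPU in the domain.
 */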
void mon_event_read(struct rmid_read *rr, struct rdt_resource *r,
                    struct rdt_domain *d, struct rdtgroup *rdtgrp,
                    int evtid, int first)
{
        /*
         * Set up the parameters to send to the IPI to read the data.
         */
        rr->rgrp = rdtgrp;
        rr->evtid = evtid;
        rr->r = r;
        rr->d = d;
        rr->val = 0;
        rr->first = first;

        smp_call_function_any(&d->cpu_mask, mon_event_count, rr, 1);
}

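/*
 * Show handler for the per-domain monitoring files. The resource, domain
 * and event ids are decoded from the kernfs node's private data, the
 * event is read, and the scaled value (or an error string) is printed.
 */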
int rdtgroup_mondata_show(struct seq_file *m, void *arg)
{
        struct kernfs_open_file *of = m->private;
        struct rdt_hw_resource *hw_res;
        u32 resid, evtid, domid;
        struct rdtgroup *rdtgrp;
        struct rdt_resource *r;
        union mon_data_bits md;
        struct rdt_domain *d;
        struct rmid_read rr;
        int ret = 0;

        rdtgrp = rdtgroup_kn_lock_live(of->kn);
        if (!rdtgrp) {
                ret = -ENOENT;
                goto out;
        }

        md.priv = of->kn->priv;
        resid = md.u.rid;
        domid = md.u.domid;
        evtid = md.u.evtid;

        hw_res = &rdt_resources_all[resid];
        r = &hw_res->r_resctrl;
        d = rdt_find_domain(r, domid, NULL);
        if (IS_ERR_OR_NULL(d)) {
                ret = -ENOENT;
                goto out;
        }

        mon_event_read(&rr, r, d, rdtgrp, evtid, false);

        if (rr.val & RMID_VAL_ERROR)
                seq_puts(m, "Error\n");
        else if (rr.val & RMID_VAL_UNAVAIL)
                seq_puts(m, "Unavailable\n");
        else
                seq_printf(m, "%llu\n", rr.val * hw_res->mon_scale);

out:
        rdtgroup_kn_unlock(of->kn);
        return ret;
}