1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015 Linaro Limited. All rights reserved.
4 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
7 #include <linux/pid_namespace.h>
8 #include <linux/pm_runtime.h>
9 #include <linux/sysfs.h>
10 #include "coresight-etm4x.h"
11 #include "coresight-priv.h"
12 #include "coresight-syscfg.h"
14 static int etm4_set_mode_exclude(struct etmv4_drvdata *drvdata, bool exclude)
17 struct etmv4_config *config = &drvdata->config;
19 idx = config->addr_idx;
22 * TRCACATRn.TYPE bit[1:0]: type of comparison
23 * the trace unit performs
25 if (BMVAL(config->addr_acc[idx], 0, 1) == ETM_INSTR_ADDR) {
30 * We are performing instruction address comparison. Set the
31 * relevant bit of ViewInst Include/Exclude Control register
32 * for corresponding address comparator pair.
34 if (config->addr_type[idx] != ETM_ADDR_TYPE_RANGE ||
35 config->addr_type[idx + 1] != ETM_ADDR_TYPE_RANGE)
38 if (exclude == true) {
40 * Set exclude bit and unset the include bit
41 * corresponding to comparator pair
43 config->viiectlr |= BIT(idx / 2 + 16);
44 config->viiectlr &= ~BIT(idx / 2);
47 * Set include bit and unset exclude bit
48 * corresponding to comparator pair
50 config->viiectlr |= BIT(idx / 2);
51 config->viiectlr &= ~BIT(idx / 2 + 16);
57 static ssize_t nr_pe_cmp_show(struct device *dev,
58 struct device_attribute *attr,
62 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
64 val = drvdata->nr_pe_cmp;
65 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
67 static DEVICE_ATTR_RO(nr_pe_cmp);
69 static ssize_t nr_addr_cmp_show(struct device *dev,
70 struct device_attribute *attr,
74 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
76 val = drvdata->nr_addr_cmp;
77 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
79 static DEVICE_ATTR_RO(nr_addr_cmp);
81 static ssize_t nr_cntr_show(struct device *dev,
82 struct device_attribute *attr,
86 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
88 val = drvdata->nr_cntr;
89 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
91 static DEVICE_ATTR_RO(nr_cntr);
93 static ssize_t nr_ext_inp_show(struct device *dev,
94 struct device_attribute *attr,
98 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
100 val = drvdata->nr_ext_inp;
101 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
103 static DEVICE_ATTR_RO(nr_ext_inp);
105 static ssize_t numcidc_show(struct device *dev,
106 struct device_attribute *attr,
110 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
112 val = drvdata->numcidc;
113 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
115 static DEVICE_ATTR_RO(numcidc);
117 static ssize_t numvmidc_show(struct device *dev,
118 struct device_attribute *attr,
122 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
124 val = drvdata->numvmidc;
125 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
127 static DEVICE_ATTR_RO(numvmidc);
129 static ssize_t nrseqstate_show(struct device *dev,
130 struct device_attribute *attr,
134 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
136 val = drvdata->nrseqstate;
137 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
139 static DEVICE_ATTR_RO(nrseqstate);
141 static ssize_t nr_resource_show(struct device *dev,
142 struct device_attribute *attr,
146 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
148 val = drvdata->nr_resource;
149 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
151 static DEVICE_ATTR_RO(nr_resource);
153 static ssize_t nr_ss_cmp_show(struct device *dev,
154 struct device_attribute *attr,
158 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
160 val = drvdata->nr_ss_cmp;
161 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
163 static DEVICE_ATTR_RO(nr_ss_cmp);
165 static ssize_t reset_store(struct device *dev,
166 struct device_attribute *attr,
167 const char *buf, size_t size)
171 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
172 struct etmv4_config *config = &drvdata->config;
174 if (kstrtoul(buf, 16, &val))
177 spin_lock(&drvdata->spinlock);
181 /* Disable data tracing: do not trace load and store data transfers */
182 config->mode &= ~(ETM_MODE_LOAD | ETM_MODE_STORE);
183 config->cfg &= ~(BIT(1) | BIT(2));
185 /* Disable data value and data address tracing */
186 config->mode &= ~(ETM_MODE_DATA_TRACE_ADDR |
187 ETM_MODE_DATA_TRACE_VAL);
188 config->cfg &= ~(BIT(16) | BIT(17));
190 /* Disable all events tracing */
191 config->eventctrl0 = 0x0;
192 config->eventctrl1 = 0x0;
194 /* Disable timestamp event */
195 config->ts_ctrl = 0x0;
197 /* Disable stalling */
198 config->stall_ctrl = 0x0;
200 /* Reset trace synchronization period to 2^8 = 256 bytes*/
201 if (drvdata->syncpr == false)
202 config->syncfreq = 0x8;
205 * Enable ViewInst to trace everything with start-stop logic in
206 * started state. ARM recommends start-stop logic is set before
209 config->vinst_ctrl = BIT(0);
210 if (drvdata->nr_addr_cmp > 0) {
211 config->mode |= ETM_MODE_VIEWINST_STARTSTOP;
212 /* SSSTATUS, bit[9] */
213 config->vinst_ctrl |= BIT(9);
216 /* No address range filtering for ViewInst */
217 config->viiectlr = 0x0;
219 /* No start-stop filtering for ViewInst */
220 config->vissctlr = 0x0;
221 config->vipcssctlr = 0x0;
223 /* Disable seq events */
224 for (i = 0; i < drvdata->nrseqstate-1; i++)
225 config->seq_ctrl[i] = 0x0;
226 config->seq_rst = 0x0;
227 config->seq_state = 0x0;
229 /* Disable external input events */
230 config->ext_inp = 0x0;
232 config->cntr_idx = 0x0;
233 for (i = 0; i < drvdata->nr_cntr; i++) {
234 config->cntrldvr[i] = 0x0;
235 config->cntr_ctrl[i] = 0x0;
236 config->cntr_val[i] = 0x0;
239 config->res_idx = 0x0;
240 for (i = 2; i < 2 * drvdata->nr_resource; i++)
241 config->res_ctrl[i] = 0x0;
243 config->ss_idx = 0x0;
244 for (i = 0; i < drvdata->nr_ss_cmp; i++) {
245 config->ss_ctrl[i] = 0x0;
246 config->ss_pe_cmp[i] = 0x0;
249 config->addr_idx = 0x0;
250 for (i = 0; i < drvdata->nr_addr_cmp * 2; i++) {
251 config->addr_val[i] = 0x0;
252 config->addr_acc[i] = 0x0;
253 config->addr_type[i] = ETM_ADDR_TYPE_NONE;
256 config->ctxid_idx = 0x0;
257 for (i = 0; i < drvdata->numcidc; i++)
258 config->ctxid_pid[i] = 0x0;
260 config->ctxid_mask0 = 0x0;
261 config->ctxid_mask1 = 0x0;
263 config->vmid_idx = 0x0;
264 for (i = 0; i < drvdata->numvmidc; i++)
265 config->vmid_val[i] = 0x0;
266 config->vmid_mask0 = 0x0;
267 config->vmid_mask1 = 0x0;
269 drvdata->trcid = drvdata->cpu + 1;
271 spin_unlock(&drvdata->spinlock);
273 cscfg_csdev_reset_feats(to_coresight_device(dev));
277 static DEVICE_ATTR_WO(reset);
279 static ssize_t mode_show(struct device *dev,
280 struct device_attribute *attr,
284 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
285 struct etmv4_config *config = &drvdata->config;
288 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
291 static ssize_t mode_store(struct device *dev,
292 struct device_attribute *attr,
293 const char *buf, size_t size)
295 unsigned long val, mode;
296 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
297 struct etmv4_config *config = &drvdata->config;
299 if (kstrtoul(buf, 16, &val))
302 spin_lock(&drvdata->spinlock);
303 config->mode = val & ETMv4_MODE_ALL;
305 if (drvdata->instrp0 == true) {
306 /* start by clearing instruction P0 field */
307 config->cfg &= ~(BIT(1) | BIT(2));
308 if (config->mode & ETM_MODE_LOAD)
309 /* 0b01 Trace load instructions as P0 instructions */
310 config->cfg |= BIT(1);
311 if (config->mode & ETM_MODE_STORE)
312 /* 0b10 Trace store instructions as P0 instructions */
313 config->cfg |= BIT(2);
314 if (config->mode & ETM_MODE_LOAD_STORE)
316 * 0b11 Trace load and store instructions
319 config->cfg |= BIT(1) | BIT(2);
322 /* bit[3], Branch broadcast mode */
323 if ((config->mode & ETM_MODE_BB) && (drvdata->trcbb == true))
324 config->cfg |= BIT(3);
326 config->cfg &= ~BIT(3);
328 /* bit[4], Cycle counting instruction trace bit */
329 if ((config->mode & ETMv4_MODE_CYCACC) &&
330 (drvdata->trccci == true))
331 config->cfg |= BIT(4);
333 config->cfg &= ~BIT(4);
335 /* bit[6], Context ID tracing bit */
336 if ((config->mode & ETMv4_MODE_CTXID) && (drvdata->ctxid_size))
337 config->cfg |= BIT(6);
339 config->cfg &= ~BIT(6);
341 if ((config->mode & ETM_MODE_VMID) && (drvdata->vmid_size))
342 config->cfg |= BIT(7);
344 config->cfg &= ~BIT(7);
346 /* bits[10:8], Conditional instruction tracing bit */
347 mode = ETM_MODE_COND(config->mode);
348 if (drvdata->trccond == true) {
349 config->cfg &= ~(BIT(8) | BIT(9) | BIT(10));
350 config->cfg |= mode << 8;
353 /* bit[11], Global timestamp tracing bit */
354 if ((config->mode & ETMv4_MODE_TIMESTAMP) && (drvdata->ts_size))
355 config->cfg |= BIT(11);
357 config->cfg &= ~BIT(11);
359 /* bit[12], Return stack enable bit */
360 if ((config->mode & ETM_MODE_RETURNSTACK) &&
361 (drvdata->retstack == true))
362 config->cfg |= BIT(12);
364 config->cfg &= ~BIT(12);
366 /* bits[14:13], Q element enable field */
367 mode = ETM_MODE_QELEM(config->mode);
368 /* start by clearing QE bits */
369 config->cfg &= ~(BIT(13) | BIT(14));
371 * if supported, Q elements with instruction counts are enabled.
372 * Always set the low bit for any requested mode. Valid combos are
373 * 0b00, 0b01 and 0b11.
375 if (mode && drvdata->q_support)
376 config->cfg |= BIT(13);
378 * if supported, Q elements with and without instruction
381 if ((mode & BIT(1)) && (drvdata->q_support & BIT(1)))
382 config->cfg |= BIT(14);
384 /* bit[11], AMBA Trace Bus (ATB) trigger enable bit */
385 if ((config->mode & ETM_MODE_ATB_TRIGGER) &&
386 (drvdata->atbtrig == true))
387 config->eventctrl1 |= BIT(11);
389 config->eventctrl1 &= ~BIT(11);
391 /* bit[12], Low-power state behavior override bit */
392 if ((config->mode & ETM_MODE_LPOVERRIDE) &&
393 (drvdata->lpoverride == true))
394 config->eventctrl1 |= BIT(12);
396 config->eventctrl1 &= ~BIT(12);
398 /* bit[8], Instruction stall bit */
399 if ((config->mode & ETM_MODE_ISTALL_EN) && (drvdata->stallctl == true))
400 config->stall_ctrl |= BIT(8);
402 config->stall_ctrl &= ~BIT(8);
404 /* bit[10], Prioritize instruction trace bit */
405 if (config->mode & ETM_MODE_INSTPRIO)
406 config->stall_ctrl |= BIT(10);
408 config->stall_ctrl &= ~BIT(10);
410 /* bit[13], Trace overflow prevention bit */
411 if ((config->mode & ETM_MODE_NOOVERFLOW) &&
412 (drvdata->nooverflow == true))
413 config->stall_ctrl |= BIT(13);
415 config->stall_ctrl &= ~BIT(13);
417 /* bit[9] Start/stop logic control bit */
418 if (config->mode & ETM_MODE_VIEWINST_STARTSTOP)
419 config->vinst_ctrl |= BIT(9);
421 config->vinst_ctrl &= ~BIT(9);
423 /* bit[10], Whether a trace unit must trace a Reset exception */
424 if (config->mode & ETM_MODE_TRACE_RESET)
425 config->vinst_ctrl |= BIT(10);
427 config->vinst_ctrl &= ~BIT(10);
429 /* bit[11], Whether a trace unit must trace a system error exception */
430 if ((config->mode & ETM_MODE_TRACE_ERR) &&
431 (drvdata->trc_error == true))
432 config->vinst_ctrl |= BIT(11);
434 config->vinst_ctrl &= ~BIT(11);
436 if (config->mode & (ETM_MODE_EXCL_KERN | ETM_MODE_EXCL_USER))
437 etm4_config_trace_mode(config);
439 spin_unlock(&drvdata->spinlock);
443 static DEVICE_ATTR_RW(mode);
445 static ssize_t pe_show(struct device *dev,
446 struct device_attribute *attr,
450 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
451 struct etmv4_config *config = &drvdata->config;
453 val = config->pe_sel;
454 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
457 static ssize_t pe_store(struct device *dev,
458 struct device_attribute *attr,
459 const char *buf, size_t size)
462 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
463 struct etmv4_config *config = &drvdata->config;
465 if (kstrtoul(buf, 16, &val))
468 spin_lock(&drvdata->spinlock);
469 if (val > drvdata->nr_pe) {
470 spin_unlock(&drvdata->spinlock);
474 config->pe_sel = val;
475 spin_unlock(&drvdata->spinlock);
478 static DEVICE_ATTR_RW(pe);
480 static ssize_t event_show(struct device *dev,
481 struct device_attribute *attr,
485 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
486 struct etmv4_config *config = &drvdata->config;
488 val = config->eventctrl0;
489 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
492 static ssize_t event_store(struct device *dev,
493 struct device_attribute *attr,
494 const char *buf, size_t size)
497 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
498 struct etmv4_config *config = &drvdata->config;
500 if (kstrtoul(buf, 16, &val))
503 spin_lock(&drvdata->spinlock);
504 switch (drvdata->nr_event) {
506 /* EVENT0, bits[7:0] */
507 config->eventctrl0 = val & 0xFF;
510 /* EVENT1, bits[15:8] */
511 config->eventctrl0 = val & 0xFFFF;
514 /* EVENT2, bits[23:16] */
515 config->eventctrl0 = val & 0xFFFFFF;
518 /* EVENT3, bits[31:24] */
519 config->eventctrl0 = val;
524 spin_unlock(&drvdata->spinlock);
527 static DEVICE_ATTR_RW(event);
529 static ssize_t event_instren_show(struct device *dev,
530 struct device_attribute *attr,
534 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
535 struct etmv4_config *config = &drvdata->config;
537 val = BMVAL(config->eventctrl1, 0, 3);
538 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
541 static ssize_t event_instren_store(struct device *dev,
542 struct device_attribute *attr,
543 const char *buf, size_t size)
546 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
547 struct etmv4_config *config = &drvdata->config;
549 if (kstrtoul(buf, 16, &val))
552 spin_lock(&drvdata->spinlock);
553 /* start by clearing all instruction event enable bits */
554 config->eventctrl1 &= ~(BIT(0) | BIT(1) | BIT(2) | BIT(3));
555 switch (drvdata->nr_event) {
557 /* generate Event element for event 1 */
558 config->eventctrl1 |= val & BIT(1);
561 /* generate Event element for event 1 and 2 */
562 config->eventctrl1 |= val & (BIT(0) | BIT(1));
565 /* generate Event element for event 1, 2 and 3 */
566 config->eventctrl1 |= val & (BIT(0) | BIT(1) | BIT(2));
569 /* generate Event element for all 4 events */
570 config->eventctrl1 |= val & 0xF;
575 spin_unlock(&drvdata->spinlock);
578 static DEVICE_ATTR_RW(event_instren);
580 static ssize_t event_ts_show(struct device *dev,
581 struct device_attribute *attr,
585 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
586 struct etmv4_config *config = &drvdata->config;
588 val = config->ts_ctrl;
589 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
592 static ssize_t event_ts_store(struct device *dev,
593 struct device_attribute *attr,
594 const char *buf, size_t size)
597 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
598 struct etmv4_config *config = &drvdata->config;
600 if (kstrtoul(buf, 16, &val))
602 if (!drvdata->ts_size)
605 config->ts_ctrl = val & ETMv4_EVENT_MASK;
608 static DEVICE_ATTR_RW(event_ts);
610 static ssize_t syncfreq_show(struct device *dev,
611 struct device_attribute *attr,
615 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
616 struct etmv4_config *config = &drvdata->config;
618 val = config->syncfreq;
619 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
622 static ssize_t syncfreq_store(struct device *dev,
623 struct device_attribute *attr,
624 const char *buf, size_t size)
627 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
628 struct etmv4_config *config = &drvdata->config;
630 if (kstrtoul(buf, 16, &val))
632 if (drvdata->syncpr == true)
635 config->syncfreq = val & ETMv4_SYNC_MASK;
638 static DEVICE_ATTR_RW(syncfreq);
640 static ssize_t cyc_threshold_show(struct device *dev,
641 struct device_attribute *attr,
645 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
646 struct etmv4_config *config = &drvdata->config;
648 val = config->ccctlr;
649 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
652 static ssize_t cyc_threshold_store(struct device *dev,
653 struct device_attribute *attr,
654 const char *buf, size_t size)
657 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
658 struct etmv4_config *config = &drvdata->config;
660 if (kstrtoul(buf, 16, &val))
663 /* mask off max threshold before checking min value */
664 val &= ETM_CYC_THRESHOLD_MASK;
665 if (val < drvdata->ccitmin)
668 config->ccctlr = val;
671 static DEVICE_ATTR_RW(cyc_threshold);
673 static ssize_t bb_ctrl_show(struct device *dev,
674 struct device_attribute *attr,
678 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
679 struct etmv4_config *config = &drvdata->config;
681 val = config->bb_ctrl;
682 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
685 static ssize_t bb_ctrl_store(struct device *dev,
686 struct device_attribute *attr,
687 const char *buf, size_t size)
690 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
691 struct etmv4_config *config = &drvdata->config;
693 if (kstrtoul(buf, 16, &val))
695 if (drvdata->trcbb == false)
697 if (!drvdata->nr_addr_cmp)
701 * Bit[8] controls include(1) / exclude(0), bits[0-7] select
702 * individual range comparators. If include then at least 1
703 * range must be selected.
705 if ((val & BIT(8)) && (BMVAL(val, 0, 7) == 0))
708 config->bb_ctrl = val & GENMASK(8, 0);
711 static DEVICE_ATTR_RW(bb_ctrl);
713 static ssize_t event_vinst_show(struct device *dev,
714 struct device_attribute *attr,
718 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
719 struct etmv4_config *config = &drvdata->config;
721 val = config->vinst_ctrl & ETMv4_EVENT_MASK;
722 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
725 static ssize_t event_vinst_store(struct device *dev,
726 struct device_attribute *attr,
727 const char *buf, size_t size)
730 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
731 struct etmv4_config *config = &drvdata->config;
733 if (kstrtoul(buf, 16, &val))
736 spin_lock(&drvdata->spinlock);
737 val &= ETMv4_EVENT_MASK;
738 config->vinst_ctrl &= ~ETMv4_EVENT_MASK;
739 config->vinst_ctrl |= val;
740 spin_unlock(&drvdata->spinlock);
743 static DEVICE_ATTR_RW(event_vinst);
745 static ssize_t s_exlevel_vinst_show(struct device *dev,
746 struct device_attribute *attr,
750 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
751 struct etmv4_config *config = &drvdata->config;
753 val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_S_MASK) >> TRCVICTLR_EXLEVEL_S_SHIFT;
754 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
757 static ssize_t s_exlevel_vinst_store(struct device *dev,
758 struct device_attribute *attr,
759 const char *buf, size_t size)
762 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
763 struct etmv4_config *config = &drvdata->config;
765 if (kstrtoul(buf, 16, &val))
768 spin_lock(&drvdata->spinlock);
769 /* clear all EXLEVEL_S bits */
770 config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_S_MASK);
771 /* enable instruction tracing for corresponding exception level */
772 val &= drvdata->s_ex_level;
773 config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_S_SHIFT);
774 spin_unlock(&drvdata->spinlock);
777 static DEVICE_ATTR_RW(s_exlevel_vinst);
779 static ssize_t ns_exlevel_vinst_show(struct device *dev,
780 struct device_attribute *attr,
784 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
785 struct etmv4_config *config = &drvdata->config;
787 /* EXLEVEL_NS, bits[23:20] */
788 val = (config->vinst_ctrl & TRCVICTLR_EXLEVEL_NS_MASK) >> TRCVICTLR_EXLEVEL_NS_SHIFT;
789 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
792 static ssize_t ns_exlevel_vinst_store(struct device *dev,
793 struct device_attribute *attr,
794 const char *buf, size_t size)
797 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
798 struct etmv4_config *config = &drvdata->config;
800 if (kstrtoul(buf, 16, &val))
803 spin_lock(&drvdata->spinlock);
804 /* clear EXLEVEL_NS bits */
805 config->vinst_ctrl &= ~(TRCVICTLR_EXLEVEL_NS_MASK);
806 /* enable instruction tracing for corresponding exception level */
807 val &= drvdata->ns_ex_level;
808 config->vinst_ctrl |= (val << TRCVICTLR_EXLEVEL_NS_SHIFT);
809 spin_unlock(&drvdata->spinlock);
812 static DEVICE_ATTR_RW(ns_exlevel_vinst);
814 static ssize_t addr_idx_show(struct device *dev,
815 struct device_attribute *attr,
819 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
820 struct etmv4_config *config = &drvdata->config;
822 val = config->addr_idx;
823 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
826 static ssize_t addr_idx_store(struct device *dev,
827 struct device_attribute *attr,
828 const char *buf, size_t size)
831 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
832 struct etmv4_config *config = &drvdata->config;
834 if (kstrtoul(buf, 16, &val))
836 if (val >= drvdata->nr_addr_cmp * 2)
840 * Use spinlock to ensure index doesn't change while it gets
841 * dereferenced multiple times within a spinlock block elsewhere.
843 spin_lock(&drvdata->spinlock);
844 config->addr_idx = val;
845 spin_unlock(&drvdata->spinlock);
848 static DEVICE_ATTR_RW(addr_idx);
850 static ssize_t addr_instdatatype_show(struct device *dev,
851 struct device_attribute *attr,
856 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
857 struct etmv4_config *config = &drvdata->config;
859 spin_lock(&drvdata->spinlock);
860 idx = config->addr_idx;
861 val = BMVAL(config->addr_acc[idx], 0, 1);
862 len = scnprintf(buf, PAGE_SIZE, "%s\n",
863 val == ETM_INSTR_ADDR ? "instr" :
864 (val == ETM_DATA_LOAD_ADDR ? "data_load" :
865 (val == ETM_DATA_STORE_ADDR ? "data_store" :
866 "data_load_store")));
867 spin_unlock(&drvdata->spinlock);
871 static ssize_t addr_instdatatype_store(struct device *dev,
872 struct device_attribute *attr,
873 const char *buf, size_t size)
877 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
878 struct etmv4_config *config = &drvdata->config;
880 if (strlen(buf) >= 20)
882 if (sscanf(buf, "%s", str) != 1)
885 spin_lock(&drvdata->spinlock);
886 idx = config->addr_idx;
887 if (!strcmp(str, "instr"))
888 /* TYPE, bits[1:0] */
889 config->addr_acc[idx] &= ~(BIT(0) | BIT(1));
891 spin_unlock(&drvdata->spinlock);
894 static DEVICE_ATTR_RW(addr_instdatatype);
896 static ssize_t addr_single_show(struct device *dev,
897 struct device_attribute *attr,
902 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
903 struct etmv4_config *config = &drvdata->config;
905 idx = config->addr_idx;
906 spin_lock(&drvdata->spinlock);
907 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
908 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
909 spin_unlock(&drvdata->spinlock);
912 val = (unsigned long)config->addr_val[idx];
913 spin_unlock(&drvdata->spinlock);
914 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
917 static ssize_t addr_single_store(struct device *dev,
918 struct device_attribute *attr,
919 const char *buf, size_t size)
923 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
924 struct etmv4_config *config = &drvdata->config;
926 if (kstrtoul(buf, 16, &val))
929 spin_lock(&drvdata->spinlock);
930 idx = config->addr_idx;
931 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
932 config->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) {
933 spin_unlock(&drvdata->spinlock);
937 config->addr_val[idx] = (u64)val;
938 config->addr_type[idx] = ETM_ADDR_TYPE_SINGLE;
939 spin_unlock(&drvdata->spinlock);
942 static DEVICE_ATTR_RW(addr_single);
944 static ssize_t addr_range_show(struct device *dev,
945 struct device_attribute *attr,
949 unsigned long val1, val2;
950 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
951 struct etmv4_config *config = &drvdata->config;
953 spin_lock(&drvdata->spinlock);
954 idx = config->addr_idx;
956 spin_unlock(&drvdata->spinlock);
959 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
960 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
961 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
962 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
963 spin_unlock(&drvdata->spinlock);
967 val1 = (unsigned long)config->addr_val[idx];
968 val2 = (unsigned long)config->addr_val[idx + 1];
969 spin_unlock(&drvdata->spinlock);
970 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
973 static ssize_t addr_range_store(struct device *dev,
974 struct device_attribute *attr,
975 const char *buf, size_t size)
978 unsigned long val1, val2;
979 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
980 struct etmv4_config *config = &drvdata->config;
981 int elements, exclude;
983 elements = sscanf(buf, "%lx %lx %x", &val1, &val2, &exclude);
985 /* exclude is optional, but need at least two parameter */
988 /* lower address comparator cannot have a higher address value */
992 spin_lock(&drvdata->spinlock);
993 idx = config->addr_idx;
995 spin_unlock(&drvdata->spinlock);
999 if (!((config->addr_type[idx] == ETM_ADDR_TYPE_NONE &&
1000 config->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) ||
1001 (config->addr_type[idx] == ETM_ADDR_TYPE_RANGE &&
1002 config->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) {
1003 spin_unlock(&drvdata->spinlock);
1007 config->addr_val[idx] = (u64)val1;
1008 config->addr_type[idx] = ETM_ADDR_TYPE_RANGE;
1009 config->addr_val[idx + 1] = (u64)val2;
1010 config->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE;
1012 * Program include or exclude control bits for vinst or vdata
1013 * whenever we change addr comparators to ETM_ADDR_TYPE_RANGE
1014 * use supplied value, or default to bit set in 'mode'
1017 exclude = config->mode & ETM_MODE_EXCLUDE;
1018 etm4_set_mode_exclude(drvdata, exclude ? true : false);
1020 spin_unlock(&drvdata->spinlock);
1023 static DEVICE_ATTR_RW(addr_range);
1025 static ssize_t addr_start_show(struct device *dev,
1026 struct device_attribute *attr,
1031 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1032 struct etmv4_config *config = &drvdata->config;
1034 spin_lock(&drvdata->spinlock);
1035 idx = config->addr_idx;
1037 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1038 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1039 spin_unlock(&drvdata->spinlock);
1043 val = (unsigned long)config->addr_val[idx];
1044 spin_unlock(&drvdata->spinlock);
1045 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1048 static ssize_t addr_start_store(struct device *dev,
1049 struct device_attribute *attr,
1050 const char *buf, size_t size)
1054 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1055 struct etmv4_config *config = &drvdata->config;
1057 if (kstrtoul(buf, 16, &val))
1060 spin_lock(&drvdata->spinlock);
1061 idx = config->addr_idx;
1062 if (!drvdata->nr_addr_cmp) {
1063 spin_unlock(&drvdata->spinlock);
1066 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1067 config->addr_type[idx] == ETM_ADDR_TYPE_START)) {
1068 spin_unlock(&drvdata->spinlock);
1072 config->addr_val[idx] = (u64)val;
1073 config->addr_type[idx] = ETM_ADDR_TYPE_START;
1074 config->vissctlr |= BIT(idx);
1075 spin_unlock(&drvdata->spinlock);
1078 static DEVICE_ATTR_RW(addr_start);
1080 static ssize_t addr_stop_show(struct device *dev,
1081 struct device_attribute *attr,
1086 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1087 struct etmv4_config *config = &drvdata->config;
1089 spin_lock(&drvdata->spinlock);
1090 idx = config->addr_idx;
1092 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1093 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1094 spin_unlock(&drvdata->spinlock);
1098 val = (unsigned long)config->addr_val[idx];
1099 spin_unlock(&drvdata->spinlock);
1100 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1103 static ssize_t addr_stop_store(struct device *dev,
1104 struct device_attribute *attr,
1105 const char *buf, size_t size)
1109 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1110 struct etmv4_config *config = &drvdata->config;
1112 if (kstrtoul(buf, 16, &val))
1115 spin_lock(&drvdata->spinlock);
1116 idx = config->addr_idx;
1117 if (!drvdata->nr_addr_cmp) {
1118 spin_unlock(&drvdata->spinlock);
1121 if (!(config->addr_type[idx] == ETM_ADDR_TYPE_NONE ||
1122 config->addr_type[idx] == ETM_ADDR_TYPE_STOP)) {
1123 spin_unlock(&drvdata->spinlock);
1127 config->addr_val[idx] = (u64)val;
1128 config->addr_type[idx] = ETM_ADDR_TYPE_STOP;
1129 config->vissctlr |= BIT(idx + 16);
1130 spin_unlock(&drvdata->spinlock);
1133 static DEVICE_ATTR_RW(addr_stop);
1135 static ssize_t addr_ctxtype_show(struct device *dev,
1136 struct device_attribute *attr,
1141 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1142 struct etmv4_config *config = &drvdata->config;
1144 spin_lock(&drvdata->spinlock);
1145 idx = config->addr_idx;
1146 /* CONTEXTTYPE, bits[3:2] */
1147 val = BMVAL(config->addr_acc[idx], 2, 3);
1148 len = scnprintf(buf, PAGE_SIZE, "%s\n", val == ETM_CTX_NONE ? "none" :
1149 (val == ETM_CTX_CTXID ? "ctxid" :
1150 (val == ETM_CTX_VMID ? "vmid" : "all")));
1151 spin_unlock(&drvdata->spinlock);
1155 static ssize_t addr_ctxtype_store(struct device *dev,
1156 struct device_attribute *attr,
1157 const char *buf, size_t size)
1161 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1162 struct etmv4_config *config = &drvdata->config;
1164 if (strlen(buf) >= 10)
1166 if (sscanf(buf, "%s", str) != 1)
1169 spin_lock(&drvdata->spinlock);
1170 idx = config->addr_idx;
1171 if (!strcmp(str, "none"))
1172 /* start by clearing context type bits */
1173 config->addr_acc[idx] &= ~(BIT(2) | BIT(3));
1174 else if (!strcmp(str, "ctxid")) {
1175 /* 0b01 The trace unit performs a Context ID */
1176 if (drvdata->numcidc) {
1177 config->addr_acc[idx] |= BIT(2);
1178 config->addr_acc[idx] &= ~BIT(3);
1180 } else if (!strcmp(str, "vmid")) {
1181 /* 0b10 The trace unit performs a VMID */
1182 if (drvdata->numvmidc) {
1183 config->addr_acc[idx] &= ~BIT(2);
1184 config->addr_acc[idx] |= BIT(3);
1186 } else if (!strcmp(str, "all")) {
1188 * 0b11 The trace unit performs a Context ID
1189 * comparison and a VMID
1191 if (drvdata->numcidc)
1192 config->addr_acc[idx] |= BIT(2);
1193 if (drvdata->numvmidc)
1194 config->addr_acc[idx] |= BIT(3);
1196 spin_unlock(&drvdata->spinlock);
1199 static DEVICE_ATTR_RW(addr_ctxtype);
1201 static ssize_t addr_context_show(struct device *dev,
1202 struct device_attribute *attr,
1207 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1208 struct etmv4_config *config = &drvdata->config;
1210 spin_lock(&drvdata->spinlock);
1211 idx = config->addr_idx;
1212 /* context ID comparator bits[6:4] */
1213 val = BMVAL(config->addr_acc[idx], 4, 6);
1214 spin_unlock(&drvdata->spinlock);
1215 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1218 static ssize_t addr_context_store(struct device *dev,
1219 struct device_attribute *attr,
1220 const char *buf, size_t size)
1224 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1225 struct etmv4_config *config = &drvdata->config;
1227 if (kstrtoul(buf, 16, &val))
1229 if ((drvdata->numcidc <= 1) && (drvdata->numvmidc <= 1))
1231 if (val >= (drvdata->numcidc >= drvdata->numvmidc ?
1232 drvdata->numcidc : drvdata->numvmidc))
1235 spin_lock(&drvdata->spinlock);
1236 idx = config->addr_idx;
1237 /* clear context ID comparator bits[6:4] */
1238 config->addr_acc[idx] &= ~(BIT(4) | BIT(5) | BIT(6));
1239 config->addr_acc[idx] |= (val << 4);
1240 spin_unlock(&drvdata->spinlock);
1243 static DEVICE_ATTR_RW(addr_context);
1245 static ssize_t addr_exlevel_s_ns_show(struct device *dev,
1246 struct device_attribute *attr,
1251 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1252 struct etmv4_config *config = &drvdata->config;
1254 spin_lock(&drvdata->spinlock);
1255 idx = config->addr_idx;
1256 val = BMVAL(config->addr_acc[idx], 8, 14);
1257 spin_unlock(&drvdata->spinlock);
1258 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
/*
 * sysfs addr_exlevel_s_ns store: program the exception level filter of
 * the selected address comparator.  Input is validated against the
 * 7-bit field width (GENMASK(14, 8) shifted down) before being shifted
 * into place; bit[15] is RES0 and never touched.
 */
1261 static ssize_t addr_exlevel_s_ns_store(struct device *dev,
1262 struct device_attribute *attr,
1263 const char *buf, size_t size)
1267 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1268 struct etmv4_config *config = &drvdata->config;
1270 if (kstrtoul(buf, 0, &val))
/* reject values wider than the 7 writable bits */
1273 if (val & ~((GENMASK(14, 8) >> 8)))
1276 spin_lock(&drvdata->spinlock);
1277 idx = config->addr_idx;
1278 /* clear Exlevel_ns & Exlevel_s bits[14:12, 11:8], bit[15] is res0 */
1279 config->addr_acc[idx] &= ~(GENMASK(14, 8));
1280 config->addr_acc[idx] |= (val << 8);
1281 spin_unlock(&drvdata->spinlock);
1284 static DEVICE_ATTR_RW(addr_exlevel_s_ns);
/*
 * Human-readable names indexed by ETM_ADDR_TYPE_* for addr_cmp_view_show().
 * NOTE(review): the array entries are elided in this excerpt.
 */
1286 static const char * const addr_type_names[] = {
/*
 * sysfs addr_cmp_view show (read-only): pretty-print the state of the
 * currently selected address comparator — its type name, value(s), and
 * for a range comparator whether the pair is include or exclude
 * (VIIECTLR exclude bit for the pair lives at idx/2 + 16).
 * Snapshot is taken under the spinlock; formatting happens outside it.
 */
1294 static ssize_t addr_cmp_view_show(struct device *dev,
1295 struct device_attribute *attr, char *buf)
1298 unsigned long addr_v, addr_v2, addr_ctrl;
1299 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1300 struct etmv4_config *config = &drvdata->config;
1302 bool exclude = false;
1304 spin_lock(&drvdata->spinlock);
1305 idx = config->addr_idx;
1306 addr_v = config->addr_val[idx];
1307 addr_ctrl = config->addr_acc[idx];
1308 addr_type = config->addr_type[idx];
1309 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1313 addr_v = config->addr_val[idx];
/* a range occupies an even/odd comparator pair; second value is idx+1 */
1315 addr_v2 = config->addr_val[idx + 1];
1317 exclude = config->viiectlr & BIT(idx / 2 + 16);
1319 spin_unlock(&drvdata->spinlock);
1321 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] %s %#lx", idx,
1322 addr_type_names[addr_type], addr_v);
1323 if (addr_type == ETM_ADDR_TYPE_RANGE) {
1324 size += scnprintf(buf + size, PAGE_SIZE - size,
1325 " %#lx %s", addr_v2,
1326 exclude ? "exclude" : "include");
1328 size += scnprintf(buf + size, PAGE_SIZE - size,
1329 " ctrl(%#lx)\n", addr_ctrl);
/* comparator not programmed to any type */
1331 size = scnprintf(buf, PAGE_SIZE, "addr_cmp[%i] unused\n", idx);
1335 static DEVICE_ATTR_RO(addr_cmp_view);
/*
 * sysfs vinst_pe_cmp_start_stop: raw access to the ViewInst PE
 * comparator start/stop control (TRCVIPCSSCTLR).  Both directions fail
 * if the implementation has no PE comparators (nr_pe_cmp == 0).
 */
1337 static ssize_t vinst_pe_cmp_start_stop_show(struct device *dev,
1338 struct device_attribute *attr,
1342 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1343 struct etmv4_config *config = &drvdata->config;
1345 if (!drvdata->nr_pe_cmp)
1347 val = config->vipcssctlr;
1348 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1350 static ssize_t vinst_pe_cmp_start_stop_store(struct device *dev,
1351 struct device_attribute *attr,
1352 const char *buf, size_t size)
1355 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1356 struct etmv4_config *config = &drvdata->config;
1358 if (kstrtoul(buf, 16, &val))
1360 if (!drvdata->nr_pe_cmp)
1363 spin_lock(&drvdata->spinlock);
1364 config->vipcssctlr = val;
1365 spin_unlock(&drvdata->spinlock);
1368 static DEVICE_ATTR_RW(vinst_pe_cmp_start_stop);
/*
 * sysfs seq_idx: select which sequencer state control register the
 * seq_event attribute operates on.  Valid range is [0, nrseqstate - 1),
 * i.e. one control per state transition.
 */
1370 static ssize_t seq_idx_show(struct device *dev,
1371 struct device_attribute *attr,
1375 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1376 struct etmv4_config *config = &drvdata->config;
1378 val = config->seq_idx;
1379 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1382 static ssize_t seq_idx_store(struct device *dev,
1383 struct device_attribute *attr,
1384 const char *buf, size_t size)
1387 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1388 struct etmv4_config *config = &drvdata->config;
1390 if (kstrtoul(buf, 16, &val))
1392 if (val >= drvdata->nrseqstate - 1)
1396 * Use spinlock to ensure index doesn't change while it gets
1397 * dereferenced multiple times within a spinlock block elsewhere.
1399 spin_lock(&drvdata->spinlock);
1400 config->seq_idx = val;
1401 spin_unlock(&drvdata->spinlock);
1404 static DEVICE_ATTR_RW(seq_idx);
/*
 * sysfs seq_state: read/write the sequencer's current state.  Bounded
 * by the number of implemented states (nrseqstate).  The store writes
 * the cached config only; hardware is programmed at enable time.
 */
1406 static ssize_t seq_state_show(struct device *dev,
1407 struct device_attribute *attr,
1411 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1412 struct etmv4_config *config = &drvdata->config;
1414 val = config->seq_state;
1415 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1418 static ssize_t seq_state_store(struct device *dev,
1419 struct device_attribute *attr,
1420 const char *buf, size_t size)
1423 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1424 struct etmv4_config *config = &drvdata->config;
1426 if (kstrtoul(buf, 16, &val))
1428 if (val >= drvdata->nrseqstate)
1431 config->seq_state = val;
1434 static DEVICE_ATTR_RW(seq_state);
/*
 * sysfs seq_event: read/write the sequencer transition event control
 * (TRCSEQEVRn) for the register selected by seq_idx.  Index is read and
 * dereferenced under the spinlock so seq_idx_store() can't change it
 * mid-operation.
 */
1436 static ssize_t seq_event_show(struct device *dev,
1437 struct device_attribute *attr,
1442 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1443 struct etmv4_config *config = &drvdata->config;
1445 spin_lock(&drvdata->spinlock);
1446 idx = config->seq_idx;
1447 val = config->seq_ctrl[idx];
1448 spin_unlock(&drvdata->spinlock);
1449 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1452 static ssize_t seq_event_store(struct device *dev,
1453 struct device_attribute *attr,
1454 const char *buf, size_t size)
1458 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1459 struct etmv4_config *config = &drvdata->config;
1461 if (kstrtoul(buf, 16, &val))
1464 spin_lock(&drvdata->spinlock);
1465 idx = config->seq_idx;
1466 /* Seq control has two masks B[15:8] F[7:0] */
1467 config->seq_ctrl[idx] = val & 0xFFFF;
1468 spin_unlock(&drvdata->spinlock);
1471 static DEVICE_ATTR_RW(seq_event);
/*
 * sysfs seq_reset_event: read/write the event that resets the sequencer
 * to state 0 (TRCSEQRSTEVR).  Store is refused when no sequencer states
 * are implemented; the value is masked to the event field width.
 */
1473 static ssize_t seq_reset_event_show(struct device *dev,
1474 struct device_attribute *attr,
1478 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1479 struct etmv4_config *config = &drvdata->config;
1481 val = config->seq_rst;
1482 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1485 static ssize_t seq_reset_event_store(struct device *dev,
1486 struct device_attribute *attr,
1487 const char *buf, size_t size)
1490 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1491 struct etmv4_config *config = &drvdata->config;
1493 if (kstrtoul(buf, 16, &val))
1495 if (!(drvdata->nrseqstate))
1498 config->seq_rst = val & ETMv4_EVENT_MASK;
1501 static DEVICE_ATTR_RW(seq_reset_event);
/*
 * sysfs cntr_idx: select which counter the cntrldvr/cntr_val/cntr_ctrl
 * attributes operate on.  Bounded by the number of implemented
 * counters (nr_cntr).
 */
1503 static ssize_t cntr_idx_show(struct device *dev,
1504 struct device_attribute *attr,
1508 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1509 struct etmv4_config *config = &drvdata->config;
1511 val = config->cntr_idx;
1512 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1515 static ssize_t cntr_idx_store(struct device *dev,
1516 struct device_attribute *attr,
1517 const char *buf, size_t size)
1520 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1521 struct etmv4_config *config = &drvdata->config;
1523 if (kstrtoul(buf, 16, &val))
1525 if (val >= drvdata->nr_cntr)
1529 * Use spinlock to ensure index doesn't change while it gets
1530 * dereferenced multiple times within a spinlock block elsewhere.
1532 spin_lock(&drvdata->spinlock);
1533 config->cntr_idx = val;
1534 spin_unlock(&drvdata->spinlock);
1537 static DEVICE_ATTR_RW(cntr_idx);
/*
 * sysfs cntrldvr: read/write the reload value (TRCCNTRLDVRn) of the
 * counter selected by cntr_idx.  Stored values are capped at
 * ETM_CNTR_MAX_VAL; index is dereferenced under the spinlock.
 */
1539 static ssize_t cntrldvr_show(struct device *dev,
1540 struct device_attribute *attr,
1545 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1546 struct etmv4_config *config = &drvdata->config;
1548 spin_lock(&drvdata->spinlock);
1549 idx = config->cntr_idx;
1550 val = config->cntrldvr[idx];
1551 spin_unlock(&drvdata->spinlock);
1552 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1555 static ssize_t cntrldvr_store(struct device *dev,
1556 struct device_attribute *attr,
1557 const char *buf, size_t size)
1561 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1562 struct etmv4_config *config = &drvdata->config;
1564 if (kstrtoul(buf, 16, &val))
1566 if (val > ETM_CNTR_MAX_VAL)
1569 spin_lock(&drvdata->spinlock);
1570 idx = config->cntr_idx;
1571 config->cntrldvr[idx] = val;
1572 spin_unlock(&drvdata->spinlock);
1575 static DEVICE_ATTR_RW(cntrldvr);
/*
 * sysfs cntr_val: read/write the current value (TRCCNTVRn) of the
 * counter selected by cntr_idx, with the same ETM_CNTR_MAX_VAL bound
 * and locking discipline as cntrldvr.
 */
1577 static ssize_t cntr_val_show(struct device *dev,
1578 struct device_attribute *attr,
1583 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1584 struct etmv4_config *config = &drvdata->config;
1586 spin_lock(&drvdata->spinlock);
1587 idx = config->cntr_idx;
1588 val = config->cntr_val[idx];
1589 spin_unlock(&drvdata->spinlock);
1590 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1593 static ssize_t cntr_val_store(struct device *dev,
1594 struct device_attribute *attr,
1595 const char *buf, size_t size)
1599 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1600 struct etmv4_config *config = &drvdata->config;
1602 if (kstrtoul(buf, 16, &val))
1604 if (val > ETM_CNTR_MAX_VAL)
1607 spin_lock(&drvdata->spinlock);
1608 idx = config->cntr_idx;
1609 config->cntr_val[idx] = val;
1610 spin_unlock(&drvdata->spinlock);
1613 static DEVICE_ATTR_RW(cntr_val);
/*
 * sysfs cntr_ctrl: read/write the control register (TRCCNTCTLRn) of the
 * counter selected by cntr_idx.  Unlike cntr_val, the stored value is
 * not range-checked here.
 */
1615 static ssize_t cntr_ctrl_show(struct device *dev,
1616 struct device_attribute *attr,
1621 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1622 struct etmv4_config *config = &drvdata->config;
1624 spin_lock(&drvdata->spinlock);
1625 idx = config->cntr_idx;
1626 val = config->cntr_ctrl[idx];
1627 spin_unlock(&drvdata->spinlock);
1628 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1631 static ssize_t cntr_ctrl_store(struct device *dev,
1632 struct device_attribute *attr,
1633 const char *buf, size_t size)
1637 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1638 struct etmv4_config *config = &drvdata->config;
1640 if (kstrtoul(buf, 16, &val))
1643 spin_lock(&drvdata->spinlock);
1644 idx = config->cntr_idx;
1645 config->cntr_ctrl[idx] = val;
1646 spin_unlock(&drvdata->spinlock);
1649 static DEVICE_ATTR_RW(cntr_ctrl);
/*
 * sysfs res_idx: select which resource selection control register the
 * res_ctrl attribute operates on.  Selectors 0 and 1 (pair 0) are
 * architecturally reserved, so the valid range is [2, 2 * nr_resource).
 */
1651 static ssize_t res_idx_show(struct device *dev,
1652 struct device_attribute *attr,
1656 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1657 struct etmv4_config *config = &drvdata->config;
1659 val = config->res_idx;
1660 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1663 static ssize_t res_idx_store(struct device *dev,
1664 struct device_attribute *attr,
1665 const char *buf, size_t size)
1668 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1669 struct etmv4_config *config = &drvdata->config;
1671 if (kstrtoul(buf, 16, &val))
1674 * Resource selector pair 0 is always implemented and reserved,
1675 * namely an idx with 0 and 1 is illegal.
1677 if ((val < 2) || (val >= 2 * drvdata->nr_resource))
1681 * Use spinlock to ensure index doesn't change while it gets
1682 * dereferenced multiple times within a spinlock block elsewhere.
1684 spin_lock(&drvdata->spinlock);
1685 config->res_idx = val;
1686 spin_unlock(&drvdata->spinlock);
1689 static DEVICE_ATTR_RW(res_idx);
/*
 * sysfs res_ctrl: read/write the resource selection control register
 * (TRCRSCTLRn) selected by res_idx.  The store masks the value to the
 * architecturally defined bits [21:0]; for odd-numbered selectors the
 * pair-inversion bit (PAIRINV, bit[21]) is RES0 — the clearing of that
 * bit for odd idx is elided in this excerpt.
 */
1691 static ssize_t res_ctrl_show(struct device *dev,
1692 struct device_attribute *attr,
1697 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1698 struct etmv4_config *config = &drvdata->config;
1700 spin_lock(&drvdata->spinlock);
1701 idx = config->res_idx;
1702 val = config->res_ctrl[idx];
1703 spin_unlock(&drvdata->spinlock);
1704 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1707 static ssize_t res_ctrl_store(struct device *dev,
1708 struct device_attribute *attr,
1709 const char *buf, size_t size)
1713 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1714 struct etmv4_config *config = &drvdata->config;
1716 if (kstrtoul(buf, 16, &val))
1719 spin_lock(&drvdata->spinlock);
1720 idx = config->res_idx;
1721 /* For odd idx pair inversal bit is RES0 */
1723 /* PAIRINV, bit[21] */
1725 config->res_ctrl[idx] = val & GENMASK(21, 0);
1726 spin_unlock(&drvdata->spinlock);
1729 static DEVICE_ATTR_RW(res_ctrl);
/*
 * sysfs sshot_idx: select which single-shot comparator control set the
 * sshot_ctrl/sshot_status/sshot_pe_ctrl attributes operate on.  Bounded
 * by the number of implemented single-shot comparators (nr_ss_cmp).
 */
1731 static ssize_t sshot_idx_show(struct device *dev,
1732 struct device_attribute *attr, char *buf)
1735 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1736 struct etmv4_config *config = &drvdata->config;
1738 val = config->ss_idx;
1739 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1742 static ssize_t sshot_idx_store(struct device *dev,
1743 struct device_attribute *attr,
1744 const char *buf, size_t size)
1747 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1748 struct etmv4_config *config = &drvdata->config;
1750 if (kstrtoul(buf, 16, &val))
1752 if (val >= drvdata->nr_ss_cmp)
1755 spin_lock(&drvdata->spinlock);
1756 config->ss_idx = val;
1757 spin_unlock(&drvdata->spinlock);
1760 static DEVICE_ATTR_RW(sshot_idx);
/*
 * sysfs sshot_ctrl: read/write the single-shot comparator control
 * (TRCSSCCRn) selected by sshot_idx.  Writes are masked to bits [24:0]
 * and also clear bit 31 of the paired status register, as required when
 * reprogramming a single-shot comparator.
 */
1762 static ssize_t sshot_ctrl_show(struct device *dev,
1763 struct device_attribute *attr,
1767 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1768 struct etmv4_config *config = &drvdata->config;
1770 spin_lock(&drvdata->spinlock);
1771 val = config->ss_ctrl[config->ss_idx];
1772 spin_unlock(&drvdata->spinlock);
1773 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1776 static ssize_t sshot_ctrl_store(struct device *dev,
1777 struct device_attribute *attr,
1778 const char *buf, size_t size)
1782 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1783 struct etmv4_config *config = &drvdata->config;
1785 if (kstrtoul(buf, 16, &val))
1788 spin_lock(&drvdata->spinlock);
1789 idx = config->ss_idx;
1790 config->ss_ctrl[idx] = val & GENMASK(24, 0);
1791 /* must clear bit 31 in related status register on programming */
1792 config->ss_status[idx] &= ~BIT(31);
1793 spin_unlock(&drvdata->spinlock);
1796 static DEVICE_ATTR_RW(sshot_ctrl);
/*
 * sysfs sshot_status (read-only): report the cached single-shot
 * comparator status (TRCSSCSRn) for the comparator selected by
 * sshot_idx.
 */
1798 static ssize_t sshot_status_show(struct device *dev,
1799 struct device_attribute *attr, char *buf)
1802 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1803 struct etmv4_config *config = &drvdata->config;
1805 spin_lock(&drvdata->spinlock);
1806 val = config->ss_status[config->ss_idx];
1807 spin_unlock(&drvdata->spinlock);
1808 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1810 static DEVICE_ATTR_RO(sshot_status);
/*
 * sysfs sshot_pe_ctrl: read/write the single-shot PE comparator input
 * control (TRCSSPCICRn) for the comparator selected by sshot_idx.
 * Writes are masked to the 8 PE-select bits [7:0] and, like sshot_ctrl,
 * clear bit 31 of the paired status register on programming.
 */
1812 static ssize_t sshot_pe_ctrl_show(struct device *dev,
1813 struct device_attribute *attr,
1817 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1818 struct etmv4_config *config = &drvdata->config;
1820 spin_lock(&drvdata->spinlock);
1821 val = config->ss_pe_cmp[config->ss_idx];
1822 spin_unlock(&drvdata->spinlock);
1823 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1826 static ssize_t sshot_pe_ctrl_store(struct device *dev,
1827 struct device_attribute *attr,
1828 const char *buf, size_t size)
1832 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1833 struct etmv4_config *config = &drvdata->config;
1835 if (kstrtoul(buf, 16, &val))
1838 spin_lock(&drvdata->spinlock);
1839 idx = config->ss_idx;
1840 config->ss_pe_cmp[idx] = val & GENMASK(7, 0);
1841 /* must clear bit 31 in related status register on programming */
1842 config->ss_status[idx] &= ~BIT(31);
1843 spin_unlock(&drvdata->spinlock);
1846 static DEVICE_ATTR_RW(sshot_pe_ctrl);
/*
 * sysfs ctxid_idx: select which context ID comparator the ctxid_pid
 * attribute operates on.  Bounded by the number of implemented context
 * ID comparators (numcidc).
 */
1848 static ssize_t ctxid_idx_show(struct device *dev,
1849 struct device_attribute *attr,
1853 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1854 struct etmv4_config *config = &drvdata->config;
1856 val = config->ctxid_idx;
1857 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1860 static ssize_t ctxid_idx_store(struct device *dev,
1861 struct device_attribute *attr,
1862 const char *buf, size_t size)
1865 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1866 struct etmv4_config *config = &drvdata->config;
1868 if (kstrtoul(buf, 16, &val))
1870 if (val >= drvdata->numcidc)
1874 * Use spinlock to ensure index doesn't change while it gets
1875 * dereferenced multiple times within a spinlock block elsewhere.
1877 spin_lock(&drvdata->spinlock);
1878 config->ctxid_idx = val;
1879 spin_unlock(&drvdata->spinlock);
1882 static DEVICE_ATTR_RW(ctxid_idx);
/*
 * sysfs ctxid_pid: read/write the PID value programmed into the context
 * ID comparator selected by ctxid_idx.  Both directions refuse access
 * when @current is not in the initial PID namespace, because the kernel
 * contextID would not match the namespace's view of the PID and could
 * leak kernel-only information.
 * NOTE(review): this pair open-codes the namespace check
 * (task_active_pid_ns(current) != &init_pid_ns) while the vmid_*
 * handlers below use task_is_in_init_pid_ns() — consider unifying.
 */
1884 static ssize_t ctxid_pid_show(struct device *dev,
1885 struct device_attribute *attr,
1890 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1891 struct etmv4_config *config = &drvdata->config;
1894 * Don't use contextID tracing if coming from a PID namespace. See
1895 * comment in ctxid_pid_store().
1897 if (task_active_pid_ns(current) != &init_pid_ns)
1900 spin_lock(&drvdata->spinlock);
1901 idx = config->ctxid_idx;
1902 val = (unsigned long)config->ctxid_pid[idx];
1903 spin_unlock(&drvdata->spinlock);
1904 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
1907 static ssize_t ctxid_pid_store(struct device *dev,
1908 struct device_attribute *attr,
1909 const char *buf, size_t size)
1913 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1914 struct etmv4_config *config = &drvdata->config;
1917 * When contextID tracing is enabled the tracers will insert the
1918 * value found in the contextID register in the trace stream. But if
1919 * a process is in a namespace the PID of that process as seen from the
1920 * namespace won't be what the kernel sees, something that makes the
1921 * feature confusing and can potentially leak kernel only information.
1922 * As such refuse to use the feature if @current is not in the initial
1925 if (task_active_pid_ns(current) != &init_pid_ns)
1929 * only implemented when ctxid tracing is enabled, i.e. at least one
1930 * ctxid comparator is implemented and ctxid is greater than 0 bits
1933 if (!drvdata->ctxid_size || !drvdata->numcidc)
1935 if (kstrtoul(buf, 16, &pid))
1938 spin_lock(&drvdata->spinlock);
1939 idx = config->ctxid_idx;
1940 config->ctxid_pid[idx] = (u64)pid;
1941 spin_unlock(&drvdata->spinlock);
1944 static DEVICE_ATTR_RW(ctxid_pid);
/*
 * sysfs ctxid_masks show: report both context ID comparator byte-mask
 * registers (TRCCIDCCTLR0/1) as two hex values.  Subject to the same
 * initial-PID-namespace restriction as ctxid_pid.
 */
1946 static ssize_t ctxid_masks_show(struct device *dev,
1947 struct device_attribute *attr,
1950 unsigned long val1, val2;
1951 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1952 struct etmv4_config *config = &drvdata->config;
1955 * Don't use contextID tracing if coming from a PID namespace. See
1956 * comment in ctxid_pid_store().
1958 if (task_active_pid_ns(current) != &init_pid_ns)
1961 spin_lock(&drvdata->spinlock);
1962 val1 = config->ctxid_mask0;
1963 val2 = config->ctxid_mask1;
1964 spin_unlock(&drvdata->spinlock);
1965 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
/*
 * sysfs ctxid_masks store: program the context ID comparator byte masks
 * (TRCCIDCCTLR0/1).  One hex value covers comparators 0-3; a second is
 * required when more than 4 comparators exist.  The switch truncates
 * the supplied masks to the bytes backed by implemented comparators
 * (each comparator owns one byte; cases fall through to accumulate —
 * the case labels and break/fallthrough lines are elided here).  After
 * setting the masks, any comparator byte whose mask bit is set has the
 * corresponding byte of its programmed PID value cleared, since the
 * architecture leaves the behavior unpredictable otherwise.
 */
1968 static ssize_t ctxid_masks_store(struct device *dev,
1969 struct device_attribute *attr,
1970 const char *buf, size_t size)
1973 unsigned long val1, val2, mask;
1974 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
1975 struct etmv4_config *config = &drvdata->config;
1979 * Don't use contextID tracing if coming from a PID namespace. See
1980 * comment in ctxid_pid_store().
1982 if (task_active_pid_ns(current) != &init_pid_ns)
1986 * only implemented when ctxid tracing is enabled, i.e. at least one
1987 * ctxid comparator is implemented and ctxid is greater than 0 bits
1990 if (!drvdata->ctxid_size || !drvdata->numcidc)
1992 /* one mask if <= 4 comparators, two for up to 8 */
1993 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
1994 if ((drvdata->numcidc > 4) && (nr_inputs != 2))
1997 spin_lock(&drvdata->spinlock);
1999 * each byte[0..3] controls mask value applied to ctxid
2002 switch (drvdata->numcidc) {
2004 /* COMP0, bits[7:0] */
2005 config->ctxid_mask0 = val1 & 0xFF;
2008 /* COMP1, bits[15:8] */
2009 config->ctxid_mask0 = val1 & 0xFFFF;
2012 /* COMP2, bits[23:16] */
2013 config->ctxid_mask0 = val1 & 0xFFFFFF;
2016 /* COMP3, bits[31:24] */
2017 config->ctxid_mask0 = val1;
2020 /* COMP4, bits[7:0] */
2021 config->ctxid_mask0 = val1;
2022 config->ctxid_mask1 = val2 & 0xFF;
2025 /* COMP5, bits[15:8] */
2026 config->ctxid_mask0 = val1;
2027 config->ctxid_mask1 = val2 & 0xFFFF;
2030 /* COMP6, bits[23:16] */
2031 config->ctxid_mask0 = val1;
2032 config->ctxid_mask1 = val2 & 0xFFFFFF;
2035 /* COMP7, bits[31:24] */
2036 config->ctxid_mask0 = val1;
2037 config->ctxid_mask1 = val2;
2043 * If software sets a mask bit to 1, it must program relevant byte
2044 * of ctxid comparator value 0x0, otherwise behavior is unpredictable.
2045 * For example, if bit[3] of ctxid_mask0 is 1, we must clear bits[31:24]
2046 * of ctxid comparator0 value (corresponding to byte 0) register.
2048 mask = config->ctxid_mask0;
2049 for (i = 0; i < drvdata->numcidc; i++) {
2050 /* mask value of corresponding ctxid comparator */
2051 maskbyte = mask & ETMv4_EVENT_MASK;
2053 * each bit corresponds to a byte of respective ctxid comparator
2056 for (j = 0; j < 8; j++) {
2058 config->ctxid_pid[i] &= ~(0xFFUL << (j * 8));
2061 /* Select the next ctxid comparator mask value */
2063 /* ctxid comparators[4-7] */
2064 mask = config->ctxid_mask1;
2069 spin_unlock(&drvdata->spinlock);
2072 static DEVICE_ATTR_RW(ctxid_masks);
/*
 * sysfs vmid_idx: select which VMID comparator the vmid_val attribute
 * operates on.  Bounded by the number of implemented VMID comparators
 * (numvmidc).
 */
2074 static ssize_t vmid_idx_show(struct device *dev,
2075 struct device_attribute *attr,
2079 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2080 struct etmv4_config *config = &drvdata->config;
2082 val = config->vmid_idx;
2083 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2086 static ssize_t vmid_idx_store(struct device *dev,
2087 struct device_attribute *attr,
2088 const char *buf, size_t size)
2091 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2092 struct etmv4_config *config = &drvdata->config;
2094 if (kstrtoul(buf, 16, &val))
2096 if (val >= drvdata->numvmidc)
2100 * Use spinlock to ensure index doesn't change while it gets
2101 * dereferenced multiple times within a spinlock block elsewhere.
2103 spin_lock(&drvdata->spinlock);
2104 config->vmid_idx = val;
2105 spin_unlock(&drvdata->spinlock);
2108 static DEVICE_ATTR_RW(vmid_idx);
/*
 * sysfs vmid_val: read/write the value programmed into the VMID
 * comparator selected by vmid_idx.  Refused outside the initial PID
 * namespace (same rationale as ctxid_pid) and when no VMID comparator
 * or zero VMID size is implemented.
 */
2110 static ssize_t vmid_val_show(struct device *dev,
2111 struct device_attribute *attr,
2115 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2116 struct etmv4_config *config = &drvdata->config;
2119 * Don't use virtual contextID tracing if coming from a PID namespace.
2120 * See comment in ctxid_pid_store().
2122 if (!task_is_in_init_pid_ns(current))
2125 spin_lock(&drvdata->spinlock);
2126 val = (unsigned long)config->vmid_val[config->vmid_idx];
2127 spin_unlock(&drvdata->spinlock);
2128 return scnprintf(buf, PAGE_SIZE, "%#lx\n", val);
2131 static ssize_t vmid_val_store(struct device *dev,
2132 struct device_attribute *attr,
2133 const char *buf, size_t size)
2136 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2137 struct etmv4_config *config = &drvdata->config;
2140 * Don't use virtual contextID tracing if coming from a PID namespace.
2141 * See comment in ctxid_pid_store().
2143 if (!task_is_in_init_pid_ns(current))
2147 * only implemented when vmid tracing is enabled, i.e. at least one
2148 * vmid comparator is implemented and at least 8 bit vmid size
2150 if (!drvdata->vmid_size || !drvdata->numvmidc)
2152 if (kstrtoul(buf, 16, &val))
2155 spin_lock(&drvdata->spinlock);
2156 config->vmid_val[config->vmid_idx] = (u64)val;
2157 spin_unlock(&drvdata->spinlock);
2160 static DEVICE_ATTR_RW(vmid_val);
/*
 * sysfs vmid_masks show: report both VMID comparator byte-mask
 * registers (TRCVMIDCCTLR0/1) as two hex values, mirroring
 * ctxid_masks_show for VMID comparators.
 */
2162 static ssize_t vmid_masks_show(struct device *dev,
2163 struct device_attribute *attr, char *buf)
2165 unsigned long val1, val2;
2166 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2167 struct etmv4_config *config = &drvdata->config;
2170 * Don't use virtual contextID tracing if coming from a PID namespace.
2171 * See comment in ctxid_pid_store().
2173 if (!task_is_in_init_pid_ns(current))
2176 spin_lock(&drvdata->spinlock);
2177 val1 = config->vmid_mask0;
2178 val2 = config->vmid_mask1;
2179 spin_unlock(&drvdata->spinlock);
2180 return scnprintf(buf, PAGE_SIZE, "%#lx %#lx\n", val1, val2);
/*
 * sysfs vmid_masks store: program the VMID comparator byte masks
 * (TRCVMIDCCTLR0/1).  Structurally identical to ctxid_masks_store: one
 * value for up to 4 comparators, two beyond that; the switch truncates
 * to the bytes backed by implemented comparators (case labels and
 * break/fallthrough lines are elided in this excerpt), and bytes of
 * vmid_val whose mask bit is set are cleared afterwards to avoid
 * architecturally unpredictable behavior.
 */
2183 static ssize_t vmid_masks_store(struct device *dev,
2184 struct device_attribute *attr,
2185 const char *buf, size_t size)
2188 unsigned long val1, val2, mask;
2189 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2190 struct etmv4_config *config = &drvdata->config;
2194 * Don't use virtual contextID tracing if coming from a PID namespace.
2195 * See comment in ctxid_pid_store().
2197 if (!task_is_in_init_pid_ns(current))
2201 * only implemented when vmid tracing is enabled, i.e. at least one
2202 * vmid comparator is implemented and at least 8 bit vmid size
2204 if (!drvdata->vmid_size || !drvdata->numvmidc)
2206 /* one mask if <= 4 comparators, two for up to 8 */
2207 nr_inputs = sscanf(buf, "%lx %lx", &val1, &val2);
2208 if ((drvdata->numvmidc > 4) && (nr_inputs != 2))
2211 spin_lock(&drvdata->spinlock);
2214 * each byte[0..3] controls mask value applied to vmid
2217 switch (drvdata->numvmidc) {
2219 /* COMP0, bits[7:0] */
2220 config->vmid_mask0 = val1 & 0xFF;
2223 /* COMP1, bits[15:8] */
2224 config->vmid_mask0 = val1 & 0xFFFF;
2227 /* COMP2, bits[23:16] */
2228 config->vmid_mask0 = val1 & 0xFFFFFF;
2231 /* COMP3, bits[31:24] */
2232 config->vmid_mask0 = val1;
2235 /* COMP4, bits[7:0] */
2236 config->vmid_mask0 = val1;
2237 config->vmid_mask1 = val2 & 0xFF;
2240 /* COMP5, bits[15:8] */
2241 config->vmid_mask0 = val1;
2242 config->vmid_mask1 = val2 & 0xFFFF;
2245 /* COMP6, bits[23:16] */
2246 config->vmid_mask0 = val1;
2247 config->vmid_mask1 = val2 & 0xFFFFFF;
2250 /* COMP7, bits[31:24] */
2251 config->vmid_mask0 = val1;
2252 config->vmid_mask1 = val2;
2259 * If software sets a mask bit to 1, it must program relevant byte
2260 * of vmid comparator value 0x0, otherwise behavior is unpredictable.
2261 * For example, if bit[3] of vmid_mask0 is 1, we must clear bits[31:24]
2262 * of vmid comparator0 value (corresponding to byte 0) register.
2264 mask = config->vmid_mask0;
2265 for (i = 0; i < drvdata->numvmidc; i++) {
2266 /* mask value of corresponding vmid comparator */
2267 maskbyte = mask & ETMv4_EVENT_MASK;
2269 * each bit corresponds to a byte of respective vmid comparator
2272 for (j = 0; j < 8; j++) {
2274 config->vmid_val[i] &= ~(0xFFUL << (j * 8));
2277 /* Select the next vmid comparator mask value */
2279 /* vmid comparators[4-7] */
2280 mask = config->vmid_mask1;
2284 spin_unlock(&drvdata->spinlock);
2287 static DEVICE_ATTR_RW(vmid_masks);
/*
 * sysfs cpu (read-only): report which CPU this tracer is affine to.
 * NOTE(review): the assignment of val (presumably from drvdata->cpu)
 * is elided in this excerpt.
 */
2289 static ssize_t cpu_show(struct device *dev,
2290 struct device_attribute *attr, char *buf)
2293 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2296 return scnprintf(buf, PAGE_SIZE, "%d\n", val);
2299 static DEVICE_ATTR_RO(cpu);
/*
 * All configuration sysfs attributes exposed by the ETMv4 device,
 * grouped into coresight_etmv4_group below.  Ordering mirrors the
 * attribute definitions above.  (Array terminator elided in this
 * excerpt.)
 */
2301 static struct attribute *coresight_etmv4_attrs[] = {
2302 &dev_attr_nr_pe_cmp.attr,
2303 &dev_attr_nr_addr_cmp.attr,
2304 &dev_attr_nr_cntr.attr,
2305 &dev_attr_nr_ext_inp.attr,
2306 &dev_attr_numcidc.attr,
2307 &dev_attr_numvmidc.attr,
2308 &dev_attr_nrseqstate.attr,
2309 &dev_attr_nr_resource.attr,
2310 &dev_attr_nr_ss_cmp.attr,
2311 &dev_attr_reset.attr,
2312 &dev_attr_mode.attr,
2314 &dev_attr_event.attr,
2315 &dev_attr_event_instren.attr,
2316 &dev_attr_event_ts.attr,
2317 &dev_attr_syncfreq.attr,
2318 &dev_attr_cyc_threshold.attr,
2319 &dev_attr_bb_ctrl.attr,
2320 &dev_attr_event_vinst.attr,
2321 &dev_attr_s_exlevel_vinst.attr,
2322 &dev_attr_ns_exlevel_vinst.attr,
2323 &dev_attr_addr_idx.attr,
2324 &dev_attr_addr_instdatatype.attr,
2325 &dev_attr_addr_single.attr,
2326 &dev_attr_addr_range.attr,
2327 &dev_attr_addr_start.attr,
2328 &dev_attr_addr_stop.attr,
2329 &dev_attr_addr_ctxtype.attr,
2330 &dev_attr_addr_context.attr,
2331 &dev_attr_addr_exlevel_s_ns.attr,
2332 &dev_attr_addr_cmp_view.attr,
2333 &dev_attr_vinst_pe_cmp_start_stop.attr,
2334 &dev_attr_sshot_idx.attr,
2335 &dev_attr_sshot_ctrl.attr,
2336 &dev_attr_sshot_pe_ctrl.attr,
2337 &dev_attr_sshot_status.attr,
2338 &dev_attr_seq_idx.attr,
2339 &dev_attr_seq_state.attr,
2340 &dev_attr_seq_event.attr,
2341 &dev_attr_seq_reset_event.attr,
2342 &dev_attr_cntr_idx.attr,
2343 &dev_attr_cntrldvr.attr,
2344 &dev_attr_cntr_val.attr,
2345 &dev_attr_cntr_ctrl.attr,
2346 &dev_attr_res_idx.attr,
2347 &dev_attr_res_ctrl.attr,
2348 &dev_attr_ctxid_idx.attr,
2349 &dev_attr_ctxid_pid.attr,
2350 &dev_attr_ctxid_masks.attr,
2351 &dev_attr_vmid_idx.attr,
2352 &dev_attr_vmid_val.attr,
2353 &dev_attr_vmid_masks.attr,
2359 struct coresight_device *csdev;
/*
 * SMP cross-call target: executed on the tracer's own CPU so the trace
 * unit is powered when the register is read.  Result is stashed in the
 * etmv4_reg cookie for the caller.
 */
2364 static void do_smp_cross_read(void *data)
2366 struct etmv4_reg *reg = data;
2368 reg->data = etm4x_relaxed_read32(&reg->csdev->access, reg->offset);
/*
 * Read a trace-unit register at @offset by cross-calling onto the CPU
 * that owns this ETM.  Synchronous (wait=1), so reg.data is valid on
 * return.  (Return statement elided in this excerpt — presumably
 * returns reg.data.)
 */
2371 static u32 etmv4_cross_read(const struct etmv4_drvdata *drvdata, u32 offset)
2373 struct etmv4_reg reg;
2375 reg.offset = offset;
2376 reg.csdev = drvdata->csdev;
2379 * smp cross call ensures the CPU will be powered up before
2380 * accessing the ETMv4 trace core registers
2382 smp_call_function_single(drvdata->cpu, do_smp_cross_read, &reg, 1);
/*
 * Recover the register offset stored in a dev_ext_attribute's var field
 * by the coresight_etm4x_reg() initializer macro below.
 */
2386 static inline u32 coresight_etm4x_attr_to_offset(struct device_attribute *attr)
2388 struct dev_ext_attribute *eattr;
2390 eattr = container_of(attr, struct dev_ext_attribute, attr);
2391 return (u32)(unsigned long)eattr->var;
/*
 * Generic show routine for the raw management/ID register attributes:
 * resolve the attribute's register offset, bump the parent's runtime PM
 * count so the component is powered, and read the register via an SMP
 * cross call to the owning CPU.
 */
2394 static ssize_t coresight_etm4x_reg_show(struct device *dev,
2395 struct device_attribute *d_attr,
2399 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2401 offset = coresight_etm4x_attr_to_offset(d_attr);
2403 pm_runtime_get_sync(dev->parent);
2404 val = etmv4_cross_read(drvdata, offset);
2405 pm_runtime_put_sync(dev->parent);
2407 return scnprintf(buf, PAGE_SIZE, "0x%x\n", val);
/*
 * Decide whether the register at @offset exists on this device, used to
 * hide non-existent sysfs entries.  Dispatches on the register-list
 * case macros: registers common to ETE and ETM4x are always present;
 * ETM4x-only system registers exist unless the device is an ETE;
 * memory-mapped-only registers exist only for mmio devices; ETE-only
 * registers exist only on ETE.  (Switch header and default case elided
 * in this excerpt.)
 */
2411 etm4x_register_implemented(struct etmv4_drvdata *drvdata, u32 offset)
2414 ETM_COMMON_SYSREG_LIST_CASES
2416 * Common registers to ETE & ETM4x accessible via system
2417 * instructions are always implemented.
2421 ETM4x_ONLY_SYSREG_LIST_CASES
2423 * We only support etm4x and ete. So if the device is not
2424 * ETE, it must be ETMv4x.
2426 return !etm4x_is_ete(drvdata);
2428 ETM4x_MMAP_LIST_CASES
2430 * Registers accessible only via memory-mapped registers
2431 * must not be accessed via system instructions.
2432 * We cannot access the drvdata->csdev here, as this
2433 * function is called during the device creation, via
2434 * coresight_register() and the csdev is not initialized
2435 * until that is done. So rely on the drvdata->base to
2436 * detect if we have a memory mapped access.
2437 * Also ETE doesn't implement memory mapped access, thus
2438 * it is sufficient to check that we are using mmio.
2440 return !!drvdata->base;
2442 ETE_ONLY_SYSREG_LIST_CASES
2443 return etm4x_is_ete(drvdata);
/*
 * is_visible callback for the management attribute group: consult
 * etm4x_register_implemented() for the attribute's register offset and
 * hide the sysfs file (return 0 mode) when the register does not exist
 * on this device.  (Return statements elided in this excerpt.)
 */
2450 * Hide the ETM4x registers that may not be available on the
2452 * There are certain management registers unavailable via system
2453 * instructions. Make those sysfs attributes hidden on such
2457 coresight_etm4x_attr_reg_implemented(struct kobject *kobj,
2458 struct attribute *attr, int unused)
2460 struct device *dev = kobj_to_dev(kobj);
2461 struct etmv4_drvdata *drvdata = dev_get_drvdata(dev->parent);
2462 struct device_attribute *d_attr;
2465 d_attr = container_of(attr, struct device_attribute, attr);
2466 offset = coresight_etm4x_attr_to_offset(d_attr);
2468 if (etm4x_register_implemented(drvdata, offset))
/*
 * Build a read-only dev_ext_attribute for a raw register: the register
 * offset is smuggled through the ->var cookie (recovered by
 * coresight_etm4x_attr_to_offset) and all reads funnel through
 * coresight_etm4x_reg_show().
 */
2473 #define coresight_etm4x_reg(name, offset) \
2474 &((struct dev_ext_attribute[]) { \
2476 __ATTR(name, 0444, coresight_etm4x_reg_show, NULL), \
2477 (void *)(unsigned long)offset \
/*
 * Raw management registers exposed under the "mgmt" sysfs group;
 * visibility is filtered per-register by
 * coresight_etm4x_attr_reg_implemented().  (Array terminator elided in
 * this excerpt.)
 */
2481 static struct attribute *coresight_etmv4_mgmt_attrs[] = {
2482 coresight_etm4x_reg(trcpdcr, TRCPDCR),
2483 coresight_etm4x_reg(trcpdsr, TRCPDSR),
2484 coresight_etm4x_reg(trclsr, TRCLSR),
2485 coresight_etm4x_reg(trcauthstatus, TRCAUTHSTATUS),
2486 coresight_etm4x_reg(trcdevid, TRCDEVID),
2487 coresight_etm4x_reg(trcdevtype, TRCDEVTYPE),
2488 coresight_etm4x_reg(trcpidr0, TRCPIDR0),
2489 coresight_etm4x_reg(trcpidr1, TRCPIDR1),
2490 coresight_etm4x_reg(trcpidr2, TRCPIDR2),
2491 coresight_etm4x_reg(trcpidr3, TRCPIDR3),
2492 coresight_etm4x_reg(trcoslsr, TRCOSLSR),
2493 coresight_etm4x_reg(trcconfig, TRCCONFIGR),
2494 coresight_etm4x_reg(trctraceid, TRCTRACEIDR),
2495 coresight_etm4x_reg(trcdevarch, TRCDEVARCH),
/*
 * Read-only ID registers (TRCIDRn) exposed under the "trcidr" sysfs
 * group; TRCIDR6/7 are architecturally reserved and omitted.  (Array
 * terminator elided in this excerpt.)
 */
2499 static struct attribute *coresight_etmv4_trcidr_attrs[] = {
2500 coresight_etm4x_reg(trcidr0, TRCIDR0),
2501 coresight_etm4x_reg(trcidr1, TRCIDR1),
2502 coresight_etm4x_reg(trcidr2, TRCIDR2),
2503 coresight_etm4x_reg(trcidr3, TRCIDR3),
2504 coresight_etm4x_reg(trcidr4, TRCIDR4),
2505 coresight_etm4x_reg(trcidr5, TRCIDR5),
2506 /* trcidr[6,7] are reserved */
2507 coresight_etm4x_reg(trcidr8, TRCIDR8),
2508 coresight_etm4x_reg(trcidr9, TRCIDR9),
2509 coresight_etm4x_reg(trcidr10, TRCIDR10),
2510 coresight_etm4x_reg(trcidr11, TRCIDR11),
2511 coresight_etm4x_reg(trcidr12, TRCIDR12),
2512 coresight_etm4x_reg(trcidr13, TRCIDR13),
/*
 * sysfs attribute groups: the default (unnamed) configuration group,
 * the "mgmt" group with per-register visibility filtering, and the
 * "trcidr" ID-register group.  (Group .name initializers elided in this
 * excerpt.)
 */
2516 static const struct attribute_group coresight_etmv4_group = {
2517 .attrs = coresight_etmv4_attrs,
2520 static const struct attribute_group coresight_etmv4_mgmt_group = {
2521 .is_visible = coresight_etm4x_attr_reg_implemented,
2522 .attrs = coresight_etmv4_mgmt_attrs,
2526 static const struct attribute_group coresight_etmv4_trcidr_group = {
2527 .attrs = coresight_etmv4_trcidr_attrs,
2531 const struct attribute_group *coresight_etmv4_groups[] = {
2532 &coresight_etmv4_group,
2533 &coresight_etmv4_mgmt_group,
2534 &coresight_etmv4_trcidr_group,