// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2023 Qualcomm Innovation Center, Inc. All rights reserved.
 */
#include <linux/amba/bus.h>
#include <linux/bitfield.h>
#include <linux/bitmap.h>
#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>

#include "coresight-priv.h"
#include "coresight-tpdm.h"
22 DEFINE_CORESIGHT_DEVLIST(tpdm_devs, "tpdm");
24 /* Read dataset array member with the index number */
25 static ssize_t tpdm_simple_dataset_show(struct device *dev,
26 struct device_attribute *attr,
29 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
30 struct tpdm_dataset_attribute *tpdm_attr =
31 container_of(attr, struct tpdm_dataset_attribute, attr);
33 switch (tpdm_attr->mem) {
35 if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCR)
37 return sysfs_emit(buf, "0x%x\n",
38 drvdata->dsb->edge_ctrl[tpdm_attr->idx]);
39 case DSB_EDGE_CTRL_MASK:
40 if (tpdm_attr->idx >= TPDM_DSB_MAX_EDCMR)
42 return sysfs_emit(buf, "0x%x\n",
43 drvdata->dsb->edge_ctrl_mask[tpdm_attr->idx]);
45 if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
47 return sysfs_emit(buf, "0x%x\n",
48 drvdata->dsb->trig_patt[tpdm_attr->idx]);
49 case DSB_TRIG_PATT_MASK:
50 if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
52 return sysfs_emit(buf, "0x%x\n",
53 drvdata->dsb->trig_patt_mask[tpdm_attr->idx]);
55 if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
57 return sysfs_emit(buf, "0x%x\n",
58 drvdata->dsb->patt_val[tpdm_attr->idx]);
60 if (tpdm_attr->idx >= TPDM_DSB_MAX_PATT)
62 return sysfs_emit(buf, "0x%x\n",
63 drvdata->dsb->patt_mask[tpdm_attr->idx]);
65 if (tpdm_attr->idx >= drvdata->dsb_msr_num)
67 return sysfs_emit(buf, "0x%x\n",
68 drvdata->dsb->msr[tpdm_attr->idx]);
70 if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
72 return sysfs_emit(buf, "0x%x\n",
73 drvdata->cmb->trig_patt[tpdm_attr->idx]);
74 case CMB_TRIG_PATT_MASK:
75 if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
77 return sysfs_emit(buf, "0x%x\n",
78 drvdata->cmb->trig_patt_mask[tpdm_attr->idx]);
80 if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
82 return sysfs_emit(buf, "0x%x\n",
83 drvdata->cmb->patt_val[tpdm_attr->idx]);
85 if (tpdm_attr->idx >= TPDM_CMB_MAX_PATT)
87 return sysfs_emit(buf, "0x%x\n",
88 drvdata->cmb->patt_mask[tpdm_attr->idx]);
90 if (tpdm_attr->idx >= drvdata->cmb_msr_num)
92 return sysfs_emit(buf, "0x%x\n",
93 drvdata->cmb->msr[tpdm_attr->idx]);
98 /* Write dataset array member with the index number */
99 static ssize_t tpdm_simple_dataset_store(struct device *dev,
100 struct device_attribute *attr,
105 ssize_t ret = -EINVAL;
107 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
108 struct tpdm_dataset_attribute *tpdm_attr =
109 container_of(attr, struct tpdm_dataset_attribute, attr);
111 if (kstrtoul(buf, 0, &val))
114 guard(spinlock)(&drvdata->spinlock);
115 switch (tpdm_attr->mem) {
117 if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
118 drvdata->dsb->trig_patt[tpdm_attr->idx] = val;
122 case DSB_TRIG_PATT_MASK:
123 if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
124 drvdata->dsb->trig_patt_mask[tpdm_attr->idx] = val;
129 if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
130 drvdata->dsb->patt_val[tpdm_attr->idx] = val;
135 if (tpdm_attr->idx < TPDM_DSB_MAX_PATT) {
136 drvdata->dsb->patt_mask[tpdm_attr->idx] = val;
141 if (tpdm_attr->idx < drvdata->dsb_msr_num) {
142 drvdata->dsb->msr[tpdm_attr->idx] = val;
147 if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
148 drvdata->cmb->trig_patt[tpdm_attr->idx] = val;
152 case CMB_TRIG_PATT_MASK:
153 if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
154 drvdata->cmb->trig_patt_mask[tpdm_attr->idx] = val;
159 if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
160 drvdata->cmb->patt_val[tpdm_attr->idx] = val;
165 if (tpdm_attr->idx < TPDM_CMB_MAX_PATT) {
166 drvdata->cmb->patt_mask[tpdm_attr->idx] = val;
171 if (tpdm_attr->idx < drvdata->cmb_msr_num) {
172 drvdata->cmb->msr[tpdm_attr->idx] = val;
183 static umode_t tpdm_dsb_is_visible(struct kobject *kobj,
184 struct attribute *attr, int n)
186 struct device *dev = kobj_to_dev(kobj);
187 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
189 if (drvdata && tpdm_has_dsb_dataset(drvdata))
195 static umode_t tpdm_cmb_is_visible(struct kobject *kobj,
196 struct attribute *attr, int n)
198 struct device *dev = kobj_to_dev(kobj);
199 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
201 if (drvdata && tpdm_has_cmb_dataset(drvdata))
207 static umode_t tpdm_dsb_msr_is_visible(struct kobject *kobj,
208 struct attribute *attr, int n)
210 struct device *dev = kobj_to_dev(kobj);
211 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
212 struct device_attribute *dev_attr =
213 container_of(attr, struct device_attribute, attr);
214 struct tpdm_dataset_attribute *tpdm_attr =
215 container_of(dev_attr, struct tpdm_dataset_attribute, attr);
217 if (tpdm_attr->idx < drvdata->dsb_msr_num)
223 static umode_t tpdm_cmb_msr_is_visible(struct kobject *kobj,
224 struct attribute *attr, int n)
226 struct device *dev = kobj_to_dev(kobj);
227 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
229 struct device_attribute *dev_attr =
230 container_of(attr, struct device_attribute, attr);
231 struct tpdm_dataset_attribute *tpdm_attr =
232 container_of(dev_attr, struct tpdm_dataset_attribute, attr);
234 if (tpdm_attr->idx < drvdata->cmb_msr_num)
240 static void tpdm_reset_datasets(struct tpdm_drvdata *drvdata)
242 if (tpdm_has_dsb_dataset(drvdata)) {
243 memset(drvdata->dsb, 0, sizeof(struct dsb_dataset));
245 drvdata->dsb->trig_ts = true;
246 drvdata->dsb->trig_type = false;
250 memset(drvdata->cmb, 0, sizeof(struct cmb_dataset));
253 static void set_dsb_mode(struct tpdm_drvdata *drvdata, u32 *val)
257 /* Set the test accurate mode */
258 mode = TPDM_DSB_MODE_TEST(drvdata->dsb->mode);
259 *val &= ~TPDM_DSB_CR_TEST_MODE;
260 *val |= FIELD_PREP(TPDM_DSB_CR_TEST_MODE, mode);
262 /* Set the byte lane for high-performance mode */
263 mode = TPDM_DSB_MODE_HPBYTESEL(drvdata->dsb->mode);
264 *val &= ~TPDM_DSB_CR_HPSEL;
265 *val |= FIELD_PREP(TPDM_DSB_CR_HPSEL, mode);
267 /* Set the performance mode */
268 if (drvdata->dsb->mode & TPDM_DSB_MODE_PERF)
269 *val |= TPDM_DSB_CR_MODE;
271 *val &= ~TPDM_DSB_CR_MODE;
274 static void set_dsb_tier(struct tpdm_drvdata *drvdata)
278 val = readl_relaxed(drvdata->base + TPDM_DSB_TIER);
280 /* Clear all relevant fields */
281 val &= ~(TPDM_DSB_TIER_PATT_TSENAB | TPDM_DSB_TIER_PATT_TYPE |
282 TPDM_DSB_TIER_XTRIG_TSENAB);
284 /* Set pattern timestamp type and enablement */
285 if (drvdata->dsb->patt_ts) {
286 val |= TPDM_DSB_TIER_PATT_TSENAB;
287 if (drvdata->dsb->patt_type)
288 val |= TPDM_DSB_TIER_PATT_TYPE;
290 val &= ~TPDM_DSB_TIER_PATT_TYPE;
292 val &= ~TPDM_DSB_TIER_PATT_TSENAB;
295 /* Set trigger timestamp */
296 if (drvdata->dsb->trig_ts)
297 val |= TPDM_DSB_TIER_XTRIG_TSENAB;
299 val &= ~TPDM_DSB_TIER_XTRIG_TSENAB;
301 writel_relaxed(val, drvdata->base + TPDM_DSB_TIER);
304 static void set_dsb_msr(struct tpdm_drvdata *drvdata)
308 for (i = 0; i < drvdata->dsb_msr_num; i++)
309 writel_relaxed(drvdata->dsb->msr[i],
310 drvdata->base + TPDM_DSB_MSR(i));
313 static void tpdm_enable_dsb(struct tpdm_drvdata *drvdata)
317 if (!tpdm_has_dsb_dataset(drvdata))
320 for (i = 0; i < TPDM_DSB_MAX_EDCR; i++)
321 writel_relaxed(drvdata->dsb->edge_ctrl[i],
322 drvdata->base + TPDM_DSB_EDCR(i));
323 for (i = 0; i < TPDM_DSB_MAX_EDCMR; i++)
324 writel_relaxed(drvdata->dsb->edge_ctrl_mask[i],
325 drvdata->base + TPDM_DSB_EDCMR(i));
326 for (i = 0; i < TPDM_DSB_MAX_PATT; i++) {
327 writel_relaxed(drvdata->dsb->patt_val[i],
328 drvdata->base + TPDM_DSB_TPR(i));
329 writel_relaxed(drvdata->dsb->patt_mask[i],
330 drvdata->base + TPDM_DSB_TPMR(i));
331 writel_relaxed(drvdata->dsb->trig_patt[i],
332 drvdata->base + TPDM_DSB_XPR(i));
333 writel_relaxed(drvdata->dsb->trig_patt_mask[i],
334 drvdata->base + TPDM_DSB_XPMR(i));
337 set_dsb_tier(drvdata);
338 set_dsb_msr(drvdata);
340 val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
341 /* Set the mode of DSB dataset */
342 set_dsb_mode(drvdata, &val);
343 /* Set trigger type */
344 if (drvdata->dsb->trig_type)
345 val |= TPDM_DSB_CR_TRIG_TYPE;
347 val &= ~TPDM_DSB_CR_TRIG_TYPE;
348 /* Set the enable bit of DSB control register to 1 */
349 val |= TPDM_DSB_CR_ENA;
350 writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
353 static void set_cmb_tier(struct tpdm_drvdata *drvdata)
357 val = readl_relaxed(drvdata->base + TPDM_CMB_TIER);
359 /* Clear all relevant fields */
360 val &= ~(TPDM_CMB_TIER_PATT_TSENAB | TPDM_CMB_TIER_TS_ALL |
361 TPDM_CMB_TIER_XTRIG_TSENAB);
363 /* Set pattern timestamp type and enablement */
364 if (drvdata->cmb->patt_ts)
365 val |= TPDM_CMB_TIER_PATT_TSENAB;
367 /* Set trigger timestamp */
368 if (drvdata->cmb->trig_ts)
369 val |= TPDM_CMB_TIER_XTRIG_TSENAB;
371 /* Set all timestamp enablement*/
372 if (drvdata->cmb->ts_all)
373 val |= TPDM_CMB_TIER_TS_ALL;
375 writel_relaxed(val, drvdata->base + TPDM_CMB_TIER);
378 static void set_cmb_msr(struct tpdm_drvdata *drvdata)
382 for (i = 0; i < drvdata->cmb_msr_num; i++)
383 writel_relaxed(drvdata->cmb->msr[i],
384 drvdata->base + TPDM_CMB_MSR(i));
387 static void tpdm_enable_cmb(struct tpdm_drvdata *drvdata)
391 if (!tpdm_has_cmb_dataset(drvdata))
394 /* Configure pattern registers */
395 for (i = 0; i < TPDM_CMB_MAX_PATT; i++) {
396 writel_relaxed(drvdata->cmb->patt_val[i],
397 drvdata->base + TPDM_CMB_TPR(i));
398 writel_relaxed(drvdata->cmb->patt_mask[i],
399 drvdata->base + TPDM_CMB_TPMR(i));
400 writel_relaxed(drvdata->cmb->trig_patt[i],
401 drvdata->base + TPDM_CMB_XPR(i));
402 writel_relaxed(drvdata->cmb->trig_patt_mask[i],
403 drvdata->base + TPDM_CMB_XPMR(i));
406 set_cmb_tier(drvdata);
407 set_cmb_msr(drvdata);
409 val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
411 * Set to 0 for continuous CMB collection mode,
412 * 1 for trace-on-change CMB collection mode.
414 if (drvdata->cmb->trace_mode)
415 val |= TPDM_CMB_CR_MODE;
417 val &= ~TPDM_CMB_CR_MODE;
418 /* Set the enable bit of CMB control register to 1 */
419 val |= TPDM_CMB_CR_ENA;
420 writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
424 * TPDM enable operations
425 * The TPDM or Monitor serves as data collection component for various
426 * dataset types. It covers Basic Counts(BC), Tenure Counts(TC),
427 * Continuous Multi-Bit(CMB), Multi-lane CMB(MCMB) and Discrete Single
428 * Bit(DSB). This function will initialize the configuration according
429 * to the dataset type supported by the TPDM.
431 static void __tpdm_enable(struct tpdm_drvdata *drvdata)
433 CS_UNLOCK(drvdata->base);
435 tpdm_enable_dsb(drvdata);
436 tpdm_enable_cmb(drvdata);
438 CS_LOCK(drvdata->base);
441 static int tpdm_enable(struct coresight_device *csdev, struct perf_event *event,
444 struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
446 spin_lock(&drvdata->spinlock);
447 if (drvdata->enable) {
448 spin_unlock(&drvdata->spinlock);
452 __tpdm_enable(drvdata);
453 drvdata->enable = true;
454 spin_unlock(&drvdata->spinlock);
456 dev_dbg(drvdata->dev, "TPDM tracing enabled\n");
460 static void tpdm_disable_dsb(struct tpdm_drvdata *drvdata)
464 if (!tpdm_has_dsb_dataset(drvdata))
467 /* Set the enable bit of DSB control register to 0 */
468 val = readl_relaxed(drvdata->base + TPDM_DSB_CR);
469 val &= ~TPDM_DSB_CR_ENA;
470 writel_relaxed(val, drvdata->base + TPDM_DSB_CR);
473 static void tpdm_disable_cmb(struct tpdm_drvdata *drvdata)
477 if (!tpdm_has_cmb_dataset(drvdata))
480 val = readl_relaxed(drvdata->base + TPDM_CMB_CR);
481 /* Set the enable bit of CMB control register to 0 */
482 val &= ~TPDM_CMB_CR_ENA;
483 writel_relaxed(val, drvdata->base + TPDM_CMB_CR);
486 /* TPDM disable operations */
487 static void __tpdm_disable(struct tpdm_drvdata *drvdata)
489 CS_UNLOCK(drvdata->base);
491 tpdm_disable_dsb(drvdata);
492 tpdm_disable_cmb(drvdata);
494 CS_LOCK(drvdata->base);
497 static void tpdm_disable(struct coresight_device *csdev,
498 struct perf_event *event)
500 struct tpdm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
502 spin_lock(&drvdata->spinlock);
503 if (!drvdata->enable) {
504 spin_unlock(&drvdata->spinlock);
508 __tpdm_disable(drvdata);
509 drvdata->enable = false;
510 spin_unlock(&drvdata->spinlock);
512 dev_dbg(drvdata->dev, "TPDM tracing disabled\n");
515 static const struct coresight_ops_source tpdm_source_ops = {
516 .enable = tpdm_enable,
517 .disable = tpdm_disable,
520 static const struct coresight_ops tpdm_cs_ops = {
521 .source_ops = &tpdm_source_ops,
524 static int tpdm_datasets_setup(struct tpdm_drvdata *drvdata)
528 /* Get the datasets present on the TPDM. */
529 pidr = readl_relaxed(drvdata->base + CORESIGHT_PERIPHIDR0);
530 drvdata->datasets |= pidr & GENMASK(TPDM_DATASETS - 1, 0);
532 if (tpdm_has_dsb_dataset(drvdata) && (!drvdata->dsb)) {
533 drvdata->dsb = devm_kzalloc(drvdata->dev,
534 sizeof(*drvdata->dsb), GFP_KERNEL);
538 if (tpdm_has_cmb_dataset(drvdata) && (!drvdata->cmb)) {
539 drvdata->cmb = devm_kzalloc(drvdata->dev,
540 sizeof(*drvdata->cmb), GFP_KERNEL);
544 tpdm_reset_datasets(drvdata);
549 static ssize_t reset_dataset_store(struct device *dev,
550 struct device_attribute *attr,
556 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
558 ret = kstrtoul(buf, 0, &val);
562 spin_lock(&drvdata->spinlock);
563 tpdm_reset_datasets(drvdata);
564 spin_unlock(&drvdata->spinlock);
568 static DEVICE_ATTR_WO(reset_dataset);
571 * value 1: 64 bits test data
572 * value 2: 32 bits test data
574 static ssize_t integration_test_store(struct device *dev,
575 struct device_attribute *attr,
581 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
583 ret = kstrtoul(buf, 10, &val);
587 if (val != 1 && val != 2)
590 if (!drvdata->enable)
594 val = ATBCNTRL_VAL_64;
596 val = ATBCNTRL_VAL_32;
597 CS_UNLOCK(drvdata->base);
598 writel_relaxed(0x1, drvdata->base + TPDM_ITCNTRL);
600 for (i = 0; i < INTEGRATION_TEST_CYCLE; i++)
601 writel_relaxed(val, drvdata->base + TPDM_ITATBCNTRL);
603 writel_relaxed(0, drvdata->base + TPDM_ITCNTRL);
604 CS_LOCK(drvdata->base);
607 static DEVICE_ATTR_WO(integration_test);
609 static struct attribute *tpdm_attrs[] = {
610 &dev_attr_reset_dataset.attr,
611 &dev_attr_integration_test.attr,
615 static struct attribute_group tpdm_attr_grp = {
619 static ssize_t dsb_mode_show(struct device *dev,
620 struct device_attribute *attr,
623 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
625 return sysfs_emit(buf, "%x\n", drvdata->dsb->mode);
628 static ssize_t dsb_mode_store(struct device *dev,
629 struct device_attribute *attr,
633 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
636 if ((kstrtoul(buf, 0, &val)) || (val < 0) ||
637 (val & ~TPDM_DSB_MODE_MASK))
640 spin_lock(&drvdata->spinlock);
641 drvdata->dsb->mode = val & TPDM_DSB_MODE_MASK;
642 spin_unlock(&drvdata->spinlock);
645 static DEVICE_ATTR_RW(dsb_mode);
647 static ssize_t ctrl_idx_show(struct device *dev,
648 struct device_attribute *attr,
651 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
653 return sysfs_emit(buf, "%u\n",
654 (unsigned int)drvdata->dsb->edge_ctrl_idx);
658 * The EDCR registers can include up to 16 32-bit registers, and each
659 * one can be configured to control up to 16 edge detections(2 bits
660 * control one edge detection). So a total 256 edge detections can be
661 * configured. This function provides a way to set the index number of
662 * the edge detection which needs to be configured.
664 static ssize_t ctrl_idx_store(struct device *dev,
665 struct device_attribute *attr,
669 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
672 if ((kstrtoul(buf, 0, &val)) || (val >= TPDM_DSB_MAX_LINES))
675 spin_lock(&drvdata->spinlock);
676 drvdata->dsb->edge_ctrl_idx = val;
677 spin_unlock(&drvdata->spinlock);
681 static DEVICE_ATTR_RW(ctrl_idx);
684 * This function is used to control the edge detection according
685 * to the index number that has been set.
686 * "edge_ctrl" should be one of the following values.
687 * 0 - Rising edge detection
688 * 1 - Falling edge detection
689 * 2 - Rising and falling edge detection (toggle detection)
691 static ssize_t ctrl_val_store(struct device *dev,
692 struct device_attribute *attr,
696 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
697 unsigned long val, edge_ctrl;
700 if ((kstrtoul(buf, 0, &edge_ctrl)) || (edge_ctrl > 0x2))
703 spin_lock(&drvdata->spinlock);
705 * There are 2 bit per DSB Edge Control line.
706 * Thus we have 16 lines in a 32bit word.
708 reg = EDCR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
709 val = drvdata->dsb->edge_ctrl[reg];
710 val &= ~EDCR_TO_WORD_MASK(drvdata->dsb->edge_ctrl_idx);
711 val |= EDCR_TO_WORD_VAL(edge_ctrl, drvdata->dsb->edge_ctrl_idx);
712 drvdata->dsb->edge_ctrl[reg] = val;
713 spin_unlock(&drvdata->spinlock);
717 static DEVICE_ATTR_WO(ctrl_val);
719 static ssize_t ctrl_mask_store(struct device *dev,
720 struct device_attribute *attr,
724 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
729 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
732 spin_lock(&drvdata->spinlock);
734 * There is 1 bit per DSB Edge Control Mark line.
735 * Thus we have 32 lines in a 32bit word.
737 reg = EDCMR_TO_WORD_IDX(drvdata->dsb->edge_ctrl_idx);
738 set = drvdata->dsb->edge_ctrl_mask[reg];
740 set |= BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
742 set &= ~BIT(EDCMR_TO_WORD_SHIFT(drvdata->dsb->edge_ctrl_idx));
743 drvdata->dsb->edge_ctrl_mask[reg] = set;
744 spin_unlock(&drvdata->spinlock);
748 static DEVICE_ATTR_WO(ctrl_mask);
750 static ssize_t enable_ts_show(struct device *dev,
751 struct device_attribute *attr,
754 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
755 struct tpdm_dataset_attribute *tpdm_attr =
756 container_of(attr, struct tpdm_dataset_attribute, attr);
757 ssize_t size = -EINVAL;
759 if (tpdm_attr->mem == DSB_PATT)
760 size = sysfs_emit(buf, "%u\n",
761 (unsigned int)drvdata->dsb->patt_ts);
762 else if (tpdm_attr->mem == CMB_PATT)
763 size = sysfs_emit(buf, "%u\n",
764 (unsigned int)drvdata->cmb->patt_ts);
770 * value 1: Enable/Disable DSB pattern timestamp
772 static ssize_t enable_ts_store(struct device *dev,
773 struct device_attribute *attr,
777 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
778 struct tpdm_dataset_attribute *tpdm_attr =
779 container_of(attr, struct tpdm_dataset_attribute, attr);
782 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
785 guard(spinlock)(&drvdata->spinlock);
786 if (tpdm_attr->mem == DSB_PATT)
787 drvdata->dsb->patt_ts = !!val;
788 else if (tpdm_attr->mem == CMB_PATT)
789 drvdata->cmb->patt_ts = !!val;
796 static ssize_t set_type_show(struct device *dev,
797 struct device_attribute *attr,
800 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
802 return sysfs_emit(buf, "%u\n",
803 (unsigned int)drvdata->dsb->patt_type);
807 * value 1: Set DSB pattern type
809 static ssize_t set_type_store(struct device *dev,
810 struct device_attribute *attr,
811 const char *buf, size_t size)
813 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
816 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
819 spin_lock(&drvdata->spinlock);
820 drvdata->dsb->patt_type = val;
821 spin_unlock(&drvdata->spinlock);
824 static DEVICE_ATTR_RW(set_type);
826 static ssize_t dsb_trig_type_show(struct device *dev,
827 struct device_attribute *attr, char *buf)
829 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
831 return sysfs_emit(buf, "%u\n",
832 (unsigned int)drvdata->dsb->trig_type);
836 * Trigger type (boolean):
837 * false - Disable trigger type.
838 * true - Enable trigger type.
840 static ssize_t dsb_trig_type_store(struct device *dev,
841 struct device_attribute *attr,
845 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
848 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
851 spin_lock(&drvdata->spinlock);
853 drvdata->dsb->trig_type = true;
855 drvdata->dsb->trig_type = false;
856 spin_unlock(&drvdata->spinlock);
859 static DEVICE_ATTR_RW(dsb_trig_type);
861 static ssize_t dsb_trig_ts_show(struct device *dev,
862 struct device_attribute *attr,
865 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
867 return sysfs_emit(buf, "%u\n",
868 (unsigned int)drvdata->dsb->trig_ts);
872 * Trigger timestamp (boolean):
873 * false - Disable trigger timestamp.
874 * true - Enable trigger timestamp.
876 static ssize_t dsb_trig_ts_store(struct device *dev,
877 struct device_attribute *attr,
881 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
884 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
887 spin_lock(&drvdata->spinlock);
889 drvdata->dsb->trig_ts = true;
891 drvdata->dsb->trig_ts = false;
892 spin_unlock(&drvdata->spinlock);
895 static DEVICE_ATTR_RW(dsb_trig_ts);
897 static ssize_t cmb_mode_show(struct device *dev,
898 struct device_attribute *attr,
901 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
903 return sysfs_emit(buf, "%x\n", drvdata->cmb->trace_mode);
907 static ssize_t cmb_mode_store(struct device *dev,
908 struct device_attribute *attr,
912 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
913 unsigned long trace_mode;
915 if (kstrtoul(buf, 0, &trace_mode) || (trace_mode & ~1UL))
918 spin_lock(&drvdata->spinlock);
919 drvdata->cmb->trace_mode = trace_mode;
920 spin_unlock(&drvdata->spinlock);
923 static DEVICE_ATTR_RW(cmb_mode);
925 static ssize_t cmb_ts_all_show(struct device *dev,
926 struct device_attribute *attr,
929 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
931 return sysfs_emit(buf, "%u\n",
932 (unsigned int)drvdata->cmb->ts_all);
935 static ssize_t cmb_ts_all_store(struct device *dev,
936 struct device_attribute *attr,
940 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
943 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
946 guard(spinlock)(&drvdata->spinlock);
948 drvdata->cmb->ts_all = true;
950 drvdata->cmb->ts_all = false;
954 static DEVICE_ATTR_RW(cmb_ts_all);
956 static ssize_t cmb_trig_ts_show(struct device *dev,
957 struct device_attribute *attr,
960 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
962 return sysfs_emit(buf, "%u\n",
963 (unsigned int)drvdata->cmb->trig_ts);
966 static ssize_t cmb_trig_ts_store(struct device *dev,
967 struct device_attribute *attr,
971 struct tpdm_drvdata *drvdata = dev_get_drvdata(dev->parent);
974 if ((kstrtoul(buf, 0, &val)) || (val & ~1UL))
977 guard(spinlock)(&drvdata->spinlock);
979 drvdata->cmb->trig_ts = true;
981 drvdata->cmb->trig_ts = false;
985 static DEVICE_ATTR_RW(cmb_trig_ts);
987 static struct attribute *tpdm_dsb_edge_attrs[] = {
988 &dev_attr_ctrl_idx.attr,
989 &dev_attr_ctrl_val.attr,
990 &dev_attr_ctrl_mask.attr,
991 DSB_EDGE_CTRL_ATTR(0),
992 DSB_EDGE_CTRL_ATTR(1),
993 DSB_EDGE_CTRL_ATTR(2),
994 DSB_EDGE_CTRL_ATTR(3),
995 DSB_EDGE_CTRL_ATTR(4),
996 DSB_EDGE_CTRL_ATTR(5),
997 DSB_EDGE_CTRL_ATTR(6),
998 DSB_EDGE_CTRL_ATTR(7),
999 DSB_EDGE_CTRL_ATTR(8),
1000 DSB_EDGE_CTRL_ATTR(9),
1001 DSB_EDGE_CTRL_ATTR(10),
1002 DSB_EDGE_CTRL_ATTR(11),
1003 DSB_EDGE_CTRL_ATTR(12),
1004 DSB_EDGE_CTRL_ATTR(13),
1005 DSB_EDGE_CTRL_ATTR(14),
1006 DSB_EDGE_CTRL_ATTR(15),
1007 DSB_EDGE_CTRL_MASK_ATTR(0),
1008 DSB_EDGE_CTRL_MASK_ATTR(1),
1009 DSB_EDGE_CTRL_MASK_ATTR(2),
1010 DSB_EDGE_CTRL_MASK_ATTR(3),
1011 DSB_EDGE_CTRL_MASK_ATTR(4),
1012 DSB_EDGE_CTRL_MASK_ATTR(5),
1013 DSB_EDGE_CTRL_MASK_ATTR(6),
1014 DSB_EDGE_CTRL_MASK_ATTR(7),
1018 static struct attribute *tpdm_dsb_trig_patt_attrs[] = {
1019 DSB_TRIG_PATT_ATTR(0),
1020 DSB_TRIG_PATT_ATTR(1),
1021 DSB_TRIG_PATT_ATTR(2),
1022 DSB_TRIG_PATT_ATTR(3),
1023 DSB_TRIG_PATT_ATTR(4),
1024 DSB_TRIG_PATT_ATTR(5),
1025 DSB_TRIG_PATT_ATTR(6),
1026 DSB_TRIG_PATT_ATTR(7),
1027 DSB_TRIG_PATT_MASK_ATTR(0),
1028 DSB_TRIG_PATT_MASK_ATTR(1),
1029 DSB_TRIG_PATT_MASK_ATTR(2),
1030 DSB_TRIG_PATT_MASK_ATTR(3),
1031 DSB_TRIG_PATT_MASK_ATTR(4),
1032 DSB_TRIG_PATT_MASK_ATTR(5),
1033 DSB_TRIG_PATT_MASK_ATTR(6),
1034 DSB_TRIG_PATT_MASK_ATTR(7),
1038 static struct attribute *tpdm_dsb_patt_attrs[] = {
1047 DSB_PATT_MASK_ATTR(0),
1048 DSB_PATT_MASK_ATTR(1),
1049 DSB_PATT_MASK_ATTR(2),
1050 DSB_PATT_MASK_ATTR(3),
1051 DSB_PATT_MASK_ATTR(4),
1052 DSB_PATT_MASK_ATTR(5),
1053 DSB_PATT_MASK_ATTR(6),
1054 DSB_PATT_MASK_ATTR(7),
1056 &dev_attr_set_type.attr,
1060 static struct attribute *tpdm_dsb_msr_attrs[] = {
1096 static struct attribute *tpdm_cmb_trig_patt_attrs[] = {
1097 CMB_TRIG_PATT_ATTR(0),
1098 CMB_TRIG_PATT_ATTR(1),
1099 CMB_TRIG_PATT_MASK_ATTR(0),
1100 CMB_TRIG_PATT_MASK_ATTR(1),
1104 static struct attribute *tpdm_cmb_patt_attrs[] = {
1107 CMB_PATT_MASK_ATTR(0),
1108 CMB_PATT_MASK_ATTR(1),
1113 static struct attribute *tpdm_cmb_msr_attrs[] = {
1149 static struct attribute *tpdm_dsb_attrs[] = {
1150 &dev_attr_dsb_mode.attr,
1151 &dev_attr_dsb_trig_ts.attr,
1152 &dev_attr_dsb_trig_type.attr,
1156 static struct attribute *tpdm_cmb_attrs[] = {
1157 &dev_attr_cmb_mode.attr,
1158 &dev_attr_cmb_ts_all.attr,
1159 &dev_attr_cmb_trig_ts.attr,
1163 static struct attribute_group tpdm_dsb_attr_grp = {
1164 .attrs = tpdm_dsb_attrs,
1165 .is_visible = tpdm_dsb_is_visible,
1168 static struct attribute_group tpdm_dsb_edge_grp = {
1169 .attrs = tpdm_dsb_edge_attrs,
1170 .is_visible = tpdm_dsb_is_visible,
1174 static struct attribute_group tpdm_dsb_trig_patt_grp = {
1175 .attrs = tpdm_dsb_trig_patt_attrs,
1176 .is_visible = tpdm_dsb_is_visible,
1177 .name = "dsb_trig_patt",
1180 static struct attribute_group tpdm_dsb_patt_grp = {
1181 .attrs = tpdm_dsb_patt_attrs,
1182 .is_visible = tpdm_dsb_is_visible,
1186 static struct attribute_group tpdm_dsb_msr_grp = {
1187 .attrs = tpdm_dsb_msr_attrs,
1188 .is_visible = tpdm_dsb_msr_is_visible,
1192 static struct attribute_group tpdm_cmb_attr_grp = {
1193 .attrs = tpdm_cmb_attrs,
1194 .is_visible = tpdm_cmb_is_visible,
1197 static struct attribute_group tpdm_cmb_trig_patt_grp = {
1198 .attrs = tpdm_cmb_trig_patt_attrs,
1199 .is_visible = tpdm_cmb_is_visible,
1200 .name = "cmb_trig_patt",
1203 static struct attribute_group tpdm_cmb_patt_grp = {
1204 .attrs = tpdm_cmb_patt_attrs,
1205 .is_visible = tpdm_cmb_is_visible,
1209 static struct attribute_group tpdm_cmb_msr_grp = {
1210 .attrs = tpdm_cmb_msr_attrs,
1211 .is_visible = tpdm_cmb_msr_is_visible,
1215 static const struct attribute_group *tpdm_attr_grps[] = {
1219 &tpdm_dsb_trig_patt_grp,
1223 &tpdm_cmb_trig_patt_grp,
1229 static int tpdm_probe(struct amba_device *adev, const struct amba_id *id)
1232 struct device *dev = &adev->dev;
1233 struct coresight_platform_data *pdata;
1234 struct tpdm_drvdata *drvdata;
1235 struct coresight_desc desc = { 0 };
1238 pdata = coresight_get_platform_data(dev);
1240 return PTR_ERR(pdata);
1241 adev->dev.platform_data = pdata;
1244 drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
1247 drvdata->dev = &adev->dev;
1248 dev_set_drvdata(dev, drvdata);
1250 base = devm_ioremap_resource(dev, &adev->res);
1252 return PTR_ERR(base);
1254 drvdata->base = base;
1256 ret = tpdm_datasets_setup(drvdata);
1260 if (drvdata && tpdm_has_dsb_dataset(drvdata))
1261 of_property_read_u32(drvdata->dev->of_node,
1262 "qcom,dsb-msrs-num", &drvdata->dsb_msr_num);
1264 if (drvdata && tpdm_has_cmb_dataset(drvdata))
1265 of_property_read_u32(drvdata->dev->of_node,
1266 "qcom,cmb-msrs-num", &drvdata->cmb_msr_num);
1268 /* Set up coresight component description */
1269 desc.name = coresight_alloc_device_name(&tpdm_devs, dev);
1272 desc.type = CORESIGHT_DEV_TYPE_SOURCE;
1273 desc.subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_TPDM;
1274 desc.ops = &tpdm_cs_ops;
1275 desc.pdata = adev->dev.platform_data;
1276 desc.dev = &adev->dev;
1277 desc.access = CSDEV_ACCESS_IOMEM(base);
1278 desc.groups = tpdm_attr_grps;
1279 drvdata->csdev = coresight_register(&desc);
1280 if (IS_ERR(drvdata->csdev))
1281 return PTR_ERR(drvdata->csdev);
1283 spin_lock_init(&drvdata->spinlock);
1285 /* Decrease pm refcount when probe is done.*/
1286 pm_runtime_put(&adev->dev);
1291 static void tpdm_remove(struct amba_device *adev)
1293 struct tpdm_drvdata *drvdata = dev_get_drvdata(&adev->dev);
1295 coresight_unregister(drvdata->csdev);
1299 * Different TPDM has different periph id.
1300 * The difference is 0-7 bits' value. So ignore 0-7 bits.
1302 static struct amba_id tpdm_ids[] = {
1310 static struct amba_driver tpdm_driver = {
1312 .name = "coresight-tpdm",
1313 .suppress_bind_attrs = true,
1315 .probe = tpdm_probe,
1316 .id_table = tpdm_ids,
1317 .remove = tpdm_remove,
/* Register the AMBA driver and declare module metadata. */
module_amba_driver(tpdm_driver);

MODULE_LICENSE("GPL");
MODULE_DESCRIPTION("Trace, Profiling & Diagnostic Monitor driver");