// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/coresight.h>
#include <linux/coresight-pmu.h>
#include <linux/cpumask.h>
#include <linux/device.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/perf_event.h>
#include <linux/percpu-defs.h>
#include <linux/slab.h>
#include <linux/stringhash.h>
#include <linux/types.h>
#include <linux/workqueue.h>

#include "coresight-etm-perf.h"
#include "coresight-priv.h"

static struct pmu etm_pmu;
static bool etm_perf_up;

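/*
 * Per-CPU state: the perf AUX output handle for the session running on
 * each CPU, and the CoreSight source (ETM) servicing that CPU.
 */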
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);

/*
 * The PMU formats were originally for ETMv3.5/PTM's ETMCR 'config';
 * now take them as general formats and apply them to all ETMs.
 */
PMU_FORMAT_ATTR(cycacc,		"config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(contextid,	"config:" __stringify(ETM_OPT_CTXTID));
PMU_FORMAT_ATTR(timestamp,	"config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack,	"config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid,		"config2:0-31");

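/*
 * For example (assuming the PMU registers under CORESIGHT_ETM_PMU_NAME,
 * i.e. "cs_etm"), userspace can request these options with:
 *
 *	perf record -e cs_etm/cycacc,timestamp/ -- <cmd>
 *
 * and select a specific sink with the @ notation, which the perf tool
 * translates into the 'sinkid' hash written to config2:
 *
 *	perf record -e cs_etm/@tmc_etr0/ -- <cmd>
 */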
static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	NULL,
};

static const struct attribute_group etm_pmu_format_group = {
	.name   = "format",
	.attrs  = etm_config_formats_attr,
};

static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name   = "sinks",
	.attrs  = etm_config_sinks_attr,
};

static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	NULL,
};

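/*
 * Helpers to reach the per-CPU source-to-sink path stored in the
 * session's percpu 'path' array.
 */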
static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}

static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}

static void etm_event_read(struct perf_event *event) {}

static int etm_addr_filters_alloc(struct perf_event *event)
{
	struct etm_filters *filters;
	int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu);

	filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node);
	if (!filters)
		return -ENOMEM;

	if (event->parent)
		memcpy(filters, event->parent->hw.addr_filters,
		       sizeof(*filters));

	event->hw.addr_filters = filters;

	return 0;
}

static void etm_event_destroy(struct perf_event *event)
{
	kfree(event->hw.addr_filters);
	event->hw.addr_filters = NULL;
}

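/*
 * Returning -ENOENT when the event type doesn't match tells the perf
 * core to keep looking for another PMU that can service this event.
 */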
static int etm_event_init(struct perf_event *event)
{
	int ret = 0;

	if (event->attr.type != etm_pmu.type) {
		ret = -ENOENT;
		goto out;
	}

	ret = etm_addr_filters_alloc(event);
	if (ret)
		goto out;

	event->destroy = etm_event_destroy;
out:
	return ret;
}

static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (!event_data->snk_config)
		return;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}

static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}

static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);
	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}

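/*
 * Freeing the session is deferred to process context via the workqueue:
 * free_aux() can be called from atomic context, but releasing a path
 * may sleep.
 */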
static void etm_free_aux(void *data)
{
	struct etm_event_data *event_data = data;

	schedule_work(&event_data->work);
}

static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink = NULL;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2) {
		id = (u32)event->attr.config2;
		sink = coresight_get_sink_by_id(id);
	}

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * a trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * No sink provided - look for a default sink for one of the
		 * devices. At present we only support topologies where all
		 * CPUs use the same sink [N:1], so we only need to find one.
		 * coresight_build_path() below will remove any CPU that does
		 * not attach to the sink, or all CPUs if no sink was found.
		 */
		if (!sink)
			sink = coresight_find_default_sink(csdev);

		/*
		 * Building a path doesn't enable it; it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* no sink found for any CPU - cannot trace */
	if (!sink)
		goto err;

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/* Allocate the sink buffer for this session */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}

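/*
 * Start sequence: claim the AUX ring buffer for this CPU, enable the
 * path from this CPU's ETM to the sink, then enable the tracer itself.
 * Unwind in reverse order on failure.
 */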
static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	if (!csdev)
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	/*
	 * Check if this ETM is allowed to trace, as decided
	 * at etm_setup_aux(). This could be due to an unreachable
	 * sink from this ETM. We can't do much in this case if
	 * the sink was specified or hinted to the driver. For
	 * now, simply don't record anything on this ETM.
	 */
	if (!cpumask_test_cpu(cpu, &event_data->mask))
		goto fail_end_stop;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Tell the perf core the event is alive */
	event->hw.state = 0;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

out:
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
fail:
	event->hw.state = PERF_HES_STOPPED;
	goto out;
}

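/*
 * Stop sequence: disable the tracer first so no new data flows, then
 * collect what the sink gathered and hand it to the perf core before
 * releasing the path.
 */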
static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct etm_event_data *event_data = perf_get_aux(handle);
	struct list_head *path;

	if (event->hw.state == PERF_HES_STOPPED)
		return;

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
						     event_data->snk_config);
		perf_aux_output_end(handle, size);
	}

	/* Disabling the path makes its elements available to other sessions */
	coresight_disable_path(path);
}

static int etm_event_add(struct perf_event *event, int mode)
{
	int ret = 0;
	struct hw_perf_event *hwc = &event->hw;

	if (mode & PERF_EF_START) {
		etm_event_start(event, 0);
		if (hwc->state & PERF_HES_STOPPED)
			ret = -EINVAL;
	} else {
		hwc->state = PERF_HES_STOPPED;
	}

	return ret;
}

static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}

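/*
 * Example (illustrative): address filters arrive from perf's --filter
 * option, e.g.:
 *
 *	perf record -e cs_etm// --filter 'filter 0x1000/0x100@/bin/ls' -- ls
 *
 * 'filter' denotes an address range; 'start' and 'stop' denote single
 * address triggers. The validation below enforces the ETM comparator
 * budget and keeps the two modes mutually exclusive.
 */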
static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/*
		 * The existing code relies on START/STOP filters being
		 * address filters; filter::size==0 means single address
		 * trigger.
		 */
		if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
		    filter->action == PERF_ADDR_FILTER_ACTION_STOP) {
			if (filter->size)
				return -EOPNOTSUPP;
			address = true;
		} else {
			range = true;
		}

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to coexist; they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}

static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}

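/*
 * Create (or remove) a "cpuN" symlink from the PMU's sysfs directory to
 * the CoreSight device acting as the tracer for that CPU, and update
 * the per-CPU csdev_src table accordingly.
 */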
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(etm_perf_symlink);

static ssize_t etm_perf_sink_name_show(struct device *dev,
				       struct device_attribute *dattr,
				       char *buf)
{
	struct dev_ext_attribute *ea;

	ea = container_of(dattr, struct dev_ext_attribute, attr);
	return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var));
}

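/*
 * Each sink is advertised as a read-only file in the PMU's "sinks"
 * group. The file name is the sink's device name and its content is
 * the hash userspace places in config2 to select that sink.
 */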
int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	int ret;
	unsigned long hash;
	const char *name;
	struct device *pmu_dev = etm_pmu.dev;
	struct device *dev = &csdev->dev;
	struct dev_ext_attribute *ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	if (csdev->ea != NULL)
		return -EINVAL;

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return -ENOMEM;

	name = dev_name(dev);
	/* See function coresight_get_sink_by_id() to know where this is used */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return -ENOMEM;

	ea->attr.attr.mode = 0444;
	ea->attr.show = etm_perf_sink_name_show;
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, "sinks");
	if (!ret)
		csdev->ea = ea;

	return ret;
}

void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	struct device *pmu_dev = etm_pmu.dev;
	struct dev_ext_attribute *ea = csdev->ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	if (!ea)
		return;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, "sinks");
	csdev->ea = NULL;
}

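/*
 * PERF_PMU_CAP_ITRACE marks the AUX data as instruction trace so the
 * perf tooling knows to decode it; PERF_PMU_CAP_EXCLUSIVE lets the core
 * reject conflicting events, since the trace hardware can only service
 * one session at a time.
 */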
int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}

void __exit etm_perf_exit(void)
{
	perf_pmu_unregister(&etm_pmu);
}