X-Git-Url: http://git.monstr.eu/?a=blobdiff_plain;f=drivers%2Fhwtracing%2Fcoresight%2Fcoresight-etm-perf.c;h=16b83d8b2ac2fc0ed6e9acc4a07314a6a1bec095;hb=d25054ee8d18c937058a1b69b35fa5bfdef471f3;hp=6776956352114b78f9aa9bc5457424c9915b99b3;hpb=a89d0c2acff952935cb37d501eaa702db5a63a05;p=linux-2.6-microblaze.git

diff --git a/drivers/hwtracing/coresight/coresight-etm-perf.c b/drivers/hwtracing/coresight/coresight-etm-perf.c
index 677695635211..16b83d8b2ac2 100644
--- a/drivers/hwtracing/coresight/coresight-etm-perf.c
+++ b/drivers/hwtracing/coresight/coresight-etm-perf.c
@@ -12,6 +12,7 @@
 #include <linux/mm.h>
 #include <linux/init.h>
 #include <linux/perf_event.h>
+#include <linux/percpu-defs.h>
 #include <linux/slab.h>
 #include <linux/types.h>
 #include <linux/workqueue.h>
@@ -22,20 +23,6 @@
 static struct pmu etm_pmu;
 static bool etm_perf_up;
 
-/**
- * struct etm_event_data - Coresight specifics associated to an event
- * @work: Handle to free allocated memory outside IRQ context.
- * @mask: Hold the CPU(s) this event was set for.
- * @snk_config: The sink configuration.
- * @path: An array of path, each slot for one CPU.
- */
-struct etm_event_data {
-	struct work_struct work;
-	cpumask_t mask;
-	void *snk_config;
-	struct list_head **path;
-};
-
 static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
 static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
 
@@ -61,6 +48,18 @@ static const struct attribute_group *etm_pmu_attr_groups[] = {
 	NULL,
 };
 
+static inline struct list_head **
+etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
+{
+	return per_cpu_ptr(data->path, cpu);
+}
+
+static inline struct list_head *
+etm_event_cpu_path(struct etm_event_data *data, int cpu)
+{
+	return *etm_event_cpu_path_ptr(data, cpu);
+}
+
 static void etm_event_read(struct perf_event *event) {}
 
 static int etm_addr_filters_alloc(struct perf_event *event)
@@ -114,29 +113,30 @@ static void free_event_data(struct work_struct *work)
 
 	event_data = container_of(work, struct etm_event_data, work);
 	mask = &event_data->mask;
-	/*
-	 * First deal with the sink configuration. See comment in
-	 * etm_setup_aux() about why we take the first available path.
-	 */
-	if (event_data->snk_config) {
+
+	/* Free the sink buffers, if there are any */
+	if (event_data->snk_config && !WARN_ON(cpumask_empty(mask))) {
 		cpu = cpumask_first(mask);
-		sink = coresight_get_sink(event_data->path[cpu]);
+		sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
 		if (sink_ops(sink)->free_buffer)
 			sink_ops(sink)->free_buffer(event_data->snk_config);
 	}
 
 	for_each_cpu(cpu, mask) {
-		if (!(IS_ERR_OR_NULL(event_data->path[cpu])))
-			coresight_release_path(event_data->path[cpu]);
+		struct list_head **ppath;
+
+		ppath = etm_event_cpu_path_ptr(event_data, cpu);
+		if (!(IS_ERR_OR_NULL(*ppath)))
+			coresight_release_path(*ppath);
+		*ppath = NULL;
 	}
 
-	kfree(event_data->path);
+	free_percpu(event_data->path);
 	kfree(event_data);
 }
 
 static void *alloc_event_data(int cpu)
 {
-	int size;
 	cpumask_t *mask;
 	struct etm_event_data *event_data;
 
@@ -145,16 +145,12 @@ static void *alloc_event_data(int cpu)
 	if (!event_data)
 		return NULL;
 
-	/* Make sure nothing disappears under us */
-	get_online_cpus();
-	size = num_online_cpus();
-
 	mask = &event_data->mask;
 	if (cpu != -1)
 		cpumask_set_cpu(cpu, mask);
 	else
-		cpumask_copy(mask, cpu_online_mask);
-	put_online_cpus();
+		cpumask_copy(mask, cpu_present_mask);
 
 	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a
	 * path associated with any CPU can easily be accessed at any given
	 * time. Threads get assigned to random CPUs in-between this array and
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
 	 */
-	event_data->path = kcalloc(size,
-				   sizeof(struct list_head *), GFP_KERNEL);
+	event_data->path = alloc_percpu(struct list_head *);
+
 	if (!event_data->path) {
 		kfree(event_data);
 		return NULL;
 	}
@@ -206,34 +202,53 @@ static void *etm_setup_aux(int event_cpu, void **pages,
 	 * on the cmd line. As such the "enable_sink" flag in sysFS is reset.
 	 */
 	sink = coresight_get_enabled_sink(true);
-	if (!sink)
+	if (!sink || !sink_ops(sink)->alloc_buffer)
 		goto err;
 
 	mask = &event_data->mask;
 
-	/* Setup the path for each CPU in a trace session */
+	/*
+	 * Setup the path for each CPU in a trace session. We try to build
+	 * trace path for each CPU in the mask. If we don't find an ETM
+	 * for the CPU or fail to build a path, we clear the CPU from the
+	 * mask and continue with the rest. If ever we try to trace on those
+	 * CPUs, we can handle it and fail the session.
+	 */
 	for_each_cpu(cpu, mask) {
+		struct list_head *path;
 		struct coresight_device *csdev;
 
 		csdev = per_cpu(csdev_src, cpu);
-		if (!csdev)
-			goto err;
+		/*
+		 * If there is no ETM associated with this CPU clear it from
+		 * the mask and continue with the rest. If ever we try to trace
+		 * on this CPU, we handle it accordingly.
+		 */
+		if (!csdev) {
+			cpumask_clear_cpu(cpu, mask);
+			continue;
+		}
 
 		/*
 		 * Building a path doesn't enable it, it simply builds a
 		 * list of devices from source to sink that can be
 		 * referenced later when the path is actually needed.
 		 */
-		event_data->path[cpu] = coresight_build_path(csdev, sink);
-		if (IS_ERR(event_data->path[cpu]))
-			goto err;
+		path = coresight_build_path(csdev, sink);
+		if (IS_ERR(path)) {
+			cpumask_clear_cpu(cpu, mask);
+			continue;
+		}
+
+		*etm_event_cpu_path_ptr(event_data, cpu) = path;
 	}
 
-	if (!sink_ops(sink)->alloc_buffer)
+	/* If we don't have any CPUs ready for tracing, abort */
+	cpu = cpumask_first(mask);
+	if (cpu >= nr_cpu_ids)
 		goto err;
 
-	cpu = cpumask_first(mask);
-	/* Get the AUX specific data from the sink buffer */
+	/* Allocate the sink buffer for this session */
 	event_data->snk_config =
 			sink_ops(sink)->alloc_buffer(sink, cpu, pages,
 						     nr_pages, overwrite);
@@ -255,6 +270,7 @@ static void etm_event_start(struct perf_event *event, int flags)
 	struct etm_event_data *event_data;
 	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
+	struct list_head *path;
 
 	if (!csdev)
 		goto fail;
@@ -267,8 +283,9 @@ static void etm_event_start(struct perf_event *event, int flags)
 	if (!event_data)
 		goto fail;
 
+	path = etm_event_cpu_path(event_data, cpu);
 	/* We need a sink, no need to continue without one */
-	sink = coresight_get_sink(event_data->path[cpu]);
+	sink = coresight_get_sink(path);
 	if (WARN_ON_ONCE(!sink || !sink_ops(sink)->set_buffer))
 		goto fail_end_stop;
 
@@ -278,7 +295,7 @@ static void etm_event_start(struct perf_event *event, int flags)
 		goto fail_end_stop;
 
 	/* Nothing will happen without a path */
-	if (coresight_enable_path(event_data->path[cpu], CS_MODE_PERF))
+	if (coresight_enable_path(path, CS_MODE_PERF))
 		goto fail_end_stop;
 
 	/* Tell the perf core the event is alive */
@@ -286,11 +303,13 @@ static void etm_event_start(struct perf_event *event, int flags)
 
 	/* Finally enable the tracer */
 	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
-		goto fail_end_stop;
+		goto fail_disable_path;
 
 out:
 	return;
 
+fail_disable_path:
+	coresight_disable_path(path);
 fail_end_stop:
 	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
 	perf_aux_output_end(handle, 0);
@@ -306,6 +325,7 @@ static void etm_event_stop(struct perf_event *event, int mode)
 	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
 	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
 	struct etm_event_data *event_data = perf_get_aux(handle);
+	struct list_head *path;
 
 	if (event->hw.state == PERF_HES_STOPPED)
 		return;
@@ -313,7 +333,11 @@ static void etm_event_stop(struct perf_event *event, int mode)
 	if (!csdev)
 		return;
 
-	sink = coresight_get_sink(event_data->path[cpu]);
+	path = etm_event_cpu_path(event_data, cpu);
+	if (!path)
+		return;
+
+	sink = coresight_get_sink(path);
 	if (!sink)
 		return;
 
@@ -331,20 +355,13 @@ static void etm_event_stop(struct perf_event *event, int mode)
 		if (!sink_ops(sink)->update_buffer)
 			return;
 
-		sink_ops(sink)->update_buffer(sink, handle,
+		size = sink_ops(sink)->update_buffer(sink, handle,
 					      event_data->snk_config);
-
-		if (!sink_ops(sink)->reset_buffer)
-			return;
-
-		size = sink_ops(sink)->reset_buffer(sink, handle,
-						    event_data->snk_config);
-
 		perf_aux_output_end(handle, size);
 	}
 
 	/* Disabling the path make its elements available to other sessions */
-	coresight_disable_path(event_data->path[cpu]);
+	coresight_disable_path(path);
 }
 
 static int etm_event_add(struct perf_event *event, int mode)
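
Note: the core of this patch is the switch from a kcalloc()'ed array of path pointers, sized with num_online_cpus() at setup time, to a true per-CPU allocation, so that a slot exists for every CPU and the event_data->path accessors stay valid across CPU hotplug. The stand-alone module below is a minimal sketch of that alloc_percpu()/per_cpu_ptr()/free_percpu() idiom; it is illustrative only and not part of the patch. All demo_* names are hypothetical, but demo_path_ptr() deliberately mirrors the etm_event_cpu_path_ptr() helper introduced above.

// SPDX-License-Identifier: GPL-2.0
/*
 * Illustrative sketch (not part of the patch): one pointer slot per
 * CPU, allocated with alloc_percpu() and addressed by CPU number with
 * per_cpu_ptr(), the same pattern used for event_data->path above.
 */
#include <linux/module.h>
#include <linux/percpu.h>
#include <linux/list.h>
#include <linux/slab.h>

/* One "path" pointer per CPU, mirroring event_data->path. */
static struct list_head * __percpu *demo_paths;

/* Mirrors etm_event_cpu_path_ptr(): address of @cpu's slot. */
static struct list_head **demo_path_ptr(int cpu)
{
	return per_cpu_ptr(demo_paths, cpu);
}

static int __init demo_init(void)
{
	int cpu;

	/* Slots come back zero-initialised, i.e. every pointer is NULL. */
	demo_paths = alloc_percpu(struct list_head *);
	if (!demo_paths)
		return -ENOMEM;

	/* Park a dummy list head in each slot, one per possible CPU. */
	for_each_possible_cpu(cpu) {
		struct list_head *head;

		head = kzalloc(sizeof(*head), GFP_KERNEL);
		if (head)
			INIT_LIST_HEAD(head);
		*demo_path_ptr(cpu) = head;	/* may stay NULL on failure */
	}
	return 0;
}

static void __exit demo_exit(void)
{
	int cpu;

	/* Release the per-CPU payloads first, then the slots themselves. */
	for_each_possible_cpu(cpu)
		kfree(*demo_path_ptr(cpu));
	free_percpu(demo_paths);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

Because the per-CPU allocation covers every possible CPU rather than a snapshot of the online ones, indexing by CPU number remains valid for CPUs hotplugged in later; that is what lets alloc_event_data() above drop the get_online_cpus()/put_online_cpus() bracket and copy cpu_present_mask instead of cpu_online_mask.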