sound/soc/sof/trace.c
// SPDX-License-Identifier: (GPL-2.0-only OR BSD-3-Clause)
//
// This file is provided under a dual BSD/GPLv2 license.  When using or
// redistributing this file, you may do so under either license.
//
// Copyright(c) 2018 Intel Corporation. All rights reserved.
//
// Author: Liam Girdwood <liam.r.girdwood@linux.intel.com>
//

#include <linux/debugfs.h>
#include <linux/sched/signal.h>
#include "sof-priv.h"
#include "ops.h"

#define TRACE_FILTER_ELEMENTS_PER_ENTRY 4
#define TRACE_FILTER_MAX_CONFIG_STRING_LENGTH 1024

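/*
 * Runtime trace filtering.
 *
 * The debugfs "filter" entry accepts a configuration string of one or more
 * entries separated by ';'. Each entry is parsed by trace_filter_parse_entry()
 * below as four whitespace-separated fields:
 *
 *     <log_level> <uuid_id (hex)> <pipe_id> <comp_id>
 *
 * A uuid_id of 0 and a negative pipe_id or comp_id mean "do not filter by
 * this field"; the log level is always applied and terminates the entry.
 * For example, "4 0x0 -1 -1;" sets log level 4 for all components. The
 * debugfs path below is illustrative and depends on where debugfs is
 * mounted and on the SOF debugfs root:
 *
 *     echo "4 0x0 -1 -1;" > /sys/kernel/debug/sof/filter
 */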
static int trace_filter_append_elem(struct snd_sof_dev *sdev, uint32_t key, uint32_t value,
                                    struct sof_ipc_trace_filter_elem *elem_list,
                                    int capacity, int *counter)
{
        if (*counter >= capacity)
                return -ENOMEM;

        elem_list[*counter].key = key;
        elem_list[*counter].value = value;
        ++*counter;

        return 0;
}

static int trace_filter_parse_entry(struct snd_sof_dev *sdev, const char *line,
                                    struct sof_ipc_trace_filter_elem *elem,
                                    int capacity, int *counter)
{
        int len = strlen(line);
        int cnt = *counter;
        uint32_t uuid_id;
        int log_level;
        int pipe_id;
        int comp_id;
        int read;
        int ret;

        /* ignore empty content */
        ret = sscanf(line, " %n", &read);
        if (!ret && read == len)
                return len;

        ret = sscanf(line, " %d %x %d %d %n", &log_level, &uuid_id, &pipe_id, &comp_id, &read);
        if (ret != TRACE_FILTER_ELEMENTS_PER_ENTRY || read != len) {
                dev_err(sdev->dev, "error: invalid trace filter entry '%s'\n", line);
                return -EINVAL;
        }

        if (uuid_id > 0) {
                ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_UUID,
                                               uuid_id, elem, capacity, &cnt);
                if (ret)
                        return ret;
        }
        if (pipe_id >= 0) {
                ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_PIPE,
                                               pipe_id, elem, capacity, &cnt);
                if (ret)
                        return ret;
        }
        if (comp_id >= 0) {
                ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_BY_COMP,
                                               comp_id, elem, capacity, &cnt);
                if (ret)
                        return ret;
        }

        ret = trace_filter_append_elem(sdev, SOF_IPC_TRACE_FILTER_ELEM_SET_LEVEL |
                                       SOF_IPC_TRACE_FILTER_ELEM_FIN,
                                       log_level, elem, capacity, &cnt);
        if (ret)
                return ret;

        /* update the counter only if the whole entry was parsed successfully */
        *counter = cnt;

        return len;
}

static int trace_filter_parse(struct snd_sof_dev *sdev, char *string,
                              int *out_elem_cnt,
                              struct sof_ipc_trace_filter_elem **out)
{
        static const char entry_delimiter[] = ";";
        char *entry = string;
        int capacity = 0;
        int entry_len;
        int cnt = 0;
        /*
         * Each entry produces at least 1 and at most
         * TRACE_FILTER_ELEMENTS_PER_ENTRY IPC elements, depending on its
         * content. Size the element array for the worst case, i.e. every
         * entry producing the maximum number of elements.
         */
        while (entry) {
                capacity += TRACE_FILTER_ELEMENTS_PER_ENTRY;
                entry = strchr(entry + 1, entry_delimiter[0]);
        }
        *out = kmalloc(capacity * sizeof(**out), GFP_KERNEL);
        if (!*out)
                return -ENOMEM;

        /* split input string by ';', and parse each entry separately in trace_filter_parse_entry */
        while ((entry = strsep(&string, entry_delimiter))) {
                entry_len = trace_filter_parse_entry(sdev, entry, *out, capacity, &cnt);
                if (entry_len < 0) {
                        dev_err(sdev->dev, "error: %s failed for '%s', %d\n", __func__, entry,
                                entry_len);
                        return -EINVAL;
                }
        }

        *out_elem_cnt = cnt;

        return 0;
}

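/*
 * Send the parsed filter elements to the DSP as a single
 * SOF_IPC_TRACE_FILTER_UPDATE message. The device is resumed around the IPC;
 * -EACCES from pm_runtime_get_sync() (runtime PM disabled) is not treated as
 * an error.
 */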
static int sof_ipc_trace_update_filter(struct snd_sof_dev *sdev, int num_elems,
                                       struct sof_ipc_trace_filter_elem *elems)
{
        struct sof_ipc_trace_filter *msg;
        struct sof_ipc_reply reply;
        size_t size;
        int ret;

        size = struct_size(msg, elems, num_elems);
        if (size > SOF_IPC_MSG_MAX_SIZE)
                return -EINVAL;

        msg = kmalloc(size, GFP_KERNEL);
        if (!msg)
                return -ENOMEM;

        msg->hdr.size = size;
        msg->hdr.cmd = SOF_IPC_GLB_TRACE_MSG | SOF_IPC_TRACE_FILTER_UPDATE;
        msg->elem_cnt = num_elems;
        memcpy(&msg->elems[0], elems, num_elems * sizeof(*elems));

        ret = pm_runtime_get_sync(sdev->dev);
        if (ret < 0 && ret != -EACCES) {
                pm_runtime_put_noidle(sdev->dev);
                dev_err(sdev->dev, "error: enabling device failed: %d\n", ret);
                goto error;
        }
        ret = sof_ipc_tx_message(sdev->ipc, msg->hdr.cmd, msg, msg->hdr.size,
                                 &reply, sizeof(reply));
        pm_runtime_mark_last_busy(sdev->dev);
        pm_runtime_put_autosuspend(sdev->dev);

error:
        kfree(msg);
        return ret ? ret : reply.error;
}

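/*
 * debugfs write handler for the "filter" entry: copy the user string, parse
 * it into IPC filter elements and, if any were produced, send them to the
 * DSP. The whole configuration string must arrive in a single write of at
 * most TRACE_FILTER_MAX_CONFIG_STRING_LENGTH bytes.
 */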
static ssize_t sof_dfsentry_trace_filter_write(struct file *file, const char __user *from,
                                               size_t count, loff_t *ppos)
{
        struct snd_sof_dfsentry *dfse = file->private_data;
        struct sof_ipc_trace_filter_elem *elems = NULL;
        struct snd_sof_dev *sdev = dfse->sdev;
        loff_t pos = 0;
        int num_elems;
        char *string;
        int ret;

        if (count > TRACE_FILTER_MAX_CONFIG_STRING_LENGTH) {
                dev_err(sdev->dev, "%s: input too long, %zu > %d\n", __func__, count,
                        TRACE_FILTER_MAX_CONFIG_STRING_LENGTH);
                return -EINVAL;
        }

        string = kmalloc(count + 1, GFP_KERNEL);
        if (!string)
                return -ENOMEM;

        /* ensure NUL termination */
        string[count] = 0;
        ret = simple_write_to_buffer(string, count, &pos, from, count);
        if (ret < 0)
                goto error;

        ret = trace_filter_parse(sdev, string, &num_elems, &elems);
        if (ret < 0) {
                dev_err(sdev->dev, "error: fail in trace_filter_parse, %d\n", ret);
                goto error;
        }

        if (num_elems) {
                ret = sof_ipc_trace_update_filter(sdev, num_elems, elems);
                if (ret < 0) {
                        dev_err(sdev->dev, "error: fail in sof_ipc_trace_update_filter %d\n", ret);
                        goto error;
                }
        }
        ret = count;
error:
        kfree(string);
        kfree(elems);
        return ret;
}

static const struct file_operations sof_dfs_trace_filter_fops = {
        .open = simple_open,
        .write = sof_dfsentry_trace_filter_write,
        .llseek = default_llseek,
};

static int trace_debugfs_filter_create(struct snd_sof_dev *sdev)
{
        struct snd_sof_dfsentry *dfse;

        dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
        if (!dfse)
                return -ENOMEM;

        dfse->sdev = sdev;
        dfse->type = SOF_DFSENTRY_TYPE_BUF;

        debugfs_create_file("filter", 0200, sdev->debugfs_root, dfse,
                            &sof_dfs_trace_filter_fops);
        /* add to dfsentry list */
        list_add(&dfse->list, &sdev->dfsentry_list);

        return 0;
}

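/*
 * Number of trace bytes readable at 'pos' without blocking. host_offset is
 * the DMA write pointer last reported by the firmware. Worked example with
 * buffer_size = 0x2000: for pos = 0x1800 and host_offset = 0x0400 the writer
 * has wrapped, so the 0x0800 bytes up to the end of the buffer are returned
 * first; the wrapped data is picked up once the reader's position itself
 * wraps back to 0.
 */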
static size_t sof_trace_avail(struct snd_sof_dev *sdev,
                              loff_t pos, size_t buffer_size)
{
        loff_t host_offset = READ_ONCE(sdev->host_offset);

        /*
         * If the host offset is less than the local pos, the write pointer
         * of the host DMA buffer has wrapped. Output the trace data at the
         * end of the host DMA buffer first.
         */
        if (host_offset < pos)
                return buffer_size - pos;

        /* If there is available trace data now, it is unnecessary to wait. */
        if (host_offset > pos)
                return host_offset - pos;

        return 0;
}

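/*
 * Like sof_trace_avail(), but sleep until trace data is available. Waiters
 * are woken by snd_sof_trace_update_pos() when the firmware reports a new
 * host offset, by snd_sof_trace_notify_for_error() on a DSP error, or by
 * snd_sof_release_trace() when tracing stops. Returns 0 (EOF) once tracing
 * has ended and no more data is pending.
 */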
static size_t sof_wait_trace_avail(struct snd_sof_dev *sdev,
                                   loff_t pos, size_t buffer_size)
{
        wait_queue_entry_t wait;
        size_t ret = sof_trace_avail(sdev, pos, buffer_size);

        /* data immediately available */
        if (ret)
                return ret;

        if (!sdev->dtrace_is_enabled && sdev->dtrace_draining) {
                /*
                 * tracing has ended and all traces have been
                 * read by client, return EOF
                 */
                sdev->dtrace_draining = false;
                return 0;
        }

        /* wait for available trace data from FW */
        init_waitqueue_entry(&wait, current);
        set_current_state(TASK_INTERRUPTIBLE);
        add_wait_queue(&sdev->trace_sleep, &wait);

        if (!signal_pending(current)) {
                /* set timeout to max value, no error code */
                schedule_timeout(MAX_SCHEDULE_TIMEOUT);
        }
        remove_wait_queue(&sdev->trace_sleep, &wait);

        return sof_trace_avail(sdev, pos, buffer_size);
}

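/*
 * debugfs read handler for the "trace" entry. *ppos grows monotonically and
 * is reduced modulo the DMA buffer size to index the ring buffer; the read
 * blocks in sof_wait_trace_avail() until the firmware produces new data or
 * tracing is stopped.
 */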
static ssize_t sof_dfsentry_trace_read(struct file *file, char __user *buffer,
                                       size_t count, loff_t *ppos)
{
        struct snd_sof_dfsentry *dfse = file->private_data;
        struct snd_sof_dev *sdev = dfse->sdev;
        unsigned long rem;
        loff_t lpos = *ppos;
        size_t avail, buffer_size = dfse->size;
        u64 lpos_64;

        /* make sure we know about any failures on the DSP side */
        sdev->dtrace_error = false;

        /* check pos and count */
        if (lpos < 0)
                return -EINVAL;
        if (!count)
                return 0;

        /* check for buffer wrap and count overflow */
        lpos_64 = lpos;
        lpos = do_div(lpos_64, buffer_size);

        if (count > buffer_size - lpos) /* min() not used to avoid sparse warnings */
                count = buffer_size - lpos;

        /* get available count based on current host offset */
        avail = sof_wait_trace_avail(sdev, lpos, buffer_size);
        if (sdev->dtrace_error) {
                dev_err(sdev->dev, "error: trace IO error\n");
                return -EIO;
        }

        /* make sure count is <= avail */
        count = avail > count ? count : avail;

        /* copy available trace data to debugfs */
        rem = copy_to_user(buffer, ((u8 *)(dfse->buf) + lpos), count);
        if (rem)
                return -EFAULT;

        *ppos += count;

        /* move debugfs reading position */
        return count;
}

static int sof_dfsentry_trace_release(struct inode *inode, struct file *file)
{
        struct snd_sof_dfsentry *dfse = inode->i_private;
        struct snd_sof_dev *sdev = dfse->sdev;

        /* avoid duplicate traces at next open */
        if (!sdev->dtrace_is_enabled)
                sdev->host_offset = 0;

        return 0;
}

static const struct file_operations sof_dfs_trace_fops = {
        .open = simple_open,
        .read = sof_dfsentry_trace_read,
        .llseek = default_llseek,
        .release = sof_dfsentry_trace_release,
};

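/*
 * Expose the trace facilities in debugfs: a write-only "filter" entry for
 * runtime filter updates and a read-only "trace" entry backed by the DMA
 * trace buffer. The trace stream can then be captured with, for example
 * (path illustrative):
 *
 *     cat /sys/kernel/debug/sof/trace > sof-trace.bin
 */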
static int trace_debugfs_create(struct snd_sof_dev *sdev)
{
        struct snd_sof_dfsentry *dfse;
        int ret;

        if (!sdev)
                return -EINVAL;

        ret = trace_debugfs_filter_create(sdev);
        if (ret < 0)
                dev_err(sdev->dev, "error: fail in %s, %d\n", __func__, ret);

        dfse = devm_kzalloc(sdev->dev, sizeof(*dfse), GFP_KERNEL);
        if (!dfse)
                return -ENOMEM;

        dfse->type = SOF_DFSENTRY_TYPE_BUF;
        dfse->buf = sdev->dmatb.area;
        dfse->size = sdev->dmatb.bytes;
        dfse->sdev = sdev;

        debugfs_create_file("trace", 0444, sdev->debugfs_root, dfse,
                            &sof_dfs_trace_fops);

        return 0;
}

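/*
 * Configure and start DMA tracing on the DSP: send the trace buffer
 * parameters over IPC (using the extended variant with a timestamp when the
 * firmware ABI is at least 3.7.0), then trigger the DMA trace stream. On
 * failure the resources claimed by snd_sof_dma_trace_init() are released.
 */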
int snd_sof_init_trace_ipc(struct snd_sof_dev *sdev)
{
        struct sof_ipc_fw_ready *ready = &sdev->fw_ready;
        struct sof_ipc_fw_version *v = &ready->version;
        struct sof_ipc_dma_trace_params_ext params;
        struct sof_ipc_reply ipc_reply;
        int ret;

        if (!sdev->dtrace_is_supported)
                return 0;

        if (sdev->dtrace_is_enabled || !sdev->dma_trace_pages)
                return -EINVAL;

        /* set IPC parameters */
        params.hdr.cmd = SOF_IPC_GLB_TRACE_MSG;
        /* PARAMS_EXT is only supported from ABI 3.7.0 onwards */
        if (v->abi_version >= SOF_ABI_VER(3, 7, 0)) {
                params.hdr.size = sizeof(struct sof_ipc_dma_trace_params_ext);
                params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS_EXT;
                params.timestamp_ns = ktime_get(); /* in nanoseconds */
        } else {
                params.hdr.size = sizeof(struct sof_ipc_dma_trace_params);
                params.hdr.cmd |= SOF_IPC_TRACE_DMA_PARAMS;
        }
        params.buffer.phy_addr = sdev->dmatp.addr;
        params.buffer.size = sdev->dmatb.bytes;
        params.buffer.pages = sdev->dma_trace_pages;
        params.stream_tag = 0;

        sdev->host_offset = 0;
        sdev->dtrace_draining = false;

        ret = snd_sof_dma_trace_init(sdev, &params.stream_tag);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: fail in snd_sof_dma_trace_init %d\n", ret);
                return ret;
        }
        dev_dbg(sdev->dev, "stream_tag: %d\n", params.stream_tag);

        /* send IPC to the DSP */
        ret = sof_ipc_tx_message(sdev->ipc,
                                 params.hdr.cmd, &params, sizeof(params),
                                 &ipc_reply, sizeof(ipc_reply));
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't set params for DMA for trace %d\n", ret);
                goto trace_release;
        }

        ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_START);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_trigger: start: %d\n", ret);
                goto trace_release;
        }

        sdev->dtrace_is_enabled = true;

        return 0;

trace_release:
        snd_sof_dma_trace_release(sdev);
        return ret;
}

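/*
 * One-time trace setup: allocate the page table and the scatter-gather trace
 * data buffer, build the compressed page table for the firmware, create the
 * debugfs entries on first boot and finally start tracing via
 * snd_sof_init_trace_ipc(). Allocations are unwound on error.
 */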
int snd_sof_init_trace(struct snd_sof_dev *sdev)
{
        int ret;

        if (!sdev->dtrace_is_supported)
                return 0;

        /* clear the enabled flag before starting initialization */
        sdev->dtrace_is_enabled = false;

        /* allocate trace page table buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, sdev->dev,
                                  PAGE_SIZE, &sdev->dmatp);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't alloc page table for trace %d\n", ret);
                return ret;
        }

        /* allocate trace data buffer */
        ret = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_SG, sdev->dev,
                                  DMA_BUF_SIZE_FOR_TRACE, &sdev->dmatb);
        if (ret < 0) {
                dev_err(sdev->dev,
                        "error: can't alloc buffer for trace %d\n", ret);
                goto page_err;
        }

        /* create compressed page table for audio firmware */
        ret = snd_sof_create_page_table(sdev->dev, &sdev->dmatb,
                                        sdev->dmatp.area, sdev->dmatb.bytes);
        if (ret < 0)
                goto table_err;

        sdev->dma_trace_pages = ret;
        dev_dbg(sdev->dev, "dma_trace_pages: %d\n", sdev->dma_trace_pages);

        if (sdev->first_boot) {
                ret = trace_debugfs_create(sdev);
                if (ret < 0)
                        goto table_err;
        }

        init_waitqueue_head(&sdev->trace_sleep);

        ret = snd_sof_init_trace_ipc(sdev);
        if (ret < 0)
                goto table_err;

        return 0;
table_err:
        sdev->dma_trace_pages = 0;
        snd_dma_free_pages(&sdev->dmatb);
page_err:
        snd_dma_free_pages(&sdev->dmatp);
        return ret;
}
EXPORT_SYMBOL(snd_sof_init_trace);

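/*
 * Called on a DMA trace position IPC from the firmware: store the new host
 * offset, wake any readers sleeping in sof_wait_trace_avail() and log any
 * reported trace buffer overflow.
 */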
int snd_sof_trace_update_pos(struct snd_sof_dev *sdev,
                             struct sof_ipc_dma_trace_posn *posn)
{
        if (!sdev->dtrace_is_supported)
                return 0;

        if (sdev->dtrace_is_enabled && sdev->host_offset != posn->host_offset) {
                sdev->host_offset = posn->host_offset;
                wake_up(&sdev->trace_sleep);
        }

        if (posn->overflow != 0)
                dev_err(sdev->dev,
                        "error: DSP trace buffer overflow %u bytes. Total messages %d\n",
                        posn->overflow, posn->messages);

        return 0;
}

/* an error has occurred within the DSP that prevents further trace */
void snd_sof_trace_notify_for_error(struct snd_sof_dev *sdev)
{
        if (!sdev->dtrace_is_supported)
                return;

        if (sdev->dtrace_is_enabled) {
                dev_err(sdev->dev, "error: waking up any trace sleepers\n");
                sdev->dtrace_error = true;
                wake_up(&sdev->trace_sleep);
        }
}
EXPORT_SYMBOL(snd_sof_trace_notify_for_error);

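/*
 * Stop the DMA trace stream and release its resources. Readers are woken
 * with dtrace_draining set so they can consume any remaining data and then
 * see EOF.
 */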
void snd_sof_release_trace(struct snd_sof_dev *sdev)
{
        int ret;

        if (!sdev->dtrace_is_supported || !sdev->dtrace_is_enabled)
                return;

        ret = snd_sof_dma_trace_trigger(sdev, SNDRV_PCM_TRIGGER_STOP);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: snd_sof_dma_trace_trigger: stop: %d\n", ret);

        ret = snd_sof_dma_trace_release(sdev);
        if (ret < 0)
                dev_err(sdev->dev,
                        "error: fail in snd_sof_dma_trace_release %d\n", ret);

        sdev->dtrace_is_enabled = false;
        sdev->dtrace_draining = true;
        wake_up(&sdev->trace_sleep);
}
EXPORT_SYMBOL(snd_sof_release_trace);

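/* Stop tracing if it is still running and free the DMA buffers. */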
void snd_sof_free_trace(struct snd_sof_dev *sdev)
{
        if (!sdev->dtrace_is_supported)
                return;

        snd_sof_release_trace(sdev);

        if (sdev->dma_trace_pages) {
                snd_dma_free_pages(&sdev->dmatb);
                snd_dma_free_pages(&sdev->dmatp);
                sdev->dma_trace_pages = 0;
        }
}
EXPORT_SYMBOL(snd_sof_free_trace);