1 // SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/zalloc.h>
#include <stdlib.h>

#include <opencsd/c_api/opencsd_c_api.h>
#include <opencsd/etmv4/trc_pkt_types_etmv4.h>
#include <opencsd/ocsd_if_types.h>

#include "cs-etm.h"
#include "cs-etm-decoder.h"
#include "debug.h"
/*
 * Raw frame logging is compile-time opt-in: guard the knobs so that
 * CS_RAW_DEBUG_FLAGS is not defined twice (a redefinition error) and
 * CS_LOG_RAW_FRAMES is only set when raw-frame debug is requested.
 */
#ifdef CS_DEBUG_RAW_FRAMES
#define CS_LOG_RAW_FRAMES
#ifdef CS_RAW_PACKED
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
			    OCSD_DFRMTR_PACKED_RAW_OUT)
#else
#define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
#endif
#endif
35 struct cs_etm_decoder {
37 void (*packet_printer)(const char *msg);
38 bool suppress_printing;
39 dcd_tree_handle_t dcd_tree;
40 cs_etm_mem_cb_type mem_access;
41 ocsd_datapath_resp_t prev_return;
45 cs_etm_decoder__mem_access(const void *context,
46 const ocsd_vaddr_t address,
47 const ocsd_mem_space_acc_t mem_space __maybe_unused,
48 const u8 trace_chan_id,
52 struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
54 return decoder->mem_access(decoder->data, trace_chan_id,
55 address, req_size, buffer);
58 int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
60 cs_etm_mem_cb_type cb_func)
62 decoder->mem_access = cb_func;
64 if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
66 cs_etm_decoder__mem_access,
73 int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
75 ocsd_datapath_resp_t dp_ret;
77 decoder->prev_return = OCSD_RESP_CONT;
78 decoder->suppress_printing = true;
79 dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
81 decoder->suppress_printing = false;
82 if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
88 int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
89 struct cs_etm_packet *packet)
91 if (!packet_queue || !packet)
94 /* Nothing to do, might as well just return */
95 if (packet_queue->packet_count == 0)
98 * The queueing process in function cs_etm_decoder__buffer_packet()
99 * increments the tail *before* using it. This is somewhat counter
100 * intuitive but it has the advantage of centralizing tail management
101 * at a single location. Because of that we need to follow the same
102 * heuristic with the head, i.e we increment it before using its
103 * value. Otherwise the first element of the packet queue is not
106 packet_queue->head = (packet_queue->head + 1) &
107 (CS_ETM_PACKET_MAX_BUFFER - 1);
109 *packet = packet_queue->packet_buffer[packet_queue->head];
111 packet_queue->packet_count--;
116 static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
117 ocsd_etmv3_cfg *config)
119 config->reg_idr = params->etmv3.reg_idr;
120 config->reg_ctrl = params->etmv3.reg_ctrl;
121 config->reg_ccer = params->etmv3.reg_ccer;
122 config->reg_trc_id = params->etmv3.reg_trc_id;
123 config->arch_ver = ARCH_V7;
124 config->core_prof = profile_CortexA;
/* TRCIDR1 bits [7:4] hold the trace architecture minor version (TRCARCHMIN) */
#define TRCIDR1_TRCARCHMIN_SHIFT 4
#define TRCIDR1_TRCARCHMIN_MASK GENMASK(7, 4)
#define TRCIDR1_TRCARCHMIN(x) (((x) & TRCIDR1_TRCARCHMIN_MASK) >> TRCIDR1_TRCARCHMIN_SHIFT)
133 static enum _ocsd_arch_version cs_etm_decoder__get_etmv4_arch_ver(u32 reg_idr1)
136 * For ETMv4 if the trace minor version is 4 or more then we can assume
137 * the architecture is ARCH_AA64 rather than just V8.
138 * ARCH_V8 = V8 architecture
139 * ARCH_AA64 = Min v8r3 plus additional AA64 PE features
141 return TRCIDR1_TRCARCHMIN(reg_idr1) >= 4 ? ARCH_AA64 : ARCH_V8;
144 static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
145 ocsd_etmv4_cfg *config)
147 config->reg_configr = params->etmv4.reg_configr;
148 config->reg_traceidr = params->etmv4.reg_traceidr;
149 config->reg_idr0 = params->etmv4.reg_idr0;
150 config->reg_idr1 = params->etmv4.reg_idr1;
151 config->reg_idr2 = params->etmv4.reg_idr2;
152 config->reg_idr8 = params->etmv4.reg_idr8;
153 config->reg_idr9 = 0;
154 config->reg_idr10 = 0;
155 config->reg_idr11 = 0;
156 config->reg_idr12 = 0;
157 config->reg_idr13 = 0;
158 config->arch_ver = cs_etm_decoder__get_etmv4_arch_ver(params->etmv4.reg_idr1);
159 config->core_prof = profile_CortexA;
162 static void cs_etm_decoder__print_str_cb(const void *p_context,
166 const struct cs_etm_decoder *decoder = p_context;
168 if (p_context && str_len && !decoder->suppress_printing)
169 decoder->packet_printer(msg);
173 cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
174 struct cs_etm_decoder *decoder)
178 if (d_params->packet_printer == NULL)
181 decoder->packet_printer = d_params->packet_printer;
184 * Set up a library default logger to process any printers
185 * (packet/raw frame) we add later.
187 ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
191 /* no stdout / err / file output */
192 ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
197 * Set the string CB for the default logger, passes strings to
200 ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
202 cs_etm_decoder__print_str_cb);
209 #ifdef CS_LOG_RAW_FRAMES
211 cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
212 struct cs_etm_decoder *decoder)
214 /* Only log these during a --dump operation */
215 if (d_params->operation == CS_ETM_OPERATION_PRINT) {
216 /* set up a library default logger to process the
217 * raw frame printer we add later
219 ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
221 /* no stdout / err / file output */
222 ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
224 /* set the string CB for the default logger,
225 * passes strings to perf print logger.
227 ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
229 cs_etm_decoder__print_str_cb);
231 /* use the built in library printer for the raw frames */
232 ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
238 cs_etm_decoder__init_raw_frame_logging(
239 struct cs_etm_decoder_params *d_params __maybe_unused,
240 struct cs_etm_decoder *decoder __maybe_unused)
245 static ocsd_datapath_resp_t
246 cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
247 struct cs_etm_packet_queue *packet_queue,
248 const uint8_t trace_chan_id)
250 /* No timestamp packet has been received, nothing to do */
251 if (!packet_queue->cs_timestamp)
252 return OCSD_RESP_CONT;
254 packet_queue->cs_timestamp = packet_queue->next_cs_timestamp;
256 /* Estimate the timestamp for the next range packet */
257 packet_queue->next_cs_timestamp += packet_queue->instr_count;
258 packet_queue->instr_count = 0;
260 /* Tell the front end which traceid_queue needs attention */
261 cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
263 return OCSD_RESP_WAIT;
266 static ocsd_datapath_resp_t
267 cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
268 const ocsd_generic_trace_elem *elem,
269 const uint8_t trace_chan_id,
270 const ocsd_trc_index_t indx)
272 struct cs_etm_packet_queue *packet_queue;
274 /* First get the packet queue for this traceID */
275 packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
277 return OCSD_RESP_FATAL_SYS_ERR;
280 * We've seen a timestamp packet before - simply record the new value.
281 * Function do_soft_timestamp() will report the value to the front end,
282 * hence asking the decoder to keep decoding rather than stopping.
284 if (packet_queue->cs_timestamp) {
285 packet_queue->next_cs_timestamp = elem->timestamp;
286 return OCSD_RESP_CONT;
290 if (!elem->timestamp) {
292 * Zero timestamps can be seen due to misconfiguration or hardware bugs.
293 * Warn once, and don't try to subtract instr_count as it would result in an
296 packet_queue->cs_timestamp = 0;
297 if (!cs_etm__etmq_is_timeless(etmq))
298 pr_warning_once("Zero Coresight timestamp found at Idx:%" OCSD_TRC_IDX_STR
299 ". Decoding may be improved by prepending 'Z' to your current --itrace arguments.\n",
302 } else if (packet_queue->instr_count > elem->timestamp) {
304 * Sanity check that the elem->timestamp - packet_queue->instr_count would not
305 * result in an underflow. Warn and clamp at 0 if it would.
307 packet_queue->cs_timestamp = 0;
308 pr_err("Timestamp calculation underflow at Idx:%" OCSD_TRC_IDX_STR "\n", indx);
311 * This is the first timestamp we've seen since the beginning of traces
312 * or a discontinuity. Since timestamps packets are generated *after*
313 * range packets have been generated, we need to estimate the time at
314 * which instructions started by subtracting the number of instructions
315 * executed to the timestamp.
317 packet_queue->cs_timestamp = elem->timestamp - packet_queue->instr_count;
319 packet_queue->next_cs_timestamp = elem->timestamp;
320 packet_queue->instr_count = 0;
322 /* Tell the front end which traceid_queue needs attention */
323 cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
325 /* Halt processing until we are being told to proceed */
326 return OCSD_RESP_WAIT;
330 cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
332 packet_queue->cs_timestamp = 0;
333 packet_queue->next_cs_timestamp = 0;
334 packet_queue->instr_count = 0;
337 static ocsd_datapath_resp_t
338 cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
339 const u8 trace_chan_id,
340 enum cs_etm_sample_type sample_type)
345 if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
346 return OCSD_RESP_FATAL_SYS_ERR;
348 if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
349 return OCSD_RESP_FATAL_SYS_ERR;
351 et = packet_queue->tail;
352 et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
353 packet_queue->tail = et;
354 packet_queue->packet_count++;
356 packet_queue->packet_buffer[et].sample_type = sample_type;
357 packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
358 packet_queue->packet_buffer[et].cpu = cpu;
359 packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
360 packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
361 packet_queue->packet_buffer[et].instr_count = 0;
362 packet_queue->packet_buffer[et].last_instr_taken_branch = false;
363 packet_queue->packet_buffer[et].last_instr_size = 0;
364 packet_queue->packet_buffer[et].last_instr_type = 0;
365 packet_queue->packet_buffer[et].last_instr_subtype = 0;
366 packet_queue->packet_buffer[et].last_instr_cond = 0;
367 packet_queue->packet_buffer[et].flags = 0;
368 packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
369 packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;
371 if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
372 return OCSD_RESP_WAIT;
374 return OCSD_RESP_CONT;
377 static ocsd_datapath_resp_t
378 cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
379 struct cs_etm_packet_queue *packet_queue,
380 const ocsd_generic_trace_elem *elem,
381 const uint8_t trace_chan_id)
384 struct cs_etm_packet *packet;
386 ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
388 if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
391 packet = &packet_queue->packet_buffer[packet_queue->tail];
394 case ocsd_isa_aarch64:
395 packet->isa = CS_ETM_ISA_A64;
398 packet->isa = CS_ETM_ISA_A32;
400 case ocsd_isa_thumb2:
401 packet->isa = CS_ETM_ISA_T32;
404 case ocsd_isa_jazelle:
405 case ocsd_isa_custom:
406 case ocsd_isa_unknown:
408 packet->isa = CS_ETM_ISA_UNKNOWN;
411 packet->start_addr = elem->st_addr;
412 packet->end_addr = elem->en_addr;
413 packet->instr_count = elem->num_instr_range;
414 packet->last_instr_type = elem->last_i_type;
415 packet->last_instr_subtype = elem->last_i_subtype;
416 packet->last_instr_cond = elem->last_instr_cond;
418 if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
419 packet->last_instr_taken_branch = elem->last_instr_exec;
421 packet->last_instr_taken_branch = false;
423 packet->last_instr_size = elem->last_instr_sz;
425 /* per-thread scenario, no need to generate a timestamp */
426 if (cs_etm__etmq_is_timeless(etmq))
430 * The packet queue is full and we haven't seen a timestamp (had we
431 * seen one the packet queue wouldn't be full). Let the front end
434 if (ret == OCSD_RESP_WAIT)
437 packet_queue->instr_count += elem->num_instr_range;
438 /* Tell the front end we have a new timestamp to process */
439 ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
445 static ocsd_datapath_resp_t
446 cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
447 const uint8_t trace_chan_id)
450 * Something happened and who knows when we'll get new traces so
451 * reset time statistics.
453 cs_etm_decoder__reset_timestamp(queue);
454 return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
455 CS_ETM_DISCONTINUITY);
458 static ocsd_datapath_resp_t
459 cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
460 const ocsd_generic_trace_elem *elem,
461 const uint8_t trace_chan_id)
463 struct cs_etm_packet *packet;
465 ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
467 if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
470 packet = &queue->packet_buffer[queue->tail];
471 packet->exception_number = elem->exception_number;
476 static ocsd_datapath_resp_t
477 cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
478 const uint8_t trace_chan_id)
480 return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
481 CS_ETM_EXCEPTION_RET);
484 static ocsd_datapath_resp_t
485 cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
486 struct cs_etm_packet_queue *packet_queue,
487 const ocsd_generic_trace_elem *elem,
488 const uint8_t trace_chan_id)
495 * As all the ETMs run at the same exception level, the system should
496 * have the same PID format crossing CPUs. So cache the PID format
497 * and reuse it for sequential decoding.
500 ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
502 return OCSD_RESP_FATAL_SYS_ERR;
506 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
507 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
508 * as VMID, Bit ETM_OPT_CTXTID2 is set in this case.
511 case BIT(ETM_OPT_CTXTID):
512 if (elem->context.ctxt_id_valid)
513 tid = elem->context.context_id;
515 case BIT(ETM_OPT_CTXTID2):
516 if (elem->context.vmid_valid)
517 tid = elem->context.vmid;
524 return OCSD_RESP_CONT;
526 if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
527 return OCSD_RESP_FATAL_SYS_ERR;
530 * A timestamp is generated after a PE_CONTEXT element so make sure
531 * to rely on that coming one.
533 cs_etm_decoder__reset_timestamp(packet_queue);
535 return OCSD_RESP_CONT;
538 static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
540 const ocsd_trc_index_t indx,
541 const u8 trace_chan_id __maybe_unused,
542 const ocsd_generic_trace_elem *elem)
544 ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
545 struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
546 struct cs_etm_queue *etmq = decoder->data;
547 struct cs_etm_packet_queue *packet_queue;
549 /* First get the packet queue for this traceID */
550 packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
552 return OCSD_RESP_FATAL_SYS_ERR;
554 switch (elem->elem_type) {
555 case OCSD_GEN_TRC_ELEM_UNKNOWN:
557 case OCSD_GEN_TRC_ELEM_EO_TRACE:
558 case OCSD_GEN_TRC_ELEM_NO_SYNC:
559 case OCSD_GEN_TRC_ELEM_TRACE_ON:
560 resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
563 case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
564 resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
567 case OCSD_GEN_TRC_ELEM_EXCEPTION:
568 resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
571 case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
572 resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
575 case OCSD_GEN_TRC_ELEM_TIMESTAMP:
576 resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
580 case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
581 resp = cs_etm_decoder__set_tid(etmq, packet_queue,
582 elem, trace_chan_id);
584 /* Unused packet types */
585 case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
586 case OCSD_GEN_TRC_ELEM_ADDR_NACC:
587 case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
588 case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
589 case OCSD_GEN_TRC_ELEM_EVENT:
590 case OCSD_GEN_TRC_ELEM_SWTRACE:
591 case OCSD_GEN_TRC_ELEM_CUSTOM:
592 case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
593 case OCSD_GEN_TRC_ELEM_MEMTRANS:
602 cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
603 struct cs_etm_trace_params *t_params,
604 struct cs_etm_decoder *decoder)
606 const char *decoder_name;
607 ocsd_etmv3_cfg config_etmv3;
608 ocsd_etmv4_cfg trace_config_etmv4;
612 switch (t_params->protocol) {
613 case CS_ETM_PROTO_ETMV3:
614 case CS_ETM_PROTO_PTM:
615 cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
616 decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
617 OCSD_BUILTIN_DCD_ETMV3 :
618 OCSD_BUILTIN_DCD_PTM;
619 trace_config = &config_etmv3;
621 case CS_ETM_PROTO_ETMV4i:
622 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
623 decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
624 trace_config = &trace_config_etmv4;
630 if (d_params->operation == CS_ETM_OPERATION_DECODE) {
631 if (ocsd_dt_create_decoder(decoder->dcd_tree,
633 OCSD_CREATE_FLG_FULL_DECODER,
634 trace_config, &csid))
637 if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
638 cs_etm_decoder__gen_trace_elem_printer,
643 } else if (d_params->operation == CS_ETM_OPERATION_PRINT) {
644 if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
645 OCSD_CREATE_FLG_PACKET_PROC,
646 trace_config, &csid))
649 if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
658 struct cs_etm_decoder *
659 cs_etm_decoder__new(int decoders, struct cs_etm_decoder_params *d_params,
660 struct cs_etm_trace_params t_params[])
662 struct cs_etm_decoder *decoder;
663 ocsd_dcd_tree_src_t format;
667 if ((!t_params) || (!d_params))
670 decoder = zalloc(sizeof(*decoder));
675 decoder->data = d_params->data;
676 decoder->prev_return = OCSD_RESP_CONT;
677 format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
678 OCSD_TRC_SRC_SINGLE);
680 flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
681 flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
682 flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
685 * Drivers may add barrier frames when used with perf, set up to
686 * handle this. Barriers const of FSYNC packet repeated 4 times.
688 flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;
690 /* Create decode tree for the data source */
691 decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);
693 if (decoder->dcd_tree == 0)
694 goto err_free_decoder;
696 /* init library print logging support */
697 ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
699 goto err_free_decoder;
701 /* init raw frame logging if required */
702 cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
704 for (i = 0; i < decoders; i++) {
705 ret = cs_etm_decoder__create_etm_decoder(d_params,
709 goto err_free_decoder;
715 cs_etm_decoder__free(decoder);
719 int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
720 u64 indx, const u8 *buf,
721 size_t len, size_t *consumed)
724 ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
725 ocsd_datapath_resp_t prev_return = decoder->prev_return;
726 size_t processed = 0;
729 while (processed < len) {
730 if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
731 cur = ocsd_dt_process_data(decoder->dcd_tree,
737 } else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
738 cur = ocsd_dt_process_data(decoder->dcd_tree,
751 * Return to the input code if the packet buffer is full.
752 * Flushing will get done once the packet buffer has been
755 if (OCSD_DATA_RESP_IS_WAIT(cur))
761 decoder->prev_return = cur;
762 *consumed = processed;
/*
 * cs_etm_decoder__free() - tear down a decoder created by cs_etm_decoder__new().
 * Destroys the OpenCSD decode tree and clears the handle.
 * NOTE(review): this definition continues past the end of the visible chunk;
 * presumably the tail also frees @decoder itself — confirm against the full file.
 */
void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
	ocsd_destroy_dcd_tree(decoder->dcd_tree);
	decoder->dcd_tree = NULL;