1 // SPDX-License-Identifier: GPL-2.0
3 * Copyright(C) 2015-2018 Linaro Limited.
5 * Author: Tor Jeremiassen <tor@ti.com>
6 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
9 #include <linux/coresight-pmu.h>
10 #include <linux/err.h>
11 #include <linux/list.h>
12 #include <linux/zalloc.h>
14 #include <opencsd/c_api/opencsd_c_api.h>
15 #include <opencsd/etmv4/trc_pkt_types_etmv4.h>
16 #include <opencsd/ocsd_if_types.h>
19 #include "cs-etm-decoder.h"
24 #define CS_LOG_RAW_FRAMES
26 #define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT | \
27 OCSD_DFRMTR_PACKED_RAW_OUT)
29 #define CS_RAW_DEBUG_FLAGS (OCSD_DFRMTR_UNPACKED_RAW_OUT)
/*
 * Per-session decoder state wrapping an OpenCSD decode tree.
 * NOTE(review): extract is missing some lines here (e.g. a probable
 * "void *data" member used below as decoder->data, and the closing
 * brace) — confirm against the full file.
 */
33 struct cs_etm_decoder {
/* Callback used to emit decoded-packet strings during a --dump run. */
35 void (*packet_printer)(const char *msg);
/* Handle to the OpenCSD decode tree driving this decoder. */
36 dcd_tree_handle_t dcd_tree;
/* perf-side memory access callback, installed by __add_mem_access_cb(). */
37 cs_etm_mem_cb_type mem_access;
/* Last datapath response, carried across __process_data_block() calls. */
38 ocsd_datapath_resp_t prev_return;
/*
 * Memory-access trampoline registered with the OpenCSD library: unwraps
 * the opaque context back into a cs_etm_decoder and forwards the request
 * to the perf-side callback stored in decoder->mem_access.
 * NOTE(review): the return type and the req_size/buffer parameters fall
 * on lines elided from this extract — confirm against the full file.
 */
42 cs_etm_decoder__mem_access(const void *context,
43 const ocsd_vaddr_t address,
44 const ocsd_mem_space_acc_t mem_space __maybe_unused,
45 const u8 trace_chan_id,
/* context is the decoder we registered in __add_mem_access_cb(). */
49 struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
51 return decoder->mem_access(decoder->data, trace_chan_id,
52 address, req_size, buffer);
/*
 * Record the perf-side memory access callback and register our
 * cs_etm_decoder__mem_access() trampoline with the OpenCSD decode tree.
 * NOTE(review): the start/end address parameters and the error-return
 * path are on elided lines — confirm against the full file.
 */
55 int cs_etm_decoder__add_mem_access_cb(struct cs_etm_decoder *decoder,
57 cs_etm_mem_cb_type cb_func)
59 decoder->mem_access = cb_func;
61 if (ocsd_dt_add_callback_trcid_mem_acc(decoder->dcd_tree, start, end,
63 cs_etm_decoder__mem_access,
/*
 * Reset the decoder between trace buffers: clear the saved datapath
 * response and push an OCSD_OP_RESET through the decode tree.
 * Returns an error only if the library reports a fatal response.
 */
70 int cs_etm_decoder__reset(struct cs_etm_decoder *decoder)
72 ocsd_datapath_resp_t dp_ret;
74 decoder->prev_return = OCSD_RESP_CONT;
76 dp_ret = ocsd_dt_process_data(decoder->dcd_tree, OCSD_OP_RESET,
78 if (OCSD_DATA_RESP_IS_FATAL(dp_ret))
/*
 * Pop the oldest packet off the circular packet queue into *packet.
 * Head is advanced *before* being read, mirroring how __buffer_packet()
 * advances the tail before writing (see comment below).
 */
84 int cs_etm_decoder__get_packet(struct cs_etm_packet_queue *packet_queue,
85 struct cs_etm_packet *packet)
/* Defensive: refuse NULL queue or output pointer. */
87 if (!packet_queue || !packet)
90 /* Nothing to do, might as well just return */
91 if (packet_queue->packet_count == 0)
/*
94 * The queueing process in function cs_etm_decoder__buffer_packet()
95 * increments the tail *before* using it. This is somewhat counter
96 * intuitive but it has the advantage of centralizing tail management
97 * at a single location. Because of that we need to follow the same
98 * heuristic with the head, i.e we increment it before using its
99 * value. Otherwise the first element of the packet queue is not
 * used.
 */
/* CS_ETM_PACKET_MAX_BUFFER is a power of two, so mask == modulo. */
102 packet_queue->head = (packet_queue->head + 1) &
103 (CS_ETM_PACKET_MAX_BUFFER - 1);
105 *packet = packet_queue->packet_buffer[packet_queue->head];
107 packet_queue->packet_count--;
/*
 * Translate perf's ETMv3/PTM trace parameters into an OpenCSD
 * ocsd_etmv3_cfg. Architecture and core profile are fixed to
 * v7 Cortex-A for this trace source.
 */
112 static int cs_etm_decoder__gen_etmv3_config(struct cs_etm_trace_params *params,
113 ocsd_etmv3_cfg *config)
115 config->reg_idr = params->etmv3.reg_idr;
116 config->reg_ctrl = params->etmv3.reg_ctrl;
117 config->reg_ccer = params->etmv3.reg_ccer;
118 config->reg_trc_id = params->etmv3.reg_trc_id;
119 config->arch_ver = ARCH_V7;
120 config->core_prof = profile_CortexA;
/*
 * Translate perf's ETMv4 trace parameters into an OpenCSD
 * ocsd_etmv4_cfg. Only idr0-2 and idr8 are captured by perf;
 * the remaining ID registers are zeroed.
 */
125 static void cs_etm_decoder__gen_etmv4_config(struct cs_etm_trace_params *params,
126 ocsd_etmv4_cfg *config)
128 config->reg_configr = params->etmv4.reg_configr;
129 config->reg_traceidr = params->etmv4.reg_traceidr;
130 config->reg_idr0 = params->etmv4.reg_idr0;
131 config->reg_idr1 = params->etmv4.reg_idr1;
132 config->reg_idr2 = params->etmv4.reg_idr2;
133 config->reg_idr8 = params->etmv4.reg_idr8;
/* ID registers not recorded by perf: zero them explicitly. */
134 config->reg_idr9 = 0;
135 config->reg_idr10 = 0;
136 config->reg_idr11 = 0;
137 config->reg_idr12 = 0;
138 config->reg_idr13 = 0;
139 config->arch_ver = ARCH_V8;
140 config->core_prof = profile_CortexA;
/*
 * String-print callback handed to the OpenCSD default error logger:
 * forwards each non-empty message to the decoder's packet_printer.
 * NOTE(review): the msg/str_len parameters are on elided lines.
 */
143 static void cs_etm_decoder__print_str_cb(const void *p_context,
147 if (p_context && str_len)
148 ((struct cs_etm_decoder *)p_context)->packet_printer(msg);
/*
 * Wire the OpenCSD default error logger to perf's packet printer:
 * init the logger at ERROR severity, suppress its own stdout/err/file
 * output, and route strings through cs_etm_decoder__print_str_cb().
 * NOTE(review): return type and error-path lines are elided here.
 */
152 cs_etm_decoder__init_def_logger_printing(struct cs_etm_decoder_params *d_params,
153 struct cs_etm_decoder *decoder)
/* Nothing to do if the caller supplied no printer. */
157 if (d_params->packet_printer == NULL)
160 decoder->packet_printer = d_params->packet_printer;
/*
163 * Set up a library default logger to process any printers
164 * (packet/raw frame) we add later.
 */
166 ret = ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
170 /* no stdout / err / file output */
171 ret = ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
/*
175 * Set the string CB for the default logger, passes strings to
 * the perf printer.
 */
179 ret = ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
181 cs_etm_decoder__print_str_cb);
188 #ifdef CS_LOG_RAW_FRAMES
/*
 * Raw-frame logging variant used when CS_LOG_RAW_FRAMES is defined:
 * during a --dump (CS_ETM_OPERATION_PRINT) run, set up the library
 * default logger and its built-in raw frame printer.
 * NOTE(review): the matching #else/#endif and the printer's flag
 * argument (likely CS_RAW_DEBUG_FLAGS) are on elided lines.
 */
190 cs_etm_decoder__init_raw_frame_logging(struct cs_etm_decoder_params *d_params,
191 struct cs_etm_decoder *decoder)
193 /* Only log these during a --dump operation */
194 if (d_params->operation == CS_ETM_OPERATION_PRINT) {
195 /* set up a library default logger to process the
196 * raw frame printer we add later
 */
198 ocsd_def_errlog_init(OCSD_ERR_SEV_ERROR, 1);
200 /* no stdout / err / file output */
201 ocsd_def_errlog_config_output(C_API_MSGLOGOUT_FLG_NONE, NULL);
203 /* set the string CB for the default logger,
204 * passes strings to perf print logger.
 */
206 ocsd_def_errlog_set_strprint_cb(decoder->dcd_tree,
208 cs_etm_decoder__print_str_cb);
210 /* use the built in library printer for the raw frames */
211 ocsd_dt_set_raw_frame_printer(decoder->dcd_tree,
/* No-op stub compiled when CS_LOG_RAW_FRAMES is not defined. */
217 cs_etm_decoder__init_raw_frame_logging(
218 struct cs_etm_decoder_params *d_params __maybe_unused,
219 struct cs_etm_decoder *decoder __maybe_unused)
/*
 * Create a packet-processor-only decoder (no full decode) in the tree
 * and attach the library's packet protocol printer to it — used for
 * the --dump path. Returns an error if either library call fails.
 * NOTE(review): trace_config parameter and return statements elided.
 */
224 static int cs_etm_decoder__create_packet_printer(struct cs_etm_decoder *decoder,
225 const char *decoder_name,
230 if (ocsd_dt_create_decoder(decoder->dcd_tree, decoder_name,
231 OCSD_CREATE_FLG_PACKET_PROC,
232 trace_config, &csid))
235 if (ocsd_dt_set_pkt_protocol_printer(decoder->dcd_tree, csid, 0))
/*
 * Select the builtin OpenCSD decoder name and build the matching
 * protocol config (ETMv3/PTM vs ETMv4i) from the trace parameters,
 * then create a packet printer for it via
 * cs_etm_decoder__create_packet_printer().
 * NOTE(review): break statements and the default case of the switch
 * are on elided lines.
 */
242 cs_etm_decoder__create_etm_packet_printer(struct cs_etm_trace_params *t_params,
243 struct cs_etm_decoder *decoder)
245 const char *decoder_name;
246 ocsd_etmv3_cfg config_etmv3;
247 ocsd_etmv4_cfg trace_config_etmv4;
250 switch (t_params->protocol) {
251 case CS_ETM_PROTO_ETMV3:
252 case CS_ETM_PROTO_PTM:
/* ETMv3 and PTM share a config layout; only the decoder name differs. */
253 cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
254 decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
255 OCSD_BUILTIN_DCD_ETMV3 :
256 OCSD_BUILTIN_DCD_PTM;
257 trace_config = &config_etmv3;
259 case CS_ETM_PROTO_ETMV4i:
260 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
261 decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
262 trace_config = &trace_config_etmv4;
268 return cs_etm_decoder__create_packet_printer(decoder,
273 static ocsd_datapath_resp_t
/*
 * Advance the per-queue "soft" timestamp estimate: promote the stashed
 * next_timestamp to current, extrapolate the following one by the
 * number of instructions seen, and wake the front end for this
 * trace ID. Returns OCSD_RESP_WAIT to pause decoding until the front
 * end has consumed the queue; OCSD_RESP_CONT if no timestamp was ever
 * received.
 */
274 cs_etm_decoder__do_soft_timestamp(struct cs_etm_queue *etmq,
275 struct cs_etm_packet_queue *packet_queue,
276 const uint8_t trace_chan_id)
278 /* No timestamp packet has been received, nothing to do */
279 if (!packet_queue->timestamp)
280 return OCSD_RESP_CONT;
282 packet_queue->timestamp = packet_queue->next_timestamp;
284 /* Estimate the timestamp for the next range packet */
285 packet_queue->next_timestamp += packet_queue->instr_count;
286 packet_queue->instr_count = 0;
288 /* Tell the front end which traceid_queue needs attention */
289 cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
/* Halt the datapath until the front end drains the packet queue. */
291 return OCSD_RESP_WAIT;
294 static ocsd_datapath_resp_t
/*
 * Handle a real TIMESTAMP element from the trace stream for the given
 * trace ID. On the first timestamp after start/discontinuity, estimate
 * when the preceding instructions began and halt decoding so the front
 * end can process; on subsequent ones, just stash the value for
 * do_soft_timestamp() and keep decoding.
 */
295 cs_etm_decoder__do_hard_timestamp(struct cs_etm_queue *etmq,
296 const ocsd_generic_trace_elem *elem,
297 const uint8_t trace_chan_id)
299 struct cs_etm_packet_queue *packet_queue;
301 /* First get the packet queue for this traceID */
302 packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
/* No queue for this trace ID is a fatal condition. */
304 return OCSD_RESP_FATAL_SYS_ERR;
/*
306 * We've seen a timestamp packet before - simply record the new value.
307 * Function do_soft_timestamp() will report the value to the front end,
308 * hence asking the decoder to keep decoding rather than stopping.
 */
311 if (packet_queue->timestamp) {
312 packet_queue->next_timestamp = elem->timestamp;
313 return OCSD_RESP_CONT;
/*
316 * This is the first timestamp we've seen since the beginning of traces
317 * or a discontinuity. Since timestamps packets are generated *after*
318 * range packets have been generated, we need to estimate the time at
319 * which instructions started by subtracting the number of instructions
320 * executed to the timestamp.
 */
323 packet_queue->timestamp = elem->timestamp - packet_queue->instr_count;
324 packet_queue->next_timestamp = elem->timestamp;
325 packet_queue->instr_count = 0;
327 /* Tell the front end which traceid_queue needs attention */
328 cs_etm__etmq_set_traceid_queue_timestamp(etmq, trace_chan_id);
330 /* Halt processing until we are being told to proceed */
331 return OCSD_RESP_WAIT;
/*
 * Zero all per-queue timestamp state. Called on discontinuities and
 * on PE_CONTEXT changes, so the next TIMESTAMP element is treated as
 * the first one (see do_hard_timestamp()).
 */
335 cs_etm_decoder__reset_timestamp(struct cs_etm_packet_queue *packet_queue)
337 packet_queue->timestamp = 0;
338 packet_queue->next_timestamp = 0;
339 packet_queue->instr_count = 0;
342 static ocsd_datapath_resp_t
/*
 * Append a freshly-initialised packet of the given sample_type to the
 * circular packet queue for trace_chan_id. The tail is advanced before
 * use (see the mirror logic in __get_packet()). Returns OCSD_RESP_WAIT
 * when the queue becomes full so the datapath pauses, OCSD_RESP_CONT
 * otherwise, and a fatal response on overflow or unknown CPU.
 */
343 cs_etm_decoder__buffer_packet(struct cs_etm_packet_queue *packet_queue,
344 const u8 trace_chan_id,
345 enum cs_etm_sample_type sample_type)
/* Queue must never already be at capacity — that would lose packets. */
350 if (packet_queue->packet_count >= CS_ETM_PACKET_MAX_BUFFER - 1)
351 return OCSD_RESP_FATAL_SYS_ERR;
/* Map the trace ID back to a CPU; failure is unrecoverable. */
353 if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
354 return OCSD_RESP_FATAL_SYS_ERR;
/* Advance tail first, then write — centralises tail management here. */
356 et = packet_queue->tail;
357 et = (et + 1) & (CS_ETM_PACKET_MAX_BUFFER - 1);
358 packet_queue->tail = et;
359 packet_queue->packet_count++;
/* Reset every field of the slot; callers fill in type-specific data. */
361 packet_queue->packet_buffer[et].sample_type = sample_type;
362 packet_queue->packet_buffer[et].isa = CS_ETM_ISA_UNKNOWN;
363 packet_queue->packet_buffer[et].cpu = cpu;
364 packet_queue->packet_buffer[et].start_addr = CS_ETM_INVAL_ADDR;
365 packet_queue->packet_buffer[et].end_addr = CS_ETM_INVAL_ADDR;
366 packet_queue->packet_buffer[et].instr_count = 0;
367 packet_queue->packet_buffer[et].last_instr_taken_branch = false;
368 packet_queue->packet_buffer[et].last_instr_size = 0;
369 packet_queue->packet_buffer[et].last_instr_type = 0;
370 packet_queue->packet_buffer[et].last_instr_subtype = 0;
371 packet_queue->packet_buffer[et].last_instr_cond = 0;
372 packet_queue->packet_buffer[et].flags = 0;
373 packet_queue->packet_buffer[et].exception_number = UINT32_MAX;
374 packet_queue->packet_buffer[et].trace_chan_id = trace_chan_id;
/* One free slot left: ask the datapath to wait for a drain. */
376 if (packet_queue->packet_count == CS_ETM_PACKET_MAX_BUFFER - 1)
377 return OCSD_RESP_WAIT;
379 return OCSD_RESP_CONT;
382 static ocsd_datapath_resp_t
/*
 * Convert an OCSD_GEN_TRC_ELEM_INSTR_RANGE element into a CS_ETM_RANGE
 * packet on the queue: record ISA, address range, instruction counts
 * and last-instruction details, then update the soft timestamp unless
 * tracing is timeless (per-thread) or the queue just filled up.
 * NOTE(review): the switch(elem->isa) header, break statements and
 * some case labels (e.g. the A32 case) are on elided lines.
 */
383 cs_etm_decoder__buffer_range(struct cs_etm_queue *etmq,
384 struct cs_etm_packet_queue *packet_queue,
385 const ocsd_generic_trace_elem *elem,
386 const uint8_t trace_chan_id)
389 struct cs_etm_packet *packet;
391 ret = cs_etm_decoder__buffer_packet(packet_queue, trace_chan_id,
/* Anything other than CONT/WAIT is an error — propagate it. */
393 if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
/* Fill in the slot __buffer_packet() just queued (at tail). */
396 packet = &packet_queue->packet_buffer[packet_queue->tail];
/* Map the OpenCSD ISA enum onto perf's CS_ETM_ISA_* values. */
399 case ocsd_isa_aarch64:
400 packet->isa = CS_ETM_ISA_A64;
403 packet->isa = CS_ETM_ISA_A32;
405 case ocsd_isa_thumb2:
406 packet->isa = CS_ETM_ISA_T32;
409 case ocsd_isa_jazelle:
410 case ocsd_isa_custom:
411 case ocsd_isa_unknown:
413 packet->isa = CS_ETM_ISA_UNKNOWN;
416 packet->start_addr = elem->st_addr;
417 packet->end_addr = elem->en_addr;
418 packet->instr_count = elem->num_instr_range;
419 packet->last_instr_type = elem->last_i_type;
420 packet->last_instr_subtype = elem->last_i_subtype;
421 packet->last_instr_cond = elem->last_instr_cond;
/* Only direct/indirect branches can be "taken"; everything else isn't. */
423 if (elem->last_i_type == OCSD_INSTR_BR || elem->last_i_type == OCSD_INSTR_BR_INDIRECT)
424 packet->last_instr_taken_branch = elem->last_instr_exec;
426 packet->last_instr_taken_branch = false;
428 packet->last_instr_size = elem->last_instr_sz;
430 /* per-thread scenario, no need to generate a timestamp */
431 if (cs_etm__etmq_is_timeless(etmq))
/*
434 * The packet queue is full and we haven't seen a timestamp (had we
435 * seen one the packet queue wouldn't be full). Let the front end
 * process what is in the queue.
 */
439 if (ret == OCSD_RESP_WAIT)
442 packet_queue->instr_count += elem->num_instr_range;
443 /* Tell the front end we have a new timestamp to process */
444 ret = cs_etm_decoder__do_soft_timestamp(etmq, packet_queue,
450 static ocsd_datapath_resp_t
/*
 * Queue a CS_ETM_DISCONTINUITY packet and wipe the timestamp state,
 * since time continuity cannot be assumed across the gap.
 */
451 cs_etm_decoder__buffer_discontinuity(struct cs_etm_packet_queue *queue,
452 const uint8_t trace_chan_id)
/*
454 * Something happened and who knows when we'll get new traces so
455 * reset time statistics.
 */
458 cs_etm_decoder__reset_timestamp(queue);
459 return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
460 CS_ETM_DISCONTINUITY);
463 static ocsd_datapath_resp_t
/*
 * Queue a CS_ETM_EXCEPTION packet carrying the exception number from
 * the generic trace element. Propagates any non-CONT/WAIT response
 * from __buffer_packet() unchanged.
 */
464 cs_etm_decoder__buffer_exception(struct cs_etm_packet_queue *queue,
465 const ocsd_generic_trace_elem *elem,
466 const uint8_t trace_chan_id)
468 struct cs_etm_packet *packet;
470 ret = cs_etm_decoder__buffer_packet(queue, trace_chan_id,
472 if (ret != OCSD_RESP_CONT && ret != OCSD_RESP_WAIT)
/* Annotate the freshly queued packet (at tail) with the exception. */
475 packet = &queue->packet_buffer[queue->tail];
476 packet->exception_number = elem->exception_number;
481 static ocsd_datapath_resp_t
/* Queue a CS_ETM_EXCEPTION_RET packet; no extra payload needed. */
482 cs_etm_decoder__buffer_exception_ret(struct cs_etm_packet_queue *queue,
483 const uint8_t trace_chan_id)
485 return cs_etm_decoder__buffer_packet(queue, trace_chan_id,
486 CS_ETM_EXCEPTION_RET);
489 static ocsd_datapath_resp_t
/*
 * Handle an OCSD_GEN_TRC_ELEM_PE_CONTEXT element: extract the thread
 * ID from either the context ID or the VMID register (depending on the
 * configured PID format), tell the front end about it, and reset the
 * timestamp state so the timestamp following the context switch is
 * treated as the first one.
 * NOTE(review): the pid_fmt/tid declarations and the switch(pid_fmt)
 * header are on elided lines — the case labels below imply them.
 */
490 cs_etm_decoder__set_tid(struct cs_etm_queue *etmq,
491 struct cs_etm_packet_queue *packet_queue,
492 const ocsd_generic_trace_elem *elem,
493 const uint8_t trace_chan_id)
/*
499 * As all the ETMs run at the same exception level, the system should
500 * have the same PID format crossing CPUs. So cache the PID format
501 * and reuse it for sequential decoding.
 */
505 ret = cs_etm__get_pid_fmt(trace_chan_id, &pid_fmt);
507 return OCSD_RESP_FATAL_SYS_ERR;
/*
510 * Process the PE_CONTEXT packets if we have a valid contextID or VMID.
511 * If the kernel is running at EL2, the PID is traced in CONTEXTIDR_EL2
512 * as VMID, Bit ETM_OPT_CTXTID2 is set in this case.
 */
516 case BIT(ETM_OPT_CTXTID):
517 if (elem->context.ctxt_id_valid)
518 tid = elem->context.context_id;
520 case BIT(ETM_OPT_CTXTID2):
521 if (elem->context.vmid_valid)
522 tid = elem->context.vmid;
/* No valid ID in this element: nothing to report, keep decoding. */
529 return OCSD_RESP_CONT;
531 if (cs_etm__etmq_set_tid(etmq, tid, trace_chan_id))
532 return OCSD_RESP_FATAL_SYS_ERR;
/*
534 * A timestamp is generated after a PE_CONTEXT element so make sure
535 * to rely on that coming one.
 */
538 cs_etm_decoder__reset_timestamp(packet_queue);
540 return OCSD_RESP_CONT;
/*
 * Main OpenCSD generic-element callback for full decode: dispatch each
 * decoded element to the matching buffer_* / timestamp / set_tid
 * handler for its trace ID's packet queue. Unused element types fall
 * through and leave resp at OCSD_RESP_CONT.
 * NOTE(review): the context parameter line, break statements and the
 * closing "return resp" are on elided lines.
 */
543 static ocsd_datapath_resp_t cs_etm_decoder__gen_trace_elem_printer(
545 const ocsd_trc_index_t indx __maybe_unused,
546 const u8 trace_chan_id __maybe_unused,
547 const ocsd_generic_trace_elem *elem)
549 ocsd_datapath_resp_t resp = OCSD_RESP_CONT;
550 struct cs_etm_decoder *decoder = (struct cs_etm_decoder *) context;
551 struct cs_etm_queue *etmq = decoder->data;
552 struct cs_etm_packet_queue *packet_queue;
554 /* First get the packet queue for this traceID */
555 packet_queue = cs_etm__etmq_get_packet_queue(etmq, trace_chan_id);
/* Unknown trace ID: cannot continue decoding. */
557 return OCSD_RESP_FATAL_SYS_ERR;
559 switch (elem->elem_type) {
560 case OCSD_GEN_TRC_ELEM_UNKNOWN:
/* End-of-trace, loss of sync and trace-on all mean a discontinuity. */
562 case OCSD_GEN_TRC_ELEM_EO_TRACE:
563 case OCSD_GEN_TRC_ELEM_NO_SYNC:
564 case OCSD_GEN_TRC_ELEM_TRACE_ON:
565 resp = cs_etm_decoder__buffer_discontinuity(packet_queue,
568 case OCSD_GEN_TRC_ELEM_INSTR_RANGE:
569 resp = cs_etm_decoder__buffer_range(etmq, packet_queue, elem,
572 case OCSD_GEN_TRC_ELEM_EXCEPTION:
573 resp = cs_etm_decoder__buffer_exception(packet_queue, elem,
576 case OCSD_GEN_TRC_ELEM_EXCEPTION_RET:
577 resp = cs_etm_decoder__buffer_exception_ret(packet_queue,
580 case OCSD_GEN_TRC_ELEM_TIMESTAMP:
581 resp = cs_etm_decoder__do_hard_timestamp(etmq, elem,
584 case OCSD_GEN_TRC_ELEM_PE_CONTEXT:
585 resp = cs_etm_decoder__set_tid(etmq, packet_queue,
586 elem, trace_chan_id);
588 /* Unused packet types */
589 case OCSD_GEN_TRC_ELEM_I_RANGE_NOPATH:
590 case OCSD_GEN_TRC_ELEM_ADDR_NACC:
591 case OCSD_GEN_TRC_ELEM_CYCLE_COUNT:
592 case OCSD_GEN_TRC_ELEM_ADDR_UNKNOWN:
593 case OCSD_GEN_TRC_ELEM_EVENT:
594 case OCSD_GEN_TRC_ELEM_SWTRACE:
595 case OCSD_GEN_TRC_ELEM_CUSTOM:
596 case OCSD_GEN_TRC_ELEM_SYNC_MARKER:
597 case OCSD_GEN_TRC_ELEM_MEMTRANS:
/*
 * Create a FULL decoder (packet processing + decode) for this trace
 * source and install cs_etm_decoder__gen_trace_elem_printer() as the
 * generic element sink. Protocol selection mirrors
 * __create_etm_packet_printer() above.
 * NOTE(review): break statements, the default case, error returns and
 * the outfn's context argument are on elided lines.
 */
605 static int cs_etm_decoder__create_etm_packet_decoder(
606 struct cs_etm_trace_params *t_params,
607 struct cs_etm_decoder *decoder)
609 const char *decoder_name;
610 ocsd_etmv3_cfg config_etmv3;
611 ocsd_etmv4_cfg trace_config_etmv4;
615 switch (t_params->protocol) {
616 case CS_ETM_PROTO_ETMV3:
617 case CS_ETM_PROTO_PTM:
618 cs_etm_decoder__gen_etmv3_config(t_params, &config_etmv3);
619 decoder_name = (t_params->protocol == CS_ETM_PROTO_ETMV3) ?
620 OCSD_BUILTIN_DCD_ETMV3 :
621 OCSD_BUILTIN_DCD_PTM;
622 trace_config = &config_etmv3;
624 case CS_ETM_PROTO_ETMV4i:
625 cs_etm_decoder__gen_etmv4_config(t_params, &trace_config_etmv4);
626 decoder_name = OCSD_BUILTIN_DCD_ETMV4I;
627 trace_config = &trace_config_etmv4;
633 if (ocsd_dt_create_decoder(decoder->dcd_tree,
635 OCSD_CREATE_FLG_FULL_DECODER,
636 trace_config, &csid))
639 if (ocsd_dt_set_gen_elem_outfn(decoder->dcd_tree,
640 cs_etm_decoder__gen_trace_elem_printer,
/*
 * Dispatch on the requested operation: a packet printer for PRINT
 * (--dump) or a full decoder for DECODE.
 * NOTE(review): the fallthrough return for any other operation value
 * is on an elided line.
 */
648 cs_etm_decoder__create_etm_decoder(struct cs_etm_decoder_params *d_params,
649 struct cs_etm_trace_params *t_params,
650 struct cs_etm_decoder *decoder)
652 if (d_params->operation == CS_ETM_OPERATION_PRINT)
653 return cs_etm_decoder__create_etm_packet_printer(t_params,
655 else if (d_params->operation == CS_ETM_OPERATION_DECODE)
656 return cs_etm_decoder__create_etm_packet_decoder(t_params,
662 struct cs_etm_decoder *
/*
 * Allocate and fully initialise a decoder for num_cpu trace sources:
 * zalloc the struct, pick frame format and deformatter flags from
 * d_params, create the OpenCSD decode tree, hook up print/raw-frame
 * logging, then create one per-CPU decoder from t_params[]. On any
 * failure everything is torn down via cs_etm_decoder__free().
 * NOTE(review): local declarations (flags, ret, i), the per-CPU
 * t_params indexing and the success return are on elided lines.
 */
663 cs_etm_decoder__new(int num_cpu, struct cs_etm_decoder_params *d_params,
664 struct cs_etm_trace_params t_params[])
666 struct cs_etm_decoder *decoder;
667 ocsd_dcd_tree_src_t format;
/* Both parameter blocks are mandatory. */
671 if ((!t_params) || (!d_params))
674 decoder = zalloc(sizeof(*decoder));
679 decoder->data = d_params->data;
680 decoder->prev_return = OCSD_RESP_CONT;
/* Frame-formatted source when the sink wraps data in 16-byte frames. */
681 format = (d_params->formatted ? OCSD_TRC_SRC_FRAME_FORMATTED :
682 OCSD_TRC_SRC_SINGLE);
684 flags |= (d_params->fsyncs ? OCSD_DFRMTR_HAS_FSYNCS : 0);
685 flags |= (d_params->hsyncs ? OCSD_DFRMTR_HAS_HSYNCS : 0);
686 flags |= (d_params->frame_aligned ? OCSD_DFRMTR_FRAME_MEM_ALIGN : 0);
/*
688 * Drivers may add barrier frames when used with perf, set up to
689 * handle this. Barriers const of FSYNC packet repeated 4 times.
 */
692 flags |= OCSD_DFRMTR_RESET_ON_4X_FSYNC;
694 /* Create decode tree for the data source */
695 decoder->dcd_tree = ocsd_create_dcd_tree(format, flags);
697 if (decoder->dcd_tree == 0)
698 goto err_free_decoder;
700 /* init library print logging support */
701 ret = cs_etm_decoder__init_def_logger_printing(d_params, decoder);
703 goto err_free_decoder;
705 /* init raw frame logging if required */
706 cs_etm_decoder__init_raw_frame_logging(d_params, decoder);
708 for (i = 0; i < num_cpu; i++) {
709 ret = cs_etm_decoder__create_etm_decoder(d_params,
713 goto err_free_decoder;
/* Error path: release everything acquired so far, return failure. */
719 cs_etm_decoder__free(decoder);
/*
 * Feed a block of raw trace data into the decode tree, resuming from
 * the datapath response saved on the previous call: flush first if the
 * library last said WAIT, push data while it says CONT, and stop early
 * (reporting how much was consumed) when the packet buffer fills.
 * NOTE(review): the ocsd_dt_process_data() argument lists, the
 * fatal-response handling and the return value are on elided lines.
 */
723 int cs_etm_decoder__process_data_block(struct cs_etm_decoder *decoder,
724 u64 indx, const u8 *buf,
725 size_t len, size_t *consumed)
728 ocsd_datapath_resp_t cur = OCSD_RESP_CONT;
729 ocsd_datapath_resp_t prev_return = decoder->prev_return;
730 size_t processed = 0;
733 while (processed < len) {
/* Previous call left the library waiting: flush before more data. */
734 if (OCSD_DATA_RESP_IS_WAIT(prev_return)) {
735 cur = ocsd_dt_process_data(decoder->dcd_tree,
741 } else if (OCSD_DATA_RESP_IS_CONT(prev_return)) {
742 cur = ocsd_dt_process_data(decoder->dcd_tree,
/*
754 * Return to the input code if the packet buffer is full.
755 * Flushing will get done once the packet buffer has been
 * processed.
 */
759 if (OCSD_DATA_RESP_IS_WAIT(cur))
/* Remember where we stopped for the next invocation. */
765 decoder->prev_return = cur;
766 *consumed = processed;
/*
 * Tear down the decoder: destroy the OpenCSD decode tree and clear the
 * handle so a double free of the tree is impossible.
 * NOTE(review): a NULL-decoder guard and the free of the struct itself
 * are presumably on elided lines — confirm against the full file.
 */
771 void cs_etm_decoder__free(struct cs_etm_decoder *decoder)
776 ocsd_destroy_dcd_tree(decoder->dcd_tree);
777 decoder->dcd_tree = NULL;