1 /* SPDX-License-Identifier: (GPL-2.0 OR BSD-3-Clause) */
3 * Copyright(c) 2018 Intel Corporation.
6 #if !defined(__HFI1_TRACE_TID_H) || defined(TRACE_HEADER_MULTI_READ)
7 #define __HFI1_TRACE_TID_H
9 #include <linux/tracepoint.h>
10 #include <linux/trace_seq.h>
/*
 * TID type pretty-printing for trace output.
 *
 * tidtype_name() builds a { value, "NAME" } pair from a PT_* enumerator
 * (PT_EXPECTED, PT_EAGER, PT_INVALID are declared elsewhere in the driver).
 * show_tidtype() hands those pairs to __print_symbolic() so the numeric TID
 * type is rendered as its symbolic name in the trace buffer.
 *
 * Fix: dropped the stray line-continuation backslash that followed the
 * final tidtype_name(INVALID)) — it spliced the next physical source line
 * into this macro definition.
 */
#define tidtype_name(type) { PT_##type, #type }
#define show_tidtype(type)                   \
__print_symbolic(type,                       \
	tidtype_name(EXPECTED),              \
	tidtype_name(EAGER),                 \
	tidtype_name(INVALID))
22 #define TRACE_SYSTEM hfi1_tid
24 u8 hfi1_trace_get_tid_ctrl(u32 ent);
25 u16 hfi1_trace_get_tid_len(u32 ent);
26 u16 hfi1_trace_get_tid_idx(u32 ent);
28 #define OPFN_PARAM_PRN "[%s] qpn 0x%x %s OPFN: qp 0x%x, max read %u, " \
29 "max write %u, max length %u, jkey 0x%x timeout %u " \
/* Format string shared by the hfi1_tid_flow_* tracepoints below. */
#define TID_FLOW_PRN "[%s] qpn 0x%x flow %d: idx %d resp_ib_psn 0x%x " \
		     "generation 0x%x fpsn 0x%x-%x r_next_psn 0x%x ib_psn " \
		     "0x%x-%x npagesets %u tnode_cnt %u tidcnt %u " \
		     "tid_idx %u tid_offset %u length %u sent %u"
37 #define TID_NODE_PRN "[%s] qpn 0x%x %s idx %u grp base 0x%x map 0x%x " \
40 #define RSP_INFO_PRN "[%s] qpn 0x%x state 0x%x s_state 0x%x psn 0x%x " \
41 "r_psn 0x%x r_state 0x%x r_flags 0x%x " \
42 "r_head_ack_queue %u s_tail_ack_queue %u " \
43 "s_acked_ack_queue %u s_ack_state 0x%x " \
44 "s_nak_state 0x%x s_flags 0x%x ps_flags 0x%x " \
/* Format string shared by the hfi1_sender_* tracepoints below. */
#define SENDER_INFO_PRN "[%s] qpn 0x%x state 0x%x s_cur %u s_tail %u s_head %u " \
			"s_acked %u s_last %u s_psn 0x%x s_last_psn 0x%x " \
			"s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
			"s_state 0x%x s_num_rd %u s_retry %u"
52 #define TID_READ_SENDER_PRN "[%s] qpn 0x%x newreq %u tid_r_reqs %u " \
53 "tid_r_comp %u pending_tid_r_segs %u " \
54 "s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx " \
55 "s_state 0x%x hw_flow_index %u generation 0x%x " \
/*
 * Format string shared by the hfi1_tid_req_* tracepoints below.
 * Fix: label typo "r_last_ackd" -> "r_last_acked" so it matches the
 * r_last_acked field it prints.
 */
#define TID_REQ_PRN "[%s] qpn 0x%x newreq %u opcode 0x%x psn 0x%x lpsn 0x%x " \
		    "cur_seg %u comp_seg %u ack_seg %u alloc_seg %u " \
		    "total_segs %u setup_head %u clear_tail %u flow_idx %u " \
		    "acked_tail %u state %u r_ack_psn 0x%x r_flow_psn 0x%x " \
		    "r_last_acked 0x%x s_next_psn 0x%x"
64 #define RCV_ERR_PRN "[%s] qpn 0x%x s_flags 0x%x state 0x%x " \
65 "s_acked_ack_queue %u s_tail_ack_queue %u " \
66 "r_head_ack_queue %u opcode 0x%x psn 0x%x r_psn 0x%x " \
/*
 * Format string shared by the TID RDMA WRITE responder tracepoints below.
 * Fix: the "resync %s" fragment was concatenated directly onto
 * "r_next_psn_kdeth ..." with no separating space, so trace output read
 * "resync yesr_next_psn_kdeth 0x..."; a space is now included.
 */
#define TID_WRITE_RSPDR_PRN "[%s] qpn 0x%x r_tid_head %u r_tid_tail %u " \
			    "r_tid_ack %u r_tid_alloc %u alloc_w_segs %u " \
			    "pending_tid_w_segs %u sync_pt %s " \
			    "ps_nak_psn 0x%x ps_nak_state 0x%x " \
			    "prnr_nak_state 0x%x hw_flow_index %u generation " \
			    "0x%x fpsn 0x%x resync %s " \
			    "r_next_psn_kdeth 0x%x"
/* Format string shared by the TID RDMA WRITE sender tracepoints below. */
#define TID_WRITE_SENDER_PRN "[%s] qpn 0x%x newreq %u s_tid_cur %u " \
			     "s_tid_tail %u s_tid_head %u pending_tid_w_resp %u " \
			     "n_requests %u n_tid_requests %u s_flags 0x%x " \
			     "ps_flags 0x%x iow_flags 0x%lx s_state 0x%x " \
			     "s_retry %u"
/* Format string for the KDETH EFLAGS TID-error tracepoints. */
#define KDETH_EFLAGS_ERR_PRN "[%s] qpn 0x%x TID ERR: RcvType 0x%x RcvTypeError " \
			     "0x%x PSN 0x%x"
86 DECLARE_EVENT_CLASS(/* class */
87 hfi1_exp_tid_reg_unreg,
88 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
89 unsigned long va, unsigned long pa, dma_addr_t dma),
90 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma),
91 TP_STRUCT__entry(/* entry */
92 __field(unsigned int, ctxt)
96 __field(unsigned long, va)
97 __field(unsigned long, pa)
98 __field(dma_addr_t, dma)
100 TP_fast_assign(/* assign */
101 __entry->ctxt = ctxt;
102 __entry->subctxt = subctxt;
103 __entry->rarr = rarr;
104 __entry->npages = npages;
109 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx, va:0x%lx dma:0x%llx",
120 DEFINE_EVENT(/* exp_tid_unreg */
121 hfi1_exp_tid_reg_unreg, hfi1_exp_tid_unreg,
122 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
123 unsigned long va, unsigned long pa, dma_addr_t dma),
124 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
127 DEFINE_EVENT(/* exp_tid_reg */
128 hfi1_exp_tid_reg_unreg, hfi1_exp_tid_reg,
129 TP_PROTO(unsigned int ctxt, u16 subctxt, u32 rarr, u32 npages,
130 unsigned long va, unsigned long pa, dma_addr_t dma),
131 TP_ARGS(ctxt, subctxt, rarr, npages, va, pa, dma)
134 TRACE_EVENT(/* put_tid */
136 TP_PROTO(struct hfi1_devdata *dd,
137 u32 index, u32 type, unsigned long pa, u16 order),
138 TP_ARGS(dd, index, type, pa, order),
139 TP_STRUCT__entry(/* entry */
141 __field(unsigned long, pa);
146 TP_fast_assign(/* assign */
149 __entry->index = index;
150 __entry->type = type;
151 __entry->order = order;
153 TP_printk("[%s] type %s pa %lx index %u order %u",
155 show_tidtype(__entry->type),
162 TRACE_EVENT(/* exp_tid_inval */
164 TP_PROTO(unsigned int ctxt, u16 subctxt, unsigned long va, u32 rarr,
165 u32 npages, dma_addr_t dma),
166 TP_ARGS(ctxt, subctxt, va, rarr, npages, dma),
167 TP_STRUCT__entry(/* entry */
168 __field(unsigned int, ctxt)
169 __field(u16, subctxt)
170 __field(unsigned long, va)
173 __field(dma_addr_t, dma)
175 TP_fast_assign(/* assign */
176 __entry->ctxt = ctxt;
177 __entry->subctxt = subctxt;
179 __entry->rarr = rarr;
180 __entry->npages = npages;
183 TP_printk("[%u:%u] entry:%u, %u pages @ 0x%lx dma: 0x%llx",
193 DECLARE_EVENT_CLASS(/* opfn_state */
194 hfi1_opfn_state_template,
195 TP_PROTO(struct rvt_qp *qp),
197 TP_STRUCT__entry(/* entry */
198 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
200 __field(u16, requested)
201 __field(u16, completed)
204 TP_fast_assign(/* assign */
205 struct hfi1_qp_priv *priv = qp->priv;
207 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
208 __entry->qpn = qp->ibqp.qp_num;
209 __entry->requested = priv->opfn.requested;
210 __entry->completed = priv->opfn.completed;
211 __entry->curr = priv->opfn.curr;
213 TP_printk(/* print */
214 "[%s] qpn 0x%x requested 0x%x completed 0x%x curr 0x%x",
223 DEFINE_EVENT(/* event */
224 hfi1_opfn_state_template, hfi1_opfn_state_conn_request,
225 TP_PROTO(struct rvt_qp *qp),
229 DEFINE_EVENT(/* event */
230 hfi1_opfn_state_template, hfi1_opfn_state_sched_conn_request,
231 TP_PROTO(struct rvt_qp *qp),
235 DEFINE_EVENT(/* event */
236 hfi1_opfn_state_template, hfi1_opfn_state_conn_response,
237 TP_PROTO(struct rvt_qp *qp),
241 DEFINE_EVENT(/* event */
242 hfi1_opfn_state_template, hfi1_opfn_state_conn_reply,
243 TP_PROTO(struct rvt_qp *qp),
247 DEFINE_EVENT(/* event */
248 hfi1_opfn_state_template, hfi1_opfn_state_conn_error,
249 TP_PROTO(struct rvt_qp *qp),
253 DECLARE_EVENT_CLASS(/* opfn_data */
254 hfi1_opfn_data_template,
255 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
256 TP_ARGS(qp, capcode, data),
257 TP_STRUCT__entry(/* entry */
258 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
264 TP_fast_assign(/* assign */
265 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
266 __entry->qpn = qp->ibqp.qp_num;
267 __entry->state = qp->state;
268 __entry->capcode = capcode;
269 __entry->data = data;
271 TP_printk(/* printk */
272 "[%s] qpn 0x%x (state 0x%x) Capcode %u data 0x%llx",
281 DEFINE_EVENT(/* event */
282 hfi1_opfn_data_template, hfi1_opfn_data_conn_request,
283 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
284 TP_ARGS(qp, capcode, data)
287 DEFINE_EVENT(/* event */
288 hfi1_opfn_data_template, hfi1_opfn_data_conn_response,
289 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
290 TP_ARGS(qp, capcode, data)
293 DEFINE_EVENT(/* event */
294 hfi1_opfn_data_template, hfi1_opfn_data_conn_reply,
295 TP_PROTO(struct rvt_qp *qp, u8 capcode, u64 data),
296 TP_ARGS(qp, capcode, data)
299 DECLARE_EVENT_CLASS(/* opfn_param */
300 hfi1_opfn_param_template,
301 TP_PROTO(struct rvt_qp *qp, char remote,
302 struct tid_rdma_params *param),
303 TP_ARGS(qp, remote, param),
304 TP_STRUCT__entry(/* entry */
305 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
307 __field(char, remote)
308 __field(u32, param_qp)
309 __field(u32, max_len)
311 __field(u8, max_read)
312 __field(u8, max_write)
316 TP_fast_assign(/* assign */
317 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
318 __entry->qpn = qp->ibqp.qp_num;
319 __entry->remote = remote;
320 __entry->param_qp = param->qp;
321 __entry->max_len = param->max_len;
322 __entry->jkey = param->jkey;
323 __entry->max_read = param->max_read;
324 __entry->max_write = param->max_write;
325 __entry->timeout = param->timeout;
326 __entry->urg = param->urg;
328 TP_printk(/* print */
332 __entry->remote ? "remote" : "local",
343 DEFINE_EVENT(/* event */
344 hfi1_opfn_param_template, hfi1_opfn_param,
345 TP_PROTO(struct rvt_qp *qp, char remote,
346 struct tid_rdma_params *param),
347 TP_ARGS(qp, remote, param)
350 DECLARE_EVENT_CLASS(/* msg */
352 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
353 TP_ARGS(qp, msg, more),
354 TP_STRUCT__entry(/* entry */
359 TP_fast_assign(/* assign */
360 __entry->qpn = qp ? qp->ibqp.qp_num : 0;
361 __assign_str(msg, msg);
362 __entry->more = more;
364 TP_printk(/* print */
365 "qpn 0x%x %s 0x%llx",
372 DEFINE_EVENT(/* event */
373 hfi1_msg_template, hfi1_msg_opfn_conn_request,
374 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
375 TP_ARGS(qp, msg, more)
378 DEFINE_EVENT(/* event */
379 hfi1_msg_template, hfi1_msg_opfn_conn_error,
380 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
381 TP_ARGS(qp, msg, more)
384 DEFINE_EVENT(/* event */
385 hfi1_msg_template, hfi1_msg_alloc_tids,
386 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
387 TP_ARGS(qp, msg, more)
390 DEFINE_EVENT(/* event */
391 hfi1_msg_template, hfi1_msg_tid_restart_req,
392 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
393 TP_ARGS(qp, msg, more)
396 DEFINE_EVENT(/* event */
397 hfi1_msg_template, hfi1_msg_handle_kdeth_eflags,
398 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
399 TP_ARGS(qp, msg, more)
402 DEFINE_EVENT(/* event */
403 hfi1_msg_template, hfi1_msg_tid_timeout,
404 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
405 TP_ARGS(qp, msg, more)
408 DEFINE_EVENT(/* event */
409 hfi1_msg_template, hfi1_msg_tid_retry_timeout,
410 TP_PROTO(struct rvt_qp *qp, const char *msg, u64 more),
411 TP_ARGS(qp, msg, more)
414 DECLARE_EVENT_CLASS(/* tid_flow_page */
415 hfi1_tid_flow_page_template,
416 TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
417 char mtu8k, char v1, void *vaddr),
418 TP_ARGS(qp, flow, index, mtu8k, v1, vaddr),
419 TP_STRUCT__entry(/* entry */
420 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
428 TP_fast_assign(/* assign */
429 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
430 __entry->qpn = qp->ibqp.qp_num;
431 __entry->mtu8k = mtu8k;
433 __entry->index = index;
434 __entry->page = vaddr ? (u64)virt_to_page(vaddr) : 0ULL;
435 __entry->vaddr = (u64)vaddr;
437 TP_printk(/* print */
438 "[%s] qpn 0x%x page[%u]: page 0x%llx %s 0x%llx",
443 __entry->mtu8k ? (__entry->v1 ? "v1" : "v0") : "vaddr",
448 DEFINE_EVENT(/* event */
449 hfi1_tid_flow_page_template, hfi1_tid_flow_page,
450 TP_PROTO(struct rvt_qp *qp, struct tid_rdma_flow *flow, u32 index,
451 char mtu8k, char v1, void *vaddr),
452 TP_ARGS(qp, flow, index, mtu8k, v1, vaddr)
455 DECLARE_EVENT_CLASS(/* tid_pageset */
456 hfi1_tid_pageset_template,
457 TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
458 TP_ARGS(qp, index, idx, count),
459 TP_STRUCT__entry(/* entry */
460 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
466 TP_fast_assign(/* assign */
467 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
468 __entry->qpn = qp->ibqp.qp_num;
469 __entry->index = index;
471 __entry->count = count;
473 TP_printk(/* print */
474 "[%s] qpn 0x%x list[%u]: idx %u count %u",
483 DEFINE_EVENT(/* event */
484 hfi1_tid_pageset_template, hfi1_tid_pageset,
485 TP_PROTO(struct rvt_qp *qp, u32 index, u16 idx, u16 count),
486 TP_ARGS(qp, index, idx, count)
489 DECLARE_EVENT_CLASS(/* tid_fow */
490 hfi1_tid_flow_template,
491 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
492 TP_ARGS(qp, index, flow),
493 TP_STRUCT__entry(/* entry */
494 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
498 __field(u32, resp_ib_psn)
499 __field(u32, generation)
502 __field(u32, r_next_psn)
503 __field(u32, ib_spsn)
504 __field(u32, ib_lpsn)
505 __field(u32, npagesets)
506 __field(u32, tnode_cnt)
508 __field(u32, tid_idx)
509 __field(u32, tid_offset)
513 TP_fast_assign(/* assign */
514 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
515 __entry->qpn = qp->ibqp.qp_num;
516 __entry->index = index;
517 __entry->idx = flow->idx;
518 __entry->resp_ib_psn = flow->flow_state.resp_ib_psn;
519 __entry->generation = flow->flow_state.generation;
520 __entry->fspsn = full_flow_psn(flow,
521 flow->flow_state.spsn);
522 __entry->flpsn = full_flow_psn(flow,
523 flow->flow_state.lpsn);
524 __entry->r_next_psn = flow->flow_state.r_next_psn;
525 __entry->ib_spsn = flow->flow_state.ib_spsn;
526 __entry->ib_lpsn = flow->flow_state.ib_lpsn;
527 __entry->npagesets = flow->npagesets;
528 __entry->tnode_cnt = flow->tnode_cnt;
529 __entry->tidcnt = flow->tidcnt;
530 __entry->tid_idx = flow->tid_idx;
531 __entry->tid_offset = flow->tid_offset;
532 __entry->length = flow->length;
533 __entry->sent = flow->sent;
535 TP_printk(/* print */
541 __entry->resp_ib_psn,
558 DEFINE_EVENT(/* event */
559 hfi1_tid_flow_template, hfi1_tid_flow_alloc,
560 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
561 TP_ARGS(qp, index, flow)
564 DEFINE_EVENT(/* event */
565 hfi1_tid_flow_template, hfi1_tid_flow_build_read_pkt,
566 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
567 TP_ARGS(qp, index, flow)
570 DEFINE_EVENT(/* event */
571 hfi1_tid_flow_template, hfi1_tid_flow_build_read_resp,
572 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
573 TP_ARGS(qp, index, flow)
576 DEFINE_EVENT(/* event */
577 hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_req,
578 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
579 TP_ARGS(qp, index, flow)
582 DEFINE_EVENT(/* event */
583 hfi1_tid_flow_template, hfi1_tid_flow_rcv_read_resp,
584 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
585 TP_ARGS(qp, index, flow)
588 DEFINE_EVENT(/* event */
589 hfi1_tid_flow_template, hfi1_tid_flow_restart_req,
590 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
591 TP_ARGS(qp, index, flow)
594 DEFINE_EVENT(/* event */
595 hfi1_tid_flow_template, hfi1_tid_flow_build_write_resp,
596 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
597 TP_ARGS(qp, index, flow)
600 DEFINE_EVENT(/* event */
601 hfi1_tid_flow_template, hfi1_tid_flow_rcv_write_resp,
602 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
603 TP_ARGS(qp, index, flow)
606 DEFINE_EVENT(/* event */
607 hfi1_tid_flow_template, hfi1_tid_flow_build_write_data,
608 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
609 TP_ARGS(qp, index, flow)
612 DEFINE_EVENT(/* event */
613 hfi1_tid_flow_template, hfi1_tid_flow_rcv_tid_ack,
614 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
615 TP_ARGS(qp, index, flow)
618 DEFINE_EVENT(/* event */
619 hfi1_tid_flow_template, hfi1_tid_flow_rcv_resync,
620 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
621 TP_ARGS(qp, index, flow)
624 DEFINE_EVENT(/* event */
625 hfi1_tid_flow_template, hfi1_tid_flow_handle_kdeth_eflags,
626 TP_PROTO(struct rvt_qp *qp, int index, struct tid_rdma_flow *flow),
627 TP_ARGS(qp, index, flow)
630 DECLARE_EVENT_CLASS(/* tid_node */
631 hfi1_tid_node_template,
632 TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
633 u8 map, u8 used, u8 cnt),
634 TP_ARGS(qp, msg, index, base, map, used, cnt),
635 TP_STRUCT__entry(/* entry */
636 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
645 TP_fast_assign(/* assign */
646 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
647 __entry->qpn = qp->ibqp.qp_num;
648 __assign_str(msg, msg);
649 __entry->index = index;
650 __entry->base = base;
652 __entry->used = used;
655 TP_printk(/* print */
668 DEFINE_EVENT(/* event */
669 hfi1_tid_node_template, hfi1_tid_node_add,
670 TP_PROTO(struct rvt_qp *qp, const char *msg, u32 index, u32 base,
671 u8 map, u8 used, u8 cnt),
672 TP_ARGS(qp, msg, index, base, map, used, cnt)
675 DECLARE_EVENT_CLASS(/* tid_entry */
676 hfi1_tid_entry_template,
677 TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
678 TP_ARGS(qp, index, ent),
679 TP_STRUCT__entry(/* entry */
680 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
687 TP_fast_assign(/* assign */
688 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
689 __entry->qpn = qp->ibqp.qp_num;
690 __entry->index = index;
691 __entry->ctrl = hfi1_trace_get_tid_ctrl(ent);
692 __entry->idx = hfi1_trace_get_tid_idx(ent);
693 __entry->len = hfi1_trace_get_tid_len(ent);
695 TP_printk(/* print */
696 "[%s] qpn 0x%x TID entry %d: idx %u len %u ctrl 0x%x",
706 DEFINE_EVENT(/* event */
707 hfi1_tid_entry_template, hfi1_tid_entry_alloc,
708 TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
709 TP_ARGS(qp, index, entry)
712 DEFINE_EVENT(/* event */
713 hfi1_tid_entry_template, hfi1_tid_entry_build_read_resp,
714 TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
715 TP_ARGS(qp, index, ent)
718 DEFINE_EVENT(/* event */
719 hfi1_tid_entry_template, hfi1_tid_entry_rcv_read_req,
720 TP_PROTO(struct rvt_qp *qp, int index, u32 ent),
721 TP_ARGS(qp, index, ent)
724 DEFINE_EVENT(/* event */
725 hfi1_tid_entry_template, hfi1_tid_entry_rcv_write_resp,
726 TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
727 TP_ARGS(qp, index, entry)
730 DEFINE_EVENT(/* event */
731 hfi1_tid_entry_template, hfi1_tid_entry_build_write_data,
732 TP_PROTO(struct rvt_qp *qp, int index, u32 entry),
733 TP_ARGS(qp, index, entry)
736 DECLARE_EVENT_CLASS(/* rsp_info */
737 hfi1_responder_info_template,
738 TP_PROTO(struct rvt_qp *qp, u32 psn),
740 TP_STRUCT__entry(/* entry */
741 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
749 __field(u8, r_head_ack_queue)
750 __field(u8, s_tail_ack_queue)
751 __field(u8, s_acked_ack_queue)
752 __field(u8, s_ack_state)
753 __field(u8, s_nak_state)
754 __field(u8, r_nak_state)
755 __field(u32, s_flags)
756 __field(u32, ps_flags)
757 __field(unsigned long, iow_flags)
759 TP_fast_assign(/* assign */
760 struct hfi1_qp_priv *priv = qp->priv;
762 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
763 __entry->qpn = qp->ibqp.qp_num;
764 __entry->state = qp->state;
765 __entry->s_state = qp->s_state;
767 __entry->r_psn = qp->r_psn;
768 __entry->r_state = qp->r_state;
769 __entry->r_flags = qp->r_flags;
770 __entry->r_head_ack_queue = qp->r_head_ack_queue;
771 __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
772 __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
773 __entry->s_ack_state = qp->s_ack_state;
774 __entry->s_nak_state = qp->s_nak_state;
775 __entry->s_flags = qp->s_flags;
776 __entry->ps_flags = priv->s_flags;
777 __entry->iow_flags = priv->s_iowait.flags;
779 TP_printk(/* print */
789 __entry->r_head_ack_queue,
790 __entry->s_tail_ack_queue,
791 __entry->s_acked_ack_queue,
792 __entry->s_ack_state,
793 __entry->s_nak_state,
800 DEFINE_EVENT(/* event */
801 hfi1_responder_info_template, hfi1_rsp_make_rc_ack,
802 TP_PROTO(struct rvt_qp *qp, u32 psn),
806 DEFINE_EVENT(/* event */
807 hfi1_responder_info_template, hfi1_rsp_rcv_tid_read_req,
808 TP_PROTO(struct rvt_qp *qp, u32 psn),
812 DEFINE_EVENT(/* event */
813 hfi1_responder_info_template, hfi1_rsp_tid_rcv_error,
814 TP_PROTO(struct rvt_qp *qp, u32 psn),
818 DEFINE_EVENT(/* event */
819 hfi1_responder_info_template, hfi1_rsp_tid_write_alloc_res,
820 TP_PROTO(struct rvt_qp *qp, u32 psn),
824 DEFINE_EVENT(/* event */
825 hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_req,
826 TP_PROTO(struct rvt_qp *qp, u32 psn),
830 DEFINE_EVENT(/* event */
831 hfi1_responder_info_template, hfi1_rsp_build_tid_write_resp,
832 TP_PROTO(struct rvt_qp *qp, u32 psn),
836 DEFINE_EVENT(/* event */
837 hfi1_responder_info_template, hfi1_rsp_rcv_tid_write_data,
838 TP_PROTO(struct rvt_qp *qp, u32 psn),
842 DEFINE_EVENT(/* event */
843 hfi1_responder_info_template, hfi1_rsp_make_tid_ack,
844 TP_PROTO(struct rvt_qp *qp, u32 psn),
848 DEFINE_EVENT(/* event */
849 hfi1_responder_info_template, hfi1_rsp_handle_kdeth_eflags,
850 TP_PROTO(struct rvt_qp *qp, u32 psn),
854 DECLARE_EVENT_CLASS(/* sender_info */
855 hfi1_sender_info_template,
856 TP_PROTO(struct rvt_qp *qp),
858 TP_STRUCT__entry(/* entry */
859 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
865 __field(u32, s_acked)
868 __field(u32, s_last_psn)
869 __field(u32, s_flags)
870 __field(u32, ps_flags)
871 __field(unsigned long, iow_flags)
873 __field(u8, s_num_rd)
876 TP_fast_assign(/* assign */
877 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
878 __entry->qpn = qp->ibqp.qp_num;
879 __entry->state = qp->state;
880 __entry->s_cur = qp->s_cur;
881 __entry->s_tail = qp->s_tail;
882 __entry->s_head = qp->s_head;
883 __entry->s_acked = qp->s_acked;
884 __entry->s_last = qp->s_last;
885 __entry->s_psn = qp->s_psn;
886 __entry->s_last_psn = qp->s_last_psn;
887 __entry->s_flags = qp->s_flags;
888 __entry->ps_flags = ((struct hfi1_qp_priv *)qp->priv)->s_flags;
890 ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
891 __entry->s_state = qp->s_state;
892 __entry->s_num_rd = qp->s_num_rd_atomic;
893 __entry->s_retry = qp->s_retry;
895 TP_printk(/* print */
916 DEFINE_EVENT(/* event */
917 hfi1_sender_info_template, hfi1_sender_make_rc_req,
918 TP_PROTO(struct rvt_qp *qp),
922 DEFINE_EVENT(/* event */
923 hfi1_sender_info_template, hfi1_sender_reset_psn,
924 TP_PROTO(struct rvt_qp *qp),
928 DEFINE_EVENT(/* event */
929 hfi1_sender_info_template, hfi1_sender_restart_rc,
930 TP_PROTO(struct rvt_qp *qp),
934 DEFINE_EVENT(/* event */
935 hfi1_sender_info_template, hfi1_sender_do_rc_ack,
936 TP_PROTO(struct rvt_qp *qp),
940 DEFINE_EVENT(/* event */
941 hfi1_sender_info_template, hfi1_sender_rcv_tid_read_resp,
942 TP_PROTO(struct rvt_qp *qp),
946 DEFINE_EVENT(/* event */
947 hfi1_sender_info_template, hfi1_sender_rcv_tid_ack,
948 TP_PROTO(struct rvt_qp *qp),
952 DEFINE_EVENT(/* event */
953 hfi1_sender_info_template, hfi1_sender_make_tid_pkt,
954 TP_PROTO(struct rvt_qp *qp),
958 DECLARE_EVENT_CLASS(/* tid_read_sender */
959 hfi1_tid_read_sender_template,
960 TP_PROTO(struct rvt_qp *qp, char newreq),
962 TP_STRUCT__entry(/* entry */
963 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
965 __field(char, newreq)
966 __field(u32, tid_r_reqs)
967 __field(u32, tid_r_comp)
968 __field(u32, pending_tid_r_segs)
969 __field(u32, s_flags)
970 __field(u32, ps_flags)
971 __field(unsigned long, iow_flags)
973 __field(u32, hw_flow_index)
974 __field(u32, generation)
977 TP_fast_assign(/* assign */
978 struct hfi1_qp_priv *priv = qp->priv;
980 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
981 __entry->qpn = qp->ibqp.qp_num;
982 __entry->newreq = newreq;
983 __entry->tid_r_reqs = priv->tid_r_reqs;
984 __entry->tid_r_comp = priv->tid_r_comp;
985 __entry->pending_tid_r_segs = priv->pending_tid_r_segs;
986 __entry->s_flags = qp->s_flags;
987 __entry->ps_flags = priv->s_flags;
988 __entry->iow_flags = priv->s_iowait.flags;
989 __entry->s_state = priv->s_state;
990 __entry->hw_flow_index = priv->flow_state.index;
991 __entry->generation = priv->flow_state.generation;
992 __entry->fpsn = priv->flow_state.psn;
994 TP_printk(/* print */
1000 __entry->tid_r_comp,
1001 __entry->pending_tid_r_segs,
1006 __entry->hw_flow_index,
1007 __entry->generation,
1012 DEFINE_EVENT(/* event */
1013 hfi1_tid_read_sender_template, hfi1_tid_read_sender_make_req,
1014 TP_PROTO(struct rvt_qp *qp, char newreq),
1018 DECLARE_EVENT_CLASS(/* tid_rdma_request */
1019 hfi1_tid_rdma_request_template,
1020 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1021 struct tid_rdma_request *req),
1022 TP_ARGS(qp, newreq, opcode, psn, lpsn, req),
1023 TP_STRUCT__entry(/* entry */
1024 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1026 __field(char, newreq)
1030 __field(u32, cur_seg)
1031 __field(u32, comp_seg)
1032 __field(u32, ack_seg)
1033 __field(u32, alloc_seg)
1034 __field(u32, total_segs)
1035 __field(u16, setup_head)
1036 __field(u16, clear_tail)
1037 __field(u16, flow_idx)
1038 __field(u16, acked_tail)
1040 __field(u32, r_ack_psn)
1041 __field(u32, r_flow_psn)
1042 __field(u32, r_last_acked)
1043 __field(u32, s_next_psn)
1045 TP_fast_assign(/* assign */
1046 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
1047 __entry->qpn = qp->ibqp.qp_num;
1048 __entry->newreq = newreq;
1049 __entry->opcode = opcode;
1051 __entry->lpsn = lpsn;
1052 __entry->cur_seg = req->cur_seg;
1053 __entry->comp_seg = req->comp_seg;
1054 __entry->ack_seg = req->ack_seg;
1055 __entry->alloc_seg = req->alloc_seg;
1056 __entry->total_segs = req->total_segs;
1057 __entry->setup_head = req->setup_head;
1058 __entry->clear_tail = req->clear_tail;
1059 __entry->flow_idx = req->flow_idx;
1060 __entry->acked_tail = req->acked_tail;
1061 __entry->state = req->state;
1062 __entry->r_ack_psn = req->r_ack_psn;
1063 __entry->r_flow_psn = req->r_flow_psn;
1064 __entry->r_last_acked = req->r_last_acked;
1065 __entry->s_next_psn = req->s_next_psn;
1067 TP_printk(/* print */
1079 __entry->total_segs,
1080 __entry->setup_head,
1081 __entry->clear_tail,
1083 __entry->acked_tail,
1086 __entry->r_flow_psn,
1087 __entry->r_last_acked,
1092 DEFINE_EVENT(/* event */
1093 hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_read,
1094 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1095 struct tid_rdma_request *req),
1096 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1099 DEFINE_EVENT(/* event */
1100 hfi1_tid_rdma_request_template, hfi1_tid_req_build_read_req,
1101 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1102 struct tid_rdma_request *req),
1103 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1106 DEFINE_EVENT(/* event */
1107 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_req,
1108 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1109 struct tid_rdma_request *req),
1110 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1113 DEFINE_EVENT(/* event */
1114 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_read_resp,
1115 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1116 struct tid_rdma_request *req),
1117 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1120 DEFINE_EVENT(/* event */
1121 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_err,
1122 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1123 struct tid_rdma_request *req),
1124 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1127 DEFINE_EVENT(/* event */
1128 hfi1_tid_rdma_request_template, hfi1_tid_req_restart_req,
1129 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1130 struct tid_rdma_request *req),
1131 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1134 DEFINE_EVENT(/* event */
1135 hfi1_tid_rdma_request_template, hfi1_tid_req_setup_tid_wqe,
1136 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1137 struct tid_rdma_request *req),
1138 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1141 DEFINE_EVENT(/* event */
1142 hfi1_tid_rdma_request_template, hfi1_tid_req_write_alloc_res,
1143 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1144 struct tid_rdma_request *req),
1145 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1148 DEFINE_EVENT(/* event */
1149 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_req,
1150 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1151 struct tid_rdma_request *req),
1152 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1155 DEFINE_EVENT(/* event */
1156 hfi1_tid_rdma_request_template, hfi1_tid_req_build_write_resp,
1157 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1158 struct tid_rdma_request *req),
1159 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1162 DEFINE_EVENT(/* event */
1163 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_resp,
1164 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1165 struct tid_rdma_request *req),
1166 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1169 DEFINE_EVENT(/* event */
1170 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_write_data,
1171 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1172 struct tid_rdma_request *req),
1173 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1176 DEFINE_EVENT(/* event */
1177 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_tid_ack,
1178 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1179 struct tid_rdma_request *req),
1180 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1183 DEFINE_EVENT(/* event */
1184 hfi1_tid_rdma_request_template, hfi1_tid_req_tid_retry_timeout,
1185 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1186 struct tid_rdma_request *req),
1187 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1190 DEFINE_EVENT(/* event */
1191 hfi1_tid_rdma_request_template, hfi1_tid_req_rcv_resync,
1192 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1193 struct tid_rdma_request *req),
1194 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1197 DEFINE_EVENT(/* event */
1198 hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_pkt,
1199 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1200 struct tid_rdma_request *req),
1201 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1204 DEFINE_EVENT(/* event */
1205 hfi1_tid_rdma_request_template, hfi1_tid_req_make_tid_ack,
1206 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1207 struct tid_rdma_request *req),
1208 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1211 DEFINE_EVENT(/* event */
1212 hfi1_tid_rdma_request_template, hfi1_tid_req_handle_kdeth_eflags,
1213 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1214 struct tid_rdma_request *req),
1215 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1218 DEFINE_EVENT(/* event */
1219 hfi1_tid_rdma_request_template, hfi1_tid_req_make_rc_ack_write,
1220 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1221 struct tid_rdma_request *req),
1222 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1225 DEFINE_EVENT(/* event */
1226 hfi1_tid_rdma_request_template, hfi1_tid_req_make_req_write,
1227 TP_PROTO(struct rvt_qp *qp, char newreq, u8 opcode, u32 psn, u32 lpsn,
1228 struct tid_rdma_request *req),
1229 TP_ARGS(qp, newreq, opcode, psn, lpsn, req)
1232 DECLARE_EVENT_CLASS(/* rc_rcv_err */
1233 hfi1_rc_rcv_err_template,
1234 TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
1235 TP_ARGS(qp, opcode, psn, diff),
1236 TP_STRUCT__entry(/* entry */
1237 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1239 __field(u32, s_flags)
1241 __field(u8, s_acked_ack_queue)
1242 __field(u8, s_tail_ack_queue)
1243 __field(u8, r_head_ack_queue)
1244 __field(u32, opcode)
1249 TP_fast_assign(/* assign */
1250 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1251 __entry->qpn = qp->ibqp.qp_num;
1252 __entry->s_flags = qp->s_flags;
1253 __entry->state = qp->state;
1254 __entry->s_acked_ack_queue = qp->s_acked_ack_queue;
1255 __entry->s_tail_ack_queue = qp->s_tail_ack_queue;
1256 __entry->r_head_ack_queue = qp->r_head_ack_queue;
1257 __entry->opcode = opcode;
1259 __entry->r_psn = qp->r_psn;
1260 __entry->diff = diff;
1262 TP_printk(/* print */
1268 __entry->s_acked_ack_queue,
1269 __entry->s_tail_ack_queue,
1270 __entry->r_head_ack_queue,
1278 DEFINE_EVENT(/* event */
1279 hfi1_rc_rcv_err_template, hfi1_tid_rdma_rcv_err,
1280 TP_PROTO(struct rvt_qp *qp, u32 opcode, u32 psn, int diff),
1281 TP_ARGS(qp, opcode, psn, diff)
1284 DECLARE_EVENT_CLASS(/* sge */
1286 TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
1287 TP_ARGS(qp, index, sge),
1288 TP_STRUCT__entry(/* entry */
1289 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1293 __field(u32, sge_length)
1295 TP_fast_assign(/* assign */
1296 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
1297 __entry->qpn = qp->ibqp.qp_num;
1298 __entry->index = index;
1299 __entry->vaddr = (u64)sge->vaddr;
1300 __entry->sge_length = sge->sge_length;
1302 TP_printk(/* print */
1303 "[%s] qpn 0x%x sge %d: vaddr 0x%llx sge_length %u",
/*
 * Instance of hfi1_sge_template; per the event name, emitted while
 * checking an SGE's address/length alignment — confirm at the call site.
 */
1312 DEFINE_EVENT(/* event */
1313 hfi1_sge_template, hfi1_sge_check_align,
1314 TP_PROTO(struct rvt_qp *qp, int index, struct rvt_sge *sge),
1315 TP_ARGS(qp, index, sge)
/*
 * Event class for TID RDMA WRITE responder-side state.
 *
 * Captures the responder's request-ring indices (r_tid_head/tail/ack/
 * alloc), segment accounting (alloc_w_segs, pending_tid_w_segs), NAK
 * tracking (s_nak_psn/s_nak_state, rnr_nak_state), hardware flow state
 * (index, generation, psn), and the sync/resync bookkeeping — all read
 * from the QP's hfi1_qp_priv.
 *
 * NOTE(review): elided from this listing: the TP_ARGS() line, the
 * qpn/fpsn __field() declarations, and parts of the TP_printk()
 * argument list (qpn, r_tid_tail, fpsn).
 */
1318 DECLARE_EVENT_CLASS(/* tid_write_sp */
1319 hfi1_tid_write_rsp_template,
1320 TP_PROTO(struct rvt_qp *qp),
1322 TP_STRUCT__entry(/* entry */
1323 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1325 __field(u32, r_tid_head)
1326 __field(u32, r_tid_tail)
1327 __field(u32, r_tid_ack)
1328 __field(u32, r_tid_alloc)
1329 __field(u32, alloc_w_segs)
1330 __field(u32, pending_tid_w_segs)
1331 __field(bool, sync_pt)
1332 __field(u32, ps_nak_psn)
1333 __field(u8, ps_nak_state)
1334 __field(u8, prnr_nak_state)
1335 __field(u32, hw_flow_index)
1336 __field(u32, generation)
1338 __field(bool, resync)
1339 __field(u32, r_next_psn_kdeth)
1341 TP_fast_assign(/* assign */
1342 struct hfi1_qp_priv *priv = qp->priv;
1344 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
1345 __entry->qpn = qp->ibqp.qp_num;
1346 __entry->r_tid_head = priv->r_tid_head;
1347 __entry->r_tid_tail = priv->r_tid_tail;
1348 __entry->r_tid_ack = priv->r_tid_ack;
1349 __entry->r_tid_alloc = priv->r_tid_alloc;
1350 __entry->alloc_w_segs = priv->alloc_w_segs;
1351 __entry->pending_tid_w_segs = priv->pending_tid_w_segs;
1352 __entry->sync_pt = priv->sync_pt;
1353 __entry->ps_nak_psn = priv->s_nak_psn;
1354 __entry->ps_nak_state = priv->s_nak_state;
1355 __entry->prnr_nak_state = priv->rnr_nak_state;
1356 __entry->hw_flow_index = priv->flow_state.index;
1357 __entry->generation = priv->flow_state.generation;
1358 __entry->fpsn = priv->flow_state.psn;
1359 __entry->resync = priv->resync;
1360 __entry->r_next_psn_kdeth = priv->r_next_psn_kdeth;
1362 TP_printk(/* print */
1363 TID_WRITE_RSPDR_PRN,
1366 __entry->r_tid_head,
1367 __entry->r_tid_tail,
1369 __entry->r_tid_alloc,
1370 __entry->alloc_w_segs,
1371 __entry->pending_tid_w_segs,
1372 __entry->sync_pt ? "yes" : "no",
1373 __entry->ps_nak_psn,
1374 __entry->ps_nak_state,
1375 __entry->prnr_nak_state,
1376 __entry->hw_flow_index,
1377 __entry->generation,
1379 __entry->resync ? "yes" : "no",
1380 __entry->r_next_psn_kdeth
/*
 * Instances of hfi1_tid_write_rsp_template, one per responder code path.
 * Call-site descriptions below are inferred from the event names —
 * confirm against the .c files. TP_ARGS()/closing lines are elided from
 * this listing.
 */
/* responder state when allocating TID resources (per name) */
1384 DEFINE_EVENT(/* event */
1385 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_alloc_res,
1386 TP_PROTO(struct rvt_qp *qp),
/* responder state on receiving a TID RDMA WRITE request (per name) */
1390 DEFINE_EVENT(/* event */
1391 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_req,
1392 TP_PROTO(struct rvt_qp *qp),
/* responder state while building the WRITE response (per name) */
1396 DEFINE_EVENT(/* event */
1397 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_build_resp,
1398 TP_PROTO(struct rvt_qp *qp),
/* responder state on receiving WRITE DATA (per name) */
1402 DEFINE_EVENT(/* event */
1403 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_data,
1404 TP_PROTO(struct rvt_qp *qp),
/* responder state on receiving a RESYNC (per name) */
1408 DEFINE_EVENT(/* event */
1409 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_rcv_resync,
1410 TP_PROTO(struct rvt_qp *qp),
/* responder state while generating a TID ACK (per name) */
1414 DEFINE_EVENT(/* event */
1415 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_tid_ack,
1416 TP_PROTO(struct rvt_qp *qp),
/* responder state while handling KDETH error flags (per name) */
1420 DEFINE_EVENT(/* event */
1421 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_handle_kdeth_eflags,
1422 TP_PROTO(struct rvt_qp *qp),
/* responder state while generating an RC ACK (per name) */
1426 DEFINE_EVENT(/* event */
1427 hfi1_tid_write_rsp_template, hfi1_tid_write_rsp_make_rc_ack,
1428 TP_PROTO(struct rvt_qp *qp),
/*
 * Event class for TID RDMA WRITE requester (sender) state.
 *
 * Captures the sender's TID request-ring indices (s_tid_cur/tail/head),
 * outstanding-response count, the atomic request counters, the QP and
 * driver-private send flags, iowait flags, private send state, and the
 * retry counter. "newreq" is a caller-supplied marker passed through
 * unchanged.
 *
 * NOTE(review): elided from this listing: the qpn __field() declaration
 * and part of the TP_printk() argument list (qpn, newreq, s_tid_cur,
 * s_flags, ps_flags, iow_flags, s_state, s_retry).
 */
1432 DECLARE_EVENT_CLASS(/* tid_write_sender */
1433 hfi1_tid_write_sender_template,
1434 TP_PROTO(struct rvt_qp *qp, char newreq),
1435 TP_ARGS(qp, newreq),
1436 TP_STRUCT__entry(/* entry */
1437 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1439 __field(char, newreq)
1440 __field(u32, s_tid_cur)
1441 __field(u32, s_tid_tail)
1442 __field(u32, s_tid_head)
1443 __field(u32, pending_tid_w_resp)
1444 __field(u32, n_requests)
1445 __field(u32, n_tid_requests)
1446 __field(u32, s_flags)
1447 __field(u32, ps_flags)
1448 __field(unsigned long, iow_flags)
1449 __field(u8, s_state)
1450 __field(u8, s_retry)
1452 TP_fast_assign(/* assign */
1453 struct hfi1_qp_priv *priv = qp->priv;
1455 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
1456 __entry->qpn = qp->ibqp.qp_num;
1457 __entry->newreq = newreq;
1458 __entry->s_tid_cur = priv->s_tid_cur;
1459 __entry->s_tid_tail = priv->s_tid_tail;
1460 __entry->s_tid_head = priv->s_tid_head;
1461 __entry->pending_tid_w_resp = priv->pending_tid_w_resp;
1462 __entry->n_requests = atomic_read(&priv->n_requests);
1463 __entry->n_tid_requests = atomic_read(&priv->n_tid_requests);
1464 __entry->s_flags = qp->s_flags;
1465 __entry->ps_flags = priv->s_flags;
1466 __entry->iow_flags = priv->s_iowait.flags;
1467 __entry->s_state = priv->s_state;
1468 __entry->s_retry = priv->s_retry;
1470 TP_printk(/* print */
1471 TID_WRITE_SENDER_PRN,
1476 __entry->s_tid_tail,
1477 __entry->s_tid_head,
1478 __entry->pending_tid_w_resp,
1479 __entry->n_requests,
1480 __entry->n_tid_requests,
/*
 * Instances of hfi1_tid_write_sender_template, one per requester code
 * path. Descriptions are inferred from the event names — confirm against
 * the call sites. TP_ARGS()/closing lines are elided from this listing.
 */
/* sender state on receiving a WRITE response (per name) */
1489 DEFINE_EVENT(/* event */
1490 hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_resp,
1491 TP_PROTO(struct rvt_qp *qp, char newreq),
/* sender state on receiving a TID ACK (per name) */
1495 DEFINE_EVENT(/* event */
1496 hfi1_tid_write_sender_template, hfi1_tid_write_sender_rcv_tid_ack,
1497 TP_PROTO(struct rvt_qp *qp, char newreq),
/* sender state on retry-timer expiry (per name) */
1501 DEFINE_EVENT(/* event */
1502 hfi1_tid_write_sender_template, hfi1_tid_write_sender_retry_timeout,
1503 TP_PROTO(struct rvt_qp *qp, char newreq),
/* sender state while building a TID packet (per name) */
1507 DEFINE_EVENT(/* event */
1508 hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_tid_pkt,
1509 TP_PROTO(struct rvt_qp *qp, char newreq),
/* sender state while building an RC request (per name) */
1513 DEFINE_EVENT(/* event */
1514 hfi1_tid_write_sender_template, hfi1_tid_write_sender_make_req,
1515 TP_PROTO(struct rvt_qp *qp, char newreq),
/* sender state when the RC connection is restarted (per name) */
1519 DEFINE_EVENT(/* event */
1520 hfi1_tid_write_sender_template, hfi1_tid_write_sender_restart_rc,
1521 TP_PROTO(struct rvt_qp *qp, char newreq),
/*
 * Event class for TID ACK processing: records the AETH value plus three
 * PSNs of interest (the received psn, the request psn, and the resync
 * psn) for one QP.
 *
 * NOTE(review): elided from this listing: the qpn/psn __field()
 * declarations, the psn assignment, and the TP_printk() argument list.
 */
1525 DECLARE_EVENT_CLASS(/* tid_ack */
1526 hfi1_tid_ack_template,
1527 TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
1528 u32 req_psn, u32 resync_psn),
1529 TP_ARGS(qp, aeth, psn, req_psn, resync_psn),
1530 TP_STRUCT__entry(/* entry */
1531 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1535 __field(u32, req_psn)
1536 __field(u32, resync_psn)
1538 TP_fast_assign(/* assign */
1539 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
1540 __entry->qpn = qp->ibqp.qp_num;
1541 __entry->aeth = aeth;
1543 __entry->req_psn = req_psn;
1544 __entry->resync_psn = resync_psn;
1546 TP_printk(/* print */
1547 "[%s] qpn 0x%x aeth 0x%x psn 0x%x req_psn 0x%x resync_psn 0x%x",
/*
 * Instance of hfi1_tid_ack_template; per the name, emitted when a TID
 * ACK is received — confirm at the call site.
 */
1557 DEFINE_EVENT(/* rcv_tid_ack */
1558 hfi1_tid_ack_template, hfi1_rcv_tid_ack,
1559 TP_PROTO(struct rvt_qp *qp, u32 aeth, u32 psn,
1560 u32 req_psn, u32 resync_psn),
1561 TP_ARGS(qp, aeth, psn, req_psn, resync_psn)
/*
 * Event class for KDETH error-flag handling: records the receive type,
 * the RTE (receive-type error) code, and the PSN of the offending packet
 * for one QP; formatted via KDETH_EFLAGS_ERR_PRN.
 *
 * NOTE(review): elided from this listing: the qpn/rte/psn __field()
 * declarations, their assignments, and the TP_printk() argument list.
 */
1564 DECLARE_EVENT_CLASS(/* kdeth_eflags_error */
1565 hfi1_kdeth_eflags_error_template,
1566 TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
1567 TP_ARGS(qp, rcv_type, rte, psn),
1568 TP_STRUCT__entry(/* entry */
1569 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
1571 __field(u8, rcv_type)
1575 TP_fast_assign(/* assign */
1576 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device));
1577 __entry->qpn = qp->ibqp.qp_num;
1578 __entry->rcv_type = rcv_type;
1582 TP_printk(/* print */
1583 KDETH_EFLAGS_ERR_PRN,
/*
 * Instance of hfi1_kdeth_eflags_error_template; per the name, fired on a
 * KDETH eflags error in the TID RDMA WRITE path — confirm at the call
 * site.
 */
1592 DEFINE_EVENT(/* event */
1593 hfi1_kdeth_eflags_error_template, hfi1_eflags_err_write,
1594 TP_PROTO(struct rvt_qp *qp, u8 rcv_type, u8 rte, u32 psn),
1595 TP_ARGS(qp, rcv_type, rte, psn)
1598 #endif /* __HFI1_TRACE_TID_H */
1600 #undef TRACE_INCLUDE_PATH
1601 #undef TRACE_INCLUDE_FILE
1602 #define TRACE_INCLUDE_PATH .
1603 #define TRACE_INCLUDE_FILE trace_tid
1604 #include <trace/define_trace.h>