1 /* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
 *
 * Trace point definitions for the "rpcrdma" subsystem.
 */
8 #define TRACE_SYSTEM rpcrdma
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
13 #include <linux/scatterlist.h>
14 #include <linux/sunrpc/rpc_rdma_cid.h>
15 #include <linux/tracepoint.h>
16 #include <rdma/ib_cm.h>
17 #include <trace/events/rdma.h>
23 DECLARE_EVENT_CLASS(rpcrdma_completion_class,
25 const struct ib_wc *wc,
26 const struct rpc_rdma_cid *cid
33 __field(int, completion_id)
34 __field(unsigned long, status)
35 __field(unsigned int, vendor_err)
39 __entry->cq_id = cid->ci_queue_id;
40 __entry->completion_id = cid->ci_completion_id;
41 __entry->status = wc->status;
43 __entry->vendor_err = wc->vendor_err;
45 __entry->vendor_err = 0;
48 TP_printk("cq.id=%u cid=%d status=%s (%lu/0x%x)",
49 __entry->cq_id, __entry->completion_id,
50 rdma_show_wc_status(__entry->status),
51 __entry->status, __entry->vendor_err
55 #define DEFINE_COMPLETION_EVENT(name) \
56 DEFINE_EVENT(rpcrdma_completion_class, name, \
58 const struct ib_wc *wc, \
59 const struct rpc_rdma_cid *cid \
63 DECLARE_EVENT_CLASS(xprtrdma_reply_class,
65 const struct rpcrdma_rep *rep
74 __string(addr, rpcrdma_addrstr(rep->rr_rxprt))
75 __string(port, rpcrdma_portstr(rep->rr_rxprt))
79 __entry->xid = be32_to_cpu(rep->rr_xid);
80 __entry->version = be32_to_cpu(rep->rr_vers);
81 __entry->proc = be32_to_cpu(rep->rr_proc);
82 __assign_str(addr, rpcrdma_addrstr(rep->rr_rxprt));
83 __assign_str(port, rpcrdma_portstr(rep->rr_rxprt));
86 TP_printk("peer=[%s]:%s xid=0x%08x version=%u proc=%u",
87 __get_str(addr), __get_str(port),
88 __entry->xid, __entry->version, __entry->proc
92 #define DEFINE_REPLY_EVENT(name) \
93 DEFINE_EVENT(xprtrdma_reply_class, \
94 xprtrdma_reply_##name##_err, \
96 const struct rpcrdma_rep *rep \
100 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
102 const struct rpcrdma_xprt *r_xprt
108 __field(const void *, r_xprt)
109 __string(addr, rpcrdma_addrstr(r_xprt))
110 __string(port, rpcrdma_portstr(r_xprt))
114 __entry->r_xprt = r_xprt;
115 __assign_str(addr, rpcrdma_addrstr(r_xprt));
116 __assign_str(port, rpcrdma_portstr(r_xprt));
119 TP_printk("peer=[%s]:%s r_xprt=%p",
120 __get_str(addr), __get_str(port), __entry->r_xprt
124 #define DEFINE_RXPRT_EVENT(name) \
125 DEFINE_EVENT(xprtrdma_rxprt, name, \
127 const struct rpcrdma_xprt *r_xprt \
131 DECLARE_EVENT_CLASS(xprtrdma_connect_class,
133 const struct rpcrdma_xprt *r_xprt,
140 __field(const void *, r_xprt)
142 __field(int, connect_status)
143 __string(addr, rpcrdma_addrstr(r_xprt))
144 __string(port, rpcrdma_portstr(r_xprt))
148 __entry->r_xprt = r_xprt;
150 __entry->connect_status = r_xprt->rx_ep->re_connect_status;
151 __assign_str(addr, rpcrdma_addrstr(r_xprt));
152 __assign_str(port, rpcrdma_portstr(r_xprt));
155 TP_printk("peer=[%s]:%s r_xprt=%p: rc=%d connection status=%d",
156 __get_str(addr), __get_str(port), __entry->r_xprt,
157 __entry->rc, __entry->connect_status
161 #define DEFINE_CONN_EVENT(name) \
162 DEFINE_EVENT(xprtrdma_connect_class, xprtrdma_##name, \
164 const struct rpcrdma_xprt *r_xprt, \
169 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
171 const struct rpc_task *task,
173 struct rpcrdma_mr *mr,
177 TP_ARGS(task, pos, mr, nsegs),
180 __field(unsigned int, task_id)
181 __field(unsigned int, client_id)
182 __field(unsigned int, pos)
191 __entry->task_id = task->tk_pid;
192 __entry->client_id = task->tk_client->cl_clid;
194 __entry->nents = mr->mr_nents;
195 __entry->handle = mr->mr_handle;
196 __entry->length = mr->mr_length;
197 __entry->offset = mr->mr_offset;
198 __entry->nsegs = nsegs;
201 TP_printk("task:%u@%u pos=%u %u@0x%016llx:0x%08x (%s)",
202 __entry->task_id, __entry->client_id,
203 __entry->pos, __entry->length,
204 (unsigned long long)__entry->offset, __entry->handle,
205 __entry->nents < __entry->nsegs ? "more" : "last"
209 #define DEFINE_RDCH_EVENT(name) \
210 DEFINE_EVENT(xprtrdma_rdch_event, xprtrdma_chunk_##name,\
212 const struct rpc_task *task, \
214 struct rpcrdma_mr *mr, \
217 TP_ARGS(task, pos, mr, nsegs))
219 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
221 const struct rpc_task *task,
222 struct rpcrdma_mr *mr,
226 TP_ARGS(task, mr, nsegs),
229 __field(unsigned int, task_id)
230 __field(unsigned int, client_id)
239 __entry->task_id = task->tk_pid;
240 __entry->client_id = task->tk_client->cl_clid;
241 __entry->nents = mr->mr_nents;
242 __entry->handle = mr->mr_handle;
243 __entry->length = mr->mr_length;
244 __entry->offset = mr->mr_offset;
245 __entry->nsegs = nsegs;
248 TP_printk("task:%u@%u %u@0x%016llx:0x%08x (%s)",
249 __entry->task_id, __entry->client_id,
250 __entry->length, (unsigned long long)__entry->offset,
252 __entry->nents < __entry->nsegs ? "more" : "last"
256 #define DEFINE_WRCH_EVENT(name) \
257 DEFINE_EVENT(xprtrdma_wrch_event, xprtrdma_chunk_##name,\
259 const struct rpc_task *task, \
260 struct rpcrdma_mr *mr, \
263 TP_ARGS(task, mr, nsegs))
/* Export the dma_data_direction values so user-space trace tools can
 * decode them symbolically in the event format.
 */
265 TRACE_DEFINE_ENUM(DMA_BIDIRECTIONAL);
266 TRACE_DEFINE_ENUM(DMA_TO_DEVICE);
267 TRACE_DEFINE_ENUM(DMA_FROM_DEVICE);
268 TRACE_DEFINE_ENUM(DMA_NONE);
/* Render a dma_data_direction value as a short label in trace output. */
270 #define xprtrdma_show_direction(x) \
271 __print_symbolic(x, \
272 { DMA_BIDIRECTIONAL, "BIDIR" }, \
273 { DMA_TO_DEVICE, "TO_DEVICE" }, \
274 { DMA_FROM_DEVICE, "FROM_DEVICE" }, \
275 { DMA_NONE, "NONE" })
277 DECLARE_EVENT_CLASS(xprtrdma_mr_class,
279 const struct rpcrdma_mr *mr
285 __field(unsigned int, task_id)
286 __field(unsigned int, client_id)
296 const struct rpcrdma_req *req = mr->mr_req;
297 const struct rpc_task *task = req->rl_slot.rq_task;
299 __entry->task_id = task->tk_pid;
300 __entry->client_id = task->tk_client->cl_clid;
301 __entry->mr_id = mr->frwr.fr_mr->res.id;
302 __entry->nents = mr->mr_nents;
303 __entry->handle = mr->mr_handle;
304 __entry->length = mr->mr_length;
305 __entry->offset = mr->mr_offset;
306 __entry->dir = mr->mr_dir;
309 TP_printk("task:%u@%u mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
310 __entry->task_id, __entry->client_id,
311 __entry->mr_id, __entry->nents, __entry->length,
312 (unsigned long long)__entry->offset, __entry->handle,
313 xprtrdma_show_direction(__entry->dir)
317 #define DEFINE_MR_EVENT(name) \
318 DEFINE_EVENT(xprtrdma_mr_class, \
319 xprtrdma_mr_##name, \
321 const struct rpcrdma_mr *mr \
325 DECLARE_EVENT_CLASS(xprtrdma_anonymous_mr_class,
327 const struct rpcrdma_mr *mr
342 __entry->mr_id = mr->frwr.fr_mr->res.id;
343 __entry->nents = mr->mr_nents;
344 __entry->handle = mr->mr_handle;
345 __entry->length = mr->mr_length;
346 __entry->offset = mr->mr_offset;
347 __entry->dir = mr->mr_dir;
350 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s)",
351 __entry->mr_id, __entry->nents, __entry->length,
352 (unsigned long long)__entry->offset, __entry->handle,
353 xprtrdma_show_direction(__entry->dir)
357 #define DEFINE_ANON_MR_EVENT(name) \
358 DEFINE_EVENT(xprtrdma_anonymous_mr_class, \
359 xprtrdma_mr_##name, \
361 const struct rpcrdma_mr *mr \
365 DECLARE_EVENT_CLASS(xprtrdma_callback_class,
367 const struct rpcrdma_xprt *r_xprt,
368 const struct rpc_rqst *rqst
371 TP_ARGS(r_xprt, rqst),
375 __string(addr, rpcrdma_addrstr(r_xprt))
376 __string(port, rpcrdma_portstr(r_xprt))
380 __entry->xid = be32_to_cpu(rqst->rq_xid);
381 __assign_str(addr, rpcrdma_addrstr(r_xprt));
382 __assign_str(port, rpcrdma_portstr(r_xprt));
385 TP_printk("peer=[%s]:%s xid=0x%08x",
386 __get_str(addr), __get_str(port), __entry->xid
390 #define DEFINE_CALLBACK_EVENT(name) \
391 DEFINE_EVENT(xprtrdma_callback_class, \
392 xprtrdma_cb_##name, \
394 const struct rpcrdma_xprt *r_xprt, \
395 const struct rpc_rqst *rqst \
397 TP_ARGS(r_xprt, rqst))
403 TRACE_EVENT(xprtrdma_inline_thresh,
405 const struct rpcrdma_ep *ep
411 __field(unsigned int, inline_send)
412 __field(unsigned int, inline_recv)
413 __field(unsigned int, max_send)
414 __field(unsigned int, max_recv)
415 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
416 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
420 const struct rdma_cm_id *id = ep->re_id;
422 __entry->inline_send = ep->re_inline_send;
423 __entry->inline_recv = ep->re_inline_recv;
424 __entry->max_send = ep->re_max_inline_send;
425 __entry->max_recv = ep->re_max_inline_recv;
426 memcpy(__entry->srcaddr, &id->route.addr.src_addr,
427 sizeof(struct sockaddr_in6));
428 memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
429 sizeof(struct sockaddr_in6));
432 TP_printk("%pISpc -> %pISpc neg send/recv=%u/%u, calc send/recv=%u/%u",
433 __entry->srcaddr, __entry->dstaddr,
434 __entry->inline_send, __entry->inline_recv,
435 __entry->max_send, __entry->max_recv
/* Connection state events (xprtrdma_connect_class): report rc and
 * the endpoint's re_connect_status for connect and disconnect.
 */
439 DEFINE_CONN_EVENT(connect);
440 DEFINE_CONN_EVENT(disconnect);
/* NOTE(review): name suggests a disconnect fault-injection point —
 * confirm against the caller of trace_xprtrdma_op_inject_dsc().
 */
442 DEFINE_RXPRT_EVENT(xprtrdma_op_inject_dsc);
444 TRACE_EVENT(xprtrdma_op_connect,
446 const struct rpcrdma_xprt *r_xprt,
450 TP_ARGS(r_xprt, delay),
453 __field(const void *, r_xprt)
454 __field(unsigned long, delay)
455 __string(addr, rpcrdma_addrstr(r_xprt))
456 __string(port, rpcrdma_portstr(r_xprt))
460 __entry->r_xprt = r_xprt;
461 __entry->delay = delay;
462 __assign_str(addr, rpcrdma_addrstr(r_xprt));
463 __assign_str(port, rpcrdma_portstr(r_xprt));
466 TP_printk("peer=[%s]:%s r_xprt=%p delay=%lu",
467 __get_str(addr), __get_str(port), __entry->r_xprt,
473 TRACE_EVENT(xprtrdma_op_set_cto,
475 const struct rpcrdma_xprt *r_xprt,
476 unsigned long connect,
477 unsigned long reconnect
480 TP_ARGS(r_xprt, connect, reconnect),
483 __field(const void *, r_xprt)
484 __field(unsigned long, connect)
485 __field(unsigned long, reconnect)
486 __string(addr, rpcrdma_addrstr(r_xprt))
487 __string(port, rpcrdma_portstr(r_xprt))
491 __entry->r_xprt = r_xprt;
492 __entry->connect = connect;
493 __entry->reconnect = reconnect;
494 __assign_str(addr, rpcrdma_addrstr(r_xprt));
495 __assign_str(port, rpcrdma_portstr(r_xprt));
498 TP_printk("peer=[%s]:%s r_xprt=%p: connect=%lu reconnect=%lu",
499 __get_str(addr), __get_str(port), __entry->r_xprt,
500 __entry->connect / HZ, __entry->reconnect / HZ
504 TRACE_EVENT(xprtrdma_qp_event,
506 const struct rpcrdma_ep *ep,
507 const struct ib_event *event
513 __field(unsigned long, event)
514 __string(name, event->device->name)
515 __array(unsigned char, srcaddr, sizeof(struct sockaddr_in6))
516 __array(unsigned char, dstaddr, sizeof(struct sockaddr_in6))
520 const struct rdma_cm_id *id = ep->re_id;
522 __entry->event = event->event;
523 __assign_str(name, event->device->name);
524 memcpy(__entry->srcaddr, &id->route.addr.src_addr,
525 sizeof(struct sockaddr_in6));
526 memcpy(__entry->dstaddr, &id->route.addr.dst_addr,
527 sizeof(struct sockaddr_in6));
530 TP_printk("%pISpc -> %pISpc device=%s %s (%lu)",
531 __entry->srcaddr, __entry->dstaddr, __get_str(name),
532 rdma_show_ib_event(__entry->event), __entry->event
540 TRACE_EVENT(xprtrdma_createmrs,
542 const struct rpcrdma_xprt *r_xprt,
546 TP_ARGS(r_xprt, count),
549 __field(const void *, r_xprt)
550 __string(addr, rpcrdma_addrstr(r_xprt))
551 __string(port, rpcrdma_portstr(r_xprt))
552 __field(unsigned int, count)
556 __entry->r_xprt = r_xprt;
557 __entry->count = count;
558 __assign_str(addr, rpcrdma_addrstr(r_xprt));
559 __assign_str(port, rpcrdma_portstr(r_xprt));
562 TP_printk("peer=[%s]:%s r_xprt=%p: created %u MRs",
563 __get_str(addr), __get_str(port), __entry->r_xprt,
568 TRACE_EVENT(xprtrdma_nomrs_err,
570 const struct rpcrdma_xprt *r_xprt,
571 const struct rpcrdma_req *req
574 TP_ARGS(r_xprt, req),
577 __field(unsigned int, task_id)
578 __field(unsigned int, client_id)
579 __string(addr, rpcrdma_addrstr(r_xprt))
580 __string(port, rpcrdma_portstr(r_xprt))
584 const struct rpc_rqst *rqst = &req->rl_slot;
586 __entry->task_id = rqst->rq_task->tk_pid;
587 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
588 __assign_str(addr, rpcrdma_addrstr(r_xprt));
589 __assign_str(port, rpcrdma_portstr(r_xprt));
592 TP_printk("peer=[%s]:%s task:%u@%u",
593 __get_str(addr), __get_str(port),
594 __entry->task_id, __entry->client_id
/* Chunk-building events: these expand to xprtrdma_chunk_read,
 * xprtrdma_chunk_write, and xprtrdma_chunk_reply (see the
 * xprtrdma_rdch_event / xprtrdma_wrch_event classes above).
 */
598 DEFINE_RDCH_EVENT(read);
599 DEFINE_WRCH_EVENT(write);
600 DEFINE_WRCH_EVENT(reply);
/* Export the rpcrdma chunk-type enum values for user-space decoding. */
602 TRACE_DEFINE_ENUM(rpcrdma_noch);
603 TRACE_DEFINE_ENUM(rpcrdma_noch_pullup);
604 TRACE_DEFINE_ENUM(rpcrdma_noch_mapped);
605 TRACE_DEFINE_ENUM(rpcrdma_readch);
606 TRACE_DEFINE_ENUM(rpcrdma_areadch);
607 TRACE_DEFINE_ENUM(rpcrdma_writech);
608 TRACE_DEFINE_ENUM(rpcrdma_replych);
/* Render a chunk-type value as a human-readable label in trace output. */
610 #define xprtrdma_show_chunktype(x) \
611 __print_symbolic(x, \
612 { rpcrdma_noch, "inline" }, \
613 { rpcrdma_noch_pullup, "pullup" }, \
614 { rpcrdma_noch_mapped, "mapped" }, \
615 { rpcrdma_readch, "read list" }, \
616 { rpcrdma_areadch, "*read list" }, \
617 { rpcrdma_writech, "write list" }, \
618 { rpcrdma_replych, "reply chunk" })
620 TRACE_EVENT(xprtrdma_marshal,
622 const struct rpcrdma_req *req,
627 TP_ARGS(req, rtype, wtype),
630 __field(unsigned int, task_id)
631 __field(unsigned int, client_id)
633 __field(unsigned int, hdrlen)
634 __field(unsigned int, headlen)
635 __field(unsigned int, pagelen)
636 __field(unsigned int, taillen)
637 __field(unsigned int, rtype)
638 __field(unsigned int, wtype)
642 const struct rpc_rqst *rqst = &req->rl_slot;
644 __entry->task_id = rqst->rq_task->tk_pid;
645 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
646 __entry->xid = be32_to_cpu(rqst->rq_xid);
647 __entry->hdrlen = req->rl_hdrbuf.len;
648 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
649 __entry->pagelen = rqst->rq_snd_buf.page_len;
650 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
651 __entry->rtype = rtype;
652 __entry->wtype = wtype;
655 TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
656 __entry->task_id, __entry->client_id, __entry->xid,
658 __entry->headlen, __entry->pagelen, __entry->taillen,
659 xprtrdma_show_chunktype(__entry->rtype),
660 xprtrdma_show_chunktype(__entry->wtype)
664 TRACE_EVENT(xprtrdma_marshal_failed,
665 TP_PROTO(const struct rpc_rqst *rqst,
672 __field(unsigned int, task_id)
673 __field(unsigned int, client_id)
679 __entry->task_id = rqst->rq_task->tk_pid;
680 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
681 __entry->xid = be32_to_cpu(rqst->rq_xid);
685 TP_printk("task:%u@%u xid=0x%08x: ret=%d",
686 __entry->task_id, __entry->client_id, __entry->xid,
691 TRACE_EVENT(xprtrdma_prepsend_failed,
692 TP_PROTO(const struct rpc_rqst *rqst,
699 __field(unsigned int, task_id)
700 __field(unsigned int, client_id)
706 __entry->task_id = rqst->rq_task->tk_pid;
707 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
708 __entry->xid = be32_to_cpu(rqst->rq_xid);
712 TP_printk("task:%u@%u xid=0x%08x: ret=%d",
713 __entry->task_id, __entry->client_id, __entry->xid,
718 TRACE_EVENT(xprtrdma_post_send,
720 const struct rpcrdma_req *req
727 __field(int, completion_id)
728 __field(unsigned int, task_id)
729 __field(unsigned int, client_id)
730 __field(int, num_sge)
731 __field(int, signaled)
735 const struct rpc_rqst *rqst = &req->rl_slot;
736 const struct rpcrdma_sendctx *sc = req->rl_sendctx;
738 __entry->cq_id = sc->sc_cid.ci_queue_id;
739 __entry->completion_id = sc->sc_cid.ci_completion_id;
740 __entry->task_id = rqst->rq_task->tk_pid;
741 __entry->client_id = rqst->rq_task->tk_client ?
742 rqst->rq_task->tk_client->cl_clid : -1;
743 __entry->num_sge = req->rl_wr.num_sge;
744 __entry->signaled = req->rl_wr.send_flags & IB_SEND_SIGNALED;
747 TP_printk("task:%u@%u cq.id=%u cid=%d (%d SGE%s) %s",
748 __entry->task_id, __entry->client_id,
749 __entry->cq_id, __entry->completion_id,
750 __entry->num_sge, (__entry->num_sge == 1 ? "" : "s"),
751 (__entry->signaled ? "signaled" : "")
755 TRACE_EVENT(xprtrdma_post_recv,
757 const struct rpcrdma_rep *rep
764 __field(int, completion_id)
768 __entry->cq_id = rep->rr_cid.ci_queue_id;
769 __entry->completion_id = rep->rr_cid.ci_completion_id;
772 TP_printk("cq.id=%d cid=%d",
773 __entry->cq_id, __entry->completion_id
777 TRACE_EVENT(xprtrdma_post_recvs,
779 const struct rpcrdma_xprt *r_xprt,
784 TP_ARGS(r_xprt, count, status),
787 __field(const void *, r_xprt)
788 __field(unsigned int, count)
791 __string(addr, rpcrdma_addrstr(r_xprt))
792 __string(port, rpcrdma_portstr(r_xprt))
796 __entry->r_xprt = r_xprt;
797 __entry->count = count;
798 __entry->status = status;
799 __entry->posted = r_xprt->rx_ep->re_receive_count;
800 __assign_str(addr, rpcrdma_addrstr(r_xprt));
801 __assign_str(port, rpcrdma_portstr(r_xprt));
804 TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
805 __get_str(addr), __get_str(port), __entry->r_xprt,
806 __entry->count, __entry->posted, __entry->status
810 TRACE_EVENT(xprtrdma_post_linv_err,
812 const struct rpcrdma_req *req,
816 TP_ARGS(req, status),
819 __field(unsigned int, task_id)
820 __field(unsigned int, client_id)
825 const struct rpc_task *task = req->rl_slot.rq_task;
827 __entry->task_id = task->tk_pid;
828 __entry->client_id = task->tk_client->cl_clid;
829 __entry->status = status;
832 TP_printk("task:%u@%u status=%d",
833 __entry->task_id, __entry->client_id, __entry->status
/* Work-completion events, one per completion handler. Each expands
 * from rpcrdma_completion_class and reports cq.id, cid, and the
 * wc status/vendor_err.
 */
841 DEFINE_COMPLETION_EVENT(xprtrdma_wc_receive);
842 DEFINE_COMPLETION_EVENT(xprtrdma_wc_send);
843 DEFINE_COMPLETION_EVENT(xprtrdma_wc_fastreg);
844 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li);
845 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_wake);
846 DEFINE_COMPLETION_EVENT(xprtrdma_wc_li_done);
848 TRACE_EVENT(xprtrdma_frwr_alloc,
850 const struct rpcrdma_mr *mr,
862 __entry->mr_id = mr->frwr.fr_mr->res.id;
866 TP_printk("mr.id=%u: rc=%d",
867 __entry->mr_id, __entry->rc
871 TRACE_EVENT(xprtrdma_frwr_dereg,
873 const struct rpcrdma_mr *mr,
890 __entry->mr_id = mr->frwr.fr_mr->res.id;
891 __entry->nents = mr->mr_nents;
892 __entry->handle = mr->mr_handle;
893 __entry->length = mr->mr_length;
894 __entry->offset = mr->mr_offset;
895 __entry->dir = mr->mr_dir;
899 TP_printk("mr.id=%u nents=%d %u@0x%016llx:0x%08x (%s): rc=%d",
900 __entry->mr_id, __entry->nents, __entry->length,
901 (unsigned long long)__entry->offset, __entry->handle,
902 xprtrdma_show_direction(__entry->dir),
907 TRACE_EVENT(xprtrdma_frwr_sgerr,
909 const struct rpcrdma_mr *mr,
913 TP_ARGS(mr, sg_nents),
923 __entry->mr_id = mr->frwr.fr_mr->res.id;
924 __entry->addr = mr->mr_sg->dma_address;
925 __entry->dir = mr->mr_dir;
926 __entry->nents = sg_nents;
929 TP_printk("mr.id=%u DMA addr=0x%llx (%s) sg_nents=%d",
930 __entry->mr_id, __entry->addr,
931 xprtrdma_show_direction(__entry->dir),
936 TRACE_EVENT(xprtrdma_frwr_maperr,
938 const struct rpcrdma_mr *mr,
942 TP_ARGS(mr, num_mapped),
948 __field(int, num_mapped)
953 __entry->mr_id = mr->frwr.fr_mr->res.id;
954 __entry->addr = mr->mr_sg->dma_address;
955 __entry->dir = mr->mr_dir;
956 __entry->num_mapped = num_mapped;
957 __entry->nents = mr->mr_nents;
960 TP_printk("mr.id=%u DMA addr=0x%llx (%s) nents=%d of %d",
961 __entry->mr_id, __entry->addr,
962 xprtrdma_show_direction(__entry->dir),
963 __entry->num_mapped, __entry->nents
/* MR events with task context: xprtrdma_mr_class resolves the owning
 * rpc_task via mr->mr_req->rl_slot.rq_task.
 */
967 DEFINE_MR_EVENT(localinv);
968 DEFINE_MR_EVENT(map);
/* MR events without task context (xprtrdma_anonymous_mr_class). */
970 DEFINE_ANON_MR_EVENT(unmap);
971 DEFINE_ANON_MR_EVENT(recycle);
973 TRACE_EVENT(xprtrdma_dma_maperr,
985 __entry->addr = addr;
988 TP_printk("dma addr=0x%llx\n", __entry->addr)
995 TRACE_EVENT(xprtrdma_reply,
997 const struct rpc_task *task,
998 const struct rpcrdma_rep *rep,
1002 TP_ARGS(task, rep, credits),
1005 __field(unsigned int, task_id)
1006 __field(unsigned int, client_id)
1008 __field(unsigned int, credits)
1012 __entry->task_id = task->tk_pid;
1013 __entry->client_id = task->tk_client->cl_clid;
1014 __entry->xid = be32_to_cpu(rep->rr_xid);
1015 __entry->credits = credits;
1018 TP_printk("task:%u@%u xid=0x%08x credits=%u",
1019 __entry->task_id, __entry->client_id, __entry->xid,
/* Reply-parsing error events: each expands to xprtrdma_reply_<name>_err
 * (see xprtrdma_reply_class above).
 */
1024 DEFINE_REPLY_EVENT(vers);
1025 DEFINE_REPLY_EVENT(rqst);
1026 DEFINE_REPLY_EVENT(short);
1027 DEFINE_REPLY_EVENT(hdr);
1029 TRACE_EVENT(xprtrdma_err_vers,
1031 const struct rpc_rqst *rqst,
1036 TP_ARGS(rqst, min, max),
1039 __field(unsigned int, task_id)
1040 __field(unsigned int, client_id)
1047 __entry->task_id = rqst->rq_task->tk_pid;
1048 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
1049 __entry->xid = be32_to_cpu(rqst->rq_xid);
1050 __entry->min = be32_to_cpup(min);
1051 __entry->max = be32_to_cpup(max);
1054 TP_printk("task:%u@%u xid=0x%08x versions=[%u, %u]",
1055 __entry->task_id, __entry->client_id, __entry->xid,
1056 __entry->min, __entry->max
1060 TRACE_EVENT(xprtrdma_err_chunk,
1062 const struct rpc_rqst *rqst
1068 __field(unsigned int, task_id)
1069 __field(unsigned int, client_id)
1074 __entry->task_id = rqst->rq_task->tk_pid;
1075 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
1076 __entry->xid = be32_to_cpu(rqst->rq_xid);
1079 TP_printk("task:%u@%u xid=0x%08x",
1080 __entry->task_id, __entry->client_id, __entry->xid
1084 TRACE_EVENT(xprtrdma_err_unrecognized,
1086 const struct rpc_rqst *rqst,
1090 TP_ARGS(rqst, procedure),
1093 __field(unsigned int, task_id)
1094 __field(unsigned int, client_id)
1096 __field(u32, procedure)
1100 __entry->task_id = rqst->rq_task->tk_pid;
1101 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
1102 __entry->procedure = be32_to_cpup(procedure);
1105 TP_printk("task:%u@%u xid=0x%08x procedure=%u",
1106 __entry->task_id, __entry->client_id, __entry->xid,
1111 TRACE_EVENT(xprtrdma_fixup,
1113 const struct rpc_rqst *rqst,
1117 TP_ARGS(rqst, fixup),
1120 __field(unsigned int, task_id)
1121 __field(unsigned int, client_id)
1122 __field(unsigned long, fixup)
1123 __field(size_t, headlen)
1124 __field(unsigned int, pagelen)
1125 __field(size_t, taillen)
1129 __entry->task_id = rqst->rq_task->tk_pid;
1130 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
1131 __entry->fixup = fixup;
1132 __entry->headlen = rqst->rq_rcv_buf.head[0].iov_len;
1133 __entry->pagelen = rqst->rq_rcv_buf.page_len;
1134 __entry->taillen = rqst->rq_rcv_buf.tail[0].iov_len;
1137 TP_printk("task:%u@%u fixup=%lu xdr=%zu/%u/%zu",
1138 __entry->task_id, __entry->client_id, __entry->fixup,
1139 __entry->headlen, __entry->pagelen, __entry->taillen
1143 TRACE_EVENT(xprtrdma_decode_seg,
1150 TP_ARGS(handle, length, offset),
1153 __field(u32, handle)
1154 __field(u32, length)
1155 __field(u64, offset)
1159 __entry->handle = handle;
1160 __entry->length = length;
1161 __entry->offset = offset;
1164 TP_printk("%u@0x%016llx:0x%08x",
1165 __entry->length, (unsigned long long)__entry->offset,
1170 TRACE_EVENT(xprtrdma_mrs_zap,
1172 const struct rpc_task *task
1178 __field(unsigned int, task_id)
1179 __field(unsigned int, client_id)
1183 __entry->task_id = task->tk_pid;
1184 __entry->client_id = task->tk_client->cl_clid;
1187 TP_printk("task:%u@%u",
1188 __entry->task_id, __entry->client_id
1196 TRACE_EVENT(xprtrdma_cb_setup,
1198 const struct rpcrdma_xprt *r_xprt,
1202 TP_ARGS(r_xprt, reqs),
1205 __field(const void *, r_xprt)
1206 __field(unsigned int, reqs)
1207 __string(addr, rpcrdma_addrstr(r_xprt))
1208 __string(port, rpcrdma_portstr(r_xprt))
1212 __entry->r_xprt = r_xprt;
1213 __entry->reqs = reqs;
1214 __assign_str(addr, rpcrdma_addrstr(r_xprt));
1215 __assign_str(port, rpcrdma_portstr(r_xprt));
1218 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
1219 __get_str(addr), __get_str(port),
1220 __entry->r_xprt, __entry->reqs
/* Callback events: expand to xprtrdma_cb_call and xprtrdma_cb_reply
 * (see xprtrdma_callback_class above).
 */
1224 DEFINE_CALLBACK_EVENT(call);
1225 DEFINE_CALLBACK_EVENT(reply);
/**
 ** Server-side RPC/RDMA events
 **/
1231 DECLARE_EVENT_CLASS(svcrdma_accept_class,
1233 const struct svcxprt_rdma *rdma,
1237 TP_ARGS(rdma, status),
1240 __field(long, status)
1241 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1245 __entry->status = status;
1246 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1249 TP_printk("addr=%s status=%ld",
1250 __get_str(addr), __entry->status
1254 #define DEFINE_ACCEPT_EVENT(name) \
1255 DEFINE_EVENT(svcrdma_accept_class, svcrdma_##name##_err, \
1257 const struct svcxprt_rdma *rdma, \
1260 TP_ARGS(rdma, status))
/* Server accept-path failure events: each expands to
 * svcrdma_<name>_err from svcrdma_accept_class (addr + status).
 */
1262 DEFINE_ACCEPT_EVENT(pd);
1263 DEFINE_ACCEPT_EVENT(qp);
1264 DEFINE_ACCEPT_EVENT(fabric);
1265 DEFINE_ACCEPT_EVENT(initdepth);
1266 DEFINE_ACCEPT_EVENT(accept);
/* Export the RPC/RDMA procedure values for user-space decoding. */
1268 TRACE_DEFINE_ENUM(RDMA_MSG);
1269 TRACE_DEFINE_ENUM(RDMA_NOMSG);
1270 TRACE_DEFINE_ENUM(RDMA_MSGP);
1271 TRACE_DEFINE_ENUM(RDMA_DONE);
1272 TRACE_DEFINE_ENUM(RDMA_ERROR);
/* Render an RPC/RDMA procedure number by name in trace output. */
1274 #define show_rpcrdma_proc(x) \
1275 __print_symbolic(x, \
1276 { RDMA_MSG, "RDMA_MSG" }, \
1277 { RDMA_NOMSG, "RDMA_NOMSG" }, \
1278 { RDMA_MSGP, "RDMA_MSGP" }, \
1279 { RDMA_DONE, "RDMA_DONE" }, \
1280 { RDMA_ERROR, "RDMA_ERROR" })
1282 TRACE_EVENT(svcrdma_decode_rqst,
1284 const struct svc_rdma_recv_ctxt *ctxt,
1289 TP_ARGS(ctxt, p, hdrlen),
1293 __field(int, completion_id)
1297 __field(u32, credits)
1298 __field(unsigned int, hdrlen)
1302 __entry->cq_id = ctxt->rc_cid.ci_queue_id;
1303 __entry->completion_id = ctxt->rc_cid.ci_completion_id;
1304 __entry->xid = be32_to_cpup(p++);
1305 __entry->vers = be32_to_cpup(p++);
1306 __entry->credits = be32_to_cpup(p++);
1307 __entry->proc = be32_to_cpup(p);
1308 __entry->hdrlen = hdrlen;
1311 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1312 __entry->cq_id, __entry->completion_id,
1313 __entry->xid, __entry->vers, __entry->credits,
1314 show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
1317 TRACE_EVENT(svcrdma_decode_short_err,
1319 const struct svc_rdma_recv_ctxt *ctxt,
1323 TP_ARGS(ctxt, hdrlen),
1327 __field(int, completion_id)
1328 __field(unsigned int, hdrlen)
1332 __entry->cq_id = ctxt->rc_cid.ci_queue_id;
1333 __entry->completion_id = ctxt->rc_cid.ci_completion_id;
1334 __entry->hdrlen = hdrlen;
1337 TP_printk("cq.id=%u cid=%d hdrlen=%u",
1338 __entry->cq_id, __entry->completion_id,
1342 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1344 const struct svc_rdma_recv_ctxt *ctxt,
1352 __field(int, completion_id)
1356 __field(u32, credits)
1360 __entry->cq_id = ctxt->rc_cid.ci_queue_id;
1361 __entry->completion_id = ctxt->rc_cid.ci_completion_id;
1362 __entry->xid = be32_to_cpup(p++);
1363 __entry->vers = be32_to_cpup(p++);
1364 __entry->credits = be32_to_cpup(p++);
1365 __entry->proc = be32_to_cpup(p);
1368 TP_printk("cq.id=%u cid=%d xid=0x%08x vers=%u credits=%u proc=%u",
1369 __entry->cq_id, __entry->completion_id,
1370 __entry->xid, __entry->vers, __entry->credits, __entry->proc)
1373 #define DEFINE_BADREQ_EVENT(name) \
1374 DEFINE_EVENT(svcrdma_badreq_event, \
1375 svcrdma_decode_##name##_err, \
1377 const struct svc_rdma_recv_ctxt *ctxt, \
/* Malformed-request events: each expands to svcrdma_decode_<name>_err
 * from svcrdma_badreq_event (cq.id/cid plus the four header words).
 */
1382 DEFINE_BADREQ_EVENT(badvers);
1383 DEFINE_BADREQ_EVENT(drop);
1384 DEFINE_BADREQ_EVENT(badproc);
1385 DEFINE_BADREQ_EVENT(parse);
1387 TRACE_EVENT(svcrdma_encode_wseg,
1389 const struct svc_rdma_send_ctxt *ctxt,
1396 TP_ARGS(ctxt, segno, handle, length, offset),
1400 __field(int, completion_id)
1402 __field(u32, handle)
1403 __field(u32, length)
1404 __field(u64, offset)
1408 __entry->cq_id = ctxt->sc_cid.ci_queue_id;
1409 __entry->completion_id = ctxt->sc_cid.ci_completion_id;
1410 __entry->segno = segno;
1411 __entry->handle = handle;
1412 __entry->length = length;
1413 __entry->offset = offset;
1416 TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1417 __entry->cq_id, __entry->completion_id,
1418 __entry->segno, __entry->length,
1419 (unsigned long long)__entry->offset, __entry->handle
1423 TRACE_EVENT(svcrdma_decode_rseg,
1425 const struct rpc_rdma_cid *cid,
1426 const struct svc_rdma_chunk *chunk,
1427 const struct svc_rdma_segment *segment
1430 TP_ARGS(cid, chunk, segment),
1434 __field(int, completion_id)
1436 __field(u32, position)
1437 __field(u32, handle)
1438 __field(u32, length)
1439 __field(u64, offset)
1443 __entry->cq_id = cid->ci_queue_id;
1444 __entry->completion_id = cid->ci_completion_id;
1445 __entry->segno = chunk->ch_segcount;
1446 __entry->position = chunk->ch_position;
1447 __entry->handle = segment->rs_handle;
1448 __entry->length = segment->rs_length;
1449 __entry->offset = segment->rs_offset;
1452 TP_printk("cq_id=%u cid=%d segno=%u position=%u %u@0x%016llx:0x%08x",
1453 __entry->cq_id, __entry->completion_id,
1454 __entry->segno, __entry->position, __entry->length,
1455 (unsigned long long)__entry->offset, __entry->handle
1459 TRACE_EVENT(svcrdma_decode_wseg,
1461 const struct rpc_rdma_cid *cid,
1462 const struct svc_rdma_chunk *chunk,
1466 TP_ARGS(cid, chunk, segno),
1470 __field(int, completion_id)
1472 __field(u32, handle)
1473 __field(u32, length)
1474 __field(u64, offset)
1478 const struct svc_rdma_segment *segment =
1479 &chunk->ch_segments[segno];
1481 __entry->cq_id = cid->ci_queue_id;
1482 __entry->completion_id = cid->ci_completion_id;
1483 __entry->segno = segno;
1484 __entry->handle = segment->rs_handle;
1485 __entry->length = segment->rs_length;
1486 __entry->offset = segment->rs_offset;
1489 TP_printk("cq_id=%u cid=%d segno=%u %u@0x%016llx:0x%08x",
1490 __entry->cq_id, __entry->completion_id,
1491 __entry->segno, __entry->length,
1492 (unsigned long long)__entry->offset, __entry->handle
1496 DECLARE_EVENT_CLASS(svcrdma_error_event,
1508 __entry->xid = be32_to_cpu(xid);
1511 TP_printk("xid=0x%08x",
1516 #define DEFINE_ERROR_EVENT(name) \
1517 DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
/* Server-side error-reply events: expand to svcrdma_err_<name>
 * from svcrdma_error_event (records the request XID).
 */
1523 DEFINE_ERROR_EVENT(vers);
1524 DEFINE_ERROR_EVENT(chunk);
/**
 ** Server-side RDMA API events
 **/
1530 DECLARE_EVENT_CLASS(svcrdma_dma_map_class,
1532 const struct svcxprt_rdma *rdma,
1537 TP_ARGS(rdma, dma_addr, length),
1540 __field(u64, dma_addr)
1541 __field(u32, length)
1542 __string(device, rdma->sc_cm_id->device->name)
1543 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1547 __entry->dma_addr = dma_addr;
1548 __entry->length = length;
1549 __assign_str(device, rdma->sc_cm_id->device->name);
1550 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1553 TP_printk("addr=%s device=%s dma_addr=%llu length=%u",
1554 __get_str(addr), __get_str(device),
1555 __entry->dma_addr, __entry->length
1559 #define DEFINE_SVC_DMA_EVENT(name) \
1560 DEFINE_EVENT(svcrdma_dma_map_class, svcrdma_##name, \
1562 const struct svcxprt_rdma *rdma,\
1566 TP_ARGS(rdma, dma_addr, length))
/* Server DMA-mapping events from svcrdma_dma_map_class
 * (remote addr, device name, dma_addr, length).
 */
1568 DEFINE_SVC_DMA_EVENT(dma_map_page);
1569 DEFINE_SVC_DMA_EVENT(dma_map_err);
1570 DEFINE_SVC_DMA_EVENT(dma_unmap_page);
1572 TRACE_EVENT(svcrdma_dma_map_rw_err,
1574 const struct svcxprt_rdma *rdma,
1579 TP_ARGS(rdma, nents, status),
1582 __field(int, status)
1583 __field(unsigned int, nents)
1584 __string(device, rdma->sc_cm_id->device->name)
1585 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1589 __entry->status = status;
1590 __entry->nents = nents;
1591 __assign_str(device, rdma->sc_cm_id->device->name);
1592 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1595 TP_printk("addr=%s device=%s nents=%u status=%d",
1596 __get_str(addr), __get_str(device), __entry->nents,
1601 TRACE_EVENT(svcrdma_no_rwctx_err,
1603 const struct svcxprt_rdma *rdma,
1604 unsigned int num_sges
1607 TP_ARGS(rdma, num_sges),
1610 __field(unsigned int, num_sges)
1611 __string(device, rdma->sc_cm_id->device->name)
1612 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1616 __entry->num_sges = num_sges;
1617 __assign_str(device, rdma->sc_cm_id->device->name);
1618 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1621 TP_printk("addr=%s device=%s num_sges=%d",
1622 __get_str(addr), __get_str(device), __entry->num_sges
1626 TRACE_EVENT(svcrdma_page_overrun_err,
1628 const struct svcxprt_rdma *rdma,
1629 const struct svc_rqst *rqst,
1633 TP_ARGS(rdma, rqst, pageno),
1636 __field(unsigned int, pageno)
1638 __string(device, rdma->sc_cm_id->device->name)
1639 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1643 __entry->pageno = pageno;
1644 __entry->xid = __be32_to_cpu(rqst->rq_xid);
1645 __assign_str(device, rdma->sc_cm_id->device->name);
1646 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1649 TP_printk("addr=%s device=%s xid=0x%08x pageno=%u", __get_str(addr),
1650 __get_str(device), __entry->xid, __entry->pageno
1654 TRACE_EVENT(svcrdma_small_wrch_err,
1656 const struct svcxprt_rdma *rdma,
1657 unsigned int remaining,
1658 unsigned int seg_no,
1659 unsigned int num_segs
1662 TP_ARGS(rdma, remaining, seg_no, num_segs),
1665 __field(unsigned int, remaining)
1666 __field(unsigned int, seg_no)
1667 __field(unsigned int, num_segs)
1668 __string(device, rdma->sc_cm_id->device->name)
1669 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1673 __entry->remaining = remaining;
1674 __entry->seg_no = seg_no;
1675 __entry->num_segs = num_segs;
1676 __assign_str(device, rdma->sc_cm_id->device->name);
1677 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1680 TP_printk("addr=%s device=%s remaining=%u seg_no=%u num_segs=%u",
1681 __get_str(addr), __get_str(device), __entry->remaining,
1682 __entry->seg_no, __entry->num_segs
1686 TRACE_EVENT(svcrdma_send_pullup,
1688 const struct svc_rdma_send_ctxt *ctxt,
1692 TP_ARGS(ctxt, msglen),
1696 __field(int, completion_id)
1697 __field(unsigned int, hdrlen)
1698 __field(unsigned int, msglen)
1702 __entry->cq_id = ctxt->sc_cid.ci_queue_id;
1703 __entry->completion_id = ctxt->sc_cid.ci_completion_id;
1704 __entry->hdrlen = ctxt->sc_hdrbuf.len,
1705 __entry->msglen = msglen;
1708 TP_printk("cq_id=%u cid=%d hdr=%u msg=%u (total %u)",
1709 __entry->cq_id, __entry->completion_id,
1710 __entry->hdrlen, __entry->msglen,
1711 __entry->hdrlen + __entry->msglen)
1714 TRACE_EVENT(svcrdma_send_err,
1716 const struct svc_rqst *rqst,
1720 TP_ARGS(rqst, status),
1723 __field(int, status)
1725 __string(addr, rqst->rq_xprt->xpt_remotebuf)
1729 __entry->status = status;
1730 __entry->xid = __be32_to_cpu(rqst->rq_xid);
1731 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1734 TP_printk("addr=%s xid=0x%08x status=%d", __get_str(addr),
1735 __entry->xid, __entry->status
1739 TRACE_EVENT(svcrdma_post_send,
1741 const struct svc_rdma_send_ctxt *ctxt
1748 __field(int, completion_id)
1749 __field(unsigned int, num_sge)
1750 __field(u32, inv_rkey)
1754 const struct ib_send_wr *wr = &ctxt->sc_send_wr;
1756 __entry->cq_id = ctxt->sc_cid.ci_queue_id;
1757 __entry->completion_id = ctxt->sc_cid.ci_completion_id;
1758 __entry->num_sge = wr->num_sge;
1759 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1760 wr->ex.invalidate_rkey : 0;
1763 TP_printk("cq_id=%u cid=%d num_sge=%u inv_rkey=0x%08x",
1764 __entry->cq_id, __entry->completion_id,
1765 __entry->num_sge, __entry->inv_rkey
1769 DEFINE_COMPLETION_EVENT(svcrdma_wc_send);
1771 TRACE_EVENT(svcrdma_post_recv,
1773 const struct svc_rdma_recv_ctxt *ctxt
1780 __field(int, completion_id)
1784 __entry->cq_id = ctxt->rc_cid.ci_queue_id;
1785 __entry->completion_id = ctxt->rc_cid.ci_completion_id;
1788 TP_printk("cq.id=%d cid=%d",
1789 __entry->cq_id, __entry->completion_id
1793 DEFINE_COMPLETION_EVENT(svcrdma_wc_receive);
1795 TRACE_EVENT(svcrdma_rq_post_err,
1797 const struct svcxprt_rdma *rdma,
1801 TP_ARGS(rdma, status),
1804 __field(int, status)
1805 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1809 __entry->status = status;
1810 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1813 TP_printk("addr=%s status=%d",
1814 __get_str(addr), __entry->status
1818 DECLARE_EVENT_CLASS(svcrdma_post_chunk_class,
1820 const struct rpc_rdma_cid *cid,
1824 TP_ARGS(cid, sqecount),
1828 __field(int, completion_id)
1829 __field(int, sqecount)
1833 __entry->cq_id = cid->ci_queue_id;
1834 __entry->completion_id = cid->ci_completion_id;
1835 __entry->sqecount = sqecount;
1838 TP_printk("cq.id=%u cid=%d sqecount=%d",
1839 __entry->cq_id, __entry->completion_id,
1844 #define DEFINE_POST_CHUNK_EVENT(name) \
1845 DEFINE_EVENT(svcrdma_post_chunk_class, \
1846 svcrdma_post_##name##_chunk, \
1848 const struct rpc_rdma_cid *cid, \
1851 TP_ARGS(cid, sqecount))
1853 DEFINE_POST_CHUNK_EVENT(read);
1854 DEFINE_POST_CHUNK_EVENT(write);
1855 DEFINE_POST_CHUNK_EVENT(reply);
1857 DEFINE_COMPLETION_EVENT(svcrdma_wc_read);
1858 DEFINE_COMPLETION_EVENT(svcrdma_wc_write);
1860 TRACE_EVENT(svcrdma_qp_error,
1862 const struct ib_event *event,
1863 const struct sockaddr *sap
1866 TP_ARGS(event, sap),
1869 __field(unsigned int, event)
1870 __string(device, event->device->name)
1871 __array(__u8, addr, INET6_ADDRSTRLEN + 10)
1875 __entry->event = event->event;
1876 __assign_str(device, event->device->name);
1877 snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1881 TP_printk("addr=%s dev=%s event=%s (%u)",
1882 __entry->addr, __get_str(device),
1883 rdma_show_ib_event(__entry->event), __entry->event
1887 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1889 const struct svcxprt_rdma *rdma
1897 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1901 __entry->avail = atomic_read(&rdma->sc_sq_avail);
1902 __entry->depth = rdma->sc_sq_depth;
1903 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1906 TP_printk("addr=%s sc_sq_avail=%d/%d",
1907 __get_str(addr), __entry->avail, __entry->depth
1911 #define DEFINE_SQ_EVENT(name) \
1912 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1914 const struct svcxprt_rdma *rdma \
1918 DEFINE_SQ_EVENT(full);
1919 DEFINE_SQ_EVENT(retry);
1921 TRACE_EVENT(svcrdma_sq_post_err,
1923 const struct svcxprt_rdma *rdma,
1927 TP_ARGS(rdma, status),
1932 __field(int, status)
1933 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1937 __entry->avail = atomic_read(&rdma->sc_sq_avail);
1938 __entry->depth = rdma->sc_sq_depth;
1939 __entry->status = status;
1940 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1943 TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
1944 __get_str(addr), __entry->avail, __entry->depth,
#endif /* _TRACE_RPCRDMA_H */

#include <trace/define_trace.h>