1 /* SPDX-License-Identifier: GPL-2.0 */
3 * Copyright (c) 2017, 2018 Oracle. All rights reserved.
5 * Trace point definitions for the "rpcrdma" subsystem.
8 #define TRACE_SYSTEM rpcrdma
10 #if !defined(_TRACE_RPCRDMA_H) || defined(TRACE_HEADER_MULTI_READ)
11 #define _TRACE_RPCRDMA_H
13 #include <linux/tracepoint.h>
14 #include <trace/events/rdma.h>
/*
 * Event class for client-side reply-header problems: records the owning
 * transport, the reply's XID, and the RPC/RDMA version and procedure
 * words decoded (be32_to_cpu) from the rpcrdma_rep.
 * NOTE(review): this excerpt elides lines from the original header
 * (TP_ARGS/TP_STRUCT__entry boilerplate is missing throughout the file).
 */
20 DECLARE_EVENT_CLASS(xprtrdma_reply_event,
22 const struct rpcrdma_rep *rep
28 __field(const void *, rep)
29 __field(const void *, r_xprt)
37 __entry->r_xprt = rep->rr_rxprt;
38 __entry->xid = be32_to_cpu(rep->rr_xid);
39 __entry->version = be32_to_cpu(rep->rr_vers);
40 __entry->proc = be32_to_cpu(rep->rr_proc);
43 TP_printk("rxprt %p xid=0x%08x rep=%p: version %u proc %u",
44 __entry->r_xprt, __entry->xid, __entry->rep,
45 __entry->version, __entry->proc
/* Stamp out a named tracepoint of the xprtrdma_reply_event class. */
49 #define DEFINE_REPLY_EVENT(name) \
50 DEFINE_EVENT(xprtrdma_reply_event, name, \
52 const struct rpcrdma_rep *rep \
/*
 * Event class for transport-lifetime events: captures the rpcrdma_xprt
 * pointer plus its peer address and port strings (via the
 * rpcrdma_addrstr/rpcrdma_portstr helpers).
 */
56 DECLARE_EVENT_CLASS(xprtrdma_rxprt,
58 const struct rpcrdma_xprt *r_xprt
64 __field(const void *, r_xprt)
65 __string(addr, rpcrdma_addrstr(r_xprt))
66 __string(port, rpcrdma_portstr(r_xprt))
70 __entry->r_xprt = r_xprt;
71 __assign_str(addr, rpcrdma_addrstr(r_xprt));
72 __assign_str(port, rpcrdma_portstr(r_xprt));
75 TP_printk("peer=[%s]:%s r_xprt=%p",
76 __get_str(addr), __get_str(port), __entry->r_xprt
/* Stamp out a named tracepoint of the xprtrdma_rxprt class. */
80 #define DEFINE_RXPRT_EVENT(name) \
81 DEFINE_EVENT(xprtrdma_rxprt, name, \
83 const struct rpcrdma_xprt *r_xprt \
/*
 * Event class for Read-chunk registration: records the task/client ids,
 * the MR and its handle/length/offset, the chunk position, and whether
 * more segments follow (nents < nsegs => "more", else "last").
 */
87 DECLARE_EVENT_CLASS(xprtrdma_rdch_event,
89 const struct rpc_task *task,
91 struct rpcrdma_mr *mr,
95 TP_ARGS(task, pos, mr, nsegs),
98 __field(unsigned int, task_id)
99 __field(unsigned int, client_id)
100 __field(const void *, mr)
101 __field(unsigned int, pos)
110 __entry->task_id = task->tk_pid;
111 __entry->client_id = task->tk_client->cl_clid;
114 __entry->nents = mr->mr_nents;
115 __entry->handle = mr->mr_handle;
116 __entry->length = mr->mr_length;
117 __entry->offset = mr->mr_offset;
118 __entry->nsegs = nsegs;
121 TP_printk("task:%u@%u mr=%p pos=%u %u@0x%016llx:0x%08x (%s)",
122 __entry->task_id, __entry->client_id, __entry->mr,
123 __entry->pos, __entry->length,
124 (unsigned long long)__entry->offset, __entry->handle,
125 __entry->nents < __entry->nsegs ? "more" : "last"
/* Stamp out a named tracepoint of the xprtrdma_rdch_event class. */
129 #define DEFINE_RDCH_EVENT(name) \
130 DEFINE_EVENT(xprtrdma_rdch_event, name, \
132 const struct rpc_task *task, \
134 struct rpcrdma_mr *mr, \
137 TP_ARGS(task, pos, mr, nsegs))
/*
 * Event class for Write/Reply-chunk registration: same fields as the
 * Read-chunk class but without a chunk position argument.
 */
139 DECLARE_EVENT_CLASS(xprtrdma_wrch_event,
141 const struct rpc_task *task,
142 struct rpcrdma_mr *mr,
146 TP_ARGS(task, mr, nsegs),
149 __field(unsigned int, task_id)
150 __field(unsigned int, client_id)
151 __field(const void *, mr)
160 __entry->task_id = task->tk_pid;
161 __entry->client_id = task->tk_client->cl_clid;
163 __entry->nents = mr->mr_nents;
164 __entry->handle = mr->mr_handle;
165 __entry->length = mr->mr_length;
166 __entry->offset = mr->mr_offset;
167 __entry->nsegs = nsegs;
170 TP_printk("task:%u@%u mr=%p %u@0x%016llx:0x%08x (%s)",
171 __entry->task_id, __entry->client_id, __entry->mr,
172 __entry->length, (unsigned long long)__entry->offset,
174 __entry->nents < __entry->nsegs ? "more" : "last"
/* Stamp out a named tracepoint of the xprtrdma_wrch_event class. */
178 #define DEFINE_WRCH_EVENT(name) \
179 DEFINE_EVENT(xprtrdma_wrch_event, name, \
181 const struct rpc_task *task, \
182 struct rpcrdma_mr *mr, \
185 TP_ARGS(task, mr, nsegs))
/* Export the FRWR state enum values so userspace can decode them. */
187 TRACE_DEFINE_ENUM(FRWR_IS_INVALID);
188 TRACE_DEFINE_ENUM(FRWR_IS_VALID);
189 TRACE_DEFINE_ENUM(FRWR_FLUSHED_FR);
190 TRACE_DEFINE_ENUM(FRWR_FLUSHED_LI);
/* Map an FRWR state value to a human-readable string in trace output. */
192 #define xprtrdma_show_frwr_state(x) \
193 __print_symbolic(x, \
194 { FRWR_IS_INVALID, "INVALID" }, \
195 { FRWR_IS_VALID, "VALID" }, \
196 { FRWR_FLUSHED_FR, "FLUSHED_FR" }, \
197 { FRWR_FLUSHED_LI, "FLUSHED_LI" })
/*
 * Event class for FRWR completion handlers: recovers the containing MR
 * from the frwr via container_of(), and records the FRWR state plus the
 * work completion status. vendor_err is only meaningful on failure, so
 * it is zeroed when wc->status == IB_WC_SUCCESS (0).
 */
199 DECLARE_EVENT_CLASS(xprtrdma_frwr_done,
201 const struct ib_wc *wc,
202 const struct rpcrdma_frwr *frwr
208 __field(const void *, mr)
209 __field(unsigned int, state)
210 __field(unsigned int, status)
211 __field(unsigned int, vendor_err)
215 __entry->mr = container_of(frwr, struct rpcrdma_mr, frwr);
216 __entry->state = frwr->fr_state;
217 __entry->status = wc->status;
218 __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
222 "mr=%p state=%s: %s (%u/0x%x)",
223 __entry->mr, xprtrdma_show_frwr_state(__entry->state),
224 rdma_show_wc_status(__entry->status),
225 __entry->status, __entry->vendor_err
/* Stamp out a named tracepoint of the xprtrdma_frwr_done class. */
229 #define DEFINE_FRWR_DONE_EVENT(name) \
230 DEFINE_EVENT(xprtrdma_frwr_done, name, \
232 const struct ib_wc *wc, \
233 const struct rpcrdma_frwr *frwr \
/*
 * Event class for MR state changes (map/unmap/invalidate): records the
 * MR pointer and its handle/length/offset triple.
 */
237 DECLARE_EVENT_CLASS(xprtrdma_mr,
239 const struct rpcrdma_mr *mr
245 __field(const void *, mr)
253 __entry->handle = mr->mr_handle;
254 __entry->length = mr->mr_length;
255 __entry->offset = mr->mr_offset;
258 TP_printk("mr=%p %u@0x%016llx:0x%08x",
259 __entry->mr, __entry->length,
260 (unsigned long long)__entry->offset,
/* Stamp out a named tracepoint of the xprtrdma_mr class. */
265 #define DEFINE_MR_EVENT(name) \
266 DEFINE_EVENT(xprtrdma_mr, name, \
268 const struct rpcrdma_mr *mr \
/*
 * Event class for backchannel (callback) RPCs: records the rpc_rqst,
 * the rpcrdma_req derived from it via rpcr_to_rdmar(), that req's
 * rl_reply, and the request XID.
 */
272 DECLARE_EVENT_CLASS(xprtrdma_cb_event,
274 const struct rpc_rqst *rqst
280 __field(const void *, rqst)
281 __field(const void *, rep)
282 __field(const void *, req)
287 __entry->rqst = rqst;
288 __entry->req = rpcr_to_rdmar(rqst);
289 __entry->rep = rpcr_to_rdmar(rqst)->rl_reply;
290 __entry->xid = be32_to_cpu(rqst->rq_xid);
293 TP_printk("xid=0x%08x, rqst=%p req=%p rep=%p",
294 __entry->xid, __entry->rqst, __entry->req, __entry->rep
/* Stamp out a named tracepoint of the xprtrdma_cb_event class. */
298 #define DEFINE_CB_EVENT(name) \
299 DEFINE_EVENT(xprtrdma_cb_event, name, \
301 const struct rpc_rqst *rqst \
/*
 * Fires on an RDMA connection-manager upcall: records the transport,
 * its peer address/port, and the CM event type and status.
 */
309 TRACE_EVENT(xprtrdma_conn_upcall,
311 const struct rpcrdma_xprt *r_xprt,
312 struct rdma_cm_event *event
315 TP_ARGS(r_xprt, event),
318 __field(const void *, r_xprt)
319 __field(unsigned int, event)
321 __string(addr, rpcrdma_addrstr(r_xprt))
322 __string(port, rpcrdma_portstr(r_xprt))
326 __entry->r_xprt = r_xprt;
327 __entry->event = event->event;
328 __entry->status = event->status;
329 __assign_str(addr, rpcrdma_addrstr(r_xprt));
330 __assign_str(port, rpcrdma_portstr(r_xprt));
333 TP_printk("peer=[%s]:%s r_xprt=%p: %s (%u/%d)",
334 __get_str(addr), __get_str(port),
335 __entry->r_xprt, rdma_show_cm_event(__entry->event),
336 __entry->event, __entry->status
/*
 * Fires on transport disconnect: records status and the endpoint's
 * rep_connected flag (1 => "still connected", else "disconnected").
 */
340 TRACE_EVENT(xprtrdma_disconnect,
342 const struct rpcrdma_xprt *r_xprt,
346 TP_ARGS(r_xprt, status),
349 __field(const void *, r_xprt)
351 __field(int, connected)
352 __string(addr, rpcrdma_addrstr(r_xprt))
353 __string(port, rpcrdma_portstr(r_xprt))
357 __entry->r_xprt = r_xprt;
358 __entry->status = status;
359 __entry->connected = r_xprt->rx_ep.rep_connected;
360 __assign_str(addr, rpcrdma_addrstr(r_xprt));
361 __assign_str(port, rpcrdma_portstr(r_xprt));
364 TP_printk("peer=[%s]:%s r_xprt=%p: status=%d %sconnected",
365 __get_str(addr), __get_str(port),
366 __entry->r_xprt, __entry->status,
367 __entry->connected == 1 ? "still " : "dis"
/* Transport lifecycle tracepoints, all of the xprtrdma_rxprt class. */
371 DEFINE_RXPRT_EVENT(xprtrdma_conn_start);
372 DEFINE_RXPRT_EVENT(xprtrdma_conn_tout);
373 DEFINE_RXPRT_EVENT(xprtrdma_create);
374 DEFINE_RXPRT_EVENT(xprtrdma_destroy);
375 DEFINE_RXPRT_EVENT(xprtrdma_remove);
376 DEFINE_RXPRT_EVENT(xprtrdma_reinsert);
377 DEFINE_RXPRT_EVENT(xprtrdma_reconnect);
378 DEFINE_RXPRT_EVENT(xprtrdma_inject_dsc);
/*
 * Fires on an IB queue-pair event for a client transport: records the
 * device name, peer address/port, and the ib_event type.
 */
380 TRACE_EVENT(xprtrdma_qp_error,
382 const struct rpcrdma_xprt *r_xprt,
383 const struct ib_event *event
386 TP_ARGS(r_xprt, event),
389 __field(const void *, r_xprt)
390 __field(unsigned int, event)
391 __string(name, event->device->name)
392 __string(addr, rpcrdma_addrstr(r_xprt))
393 __string(port, rpcrdma_portstr(r_xprt))
397 __entry->r_xprt = r_xprt;
398 __entry->event = event->event;
399 __assign_str(name, event->device->name);
400 __assign_str(addr, rpcrdma_addrstr(r_xprt));
401 __assign_str(port, rpcrdma_portstr(r_xprt));
404 TP_printk("peer=[%s]:%s r_xprt=%p: dev %s: %s (%u)",
405 __get_str(addr), __get_str(port), __entry->r_xprt,
406 __get_str(name), rdma_show_ib_event(__entry->event),
/*
 * Fires when a batch of MRs is allocated for a transport: records how
 * many were created.
 */
415 TRACE_EVENT(xprtrdma_createmrs,
417 const struct rpcrdma_xprt *r_xprt,
421 TP_ARGS(r_xprt, count),
424 __field(const void *, r_xprt)
425 __field(unsigned int, count)
429 __entry->r_xprt = r_xprt;
430 __entry->count = count;
433 TP_printk("r_xprt=%p: created %u MRs",
434 __entry->r_xprt, __entry->count
/* MR exhaustion and chunk-registration tracepoint instances. */
438 DEFINE_RXPRT_EVENT(xprtrdma_nomrs);
440 DEFINE_RDCH_EVENT(xprtrdma_read_chunk);
441 DEFINE_WRCH_EVENT(xprtrdma_write_chunk);
442 DEFINE_WRCH_EVENT(xprtrdma_reply_chunk);
/* Export the chunk-type enum values so userspace can decode them. */
444 TRACE_DEFINE_ENUM(rpcrdma_noch);
445 TRACE_DEFINE_ENUM(rpcrdma_readch);
446 TRACE_DEFINE_ENUM(rpcrdma_areadch);
447 TRACE_DEFINE_ENUM(rpcrdma_writech);
448 TRACE_DEFINE_ENUM(rpcrdma_replych);
/* Map a chunk-type value to a human-readable string in trace output. */
450 #define xprtrdma_show_chunktype(x) \
451 __print_symbolic(x, \
452 { rpcrdma_noch, "inline" }, \
453 { rpcrdma_readch, "read list" }, \
454 { rpcrdma_areadch, "*read list" }, \
455 { rpcrdma_writech, "write list" }, \
456 { rpcrdma_replych, "reply chunk" })
/*
 * Fires when a request is marshaled: records the RPC/RDMA header length,
 * the send buffer's head/page/tail lengths, and the chosen read (rtype)
 * and write (wtype) chunk types.
 */
458 TRACE_EVENT(xprtrdma_marshal,
460 const struct rpc_rqst *rqst,
466 TP_ARGS(rqst, hdrlen, rtype, wtype),
469 __field(unsigned int, task_id)
470 __field(unsigned int, client_id)
472 __field(unsigned int, hdrlen)
473 __field(unsigned int, headlen)
474 __field(unsigned int, pagelen)
475 __field(unsigned int, taillen)
476 __field(unsigned int, rtype)
477 __field(unsigned int, wtype)
481 __entry->task_id = rqst->rq_task->tk_pid;
482 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
483 __entry->xid = be32_to_cpu(rqst->rq_xid);
484 __entry->hdrlen = hdrlen;
485 __entry->headlen = rqst->rq_snd_buf.head[0].iov_len;
486 __entry->pagelen = rqst->rq_snd_buf.page_len;
487 __entry->taillen = rqst->rq_snd_buf.tail[0].iov_len;
488 __entry->rtype = rtype;
489 __entry->wtype = wtype;
492 TP_printk("task:%u@%u xid=0x%08x: hdr=%u xdr=%u/%u/%u %s/%s",
493 __entry->task_id, __entry->client_id, __entry->xid,
495 __entry->headlen, __entry->pagelen, __entry->taillen,
496 xprtrdma_show_chunktype(__entry->rtype),
497 xprtrdma_show_chunktype(__entry->wtype)
/*
 * Fires when a Send WR is posted: records the SGE count and whether the
 * WR requests a signaled completion (from the send context's send_flags;
 * the flag constant is on an elided line — presumably IB_SEND_SIGNALED,
 * TODO confirm against the full source).
 */
501 TRACE_EVENT(xprtrdma_post_send,
503 const struct rpcrdma_req *req,
507 TP_ARGS(req, status),
510 __field(const void *, req)
511 __field(int, num_sge)
512 __field(bool, signaled)
518 __entry->num_sge = req->rl_sendctx->sc_wr.num_sge;
519 __entry->signaled = req->rl_sendctx->sc_wr.send_flags &
521 __entry->status = status;
524 TP_printk("req=%p, %d SGEs%s, status=%d",
525 __entry->req, __entry->num_sge,
526 (__entry->signaled ? ", signaled" : ""),
/* Fires when a single Receive WR is posted; records only its CQE. */
531 TRACE_EVENT(xprtrdma_post_recv,
533 const struct ib_cqe *cqe
539 __field(const void *, cqe)
/*
 * Fires after a batch of Receive WRs is posted: records the count of new
 * receives, the running total of posted receives, and the return code.
 */
551 TRACE_EVENT(xprtrdma_post_recvs,
553 const struct rpcrdma_xprt *r_xprt,
558 TP_ARGS(r_xprt, count, status),
561 __field(const void *, r_xprt)
562 __field(unsigned int, count)
565 __string(addr, rpcrdma_addrstr(r_xprt))
566 __string(port, rpcrdma_portstr(r_xprt))
570 __entry->r_xprt = r_xprt;
571 __entry->count = count;
572 __entry->status = status;
573 __entry->posted = r_xprt->rx_buf.rb_posted_receives;
574 __assign_str(addr, rpcrdma_addrstr(r_xprt));
575 __assign_str(port, rpcrdma_portstr(r_xprt));
578 TP_printk("peer=[%s]:%s r_xprt=%p: %u new recvs, %d active (rc %d)",
579 __get_str(addr), __get_str(port), __entry->r_xprt,
580 __entry->count, __entry->posted, __entry->status
/*
 * Send completion: records the originating req, how many pages the send
 * context unmapped, and the completion status. vendor_err is zeroed on
 * success (wc->status == 0).
 */
588 TRACE_EVENT(xprtrdma_wc_send,
590 const struct rpcrdma_sendctx *sc,
591 const struct ib_wc *wc
597 __field(const void *, req)
598 __field(unsigned int, unmap_count)
599 __field(unsigned int, status)
600 __field(unsigned int, vendor_err)
604 __entry->req = sc->sc_req;
605 __entry->unmap_count = sc->sc_unmap_count;
606 __entry->status = wc->status;
607 __entry->vendor_err = __entry->status ? wc->vendor_err : 0;
610 TP_printk("req=%p, unmapped %u pages: %s (%u/0x%x)",
611 __entry->req, __entry->unmap_count,
612 rdma_show_wc_status(__entry->status),
613 __entry->status, __entry->vendor_err
/*
 * Receive completion: on failure records vendor_err with byte_len 0;
 * on success records byte_len with vendor_err 0 (the branch lines
 * selecting between the two assignments are elided in this excerpt).
 */
617 TRACE_EVENT(xprtrdma_wc_receive,
619 const struct ib_wc *wc
625 __field(const void *, cqe)
626 __field(u32, byte_len)
627 __field(unsigned int, status)
628 __field(u32, vendor_err)
632 __entry->cqe = wc->wr_cqe;
633 __entry->status = wc->status;
635 __entry->byte_len = 0;
636 __entry->vendor_err = wc->vendor_err;
638 __entry->byte_len = wc->byte_len;
639 __entry->vendor_err = 0;
643 TP_printk("cqe=%p %u bytes: %s (%u/0x%x)",
644 __entry->cqe, __entry->byte_len,
645 rdma_show_wc_status(__entry->status),
646 __entry->status, __entry->vendor_err
/* FRWR completion tracepoint instances. */
650 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_fastreg);
651 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li);
652 DEFINE_FRWR_DONE_EVENT(xprtrdma_wc_li_wake);
/* MR lifecycle tracepoint instances. */
654 DEFINE_MR_EVENT(xprtrdma_localinv);
655 DEFINE_MR_EVENT(xprtrdma_dma_map);
656 DEFINE_MR_EVENT(xprtrdma_dma_unmap);
657 DEFINE_MR_EVENT(xprtrdma_remoteinv);
658 DEFINE_MR_EVENT(xprtrdma_recover_mr);
/*
 * Fires when a reply is matched to its request: records task/client ids,
 * the reply XID, the granted credit count, and the rep/req pointers.
 */
664 TRACE_EVENT(xprtrdma_reply,
666 const struct rpc_task *task,
667 const struct rpcrdma_rep *rep,
668 const struct rpcrdma_req *req,
672 TP_ARGS(task, rep, req, credits),
675 __field(unsigned int, task_id)
676 __field(unsigned int, client_id)
677 __field(const void *, rep)
678 __field(const void *, req)
680 __field(unsigned int, credits)
684 __entry->task_id = task->tk_pid;
685 __entry->client_id = task->tk_client->cl_clid;
688 __entry->xid = be32_to_cpu(rep->rr_xid);
689 __entry->credits = credits;
692 TP_printk("task:%u@%u xid=0x%08x, %u credits, rep=%p -> req=%p",
693 __entry->task_id, __entry->client_id, __entry->xid,
694 __entry->credits, __entry->rep, __entry->req
/*
 * Fires when reply completion is deferred: identifies the task via the
 * rep's matched rqst (rep->rr_rqst) rather than a task argument.
 */
698 TRACE_EVENT(xprtrdma_defer_cmp,
700 const struct rpcrdma_rep *rep
706 __field(unsigned int, task_id)
707 __field(unsigned int, client_id)
708 __field(const void *, rep)
713 __entry->task_id = rep->rr_rqst->rq_task->tk_pid;
714 __entry->client_id = rep->rr_rqst->rq_task->tk_client->cl_clid;
716 __entry->xid = be32_to_cpu(rep->rr_xid);
719 TP_printk("task:%u@%u xid=0x%08x rep=%p",
720 __entry->task_id, __entry->client_id, __entry->xid,
/* Reply-header sanity-check tracepoint instances. */
725 DEFINE_REPLY_EVENT(xprtrdma_reply_vers);
726 DEFINE_REPLY_EVENT(xprtrdma_reply_rqst);
727 DEFINE_REPLY_EVENT(xprtrdma_reply_short);
728 DEFINE_REPLY_EVENT(xprtrdma_reply_hdr);
/*
 * Fires while copying reply data into the receive buffer: records the
 * receive buffer's head iov base, the data length, and the header length.
 */
730 TRACE_EVENT(xprtrdma_fixup,
732 const struct rpc_rqst *rqst,
737 TP_ARGS(rqst, len, hdrlen),
740 __field(unsigned int, task_id)
741 __field(unsigned int, client_id)
742 __field(const void *, base)
748 __entry->task_id = rqst->rq_task->tk_pid;
749 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
750 __entry->base = rqst->rq_rcv_buf.head[0].iov_base;
752 __entry->hdrlen = hdrlen;
755 TP_printk("task:%u@%u base=%p len=%d hdrlen=%d",
756 __entry->task_id, __entry->client_id,
757 __entry->base, __entry->len, __entry->hdrlen
/*
 * Per-page variant of xprtrdma_fixup: adds the page number, position,
 * and the current copy length.
 */
761 TRACE_EVENT(xprtrdma_fixup_pg,
763 const struct rpc_rqst *rqst,
770 TP_ARGS(rqst, pageno, pos, len, curlen),
773 __field(unsigned int, task_id)
774 __field(unsigned int, client_id)
775 __field(const void *, pos)
782 __entry->task_id = rqst->rq_task->tk_pid;
783 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
785 __entry->pageno = pageno;
787 __entry->curlen = curlen;
790 TP_printk("task:%u@%u pageno=%d pos=%p len=%d curlen=%d",
791 __entry->task_id, __entry->client_id,
792 __entry->pageno, __entry->pos, __entry->len, __entry->curlen
/*
 * Fires when a chunk segment is decoded from a reply: records the
 * handle/length/offset triple.
 */
796 TRACE_EVENT(xprtrdma_decode_seg,
803 TP_ARGS(handle, length, offset),
812 __entry->handle = handle;
813 __entry->length = length;
814 __entry->offset = offset;
817 TP_printk("%u@0x%016llx:0x%08x",
818 __entry->length, (unsigned long long)__entry->offset,
824 ** Allocation/release of rpcrdma_reqs and rpcrdma_reps
/*
 * Fires when transport buffers are allocated for a task: records the
 * req pointer and the task's call/receive buffer sizes.
 */
827 TRACE_EVENT(xprtrdma_allocate,
829 const struct rpc_task *task,
830 const struct rpcrdma_req *req
836 __field(unsigned int, task_id)
837 __field(unsigned int, client_id)
838 __field(const void *, req)
839 __field(size_t, callsize)
840 __field(size_t, rcvsize)
844 __entry->task_id = task->tk_pid;
845 __entry->client_id = task->tk_client->cl_clid;
847 __entry->callsize = task->tk_rqstp->rq_callsize;
848 __entry->rcvsize = task->tk_rqstp->rq_rcvsize;
851 TP_printk("task:%u@%u req=%p (%zu, %zu)",
852 __entry->task_id, __entry->client_id,
853 __entry->req, __entry->callsize, __entry->rcvsize
/*
 * Fires when an RPC finishes and its buffers are released: records the
 * req and its attached reply (req->rl_reply).
 */
857 TRACE_EVENT(xprtrdma_rpc_done,
859 const struct rpc_task *task,
860 const struct rpcrdma_req *req
866 __field(unsigned int, task_id)
867 __field(unsigned int, client_id)
868 __field(const void *, req)
869 __field(const void *, rep)
873 __entry->task_id = task->tk_pid;
874 __entry->client_id = task->tk_client->cl_clid;
876 __entry->rep = req->rl_reply;
879 TP_printk("task:%u@%u req=%p rep=%p",
880 __entry->task_id, __entry->client_id,
881 __entry->req, __entry->rep
/*
 * Fires when backchannel resources are set up: records how many
 * backchannel requests were provisioned for the transport.
 */
889 TRACE_EVENT(xprtrdma_cb_setup,
891 const struct rpcrdma_xprt *r_xprt,
895 TP_ARGS(r_xprt, reqs),
898 __field(const void *, r_xprt)
899 __field(unsigned int, reqs)
900 __string(addr, rpcrdma_addrstr(r_xprt))
901 __string(port, rpcrdma_portstr(r_xprt))
905 __entry->r_xprt = r_xprt;
906 __entry->reqs = reqs;
907 __assign_str(addr, rpcrdma_addrstr(r_xprt));
908 __assign_str(port, rpcrdma_portstr(r_xprt));
911 TP_printk("peer=[%s]:%s r_xprt=%p: %u reqs",
912 __get_str(addr), __get_str(port),
913 __entry->r_xprt, __entry->reqs
/* Backchannel call/reply tracepoint instances. */
917 DEFINE_CB_EVENT(xprtrdma_cb_call);
918 DEFINE_CB_EVENT(xprtrdma_cb_reply);
/*
 * Fires when a rep is found still attached where it should not be:
 * records the owning task, the request XID, and the leaked rep pointer.
 */
920 TRACE_EVENT(xprtrdma_leaked_rep,
922 const struct rpc_rqst *rqst,
923 const struct rpcrdma_rep *rep
929 __field(unsigned int, task_id)
930 __field(unsigned int, client_id)
932 __field(const void *, rep)
936 __entry->task_id = rqst->rq_task->tk_pid;
937 __entry->client_id = rqst->rq_task->tk_client->cl_clid;
938 __entry->xid = be32_to_cpu(rqst->rq_xid);
942 TP_printk("task:%u@%u xid=0x%08x rep=%p",
943 __entry->task_id, __entry->client_id, __entry->xid,
949 ** Server-side RPC/RDMA events
/*
 * Event class for server-side transport events: records the svc_xprt
 * pointer and its remote-peer address string.
 */
952 DECLARE_EVENT_CLASS(svcrdma_xprt_event,
954 const struct svc_xprt *xprt
960 __field(const void *, xprt)
961 __string(addr, xprt->xpt_remotebuf)
965 __entry->xprt = xprt;
966 __assign_str(addr, xprt->xpt_remotebuf);
969 TP_printk("xprt=%p addr=%s",
970 __entry->xprt, __get_str(addr)
/* Stamp out svcrdma_xprt_<name> tracepoints of the class above. */
974 #define DEFINE_XPRT_EVENT(name) \
975 DEFINE_EVENT(svcrdma_xprt_event, svcrdma_xprt_##name, \
977 const struct svc_xprt *xprt \
981 DEFINE_XPRT_EVENT(accept);
982 DEFINE_XPRT_EVENT(fail);
983 DEFINE_XPRT_EVENT(free);
/* Export the RPC/RDMA procedure values so userspace can decode them. */
985 TRACE_DEFINE_ENUM(RDMA_MSG);
986 TRACE_DEFINE_ENUM(RDMA_NOMSG);
987 TRACE_DEFINE_ENUM(RDMA_MSGP);
988 TRACE_DEFINE_ENUM(RDMA_DONE);
989 TRACE_DEFINE_ENUM(RDMA_ERROR);
/* Map an RPC/RDMA procedure value to its name in trace output. */
991 #define show_rpcrdma_proc(x) \
992 __print_symbolic(x, \
993 { RDMA_MSG, "RDMA_MSG" }, \
994 { RDMA_NOMSG, "RDMA_NOMSG" }, \
995 { RDMA_MSGP, "RDMA_MSGP" }, \
996 { RDMA_DONE, "RDMA_DONE" }, \
997 { RDMA_ERROR, "RDMA_ERROR" })
/*
 * Fires when an inbound RPC/RDMA header is decoded: pulls the xid,
 * version, credits, and proc words from the header buffer `p` (the
 * TP_PROTO lines declaring `p` are elided in this excerpt).
 */
999 TRACE_EVENT(svcrdma_decode_rqst,
1011 __field(u32, credits)
1012 __field(unsigned int, hdrlen)
1016 __entry->xid = be32_to_cpup(p++);
1017 __entry->vers = be32_to_cpup(p++);
1018 __entry->credits = be32_to_cpup(p++);
1019 __entry->proc = be32_to_cpup(p);
1020 __entry->hdrlen = hdrlen;
1023 TP_printk("xid=0x%08x vers=%u credits=%u proc=%s hdrlen=%u",
1024 __entry->xid, __entry->vers, __entry->credits,
1025 show_rpcrdma_proc(__entry->proc), __entry->hdrlen)
/* Fires when an inbound message is too short to decode. */
1028 TRACE_EVENT(svcrdma_decode_short,
1036 __field(unsigned int, hdrlen)
1040 __entry->hdrlen = hdrlen;
1043 TP_printk("hdrlen=%u", __entry->hdrlen)
/*
 * Event class for malformed requests: same four header words as
 * svcrdma_decode_rqst, but proc is printed numerically (it may not be
 * a valid enum value on a bad request).
 */
1046 DECLARE_EVENT_CLASS(svcrdma_badreq_event,
1057 __field(u32, credits)
1061 __entry->xid = be32_to_cpup(p++);
1062 __entry->vers = be32_to_cpup(p++);
1063 __entry->credits = be32_to_cpup(p++);
1064 __entry->proc = be32_to_cpup(p);
1067 TP_printk("xid=0x%08x vers=%u credits=%u proc=%u",
1068 __entry->xid, __entry->vers, __entry->credits, __entry->proc)
/* Stamp out svcrdma_decode_<name> tracepoints of the class above. */
1071 #define DEFINE_BADREQ_EVENT(name) \
1072 DEFINE_EVENT(svcrdma_badreq_event, svcrdma_decode_##name,\
1078 DEFINE_BADREQ_EVENT(badvers);
1079 DEFINE_BADREQ_EVENT(drop);
1080 DEFINE_BADREQ_EVENT(badproc);
1081 DEFINE_BADREQ_EVENT(parse);
/*
 * Event class for encoding one RDMA segment: records the
 * handle/length/offset triple.
 */
1083 DECLARE_EVENT_CLASS(svcrdma_segment_event,
1090 TP_ARGS(handle, length, offset),
1093 __field(u32, handle)
1094 __field(u32, length)
1095 __field(u64, offset)
1099 __entry->handle = handle;
1100 __entry->length = length;
1101 __entry->offset = offset;
1104 TP_printk("%u@0x%016llx:0x%08x",
1105 __entry->length, (unsigned long long)__entry->offset,
/* Stamp out svcrdma_encode_<name> segment tracepoints. */
1110 #define DEFINE_SEGMENT_EVENT(name) \
1111 DEFINE_EVENT(svcrdma_segment_event, svcrdma_encode_##name,\
1117 TP_ARGS(handle, length, offset))
1119 DEFINE_SEGMENT_EVENT(rseg);
1120 DEFINE_SEGMENT_EVENT(wseg);
/* Event class for encoding a whole chunk: records only its length. */
1122 DECLARE_EVENT_CLASS(svcrdma_chunk_event,
1130 __field(u32, length)
1134 __entry->length = length;
1137 TP_printk("length=%u",
/* Stamp out svcrdma_encode_<name> chunk tracepoints. */
1142 #define DEFINE_CHUNK_EVENT(name) \
1143 DEFINE_EVENT(svcrdma_chunk_event, svcrdma_encode_##name,\
1149 DEFINE_CHUNK_EVENT(pzr);
1150 DEFINE_CHUNK_EVENT(write);
1151 DEFINE_CHUNK_EVENT(reply);
/* Fires when a Read chunk is encoded: adds the chunk position. */
1153 TRACE_EVENT(svcrdma_encode_read,
1159 TP_ARGS(length, position),
1162 __field(u32, length)
1163 __field(u32, position)
1167 __entry->length = length;
1168 __entry->position = position;
1171 TP_printk("length=%u position=%u",
1172 __entry->length, __entry->position
/*
 * Event class for server-side protocol errors: records only the XID
 * (decoded from the on-the-wire big-endian value).
 */
1176 DECLARE_EVENT_CLASS(svcrdma_error_event,
1188 __entry->xid = be32_to_cpu(xid);
1191 TP_printk("xid=0x%08x",
/* Stamp out svcrdma_err_<name> tracepoints of the class above. */
1196 #define DEFINE_ERROR_EVENT(name) \
1197 DEFINE_EVENT(svcrdma_error_event, svcrdma_err_##name, \
1203 DEFINE_ERROR_EVENT(vers);
1204 DEFINE_ERROR_EVENT(chunk);
1207 ** Server-side RDMA API events
/*
 * Fires on a page DMA-mapping event: records the page pointer, the
 * device name, and the remote peer address.
 */
1210 TRACE_EVENT(svcrdma_dma_map_page,
1212 const struct svcxprt_rdma *rdma,
1216 TP_ARGS(rdma, page),
1219 __field(const void *, page);
1220 __string(device, rdma->sc_cm_id->device->name)
1221 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1225 __entry->page = page;
1226 __assign_str(device, rdma->sc_cm_id->device->name);
1227 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1230 TP_printk("addr=%s device=%s page=%p",
1231 __get_str(addr), __get_str(device), __entry->page
/*
 * Fires on an RW-context DMA-mapping event: records the status code
 * instead of a page pointer.
 */
1235 TRACE_EVENT(svcrdma_dma_map_rwctx,
1237 const struct svcxprt_rdma *rdma,
1241 TP_ARGS(rdma, status),
1244 __field(int, status)
1245 __string(device, rdma->sc_cm_id->device->name)
1246 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1250 __entry->status = status;
1251 __assign_str(device, rdma->sc_cm_id->device->name);
1252 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1255 TP_printk("addr=%s device=%s status=%d",
1256 __get_str(addr), __get_str(device), __entry->status
/*
 * Fires when sending a server reply fails: records the xprt, peer
 * address, request XID, and the failure status.
 */
1260 TRACE_EVENT(svcrdma_send_failed,
1262 const struct svc_rqst *rqst,
1266 TP_ARGS(rqst, status),
1269 __field(int, status)
1271 __field(const void *, xprt)
1272 __string(addr, rqst->rq_xprt->xpt_remotebuf)
1276 __entry->status = status;
1277 __entry->xid = __be32_to_cpu(rqst->rq_xid);
1278 __entry->xprt = rqst->rq_xprt;
1279 __assign_str(addr, rqst->rq_xprt->xpt_remotebuf);
1282 TP_printk("xprt=%p addr=%s xid=0x%08x status=%d",
1283 __entry->xprt, __get_str(addr),
1284 __entry->xid, __entry->status
/*
 * Event class for server-side send completions: records the CQE and
 * completion status; vendor_err is only recorded on failure (the branch
 * lines selecting between the assignments are elided in this excerpt).
 */
1288 DECLARE_EVENT_CLASS(svcrdma_sendcomp_event,
1290 const struct ib_wc *wc
1296 __field(const void *, cqe)
1297 __field(unsigned int, status)
1298 __field(unsigned int, vendor_err)
1302 __entry->cqe = wc->wr_cqe;
1303 __entry->status = wc->status;
1305 __entry->vendor_err = wc->vendor_err;
1307 __entry->vendor_err = 0;
1310 TP_printk("cqe=%p status=%s (%u/0x%x)",
1311 __entry->cqe, rdma_show_wc_status(__entry->status),
1312 __entry->status, __entry->vendor_err
/* Stamp out svcrdma_wc_<name> completion tracepoints. */
1316 #define DEFINE_SENDCOMP_EVENT(name) \
1317 DEFINE_EVENT(svcrdma_sendcomp_event, svcrdma_wc_##name, \
1319 const struct ib_wc *wc \
/*
 * Fires when a server Send WR is posted: records the CQE, SGE count,
 * and the rkey being remotely invalidated (0 unless the opcode is
 * IB_WR_SEND_WITH_INV).
 */
1323 TRACE_EVENT(svcrdma_post_send,
1325 const struct ib_send_wr *wr
1331 __field(const void *, cqe)
1332 __field(unsigned int, num_sge)
1333 __field(u32, inv_rkey)
1337 __entry->cqe = wr->wr_cqe;
1338 __entry->num_sge = wr->num_sge;
1339 __entry->inv_rkey = (wr->opcode == IB_WR_SEND_WITH_INV) ?
1340 wr->ex.invalidate_rkey : 0;
1343 TP_printk("cqe=%p num_sge=%u inv_rkey=0x%08x",
1344 __entry->cqe, __entry->num_sge,
1349 DEFINE_SENDCOMP_EVENT(send);
/*
 * Fires when a server Receive WR is posted: records the CQE and the
 * post status.
 */
1351 TRACE_EVENT(svcrdma_post_recv,
1353 const struct ib_recv_wr *wr,
1357 TP_ARGS(wr, status),
1360 __field(const void *, cqe)
1361 __field(int, status)
1365 __entry->cqe = wr->wr_cqe;
1366 __entry->status = status;
1369 TP_printk("cqe=%p status=%d",
1370 __entry->cqe, __entry->status
/*
 * Server receive completion: on failure records vendor_err with
 * byte_len 0; on success records byte_len with vendor_err 0 (the branch
 * lines selecting between the assignments are elided in this excerpt).
 */
1374 TRACE_EVENT(svcrdma_wc_receive,
1376 const struct ib_wc *wc
1382 __field(const void *, cqe)
1383 __field(u32, byte_len)
1384 __field(unsigned int, status)
1385 __field(u32, vendor_err)
1389 __entry->cqe = wc->wr_cqe;
1390 __entry->status = wc->status;
1392 __entry->byte_len = 0;
1393 __entry->vendor_err = wc->vendor_err;
1395 __entry->byte_len = wc->byte_len;
1396 __entry->vendor_err = 0;
1400 TP_printk("cqe=%p byte_len=%u status=%s (%u/0x%x)",
1401 __entry->cqe, __entry->byte_len,
1402 rdma_show_wc_status(__entry->status),
1403 __entry->status, __entry->vendor_err
/*
 * Fires when an RDMA Read/Write chain is posted: records the CQE and
 * how many send-queue entries the chain consumes.
 */
1407 TRACE_EVENT(svcrdma_post_rw,
1413 TP_ARGS(cqe, sqecount),
1416 __field(const void *, cqe)
1417 __field(int, sqecount)
1422 __entry->sqecount = sqecount;
1425 TP_printk("cqe=%p sqecount=%d",
1426 __entry->cqe, __entry->sqecount
/* Read/Write completion tracepoint instances. */
1430 DEFINE_SENDCOMP_EVENT(read);
1431 DEFINE_SENDCOMP_EVENT(write);
/*
 * Fires on a server-side CM event: records the event type/status and
 * formats the peer sockaddr into a fixed-size string buffer.
 */
1433 TRACE_EVENT(svcrdma_cm_event,
1435 const struct rdma_cm_event *event,
1436 const struct sockaddr *sap
1439 TP_ARGS(event, sap),
1442 __field(unsigned int, event)
1443 __field(int, status)
1444 __array(__u8, addr, INET6_ADDRSTRLEN + 10)
1448 __entry->event = event->event;
1449 __entry->status = event->status;
1450 snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1454 TP_printk("addr=%s event=%s (%u/%d)",
1456 rdma_show_cm_event(__entry->event),
1457 __entry->event, __entry->status
/*
 * Fires on a server-side IB queue-pair event: records the device name,
 * peer address, and event type.
 */
1461 TRACE_EVENT(svcrdma_qp_error,
1463 const struct ib_event *event,
1464 const struct sockaddr *sap
1467 TP_ARGS(event, sap),
1470 __field(unsigned int, event)
1471 __string(device, event->device->name)
1472 __array(__u8, addr, INET6_ADDRSTRLEN + 10)
1476 __entry->event = event->event;
1477 __assign_str(device, event->device->name);
1478 snprintf(__entry->addr, sizeof(__entry->addr) - 1,
1482 TP_printk("addr=%s dev=%s event=%s (%u)",
1483 __entry->addr, __get_str(device),
1484 rdma_show_ib_event(__entry->event), __entry->event
/*
 * Event class for send-queue pressure: records the current available
 * SQ entries (atomic read of sc_sq_avail) against the total depth.
 */
1488 DECLARE_EVENT_CLASS(svcrdma_sendqueue_event,
1490 const struct svcxprt_rdma *rdma
1498 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1502 __entry->avail = atomic_read(&rdma->sc_sq_avail);
1503 __entry->depth = rdma->sc_sq_depth;
1504 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1507 TP_printk("addr=%s sc_sq_avail=%d/%d",
1508 __get_str(addr), __entry->avail, __entry->depth
/* Stamp out svcrdma_sq_<name> tracepoints of the class above. */
1512 #define DEFINE_SQ_EVENT(name) \
1513 DEFINE_EVENT(svcrdma_sendqueue_event, svcrdma_sq_##name,\
1515 const struct svcxprt_rdma *rdma \
1519 DEFINE_SQ_EVENT(full);
1520 DEFINE_SQ_EVENT(retry);
/*
 * Fires when posting to the send queue fails: adds the error status to
 * the avail/depth snapshot.
 */
1522 TRACE_EVENT(svcrdma_sq_post_err,
1524 const struct svcxprt_rdma *rdma,
1528 TP_ARGS(rdma, status),
1533 __field(int, status)
1534 __string(addr, rdma->sc_xprt.xpt_remotebuf)
1538 __entry->avail = atomic_read(&rdma->sc_sq_avail);
1539 __entry->depth = rdma->sc_sq_depth;
1540 __entry->status = status;
1541 __assign_str(addr, rdma->sc_xprt.xpt_remotebuf);
1544 TP_printk("addr=%s sc_sq_avail=%d/%d status=%d",
1545 __get_str(addr), __entry->avail, __entry->depth,
1550 #endif /* _TRACE_RPCRDMA_H */
1552 #include <trace/define_trace.h>