2 * Copyright(c) 2015 - 2017 Intel Corporation.
4 * This file is provided under a dual BSD/GPLv2 license. When using or
5 * redistributing this file, you may do so under either license.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
20 * Redistribution and use in source and binary forms, with or without
21 * modification, are permitted provided that the following conditions
24 * - Redistributions of source code must retain the above copyright
25 * notice, this list of conditions and the following disclaimer.
26 * - Redistributions in binary form must reproduce the above copyright
27 * notice, this list of conditions and the following disclaimer in
28 * the documentation and/or other materials provided with the
30 * - Neither the name of Intel Corporation nor the names of its
31 * contributors may be used to endorse or promote products derived
32 * from this software without specific prior written permission.
34 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
35 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
36 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
37 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
38 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
39 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
40 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
41 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
42 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
43 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
44 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
47 #if !defined(__HFI1_TRACE_TX_H) || defined(TRACE_HEADER_MULTI_READ)
48 #define __HFI1_TRACE_TX_H
50 #include <linux/tracepoint.h>
51 #include <linux/trace_seq.h>
57 #include "user_sdma.h"
59 const char *parse_sdma_flags(struct trace_seq *p, u64 desc0, u64 desc1);
61 #define __parse_sdma_flags(desc0, desc1) parse_sdma_flags(p, desc0, desc1)
64 #define TRACE_SYSTEM hfi1_tx
/*
 * Tracepoint hfi1_piofree: logs a send context's sw_index/hw_context and a
 * caller-supplied 'extra' value (per the TP_printk format below); presumably
 * fired when PIO send resources are released -- confirm against callers.
 * NOTE(review): this extract is truncated -- interior lines (e.g. TP_ARGS,
 * closing parens of each TP_* section) are missing from view.
 */
66 TRACE_EVENT(hfi1_piofree,
67 TP_PROTO(struct send_context *sc, int extra),
69 TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
70 __field(u32, sw_index)
71 __field(u32, hw_context)
74 TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
75 __entry->sw_index = sc->sw_index;
76 __entry->hw_context = sc->hw_context;
77 __entry->extra = extra;
79 TP_printk("[%s] ctxt %u(%u) extra %d",
/*
 * Tracepoint hfi1_wantpiointr: records whether a PIO-available interrupt is
 * being requested ('needint') for a send context, together with the 64-bit
 * credit_ctrl value.  Format string logs "on %d credit_ctrl 0x%llx".
 * NOTE(review): truncated extract -- some interior lines are missing.
 */
87 TRACE_EVENT(hfi1_wantpiointr,
88 TP_PROTO(struct send_context *sc, u32 needint, u64 credit_ctrl),
89 TP_ARGS(sc, needint, credit_ctrl),
90 TP_STRUCT__entry(DD_DEV_ENTRY(sc->dd)
91 __field(u32, sw_index)
92 __field(u32, hw_context)
94 __field(u64, credit_ctrl)
96 TP_fast_assign(DD_DEV_ASSIGN(sc->dd);
97 __entry->sw_index = sc->sw_index;
98 __entry->hw_context = sc->hw_context;
99 __entry->needint = needint;
100 __entry->credit_ctrl = credit_ctrl;
102 TP_printk("[%s] ctxt %u(%u) on %d credit_ctrl 0x%llx",
/* Explicit cast keeps %llx portable across u64 definitions. */
107 (unsigned long long)__entry->credit_ctrl
/*
 * Event class for QP sleep/wakeup transitions: captures the QP number plus
 * three flag sets -- rvt s_flags, the hfi1 private s_flags, and the iowait
 * flags pulled from the driver-private struct hfi1_qp_priv.
 * NOTE(review): truncated extract -- the __entry field declarations for qpn
 * and flags, and the TP_printk argument list, are not visible here.
 */
111 DECLARE_EVENT_CLASS(hfi1_qpsleepwakeup_template,
112 TP_PROTO(struct rvt_qp *qp, u32 flags),
115 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
118 __field(u32, s_flags)
119 __field(u32, ps_flags)
120 __field(unsigned long, iow_flags)
123 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
124 __entry->flags = flags;
125 __entry->qpn = qp->ibqp.qp_num;
126 __entry->s_flags = qp->s_flags;
/* Driver-private flag words live behind the opaque qp->priv pointer. */
128 ((struct hfi1_qp_priv *)qp->priv)->s_flags;
130 ((struct hfi1_qp_priv *)qp->priv)->s_iowait.flags;
133 "[%s] qpn 0x%x flags 0x%x s_flags 0x%x ps_flags 0x%x iow_flags 0x%lx",
/* Two instances of the class: QP wakeup and QP sleep. */
143 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpwakeup,
144 TP_PROTO(struct rvt_qp *qp, u32 flags),
147 DEFINE_EVENT(hfi1_qpsleepwakeup_template, hfi1_qpsleep,
148 TP_PROTO(struct rvt_qp *qp, u32 flags),
/*
 * Tracepoint hfi1_sdma_descriptor: decodes one hardware SDMA descriptor
 * (desc0/desc1) at submit time -- flags (via __parse_sdma_flags), physical
 * address, generation, and byte count are extracted with the SDMA_DESC*
 * shift/mask constants in the TP_printk arguments.
 * NOTE(review): truncated extract -- desc0/desc1/idx field declarations and
 * part of the prototype are missing from view.
 */
151 TRACE_EVENT(hfi1_sdma_descriptor,
152 TP_PROTO(struct sdma_engine *sde,
157 TP_ARGS(sde, desc0, desc1, e, descp),
158 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
159 __field(void *, descp)
165 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
166 __entry->desc0 = desc0;
167 __entry->desc1 = desc1;
168 __entry->idx = sde->this_idx;
169 __entry->descp = descp;
173 "[%s] SDE(%u) flags:%s addr:0x%016llx gen:%u len:%u d0:%016llx d1:%016llx to %p,%u",
/* Flag bits are rendered as a string at trace read time, not assign time. */
176 __parse_sdma_flags(__entry->desc0, __entry->desc1),
177 (__entry->desc0 >> SDMA_DESC0_PHY_ADDR_SHIFT) &
178 SDMA_DESC0_PHY_ADDR_MASK,
179 (u8)((__entry->desc1 >> SDMA_DESC1_GENERATION_SHIFT) &
180 SDMA_DESC1_GENERATION_MASK),
181 (u16)((__entry->desc0 >> SDMA_DESC0_BYTE_COUNT_SHIFT) &
182 SDMA_DESC0_BYTE_COUNT_MASK),
/*
 * Tracepoint hfi1_sdma_engine_select: logs which SDMA engine (idx) was
 * chosen for a given selector value and VL, per the format string below.
 * NOTE(review): truncated extract -- field declarations and assignments for
 * sel/vl/idx are missing from view.
 */
190 TRACE_EVENT(hfi1_sdma_engine_select,
191 TP_PROTO(struct hfi1_devdata *dd, u32 sel, u8 vl, u8 idx),
192 TP_ARGS(dd, sel, vl, idx),
193 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
198 TP_fast_assign(DD_DEV_ASSIGN(dd);
203 TP_printk("[%s] selecting SDE %u sel 0x%x vl %u",
/*
 * Tracepoint hfi1_sdma_user_free_queues: marks teardown of the user SDMA
 * queues for a (ctxt, subctxt) pair.
 * NOTE(review): truncated extract -- the ctxt __field line and section
 * closers are missing from view.
 */
211 TRACE_EVENT(hfi1_sdma_user_free_queues,
212 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt),
213 TP_ARGS(dd, ctxt, subctxt),
214 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
216 __field(u16, subctxt)
218 TP_fast_assign(DD_DEV_ASSIGN(dd);
219 __entry->ctxt = ctxt;
220 __entry->subctxt = subctxt;
222 TP_printk("[%s] SDMA [%u:%u] Freeing user SDMA queues",
/*
 * Tracepoint hfi1_sdma_user_process_request: records which request/completion
 * ring entry (comp_idx) a user SDMA request on (ctxt, subctxt) is using.
 * NOTE(review): truncated extract -- some interior lines are missing.
 */
229 TRACE_EVENT(hfi1_sdma_user_process_request,
230 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
232 TP_ARGS(dd, ctxt, subctxt, comp_idx),
233 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
235 __field(u16, subctxt)
236 __field(u16, comp_idx)
238 TP_fast_assign(DD_DEV_ASSIGN(dd);
239 __entry->ctxt = ctxt;
240 __entry->subctxt = subctxt;
241 __entry->comp_idx = comp_idx;
243 TP_printk("[%s] SDMA [%u:%u] Using req/comp entry: %u",
/*
 * Event class hfi1_sdma_value_template: generic "one u32 value per
 * (ctxt, subctxt, comp_idx)" logger, instantiated below for the initial TID
 * offset, the user data length, and the computed length of a request.
 * NOTE(review): truncated extract -- the DECLARE_EVENT_CLASS( opener and the
 * value/ctxt __field lines are missing from view.
 */
252 hfi1_sdma_value_template,
253 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt, u16 comp_idx,
255 TP_ARGS(dd, ctxt, subctxt, comp_idx, value),
256 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
258 __field(u16, subctxt)
259 __field(u16, comp_idx)
262 TP_fast_assign(DD_DEV_ASSIGN(dd);
263 __entry->ctxt = ctxt;
264 __entry->subctxt = subctxt;
265 __entry->comp_idx = comp_idx;
266 __entry->value = value;
268 TP_printk("[%s] SDMA [%u:%u:%u] value: %u",
/* Instances: same payload, three distinct event names. */
277 DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_initial_tidoffset,
278 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
279 u16 comp_idx, u32 tidoffset),
280 TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset));
282 DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_data_length,
283 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
284 u16 comp_idx, u32 data_len),
285 TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
287 DEFINE_EVENT(hfi1_sdma_value_template, hfi1_sdma_user_compute_length,
288 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
289 u16 comp_idx, u32 data_len),
290 TP_ARGS(dd, ctxt, subctxt, comp_idx, data_len));
/*
 * Tracepoint hfi1_sdma_user_tid_info: logs TID (token ID) parameters of a
 * user SDMA request -- byte offset, unit count, and the "om" shift value.
 * NOTE(review): truncated extract -- ctxt/units/shift __field lines are
 * missing from view.
 */
292 TRACE_EVENT(hfi1_sdma_user_tid_info,
293 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
294 u16 comp_idx, u32 tidoffset, u32 units, u8 shift),
295 TP_ARGS(dd, ctxt, subctxt, comp_idx, tidoffset, units, shift),
296 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
298 __field(u16, subctxt)
299 __field(u16, comp_idx)
300 __field(u32, tidoffset)
304 TP_fast_assign(DD_DEV_ASSIGN(dd);
305 __entry->ctxt = ctxt;
306 __entry->subctxt = subctxt;
307 __entry->comp_idx = comp_idx;
308 __entry->tidoffset = tidoffset;
309 __entry->units = units;
310 __entry->shift = shift;
312 TP_printk("[%s] SDMA [%u:%u:%u] TID offset %ubytes %uunits om %u",
/*
 * Tracepoint hfi1_sdma_request: logs an incoming user SDMA request and its
 * iovec count ('dim', an unsigned long per the %lu format).
 * NOTE(review): truncated extract -- the dim assignment line and the ctxt
 * __field line are missing from view.
 */
323 TRACE_EVENT(hfi1_sdma_request,
324 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u16 subctxt,
326 TP_ARGS(dd, ctxt, subctxt, dim),
327 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
329 __field(u16, subctxt)
330 __field(unsigned long, dim)
332 TP_fast_assign(DD_DEV_ASSIGN(dd);
333 __entry->ctxt = ctxt;
334 __entry->subctxt = subctxt;
337 TP_printk("[%s] SDMA from %u:%u (%lu)",
/*
 * Event class hfi1_sdma_engine_class: snapshot of an SDMA engine's 64-bit
 * status word, instantiated for the interrupt and progress paths below.
 * NOTE(review): truncated extract -- status/idx __field lines are missing
 * from view.
 */
345 DECLARE_EVENT_CLASS(hfi1_sdma_engine_class,
346 TP_PROTO(struct sdma_engine *sde, u64 status),
347 TP_ARGS(sde, status),
348 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
352 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
353 __entry->status = status;
354 __entry->idx = sde->this_idx;
356 TP_printk("[%s] SDE(%u) status %llx",
359 (unsigned long long)__entry->status
/* Instances: interrupt-time vs. progress-time status capture. */
363 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_interrupt,
364 TP_PROTO(struct sdma_engine *sde, u64 status),
368 DEFINE_EVENT(hfi1_sdma_engine_class, hfi1_sdma_engine_progress,
369 TP_PROTO(struct sdma_engine *sde, u64 status),
/*
 * Event class hfi1_sdma_ahg_ad: logs an AHG (automatic header generation)
 * index on a given SDMA engine; instantiated for allocate and deallocate.
 * NOTE(review): truncated extract -- some interior lines are missing.
 */
373 DECLARE_EVENT_CLASS(hfi1_sdma_ahg_ad,
374 TP_PROTO(struct sdma_engine *sde, int aidx),
376 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
380 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
381 __entry->idx = sde->this_idx;
382 __entry->aidx = aidx;
384 TP_printk("[%s] SDE(%u) aidx %d",
391 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_allocate,
392 TP_PROTO(struct sdma_engine *sde, int aidx),
395 DEFINE_EVENT(hfi1_sdma_ahg_ad, hfi1_ahg_deallocate,
396 TP_PROTO(struct sdma_engine *sde, int aidx),
/*
 * Tracepoint hfi1_sdma_progress: two compile-time variants selected by
 * CONFIG_HFI1_DEBUG_SDMA_ORDER.  The debug variant additionally records the
 * txreq sequence number (sn); both log hw/sw head, ring tail/head and the
 * next descriptor index of the in-flight txreq.
 * NOTE(review): truncated extract -- the #else separating the variants and
 * several __field lines are missing from view.
 */
399 #ifdef CONFIG_HFI1_DEBUG_SDMA_ORDER
400 TRACE_EVENT(hfi1_sdma_progress,
401 TP_PROTO(struct sdma_engine *sde,
404 struct sdma_txreq *txp
406 TP_ARGS(sde, hwhead, swhead, txp),
407 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
412 __field(u16, tx_tail)
413 __field(u16, tx_head)
416 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
417 __entry->hwhead = hwhead;
418 __entry->swhead = swhead;
419 __entry->tx_tail = sde->tx_tail;
420 __entry->tx_head = sde->tx_head;
/* txp may be NULL; ~0 is the "no txreq" sentinel for next index and sn. */
421 __entry->txnext = txp ? txp->next_descq_idx : ~0;
422 __entry->idx = sde->this_idx;
423 __entry->sn = txp ? txp->sn : ~0;
426 "[%s] SDE(%u) sn %llu hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
/* Non-debug variant: identical except it omits the sn field. */
438 TRACE_EVENT(hfi1_sdma_progress,
439 TP_PROTO(struct sdma_engine *sde,
440 u16 hwhead, u16 swhead,
441 struct sdma_txreq *txp
443 TP_ARGS(sde, hwhead, swhead, txp),
444 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
448 __field(u16, tx_tail)
449 __field(u16, tx_head)
452 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
453 __entry->hwhead = hwhead;
454 __entry->swhead = swhead;
455 __entry->tx_tail = sde->tx_tail;
456 __entry->tx_head = sde->tx_head;
457 __entry->txnext = txp ? txp->next_descq_idx : ~0;
458 __entry->idx = sde->this_idx;
461 "[%s] SDE(%u) hwhead %u swhead %u next_descq_idx %u tx_head %u tx_tail %u",
/*
 * Event class hfi1_sdma_sn: logs a 64-bit sequence number per SDMA engine;
 * instantiated for outbound (out_sn) and inbound (in_sn) sequence tracking.
 * NOTE(review): truncated extract -- the sn __field/assignment lines are
 * missing from view.
 */
473 DECLARE_EVENT_CLASS(hfi1_sdma_sn,
474 TP_PROTO(struct sdma_engine *sde, u64 sn),
476 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
480 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
482 __entry->idx = sde->this_idx;
484 TP_printk("[%s] SDE(%u) sn %llu",
491 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_out_sn,
493 struct sdma_engine *sde,
499 DEFINE_EVENT(hfi1_sdma_sn, hfi1_sdma_in_sn,
500 TP_PROTO(struct sdma_engine *sde, u64 sn),
/*
 * USDMA_HDR_FORMAT: printk layout for a full user SDMA packet header dump --
 * PBC, LRH, BTH, the nine KDETH words, and the TID value.
 */
504 #define USDMA_HDR_FORMAT \
505 "[%s:%u:%u:%u] PBC=(0x%x 0x%x) LRH=(0x%x 0x%x) BTH=(0x%x 0x%x 0x%x) KDETH=(0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x) TIDVal=0x%x"
/*
 * Tracepoint hfi1_sdma_user_header: byte-swaps each header section to CPU
 * order at assign time (PBC/KDETH are little-endian, LRH/BTH big-endian per
 * the le32/be32 casts below) and stores the words individually.
 * NOTE(review): truncated extract -- the TP_STRUCT__entry field list and the
 * TP_printk argument list are missing from view.
 */
507 TRACE_EVENT(hfi1_sdma_user_header,
508 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
509 struct hfi1_pkt_header *hdr, u32 tidval),
510 TP_ARGS(dd, ctxt, subctxt, req, hdr, tidval),
535 __le32 *pbc = (__le32 *)hdr->pbc;
536 __be32 *lrh = (__be32 *)hdr->lrh;
537 __be32 *bth = (__be32 *)hdr->bth;
538 __le32 *kdeth = (__le32 *)&hdr->kdeth;
541 __entry->ctxt = ctxt;
542 __entry->subctxt = subctxt;
544 __entry->pbc0 = le32_to_cpu(pbc[0]);
545 __entry->pbc1 = le32_to_cpu(pbc[1]);
546 __entry->lrh0 = be32_to_cpu(lrh[0]);
547 __entry->lrh1 = be32_to_cpu(lrh[1]);
548 __entry->bth0 = be32_to_cpu(bth[0]);
549 __entry->bth1 = be32_to_cpu(bth[1]);
550 __entry->bth2 = be32_to_cpu(bth[2]);
551 __entry->kdeth0 = le32_to_cpu(kdeth[0]);
552 __entry->kdeth1 = le32_to_cpu(kdeth[1]);
553 __entry->kdeth2 = le32_to_cpu(kdeth[2]);
554 __entry->kdeth3 = le32_to_cpu(kdeth[3]);
555 __entry->kdeth4 = le32_to_cpu(kdeth[4]);
556 __entry->kdeth5 = le32_to_cpu(kdeth[5]);
557 __entry->kdeth6 = le32_to_cpu(kdeth[6]);
558 __entry->kdeth7 = le32_to_cpu(kdeth[7]);
559 __entry->kdeth8 = le32_to_cpu(kdeth[8]);
560 __entry->tidval = tidval;
562 TP_printk(USDMA_HDR_FORMAT,
/* SDMA_UREQ_FMT: printk layout for the decoded user request info words. */
587 #define SDMA_UREQ_FMT \
588 "[%s:%u:%u] ver/op=0x%x, iovcnt=%u, npkts=%u, frag=%u, idx=%u"
/*
 * Tracepoint hfi1_sdma_user_reqinfo: unpacks the raw u16 request-info array
 * 'i' -- word 0 holds version/opcode (low byte) and iovcnt (high byte),
 * words 1-3 hold npkts, fragsize, and the completion index.
 * NOTE(review): truncated extract -- iovcnt/npkts __field lines are missing
 * from view.
 */
589 TRACE_EVENT(hfi1_sdma_user_reqinfo,
590 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 *i),
591 TP_ARGS(dd, ctxt, subctxt, i),
596 __field(u8, ver_opcode)
599 __field(u16, fragsize)
600 __field(u16, comp_idx)
604 __entry->ctxt = ctxt;
605 __entry->subctxt = subctxt;
606 __entry->ver_opcode = i[0] & 0xff;
607 __entry->iovcnt = (i[0] >> 8) & 0xff;
608 __entry->npkts = i[1];
609 __entry->fragsize = i[2];
610 __entry->comp_idx = i[3];
612 TP_printk(SDMA_UREQ_FMT,
/*
 * show_usdma_complete_state(): maps a user SDMA completion state enum value
 * to its symbolic name (FREE/QUEUED/COMPLETE/ERROR) for trace output.
 */
624 #define usdma_complete_name(st) { st, #st }
625 #define show_usdma_complete_state(st) \
626 __print_symbolic(st, \
627 usdma_complete_name(FREE), \
628 usdma_complete_name(QUEUED), \
629 usdma_complete_name(COMPLETE), \
630 usdma_complete_name(ERROR))
/*
 * Tracepoint hfi1_sdma_user_completion: records a request's completion state
 * transition and its status code.
 * NOTE(review): truncated extract -- the idx assignment and the __field list
 * are missing from view.
 */
632 TRACE_EVENT(hfi1_sdma_user_completion,
633 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 idx,
635 TP_ARGS(dd, ctxt, subctxt, idx, state, code),
646 __entry->ctxt = ctxt;
647 __entry->subctxt = subctxt;
649 __entry->state = state;
650 __entry->code = code;
652 TP_printk("[%s:%u:%u:%u] SDMA completion state %s (%d)",
653 __get_str(dev), __entry->ctxt, __entry->subctxt,
654 __entry->idx, show_usdma_complete_state(__entry->state),
/*
 * Tracepoint hfi1_usdma_defer: fired when a user SDMA packet queue defers on
 * an engine; captures the pq/sde/iowait pointers, the engine index, and
 * whether the iowait list was empty at the time.
 * NOTE(review): truncated extract -- pq/sde pointer assignments and the
 * engine/empty __field lines are missing from view.
 */
658 TRACE_EVENT(hfi1_usdma_defer,
659 TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
660 struct sdma_engine *sde,
661 struct iowait *wait),
662 TP_ARGS(pq, sde, wait),
663 TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
664 __field(struct hfi1_user_sdma_pkt_q *, pq)
665 __field(struct sdma_engine *, sde)
666 __field(struct iowait *, wait)
670 TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
673 __entry->wait = wait;
674 __entry->engine = sde->this_idx;
675 __entry->empty = list_empty(&__entry->wait->list);
677 TP_printk("[%s] pq %llx sde %llx wait %llx engine %d empty %d",
/* Pointers are printed as raw 64-bit values for correlation across events. */
679 (unsigned long long)__entry->pq,
680 (unsigned long long)__entry->sde,
681 (unsigned long long)__entry->wait,
/*
 * Tracepoint hfi1_usdma_activate: fired when a deferred user SDMA queue is
 * (re)activated; logs the pq/wait pointers and an integer reason code.
 * NOTE(review): truncated extract -- the reason __field line and the pq
 * assignment are missing from view.
 */
687 TRACE_EVENT(hfi1_usdma_activate,
688 TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
691 TP_ARGS(pq, wait, reason),
692 TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
693 __field(struct hfi1_user_sdma_pkt_q *, pq)
694 __field(struct iowait *, wait)
697 TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
699 __entry->wait = wait;
700 __entry->reason = reason;
702 TP_printk("[%s] pq %llx wait %llx reason %d",
704 (unsigned long long)__entry->pq,
705 (unsigned long long)__entry->wait,
/*
 * Tracepoint hfi1_usdma_we: logs the packet queue's current state together
 * with a wait-event return value ('we_ret').
 * NOTE(review): truncated extract -- the we_ret prototype parameter and the
 * state/we_ret __field lines are missing from view.
 */
710 TRACE_EVENT(hfi1_usdma_we,
711 TP_PROTO(struct hfi1_user_sdma_pkt_q *pq,
714 TP_STRUCT__entry(DD_DEV_ENTRY(pq->dd)
715 __field(struct hfi1_user_sdma_pkt_q *, pq)
719 TP_fast_assign(DD_DEV_ASSIGN(pq->dd);
721 __entry->state = pq->state;
722 __entry->we_ret = we_ret;
724 TP_printk("[%s] pq %llx state %d we_ret %d",
726 (unsigned long long)__entry->pq,
/*
 * print_u32_array(): helper (defined elsewhere) that renders a u32 array
 * into the per-CPU trace_seq buffer; __print_u32_hex binds the implicit
 * trace_seq pointer 'p' available inside TP_printk.
 */
732 const char *print_u32_array(struct trace_seq *, u32 *, int);
733 #define __print_u32_hex(arr, len) print_u32_array(p, arr, len)
/*
 * Tracepoint hfi1_sdma_user_header_ahg: dumps the AHG (automatic header
 * generation) words for a request -- up to 10 u32s copied into a fixed
 * __array, printed in hex along with the SDE and AHG index.
 * NOTE(review): truncated extract -- the len __field and sde assignment are
 * missing from view; 'len' is assumed <= 10 by the memcpy below -- confirm
 * callers enforce this.
 */
735 TRACE_EVENT(hfi1_sdma_user_header_ahg,
736 TP_PROTO(struct hfi1_devdata *dd, u16 ctxt, u8 subctxt, u16 req,
737 u8 sde, u8 ahgidx, u32 *ahg, int len, u32 tidval),
738 TP_ARGS(dd, ctxt, subctxt, req, sde, ahgidx, ahg, len, tidval),
748 __array(u32, ahg, 10)
752 __entry->ctxt = ctxt;
753 __entry->subctxt = subctxt;
756 __entry->idx = ahgidx;
758 __entry->tidval = tidval;
759 memcpy(__entry->ahg, ahg, len * sizeof(u32));
761 TP_printk("[%s:%u:%u:%u] (SDE%u/AHG%u) ahg[0-%d]=(%s) TIDVal=0x%x",
769 __print_u32_hex(__entry->ahg, __entry->len),
/*
 * Tracepoint hfi1_sdma_state: records an SDMA engine state-machine
 * transition as two strings (current state -> new state), stored with the
 * dynamic __string/__assign_str mechanism.
 * NOTE(review): truncated extract -- the cstate/nstate prototype parameters
 * are missing from view.
 */
774 TRACE_EVENT(hfi1_sdma_state,
775 TP_PROTO(struct sdma_engine *sde,
779 TP_ARGS(sde, cstate, nstate),
780 TP_STRUCT__entry(DD_DEV_ENTRY(sde->dd)
781 __string(curstate, cstate)
782 __string(newstate, nstate)
784 TP_fast_assign(DD_DEV_ASSIGN(sde->dd);
785 __assign_str(curstate, cstate);
786 __assign_str(newstate, nstate);
788 TP_printk("[%s] current state %s new state %s",
/*
 * Buffer-control (BCT) dump: the format string below prints the overall
 * shared limit plus dedicated/shared credit pairs for VLs 0-7 and VL 15.
 * The BCT() accessor reads a field out of the struct buffer_control copied
 * into the event's dynamic array.
 * NOTE(review): truncated extract -- the BCT_FORMAT define line, the BCT()
 * macro opener, and the ".shared" field accesses paired with each
 * ".dedicated" below are missing from view.
 */
796 "shared_limit %x vls 0-7 [%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x][%x,%x] 15 [%x,%x]"
800 ((struct buffer_control *)__get_dynamic_array(bct))->field \
803 DECLARE_EVENT_CLASS(hfi1_bct_template,
804 TP_PROTO(struct hfi1_devdata *dd,
805 struct buffer_control *bc),
807 TP_STRUCT__entry(DD_DEV_ENTRY(dd)
/* Whole struct is snapshotted into the ring buffer at assign time. */
808 __dynamic_array(u8, bct, sizeof(*bc))
810 TP_fast_assign(DD_DEV_ASSIGN(dd);
811 memcpy(__get_dynamic_array(bct), bc,
814 TP_printk(BCT_FORMAT,
815 BCT(overall_shared_limit),
817 BCT(vl[0].dedicated),
820 BCT(vl[1].dedicated),
823 BCT(vl[2].dedicated),
826 BCT(vl[3].dedicated),
829 BCT(vl[4].dedicated),
832 BCT(vl[5].dedicated),
835 BCT(vl[6].dedicated),
838 BCT(vl[7].dedicated),
841 BCT(vl[15].dedicated),
/* Instances: setting vs. getting the buffer-control table. */
846 DEFINE_EVENT(hfi1_bct_template, bct_set,
847 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
850 DEFINE_EVENT(hfi1_bct_template, bct_get,
851 TP_PROTO(struct hfi1_devdata *dd, struct buffer_control *bc),
/*
 * Tracepoint hfi1_qp_send_completion: logs completion of a send work queue
 * entry -- QP number/type, the wqe pointer and slot index, wr_id, length,
 * ssn, opcode, and send flags.
 * NOTE(review): truncated extract -- the TRACE_EVENT( opener and several
 * __field lines (qpn, qpt, length, idx, ssn, wr_id) are missing from view.
 */
855 hfi1_qp_send_completion,
856 TP_PROTO(struct rvt_qp *qp, struct rvt_swqe *wqe, u32 idx),
857 TP_ARGS(qp, wqe, idx),
859 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
860 __field(struct rvt_swqe *, wqe)
867 __field(enum ib_wr_opcode, opcode)
868 __field(int, send_flags)
871 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
873 __entry->wr_id = wqe->wr.wr_id;
874 __entry->qpn = qp->ibqp.qp_num;
875 __entry->qpt = qp->ibqp.qp_type;
876 __entry->length = wqe->length;
878 __entry->ssn = wqe->ssn;
879 __entry->opcode = wqe->wr.opcode;
880 __entry->send_flags = wqe->wr.send_flags;
883 "[%s] qpn 0x%x qpt %u wqe %p idx %u wr_id %llx length %u ssn %u opcode %x send_flags %x",
/*
 * Event class hfi1_do_send_template: minimal QP-number + boolean flag
 * logger, instantiated for the RC send, RC TID send, and expired-time-slice
 * paths below.
 * NOTE(review): truncated extract -- the DECLARE_EVENT_CLASS( opener and the
 * qpn/flag __field lines are missing from view.
 */
898 hfi1_do_send_template,
899 TP_PROTO(struct rvt_qp *qp, bool flag),
902 DD_DEV_ENTRY(dd_from_ibdev(qp->ibqp.device))
907 DD_DEV_ASSIGN(dd_from_ibdev(qp->ibqp.device))
908 __entry->qpn = qp->ibqp.qp_num;
909 __entry->flag = flag;
912 "[%s] qpn %x flag %d",
920 hfi1_do_send_template, hfi1_rc_do_send,
921 TP_PROTO(struct rvt_qp *qp, bool flag),
925 DEFINE_EVENT(/* event */
926 hfi1_do_send_template, hfi1_rc_do_tid_send,
927 TP_PROTO(struct rvt_qp *qp, bool flag),
932 hfi1_do_send_template, hfi1_rc_expired_time_slice,
933 TP_PROTO(struct rvt_qp *qp, bool flag),
/*
 * Event class hfi1_ipoib_txq_template (AIP = accelerated IP): snapshot of an
 * IPoIB tx queue -- ring head/tail, queue index, flow, stop/no-descriptor
 * counters, and whether the netdev subqueue is currently stopped.
 * NOTE(review): truncated extract -- several __field lines (txq, head, tail,
 * idx, used, flow, stopped) and parts of the TP_printk argument list are
 * missing from view.
 */
937 DECLARE_EVENT_CLASS(/* AIP */
938 hfi1_ipoib_txq_template,
939 TP_PROTO(struct hfi1_ipoib_txq *txq),
941 TP_STRUCT__entry(/* entry */
942 DD_DEV_ENTRY(txq->priv->dd)
943 __field(struct hfi1_ipoib_txq *, txq)
944 __field(struct sdma_engine *, sde)
950 __field(int, no_desc)
954 TP_fast_assign(/* assign */
955 DD_DEV_ASSIGN(txq->priv->dd)
957 __entry->sde = txq->sde;
958 __entry->head = txq->tx_ring.head;
959 __entry->tail = txq->tx_ring.tail;
960 __entry->idx = txq->q_idx;
963 atomic64_read(&txq->complete_txreqs);
964 __entry->flow = txq->flow.as_int;
965 __entry->stops = atomic_read(&txq->stops);
966 __entry->no_desc = atomic_read(&txq->no_desc);
968 __netif_subqueue_stopped(txq->priv->netdev, txq->q_idx);
970 TP_printk(/* print */
971 "[%s] txq %llx idx %u sde %llx head %lx tail %lx flow %x used %u stops %d no_desc %d stopped %u",
973 (unsigned long long)__entry->txq,
975 (unsigned long long)__entry->sde,
/*
 * Instances of hfi1_ipoib_txq_template: one named event per tx-queue
 * lifecycle point (stop, wake, flow flush/switch, wakeup, full, queued,
 * xmit stopped/unstopped).  All share the template's payload and format.
 * NOTE(review): truncated extract -- TP_ARGS and closing lines of each
 * DEFINE_EVENT are missing from view.
 */
986 DEFINE_EVENT(/* queue stop */
987 hfi1_ipoib_txq_template, hfi1_txq_stop,
988 TP_PROTO(struct hfi1_ipoib_txq *txq),
992 DEFINE_EVENT(/* queue wake */
993 hfi1_ipoib_txq_template, hfi1_txq_wake,
994 TP_PROTO(struct hfi1_ipoib_txq *txq),
998 DEFINE_EVENT(/* flow flush */
999 hfi1_ipoib_txq_template, hfi1_flow_flush,
1000 TP_PROTO(struct hfi1_ipoib_txq *txq),
1004 DEFINE_EVENT(/* flow switch */
1005 hfi1_ipoib_txq_template, hfi1_flow_switch,
1006 TP_PROTO(struct hfi1_ipoib_txq *txq),
1010 DEFINE_EVENT(/* wakeup */
1011 hfi1_ipoib_txq_template, hfi1_txq_wakeup,
1012 TP_PROTO(struct hfi1_ipoib_txq *txq),
1016 DEFINE_EVENT(/* full */
1017 hfi1_ipoib_txq_template, hfi1_txq_full,
1018 TP_PROTO(struct hfi1_ipoib_txq *txq),
1022 DEFINE_EVENT(/* queued */
1023 hfi1_ipoib_txq_template, hfi1_txq_queued,
1024 TP_PROTO(struct hfi1_ipoib_txq *txq),
1028 DEFINE_EVENT(/* xmit_stopped */
1029 hfi1_ipoib_txq_template, hfi1_txq_xmit_stopped,
1030 TP_PROTO(struct hfi1_ipoib_txq *txq),
1034 DEFINE_EVENT(/* xmit_unstopped */
1035 hfi1_ipoib_txq_template, hfi1_txq_xmit_unstopped,
1036 TP_PROTO(struct hfi1_ipoib_txq *txq),
1040 #endif /* __HFI1_TRACE_TX_H */
1042 #undef TRACE_INCLUDE_PATH
1043 #undef TRACE_INCLUDE_FILE
1044 #define TRACE_INCLUDE_PATH .
1045 #define TRACE_INCLUDE_FILE trace_tx
1046 #include <trace/define_trace.h>