1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM xdp
5 #if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/tracepoint.h>
11 #include <linux/bpf.h>
/*
 * XDP action bookkeeping for trace output:
 *  - __XDP_ACT_TP_FN exports each XDP_* action value to user space via
 *    TRACE_DEFINE_ENUM so trace parsers can resolve it.
 *  - __XDP_ACT_SYM_FN / __XDP_ACT_SYM_TAB build the { value, "name" } table
 *    consumed by __print_symbolic(), terminated by a { -1, NULL } sentinel.
 * NOTE(review): the continuation lines of __XDP_ACT_MAP (the FN(action) list)
 * and of __XDP_ACT_SYM_FN appear to be elided in this excerpt — confirm
 * against the full header before relying on the expansion.
 */
13 #define __XDP_ACT_MAP(FN) \
20 #define __XDP_ACT_TP_FN(x) \
21 TRACE_DEFINE_ENUM(XDP_##x);
22 #define __XDP_ACT_SYM_FN(x) \
24 #define __XDP_ACT_SYM_TAB \
25 __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, NULL }
26 __XDP_ACT_MAP(__XDP_ACT_TP_FN)
/*
 * xdp_exception - emitted with the receiving netdev, the attached BPF
 * program and the XDP action code; the record captures the program id
 * (xdp->aux->id), the device ifindex and the action, printed symbolically
 * through __XDP_ACT_SYM_TAB.
 * NOTE(review): TP_STRUCT__entry / TP_fast_assign wrappers and the event's
 * closing paren are elided in this excerpt.
 */
28 TRACE_EVENT(xdp_exception,
30 TP_PROTO(const struct net_device *dev,
31 const struct bpf_prog *xdp, u32 act),
33 TP_ARGS(dev, xdp, act),
42 __entry->prog_id = xdp->aux->id;
44 __entry->ifindex = dev->ifindex;
47 TP_printk("prog_id=%d action=%s ifindex=%d",
49 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
/*
 * xdp_bulk_tx - records a bulk transmit attempt on a device: the ifindex,
 * sent/drops/err counters, and a fixed action of XDP_TX (set in the assign
 * block, not taken from a parameter).
 * NOTE(review): TP_STRUCT__entry / TP_fast_assign wrappers and some assign
 * lines (sent, err) are elided in this excerpt.
 */
53 TRACE_EVENT(xdp_bulk_tx,
55 TP_PROTO(const struct net_device *dev,
56 int sent, int drops, int err),
58 TP_ARGS(dev, sent, drops, err),
69 __entry->ifindex = dev->ifindex;
70 __entry->act = XDP_TX;
71 __entry->drops = drops;
76 TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
78 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
79 __entry->sent, __entry->drops, __entry->err)
/*
 * Local mirror of the devmap value layout: only the leading net_device
 * pointer is needed here, so a private struct avoids including the full
 * devmap internals in a tracepoint header.
 * NOTE(review): the struct's closing brace (original line 86) is elided in
 * this excerpt.
 */
82 #ifndef __DEVMAP_OBJ_TYPE
83 #define __DEVMAP_OBJ_TYPE
84 struct _bpf_dtab_netdev {
85 struct net_device *dev;
87 #endif /* __DEVMAP_OBJ_TYPE */
/*
 * devmap_ifindex() - resolve tgt to a device ifindex when map is a DEVMAP
 * or DEVMAP_HASH; evaluates to 0 for any other map type.
 */
89 #define devmap_ifindex(tgt, map) \
90 (((map->map_type == BPF_MAP_TYPE_DEVMAP || \
91 map->map_type == BPF_MAP_TYPE_DEVMAP_HASH)) ? \
92 ((struct _bpf_dtab_netdev *)tgt)->dev->ifindex : 0)
/*
 * xdp_redirect_template - shared event class for all xdp_redirect* trace
 * events. Records the program id, fixed action XDP_REDIRECT, source
 * ifindex, destination ifindex (via devmap_ifindex() when a map is
 * present), and map id/index (both 0 when map is NULL).
 * NOTE(review): TP_STRUCT__entry / TP_fast_assign wrappers, several
 * __field() lines (act, err, map_id) and the fallback branch of the
 * to_ifindex conditional are elided in this excerpt.
 */
94 DECLARE_EVENT_CLASS(xdp_redirect_template,
96 TP_PROTO(const struct net_device *dev,
97 const struct bpf_prog *xdp,
98 const void *tgt, int err,
99 const struct bpf_map *map, u32 index),
101 TP_ARGS(dev, xdp, tgt, err, map, index),
104 __field(int, prog_id)
106 __field(int, ifindex)
108 __field(int, to_ifindex)
110 __field(int, map_index)
114 __entry->prog_id = xdp->aux->id;
115 __entry->act = XDP_REDIRECT;
116 __entry->ifindex = dev->ifindex;
118 __entry->to_ifindex = map ? devmap_ifindex(tgt, map) :
120 __entry->map_id = map ? map->id : 0;
121 __entry->map_index = map ? index : 0;
124 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
125 " map_id=%d map_index=%d",
127 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
128 __entry->ifindex, __entry->to_ifindex,
129 __entry->err, __entry->map_id, __entry->map_index)
/*
 * xdp_redirect / xdp_redirect_err - success and failure instances of the
 * xdp_redirect_template class; identical prototype, distinct event names so
 * the error path can be traced separately.
 * NOTE(review): each DEFINE_EVENT's closing ");" is elided in this excerpt.
 */
132 DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
133 TP_PROTO(const struct net_device *dev,
134 const struct bpf_prog *xdp,
135 const void *tgt, int err,
136 const struct bpf_map *map, u32 index),
137 TP_ARGS(dev, xdp, tgt, err, map, index)
140 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
141 TP_PROTO(const struct net_device *dev,
142 const struct bpf_prog *xdp,
143 const void *tgt, int err,
144 const struct bpf_map *map, u32 index),
145 TP_ARGS(dev, xdp, tgt, err, map, index)
/*
 * Convenience wrappers around the xdp_redirect tracepoints so callers need
 * not spell out the unused tgt/map arguments for the non-map variants
 * (NULL map, err forced to 0 on the success paths).
 *
 * No trailing semicolon inside the macro bodies: the call site supplies its
 * own, which keeps constructs such as "if (x) _trace_xdp_redirect(...);
 * else ..." well-formed (kernel checkpatch: "macros should not use a
 * trailing semicolon").
 */
#define _trace_xdp_redirect(dev, xdp, to) \
	trace_xdp_redirect(dev, xdp, NULL, 0, NULL, to)

#define _trace_xdp_redirect_err(dev, xdp, to, err) \
	trace_xdp_redirect_err(dev, xdp, NULL, err, NULL, to)

#define _trace_xdp_redirect_map(dev, xdp, to, map, index) \
	trace_xdp_redirect(dev, xdp, to, 0, map, index)

#define _trace_xdp_redirect_map_err(dev, xdp, to, map, index, err) \
	trace_xdp_redirect_err(dev, xdp, to, err, map, index)
160 /* not used anymore, but kept around so as not to break old programs */
/*
 * xdp_redirect_map / xdp_redirect_map_err - legacy event names kept for
 * ABI compatibility with existing trace consumers (see comment above);
 * same template and prototype as xdp_redirect / xdp_redirect_err.
 * NOTE(review): each DEFINE_EVENT's closing ");" is elided in this excerpt.
 */
161 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map,
162 TP_PROTO(const struct net_device *dev,
163 const struct bpf_prog *xdp,
164 const void *tgt, int err,
165 const struct bpf_map *map, u32 index),
166 TP_ARGS(dev, xdp, tgt, err, map, index)
169 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_map_err,
170 TP_PROTO(const struct net_device *dev,
171 const struct bpf_prog *xdp,
172 const void *tgt, int err,
173 const struct bpf_map *map, u32 index),
174 TP_ARGS(dev, xdp, tgt, err, map, index)
/*
 * xdp_cpumap_kthread - per-iteration stats from a cpumap kthread: map id,
 * the CPU the kthread runs on (smp_processor_id() at assign time),
 * processed/drops counters, the sched flag, and the pass/drop/redirect
 * counters copied out of *xdp_stats. Action is fixed to XDP_REDIRECT.
 * NOTE(review): TP_STRUCT__entry / TP_fast_assign wrappers, some __field()
 * lines (map_id, act, cpu, sched) and the TP_printk opening are elided in
 * this excerpt.
 */
177 TRACE_EVENT(xdp_cpumap_kthread,
179 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
180 int sched, struct xdp_cpumap_stats *xdp_stats),
182 TP_ARGS(map_id, processed, drops, sched, xdp_stats),
188 __field(unsigned int, drops)
189 __field(unsigned int, processed)
191 __field(unsigned int, xdp_pass)
192 __field(unsigned int, xdp_drop)
193 __field(unsigned int, xdp_redirect)
197 __entry->map_id = map_id;
198 __entry->act = XDP_REDIRECT;
199 __entry->cpu = smp_processor_id();
200 __entry->drops = drops;
201 __entry->processed = processed;
202 __entry->sched = sched;
203 __entry->xdp_pass = xdp_stats->pass;
204 __entry->xdp_drop = xdp_stats->drop;
205 __entry->xdp_redirect = xdp_stats->redirect;
209 " cpu=%d map_id=%d action=%s"
210 " processed=%u drops=%u"
212 " xdp_pass=%u xdp_drop=%u xdp_redirect=%u",
213 __entry->cpu, __entry->map_id,
214 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
215 __entry->processed, __entry->drops,
217 __entry->xdp_pass, __entry->xdp_drop, __entry->xdp_redirect)
/*
 * xdp_cpumap_enqueue - records an enqueue toward a cpumap entry: map id,
 * the enqueueing CPU (smp_processor_id() at assign time), processed/drops
 * counters and the destination CPU. Action is fixed to XDP_REDIRECT.
 * NOTE(review): the TP_PROTO continuation carrying the to_cpu parameter,
 * the TP_STRUCT__entry / TP_fast_assign wrappers and the tail of TP_printk
 * are elided in this excerpt.
 */
220 TRACE_EVENT(xdp_cpumap_enqueue,
222 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
225 TP_ARGS(map_id, processed, drops, to_cpu),
231 __field(unsigned int, drops)
232 __field(unsigned int, processed)
237 __entry->map_id = map_id;
238 __entry->act = XDP_REDIRECT;
239 __entry->cpu = smp_processor_id();
240 __entry->drops = drops;
241 __entry->processed = processed;
242 __entry->to_cpu = to_cpu;
246 " cpu=%d map_id=%d action=%s"
247 " processed=%u drops=%u"
249 __entry->cpu, __entry->map_id,
250 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
251 __entry->processed, __entry->drops,
/*
 * xdp_devmap_xmit - records an ndo_xdp_xmit bulk transmit from one device
 * to another: both ifindexes, sent/drops counters, and a fixed action of
 * XDP_REDIRECT.
 * NOTE(review): TP_STRUCT__entry / TP_fast_assign wrappers, some __field()
 * lines (drops, sent, err) and the err assignment/print tail are elided in
 * this excerpt.
 */
255 TRACE_EVENT(xdp_devmap_xmit,
257 TP_PROTO(const struct net_device *from_dev,
258 const struct net_device *to_dev,
259 int sent, int drops, int err),
261 TP_ARGS(from_dev, to_dev, sent, drops, err),
264 __field(int, from_ifindex)
266 __field(int, to_ifindex)
273 __entry->from_ifindex = from_dev->ifindex;
274 __entry->act = XDP_REDIRECT;
275 __entry->to_ifindex = to_dev->ifindex;
276 __entry->drops = drops;
277 __entry->sent = sent;
281 TP_printk("ndo_xdp_xmit"
282 " from_ifindex=%d to_ifindex=%d action=%s"
285 __entry->from_ifindex, __entry->to_ifindex,
286 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
287 __entry->sent, __entry->drops,
291 /* Expect users already include <net/xdp.h>, but not xdp_priv.h */
292 #include <net/xdp_priv.h>
/*
 * xdp_mem_allocator type bookkeeping for trace output, mirroring the
 * __XDP_ACT_* pattern above: __MEM_TYPE_TP_FN exports each MEM_TYPE_*
 * value via TRACE_DEFINE_ENUM, and __MEM_TYPE_SYM_FN / __MEM_TYPE_SYM_TAB
 * build the __print_symbolic() table with a { -1, 0 } sentinel.
 * NOTE(review): the continuation lines of __MEM_TYPE_MAP (the FN(type)
 * list) are elided in this excerpt.
 */
294 #define __MEM_TYPE_MAP(FN) \
300 #define __MEM_TYPE_TP_FN(x) \
301 TRACE_DEFINE_ENUM(MEM_TYPE_##x);
302 #define __MEM_TYPE_SYM_FN(x) \
303 { MEM_TYPE_##x, #x },
304 #define __MEM_TYPE_SYM_TAB \
305 __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
306 __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
/*
 * mem_disconnect - records an xdp_mem_allocator being disconnected: the
 * mem id and type (printed symbolically via __MEM_TYPE_SYM_TAB) and the
 * allocator pointer, all copied from *xa.
 * NOTE(review): TP_ARGS, the TP_STRUCT__entry / TP_fast_assign wrappers,
 * the mem_id __field() line and the TP_printk argument tail are elided in
 * this excerpt.
 */
308 TRACE_EVENT(mem_disconnect,
310 TP_PROTO(const struct xdp_mem_allocator *xa),
315 __field(const struct xdp_mem_allocator *, xa)
317 __field(u32, mem_type)
318 __field(const void *, allocator)
323 __entry->mem_id = xa->mem.id;
324 __entry->mem_type = xa->mem.type;
325 __entry->allocator = xa->allocator;
328 TP_printk("mem_id=%d mem_type=%s allocator=%p",
330 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
/*
 * mem_connect - records an xdp_mem_allocator being attached to an RX
 * queue: mem id/type and allocator pointer from *xa, plus the ifindex of
 * the queue's device (rxq->dev->ifindex).
 * NOTE(review): TP_ARGS, the TP_STRUCT__entry / TP_fast_assign wrappers,
 * the mem_id __field() line, the rxq assignment and the TP_printk tail are
 * elided in this excerpt.
 */
335 TRACE_EVENT(mem_connect,
337 TP_PROTO(const struct xdp_mem_allocator *xa,
338 const struct xdp_rxq_info *rxq),
343 __field(const struct xdp_mem_allocator *, xa)
345 __field(u32, mem_type)
346 __field(const void *, allocator)
347 __field(const struct xdp_rxq_info *, rxq)
348 __field(int, ifindex)
353 __entry->mem_id = xa->mem.id;
354 __entry->mem_type = xa->mem.type;
355 __entry->allocator = xa->allocator;
357 __entry->ifindex = rxq->dev->ifindex;
360 TP_printk("mem_id=%d mem_type=%s allocator=%p"
363 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
/*
 * mem_return_failed - records a page that could not be returned to its
 * xdp_mem_info owner: the raw page pointer plus the mem id and type
 * (printed symbolically via __MEM_TYPE_SYM_TAB).
 * NOTE(review): TP_ARGS, the TP_STRUCT__entry / TP_fast_assign wrappers,
 * the mem_id __field() line and the TP_printk argument tail are elided in
 * this excerpt.
 */
369 TRACE_EVENT(mem_return_failed,
371 TP_PROTO(const struct xdp_mem_info *mem,
372 const struct page *page),
377 __field(const struct page *, page)
379 __field(u32, mem_type)
383 __entry->page = page;
384 __entry->mem_id = mem->id;
385 __entry->mem_type = mem->type;
388 TP_printk("mem_id=%d mem_type=%s page=%p",
390 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
395 #endif /* _TRACE_XDP_H */
397 #include <trace/define_trace.h>