1 /* SPDX-License-Identifier: GPL-2.0 */
3 #define TRACE_SYSTEM xdp
5 #if !defined(_TRACE_XDP_H) || defined(TRACE_HEADER_MULTI_READ)
8 #include <linux/netdevice.h>
9 #include <linux/filter.h>
10 #include <linux/tracepoint.h>
11 #include <linux/bpf.h>
/*
 * Helpers mapping XDP action codes (XDP_ABORTED, XDP_DROP, ...) to
 * symbolic names for trace output:
 *  - __XDP_ACT_TP_FN exports each enum value to userspace tooling via
 *    TRACE_DEFINE_ENUM.
 *  - __XDP_ACT_SYM_TAB builds the { value, "name" } table (terminated
 *    by { -1, 0 }) consumed by __print_symbolic() in TP_printk.
 * NOTE(review): the bodies of __XDP_ACT_MAP (the FN(...) action list)
 * and __XDP_ACT_SYM_FN are not visible in this extract.
 */
13 #define __XDP_ACT_MAP(FN) \
20 #define __XDP_ACT_TP_FN(x) \
21 TRACE_DEFINE_ENUM(XDP_##x);
22 #define __XDP_ACT_SYM_FN(x) \
24 #define __XDP_ACT_SYM_TAB \
25 __XDP_ACT_MAP(__XDP_ACT_SYM_FN) { -1, 0 }
26 __XDP_ACT_MAP(__XDP_ACT_TP_FN)
/*
 * xdp_exception - tracepoint recording an XDP program's action on a
 * net_device.  Captures the BPF program id (xdp->aux->id), the action
 * code (printed symbolically via __XDP_ACT_SYM_TAB) and the device
 * ifindex.  NOTE(review): the TP_STRUCT__entry clause and the closing
 * of this TRACE_EVENT are not visible in this extract.
 */
28 TRACE_EVENT(xdp_exception,
30 TP_PROTO(const struct net_device *dev,
31 const struct bpf_prog *xdp, u32 act),
33 TP_ARGS(dev, xdp, act),
42 __entry->prog_id = xdp->aux->id;
44 __entry->ifindex = dev->ifindex;
47 TP_printk("prog_id=%d action=%s ifindex=%d",
49 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
/*
 * xdp_bulk_tx - tracepoint for bulk XDP transmit on a device.  The
 * action is hard-coded to XDP_TX; sent/drops/err counters come from
 * the caller.  NOTE(review): TP_STRUCT__entry and several assignment
 * lines are not visible in this extract.
 */
53 TRACE_EVENT(xdp_bulk_tx,
55 TP_PROTO(const struct net_device *dev,
56 int sent, int drops, int err),
58 TP_ARGS(dev, sent, drops, err),
69 __entry->ifindex = dev->ifindex;
70 __entry->act = XDP_TX;
71 __entry->drops = drops;
76 TP_printk("ifindex=%d action=%s sent=%d drops=%d err=%d",
78 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
79 __entry->sent, __entry->drops, __entry->err)
/*
 * xdp_redirect_template - shared event class for the xdp_redirect*
 * tracepoints.  Action is fixed to XDP_REDIRECT; map may be NULL for
 * non-map redirects (map_id then records 0).
 * NOTE(review): parts of TP_STRUCT__entry and the trailing TP_printk
 * arguments / closing paren are not visible in this extract.
 */
82 DECLARE_EVENT_CLASS(xdp_redirect_template,
84 TP_PROTO(const struct net_device *dev,
85 const struct bpf_prog *xdp,
86 int to_ifindex, int err,
87 const struct bpf_map *map, u32 map_index),
89 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
96 __field(int, to_ifindex)
98 __field(int, map_index)
102 __entry->prog_id = xdp->aux->id;
103 __entry->act = XDP_REDIRECT;
104 __entry->ifindex = dev->ifindex;
106 __entry->to_ifindex = to_ifindex;
107 __entry->map_id = map ? map->id : 0;
108 __entry->map_index = map_index;
111 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d",
113 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
114 __entry->ifindex, __entry->to_ifindex,
/*
 * xdp_redirect - success-path instance of xdp_redirect_template.
 * NOTE(review): the closing ");" of this DEFINE_EVENT is not visible
 * in this extract.
 */
118 DEFINE_EVENT(xdp_redirect_template, xdp_redirect,
119 TP_PROTO(const struct net_device *dev,
120 const struct bpf_prog *xdp,
121 int to_ifindex, int err,
122 const struct bpf_map *map, u32 map_index),
123 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
/*
 * xdp_redirect_err - error-path instance of xdp_redirect_template
 * (same fields; the err argument carries the failure code).
 * NOTE(review): the closing ");" of this DEFINE_EVENT is not visible
 * in this extract.
 */
126 DEFINE_EVENT(xdp_redirect_template, xdp_redirect_err,
127 TP_PROTO(const struct net_device *dev,
128 const struct bpf_prog *xdp,
129 int to_ifindex, int err,
130 const struct bpf_map *map, u32 map_index),
131 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index)
/*
 * _trace_xdp_redirect - convenience wrapper for the non-map redirect
 * success path (err = 0, no map).  The trailing semicolon is supplied
 * by the caller: keeping one inside the macro body created a stray
 * null statement at every call site and broke brace-less if/else.
 */
134 #define _trace_xdp_redirect(dev, xdp, to) \
135 trace_xdp_redirect(dev, xdp, to, 0, NULL, 0)
/*
 * _trace_xdp_redirect_err - convenience wrapper for the non-map
 * redirect error path (no map).  Trailing semicolon removed from the
 * macro body for the same reason as _trace_xdp_redirect: the caller
 * supplies it, and a doubled ";" breaks brace-less if/else.
 */
137 #define _trace_xdp_redirect_err(dev, xdp, to, err) \
138 trace_xdp_redirect_err(dev, xdp, to, err, NULL, 0)
/*
 * xdp_redirect_map - map-based redirect instance of the template with
 * an extended printk that also shows map_id and map_index.
 * NOTE(review): two TP_printk argument lines and the closing ");" are
 * not visible in this extract.
 */
140 DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map,
141 TP_PROTO(const struct net_device *dev,
142 const struct bpf_prog *xdp,
143 int to_ifindex, int err,
144 const struct bpf_map *map, u32 map_index),
145 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
146 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
147 " map_id=%d map_index=%d",
149 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
150 __entry->ifindex, __entry->to_ifindex,
152 __entry->map_id, __entry->map_index)
/*
 * xdp_redirect_map_err - error-path counterpart of xdp_redirect_map,
 * with the same extended map_id/map_index printk format.
 * NOTE(review): some TP_printk argument lines and the closing ");"
 * are not visible in this extract.
 */
155 DEFINE_EVENT_PRINT(xdp_redirect_template, xdp_redirect_map_err,
156 TP_PROTO(const struct net_device *dev,
157 const struct bpf_prog *xdp,
158 int to_ifindex, int err,
159 const struct bpf_map *map, u32 map_index),
160 TP_ARGS(dev, xdp, to_ifindex, err, map, map_index),
161 TP_printk("prog_id=%d action=%s ifindex=%d to_ifindex=%d err=%d"
162 " map_id=%d map_index=%d",
164 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
165 __entry->ifindex, __entry->to_ifindex,
167 __entry->map_id, __entry->map_index)
/*
 * Local mirror of the devmap value layout: only the leading
 * net_device pointer is needed here (to read dev->ifindex), so the
 * real struct bpf_dtab_netdev is not included.  Guarded so multiple
 * trace headers can define it.  NOTE(review): the closing "};" of the
 * struct is not visible in this extract.
 */
170 #ifndef __DEVMAP_OBJ_TYPE
171 #define __DEVMAP_OBJ_TYPE
172 struct _bpf_dtab_netdev {
173 struct net_device *dev;
175 #endif /* __DEVMAP_OBJ_TYPE */
/*
 * devmap_ifindex - resolve a devmap forwarding target to its egress
 * ifindex; evaluates to 0 for non-devmap map types, where fwd does
 * not point at a (struct _bpf_dtab_netdev).  Macro arguments are
 * parenthesized so the expansion stays correct if a caller passes a
 * compound expression (standard macro hygiene; behavior is unchanged
 * for all existing call sites).  Note: (map) is evaluated twice.
 */
177 #define devmap_ifindex(fwd, map) \
178 (((map)->map_type == BPF_MAP_TYPE_DEVMAP || \
179 (map)->map_type == BPF_MAP_TYPE_DEVMAP_HASH) ? \
180 ((struct _bpf_dtab_netdev *)(fwd))->dev->ifindex : 0)
/*
 * _trace_xdp_redirect_map - wrapper that resolves the devmap target's
 * ifindex before calling trace_xdp_redirect_map.  NOTE(review): the
 * final continuation line of this macro (remaining arguments) is not
 * visible in this extract.
 */
182 #define _trace_xdp_redirect_map(dev, xdp, fwd, map, idx) \
183 trace_xdp_redirect_map(dev, xdp, devmap_ifindex(fwd, map), \
/*
 * _trace_xdp_redirect_map_err - error-path counterpart of
 * _trace_xdp_redirect_map.  NOTE(review): the final continuation line
 * of this macro is not visible in this extract.
 */
186 #define _trace_xdp_redirect_map_err(dev, xdp, fwd, map, idx, err) \
187 trace_xdp_redirect_map_err(dev, xdp, devmap_ifindex(fwd, map), \
/*
 * xdp_cpumap_kthread - tracepoint from the cpumap kthread reporting
 * per-iteration processed/dropped counts.  Action is fixed to
 * XDP_REDIRECT; cpu is sampled via smp_processor_id() at trace time.
 * NOTE(review): parts of TP_PROTO/TP_STRUCT__entry and the trailing
 * TP_printk arguments are not visible in this extract; "sched"
 * presumably flags whether the kthread rescheduled — confirm against
 * the full header.
 */
190 TRACE_EVENT(xdp_cpumap_kthread,
192 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
195 TP_ARGS(map_id, processed, drops, sched),
201 __field(unsigned int, drops)
202 __field(unsigned int, processed)
207 __entry->map_id = map_id;
208 __entry->act = XDP_REDIRECT;
209 __entry->cpu = smp_processor_id();
210 __entry->drops = drops;
211 __entry->processed = processed;
212 __entry->sched = sched;
216 " cpu=%d map_id=%d action=%s"
217 " processed=%u drops=%u"
219 __entry->cpu, __entry->map_id,
220 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
221 __entry->processed, __entry->drops,
/*
 * xdp_cpumap_enqueue - tracepoint for enqueueing packets toward a
 * remote CPU via a cpumap.  Mirrors xdp_cpumap_kthread but records
 * the destination CPU (to_cpu) instead of sched.  Action is fixed to
 * XDP_REDIRECT; cpu is the enqueueing CPU (smp_processor_id()).
 * NOTE(review): parts of TP_PROTO/TP_STRUCT__entry and the trailing
 * TP_printk arguments are not visible in this extract.
 */
225 TRACE_EVENT(xdp_cpumap_enqueue,
227 TP_PROTO(int map_id, unsigned int processed, unsigned int drops,
230 TP_ARGS(map_id, processed, drops, to_cpu),
236 __field(unsigned int, drops)
237 __field(unsigned int, processed)
242 __entry->map_id = map_id;
243 __entry->act = XDP_REDIRECT;
244 __entry->cpu = smp_processor_id();
245 __entry->drops = drops;
246 __entry->processed = processed;
247 __entry->to_cpu = to_cpu;
251 " cpu=%d map_id=%d action=%s"
252 " processed=%u drops=%u"
254 __entry->cpu, __entry->map_id,
255 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
256 __entry->processed, __entry->drops,
/*
 * xdp_devmap_xmit - tracepoint around ndo_xdp_xmit for devmap
 * redirects: records the map identity, sent/dropped frame counts,
 * the source and destination ifindexes, and the driver return code.
 * Action is fixed to XDP_REDIRECT.  NOTE(review): parts of
 * TP_PROTO/TP_STRUCT__entry and the format-string lines are not
 * visible in this extract.
 */
260 TRACE_EVENT(xdp_devmap_xmit,
262 TP_PROTO(const struct bpf_map *map, u32 map_index,
264 const struct net_device *from_dev,
265 const struct net_device *to_dev, int err),
267 TP_ARGS(map, map_index, sent, drops, from_dev, to_dev, err),
272 __field(u32, map_index)
275 __field(int, from_ifindex)
276 __field(int, to_ifindex)
281 __entry->map_id = map->id;
282 __entry->act = XDP_REDIRECT;
283 __entry->map_index = map_index;
284 __entry->drops = drops;
285 __entry->sent = sent;
286 __entry->from_ifindex = from_dev->ifindex;
287 __entry->to_ifindex = to_dev->ifindex;
291 TP_printk("ndo_xdp_xmit"
292 " map_id=%d map_index=%d action=%s"
294 " from_ifindex=%d to_ifindex=%d err=%d",
295 __entry->map_id, __entry->map_index,
296 __print_symbolic(__entry->act, __XDP_ACT_SYM_TAB),
297 __entry->sent, __entry->drops,
298 __entry->from_ifindex, __entry->to_ifindex, __entry->err)
301 /* Expect users already include <net/xdp.h>, but not xdp_priv.h */
302 #include <net/xdp_priv.h>
/*
 * Helpers mapping xdp_mem_info memory types (MEM_TYPE_*) to symbolic
 * names, parallel to the __XDP_ACT_* helpers above: TRACE_DEFINE_ENUM
 * exports for userspace, plus a { value, "name" } table terminated by
 * { -1, 0 } for __print_symbolic().  NOTE(review): the body of
 * __MEM_TYPE_MAP (the FN(...) list of memory types) is not visible in
 * this extract.
 */
304 #define __MEM_TYPE_MAP(FN) \
310 #define __MEM_TYPE_TP_FN(x) \
311 TRACE_DEFINE_ENUM(MEM_TYPE_##x);
312 #define __MEM_TYPE_SYM_FN(x) \
313 { MEM_TYPE_##x, #x },
314 #define __MEM_TYPE_SYM_TAB \
315 __MEM_TYPE_MAP(__MEM_TYPE_SYM_FN) { -1, 0 }
316 __MEM_TYPE_MAP(__MEM_TYPE_TP_FN)
/*
 * mem_disconnect - tracepoint for tearing down an xdp_mem_allocator:
 * records the allocator pointer itself plus its mem id, mem type
 * (printed symbolically) and backing allocator pointer.
 * NOTE(review): TP_ARGS, parts of TP_STRUCT__entry/TP_fast_assign and
 * the trailing TP_printk arguments are not visible in this extract.
 */
318 TRACE_EVENT(mem_disconnect,
320 TP_PROTO(const struct xdp_mem_allocator *xa),
325 __field(const struct xdp_mem_allocator *, xa)
327 __field(u32, mem_type)
328 __field(const void *, allocator)
333 __entry->mem_id = xa->mem.id;
334 __entry->mem_type = xa->mem.type;
335 __entry->allocator = xa->allocator;
338 TP_printk("mem_id=%d mem_type=%s allocator=%p",
340 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
/*
 * mem_connect - tracepoint for attaching an xdp_mem_allocator to an
 * RX queue: records the allocator identity (as in mem_disconnect)
 * plus the rxq pointer and its device ifindex.
 * NOTE(review): TP_ARGS, parts of TP_STRUCT__entry/TP_fast_assign and
 * the trailing TP_printk lines are not visible in this extract.
 */
345 TRACE_EVENT(mem_connect,
347 TP_PROTO(const struct xdp_mem_allocator *xa,
348 const struct xdp_rxq_info *rxq),
353 __field(const struct xdp_mem_allocator *, xa)
355 __field(u32, mem_type)
356 __field(const void *, allocator)
357 __field(const struct xdp_rxq_info *, rxq)
358 __field(int, ifindex)
363 __entry->mem_id = xa->mem.id;
364 __entry->mem_type = xa->mem.type;
365 __entry->allocator = xa->allocator;
367 __entry->ifindex = rxq->dev->ifindex;
370 TP_printk("mem_id=%d mem_type=%s allocator=%p"
373 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
/*
 * mem_return_failed - tracepoint fired when returning an XDP memory
 * area fails: records the mem id/type from xdp_mem_info and the page
 * pointer involved.  NOTE(review): TP_ARGS, parts of TP_STRUCT__entry
 * and the trailing TP_printk arguments are not visible in this
 * extract.
 */
379 TRACE_EVENT(mem_return_failed,
381 TP_PROTO(const struct xdp_mem_info *mem,
382 const struct page *page),
387 __field(const struct page *, page)
389 __field(u32, mem_type)
393 __entry->page = page;
394 __entry->mem_id = mem->id;
395 __entry->mem_type = mem->type;
398 TP_printk("mem_id=%d mem_type=%s page=%p",
400 __print_symbolic(__entry->mem_type, __MEM_TYPE_SYM_TAB),
405 #endif /* _TRACE_XDP_H */
407 #include <trace/define_trace.h>