// SPDX-License-Identifier: GPL-2.0
/* Copyright(c) 2017 - 2018 Intel Corporation. */

#include <linux/if_link.h>
#include <linux/if_xdp.h>
#include <linux/if_ether.h>
#include <linux/limits.h>
#include <linux/udp.h>
#include <arpa/inet.h>
#include <net/ethernet.h>
#include <netinet/ether.h>
#include <sys/capability.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <bpf/libbpf.h>

/* libbpf APIs for AF_XDP are deprecated starting from v0.7 */
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"

#define NUM_FRAMES (4 * 1024)
#define MIN_PKT_SIZE 64

#define DEBUG_HEXDUMP 0

#define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */
#define VLAN_PRIO_SHIFT 13
#define VLAN_VID_MASK 0x0fff /* VLAN Identifier */
#define VLAN_VID__DEFAULT 1
#define VLAN_PRI__DEFAULT 0
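
/*
 * The 16-bit VLAN TCI packs the 3-bit Priority Code Point into bits 15-13
 * and the 12-bit VLAN Identifier into bits 11-0. gen_eth_hdr_data() below
 * composes it as
 *
 *     tci = (vid & VLAN_VID_MASK) | ((pri << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK);
 *
 * so the defaults (VID 1, priority 0) yield a TCI of 0x0001.
 */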
#define NSEC_PER_SEC 1000000000UL
#define NSEC_PER_USEC 1000

#define SCHED_PRI__DEFAULT 0

static unsigned long prev_time;
static long tx_cycle_diff_min;
static long tx_cycle_diff_max;
static double tx_cycle_diff_ave;
static long tx_cycle_cnt;

static enum benchmark_type opt_bench = BENCH_RXDROP;
static u32 opt_xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static const char *opt_if = "";
static int opt_ifindex;
static unsigned long opt_duration;
static unsigned long start_time;
static bool benchmark_done;
static u32 opt_batch_size = 64;
static int opt_pkt_count;
static u16 opt_pkt_size = MIN_PKT_SIZE;
static u32 opt_pkt_fill_pattern = 0x12345678;
static bool opt_vlan_tag;
static u16 opt_pkt_vlan_id = VLAN_VID__DEFAULT;
static u16 opt_pkt_vlan_pri = VLAN_PRI__DEFAULT;
static struct ether_addr opt_txdmac = {{ 0x3c, 0xfd, 0xfe,
static struct ether_addr opt_txsmac = {{ 0xec, 0xb1, 0xd7,
static bool opt_extra_stats;
static bool opt_quiet;
static bool opt_app_stats;
static const char *opt_irq_str = "";
static int irqs_at_init = -1;
static int opt_interval = 1;
static int opt_retries = 3;
static u32 opt_xdp_bind_flags = XDP_USE_NEED_WAKEUP;
static u32 opt_umem_flags;
static int opt_unaligned_chunks;
static int opt_mmap_flags;
static int opt_xsk_frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
static int opt_timeout = 1000;
static bool opt_need_wakeup = true;
static u32 opt_num_xsks = 1;
static bool opt_busy_poll;
static bool opt_reduced_cap;
static clockid_t opt_clock = CLOCK_MONOTONIC;
static unsigned long opt_tx_cycle_ns;
static int opt_schpolicy = SCHED_OTHER;
static int opt_schprio = SCHED_PRI__DEFAULT;
static bool opt_tstamp;

    unsigned char h_dest[6];
    unsigned char h_source[6];
    __be16 h_vlan_encapsulated_proto;

#define PKTGEN_MAGIC 0xbe9be955

struct xsk_ring_stats {
    unsigned long rx_npkts;
    unsigned long tx_npkts;
    unsigned long rx_dropped_npkts;
    unsigned long rx_invalid_npkts;
    unsigned long tx_invalid_npkts;
    unsigned long rx_full_npkts;
    unsigned long rx_fill_empty_npkts;
    unsigned long tx_empty_npkts;
    unsigned long prev_rx_npkts;
    unsigned long prev_tx_npkts;
    unsigned long prev_rx_dropped_npkts;
    unsigned long prev_rx_invalid_npkts;
    unsigned long prev_tx_invalid_npkts;
    unsigned long prev_rx_full_npkts;
    unsigned long prev_rx_fill_empty_npkts;
    unsigned long prev_tx_empty_npkts;

struct xsk_driver_stats {
    unsigned long prev_intrs;

struct xsk_app_stats {
    unsigned long rx_empty_polls;
    unsigned long fill_fail_polls;
    unsigned long copy_tx_sendtos;
    unsigned long tx_wakeup_sendtos;
    unsigned long opt_polls;
    unsigned long prev_rx_empty_polls;
    unsigned long prev_fill_fail_polls;
    unsigned long prev_copy_tx_sendtos;
    unsigned long prev_tx_wakeup_sendtos;
    unsigned long prev_opt_polls;

struct xsk_umem_info {
    struct xsk_ring_prod fq;
    struct xsk_ring_cons cq;
    struct xsk_umem *umem;

struct xsk_socket_info {
    struct xsk_ring_cons rx;
    struct xsk_ring_prod tx;
    struct xsk_umem_info *umem;
    struct xsk_socket *xsk;
    struct xsk_ring_stats ring_stats;
    struct xsk_app_stats app_stats;
    struct xsk_driver_stats drv_stats;

static const struct clockid_map {
    { "REALTIME", CLOCK_REALTIME },
    { "TAI", CLOCK_TAI },
    { "BOOTTIME", CLOCK_BOOTTIME },
    { "MONOTONIC", CLOCK_MONOTONIC },

static const struct sched_map {
    { "OTHER", SCHED_OTHER },
    { "FIFO", SCHED_FIFO },

static int num_socks;
struct xsk_socket_info *xsks[MAX_SOCKS];
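
/*
 * Layout of the structures above: one UMEM (with its fill and completion
 * rings) can back up to MAX_SOCKS AF_XDP sockets, and each socket carries
 * its own Rx/Tx rings plus the ring/app/driver counters that the dump_*()
 * helpers below turn into per-second rates.
 */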
static int get_clockid(clockid_t *id, const char *name)
    const struct clockid_map *clk;

    for (clk = clockids_map; clk->name; clk++) {
        if (strcasecmp(clk->name, name) == 0) {

static int get_schpolicy(int *policy, const char *name)
    const struct sched_map *sch;

    for (sch = schmap; sch->name; sch++) {
        if (strcasecmp(sch->name, name) == 0) {
            *policy = sch->policy;

static unsigned long get_nsecs(void)
    clock_gettime(opt_clock, &ts);
    return ts.tv_sec * 1000000000UL + ts.tv_nsec;

static void print_benchmark(bool running)
    const char *bench_str = "INVALID";

    if (opt_bench == BENCH_RXDROP)
        bench_str = "rxdrop";
    else if (opt_bench == BENCH_TXONLY)
        bench_str = "txonly";
    else if (opt_bench == BENCH_L2FWD)

    printf("%s:%d %s ", opt_if, opt_queue, bench_str);
    if (opt_xdp_flags & XDP_FLAGS_SKB_MODE)
    else if (opt_xdp_flags & XDP_FLAGS_DRV_MODE)

        printf("running...");
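
/*
 * XDP_STATISTICS is a SOL_XDP getsockopt that fills a struct xdp_statistics
 * with kernel-side drop and ring-stall counters for one AF_XDP socket; the
 * helper below copies them into our ring_stats so dump_stats() can diff them
 * against the previous snapshot.
 */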
static int xsk_get_xdp_stats(int fd, struct xsk_socket_info *xsk)
    struct xdp_statistics stats;

    optlen = sizeof(stats);
    err = getsockopt(fd, SOL_XDP, XDP_STATISTICS, &stats, &optlen);

    if (optlen == sizeof(struct xdp_statistics)) {
        xsk->ring_stats.rx_dropped_npkts = stats.rx_dropped;
        xsk->ring_stats.rx_invalid_npkts = stats.rx_invalid_descs;
        xsk->ring_stats.tx_invalid_npkts = stats.tx_invalid_descs;
        xsk->ring_stats.rx_full_npkts = stats.rx_ring_full;
        xsk->ring_stats.rx_fill_empty_npkts = stats.rx_fill_ring_empty_descs;
        xsk->ring_stats.tx_empty_npkts = stats.tx_ring_empty_descs;

static void dump_app_stats(long dt)
    for (i = 0; i < num_socks && xsks[i]; i++) {
        char *fmt = "%-18s %'-14.0f %'-14lu\n";
        double rx_empty_polls_ps, fill_fail_polls_ps, copy_tx_sendtos_ps,
               tx_wakeup_sendtos_ps, opt_polls_ps;

        rx_empty_polls_ps = (xsks[i]->app_stats.rx_empty_polls -
                             xsks[i]->app_stats.prev_rx_empty_polls) * 1000000000. / dt;
        fill_fail_polls_ps = (xsks[i]->app_stats.fill_fail_polls -
                              xsks[i]->app_stats.prev_fill_fail_polls) * 1000000000. / dt;
        copy_tx_sendtos_ps = (xsks[i]->app_stats.copy_tx_sendtos -
                              xsks[i]->app_stats.prev_copy_tx_sendtos) * 1000000000. / dt;
        tx_wakeup_sendtos_ps = (xsks[i]->app_stats.tx_wakeup_sendtos -
                                xsks[i]->app_stats.prev_tx_wakeup_sendtos)
        opt_polls_ps = (xsks[i]->app_stats.opt_polls -
                        xsks[i]->app_stats.prev_opt_polls) * 1000000000. / dt;

        printf("\n%-18s %-14s %-14s\n", "", "calls/s", "count");
        printf(fmt, "rx empty polls", rx_empty_polls_ps, xsks[i]->app_stats.rx_empty_polls);
        printf(fmt, "fill fail polls", fill_fail_polls_ps,
               xsks[i]->app_stats.fill_fail_polls);
        printf(fmt, "copy tx sendtos", copy_tx_sendtos_ps,
               xsks[i]->app_stats.copy_tx_sendtos);
        printf(fmt, "tx wakeup sendtos", tx_wakeup_sendtos_ps,
               xsks[i]->app_stats.tx_wakeup_sendtos);
        printf(fmt, "opt polls", opt_polls_ps, xsks[i]->app_stats.opt_polls);

        xsks[i]->app_stats.prev_rx_empty_polls = xsks[i]->app_stats.rx_empty_polls;
        xsks[i]->app_stats.prev_fill_fail_polls = xsks[i]->app_stats.fill_fail_polls;
        xsks[i]->app_stats.prev_copy_tx_sendtos = xsks[i]->app_stats.copy_tx_sendtos;
        xsks[i]->app_stats.prev_tx_wakeup_sendtos = xsks[i]->app_stats.tx_wakeup_sendtos;
        xsks[i]->app_stats.prev_opt_polls = xsks[i]->app_stats.opt_polls;

    if (opt_tx_cycle_ns) {
        printf("\n%-18s %-10s %-10s %-10s %-10s %-10s\n",
               "", "period", "min", "ave", "max", "cycle");
        printf("%-18s %-10lu %-10lu %-10lu %-10lu %-10lu\n",
               "Cyclic TX", opt_tx_cycle_ns, tx_cycle_diff_min,
               (long)(tx_cycle_diff_ave / tx_cycle_cnt),
               tx_cycle_diff_max, tx_cycle_cnt);

static bool get_interrupt_number(void)
    f_int_proc = fopen("/proc/interrupts", "r");
    if (f_int_proc == NULL) {
        printf("Failed to open /proc/interrupts.\n");

    while (!feof(f_int_proc) && !found) {
        /* Make sure to read a full line at a time */
        if (fgets(line, sizeof(line), f_int_proc) == NULL ||
            line[strlen(line) - 1] != '\n') {
            printf("Error reading from interrupts file\n");

        /* Extract interrupt number from line */
        if (strstr(line, opt_irq_str) != NULL) {

static int get_irqs(void)
    char count_path[PATH_MAX];
    int total_intrs = -1;

    snprintf(count_path, sizeof(count_path),
             "/sys/kernel/irq/%i/per_cpu_count", irq_no);
    f_count_proc = fopen(count_path, "r");
    if (f_count_proc == NULL) {
        printf("Failed to open %s\n", count_path);

    if (fgets(line, sizeof(line), f_count_proc) == NULL ||
        line[strlen(line) - 1] != '\n') {
        printf("Error reading from %s\n", count_path);

        static const char com[2] = ",";

        token = strtok(line, com);
        while (token != NULL) {
            /* sum up interrupts across all cores */
            total_intrs += atoi(token);
            token = strtok(NULL, com);

    fclose(f_count_proc);

static void dump_driver_stats(long dt)
    for (i = 0; i < num_socks && xsks[i]; i++) {
        char *fmt = "%-18s %'-14.0f %'-14lu\n";

        int n_ints = get_irqs();

            printf("error getting intr info for intr %i\n", irq_no);
        xsks[i]->drv_stats.intrs = n_ints - irqs_at_init;

        intrs_ps = (xsks[i]->drv_stats.intrs - xsks[i]->drv_stats.prev_intrs) *

        printf("\n%-18s %-14s %-14s\n", "", "intrs/s", "count");
        printf(fmt, "irqs", intrs_ps, xsks[i]->drv_stats.intrs);

        xsks[i]->drv_stats.prev_intrs = xsks[i]->drv_stats.intrs;

static void dump_stats(void)
    unsigned long now = get_nsecs();
    long dt = now - prev_time;

    for (i = 0; i < num_socks && xsks[i]; i++) {
        char *fmt = "%-18s %'-14.0f %'-14lu\n";
        double rx_pps, tx_pps, dropped_pps, rx_invalid_pps, full_pps, fill_empty_pps,
               tx_invalid_pps, tx_empty_pps;

        rx_pps = (xsks[i]->ring_stats.rx_npkts - xsks[i]->ring_stats.prev_rx_npkts) *
        tx_pps = (xsks[i]->ring_stats.tx_npkts - xsks[i]->ring_stats.prev_tx_npkts) *

        printf("\n sock%d@", i);
        print_benchmark(false);

        printf("%-18s %-14s %-14s %-14.2f\n", "", "pps", "pkts",
        printf(fmt, "rx", rx_pps, xsks[i]->ring_stats.rx_npkts);
        printf(fmt, "tx", tx_pps, xsks[i]->ring_stats.tx_npkts);

        xsks[i]->ring_stats.prev_rx_npkts = xsks[i]->ring_stats.rx_npkts;
        xsks[i]->ring_stats.prev_tx_npkts = xsks[i]->ring_stats.tx_npkts;

        if (opt_extra_stats) {
            if (!xsk_get_xdp_stats(xsk_socket__fd(xsks[i]->xsk), xsks[i])) {
                dropped_pps = (xsks[i]->ring_stats.rx_dropped_npkts -
                               xsks[i]->ring_stats.prev_rx_dropped_npkts) *
                rx_invalid_pps = (xsks[i]->ring_stats.rx_invalid_npkts -
                                  xsks[i]->ring_stats.prev_rx_invalid_npkts) *
                tx_invalid_pps = (xsks[i]->ring_stats.tx_invalid_npkts -
                                  xsks[i]->ring_stats.prev_tx_invalid_npkts) *
                full_pps = (xsks[i]->ring_stats.rx_full_npkts -
                            xsks[i]->ring_stats.prev_rx_full_npkts) *
                fill_empty_pps = (xsks[i]->ring_stats.rx_fill_empty_npkts -
                                  xsks[i]->ring_stats.prev_rx_fill_empty_npkts) *
                tx_empty_pps = (xsks[i]->ring_stats.tx_empty_npkts -
                                xsks[i]->ring_stats.prev_tx_empty_npkts) *

                printf(fmt, "rx dropped", dropped_pps,
                       xsks[i]->ring_stats.rx_dropped_npkts);
                printf(fmt, "rx invalid", rx_invalid_pps,
                       xsks[i]->ring_stats.rx_invalid_npkts);
                printf(fmt, "tx invalid", tx_invalid_pps,
                       xsks[i]->ring_stats.tx_invalid_npkts);
                printf(fmt, "rx queue full", full_pps,
                       xsks[i]->ring_stats.rx_full_npkts);
                printf(fmt, "fill ring empty", fill_empty_pps,
                       xsks[i]->ring_stats.rx_fill_empty_npkts);
                printf(fmt, "tx ring empty", tx_empty_pps,
                       xsks[i]->ring_stats.tx_empty_npkts);

                xsks[i]->ring_stats.prev_rx_dropped_npkts =
                    xsks[i]->ring_stats.rx_dropped_npkts;
                xsks[i]->ring_stats.prev_rx_invalid_npkts =
                    xsks[i]->ring_stats.rx_invalid_npkts;
                xsks[i]->ring_stats.prev_tx_invalid_npkts =
                    xsks[i]->ring_stats.tx_invalid_npkts;
                xsks[i]->ring_stats.prev_rx_full_npkts =
                    xsks[i]->ring_stats.rx_full_npkts;
                xsks[i]->ring_stats.prev_rx_fill_empty_npkts =
                    xsks[i]->ring_stats.rx_fill_empty_npkts;
                xsks[i]->ring_stats.prev_tx_empty_npkts =
                    xsks[i]->ring_stats.tx_empty_npkts;

                printf("%-15s\n", "Error retrieving extra stats");

    dump_driver_stats(dt);

static bool is_benchmark_done(void)
    if (opt_duration > 0) {
        unsigned long dt = (get_nsecs() - start_time);

        if (dt >= opt_duration)
            benchmark_done = true;

    return benchmark_done;

static void *poller(void *arg)
    while (!is_benchmark_done()) {

static void remove_xdp_program(void)
    u32 curr_prog_id = 0;

    if (bpf_xdp_query_id(opt_ifindex, opt_xdp_flags, &curr_prog_id)) {
        printf("bpf_xdp_query_id failed\n");

    if (prog_id == curr_prog_id)
        bpf_xdp_detach(opt_ifindex, opt_xdp_flags, NULL);
    else if (!curr_prog_id)
        printf("couldn't find a prog id on a given interface\n");

        printf("program on interface changed, not removing\n");

static void int_exit(int sig)
    benchmark_done = true;

static void __exit_with_error(int error, const char *file, const char *func,
    fprintf(stderr, "%s:%s:%i: errno: %d/\"%s\"\n", file, func,
            line, error, strerror(error));

    if (opt_num_xsks > 1)
        remove_xdp_program();

#define exit_with_error(error) __exit_with_error(error, __FILE__, __func__, __LINE__)

static void xdpsock_cleanup(void)
    struct xsk_umem *umem = xsks[0]->umem->umem;
    int i, cmd = CLOSE_CONN;

    for (i = 0; i < num_socks; i++)
        xsk_socket__delete(xsks[i]->xsk);
    (void)xsk_umem__delete(umem);

    if (opt_reduced_cap) {
        if (write(sock, &cmd, sizeof(int)) < 0)
            exit_with_error(errno);

    if (opt_num_xsks > 1)
        remove_xdp_program();

static void swap_mac_addresses(void *data)
    struct ether_header *eth = (struct ether_header *)data;
    struct ether_addr *src_addr = (struct ether_addr *)&eth->ether_shost;
    struct ether_addr *dst_addr = (struct ether_addr *)&eth->ether_dhost;
    struct ether_addr tmp;

    *src_addr = *dst_addr;

static void hex_dump(void *pkt, size_t length, u64 addr)
    const unsigned char *address = (unsigned char *)pkt;
    const unsigned char *line = address;
    size_t line_size = 32;

    sprintf(buf, "addr=%llu", addr);
    printf("length = %zu\n", length);
    printf("%s | ", buf);
    while (length-- > 0) {
        printf("%02X ", *address++);
        if (!(++i % line_size) || (length == 0 && i % line_size)) {
            while (i++ % line_size)

            printf(" | "); /* right close */
            while (line < address) {
                printf("%c", (c < 33 || c == 255) ? 0x2E : c);

            printf("%s | ", buf);
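
/*
 * memset32_htonl() tiles a 32-bit value across the payload in network byte
 * order, so the default fill pattern 0x12345678 appears on the wire as the
 * repeating byte sequence 12 34 56 78.
 */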
static void *memset32_htonl(void *dest, u32 val, u32 size)
    u32 *ptr = (u32 *)dest;

    for (i = 0; i < (size & (~0x3)); i += 4)

    for (; i < size; i++)
        ((char *)dest)[i] = ((char *)&val)[i & 3];

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline unsigned short from32to16(unsigned int x)
    /* add up 16-bit and 16-bit for 16+c bit */
    x = (x & 0xffff) + (x >> 16);
    x = (x & 0xffff) + (x >> 16);

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static unsigned int do_csum(const unsigned char *buff, int len)
    unsigned int result = 0;

    odd = 1 & (unsigned long)buff;
#ifdef __LITTLE_ENDIAN
        result += (*buff << 8);

    if (2 & (unsigned long)buff) {
        result += *(unsigned short *)buff;

        const unsigned char *end = buff +
                                   ((unsigned int)len & ~3);
        unsigned int carry = 0;

            unsigned int w = *(unsigned int *)buff;

            carry = (w > result);
        } while (buff < end);

    result = (result & 0xffff) + (result >> 16);

        result += *(unsigned short *)buff;

#ifdef __LITTLE_ENDIAN
        result += (*buff << 8);

    result = from32to16(result);

        result = ((result >> 8) & 0xff) | ((result & 0xff) << 8);

/*
 * This is a version of ip_compute_csum() optimized for IP headers,
 * which always checksum on 4 octet boundaries.
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline __sum16 ip_fast_csum(const void *iph, unsigned int ihl)
    return (__sum16)~do_csum(iph, ihl * 4);

/*
 * Fold a partial checksum
 * This function code has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16 csum_fold(__wsum csum)
    sum = (sum & 0xffff) + (sum >> 16);
    sum = (sum & 0xffff) + (sum >> 16);
    return (__sum16)~sum;

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
static inline u32 from64to32(u64 x)
    /* add up 32-bit and 32-bit for 32+c bit */
    x = (x & 0xffffffff) + (x >> 32);
    x = (x & 0xffffffff) + (x >> 32);

__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                          __u32 len, __u8 proto, __wsum sum);

/*
 * This function code has been taken from
 * Linux kernel lib/checksum.c
 */
__wsum csum_tcpudp_nofold(__be32 saddr, __be32 daddr,
                          __u32 len, __u8 proto, __wsum sum)
    unsigned long long s = (u32)sum;

#ifdef __BIG_ENDIAN__

    s += (proto + len) << 8;

    return (__wsum)from64to32(s);

/*
 * This function has been taken from
 * Linux kernel include/asm-generic/checksum.h
 */
static inline __sum16
csum_tcpudp_magic(__be32 saddr, __be32 daddr, __u32 len,
                  __u8 proto, __wsum sum)
    return csum_fold(csum_tcpudp_nofold(saddr, daddr, len, proto, sum));
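
/*
 * udp_csum() below combines the helpers above: it sums the UDP header and
 * payload as 16-bit words, then lets csum_tcpudp_magic() add the IPv4
 * pseudo-header (source, destination, protocol, length) and fold the result
 * into the final inverted 16-bit checksum.
 */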
static inline u16 udp_csum(u32 saddr, u32 daddr, u32 len,
                           u8 proto, u16 *udp_pkt)
    /* udp hdr and data */
    for (; cnt < len; cnt += 2)
        csum += udp_pkt[cnt >> 1];

    return csum_tcpudp_magic(saddr, daddr, len, proto, csum);

#define ETH_FCS_SIZE 4

#define ETH_HDR_SIZE (opt_vlan_tag ? sizeof(struct vlan_ethhdr) : \
                      sizeof(struct ethhdr))
#define PKTGEN_HDR_SIZE (opt_tstamp ? sizeof(struct pktgen_hdr) : 0)
#define PKT_HDR_SIZE (ETH_HDR_SIZE + sizeof(struct iphdr) + \
                      sizeof(struct udphdr) + PKTGEN_HDR_SIZE)
#define PKTGEN_HDR_OFFSET (ETH_HDR_SIZE + sizeof(struct iphdr) + \
                           sizeof(struct udphdr))
#define PKTGEN_SIZE_MIN (PKTGEN_HDR_OFFSET + sizeof(struct pktgen_hdr) + \

#define PKT_SIZE (opt_pkt_size - ETH_FCS_SIZE)
#define IP_PKT_SIZE (PKT_SIZE - ETH_HDR_SIZE)
#define UDP_PKT_SIZE (IP_PKT_SIZE - sizeof(struct iphdr))
#define UDP_PKT_DATA_SIZE (UDP_PKT_SIZE - \
                           (sizeof(struct udphdr) + PKTGEN_HDR_SIZE))
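
/*
 * Worked example with the defaults (64-byte packets, no VLAN tag, no
 * timestamp): PKT_SIZE = 64 - 4 = 60 bytes handed to the NIC,
 * IP_PKT_SIZE = 60 - 14 = 46, UDP_PKT_SIZE = 46 - 20 = 26, and
 * UDP_PKT_DATA_SIZE = 26 - 8 = 18 bytes of fill pattern.
 */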
static u8 pkt_data[XSK_UMEM__DEFAULT_FRAME_SIZE];

static void gen_eth_hdr_data(void)
    struct pktgen_hdr *pktgen_hdr;
    struct udphdr *udp_hdr;
    struct iphdr *ip_hdr;

        struct vlan_ethhdr *veth_hdr = (struct vlan_ethhdr *)pkt_data;

        udp_hdr = (struct udphdr *)(pkt_data +
                                    sizeof(struct vlan_ethhdr) +
                                    sizeof(struct iphdr));
        ip_hdr = (struct iphdr *)(pkt_data +
                                  sizeof(struct vlan_ethhdr));
        pktgen_hdr = (struct pktgen_hdr *)(pkt_data +
                                           sizeof(struct vlan_ethhdr) +
                                           sizeof(struct iphdr) +
                                           sizeof(struct udphdr));
        /* ethernet & VLAN header */
        memcpy(veth_hdr->h_dest, &opt_txdmac, ETH_ALEN);
        memcpy(veth_hdr->h_source, &opt_txsmac, ETH_ALEN);
        veth_hdr->h_vlan_proto = htons(ETH_P_8021Q);
        vlan_tci = opt_pkt_vlan_id & VLAN_VID_MASK;
        vlan_tci |= (opt_pkt_vlan_pri << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
        veth_hdr->h_vlan_TCI = htons(vlan_tci);
        veth_hdr->h_vlan_encapsulated_proto = htons(ETH_P_IP);

        struct ethhdr *eth_hdr = (struct ethhdr *)pkt_data;

        udp_hdr = (struct udphdr *)(pkt_data +
                                    sizeof(struct ethhdr) +
                                    sizeof(struct iphdr));
        ip_hdr = (struct iphdr *)(pkt_data +
                                  sizeof(struct ethhdr));
        pktgen_hdr = (struct pktgen_hdr *)(pkt_data +
                                           sizeof(struct ethhdr) +
                                           sizeof(struct iphdr) +
                                           sizeof(struct udphdr));
        /* ethernet header */
        memcpy(eth_hdr->h_dest, &opt_txdmac, ETH_ALEN);
        memcpy(eth_hdr->h_source, &opt_txsmac, ETH_ALEN);
        eth_hdr->h_proto = htons(ETH_P_IP);

    ip_hdr->version = IPVERSION;
    ip_hdr->ihl = 0x5; /* 20 byte header */
    ip_hdr->tot_len = htons(IP_PKT_SIZE);
    ip_hdr->frag_off = 0;
    ip_hdr->ttl = IPDEFTTL;
    ip_hdr->protocol = IPPROTO_UDP;
    ip_hdr->saddr = htonl(0x0a0a0a10);
    ip_hdr->daddr = htonl(0x0a0a0a20);

    /* IP header checksum */
    ip_hdr->check = ip_fast_csum((const void *)ip_hdr, ip_hdr->ihl);

    udp_hdr->source = htons(0x1000);
    udp_hdr->dest = htons(0x1000);
    udp_hdr->len = htons(UDP_PKT_SIZE);

        pktgen_hdr->pgh_magic = htonl(PKTGEN_MAGIC);

    memset32_htonl(pkt_data + PKT_HDR_SIZE, opt_pkt_fill_pattern,

    /* UDP header checksum */
    udp_hdr->check = udp_csum(ip_hdr->saddr, ip_hdr->daddr, UDP_PKT_SIZE,
                              IPPROTO_UDP, (u16 *)udp_hdr);

static void gen_eth_frame(struct xsk_umem_info *umem, u64 addr)
    memcpy(xsk_umem__get_data(umem->buffer, addr), pkt_data,

static struct xsk_umem_info *xsk_configure_umem(void *buffer, u64 size)
    struct xsk_umem_info *umem;
    struct xsk_umem_config cfg = {
        /* We recommend that you set the fill ring size >= HW RX ring size +
         * AF_XDP RX ring size. Make sure you fill up the fill ring with
         * buffers at regular intervals; with this setting you will avoid
         * allocation failures in the driver. Such failures are usually quite
         * expensive, since drivers have not been written to assume that
         * allocation failures are common. For regular sockets, kernel-allocated
         * memory is used, and that only runs out in OOM situations, which
         * should be rare.
        .fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS * 2,
        .comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
        .frame_size = opt_xsk_frame_size,
        .frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM,
        .flags = opt_umem_flags

    umem = calloc(1, sizeof(*umem));
        exit_with_error(errno);

    ret = xsk_umem__create(&umem->umem, buffer, size, &umem->fq, &umem->cq,
        exit_with_error(-ret);

    umem->buffer = buffer;
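
/*
 * xsk_populate_fill_ring() follows the usual libbpf producer protocol:
 * reserve a batch of fill-ring slots, write one UMEM frame address into
 * each, then submit the batch so the kernel can use those frames for Rx.
 */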
static void xsk_populate_fill_ring(struct xsk_umem_info *umem)
    ret = xsk_ring_prod__reserve(&umem->fq,
                                 XSK_RING_PROD__DEFAULT_NUM_DESCS * 2, &idx);
    if (ret != XSK_RING_PROD__DEFAULT_NUM_DESCS * 2)
        exit_with_error(-ret);
    for (i = 0; i < XSK_RING_PROD__DEFAULT_NUM_DESCS * 2; i++)
        *xsk_ring_prod__fill_addr(&umem->fq, idx++) =
            i * opt_xsk_frame_size;
    xsk_ring_prod__submit(&umem->fq, XSK_RING_PROD__DEFAULT_NUM_DESCS * 2);

static struct xsk_socket_info *xsk_configure_socket(struct xsk_umem_info *umem,
    struct xsk_socket_config cfg;
    struct xsk_socket_info *xsk;
    struct xsk_ring_cons *rxr;
    struct xsk_ring_prod *txr;

    xsk = calloc(1, sizeof(*xsk));
        exit_with_error(errno);

    cfg.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
    cfg.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
    if (opt_num_xsks > 1 || opt_reduced_cap)
        cfg.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD;

        cfg.libbpf_flags = 0;
    cfg.xdp_flags = opt_xdp_flags;
    cfg.bind_flags = opt_xdp_bind_flags;

    rxr = rx ? &xsk->rx : NULL;
    txr = tx ? &xsk->tx : NULL;
    ret = xsk_socket__create(&xsk->xsk, opt_if, opt_queue, umem->umem,
        exit_with_error(-ret);

    ret = bpf_xdp_query_id(opt_ifindex, opt_xdp_flags, &prog_id);
        exit_with_error(-ret);

    xsk->app_stats.rx_empty_polls = 0;
    xsk->app_stats.fill_fail_polls = 0;
    xsk->app_stats.copy_tx_sendtos = 0;
    xsk->app_stats.tx_wakeup_sendtos = 0;
    xsk->app_stats.opt_polls = 0;
    xsk->app_stats.prev_rx_empty_polls = 0;
    xsk->app_stats.prev_fill_fail_polls = 0;
    xsk->app_stats.prev_copy_tx_sendtos = 0;
    xsk->app_stats.prev_tx_wakeup_sendtos = 0;
    xsk->app_stats.prev_opt_polls = 0;

static struct option long_options[] = {
    {"rxdrop", no_argument, 0, 'r'},
    {"txonly", no_argument, 0, 't'},
    {"l2fwd", no_argument, 0, 'l'},
    {"interface", required_argument, 0, 'i'},
    {"queue", required_argument, 0, 'q'},
    {"poll", no_argument, 0, 'p'},
    {"xdp-skb", no_argument, 0, 'S'},
    {"xdp-native", no_argument, 0, 'N'},
    {"interval", required_argument, 0, 'n'},
    {"retries", required_argument, 0, 'O'},
    {"zero-copy", no_argument, 0, 'z'},
    {"copy", no_argument, 0, 'c'},
    {"frame-size", required_argument, 0, 'f'},
    {"no-need-wakeup", no_argument, 0, 'm'},
    {"unaligned", no_argument, 0, 'u'},
    {"shared-umem", no_argument, 0, 'M'},
    {"force", no_argument, 0, 'F'},
    {"duration", required_argument, 0, 'd'},
    {"clock", required_argument, 0, 'w'},
    {"batch-size", required_argument, 0, 'b'},
    {"tx-pkt-count", required_argument, 0, 'C'},
    {"tx-pkt-size", required_argument, 0, 's'},
    {"tx-pkt-pattern", required_argument, 0, 'P'},
    {"tx-vlan", no_argument, 0, 'V'},
    {"tx-vlan-id", required_argument, 0, 'J'},
    {"tx-vlan-pri", required_argument, 0, 'K'},
    {"tx-dmac", required_argument, 0, 'G'},
    {"tx-smac", required_argument, 0, 'H'},
    {"tx-cycle", required_argument, 0, 'T'},
    {"tstamp", no_argument, 0, 'y'},
    {"policy", required_argument, 0, 'W'},
    {"schpri", required_argument, 0, 'U'},
    {"extra-stats", no_argument, 0, 'x'},
    {"quiet", no_argument, 0, 'Q'},
    {"app-stats", no_argument, 0, 'a'},
    {"irq-string", required_argument, 0, 'I'},
    {"busy-poll", no_argument, 0, 'B'},
    {"reduce-cap", no_argument, 0, 'R'},

static void usage(const char *prog)
    "  Usage: %s [OPTIONS]\n"
    "  -r, --rxdrop           Discard all incoming packets (default)\n"
    "  -t, --txonly           Only send packets\n"
    "  -l, --l2fwd            MAC swap L2 forwarding\n"
    "  -i, --interface=n      Run on interface n\n"
    "  -q, --queue=n          Use queue n (default 0)\n"
    "  -p, --poll             Use poll syscall\n"
    "  -S, --xdp-skb          Use XDP skb mode\n"
    "  -N, --xdp-native       Enforce XDP native mode\n"
    "  -n, --interval=n       Specify statistics update interval (default 1 sec).\n"
    "  -O, --retries=n        Specify number of time-out retry attempts (1s interval). Default: 3.\n"
    "  -z, --zero-copy        Force zero-copy mode.\n"
    "  -c, --copy             Force copy mode.\n"
    "  -m, --no-need-wakeup   Turn off use of driver need wakeup flag.\n"
    "  -f, --frame-size=n     Set the frame size (must be a power of two in aligned mode, default is %d).\n"
    "  -u, --unaligned        Enable unaligned chunk placement\n"
    "  -M, --shared-umem      Enable XDP_SHARED_UMEM (cannot be used with -R)\n"
    "  -F, --force            Force loading the XDP prog\n"
    "  -d, --duration=n       Duration in secs to run command.\n"
    "                         Default: forever.\n"
    "  -w, --clock=CLOCK      Clock NAME (default MONOTONIC).\n"
    "  -b, --batch-size=n     Batch size for sending or receiving\n"
    "                         packets. Default: %d\n"
    "  -C, --tx-pkt-count=n   Number of packets to send.\n"
    "                         Default: Continuous packets.\n"
    "  -s, --tx-pkt-size=n    Transmit packet size.\n"
    "                         (Default: %d bytes)\n"
    "                         Min size: %d, Max size %d.\n"
    "  -P, --tx-pkt-pattern=n Packet fill pattern. Default: 0x%x\n"
    "  -V, --tx-vlan          Send VLAN tagged packets (For -t|--txonly)\n"
    "  -J, --tx-vlan-id=n     Tx VLAN ID [1-4095]. Default: %d (For -V|--tx-vlan)\n"
    "  -K, --tx-vlan-pri=n    Tx VLAN Priority [0-7]. Default: %d (For -V|--tx-vlan)\n"
    "  -G, --tx-dmac=<MAC>    Dest MAC addr of TX frame in aa:bb:cc:dd:ee:ff format (For -V|--tx-vlan)\n"
    "  -H, --tx-smac=<MAC>    Src MAC addr of TX frame in aa:bb:cc:dd:ee:ff format (For -V|--tx-vlan)\n"
    "  -T, --tx-cycle=n       Tx cycle time in micro-seconds (For -t|--txonly).\n"
    "  -y, --tstamp           Add time-stamp to packet (For -t|--txonly).\n"
    "  -W, --policy=POLICY    Schedule policy. Default: SCHED_OTHER\n"
    "  -U, --schpri=n         Schedule priority. Default: %d\n"
    "  -x, --extra-stats      Display extra statistics.\n"
    "  -Q, --quiet            Do not display any stats.\n"
    "  -a, --app-stats        Display application (syscall) statistics.\n"
    "  -I, --irq-string       Display driver interrupt statistics for interface associated with irq-string.\n"
    "  -B, --busy-poll        Busy poll.\n"
    "  -R, --reduce-cap       Use reduced capabilities (cannot be used with -M)\n"

    fprintf(stderr, str, prog, XSK_UMEM__DEFAULT_FRAME_SIZE,
            opt_batch_size, MIN_PKT_SIZE, MIN_PKT_SIZE,
            XSK_UMEM__DEFAULT_FRAME_SIZE, opt_pkt_fill_pattern,
            VLAN_VID__DEFAULT, VLAN_PRI__DEFAULT,
            SCHED_PRI__DEFAULT);

static void parse_command_line(int argc, char **argv)
    int option_index, c;

        c = getopt_long(argc, argv,
                        "Frtli:q:pSNn:w:O:czf:muMd:b:C:s:P:VJ:K:G:H:T:yW:U:xQaI:BR",
                        long_options, &option_index);

            opt_bench = BENCH_RXDROP;

            opt_bench = BENCH_TXONLY;

            opt_bench = BENCH_L2FWD;

            opt_queue = atoi(optarg);

            opt_xdp_flags |= XDP_FLAGS_SKB_MODE;
            opt_xdp_bind_flags |= XDP_COPY;

            /* default, set below */

            opt_interval = atoi(optarg);

            if (get_clockid(&opt_clock, optarg)) {
                        "ERROR: Invalid clock %s. Default to CLOCK_MONOTONIC.\n",
                opt_clock = CLOCK_MONOTONIC;

            opt_retries = atoi(optarg);

            opt_xdp_bind_flags |= XDP_ZEROCOPY;

            opt_xdp_bind_flags |= XDP_COPY;

            opt_umem_flags |= XDP_UMEM_UNALIGNED_CHUNK_FLAG;
            opt_unaligned_chunks = 1;
            opt_mmap_flags = MAP_HUGETLB;

            opt_xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;

            opt_xsk_frame_size = atoi(optarg);

            opt_need_wakeup = false;
            opt_xdp_bind_flags &= ~XDP_USE_NEED_WAKEUP;

            opt_num_xsks = MAX_SOCKS;

            opt_duration = atoi(optarg);
            opt_duration *= 1000000000;

            opt_batch_size = atoi(optarg);

            opt_pkt_count = atoi(optarg);

            opt_pkt_size = atoi(optarg);
            if (opt_pkt_size > (XSK_UMEM__DEFAULT_FRAME_SIZE) ||
                opt_pkt_size < MIN_PKT_SIZE) {
                        "ERROR: Invalid frame size %d\n",
                usage(basename(argv[0]));

            opt_pkt_fill_pattern = strtol(optarg, NULL, 16);

            opt_vlan_tag = true;

            opt_pkt_vlan_id = atoi(optarg);

            opt_pkt_vlan_pri = atoi(optarg);

            if (!ether_aton_r(optarg,
                              (struct ether_addr *)&opt_txdmac)) {
                fprintf(stderr, "Invalid dmac address:%s\n",
                usage(basename(argv[0]));

            if (!ether_aton_r(optarg,
                              (struct ether_addr *)&opt_txsmac)) {
                fprintf(stderr, "Invalid smac address:%s\n",
                usage(basename(argv[0]));

            opt_tx_cycle_ns = atoi(optarg);
            opt_tx_cycle_ns *= NSEC_PER_USEC;

            if (get_schpolicy(&opt_schpolicy, optarg)) {
                        "ERROR: Invalid policy %s. Default to SCHED_OTHER.\n",
                opt_schpolicy = SCHED_OTHER;

            opt_schprio = atoi(optarg);

            opt_extra_stats = 1;

            opt_irq_str = optarg;
            if (get_interrupt_number())
                irqs_at_init = get_irqs();
            if (irqs_at_init < 0) {
                fprintf(stderr, "ERROR: Failed to get irqs for %s\n", opt_irq_str);
                usage(basename(argv[0]));

            opt_reduced_cap = true;

            usage(basename(argv[0]));

    if (!(opt_xdp_flags & XDP_FLAGS_SKB_MODE))
        opt_xdp_flags |= XDP_FLAGS_DRV_MODE;

    opt_ifindex = if_nametoindex(opt_if);
        fprintf(stderr, "ERROR: interface \"%s\" does not exist\n",
        usage(basename(argv[0]));

    if ((opt_xsk_frame_size & (opt_xsk_frame_size - 1)) &&
        !opt_unaligned_chunks) {
        fprintf(stderr, "--frame-size=%d is not a power of two\n",
                opt_xsk_frame_size);
        usage(basename(argv[0]));

    if (opt_reduced_cap && opt_num_xsks > 1) {
        fprintf(stderr, "ERROR: -M and -R cannot be used together\n");
        usage(basename(argv[0]));
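
/*
 * kick_tx() issues a zero-length sendto() purely to wake the kernel Tx
 * path. The errno values it tolerates (ENOBUFS, EAGAIN, EBUSY, ENETDOWN)
 * are transient conditions where retrying on the next pass is preferable to
 * aborting the benchmark.
 */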
static void kick_tx(struct xsk_socket_info *xsk)
    ret = sendto(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, 0);
    if (ret >= 0 || errno == ENOBUFS || errno == EAGAIN ||
        errno == EBUSY || errno == ENETDOWN)

    exit_with_error(errno);

static inline void complete_tx_l2fwd(struct xsk_socket_info *xsk)
    struct xsk_umem_info *umem = xsk->umem;
    u32 idx_cq = 0, idx_fq = 0;

    if (!xsk->outstanding_tx)

    /* In copy mode, Tx is driven by a syscall so we need to use e.g. sendto() to
     * really send the packets. In zero-copy mode we do not have to do this, since Tx
     * is driven by the NAPI loop. So as an optimization, we do not have to call
     * sendto() all the time in zero-copy mode for l2fwd.
     */
    if (opt_xdp_bind_flags & XDP_COPY) {
        xsk->app_stats.copy_tx_sendtos++;

    ndescs = (xsk->outstanding_tx > opt_batch_size) ? opt_batch_size :
             xsk->outstanding_tx;

    /* re-add completed Tx buffers */
    rcvd = xsk_ring_cons__peek(&umem->cq, ndescs, &idx_cq);

        ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);
        while (ret != rcvd) {
                exit_with_error(-ret);
            if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&umem->fq)) {
                xsk->app_stats.fill_fail_polls++;
                recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL,
            ret = xsk_ring_prod__reserve(&umem->fq, rcvd, &idx_fq);

        for (i = 0; i < rcvd; i++)
            *xsk_ring_prod__fill_addr(&umem->fq, idx_fq++) =
                *xsk_ring_cons__comp_addr(&umem->cq, idx_cq++);

        xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
        xsk_ring_cons__release(&xsk->umem->cq, rcvd);
        xsk->outstanding_tx -= rcvd;

static inline void complete_tx_only(struct xsk_socket_info *xsk,
    if (!xsk->outstanding_tx)

    if (!opt_need_wakeup || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
        xsk->app_stats.tx_wakeup_sendtos++;

    rcvd = xsk_ring_cons__peek(&xsk->umem->cq, batch_size, &idx);
        xsk_ring_cons__release(&xsk->umem->cq, rcvd);
        xsk->outstanding_tx -= rcvd;
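
/*
 * The rxdrop fast path below does the minimum work per batch: peek up to
 * opt_batch_size Rx descriptors, hand every frame address straight back to
 * the fill ring, and release the Rx entries. Packet data is only touched by
 * the hex dump, which is compiled out unless DEBUG_HEXDUMP is set.
 */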
static void rx_drop(struct xsk_socket_info *xsk)
    unsigned int rcvd, i;
    u32 idx_rx = 0, idx_fq = 0;

    rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
        if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
            xsk->app_stats.rx_empty_polls++;
            recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);

    ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);
    while (ret != rcvd) {
            exit_with_error(-ret);
        if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
            xsk->app_stats.fill_fail_polls++;
            recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);
        ret = xsk_ring_prod__reserve(&xsk->umem->fq, rcvd, &idx_fq);

    for (i = 0; i < rcvd; i++) {
        u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
        u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;
        u64 orig = xsk_umem__extract_addr(addr);

        addr = xsk_umem__add_offset_to_addr(addr);
        char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

        hex_dump(pkt, len, addr);
        *xsk_ring_prod__fill_addr(&xsk->umem->fq, idx_fq++) = orig;

    xsk_ring_prod__submit(&xsk->umem->fq, rcvd);
    xsk_ring_cons__release(&xsk->rx, rcvd);
    xsk->ring_stats.rx_npkts += rcvd;

static void rx_drop_all(void)
    struct pollfd fds[MAX_SOCKS] = {};

    for (i = 0; i < num_socks; i++) {
        fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
        fds[i].events = POLLIN;

            for (i = 0; i < num_socks; i++)
                xsks[i]->app_stats.opt_polls++;
            ret = poll(fds, num_socks, opt_timeout);

        for (i = 0; i < num_socks; i++)

static int tx_only(struct xsk_socket_info *xsk, u32 *frame_nb,
                   int batch_size, unsigned long tx_ns)
    u32 idx, tv_sec, tv_usec;

    while (xsk_ring_prod__reserve(&xsk->tx, batch_size, &idx) <
        complete_tx_only(xsk, batch_size);

        tv_sec = (u32)(tx_ns / NSEC_PER_SEC);
        tv_usec = (u32)((tx_ns % NSEC_PER_SEC) / 1000);

    for (i = 0; i < batch_size; i++) {
        struct xdp_desc *tx_desc = xsk_ring_prod__tx_desc(&xsk->tx,
        tx_desc->addr = (*frame_nb + i) * opt_xsk_frame_size;
        tx_desc->len = PKT_SIZE;

            struct pktgen_hdr *pktgen_hdr;
            u64 addr = tx_desc->addr;

            pkt = xsk_umem__get_data(xsk->umem->buffer, addr);
            pktgen_hdr = (struct pktgen_hdr *)(pkt + PKTGEN_HDR_OFFSET);

            pktgen_hdr->seq_num = htonl(sequence++);
            pktgen_hdr->tv_sec = htonl(tv_sec);
            pktgen_hdr->tv_usec = htonl(tv_usec);

            hex_dump(pkt, PKT_SIZE, addr);

    xsk_ring_prod__submit(&xsk->tx, batch_size);
    xsk->ring_stats.tx_npkts += batch_size;
    xsk->outstanding_tx += batch_size;
    *frame_nb += batch_size;
    *frame_nb %= NUM_FRAMES;
    complete_tx_only(xsk, batch_size);

static inline int get_batch_size(int pkt_cnt)
        return opt_batch_size;

    if (pkt_cnt + opt_batch_size <= opt_pkt_count)
        return opt_batch_size;

    return opt_pkt_count - pkt_cnt;

static void complete_tx_only_all(void)
        for (i = 0; i < num_socks; i++) {
            if (xsks[i]->outstanding_tx) {
                complete_tx_only(xsks[i], opt_batch_size);
                pending = !!xsks[i]->outstanding_tx;

    } while (pending && opt_retries-- > 0);
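
/*
 * In cyclic-Tx mode (--tx-cycle), tx_only_all() sleeps to an absolute
 * deadline with clock_nanosleep(TIMER_ABSTIME) on the selected clock, sends
 * one batch, then advances the deadline by the cycle time. The min/ave/max
 * gap between wake-up time and deadline is what dump_app_stats() reports on
 * the "Cyclic TX" line.
 */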
static void tx_only_all(void)
    struct pollfd fds[MAX_SOCKS] = {};
    u32 frame_nb[MAX_SOCKS] = {};
    unsigned long next_tx_ns = 0;

    if (opt_poll && opt_tx_cycle_ns) {
            "Error: --poll and --tx-cycles are both set\n");

    for (i = 0; i < num_socks; i++) {
        fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
        fds[i].events = POLLOUT;
    if (opt_tx_cycle_ns) {
        /* Align Tx time to micro-second boundary */
        next_tx_ns = (get_nsecs() / NSEC_PER_USEC + 1) *
        next_tx_ns += opt_tx_cycle_ns;

        /* Initialize periodic Tx scheduling variance */
        tx_cycle_diff_min = 1000000000;
        tx_cycle_diff_max = 0;
        tx_cycle_diff_ave = 0.0;

    while ((opt_pkt_count && pkt_cnt < opt_pkt_count) || !opt_pkt_count) {
        int batch_size = get_batch_size(pkt_cnt);
        unsigned long tx_ns = 0;
        struct timespec next;

            for (i = 0; i < num_socks; i++)
                xsks[i]->app_stats.opt_polls++;
            ret = poll(fds, num_socks, opt_timeout);

            if (!(fds[0].revents & POLLOUT))

        if (opt_tx_cycle_ns) {
            next.tv_sec = next_tx_ns / NSEC_PER_SEC;
            next.tv_nsec = next_tx_ns % NSEC_PER_SEC;
            err = clock_nanosleep(opt_clock, TIMER_ABSTIME, &next, NULL);
                    "clock_nanosleep failed. Err:%d errno:%d\n",

            /* Measure periodic Tx scheduling variance */
            tx_ns = get_nsecs();
            diff = tx_ns - next_tx_ns;
            if (diff < tx_cycle_diff_min)
                tx_cycle_diff_min = diff;

            if (diff > tx_cycle_diff_max)
                tx_cycle_diff_max = diff;

            tx_cycle_diff_ave += (double)diff;
        } else if (opt_tstamp) {
            tx_ns = get_nsecs();

        for (i = 0; i < num_socks; i++)
            tx_cnt += tx_only(xsks[i], &frame_nb[i], batch_size, tx_ns);

        if (opt_tx_cycle_ns)
            next_tx_ns += opt_tx_cycle_ns;

    complete_tx_only_all();

static void l2fwd(struct xsk_socket_info *xsk)
    unsigned int rcvd, i;
    u32 idx_rx = 0, idx_tx = 0;

    complete_tx_l2fwd(xsk);

    rcvd = xsk_ring_cons__peek(&xsk->rx, opt_batch_size, &idx_rx);
        if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->umem->fq)) {
            xsk->app_stats.rx_empty_polls++;
            recvfrom(xsk_socket__fd(xsk->xsk), NULL, 0, MSG_DONTWAIT, NULL, NULL);

    xsk->ring_stats.rx_npkts += rcvd;

    ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);
    while (ret != rcvd) {
            exit_with_error(-ret);
        complete_tx_l2fwd(xsk);
        if (opt_busy_poll || xsk_ring_prod__needs_wakeup(&xsk->tx)) {
            xsk->app_stats.tx_wakeup_sendtos++;
        ret = xsk_ring_prod__reserve(&xsk->tx, rcvd, &idx_tx);

    for (i = 0; i < rcvd; i++) {
        u64 addr = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx)->addr;
        u32 len = xsk_ring_cons__rx_desc(&xsk->rx, idx_rx++)->len;

        addr = xsk_umem__add_offset_to_addr(addr);
        char *pkt = xsk_umem__get_data(xsk->umem->buffer, addr);

        swap_mac_addresses(pkt);

        hex_dump(pkt, len, addr);
        xsk_ring_prod__tx_desc(&xsk->tx, idx_tx)->addr = orig;
        xsk_ring_prod__tx_desc(&xsk->tx, idx_tx++)->len = len;

    xsk_ring_prod__submit(&xsk->tx, rcvd);
    xsk_ring_cons__release(&xsk->rx, rcvd);

    xsk->ring_stats.tx_npkts += rcvd;
    xsk->outstanding_tx += rcvd;

static void l2fwd_all(void)
    struct pollfd fds[MAX_SOCKS] = {};

        for (i = 0; i < num_socks; i++) {
            fds[i].fd = xsk_socket__fd(xsks[i]->xsk);
            fds[i].events = POLLOUT | POLLIN;
            xsks[i]->app_stats.opt_polls++;

        ret = poll(fds, num_socks, opt_timeout);

        for (i = 0; i < num_socks; i++)

static void load_xdp_program(char **argv, struct bpf_object **obj)
    struct bpf_prog_load_attr prog_load_attr = {
        .prog_type = BPF_PROG_TYPE_XDP,
    char xdp_filename[256];

    snprintf(xdp_filename, sizeof(xdp_filename), "%s_kern.o", argv[0]);
    prog_load_attr.file = xdp_filename;

    if (bpf_prog_load_xattr(&prog_load_attr, obj, &prog_fd))
        fprintf(stderr, "ERROR: no program found: %s\n",

    if (bpf_xdp_attach(opt_ifindex, prog_fd, opt_xdp_flags, NULL) < 0) {
        fprintf(stderr, "ERROR: link set xdp fd failed\n");

static void enter_xsks_into_map(struct bpf_object *obj)
    struct bpf_map *map;

    map = bpf_object__find_map_by_name(obj, "xsks_map");
    xsks_map = bpf_map__fd(map);
        fprintf(stderr, "ERROR: no xsks map found: %s\n",
                strerror(xsks_map));

    for (i = 0; i < num_socks; i++) {
        int fd = xsk_socket__fd(xsks[i]->xsk);

        ret = bpf_map_update_elem(xsks_map, &key, &fd, 0);
            fprintf(stderr, "ERROR: bpf_map_update_elem %d\n", i);
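
/*
 * With --busy-poll, apply_setsockopt() enables SO_PREFER_BUSY_POLL,
 * SO_BUSY_POLL and SO_BUSY_POLL_BUDGET on each socket, so the
 * recvfrom()/sendto() wake-up calls in the datapath drive NAPI processing
 * from syscall context; the poll budget is tied to opt_batch_size.
 */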
static void apply_setsockopt(struct xsk_socket_info *xsk)
    if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_PREFER_BUSY_POLL,
                   (void *)&sock_opt, sizeof(sock_opt)) < 0)
        exit_with_error(errno);

    if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL,
                   (void *)&sock_opt, sizeof(sock_opt)) < 0)
        exit_with_error(errno);

    sock_opt = opt_batch_size;
    if (setsockopt(xsk_socket__fd(xsk->xsk), SOL_SOCKET, SO_BUSY_POLL_BUDGET,
                   (void *)&sock_opt, sizeof(sock_opt)) < 0)
        exit_with_error(errno);
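
/*
 * In reduced-capability mode this process cannot obtain the xsks_map fd
 * itself, so it connects to SOCKET_NAME and receives the fd over a
 * unix-domain stream socket. The peer is expected to pass it as SCM_RIGHTS
 * ancillary data; recv_xsks_map_fd_from_ctrl_node() extracts it from the
 * first control message.
 */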
static int recv_xsks_map_fd_from_ctrl_node(int sock, int *_fd)
    char cms[CMSG_SPACE(sizeof(int))];
    struct cmsghdr *cmsg;

    iov.iov_base = &value;
    iov.iov_len = sizeof(int);

    msg.msg_namelen = 0;
    msg.msg_control = (caddr_t)cms;
    msg.msg_controllen = sizeof(cms);

    len = recvmsg(sock, &msg, 0);

        fprintf(stderr, "Recvmsg failed length incorrect.\n");

        fprintf(stderr, "Recvmsg failed no data\n");

    cmsg = CMSG_FIRSTHDR(&msg);
    *_fd = *(int *)CMSG_DATA(cmsg);

recv_xsks_map_fd(int *xsks_map_fd)
    struct sockaddr_un server;

    sock = socket(AF_UNIX, SOCK_STREAM, 0);
        fprintf(stderr, "Error opening socket stream: %s", strerror(errno));

    server.sun_family = AF_UNIX;
    strcpy(server.sun_path, SOCKET_NAME);

    if (connect(sock, (struct sockaddr *)&server, sizeof(struct sockaddr_un)) < 0) {
        fprintf(stderr, "Error connecting stream socket: %s", strerror(errno));

    err = recv_xsks_map_fd_from_ctrl_node(sock, xsks_map_fd);
        fprintf(stderr, "Error %d receiving fd\n", err);
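
/*
 * When --reduce-cap is set, main() masks its effective and permitted sets
 * down to CAP_NET_RAW before creating the socket, and later relies on the
 * (presumably privileged) control-node process to hand over the xsks_map fd
 * so xsk_socket__update_xskmap() can be called without loading the XDP
 * program here.
 */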
int main(int argc, char **argv)
    struct __user_cap_header_struct hdr = { _LINUX_CAPABILITY_VERSION_3, 0 };
    struct __user_cap_data_struct data[2] = { { 0 } };
    bool rx = false, tx = false;
    struct sched_param schparam;
    struct xsk_umem_info *umem;
    struct bpf_object *obj;
    int xsks_map_fd = 0;

    parse_command_line(argc, argv);

    if (opt_reduced_cap) {
        if (capget(&hdr, data) < 0)
            fprintf(stderr, "Error getting capabilities\n");

        data->effective &= CAP_TO_MASK(CAP_NET_RAW);
        data->permitted &= CAP_TO_MASK(CAP_NET_RAW);

        if (capset(&hdr, data) < 0)
            fprintf(stderr, "Setting capabilities failed\n");

        if (capget(&hdr, data) < 0) {
            fprintf(stderr, "Error getting capabilities\n");

            fprintf(stderr, "Capabilities EFF %x Caps INH %x Caps Per %x\n",
                    data[0].effective, data[0].inheritable, data[0].permitted);
            fprintf(stderr, "Capabilities EFF %x Caps INH %x Caps Per %x\n",
                    data[1].effective, data[1].inheritable, data[1].permitted);

    /* Use libbpf 1.0 API mode */
    libbpf_set_strict_mode(LIBBPF_STRICT_ALL);

    if (opt_num_xsks > 1)
        load_xdp_program(argv, &obj);

    /* Reserve memory for the umem. Use hugepages if unaligned chunk mode */
    bufs = mmap(NULL, NUM_FRAMES * opt_xsk_frame_size,
                PROT_READ | PROT_WRITE,
                MAP_PRIVATE | MAP_ANONYMOUS | opt_mmap_flags, -1, 0);
    if (bufs == MAP_FAILED) {
        printf("ERROR: mmap failed\n");

    /* Create sockets... */
    umem = xsk_configure_umem(bufs, NUM_FRAMES * opt_xsk_frame_size);
    if (opt_bench == BENCH_RXDROP || opt_bench == BENCH_L2FWD) {
        xsk_populate_fill_ring(umem);

    if (opt_bench == BENCH_L2FWD || opt_bench == BENCH_TXONLY)

    for (i = 0; i < opt_num_xsks; i++)
        xsks[num_socks++] = xsk_configure_socket(umem, rx, tx);

    for (i = 0; i < opt_num_xsks; i++)
        apply_setsockopt(xsks[i]);

    if (opt_bench == BENCH_TXONLY) {
        if (opt_tstamp && opt_pkt_size < PKTGEN_SIZE_MIN)
            opt_pkt_size = PKTGEN_SIZE_MIN;

        for (i = 0; i < NUM_FRAMES; i++)
            gen_eth_frame(umem, i * opt_xsk_frame_size);

    if (opt_num_xsks > 1 && opt_bench != BENCH_TXONLY)
        enter_xsks_into_map(obj);

    if (opt_reduced_cap) {
        ret = recv_xsks_map_fd(&xsks_map_fd);
            fprintf(stderr, "Error %d receiving xsks_map_fd\n", ret);
            exit_with_error(ret);

        ret = xsk_socket__update_xskmap(xsks[0]->xsk, xsks_map_fd);
            fprintf(stderr, "Update of BPF map failed(%d)\n", ret);
            exit_with_error(ret);

    signal(SIGINT, int_exit);
    signal(SIGTERM, int_exit);
    signal(SIGABRT, int_exit);

    setlocale(LC_ALL, "");

    prev_time = get_nsecs();
    start_time = prev_time;

        ret = pthread_create(&pt, NULL, poller, NULL);
            exit_with_error(ret);

    /* Configure sched priority for better wake-up accuracy */
    memset(&schparam, 0, sizeof(schparam));
    schparam.sched_priority = opt_schprio;
    ret = sched_setscheduler(0, opt_schpolicy, &schparam);
        fprintf(stderr, "Error(%d) in setting priority(%d): %s\n",
                errno, opt_schprio, strerror(errno));

    if (opt_bench == BENCH_RXDROP)

    else if (opt_bench == BENCH_TXONLY)

    benchmark_done = true;

    pthread_join(pt, NULL);

    munmap(bufs, NUM_FRAMES * opt_xsk_frame_size);