// SPDX-License-Identifier: GPL-2.0-only
/* Copyright(c) 2017 Jesper Dangaard Brouer, Red Hat, Inc.
 */
static const char *__doc__ =
	" XDP redirect with a CPU-map type \"BPF_MAP_TYPE_CPUMAP\"";
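
/* Example invocation (the interface name "eth0" is illustrative; in
 * samples/bpf this tool builds as xdp_redirect_cpu):
 *
 *   sudo ./xdp_redirect_cpu --dev eth0 --cpu 2 --cpu 3 \
 *        --progname xdp_cpu_map5_lb_hash_ip_pairs
 *
 * Each --cpu adds one cpumap entry; RX packets on --dev are then
 * redirected to the kthreads bound to those CPUs.
 */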

#include <errno.h>
#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdbool.h>
#include <string.h>
#include <unistd.h>
#include <locale.h>
#include <sys/resource.h>
#include <sys/sysinfo.h>
#include <getopt.h>
#include <net/if.h>
#include <time.h>
#include <linux/limits.h>

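/* Stub out __must_check (normally supplied by the kernel's
 * <linux/compiler.h>, which is not available to this userspace build)
 * so the kernel's <linux/err.h> can be included for IS_ERR(), used
 * below to check the ERR_PTR-encoded pointers this libbpf version
 * returns from bpf_program__attach_tracepoint().
 */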
#define __must_check
#include <linux/err.h>

#include <arpa/inet.h>
#include <linux/if_link.h>

/* How many xdp_progs are defined in _kern.c */
#define MAX_PROG 6

#include <bpf/bpf.h>
#include "libbpf.h"

#include "bpf_util.h"

static int ifindex = -1;
static char ifname_buf[IF_NAMESIZE];
static char *ifname;
static __u32 prog_id;

static __u32 xdp_flags = XDP_FLAGS_UPDATE_IF_NOEXIST;
static int n_cpus;
static int cpu_map_fd;
static int rx_cnt_map_fd;
static int redirect_err_cnt_map_fd;
static int cpumap_enqueue_cnt_map_fd;
static int cpumap_kthread_cnt_map_fd;
static int cpus_available_map_fd;
static int cpus_count_map_fd;
static int cpus_iterator_map_fd;
static int exception_cnt_map_fd;

#define NUM_TP 5
struct bpf_link *tp_links[NUM_TP] = { 0 };
static int tp_cnt = 0;

/* Exit return codes */
#define EXIT_OK			0
#define EXIT_FAIL		1
#define EXIT_FAIL_OPTION	2
#define EXIT_FAIL_XDP		3
#define EXIT_FAIL_BPF		4
#define EXIT_FAIL_MEM		5

static const struct option long_options[] = {
	{"help",	no_argument,		NULL, 'h' },
	{"dev",		required_argument,	NULL, 'd' },
	{"skb-mode",	no_argument,		NULL, 'S' },
	{"sec",		required_argument,	NULL, 's' },
	{"progname",	required_argument,	NULL, 'p' },
	{"qsize",	required_argument,	NULL, 'q' },
	{"cpu",		required_argument,	NULL, 'c' },
	{"stress-mode", no_argument,		NULL, 'x' },
	{"no-separators", no_argument,		NULL, 'z' },
	{"force",	no_argument,		NULL, 'F' },
	{0, 0, NULL,  0 }
};

static void int_exit(int sig)
{
	__u32 curr_prog_id = 0;

	if (ifindex > -1) {
		if (bpf_get_link_xdp_id(ifindex, &curr_prog_id, xdp_flags)) {
			printf("bpf_get_link_xdp_id failed\n");
			exit(EXIT_FAIL);
		}
		if (prog_id == curr_prog_id) {
			fprintf(stderr,
				"Interrupted: Removing XDP program on ifindex:%d device:%s\n",
				ifindex, ifname);
			bpf_set_link_xdp_fd(ifindex, -1, xdp_flags);
		} else if (!curr_prog_id) {
			printf("couldn't find a prog id on the given iface\n");
		} else {
			printf("program on interface changed, not removing\n");
		}
	}
	/* Detach tracepoints */
	while (tp_cnt)
		bpf_link__destroy(tp_links[--tp_cnt]);

	exit(EXIT_OK);
}

static void print_avail_progs(struct bpf_object *obj)
{
	struct bpf_program *pos;

	bpf_object__for_each_program(pos, obj) {
		if (bpf_program__is_xdp(pos))
			printf(" %s\n", bpf_program__title(pos, false));
	}
}

static void usage(char *argv[], struct bpf_object *obj)
{
	int i;

	printf("\nDOCUMENTATION:\n%s\n", __doc__);
	printf("\n");
	printf(" Usage: %s (options-see-below)\n", argv[0]);
	printf(" Listing options:\n");
	for (i = 0; long_options[i].name != 0; i++) {
		printf(" --%-12s", long_options[i].name);
		if (long_options[i].flag != NULL)
			printf(" flag (internal value:%d)",
				*long_options[i].flag);
		else
			printf(" short-option: -%c",
				long_options[i].val);
		printf("\n");
	}
	printf("\n Programs to be used for --progname:\n");
	print_avail_progs(obj);
	printf("\n");
}

/* gettime returns a monotonic clock timestamp in nanoseconds.
 * Cost: clock_gettime (ns) => 26ns (CLOCK_MONOTONIC)
 *       clock_gettime (ns) =>  9ns (CLOCK_MONOTONIC_COARSE)
 */
#define NANOSEC_PER_SEC 1000000000 /* 10^9 */
static __u64 gettime(void)
{
	struct timespec t;
	int res;

	res = clock_gettime(CLOCK_MONOTONIC, &t);
	if (res < 0) {
		fprintf(stderr, "Error with clock_gettime! (%i)\n", res);
		exit(EXIT_FAIL);
	}
	return (__u64) t.tv_sec * NANOSEC_PER_SEC + t.tv_nsec;
}

/* Common stats data record shared with _kern.c */
struct datarec {
	__u64 processed;
	__u64 dropped;
	__u64 issue;
};
struct record {
	__u64 timestamp;
	struct datarec total;
	struct datarec *cpu;
};
struct stats_record {
	struct record rx_cnt;
	struct record redir_err;
	struct record kthread;
	struct record exception;
	/* Flexible array member: one record per destination CPU,
	 * sized by n_cpus in alloc_stats_record()
	 */
	struct record enq[];
};

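/* Userspace reads of BPF_MAP_TYPE_PERCPU_* maps return an array of
 * values, one element per *possible* CPU, e.g.:
 *
 *	struct datarec values[bpf_num_possible_cpus()];
 *	bpf_map_lookup_elem(fd, &key, values); // fills one slot per CPU
 *
 * bpf_num_possible_cpus() derives the count from
 * /sys/devices/system/cpu/possible.
 */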
static bool map_collect_percpu(int fd, __u32 key, struct record *rec)
{
	/* For percpu maps, userspace gets a value per possible CPU */
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec values[nr_cpus];
	__u64 sum_processed = 0;
	__u64 sum_dropped = 0;
	__u64 sum_issue = 0;
	int i;

	if ((bpf_map_lookup_elem(fd, &key, values)) != 0) {
		fprintf(stderr,
			"ERR: bpf_map_lookup_elem failed key:0x%X\n", key);
		return false;
	}
	/* Get time as close as possible to reading map contents */
	rec->timestamp = gettime();

	/* Record and sum values from each CPU */
	for (i = 0; i < nr_cpus; i++) {
		rec->cpu[i].processed = values[i].processed;
		sum_processed        += values[i].processed;
		rec->cpu[i].dropped = values[i].dropped;
		sum_dropped        += values[i].dropped;
		rec->cpu[i].issue = values[i].issue;
		sum_issue        += values[i].issue;
	}
	rec->total.processed = sum_processed;
	rec->total.dropped   = sum_dropped;
	rec->total.issue     = sum_issue;
	return true;
}

static struct datarec *alloc_record_per_cpu(void)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	struct datarec *array;

	array = calloc(nr_cpus, sizeof(struct datarec));
	if (!array) {
		fprintf(stderr, "Mem alloc error (nr_cpus:%u)\n", nr_cpus);
		exit(EXIT_FAIL_MEM);
	}
	return array;
}

static struct stats_record *alloc_stats_record(void)
{
	struct stats_record *rec;
	int i, size;

	size = sizeof(*rec) + n_cpus * sizeof(struct record);
	rec = malloc(size);
	if (!rec) {
		fprintf(stderr, "Mem alloc error\n");
		exit(EXIT_FAIL_MEM);
	}
	memset(rec, 0, size);
	rec->rx_cnt.cpu    = alloc_record_per_cpu();
	rec->redir_err.cpu = alloc_record_per_cpu();
	rec->kthread.cpu   = alloc_record_per_cpu();
	rec->exception.cpu = alloc_record_per_cpu();
	for (i = 0; i < n_cpus; i++)
		rec->enq[i].cpu = alloc_record_per_cpu();

	return rec;
}

static void free_stats_record(struct stats_record *r)
{
	int i;

	for (i = 0; i < n_cpus; i++)
		free(r->enq[i].cpu);
	free(r->exception.cpu);
	free(r->kthread.cpu);
	free(r->redir_err.cpu);
	free(r->rx_cnt.cpu);
	free(r);
}

static double calc_period(struct record *r, struct record *p)
{
	double period_ = 0;
	__u64 period = 0;

	period = r->timestamp - p->timestamp;
	if (period > 0)
		period_ = ((double) period / NANOSEC_PER_SEC);

	return period_;
}

static __u64 calc_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->processed - p->processed;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_drop_pps(struct datarec *r, struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->dropped - p->dropped;
		pps = packets / period_;
	}
	return pps;
}

static __u64 calc_errs_pps(struct datarec *r,
			    struct datarec *p, double period_)
{
	__u64 packets = 0;
	__u64 pps = 0;

	if (period_ > 0) {
		packets = r->issue - p->issue;
		pps = packets / period_;
	}
	return pps;
}

static void stats_print(struct stats_record *stats_rec,
			struct stats_record *stats_prev,
			char *prog_name)
{
	unsigned int nr_cpus = bpf_num_possible_cpus();
	double pps = 0, drop = 0, err = 0;
	struct record *rec, *prev;
	int to_cpu;
	double t;
	int i;

	/* Header */
	printf("Running XDP/eBPF prog_name:%s\n", prog_name);
	printf("%-15s %-7s %-14s %-11s %-9s\n",
	       "XDP-cpumap", "CPU:to", "pps", "drop-pps", "extra-info");

	/* XDP rx_cnt */
	{
		char *fmt_rx = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_rx = "%-15s %-7s %'-14.0f %'-11.0f\n";
		char *errstr = "";

		rec  = &stats_rec->rx_cnt;
		prev = &stats_prev->rx_cnt;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0)
				errstr = "cpu-dest/err";
			if (pps > 0)
				printf(fmt_rx, "XDP-RX",
					i, pps, drop, err, errstr);
		}
		pps  = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		printf(fm2_rx, "XDP-RX", "total", pps, drop);
	}

	/* cpumap enqueue stats */
	for (to_cpu = 0; to_cpu < n_cpus; to_cpu++) {
		char *fmt = "%-15s %3d:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *fm2 = "%-15s %3s:%-3d %'-14.0f %'-11.0f %'-10.2f %s\n";
		char *errstr = "";

		rec  = &stats_rec->enq[to_cpu];
		prev = &stats_prev->enq[to_cpu];
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			if (pps > 0)
				printf(fmt, "cpumap-enqueue",
				       i, to_cpu, pps, drop, err, errstr);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		if (pps > 0) {
			drop = calc_drop_pps(&rec->total, &prev->total, t);
			err  = calc_errs_pps(&rec->total, &prev->total, t);
			if (err > 0) {
				errstr = "bulk-average";
				err = pps / err; /* calc average bulk size */
			}
			printf(fm2, "cpumap-enqueue",
			       "sum", to_cpu, pps, drop, err, errstr);
		}
	}

	/* cpumap kthread stats */
	{
		char *fmt_k = "%-15s %-7d %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *fm2_k = "%-15s %-7s %'-14.0f %'-11.0f %'-10.0f %s\n";
		char *e_str = "";

		rec  = &stats_rec->kthread;
		prev = &stats_prev->kthread;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			err  = calc_errs_pps(r, p, t);
			if (err > 0)
				e_str = "sched";
			if (pps > 0)
				printf(fmt_k, "cpumap_kthread",
				       i, pps, drop, err, e_str);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		err  = calc_errs_pps(&rec->total, &prev->total, t);
		if (err > 0)
			e_str = "sched-sum";
		printf(fm2_k, "cpumap_kthread", "total", pps, drop, err, e_str);
	}

	/* XDP redirect err tracepoints (very unlikely) */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->redir_err;
		prev = &stats_prev->redir_err;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "redirect_err", i, pps, drop);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "redirect_err", "total", pps, drop);
	}

	/* XDP general exception tracepoints */
	{
		char *fmt_err = "%-15s %-7d %'-14.0f %'-11.0f\n";
		char *fm2_err = "%-15s %-7s %'-14.0f %'-11.0f\n";

		rec  = &stats_rec->exception;
		prev = &stats_prev->exception;
		t = calc_period(rec, prev);
		for (i = 0; i < nr_cpus; i++) {
			struct datarec *r = &rec->cpu[i];
			struct datarec *p = &prev->cpu[i];

			pps  = calc_pps(r, p, t);
			drop = calc_drop_pps(r, p, t);
			if (pps > 0)
				printf(fmt_err, "xdp_exception", i, pps, drop);
		}
		pps = calc_pps(&rec->total, &prev->total, t);
		drop = calc_drop_pps(&rec->total, &prev->total, t);
		printf(fm2_err, "xdp_exception", "total", pps, drop);
	}

	printf("\n");
	fflush(stdout);
}

static void stats_collect(struct stats_record *rec)
{
	int fd, i;

	fd = rx_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->rx_cnt);

	fd = redirect_err_cnt_map_fd;
	map_collect_percpu(fd, 1, &rec->redir_err);

	fd = cpumap_enqueue_cnt_map_fd;
	for (i = 0; i < n_cpus; i++)
		map_collect_percpu(fd, i, &rec->enq[i]);

	fd = cpumap_kthread_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->kthread);

	fd = exception_cnt_map_fd;
	map_collect_percpu(fd, 0, &rec->exception);
}

/* Pointer swap trick */
static inline void swap(struct stats_record **a, struct stats_record **b)
{
	struct stats_record *tmp;

	tmp = *a;
	*a = *b;
	*b = tmp;
}

static int create_cpu_entry(__u32 cpu, __u32 queue_size,
			    __u32 avail_idx, bool new)
{
	__u32 curr_cpus_count = 0;
	__u32 key = 0;
	int ret;

	/* Add a CPU entry to cpumap, as this allocates a cpu entry in
	 * the kernel for the cpu.
	 */
	ret = bpf_map_update_elem(cpu_map_fd, &cpu, &queue_size, 0);
	if (ret) {
		fprintf(stderr, "Create CPU entry failed (err:%d)\n", ret);
		exit(EXIT_FAIL_BPF);
	}

	/* Inform bpf_progs that a new CPU is available to select
	 * from via some control maps.
	 */
	ret = bpf_map_update_elem(cpus_available_map_fd, &avail_idx, &cpu, 0);
	if (ret) {
		fprintf(stderr, "Add to avail CPUs failed\n");
		exit(EXIT_FAIL_BPF);
	}

	/* When not replacing/updating an existing entry, bump the count */
	ret = bpf_map_lookup_elem(cpus_count_map_fd, &key, &curr_cpus_count);
	if (ret) {
		fprintf(stderr, "Failed reading curr cpus_count\n");
		exit(EXIT_FAIL_BPF);
	}
	if (new) {
		curr_cpus_count++;
		ret = bpf_map_update_elem(cpus_count_map_fd, &key,
					  &curr_cpus_count, 0);
		if (ret) {
			fprintf(stderr, "Failed write curr cpus_count\n");
			exit(EXIT_FAIL_BPF);
		}
	}
	printf("%s CPU:%u as idx:%u queue_size:%d (total cpus_count:%u)\n",
	       new ? "Add-new" : "Replace", cpu, avail_idx,
	       queue_size, curr_cpus_count);

	return 0;
}

/* CPUs are zero-indexed. Thus, add a special sentinel default value
 * in map cpus_available to mark CPU indexes not configured
 */
static void mark_cpus_unavailable(void)
{
	__u32 invalid_cpu = n_cpus;
	int ret, i;

	for (i = 0; i < n_cpus; i++) {
		ret = bpf_map_update_elem(cpus_available_map_fd, &i,
					  &invalid_cpu, 0);
		if (ret) {
			fprintf(stderr, "Failed marking CPU unavailable\n");
			exit(EXIT_FAIL_BPF);
		}
	}
}

/* Stress cpumap management code by concurrently changing underlying cpumap */
static void stress_cpumap(void)
{
	/* Changing qsize will cause the kernel to free and allocate a new
	 * bpf_cpu_map_entry, with an associated/complicated tear-down
	 * procedure.
	 */
	create_cpu_entry(1,  1024, 0, false);
	create_cpu_entry(1,     8, 0, false);
	create_cpu_entry(1, 16000, 0, false);
}

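/* Main reporting loop: keeps two stats_record snapshots and swap()s
 * them every interval, so each report is a delta over roughly
 * `interval` seconds. Runs until the process is signalled, at which
 * point int_exit() detaches the XDP program and the tracepoints.
 */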
static void stats_poll(int interval, bool use_separators, char *prog_name,
		       bool stress_mode)
{
	struct stats_record *record, *prev;

	record = alloc_stats_record();
	prev   = alloc_stats_record();
	stats_collect(record);

	/* Trick to pretty-print with thousands separators: use %' */
	if (use_separators)
		setlocale(LC_NUMERIC, "en_US");

	while (1) {
		swap(&prev, &record);
		stats_collect(record);
		stats_print(record, prev, prog_name);
		sleep(interval);
		if (stress_mode)
			stress_cpumap();
	}

	/* Not reached: the loop above only exits via int_exit() */
	free_stats_record(record);
	free_stats_record(prev);
}

static struct bpf_link *attach_tp(struct bpf_object *obj,
				  const char *tp_category,
				  const char *tp_name)
{
	struct bpf_program *prog;
	struct bpf_link *link;
	char sec_name[PATH_MAX];
	int len;

	len = snprintf(sec_name, PATH_MAX, "tracepoint/%s/%s",
		       tp_category, tp_name);
	if (len < 0)
		exit(EXIT_FAIL);

	prog = bpf_object__find_program_by_title(obj, sec_name);
	if (!prog) {
		fprintf(stderr, "ERR: finding progsec: %s\n", sec_name);
		exit(EXIT_FAIL_BPF);
	}

	link = bpf_program__attach_tracepoint(prog, tp_category, tp_name);
	if (IS_ERR(link))
		exit(EXIT_FAIL_BPF);

	return link;
}

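/* NUM_TP must match the number of attach_tp() calls here; tp_links[]
 * slots are filled in order and destroyed again in int_exit().
 */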
static void init_tracepoints(struct bpf_object *obj)
{
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_err");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_redirect_map_err");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_exception");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_enqueue");
	tp_links[tp_cnt++] = attach_tp(obj, "xdp", "xdp_cpumap_kthread");
}

static int init_map_fds(struct bpf_object *obj)
{
	/* Maps updated by tracepoints */
	redirect_err_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "redirect_err_cnt");
	exception_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "exception_cnt");
	cpumap_enqueue_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpumap_enqueue_cnt");
	cpumap_kthread_cnt_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpumap_kthread_cnt");

	/* Maps used by XDP */
	rx_cnt_map_fd = bpf_object__find_map_fd_by_name(obj, "rx_cnt");
	cpu_map_fd = bpf_object__find_map_fd_by_name(obj, "cpu_map");
	cpus_available_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpus_available");
	cpus_count_map_fd = bpf_object__find_map_fd_by_name(obj, "cpus_count");
	cpus_iterator_map_fd =
		bpf_object__find_map_fd_by_name(obj, "cpus_iterator");

	if (cpu_map_fd < 0 || rx_cnt_map_fd < 0 ||
	    redirect_err_cnt_map_fd < 0 || cpumap_enqueue_cnt_map_fd < 0 ||
	    cpumap_kthread_cnt_map_fd < 0 || cpus_available_map_fd < 0 ||
	    cpus_count_map_fd < 0 || cpus_iterator_map_fd < 0 ||
	    exception_cnt_map_fd < 0)
		return -ENOENT;

	return 0;
}

int main(int argc, char **argv)
{
	struct rlimit r = {10 * 1024 * 1024, RLIM_INFINITY};
	char *prog_name = "xdp_cpu_map5_lb_hash_ip_pairs";
	struct bpf_prog_load_attr prog_load_attr = {
		.prog_type	= BPF_PROG_TYPE_UNSPEC,
	};
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	bool use_separators = true;
	bool stress_mode = false;
	struct bpf_program *prog;
	struct bpf_object *obj;
	char filename[256];
	int added_cpus = 0;
	int longindex = 0;
	int interval = 2;
	int add_cpu = -1;
	int opt, err;
	int prog_fd;
	__u32 qsize;

	n_cpus = get_nprocs_conf();

	/* Notice: choosing the queue size is very important with the
	 * ixgbe driver, because its page-recycling trick depends on
	 * pages being returned quickly.  The number of outstanding
	 * packets in the system must be less than 2x the RX-ring size.
	 */
	qsize = 128 + 64;
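	/* Sanity check of the default: with 192 outstanding packets and,
	 * e.g., a 512-slot RX ring (a common ixgbe default), 192 < 2 * 512
	 * holds. Ring sizes are driver/configuration dependent, so treat
	 * this as a guideline and tune via --qsize.
	 */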

	snprintf(filename, sizeof(filename), "%s_kern.o", argv[0]);
	prog_load_attr.file = filename;

	if (setrlimit(RLIMIT_MEMLOCK, &r)) {
		perror("setrlimit(RLIMIT_MEMLOCK)");
		return 1;
	}

	if (bpf_prog_load_xattr(&prog_load_attr, &obj, &prog_fd))
		return EXIT_FAIL;

	if (prog_fd < 0) {
		fprintf(stderr, "ERR: bpf_prog_load_xattr: %s\n",
			strerror(errno));
		return EXIT_FAIL;
	}
	init_tracepoints(obj);
	if (init_map_fds(obj) < 0) {
		fprintf(stderr, "bpf_object__find_map_fd_by_name failed\n");
		return EXIT_FAIL;
	}
	mark_cpus_unavailable();

	/* Parse command line args */
	while ((opt = getopt_long(argc, argv, "hSd:s:p:q:c:xzF",
				  long_options, &longindex)) != -1) {
		switch (opt) {
		case 'd':
			if (strlen(optarg) >= IF_NAMESIZE) {
				fprintf(stderr, "ERR: --dev name too long\n");
				goto error;
			}
			ifname = (char *)&ifname_buf;
			strncpy(ifname, optarg, IF_NAMESIZE);
			ifindex = if_nametoindex(ifname);
			if (ifindex == 0) {
				fprintf(stderr,
					"ERR: --dev name unknown err(%d):%s\n",
					errno, strerror(errno));
				goto error;
			}
			break;
		case 's':
			interval = atoi(optarg);
			break;
		case 'S':
			xdp_flags |= XDP_FLAGS_SKB_MODE;
			break;
		case 'x':
			stress_mode = true;
			break;
		case 'z':
			use_separators = false;
			break;
		case 'p':
			/* Selecting eBPF prog to load */
			prog_name = optarg;
			break;
		case 'c':
			/* Add multiple CPUs */
			add_cpu = strtoul(optarg, NULL, 0);
			if (add_cpu >= n_cpus) {
				fprintf(stderr,
					"--cpu nr too large for cpumap\n");
				goto error;
			}
			create_cpu_entry(add_cpu, qsize, added_cpus, true);
			added_cpus++;
			break;
		case 'q':
			qsize = atoi(optarg);
			break;
		case 'F':
			xdp_flags &= ~XDP_FLAGS_UPDATE_IF_NOEXIST;
			break;
		case 'h':
		error:
		default:
			usage(argv, obj);
			return EXIT_FAIL_OPTION;
		}
	}
	/* Required option */
	if (ifindex == -1) {
		fprintf(stderr, "ERR: required option --dev missing\n");
		usage(argv, obj);
		return EXIT_FAIL_OPTION;
	}
	/* Required option */
	if (add_cpu == -1) {
		fprintf(stderr, "ERR: required option --cpu missing\n");
		fprintf(stderr, " Specify multiple --cpu options to add more\n");
		usage(argv, obj);
		return EXIT_FAIL_OPTION;
	}

	/* Remove XDP program when program is interrupted or killed */
	signal(SIGINT, int_exit);
	signal(SIGTERM, int_exit);

	prog = bpf_object__find_program_by_title(obj, prog_name);
	if (!prog) {
		fprintf(stderr, "bpf_object__find_program_by_title failed\n");
		return EXIT_FAIL;
	}

	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		fprintf(stderr, "bpf_program__fd failed\n");
		return EXIT_FAIL;
	}

	if (bpf_set_link_xdp_fd(ifindex, prog_fd, xdp_flags) < 0) {
		fprintf(stderr, "link set xdp fd failed\n");
		return EXIT_FAIL_XDP;
	}

	err = bpf_obj_get_info_by_fd(prog_fd, &info, &info_len);
	if (err) {
		printf("can't get prog info - %s\n", strerror(errno));
		return err;
	}
	prog_id = info.id;

	stats_poll(interval, use_separators, prog_name, stress_mode);
	return EXIT_OK;
}