// SPDX-License-Identifier: GPL-2.0-only
/* bpf/cpumap.c
 *
 * Copyright (c) 2017 Jesper Dangaard Brouer, Red Hat Inc.
 */

/**
 * DOC: cpu map
 * The 'cpumap' is primarily used as a backend map for the XDP BPF helper
 * call bpf_redirect_map() and the XDP_REDIRECT action, like 'devmap'.
 *
 * Unlike devmap, which redirects XDP frames out to another NIC device,
 * this map type redirects raw XDP frames to another CPU.  The remote
 * CPU will do SKB-allocation and call the normal network stack.
 */
/*
 * This is a scalability and isolation mechanism that allows separating
 * the early driver network XDP layer from the rest of the netstack, and
 * assigning dedicated CPUs to this stage.  This basically allows for
 * 10G wirespeed pre-filtering via BPF.
 */
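/* A minimal BPF-side usage sketch (illustrative, not part of this file;
 * the map sizing and the hard-coded destination CPU are made up):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_CPUMAP);
 *		__uint(max_entries, 64);
 *		__type(key, __u32);
 *		__type(value, struct bpf_cpumap_val);
 *	} cpu_map SEC(".maps");
 *
 *	SEC("xdp")
 *	int xdp_redirect_cpu(struct xdp_md *ctx)
 *	{
 *		return bpf_redirect_map(&cpu_map, 2, XDP_PASS);
 *	}
 *
 * The third bpf_redirect_map() argument is the action returned when the
 * map lookup fails, here XDP_PASS, i.e. fall through to the stack.
 */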
#include <linux/bitops.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/ptr_ring.h>
#include <net/xdp.h>

#include <linux/sched.h>
#include <linux/workqueue.h>
#include <linux/kthread.h>
#include <linux/completion.h>
#include <trace/events/xdp.h>
#include <linux/btf_ids.h>

#include <linux/netdevice.h>   /* netif_receive_skb_list */
#include <linux/etherdevice.h> /* eth_type_trans */

/* General idea: XDP packets getting XDP-redirected to another CPU will
 * at most be stored/queued for one driver ->poll() call.  It is
 * guaranteed that queueing the frame and the flush operation happen on
 * the same CPU.  Thus, the cpu_map_flush operation can deduce via
 * this_cpu_ptr() which queue in bpf_cpu_map_entry contains packets.
 */
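/* Sketch of the per-packet flow this implies, as seen from a driver's
 * NAPI poll loop (illustrative pseudo-driver, not a real one):
 *
 *	while (budget--) {
 *		...
 *		act = bpf_prog_run_xdp(xdp_prog, &xdp);
 *		// On XDP_REDIRECT into a cpumap, xdp_do_redirect() ends up
 *		// in cpu_map_enqueue() -> bq_enqueue() below.
 *		xdp_do_redirect(dev, &xdp, xdp_prog);
 *	}
 *	// End of the poll cycle: xdp_do_flush() invokes __cpu_map_flush(),
 *	// draining every percpu bulk queue into its destination ring.
 *	xdp_do_flush();
 */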

#define CPU_MAP_BULK_SIZE 8  /* 8 == one cacheline on 64-bit archs */
struct bpf_cpu_map_entry;
struct bpf_cpu_map;

struct xdp_bulk_queue {
	void *q[CPU_MAP_BULK_SIZE];
	struct list_head flush_node;
	struct bpf_cpu_map_entry *obj;
	unsigned int count;
};

/* Struct for every remote "destination" CPU in map */
struct bpf_cpu_map_entry {
	u32 cpu;    /* kthread CPU and map index */
	int map_id; /* Back reference to map */

	/* XDP can run multiple RX-ring queues, need __percpu enqueue store */
	struct xdp_bulk_queue __percpu *bulkq;

	/* Queue with potential multi-producers, and single-consumer kthread */
	struct ptr_ring *queue;
	struct task_struct *kthread;

	struct bpf_cpumap_val value;
	struct bpf_prog *prog;

	struct completion kthread_running;
	struct rcu_work free_work;
};

struct bpf_cpu_map {
	struct bpf_map map;
	/* Below members specific for map type */
	struct bpf_cpu_map_entry __rcu **cpu_map;
};

static DEFINE_PER_CPU(struct list_head, cpu_map_flush_list);

static struct bpf_map *cpu_map_alloc(union bpf_attr *attr)
{
	u32 value_size = attr->value_size;
	struct bpf_cpu_map *cmap;

	/* check sanity of attributes */
	if (attr->max_entries == 0 || attr->key_size != 4 ||
	    (value_size != offsetofend(struct bpf_cpumap_val, qsize) &&
	     value_size != offsetofend(struct bpf_cpumap_val, bpf_prog.fd)) ||
	    attr->map_flags & ~BPF_F_NUMA_NODE)
		return ERR_PTR(-EINVAL);

	/* Pre-limit array size based on NR_CPUS, not final CPU check */
	if (attr->max_entries > NR_CPUS)
		return ERR_PTR(-E2BIG);

	cmap = bpf_map_area_alloc(sizeof(*cmap), NUMA_NO_NODE);
	if (!cmap)
		return ERR_PTR(-ENOMEM);

	bpf_map_init_from_attr(&cmap->map, attr);

	/* Alloc array for possible remote "destination" CPUs */
	cmap->cpu_map = bpf_map_area_alloc(cmap->map.max_entries *
					   sizeof(struct bpf_cpu_map_entry *),
					   cmap->map.numa_node);
	if (!cmap->cpu_map) {
		bpf_map_area_free(cmap);
		return ERR_PTR(-ENOMEM);
	}

	return &cmap->map;
}
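/* Userspace creation sketch (illustrative; uses libbpf's bpf_map_create()
 * wrapper from <bpf/bpf.h> rather than the raw bpf(2) syscall).  The value
 * size must match one of the two sizes accepted above;
 * sizeof(struct bpf_cpumap_val) covers the larger, bpf_prog.fd layout:
 *
 *	int map_fd = bpf_map_create(BPF_MAP_TYPE_CPUMAP, "cpu_map",
 *				    sizeof(__u32),
 *				    sizeof(struct bpf_cpumap_val),
 *				    64, NULL);	// 64 == max_entries
 */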

static void __cpu_map_ring_cleanup(struct ptr_ring *ring)
{
	/* The tear-down procedure should have made sure that the queue is
	 * empty.  See __cpu_map_entry_replace() and the RCU-work invoked
	 * __cpu_map_entry_free(), which stops the kthread.  Catch any
	 * broken behaviour gracefully and warn once.
	 */
	void *ptr;

	while ((ptr = ptr_ring_consume(ring))) {
		WARN_ON_ONCE(1);
		if (unlikely(__ptr_test_bit(0, &ptr))) {
			__ptr_clear_bit(0, &ptr);
			kfree_skb(ptr);
			continue;
		}
		xdp_return_frame(ptr);
	}
}
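/* The ring can carry two pointer types: xdp_frame (queued by
 * cpu_map_enqueue()) and sk_buff (queued by cpu_map_generic_redirect()).
 * Bit 0 of the queued pointer tags skbs, which works because these
 * pointers are at least word-aligned; sketch of the producer-side
 * convention (see cpu_map_generic_redirect() below):
 *
 *	__ptr_set_bit(0, &skb);			// tag pointer as an skb
 *	ptr_ring_produce(rcpu->queue, skb);
 *
 * Consumers test and clear the bit before use, as above and in
 * cpu_map_kthread_run().
 */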

static void cpu_map_bpf_prog_run_skb(struct bpf_cpu_map_entry *rcpu,
				     struct list_head *listp,
				     struct xdp_cpumap_stats *stats)
{
	struct sk_buff *skb, *tmp;
	struct xdp_buff xdp;
	u32 act;
	int err;

	list_for_each_entry_safe(skb, tmp, listp, list) {
		act = bpf_prog_run_generic_xdp(skb, &xdp, rcpu->prog);
		switch (act) {
		case XDP_PASS:
			break;
		case XDP_REDIRECT:
			skb_list_del_init(skb);
			err = xdp_do_generic_redirect(skb->dev, skb, &xdp,
						      rcpu->prog);
			if (unlikely(err)) {
				kfree_skb(skb);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			/* Continue with the next skb; returning here would
			 * let the rest of the list bypass the program.
			 */
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_ABORTED:
			trace_xdp_exception(skb->dev, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			skb_list_del_init(skb);
			kfree_skb(skb);
			stats->drop++;
			break;
		}
	}
}

static int cpu_map_bpf_prog_run_xdp(struct bpf_cpu_map_entry *rcpu,
				    void **frames, int n,
				    struct xdp_cpumap_stats *stats)
{
	struct xdp_rxq_info rxq;
	struct xdp_buff xdp;
	int i, nframes = 0;

	xdp_set_return_frame_no_direct();
	xdp.rxq = &rxq;

	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
		u32 act;
		int err;

		rxq.dev = xdpf->dev_rx;
		rxq.mem = xdpf->mem;
		/* TODO: report queue_index to xdp_rxq_info */

		xdp_convert_frame_to_buff(xdpf, &xdp);

		act = bpf_prog_run_xdp(rcpu->prog, &xdp);
		switch (act) {
		case XDP_PASS:
			err = xdp_update_frame_from_buff(&xdp, xdpf);
			if (err < 0) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				frames[nframes++] = xdpf;
				stats->pass++;
			}
			break;
		case XDP_REDIRECT:
			err = xdp_do_redirect(xdpf->dev_rx, &xdp,
					      rcpu->prog);
			if (unlikely(err)) {
				xdp_return_frame(xdpf);
				stats->drop++;
			} else {
				stats->redirect++;
			}
			break;
		default:
			bpf_warn_invalid_xdp_action(NULL, rcpu->prog, act);
			fallthrough;
		case XDP_DROP:
			xdp_return_frame(xdpf);
			stats->drop++;
			break;
		}
	}

	xdp_clear_return_frame_no_direct();

	return nframes;
}

#define CPUMAP_BATCH 8

static int cpu_map_bpf_prog_run(struct bpf_cpu_map_entry *rcpu, void **frames,
				int xdp_n, struct xdp_cpumap_stats *stats,
				struct list_head *list)
{
	int nframes;

	if (!rcpu->prog)
		return xdp_n;

	rcu_read_lock_bh();

	nframes = cpu_map_bpf_prog_run_xdp(rcpu, frames, xdp_n, stats);

	if (stats->redirect)
		xdp_do_flush();

	if (unlikely(!list_empty(list)))
		cpu_map_bpf_prog_run_skb(rcpu, list, stats);

	rcu_read_unlock_bh(); /* resched point, may call do_softirq() */

	return nframes;
}

static int cpu_map_kthread_run(void *data)
{
	struct bpf_cpu_map_entry *rcpu = data;

	complete(&rcpu->kthread_running);
	set_current_state(TASK_INTERRUPTIBLE);

	/* When the kthread receives a stop order, the rcpu entry has
	 * already been disconnected from the map, so no new packets can
	 * enter.  Any remaining in-flight per-CPU stored packets are
	 * flushed to this queue.  Honor the kthread_stop signal, but only
	 * after the queue has been drained.
	 */
	while (!kthread_should_stop() || !__ptr_ring_empty(rcpu->queue)) {
		struct xdp_cpumap_stats stats = {}; /* zero stats */
		unsigned int kmem_alloc_drops = 0, sched = 0;
		gfp_t gfp = __GFP_ZERO | GFP_ATOMIC;
		int i, n, m, nframes, xdp_n;
		void *frames[CPUMAP_BATCH];
		void *skbs[CPUMAP_BATCH];
		LIST_HEAD(list);

		/* Release CPU reschedule checks */
		if (__ptr_ring_empty(rcpu->queue)) {
			set_current_state(TASK_INTERRUPTIBLE);
			/* Recheck to avoid lost wake-up */
			if (__ptr_ring_empty(rcpu->queue)) {
				schedule();
				sched = 1;
			} else {
				__set_current_state(TASK_RUNNING);
			}
		} else {
			sched = cond_resched();
		}

		/*
		 * The bpf_cpu_map_entry is a single-consumer ring, with the
		 * kthread pinned to one CPU.  Lockless access to the
		 * ptr_ring consume side is valid because resizing the
		 * queue is not allowed.
		 */
		n = __ptr_ring_consume_batched(rcpu->queue, frames,
					       CPUMAP_BATCH);
		for (i = 0, xdp_n = 0; i < n; i++) {
			void *f = frames[i];
			struct page *page;

			if (unlikely(__ptr_test_bit(0, &f))) {
				struct sk_buff *skb = f;

				__ptr_clear_bit(0, &skb);
				list_add_tail(&skb->list, &list);
				continue;
			}

			frames[xdp_n++] = f;
			page = virt_to_page(f);

			/* Bring struct page memory area to curr CPU. Read by
			 * build_skb_around via page_is_pfmemalloc(), and when
			 * freed written by page_frag_free call.
			 */
			prefetchw(page);
		}

		/* Support running another XDP prog on this CPU */
		nframes = cpu_map_bpf_prog_run(rcpu, frames, xdp_n, &stats, &list);
		if (nframes) {
			m = kmem_cache_alloc_bulk(skbuff_cache, gfp, nframes, skbs);
			if (unlikely(m == 0)) {
				for (i = 0; i < nframes; i++)
					skbs[i] = NULL; /* effect: xdp_return_frame */
				kmem_alloc_drops += nframes;
			}
		}

		local_bh_disable();
		for (i = 0; i < nframes; i++) {
			struct xdp_frame *xdpf = frames[i];
			struct sk_buff *skb = skbs[i];

			skb = __xdp_build_skb_from_frame(xdpf, skb,
							 xdpf->dev_rx);
			if (!skb) {
				xdp_return_frame(xdpf);
				continue;
			}

			list_add_tail(&skb->list, &list);
		}
		netif_receive_skb_list(&list);

		/* Feedback loop via tracepoint */
		trace_xdp_cpumap_kthread(rcpu->map_id, n, kmem_alloc_drops,
					 sched, &stats);

		local_bh_enable(); /* resched point, may call do_softirq() */
	}
	__set_current_state(TASK_RUNNING);

	return 0;
}

static int __cpu_map_load_bpf_program(struct bpf_cpu_map_entry *rcpu,
				      struct bpf_map *map, int fd)
{
	struct bpf_prog *prog;

	prog = bpf_prog_get_type(fd, BPF_PROG_TYPE_XDP);
	if (IS_ERR(prog))
		return PTR_ERR(prog);

	if (prog->expected_attach_type != BPF_XDP_CPUMAP ||
	    !bpf_prog_map_compatible(map, prog)) {
		bpf_prog_put(prog);
		return -EINVAL;
	}

	rcpu->value.bpf_prog.id = prog->aux->id;
	rcpu->prog = prog;

	return 0;
}
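/* Sketch of a program loadable into a cpumap entry (illustrative): with
 * current libbpf, SEC("xdp/cpumap") sets expected_attach_type to
 * BPF_XDP_CPUMAP, which is what the check above requires:
 *
 *	SEC("xdp/cpumap")
 *	int cpumap_prog(struct xdp_md *ctx)
 *	{
 *		return XDP_PASS;	// hand the frame to the network stack
 *	}
 *
 * Its fd is then passed in bpf_cpumap_val.bpf_prog.fd on map update (see
 * cpu_map_update_elem() below).
 */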

static struct bpf_cpu_map_entry *
__cpu_map_entry_alloc(struct bpf_map *map, struct bpf_cpumap_val *value,
		      u32 cpu)
{
	int numa, err, i, fd = value->bpf_prog.fd;
	gfp_t gfp = GFP_KERNEL | __GFP_NOWARN;
	struct bpf_cpu_map_entry *rcpu;
	struct xdp_bulk_queue *bq;

	/* Have map->numa_node, but choose node of redirect target CPU */
	numa = cpu_to_node(cpu);

	rcpu = bpf_map_kmalloc_node(map, sizeof(*rcpu), gfp | __GFP_ZERO, numa);
	if (!rcpu)
		return NULL;

	/* Alloc percpu bulkq */
	rcpu->bulkq = bpf_map_alloc_percpu(map, sizeof(*rcpu->bulkq),
					   sizeof(void *), gfp);
	if (!rcpu->bulkq)
		goto free_rcu;

	for_each_possible_cpu(i) {
		bq = per_cpu_ptr(rcpu->bulkq, i);
		bq->obj = rcpu;
	}

	/* Alloc queue */
	rcpu->queue = bpf_map_kmalloc_node(map, sizeof(*rcpu->queue), gfp,
					   numa);
	if (!rcpu->queue)
		goto free_bulkq;

	err = ptr_ring_init(rcpu->queue, value->qsize, gfp);
	if (err)
		goto free_queue;

	rcpu->cpu    = cpu;
	rcpu->map_id = map->id;
	rcpu->value.qsize  = value->qsize;

	if (fd > 0 && __cpu_map_load_bpf_program(rcpu, map, fd))
		goto free_ptr_ring;

	/* Setup kthread */
	init_completion(&rcpu->kthread_running);
	rcpu->kthread = kthread_create_on_node(cpu_map_kthread_run, rcpu, numa,
					       "cpumap/%d/map:%d", cpu,
					       map->id);
	if (IS_ERR(rcpu->kthread))
		goto free_prog;

	/* Make sure kthread runs on a single CPU */
	kthread_bind(rcpu->kthread, cpu);
	wake_up_process(rcpu->kthread);
	/* Make sure the kthread has started running, so that kthread_stop()
	 * will not stop it prematurely, and all pending frames or skbs
	 * will be handled by the kthread before kthread_stop() returns.
	 */
	wait_for_completion(&rcpu->kthread_running);

	return rcpu;

free_prog:
	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
free_ptr_ring:
	ptr_ring_cleanup(rcpu->queue, NULL);
free_queue:
	kfree(rcpu->queue);
free_bulkq:
	free_percpu(rcpu->bulkq);
free_rcu:
	kfree(rcpu);
	return NULL;
}

static void __cpu_map_entry_free(struct work_struct *work)
{
	struct bpf_cpu_map_entry *rcpu;

	/* This cpu_map_entry has been disconnected from the map and one
	 * RCU grace period has elapsed.  Thus, XDP cannot queue any new
	 * packets, and cannot change/set flush_needed in a way that would
	 * find this entry.
	 */
	rcpu = container_of(to_rcu_work(work), struct bpf_cpu_map_entry, free_work);

	/* kthread_stop will wake_up_process and wait for it to complete.
	 * cpu_map_kthread_run() makes sure the pointer ring is empty
	 * before exiting.
	 */
	kthread_stop(rcpu->kthread);

	if (rcpu->prog)
		bpf_prog_put(rcpu->prog);
	/* The queue should be empty at this point */
	__cpu_map_ring_cleanup(rcpu->queue);
	ptr_ring_cleanup(rcpu->queue, NULL);
	kfree(rcpu->queue);
	free_percpu(rcpu->bulkq);
	kfree(rcpu);
}

/* After the xchg of the bpf_cpu_map_entry pointer, we need to make sure the old
 * entry is no longer in use before freeing. We use queue_rcu_work() to call
 * __cpu_map_entry_free() in a separate workqueue after waiting for an RCU grace
 * period. This means that (a) all pending enqueue and flush operations have
 * completed (because of the RCU callback), and (b) we are in a workqueue
 * context where we can stop the kthread and wait for it to exit before freeing
 * everything.
 */
static void __cpu_map_entry_replace(struct bpf_cpu_map *cmap,
				    u32 key_cpu, struct bpf_cpu_map_entry *rcpu)
{
	struct bpf_cpu_map_entry *old_rcpu;

	old_rcpu = unrcu_pointer(xchg(&cmap->cpu_map[key_cpu], RCU_INITIALIZER(rcpu)));
	if (old_rcpu) {
		INIT_RCU_WORK(&old_rcpu->free_work, __cpu_map_entry_free);
		queue_rcu_work(system_wq, &old_rcpu->free_work);
	}
}

static long cpu_map_delete_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 key_cpu = *(u32 *)key;

	if (key_cpu >= map->max_entries)
		return -EINVAL;

	/* notice caller map_delete_elem() uses rcu_read_lock() */
	__cpu_map_entry_replace(cmap, key_cpu, NULL);
	return 0;
}

static long cpu_map_update_elem(struct bpf_map *map, void *key, void *value,
				u64 map_flags)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpumap_val cpumap_value = {};
	struct bpf_cpu_map_entry *rcpu;
	/* Array index key corresponds to CPU number */
	u32 key_cpu = *(u32 *)key;

	memcpy(&cpumap_value, value, map->value_size);

	if (unlikely(map_flags > BPF_EXIST))
		return -EINVAL;
	if (unlikely(key_cpu >= cmap->map.max_entries))
		return -E2BIG;
	if (unlikely(map_flags == BPF_NOEXIST))
		return -EEXIST;
	if (unlikely(cpumap_value.qsize > 16384)) /* sanity limit on qsize */
		return -EOVERFLOW;

	/* Make sure CPU is a valid possible cpu */
	if (key_cpu >= nr_cpumask_bits || !cpu_possible(key_cpu))
		return -ENODEV;

	if (cpumap_value.qsize == 0) {
		rcpu = NULL; /* Same as deleting */
	} else {
		/* Updating qsize causes re-allocation of the bpf_cpu_map_entry */
		rcpu = __cpu_map_entry_alloc(map, &cpumap_value, key_cpu);
		if (!rcpu)
			return -ENOMEM;
	}
	rcu_read_lock();
	__cpu_map_entry_replace(cmap, key_cpu, rcpu);
	rcu_read_unlock();
	return 0;
}
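/* Userspace update sketch (illustrative values and fds): point a CPU's
 * entry at a 2048-slot ring, optionally attaching a per-entry program
 * whose fd was obtained elsewhere:
 *
 *	struct bpf_cpumap_val val = {
 *		.qsize = 2048,		// ptr_ring slots; 0 acts as delete
 *		.bpf_prog.fd = prog_fd,	// optional BPF_XDP_CPUMAP program
 *	};
 *	__u32 cpu = 2;
 *
 *	int err = bpf_map_update_elem(map_fd, &cpu, &val, BPF_ANY);
 */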

static void cpu_map_free(struct bpf_map *map)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 i;

	/* At this point bpf_prog->aux->refcnt == 0 and this map->refcnt == 0,
	 * so the bpf programs (there can be more than one that used this map)
	 * have been disconnected from events.  Wait for outstanding critical
	 * sections in these programs to complete.  synchronize_rcu() below
	 * not only guarantees no further "XDP/bpf-side" reads against
	 * bpf_cpu_map->cpu_map, but also ensures pending flush operations
	 * (if any) have completed.
	 */
	synchronize_rcu();

	/* The only possible user of bpf_cpu_map_entry is
	 * cpu_map_kthread_run().
	 */
	for (i = 0; i < cmap->map.max_entries; i++) {
		struct bpf_cpu_map_entry *rcpu;

		rcpu = rcu_dereference_raw(cmap->cpu_map[i]);
		if (!rcpu)
			continue;

		/* Stop kthread and cleanup entry directly */
		__cpu_map_entry_free(&rcpu->free_work.work);
	}
	bpf_map_area_free(cmap->cpu_map);
	bpf_map_area_free(cmap);
}

/* Elements are kept alive by RCU; either by rcu_read_lock() (from syscall) or
 * by local_bh_disable() (from XDP calls inside NAPI). The
 * rcu_read_lock_bh_held() below makes lockdep accept both.
 */
static void *__cpu_map_lookup_elem(struct bpf_map *map, u32 key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	struct bpf_cpu_map_entry *rcpu;

	if (key >= map->max_entries)
		return NULL;

	rcpu = rcu_dereference_check(cmap->cpu_map[key],
				     rcu_read_lock_bh_held());
	return rcpu;
}

static void *cpu_map_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_cpu_map_entry *rcpu =
		__cpu_map_lookup_elem(map, *(u32 *)key);

	return rcpu ? &rcpu->value : NULL;
}

static int cpu_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
	struct bpf_cpu_map *cmap = container_of(map, struct bpf_cpu_map, map);
	u32 index = key ? *(u32 *)key : U32_MAX;
	u32 *next = next_key;

	if (index >= cmap->map.max_entries) {
		*next = 0;
		return 0;
	}

	if (index == cmap->map.max_entries - 1)
		return -ENOENT;
	*next = index + 1;
	return 0;
}
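/* Userspace iteration sketch using the conventional get_next_key loop
 * (illustrative):
 *
 *	__u32 key, next;
 *	int err;
 *
 *	for (err = bpf_map_get_next_key(map_fd, NULL, &next); !err;
 *	     key = next, err = bpf_map_get_next_key(map_fd, &key, &next)) {
 *		// look up 'next' with bpf_map_lookup_elem() here
 *	}
 *
 * Note the semantics above: an out-of-range start key restarts iteration
 * at index 0 instead of returning an error.
 */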

static long cpu_map_redirect(struct bpf_map *map, u64 index, u64 flags)
{
	return __bpf_xdp_redirect_map(map, index, flags, 0,
				      __cpu_map_lookup_elem);
}

static u64 cpu_map_mem_usage(const struct bpf_map *map)
{
	u64 usage = sizeof(struct bpf_cpu_map);

	/* Currently the dynamically allocated elements are not counted */
	usage += (u64)map->max_entries * sizeof(struct bpf_cpu_map_entry *);
	return usage;
}
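/* For example, on a 64-bit kernel a cpumap with max_entries == 64 reports
 * sizeof(struct bpf_cpu_map) + 64 * 8 bytes, i.e. just the map struct plus
 * 512 bytes of pointer array; the ptr_rings, bulk queues and kthreads of
 * live entries are not included.
 */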

BTF_ID_LIST_SINGLE(cpu_map_btf_ids, struct, bpf_cpu_map)
const struct bpf_map_ops cpu_map_ops = {
	.map_meta_equal		= bpf_map_meta_equal,
	.map_alloc		= cpu_map_alloc,
	.map_free		= cpu_map_free,
	.map_delete_elem	= cpu_map_delete_elem,
	.map_update_elem	= cpu_map_update_elem,
	.map_lookup_elem	= cpu_map_lookup_elem,
	.map_get_next_key	= cpu_map_get_next_key,
	.map_check_btf		= map_check_no_btf,
	.map_mem_usage		= cpu_map_mem_usage,
	.map_btf_id		= &cpu_map_btf_ids[0],
	.map_redirect		= cpu_map_redirect,
};

static void bq_flush_to_queue(struct xdp_bulk_queue *bq)
{
	struct bpf_cpu_map_entry *rcpu = bq->obj;
	unsigned int processed = 0, drops = 0;
	const int to_cpu = rcpu->cpu;
	struct ptr_ring *q;
	int i;

	if (unlikely(!bq->count))
		return;

	q = rcpu->queue;
	spin_lock(&q->producer_lock);

	for (i = 0; i < bq->count; i++) {
		struct xdp_frame *xdpf = bq->q[i];
		int err;

		err = __ptr_ring_produce(q, xdpf);
		if (err) {
			drops++;
			xdp_return_frame_rx_napi(xdpf);
		}
		processed++;
	}
	bq->count = 0;
	spin_unlock(&q->producer_lock);

	__list_del_clearprev(&bq->flush_node);

	/* Feedback loop via tracepoints */
	trace_xdp_cpumap_enqueue(rcpu->map_id, processed, drops, to_cpu);
}

/* Runs under RCU read-side protection, plus in softirq under NAPI
 * protection; percpu variable access is therefore safe.
 */
static void bq_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq = this_cpu_ptr(rcpu->bulkq);

	if (unlikely(bq->count == CPU_MAP_BULK_SIZE))
		bq_flush_to_queue(bq);

	/* Notice, the xdp_buff/page MUST be queued here, long enough for the
	 * driver code that invoked us to finish, because drivers
	 * (e.g. ixgbe) use recycle tricks based on the page refcount.
	 *
	 * Thus, the incoming xdp_frame is always queued here (otherwise we
	 * would race with another CPU on the page refcount and remaining
	 * driver code).  Queue time is very short: the driver invokes the
	 * flush operation when completing its napi->poll call.
	 */
	bq->q[bq->count++] = xdpf;

	if (!bq->flush_node.prev)
		list_add(&bq->flush_node, flush_list);
}

int cpu_map_enqueue(struct bpf_cpu_map_entry *rcpu, struct xdp_frame *xdpf,
		    struct net_device *dev_rx)
{
	/* Info needed when constructing SKB on remote CPU */
	xdpf->dev_rx = dev_rx;

	bq_enqueue(rcpu, xdpf);
	return 0;
}

int cpu_map_generic_redirect(struct bpf_cpu_map_entry *rcpu,
			     struct sk_buff *skb)
{
	int ret;

	__skb_pull(skb, skb->mac_len);
	skb_set_redirected(skb, false);
	__ptr_set_bit(0, &skb);

	ret = ptr_ring_produce(rcpu->queue, skb);
	if (ret < 0)
		goto trace;

	wake_up_process(rcpu->kthread);
trace:
	trace_xdp_cpumap_enqueue(rcpu->map_id, !ret, !!ret, rcpu->cpu);
	return ret;
}

void __cpu_map_flush(void)
{
	struct list_head *flush_list = this_cpu_ptr(&cpu_map_flush_list);
	struct xdp_bulk_queue *bq, *tmp;

	list_for_each_entry_safe(bq, tmp, flush_list, flush_node) {
		bq_flush_to_queue(bq);

		/* If already running, costs spin_lock_irqsave + smp_mb */
		wake_up_process(bq->obj->kthread);
	}
}

#ifdef CONFIG_DEBUG_NET
bool cpu_map_check_flush(void)
{
	if (list_empty(this_cpu_ptr(&cpu_map_flush_list)))
		return false;
	__cpu_map_flush();
	return true;
}
#endif

static int __init cpu_map_init(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		INIT_LIST_HEAD(&per_cpu(cpu_map_flush_list, cpu));
	return 0;
}

subsys_initcall(cpu_map_init);