kernel/bpf/hashtab.c (GNU Linux-libre 5.15.137-gnu)
// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2011-2014 PLUMgrid, http://plumgrid.com
 * Copyright (c) 2016 Facebook
 */
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/jhash.h>
#include <linux/filter.h>
#include <linux/rculist_nulls.h>
#include <linux/random.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>
#include "percpu_freelist.h"
#include "bpf_lru_list.h"
#include "map_in_map.h"

#define HTAB_CREATE_FLAG_MASK                                           \
        (BPF_F_NO_PREALLOC | BPF_F_NO_COMMON_LRU | BPF_F_NUMA_NODE |    \
         BPF_F_ACCESS_MASK | BPF_F_ZERO_SEED)

#define BATCH_OPS(_name)                        \
        .map_lookup_batch =                     \
        _name##_map_lookup_batch,               \
        .map_lookup_and_delete_batch =          \
        _name##_map_lookup_and_delete_batch,    \
        .map_update_batch =                     \
        generic_map_update_batch,               \
        .map_delete_batch =                     \
        generic_map_delete_batch
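
/* For illustration, BATCH_OPS(htab) expands to the four designated
 * initializers below; this is how a hash map's struct bpf_map_ops wires
 * up the batched syscall operations:
 *
 *      .map_lookup_batch = htab_map_lookup_batch,
 *      .map_lookup_and_delete_batch = htab_map_lookup_and_delete_batch,
 *      .map_update_batch = generic_map_update_batch,
 *      .map_delete_batch = generic_map_delete_batch
 */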

/*
 * The bucket lock has two protection scopes:
 *
 * 1) Serializing concurrent operations from BPF programs on different
 *    CPUs
 *
 * 2) Serializing concurrent operations from BPF programs and sys_bpf()
 *
 * BPF programs can execute in any context including perf, kprobes and
 * tracing. As there are almost no limits on where perf, kprobes and
 * tracing can be invoked from, the lock operations need to be protected
 * against deadlocks. Deadlocks can be caused by recursion and by an
 * invocation in the lock held section when functions which acquire this
 * lock are invoked from sys_bpf(). BPF recursion is prevented by
 * incrementing the per CPU variable bpf_prog_active, which prevents BPF
 * programs attached to perf events, kprobes and tracing from being
 * invoked before the prior invocation from one of these contexts has
 * completed. sys_bpf() uses the same mechanism by pinning the task to the
 * current CPU and incrementing the recursion protection across the map
 * operation.
 *
 * This has subtle implications on PREEMPT_RT. PREEMPT_RT forbids certain
 * operations like memory allocations (even with GFP_ATOMIC) from atomic
 * contexts. This is required because even with GFP_ATOMIC the memory
 * allocator calls into code paths which acquire locks with long held lock
 * sections. To ensure deterministic behaviour these locks are regular
 * spinlocks, which are converted to 'sleepable' spinlocks on RT. The only
 * true atomic contexts on an RT kernel are the low level hardware
 * handling, scheduling, low level interrupt handling, NMIs etc. None of
 * these contexts should ever do memory allocations.
 *
 * As regular device interrupt handlers and soft interrupts are forced into
 * thread context, the existing code which does
 *   spin_lock*(); alloc(GFP_ATOMIC); spin_unlock*();
 * just works.
 *
 * In theory the BPF locks could be converted to regular spinlocks as well,
 * but the bucket locks and percpu_freelist locks can be taken from
 * arbitrary contexts (perf, kprobes, tracepoints) which are required to be
 * atomic contexts even on RT. These mechanisms require preallocated maps,
 * so there is no need to invoke memory allocations within the lock held
 * sections.
 *
 * BPF maps which need dynamic allocation are only used from (forced)
 * thread context on RT and can therefore use regular spinlocks, which in
 * turn makes it possible to invoke memory allocations from the lock held
 * section.
 *
 * On a non-RT kernel this distinction is neither possible nor required.
 * spinlock maps to raw_spinlock and the extra code is optimized out by the
 * compiler.
 */
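
/* A minimal sketch (hypothetical call chain) of the recursion deadlock
 * this guards against: a kprobe placed on a function that runs inside
 * the bucket lock held section would trigger a BPF program on the same
 * CPU, and that program could try to update the very same map:
 *
 *      htab_map_update_elem()
 *        htab_lock_bucket()          <- bucket lock acquired
 *          ... kprobe fires, attached prog runs ...
 *            bpf_map_update_elem()   <- same map, same bucket
 *              htab_lock_bucket()    <- would deadlock; the map_locked
 *                                       counter below detects this and
 *                                       returns -EBUSY instead
 */
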
struct bucket {
        struct hlist_nulls_head head;
        union {
                raw_spinlock_t raw_lock;
                spinlock_t     lock;
        };
};

#define HASHTAB_MAP_LOCK_COUNT 8
#define HASHTAB_MAP_LOCK_MASK (HASHTAB_MAP_LOCK_COUNT - 1)
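
/* Example: each map keeps HASHTAB_MAP_LOCK_COUNT per-cpu re-entrancy
 * counters, and a bucket's hash picks one of them, e.g. hash 0x2a maps
 * to counter slot 0x2a & HASHTAB_MAP_LOCK_MASK == 2. Buckets that
 * collide on the low three bits share a slot, so an -EBUSY from
 * htab_lock_bucket() can occasionally be a false positive.
 */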

struct bpf_htab {
        struct bpf_map map;
        struct bucket *buckets;
        void *elems;
        union {
                struct pcpu_freelist freelist;
                struct bpf_lru lru;
        };
        struct htab_elem *__percpu *extra_elems;
        atomic_t count; /* number of elements in this hashtable */
        u32 n_buckets;  /* number of hash buckets */
        u32 elem_size;  /* size of each element in bytes */
        u32 hashrnd;
        struct lock_class_key lockdep_key;
        int __percpu *map_locked[HASHTAB_MAP_LOCK_COUNT];
};

/* each htab element is struct htab_elem + key + value */
struct htab_elem {
        union {
                struct hlist_nulls_node hash_node;
                struct {
                        void *padding;
                        union {
                                struct bpf_htab *htab;
                                struct pcpu_freelist_node fnode;
                                struct htab_elem *batch_flink;
                        };
                };
        };
        union {
                struct rcu_head rcu;
                struct bpf_lru_node lru_node;
        };
        u32 hash;
        char key[] __aligned(8);
};
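
/* Layout illustration (non-percpu map): the key is stored inline at
 * l->key and the value follows at l->key + round_up(key_size, 8), which
 * is exactly the address htab_map_lookup_elem() returns. E.g. with
 * key_size == 6 and value_size == 16:
 *
 *      [struct htab_elem][key: 6 bytes][pad: 2][value: 16 bytes]
 *                        ^ l->key      ^ l->key + 8
 */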

static inline bool htab_is_prealloc(const struct bpf_htab *htab)
{
        return !(htab->map.map_flags & BPF_F_NO_PREALLOC);
}

static inline bool htab_use_raw_lock(const struct bpf_htab *htab)
{
        return (!IS_ENABLED(CONFIG_PREEMPT_RT) || htab_is_prealloc(htab));
}
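
/* In other words: on a non-RT kernel the bucket lock is always the raw
 * spinlock; on PREEMPT_RT only preallocated maps use the raw lock, while
 * BPF_F_NO_PREALLOC maps fall back to the sleepable spinlock so that
 * allocations may happen under it.
 */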

static void htab_init_buckets(struct bpf_htab *htab)
{
        unsigned i;

        for (i = 0; i < htab->n_buckets; i++) {
                INIT_HLIST_NULLS_HEAD(&htab->buckets[i].head, i);
                if (htab_use_raw_lock(htab)) {
                        raw_spin_lock_init(&htab->buckets[i].raw_lock);
                        lockdep_set_class(&htab->buckets[i].raw_lock,
                                          &htab->lockdep_key);
                } else {
                        spin_lock_init(&htab->buckets[i].lock);
                        lockdep_set_class(&htab->buckets[i].lock,
                                          &htab->lockdep_key);
                }
                cond_resched();
        }
}

static inline int htab_lock_bucket(const struct bpf_htab *htab,
                                   struct bucket *b, u32 hash,
                                   unsigned long *pflags)
{
        unsigned long flags;
        bool use_raw_lock;

        hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);

        use_raw_lock = htab_use_raw_lock(htab);
        if (use_raw_lock)
                preempt_disable();
        else
                migrate_disable();
        if (unlikely(__this_cpu_inc_return(*(htab->map_locked[hash])) != 1)) {
                __this_cpu_dec(*(htab->map_locked[hash]));
                if (use_raw_lock)
                        preempt_enable();
                else
                        migrate_enable();
                return -EBUSY;
        }

        if (use_raw_lock)
                raw_spin_lock_irqsave(&b->raw_lock, flags);
        else
                spin_lock_irqsave(&b->lock, flags);
        *pflags = flags;

        return 0;
}

static inline void htab_unlock_bucket(const struct bpf_htab *htab,
                                      struct bucket *b, u32 hash,
                                      unsigned long flags)
{
        bool use_raw_lock = htab_use_raw_lock(htab);

        hash = hash & min_t(u32, HASHTAB_MAP_LOCK_MASK, htab->n_buckets - 1);
        if (use_raw_lock)
                raw_spin_unlock_irqrestore(&b->raw_lock, flags);
        else
                spin_unlock_irqrestore(&b->lock, flags);
        __this_cpu_dec(*(htab->map_locked[hash]));
        if (use_raw_lock)
                preempt_enable();
        else
                migrate_enable();
}
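
/* Typical caller pattern (see e.g. htab_map_delete_elem() below); an
 * -EBUSY return means a nested program on this CPU already holds a
 * bucket in the same map_locked slot, and the operation is refused
 * rather than deadlocking:
 *
 *      ret = htab_lock_bucket(htab, b, hash, &flags);
 *      if (ret)
 *              return ret;
 *      ... modify the bucket ...
 *      htab_unlock_bucket(htab, b, hash, flags);
 */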

static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node);

static bool htab_is_lru(const struct bpf_htab *htab)
{
        return htab->map.map_type == BPF_MAP_TYPE_LRU_HASH ||
                htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static bool htab_is_percpu(const struct bpf_htab *htab)
{
        return htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH ||
                htab->map.map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH;
}

static inline void htab_elem_set_ptr(struct htab_elem *l, u32 key_size,
                                     void __percpu *pptr)
{
        *(void __percpu **)(l->key + key_size) = pptr;
}

static inline void __percpu *htab_elem_get_ptr(struct htab_elem *l, u32 key_size)
{
        return *(void __percpu **)(l->key + key_size);
}

static void *fd_htab_map_get_ptr(const struct bpf_map *map, struct htab_elem *l)
{
        return *(void **)(l->key + roundup(map->key_size, 8));
}

static struct htab_elem *get_htab_elem(struct bpf_htab *htab, int i)
{
        return (struct htab_elem *) (htab->elems + i * (u64)htab->elem_size);
}

static bool htab_has_extra_elems(struct bpf_htab *htab)
{
        return !htab_is_percpu(htab) && !htab_is_lru(htab);
}

static void htab_free_prealloced_timers(struct bpf_htab *htab)
{
        u32 num_entries = htab->map.max_entries;
        int i;

        if (likely(!map_value_has_timer(&htab->map)))
                return;
        if (htab_has_extra_elems(htab))
                num_entries += num_possible_cpus();

        for (i = 0; i < num_entries; i++) {
                struct htab_elem *elem;

                elem = get_htab_elem(htab, i);
                bpf_timer_cancel_and_free(elem->key +
                                          round_up(htab->map.key_size, 8) +
                                          htab->map.timer_off);
                cond_resched();
        }
}

static void htab_free_elems(struct bpf_htab *htab)
{
        int i;

        if (!htab_is_percpu(htab))
                goto free_elems;

        for (i = 0; i < htab->map.max_entries; i++) {
                void __percpu *pptr;

                pptr = htab_elem_get_ptr(get_htab_elem(htab, i),
                                         htab->map.key_size);
                free_percpu(pptr);
                cond_resched();
        }
free_elems:
        bpf_map_area_free(htab->elems);
}

/* The LRU list has a lock (lru_lock). Each htab bucket has a lock
 * (bucket_lock). If both locks need to be acquired together, the lock
 * order is always lru_lock -> bucket_lock and this only happens in
 * bpf_lru_list.c logic. For example, a certain code path of
 * bpf_lru_pop_free(), which is called by prealloc_lru_pop(), will
 * acquire lru_lock first, followed by bucket_lock.
 *
 * In hashtab.c, to avoid deadlock, lock acquisition of
 * bucket_lock followed by lru_lock is not allowed. In such cases,
 * bucket_lock needs to be released first before acquiring lru_lock.
 */
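
/* This ordering is why htab_lru_map_update_elem() below calls
 * prealloc_lru_pop() to get a free node *before* taking the bucket
 * lock: popping a free node may itself evict an element, which takes
 * lru_lock and then a bucket_lock.
 */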
static struct htab_elem *prealloc_lru_pop(struct bpf_htab *htab, void *key,
                                          u32 hash)
{
        struct bpf_lru_node *node = bpf_lru_pop_free(&htab->lru, hash);
        struct htab_elem *l;

        if (node) {
                l = container_of(node, struct htab_elem, lru_node);
                memcpy(l->key, key, htab->map.key_size);
                return l;
        }

        return NULL;
}

static int prealloc_init(struct bpf_htab *htab)
{
        u32 num_entries = htab->map.max_entries;
        int err = -ENOMEM, i;

        if (htab_has_extra_elems(htab))
                num_entries += num_possible_cpus();

        htab->elems = bpf_map_area_alloc((u64)htab->elem_size * num_entries,
                                         htab->map.numa_node);
        if (!htab->elems)
                return -ENOMEM;

        if (!htab_is_percpu(htab))
                goto skip_percpu_elems;

        for (i = 0; i < num_entries; i++) {
                u32 size = round_up(htab->map.value_size, 8);
                void __percpu *pptr;

                pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
                                            GFP_USER | __GFP_NOWARN);
                if (!pptr)
                        goto free_elems;
                htab_elem_set_ptr(get_htab_elem(htab, i), htab->map.key_size,
                                  pptr);
                cond_resched();
        }

skip_percpu_elems:
        if (htab_is_lru(htab))
                err = bpf_lru_init(&htab->lru,
                                   htab->map.map_flags & BPF_F_NO_COMMON_LRU,
                                   offsetof(struct htab_elem, hash) -
                                   offsetof(struct htab_elem, lru_node),
                                   htab_lru_map_delete_node,
                                   htab);
        else
                err = pcpu_freelist_init(&htab->freelist);

        if (err)
                goto free_elems;

        if (htab_is_lru(htab))
                bpf_lru_populate(&htab->lru, htab->elems,
                                 offsetof(struct htab_elem, lru_node),
                                 htab->elem_size, num_entries);
        else
                pcpu_freelist_populate(&htab->freelist,
                                       htab->elems + offsetof(struct htab_elem, fnode),
                                       htab->elem_size, num_entries);

        return 0;

free_elems:
        htab_free_elems(htab);
        return err;
}

static void prealloc_destroy(struct bpf_htab *htab)
{
        htab_free_elems(htab);

        if (htab_is_lru(htab))
                bpf_lru_destroy(&htab->lru);
        else
                pcpu_freelist_destroy(&htab->freelist);
}

static int alloc_extra_elems(struct bpf_htab *htab)
{
        struct htab_elem *__percpu *pptr, *l_new;
        struct pcpu_freelist_node *l;
        int cpu;

        pptr = bpf_map_alloc_percpu(&htab->map, sizeof(struct htab_elem *), 8,
                                    GFP_USER | __GFP_NOWARN);
        if (!pptr)
                return -ENOMEM;

        for_each_possible_cpu(cpu) {
                l = pcpu_freelist_pop(&htab->freelist);
                /* pop will succeed, since prealloc_init()
                 * preallocated extra num_possible_cpus elements
                 */
                l_new = container_of(l, struct htab_elem, fnode);
                *per_cpu_ptr(pptr, cpu) = l_new;
        }
        htab->extra_elems = pptr;
        return 0;
}

/* Called from syscall */
static int htab_map_alloc_check(union bpf_attr *attr)
{
        bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
                       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
        bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
                    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
        /* percpu_lru means each cpu has its own LRU list.
         * It is different from BPF_MAP_TYPE_PERCPU_HASH, where
         * the map's value itself is percpu.  percpu_lru has
         * nothing to do with the map's value.
         */
        bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
        bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
        bool zero_seed = (attr->map_flags & BPF_F_ZERO_SEED);
        int numa_node = bpf_map_attr_numa_node(attr);

        BUILD_BUG_ON(offsetof(struct htab_elem, htab) !=
                     offsetof(struct htab_elem, hash_node.pprev));
        BUILD_BUG_ON(offsetof(struct htab_elem, fnode.next) !=
                     offsetof(struct htab_elem, hash_node.pprev));

        if (lru && !bpf_capable())
                /* The LRU implementation is much more complicated than
                 * that of other maps.  Hence, limit it to CAP_BPF.
                 */
                return -EPERM;

        if (zero_seed && !capable(CAP_SYS_ADMIN))
                /* Guard against local DoS, and discourage production use. */
                return -EPERM;

        if (attr->map_flags & ~HTAB_CREATE_FLAG_MASK ||
            !bpf_map_flags_access_ok(attr->map_flags))
                return -EINVAL;

        if (!lru && percpu_lru)
                return -EINVAL;

        if (lru && !prealloc)
                return -ENOTSUPP;

        if (numa_node != NUMA_NO_NODE && (percpu || percpu_lru))
                return -EINVAL;

        /* check sanity of attributes.
         * value_size == 0 may be allowed in the future to use map as a set
         */
        if (attr->max_entries == 0 || attr->key_size == 0 ||
            attr->value_size == 0)
                return -EINVAL;

        if ((u64)attr->key_size + attr->value_size >= KMALLOC_MAX_SIZE -
           sizeof(struct htab_elem))
                /* if key_size + value_size is bigger, the user space won't be
                 * able to access the elements via bpf syscall. This check
                 * also makes sure that the elem_size doesn't overflow and it's
                 * kmalloc-able later in htab_map_update_elem()
                 */
                return -E2BIG;

        return 0;
}
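
/* An illustrative userspace sketch of an attr that passes these checks;
 * the raw syscall form is shown because it mirrors 'union bpf_attr'
 * (libbpf's bpf_map_create() would normally be used instead):
 *
 *      union bpf_attr attr = {
 *              .map_type    = BPF_MAP_TYPE_HASH,
 *              .key_size    = 4,
 *              .value_size  = 8,
 *              .max_entries = 1024,
 *              .map_flags   = BPF_F_NO_PREALLOC,
 *      };
 *      int fd = syscall(__NR_bpf, BPF_MAP_CREATE, &attr, sizeof(attr));
 */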

static struct bpf_map *htab_map_alloc(union bpf_attr *attr)
{
        bool percpu = (attr->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
                       attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
        bool lru = (attr->map_type == BPF_MAP_TYPE_LRU_HASH ||
                    attr->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH);
        /* percpu_lru means each cpu has its own LRU list.
         * It is different from BPF_MAP_TYPE_PERCPU_HASH, where
         * the map's value itself is percpu.  percpu_lru has
         * nothing to do with the map's value.
         */
        bool percpu_lru = (attr->map_flags & BPF_F_NO_COMMON_LRU);
        bool prealloc = !(attr->map_flags & BPF_F_NO_PREALLOC);
        struct bpf_htab *htab;
        int err, i;

        htab = kzalloc(sizeof(*htab), GFP_USER | __GFP_ACCOUNT);
        if (!htab)
                return ERR_PTR(-ENOMEM);

        lockdep_register_key(&htab->lockdep_key);

        bpf_map_init_from_attr(&htab->map, attr);

        if (percpu_lru) {
                /* Ensure each CPU's LRU list has >= 1 element.
                 * While we are at it, make each LRU list have the same
                 * number of elements.
                 */
                htab->map.max_entries = roundup(attr->max_entries,
                                                num_possible_cpus());
                if (htab->map.max_entries < attr->max_entries)
                        htab->map.max_entries = rounddown(attr->max_entries,
                                                          num_possible_cpus());
        }
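
        /* Example: with 4 possible CPUs and max_entries == 1022, roundup()
         * yields 1024, i.e. 256 elements per CPU LRU list. The fallback to
         * rounddown() handles the corner case where roundup() overflows the
         * u32 and wraps to a value smaller than attr->max_entries.
         */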

        /* hash table size must be power of 2 */
        htab->n_buckets = roundup_pow_of_two(htab->map.max_entries);

        htab->elem_size = sizeof(struct htab_elem) +
                          round_up(htab->map.key_size, 8);
        if (percpu)
                htab->elem_size += sizeof(void *);
        else
                htab->elem_size += round_up(htab->map.value_size, 8);

        err = -E2BIG;
        /* prevent zero size kmalloc and check for u32 overflow */
        if (htab->n_buckets == 0 ||
            htab->n_buckets > U32_MAX / sizeof(struct bucket))
                goto free_htab;

        err = -ENOMEM;
        htab->buckets = bpf_map_area_alloc(htab->n_buckets *
                                           sizeof(struct bucket),
                                           htab->map.numa_node);
        if (!htab->buckets)
                goto free_htab;

        for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++) {
                htab->map_locked[i] = bpf_map_alloc_percpu(&htab->map,
                                                           sizeof(int),
                                                           sizeof(int),
                                                           GFP_USER);
                if (!htab->map_locked[i])
                        goto free_map_locked;
        }

        if (htab->map.map_flags & BPF_F_ZERO_SEED)
                htab->hashrnd = 0;
        else
                htab->hashrnd = get_random_int();

        htab_init_buckets(htab);

        if (prealloc) {
                err = prealloc_init(htab);
                if (err)
                        goto free_map_locked;

                if (!percpu && !lru) {
                        /* lru itself can remove the least used element, so
                         * there is no need for an extra elem during map_update.
                         */
                        err = alloc_extra_elems(htab);
                        if (err)
                                goto free_prealloc;
                }
        }

        return &htab->map;

free_prealloc:
        prealloc_destroy(htab);
free_map_locked:
        for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
                free_percpu(htab->map_locked[i]);
        bpf_map_area_free(htab->buckets);
free_htab:
        lockdep_unregister_key(&htab->lockdep_key);
        kfree(htab);
        return ERR_PTR(err);
}

static inline u32 htab_map_hash(const void *key, u32 key_len, u32 hashrnd)
{
        return jhash(key, key_len, hashrnd);
}

static inline struct bucket *__select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &htab->buckets[hash & (htab->n_buckets - 1)];
}

static inline struct hlist_nulls_head *select_bucket(struct bpf_htab *htab, u32 hash)
{
        return &__select_bucket(htab, hash)->head;
}

/* this lookup function can only be called with bucket lock taken */
static struct htab_elem *lookup_elem_raw(struct hlist_nulls_head *head, u32 hash,
                                         void *key, u32 key_size)
{
        struct hlist_nulls_node *n;
        struct htab_elem *l;

        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;

        return NULL;
}

/* can be called without bucket lock. it will repeat the loop in
 * the unlikely event that elements moved from one bucket into another
 * while the linked list is being walked
 */
static struct htab_elem *lookup_nulls_elem_raw(struct hlist_nulls_head *head,
                                               u32 hash, void *key,
                                               u32 key_size, u32 n_buckets)
{
        struct hlist_nulls_node *n;
        struct htab_elem *l;

again:
        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l->hash == hash && !memcmp(&l->key, key, key_size))
                        return l;

        if (unlikely(get_nulls_value(n) != (hash & (n_buckets - 1))))
                goto again;

        return NULL;
}
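
/* Illustration of the nulls re-check: each bucket list is terminated by
 * a "nulls" marker encoding the bucket index (see htab_init_buckets()).
 * Suppose a lockless walker in bucket 3 holds a pointer to an element
 * that is concurrently freed and recycled into bucket 7. The walk then
 * ends on bucket 7's marker, get_nulls_value(n) == 7 != 3, and the
 * lookup restarts from the head of bucket 3 instead of wrongly
 * reporting a miss.
 */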

/* Called from syscall or from eBPF program directly, so
 * arguments have to match bpf_map_lookup_elem() exactly.
 * The return value is adjusted by BPF instructions
 * in htab_map_gen_lookup().
 */
static void *__htab_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l;
        u32 hash, key_size;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);

        head = select_bucket(htab, hash);

        l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

        return l;
}

static void *htab_map_lookup_elem(struct bpf_map *map, void *key)
{
        struct htab_elem *l = __htab_map_lookup_elem(map, key);

        if (l)
                return l->key + round_up(map->key_size, 8);

        return NULL;
}

/* inline bpf_map_lookup_elem() call.
 * Instead of:
 * bpf_prog
 *   bpf_map_lookup_elem
 *     map->ops->map_lookup_elem
 *       htab_map_lookup_elem
 *         __htab_map_lookup_elem
 * do:
 * bpf_prog
 *   __htab_map_lookup_elem
 */
static int htab_map_gen_lookup(struct bpf_map *map, struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;

        BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
                     (void *(*)(struct bpf_map *map, void *key))NULL));
        *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 1);
        *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
                                offsetof(struct htab_elem, key) +
                                round_up(map->key_size, 8));
        return insn - insn_buf;
}
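
/* The emitted sequence corresponds to roughly this BPF pseudo-assembly;
 * r0 either stays NULL or is advanced from the htab_elem to the value:
 *
 *      r0 = __htab_map_lookup_elem(map, key)
 *      if r0 == 0 goto +1
 *      r0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 */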

static __always_inline void *__htab_lru_map_lookup_elem(struct bpf_map *map,
                                                        void *key, const bool mark)
{
        struct htab_elem *l = __htab_map_lookup_elem(map, key);

        if (l) {
                if (mark)
                        bpf_lru_node_set_ref(&l->lru_node);
                return l->key + round_up(map->key_size, 8);
        }

        return NULL;
}

static void *htab_lru_map_lookup_elem(struct bpf_map *map, void *key)
{
        return __htab_lru_map_lookup_elem(map, key, true);
}

static void *htab_lru_map_lookup_elem_sys(struct bpf_map *map, void *key)
{
        return __htab_lru_map_lookup_elem(map, key, false);
}

static int htab_lru_map_gen_lookup(struct bpf_map *map,
                                   struct bpf_insn *insn_buf)
{
        struct bpf_insn *insn = insn_buf;
        const int ret = BPF_REG_0;
        const int ref_reg = BPF_REG_1;

        BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
                     (void *(*)(struct bpf_map *map, void *key))NULL));
        *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
        *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 4);
        *insn++ = BPF_LDX_MEM(BPF_B, ref_reg, ret,
                              offsetof(struct htab_elem, lru_node) +
                              offsetof(struct bpf_lru_node, ref));
        *insn++ = BPF_JMP_IMM(BPF_JNE, ref_reg, 0, 1);
        *insn++ = BPF_ST_MEM(BPF_B, ret,
                             offsetof(struct htab_elem, lru_node) +
                             offsetof(struct bpf_lru_node, ref),
                             1);
        *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
                                offsetof(struct htab_elem, key) +
                                round_up(map->key_size, 8));
        return insn - insn_buf;
}
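
/* Same idea as htab_map_gen_lookup(), plus an inlined equivalent of
 * bpf_lru_node_set_ref(): load lru_node.ref and only store 1 if it is
 * still 0, presumably so a hot element does not keep dirtying the cache
 * line. Roughly:
 *
 *      r0 = __htab_map_lookup_elem(map, key)
 *      if r0 == 0 goto +4
 *      r1 = *(u8 *)(r0 + offsetof lru_node.ref)
 *      if r1 != 0 goto +1
 *      *(u8 *)(r0 + offsetof lru_node.ref) = 1
 *      r0 += offsetof(struct htab_elem, key) + round_up(key_size, 8)
 */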

static void check_and_free_timer(struct bpf_htab *htab, struct htab_elem *elem)
{
        if (unlikely(map_value_has_timer(&htab->map)))
                bpf_timer_cancel_and_free(elem->key +
                                          round_up(htab->map.key_size, 8) +
                                          htab->map.timer_off);
}

/* It is called from the bpf_lru_list when the LRU needs to delete
 * older elements from the htab.
 */
static bool htab_lru_map_delete_node(void *arg, struct bpf_lru_node *node)
{
        struct bpf_htab *htab = (struct bpf_htab *)arg;
        struct htab_elem *l = NULL, *tgt_l;
        struct hlist_nulls_head *head;
        struct hlist_nulls_node *n;
        unsigned long flags;
        struct bucket *b;
        int ret;

        tgt_l = container_of(node, struct htab_elem, lru_node);
        b = __select_bucket(htab, tgt_l->hash);
        head = &b->head;

        ret = htab_lock_bucket(htab, b, tgt_l->hash, &flags);
        if (ret)
                return false;

        hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
                if (l == tgt_l) {
                        hlist_nulls_del_rcu(&l->hash_node);
                        check_and_free_timer(htab, l);
                        break;
                }

        htab_unlock_bucket(htab, b, tgt_l->hash, flags);

        return l == tgt_l;
}

/* Called from syscall */
static int htab_map_get_next_key(struct bpf_map *map, void *key, void *next_key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct htab_elem *l, *next_l;
        u32 hash, key_size;
        int i = 0;

        WARN_ON_ONCE(!rcu_read_lock_held());

        key_size = map->key_size;

        if (!key)
                goto find_first_elem;

        hash = htab_map_hash(key, key_size, htab->hashrnd);

        head = select_bucket(htab, hash);

        /* lookup the key */
        l = lookup_nulls_elem_raw(head, hash, key, key_size, htab->n_buckets);

        if (!l)
                goto find_first_elem;

        /* key was found, get next key in the same bucket */
        next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_next_rcu(&l->hash_node)),
                                  struct htab_elem, hash_node);

        if (next_l) {
                /* if next elem in this hash list is non-zero, just return it */
                memcpy(next_key, next_l->key, key_size);
                return 0;
        }

        /* no more elements in this hash list, go to the next bucket */
        i = hash & (htab->n_buckets - 1);
        i++;

find_first_elem:
        /* iterate over buckets */
        for (; i < htab->n_buckets; i++) {
                head = select_bucket(htab, i);

                /* pick first element in the bucket */
                next_l = hlist_nulls_entry_safe(rcu_dereference_raw(hlist_nulls_first_rcu(head)),
                                          struct htab_elem, hash_node);
                if (next_l) {
                        /* if it's not empty, just return it */
                        memcpy(next_key, next_l->key, key_size);
                        return 0;
                }
        }

        /* iterated over all buckets and all elements */
        return -ENOENT;
}

static void htab_elem_free(struct bpf_htab *htab, struct htab_elem *l)
{
        if (htab->map.map_type == BPF_MAP_TYPE_PERCPU_HASH)
                free_percpu(htab_elem_get_ptr(l, htab->map.key_size));
        check_and_free_timer(htab, l);
        kfree(l);
}

static void htab_elem_free_rcu(struct rcu_head *head)
{
        struct htab_elem *l = container_of(head, struct htab_elem, rcu);
        struct bpf_htab *htab = l->htab;

        htab_elem_free(htab, l);
}

static void htab_put_fd_value(struct bpf_htab *htab, struct htab_elem *l)
{
        struct bpf_map *map = &htab->map;
        void *ptr;

        if (map->ops->map_fd_put_ptr) {
                ptr = fd_htab_map_get_ptr(map, l);
                map->ops->map_fd_put_ptr(ptr);
        }
}

static void free_htab_elem(struct bpf_htab *htab, struct htab_elem *l)
{
        htab_put_fd_value(htab, l);

        if (htab_is_prealloc(htab)) {
                check_and_free_timer(htab, l);
                __pcpu_freelist_push(&htab->freelist, &l->fnode);
        } else {
                atomic_dec(&htab->count);
                l->htab = htab;
                call_rcu(&l->rcu, htab_elem_free_rcu);
        }
}

static void pcpu_copy_value(struct bpf_htab *htab, void __percpu *pptr,
                            void *value, bool onallcpus)
{
        if (!onallcpus) {
                /* copy the actual value_size bytes */
                memcpy(this_cpu_ptr(pptr), value, htab->map.value_size);
        } else {
                u32 size = round_up(htab->map.value_size, 8);
                int off = 0, cpu;

                for_each_possible_cpu(cpu) {
                        bpf_long_memcpy(per_cpu_ptr(pptr, cpu),
                                        value + off, size);
                        off += size;
                }
        }
}

static void pcpu_init_value(struct bpf_htab *htab, void __percpu *pptr,
                            void *value, bool onallcpus)
{
        /* When using prealloc and not setting the initial value on all cpus,
         * zero-fill element values for other cpus (just as happens when
         * not using prealloc). Otherwise, the bpf program has no way to
         * ensure known initial values for cpus other than the current one
         * (onallcpus is always false when coming from a bpf prog).
         */
        if (htab_is_prealloc(htab) && !onallcpus) {
                u32 size = round_up(htab->map.value_size, 8);
                int current_cpu = raw_smp_processor_id();
                int cpu;

                for_each_possible_cpu(cpu) {
                        if (cpu == current_cpu)
                                bpf_long_memcpy(per_cpu_ptr(pptr, cpu), value,
                                                size);
                        else
                                memset(per_cpu_ptr(pptr, cpu), 0, size);
                }
        } else {
                pcpu_copy_value(htab, pptr, value, onallcpus);
        }
}

static bool fd_htab_map_needs_adjust(const struct bpf_htab *htab)
{
        return htab->map.map_type == BPF_MAP_TYPE_HASH_OF_MAPS &&
               BITS_PER_LONG == 64;
}

static struct htab_elem *alloc_htab_elem(struct bpf_htab *htab, void *key,
                                         void *value, u32 key_size, u32 hash,
                                         bool percpu, bool onallcpus,
                                         struct htab_elem *old_elem)
{
        u32 size = htab->map.value_size;
        bool prealloc = htab_is_prealloc(htab);
        struct htab_elem *l_new, **pl_new;
        void __percpu *pptr;

        if (prealloc) {
                if (old_elem) {
                        /* if we're updating the existing element,
                         * use per-cpu extra elems to avoid freelist_pop/push
                         */
                        pl_new = this_cpu_ptr(htab->extra_elems);
                        l_new = *pl_new;
                        htab_put_fd_value(htab, old_elem);
                        *pl_new = old_elem;
                } else {
                        struct pcpu_freelist_node *l;

                        l = __pcpu_freelist_pop(&htab->freelist);
                        if (!l)
                                return ERR_PTR(-E2BIG);
                        l_new = container_of(l, struct htab_elem, fnode);
                }
        } else {
                if (atomic_inc_return(&htab->count) > htab->map.max_entries)
                        if (!old_elem) {
                                /* When the map is full and update() is
                                 * replacing an old element, it's ok to
                                 * allocate, since the old element will be
                                 * freed immediately. Otherwise return an
                                 * error.
                                 */
                                l_new = ERR_PTR(-E2BIG);
                                goto dec_count;
                        }
                l_new = bpf_map_kmalloc_node(&htab->map, htab->elem_size,
                                             GFP_ATOMIC | __GFP_NOWARN,
                                             htab->map.numa_node);
                if (!l_new) {
                        l_new = ERR_PTR(-ENOMEM);
                        goto dec_count;
                }
                check_and_init_map_value(&htab->map,
                                         l_new->key + round_up(key_size, 8));
        }

        memcpy(l_new->key, key, key_size);
        if (percpu) {
                size = round_up(size, 8);
                if (prealloc) {
                        pptr = htab_elem_get_ptr(l_new, key_size);
                } else {
                        /* alloc_percpu zero-fills */
                        pptr = bpf_map_alloc_percpu(&htab->map, size, 8,
                                                    GFP_ATOMIC | __GFP_NOWARN);
                        if (!pptr) {
                                kfree(l_new);
                                l_new = ERR_PTR(-ENOMEM);
                                goto dec_count;
                        }
                }

                pcpu_init_value(htab, pptr, value, onallcpus);

                if (!prealloc)
                        htab_elem_set_ptr(l_new, key_size, pptr);
        } else if (fd_htab_map_needs_adjust(htab)) {
                size = round_up(size, 8);
                memcpy(l_new->key + round_up(key_size, 8), value, size);
        } else {
                copy_map_value(&htab->map,
                               l_new->key + round_up(key_size, 8),
                               value);
        }

        l_new->hash = hash;
        return l_new;
dec_count:
        atomic_dec(&htab->count);
        return l_new;
}

static int check_flags(struct bpf_htab *htab, struct htab_elem *l_old,
                       u64 map_flags)
{
        if (l_old && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
                /* elem already exists */
                return -EEXIST;

        if (!l_old && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
                /* elem doesn't exist, cannot update it */
                return -ENOENT;

        return 0;
}
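
/* Update-flag semantics enforced above, with l_old being the result of
 * the bucket lookup:
 *
 *      BPF_ANY     - create or replace; never fails here
 *      BPF_NOEXIST - create only; -EEXIST if the key is present
 *      BPF_EXIST   - replace only; -ENOENT if the key is absent
 */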

/* Called from syscall or from eBPF program */
static int htab_map_update_elem(struct bpf_map *map, void *key, void *value,
                                u64 map_flags)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);

        b = __select_bucket(htab, hash);
        head = &b->head;

        if (unlikely(map_flags & BPF_F_LOCK)) {
                if (unlikely(!map_value_has_spin_lock(map)))
                        return -EINVAL;
                /* find an element without taking the bucket lock */
                l_old = lookup_nulls_elem_raw(head, hash, key, key_size,
                                              htab->n_buckets);
                ret = check_flags(htab, l_old, map_flags);
                if (ret)
                        return ret;
                if (l_old) {
                        /* grab the element lock and update value in place */
                        copy_map_value_locked(map,
                                              l_old->key + round_up(key_size, 8),
                                              value, false);
                        return 0;
                }
                /* fall through, grab the bucket lock and lookup again.
                 * 99.9% chance that the element won't be found,
                 * but the second lookup under the lock has to be done.
                 */
        }

        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
                return ret;

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        if (unlikely(l_old && (map_flags & BPF_F_LOCK))) {
                /* the first lookup without the bucket lock didn't find the
                 * element, but the second lookup with the bucket lock found it.
                 * This case is highly unlikely, but has to be dealt with:
                 * grab the element lock in addition to the bucket lock
                 * and update the element in place.
                 */
                copy_map_value_locked(map,
                                      l_old->key + round_up(key_size, 8),
                                      value, false);
                ret = 0;
                goto err;
        }

        l_new = alloc_htab_elem(htab, key, value, key_size, hash, false, false,
                                l_old);
        if (IS_ERR(l_new)) {
                /* all pre-allocated elements are in use or memory exhausted */
                ret = PTR_ERR(l_new);
                goto err;
        }

        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
         */
        hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
                hlist_nulls_del_rcu(&l_old->hash_node);
                if (!htab_is_prealloc(htab))
                        free_htab_elem(htab, l_old);
                else
                        check_and_free_timer(htab, l_old);
        }
        ret = 0;
err:
        htab_unlock_bucket(htab, b, hash, flags);
        return ret;
}
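
/* Note on the insert-before-delete order above: a concurrent RCU lookup
 * walking the bucket sees either the new element (added at the head) or
 * the old one; at no point during the replacement does the key appear
 * to be missing from the map.
 */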

static void htab_lru_push_free(struct bpf_htab *htab, struct htab_elem *elem)
{
        check_and_free_timer(htab, elem);
        bpf_lru_push_free(&htab->lru, &elem->lru_node);
}

static int htab_lru_map_update_elem(struct bpf_map *map, void *key, void *value,
                                    u64 map_flags)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new, *l_old = NULL;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);

        b = __select_bucket(htab, hash);
        head = &b->head;

        /* For LRU, we need to alloc before taking bucket's
         * spinlock because getting free nodes from LRU may need
         * to remove older elements from htab and this removal
         * operation will need a bucket lock.
         */
        l_new = prealloc_lru_pop(htab, key, hash);
        if (!l_new)
                return -ENOMEM;
        copy_map_value(&htab->map,
                       l_new->key + round_up(map->key_size, 8), value);

        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
                goto err_lock_bucket;

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        /* add new element to the head of the list, so that
         * concurrent search will find it before old elem
         */
        hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        if (l_old) {
                bpf_lru_node_set_ref(&l_new->lru_node);
                hlist_nulls_del_rcu(&l_old->hash_node);
        }
        ret = 0;

err:
        htab_unlock_bucket(htab, b, hash, flags);

err_lock_bucket:
        if (ret)
                htab_lru_push_free(htab, l_new);
        else if (l_old)
                htab_lru_push_free(htab, l_old);

        return ret;
}

static int __htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                         void *value, u64 map_flags,
                                         bool onallcpus)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);

        b = __select_bucket(htab, hash);
        head = &b->head;

        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
                return ret;

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        if (l_old) {
                /* per-cpu hash map can update value in-place */
                pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
                                value, onallcpus);
        } else {
                l_new = alloc_htab_elem(htab, key, value, key_size,
                                        hash, true, onallcpus, NULL);
                if (IS_ERR(l_new)) {
                        ret = PTR_ERR(l_new);
                        goto err;
                }
                hlist_nulls_add_head_rcu(&l_new->hash_node, head);
        }
        ret = 0;
err:
        htab_unlock_bucket(htab, b, hash, flags);
        return ret;
}

static int __htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
                                             void *value, u64 map_flags,
                                             bool onallcpus)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct htab_elem *l_new = NULL, *l_old;
        struct hlist_nulls_head *head;
        unsigned long flags;
        struct bucket *b;
        u32 key_size, hash;
        int ret;

        if (unlikely(map_flags > BPF_EXIST))
                /* unknown flags */
                return -EINVAL;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);

        b = __select_bucket(htab, hash);
        head = &b->head;

        /* For LRU, we need to alloc before taking bucket's
         * spinlock because LRU's elem alloc may need
         * to remove older elem from htab and this removal
         * operation will need a bucket lock.
         */
        if (map_flags != BPF_EXIST) {
                l_new = prealloc_lru_pop(htab, key, hash);
                if (!l_new)
                        return -ENOMEM;
        }

        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
                goto err_lock_bucket;

        l_old = lookup_elem_raw(head, hash, key, key_size);

        ret = check_flags(htab, l_old, map_flags);
        if (ret)
                goto err;

        if (l_old) {
                bpf_lru_node_set_ref(&l_old->lru_node);

                /* per-cpu hash map can update value in-place */
                pcpu_copy_value(htab, htab_elem_get_ptr(l_old, key_size),
                                value, onallcpus);
        } else {
                pcpu_init_value(htab, htab_elem_get_ptr(l_new, key_size),
                                value, onallcpus);
                hlist_nulls_add_head_rcu(&l_new->hash_node, head);
                l_new = NULL;
        }
        ret = 0;
err:
        htab_unlock_bucket(htab, b, hash, flags);
err_lock_bucket:
        if (l_new)
                bpf_lru_push_free(&htab->lru, &l_new->lru_node);
        return ret;
}

static int htab_percpu_map_update_elem(struct bpf_map *map, void *key,
                                       void *value, u64 map_flags)
{
        return __htab_percpu_map_update_elem(map, key, value, map_flags, false);
}

static int htab_lru_percpu_map_update_elem(struct bpf_map *map, void *key,
                                           void *value, u64 map_flags)
{
        return __htab_lru_percpu_map_update_elem(map, key, value, map_flags,
                                                 false);
}

/* Called from syscall or from eBPF program */
static int htab_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct bucket *b;
        struct htab_elem *l;
        unsigned long flags;
        u32 hash, key_size;
        int ret;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);
        b = __select_bucket(htab, hash);
        head = &b->head;

        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
                return ret;

        l = lookup_elem_raw(head, hash, key, key_size);

        if (l) {
                hlist_nulls_del_rcu(&l->hash_node);
                free_htab_elem(htab, l);
        } else {
                ret = -ENOENT;
        }

        htab_unlock_bucket(htab, b, hash, flags);
        return ret;
}

static int htab_lru_map_delete_elem(struct bpf_map *map, void *key)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
        struct hlist_nulls_head *head;
        struct bucket *b;
        struct htab_elem *l;
        unsigned long flags;
        u32 hash, key_size;
        int ret;

        WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_trace_held() &&
                     !rcu_read_lock_bh_held());

        key_size = map->key_size;

        hash = htab_map_hash(key, key_size, htab->hashrnd);
        b = __select_bucket(htab, hash);
        head = &b->head;

        ret = htab_lock_bucket(htab, b, hash, &flags);
        if (ret)
                return ret;

        l = lookup_elem_raw(head, hash, key, key_size);

        if (l)
                hlist_nulls_del_rcu(&l->hash_node);
        else
                ret = -ENOENT;

        htab_unlock_bucket(htab, b, hash, flags);
        if (l)
                htab_lru_push_free(htab, l);
        return ret;
}

static void delete_all_elements(struct bpf_htab *htab)
{
        int i;

        for (i = 0; i < htab->n_buckets; i++) {
                struct hlist_nulls_head *head = select_bucket(htab, i);
                struct hlist_nulls_node *n;
                struct htab_elem *l;

                hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
                        hlist_nulls_del_rcu(&l->hash_node);
                        htab_elem_free(htab, l);
                }
        }
}

static void htab_free_malloced_timers(struct bpf_htab *htab)
{
        int i;

        rcu_read_lock();
        for (i = 0; i < htab->n_buckets; i++) {
                struct hlist_nulls_head *head = select_bucket(htab, i);
                struct hlist_nulls_node *n;
                struct htab_elem *l;

                hlist_nulls_for_each_entry(l, n, head, hash_node)
                        check_and_free_timer(htab, l);
                cond_resched_rcu();
        }
        rcu_read_unlock();
}

static void htab_map_free_timers(struct bpf_map *map)
{
        struct bpf_htab *htab = container_of(map, struct bpf_htab, map);

        if (likely(!map_value_has_timer(&htab->map)))
                return;
        if (!htab_is_prealloc(htab))
                htab_free_malloced_timers(htab);
        else
                htab_free_prealloced_timers(htab);
}
1452 /* Called when map->refcnt goes to zero, either from workqueue or from syscall */
1453 static void htab_map_free(struct bpf_map *map)
1454 {
1455         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1456         int i;
1457
1458         /* bpf_free_used_maps() or close(map_fd) will trigger this map_free callback.
1459          * bpf_free_used_maps() is called after the bpf prog is no longer executing.
1460          * There is no need to synchronize_rcu() here to protect map elements.
1461          */
1462
1463         /* Some of the free_htab_elem() callbacks for the elements of this
1464          * map may not have executed yet. Wait for them.
1465          */
1466         rcu_barrier();
1467         if (!htab_is_prealloc(htab))
1468                 delete_all_elements(htab);
1469         else
1470                 prealloc_destroy(htab);
1471
1472         free_percpu(htab->extra_elems);
1473         bpf_map_area_free(htab->buckets);
1474         for (i = 0; i < HASHTAB_MAP_LOCK_COUNT; i++)
1475                 free_percpu(htab->map_locked[i]);
1476         lockdep_unregister_key(&htab->lockdep_key);
1477         kfree(htab);
1478 }
1479
1480 static void htab_map_seq_show_elem(struct bpf_map *map, void *key,
1481                                    struct seq_file *m)
1482 {
1483         void *value;
1484
1485         rcu_read_lock();
1486
1487         value = htab_map_lookup_elem(map, key);
1488         if (!value) {
1489                 rcu_read_unlock();
1490                 return;
1491         }
1492
1493         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
1494         seq_puts(m, ": ");
1495         btf_type_seq_show(map->btf, map->btf_value_type_id, value, m);
1496         seq_puts(m, "\n");
1497
1498         rcu_read_unlock();
1499 }
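/*
 * Illustrative example (assumptions: a BTF-typed map with u32 keys and u64
 * values): each element is rendered as "<key>: <value>" on its own line, so
 * reading a map pinned in bpffs would show e.g.:
 *
 *	1: 42
 */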
1500
1501 static int __htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1502                                              void *value, bool is_lru_map,
1503                                              bool is_percpu, u64 flags)
1504 {
1505         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1506         struct hlist_nulls_head *head;
1507         unsigned long bflags;
1508         struct htab_elem *l;
1509         u32 hash, key_size;
1510         struct bucket *b;
1511         int ret;
1512
1513         key_size = map->key_size;
1514
1515         hash = htab_map_hash(key, key_size, htab->hashrnd);
1516         b = __select_bucket(htab, hash);
1517         head = &b->head;
1518
1519         ret = htab_lock_bucket(htab, b, hash, &bflags);
1520         if (ret)
1521                 return ret;
1522
1523         l = lookup_elem_raw(head, hash, key, key_size);
1524         if (!l) {
1525                 ret = -ENOENT;
1526         } else {
1527                 if (is_percpu) {
1528                         u32 roundup_value_size = round_up(map->value_size, 8);
1529                         void __percpu *pptr;
1530                         int off = 0, cpu;
1531
1532                         pptr = htab_elem_get_ptr(l, key_size);
1533                         for_each_possible_cpu(cpu) {
1534                                 bpf_long_memcpy(value + off,
1535                                                 per_cpu_ptr(pptr, cpu),
1536                                                 roundup_value_size);
1537                                 off += roundup_value_size;
1538                         }
1539                 } else {
1540                         u32 roundup_key_size = round_up(map->key_size, 8);
1541
1542                         if (flags & BPF_F_LOCK)
1543                                 copy_map_value_locked(map, value, l->key +
1544                                                       roundup_key_size,
1545                                                       true);
1546                         else
1547                                 copy_map_value(map, value, l->key +
1548                                                roundup_key_size);
1549                         check_and_init_map_value(map, value);
1550                 }
1551
1552                 hlist_nulls_del_rcu(&l->hash_node);
1553                 if (!is_lru_map)
1554                         free_htab_elem(htab, l);
1555         }
1556
1557         htab_unlock_bucket(htab, b, hash, bflags);
1558
1559         if (is_lru_map && l)
1560                 htab_lru_push_free(htab, l);
1561
1562         return ret;
1563 }
1564
1565 static int htab_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1566                                            void *value, u64 flags)
1567 {
1568         return __htab_map_lookup_and_delete_elem(map, key, value, false, false,
1569                                                  flags);
1570 }
1571
1572 static int htab_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1573                                                   void *key, void *value,
1574                                                   u64 flags)
1575 {
1576         return __htab_map_lookup_and_delete_elem(map, key, value, false, true,
1577                                                  flags);
1578 }
1579
1580 static int htab_lru_map_lookup_and_delete_elem(struct bpf_map *map, void *key,
1581                                                void *value, u64 flags)
1582 {
1583         return __htab_map_lookup_and_delete_elem(map, key, value, true, false,
1584                                                  flags);
1585 }
1586
1587 static int htab_lru_percpu_map_lookup_and_delete_elem(struct bpf_map *map,
1588                                                       void *key, void *value,
1589                                                       u64 flags)
1590 {
1591         return __htab_map_lookup_and_delete_elem(map, key, value, true, true,
1592                                                  flags);
1593 }
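/*
 * Minimal userspace sketch (illustrative, not part of this file; assumes
 * libbpf, a hash map with 4-byte keys and 8-byte values, and "map_fd" as a
 * placeholder for its file descriptor): BPF_MAP_LOOKUP_AND_DELETE_ELEM pops
 * one entry atomically with respect to the bucket lock taken above.
 *
 *	__u32 key = 42;
 *	__u64 value;
 *
 *	if (!bpf_map_lookup_and_delete_elem(map_fd, &key, &value))
 *		use(value);	// "use()" is a placeholder
 *
 * For the percpu variants, "value" must point to a buffer of at least
 * round_up(value_size, 8) * num_possible_cpus() bytes, filled one CPU slot
 * at a time as done in __htab_map_lookup_and_delete_elem().
 */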
1594
1595 static int
1596 __htab_map_lookup_and_delete_batch(struct bpf_map *map,
1597                                    const union bpf_attr *attr,
1598                                    union bpf_attr __user *uattr,
1599                                    bool do_delete, bool is_lru_map,
1600                                    bool is_percpu)
1601 {
1602         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
1603         u32 bucket_cnt, total, key_size, value_size, roundup_key_size;
1604         void *keys = NULL, *values = NULL, *value, *dst_key, *dst_val;
1605         void __user *uvalues = u64_to_user_ptr(attr->batch.values);
1606         void __user *ukeys = u64_to_user_ptr(attr->batch.keys);
1607         void __user *ubatch = u64_to_user_ptr(attr->batch.in_batch);
1608         u32 batch, max_count, size, bucket_size;
1609         struct htab_elem *node_to_free = NULL;
1610         u64 elem_map_flags, map_flags;
1611         struct hlist_nulls_head *head;
1612         struct hlist_nulls_node *n;
1613         unsigned long flags = 0;
1614         bool locked = false;
1615         struct htab_elem *l;
1616         struct bucket *b;
1617         int ret = 0;
1618
1619         elem_map_flags = attr->batch.elem_flags;
1620         if ((elem_map_flags & ~BPF_F_LOCK) ||
1621             ((elem_map_flags & BPF_F_LOCK) && !map_value_has_spin_lock(map)))
1622                 return -EINVAL;
1623
1624         map_flags = attr->batch.flags;
1625         if (map_flags)
1626                 return -EINVAL;
1627
1628         max_count = attr->batch.count;
1629         if (!max_count)
1630                 return 0;
1631
1632         if (put_user(0, &uattr->batch.count))
1633                 return -EFAULT;
1634
1635         batch = 0;
1636         if (ubatch && copy_from_user(&batch, ubatch, sizeof(batch)))
1637                 return -EFAULT;
1638
1639         if (batch >= htab->n_buckets)
1640                 return -ENOENT;
1641
1642         key_size = htab->map.key_size;
1643         roundup_key_size = round_up(htab->map.key_size, 8);
1644         value_size = htab->map.value_size;
1645         size = round_up(value_size, 8);
1646         if (is_percpu)
1647                 value_size = size * num_possible_cpus();
1648         total = 0;
1649         /* While experimenting with hash tables with sizes ranging from 10 to
1650          * 1000, it was observed that a bucket can have up to 5 entries.
1651          */
1652         bucket_size = 5;
1653
1654 alloc:
1655         /* We cannot do copy_from_user() or copy_to_user() inside
1656          * the RCU read-side critical section. Allocate enough space here.
1657          */
1658         keys = kvmalloc_array(key_size, bucket_size, GFP_USER | __GFP_NOWARN);
1659         values = kvmalloc_array(value_size, bucket_size, GFP_USER | __GFP_NOWARN);
1660         if (!keys || !values) {
1661                 ret = -ENOMEM;
1662                 goto after_loop;
1663         }
1664
1665 again:
1666         bpf_disable_instrumentation();
1667         rcu_read_lock();
1668 again_nocopy:
1669         dst_key = keys;
1670         dst_val = values;
1671         b = &htab->buckets[batch];
1672         head = &b->head;
1673         /* Do not grab the lock unless we need it (bucket_cnt > 0). */
1674         if (locked) {
1675                 ret = htab_lock_bucket(htab, b, batch, &flags);
1676                 if (ret) {
1677                         rcu_read_unlock();
1678                         bpf_enable_instrumentation();
1679                         goto after_loop;
1680                 }
1681         }
1682
1683         bucket_cnt = 0;
1684         hlist_nulls_for_each_entry_rcu(l, n, head, hash_node)
1685                 bucket_cnt++;
1686
1687         if (bucket_cnt && !locked) {
1688                 locked = true;
1689                 goto again_nocopy;
1690         }
1691
1692         if (bucket_cnt > (max_count - total)) {
1693                 if (total == 0)
1694                         ret = -ENOSPC;
1695                 /* Note that since bucket_cnt > 0 here, the lock must
1696                  * have been grabbed, so release it.
1697                  */
1698                 htab_unlock_bucket(htab, b, batch, flags);
1699                 rcu_read_unlock();
1700                 bpf_enable_instrumentation();
1701                 goto after_loop;
1702         }
1703
1704         if (bucket_cnt > bucket_size) {
1705                 bucket_size = bucket_cnt;
1706                 /* Note that since bucket_cnt > 0 here, the lock must
1707                  * have been grabbed, so release it.
1708                  */
1709                 htab_unlock_bucket(htab, b, batch, flags);
1710                 rcu_read_unlock();
1711                 bpf_enable_instrumentation();
1712                 kvfree(keys);
1713                 kvfree(values);
1714                 goto alloc;
1715         }
1716
1717         /* The next block is only safe to run with the bucket lock held */
1718         if (!locked)
1719                 goto next_batch;
1720
1721         hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
1722                 memcpy(dst_key, l->key, key_size);
1723
1724                 if (is_percpu) {
1725                         int off = 0, cpu;
1726                         void __percpu *pptr;
1727
1728                         pptr = htab_elem_get_ptr(l, map->key_size);
1729                         for_each_possible_cpu(cpu) {
1730                                 bpf_long_memcpy(dst_val + off,
1731                                                 per_cpu_ptr(pptr, cpu), size);
1732                                 off += size;
1733                         }
1734                 } else {
1735                         value = l->key + roundup_key_size;
1736                         if (elem_map_flags & BPF_F_LOCK)
1737                                 copy_map_value_locked(map, dst_val, value,
1738                                                       true);
1739                         else
1740                                 copy_map_value(map, dst_val, value);
1741                         check_and_init_map_value(map, dst_val);
1742                 }
1743                 if (do_delete) {
1744                         hlist_nulls_del_rcu(&l->hash_node);
1745
1746                         /* bpf_lru_push_free() will acquire lru_lock, which
1747                          * may cause deadlock. See comments in function
1748                          * prealloc_lru_pop(). Let us do bpf_lru_push_free()
1749                          * after releasing the bucket lock.
1750                          */
1751                         if (is_lru_map) {
1752                                 l->batch_flink = node_to_free;
1753                                 node_to_free = l;
1754                         } else {
1755                                 free_htab_elem(htab, l);
1756                         }
1757                 }
1758                 dst_key += key_size;
1759                 dst_val += value_size;
1760         }
1761
1762         htab_unlock_bucket(htab, b, batch, flags);
1763         locked = false;
1764
1765         while (node_to_free) {
1766                 l = node_to_free;
1767                 node_to_free = node_to_free->batch_flink;
1768                 htab_lru_push_free(htab, l);
1769         }
1770
1771 next_batch:
1772         /* If we are not copying data, we can go to the next bucket and
1773          * avoid releasing the RCU read lock.
1774          */
1775         if (!bucket_cnt && (batch + 1 < htab->n_buckets)) {
1776                 batch++;
1777                 goto again_nocopy;
1778         }
1779
1780         rcu_read_unlock();
1781         bpf_enable_instrumentation();
1782         if (bucket_cnt && (copy_to_user(ukeys + total * key_size, keys,
1783             key_size * bucket_cnt) ||
1784             copy_to_user(uvalues + total * value_size, values,
1785             value_size * bucket_cnt))) {
1786                 ret = -EFAULT;
1787                 goto after_loop;
1788         }
1789
1790         total += bucket_cnt;
1791         batch++;
1792         if (batch >= htab->n_buckets) {
1793                 ret = -ENOENT;
1794                 goto after_loop;
1795         }
1796         goto again;
1797
1798 after_loop:
1799         if (ret == -EFAULT)
1800                 goto out;
1801
1802         /* copy # of entries and next batch */
1803         ubatch = u64_to_user_ptr(attr->batch.out_batch);
1804         if (copy_to_user(ubatch, &batch, sizeof(batch)) ||
1805             put_user(total, &uattr->batch.count))
1806                 ret = -EFAULT;
1807
1808 out:
1809         kvfree(keys);
1810         kvfree(values);
1811         return ret;
1812 }
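/*
 * Minimal userspace sketch of the batch protocol implemented above
 * (illustrative; assumes libbpf, with "map_fd", the array sizes and
 * "handle()" as placeholders). The out_batch cookie returned by one call is
 * fed back as in_batch of the next; a NULL in_batch starts from bucket 0;
 * ENOENT signals that the last bucket has been visited, with any entries
 * copied on that final call still reported via "count":
 *
 *	__u32 out_batch, count;
 *	__u32 keys[64];
 *	__u64 vals[64];
 *	void *in = NULL;
 *	int err;
 *
 *	do {
 *		count = 64;
 *		err = bpf_map_lookup_and_delete_batch(map_fd, in, &out_batch,
 *						      keys, vals, &count, NULL);
 *		handle(keys, vals, count);
 *		in = &out_batch;
 *	} while (!err);
 */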
1813
1814 static int
1815 htab_percpu_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1816                              union bpf_attr __user *uattr)
1817 {
1818         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1819                                                   false, true);
1820 }
1821
1822 static int
1823 htab_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1824                                         const union bpf_attr *attr,
1825                                         union bpf_attr __user *uattr)
1826 {
1827         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1828                                                   false, true);
1829 }
1830
1831 static int
1832 htab_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1833                       union bpf_attr __user *uattr)
1834 {
1835         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1836                                                   false, false);
1837 }
1838
1839 static int
1840 htab_map_lookup_and_delete_batch(struct bpf_map *map,
1841                                  const union bpf_attr *attr,
1842                                  union bpf_attr __user *uattr)
1843 {
1844         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1845                                                   false, false);
1846 }
1847
1848 static int
1849 htab_lru_percpu_map_lookup_batch(struct bpf_map *map,
1850                                  const union bpf_attr *attr,
1851                                  union bpf_attr __user *uattr)
1852 {
1853         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1854                                                   true, true);
1855 }
1856
1857 static int
1858 htab_lru_percpu_map_lookup_and_delete_batch(struct bpf_map *map,
1859                                             const union bpf_attr *attr,
1860                                             union bpf_attr __user *uattr)
1861 {
1862         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1863                                                   true, true);
1864 }
1865
1866 static int
1867 htab_lru_map_lookup_batch(struct bpf_map *map, const union bpf_attr *attr,
1868                           union bpf_attr __user *uattr)
1869 {
1870         return __htab_map_lookup_and_delete_batch(map, attr, uattr, false,
1871                                                   true, false);
1872 }
1873
1874 static int
1875 htab_lru_map_lookup_and_delete_batch(struct bpf_map *map,
1876                                      const union bpf_attr *attr,
1877                                      union bpf_attr __user *uattr)
1878 {
1879         return __htab_map_lookup_and_delete_batch(map, attr, uattr, true,
1880                                                   true, false);
1881 }
1882
1883 struct bpf_iter_seq_hash_map_info {
1884         struct bpf_map *map;
1885         struct bpf_htab *htab;
1886         void *percpu_value_buf; /* non-NULL means percpu hash */
1887         u32 bucket_id;
1888         u32 skip_elems;
1889 };
1890
1891 static struct htab_elem *
1892 bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info,
1893                            struct htab_elem *prev_elem)
1894 {
1895         const struct bpf_htab *htab = info->htab;
1896         u32 skip_elems = info->skip_elems;
1897         u32 bucket_id = info->bucket_id;
1898         struct hlist_nulls_head *head;
1899         struct hlist_nulls_node *n;
1900         struct htab_elem *elem;
1901         struct bucket *b;
1902         u32 i, count;
1903
1904         if (bucket_id >= htab->n_buckets)
1905                 return NULL;
1906
1907         /* try to find next elem in the same bucket */
1908         if (prev_elem) {
1909                 /* If there was no update/deletion on this bucket, prev_elem
1910                  * should still be valid and we won't skip elements.
1911                  */
1912                 n = rcu_dereference_raw(hlist_nulls_next_rcu(&prev_elem->hash_node));
1913                 elem = hlist_nulls_entry_safe(n, struct htab_elem, hash_node);
1914                 if (elem)
1915                         return elem;
1916
1917                 /* not found, unlock and go to the next bucket */
1918                 b = &htab->buckets[bucket_id++];
1919                 rcu_read_unlock();
1920                 skip_elems = 0;
1921         }
1922
1923         for (i = bucket_id; i < htab->n_buckets; i++) {
1924                 b = &htab->buckets[i];
1925                 rcu_read_lock();
1926
1927                 count = 0;
1928                 head = &b->head;
1929                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
1930                         if (count >= skip_elems) {
1931                                 info->bucket_id = i;
1932                                 info->skip_elems = count;
1933                                 return elem;
1934                         }
1935                         count++;
1936                 }
1937
1938                 rcu_read_unlock();
1939                 skip_elems = 0;
1940         }
1941
1942         info->bucket_id = i;
1943         info->skip_elems = 0;
1944         return NULL;
1945 }
1946
1947 static void *bpf_hash_map_seq_start(struct seq_file *seq, loff_t *pos)
1948 {
1949         struct bpf_iter_seq_hash_map_info *info = seq->private;
1950         struct htab_elem *elem;
1951
1952         elem = bpf_hash_map_seq_find_next(info, NULL);
1953         if (!elem)
1954                 return NULL;
1955
1956         if (*pos == 0)
1957                 ++*pos;
1958         return elem;
1959 }
1960
1961 static void *bpf_hash_map_seq_next(struct seq_file *seq, void *v, loff_t *pos)
1962 {
1963         struct bpf_iter_seq_hash_map_info *info = seq->private;
1964
1965         ++*pos;
1966         ++info->skip_elems;
1967         return bpf_hash_map_seq_find_next(info, v);
1968 }
1969
1970 static int __bpf_hash_map_seq_show(struct seq_file *seq, struct htab_elem *elem)
1971 {
1972         struct bpf_iter_seq_hash_map_info *info = seq->private;
1973         u32 roundup_key_size, roundup_value_size;
1974         struct bpf_iter__bpf_map_elem ctx = {};
1975         struct bpf_map *map = info->map;
1976         struct bpf_iter_meta meta;
1977         int ret = 0, off = 0, cpu;
1978         struct bpf_prog *prog;
1979         void __percpu *pptr;
1980
1981         meta.seq = seq;
1982         prog = bpf_iter_get_info(&meta, elem == NULL);
1983         if (prog) {
1984                 ctx.meta = &meta;
1985                 ctx.map = info->map;
1986                 if (elem) {
1987                         roundup_key_size = round_up(map->key_size, 8);
1988                         ctx.key = elem->key;
1989                         if (!info->percpu_value_buf) {
1990                                 ctx.value = elem->key + roundup_key_size;
1991                         } else {
1992                                 roundup_value_size = round_up(map->value_size, 8);
1993                                 pptr = htab_elem_get_ptr(elem, map->key_size);
1994                                 for_each_possible_cpu(cpu) {
1995                                         bpf_long_memcpy(info->percpu_value_buf + off,
1996                                                         per_cpu_ptr(pptr, cpu),
1997                                                         roundup_value_size);
1998                                         off += roundup_value_size;
1999                                 }
2000                                 ctx.value = info->percpu_value_buf;
2001                         }
2002                 }
2003                 ret = bpf_iter_run_prog(prog, &ctx);
2004         }
2005
2006         return ret;
2007 }
2008
2009 static int bpf_hash_map_seq_show(struct seq_file *seq, void *v)
2010 {
2011         return __bpf_hash_map_seq_show(seq, v);
2012 }
2013
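/*
 * stop() is called with v == NULL once the walk has finished: give the
 * iterator program its final callback with a NULL element. Otherwise the
 * walk was interrupted and we still hold the rcu_read_lock() taken in
 * bpf_hash_map_seq_find_next(), so drop it.
 */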
2014 static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v)
2015 {
2016         if (!v)
2017                 (void)__bpf_hash_map_seq_show(seq, NULL);
2018         else
2019                 rcu_read_unlock();
2020 }
2021
2022 static int bpf_iter_init_hash_map(void *priv_data,
2023                                   struct bpf_iter_aux_info *aux)
2024 {
2025         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2026         struct bpf_map *map = aux->map;
2027         void *value_buf;
2028         u32 buf_size;
2029
2030         if (map->map_type == BPF_MAP_TYPE_PERCPU_HASH ||
2031             map->map_type == BPF_MAP_TYPE_LRU_PERCPU_HASH) {
2032                 buf_size = round_up(map->value_size, 8) * num_possible_cpus();
2033                 value_buf = kmalloc(buf_size, GFP_USER | __GFP_NOWARN);
2034                 if (!value_buf)
2035                         return -ENOMEM;
2036
2037                 seq_info->percpu_value_buf = value_buf;
2038         }
2039
2040         bpf_map_inc_with_uref(map);
2041         seq_info->map = map;
2042         seq_info->htab = container_of(map, struct bpf_htab, map);
2043         return 0;
2044 }
2045
2046 static void bpf_iter_fini_hash_map(void *priv_data)
2047 {
2048         struct bpf_iter_seq_hash_map_info *seq_info = priv_data;
2049
2050         bpf_map_put_with_uref(seq_info->map);
2051         kfree(seq_info->percpu_value_buf);
2052 }
2053
2054 static const struct seq_operations bpf_hash_map_seq_ops = {
2055         .start  = bpf_hash_map_seq_start,
2056         .next   = bpf_hash_map_seq_next,
2057         .stop   = bpf_hash_map_seq_stop,
2058         .show   = bpf_hash_map_seq_show,
2059 };
2060
2061 static const struct bpf_iter_seq_info iter_seq_info = {
2062         .seq_ops                = &bpf_hash_map_seq_ops,
2063         .init_seq_private       = bpf_iter_init_hash_map,
2064         .fini_seq_private       = bpf_iter_fini_hash_map,
2065         .seq_priv_size          = sizeof(struct bpf_iter_seq_hash_map_info),
2066 };
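/*
 * The seq_info above backs "iter/bpf_map_elem" iterators. A minimal
 * BPF-side sketch (illustrative; assumes a map with u32 keys and u64 values
 * and the usual libbpf helper headers):
 *
 *	SEC("iter/bpf_map_elem")
 *	int dump_elem(struct bpf_iter__bpf_map_elem *ctx)
 *	{
 *		__u32 *key = ctx->key;
 *		__u64 *val = ctx->value;
 *
 *		if (!key)
 *			return 0;	// final call, see seq_stop above
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "%u: %llu\n", *key, *val);
 *		return 0;
 *	}
 *
 * For percpu maps ctx->value points at percpu_value_buf, i.e. one
 * round_up(value_size, 8) slot per possible CPU.
 */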
2067
2068 static int bpf_for_each_hash_elem(struct bpf_map *map, void *callback_fn,
2069                                   void *callback_ctx, u64 flags)
2070 {
2071         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2072         struct hlist_nulls_head *head;
2073         struct hlist_nulls_node *n;
2074         struct htab_elem *elem;
2075         u32 roundup_key_size;
2076         int i, num_elems = 0;
2077         void __percpu *pptr;
2078         struct bucket *b;
2079         void *key, *val;
2080         bool is_percpu;
2081         u64 ret = 0;
2082
2083         if (flags != 0)
2084                 return -EINVAL;
2085
2086         is_percpu = htab_is_percpu(htab);
2087
2088         roundup_key_size = round_up(map->key_size, 8);
2089         /* Disable migration so the percpu value prepared here is the same
2090          * one seen by the bpf program via bpf_map_lookup_elem().
2091          */
2092         if (is_percpu)
2093                 migrate_disable();
2094         for (i = 0; i < htab->n_buckets; i++) {
2095                 b = &htab->buckets[i];
2096                 rcu_read_lock();
2097                 head = &b->head;
2098                 hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) {
2099                         key = elem->key;
2100                         if (is_percpu) {
2101                                 /* current CPU's value for a percpu map */
2102                                 pptr = htab_elem_get_ptr(elem, map->key_size);
2103                                 val = this_cpu_ptr(pptr);
2104                         } else {
2105                                 val = elem->key + roundup_key_size;
2106                         }
2107                         num_elems++;
2108                         ret = BPF_CAST_CALL(callback_fn)((u64)(long)map,
2109                                         (u64)(long)key, (u64)(long)val,
2110                                         (u64)(long)callback_ctx, 0);
2111                         /* return value: 0 - continue, 1 - stop and return */
2112                         if (ret) {
2113                                 rcu_read_unlock();
2114                                 goto out;
2115                         }
2116                 }
2117                 rcu_read_unlock();
2118         }
2119 out:
2120         if (is_percpu)
2121                 migrate_enable();
2122         return num_elems;
2123 }
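/*
 * This implements the bpf_for_each_map_elem() helper for hash maps. A
 * BPF-side sketch of the callback contract (illustrative; the key/value
 * types and "my_hash" must match an actual map definition):
 *
 *	static __u64 count_big(struct bpf_map *map, __u32 *key, __u64 *val,
 *			       void *ctx)
 *	{
 *		if (*val > 100)
 *			(*(long *)ctx)++;
 *		return 0;	// returning 1 stops the iteration
 *	}
 *
 *	long nr_big = 0;
 *	bpf_for_each_map_elem(&my_hash, count_big, &nr_big, 0);
 *
 * flags must be 0, and the helper returns the number of elements visited.
 */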
2124
2125 static int htab_map_btf_id;
2126 const struct bpf_map_ops htab_map_ops = {
2127         .map_meta_equal = bpf_map_meta_equal,
2128         .map_alloc_check = htab_map_alloc_check,
2129         .map_alloc = htab_map_alloc,
2130         .map_free = htab_map_free,
2131         .map_get_next_key = htab_map_get_next_key,
2132         .map_release_uref = htab_map_free_timers,
2133         .map_lookup_elem = htab_map_lookup_elem,
2134         .map_lookup_and_delete_elem = htab_map_lookup_and_delete_elem,
2135         .map_update_elem = htab_map_update_elem,
2136         .map_delete_elem = htab_map_delete_elem,
2137         .map_gen_lookup = htab_map_gen_lookup,
2138         .map_seq_show_elem = htab_map_seq_show_elem,
2139         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2140         .map_for_each_callback = bpf_for_each_hash_elem,
2141         BATCH_OPS(htab),
2142         .map_btf_name = "bpf_htab",
2143         .map_btf_id = &htab_map_btf_id,
2144         .iter_seq_info = &iter_seq_info,
2145 };
2146
2147 static int htab_lru_map_btf_id;
2148 const struct bpf_map_ops htab_lru_map_ops = {
2149         .map_meta_equal = bpf_map_meta_equal,
2150         .map_alloc_check = htab_map_alloc_check,
2151         .map_alloc = htab_map_alloc,
2152         .map_free = htab_map_free,
2153         .map_get_next_key = htab_map_get_next_key,
2154         .map_release_uref = htab_map_free_timers,
2155         .map_lookup_elem = htab_lru_map_lookup_elem,
2156         .map_lookup_and_delete_elem = htab_lru_map_lookup_and_delete_elem,
2157         .map_lookup_elem_sys_only = htab_lru_map_lookup_elem_sys,
2158         .map_update_elem = htab_lru_map_update_elem,
2159         .map_delete_elem = htab_lru_map_delete_elem,
2160         .map_gen_lookup = htab_lru_map_gen_lookup,
2161         .map_seq_show_elem = htab_map_seq_show_elem,
2162         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2163         .map_for_each_callback = bpf_for_each_hash_elem,
2164         BATCH_OPS(htab_lru),
2165         .map_btf_name = "bpf_htab",
2166         .map_btf_id = &htab_lru_map_btf_id,
2167         .iter_seq_info = &iter_seq_info,
2168 };
2169
2170 /* Called from eBPF program */
2171 static void *htab_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2172 {
2173         struct htab_elem *l = __htab_map_lookup_elem(map, key);
2174
2175         if (l)
2176                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2177         else
2178                 return NULL;
2179 }
2180
2181 static void *htab_lru_percpu_map_lookup_elem(struct bpf_map *map, void *key)
2182 {
2183         struct htab_elem *l = __htab_map_lookup_elem(map, key);
2184
2185         if (l) {
2186                 bpf_lru_node_set_ref(&l->lru_node);
2187                 return this_cpu_ptr(htab_elem_get_ptr(l, map->key_size));
2188         }
2189
2190         return NULL;
2191 }
2192
2193 int bpf_percpu_hash_copy(struct bpf_map *map, void *key, void *value)
2194 {
2195         struct htab_elem *l;
2196         void __percpu *pptr;
2197         int ret = -ENOENT;
2198         int cpu, off = 0;
2199         u32 size;
2200
2201         /* per_cpu areas are zero-filled and bpf programs can only
2202          * access 'value_size' of them, so copying rounded areas
2203          * will not leak any kernel data
2204          */
2205         size = round_up(map->value_size, 8);
2206         rcu_read_lock();
2207         l = __htab_map_lookup_elem(map, key);
2208         if (!l)
2209                 goto out;
2210         /* We do not mark the LRU map element here in order not to mess up
2211          * the eviction heuristics when user space does a map walk.
2212          */
2213         pptr = htab_elem_get_ptr(l, map->key_size);
2214         for_each_possible_cpu(cpu) {
2215                 bpf_long_memcpy(value + off,
2216                                 per_cpu_ptr(pptr, cpu), size);
2217                 off += size;
2218         }
2219         ret = 0;
2220 out:
2221         rcu_read_unlock();
2222         return ret;
2223 }
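/*
 * Userspace sketch matching the copy layout above (illustrative; assumes
 * libbpf and a percpu hash with u64 values, "map_fd" and "key" being
 * placeholders): a lookup returns one round_up(value_size, 8) slot per
 * possible CPU, back to back.
 *
 *	int ncpus = libbpf_num_possible_cpus();
 *	__u64 *vals = calloc(ncpus, sizeof(*vals));
 *	__u64 total = 0;
 *
 *	if (!bpf_map_lookup_elem(map_fd, &key, vals))
 *		for (int cpu = 0; cpu < ncpus; cpu++)
 *			total += vals[cpu];
 */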
2224
2225 int bpf_percpu_hash_update(struct bpf_map *map, void *key, void *value,
2226                            u64 map_flags)
2227 {
2228         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2229         int ret;
2230
2231         rcu_read_lock();
2232         if (htab_is_lru(htab))
2233                 ret = __htab_lru_percpu_map_update_elem(map, key, value,
2234                                                         map_flags, true);
2235         else
2236                 ret = __htab_percpu_map_update_elem(map, key, value, map_flags,
2237                                                     true);
2238         rcu_read_unlock();
2239
2240         return ret;
2241 }
2242
2243 static void htab_percpu_map_seq_show_elem(struct bpf_map *map, void *key,
2244                                           struct seq_file *m)
2245 {
2246         struct htab_elem *l;
2247         void __percpu *pptr;
2248         int cpu;
2249
2250         rcu_read_lock();
2251
2252         l = __htab_map_lookup_elem(map, key);
2253         if (!l) {
2254                 rcu_read_unlock();
2255                 return;
2256         }
2257
2258         btf_type_seq_show(map->btf, map->btf_key_type_id, key, m);
2259         seq_puts(m, ": {\n");
2260         pptr = htab_elem_get_ptr(l, map->key_size);
2261         for_each_possible_cpu(cpu) {
2262                 seq_printf(m, "\tcpu%d: ", cpu);
2263                 btf_type_seq_show(map->btf, map->btf_value_type_id,
2264                                   per_cpu_ptr(pptr, cpu), m);
2265                 seq_puts(m, "\n");
2266         }
2267         seq_puts(m, "}\n");
2268
2269         rcu_read_unlock();
2270 }
2271
2272 static int htab_percpu_map_btf_id;
2273 const struct bpf_map_ops htab_percpu_map_ops = {
2274         .map_meta_equal = bpf_map_meta_equal,
2275         .map_alloc_check = htab_map_alloc_check,
2276         .map_alloc = htab_map_alloc,
2277         .map_free = htab_map_free,
2278         .map_get_next_key = htab_map_get_next_key,
2279         .map_lookup_elem = htab_percpu_map_lookup_elem,
2280         .map_lookup_and_delete_elem = htab_percpu_map_lookup_and_delete_elem,
2281         .map_update_elem = htab_percpu_map_update_elem,
2282         .map_delete_elem = htab_map_delete_elem,
2283         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2284         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2285         .map_for_each_callback = bpf_for_each_hash_elem,
2286         BATCH_OPS(htab_percpu),
2287         .map_btf_name = "bpf_htab",
2288         .map_btf_id = &htab_percpu_map_btf_id,
2289         .iter_seq_info = &iter_seq_info,
2290 };
2291
2292 static int htab_lru_percpu_map_btf_id;
2293 const struct bpf_map_ops htab_lru_percpu_map_ops = {
2294         .map_meta_equal = bpf_map_meta_equal,
2295         .map_alloc_check = htab_map_alloc_check,
2296         .map_alloc = htab_map_alloc,
2297         .map_free = htab_map_free,
2298         .map_get_next_key = htab_map_get_next_key,
2299         .map_lookup_elem = htab_lru_percpu_map_lookup_elem,
2300         .map_lookup_and_delete_elem = htab_lru_percpu_map_lookup_and_delete_elem,
2301         .map_update_elem = htab_lru_percpu_map_update_elem,
2302         .map_delete_elem = htab_lru_map_delete_elem,
2303         .map_seq_show_elem = htab_percpu_map_seq_show_elem,
2304         .map_set_for_each_callback_args = map_set_for_each_callback_args,
2305         .map_for_each_callback = bpf_for_each_hash_elem,
2306         BATCH_OPS(htab_lru_percpu),
2307         .map_btf_name = "bpf_htab",
2308         .map_btf_id = &htab_lru_percpu_map_btf_id,
2309         .iter_seq_info = &iter_seq_info,
2310 };
2311
2312 static int fd_htab_map_alloc_check(union bpf_attr *attr)
2313 {
2314         if (attr->value_size != sizeof(u32))
2315                 return -EINVAL;
2316         return htab_map_alloc_check(attr);
2317 }
2318
2319 static void fd_htab_map_free(struct bpf_map *map)
2320 {
2321         struct bpf_htab *htab = container_of(map, struct bpf_htab, map);
2322         struct hlist_nulls_node *n;
2323         struct hlist_nulls_head *head;
2324         struct htab_elem *l;
2325         int i;
2326
2327         for (i = 0; i < htab->n_buckets; i++) {
2328                 head = select_bucket(htab, i);
2329
2330                 hlist_nulls_for_each_entry_safe(l, n, head, hash_node) {
2331                         void *ptr = fd_htab_map_get_ptr(map, l);
2332
2333                         map->ops->map_fd_put_ptr(ptr);
2334                 }
2335         }
2336
2337         htab_map_free(map);
2338 }
2339
2340 /* only called from syscall */
2341 int bpf_fd_htab_map_lookup_elem(struct bpf_map *map, void *key, u32 *value)
2342 {
2343         void **ptr;
2344         int ret = 0;
2345
2346         if (!map->ops->map_fd_sys_lookup_elem)
2347                 return -ENOTSUPP;
2348
2349         rcu_read_lock();
2350         ptr = htab_map_lookup_elem(map, key);
2351         if (ptr)
2352                 *value = map->ops->map_fd_sys_lookup_elem(READ_ONCE(*ptr));
2353         else
2354                 ret = -ENOENT;
2355         rcu_read_unlock();
2356
2357         return ret;
2358 }
2359
2360 /* only called from syscall */
2361 int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
2362                                 void *key, void *value, u64 map_flags)
2363 {
2364         void *ptr;
2365         int ret;
2366         u32 ufd = *(u32 *)value;
2367
2368         ptr = map->ops->map_fd_get_ptr(map, map_file, ufd);
2369         if (IS_ERR(ptr))
2370                 return PTR_ERR(ptr);
2371
2372         ret = htab_map_update_elem(map, key, &ptr, map_flags);
2373         if (ret)
2374                 map->ops->map_fd_put_ptr(ptr);
2375
2376         return ret;
2377 }
2378
2379 static struct bpf_map *htab_of_map_alloc(union bpf_attr *attr)
2380 {
2381         struct bpf_map *map, *inner_map_meta;
2382
2383         inner_map_meta = bpf_map_meta_alloc(attr->inner_map_fd);
2384         if (IS_ERR(inner_map_meta))
2385                 return inner_map_meta;
2386
2387         map = htab_map_alloc(attr);
2388         if (IS_ERR(map)) {
2389                 bpf_map_meta_free(inner_map_meta);
2390                 return map;
2391         }
2392
2393         map->inner_map_meta = inner_map_meta;
2394
2395         return map;
2396 }
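/*
 * Userspace sketch of creating such a hash-of-maps (illustrative; assumes
 * a libbpf version providing bpf_map_create(), with "inner_fd" a
 * placeholder for an already created map used as the inner-map template):
 *
 *	LIBBPF_OPTS(bpf_map_create_opts, opts, .inner_map_fd = inner_fd);
 *	int outer_fd = bpf_map_create(BPF_MAP_TYPE_HASH_OF_MAPS, "outer",
 *				      sizeof(__u32), sizeof(__u32), 256,
 *				      &opts);
 *
 * From userspace the outer map's values are map fds (hence the
 * value_size == sizeof(u32) check in fd_htab_map_alloc_check()); the
 * kernel stores "struct bpf_map *" pointers instead.
 */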
2397
2398 static void *htab_of_map_lookup_elem(struct bpf_map *map, void *key)
2399 {
2400         struct bpf_map **inner_map  = htab_map_lookup_elem(map, key);
2401
2402         if (!inner_map)
2403                 return NULL;
2404
2405         return READ_ONCE(*inner_map);
2406 }
2407
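/* Inline the inner-map lookup: call __htab_map_lookup_elem(), and on a hit
 * advance R0 past the round_up()'d key to the value area, then load the
 * inner "struct bpf_map *" stored there with a single BPF_DW load.
 */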
2408 static int htab_of_map_gen_lookup(struct bpf_map *map,
2409                                   struct bpf_insn *insn_buf)
2410 {
2411         struct bpf_insn *insn = insn_buf;
2412         const int ret = BPF_REG_0;
2413
2414         BUILD_BUG_ON(!__same_type(&__htab_map_lookup_elem,
2415                      (void *(*)(struct bpf_map *map, void *key))NULL));
2416         *insn++ = BPF_EMIT_CALL(BPF_CAST_CALL(__htab_map_lookup_elem));
2417         *insn++ = BPF_JMP_IMM(BPF_JEQ, ret, 0, 2);
2418         *insn++ = BPF_ALU64_IMM(BPF_ADD, ret,
2419                                 offsetof(struct htab_elem, key) +
2420                                 round_up(map->key_size, 8));
2421         *insn++ = BPF_LDX_MEM(BPF_DW, ret, ret, 0);
2422
2423         return insn - insn_buf;
2424 }
2425
2426 static void htab_of_map_free(struct bpf_map *map)
2427 {
2428         bpf_map_meta_free(map->inner_map_meta);
2429         fd_htab_map_free(map);
2430 }
2431
2432 static int htab_of_maps_map_btf_id;
2433 const struct bpf_map_ops htab_of_maps_map_ops = {
2434         .map_alloc_check = fd_htab_map_alloc_check,
2435         .map_alloc = htab_of_map_alloc,
2436         .map_free = htab_of_map_free,
2437         .map_get_next_key = htab_map_get_next_key,
2438         .map_lookup_elem = htab_of_map_lookup_elem,
2439         .map_delete_elem = htab_map_delete_elem,
2440         .map_fd_get_ptr = bpf_map_fd_get_ptr,
2441         .map_fd_put_ptr = bpf_map_fd_put_ptr,
2442         .map_fd_sys_lookup_elem = bpf_map_fd_sys_lookup_elem,
2443         .map_gen_lookup = htab_of_map_gen_lookup,
2444         .map_check_btf = map_check_no_btf,
2445         .map_btf_name = "bpf_htab",
2446         .map_btf_id = &htab_of_maps_map_btf_id,
2447 };