core/bpf_sk_storage.c
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook  */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/bpf_sk_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate_trace.h>

DEFINE_BPF_STORAGE_CACHE(sk_cache);

static struct bpf_local_storage_data *
bpf_sk_storage_lookup(struct sock *sk, struct bpf_map *map, bool cacheit_lockit)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_map *smap;

	sk_storage =
		rcu_dereference_check(sk->sk_bpf_storage, bpf_rcu_lock_held());
	if (!sk_storage)
		return NULL;

	smap = (struct bpf_local_storage_map *)map;
	return bpf_local_storage_lookup(sk_storage, smap, cacheit_lockit);
}

static int bpf_sk_storage_del(struct sock *sk, struct bpf_map *map)
{
	struct bpf_local_storage_data *sdata;

	sdata = bpf_sk_storage_lookup(sk, map, false);
	if (!sdata)
		return -ENOENT;

	bpf_selem_unlink(SELEM(sdata), true);

	return 0;
}

/* Called by __sk_destruct() & bpf_sk_storage_clone() */
void bpf_sk_storage_free(struct sock *sk)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *sk_storage;
	bool free_sk_storage = false;
	struct hlist_node *n;

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage) {
		rcu_read_unlock();
		return;
	}

	/* Neither the bpf_prog nor the bpf-map's syscall
	 * could be modifying the sk_storage->list now.
	 * Thus, no elem can be added to or deleted from the
	 * sk_storage->list by the bpf_prog or by the bpf-map's syscall.
	 *
	 * It is racing with bpf_local_storage_map_free() alone
	 * when unlinking elem from the sk_storage->list and
	 * the map's bucket->list.
	 */
	raw_spin_lock_bh(&sk_storage->lock);
	hlist_for_each_entry_safe(selem, n, &sk_storage->list, snode) {
		/* Always unlink from map before unlinking from
		 * sk_storage.
		 */
		bpf_selem_unlink_map(selem);
		free_sk_storage = bpf_selem_unlink_storage_nolock(
			sk_storage, selem, true, false);
	}
	raw_spin_unlock_bh(&sk_storage->lock);
	rcu_read_unlock();

	if (free_sk_storage)
		kfree_rcu(sk_storage, rcu);
}

static void bpf_sk_storage_map_free(struct bpf_map *map)
{
	struct bpf_local_storage_map *smap;

	smap = (struct bpf_local_storage_map *)map;
	bpf_local_storage_cache_idx_free(&sk_cache, smap->cache_idx);
	bpf_local_storage_map_free(smap, NULL);
}

static struct bpf_map *bpf_sk_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;

	smap = bpf_local_storage_map_alloc(attr);
	if (IS_ERR(smap))
		return ERR_CAST(smap);

	smap->cache_idx = bpf_local_storage_cache_idx_get(&sk_cache);
	return &smap->map;
}
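
/* Usage sketch (illustrative, declared in a BPF object; libbpf supplies the
 * BTF key/value info that bpf_local_storage_map_alloc_check() demands): a
 * sk_storage map is declared with an int key, BPF_F_NO_PREALLOC and no
 * max_entries.  The map name and value type are assumptions:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, __u64);
 *	} sk_stg SEC(".maps");
 */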

static int notsupp_get_next_key(struct bpf_map *map, void *key,
				void *next_key)
{
	return -ENOTSUPP;
}

static void *bpf_fd_sk_storage_lookup_elem(struct bpf_map *map, void *key)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_sk_storage_lookup(sock->sk, map, true);
		sockfd_put(sock);
		return sdata ? sdata->data : NULL;
	}

	return ERR_PTR(err);
}

static int bpf_fd_sk_storage_update_elem(struct bpf_map *map, void *key,
					 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *sdata;
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		sdata = bpf_local_storage_update(
			sock->sk, (struct bpf_local_storage_map *)map, value,
			map_flags, GFP_ATOMIC);
		sockfd_put(sock);
		return PTR_ERR_OR_ZERO(sdata);
	}

	return err;
}

static int bpf_fd_sk_storage_delete_elem(struct bpf_map *map, void *key)
{
	struct socket *sock;
	int fd, err;

	fd = *(int *)key;
	sock = sockfd_lookup(fd, &err);
	if (sock) {
		err = bpf_sk_storage_del(sock->sk, map);
		sockfd_put(sock);
		return err;
	}

	return err;
}
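
/* Usage sketch (assumes libbpf; map_fd and sock_fd are illustrative): the
 * syscall-side ops above are keyed by a socket file descriptor, so the usual
 * element syscalls operate on one socket's storage:
 *
 *	__u64 val = 42;
 *	bpf_map_update_elem(map_fd, &sock_fd, &val, BPF_ANY);
 *	bpf_map_lookup_elem(map_fd, &sock_fd, &val);
 *	bpf_map_delete_elem(map_fd, &sock_fd);
 *
 * bpf_map_get_next_key() is not supported and returns -ENOTSUPP, so the map
 * cannot be enumerated this way (see the bpf_iter support at the end of this
 * file).
 */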

static struct bpf_local_storage_elem *
bpf_sk_storage_clone_elem(struct sock *newsk,
			  struct bpf_local_storage_map *smap,
			  struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_elem *copy_selem;

	copy_selem = bpf_selem_alloc(smap, newsk, NULL, true, GFP_ATOMIC);
	if (!copy_selem)
		return NULL;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, SDATA(copy_selem)->data,
				      SDATA(selem)->data, true);
	else
		copy_map_value(&smap->map, SDATA(copy_selem)->data,
			       SDATA(selem)->data);

	return copy_selem;
}

int bpf_sk_storage_clone(const struct sock *sk, struct sock *newsk)
{
	struct bpf_local_storage *new_sk_storage = NULL;
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	int ret = 0;

	RCU_INIT_POINTER(newsk->sk_bpf_storage, NULL);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);

	if (!sk_storage || hlist_empty(&sk_storage->list))
		goto out;

	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		struct bpf_local_storage_elem *copy_selem;
		struct bpf_local_storage_map *smap;
		struct bpf_map *map;

		smap = rcu_dereference(SDATA(selem)->smap);
		if (!(smap->map.map_flags & BPF_F_CLONE))
			continue;

		/* Note that for lockless listeners adding new element
		 * here can race with cleanup in bpf_local_storage_map_free.
		 * Try to grab map refcnt to make sure that it's still
		 * alive and prevent concurrent removal.
		 */
		map = bpf_map_inc_not_zero(&smap->map);
		if (IS_ERR(map))
			continue;

		copy_selem = bpf_sk_storage_clone_elem(newsk, smap, selem);
		if (!copy_selem) {
			ret = -ENOMEM;
			bpf_map_put(map);
			goto out;
		}

		if (new_sk_storage) {
			bpf_selem_link_map(smap, copy_selem);
			bpf_selem_link_storage_nolock(new_sk_storage, copy_selem);
		} else {
			ret = bpf_local_storage_alloc(newsk, smap, copy_selem, GFP_ATOMIC);
			if (ret) {
				kfree(copy_selem);
				atomic_sub(smap->elem_size,
					   &newsk->sk_omem_alloc);
				bpf_map_put(map);
				goto out;
			}

			new_sk_storage =
				rcu_dereference(copy_selem->local_storage);
		}
		bpf_map_put(map);
	}

out:
	rcu_read_unlock();

	/* In case of an error, don't free anything explicitly here; the
	 * caller is responsible for calling bpf_sk_storage_free.
	 */

	return ret;
}
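
/* Usage sketch: cloning only copies values of maps created with BPF_F_CLONE,
 * i.e. the illustrative sk_stg declaration earlier would need
 *
 *	__uint(map_flags, BPF_F_NO_PREALLOC | BPF_F_CLONE);
 *
 * for a value attached to a listener socket to be inherited by its accepted
 * children.  Without BPF_F_CLONE the element is simply skipped above.
 */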

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	struct bpf_local_storage_data *sdata;

	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk) || flags > BPF_SK_STORAGE_GET_F_CREATE)
		return (unsigned long)NULL;

	sdata = bpf_sk_storage_lookup(sk, map, true);
	if (sdata)
		return (unsigned long)sdata->data;

	if (flags == BPF_SK_STORAGE_GET_F_CREATE &&
	    /* Cannot add new elem to a going away sk.
	     * Otherwise, the new elem may become a leak
	     * (and also other memory issues during map
	     *  destruction).
	     */
	    refcount_inc_not_zero(&sk->sk_refcnt)) {
		sdata = bpf_local_storage_update(
			sk, (struct bpf_local_storage_map *)map, value,
			BPF_NOEXIST, gfp_flags);
		/* sk must be a fullsock (guaranteed by verifier),
		 * so sock_gen_put() is unnecessary.
		 */
		sock_put(sk);
		return IS_ERR(sdata) ?
			(unsigned long)NULL : (unsigned long)sdata->data;
	}

	return (unsigned long)NULL;
}

BPF_CALL_2(bpf_sk_storage_delete, struct bpf_map *, map, struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (!sk || !sk_fullsock(sk))
		return -EINVAL;

	if (refcount_inc_not_zero(&sk->sk_refcnt)) {
		int err;

		err = bpf_sk_storage_del(sk, map);
		sock_put(sk);
		return err;
	}

	return -ENOENT;
}
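
/* Usage sketch (illustrative BPF program; assumes vmlinux.h, bpf_helpers.h
 * and the sk_stg map sketched earlier): a cgroup/sock program can keep a
 * per-socket counter through the get helper backed by the call above, with
 * BPF_SK_STORAGE_GET_F_CREATE allocating the value on first use:
 *
 *	SEC("cgroup/sock_create")
 *	int on_sock_create(struct bpf_sock *ctx)
 *	{
 *		__u64 *val;
 *
 *		val = bpf_sk_storage_get(&sk_stg, ctx, 0,
 *					 BPF_SK_STORAGE_GET_F_CREATE);
 *		if (val)
 *			*val += 1;
 *		return 1;
 *	}
 */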

static int bpf_sk_storage_charge(struct bpf_local_storage_map *smap,
				 void *owner, u32 size)
{
	int optmem_max = READ_ONCE(sysctl_optmem_max);
	struct sock *sk = (struct sock *)owner;

	/* same check as in sock_kmalloc() */
	if (size <= optmem_max &&
	    atomic_read(&sk->sk_omem_alloc) + size < optmem_max) {
		atomic_add(size, &sk->sk_omem_alloc);
		return 0;
	}

	return -ENOMEM;
}

static void bpf_sk_storage_uncharge(struct bpf_local_storage_map *smap,
				    void *owner, u32 size)
{
	struct sock *sk = owner;

	atomic_sub(size, &sk->sk_omem_alloc);
}

static struct bpf_local_storage __rcu **
bpf_sk_storage_ptr(void *owner)
{
	struct sock *sk = owner;

	return &sk->sk_bpf_storage;
}

BTF_ID_LIST_SINGLE(sk_storage_map_btf_ids, struct, bpf_local_storage_map)
const struct bpf_map_ops sk_storage_map_ops = {
	.map_meta_equal = bpf_map_meta_equal,
	.map_alloc_check = bpf_local_storage_map_alloc_check,
	.map_alloc = bpf_sk_storage_map_alloc,
	.map_free = bpf_sk_storage_map_free,
	.map_get_next_key = notsupp_get_next_key,
	.map_lookup_elem = bpf_fd_sk_storage_lookup_elem,
	.map_update_elem = bpf_fd_sk_storage_update_elem,
	.map_delete_elem = bpf_fd_sk_storage_delete_elem,
	.map_check_btf = bpf_local_storage_map_check_btf,
	.map_btf_id = &sk_storage_map_btf_ids[0],
	.map_local_storage_charge = bpf_sk_storage_charge,
	.map_local_storage_uncharge = bpf_sk_storage_uncharge,
	.map_owner_storage_ptr = bpf_sk_storage_ptr,
};

const struct bpf_func_proto bpf_sk_storage_get_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_get_cg_sock_proto = {
	.func		= bpf_sk_storage_get,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_CTX, /* context is 'struct sock' */
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
};

const struct bpf_func_proto bpf_sk_storage_delete_proto = {
	.func		= bpf_sk_storage_delete,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID_SOCK_COMMON,
};

static bool bpf_sk_storage_tracing_allowed(const struct bpf_prog *prog)
{
	const struct btf *btf_vmlinux;
	const struct btf_type *t;
	const char *tname;
	u32 btf_id;

	if (prog->aux->dst_prog)
		return false;

	/* Ensure the tracing program is not tracing
	 * any bpf_sk_storage*() function while also
	 * using the bpf_sk_storage_(get|delete) helpers.
	 */
	switch (prog->expected_attach_type) {
	case BPF_TRACE_ITER:
	case BPF_TRACE_RAW_TP:
		/* bpf_sk_storage has no trace point */
		return true;
	case BPF_TRACE_FENTRY:
	case BPF_TRACE_FEXIT:
		btf_vmlinux = bpf_get_btf_vmlinux();
		if (IS_ERR_OR_NULL(btf_vmlinux))
			return false;
		btf_id = prog->aux->attach_btf_id;
		t = btf_type_by_id(btf_vmlinux, btf_id);
		tname = btf_name_by_offset(btf_vmlinux, t->name_off);
		return !!strncmp(tname, "bpf_sk_storage",
				 strlen("bpf_sk_storage"));
	default:
		return false;
	}

	return false;
}

/* *gfp_flags* is a hidden argument provided by the verifier */
BPF_CALL_5(bpf_sk_storage_get_tracing, struct bpf_map *, map, struct sock *, sk,
	   void *, value, u64, flags, gfp_t, gfp_flags)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return (unsigned long)NULL;

	return (unsigned long)____bpf_sk_storage_get(map, sk, value, flags,
						     gfp_flags);
}

BPF_CALL_2(bpf_sk_storage_delete_tracing, struct bpf_map *, map,
	   struct sock *, sk)
{
	WARN_ON_ONCE(!bpf_rcu_lock_held());
	if (in_hardirq() || in_nmi())
		return -EPERM;

	return ____bpf_sk_storage_delete(map, sk);
}

const struct bpf_func_proto bpf_sk_storage_get_tracing_proto = {
	.func		= bpf_sk_storage_get_tracing,
	.gpl_only	= false,
	.ret_type	= RET_PTR_TO_MAP_VALUE_OR_NULL,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.arg3_type	= ARG_PTR_TO_MAP_VALUE_OR_NULL,
	.arg4_type	= ARG_ANYTHING,
	.allowed	= bpf_sk_storage_tracing_allowed,
};

const struct bpf_func_proto bpf_sk_storage_delete_tracing_proto = {
	.func		= bpf_sk_storage_delete_tracing,
	.gpl_only	= false,
	.ret_type	= RET_INTEGER,
	.arg1_type	= ARG_CONST_MAP_PTR,
	.arg2_type	= ARG_PTR_TO_BTF_ID,
	.arg2_btf_id	= &btf_sock_ids[BTF_SOCK_TYPE_SOCK_COMMON],
	.allowed	= bpf_sk_storage_tracing_allowed,
};
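
/* Usage sketch (illustrative tracing program; tcp_close as the attach point
 * and BPF_PROG from bpf_tracing.h are assumptions): the tracing variants let
 * fentry/fexit programs read the same per-socket storage, provided they do
 * not trace a bpf_sk_storage*() function themselves (see the allowed()
 * callback above):
 *
 *	SEC("fentry/tcp_close")
 *	int BPF_PROG(on_tcp_close, struct sock *sk)
 *	{
 *		__u64 *val;
 *
 *		val = bpf_sk_storage_get(&sk_stg, sk, 0, 0);
 *		if (val)
 *			bpf_printk("events: %llu", *val);
 *		return 0;
 *	}
 */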

struct bpf_sk_storage_diag {
	u32 nr_maps;
	struct bpf_map *maps[];
};

/* The reply will be like:
 * INET_DIAG_BPF_SK_STORAGES (nla_nest)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	SK_DIAG_BPF_STORAGE (nla_nest)
 *		SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
 *		SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
 *	....
 */
static int nla_value_size(u32 value_size)
{
	/* SK_DIAG_BPF_STORAGE (nla_nest)
	 *	SK_DIAG_BPF_STORAGE_MAP_ID (nla_put_u32)
	 *	SK_DIAG_BPF_STORAGE_MAP_VALUE (nla_reserve_64bit)
	 */
	return nla_total_size(0) + nla_total_size(sizeof(u32)) +
		nla_total_size_64bit(value_size);
}
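
/* Worked example (a sketch; the exact total depends on the netlink padding
 * configuration): with NLA_HDRLEN == 4, a 4-byte map value needs
 *
 *	nla_total_size(0)            = 4   (SK_DIAG_BPF_STORAGE nest header)
 *	nla_total_size(sizeof(u32))  = 8   (MAP_ID attribute)
 *	nla_total_size_64bit(4)      = 8   (MAP_VALUE; 4 more bytes on
 *	                                    configs that need a pad attribute)
 *
 * i.e. 20 bytes of reply space per storage entry for this value size.
 */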

void bpf_sk_storage_diag_free(struct bpf_sk_storage_diag *diag)
{
	u32 i;

	if (!diag)
		return;

	for (i = 0; i < diag->nr_maps; i++)
		bpf_map_put(diag->maps[i]);

	kfree(diag);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_free);

static bool diag_check_dup(const struct bpf_sk_storage_diag *diag,
			   const struct bpf_map *map)
{
	u32 i;

	for (i = 0; i < diag->nr_maps; i++) {
		if (diag->maps[i] == map)
			return true;
	}

	return false;
}

struct bpf_sk_storage_diag *
bpf_sk_storage_diag_alloc(const struct nlattr *nla_stgs)
{
	struct bpf_sk_storage_diag *diag;
	struct nlattr *nla;
	u32 nr_maps = 0;
	int rem, err;

	/* bpf_local_storage_map is currently limited to CAP_SYS_ADMIN,
	 * as the map_alloc_check() side also enforces.
	 */
	if (!bpf_capable())
		return ERR_PTR(-EPERM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		if (nla_type(nla) == SK_DIAG_BPF_STORAGE_REQ_MAP_FD) {
			if (nla_len(nla) != sizeof(u32))
				return ERR_PTR(-EINVAL);
			nr_maps++;
		}
	}

	diag = kzalloc(struct_size(diag, maps, nr_maps), GFP_KERNEL);
	if (!diag)
		return ERR_PTR(-ENOMEM);

	nla_for_each_nested(nla, nla_stgs, rem) {
		struct bpf_map *map;
		int map_fd;

		if (nla_type(nla) != SK_DIAG_BPF_STORAGE_REQ_MAP_FD)
			continue;

		map_fd = nla_get_u32(nla);
		map = bpf_map_get(map_fd);
		if (IS_ERR(map)) {
			err = PTR_ERR(map);
			goto err_free;
		}
		if (map->map_type != BPF_MAP_TYPE_SK_STORAGE) {
			bpf_map_put(map);
			err = -EINVAL;
			goto err_free;
		}
		if (diag_check_dup(diag, map)) {
			bpf_map_put(map);
			err = -EEXIST;
			goto err_free;
		}
		diag->maps[diag->nr_maps++] = map;
	}

	return diag;

err_free:
	bpf_sk_storage_diag_free(diag);
	return ERR_PTR(err);
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_alloc);
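
/* Request layout sketch (illustrative, written with the kernel nla helpers
 * for symmetry with the reply comment above): the nest parsed by
 * bpf_sk_storage_diag_alloc() carries one u32 map fd per attribute,
 *
 *	INET_DIAG_REQ_SK_BPF_STORAGES (nla_nest)
 *		SK_DIAG_BPF_STORAGE_REQ_MAP_FD (u32)
 *		SK_DIAG_BPF_STORAGE_REQ_MAP_FD (u32)
 *		....
 *
 * and a nest with no MAP_FD attribute (nr_maps == 0) makes
 * bpf_sk_storage_diag_put() below dump every map's storage for the socket.
 */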

static int diag_get(struct bpf_local_storage_data *sdata, struct sk_buff *skb)
{
	struct nlattr *nla_stg, *nla_value;
	struct bpf_local_storage_map *smap;

	/* It cannot exceed max nlattr's payload */
	BUILD_BUG_ON(U16_MAX - NLA_HDRLEN < BPF_LOCAL_STORAGE_MAX_VALUE_SIZE);

	nla_stg = nla_nest_start(skb, SK_DIAG_BPF_STORAGE);
	if (!nla_stg)
		return -EMSGSIZE;

	smap = rcu_dereference(sdata->smap);
	if (nla_put_u32(skb, SK_DIAG_BPF_STORAGE_MAP_ID, smap->map.id))
		goto errout;

	nla_value = nla_reserve_64bit(skb, SK_DIAG_BPF_STORAGE_MAP_VALUE,
				      smap->map.value_size,
				      SK_DIAG_BPF_STORAGE_PAD);
	if (!nla_value)
		goto errout;

	if (map_value_has_spin_lock(&smap->map))
		copy_map_value_locked(&smap->map, nla_data(nla_value),
				      sdata->data, true);
	else
		copy_map_value(&smap->map, nla_data(nla_value), sdata->data);

	nla_nest_end(skb, nla_stg);
	return 0;

errout:
	nla_nest_cancel(skb, nla_stg);
	return -EMSGSIZE;
}

static int bpf_sk_storage_diag_put_all(struct sock *sk, struct sk_buff *skb,
				       int stg_array_type,
				       unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map *smap;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;

	rcu_read_lock();

	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	hlist_for_each_entry_rcu(selem, &sk_storage->list, snode) {
		smap = rcu_dereference(SDATA(selem)->smap);
		diag_size += nla_value_size(smap->map.value_size);

		if (nla_stgs && diag_get(SDATA(selem), skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}

	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}

int bpf_sk_storage_diag_put(struct bpf_sk_storage_diag *diag,
			    struct sock *sk, struct sk_buff *skb,
			    int stg_array_type,
			    unsigned int *res_diag_size)
{
	/* stg_array_type (e.g. INET_DIAG_BPF_SK_STORAGES) */
	unsigned int diag_size = nla_total_size(0);
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_data *sdata;
	struct nlattr *nla_stgs;
	unsigned int saved_len;
	int err = 0;
	u32 i;

	*res_diag_size = 0;

	/* No map has been specified.  Dump all. */
	if (!diag->nr_maps)
		return bpf_sk_storage_diag_put_all(sk, skb, stg_array_type,
						   res_diag_size);

	rcu_read_lock();
	sk_storage = rcu_dereference(sk->sk_bpf_storage);
	if (!sk_storage || hlist_empty(&sk_storage->list)) {
		rcu_read_unlock();
		return 0;
	}

	nla_stgs = nla_nest_start(skb, stg_array_type);
	if (!nla_stgs)
		/* Continue to learn diag_size */
		err = -EMSGSIZE;

	saved_len = skb->len;
	for (i = 0; i < diag->nr_maps; i++) {
		sdata = bpf_local_storage_lookup(sk_storage,
				(struct bpf_local_storage_map *)diag->maps[i],
				false);

		if (!sdata)
			continue;

		diag_size += nla_value_size(diag->maps[i]->value_size);

		if (nla_stgs && diag_get(sdata, skb))
			/* Continue to learn diag_size */
			err = -EMSGSIZE;
	}
	rcu_read_unlock();

	if (nla_stgs) {
		if (saved_len == skb->len)
			nla_nest_cancel(skb, nla_stgs);
		else
			nla_nest_end(skb, nla_stgs);
	}

	if (diag_size == nla_total_size(0)) {
		*res_diag_size = 0;
		return 0;
	}

	*res_diag_size = diag_size;
	return err;
}
EXPORT_SYMBOL_GPL(bpf_sk_storage_diag_put);

struct bpf_iter_seq_sk_storage_map_info {
	struct bpf_map *map;
	unsigned int bucket_id;
	unsigned int skip_elems;
};

static struct bpf_local_storage_elem *
bpf_sk_storage_map_seq_find_next(struct bpf_iter_seq_sk_storage_map_info *info,
				 struct bpf_local_storage_elem *prev_selem)
	__acquires(RCU) __releases(RCU)
{
	struct bpf_local_storage *sk_storage;
	struct bpf_local_storage_elem *selem;
	u32 skip_elems = info->skip_elems;
	struct bpf_local_storage_map *smap;
	u32 bucket_id = info->bucket_id;
	u32 i, count, n_buckets;
	struct bpf_local_storage_map_bucket *b;

	smap = (struct bpf_local_storage_map *)info->map;
	n_buckets = 1U << smap->bucket_log;
	if (bucket_id >= n_buckets)
		return NULL;

	/* try to find next selem in the same bucket */
	selem = prev_selem;
	count = 0;
	while (selem) {
		selem = hlist_entry_safe(rcu_dereference(hlist_next_rcu(&selem->map_node)),
					 struct bpf_local_storage_elem, map_node);
		if (!selem) {
			/* not found, unlock and go to the next bucket */
			b = &smap->buckets[bucket_id++];
			rcu_read_unlock();
			skip_elems = 0;
			break;
		}
		sk_storage = rcu_dereference(selem->local_storage);
		if (sk_storage) {
			info->skip_elems = skip_elems + count;
			return selem;
		}
		count++;
	}

	for (i = bucket_id; i < n_buckets; i++) {
		b = &smap->buckets[i];
		rcu_read_lock();
		count = 0;
		hlist_for_each_entry_rcu(selem, &b->list, map_node) {
			sk_storage = rcu_dereference(selem->local_storage);
			if (sk_storage && count >= skip_elems) {
				info->bucket_id = i;
				info->skip_elems = count;
				return selem;
			}
			count++;
		}
		rcu_read_unlock();
		skip_elems = 0;
	}

	info->bucket_id = i;
	info->skip_elems = 0;
	return NULL;
}

static void *bpf_sk_storage_map_seq_start(struct seq_file *seq, loff_t *pos)
{
	struct bpf_local_storage_elem *selem;

	selem = bpf_sk_storage_map_seq_find_next(seq->private, NULL);
	if (!selem)
		return NULL;

	if (*pos == 0)
		++*pos;
	return selem;
}

static void *bpf_sk_storage_map_seq_next(struct seq_file *seq, void *v,
					 loff_t *pos)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;

	++*pos;
	++info->skip_elems;
	return bpf_sk_storage_map_seq_find_next(seq->private, v);
}

struct bpf_iter__bpf_sk_storage_map {
	__bpf_md_ptr(struct bpf_iter_meta *, meta);
	__bpf_md_ptr(struct bpf_map *, map);
	__bpf_md_ptr(struct sock *, sk);
	__bpf_md_ptr(void *, value);
};

DEFINE_BPF_ITER_FUNC(bpf_sk_storage_map, struct bpf_iter_meta *meta,
		     struct bpf_map *map, struct sock *sk,
		     void *value)

static int __bpf_sk_storage_map_seq_show(struct seq_file *seq,
					 struct bpf_local_storage_elem *selem)
{
	struct bpf_iter_seq_sk_storage_map_info *info = seq->private;
	struct bpf_iter__bpf_sk_storage_map ctx = {};
	struct bpf_local_storage *sk_storage;
	struct bpf_iter_meta meta;
	struct bpf_prog *prog;
	int ret = 0;

	meta.seq = seq;
	prog = bpf_iter_get_info(&meta, selem == NULL);
	if (prog) {
		ctx.meta = &meta;
		ctx.map = info->map;
		if (selem) {
			sk_storage = rcu_dereference(selem->local_storage);
			ctx.sk = sk_storage->owner;
			ctx.value = SDATA(selem)->data;
		}
		ret = bpf_iter_run_prog(prog, &ctx);
	}

	return ret;
}

static int bpf_sk_storage_map_seq_show(struct seq_file *seq, void *v)
{
	return __bpf_sk_storage_map_seq_show(seq, v);
}

static void bpf_sk_storage_map_seq_stop(struct seq_file *seq, void *v)
	__releases(RCU)
{
	if (!v)
		(void)__bpf_sk_storage_map_seq_show(seq, v);
	else
		rcu_read_unlock();
}

static int bpf_iter_init_sk_storage_map(void *priv_data,
					struct bpf_iter_aux_info *aux)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_inc_with_uref(aux->map);
	seq_info->map = aux->map;
	return 0;
}

static void bpf_iter_fini_sk_storage_map(void *priv_data)
{
	struct bpf_iter_seq_sk_storage_map_info *seq_info = priv_data;

	bpf_map_put_with_uref(seq_info->map);
}

static int bpf_iter_attach_map(struct bpf_prog *prog,
			       union bpf_iter_link_info *linfo,
			       struct bpf_iter_aux_info *aux)
{
	struct bpf_map *map;
	int err = -EINVAL;

	if (!linfo->map.map_fd)
		return -EBADF;

	map = bpf_map_get_with_uref(linfo->map.map_fd);
	if (IS_ERR(map))
		return PTR_ERR(map);

	if (map->map_type != BPF_MAP_TYPE_SK_STORAGE)
		goto put_map;

	if (prog->aux->max_rdwr_access > map->value_size) {
		err = -EACCES;
		goto put_map;
	}

	aux->map = map;
	return 0;

put_map:
	bpf_map_put_with_uref(map);
	return err;
}

static void bpf_iter_detach_map(struct bpf_iter_aux_info *aux)
{
	bpf_map_put_with_uref(aux->map);
}

static const struct seq_operations bpf_sk_storage_map_seq_ops = {
	.start  = bpf_sk_storage_map_seq_start,
	.next   = bpf_sk_storage_map_seq_next,
	.stop   = bpf_sk_storage_map_seq_stop,
	.show   = bpf_sk_storage_map_seq_show,
};

static const struct bpf_iter_seq_info iter_seq_info = {
	.seq_ops		= &bpf_sk_storage_map_seq_ops,
	.init_seq_private	= bpf_iter_init_sk_storage_map,
	.fini_seq_private	= bpf_iter_fini_sk_storage_map,
	.seq_priv_size		= sizeof(struct bpf_iter_seq_sk_storage_map_info),
};

static struct bpf_iter_reg bpf_sk_storage_map_reg_info = {
	.target			= "bpf_sk_storage_map",
	.attach_target		= bpf_iter_attach_map,
	.detach_target		= bpf_iter_detach_map,
	.show_fdinfo		= bpf_iter_map_show_fdinfo,
	.fill_link_info		= bpf_iter_map_fill_link_info,
	.ctx_arg_info_size	= 2,
	.ctx_arg_info		= {
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, sk),
		  PTR_TO_BTF_ID_OR_NULL },
		{ offsetof(struct bpf_iter__bpf_sk_storage_map, value),
		  PTR_TO_BUF | PTR_MAYBE_NULL },
	},
	.seq_info		= &iter_seq_info,
};

static int __init bpf_sk_storage_map_iter_init(void)
{
	bpf_sk_storage_map_reg_info.ctx_arg_info[0].btf_id =
		btf_sock_ids[BTF_SOCK_TYPE_SOCK];
	return bpf_iter_reg_target(&bpf_sk_storage_map_reg_info);
}
late_initcall(bpf_sk_storage_map_iter_init);
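
/* Usage sketch (illustrative iterator program, in the style of the BPF
 * selftests; assumes vmlinux.h, bpf_helpers.h and bpf_tracing.h): with the
 * target above registered, an iterator link created against a sk_storage map
 * walks every (socket, value) pair that the seq ops produce:
 *
 *	SEC("iter/bpf_sk_storage_map")
 *	int dump_sk_stg(struct bpf_iter__bpf_sk_storage_map *ctx)
 *	{
 *		struct sock *sk = ctx->sk;
 *		__u64 *val = ctx->value;
 *
 *		if (!sk || !val)
 *			return 0;
 *		BPF_SEQ_PRINTF(ctx->meta->seq, "sk: %p val: %llu\n",
 *			       sk, *val);
 *		return 0;
 *	}
 */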