// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 Facebook */
#include <linux/rculist.h>
#include <linux/list.h>
#include <linux/hash.h>
#include <linux/types.h>
#include <linux/spinlock.h>
#include <linux/bpf.h>
#include <linux/btf_ids.h>
#include <linux/bpf_local_storage.h>
#include <net/sock.h>
#include <uapi/linux/sock_diag.h>
#include <uapi/linux/btf.h>
#include <linux/rcupdate.h>
#include <linux/rcupdate_trace.h>
#include <linux/rcupdate_wait.h>

#define BPF_LOCAL_STORAGE_CREATE_FLAG_MASK (BPF_F_NO_PREALLOC | BPF_F_CLONE)

static struct bpf_local_storage_map_bucket *
select_bucket(struct bpf_local_storage_map *smap,
	      struct bpf_local_storage_elem *selem)
{
	return &smap->buckets[hash_ptr(selem, smap->bucket_log)];
}

static int mem_charge(struct bpf_local_storage_map *smap, void *owner, u32 size)
{
	struct bpf_map *map = &smap->map;

	if (!map->ops->map_local_storage_charge)
		return 0;

	return map->ops->map_local_storage_charge(smap, owner, size);
}
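
/* Illustrative sketch (not part of this file): a map type hooks charging by
 * implementing map_local_storage_charge in its bpf_map_ops. The hypothetical
 * owner below accounts storage bytes in an atomic counter; the real map
 * types (e.g. sk storage) charge the owner's own memory accounting instead.
 *
 *	static int example_charge(struct bpf_local_storage_map *smap,
 *				  void *owner, u32 size)
 *	{
 *		struct example_owner *eo = owner;	// hypothetical type
 *
 *		if (atomic_add_return(size, &eo->bpf_storage_bytes) >
 *		    EXAMPLE_STORAGE_LIMIT) {		// hypothetical limit
 *			atomic_sub(size, &eo->bpf_storage_bytes);
 *			return -ENOMEM;
 *		}
 *		return 0;
 *	}
 */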

static void mem_uncharge(struct bpf_local_storage_map *smap, void *owner,
			 u32 size)
{
	struct bpf_map *map = &smap->map;

	if (map->ops->map_local_storage_uncharge)
		map->ops->map_local_storage_uncharge(smap, owner, size);
}

static struct bpf_local_storage __rcu **
owner_storage(struct bpf_local_storage_map *smap, void *owner)
{
	struct bpf_map *map = &smap->map;

	return map->ops->map_owner_storage_ptr(owner);
}

static bool selem_linked_to_storage(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->snode);
}

static bool selem_linked_to_map(const struct bpf_local_storage_elem *selem)
{
	return !hlist_unhashed(&selem->map_node);
}
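
/* Both helpers above rely on the unlink paths using hlist_del_init_rcu()
 * (rather than hlist_del_rcu()): the _init variant resets the node's pprev
 * to NULL, so hlist_unhashed() reliably returns true for an unlinked selem.
 */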

struct bpf_local_storage_elem *
bpf_selem_alloc(struct bpf_local_storage_map *smap, void *owner,
		void *value, bool charge_mem)
{
	struct bpf_local_storage_elem *selem;

	if (charge_mem && mem_charge(smap, owner, smap->elem_size))
		return NULL;

	selem = bpf_map_kzalloc(&smap->map, smap->elem_size,
				GFP_ATOMIC | __GFP_NOWARN);
	if (selem) {
		if (value)
			memcpy(SDATA(selem)->data, value, smap->map.value_size);
		return selem;
	}

	if (charge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	return NULL;
}
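
/* Layout note: an selem is a single allocation of smap->elem_size bytes,
 * i.e. the struct bpf_local_storage_elem header immediately followed by the
 * map value, so SDATA(selem)->data above is the value area:
 *
 *	+--------------------------------------+
 *	| struct bpf_local_storage_elem        |
 *	+--------------------------------------+
 *	| value (smap->map.value_size bytes)   |
 *	+--------------------------------------+
 */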

void bpf_local_storage_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage *local_storage;

	local_storage = container_of(rcu, struct bpf_local_storage, rcu);
	kfree_rcu(local_storage, rcu);
}

static void bpf_selem_free_rcu(struct rcu_head *rcu)
{
	struct bpf_local_storage_elem *selem;

	selem = container_of(rcu, struct bpf_local_storage_elem, rcu);
	kfree_rcu(selem, rcu);
}
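
/* The two callbacks above chain grace periods: they are queued with
 * call_rcu_tasks_trace() (see the unlink paths below), so they run after a
 * tasks-trace RCU grace period covering sleepable BPF programs, and they
 * then queue a second, vanilla RCU grace period via kfree_rcu(). The memory
 * is returned only after both sleepable and non-sleepable readers are done.
 */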

/* local_storage->lock must be held and selem->local_storage == local_storage.
 * The caller must ensure selem->smap is still valid to be
 * dereferenced for its smap->elem_size and smap->cache_idx.
 */
bool bpf_selem_unlink_storage_nolock(struct bpf_local_storage *local_storage,
				     struct bpf_local_storage_elem *selem,
				     bool uncharge_mem)
{
	struct bpf_local_storage_map *smap;
	bool free_local_storage;
	void *owner;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	owner = local_storage->owner;

	/* All uncharging on the owner must be done first.
	 * The owner may be freed once the last selem is unlinked
	 * from local_storage.
	 */
	if (uncharge_mem)
		mem_uncharge(smap, owner, smap->elem_size);

	free_local_storage = hlist_is_singular_node(&selem->snode,
						    &local_storage->list);
	if (free_local_storage) {
		mem_uncharge(smap, owner, sizeof(struct bpf_local_storage));
		local_storage->owner = NULL;

		/* After this RCU_INIT, owner may be freed and cannot be used */
		RCU_INIT_POINTER(*owner_storage(smap, owner), NULL);

		/* local_storage is not freed now. local_storage->lock is
		 * still held and raw_spin_unlock_irqrestore(&local_storage->lock,
		 * flags) will be done by the caller.
		 *
		 * Although the unlock will be done under
		 * rcu_read_lock(), it is more intuitive to
		 * read if the freeing of the storage is done
		 * after the raw_spin_unlock_irqrestore(&local_storage->lock,
		 * flags).
		 *
		 * Hence, a "bool free_local_storage" is returned
		 * to the caller, which then frees the storage after
		 * all the RCU grace periods have expired.
		 */
	}
	hlist_del_init_rcu(&selem->snode);
	if (rcu_access_pointer(local_storage->cache[smap->cache_idx]) ==
	    SDATA(selem))
		RCU_INIT_POINTER(local_storage->cache[smap->cache_idx], NULL);

	call_rcu_tasks_trace(&selem->rcu, bpf_selem_free_rcu);
	return free_local_storage;
}

static void __bpf_selem_unlink_storage(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage *local_storage;
	bool free_local_storage = false;
	unsigned long flags;

	if (unlikely(!selem_linked_to_storage(selem)))
		/* selem has already been unlinked from its local_storage */
		return;

	local_storage = rcu_dereference_check(selem->local_storage,
					      bpf_rcu_lock_held());
	raw_spin_lock_irqsave(&local_storage->lock, flags);
	if (likely(selem_linked_to_storage(selem)))
		free_local_storage = bpf_selem_unlink_storage_nolock(
			local_storage, selem, true);
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);

	if (free_local_storage)
		call_rcu_tasks_trace(&local_storage->rcu,
				     bpf_local_storage_free_rcu);
}

void bpf_selem_link_storage_nolock(struct bpf_local_storage *local_storage,
				   struct bpf_local_storage_elem *selem)
{
	RCU_INIT_POINTER(selem->local_storage, local_storage);
	hlist_add_head_rcu(&selem->snode, &local_storage->list);
}

void bpf_selem_unlink_map(struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map *smap;
	struct bpf_local_storage_map_bucket *b;
	unsigned long flags;

	if (unlikely(!selem_linked_to_map(selem)))
		/* selem has already been unlinked from smap */
		return;

	smap = rcu_dereference_check(SDATA(selem)->smap, bpf_rcu_lock_held());
	b = select_bucket(smap, selem);
	raw_spin_lock_irqsave(&b->lock, flags);
	if (likely(selem_linked_to_map(selem)))
		hlist_del_init_rcu(&selem->map_node);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}

void bpf_selem_link_map(struct bpf_local_storage_map *smap,
			struct bpf_local_storage_elem *selem)
{
	struct bpf_local_storage_map_bucket *b = select_bucket(smap, selem);
	unsigned long flags;

	raw_spin_lock_irqsave(&b->lock, flags);
	RCU_INIT_POINTER(SDATA(selem)->smap, smap);
	hlist_add_head_rcu(&selem->map_node, &b->list);
	raw_spin_unlock_irqrestore(&b->lock, flags);
}
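
/* An selem is reachable from two lists at once: map_node links it into its
 * map's bucket (guarded by b->lock) so map destruction can find it, and
 * snode links it into the owner's local_storage->list (guarded by
 * local_storage->lock) so owner destruction can find it.
 */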

void bpf_selem_unlink(struct bpf_local_storage_elem *selem)
{
	/* Always unlink from map before unlinking from local_storage
	 * because selem will be freed after successfully unlinked from
	 * the local_storage.
	 */
	bpf_selem_unlink_map(selem);
	__bpf_selem_unlink_storage(selem);
}

struct bpf_local_storage_data *
bpf_local_storage_lookup(struct bpf_local_storage *local_storage,
			 struct bpf_local_storage_map *smap,
			 bool cacheit_lockit)
{
	struct bpf_local_storage_data *sdata;
	struct bpf_local_storage_elem *selem;

	/* Fast path (cache hit) */
	sdata = rcu_dereference_check(local_storage->cache[smap->cache_idx],
				      bpf_rcu_lock_held());
	if (sdata && rcu_access_pointer(sdata->smap) == smap)
		return sdata;

	/* Slow path (cache miss) */
	hlist_for_each_entry_rcu(selem, &local_storage->list, snode,
				 rcu_read_lock_trace_held())
		if (rcu_access_pointer(SDATA(selem)->smap) == smap)
			break;

	if (!selem)
		return NULL;

	sdata = SDATA(selem);
	if (cacheit_lockit) {
		unsigned long flags;

		/* A spinlock is needed to avoid racing with a
		 * parallel delete. Otherwise, publishing an already
		 * deleted sdata to the cache will become a use-after-free
		 * problem in the next bpf_local_storage_lookup().
		 */
		raw_spin_lock_irqsave(&local_storage->lock, flags);
		if (selem_linked_to_storage(selem))
			rcu_assign_pointer(local_storage->cache[smap->cache_idx],
					   sdata);
		raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	}

	return sdata;
}
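
/* Illustrative caller (a sketch modeled on bpf_sk_storage.c, not part of
 * this file): a map type resolves its owner's storage pointer and then
 * delegates to bpf_local_storage_lookup():
 *
 *	static struct bpf_local_storage_data *
 *	example_sk_lookup(struct sock *sk, struct bpf_map *map, bool cacheit)
 *	{
 *		struct bpf_local_storage *sk_storage;
 *
 *		sk_storage = rcu_dereference_check(sk->sk_bpf_storage,
 *						   bpf_rcu_lock_held());
 *		if (!sk_storage)
 *			return NULL;
 *
 *		return bpf_local_storage_lookup(sk_storage,
 *				(struct bpf_local_storage_map *)map, cacheit);
 *	}
 */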

static int check_flags(const struct bpf_local_storage_data *old_sdata,
		       u64 map_flags)
{
	if (old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_NOEXIST)
		/* elem already exists */
		return -EEXIST;

	if (!old_sdata && (map_flags & ~BPF_F_LOCK) == BPF_EXIST)
		/* elem doesn't exist, cannot update it */
		return -ENOENT;

	return 0;
}
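
/* Resulting update semantics (BPF_F_LOCK is masked out first):
 *
 *	BPF_ANY:     create or replace;  never fails here
 *	BPF_NOEXIST: create only;        -EEXIST if the elem already exists
 *	BPF_EXIST:   replace only;       -ENOENT if the elem does not exist
 */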

int bpf_local_storage_alloc(void *owner,
			    struct bpf_local_storage_map *smap,
			    struct bpf_local_storage_elem *first_selem)
{
	struct bpf_local_storage *prev_storage, *storage;
	struct bpf_local_storage **owner_storage_ptr;
	int err;

	err = mem_charge(smap, owner, sizeof(*storage));
	if (err)
		return err;

	storage = bpf_map_kzalloc(&smap->map, sizeof(*storage),
				  GFP_ATOMIC | __GFP_NOWARN);
	if (!storage) {
		err = -ENOMEM;
		goto uncharge;
	}

	INIT_HLIST_HEAD(&storage->list);
	raw_spin_lock_init(&storage->lock);
	storage->owner = owner;

	bpf_selem_link_storage_nolock(storage, first_selem);
	bpf_selem_link_map(smap, first_selem);

	owner_storage_ptr =
		(struct bpf_local_storage **)owner_storage(smap, owner);
	/* Publish storage to the owner.
	 * Instead of using any lock of the kernel object (i.e. owner),
	 * cmpxchg will work with any kernel object regardless of what
	 * the running context is, bh, irq...etc.
	 *
	 * From now on, the owner->storage pointer (e.g. sk->sk_bpf_storage)
	 * is protected by the storage->lock. Hence, when freeing
	 * the owner->storage, the storage->lock must be held before
	 * setting owner->storage ptr to NULL.
	 */
	prev_storage = cmpxchg(owner_storage_ptr, NULL, storage);
	if (unlikely(prev_storage)) {
		bpf_selem_unlink_map(first_selem);
		err = -EAGAIN;
		goto uncharge;

		/* Note that even though first_selem was linked to smap's
		 * bucket->list, first_selem can be freed immediately
		 * (instead of kfree_rcu) because
		 * bpf_local_storage_map_free() does a
		 * synchronize_rcu_mult (waiting for both sleepable and
		 * normal programs) before walking the bucket->list.
		 * Hence, no one is accessing selem from the
		 * bucket->list under rcu_read_lock().
		 */
	}

	return 0;

uncharge:
	kfree(storage);
	mem_uncharge(smap, owner, sizeof(*storage));
	return err;
}

/* The owner (e.g. sk) cannot be going away while the caller is linking a
 * new elem to the owner's storage (e.g. sk->sk_refcnt cannot be 0).
 * Otherwise, the new elem would leak (and cause other memory issues
 * during map destruction).
 */
struct bpf_local_storage_data *
bpf_local_storage_update(void *owner, struct bpf_local_storage_map *smap,
			 void *value, u64 map_flags)
{
	struct bpf_local_storage_data *old_sdata = NULL;
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage *local_storage;
	unsigned long flags;
	int err;

	/* BPF_EXIST and BPF_NOEXIST cannot both be set */
	if (unlikely((map_flags & ~BPF_F_LOCK) > BPF_EXIST) ||
	    /* BPF_F_LOCK can only be used in a value with spin_lock */
	    unlikely((map_flags & BPF_F_LOCK) &&
		     !map_value_has_spin_lock(&smap->map)))
		return ERR_PTR(-EINVAL);

	local_storage = rcu_dereference_check(*owner_storage(smap, owner),
					      bpf_rcu_lock_held());
	if (!local_storage || hlist_empty(&local_storage->list)) {
		/* Very first elem for the owner */
		err = check_flags(NULL, map_flags);
		if (err)
			return ERR_PTR(err);

		selem = bpf_selem_alloc(smap, owner, value, true);
		if (!selem)
			return ERR_PTR(-ENOMEM);

		err = bpf_local_storage_alloc(owner, smap, selem);
		if (err) {
			kfree(selem);
			mem_uncharge(smap, owner, smap->elem_size);
			return ERR_PTR(err);
		}

		return SDATA(selem);
	}

	if ((map_flags & BPF_F_LOCK) && !(map_flags & BPF_NOEXIST)) {
		/* Hoping to find an old_sdata to do an inline update
		 * such that it can avoid taking the local_storage->lock
		 * and changing the lists.
		 */
		old_sdata =
			bpf_local_storage_lookup(local_storage, smap, false);
		err = check_flags(old_sdata, map_flags);
		if (err)
			return ERR_PTR(err);
		if (old_sdata && selem_linked_to_storage(SELEM(old_sdata))) {
			copy_map_value_locked(&smap->map, old_sdata->data,
					      value, false);
			return old_sdata;
		}
	}

	raw_spin_lock_irqsave(&local_storage->lock, flags);

	/* Recheck local_storage->list under local_storage->lock */
	if (unlikely(hlist_empty(&local_storage->list))) {
		/* A parallel delete is happening and local_storage is going
		 * away. It has just been checked before, so this is very
		 * unlikely. Return instead of retrying to keep things
		 * simple.
		 */
		err = -EAGAIN;
		goto unlock_err;
	}

	old_sdata = bpf_local_storage_lookup(local_storage, smap, false);
	err = check_flags(old_sdata, map_flags);
	if (err)
		goto unlock_err;

	if (old_sdata && (map_flags & BPF_F_LOCK)) {
		copy_map_value_locked(&smap->map, old_sdata->data, value,
				      false);
		selem = SELEM(old_sdata);
		goto unlock;
	}

	/* local_storage->lock is held. Hence, we are sure
	 * we can unlink and uncharge the old_sdata successfully
	 * later. Hence, instead of charging the new selem now
	 * and then uncharging the old selem later (which may cause
	 * a potential but unnecessary charge failure), avoid taking
	 * a charge at all here (the "!old_sdata" check) and the
	 * old_sdata will not be uncharged later during
	 * bpf_selem_unlink_storage_nolock().
	 */
	selem = bpf_selem_alloc(smap, owner, value, !old_sdata);
	if (!selem) {
		err = -ENOMEM;
		goto unlock_err;
	}

	/* First, link the new selem to the map */
	bpf_selem_link_map(smap, selem);

	/* Second, link (and publish) the new selem to local_storage */
	bpf_selem_link_storage_nolock(local_storage, selem);

	/* Third, remove the old selem, SELEM(old_sdata) */
	if (old_sdata) {
		bpf_selem_unlink_map(SELEM(old_sdata));
		bpf_selem_unlink_storage_nolock(local_storage, SELEM(old_sdata),
						false);
	}

unlock:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return SDATA(selem);

unlock_err:
	raw_spin_unlock_irqrestore(&local_storage->lock, flags);
	return ERR_PTR(err);
}
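
/* Illustrative caller (a sketch modeled on the sk-storage syscall path, not
 * part of this file): map-type wrappers call bpf_local_storage_update()
 * with their owner object and propagate the ERR_PTR-encoded result:
 *
 *	static int example_update_elem(struct bpf_map *map, void *owner,
 *				       void *value, u64 map_flags)
 *	{
 *		struct bpf_local_storage_data *sdata;
 *
 *		sdata = bpf_local_storage_update(owner,
 *				(struct bpf_local_storage_map *)map,
 *				value, map_flags);
 *
 *		return PTR_ERR_OR_ZERO(sdata);
 *	}
 */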

u16 bpf_local_storage_cache_idx_get(struct bpf_local_storage_cache *cache)
{
	u64 min_usage = U64_MAX;
	u16 i, res = 0;

	spin_lock(&cache->idx_lock);

	for (i = 0; i < BPF_LOCAL_STORAGE_CACHE_SIZE; i++) {
		if (cache->idx_usage_counts[i] < min_usage) {
			min_usage = cache->idx_usage_counts[i];
			res = i;

			/* Found a free cache_idx */
			if (!min_usage)
				break;
		}
	}
	cache->idx_usage_counts[res]++;

	spin_unlock(&cache->idx_lock);

	return res;
}
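
/* Example: with BPF_LOCAL_STORAGE_CACHE_SIZE slots (16 at the time of
 * writing) and the first two slots already taken by earlier maps (usage
 * counts 1, 1, 0, ...), the scan stops at index 2, the first count of 0.
 * Each new map thus gets the least-used cache slot, so up to cache-size
 * concurrently-used maps hit the lookup fast path without sharing a slot.
 */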

void bpf_local_storage_cache_idx_free(struct bpf_local_storage_cache *cache,
				      u16 idx)
{
	spin_lock(&cache->idx_lock);
	cache->idx_usage_counts[idx]--;
	spin_unlock(&cache->idx_lock);
}

void bpf_local_storage_map_free(struct bpf_local_storage_map *smap,
				int __percpu *busy_counter)
{
	struct bpf_local_storage_elem *selem;
	struct bpf_local_storage_map_bucket *b;
	unsigned int i;

	/* Note that this map might be concurrently cloned from
	 * bpf_sk_storage_clone. Wait for any existing bpf_sk_storage_clone
	 * RCU read section to finish before proceeding. New RCU
	 * read sections should be prevented via bpf_map_inc_not_zero.
	 */
	synchronize_rcu();

	/* bpf prog and the userspace can no longer access this map
	 * now. No new selem (of this map) can be added
	 * to the owner->storage or to the map bucket's list.
	 *
	 * The elem of this map can be cleaned up here
	 * or when the storage is freed e.g.
	 * by bpf_sk_storage_free() during __sk_destruct().
	 */
	for (i = 0; i < (1U << smap->bucket_log); i++) {
		b = &smap->buckets[i];

		rcu_read_lock();
		/* No one is adding to b->list now */
		while ((selem = hlist_entry_safe(
				rcu_dereference_raw(hlist_first_rcu(&b->list)),
				struct bpf_local_storage_elem, map_node))) {
			if (busy_counter) {
				migrate_disable();
				__this_cpu_inc(*busy_counter);
			}
			bpf_selem_unlink(selem);
			if (busy_counter) {
				__this_cpu_dec(*busy_counter);
				migrate_enable();
			}
			cond_resched_rcu();
		}
		rcu_read_unlock();
	}
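
	/* The busy_counter pokes above bracket bpf_selem_unlink() for map
	 * types whose helpers can recurse into this code (e.g. task local
	 * storage): while the per-cpu counter is raised, a program firing
	 * on this CPU sees it and bails out instead of contending for the
	 * same locks taken during the unlink.
	 */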

	/* While freeing the storage we may still need to access the map.
	 *
	 * e.g. when bpf_sk_storage_free() has unlinked selem from the map
	 * which then made the above while((selem = ...)) loop
	 * exit immediately.
	 *
	 * However, while freeing the storage one still needs to access the
	 * smap->elem_size to do the uncharging in
	 * bpf_selem_unlink_storage_nolock().
	 *
	 * Hence, wait another rcu grace period for the storage to be freed.
	 */
	synchronize_rcu();

	kvfree(smap->buckets);
	kfree(smap);
}

int bpf_local_storage_map_alloc_check(union bpf_attr *attr)
{
	if (attr->map_flags & ~BPF_LOCAL_STORAGE_CREATE_FLAG_MASK ||
	    !(attr->map_flags & BPF_F_NO_PREALLOC) ||
	    attr->max_entries ||
	    attr->key_size != sizeof(int) || !attr->value_size ||
	    /* Enforce BTF for userspace sk dumping */
	    !attr->btf_key_type_id || !attr->btf_value_type_id)
		return -EINVAL;

	if (!bpf_capable())
		return -EPERM;

	if (attr->value_size > BPF_LOCAL_STORAGE_MAX_VALUE_SIZE)
		return -E2BIG;

	return 0;
}
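
/* Example of an attr that passes the checks above (illustrative values):
 *
 *	map_type	= BPF_MAP_TYPE_SK_STORAGE (or task/inode storage)
 *	map_flags	= BPF_F_NO_PREALLOC
 *	key_size	= 4 (an int; the owner object is the real key)
 *	value_size	= any non-zero size up to
 *			  BPF_LOCAL_STORAGE_MAX_VALUE_SIZE
 *	max_entries	= 0 (entries follow owners, no preallocated table)
 *	btf_key_type_id / btf_value_type_id = valid BTF type ids
 */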

struct bpf_local_storage_map *bpf_local_storage_map_alloc(union bpf_attr *attr)
{
	struct bpf_local_storage_map *smap;
	unsigned int i;
	u32 nbuckets;

	smap = kzalloc(sizeof(*smap), GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap)
		return ERR_PTR(-ENOMEM);
	bpf_map_init_from_attr(&smap->map, attr);

	nbuckets = roundup_pow_of_two(num_possible_cpus());
	/* Use at least 2 buckets, select_bucket() is undefined behavior with 1 bucket */
	nbuckets = max_t(u32, 2, nbuckets);
	smap->bucket_log = ilog2(nbuckets);

	smap->buckets = kvcalloc(sizeof(*smap->buckets), nbuckets,
				 GFP_USER | __GFP_NOWARN | __GFP_ACCOUNT);
	if (!smap->buckets) {
		kfree(smap);
		return ERR_PTR(-ENOMEM);
	}

	for (i = 0; i < nbuckets; i++) {
		INIT_HLIST_HEAD(&smap->buckets[i].list);
		raw_spin_lock_init(&smap->buckets[i].lock);
	}

	smap->elem_size =
		sizeof(struct bpf_local_storage_elem) + attr->value_size;

	return smap;
}
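
/* Example: on a machine with 6 possible CPUs, roundup_pow_of_two(6) = 8,
 * so nbuckets = 8 and smap->bucket_log = 3; a single-CPU machine still
 * gets nbuckets = 2 because of the max_t() clamp above.
 */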

int bpf_local_storage_map_check_btf(const struct bpf_map *map,
				    const struct btf *btf,
				    const struct btf_type *key_type,
				    const struct btf_type *value_type)
{
	u32 int_data;

	if (BTF_INFO_KIND(key_type->info) != BTF_KIND_INT)
		return -EINVAL;

	int_data = *(u32 *)(key_type + 1);
	if (BTF_INT_BITS(int_data) != 32 || BTF_INT_OFFSET(int_data))
		return -EINVAL;

	return 0;
}
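
/* In BTF terms the key must be a plain 32-bit integer at offset 0, e.g. the
 * "int" key declared in a typical libbpf map definition:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_SK_STORAGE);
 *		__uint(map_flags, BPF_F_NO_PREALLOC);
 *		__type(key, int);
 *		__type(value, struct my_value);	// hypothetical value type
 *	} sk_stg_map SEC(".maps");
 */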