GNU Linux-libre 4.14.265-gnu1
kernel/bpf/bpf_lru_list.c
/* Copyright (c) 2016 Facebook
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of version 2 of the GNU General Public
 * License as published by the Free Software Foundation.
 */
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/percpu.h>

#include "bpf_lru_list.h"

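/* Batch sizes: the common (shared) LRU refills a CPU's local free list in
 * chunks of LOCAL_FREE_TARGET nodes, while the per-CPU LRU shrinks its own
 * list in much smaller chunks since no other CPU competes for it.
 */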
#define LOCAL_FREE_TARGET		(128)
#define LOCAL_NR_SCANS			LOCAL_FREE_TARGET

#define PERCPU_FREE_TARGET		(4)
#define PERCPU_NR_SCANS			PERCPU_FREE_TARGET

/* Helpers to get the local list index */
#define LOCAL_LIST_IDX(t)	((t) - BPF_LOCAL_LIST_T_OFFSET)
#define LOCAL_FREE_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_FREE)
#define LOCAL_PENDING_LIST_IDX	LOCAL_LIST_IDX(BPF_LRU_LOCAL_LIST_T_PENDING)
#define IS_LOCAL_LIST_TYPE(t)	((t) >= BPF_LOCAL_LIST_T_OFFSET)

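/* Walk the possible CPUs in round-robin order, wrapping back to the first
 * possible CPU after the last one.  Used when stealing free nodes from
 * other CPUs' local lists.
 */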
static int get_next_cpu(int cpu)
{
	cpu = cpumask_next(cpu, cpu_possible_mask);
	if (cpu >= nr_cpu_ids)
		cpu = cpumask_first(cpu_possible_mask);
	return cpu;
}

/* Local list helpers */
static struct list_head *local_free_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_FREE_LIST_IDX];
}

static struct list_head *local_pending_list(struct bpf_lru_locallist *loc_l)
{
	return &loc_l->lists[LOCAL_PENDING_LIST_IDX];
}

/* bpf_lru_node helpers */
static bool bpf_lru_node_is_ref(const struct bpf_lru_node *node)
{
	return node->ref;
}

static void bpf_lru_list_count_inc(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]++;
}

static void bpf_lru_list_count_dec(struct bpf_lru_list *l,
				   enum bpf_lru_list_type type)
{
	if (type < NR_BPF_LRU_LIST_COUNT)
		l->counts[type]--;
}

static void __bpf_lru_node_move_to_free(struct bpf_lru_list *l,
					struct bpf_lru_node *node,
					struct list_head *free_list,
					enum bpf_lru_list_type tgt_free_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	/* If the node being removed is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer as well.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	bpf_lru_list_count_dec(l, node->type);

	node->type = tgt_free_type;
	list_move(&node->list, free_list);
}

/* Move nodes from the local list to the LRU list */
static void __bpf_lru_node_move_in(struct bpf_lru_list *l,
				   struct bpf_lru_node *node,
				   enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(!IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	bpf_lru_list_count_inc(l, tgt_type);
	node->type = tgt_type;
	node->ref = 0;
	list_move(&node->list, &l->lists[tgt_type]);
}

/* Move nodes between or within the active and inactive lists (e.g.
 * active to inactive, inactive to active, or tail of active back to
 * the head of active).
 */
static void __bpf_lru_node_move(struct bpf_lru_list *l,
				struct bpf_lru_node *node,
				enum bpf_lru_list_type tgt_type)
{
	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)) ||
	    WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(tgt_type)))
		return;

	if (node->type != tgt_type) {
		bpf_lru_list_count_dec(l, node->type);
		bpf_lru_list_count_inc(l, tgt_type);
		node->type = tgt_type;
	}
	node->ref = 0;

	/* If the node being moved is the next_inactive_rotation candidate,
	 * move the next_inactive_rotation pointer as well.
	 */
	if (&node->list == l->next_inactive_rotation)
		l->next_inactive_rotation = l->next_inactive_rotation->prev;

	list_move(&node->list, &l->lists[tgt_type]);
}

static bool bpf_lru_list_inactive_low(const struct bpf_lru_list *l)
{
	return l->counts[BPF_LRU_LIST_T_INACTIVE] <
		l->counts[BPF_LRU_LIST_T_ACTIVE];
}

/* Rotate the active list:
 * 1. Start from the tail.
 * 2. If the node has the ref bit set, it is rotated back to the head
 *    of the active list with the ref bit cleared.
 *    Give this node one more chance to survive in the active list.
 * 3. If the ref bit is not set, move it to the head of the
 *    inactive list.
 * 4. It scans at most nr_scans nodes.
 */
static void __bpf_lru_list_rotate_active(struct bpf_lru *lru,
					 struct bpf_lru_list *l)
{
	struct list_head *active = &l->lists[BPF_LRU_LIST_T_ACTIVE];
	struct bpf_lru_node *node, *tmp_node, *first_node;
	unsigned int i = 0;

	first_node = list_first_entry(active, struct bpf_lru_node, list);
	list_for_each_entry_safe_reverse(node, tmp_node, active, list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);

		if (++i == lru->nr_scans || node == first_node)
			break;
	}
}

/* Rotate the inactive list.  It starts from the next_inactive_rotation
 * candidate.
 * 1. If the node has the ref bit set, it is moved to the head
 *    of the active list with the ref bit cleared.
 * 2. If the node does not have the ref bit set, it is left at its
 *    current location (i.e. do nothing) so that it can be
 *    considered during the next inactive_shrink.
 * 3. It scans at most nr_scans nodes.
 */
static void __bpf_lru_list_rotate_inactive(struct bpf_lru *lru,
					   struct bpf_lru_list *l)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct list_head *cur, *last, *next = inactive;
	struct bpf_lru_node *node;
	unsigned int i = 0;

	if (list_empty(inactive))
		return;

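	/* Pre-compute where a full pass over the list should stop: the
	 * neighbour of the starting point in the direction opposite to the
	 * scan, skipping the list head since it is not a real node.
	 */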
	last = l->next_inactive_rotation->next;
	if (last == inactive)
		last = last->next;

	cur = l->next_inactive_rotation;
	while (i < lru->nr_scans) {
		if (cur == inactive) {
			cur = cur->prev;
			continue;
		}

		node = list_entry(cur, struct bpf_lru_node, list);
		next = cur->prev;
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		if (cur == last)
			break;
		cur = next;
		i++;
	}

	l->next_inactive_rotation = next;
}

/* Shrink the inactive list.  It starts from the tail of the
 * inactive list and only moves the nodes without the ref bit
 * set to the designated free list.
 */
static unsigned int
__bpf_lru_list_shrink_inactive(struct bpf_lru *lru,
			       struct bpf_lru_list *l,
			       unsigned int tgt_nshrink,
			       struct list_head *free_list,
			       enum bpf_lru_list_type tgt_free_type)
{
	struct list_head *inactive = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nshrinked = 0;
	unsigned int i = 0;

	list_for_each_entry_safe_reverse(node, tmp_node, inactive, list) {
		if (bpf_lru_node_is_ref(node)) {
			__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_ACTIVE);
		} else if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			if (++nshrinked == tgt_nshrink)
				break;
		}

		if (++i == lru->nr_scans)
			break;
	}

	return nshrinked;
}

/* 1. Rotate the active list (if needed)
 * 2. Always rotate the inactive list
 */
static void __bpf_lru_list_rotate(struct bpf_lru *lru, struct bpf_lru_list *l)
{
	if (bpf_lru_list_inactive_low(l))
		__bpf_lru_list_rotate_active(lru, l);

	__bpf_lru_list_rotate_inactive(lru, l);
}

/* Calls __bpf_lru_list_shrink_inactive() to shrink some
 * ref-bit-cleared nodes and move them to the designated
 * free list.
 *
 * If it cannot get a free node after calling
 * __bpf_lru_list_shrink_inactive(), it will just remove
 * one node from either the inactive or active list without
 * honoring the ref bit.  It prefers the inactive list over
 * the active list in this situation.
 */
static unsigned int __bpf_lru_list_shrink(struct bpf_lru *lru,
					  struct bpf_lru_list *l,
					  unsigned int tgt_nshrink,
					  struct list_head *free_list,
					  enum bpf_lru_list_type tgt_free_type)
{
	struct bpf_lru_node *node, *tmp_node;
	struct list_head *force_shrink_list;
	unsigned int nshrinked;

	nshrinked = __bpf_lru_list_shrink_inactive(lru, l, tgt_nshrink,
						   free_list, tgt_free_type);
	if (nshrinked)
		return nshrinked;

	/* Do a force shrink by ignoring the reference bit */
	if (!list_empty(&l->lists[BPF_LRU_LIST_T_INACTIVE]))
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_INACTIVE];
	else
		force_shrink_list = &l->lists[BPF_LRU_LIST_T_ACTIVE];

	list_for_each_entry_safe_reverse(node, tmp_node, force_shrink_list,
					 list) {
		if (lru->del_from_htab(lru->del_arg, node)) {
			__bpf_lru_node_move_to_free(l, node, free_list,
						    tgt_free_type);
			return 1;
		}
	}

	return 0;
}

/* Flush the nodes from the local pending list to the LRU list */
static void __local_list_flush(struct bpf_lru_list *l,
			       struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node, *tmp_node;

	list_for_each_entry_safe_reverse(node, tmp_node,
					 local_pending_list(loc_l), list) {
		if (bpf_lru_node_is_ref(node))
			__bpf_lru_node_move_in(l, node, BPF_LRU_LIST_T_ACTIVE);
		else
			__bpf_lru_node_move_in(l, node,
					       BPF_LRU_LIST_T_INACTIVE);
	}
}

static void bpf_lru_list_push_free(struct bpf_lru_list *l,
				   struct bpf_lru_node *node)
{
	unsigned long flags;

	if (WARN_ON_ONCE(IS_LOCAL_LIST_TYPE(node->type)))
		return;

	raw_spin_lock_irqsave(&l->lock, flags);
	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);
	raw_spin_unlock_irqrestore(&l->lock, flags);
}

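/* Refill a CPU's local free list from the shared LRU: flush the local
 * pending list back into the LRU, rotate the LRU, then move up to
 * LOCAL_FREE_TARGET nodes from the LRU free list (shrinking the LRU when
 * the free list alone cannot meet the target).
 */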
static void bpf_lru_list_pop_free_to_local(struct bpf_lru *lru,
					   struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	struct bpf_lru_node *node, *tmp_node;
	unsigned int nfree = 0;

	raw_spin_lock(&l->lock);

	__local_list_flush(l, loc_l);

	__bpf_lru_list_rotate(lru, l);

	list_for_each_entry_safe(node, tmp_node, &l->lists[BPF_LRU_LIST_T_FREE],
				 list) {
		__bpf_lru_node_move_to_free(l, node, local_free_list(loc_l),
					    BPF_LRU_LOCAL_LIST_T_FREE);
		if (++nfree == LOCAL_FREE_TARGET)
			break;
	}

	if (nfree < LOCAL_FREE_TARGET)
		__bpf_lru_list_shrink(lru, l, LOCAL_FREE_TARGET - nfree,
				      local_free_list(loc_l),
				      BPF_LRU_LOCAL_LIST_T_FREE);

	raw_spin_unlock(&l->lock);
}

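/* Tag the node with the new hash and the owning CPU and park it on the
 * local pending list; __local_list_flush() will later move it into the
 * global LRU list.
 */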
static void __local_list_add_pending(struct bpf_lru *lru,
				     struct bpf_lru_locallist *loc_l,
				     int cpu,
				     struct bpf_lru_node *node,
				     u32 hash)
{
	*(u32 *)((void *)node + lru->hash_offset) = hash;
	node->cpu = cpu;
	node->type = BPF_LRU_LOCAL_LIST_T_PENDING;
	node->ref = 0;
	list_add(&node->list, local_pending_list(loc_l));
}

static struct bpf_lru_node *
__local_list_pop_free(struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;

	node = list_first_entry_or_null(local_free_list(loc_l),
					struct bpf_lru_node,
					list);
	if (node)
		list_del(&node->list);

	return node;
}

static struct bpf_lru_node *
__local_list_pop_pending(struct bpf_lru *lru, struct bpf_lru_locallist *loc_l)
{
	struct bpf_lru_node *node;
	bool force = false;

ignore_ref:
	/* Get from the tail (i.e. older element) of the pending list. */
	list_for_each_entry_reverse(node, local_pending_list(loc_l),
				    list) {
		if ((!bpf_lru_node_is_ref(node) || force) &&
		    lru->del_from_htab(lru->del_arg, node)) {
			list_del(&node->list);
			return node;
		}
	}

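	/* Nothing could be removed while honoring the ref bit; retry once
	 * more and take any node that the htab callback lets go of.
	 */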
	if (!force) {
		force = true;
		goto ignore_ref;
	}

	return NULL;
}

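/* Per-CPU LRU: each CPU owns its own bpf_lru_list, so everything is done
 * under that CPU's lock and there is no local free/pending list or stealing.
 */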
static struct bpf_lru_node *bpf_percpu_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct list_head *free_list;
	struct bpf_lru_node *node = NULL;
	struct bpf_lru_list *l;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	l = per_cpu_ptr(lru->percpu_lru, cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_list_rotate(lru, l);

	free_list = &l->lists[BPF_LRU_LIST_T_FREE];
	if (list_empty(free_list))
		__bpf_lru_list_shrink(lru, l, PERCPU_FREE_TARGET, free_list,
				      BPF_LRU_LIST_T_FREE);

	if (!list_empty(free_list)) {
		node = list_first_entry(free_list, struct bpf_lru_node, list);
		*(u32 *)((void *)node + lru->hash_offset) = hash;
		node->ref = 0;
		__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_INACTIVE);
	}

	raw_spin_unlock_irqrestore(&l->lock, flags);

	return node;
}

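/* Common (shared) LRU: try this CPU's local free list first, refill it from
 * the global LRU list if it is empty, and as a last resort steal a node from
 * another CPU's local free/pending list.
 */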
static struct bpf_lru_node *bpf_common_lru_pop_free(struct bpf_lru *lru,
						    u32 hash)
{
	struct bpf_lru_locallist *loc_l, *steal_loc_l;
	struct bpf_common_lru *clru = &lru->common_lru;
	struct bpf_lru_node *node;
	int steal, first_steal;
	unsigned long flags;
	int cpu = raw_smp_processor_id();

	loc_l = per_cpu_ptr(clru->local_list, cpu);

	raw_spin_lock_irqsave(&loc_l->lock, flags);

	node = __local_list_pop_free(loc_l);
	if (!node) {
		bpf_lru_list_pop_free_to_local(lru, loc_l);
		node = __local_list_pop_free(loc_l);
	}

	if (node)
		__local_list_add_pending(lru, loc_l, cpu, node, hash);

	raw_spin_unlock_irqrestore(&loc_l->lock, flags);

	if (node)
		return node;

	/* No free node was found on the local free list or the
	 * global LRU list.
	 *
	 * Steal from the local free/pending lists of the current
	 * CPU and the remote CPUs in round robin.  It starts
	 * with the loc_l->next_steal CPU.
	 */

	first_steal = loc_l->next_steal;
	steal = first_steal;
	do {
		steal_loc_l = per_cpu_ptr(clru->local_list, steal);

		raw_spin_lock_irqsave(&steal_loc_l->lock, flags);

		node = __local_list_pop_free(steal_loc_l);
		if (!node)
			node = __local_list_pop_pending(lru, steal_loc_l);

		raw_spin_unlock_irqrestore(&steal_loc_l->lock, flags);

		steal = get_next_cpu(steal);
	} while (!node && steal != first_steal);

	loc_l->next_steal = steal;

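	/* A stolen node is still accounted on this CPU's pending list so
	 * that a later bpf_lru_push_free() can find it through node->cpu.
	 */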
	if (node) {
		raw_spin_lock_irqsave(&loc_l->lock, flags);
		__local_list_add_pending(lru, loc_l, cpu, node, hash);
		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
	}

	return node;
}

struct bpf_lru_node *bpf_lru_pop_free(struct bpf_lru *lru, u32 hash)
{
	if (lru->percpu)
		return bpf_percpu_lru_pop_free(lru, hash);
	else
		return bpf_common_lru_pop_free(lru, hash);
}

static void bpf_common_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	u8 node_type = READ_ONCE(node->type);
	unsigned long flags;

	if (WARN_ON_ONCE(node_type == BPF_LRU_LIST_T_FREE) ||
	    WARN_ON_ONCE(node_type == BPF_LRU_LOCAL_LIST_T_FREE))
		return;

	if (node_type == BPF_LRU_LOCAL_LIST_T_PENDING) {
		struct bpf_lru_locallist *loc_l;

		loc_l = per_cpu_ptr(lru->common_lru.local_list, node->cpu);

		raw_spin_lock_irqsave(&loc_l->lock, flags);

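		/* Re-check under the lock: another CPU may have stolen this
		 * node from the pending list (changing node->type) between
		 * the lockless READ_ONCE() above and taking loc_l->lock.
		 */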
		if (unlikely(node->type != BPF_LRU_LOCAL_LIST_T_PENDING)) {
			raw_spin_unlock_irqrestore(&loc_l->lock, flags);
			goto check_lru_list;
		}

		node->type = BPF_LRU_LOCAL_LIST_T_FREE;
		node->ref = 0;
		list_move(&node->list, local_free_list(loc_l));

		raw_spin_unlock_irqrestore(&loc_l->lock, flags);
		return;
	}

check_lru_list:
	bpf_lru_list_push_free(&lru->common_lru.lru_list, node);
}

static void bpf_percpu_lru_push_free(struct bpf_lru *lru,
				     struct bpf_lru_node *node)
{
	struct bpf_lru_list *l;
	unsigned long flags;

	l = per_cpu_ptr(lru->percpu_lru, node->cpu);

	raw_spin_lock_irqsave(&l->lock, flags);

	__bpf_lru_node_move(l, node, BPF_LRU_LIST_T_FREE);

	raw_spin_unlock_irqrestore(&l->lock, flags);
}

void bpf_lru_push_free(struct bpf_lru *lru, struct bpf_lru_node *node)
{
	if (lru->percpu)
		bpf_percpu_lru_push_free(lru, node);
	else
		bpf_common_lru_push_free(lru, node);
}

static void bpf_common_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	struct bpf_lru_list *l = &lru->common_lru.lru_list;
	u32 i;

	for (i = 0; i < nr_elems; i++) {
		struct bpf_lru_node *node;

		node = (struct bpf_lru_node *)(buf + node_offset);
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		buf += elem_size;
	}
}

static void bpf_percpu_lru_populate(struct bpf_lru *lru, void *buf,
				    u32 node_offset, u32 elem_size,
				    u32 nr_elems)
{
	u32 i, pcpu_entries;
	int cpu;
	struct bpf_lru_list *l;

	pcpu_entries = nr_elems / num_possible_cpus();

	i = 0;

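	/* Hand each possible CPU a contiguous chunk of pcpu_entries nodes,
	 * stopping once all nr_elems nodes have been placed on a free list.
	 */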
	for_each_possible_cpu(cpu) {
		struct bpf_lru_node *node;

		l = per_cpu_ptr(lru->percpu_lru, cpu);
again:
		node = (struct bpf_lru_node *)(buf + node_offset);
		node->cpu = cpu;
		node->type = BPF_LRU_LIST_T_FREE;
		node->ref = 0;
		list_add(&node->list, &l->lists[BPF_LRU_LIST_T_FREE]);
		i++;
		buf += elem_size;
		if (i == nr_elems)
			break;
		if (i % pcpu_entries)
			goto again;
	}
}

void bpf_lru_populate(struct bpf_lru *lru, void *buf, u32 node_offset,
		      u32 elem_size, u32 nr_elems)
{
	if (lru->percpu)
		bpf_percpu_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
	else
		bpf_common_lru_populate(lru, buf, node_offset, elem_size,
					nr_elems);
}

static void bpf_lru_locallist_init(struct bpf_lru_locallist *loc_l, int cpu)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LOCAL_LIST_T; i++)
		INIT_LIST_HEAD(&loc_l->lists[i]);

	loc_l->next_steal = cpu;

	raw_spin_lock_init(&loc_l->lock);
}

static void bpf_lru_list_init(struct bpf_lru_list *l)
{
	int i;

	for (i = 0; i < NR_BPF_LRU_LIST_T; i++)
		INIT_LIST_HEAD(&l->lists[i]);

	for (i = 0; i < NR_BPF_LRU_LIST_COUNT; i++)
		l->counts[i] = 0;

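	/* Start the inactive-rotation cursor at the list head; the first
	 * rotation will step from the head onto a real node.
	 */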
	l->next_inactive_rotation = &l->lists[BPF_LRU_LIST_T_INACTIVE];

	raw_spin_lock_init(&l->lock);
}

int bpf_lru_init(struct bpf_lru *lru, bool percpu, u32 hash_offset,
		 del_from_htab_func del_from_htab, void *del_arg)
{
	int cpu;

	if (percpu) {
		lru->percpu_lru = alloc_percpu(struct bpf_lru_list);
		if (!lru->percpu_lru)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_list *l;

			l = per_cpu_ptr(lru->percpu_lru, cpu);
			bpf_lru_list_init(l);
		}
		lru->nr_scans = PERCPU_NR_SCANS;
	} else {
		struct bpf_common_lru *clru = &lru->common_lru;

		clru->local_list = alloc_percpu(struct bpf_lru_locallist);
		if (!clru->local_list)
			return -ENOMEM;

		for_each_possible_cpu(cpu) {
			struct bpf_lru_locallist *loc_l;

			loc_l = per_cpu_ptr(clru->local_list, cpu);
			bpf_lru_locallist_init(loc_l, cpu);
		}

		bpf_lru_list_init(&clru->lru_list);
		lru->nr_scans = LOCAL_NR_SCANS;
	}

	lru->percpu = percpu;
	lru->del_from_htab = del_from_htab;
	lru->del_arg = del_arg;
	lru->hash_offset = hash_offset;

	return 0;
}

void bpf_lru_destroy(struct bpf_lru *lru)
{
	if (lru->percpu)
		free_percpu(lru->percpu_lru);
	else
		free_percpu(lru->common_lru.local_list);
}