net/core/page_pool.c (GNU Linux-libre 5.10.153-gnu1)
/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *      Author: Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *      Copyright (C) 2016 Red Hat, Inc.
 */

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool.h>
#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for __put_page() */

#include <trace/events/page_pool.h>

#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

static int page_pool_init(struct page_pool *pool,
                          const struct page_pool_params *params)
{
        unsigned int ring_qsize = 1024; /* Default */

        memcpy(&pool->p, params, sizeof(pool->p));

        /* Validate only known flags were used */
        if (pool->p.flags & ~(PP_FLAG_ALL))
                return -EINVAL;

        if (pool->p.pool_size)
                ring_qsize = pool->p.pool_size;

        /* Sanity limit mem that can be pinned down */
        if (ring_qsize > 32768)
                return -E2BIG;

        /* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
         * DMA_BIDIRECTIONAL allows pages to also be used for DMA transmit,
         * which is the XDP_TX use-case.
         */
        if (pool->p.flags & PP_FLAG_DMA_MAP) {
                if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
                    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
                        return -EINVAL;
        }

        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
                /* In order to request DMA-sync-for-device the page
                 * needs to be mapped
                 */
                if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                        return -EINVAL;

                if (!pool->p.max_len)
                        return -EINVAL;

                /* pool->p.offset has to be set according to the address
                 * offset used by the DMA engine to start copying rx data
                 */
        }

        if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
                return -ENOMEM;

        atomic_set(&pool->pages_state_release_cnt, 0);

        /* The driver that calls page_pool_create() must also call
         * page_pool_destroy()
         */
        refcount_set(&pool->user_cnt, 1);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                get_device(pool->p.dev);

        return 0;
}

struct page_pool *page_pool_create(const struct page_pool_params *params)
{
        struct page_pool *pool;
        int err;

        pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
        if (!pool)
                return ERR_PTR(-ENOMEM);

        err = page_pool_init(pool, params);
        if (err < 0) {
                pr_warn("%s() gave up with errno %d\n", __func__, err);
                kfree(pool);
                return ERR_PTR(err);
        }

        return pool;
}
EXPORT_SYMBOL(page_pool_create);
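
/* Example (illustrative sketch added for this document, not upstream
 * code): a driver that wants the pool to handle DMA mapping and
 * DMA-sync-for-device could set up its parameters roughly as below.
 * The concrete values are assumptions: pool_size would typically match
 * the RX ring size, offset the RX headroom, and pdev->dev stands in
 * for the device performing the DMA.
 *
 *      struct page_pool_params pp_params = {
 *              .flags          = PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *              .order          = 0,
 *              .pool_size      = 256,
 *              .nid            = NUMA_NO_NODE,
 *              .dev            = &pdev->dev,
 *              .dma_dir        = DMA_FROM_DEVICE,
 *              .max_len        = PAGE_SIZE,
 *              .offset         = 0,
 *      };
 *      struct page_pool *pool = page_pool_create(&pp_params);
 *
 *      if (IS_ERR(pool))
 *              return PTR_ERR(pool);
 */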

static void page_pool_return_page(struct page_pool *pool, struct page *page);

noinline
static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
        struct ptr_ring *r = &pool->ring;
        struct page *page;
        int pref_nid; /* preferred NUMA node */

        /* Quicker fallback, avoid locks when ring is empty */
        if (__ptr_ring_empty(r))
                return NULL;

        /* Softirq guarantees the CPU, and thus the NUMA node, is stable.
         * This assumes the CPU refilling the driver RX-ring also runs
         * the RX-NAPI.
         */
#ifdef CONFIG_NUMA
        pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
        /* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
        pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

        /* Slower-path: Get pages from locked ring queue */
        spin_lock(&r->consumer_lock);

        /* Refill alloc array, but only if NUMA match */
        do {
                page = __ptr_ring_consume(r);
                if (unlikely(!page))
                        break;

                if (likely(page_to_nid(page) == pref_nid)) {
                        pool->alloc.cache[pool->alloc.count++] = page;
                } else {
                        /* NUMA mismatch:
                         * (1) release 1 page to the page allocator and
                         * (2) break out and fall through to alloc_pages_node.
                         * This limits stress on the page buddy allocator.
                         */
                        page_pool_return_page(pool, page);
                        page = NULL;
                        break;
                }
        } while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

        /* Return last page */
        if (likely(pool->alloc.count > 0))
                page = pool->alloc.cache[--pool->alloc.count];

        spin_unlock(&r->consumer_lock);
        return page;
}

/* fast path */
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
        struct page *page;

        /* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
        if (likely(pool->alloc.count)) {
                /* Fast-path */
                page = pool->alloc.cache[--pool->alloc.count];
        } else {
                page = page_pool_refill_alloc_cache(pool);
        }

        return page;
}

static void page_pool_dma_sync_for_device(struct page_pool *pool,
                                          struct page *page,
                                          unsigned int dma_sync_size)
{
        dma_addr_t dma_addr = page_pool_get_dma_addr(page);

        dma_sync_size = min(dma_sync_size, pool->p.max_len);
        dma_sync_single_range_for_device(pool->p.dev, dma_addr,
                                         pool->p.offset, dma_sync_size,
                                         pool->p.dma_dir);
}

/* slow path */
noinline
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
                                                 gfp_t _gfp)
{
        struct page *page;
        gfp_t gfp = _gfp;
        dma_addr_t dma;

        /* We could always set __GFP_COMP, and avoid this branch, as
         * prep_new_page() can handle order-0 with __GFP_COMP.
         */
        if (pool->p.order)
                gfp |= __GFP_COMP;

        /* FUTURE development:
         *
         * The current slow-path essentially falls back to single page
         * allocations, which doesn't improve performance.  This code
         * needs bulk allocation support from the page allocator code.
         */

        /* Cache was empty, do real allocation */
#ifdef CONFIG_NUMA
        page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
#else
        page = alloc_pages(gfp, pool->p.order);
#endif
        if (!page)
                return NULL;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                goto skip_dma_map;

        /* Set up the DMA mapping: use the 'struct page' area for storing the
         * DMA address, since dma_addr_t can be either 32 or 64 bits and does
         * not always fit into the page private data (e.g. a 32-bit CPU with
         * 64-bit DMA capabilities).
         * This mapping is kept for the lifetime of the page, until it leaves
         * the pool.
         */
        dma = dma_map_page_attrs(pool->p.dev, page, 0,
                                 (PAGE_SIZE << pool->p.order),
                                 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC);
        if (dma_mapping_error(pool->p.dev, dma)) {
                put_page(page);
                return NULL;
        }
        page_pool_set_dma_addr(page, dma);

        if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

skip_dma_map:
        /* Track how many pages are held 'in-flight' */
        pool->pages_state_hold_cnt++;

        trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);

        /* A page that was just allocated should/must have refcnt 1. */
        return page;
}

/* Use page_pool to replace alloc_pages() API calls, but with a
 * synchronization guarantee on the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
        struct page *page;

        /* Fast-path: Get a page from cache */
        page = __page_pool_get_cached(pool);
        if (page)
                return page;

        /* Slow-path: cache empty, do real allocation */
        page = __page_pool_alloc_pages_slow(pool, gfp);
        return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
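
/* Example (illustrative sketch, added for this document): allocating a
 * page for RX ring refill from softirq context.  GFP_ATOMIC matches
 * softirq context; rx_desc is an assumption standing in for
 * driver-specific descriptor handling.
 *
 *      struct page *page = page_pool_alloc_pages(pool, GFP_ATOMIC);
 *
 *      if (unlikely(!page))
 *              return -ENOMEM;
 *      rx_desc->addr = page_pool_get_dma_addr(page) + pool->p.offset;
 */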

/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 *  https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b) (s32)((a) - (b))
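
/* Worked example (added for clarity): with hold_cnt = 0x00000002 and
 * release_cnt = 0xfffffffe, the u32 subtraction wraps around to
 * 0x00000004, so _distance() still reports 4 pages in flight even
 * though hold_cnt has wrapped past release_cnt.
 */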

static s32 page_pool_inflight(struct page_pool *pool)
{
        u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
        u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
        s32 inflight;

        inflight = _distance(hold_cnt, release_cnt);

        trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
        WARN(inflight < 0, "Negative(%d) inflight packet-pages", inflight);

        return inflight;
}

/* Disconnects a page from a page_pool.  API users may need to
 * disconnect a page to allow it to be used as a regular page, one
 * that will eventually be returned to the normal page allocator via
 * put_page().
 */
void page_pool_release_page(struct page_pool *pool, struct page *page)
{
        dma_addr_t dma;
        int count;

        if (!(pool->p.flags & PP_FLAG_DMA_MAP))
                /* Always account for inflight pages, even if we didn't
                 * map them
                 */
                goto skip_dma_unmap;

        dma = page_pool_get_dma_addr(page);

        /* When page is unmapped, it cannot be returned to our pool */
        dma_unmap_page_attrs(pool->p.dev, dma,
                             PAGE_SIZE << pool->p.order, pool->p.dma_dir,
                             DMA_ATTR_SKIP_CPU_SYNC);
        page_pool_set_dma_addr(page, 0);
skip_dma_unmap:
        /* This may be the last page returned, releasing the pool, so
         * it is not safe to reference pool afterwards.
         */
        count = atomic_inc_return(&pool->pages_state_release_cnt);
        trace_page_pool_state_release(pool, page, count);
}
EXPORT_SYMBOL(page_pool_release_page);
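
/* Example (illustrative sketch, added for this document): a driver
 * that hands a received page to the network stack releases it from
 * the pool first, so the stack can eventually free it with put_page().
 * The build_skb() sizing here is a simplification of real driver code.
 *
 *      page_pool_release_page(pool, page);
 *      skb = build_skb(page_address(page), PAGE_SIZE);
 *      if (unlikely(!skb)) {
 *              put_page(page);
 *              return NULL;
 *      }
 */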

/* Return a page to the page allocator, cleaning up our state */
static void page_pool_return_page(struct page_pool *pool, struct page *page)
{
        page_pool_release_page(pool, page);

        put_page(page);
        /* An optimization would be to call __free_pages(page, pool->p.order)
         * knowing page is not part of page-cache (thus avoiding a
         * __page_cache_release() call).
         */
}

static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
        int ret;
        /* BH protection not needed if current is serving softirq */
        if (in_serving_softirq())
                ret = ptr_ring_produce(&pool->ring, page);
        else
                ret = ptr_ring_produce_bh(&pool->ring, page);

        return (ret == 0) ? true : false;
}

/* Only allow direct recycling in special circumstances, into the
 * alloc side cache.  E.g. during RX-NAPI processing for XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
                                       struct page_pool *pool)
{
        if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
                return false;

        /* Caller MUST have verified/know (page_ref_count(page) == 1) */
        pool->alloc.cache[pool->alloc.count++] = page;
        return true;
}

/* page is NOT reusable when:
 * 1) allocated when system is under some pressure. (page_is_pfmemalloc)
 */
static bool pool_page_reusable(struct page_pool *pool, struct page *page)
{
        return !page_is_pfmemalloc(page);
}

/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->p.max_len).
 * If the page refcnt != 1, then the page will be returned to the
 * memory subsystem.
 */
void page_pool_put_page(struct page_pool *pool, struct page *page,
                        unsigned int dma_sync_size, bool allow_direct)
{
        /* This allocator is optimized for the XDP mode that uses
         * one frame per page, but has fallbacks that act like the
         * regular page allocator APIs.
         *
         * refcnt == 1 means page_pool owns the page, and can recycle it.
         */
        if (likely(page_ref_count(page) == 1 &&
                   pool_page_reusable(pool, page))) {
                /* Read barrier done in page_ref_count / READ_ONCE */

                if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
                        page_pool_dma_sync_for_device(pool, page,
                                                      dma_sync_size);

                if (allow_direct && in_serving_softirq())
                        if (page_pool_recycle_in_cache(page, pool))
                                return;

                if (!page_pool_recycle_in_ring(pool, page)) {
                        /* Cache full, fallback to free pages */
                        page_pool_return_page(pool, page);
                }
                return;
        }
        /* Fallback/non-XDP mode: the API user has an elevated refcnt.
         *
         * Many drivers split up the page into fragments, and some
         * want to keep doing this to save memory and do refcnt based
         * recycling. Support this use case too, to ease drivers
         * switching between XDP/non-XDP.
         *
         * In case page_pool maintains the DMA mapping, the API user must
         * call page_pool_put_page() once.  In this elevated refcnt
         * case, the DMA is unmapped/released, as the driver is likely
         * doing refcnt based recycle tricks, meaning another process
         * will be invoking put_page().
         */
        /* Do not replace this with page_pool_return_page() */
        page_pool_release_page(pool, page);
        put_page(page);
}
EXPORT_SYMBOL(page_pool_put_page);
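
/* Example (illustrative sketch, added for this document): recycling a
 * frame dropped by an XDP program inside the RX-NAPI poll loop.  Since
 * this runs in softirq context, direct recycling into the alloc cache
 * is allowed (allow_direct = true); frame_len is an assumption for how
 * many bytes need the DMA sync.
 *
 *      case XDP_DROP:
 *              page_pool_put_page(pool, page, frame_len, true);
 *              break;
 */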

static void page_pool_empty_ring(struct page_pool *pool)
{
        struct page *page;

        /* Empty recycle ring */
        while ((page = ptr_ring_consume_bh(&pool->ring))) {
                /* Verify the refcnt invariant of cached pages */
                if (!(page_ref_count(page) == 1))
                        pr_crit("%s() page_pool refcnt %d violation\n",
                                __func__, page_ref_count(page));

                page_pool_return_page(pool, page);
        }
}

static void page_pool_free(struct page_pool *pool)
{
        if (pool->disconnect)
                pool->disconnect(pool);

        ptr_ring_cleanup(&pool->ring, NULL);

        if (pool->p.flags & PP_FLAG_DMA_MAP)
                put_device(pool->p.dev);

        kfree(pool);
}

static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
        struct page *page;

        if (pool->destroy_cnt)
                return;

        /* Empty the alloc cache; assume the caller made sure it is
         * no longer in use, and that page_pool_alloc_pages() cannot be
         * called concurrently.
         */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                page_pool_return_page(pool, page);
        }
}

static void page_pool_scrub(struct page_pool *pool)
{
        page_pool_empty_alloc_cache_once(pool);
        pool->destroy_cnt++;

        /* No more consumers should exist, but producers could still
         * be in-flight.
         */
        page_pool_empty_ring(pool);
}

static int page_pool_release(struct page_pool *pool)
{
        int inflight;

        page_pool_scrub(pool);
        inflight = page_pool_inflight(pool);
        if (!inflight)
                page_pool_free(pool);

        return inflight;
}

static void page_pool_release_retry(struct work_struct *wq)
{
        struct delayed_work *dwq = to_delayed_work(wq);
        struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
        int inflight;

        inflight = page_pool_release(pool);
        if (!inflight)
                return;

        /* Periodic warning */
        if (time_after_eq(jiffies, pool->defer_warn)) {
                int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

                pr_warn("%s() stalled pool shutdown %d inflight %d sec\n",
                        __func__, inflight, sec);
                pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
        }

        /* Still not ready to be disconnected, retry later */
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}

void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *))
{
        refcount_inc(&pool->user_cnt);
        pool->disconnect = disconnect;
}

void page_pool_destroy(struct page_pool *pool)
{
        if (!pool)
                return;

        if (!page_pool_put(pool))
                return;

        if (!page_pool_release(pool))
                return;

        pool->defer_start = jiffies;
        pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

        INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
        schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
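
/* Example (illustrative sketch, added for this document): typical
 * driver teardown order.  Producers and consumers must be quiesced
 * before the pool is destroyed; rxq is an assumption standing in for
 * driver state.  If pages are still in flight, the actual free is
 * deferred to page_pool_release_retry() above.
 *
 *      napi_disable(&rxq->napi);
 *      ... driver returns or frees any pages it still holds ...
 *      page_pool_destroy(pool);
 */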

/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
        struct page *page;

        trace_page_pool_update_nid(pool, new_nid);
        pool->p.nid = new_nid;

        /* Flush pool alloc cache, as refill will check NUMA node */
        while (pool->alloc.count) {
                page = pool->alloc.cache[--pool->alloc.count];
                page_pool_return_page(pool, page);
        }
}
EXPORT_SYMBOL(page_pool_update_nid);
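
/* Example (illustrative sketch, added for this document): a driver can
 * re-home its pool when the RX interrupt moves to a CPU on another
 * NUMA node.  The affinity-change hook is an assumption; only the call
 * below is part of this file's API.
 *
 *      page_pool_update_nid(pool, cpu_to_node(new_cpu));
 */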