/* SPDX-License-Identifier: GPL-2.0
 *
 * page_pool.c
 *	Author:	Jesper Dangaard Brouer <netoptimizer@brouer.com>
 *	Copyright (C) 2016 Red Hat, Inc.
 */
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/device.h>

#include <net/page_pool/helpers.h>
#include <net/xdp.h>

#include <linux/dma-direction.h>
#include <linux/dma-mapping.h>
#include <linux/page-flags.h>
#include <linux/mm.h> /* for put_page() */
#include <linux/poison.h>
#include <linux/ethtool.h>
#include <linux/netdevice.h>

#include <trace/events/page_pool.h>

#include "page_pool_priv.h"
#define DEFER_TIME (msecs_to_jiffies(1000))
#define DEFER_WARN_INTERVAL (60 * HZ)

#define BIAS_MAX	(LONG_MAX >> 1)
#ifdef CONFIG_PAGE_POOL_STATS
/* alloc_stat_inc is intended to be used in softirq context */
#define alloc_stat_inc(pool, __stat)	(pool->alloc_stats.__stat++)
/* recycle_stat_inc is safe to use when preemption is possible. */
#define recycle_stat_inc(pool, __stat)					\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_inc(s->__stat);				\
	} while (0)

#define recycle_stat_add(pool, __stat, val)				\
	do {								\
		struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
		this_cpu_add(s->__stat, val);				\
	} while (0)
static const char pp_stats[][ETH_GSTRING_LEN] = {
	"rx_pp_alloc_fast",
	"rx_pp_alloc_slow",
	"rx_pp_alloc_slow_ho",
	"rx_pp_alloc_empty",
	"rx_pp_alloc_refill",
	"rx_pp_alloc_waive",
	"rx_pp_recycle_cached",
	"rx_pp_recycle_cache_full",
	"rx_pp_recycle_ring",
	"rx_pp_recycle_ring_full",
	"rx_pp_recycle_released_ref",
};
/**
 * page_pool_get_stats() - fetch page pool stats
 * @pool:	pool to read the statistics from
 * @stats:	struct page_pool_stats to fill in
 *
 * Retrieve statistics about the page_pool. This API is only available
 * if the kernel has been configured with ``CONFIG_PAGE_POOL_STATS=y``.
 * The caller passes a pointer to a caller-allocated struct page_pool_stats,
 * which this function fills in. The caller can then report those stats to
 * the user (perhaps via ethtool, debugfs, etc.).
 */
bool page_pool_get_stats(const struct page_pool *pool,
			 struct page_pool_stats *stats)
{
	int cpu = 0;

	if (!stats)
		return false;

	/* The caller is responsible for initializing stats. */
	stats->alloc_stats.fast += pool->alloc_stats.fast;
	stats->alloc_stats.slow += pool->alloc_stats.slow;
	stats->alloc_stats.slow_high_order += pool->alloc_stats.slow_high_order;
	stats->alloc_stats.empty += pool->alloc_stats.empty;
	stats->alloc_stats.refill += pool->alloc_stats.refill;
	stats->alloc_stats.waive += pool->alloc_stats.waive;

	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *pcpu =
			per_cpu_ptr(pool->recycle_stats, cpu);

		stats->recycle_stats.cached += pcpu->cached;
		stats->recycle_stats.cache_full += pcpu->cache_full;
		stats->recycle_stats.ring += pcpu->ring;
		stats->recycle_stats.ring_full += pcpu->ring_full;
		stats->recycle_stats.released_refcnt += pcpu->released_refcnt;
	}

	return true;
}
EXPORT_SYMBOL(page_pool_get_stats);
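
/* Usage sketch (hypothetical driver, not part of this file): aggregating
 * stats from all RX queues for ethtool reporting. The caller-allocated
 * stats struct must start out zeroed; "priv", "num_rxqs" and the per-queue
 * "page_pool" pointer are illustrative assumptions.
 *
 *	struct page_pool_stats stats = {};
 *	int i;
 *
 *	for (i = 0; i < priv->num_rxqs; i++)
 *		page_pool_get_stats(priv->rxq[i].page_pool, &stats);
 *
 *	data = page_pool_ethtool_stats_get(data, &stats);
 */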
u8 *page_pool_ethtool_stats_get_strings(u8 *data)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(pp_stats); i++) {
		memcpy(data, pp_stats[i], ETH_GSTRING_LEN);
		data += ETH_GSTRING_LEN;
	}

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_strings);

int page_pool_ethtool_stats_get_count(void)
{
	return ARRAY_SIZE(pp_stats);
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get_count);
u64 *page_pool_ethtool_stats_get(u64 *data, void *stats)
{
	struct page_pool_stats *pool_stats = stats;

	*data++ = pool_stats->alloc_stats.fast;
	*data++ = pool_stats->alloc_stats.slow;
	*data++ = pool_stats->alloc_stats.slow_high_order;
	*data++ = pool_stats->alloc_stats.empty;
	*data++ = pool_stats->alloc_stats.refill;
	*data++ = pool_stats->alloc_stats.waive;
	*data++ = pool_stats->recycle_stats.cached;
	*data++ = pool_stats->recycle_stats.cache_full;
	*data++ = pool_stats->recycle_stats.ring;
	*data++ = pool_stats->recycle_stats.ring_full;
	*data++ = pool_stats->recycle_stats.released_refcnt;

	return data;
}
EXPORT_SYMBOL(page_pool_ethtool_stats_get);
#else
#define alloc_stat_inc(pool, __stat)
#define recycle_stat_inc(pool, __stat)
#define recycle_stat_add(pool, __stat, val)
#endif
static bool page_pool_producer_lock(struct page_pool *pool)
	__acquires(&pool->ring.producer_lock)
{
	/* Take the _bh variant unless we are already in softirq context */
	bool in_softirq = in_softirq();

	if (in_softirq)
		spin_lock(&pool->ring.producer_lock);
	else
		spin_lock_bh(&pool->ring.producer_lock);

	return in_softirq;
}

static void page_pool_producer_unlock(struct page_pool *pool,
				      bool in_softirq)
	__releases(&pool->ring.producer_lock)
{
	if (in_softirq)
		spin_unlock(&pool->ring.producer_lock);
	else
		spin_unlock_bh(&pool->ring.producer_lock);
}
static int page_pool_init(struct page_pool *pool,
			  const struct page_pool_params *params)
{
	unsigned int ring_qsize = 1024; /* Default */

	memcpy(&pool->p, &params->fast, sizeof(pool->p));
	memcpy(&pool->slow, &params->slow, sizeof(pool->slow));

	/* Validate only known flags were used */
	if (pool->p.flags & ~(PP_FLAG_ALL))
		return -EINVAL;

	if (pool->p.pool_size)
		ring_qsize = pool->p.pool_size;

	/* Sanity limit mem that can be pinned down */
	if (ring_qsize > 32768)
		return -E2BIG;

	/* DMA direction is either DMA_FROM_DEVICE or DMA_BIDIRECTIONAL.
	 * DMA_BIDIRECTIONAL allows a page to also be used for DMA sending,
	 * which is the XDP_TX use-case.
	 */
	if (pool->p.flags & PP_FLAG_DMA_MAP) {
		if ((pool->p.dma_dir != DMA_FROM_DEVICE) &&
		    (pool->p.dma_dir != DMA_BIDIRECTIONAL))
			return -EINVAL;
	}

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV) {
		/* In order to request DMA-sync-for-device the page
		 * needs to be mapped
		 */
		if (!(pool->p.flags & PP_FLAG_DMA_MAP))
			return -EINVAL;

		if (!pool->p.max_len)
			return -EINVAL;

		/* pool->p.offset has to be set according to the address
		 * offset used by the DMA engine to start copying rx data
		 */
	}

	pool->has_init_callback = !!pool->slow.init_callback;

#ifdef CONFIG_PAGE_POOL_STATS
	pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
	if (!pool->recycle_stats)
		return -ENOMEM;
#endif

	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0) {
#ifdef CONFIG_PAGE_POOL_STATS
		free_percpu(pool->recycle_stats);
#endif
		return -ENOMEM;
	}

	atomic_set(&pool->pages_state_release_cnt, 0);

	/* Drivers calling page_pool_create() must also call page_pool_destroy() */
	refcount_set(&pool->user_cnt, 1);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		get_device(pool->p.dev);

	return 0;
}
static void page_pool_uninit(struct page_pool *pool)
{
	ptr_ring_cleanup(&pool->ring, NULL);

	if (pool->p.flags & PP_FLAG_DMA_MAP)
		put_device(pool->p.dev);

#ifdef CONFIG_PAGE_POOL_STATS
	free_percpu(pool->recycle_stats);
#endif
}
/**
 * page_pool_create() - create a page pool
 * @params: parameters, see struct page_pool_params
 */
struct page_pool *page_pool_create(const struct page_pool_params *params)
{
	struct page_pool *pool;
	int err;

	pool = kzalloc_node(sizeof(*pool), GFP_KERNEL, params->nid);
	if (!pool)
		return ERR_PTR(-ENOMEM);

	err = page_pool_init(pool, params);
	if (err < 0)
		goto err_free;

	err = page_pool_list(pool);
	if (err)
		goto err_uninit;

	return pool;

err_uninit:
	page_pool_uninit(pool);
err_free:
	pr_warn("%s() gave up with errno %d\n", __func__, err);
	kfree(pool);
	return ERR_PTR(err);
}
EXPORT_SYMBOL(page_pool_create);
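
/* Usage sketch (hypothetical driver, illustrative values): creating a pool
 * for one RX queue, with DMA mapping and DMA-sync-for-device handled by the
 * page_pool. "priv->pdev" and the sizes are assumptions, not requirements.
 *
 *	struct page_pool_params pp_params = {
 *		.flags		= PP_FLAG_DMA_MAP | PP_FLAG_DMA_SYNC_DEV,
 *		.order		= 0,
 *		.pool_size	= 1024,
 *		.nid		= NUMA_NO_NODE,
 *		.dev		= &priv->pdev->dev,
 *		.dma_dir	= DMA_FROM_DEVICE,
 *		.offset		= XDP_PACKET_HEADROOM,
 *		.max_len	= PAGE_SIZE - XDP_PACKET_HEADROOM,
 *	};
 *	struct page_pool *pp;
 *
 *	pp = page_pool_create(&pp_params);
 *	if (IS_ERR(pp))
 *		return PTR_ERR(pp);
 */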
static void page_pool_return_page(struct page_pool *pool, struct page *page);

static struct page *page_pool_refill_alloc_cache(struct page_pool *pool)
{
	struct ptr_ring *r = &pool->ring;
	struct page *page;
	int pref_nid; /* preferred NUMA node */

	/* Quicker fallback, avoid locks when ring is empty */
	if (__ptr_ring_empty(r)) {
		alloc_stat_inc(pool, empty);
		return NULL;
	}

	/* Softirq guarantees the CPU, and thus the NUMA node, is stable. This
	 * assumes the CPU refilling the driver RX-ring also runs RX-NAPI.
	 */
#ifdef CONFIG_NUMA
	pref_nid = (pool->p.nid == NUMA_NO_NODE) ? numa_mem_id() : pool->p.nid;
#else
	/* Ignore pool->p.nid setting if !CONFIG_NUMA, helps compiler */
	pref_nid = numa_mem_id(); /* will be zero like page_to_nid() */
#endif

	/* Refill alloc array, but only if NUMA matches */
	do {
		page = __ptr_ring_consume(r);
		if (unlikely(!page))
			break;

		if (likely(page_to_nid(page) == pref_nid)) {
			pool->alloc.cache[pool->alloc.count++] = page;
		} else {
			/* NUMA mismatch;
			 * (1) release 1 page to page-allocator and
			 * (2) break out to fallthrough to alloc_pages_node.
			 * This limits stress on the page buddy allocator.
			 */
			page_pool_return_page(pool, page);
			alloc_stat_inc(pool, waive);
			page = NULL;
			break;
		}
	} while (pool->alloc.count < PP_ALLOC_CACHE_REFILL);

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, refill);
	}

	return page;
}
static struct page *__page_pool_get_cached(struct page_pool *pool)
{
	struct page *page;

	/* Caller MUST guarantee safe non-concurrent access, e.g. softirq */
	if (likely(pool->alloc.count)) {
		/* Fast-path */
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, fast);
	} else {
		page = page_pool_refill_alloc_cache(pool);
	}

	return page;
}
static void page_pool_dma_sync_for_device(struct page_pool *pool,
					  struct page *page,
					  unsigned int dma_sync_size)
{
	dma_addr_t dma_addr = page_pool_get_dma_addr(page);

	dma_sync_size = min(dma_sync_size, pool->p.max_len);
	dma_sync_single_range_for_device(pool->p.dev, dma_addr,
					 pool->p.offset, dma_sync_size,
					 pool->p.dma_dir);
}
static bool page_pool_dma_map(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	/* Setup DMA mapping: use 'struct page' area for storing DMA-addr
	 * since dma_addr_t can be either 32 or 64 bits and does not always fit
	 * into page private data (i.e. a 32-bit CPU with 64-bit DMA caps).
	 * This mapping is kept for the lifetime of the page, until it leaves the pool.
	 */
	dma = dma_map_page_attrs(pool->p.dev, page, 0,
				 (PAGE_SIZE << pool->p.order),
				 pool->p.dma_dir, DMA_ATTR_SKIP_CPU_SYNC |
						  DMA_ATTR_WEAK_ORDERING);
	if (dma_mapping_error(pool->p.dev, dma))
		return false;

	if (page_pool_set_dma_addr(page, dma))
		goto unmap_failed;

	if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
		page_pool_dma_sync_for_device(pool, page, pool->p.max_len);

	return true;

unmap_failed:
	WARN_ON_ONCE("unexpected DMA address, please report to netdev@");
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	return false;
}
static void page_pool_set_pp_info(struct page_pool *pool,
				  struct page *page)
{
	page->pp = pool;
	page->pp_magic |= PP_SIGNATURE;

	/* Ensuring all pages have been split into one fragment initially:
	 * page_pool_set_pp_info() is only called once for every page when it
	 * is allocated from the page allocator and page_pool_fragment_page()
	 * is dirtying the same cache line as the page->pp_magic above, so
	 * the overhead is negligible.
	 */
	page_pool_fragment_page(page, 1);
	if (pool->has_init_callback)
		pool->slow.init_callback(page, pool->slow.init_arg);
}

static void page_pool_clear_pp_info(struct page *page)
{
	page->pp_magic = 0;
	page->pp = NULL;
}
static struct page *__page_pool_alloc_page_order(struct page_pool *pool,
						 gfp_t gfp)
{
	struct page *page;

	gfp |= __GFP_COMP;
	page = alloc_pages_node(pool->p.nid, gfp, pool->p.order);
	if (unlikely(!page))
		return NULL;

	if ((pool->p.flags & PP_FLAG_DMA_MAP) &&
	    unlikely(!page_pool_dma_map(pool, page))) {
		put_page(page);
		return NULL;
	}

	alloc_stat_inc(pool, slow_high_order);
	page_pool_set_pp_info(pool, page);

	/* Track how many pages are held 'in-flight' */
	pool->pages_state_hold_cnt++;
	trace_page_pool_state_hold(pool, page, pool->pages_state_hold_cnt);
	return page;
}
static struct page *__page_pool_alloc_pages_slow(struct page_pool *pool,
						 gfp_t gfp)
{
	const int bulk = PP_ALLOC_CACHE_REFILL;
	unsigned int pp_flags = pool->p.flags;
	unsigned int pp_order = pool->p.order;
	struct page *page;
	int i, nr_pages;

	/* Don't support bulk alloc for high-order pages */
	if (unlikely(pp_order))
		return __page_pool_alloc_page_order(pool, gfp);

	/* Unnecessary as alloc cache is empty, but guarantees zero count */
	if (unlikely(pool->alloc.count > 0))
		return pool->alloc.cache[--pool->alloc.count];

	/* Mark empty alloc.cache slots "empty" for alloc_pages_bulk_array */
	memset(&pool->alloc.cache, 0, sizeof(void *) * bulk);

	nr_pages = alloc_pages_bulk_array_node(gfp, pool->p.nid, bulk,
					       pool->alloc.cache);
	if (unlikely(!nr_pages))
		return NULL;

	/* Pages have been filled into the alloc.cache array, but the count is
	 * zero and the page elements have not (possibly) been DMA mapped.
	 */
	for (i = 0; i < nr_pages; i++) {
		page = pool->alloc.cache[i];
		if ((pp_flags & PP_FLAG_DMA_MAP) &&
		    unlikely(!page_pool_dma_map(pool, page))) {
			put_page(page);
			continue;
		}

		page_pool_set_pp_info(pool, page);
		pool->alloc.cache[pool->alloc.count++] = page;
		/* Track how many pages are held 'in-flight' */
		pool->pages_state_hold_cnt++;
		trace_page_pool_state_hold(pool, page,
					   pool->pages_state_hold_cnt);
	}

	/* Return last page */
	if (likely(pool->alloc.count > 0)) {
		page = pool->alloc.cache[--pool->alloc.count];
		alloc_stat_inc(pool, slow);
	} else {
		page = NULL;
	}

	/* A page just allocated should/must have refcnt 1. */
	return page;
}
/* For drivers using page_pool to replace alloc_pages() API calls, while
 * providing a synchronization guarantee for the allocation side.
 */
struct page *page_pool_alloc_pages(struct page_pool *pool, gfp_t gfp)
{
	struct page *page;

	/* Fast-path: Get a page from cache */
	page = __page_pool_get_cached(pool);
	if (page)
		return page;

	/* Slow-path: cache empty, do real allocation */
	page = __page_pool_alloc_pages_slow(pool, gfp);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_pages);
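
/* Usage sketch (hypothetical RX refill loop in NAPI/softirq context;
 * "rx_headroom" and the descriptor-posting helper are driver-specific
 * assumptions, not part of this API):
 *
 *	struct page *page;
 *	dma_addr_t dma;
 *
 *	page = page_pool_alloc_pages(rxq->page_pool, GFP_ATOMIC);
 *	if (unlikely(!page))
 *		return -ENOMEM;
 *	dma = page_pool_get_dma_addr(page) + rx_headroom;
 *	my_post_rx_desc(rxq, dma, page);
 */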
/* Calculate distance between two u32 values, valid if distance is below 2^(31)
 * https://en.wikipedia.org/wiki/Serial_number_arithmetic#General_Solution
 */
#define _distance(a, b)	(s32)((a) - (b))
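
/* Worked example: after a u32 wrap-around, hold_cnt = 5 and
 * release_cnt = 0xfffffffe give _distance(5, 0xfffffffe) = (s32)(5 - 0xfffffffe) = 7,
 * i.e. seven pages are still outstanding despite the counter wrap.
 */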
s32 page_pool_inflight(const struct page_pool *pool, bool strict)
{
	u32 release_cnt = atomic_read(&pool->pages_state_release_cnt);
	u32 hold_cnt = READ_ONCE(pool->pages_state_hold_cnt);
	s32 inflight;

	inflight = _distance(hold_cnt, release_cnt);

	if (strict) {
		trace_page_pool_release(pool, inflight, hold_cnt, release_cnt);
		WARN(inflight < 0, "Negative(%d) inflight packet-pages",
		     inflight);
	} else {
		inflight = max(0, inflight);
	}

	return inflight;
}
static __always_inline
void __page_pool_release_page_dma(struct page_pool *pool, struct page *page)
{
	dma_addr_t dma;

	if (!(pool->p.flags & PP_FLAG_DMA_MAP))
		/* Always account for inflight pages, even if we didn't
		 * map them
		 */
		return;

	dma = page_pool_get_dma_addr(page);

	/* When page is unmapped, it cannot be returned to our pool */
	dma_unmap_page_attrs(pool->p.dev, dma,
			     PAGE_SIZE << pool->p.order, pool->p.dma_dir,
			     DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING);
	page_pool_set_dma_addr(page, 0);
}
/* Disconnects a page from a page_pool. API users may need to disconnect
 * a page, to allow it to be used as a regular page (that will eventually
 * be returned to the normal page-allocator via put_page).
 */
void page_pool_return_page(struct page_pool *pool, struct page *page)
{
	int count;

	__page_pool_release_page_dma(pool, page);

	page_pool_clear_pp_info(page);

	/* This may be the last page returned, releasing the pool, so
	 * it is not safe to reference pool afterwards.
	 */
	count = atomic_inc_return_relaxed(&pool->pages_state_release_cnt);
	trace_page_pool_state_release(pool, page, count);

	put_page(page);
	/* An optimization would be to call __free_pages(page, pool->p.order)
	 * knowing page is not part of page-cache (thus avoiding a
	 * __page_cache_release() call).
	 */
}
static bool page_pool_recycle_in_ring(struct page_pool *pool, struct page *page)
{
	int ret;

	/* BH protection not needed if current is softirq */
	if (in_softirq())
		ret = ptr_ring_produce(&pool->ring, page);
	else
		ret = ptr_ring_produce_bh(&pool->ring, page);

	if (!ret) {
		recycle_stat_inc(pool, ring);
		return true;
	}

	return false;
}
/* Only allow direct recycling in special circumstances, into the
 * alloc side cache. E.g. during RX-NAPI processing for the XDP_DROP use-case.
 *
 * Caller must provide appropriate safe context.
 */
static bool page_pool_recycle_in_cache(struct page *page,
				       struct page_pool *pool)
{
	if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
		recycle_stat_inc(pool, cache_full);
		return false;
	}

	/* Caller MUST have verified/know (page_ref_count(page) == 1) */
	pool->alloc.cache[pool->alloc.count++] = page;
	recycle_stat_inc(pool, cached);
	return true;
}
/* If the page refcnt == 1, this will try to recycle the page.
 * If PP_FLAG_DMA_SYNC_DEV is set, we'll try to sync the DMA area for
 * the configured size min(dma_sync_size, pool->max_len).
 * If the page refcnt != 1, then the page will be returned to the memory
 * subsystem. (See the usage sketch after page_pool_put_unrefed_page() below.)
 */
static __always_inline struct page *
__page_pool_put_page(struct page_pool *pool, struct page *page,
		     unsigned int dma_sync_size, bool allow_direct)
{
	lockdep_assert_no_hardirq();

	/* This allocator is optimized for the XDP mode that uses
	 * one-frame-per-page, but has fallbacks that act like the
	 * regular page allocator APIs.
	 *
	 * refcnt == 1 means page_pool owns the page, and can recycle it.
	 *
	 * A page is NOT reusable when it was allocated while the system was
	 * under memory pressure (page_is_pfmemalloc()).
	 */
	if (likely(page_ref_count(page) == 1 && !page_is_pfmemalloc(page))) {
		/* Read barrier done in page_ref_count / READ_ONCE */

		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page,
						      dma_sync_size);

		if (allow_direct && in_softirq() &&
		    page_pool_recycle_in_cache(page, pool))
			return NULL;

		/* Page found as candidate for recycling */
		return page;
	}

	/* Fallback/non-XDP mode: API user has an elevated refcnt.
	 *
	 * Many drivers split up the page into fragments, and some
	 * want to keep doing this to save memory and do refcnt based
	 * recycling. Support this use case too, to ease drivers
	 * switching between XDP/non-XDP.
	 *
	 * In case page_pool maintains the DMA mapping, the API user must
	 * call page_pool_put_page() once. In this elevated refcnt
	 * case, the DMA is unmapped/released, as the driver is likely
	 * doing refcnt based recycle tricks, meaning another process
	 * will be invoking put_page().
	 */
	recycle_stat_inc(pool, released_refcnt);
	page_pool_return_page(pool, page);

	return NULL;
}
void page_pool_put_unrefed_page(struct page_pool *pool, struct page *page,
				unsigned int dma_sync_size, bool allow_direct)
{
	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
	if (page && !page_pool_recycle_in_ring(pool, page)) {
		/* Cache full, fallback to free pages */
		recycle_stat_inc(pool, ring_full);
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_put_unrefed_page);
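
/* Usage sketch (hypothetical driver paths, using the wrappers from
 * <net/page_pool/helpers.h> that eventually reach __page_pool_put_page()):
 *
 *	Inside the pool's own RX-NAPI poll (e.g. XDP_DROP), direct recycling
 *	into the alloc-side cache is allowed:
 *
 *		page_pool_recycle_direct(rxq->page_pool, page);
 *
 *	From any other (non-hardirq) context, recycle via the ptr_ring:
 *
 *		page_pool_put_full_page(rxq->page_pool, page, false);
 */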
/**
 * page_pool_put_page_bulk() - release references on multiple pages
 * @pool:	pool from which pages were allocated
 * @data:	array holding page pointers
 * @count:	number of pages in @data
 *
 * Tries to refill a number of pages into the ptr_ring cache while holding the
 * ptr_ring producer lock. If the ptr_ring is full, page_pool_put_page_bulk()
 * will release leftover pages to the page allocator.
 * page_pool_put_page_bulk() is suitable to be run inside the driver NAPI tx
 * completion loop for the XDP_REDIRECT use case.
 *
 * Please note the caller must not use the @data area after running
 * page_pool_put_page_bulk(), as this function overwrites it.
 */
void page_pool_put_page_bulk(struct page_pool *pool, void **data,
			     int count)
{
	int i, bulk_len = 0;
	bool in_softirq;

	for (i = 0; i < count; i++) {
		struct page *page = virt_to_head_page(data[i]);

		/* It is not the last user for the page frag case */
		if (!page_pool_is_last_ref(page))
			continue;

		page = __page_pool_put_page(pool, page, -1, false);
		/* Approved for bulk recycling in ptr_ring cache */
		if (page)
			data[bulk_len++] = page;
	}

	if (unlikely(!bulk_len))
		return;

	/* Bulk producer into ptr_ring page_pool cache */
	in_softirq = page_pool_producer_lock(pool);
	for (i = 0; i < bulk_len; i++) {
		if (__ptr_ring_produce(&pool->ring, data[i])) {
			/* ring full */
			recycle_stat_inc(pool, ring_full);
			break;
		}
	}
	recycle_stat_add(pool, ring, i);
	page_pool_producer_unlock(pool, in_softirq);

	/* Hopefully all pages were returned into the ptr_ring */
	if (likely(i == bulk_len))
		return;

	/* ptr_ring cache full, free remaining pages outside producer lock
	 * since put_page() with refcnt == 1 can be an expensive operation
	 */
	for (; i < bulk_len; i++)
		page_pool_return_page(pool, data[i]);
}
EXPORT_SYMBOL(page_pool_put_page_bulk);
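
/* Usage sketch (hypothetical XDP_REDIRECT TX-completion path; drivers
 * normally reach this helper via xdp_return_frame_bulk()). The batch size
 * and the completion-reaping helper are illustrative assumptions; the data
 * array holds the buffers' virtual addresses:
 *
 *	void *bulk[16];
 *	struct xdp_frame *xdpf;
 *	int n = 0;
 *
 *	while (n < 16 && (xdpf = my_reap_tx_completion(txq)))
 *		bulk[n++] = xdpf->data;
 *	if (n)
 *		page_pool_put_page_bulk(txq->page_pool, bulk, n);
 */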
static struct page *page_pool_drain_frag(struct page_pool *pool,
					 struct page *page)
{
	long drain_count = BIAS_MAX - pool->frag_users;

	/* Some user is still using the page frag */
	if (likely(page_pool_unref_page(page, drain_count)))
		return NULL;

	if (page_ref_count(page) == 1 && !page_is_pfmemalloc(page)) {
		if (pool->p.flags & PP_FLAG_DMA_SYNC_DEV)
			page_pool_dma_sync_for_device(pool, page, -1);

		return page;
	}

	page_pool_return_page(pool, page);
	return NULL;
}
static void page_pool_free_frag(struct page_pool *pool)
{
	long drain_count = BIAS_MAX - pool->frag_users;
	struct page *page = pool->frag_page;

	pool->frag_page = NULL;

	if (!page || page_pool_unref_page(page, drain_count))
		return;

	page_pool_return_page(pool, page);
}
struct page *page_pool_alloc_frag(struct page_pool *pool,
				  unsigned int *offset,
				  unsigned int size, gfp_t gfp)
{
	unsigned int max_size = PAGE_SIZE << pool->p.order;
	struct page *page = pool->frag_page;

	if (WARN_ON(size > max_size))
		return NULL;

	size = ALIGN(size, dma_get_cache_alignment());
	*offset = pool->frag_offset;

	if (page && *offset + size > max_size) {
		page = page_pool_drain_frag(pool, page);
		if (page) {
			alloc_stat_inc(pool, fast);
			goto frag_reset;
		}
	}

	if (!page) {
		page = page_pool_alloc_pages(pool, gfp);
		if (unlikely(!page)) {
			pool->frag_page = NULL;
			return NULL;
		}

		pool->frag_page = page;

frag_reset:
		pool->frag_users = 1;
		*offset = 0;
		pool->frag_offset = size;
		page_pool_fragment_page(page, BIAS_MAX);
		return page;
	}

	pool->frag_users++;
	pool->frag_offset = *offset + size;
	alloc_stat_inc(pool, fast);
	return page;
}
EXPORT_SYMBOL(page_pool_alloc_frag);
static void page_pool_empty_ring(struct page_pool *pool)
{
	struct page *page;

	/* Empty recycle ring */
	while ((page = ptr_ring_consume_bh(&pool->ring))) {
		/* Verify the refcnt invariant of cached pages */
		if (!(page_ref_count(page) == 1))
			pr_crit("%s() page_pool refcnt %d violation\n",
				__func__, page_ref_count(page));

		page_pool_return_page(pool, page);
	}
}
static void __page_pool_destroy(struct page_pool *pool)
{
	if (pool->disconnect)
		pool->disconnect(pool);

	page_pool_unlist(pool);
	page_pool_uninit(pool);
	kfree(pool);
}
static void page_pool_empty_alloc_cache_once(struct page_pool *pool)
{
	struct page *page;

	if (pool->destroy_cnt)
		return;

	/* Empty alloc cache, assume caller made sure this is
	 * no longer in use, and page_pool_alloc_pages() cannot be
	 * called concurrently.
	 */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
static void page_pool_scrub(struct page_pool *pool)
{
	page_pool_empty_alloc_cache_once(pool);
	pool->destroy_cnt++;

	/* No more consumers should exist, but producers could still
	 * be in-flight.
	 */
	page_pool_empty_ring(pool);
}
static int page_pool_release(struct page_pool *pool)
{
	int inflight;

	page_pool_scrub(pool);
	inflight = page_pool_inflight(pool, true);
	if (!inflight)
		__page_pool_destroy(pool);

	return inflight;
}
static void page_pool_release_retry(struct work_struct *wq)
{
	struct delayed_work *dwq = to_delayed_work(wq);
	struct page_pool *pool = container_of(dwq, typeof(*pool), release_dw);
	void *netdev;
	int inflight;

	inflight = page_pool_release(pool);
	if (!inflight)
		return;

	/* Periodic warning for page pools the user can't see */
	netdev = READ_ONCE(pool->slow.netdev);
	if (time_after_eq(jiffies, pool->defer_warn) &&
	    (!netdev || netdev == NET_PTR_POISON)) {
		int sec = (s32)((u32)jiffies - (u32)pool->defer_start) / HZ;

		pr_warn("%s() stalled pool shutdown: id %u, %d inflight %d sec\n",
			__func__, pool->user.id, inflight, sec);
		pool->defer_warn = jiffies + DEFER_WARN_INTERVAL;
	}

	/* Still not ready to be disconnected, retry later */
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
void page_pool_use_xdp_mem(struct page_pool *pool, void (*disconnect)(void *),
			   struct xdp_mem_info *mem)
{
	refcount_inc(&pool->user_cnt);
	pool->disconnect = disconnect;
	pool->xdp_mem_id = mem->id;
}
void page_pool_unlink_napi(struct page_pool *pool)
{
	if (!pool->p.napi)
		return;

	/* To avoid races with recycling and additional barriers, make sure
	 * pool and NAPI are unlinked when NAPI is disabled.
	 */
	WARN_ON(!test_bit(NAPI_STATE_SCHED, &pool->p.napi->state) ||
		READ_ONCE(pool->p.napi->list_owner) != -1);

	WRITE_ONCE(pool->p.napi, NULL);
}
EXPORT_SYMBOL(page_pool_unlink_napi);
void page_pool_destroy(struct page_pool *pool)
{
	if (!pool)
		return;

	if (!page_pool_put(pool))
		return;

	page_pool_unlink_napi(pool);
	page_pool_free_frag(pool);

	if (!page_pool_release(pool))
		return;

	page_pool_detached(pool);
	pool->defer_start = jiffies;
	pool->defer_warn  = jiffies + DEFER_WARN_INTERVAL;

	INIT_DELAYED_WORK(&pool->release_dw, page_pool_release_retry);
	schedule_delayed_work(&pool->release_dw, DEFER_TIME);
}
EXPORT_SYMBOL(page_pool_destroy);
/* Caller must provide appropriate safe context, e.g. NAPI. */
void page_pool_update_nid(struct page_pool *pool, int new_nid)
{
	struct page *page;

	trace_page_pool_update_nid(pool, new_nid);
	pool->p.nid = new_nid;

	/* Flush pool alloc cache, as refill will check NUMA node */
	while (pool->alloc.count) {
		page = pool->alloc.cache[--pool->alloc.count];
		page_pool_return_page(pool, page);
	}
}
EXPORT_SYMBOL(page_pool_update_nid);