Diffstat (limited to 'target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch')
-rw-r--r-- | target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch | 14 |
1 file changed, 7 insertions, 7 deletions
diff --git a/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch b/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch
index fb11f0035f..b425b78c75 100644
--- a/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch
+++ b/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch
@@ -53,7 +53,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  /* A page_pool is strictly tied to a single RX-queue being
 --- a/net/core/page_pool.c
 +++ b/net/core/page_pool.c
-@@ -29,8 +29,15 @@
+@@ -52,8 +52,15 @@ static void page_pool_producer_unlock(st
  #ifdef CONFIG_PAGE_POOL_STATS
  /* alloc_stat_inc is intended to be used in softirq context */
  #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
@@ -69,7 +69,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  #endif
 
  static int page_pool_init(struct page_pool *pool,
-@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
+@@ -103,6 +110,12 @@ static int page_pool_init(struct page_po
  	    pool->p.flags & PP_FLAG_PAGE_FRAG)
  		return -EINVAL;
 
@@ -82,7 +82,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  	if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
  		return -ENOMEM;
 
-@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
+@@ -435,7 +448,12 @@ static bool page_pool_recycle_in_ring(st
  	else
  		ret = ptr_ring_produce_bh(&pool->ring, page);
 
@@ -96,7 +96,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  }
 
  /* Only allow direct recycling in special circumstances, into the
-@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
+@@ -446,11 +464,14 @@ static bool page_pool_recycle_in_ring(st
  static bool page_pool_recycle_in_cache(struct page *page,
  				       struct page_pool *pool)
  {
@@ -112,7 +112,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  	return true;
  }
 
-@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
+@@ -505,6 +526,7 @@ __page_pool_put_page(struct page_pool *p
  	 * doing refcnt based recycle tricks, meaning another process
  	 * will be invoking put_page.
  	 */
@@ -120,7 +120,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  	/* Do not replace this with page_pool_return_page() */
  	page_pool_release_page(pool, page);
  	put_page(page);
-@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
+@@ -518,6 +540,7 @@ void page_pool_put_page(struct page_pool
  	page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
  	if (page && !page_pool_recycle_in_ring(pool, page)) {
  		/* Cache full, fallback to free pages */
@@ -128,7 +128,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
  		page_pool_return_page(pool, page);
  	}
  }
-@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
+@@ -665,6 +688,9 @@ static void page_pool_free(struct page_p
  	if (pool->p.flags & PP_FLAG_DMA_MAP)
  		put_device(pool->p.dev);
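
The change above only refreshes hunk offsets inside the backported patch; the patch it updates adds recycle statistics to page_pool, bumped through small per-pool macros in the style of the alloc_stat_inc() visible in the context and gated behind CONFIG_PAGE_POOL_STATS. The following is a minimal userspace sketch of that counter pattern, assuming invented names (demo_pool, demo_recycle_stats, demo_recycle_in_ring); it is not the kernel's page_pool code, only an illustration of how a per-pool stats struct plus an increment macro fit together.

/*
 * Illustrative mock of the recycle-stats counter pattern.
 * All names here are hypothetical; only the alloc_stat_inc()-style
 * macro shape is taken from the patch context above.
 */
#include <stdbool.h>
#include <stdio.h>

struct demo_recycle_stats {
	unsigned long cached;		/* recycled into a per-CPU cache */
	unsigned long ring;		/* recycled into the ring */
	unsigned long ring_full;	/* ring full, page freed instead */
};

struct demo_pool {
	struct demo_recycle_stats recycle_stats;
};

/* Same shape as the alloc_stat_inc() macro shown in the diff context. */
#define recycle_stat_inc(pool, __stat) ((pool)->recycle_stats.__stat++)

/* Pretend the ring holds at most two pages so the fallback path triggers. */
static bool demo_recycle_in_ring(struct demo_pool *pool, int *ring_fill)
{
	if (*ring_fill >= 2) {
		recycle_stat_inc(pool, ring_full);
		return false;
	}
	(*ring_fill)++;
	recycle_stat_inc(pool, ring);
	return true;
}

int main(void)
{
	struct demo_pool pool = { 0 };
	int ring_fill = 0;

	for (int i = 0; i < 4; i++)
		demo_recycle_in_ring(&pool, &ring_fill);

	printf("ring=%lu ring_full=%lu cached=%lu\n",
	       pool.recycle_stats.ring, pool.recycle_stats.ring_full,
	       pool.recycle_stats.cached);
	return 0;
}

Built with a plain C compiler, this reports ring=2 ring_full=2 after four recycle attempts against a two-slot ring, which is the same kind of per-path signal the real counters expose when CONFIG_PAGE_POOL_STATS is enabled.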