aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch
diff options
context:
space:
mode:
Diffstat (limited to 'target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch')
-rw-r--r--  target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch | 16
1 file changed, 8 insertions, 8 deletions
diff --git a/target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch b/target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch
index 9e383de92c..f4c5b9afee 100644
--- a/target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch
+++ b/target/linux/generic/backport-5.15/600-v5.18-page_pool-Add-allocation-stats.patch
@@ -87,9 +87,9 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
help
--- a/net/core/page_pool.c
+++ b/net/core/page_pool.c
-@@ -26,6 +26,13 @@
-
- #define BIAS_MAX LONG_MAX
+@@ -49,6 +49,13 @@ static void page_pool_producer_unlock(st
+ spin_unlock_bh(&pool->ring.producer_lock);
+ }
+#ifdef CONFIG_PAGE_POOL_STATS
+/* alloc_stat_inc is intended to be used in softirq context */
@@ -101,7 +101,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
static int page_pool_init(struct page_pool *pool,
const struct page_pool_params *params)
{
-@@ -117,8 +124,10 @@ static struct page *page_pool_refill_all
+@@ -140,8 +147,10 @@ static struct page *page_pool_refill_all
int pref_nid; /* preferred NUMA node */
/* Quicker fallback, avoid locks when ring is empty */
@@ -113,7 +113,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
/* Softirq guarantee CPU and thus NUMA node is stable. This,
* assumes CPU refilling driver RX-ring will also run RX-NAPI.
-@@ -148,14 +157,17 @@ static struct page *page_pool_refill_all
+@@ -171,14 +180,17 @@ static struct page *page_pool_refill_all
* This limit stress on page buddy alloactor.
*/
page_pool_return_page(pool, page);
@@ -132,7 +132,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
spin_unlock(&r->consumer_lock);
return page;
-@@ -170,6 +182,7 @@ static struct page *__page_pool_get_cach
+@@ -193,6 +205,7 @@ static struct page *__page_pool_get_cach
if (likely(pool->alloc.count)) {
/* Fast-path */
page = pool->alloc.cache[--pool->alloc.count];
@@ -140,7 +140,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
} else {
page = page_pool_refill_alloc_cache(pool);
}
-@@ -241,6 +254,7 @@ static struct page *__page_pool_alloc_pa
+@@ -264,6 +277,7 @@ static struct page *__page_pool_alloc_pa
return NULL;
}
@@ -148,7 +148,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
page_pool_set_pp_info(pool, page);
/* Track how many pages are held 'in-flight' */
-@@ -295,10 +309,12 @@ static struct page *__page_pool_alloc_pa
+@@ -318,10 +332,12 @@ static struct page *__page_pool_alloc_pa
}
/* Return last page */