author     Daniel Golle <daniel@makrotopia.org>    2022-08-29 08:54:41 +0200
committer  Daniel Golle <daniel@makrotopia.org>    2022-09-22 23:55:02 +0100
commit     c93c5365c0eb78ba8b479a9fe0cc5ec96f773978
tree       b250435d0f0e8e66c56d0486d1151d4eb1fc827e
parent     524f52a471495d2c4e764539e000cb699610de1f
kernel: pick patches for MediaTek Ethernet from linux-next
Pick patches with several fixes and improvements, preparation for the
upcoming WED (TX) support [1] as well as basic XDP support [2] for
MediaTek's Filogic SoCs, into the mtk_eth_soc driver. Also pick a
follow-up patch fixing Ethernet on MT7621 [3].

Tested on Bananapi BPi-R3 (MT7986), Bananapi BPi-R64 (MT7622),
Bananapi BPi-R2 (MT7623), MikroTik RouterBoard M11G (MT7621).

[1]: https://patchwork.kernel.org/project/netdevbpf/list/?series=662108&state=*
[2]: https://patchwork.kernel.org/project/netdevbpf/list/?series=675368&state=*
     (the first part of the series adding wed nodes to mt7986a.dtsi was
     applied to the copy of mt7986a.dtsi in our tree)
[3]: https://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git/commit/?id=5e69163d3b9931098922b3fc2f8e786af8c1f37e

Signed-off-by: Daniel Golle <daniel@makrotopia.org>
Diffstat (limited to 'target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch')
-rw-r--r--  target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch | 137
1 file changed, 137 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch b/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch
new file mode 100644
index 0000000000..8330ae811d
--- /dev/null
+++ b/target/linux/generic/backport-5.15/601-v5.18-page_pool-Add-recycle-stats.patch
@@ -0,0 +1,137 @@
+commit ad6fa1e1ab1b8164f1ba296b1b4dc556a483bcad
+Author: Joe Damato <jdamato@fastly.com>
+Date: Tue Mar 1 23:55:48 2022 -0800
+
+ page_pool: Add recycle stats
+
+ Add per-cpu stats tracking page pool recycling events:
+ - cached: recycling placed page in the page pool cache
+ - cache_full: page pool cache was full
+ - ring: page placed into the ptr ring
+ - ring_full: page released from page pool because the ptr ring was full
+ - released_refcnt: page released (and not recycled) because refcnt > 1
+
+ Signed-off-by: Joe Damato <jdamato@fastly.com>
+ Acked-by: Jesper Dangaard Brouer <brouer@redhat.com>
+ Reviewed-by: Ilias Apalodimas <ilias.apalodimas@linaro.org>
+ Signed-off-by: David S. Miller <davem@davemloft.net>
+
+--- a/include/net/page_pool.h
++++ b/include/net/page_pool.h
+@@ -93,6 +93,18 @@ struct page_pool_alloc_stats {
+ u64 refill; /* allocations via successful refill */
+ u64 waive; /* failed refills due to numa zone mismatch */
+ };
++
++struct page_pool_recycle_stats {
++ u64 cached; /* recycling placed page in the cache. */
++ u64 cache_full; /* cache was full */
++ u64 ring; /* recycling placed page back into ptr ring */
++ u64 ring_full; /* page was released from page-pool because
++ * PTR ring was full.
++ */
++ u64 released_refcnt; /* page released because of elevated
++ * refcnt
++ */
++};
+ #endif
+
+ struct page_pool {
+@@ -136,6 +148,10 @@ struct page_pool {
+ */
+ struct ptr_ring ring;
+
++#ifdef CONFIG_PAGE_POOL_STATS
++ /* recycle stats are per-cpu to avoid locking */
++ struct page_pool_recycle_stats __percpu *recycle_stats;
++#endif
+ atomic_t pages_state_release_cnt;
+
+ /* A page_pool is strictly tied to a single RX-queue being
+--- a/net/core/page_pool.c
++++ b/net/core/page_pool.c
+@@ -29,8 +29,15 @@
+ #ifdef CONFIG_PAGE_POOL_STATS
+ /* alloc_stat_inc is intended to be used in softirq context */
+ #define alloc_stat_inc(pool, __stat) (pool->alloc_stats.__stat++)
++/* recycle_stat_inc is safe to use when preemption is possible. */
++#define recycle_stat_inc(pool, __stat) \
++ do { \
++ struct page_pool_recycle_stats __percpu *s = pool->recycle_stats; \
++ this_cpu_inc(s->__stat); \
++ } while (0)
+ #else
+ #define alloc_stat_inc(pool, __stat)
++#define recycle_stat_inc(pool, __stat)
+ #endif
+
+ static int page_pool_init(struct page_pool *pool,
+@@ -80,6 +87,12 @@ static int page_pool_init(struct page_po
+ pool->p.flags & PP_FLAG_PAGE_FRAG)
+ return -EINVAL;
+
++#ifdef CONFIG_PAGE_POOL_STATS
++ pool->recycle_stats = alloc_percpu(struct page_pool_recycle_stats);
++ if (!pool->recycle_stats)
++ return -ENOMEM;
++#endif
++
+ if (ptr_ring_init(&pool->ring, ring_qsize, GFP_KERNEL) < 0)
+ return -ENOMEM;
+
+@@ -412,7 +425,12 @@ static bool page_pool_recycle_in_ring(st
+ else
+ ret = ptr_ring_produce_bh(&pool->ring, page);
+
+- return (ret == 0) ? true : false;
++ if (!ret) {
++ recycle_stat_inc(pool, ring);
++ return true;
++ }
++
++ return false;
+ }
+
+ /* Only allow direct recycling in special circumstances, into the
+@@ -423,11 +441,14 @@ static bool page_pool_recycle_in_ring(st
+ static bool page_pool_recycle_in_cache(struct page *page,
+ struct page_pool *pool)
+ {
+- if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE))
++ if (unlikely(pool->alloc.count == PP_ALLOC_CACHE_SIZE)) {
++ recycle_stat_inc(pool, cache_full);
+ return false;
++ }
+
+ /* Caller MUST have verified/know (page_ref_count(page) == 1) */
+ pool->alloc.cache[pool->alloc.count++] = page;
++ recycle_stat_inc(pool, cached);
+ return true;
+ }
+
+@@ -482,6 +503,7 @@ __page_pool_put_page(struct page_pool *p
+ * doing refcnt based recycle tricks, meaning another process
+ * will be invoking put_page.
+ */
++ recycle_stat_inc(pool, released_refcnt);
+ /* Do not replace this with page_pool_return_page() */
+ page_pool_release_page(pool, page);
+ put_page(page);
+@@ -495,6 +517,7 @@ void page_pool_put_page(struct page_pool
+ page = __page_pool_put_page(pool, page, dma_sync_size, allow_direct);
+ if (page && !page_pool_recycle_in_ring(pool, page)) {
+ /* Cache full, fallback to free pages */
++ recycle_stat_inc(pool, ring_full);
+ page_pool_return_page(pool, page);
+ }
+ }
+@@ -641,6 +664,9 @@ static void page_pool_free(struct page_p
+ if (pool->p.flags & PP_FLAG_DMA_MAP)
+ put_device(pool->p.dev);
+
++#ifdef CONFIG_PAGE_POOL_STATS
++ free_percpu(pool->recycle_stats);
++#endif
+ kfree(pool);
+ }
+
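For context on how the per-CPU counters added by the patch above are meant to be
consumed: a minimal sketch, assuming only the struct page_pool_recycle_stats layout
and the recycle_stats per-CPU pointer introduced here. The function name
example_sum_recycle_stats() is hypothetical; the upstream read-side helper,
page_pool_get_stats(), is introduced by a separate follow-up patch and is not part
of this one.

/*
 * Illustrative sketch only, not part of the patch above: fold the per-CPU
 * recycle counters into a single total for reporting.
 */
#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/string.h>
#include <net/page_pool.h>

#ifdef CONFIG_PAGE_POOL_STATS
static void example_sum_recycle_stats(const struct page_pool *pool,
				      struct page_pool_recycle_stats *total)
{
	int cpu;

	memset(total, 0, sizeof(*total));

	/* recycle_stats is allocated with alloc_percpu() in page_pool_init(),
	 * so walk every possible CPU and accumulate each field.
	 */
	for_each_possible_cpu(cpu) {
		const struct page_pool_recycle_stats *s =
			per_cpu_ptr(pool->recycle_stats, cpu);

		total->cached           += s->cached;
		total->cache_full       += s->cache_full;
		total->ring             += s->ring;
		total->ring_full        += s->ring_full;
		total->released_refcnt  += s->released_refcnt;
	}
}
#endif /* CONFIG_PAGE_POOL_STATS */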