Diffstat (limited to 'target/linux/generic')
-rw-r--r--	target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch	61
-rw-r--r--	target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch	61
2 files changed, 0 insertions, 122 deletions
diff --git a/target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch b/target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch
deleted file mode 100644
index 1d1a6433d9..0000000000
--- a/target/linux/generic/pending-4.14/650-net-use-bulk-free-in-kfree_skb_list.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sat, 23 Mar 2019 18:26:10 +0100
-Subject: [PATCH] net: use bulk free in kfree_skb_list
-
-Since we're freeing multiple skbs, we might as well use bulk free to save a
-few cycles. Use the same conditions for bulk free as in napi_consume_skb.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -666,12 +666,44 @@ EXPORT_SYMBOL(kfree_skb);
-
- void kfree_skb_list(struct sk_buff *segs)
- {
--	while (segs) {
--		struct sk_buff *next = segs->next;
-+	struct sk_buff *next = segs;
-+	void *skbs[16];
-+	int n_skbs = 0;
-
--		kfree_skb(segs);
--		segs = next;
-+	while ((segs = next) != NULL) {
-+		next = segs->next;
-+
-+		if (segs->fclone != SKB_FCLONE_UNAVAILABLE) {
-+			kfree_skb(segs);
-+			continue;
-+		}
-+
-+		if (!skb_unref(segs))
-+			continue;
-+
-+		trace_kfree_skb(segs, __builtin_return_address(0));
-+
-+		/* drop skb->head and call any destructors for packet */
-+		skb_release_all(segs);
-+
-+#ifdef CONFIG_SLUB
-+		/* SLUB writes into objects when freeing */
-+		prefetchw(segs);
-+#endif
-+
-+		skbs[n_skbs++] = segs;
-+
-+		if (n_skbs < ARRAY_SIZE(skbs))
-+			continue;
-+
-+		kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
-+		n_skbs = 0;
- 	}
-+
-+	if (!n_skbs)
-+		return;
-+
-+	kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
- }
- EXPORT_SYMBOL(kfree_skb_list);
-
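
The dropped patch batches skb frees: pointers are collected into a fixed 16-entry array while walking the list, a full array is handed to kmem_cache_free_bulk() in one call, and any trailing partial batch is flushed after the loop. A minimal userspace sketch of the same batch-and-flush shape (struct node, free_bulk(), free_list(), and BATCH are illustrative stand-ins, not kernel APIs; the kernel flushes through kmem_cache_free_bulk() rather than a free() loop):

#include <stdlib.h>

#define BATCH 16
#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct node {
	struct node *next;
};

/* Stand-in for kmem_cache_free_bulk(): release a whole batch in one call. */
static void free_bulk(size_t n, void **ptrs)
{
	for (size_t i = 0; i < n; i++)
		free(ptrs[i]);
}

/* Walk a singly linked list, batching nodes and flushing every BATCH. */
static void free_list(struct node *head)
{
	void *batch[BATCH];
	size_t n = 0;
	struct node *next = head;

	while ((head = next) != NULL) {
		next = head->next;
		batch[n++] = head;

		if (n < ARRAY_SIZE(batch))
			continue;

		free_bulk(n, batch);	/* full batch: flush inside the loop */
		n = 0;
	}

	if (n)				/* trailing partial batch */
		free_bulk(n, batch);
}

int main(void)
{
	struct node *head = NULL;

	/* Build a 40-node list so both flush sites are exercised. */
	for (int i = 0; i < 40; i++) {
		struct node *n = malloc(sizeof(*n));

		if (!n)
			break;
		n->next = head;
		head = n;
	}

	free_list(head);
	return 0;
}

Note the two flush sites, inside the loop when the array fills and once after it for a partial batch; they correspond to the two kmem_cache_free_bulk() calls in the patch.
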
diff --git a/target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch b/target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch
deleted file mode 100644
index 1d1a6433d9..0000000000
--- a/target/linux/generic/pending-4.19/650-net-use-bulk-free-in-kfree_skb_list.patch
+++ /dev/null
@@ -1,61 +0,0 @@
-From: Felix Fietkau <nbd@nbd.name>
-Date: Sat, 23 Mar 2019 18:26:10 +0100
-Subject: [PATCH] net: use bulk free in kfree_skb_list
-
-Since we're freeing multiple skbs, we might as well use bulk free to save a
-few cycles. Use the same conditions for bulk free as in napi_consume_skb.
-
-Signed-off-by: Felix Fietkau <nbd@nbd.name>
----
-
---- a/net/core/skbuff.c
-+++ b/net/core/skbuff.c
-@@ -666,12 +666,44 @@ EXPORT_SYMBOL(kfree_skb);
-
- void kfree_skb_list(struct sk_buff *segs)
- {
--	while (segs) {
--		struct sk_buff *next = segs->next;
-+	struct sk_buff *next = segs;
-+	void *skbs[16];
-+	int n_skbs = 0;
-
--		kfree_skb(segs);
--		segs = next;
-+	while ((segs = next) != NULL) {
-+		next = segs->next;
-+
-+		if (segs->fclone != SKB_FCLONE_UNAVAILABLE) {
-+			kfree_skb(segs);
-+			continue;
-+		}
-+
-+		if (!skb_unref(segs))
-+			continue;
-+
-+		trace_kfree_skb(segs, __builtin_return_address(0));
-+
-+		/* drop skb->head and call any destructors for packet */
-+		skb_release_all(segs);
-+
-+#ifdef CONFIG_SLUB
-+		/* SLUB writes into objects when freeing */
-+		prefetchw(segs);
-+#endif
-+
-+		skbs[n_skbs++] = segs;
-+
-+		if (n_skbs < ARRAY_SIZE(skbs))
-+			continue;
-+
-+		kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
-+		n_skbs = 0;
- 	}
-+
-+	if (!n_skbs)
-+		return;
-+
-+	kmem_cache_free_bulk(skbuff_head_cache, n_skbs, skbs);
- }
- EXPORT_SYMBOL(kfree_skb_list);
-
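
Both copies of the patch gate the bulk path with the same tests napi_consume_skb() applies: fast clones are skipped because their heads are not allocated from skbuff_head_cache (they come from the fclone slab cache and must take the regular kfree_skb() path), and skb_unref() ensures only the last reference actually frees. A condensed stand-alone sketch of that eligibility check (all names here are invented analogues, not kernel definitions):

#include <stdbool.h>
#include <stdio.h>

/* Invented analogue of the skb fast-clone states. */
enum fclone_state { FCLONE_UNAVAILABLE, FCLONE_ORIG, FCLONE_CLONE };

struct fake_skb {
	enum fclone_state fclone;
	int refcnt;
};

/* Analogue of skb_unref(): drop one reference, true only for the last. */
static bool fake_unref(struct fake_skb *skb)
{
	return --skb->refcnt == 0;
}

/*
 * Mirrors the patch's gate: fclones never enter the bulk path (the real
 * code sends them through kfree_skb() instead), and shared skbs are only
 * unreferenced here, not freed.
 */
static bool bulk_free_eligible(struct fake_skb *skb)
{
	if (skb->fclone != FCLONE_UNAVAILABLE)
		return false;
	return fake_unref(skb);
}

int main(void)
{
	struct fake_skb plain  = { FCLONE_UNAVAILABLE, 1 };
	struct fake_skb shared = { FCLONE_UNAVAILABLE, 2 };
	struct fake_skb clone  = { FCLONE_ORIG, 1 };

	printf("plain:  %d\n", bulk_free_eligible(&plain));	/* 1 */
	printf("shared: %d\n", bulk_free_eligible(&shared));	/* 0 */
	printf("clone:  %d\n", bulk_free_eligible(&clone));	/* 0 */
	return 0;
}
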