path: root/target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch
author     Brett Mastbergen <bmastbergen@untangle.com>  2018-09-12 15:04:55 -0400
committer  Jo-Philipp Wich <jo@mein.io>  2018-12-18 11:28:13 +0100
commit     69d6da1de601eb94285752b0bea44ad4f2381dfe (patch)
tree       62e535d0fd3456c13508ed4a7e7ce09deb3675f7 /target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch
parent     bbc0c4d9cbfe4f93b759bbf4bb845ad4b72588f1 (diff)
kernel: generic: Fix nftables inet table breakage
Commit b7265c59ab7d ("kernel: backport a series of netfilter cleanup patches to 4.14") added patch 302-netfilter-nf_tables_inet-don-t-use-multihook-infrast.patch. That patch switches the netfilter core in the kernel to use the new native NFPROTO_INET support. Unfortunately, the new native NFPROTO_INET support does not exist in 4.14 and was not backported along with this patchset. As such, nftables inet tables never see any traffic.

As an example the following nft counter rule should increment for every packet coming into the box, but never will:

nft add table inet foo
nft add chain inet foo bar { type filter hook input priority 0\; }
nft add rule inet foo bar counter

This commit pulls in the required backport patches to add the new native NFPROTO_INET support, and thus restore nftables inet table functionality.

Tested on Turris Omnia (mvebu)

Fixes: b7265c59ab7d ("kernel: backport a series of netfilter cleanup ...")
Signed-off-by: Brett Mastbergen <bmastbergen@untangle.com>
(backported from f57806b56e5f6ca7bb9fb66d5b175b5f98ece93c)
(rebased patches)
Signed-off-by: Jo-Philipp Wich <jo@mein.io>
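With the backported patches applied, inbound traffic should bump that counter. A quick way to check, assuming the three commands above have been run and some traffic (e.g. a ping to the box) has arrived:

nft list chain inet foo bar

The counter expression in chain bar should then report non-zero packets/bytes; without the fix it stays at zero.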
Diffstat (limited to 'target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch')
-rw-r--r--  target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch  132
1 file changed, 132 insertions, 0 deletions
diff --git a/target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch b/target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch
new file mode 100644
index 0000000000..5eca73552b
--- /dev/null
+++ b/target/linux/generic/backport-4.14/292-v4.16-netfilter-core-free-hooks-with-call_rcu.patch
@@ -0,0 +1,132 @@
+From 8c873e2199700c2de7dbd5eedb9d90d5f109462b Mon Sep 17 00:00:00 2001
+From: Florian Westphal <fw@strlen.de>
+Date: Fri, 1 Dec 2017 00:21:04 +0100
+Subject: [PATCH 04/11] netfilter: core: free hooks with call_rcu
+
+Giuseppe Scrivano says:
+ "SELinux, if enabled, registers for each new network namespace 6
+ netfilter hooks."
+
+Cost for this is high. With synchronize_net() removed:
+ "The net benefit on an SMP machine with two cores is that creating a
+ new network namespace takes -40% of the original time."
+
+This patch replaces synchronize_net+kvfree with call_rcu().
+We store rcu_head at the tail of a structure that has no fixed layout,
+i.e. we cannot use offsetof() to compute the start of the original
+allocation. Thus store this information right after the rcu head.
+
+We could simplify this by just placing the rcu_head at the start
+of struct nf_hook_entries. However, this structure is used in
+packet processing hotpath, so only place what is needed for that
+at the beginning of the struct.
+
+Reported-by: Giuseppe Scrivano <gscrivan@redhat.com>
+Signed-off-by: Florian Westphal <fw@strlen.de>
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+ include/linux/netfilter.h | 19 +++++++++++++++----
+ net/netfilter/core.c | 34 ++++++++++++++++++++++++++++------
+ 2 files changed, 43 insertions(+), 10 deletions(-)
+
+--- a/include/linux/netfilter.h
++++ b/include/linux/netfilter.h
+@@ -77,17 +77,28 @@ struct nf_hook_entry {
+ void *priv;
+ };
+
++struct nf_hook_entries_rcu_head {
++ struct rcu_head head;
++ void *allocation;
++};
++
+ struct nf_hook_entries {
+ u16 num_hook_entries;
+ /* padding */
+ struct nf_hook_entry hooks[];
+
+- /* trailer: pointers to original orig_ops of each hook.
+- *
+- * This is not part of struct nf_hook_entry since its only
+- * needed in slow path (hook register/unregister).
++ /* trailer: pointers to original orig_ops of each hook,
++ * followed by rcu_head and scratch space used for freeing
++ * the structure via call_rcu.
+ *
++ * This is not part of struct nf_hook_entry since its only
++ * needed in slow path (hook register/unregister):
+ * const struct nf_hook_ops *orig_ops[]
++ *
++ * For the same reason, we store this at end -- its
++ * only needed when a hook is deleted, not during
++ * packet path processing:
++ * struct nf_hook_entries_rcu_head head
+ */
+ };
+
+--- a/net/netfilter/core.c
++++ b/net/netfilter/core.c
+@@ -74,7 +74,8 @@ static struct nf_hook_entries *allocate_
+ struct nf_hook_entries *e;
+ size_t alloc = sizeof(*e) +
+ sizeof(struct nf_hook_entry) * num +
+- sizeof(struct nf_hook_ops *) * num;
++ sizeof(struct nf_hook_ops *) * num +
++ sizeof(struct nf_hook_entries_rcu_head);
+
+ if (num == 0)
+ return NULL;
+@@ -85,6 +86,30 @@ static struct nf_hook_entries *allocate_
+ return e;
+ }
+
++static void __nf_hook_entries_free(struct rcu_head *h)
++{
++ struct nf_hook_entries_rcu_head *head;
++
++ head = container_of(h, struct nf_hook_entries_rcu_head, head);
++ kvfree(head->allocation);
++}
++
++static void nf_hook_entries_free(struct nf_hook_entries *e)
++{
++ struct nf_hook_entries_rcu_head *head;
++ struct nf_hook_ops **ops;
++ unsigned int num;
++
++ if (!e)
++ return;
++
++ num = e->num_hook_entries;
++ ops = nf_hook_entries_get_hook_ops(e);
++ head = (void *)&ops[num];
++ head->allocation = e;
++ call_rcu(&head->head, __nf_hook_entries_free);
++}
++
+ static unsigned int accept_all(void *priv,
+ struct sk_buff *skb,
+ const struct nf_hook_state *state)
+@@ -291,9 +316,8 @@ int nf_register_net_hook(struct net *net
+ #ifdef HAVE_JUMP_LABEL
+ static_key_slow_inc(&nf_hooks_needed[reg->pf][reg->hooknum]);
+ #endif
+- synchronize_net();
+ BUG_ON(p == new_hooks);
+- kvfree(p);
++ nf_hook_entries_free(p);
+ return 0;
+ }
+ EXPORT_SYMBOL(nf_register_net_hook);
+@@ -361,10 +385,8 @@ void nf_unregister_net_hook(struct net *
+ if (!p)
+ return;
+
+- synchronize_net();
+-
+ nf_queue_nf_hook_drop(net);
+- kvfree(p);
++ nf_hook_entries_free(p);
+ }
+ EXPORT_SYMBOL(nf_unregister_net_hook);
+
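The layout trick the patch above relies on can be seen in a small standalone sketch: the nf_hook_entries allocation is hook entries, then the orig_ops pointer array, then an rcu-head trailer holding a pointer back to the start of the allocation, so the call_rcu() callback knows what to free. The code below is userspace illustration only, not the kernel implementation; the struct definitions are simplified stand-ins for the real kernel types.

/*
 * Userspace sketch of the nf_hook_entries allocation layout used by the
 * patch above.  Simplified stand-in types, not kernel code.
 */
#include <stdio.h>
#include <stdlib.h>
#include <stdint.h>

struct nf_hook_entry {            /* hot-path data only */
	void *hook;
	void *priv;
};

struct nf_hook_entries_rcu_head { /* slow-path trailer */
	void *rcu_head_stand_in;  /* stands in for struct rcu_head */
	void *allocation;         /* what the rcu callback would free */
};

struct nf_hook_entries {
	uint16_t num_hook_entries;
	/* padding */
	struct nf_hook_entry hooks[];
	/* trailer: const struct nf_hook_ops *orig_ops[num];
	 *          struct nf_hook_entries_rcu_head head;     */
};

int main(void)
{
	unsigned int num = 6;	/* e.g. the six SELinux hooks per netns */
	size_t alloc = sizeof(struct nf_hook_entries) +
		       sizeof(struct nf_hook_entry) * num +
		       sizeof(void *) * num +	/* orig_ops[] */
		       sizeof(struct nf_hook_entries_rcu_head);

	struct nf_hook_entries *e = calloc(1, alloc);
	if (!e)
		return 1;
	e->num_hook_entries = num;

	/* orig_ops[] begins right after hooks[num] ... */
	void **ops = (void **)&e->hooks[num];
	/* ... and the rcu trailer right after ops[num], mirroring
	 * head = (void *)&ops[num] in nf_hook_entries_free(). */
	struct nf_hook_entries_rcu_head *head = (void *)&ops[num];
	head->allocation = e;	/* the rcu callback frees this pointer */

	printf("allocation %zu bytes, rcu trailer at offset %td\n",
	       alloc, (char *)head - (char *)e);
	free(e);
	return 0;
}

Because the trailer is only touched when a hook is registered or removed, the packet-processing hot path still sees a struct that starts with the entry count and hook array, which is the reason the patch keeps the rcu_head at the end rather than at the start of struct nf_hook_entries.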