aboutsummaryrefslogtreecommitdiffstats
path: root/target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
diff options
context:
space:
mode:
authorFelix Fietkau <nbd@nbd.name>2018-02-05 13:35:24 +0100
committerFelix Fietkau <nbd@nbd.name>2018-02-21 20:12:42 +0100
commit103335644265d96c656a7de3d5994fbd11246300 (patch)
tree2b19dea75e812b8240d6a458f0ed6dd22a8148b2 /target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
parentb7265c59ab7dd0ec5dccb96e7b0dc1432404feb7 (diff)
downloadupstream-103335644265d96c656a7de3d5994fbd11246300.tar.gz
upstream-103335644265d96c656a7de3d5994fbd11246300.tar.bz2
upstream-103335644265d96c656a7de3d5994fbd11246300.zip
kernel: backport netfilter NAT offload support to 4.14
This only works with nftables for now; iptables support will be added later. Includes a number of related upstream nftables improvements to simplify backporting follow-up changes. Signed-off-by: John Crispin <john@phrozen.org> Signed-off-by: Felix Fietkau <nbd@nbd.name>
Diffstat (limited to 'target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch')
-rw-r--r--target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch47
1 file changed, 47 insertions, 0 deletions
diff --git a/target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch b/target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
new file mode 100644
index 0000000000..acca41ae3e
--- /dev/null
+++ b/target/linux/generic/backport-4.14/339-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
@@ -0,0 +1,47 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Thu, 1 Feb 2018 18:49:00 +0100
+Subject: [PATCH] netfilter: nft_flow_offload: wait for garbage collector
+ to run after cleanup
+
+If netdevice goes down, then flowtable entries are scheduled to be
+removed. Wait for garbage collector to have a chance to run so it can
+delete them from the hashtable.
+
+The flush call might sleep, so hold the nfnl mutex from
+nft_flow_table_iterate() instead of rcu read side lock. The use of the
+nfnl mutex is also implicitly fixing races between updates via nfnetlink
+and netdevice event.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4818,13 +4818,13 @@ void nft_flow_table_iterate(struct net *
+ struct nft_flowtable *flowtable;
+ const struct nft_table *table;
+
+- rcu_read_lock();
+- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+- list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
++ nfnl_lock(NFNL_SUBSYS_NFTABLES);
++ list_for_each_entry(table, &net->nft.tables, list) {
++ list_for_each_entry(flowtable, &table->flowtables, list) {
+ iter(&flowtable->data, data);
+ }
+ }
+- rcu_read_unlock();
++ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ }
+ EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -208,6 +208,7 @@ static void nft_flow_offload_iterate_cle
+ void *data)
+ {
+ nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
++ flush_delayed_work(&flowtable->gc_work);
+ }
+
+ static int flow_offload_netdev_event(struct notifier_block *this,