author    Rafał Miłecki <rafal@milecki.pl>    2018-05-08 09:40:43 +0200
committer Rafał Miłecki <rafal@milecki.pl>    2018-05-08 09:42:07 +0200
commit    f9dcdc7fefcab5ec9b15b0f3c87dfebef37ecaa3 (patch)
tree      36fd6f2cc4324384f9af19994fb69a20d95f8daf /target/linux/generic/backport-4.14/339-v4.16-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
parent    004cc22e4ef8187dd80d5d6be5a2575453ef3699 (diff)
kernel: mark source kernel for netfilter backports
This helps keep track of patches and add new kernels in the future.

Signed-off-by: Rafał Miłecki <rafal@milecki.pl>
Diffstat (limited to 'target/linux/generic/backport-4.14/339-v4.16-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch')
-rw-r--r--    target/linux/generic/backport-4.14/339-v4.16-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch    47
1 file changed, 47 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-4.14/339-v4.16-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch b/target/linux/generic/backport-4.14/339-v4.16-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
new file mode 100644
index 0000000000..acca41ae3e
--- /dev/null
+++ b/target/linux/generic/backport-4.14/339-v4.16-netfilter-nft_flow_offload-wait-for-garbage-collecto.patch
@@ -0,0 +1,47 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Thu, 1 Feb 2018 18:49:00 +0100
+Subject: [PATCH] netfilter: nft_flow_offload: wait for garbage collector
+ to run after cleanup
+
+If netdevice goes down, then flowtable entries are scheduled to be
+removed. Wait for garbage collector to have a chance to run so it can
+delete them from the hashtable.
+
+The flush call might sleep, so hold the nfnl mutex from
+nft_flow_table_iterate() instead of rcu read side lock. The use of the
+nfnl mutex is also implicitly fixing races between updates via nfnetlink
+and netdevice event.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/net/netfilter/nf_tables_api.c
++++ b/net/netfilter/nf_tables_api.c
+@@ -4818,13 +4818,13 @@ void nft_flow_table_iterate(struct net *
+ struct nft_flowtable *flowtable;
+ const struct nft_table *table;
+
+- rcu_read_lock();
+- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+- list_for_each_entry_rcu(flowtable, &table->flowtables, list) {
++ nfnl_lock(NFNL_SUBSYS_NFTABLES);
++ list_for_each_entry(table, &net->nft.tables, list) {
++ list_for_each_entry(flowtable, &table->flowtables, list) {
+ iter(&flowtable->data, data);
+ }
+ }
+- rcu_read_unlock();
++ nfnl_unlock(NFNL_SUBSYS_NFTABLES);
+ }
+ EXPORT_SYMBOL_GPL(nft_flow_table_iterate);
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -208,6 +208,7 @@ static void nft_flow_offload_iterate_cle
+ void *data)
+ {
+ nf_flow_table_iterate(flowtable, flow_offload_iterate_cleanup, data);
++ flush_delayed_work(&flowtable->gc_work);
+ }
+
+ static int flow_offload_netdev_event(struct notifier_block *this,
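
For context only (not part of the commit): the hunk above adds a single flush_delayed_work() call, and the pattern it relies on is a delayed work item acting as the flowtable garbage collector. Below is a minimal, illustrative kernel-style sketch of that flush-after-cleanup pattern; all my_* names are hypothetical and do not appear in the patch, whose real code lives in nf_flow_table and nft_flow_offload.

/*
 * Illustrative sketch only: teardown marks entries dead, then flushes the
 * delayed GC work so the entries are really gone before the caller returns
 * (e.g. before a struct net_device is released).
 */
#include <linux/kernel.h>
#include <linux/workqueue.h>
#include <linux/jiffies.h>

struct my_table {
	struct delayed_work gc_work;	/* periodic garbage collector */
	/* ... hashtable of offloaded flow entries ... */
};

static void my_gc_worker(struct work_struct *work)
{
	struct my_table *t = container_of(work, struct my_table, gc_work.work);

	/* free entries that were previously marked dead, then re-arm */
	queue_delayed_work(system_power_efficient_wq, &t->gc_work, HZ);
}

static void my_table_init(struct my_table *t)
{
	INIT_DEFERRABLE_WORK(&t->gc_work, my_gc_worker);
	queue_delayed_work(system_power_efficient_wq, &t->gc_work, HZ);
}

static void my_table_cleanup(struct my_table *t)
{
	/* mark every entry owned by the dying netdevice as dead ... */

	/* may sleep: wait until the GC has actually removed them */
	flush_delayed_work(&t->gc_work);
}

This is also why the commit moves nft_flow_table_iterate() from the RCU read lock to the nfnl mutex: the flush may sleep, which is not allowed under rcu_read_lock(), and holding the mutex additionally serializes the netdevice event path against updates via nfnetlink.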