Diffstat (limited to 'target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch')
-rw-r--r-- target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch | 62
1 file changed, 62 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch b/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch
new file mode 100644
index 0000000000..e7f46ddf9c
--- /dev/null
+++ b/target/linux/generic/backport-5.4/080-wireguard-0117-wireguard-peerlookup-take-lock-before-checking-hash-.patch
@@ -0,0 +1,62 @@
+From 0000000000000000000000000000000000000000 Mon Sep 17 00:00:00 2001
+From: "Jason A. Donenfeld" <Jason@zx2c4.com>
+Date: Wed, 9 Sep 2020 13:58:15 +0200
+Subject: [PATCH] wireguard: peerlookup: take lock before checking hash in
+ replace operation
+
+commit 6147f7b1e90ff09bd52afc8b9206a7fcd133daf7 upstream.
+
+Eric's suggested fix for the previous commit's mentioned race condition
+was to simply take the table->lock in wg_index_hashtable_replace(). The
+table->lock of the hash table is supposed to protect the bucket heads,
+not the entries, but actually, since all the mutator functions are
+already taking it, it makes sense to take it too for the test to
+hlist_unhashed, as a defense-in-depth measure, so that it no longer
+races with deletions, regardless of what other locks are protecting
+individual entries. This is sensible from a performance perspective
+because, as Eric pointed out, the case of being unhashed is already the
+unlikely case, so this won't add common contention. And comparing
+instructions, this basically doesn't make much of a difference other
+than pushing and popping %r13, used by the new `bool ret`. More
+generally, I like the idea of locking consistency across table mutator
+functions, and this might let me rest slightly easier at night.
+
+Suggested-by: Eric Dumazet <edumazet@google.com>
+Link: https://lore.kernel.org/wireguard/20200908145911.4090480-1-edumazet@google.com/
+Fixes: e7096c131e51 ("net: WireGuard secure network tunnel")
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+Signed-off-by: Jason A. Donenfeld <Jason@zx2c4.com>
+---
+ drivers/net/wireguard/peerlookup.c | 11 ++++++++---
+ 1 file changed, 8 insertions(+), 3 deletions(-)
+
+--- a/drivers/net/wireguard/peerlookup.c
++++ b/drivers/net/wireguard/peerlookup.c
+@@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct i
+ struct index_hashtable_entry *old,
+ struct index_hashtable_entry *new)
+ {
+- if (unlikely(hlist_unhashed(&old->index_hash)))
+- return false;
++ bool ret;
++
+ spin_lock_bh(&table->lock);
++ ret = !hlist_unhashed(&old->index_hash);
++ if (unlikely(!ret))
++ goto out;
++
+ new->index = old->index;
+ hlist_replace_rcu(&old->index_hash, &new->index_hash);
+
+@@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct i
+ * simply gets dropped, which isn't terrible.
+ */
+ INIT_HLIST_NODE(&old->index_hash);
++out:
+ spin_unlock_bh(&table->lock);
+- return true;
++ return ret;
+ }
+
+ void wg_index_hashtable_remove(struct index_hashtable *table,
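
For readers skimming the change outside a kernel tree, the following is a small user-space sketch of the pattern the patch establishes: the "is this entry still hashed?" test is performed only after the table lock is taken, with a single unlock on the out path. Everything here (struct node, struct tbl, struct entry, tbl_replace()) is a hypothetical stand-in for hlist_node, index_hashtable, index_hashtable_entry and wg_index_hashtable_replace(), not the WireGuard code itself.

/*
 * Sketch only: mimics the locking pattern of the patched
 * wg_index_hashtable_replace() using pthread primitives in place of
 * spin_lock_bh() and a hand-rolled node in place of hlist_node.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct node {                   /* minimal hlist_node look-alike */
	struct node *next;
	struct node **pprev;    /* NULL pprev means "unhashed"   */
};

struct entry {
	struct node hash;
	unsigned int index;
};

struct tbl {
	pthread_mutex_t lock;   /* plays the role of table->lock */
	struct node *head;
};

static bool node_unhashed(const struct node *n)
{
	return !n->pprev;       /* same convention as hlist_unhashed() */
}

static void node_replace(struct node *old, struct node *new)
{
	/* Splice new in exactly where old was linked. */
	new->next = old->next;
	new->pprev = old->pprev;
	*new->pprev = new;
	if (new->next)
		new->next->pprev = &new->next;
}

static bool tbl_replace(struct tbl *t, struct entry *old, struct entry *new)
{
	bool ret;

	pthread_mutex_lock(&t->lock);
	ret = !node_unhashed(&old->hash);  /* checked under the lock now */
	if (!ret)
		goto out;

	new->index = old->index;
	node_replace(&old->hash, &new->hash);
	old->hash.next = NULL;             /* mark old as unhashed again */
	old->hash.pprev = NULL;
out:
	pthread_mutex_unlock(&t->lock);
	return ret;
}

As the commit message argues, the point is locking consistency across the table's mutators rather than speed: the unhashed case is already the unlikely path, so moving the test under the lock adds essentially no contention while closing the race with concurrent removal.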