author     Jo-Philipp Wich <jo@mein.io>    2016-08-13 15:17:42 +0200
committer  Jo-Philipp Wich <jo@mein.io>    2016-08-13 16:23:23 +0200
commit     3c2c31bb66e5b247ffbb3cafac2a21d441daef39 (patch)
tree       2edd869c7011775fa3f8cd6ecb93a075b95ae38c /target/linux/generic
parent     cf8da98e947056848431502b03d006ee80b5f930 (diff)
kernel: backport upstream challenge ACK fix (CVE-2016-5696)

Yue Cao claims that current host rate limiting of challenge ACKs
(RFC 5961) could leak enough information to allow a patient attacker
to hijack TCP sessions. He will soon provide details in an academic
paper.

This backports upstream commit 75ff39ccc1bd5d3c455b6822ab09e533c551f758
to the kernel versions used by LEDE (3.18, 4.1 and 4.4).

Signed-off-by: Jo-Philipp Wich <jo@mein.io>
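
To illustrate the effect of the change, here is a minimal userspace sketch
of the new rate-limiting scheme (the actual kernel change is in the patches
below): instead of a fixed per-second limit, the budget of challenge ACKs is
re-initialized every second to a random value between limit/2 and
limit/2 + limit, and each challenge ACK consumes one token. The kernel uses
jiffies/HZ and prandom_u32_max(); this sketch substitutes time() and rand()
purely for illustration, and the helper name may_send_challenge_ack() is
made up for the example.

/* Illustrative userspace model of the randomized challenge ACK budget
 * introduced by upstream commit 75ff39ccc1bd. Uses time()/rand() in
 * place of the kernel's jiffies/HZ and prandom_u32_max(); the real
 * logic lives in tcp_send_challenge_ack() in net/ipv4/tcp_input.c.
 */
#include <stdio.h>
#include <stdlib.h>
#include <time.h>

static unsigned int sysctl_tcp_challenge_ack_limit = 1000; /* new default */

static int may_send_challenge_ack(void)
{
	static time_t challenge_timestamp;
	static unsigned int challenge_count;
	time_t now = time(NULL);

	if (now != challenge_timestamp) {
		/* Refill once per second with a randomized budget in
		 * [limit/2, limit/2 + limit) instead of a fixed count. */
		unsigned int half = (sysctl_tcp_challenge_ack_limit + 1) / 2;

		challenge_timestamp = now;
		challenge_count = half +
			(unsigned int)rand() % sysctl_tcp_challenge_ack_limit;
	}
	if (challenge_count > 0) {
		challenge_count--;
		return 1;	/* send the challenge ACK */
	}
	return 0;		/* rate limited */
}

int main(void)
{
	unsigned int sent = 0, i;

	srand((unsigned int)time(NULL));
	for (i = 0; i < 5000; i++)
		sent += may_send_challenge_ack();
	printf("challenge ACKs allowed this second: %u of 5000 attempts\n", sent);
	return 0;
}

Because the per-second budget now starts at a random point at or above
limit/2 rather than at a fixed, predictable value, an off-path attacker can
no longer reliably tell, by counting the challenge ACKs it receives within
one second, whether its spoofed segments consumed part of the shared global
budget; measuring that fixed budget is what made the CVE-2016-5696 side
channel practical.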
Diffstat (limited to 'target/linux/generic')
-rw-r--r--  target/linux/generic/patches-3.18/096-tcp-make-challenge-acks-less-predictable.patch | 66
-rw-r--r--  target/linux/generic/patches-4.1/096-tcp-make-challenge-acks-less-predictable.patch  | 76
-rw-r--r--  target/linux/generic/patches-4.4/096-tcp-make-challenge-acks-less-predictable.patch  | 76
3 files changed, 218 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/patches-3.18/096-tcp-make-challenge-acks-less-predictable.patch b/target/linux/generic/patches-3.18/096-tcp-make-challenge-acks-less-predictable.patch
new file mode 100644
index 0000000000..b984f6e86c
--- /dev/null
+++ b/target/linux/generic/patches-3.18/096-tcp-make-challenge-acks-less-predictable.patch
@@ -0,0 +1,66 @@
+From 75ff39ccc1bd5d3c455b6822ab09e533c551f758 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Jul 2016 10:04:02 +0200
+Subject: [PATCH] tcp: make challenge acks less predictable
+
+Yue Cao claims that current host rate limiting of challenge ACKS
+(RFC 5961) could leak enough information to allow a patient attacker
+to hijack TCP sessions. He will soon provide details in an academic
+paper.
+
+This patch increases the default limit from 100 to 1000, and adds
+some randomization so that the attacker can no longer hijack
+sessions without spending a considerable amount of probes.
+
+Based on initial analysis and patch from Linus.
+
+Note that we also have per socket rate limiting, so it is tempting
+to remove the host limit in the future.
+
+v2: randomize the count of challenge acks per second, not the period.
+
+Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2")
+Reported-by: Yue Cao <ycao009@ucr.edu>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv4/tcp_input.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -88,7 +88,7 @@ int sysctl_tcp_adv_win_scale __read_most
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
+ /* rfc5961 challenge ack rate limiting */
+-int sysctl_tcp_challenge_ack_limit = 100;
++int sysctl_tcp_challenge_ack_limit = 1000;
+
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+@@ -3325,12 +3325,18 @@ static void tcp_send_challenge_ack(struc
+ static u32 challenge_timestamp;
+ static unsigned int challenge_count;
+ u32 now = jiffies / HZ;
++ u32 count;
+
+ if (now != challenge_timestamp) {
++ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
++
+ challenge_timestamp = now;
+- challenge_count = 0;
++ WRITE_ONCE(challenge_count, half +
++ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+ }
+- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++ count = READ_ONCE(challenge_count);
++ if (count > 0) {
++ WRITE_ONCE(challenge_count, count - 1);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ tcp_send_ack(sk);
+ }
diff --git a/target/linux/generic/patches-4.1/096-tcp-make-challenge-acks-less-predictable.patch b/target/linux/generic/patches-4.1/096-tcp-make-challenge-acks-less-predictable.patch
new file mode 100644
index 0000000000..cf1da63b07
--- /dev/null
+++ b/target/linux/generic/patches-4.1/096-tcp-make-challenge-acks-less-predictable.patch
@@ -0,0 +1,76 @@
+From 75ff39ccc1bd5d3c455b6822ab09e533c551f758 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Jul 2016 10:04:02 +0200
+Subject: [PATCH] tcp: make challenge acks less predictable
+
+Yue Cao claims that current host rate limiting of challenge ACKS
+(RFC 5961) could leak enough information to allow a patient attacker
+to hijack TCP sessions. He will soon provide details in an academic
+paper.
+
+This patch increases the default limit from 100 to 1000, and adds
+some randomization so that the attacker can no longer hijack
+sessions without spending a considerable amount of probes.
+
+Based on initial analysis and patch from Linus.
+
+Note that we also have per socket rate limiting, so it is tempting
+to remove the host limit in the future.
+
+v2: randomize the count of challenge acks per second, not the period.
+
+Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2")
+Reported-by: Yue Cao <ycao009@ucr.edu>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv4/tcp_input.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_most
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
+ /* rfc5961 challenge ack rate limiting */
+-int sysctl_tcp_challenge_ack_limit = 100;
++int sysctl_tcp_challenge_ack_limit = 1000;
+
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+@@ -3380,7 +3380,7 @@ static void tcp_send_challenge_ack(struc
+ static u32 challenge_timestamp;
+ static unsigned int challenge_count;
+ struct tcp_sock *tp = tcp_sk(sk);
+- u32 now;
++ u32 count, now;
+
+ /* First check our per-socket dupack rate limit. */
+ if (tcp_oow_rate_limited(sock_net(sk), skb,
+@@ -3388,13 +3388,18 @@ static void tcp_send_challenge_ack(struc
+ &tp->last_oow_ack_time))
+ return;
+
+- /* Then check the check host-wide RFC 5961 rate limit. */
++ /* Then check host-wide RFC 5961 rate limit. */
+ now = jiffies / HZ;
+ if (now != challenge_timestamp) {
++ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
++
+ challenge_timestamp = now;
+- challenge_count = 0;
++ WRITE_ONCE(challenge_count, half +
++ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+ }
+- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++ count = READ_ONCE(challenge_count);
++ if (count > 0) {
++ WRITE_ONCE(challenge_count, count - 1);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ tcp_send_ack(sk);
+ }
diff --git a/target/linux/generic/patches-4.4/096-tcp-make-challenge-acks-less-predictable.patch b/target/linux/generic/patches-4.4/096-tcp-make-challenge-acks-less-predictable.patch
new file mode 100644
index 0000000000..a783c6ebbc
--- /dev/null
+++ b/target/linux/generic/patches-4.4/096-tcp-make-challenge-acks-less-predictable.patch
@@ -0,0 +1,76 @@
+From 75ff39ccc1bd5d3c455b6822ab09e533c551f758 Mon Sep 17 00:00:00 2001
+From: Eric Dumazet <edumazet@google.com>
+Date: Sun, 10 Jul 2016 10:04:02 +0200
+Subject: [PATCH] tcp: make challenge acks less predictable
+
+Yue Cao claims that current host rate limiting of challenge ACKS
+(RFC 5961) could leak enough information to allow a patient attacker
+to hijack TCP sessions. He will soon provide details in an academic
+paper.
+
+This patch increases the default limit from 100 to 1000, and adds
+some randomization so that the attacker can no longer hijack
+sessions without spending a considerable amount of probes.
+
+Based on initial analysis and patch from Linus.
+
+Note that we also have per socket rate limiting, so it is tempting
+to remove the host limit in the future.
+
+v2: randomize the count of challenge acks per second, not the period.
+
+Fixes: 282f23c6ee34 ("tcp: implement RFC 5961 3.2")
+Reported-by: Yue Cao <ycao009@ucr.edu>
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Suggested-by: Linus Torvalds <torvalds@linux-foundation.org>
+Cc: Yuchung Cheng <ycheng@google.com>
+Cc: Neal Cardwell <ncardwell@google.com>
+Acked-by: Neal Cardwell <ncardwell@google.com>
+Acked-by: Yuchung Cheng <ycheng@google.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+ net/ipv4/tcp_input.c | 15 ++++++++++-----
+ 1 file changed, 10 insertions(+), 5 deletions(-)
+
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_most
+ EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
+
+ /* rfc5961 challenge ack rate limiting */
+-int sysctl_tcp_challenge_ack_limit = 100;
++int sysctl_tcp_challenge_ack_limit = 1000;
+
+ int sysctl_tcp_stdurg __read_mostly;
+ int sysctl_tcp_rfc1337 __read_mostly;
+@@ -3427,7 +3427,7 @@ static void tcp_send_challenge_ack(struc
+ static u32 challenge_timestamp;
+ static unsigned int challenge_count;
+ struct tcp_sock *tp = tcp_sk(sk);
+- u32 now;
++ u32 count, now;
+
+ /* First check our per-socket dupack rate limit. */
+ if (tcp_oow_rate_limited(sock_net(sk), skb,
+@@ -3435,13 +3435,18 @@ static void tcp_send_challenge_ack(struc
+ &tp->last_oow_ack_time))
+ return;
+
+- /* Then check the check host-wide RFC 5961 rate limit. */
++ /* Then check host-wide RFC 5961 rate limit. */
+ now = jiffies / HZ;
+ if (now != challenge_timestamp) {
++ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
++
+ challenge_timestamp = now;
+- challenge_count = 0;
++ WRITE_ONCE(challenge_count, half +
++ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
+ }
+- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
++ count = READ_ONCE(challenge_count);
++ if (count > 0) {
++ WRITE_ONCE(challenge_count, count - 1);
+ NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
+ tcp_send_ack(sk);
+ }
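
After applying these patches and rebuilding the kernel, the change can be
verified on the target by reading the host-wide sysctl, which should now
default to 1000 instead of 100. A minimal check, assuming procfs is mounted
at /proc (equivalently: sysctl net.ipv4.tcp_challenge_ack_limit):

/* Prints the current host-wide challenge ACK limit. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/net/ipv4/tcp_challenge_ack_limit", "r");
	int limit;

	if (!f) {
		perror("tcp_challenge_ack_limit");
		return 1;
	}
	if (fscanf(f, "%d", &limit) != 1) {
		fclose(f);
		fprintf(stderr, "unexpected sysctl contents\n");
		return 1;
	}
	fclose(f);
	printf("net.ipv4.tcp_challenge_ack_limit = %d\n", limit);
	return 0;
}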