From fefe1da440eede8dfaa23975c30ae2f6fcac744d Mon Sep 17 00:00:00 2001
From: Jo-Philipp Wich
Date: Wed, 8 Aug 2018 11:12:18 +0200
Subject: kernel: backport upstream fix for CVE-2018-5390

Backport an upstream fix for a remotely exploitable TCP denial of
service flaw in Linux 4.9+.

The fixes are included in Linux 4.14.59 and later, but have not yet
landed in version 4.9.118.

Signed-off-by: Jo-Philipp Wich
---
 .../100-tcp-add-tcp_ooo_try_coalesce-helper.patch | 76 ++++++++++++++++++++++
 1 file changed, 76 insertions(+)
 create mode 100644 target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch

diff --git a/target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch b/target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch
new file mode 100644
index 0000000000..4641727c57
--- /dev/null
+++ b/target/linux/generic/backport-4.9/100-tcp-add-tcp_ooo_try_coalesce-helper.patch
@@ -0,0 +1,76 @@
+From 74b120c45aebf4278e1dedc55f5fa24d8ea83cdc Mon Sep 17 00:00:00 2001
+From: Eric Dumazet
+Date: Mon, 23 Jul 2018 09:28:21 -0700
+Subject: tcp: add tcp_ooo_try_coalesce() helper
+
+commit 58152ecbbcc6a0ce7fddd5bf5f6ee535834ece0c upstream.
+
+In case skb in out_or_order_queue is the result of
+multiple skbs coalescing, we would like to get a proper gso_segs
+counter tracking, so that future tcp_drop() can report an accurate
+number.
+
+I chose to not implement this tracking for skbs in receive queue,
+since they are not dropped, unless socket is disconnected.
+
+Signed-off-by: Eric Dumazet
+Acked-by: Soheil Hassas Yeganeh
+Acked-by: Yuchung Cheng
+Signed-off-by: David S. Miller
+Signed-off-by: David Woodhouse
+Signed-off-by: Greg Kroah-Hartman
+---
+ net/ipv4/tcp_input.c | 23 +++++++++++++++++++++--
+ 1 file changed, 21 insertions(+), 2 deletions(-)
+
+diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
+index a9be8df108b4..9d0b73aa649f 100644
+--- a/net/ipv4/tcp_input.c
++++ b/net/ipv4/tcp_input.c
+@@ -4370,6 +4370,23 @@ static bool tcp_try_coalesce(struct sock *sk,
+ 	return true;
+ }
+ 
++static bool tcp_ooo_try_coalesce(struct sock *sk,
++				 struct sk_buff *to,
++				 struct sk_buff *from,
++				 bool *fragstolen)
++{
++	bool res = tcp_try_coalesce(sk, to, from, fragstolen);
++
++	/* In case tcp_drop() is called later, update to->gso_segs */
++	if (res) {
++		u32 gso_segs = max_t(u16, 1, skb_shinfo(to)->gso_segs) +
++			       max_t(u16, 1, skb_shinfo(from)->gso_segs);
++
++		skb_shinfo(to)->gso_segs = min_t(u32, gso_segs, 0xFFFF);
++	}
++	return res;
++}
++
+ static void tcp_drop(struct sock *sk, struct sk_buff *skb)
+ {
+ 	sk_drops_add(sk, skb);
+@@ -4493,7 +4510,8 @@ static void tcp_data_queue_ofo(struct sock *sk, struct sk_buff *skb)
+ 	/* In the typical case, we are adding an skb to the end of the list.
+ 	 * Use of ooo_last_skb avoids the O(Log(N)) rbtree lookup.
+ 	 */
+-	if (tcp_try_coalesce(sk, tp->ooo_last_skb, skb, &fragstolen)) {
++	if (tcp_ooo_try_coalesce(sk, tp->ooo_last_skb,
++				 skb, &fragstolen)) {
+ coalesce_done:
+ 		tcp_grow_window(sk, skb);
+ 		kfree_skb_partial(skb, fragstolen);
+@@ -4543,7 +4561,8 @@ coalesce_done:
+ 				tcp_drop(sk, skb1);
+ 				goto merge_right;
+ 			}
+-		} else if (tcp_try_coalesce(sk, skb1, skb, &fragstolen)) {
++		} else if (tcp_ooo_try_coalesce(sk, skb1,
++						skb, &fragstolen)) {
+ 			goto coalesce_done;
+ 		}
+ 		p = &parent->rb_right;
+--
+cgit 1.2-0.3.lf.el7
+
--
cgit v1.2.3
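
As a quick illustration of the arithmetic the new helper performs, here is a
minimal standalone C sketch of the saturating gso_segs accounting. It models
the kernel's max_t()/min_t() clamping with plain integers; coalesce_gso_segs()
is a hypothetical name used only for this example and is not a kernel function.

/*
 * Standalone sketch of tcp_ooo_try_coalesce()'s gso_segs accounting.
 * An illustration only: coalesce_gso_segs() is a hypothetical name,
 * and the kernel's max_t()/min_t() macros are spelled out by hand.
 */
#include <stdint.h>
#include <stdio.h>

static uint16_t coalesce_gso_segs(uint16_t to, uint16_t from)
{
	/* max_t(u16, 1, x): an skb with gso_segs == 0 still counts as 1 segment */
	uint32_t sum = (uint32_t)(to ? to : 1) + (uint32_t)(from ? from : 1);

	/* min_t(u32, sum, 0xFFFF): saturate so the 16-bit field cannot wrap */
	return sum > 0xFFFF ? 0xFFFF : (uint16_t)sum;
}

int main(void)
{
	printf("%u\n", (unsigned)coalesce_gso_segs(0, 0));           /* 2 */
	printf("%u\n", (unsigned)coalesce_gso_segs(10, 5));          /* 15 */
	printf("%u\n", (unsigned)coalesce_gso_segs(0xFFFF, 0xFFFF)); /* 65535, clamped */
	return 0;
}

The clamp to 0xFFFF matters because gso_segs is a 16-bit field in struct
skb_shared_info: saturating instead of wrapping keeps the segment count that
tcp_drop() later reports meaningful even after many coalesce operations.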