Diffstat (limited to 'target/linux/generic/backport-4.14/364-v4.18-netfilter-nf_flow_table-tear-down-TCP-flows-if-RST-o.patch')
-rw-r--r--  target/linux/generic/backport-4.14/364-v4.18-netfilter-nf_flow_table-tear-down-TCP-flows-if-RST-o.patch  81
1 file changed, 81 insertions, 0 deletions
diff --git a/target/linux/generic/backport-4.14/364-v4.18-netfilter-nf_flow_table-tear-down-TCP-flows-if-RST-o.patch b/target/linux/generic/backport-4.14/364-v4.18-netfilter-nf_flow_table-tear-down-TCP-flows-if-RST-o.patch
new file mode 100644
index 0000000000..8b0024cd8d
--- /dev/null
+++ b/target/linux/generic/backport-4.14/364-v4.18-netfilter-nf_flow_table-tear-down-TCP-flows-if-RST-o.patch
@@ -0,0 +1,81 @@
+From: Felix Fietkau <nbd@nbd.name>
+Date: Sun, 25 Feb 2018 15:42:58 +0100
+Subject: [PATCH] netfilter: nf_flow_table: tear down TCP flows if RST or
+ FIN was seen
+
+Allow the slow path to handle the shutdown of the connection with proper
+timeouts
+
+Signed-off-by: Felix Fietkau <nbd@nbd.name>
+---
+
+--- a/net/netfilter/nf_flow_table_ip.c
++++ b/net/netfilter/nf_flow_table_ip.c
+@@ -15,6 +15,23 @@
+ #include <linux/tcp.h>
+ #include <linux/udp.h>
+
++static int nf_flow_tcp_state_check(struct flow_offload *flow,
++ struct sk_buff *skb, unsigned int thoff)
++{
++ struct tcphdr *tcph;
++
++ if (!pskb_may_pull(skb, thoff + sizeof(*tcph)))
++ return -1;
++
++ tcph = (void *)(skb_network_header(skb) + thoff);
++ if (unlikely(tcph->fin || tcph->rst)) {
++ flow_offload_teardown(flow);
++ return -1;
++ }
++
++ return 0;
++}
++
+ static int nf_flow_nat_ip_tcp(struct sk_buff *skb, unsigned int thoff,
+ __be32 addr, __be32 new_addr)
+ {
+@@ -118,10 +135,9 @@ static int nf_flow_dnat_ip(const struct
+ }
+
+ static int nf_flow_nat_ip(const struct flow_offload *flow, struct sk_buff *skb,
+- enum flow_offload_tuple_dir dir)
++ unsigned int thoff, enum flow_offload_tuple_dir dir)
+ {
+ struct iphdr *iph = ip_hdr(skb);
+- unsigned int thoff = iph->ihl * 4;
+
+ if (flow->flags & FLOW_OFFLOAD_SNAT &&
+ (nf_flow_snat_port(flow, skb, thoff, iph->protocol, dir) < 0 ||
+@@ -201,6 +217,7 @@ nf_flow_offload_ip_hook(void *priv, stru
+ struct flow_offload *flow;
+ struct net_device *outdev;
+ const struct rtable *rt;
++ unsigned int thoff;
+ struct iphdr *iph;
+ __be32 nexthop;
+
+@@ -229,8 +246,12 @@ nf_flow_offload_ip_hook(void *priv, stru
+ if (skb_try_make_writable(skb, sizeof(*iph)))
+ return NF_DROP;
+
++ thoff = ip_hdr(skb)->ihl * 4;
++ if (nf_flow_tcp_state_check(flow, skb, thoff))
++ return NF_ACCEPT;
++
+ if (flow->flags & (FLOW_OFFLOAD_SNAT | FLOW_OFFLOAD_DNAT) &&
+- nf_flow_nat_ip(flow, skb, dir) < 0)
++ nf_flow_nat_ip(flow, skb, thoff, dir) < 0)
+ return NF_DROP;
+
+ flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
+@@ -438,6 +459,9 @@ nf_flow_offload_ipv6_hook(void *priv, st
+ if (unlikely(nf_flow_exceeds_mtu(skb, flow->tuplehash[dir].tuple.mtu)))
+ return NF_ACCEPT;
+
++ if (nf_flow_tcp_state_check(flow, skb, sizeof(*ip6h)))
++ return NF_ACCEPT;
++
+ if (skb_try_make_writable(skb, sizeof(*ip6h)))
+ return NF_DROP;
+
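For reference, the standalone userspace sketch below mirrors the FIN/RST check that the backported nf_flow_tcp_state_check() helper adds to the flowtable fast path. It is an illustration only: the flat packet buffer, the tcp_state_check() name, the fixed 20-byte IPv4 header offset and main() are assumptions made for this sketch, not part of the patch or of the kernel API.

/*
 * Standalone sketch (not the kernel patch itself): mirrors the FIN/RST
 * check that nf_flow_tcp_state_check() performs at the transport header
 * offset before a packet is forwarded via the flowtable fast path.
 */
#include <stdio.h>
#include <string.h>
#include <netinet/tcp.h>

/*
 * Return -1 when the flow should be handed back to the slow path
 * (header not fully present, or FIN/RST seen), 0 when fast-path
 * forwarding can continue.  This mirrors the pskb_may_pull() check
 * plus the tcph->fin/tcph->rst test in the patch.
 */
static int tcp_state_check(const unsigned char *pkt, size_t len, size_t thoff)
{
	struct tcphdr tcph;

	if (len < thoff + sizeof(tcph))
		return -1;

	memcpy(&tcph, pkt + thoff, sizeof(tcph));
	if (tcph.fin || tcph.rst)
		return -1;	/* connection shutting down: stop offloading */

	return 0;
}

int main(void)
{
	unsigned char pkt[40];
	struct tcphdr tcph;

	memset(pkt, 0, sizeof(pkt));
	memset(&tcph, 0, sizeof(tcph));
	tcph.rst = 1;				/* simulate an RST segment */
	memcpy(pkt + 20, &tcph, sizeof(tcph));	/* 20-byte IPv4 header assumed */

	printf("keep fast path: %s\n",
	       tcp_state_check(pkt, sizeof(pkt), 20) == 0 ?
	       "yes" : "no, hand back to slow path");
	return 0;
}

In the actual patch, the same condition makes nf_flow_offload_ip_hook() and nf_flow_offload_ipv6_hook() call flow_offload_teardown() and return NF_ACCEPT, so FIN/RST packets traverse the normal netfilter path and conntrack can apply its proper TCP shutdown timeouts, as the commit message describes.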