path: root/target/linux/generic/backport-4.14/363-netfilter-nf_flow_table-add-support-for-sending-flow.patch
blob: b4d80a911b2114a9caaff32538ef07640829e96d (plain)
From: Felix Fietkau <nbd@nbd.name>
Date: Sun, 25 Feb 2018 15:41:11 +0100
Subject: [PATCH] netfilter: nf_flow_table: add support for sending flows
 back to the slow path

Reset the conntrack timeout. For TCP, also set the state so that the
next incoming packets are used to reset window tracking.
This allows the slow path to take over again once the offload state has
been torn down.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
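
[Editorial note: the sketch below is a minimal, self-contained user-space
model of the fixup described above. It is not part of the patch, and none of
its names (struct model_conn, model_fixup, the timeout constants) exist in
the kernel; the real code operates on struct nf_conn and the per-protocol
conntrack timeout tables. The constants roughly mirror the kernel's default
TCP established and UDP replied timeouts, but are assumptions here.]

/*
 * Editorial sketch: refresh the timeout, reset TCP tracking state and
 * clear the "offloaded" marker so normal (slow path) aging resumes.
 */
#include <stdbool.h>
#include <stdio.h>
#include <time.h>

#define TCP_ESTABLISHED_TIMEOUT (5 * 24 * 3600)	/* ~5 days, like the kernel default */
#define UDP_REPLIED_TIMEOUT     180		/* ~3 minutes, like the kernel default */

struct model_conn {
	int    l4proto;		/* IPPROTO_TCP or IPPROTO_UDP */
	time_t timeout;		/* absolute expiry time */
	bool   offloaded;	/* models IPS_OFFLOAD_BIT */
	int    tcp_state;	/* models ct->proto.tcp.state */
};

static void model_fixup(struct model_conn *ct)
{
	time_t now = time(NULL);

	if (ct->l4proto == 6) {			/* IPPROTO_TCP */
		ct->timeout = now + TCP_ESTABLISHED_TIMEOUT;
		ct->tcp_state = -1;		/* "ignore": re-learn the window from the next packets */
	} else if (ct->l4proto == 17) {		/* IPPROTO_UDP */
		ct->timeout = now + UDP_REPLIED_TIMEOUT;
	} else {
		return;				/* other protocols: leave the entry alone */
	}

	ct->offloaded = false;			/* models clear_bit(IPS_OFFLOAD_BIT, &ct->status) */
}

int main(void)
{
	struct model_conn ct = { .l4proto = 6, .offloaded = true, .tcp_state = 3 };

	model_fixup(&ct);
	printf("offloaded=%d expires_in=%lds\n", ct.offloaded,
	       (long)(ct.timeout - time(NULL)));
	return 0;
}
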

--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -100,6 +100,36 @@ err_ct_refcnt:
 }
 EXPORT_SYMBOL_GPL(flow_offload_alloc);
 
+static void flow_offload_fixup_ct_state(struct nf_conn *ct)
+{
+	const struct nf_conntrack_l4proto *l4proto;
+	struct net *net = nf_ct_net(ct);
+	unsigned int *timeouts;
+	unsigned int timeout;
+	int l4num;
+
+	l4num = nf_ct_protonum(ct);
+	l4proto = __nf_ct_l4proto_find(nf_ct_l3num(ct), l4num);
+	if (!l4proto)
+		return;
+
+	timeouts = l4proto->get_timeouts(net);
+	if (!timeouts)
+		return;
+
+	if (l4num == IPPROTO_TCP) {
+		timeout = timeouts[TCP_CONNTRACK_ESTABLISHED];
+		ct->proto.tcp.state = TCP_CONNTRACK_IGNORE;
+	} else if (l4num == IPPROTO_UDP) {
+		timeout = timeouts[UDP_CT_REPLIED];
+	} else {
+		return;
+	}
+
+	ct->timeout = nfct_time_stamp + timeout;
+	clear_bit(IPS_OFFLOAD_BIT, &ct->status);
+}
+
 void flow_offload_free(struct flow_offload *flow)
 {
 	struct flow_offload_entry *e;
@@ -107,7 +137,10 @@ void flow_offload_free(struct flow_offlo
 	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_ORIGINAL].tuple.dst_cache);
 	dst_release(flow->tuplehash[FLOW_OFFLOAD_DIR_REPLY].tuple.dst_cache);
 	e = container_of(flow, struct flow_offload_entry, flow);
-	nf_ct_delete(e->ct, 0, 0);
+	if (flow->flags & FLOW_OFFLOAD_DYING)
+		nf_ct_delete(e->ct, 0, 0);
+	else
+		flow_offload_fixup_ct_state(e->ct);
 	nf_ct_put(e->ct);
 	kfree_rcu(e, rcu_head);
 }
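
[Editorial note: continuing the user-space model above, this fragment
pictures the branch added to flow_offload_free(): a flow marked as dying
takes its conntrack entry down with it, while any other freed flow is handed
back to the slow path via the timeout/state fixup. model_free(),
model_delete() and the "dying" field are illustrative stand-ins, not kernel
API; in the kernel the flag is FLOW_OFFLOAD_DYING, set before teardown. The
fragment assumes struct model_conn and model_fixup() from the sketch above.]

struct model_flow {
	bool dying;			/* models flow->flags & FLOW_OFFLOAD_DYING */
	struct model_conn *ct;		/* the conntrack entry backing this flow */
};

static void model_delete(struct model_conn *ct)
{
	/* stand-in for nf_ct_delete(): the entry is removed for good */
	ct->timeout = 0;
	ct->offloaded = false;
}

static void model_free(struct model_flow *flow)
{
	if (flow->dying)
		model_delete(flow->ct);	/* flow was torn down on purpose */
	else
		model_fixup(flow->ct);	/* hand the connection back to the slow path */
}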