path: root/target/linux/generic/backport-5.4/370-netfilter-nf_flow_table-fix-offloaded-connection-tim.patch
From: Felix Fietkau <nbd@nbd.name>
Date: Wed, 13 Jun 2018 12:33:39 +0200
Subject: [PATCH] netfilter: nf_flow_table: fix offloaded connection timeout
 corner case

The full teardown of offloaded flows is deferred to a gc work item;
however, netfilter needs to process packets immediately after a teardown
is requested, because the conntrack state needs to be fixed up.

Since the IPS_OFFLOAD_BIT is still kept until the teardown is complete,
the netfilter conntrack gc can accidentally bump the timeout of a
connection where offload was just stopped, causing a conntrack entry
leak.

Fix this by moving the conntrack timeout bumping from the conntrack core
to nf_flow_offload, and by adding a check to prevent bogus timeout bumps.

Signed-off-by: Felix Fietkau <nbd@nbd.name>
---
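Note: the hunks below are the change itself. As a rough, self-contained
illustration of the refresh logic described above, here is a minimal
userspace sketch; the fake_ct/fake_flow types, the HZ value and the
offload_timeout()/gc_step() helpers are made up for illustration, and only
the DAY / 2 threshold and the teardown check mirror what the patch does in
nf_flow_table_core.c:

  /* Illustrative userspace model of the refresh logic added by this
   * patch; not kernel code. */
  #include <stdbool.h>
  #include <stdio.h>

  #define HZ   100
  #define DAY  (86400 * HZ)

  #define FLOW_TEARDOWN  0x1

  struct fake_ct   { unsigned int timeout; };  /* absolute, in jiffies */
  struct fake_flow { struct fake_ct *ct; unsigned int flags; };

  /* Bump the conntrack timeout only when less than half a day remains,
   * as nf_ct_offload_timeout() does after moving into the flowtable code. */
  static void offload_timeout(struct fake_flow *flow, unsigned int now)
  {
          if (flow->ct->timeout - now < DAY / 2)
                  flow->ct->timeout = now + DAY;
  }

  /* Per-flow gc step: refresh only while the flow is not being torn down,
   * so a torn-down entry can expire instead of leaking. */
  static void gc_step(struct fake_flow *flow, unsigned int now)
  {
          bool teardown = flow->flags & FLOW_TEARDOWN;

          if (!teardown)
                  offload_timeout(flow, now);

          printf("timeout in %u jiffies (teardown=%d)\n",
                 flow->ct->timeout - now, teardown);
  }

  int main(void)
  {
          struct fake_ct ct = { .timeout = 30 * HZ };
          struct fake_flow flow = { .ct = &ct, .flags = 0 };

          gc_step(&flow, 0);            /* refreshed to a full DAY */
          flow.flags |= FLOW_TEARDOWN;
          gc_step(&flow, 10 * HZ);      /* no refresh: entry may now expire */
          return 0;
  }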

--- a/net/netfilter/nf_conntrack_core.c
+++ b/net/netfilter/nf_conntrack_core.c
@@ -1206,18 +1206,6 @@ static bool gc_worker_can_early_drop(con
 	return false;
 }
 
-#define	DAY	(86400 * HZ)
-
-/* Set an arbitrary timeout large enough not to ever expire, this save
- * us a check for the IPS_OFFLOAD_BIT from the packet path via
- * nf_ct_is_expired().
- */
-static void nf_ct_offload_timeout(struct nf_conn *ct)
-{
-	if (nf_ct_expires(ct) < DAY / 2)
-		ct->timeout = nfct_time_stamp + DAY;
-}
-
 static void gc_worker(struct work_struct *work)
 {
 	unsigned int min_interval = max(HZ / GC_MAX_BUCKETS_DIV, 1u);
@@ -1254,10 +1242,8 @@ static void gc_worker(struct work_struct
 			tmp = nf_ct_tuplehash_to_ctrack(h);
 
 			scanned++;
-			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status)) {
-				nf_ct_offload_timeout(tmp);
+			if (test_bit(IPS_OFFLOAD_BIT, &tmp->status))
 				continue;
-			}
 
 			if (nf_ct_is_expired(tmp)) {
 				nf_ct_gc_expired(tmp);
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -198,10 +198,29 @@ static const struct rhashtable_params nf
 	.automatic_shrinking	= true,
 };
 
+#define        DAY     (86400 * HZ)
+
+/* Set an arbitrary timeout large enough not to ever expire, this save
+ * us a check for the IPS_OFFLOAD_BIT from the packet path via
+ * nf_ct_is_expired().
+ */
+static void nf_ct_offload_timeout(struct flow_offload *flow)
+{
+	struct flow_offload_entry *entry;
+	struct nf_conn *ct;
+
+	entry = container_of(flow, struct flow_offload_entry, flow);
+	ct = entry->ct;
+
+	if (nf_ct_expires(ct) < DAY / 2)
+		ct->timeout = nfct_time_stamp + DAY;
+}
+
 int flow_offload_add(struct nf_flowtable *flow_table, struct flow_offload *flow)
 {
 	int err;
 
+	nf_ct_offload_timeout(flow);
 	flow->timeout = (u32)jiffies + NF_FLOW_TIMEOUT;
 
 	err = rhashtable_insert_fast(&flow_table->rhashtable,
@@ -304,6 +323,7 @@ nf_flow_table_iterate(struct nf_flowtabl
 	rhashtable_walk_start(&hti);
 
 	while ((tuplehash = rhashtable_walk_next(&hti))) {
+
 		if (IS_ERR(tuplehash)) {
 			if (PTR_ERR(tuplehash) != -EAGAIN) {
 				err = PTR_ERR(tuplehash);
@@ -328,10 +348,17 @@ static void nf_flow_offload_gc_step(stru
 {
 	struct nf_flowtable *flow_table = data;
 	struct flow_offload_entry *e;
+	bool teardown;
 
 	e = container_of(flow, struct flow_offload_entry, flow);
-	if (nf_flow_has_expired(flow) || nf_ct_is_dying(e->ct) ||
-	    (flow->flags & (FLOW_OFFLOAD_DYING | FLOW_OFFLOAD_TEARDOWN)))
+
+	teardown = flow->flags & (FLOW_OFFLOAD_DYING |
+				  FLOW_OFFLOAD_TEARDOWN);
+
+	if (!teardown)
+		nf_ct_offload_timeout(flow);
+
+	if (nf_flow_has_expired(flow) || teardown)
 		flow_offload_del(flow_table, flow);
 }