Diffstat (limited to 'target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch')
-rw-r--r--  target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch  |  191
1 file changed, 191 insertions, 0 deletions
diff --git a/target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch b/target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch
new file mode 100644
index 0000000000..34724a5696
--- /dev/null
+++ b/target/linux/generic/pending-5.10/640-05-netfilter-flowtable-use-dev_fill_forward_path-to-obt.patch
@@ -0,0 +1,191 @@
+From: Pablo Neira Ayuso <pablo@netfilter.org>
+Date: Fri, 20 Nov 2020 13:49:18 +0100
+Subject: [PATCH] netfilter: flowtable: use dev_fill_forward_path() to
+ obtain ingress device
+
+The ingress device in the tuple is currently obtained from the route in
+the reply direction. Use dev_fill_forward_path() instead to get the real
+ingress device for this flow.
+
+Fall back to using the ingress device that the IP forwarding route
+provides if:
+
+- dev_fill_forward_path() finds no real ingress device.
+- the ingress device that is obtained is not part of the flowtable
+ devices.
+- this route has an xfrm policy.
+
+Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
+---
+
+--- a/include/net/netfilter/nf_flow_table.h
++++ b/include/net/netfilter/nf_flow_table.h
+@@ -165,6 +165,9 @@ static inline __s32 nf_flow_timeout_delt
+ struct nf_flow_route {
+ struct {
+ struct dst_entry *dst;
++ struct {
++ u32 ifindex;
++ } in;
+ enum flow_offload_xmit_type xmit_type;
+ } tuple[FLOW_OFFLOAD_DIR_MAX];
+ };
+--- a/net/netfilter/nf_flow_table_core.c
++++ b/net/netfilter/nf_flow_table_core.c
+@@ -79,7 +79,6 @@ static int flow_offload_fill_route(struc
+ enum flow_offload_tuple_dir dir)
+ {
+ struct flow_offload_tuple *flow_tuple = &flow->tuplehash[dir].tuple;
+- struct dst_entry *other_dst = route->tuple[!dir].dst;
+ struct dst_entry *dst = route->tuple[dir].dst;
+
+ if (!dst_hold_safe(route->tuple[dir].dst))
+@@ -94,7 +93,7 @@ static int flow_offload_fill_route(struc
+ break;
+ }
+
+- flow_tuple->iifidx = other_dst->dev->ifindex;
++ flow_tuple->iifidx = route->tuple[dir].in.ifindex;
+ flow_tuple->xmit_type = route->tuple[dir].xmit_type;
+ flow_tuple->dst_cache = dst;
+
+--- a/net/netfilter/nft_flow_offload.c
++++ b/net/netfilter/nft_flow_offload.c
+@@ -31,14 +31,104 @@ static void nft_default_forward_path(str
+ struct dst_entry *dst_cache,
+ enum ip_conntrack_dir dir)
+ {
++ route->tuple[!dir].in.ifindex = dst_cache->dev->ifindex;
+ route->tuple[dir].dst = dst_cache;
+ route->tuple[dir].xmit_type = nft_xmit_type(dst_cache);
+ }
+
++static int nft_dev_fill_forward_path(const struct nf_flow_route *route,
++ const struct dst_entry *dst_cache,
++ const struct nf_conn *ct,
++ enum ip_conntrack_dir dir,
++ struct net_device_path_stack *stack)
++{
++ const void *daddr = &ct->tuplehash[!dir].tuple.src.u3;
++ struct net_device *dev = dst_cache->dev;
++ unsigned char ha[ETH_ALEN];
++ struct neighbour *n;
++ u8 nud_state;
++
++ n = dst_neigh_lookup(dst_cache, daddr);
++ if (!n)
++ return -1;
++
++ read_lock_bh(&n->lock);
++ nud_state = n->nud_state;
++ ether_addr_copy(ha, n->ha);
++ read_unlock_bh(&n->lock);
++ neigh_release(n);
++
++ if (!(nud_state & NUD_VALID))
++ return -1;
++
++ return dev_fill_forward_path(dev, ha, stack);
++}
++
++struct nft_forward_info {
++ const struct net_device *dev;
++};
++
++static void nft_dev_path_info(const struct net_device_path_stack *stack,
++ struct nft_forward_info *info)
++{
++ const struct net_device_path *path;
++ int i;
++
++ for (i = 0; i < stack->num_paths; i++) {
++ path = &stack->path[i];
++ switch (path->type) {
++ case DEV_PATH_ETHERNET:
++ info->dev = path->dev;
++ break;
++ case DEV_PATH_VLAN:
++ case DEV_PATH_BRIDGE:
++ default:
++ info->dev = NULL;
++ break;
++ }
++ }
++}
++
++static bool nft_flowtable_find_dev(const struct net_device *dev,
++ struct nft_flowtable *ft)
++{
++ struct nft_hook *hook;
++ bool found = false;
++
++ list_for_each_entry_rcu(hook, &ft->hook_list, list) {
++ if (hook->ops.dev != dev)
++ continue;
++
++ found = true;
++ break;
++ }
++
++ return found;
++}
++
++static void nft_dev_forward_path(struct nf_flow_route *route,
++ const struct nf_conn *ct,
++ enum ip_conntrack_dir dir,
++ struct nft_flowtable *ft)
++{
++ const struct dst_entry *dst = route->tuple[dir].dst;
++ struct net_device_path_stack stack;
++ struct nft_forward_info info = {};
++
++ if (nft_dev_fill_forward_path(route, dst, ct, dir, &stack) >= 0)
++ nft_dev_path_info(&stack, &info);
++
++ if (!info.dev || !nft_flowtable_find_dev(info.dev, ft))
++ return;
++
++ route->tuple[!dir].in.ifindex = info.dev->ifindex;
++}
++
+ static int nft_flow_route(const struct nft_pktinfo *pkt,
+ const struct nf_conn *ct,
+ struct nf_flow_route *route,
+- enum ip_conntrack_dir dir)
++ enum ip_conntrack_dir dir,
++ struct nft_flowtable *ft)
+ {
+ struct dst_entry *this_dst = skb_dst(pkt->skb);
+ struct dst_entry *other_dst = NULL;
+@@ -63,6 +153,12 @@ static int nft_flow_route(const struct n
+ nft_default_forward_path(route, this_dst, dir);
+ nft_default_forward_path(route, other_dst, !dir);
+
++ if (route->tuple[dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH &&
++ route->tuple[!dir].xmit_type == FLOW_OFFLOAD_XMIT_NEIGH) {
++ nft_dev_forward_path(route, ct, dir, ft);
++ nft_dev_forward_path(route, ct, !dir, ft);
++ }
++
+ return 0;
+ }
+
+@@ -90,8 +186,8 @@ static void nft_flow_offload_eval(const
+ struct nft_flow_offload *priv = nft_expr_priv(expr);
+ struct nf_flowtable *flowtable = &priv->flowtable->data;
+ struct tcphdr _tcph, *tcph = NULL;
++ struct nf_flow_route route = {};
+ enum ip_conntrack_info ctinfo;
+- struct nf_flow_route route;
+ struct flow_offload *flow;
+ enum ip_conntrack_dir dir;
+ struct nf_conn *ct;
+@@ -128,7 +224,7 @@ static void nft_flow_offload_eval(const
+ goto out;
+
+ dir = CTINFO2DIR(ctinfo);
+- if (nft_flow_route(pkt, ct, &route, dir) < 0)
++ if (nft_flow_route(pkt, ct, &route, dir, priv->flowtable) < 0)
+ goto err_flow_route;
+
+ flow = flow_offload_alloc(ct);
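
For reference, the following is a condensed, illustrative sketch (not part of the patch above) of the lookup sequence that nft_dev_fill_forward_path() and nft_dev_path_info() introduce: resolve the neighbour for the reply-direction address, and only if its entry is valid hand the destination MAC to dev_fill_forward_path() to walk the stacked devices down to the real port. The helper name example_resolve_ingress() is hypothetical, the daddr argument stands in for the conntrack peer address used by the real code, and the sketch assumes a kernel tree that already carries the dev_fill_forward_path() infrastructure added earlier in this patch series; it also omits the nft_flowtable_find_dev() check that the real code performs against the flowtable's hook devices.

/*
 * Illustrative sketch only, not part of the patch: a condensed view of the
 * ingress-device lookup added in nft_flow_offload.c. Falls back to the
 * route's own device whenever the forward path cannot be resolved.
 */
#include <linux/etherdevice.h>
#include <linux/netdevice.h>
#include <net/dst.h>
#include <net/neighbour.h>

static int example_resolve_ingress(const struct dst_entry *dst,
				   const void *daddr)
{
	const struct net_device *indev = dst->dev; /* fallback: route device */
	struct net_device_path_stack stack;
	unsigned char ha[ETH_ALEN];
	struct neighbour *n;
	u8 nud_state;
	int i;

	n = dst_neigh_lookup(dst, daddr);
	if (!n)
		return indev->ifindex;

	read_lock_bh(&n->lock);
	nud_state = n->nud_state;
	ether_addr_copy(ha, n->ha);
	read_unlock_bh(&n->lock);
	neigh_release(n);

	/* Only a valid neighbour entry yields a usable destination MAC. */
	if (!(nud_state & NUD_VALID))
		return indev->ifindex;

	/* Walk the stacked devices towards the real port. */
	if (dev_fill_forward_path(dst->dev, ha, &stack) < 0)
		return indev->ifindex;

	/*
	 * As in nft_dev_path_info(): only a trailing Ethernet entry counts;
	 * VLAN and bridge paths are not handled yet at this point.
	 */
	for (i = 0; i < stack.num_paths; i++) {
		const struct net_device_path *path = &stack.path[i];

		indev = path->type == DEV_PATH_ETHERNET ? path->dev : dst->dev;
	}

	return indev->ifindex;
}

Note that at this stage only DEV_PATH_ETHERNET entries yield a usable device; VLAN and bridge paths leave info.dev unset in nft_dev_path_info(), so those flows keep the ingress device provided by the IP forwarding route.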