-rw-r--r--  target/linux/generic/patches-3.10/110-net_fix_multiqueue_selection.patch  |  41
1 file changed, 41 insertions(+), 0 deletions(-)
diff --git a/target/linux/generic/patches-3.10/110-net_fix_multiqueue_selection.patch b/target/linux/generic/patches-3.10/110-net_fix_multiqueue_selection.patch
new file mode 100644
index 0000000000..6a07ee8c0d
--- /dev/null
+++ b/target/linux/generic/patches-3.10/110-net_fix_multiqueue_selection.patch
@@ -0,0 +1,41 @@
+From: Eric Dumazet <edumazet@google.com>
+
+commit 416186fbf8c5b4e4465 ("net: Split core bits of netdev_pick_tx
+into __netdev_pick_tx") added a bug that disables caching of queue
+index in the socket.
+
+This is the source of packet reorders for TCP flows, and
+again this is happening more often when using FQ pacing.
+
+Old code was doing
+
+if (queue_index != old_index)
+ sk_tx_queue_set(sk, queue_index);
+
+Alexander renamed the variables but forgot to change the second
+parameter of sk_tx_queue_set().
+
+if (queue_index != new_index)
+ sk_tx_queue_set(sk, queue_index);
+
+This means we store -1 over and over in sk->sk_tx_queue_mapping.
+
+Signed-off-by: Eric Dumazet <edumazet@google.com>
+Cc: Alexander Duyck <alexander.h.duyck@intel.com>
+Acked-by: Alexander Duyck <alexander.h.duyck@intel.com>
+
+---
+net/core/flow_dissector.c | 2 +-
+ 1 file changed, 1 insertion(+), 1 deletion(-)
+
+--- a/net/core/flow_dissector.c
++++ b/net/core/flow_dissector.c
+@@ -347,7 +347,7 @@ u16 __netdev_pick_tx(struct net_device *
+
+ if (queue_index != new_index && sk &&
+ rcu_access_pointer(sk->sk_dst_cache))
+- sk_tx_queue_set(sk, queue_index);
++ sk_tx_queue_set(sk, new_index);
+
+ queue_index = new_index;
+ }
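
For anyone who wants to see the effect of the one-line change in isolation, here is a
minimal user-space C sketch. It is not kernel code: mock_sock and pick_tx() are
invented stand-ins for the kernel's sock and __netdev_pick_tx(), the
sk_tx_queue_get()/sk_tx_queue_set() helpers are reduced to plain field accesses, and
the rcu_access_pointer(sk->sk_dst_cache) guard from the real hunk is omitted.

/* Sketch of the queue-index caching bug: the pre-patch store writes the
 * stale value (-1) back into the cache, the fixed store writes the
 * freshly computed index. */
#include <stdio.h>

struct mock_sock {
    int sk_tx_queue_mapping;    /* cached TX queue, -1 = not cached */
};

static int sk_tx_queue_get(const struct mock_sock *sk)
{
    return sk->sk_tx_queue_mapping;
}

static void sk_tx_queue_set(struct mock_sock *sk, int queue)
{
    sk->sk_tx_queue_mapping = queue;
}

/* 'buggy' selects between the pre-patch and post-patch store */
static int pick_tx(struct mock_sock *sk, int hashed_queue, int buggy)
{
    int queue_index = sk_tx_queue_get(sk);  /* stale value, -1 at first */
    int new_index = hashed_queue;           /* stand-in for the hash result */

    if (queue_index != new_index)
        sk_tx_queue_set(sk, buggy ? queue_index : new_index);

    return new_index;
}

int main(void)
{
    struct mock_sock a = { .sk_tx_queue_mapping = -1 };
    struct mock_sock b = { .sk_tx_queue_mapping = -1 };

    pick_tx(&a, 3, 1);  /* pre-patch: caches the stale -1 */
    pick_tx(&b, 3, 0);  /* post-patch: caches queue 3 */

    printf("buggy cache: %d, fixed cache: %d\n",
           a.sk_tx_queue_mapping, b.sk_tx_queue_mapping);
    return 0;
}

Built with any C compiler this prints "buggy cache: -1, fixed cache: 3": with the
pre-patch store the socket never remembers its queue, so every transmit recomputes
the selection and packets of a single TCP flow can land on different queues, which
is the reordering this backported patch fixes.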