path: root/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
Diffstat (limited to 'target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch')
-rw-r--r--  target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch  |  14  +++++++-------
1 file changed, 7 insertions(+), 7 deletions(-)
diff --git a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
index 292b381f8d..0a5b9ead9d 100644
--- a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
+++ b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
@@ -58,7 +58,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
goto out;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned lo
+@@ -772,14 +772,15 @@ static void tcp_tasklet_func(unsigned lo
list_for_each_safe(q, n, &list) {
tp = list_entry(q, struct tcp_sock, tsq_node);
list_del(&tp->tsq_node);
@@ -77,7 +77,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
tcp_tsq_handler(sk);
}
bh_unlock_sock(sk);
-@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned lo
+@@ -802,16 +803,15 @@ static void tcp_tasklet_func(unsigned lo
*/
void tcp_release_cb(struct sock *sk)
{
@@ -96,7 +96,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (flags & TCPF_TSQ_DEFERRED)
tcp_tsq_handler(sk);
-@@ -878,7 +878,7 @@ void tcp_wfree(struct sk_buff *skb)
+@@ -883,7 +883,7 @@ void tcp_wfree(struct sk_buff *skb)
if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
goto out;
@@ -105,7 +105,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
struct tsq_tasklet *tsq;
bool empty;
-@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb)
+@@ -891,7 +891,7 @@ void tcp_wfree(struct sk_buff *skb)
goto out;
nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
@@ -114,7 +114,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (nval != oval)
continue;
-@@ -2124,7 +2124,7 @@ static bool tcp_small_queue_check(struct
+@@ -2136,7 +2136,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
@@ -123,7 +123,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
-@@ -2222,8 +2222,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2234,8 +2234,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
@@ -134,7 +134,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3534,8 +3534,6 @@ void tcp_send_ack(struct sock *sk)
+@@ -3546,8 +3546,6 @@ void __tcp_send_ack(struct sock *sk, u32
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784