author    Koen Vandeputte <koen.vandeputte@ncentric.com>  2018-07-31 13:30:14 +0200
committer Koen Vandeputte <koen.vandeputte@ncentric.com>  2018-08-01 09:46:59 +0200
commit    fec8fe806963c96a6506c2aebc3572d3a11f285f
tree      6797f955dba6a4608325c2611e34fe3cf894ea0a
parent    d0e0b7049f88774e67c3d5ad6b573f7070e5f900
kernel: bump 4.9 to 4.9.116
Refreshed all patches.

Remove upstreamed patches:
- 103-MIPS-ath79-fix-register-address-in-ath79_ddr_wb_flus.patch
- 403-mtd_fix_cfi_cmdset_0002_status_check.patch
- 001-4.11-01-mtd-m25p80-consider-max-message-size-in-m25p80_read.patch
- 001-4.15-08-bcm63xx_enet-correct-clock-usage.patch
- 001-4.15-09-bcm63xx_enet-do-not-write-to-random-DMA-channel-on-B.patch
- 900-gen_stats-fix-netlink-stats-padding.patch

Introduce a new backported patch to address ext4 breakage introduced in 4.9.112:
- backport-4.9/500-ext4-fix-check-to-prevent-initializing-reserved-inod.patch

This patch has been slightly altered to compensate for a new helper function introduced in later kernels.

Also add the ARM64_SSBD symbol to ARM64 targets still running kernel 4.9.

Compile-tested on: ar71xx, bcm2710
Runtime-tested on: ar71xx

Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
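For reference, the ARM64_SSBD change mentioned above (the 4.9-stable Speculative Store Bypass mitigation symbol) normally amounts to enabling one Kconfig option per affected target; a minimal sketch, where the exact file path is an assumption for illustration:

    # illustrative location: target/linux/<arm64 target>/config-4.9
    CONFIG_ARM64_SSBD=y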
Diffstat (limited to 'target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch')
-rw-r--r--  target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch | 14
1 file changed, 7 insertions(+), 7 deletions(-)
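The refreshed hunks below only shift the inner patch's hunk offsets (e.g. 767 -> 772) so it still applies to 4.9.116; the code they anchor against is the lockless update of sk->sk_tsq_flags. As a rough illustration of that compare-and-swap retry pattern, here is a minimal user-space C sketch; the bit positions and the helper name set_queued_deferred() are assumptions made for the example, and the kernel itself uses cmpxchg() rather than C11 atomics:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Placeholder bit positions, chosen only for this example. */
    #define TSQF_THROTTLED    (1UL << 0)
    #define TSQF_QUEUED       (1UL << 1)
    #define TCPF_TSQ_DEFERRED (1UL << 2)

    /* Stand-in for sk->sk_tsq_flags, which the patch moves next to sk_wmem_alloc. */
    static _Atomic unsigned long sk_tsq_flags;

    /* Hypothetical helper mirroring the loop visible in the tcp_wfree() hunks:
     * clear THROTTLED, set QUEUED and TSQ_DEFERRED, retrying if another CPU
     * changed the flag word in the meantime. */
    static void set_queued_deferred(void)
    {
            unsigned long oval = atomic_load(&sk_tsq_flags);
            unsigned long nval;

            do {
                    /* Nothing to do unless throttled and not already queued. */
                    if (!(oval & TSQF_THROTTLED) || (oval & TSQF_QUEUED))
                            return;
                    nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
                    /* On failure, oval is refreshed with the current value. */
            } while (!atomic_compare_exchange_weak(&sk_tsq_flags, &oval, nval));
    }

    int main(void)
    {
            atomic_store(&sk_tsq_flags, TSQF_THROTTLED);
            set_queued_deferred();
            printf("flags = %#lx\n", atomic_load(&sk_tsq_flags)); /* prints 0x6 */
            return 0;
    }

The "if (nval != oval) continue;" sequence in the diff is the kernel's hand-rolled version of the same retry: cmpxchg() returns the old value, and a mismatch means another CPU updated the flags first.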
diff --git a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
index 292b381f8d..0a5b9ead9d 100644
--- a/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
+++ b/target/linux/generic/backport-4.9/024-8-tcp-tsq-move-tsq_flags-close-to-sk_wmem_alloc.patch
@@ -58,7 +58,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
goto out;
--- a/net/ipv4/tcp_output.c
+++ b/net/ipv4/tcp_output.c
-@@ -767,14 +767,15 @@ static void tcp_tasklet_func(unsigned lo
+@@ -772,14 +772,15 @@ static void tcp_tasklet_func(unsigned lo
list_for_each_safe(q, n, &list) {
tp = list_entry(q, struct tcp_sock, tsq_node);
list_del(&tp->tsq_node);
@@ -77,7 +77,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
tcp_tsq_handler(sk);
}
bh_unlock_sock(sk);
-@@ -797,16 +798,15 @@ static void tcp_tasklet_func(unsigned lo
+@@ -802,16 +803,15 @@ static void tcp_tasklet_func(unsigned lo
*/
void tcp_release_cb(struct sock *sk)
{
@@ -96,7 +96,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (flags & TCPF_TSQ_DEFERRED)
tcp_tsq_handler(sk);
-@@ -878,7 +878,7 @@ void tcp_wfree(struct sk_buff *skb)
+@@ -883,7 +883,7 @@ void tcp_wfree(struct sk_buff *skb)
if (wmem >= SKB_TRUESIZE(1) && this_cpu_ksoftirqd() == current)
goto out;
@@ -105,7 +105,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
struct tsq_tasklet *tsq;
bool empty;
-@@ -886,7 +886,7 @@ void tcp_wfree(struct sk_buff *skb)
+@@ -891,7 +891,7 @@ void tcp_wfree(struct sk_buff *skb)
goto out;
nval = (oval & ~TSQF_THROTTLED) | TSQF_QUEUED | TCPF_TSQ_DEFERRED;
@@ -114,7 +114,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (nval != oval)
continue;
-@@ -2124,7 +2124,7 @@ static bool tcp_small_queue_check(struct
+@@ -2136,7 +2136,7 @@ static bool tcp_small_queue_check(struct
skb->prev == sk->sk_write_queue.next)
return false;
@@ -123,7 +123,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
/* It is possible TX completion already happened
* before we set TSQ_THROTTLED, so we must
* test again the condition.
-@@ -2222,8 +2222,8 @@ static bool tcp_write_xmit(struct sock *
+@@ -2234,8 +2234,8 @@ static bool tcp_write_xmit(struct sock *
unlikely(tso_fragment(sk, skb, limit, mss_now, gfp)))
break;
@@ -134,7 +134,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net>
if (tcp_small_queue_check(sk, skb, 0))
break;
-@@ -3534,8 +3534,6 @@ void tcp_send_ack(struct sock *sk)
+@@ -3546,8 +3546,6 @@ void __tcp_send_ack(struct sock *sk, u32
/* We do not want pure acks influencing TCP Small Queues or fq/pacing
* too much.
* SKB_TRUESIZE(max(1 .. 66, MAX_TCP_HEADER)) is unfortunately ~784