--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1372,11 +1372,18 @@ static inline int skb_network_offset(con
  *
  * Various parts of the networking layer expect at least 32 bytes of
  * headroom, you should not reduce this.
+ *
+ * This has been changed to 64 to accommodate routing between ethernet
+ * and wireless, but only for new allocations
  */
 #ifndef NET_SKB_PAD
 #define NET_SKB_PAD	32
 #endif
 
+#ifndef NET_SKB_PAD_ALLOC
+#define NET_SKB_PAD_ALLOC	64
+#endif
+
 extern int ___pskb_trim(struct sk_buff *skb, unsigned int len);
 
 static inline void __skb_trim(struct sk_buff *skb, unsigned int len)
@@ -1466,9 +1473,9 @@ static inline void __skb_queue_purge(str
 static inline struct sk_buff *__dev_alloc_skb(unsigned int length,
 					      gfp_t gfp_mask)
 {
-	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD, gfp_mask);
+	struct sk_buff *skb = alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask);
 	if (likely(skb))
-		skb_reserve(skb, NET_SKB_PAD);
+		skb_reserve(skb, NET_SKB_PAD_ALLOC);
 	return skb;
 }
 
@@ -1551,7 +1558,7 @@ static inline int __skb_cow(struct sk_bu
 		delta = headroom - skb_headroom(skb);
 
 	if (delta || cloned)
-		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0,
+		return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD_ALLOC), 0,
 					GFP_ATOMIC);
 	return 0;
 }
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -339,9 +339,9 @@ struct sk_buff *__netdev_alloc_skb(struc
 	int node = dev->dev.parent ? dev_to_node(dev->dev.parent) : -1;
 	struct sk_buff *skb;
 
-	skb = __alloc_skb(length + NET_SKB_PAD, gfp_mask, 0, node);
+	skb = __alloc_skb(length + NET_SKB_PAD_ALLOC, gfp_mask, 0, node);
 	if (likely(skb)) {
-		skb_reserve(skb, NET_SKB_PAD);
+		skb_reserve(skb, NET_SKB_PAD_ALLOC);
 		skb->dev = dev;
 	}
 	return skb;
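
For context, the sketch below is not part of the patch; it is a hypothetical example of the kind of transmit path the larger allocation padding is meant to help. The function name example_encap_xmit and the 32-byte encap_len are made up for illustration. The point is that when skbs are allocated with NET_SKB_PAD_ALLOC (64) bytes of headroom, a check like skb_cow_head() on the ethernet-to-wireless forwarding path usually finds enough room already reserved, so skb_push() can prepend the wireless encapsulation header in place without a copy or reallocation.

/*
 * Illustrative only -- not part of the patch above.  A minimal sketch of
 * a transmit-path helper that prepends an encapsulation header, assuming
 * a hypothetical 32-byte header (roughly the extra cost of 802.11 + LLC
 * over plain Ethernet).
 */
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/string.h>

static int example_encap_xmit(struct sk_buff *skb, struct net_device *dev)
{
	const unsigned int encap_len = 32;	/* hypothetical header size */

	/*
	 * Make sure the header can be prepended; this copies only if the
	 * headroom is too small or the header area is shared with a clone.
	 * With 64 bytes reserved at allocation time this is normally a no-op.
	 */
	if (skb_cow_head(skb, encap_len))
		return -ENOMEM;

	/* Headroom is now guaranteed; build the new header in place. */
	memset(skb_push(skb, encap_len), 0, encap_len);

	skb->dev = dev;
	return 0;
}

Keeping NET_SKB_PAD at 32 for the headroom checks while only the allocation paths use NET_SKB_PAD_ALLOC means existing skbs are never forced through pskb_expand_head() just because the padding constant grew; only freshly allocated buffers pay for the extra 32 bytes.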