commit 8b7a7048220f86547db31de0abe1ea6dd2cfa892
Author: David Woodhouse <dwmw2@infradead.org>
Date:   Thu Sep 24 11:38:22 2015 +0100

    8139cp: Fix GSO MSS handling

    When fixing the TSO support I noticed we just mask ->gso_size with the
    MSSMask value and don't care about the consequences.

    Provide a .ndo_features_check() method which drops the NETIF_F_TSO
    feature for any skb which would exceed the maximum, and thus forces it
    to be segmented by software.

    Then we can stop the masking in cp_start_xmit(), and just WARN if the
    maximum is exceeded, which should now never happen.

    Finally, Francois Romieu noticed that we didn't even have the right
    value for MSSMask anyway; it should be 0x7ff (11 bits) not 0xfff.

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
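To see why masking alone was not enough: the descriptor reserves only 11 bits for the MSS (bits 16-26, with LargeSend at bit 27), so any gso_size above 0x7ff (2047) simply cannot be encoded. Below is a small standalone sketch, not part of either patch, that reuses the constants from the diff further down with an invented gso_size to show the silent truncation that the software-GSO fallback now avoids.

/* Standalone illustration, not kernel code: an oversized gso_size loses
 * its upper bits if it is packed into the 11-bit descriptor field, so the
 * driver must refuse hardware TSO for such packets instead of masking.
 */
#include <stdio.h>

enum {
	LargeSend = (1 << 27),	/* TCP Large Send Offload (TSO) */
	MSSShift  = 16,		/* MSS value position */
	MSSMask   = 0x7ff,	/* MSS value: 11 bits */
};

int main(void)
{
	unsigned int gso_size = 0x900;	/* 2304 bytes, above MSSMask (2047) */
	unsigned int opts1 = LargeSend | ((gso_size & MSSMask) << MSSShift);

	/* The chip would segment at 256 bytes instead of 2304. */
	printf("requested MSS %u, MSS seen by the chip %u\n",
	       gso_size, (opts1 >> MSSShift) & MSSMask);
	return 0;
}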
commit 5a58f227790faded5a3ef6075f3ddd65093e0f86
Author: David Woodhouse <David.Woodhouse@intel.com>
Date:   Wed Sep 23 09:46:09 2015 +0100

    8139cp: Enable offload features by default

    I fixed TSO. Hardware checksum and scatter/gather also appear to be
    working correctly both on real hardware and in QEMU's emulation.

    Let's enable them by default and see if anyone screams...

    Signed-off-by: David Woodhouse <David.Woodhouse@intel.com>
    Signed-off-by: David S. Miller <davem@davemloft.net>
--- a/drivers/net/ethernet/realtek/8139cp.c
+++ b/drivers/net/ethernet/realtek/8139cp.c
@@ -175,7 +175,7 @@ enum {
 	LastFrag	= (1 << 28), /* Final segment of a packet */
 	LargeSend	= (1 << 27), /* TCP Large Send Offload (TSO) */
 	MSSShift	= 16,	     /* MSS value position */
-	MSSMask		= 0xfff,     /* MSS value: 11 bits */
+	MSSMask		= 0x7ff,     /* MSS value: 11 bits */
 	TxError		= (1 << 23), /* Tx error summary */
 	RxError		= (1 << 20), /* Rx error summary */
 	IPCS		= (1 << 18), /* Calculate IP checksum */
@@ -754,10 +754,16 @@ static netdev_tx_t cp_start_xmit (struct
 	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
 	mss = skb_shinfo(skb)->gso_size;
 
+	if (mss > MSSMask) {
+		WARN_ONCE(1, "Net bug: GSO size %d too large for 8139CP\n",
+			  mss);
+		goto out_dma_error;
+	}
+
 	opts2 = cpu_to_le32(cp_tx_vlan_tag(skb));
 	opts1 = DescOwn;
 	if (mss)
-		opts1 |= LargeSend | ((mss & MSSMask) << MSSShift);
+		opts1 |= LargeSend | (mss << MSSShift);
 	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
 		const struct iphdr *ip = ip_hdr(skb);
 		if (ip->protocol == IPPROTO_TCP)
@@ -1852,6 +1858,15 @@ static void cp_set_d3_state (struct cp_p
 	pci_set_power_state (cp->pdev, PCI_D3hot);
 }
 
+static netdev_features_t cp_features_check(struct sk_buff *skb,
+					   struct net_device *dev,
+					   netdev_features_t features)
+{
+	if (skb_shinfo(skb)->gso_size > MSSMask)
+		features &= ~NETIF_F_TSO;
+
+	return vlan_features_check(skb, features);
+}
 static const struct net_device_ops cp_netdev_ops = {
 	.ndo_open		= cp_open,
 	.ndo_stop		= cp_close,
@@ -1864,6 +1879,7 @@ static const struct net_device_ops cp_ne
 	.ndo_tx_timeout		= cp_tx_timeout,
 	.ndo_set_features	= cp_set_features,
 	.ndo_change_mtu		= cp_change_mtu,
+	.ndo_features_check	= cp_features_check,
 #ifdef CONFIG_NET_POLL_CONTROLLER
 	.ndo_poll_controller	= cp_poll_controller,
 #endif
@@ -1983,12 +1999,12 @@ static int cp_init_one (struct pci_dev *
 	dev->ethtool_ops = &cp_ethtool_ops;
 	dev->watchdog_timeo = TX_TIMEOUT;
 
-	dev->features |= NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
+	dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
+		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 
 	if (pci_using_dac)
 		dev->features |= NETIF_F_HIGHDMA;
 
-	/* disabled by default until verified */
 	dev->hw_features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
 		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX;
 	dev->vlan_features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_TSO |
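
For context on why the WARN_ONCE in cp_start_xmit() "should now never happen": once .ndo_features_check is installed, the core re-evaluates the device features for each skb just before transmit and falls back to software segmentation when TSO has been dropped. The sketch below paraphrases that flow; it is not verbatim net/core code, only an illustration using helpers available in kernels of this era.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Paraphrased sketch of the core's per-skb feature re-check: if
 * cp_features_check() clears NETIF_F_TSO because gso_size > MSSMask,
 * the stack segments the skb in software before cp_start_xmit() runs.
 */
static struct sk_buff *tso_fallback_sketch(struct sk_buff *skb)
{
	netdev_features_t features = netif_skb_features(skb);
	/* netif_skb_features() consults ndo_features_check(), i.e. the
	 * new cp_features_check() above. */

	if (netif_needs_gso(skb, features))
		return skb_gso_segment(skb, features);	/* software GSO */

	return skb;	/* MSS fits in 11 bits: hardware TSO is safe */
}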