Diffstat (limited to 'target/linux/generic/patches-3.10/770-bgmac-backport.patch')
-rw-r--r--  target/linux/generic/patches-3.10/770-bgmac-backport.patch  | 201
1 file changed, 188 insertions(+), 13 deletions(-)
diff --git a/target/linux/generic/patches-3.10/770-bgmac-backport.patch b/target/linux/generic/patches-3.10/770-bgmac-backport.patch
index 99415a6d75..38a34a930f 100644
--- a/target/linux/generic/patches-3.10/770-bgmac-backport.patch
+++ b/target/linux/generic/patches-3.10/770-bgmac-backport.patch
@@ -64,7 +64,73 @@ patches for bgmac backported from net-next/master
if (freed && netif_queue_stopped(bgmac->net_dev))
netif_wake_queue(bgmac->net_dev);
}
-@@ -274,6 +285,8 @@ static int bgmac_dma_rx_read(struct bgma
+@@ -241,31 +252,59 @@ static int bgmac_dma_rx_skb_for_slot(str
+ struct bgmac_slot_info *slot)
+ {
+ struct device *dma_dev = bgmac->core->dma_dev;
++ struct sk_buff *skb;
++ dma_addr_t dma_addr;
+ struct bgmac_rx_header *rx;
+
+ /* Alloc skb */
+- slot->skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
+- if (!slot->skb)
++ skb = netdev_alloc_skb(bgmac->net_dev, BGMAC_RX_BUF_SIZE);
++ if (!skb)
+ return -ENOMEM;
+
+ /* Poison - if everything goes fine, hardware will overwrite it */
+- rx = (struct bgmac_rx_header *)slot->skb->data;
++ rx = (struct bgmac_rx_header *)skb->data;
+ rx->len = cpu_to_le16(0xdead);
+ rx->flags = cpu_to_le16(0xbeef);
+
+ /* Map skb for the DMA */
+- slot->dma_addr = dma_map_single(dma_dev, slot->skb->data,
+- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
+- if (dma_mapping_error(dma_dev, slot->dma_addr)) {
++ dma_addr = dma_map_single(dma_dev, skb->data,
++ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ if (dma_mapping_error(dma_dev, dma_addr)) {
+ bgmac_err(bgmac, "DMA mapping error\n");
++ dev_kfree_skb(skb);
+ return -ENOMEM;
+ }
++
++ /* Update the slot */
++ slot->skb = skb;
++ slot->dma_addr = dma_addr;
++
+ if (slot->dma_addr & 0xC0000000)
+ bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
+
+ return 0;
+ }
+
++static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
++ struct bgmac_dma_ring *ring, int desc_idx)
++{
++ struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
++ u32 ctl0 = 0, ctl1 = 0;
++
++ if (desc_idx == ring->num_slots - 1)
++ ctl0 |= BGMAC_DESC_CTL0_EOT;
++ ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
++ /* Is there any BGMAC device that requires extension? */
++ /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
++ * B43_DMA64_DCTL1_ADDREXT_MASK;
++ */
++
++ dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
++ dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
++ dma_desc->ctl0 = cpu_to_le32(ctl0);
++ dma_desc->ctl1 = cpu_to_le32(ctl1);
++}
++
+ static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
+ int weight)
+ {
+@@ -274,6 +313,8 @@ static int bgmac_dma_rx_read(struct bgma
end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
end_slot &= BGMAC_DMA_RX_STATDPTR;
@@ -73,7 +139,94 @@ patches for bgmac backported from net-next/master
end_slot /= sizeof(struct bgmac_dma_desc);
ring->end = end_slot;
-@@ -418,9 +431,6 @@ static int bgmac_dma_alloc(struct bgmac
+@@ -282,7 +323,6 @@ static int bgmac_dma_rx_read(struct bgma
+ struct device *dma_dev = bgmac->core->dma_dev;
+ struct bgmac_slot_info *slot = &ring->slots[ring->start];
+ struct sk_buff *skb = slot->skb;
+- struct sk_buff *new_skb;
+ struct bgmac_rx_header *rx;
+ u16 len, flags;
+
+@@ -295,38 +335,51 @@ static int bgmac_dma_rx_read(struct bgma
+ len = le16_to_cpu(rx->len);
+ flags = le16_to_cpu(rx->flags);
+
+- /* Check for poison and drop or pass the packet */
+- if (len == 0xdead && flags == 0xbeef) {
+- bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
+- ring->start);
+- } else {
++ do {
++ dma_addr_t old_dma_addr = slot->dma_addr;
++ int err;
++
++ /* Check for poison and drop or pass the packet */
++ if (len == 0xdead && flags == 0xbeef) {
++ bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
++ ring->start);
++ dma_sync_single_for_device(dma_dev,
++ slot->dma_addr,
++ BGMAC_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++ break;
++ }
++
+ /* Omit CRC. */
+ len -= ETH_FCS_LEN;
+
+- new_skb = netdev_alloc_skb_ip_align(bgmac->net_dev, len);
+- if (new_skb) {
+- skb_put(new_skb, len);
+- skb_copy_from_linear_data_offset(skb, BGMAC_RX_FRAME_OFFSET,
+- new_skb->data,
+- len);
+- skb_checksum_none_assert(skb);
+- new_skb->protocol =
+- eth_type_trans(new_skb, bgmac->net_dev);
+- netif_receive_skb(new_skb);
+- handled++;
+- } else {
+- bgmac->net_dev->stats.rx_dropped++;
+- bgmac_err(bgmac, "Allocation of skb for copying packet failed!\n");
++ /* Prepare new skb as replacement */
++ err = bgmac_dma_rx_skb_for_slot(bgmac, slot);
++ if (err) {
++ /* Poison the old skb */
++ rx->len = cpu_to_le16(0xdead);
++ rx->flags = cpu_to_le16(0xbeef);
++
++ dma_sync_single_for_device(dma_dev,
++ slot->dma_addr,
++ BGMAC_RX_BUF_SIZE,
++ DMA_FROM_DEVICE);
++ break;
+ }
++ bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);
+
+- /* Poison the old skb */
+- rx->len = cpu_to_le16(0xdead);
+- rx->flags = cpu_to_le16(0xbeef);
+- }
+-
+- /* Make it back accessible to the hardware */
+- dma_sync_single_for_device(dma_dev, slot->dma_addr,
+- BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
++ /* Unmap old skb, we'll pass it to the netif */
++ dma_unmap_single(dma_dev, old_dma_addr,
++ BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);
++
++ skb_put(skb, BGMAC_RX_FRAME_OFFSET + len);
++ skb_pull(skb, BGMAC_RX_FRAME_OFFSET);
++
++ skb_checksum_none_assert(skb);
++ skb->protocol = eth_type_trans(skb, bgmac->net_dev);
++ netif_receive_skb(skb);
++ handled++;
++ } while (0);
+
+ if (++ring->start >= BGMAC_RX_RING_SLOTS)
+ ring->start = 0;
+@@ -418,9 +471,6 @@ static int bgmac_dma_alloc(struct bgmac
ring = &bgmac->tx_ring[i];
ring->num_slots = BGMAC_TX_RING_SLOTS;
ring->mmio_base = ring_base[i];
@@ -83,7 +236,7 @@ patches for bgmac backported from net-next/master
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-@@ -435,6 +445,13 @@ static int bgmac_dma_alloc(struct bgmac
+@@ -435,6 +485,13 @@ static int bgmac_dma_alloc(struct bgmac
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
@@ -97,7 +250,7 @@ patches for bgmac backported from net-next/master
/* No need to alloc TX slots yet */
}
-@@ -444,9 +461,6 @@ static int bgmac_dma_alloc(struct bgmac
+@@ -444,9 +501,6 @@ static int bgmac_dma_alloc(struct bgmac
ring = &bgmac->rx_ring[i];
ring->num_slots = BGMAC_RX_RING_SLOTS;
ring->mmio_base = ring_base[i];
@@ -107,7 +260,7 @@ patches for bgmac backported from net-next/master
/* Alloc ring of descriptors */
size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-@@ -462,6 +476,13 @@ static int bgmac_dma_alloc(struct bgmac
+@@ -462,6 +516,13 @@ static int bgmac_dma_alloc(struct bgmac
if (ring->dma_base & 0xC0000000)
bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
@@ -121,7 +274,14 @@ patches for bgmac backported from net-next/master
/* Alloc RX slots */
for (j = 0; j < ring->num_slots; j++) {
err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
-@@ -489,12 +510,14 @@ static void bgmac_dma_init(struct bgmac
+@@ -482,19 +543,19 @@ err_dma_free:
+ static void bgmac_dma_init(struct bgmac *bgmac)
+ {
+ struct bgmac_dma_ring *ring;
+- struct bgmac_dma_desc *dma_desc;
+- u32 ctl0, ctl1;
+ int i;
+
for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
ring = &bgmac->tx_ring[i];
@@ -138,7 +298,7 @@ patches for bgmac backported from net-next/master
ring->start = 0;
ring->end = 0; /* Points the slot that should *not* be read */
-@@ -505,12 +528,14 @@ static void bgmac_dma_init(struct bgmac
+@@ -505,32 +566,20 @@ static void bgmac_dma_init(struct bgmac
ring = &bgmac->rx_ring[i];
@@ -153,17 +313,32 @@ patches for bgmac backported from net-next/master
+ if (ring->unaligned)
+ bgmac_dma_rx_enable(bgmac, ring);
- for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
- j++, dma_desc++) {
-@@ -531,6 +556,7 @@ static void bgmac_dma_init(struct bgmac
- }
+- for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
+- j++, dma_desc++) {
+- ctl0 = ctl1 = 0;
+-
+- if (j == ring->num_slots - 1)
+- ctl0 |= BGMAC_DESC_CTL0_EOT;
+- ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
+- /* Is there any BGMAC device that requires extension? */
+- /* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
+- * B43_DMA64_DCTL1_ADDREXT_MASK;
+- */
+-
+- dma_desc->addr_low = cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
+- dma_desc->addr_high = cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
+- dma_desc->ctl0 = cpu_to_le32(ctl0);
+- dma_desc->ctl1 = cpu_to_le32(ctl1);
+- }
++ for (j = 0; j < ring->num_slots; j++)
++ bgmac_dma_rx_setup_desc(bgmac, ring, j);
bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
+ ring->index_base +
ring->num_slots * sizeof(struct bgmac_dma_desc));
ring->start = 0;
-@@ -909,9 +935,9 @@ static void bgmac_chip_reset(struct bgma
+@@ -909,9 +958,9 @@ static void bgmac_chip_reset(struct bgma
u8 et_swtype = 0;
u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
BGMAC_CHIPCTL_1_IF_TYPE_MII;
@@ -175,7 +350,7 @@ patches for bgmac backported from net-next/master
if (kstrtou8(buf, 0, &et_swtype))
bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
buf);
-@@ -970,6 +996,8 @@ static void bgmac_chip_reset(struct bgma
+@@ -970,6 +1019,8 @@ static void bgmac_chip_reset(struct bgma
bgmac_miiconfig(bgmac);
bgmac_phy_init(bgmac);
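
The backported hunks above rework the bgmac RX path: instead of copying every received frame into a freshly allocated skb, the driver now allocates and maps a replacement buffer first, and only if that succeeds does it unmap the filled buffer and hand it straight to the network stack; on allocation failure the old buffer is re-poisoned and left in the ring. Below is a minimal, standalone C sketch of that swap-before-handoff pattern, not driver code: all names (rx_slot, alloc_rx_buffer, deliver, rx_one) are made up for illustration, malloc/free stand in for skb allocation and DMA mapping, and the poison bytes mimic the 0xdead/0xbeef markers used in the patch.

/*
 * Illustrative sketch only -- not bgmac code. It mimics the RX refill
 * pattern introduced by the backport: prepare a replacement buffer first,
 * and only then give the old buffer away; if the replacement cannot be
 * allocated, keep the old buffer in the ring and drop the frame.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define RX_BUF_SIZE 2048

struct rx_slot {
	unsigned char *buf;	/* stands in for slot->skb / slot->dma_addr */
};

/* Stand-in for bgmac_dma_rx_skb_for_slot(): install a fresh buffer. */
static int alloc_rx_buffer(struct rx_slot *slot)
{
	unsigned char *buf = malloc(RX_BUF_SIZE);

	if (!buf)
		return -1;
	/* Poison so a later read can tell "hardware never wrote here". */
	memset(buf, 0, RX_BUF_SIZE);
	buf[0] = 0xde;
	buf[1] = 0xad;
	slot->buf = buf;
	return 0;
}

/* Stand-in for eth_type_trans()/netif_receive_skb(): consume the frame. */
static void deliver(unsigned char *frame, size_t len)
{
	printf("delivered %zu-byte frame starting 0x%02x\n", len, frame[0]);
	free(frame);
}

/* One RX completion: swap buffers instead of copying the payload. */
static int rx_one(struct rx_slot *slot, size_t len)
{
	unsigned char *old = slot->buf;

	if (alloc_rx_buffer(slot)) {
		/* No replacement: old buffer stays in the ring, frame dropped. */
		return -1;
	}
	/* Replacement installed; the old buffer now belongs to the stack. */
	deliver(old, len);
	return 0;
}

int main(void)
{
	struct rx_slot slot;

	if (alloc_rx_buffer(&slot))
		return 1;
	slot.buf[0] = 0x45;	/* pretend the hardware wrote a frame */
	rx_one(&slot, 64);
	free(slot.buf);		/* ring teardown */
	return 0;
}

The ordering is the point of the change: the RX ring never loses a buffer, because the old one is only released once its replacement is in place, which is also why the error paths in the patch re-poison the header and resync the buffer for the device instead of freeing it.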