Diffstat (limited to 'target/linux/bcm53xx/patches-3.10/212-bgmac_implement_unaligned_addressing.patch')
-rw-r--r--  target/linux/bcm53xx/patches-3.10/212-bgmac_implement_unaligned_addressing.patch  140
1 file changed, 0 insertions(+), 140 deletions(-)
diff --git a/target/linux/bcm53xx/patches-3.10/212-bgmac_implement_unaligned_addressing.patch b/target/linux/bcm53xx/patches-3.10/212-bgmac_implement_unaligned_addressing.patch
deleted file mode 100644
index 4987e5e720..0000000000
--- a/target/linux/bcm53xx/patches-3.10/212-bgmac_implement_unaligned_addressing.patch
+++ /dev/null
@@ -1,140 +0,0 @@
-bgmac: implement unaligned addressing for DMA rings that support it
-
-This is an important patch for new devices that support unaligned
-addressing. Those devices suffer from a backward-compatibility bug in the
-DMA engine. In theory we should be able to use the old mechanism, but in
-practice the DMA address seems to be randomly copied into the status
-register when the hardware reaches the end of a ring. This breaks reading
-the slot number from the status register, so we can't use DMA anymore.
-
-Signed-off-by: Rafał Miłecki <zajec5@gmail.com>
-
---- a/drivers/net/ethernet/broadcom/bgmac.c
-+++ b/drivers/net/ethernet/broadcom/bgmac.c
-@@ -162,6 +162,7 @@ static netdev_tx_t bgmac_dma_tx_add(stru
- if (++ring->end >= BGMAC_TX_RING_SLOTS)
- ring->end = 0;
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_INDEX,
-+ ring->index_base +
- ring->end * sizeof(struct bgmac_dma_desc));
-
- /* Always keep one slot free to allow detecting bugged calls. */
-@@ -186,6 +187,8 @@ static void bgmac_dma_tx_free(struct bgm
- /* The last slot that hardware didn't consume yet */
- empty_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_TX_STATUS);
- empty_slot &= BGMAC_DMA_TX_STATDPTR;
-+ empty_slot -= ring->index_base;
-+ empty_slot &= BGMAC_DMA_TX_STATDPTR;
- empty_slot /= sizeof(struct bgmac_dma_desc);
-
- while (ring->start != empty_slot) {
-@@ -279,6 +282,8 @@ static int bgmac_dma_rx_read(struct bgma
-
- end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
- end_slot &= BGMAC_DMA_RX_STATDPTR;
-+ end_slot -= ring->index_base;
-+ end_slot &= BGMAC_DMA_RX_STATDPTR;
- end_slot /= sizeof(struct bgmac_dma_desc);
-
- ring->end = end_slot;
-@@ -423,9 +428,6 @@ static int bgmac_dma_alloc(struct bgmac
- ring = &bgmac->tx_ring[i];
- ring->num_slots = BGMAC_TX_RING_SLOTS;
- ring->mmio_base = ring_base[i];
-- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_TX))
-- bgmac_warn(bgmac, "TX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-- ring->mmio_base);
-
- /* Alloc ring of descriptors */
- size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-@@ -440,6 +442,13 @@ static int bgmac_dma_alloc(struct bgmac
- if (ring->dma_base & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
-+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
-+ BGMAC_DMA_RING_TX);
-+ if (ring->unaligned)
-+ ring->index_base = lower_32_bits(ring->dma_base);
-+ else
-+ ring->index_base = 0;
-+
- /* No need to alloc TX slots yet */
- }
-
-@@ -449,9 +458,6 @@ static int bgmac_dma_alloc(struct bgmac
- ring = &bgmac->rx_ring[i];
- ring->num_slots = BGMAC_RX_RING_SLOTS;
- ring->mmio_base = ring_base[i];
-- if (bgmac_dma_unaligned(bgmac, ring, BGMAC_DMA_RING_RX))
-- bgmac_warn(bgmac, "RX on ring 0x%X supports unaligned addressing but this feature is not implemented\n",
-- ring->mmio_base);
-
- /* Alloc ring of descriptors */
- size = ring->num_slots * sizeof(struct bgmac_dma_desc);
-@@ -467,6 +473,13 @@ static int bgmac_dma_alloc(struct bgmac
- if (ring->dma_base & 0xC0000000)
- bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");
-
-+ ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
-+ BGMAC_DMA_RING_RX);
-+ if (ring->unaligned)
-+ ring->index_base = lower_32_bits(ring->dma_base);
-+ else
-+ ring->index_base = 0;
-+
- /* Alloc RX slots */
- for (j = 0; j < ring->num_slots; j++) {
- err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
-@@ -494,12 +507,14 @@ static void bgmac_dma_init(struct bgmac
- for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
- ring = &bgmac->tx_ring[i];
-
-- /* We don't implement unaligned addressing, so enable first */
-- bgmac_dma_tx_enable(bgmac, ring);
-+ if (!ring->unaligned)
-+ bgmac_dma_tx_enable(bgmac, ring);
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
- lower_32_bits(ring->dma_base));
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
- upper_32_bits(ring->dma_base));
-+ if (ring->unaligned)
-+ bgmac_dma_tx_enable(bgmac, ring);
-
- ring->start = 0;
- ring->end = 0; /* Points the slot that should *not* be read */
-@@ -510,12 +525,14 @@ static void bgmac_dma_init(struct bgmac
-
- ring = &bgmac->rx_ring[i];
-
-- /* We don't implement unaligned addressing, so enable first */
-- bgmac_dma_rx_enable(bgmac, ring);
-+ if (!ring->unaligned)
-+ bgmac_dma_rx_enable(bgmac, ring);
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
- lower_32_bits(ring->dma_base));
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
- upper_32_bits(ring->dma_base));
-+ if (ring->unaligned)
-+ bgmac_dma_rx_enable(bgmac, ring);
-
- for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
- j++, dma_desc++) {
-@@ -536,6 +553,7 @@ static void bgmac_dma_init(struct bgmac
- }
-
- bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
-+ ring->index_base +
- ring->num_slots * sizeof(struct bgmac_dma_desc));
-
- ring->start = 0;
---- a/drivers/net/ethernet/broadcom/bgmac.h
-+++ b/drivers/net/ethernet/broadcom/bgmac.h
-@@ -386,6 +386,8 @@ struct bgmac_dma_ring {
- u16 mmio_base;
- struct bgmac_dma_desc *cpu_base;
- dma_addr_t dma_base;
-+ u32 index_base; /* Used for unaligned rings only, otherwise 0 */
-+ bool unaligned;
-
- struct bgmac_slot_info slots[BGMAC_RX_RING_SLOTS];
- };
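
The core of the removed patch is the index arithmetic: on cores that need unaligned addressing, the hardware index and status registers carry the low bits of the descriptor's bus address rather than a plain byte offset from the ring base, so the driver biases the value it writes by ring->index_base and subtracts the same value when it reads the status pointer back. A minimal standalone sketch of that arithmetic, with simplified types and illustrative mask/descriptor-size constants (the real values come from bgmac.h and sizeof(struct bgmac_dma_desc)):

#include <assert.h>
#include <stdint.h>

/* Illustrative constants; the driver uses BGMAC_DMA_TX_STATDPTR /
 * BGMAC_DMA_RX_STATDPTR and sizeof(struct bgmac_dma_desc) instead. */
#define DMA_STATDPTR_MASK	0x00000fffu
#define DMA_DESC_SIZE		16u

/* Value written to BGMAC_DMA_TX_INDEX / BGMAC_DMA_RX_INDEX: a plain byte
 * offset for aligned rings (index_base == 0), or that offset on top of
 * the ring's low 32-bit bus address for unaligned rings. */
static uint32_t slot_to_hw_index(uint32_t slot, uint32_t index_base)
{
	return index_base + slot * DMA_DESC_SIZE;
}

/* Recover a slot number from the status register's descriptor-pointer
 * field, mirroring the mask/subtract/mask sequence in the patch. */
static uint32_t hw_status_to_slot(uint32_t status, uint32_t index_base)
{
	uint32_t ptr = status & DMA_STATDPTR_MASK;

	ptr -= index_base;		/* no-op when index_base is 0 */
	ptr &= DMA_STATDPTR_MASK;	/* keep the wrapped result in range */
	return ptr / DMA_DESC_SIZE;
}

int main(void)
{
	/* Hypothetical unaligned ring: its low 32-bit bus address is only
	 * descriptor-aligned, not aligned to the ring size. */
	uint32_t index_base = 0xcf004240u;
	uint32_t slot;

	for (slot = 0; slot < 64; slot++)
		assert(hw_status_to_slot(slot_to_hw_index(slot, index_base),
					 index_base) == slot);
	return 0;
}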
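
The other half of the patch is ordering in bgmac_dma_init(): aligned rings keep the old behaviour of enabling the DMA channel before the ring base is programmed, while unaligned rings are enabled only after BGMAC_DMA_*_RINGLO/RINGHI have been written. Condensed from the TX hunks above into a single sketch; the wrapper function is hypothetical, but bgmac_write(), bgmac_dma_tx_enable(), lower_32_bits()/upper_32_bits() and the register names are the ones the driver already uses:

/* Hypothetical helper condensing the TX-side bgmac_dma_init() change;
 * not a drop-in function from the driver. */
static void bgmac_dma_tx_ring_init(struct bgmac *bgmac,
				   struct bgmac_dma_ring *ring)
{
	/* Aligned rings: enable first, as the driver always did. */
	if (!ring->unaligned)
		bgmac_dma_tx_enable(bgmac, ring);

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
		    lower_32_bits(ring->dma_base));
	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
		    upper_32_bits(ring->dma_base));

	/* Unaligned rings: enable only after the base is programmed. */
	if (ring->unaligned)
		bgmac_dma_tx_enable(bgmac, ring);

	ring->start = 0;
	ring->end = 0;	/* points at the slot that should *not* be read */
}

The RX side follows the same pattern, with the extra step of writing BGMAC_DMA_RX_INDEX to index_base plus the full ring size, as in the last RX hunk above.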