path: root/target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
author: Felix Fietkau <nbd@openwrt.org> 2016-02-11 15:02:00 +0000
committer: Felix Fietkau <nbd@openwrt.org> 2016-02-11 15:02:00 +0000
commit: 46187031e4e4b1cd15c54cc2e421de465d03c0c7 (patch)
tree: d3a94f33a2fd2cbba51b946829aba7c5dce6b0f6 /target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
parent: 17eee004fca7d5cf11d49b12ec288155642071e9 (diff)
ag71xx: store ring size order instead of ring size to avoid div/mod
Signed-off-by: Felix Fietkau <nbd@openwrt.org>
git-svn-id: svn://svn.openwrt.org/openwrt/trunk@48693 3c298f89-4303-0410-b956-a3cf2f4a3e73
Diffstat (limited to 'target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c')
-rw-r--r--  target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c | 77
1 file changed, 43 insertions, 34 deletions
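
The change is the classic power-of-two ring trick: the ring stores its size as a log2 order, so the wrap-around index can be taken with a bit mask instead of a modulo (integer division is comparatively expensive on the MIPS cores these devices use). A minimal sketch of the idiom as it recurs throughout the patch (field names as in the driver, surrounding struct assumed):

/* ring->order holds log2 of the ring size, so the size is a single
 * shift and the wrap mask is one subtraction away. */
int ring_size = BIT(ring->order);      /* e.g. order 8 -> 256 descriptors */
int ring_mask = ring_size - 1;

/* For a power-of-two size, (x & mask) == (x % size), so every
 * "ring->curr % ring->size" in the old code becomes: */
unsigned int i = ring->curr & ring_mask;
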
diff --git a/target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c b/target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
index 2f270c024f..265c089eb6 100644
--- a/target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
+++ b/target/linux/ar71xx/files/drivers/net/ethernet/atheros/ag71xx/ag71xx_main.c
@@ -99,26 +99,20 @@ static inline void ag71xx_dump_intr(struct ag71xx *ag, char *label, u32 intr)
static void ag71xx_ring_free(struct ag71xx_ring *ring)
{
+ int ring_size = BIT(ring->order);
kfree(ring->buf);
if (ring->descs_cpu)
- dma_free_coherent(NULL, ring->size * ring->desc_size,
+ dma_free_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
ring->descs_cpu, ring->descs_dma);
}
static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
{
+ int ring_size = BIT(ring->order);
int err;
- ring->desc_size = sizeof(struct ag71xx_desc);
- if (ring->desc_size % cache_line_size()) {
- DBG("ag71xx: ring %p, desc size %u rounded to %u\n",
- ring, ring->desc_size,
- roundup(ring->desc_size, cache_line_size()));
- ring->desc_size = roundup(ring->desc_size, cache_line_size());
- }
-
- ring->descs_cpu = dma_alloc_coherent(NULL, ring->size * ring->desc_size,
+ ring->descs_cpu = dma_alloc_coherent(NULL, ring_size * AG71XX_DESC_SIZE,
&ring->descs_dma, GFP_ATOMIC);
if (!ring->descs_cpu) {
err = -ENOMEM;
@@ -126,7 +120,7 @@ static int ag71xx_ring_alloc(struct ag71xx_ring *ring)
}
- ring->buf = kzalloc(ring->size * sizeof(*ring->buf), GFP_KERNEL);
+ ring->buf = kzalloc(ring_size * sizeof(*ring->buf), GFP_KERNEL);
if (!ring->buf) {
err = -ENOMEM;
goto err;
@@ -142,11 +136,12 @@ static void ag71xx_ring_tx_clean(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->tx_ring;
struct net_device *dev = ag->dev;
+ int ring_mask = BIT(ring->order) - 1;
u32 bytes_compl = 0, pkts_compl = 0;
while (ring->curr != ring->dirty) {
struct ag71xx_desc *desc;
- u32 i = ring->dirty % ring->size;
+ u32 i = ring->dirty & ring_mask;
desc = ag71xx_ring_desc(ring, i);
if (!ag71xx_desc_empty(desc)) {
@@ -172,13 +167,15 @@ static void ag71xx_ring_tx_clean(struct ag71xx *ag)
static void ag71xx_ring_tx_init(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_size = BIT(ring->order);
+ int ring_mask = ring_size - 1;
int i;
- for (i = 0; i < ring->size; i++) {
+ for (i = 0; i < ring_size; i++) {
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % ring->size));
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
desc->ctrl = DESC_EMPTY;
ring->buf[i].skb = NULL;
@@ -195,12 +192,13 @@ static void ag71xx_ring_tx_init(struct ag71xx *ag)
static void ag71xx_ring_rx_clean(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_size = BIT(ring->order);
int i;
if (!ring->buf)
return;
- for (i = 0; i < ring->size; i++)
+ for (i = 0; i < ring_size; i++)
if (ring->buf[i].rx_buf) {
dma_unmap_single(&ag->dev->dev, ring->buf[i].dma_addr,
ag->rx_buf_size, DMA_FROM_DEVICE);
@@ -252,22 +250,24 @@ static bool ag71xx_fill_rx_buf(struct ag71xx *ag, struct ag71xx_buf *buf,
static int ag71xx_ring_rx_init(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_size = BIT(ring->order);
+ int ring_mask = BIT(ring->order) - 1;
unsigned int i;
int ret;
int offset = ag71xx_buffer_offset(ag);
ret = 0;
- for (i = 0; i < ring->size; i++) {
+ for (i = 0; i < ring_size; i++) {
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
desc->next = (u32) (ring->descs_dma +
- ring->desc_size * ((i + 1) % ring->size));
+ AG71XX_DESC_SIZE * ((i + 1) & ring_mask));
DBG("ag71xx: RX desc at %p, next is %08x\n",
desc, desc->next);
}
- for (i = 0; i < ring->size; i++) {
+ for (i = 0; i < ring_size; i++) {
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
if (!ag71xx_fill_rx_buf(ag, &ring->buf[i], offset,
@@ -291,6 +291,7 @@ static int ag71xx_ring_rx_init(struct ag71xx *ag)
static int ag71xx_ring_rx_refill(struct ag71xx *ag)
{
struct ag71xx_ring *ring = &ag->rx_ring;
+ int ring_mask = BIT(ring->order) - 1;
unsigned int count;
int offset = ag71xx_buffer_offset(ag);
@@ -299,7 +300,7 @@ static int ag71xx_ring_rx_refill(struct ag71xx *ag)
struct ag71xx_desc *desc;
unsigned int i;
- i = ring->dirty % ring->size;
+ i = ring->dirty & ring_mask;
desc = ag71xx_ring_desc(ring, i);
if (!ring->buf[i].rx_buf &&
@@ -728,6 +729,7 @@ static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
{
int i;
struct ag71xx_desc *desc;
+ int ring_mask = BIT(ring->order) - 1;
int ndesc = 0;
int split = ring->desc_split;
@@ -737,7 +739,7 @@ static int ag71xx_fill_dma_desc(struct ag71xx_ring *ring, u32 addr, int len)
while (len > 0) {
unsigned int cur_len = len;
- i = (ring->curr + ndesc) % ring->size;
+ i = (ring->curr + ndesc) & ring_mask;
desc = ag71xx_ring_desc(ring, i);
if (!ag71xx_desc_empty(desc))
@@ -777,6 +779,8 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
{
struct ag71xx *ag = netdev_priv(dev);
struct ag71xx_ring *ring = &ag->tx_ring;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
struct ag71xx_desc *desc;
dma_addr_t dma_addr;
int i, n, ring_min;
@@ -792,7 +796,7 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
dma_addr = dma_map_single(&dev->dev, skb->data, skb->len,
DMA_TO_DEVICE);
- i = ring->curr % ring->size;
+ i = ring->curr & ring_mask;
desc = ag71xx_ring_desc(ring, i);
/* setup descriptor fields */
@@ -800,7 +804,7 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
if (n < 0)
goto err_drop_unmap;
- i = (ring->curr + n - 1) % ring->size;
+ i = (ring->curr + n - 1) & ring_mask;
ring->buf[i].len = skb->len;
ring->buf[i].skb = skb;
ring->buf[i].timestamp = jiffies;
@@ -817,7 +821,7 @@ static netdev_tx_t ag71xx_hard_start_xmit(struct sk_buff *skb,
if (ring->desc_split)
ring_min *= AG71XX_TX_RING_DS_PER_PKT;
- if (ring->curr - ring->dirty >= ring->size - ring_min) {
+ if (ring->curr - ring->dirty >= ring_size - ring_min) {
DBG("%s: tx queue full\n", dev->name);
netif_stop_queue(dev);
}
@@ -938,6 +942,8 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
{
struct ag71xx_ring *ring = &ag->tx_ring;
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
int sent = 0;
int bytes_compl = 0;
int n = 0;
@@ -945,7 +951,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
DBG("%s: processing TX ring\n", ag->dev->name);
while (ring->dirty + n != ring->curr) {
- unsigned int i = (ring->dirty + n) % ring->size;
+ unsigned int i = (ring->dirty + n) & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
struct sk_buff *skb = ring->buf[i].skb;
@@ -983,7 +989,7 @@ static int ag71xx_tx_packets(struct ag71xx *ag, bool flush)
return 0;
netdev_completed_queue(ag->dev, sent, bytes_compl);
- if ((ring->curr - ring->dirty) < (ring->size * 3) / 4)
+ if ((ring->curr - ring->dirty) < (ring_size * 3) / 4)
netif_wake_queue(ag->dev);
return sent;
@@ -995,6 +1001,8 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
struct ag71xx_ring *ring = &ag->rx_ring;
int offset = ag71xx_buffer_offset(ag);
unsigned int pktlen_mask = ag->desc_pktlen_mask;
+ int ring_mask = BIT(ring->order) - 1;
+ int ring_size = BIT(ring->order);
struct sk_buff_head queue;
struct sk_buff *skb;
int done = 0;
@@ -1005,7 +1013,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
skb_queue_head_init(&queue);
while (done < limit) {
- unsigned int i = ring->curr % ring->size;
+ unsigned int i = ring->curr & ring_mask;
struct ag71xx_desc *desc = ag71xx_ring_desc(ring, i);
int pktlen;
int err = 0;
@@ -1013,7 +1021,7 @@ static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
if (ag71xx_desc_empty(desc))
break;
- if ((ring->dirty + ring->size) == ring->curr) {
+ if ((ring->dirty + ring_size) == ring->curr) {
ag71xx_assert(0);
break;
}
@@ -1075,7 +1083,8 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
struct net_device *dev = ag->dev;
- struct ag71xx_ring *rx_ring;
+ struct ag71xx_ring *rx_ring = &ag->rx_ring;
+ int rx_ring_size = BIT(rx_ring->order);
unsigned long flags;
u32 status;
int tx_done;
@@ -1089,8 +1098,7 @@ static int ag71xx_poll(struct napi_struct *napi, int limit)
ag71xx_debugfs_update_napi_stats(ag, rx_done, tx_done);
- rx_ring = &ag->rx_ring;
- if (rx_ring->buf[rx_ring->dirty % rx_ring->size].rx_buf == NULL)
+ if (rx_ring->buf[rx_ring->dirty % rx_ring_size].rx_buf == NULL)
goto oom;
status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
@@ -1241,7 +1249,7 @@ static int ag71xx_probe(struct platform_device *pdev)
struct resource *res;
struct ag71xx *ag;
struct ag71xx_platform_data *pdata;
- int err;
+ int tx_size, err;
pdata = pdev->dev.platform_data;
if (!pdata) {
@@ -1308,16 +1316,17 @@ static int ag71xx_probe(struct platform_device *pdev)
ag->oom_timer.data = (unsigned long) dev;
ag->oom_timer.function = ag71xx_oom_timer_handler;
- ag->tx_ring.size = AG71XX_TX_RING_SIZE_DEFAULT;
- ag->rx_ring.size = AG71XX_RX_RING_SIZE_DEFAULT;
+ tx_size = AG71XX_TX_RING_SIZE_DEFAULT;
+ ag->rx_ring.order = ag71xx_ring_size_order(AG71XX_RX_RING_SIZE_DEFAULT);
ag->max_frame_len = pdata->max_frame_len;
ag->desc_pktlen_mask = pdata->desc_pktlen_mask;
if (!pdata->is_ar724x && !pdata->is_ar91xx) {
ag->tx_ring.desc_split = AG71XX_TX_RING_SPLIT;
- ag->tx_ring.size *= AG71XX_TX_RING_DS_PER_PKT;
+ tx_size *= AG71XX_TX_RING_DS_PER_PKT;
}
+ ag->tx_ring.order = ag71xx_ring_size_order(tx_size);
ag->stop_desc = dma_alloc_coherent(NULL,
sizeof(struct ag71xx_desc), &ag->stop_desc_dma, GFP_KERNEL);
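
The helper ag71xx_ring_size_order() called in the probe hunk is not part of this file's diff; it is presumably added to the driver header in the same commit and simply computes the log2 of a power-of-two ring size. A hedged sketch of what such a helper could look like, using the kernel's fls() (the actual implementation is an assumption, not shown here):

/* Assumed helper: return log2(size) for a power-of-two ring size,
 * e.g. 256 -> 8. fls() returns the 1-based index of the highest set bit. */
static inline unsigned int ag71xx_ring_size_order(unsigned int size)
{
	return fls(size) - 1;
}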