From a7afeb31421bd6810c903468b23734baa6999438 Mon Sep 17 00:00:00 2001
From: Adrian Schmutzler
Date: Mon, 19 Oct 2020 15:30:44 +0200
Subject: ipq40xx: remove support for kernel 4.19

The target uses 5.4 as default kernel since 03/2020. Kernel 4.19
support is not really maintained anymore, it does not seem to be
needed, and removing it will make upcoming driver updates easier.

Thus, remove it.

Signed-off-by: Adrian Schmutzler
---
 .../drivers/net/ethernet/qualcomm/essedma/Makefile |    9 +
 .../drivers/net/ethernet/qualcomm/essedma/edma.c   | 2173 ++++++++++++++++++++
 .../drivers/net/ethernet/qualcomm/essedma/edma.h   |  455 ++++
 .../net/ethernet/qualcomm/essedma/edma_axi.c       | 1359 ++++++++++++
 .../net/ethernet/qualcomm/essedma/edma_ethtool.c   |  384 ++++
 .../net/ethernet/qualcomm/essedma/ess_edma.h       |  389 ++++
 .../linux/ipq40xx/files/drivers/net/phy/ar40xx.c   | 2118 +++++++++++++++++++
 .../linux/ipq40xx/files/drivers/net/phy/ar40xx.h   |  342 +++
 8 files changed, 7229 insertions(+)
 create mode 100644 target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/Makefile
 create mode 100644 target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.c
 create mode 100644 target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.h
 create mode 100644 target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_axi.c
 create mode 100644 target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c
 create mode 100644 target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/ess_edma.h
 create mode 100644 target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c
 create mode 100644 target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h

(limited to 'target/linux/ipq40xx/files/drivers')

diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/Makefile b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/Makefile
new file mode 100644
index 0000000000..4e6cd6505e
--- /dev/null
+++ b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/Makefile
@@ -0,0 +1,9 @@
+#
+## Makefile for the Qualcomm Atheros ethernet edma driver
+#
+
+
+obj-$(CONFIG_ESSEDMA) += essedma.o
+
+essedma-objs := edma_axi.o edma.o edma_ethtool.o
+
diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.c b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.c
new file mode 100644
index 0000000000..5f2630ae50
--- /dev/null
+++ b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.c
@@ -0,0 +1,2173 @@
+/*
+ * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */ + +#include +#include +#include "ess_edma.h" +#include "edma.h" + +extern struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED]; +bool edma_stp_rstp; +u16 edma_ath_eth_type; + +/* edma_skb_priority_offset() + * get edma skb priority + */ +static unsigned int edma_skb_priority_offset(struct sk_buff *skb) +{ + return (skb->priority >> 2) & 1; +} + +/* edma_alloc_tx_ring() + * Allocate Tx descriptors ring + */ +static int edma_alloc_tx_ring(struct edma_common_info *edma_cinfo, + struct edma_tx_desc_ring *etdr) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + /* Initialize ring */ + etdr->size = sizeof(struct edma_sw_desc) * etdr->count; + etdr->sw_next_to_fill = 0; + etdr->sw_next_to_clean = 0; + + /* Allocate SW descriptors */ + etdr->sw_desc = vzalloc(etdr->size); + if (!etdr->sw_desc) { + dev_err(&pdev->dev, "buffer alloc of tx ring failed=%p", etdr); + return -ENOMEM; + } + + /* Allocate HW descriptors */ + etdr->hw_desc = dma_alloc_coherent(&pdev->dev, etdr->size, &etdr->dma, + GFP_KERNEL); + if (!etdr->hw_desc) { + dev_err(&pdev->dev, "descriptor allocation for tx ring failed"); + vfree(etdr->sw_desc); + return -ENOMEM; + } + + return 0; +} + +/* edma_free_tx_ring() + * Free tx rings allocated by edma_alloc_tx_rings + */ +static void edma_free_tx_ring(struct edma_common_info *edma_cinfo, + struct edma_tx_desc_ring *etdr) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + if (likely(etdr->dma)) + dma_free_coherent(&pdev->dev, etdr->size, etdr->hw_desc, + etdr->dma); + + vfree(etdr->sw_desc); + etdr->sw_desc = NULL; +} + +/* edma_alloc_rx_ring() + * allocate rx descriptor ring + */ +static int edma_alloc_rx_ring(struct edma_common_info *edma_cinfo, + struct edma_rfd_desc_ring *erxd) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + erxd->size = sizeof(struct edma_sw_desc) * erxd->count; + erxd->sw_next_to_fill = 0; + erxd->sw_next_to_clean = 0; + + /* Allocate SW descriptors */ + erxd->sw_desc = vzalloc(erxd->size); + if (!erxd->sw_desc) + return -ENOMEM; + + /* Alloc HW descriptors */ + erxd->hw_desc = dma_alloc_coherent(&pdev->dev, erxd->size, &erxd->dma, + GFP_KERNEL); + if (!erxd->hw_desc) { + vfree(erxd->sw_desc); + return -ENOMEM; + } + + /* Initialize pending_fill */ + erxd->pending_fill = 0; + + return 0; +} + +/* edma_free_rx_ring() + * Free rx ring allocated by alloc_rx_ring + */ +static void edma_free_rx_ring(struct edma_common_info *edma_cinfo, + struct edma_rfd_desc_ring *rxdr) +{ + struct platform_device *pdev = edma_cinfo->pdev; + + if (likely(rxdr->dma)) + dma_free_coherent(&pdev->dev, rxdr->size, rxdr->hw_desc, + rxdr->dma); + + vfree(rxdr->sw_desc); + rxdr->sw_desc = NULL; +} + +/* edma_configure_tx() + * Configure transmission control data + */ +static void edma_configure_tx(struct edma_common_info *edma_cinfo) +{ + u32 txq_ctrl_data; + + txq_ctrl_data = (EDMA_TPD_BURST << EDMA_TXQ_NUM_TPD_BURST_SHIFT); + txq_ctrl_data |= EDMA_TXQ_CTRL_TPD_BURST_EN; + txq_ctrl_data |= (EDMA_TXF_BURST << EDMA_TXQ_TXF_BURST_NUM_SHIFT); + edma_write_reg(EDMA_REG_TXQ_CTRL, txq_ctrl_data); +} + + +/* edma_configure_rx() + * configure reception control data + */ +static void edma_configure_rx(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + u32 rss_type, rx_desc1, rxq_ctrl_data; + + /* Set RSS type */ + rss_type = hw->rss_type; + edma_write_reg(EDMA_REG_RSS_TYPE, rss_type); + + /* Set RFD burst number */ + rx_desc1 = (EDMA_RFD_BURST << EDMA_RXQ_RFD_BURST_NUM_SHIFT); + + /* Set RFD prefetch threshold */ + rx_desc1 |= 
(EDMA_RFD_THR << EDMA_RXQ_RFD_PF_THRESH_SHIFT); + + /* Set RFD in host ring low threshold to generte interrupt */ + rx_desc1 |= (EDMA_RFD_LTHR << EDMA_RXQ_RFD_LOW_THRESH_SHIFT); + edma_write_reg(EDMA_REG_RX_DESC1, rx_desc1); + + /* Set Rx FIFO threshold to start to DMA data to host */ + rxq_ctrl_data = EDMA_FIFO_THRESH_128_BYTE; + + /* Set RX remove vlan bit */ + rxq_ctrl_data |= EDMA_RXQ_CTRL_RMV_VLAN; + + edma_write_reg(EDMA_REG_RXQ_CTRL, rxq_ctrl_data); +} + +/* edma_alloc_rx_buf() + * does skb allocation for the received packets. + */ +static int edma_alloc_rx_buf(struct edma_common_info + *edma_cinfo, + struct edma_rfd_desc_ring *erdr, + int cleaned_count, int queue_id) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_rx_free_desc *rx_desc; + struct edma_sw_desc *sw_desc; + struct sk_buff *skb; + unsigned int i; + u16 prod_idx, length; + u32 reg_data; + + if (cleaned_count > erdr->count) + cleaned_count = erdr->count - 1; + + i = erdr->sw_next_to_fill; + + while (cleaned_count) { + sw_desc = &erdr->sw_desc[i]; + length = edma_cinfo->rx_head_buffer_len; + + if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_REUSE) { + skb = sw_desc->skb; + + /* Clear REUSE Flag */ + sw_desc->flags &= ~EDMA_SW_DESC_FLAG_SKB_REUSE; + } else { + /* alloc skb */ + skb = netdev_alloc_skb_ip_align(edma_netdev[0], length); + if (!skb) { + /* Better luck next round */ + break; + } + } + + if (edma_cinfo->page_mode) { + struct page *pg = alloc_page(GFP_ATOMIC); + + if (!pg) { + dev_kfree_skb_any(skb); + break; + } + + sw_desc->dma = dma_map_page(&pdev->dev, pg, 0, + edma_cinfo->rx_page_buffer_len, + DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + sw_desc->dma)) { + __free_page(pg); + dev_kfree_skb_any(skb); + break; + } + + skb_fill_page_desc(skb, 0, pg, 0, + edma_cinfo->rx_page_buffer_len); + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_FRAG; + sw_desc->length = edma_cinfo->rx_page_buffer_len; + } else { + sw_desc->dma = dma_map_single(&pdev->dev, skb->data, + length, DMA_FROM_DEVICE); + if (dma_mapping_error(&pdev->dev, + sw_desc->dma)) { + dev_kfree_skb_any(skb); + break; + } + + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_HEAD; + sw_desc->length = length; + } + + /* Update the buffer info */ + sw_desc->skb = skb; + rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[i]); + rx_desc->buffer_addr = cpu_to_le64(sw_desc->dma); + if (++i == erdr->count) + i = 0; + cleaned_count--; + } + + erdr->sw_next_to_fill = i; + + if (i == 0) + prod_idx = erdr->count - 1; + else + prod_idx = i - 1; + + /* Update the producer index */ + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), ®_data); + reg_data &= ~EDMA_RFD_PROD_IDX_BITS; + reg_data |= prod_idx; + edma_write_reg(EDMA_REG_RFD_IDX_Q(queue_id), reg_data); + + /* If we couldn't allocate all the buffers + * we increment the alloc failure counters + */ + if (cleaned_count) + edma_cinfo->edma_ethstats.rx_alloc_fail_ctr++; + + return cleaned_count; +} + +/* edma_init_desc() + * update descriptor ring size, buffer and producer/consumer index + */ +static void edma_init_desc(struct edma_common_info *edma_cinfo) +{ + struct edma_rfd_desc_ring *rfd_ring; + struct edma_tx_desc_ring *etdr; + int i = 0, j = 0; + u32 data = 0; + u16 hw_cons_idx = 0; + + /* Set the base address of every TPD ring. 
*/ + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + etdr = edma_cinfo->tpd_ring[i]; + + /* Update descriptor ring base address */ + edma_write_reg(EDMA_REG_TPD_BASE_ADDR_Q(i), (u32)etdr->dma); + edma_read_reg(EDMA_REG_TPD_IDX_Q(i), &data); + + /* Calculate hardware consumer index */ + hw_cons_idx = (data >> EDMA_TPD_CONS_IDX_SHIFT) & 0xffff; + etdr->sw_next_to_fill = hw_cons_idx; + etdr->sw_next_to_clean = hw_cons_idx; + data &= ~(EDMA_TPD_PROD_IDX_MASK << EDMA_TPD_PROD_IDX_SHIFT); + data |= hw_cons_idx; + + /* update producer index */ + edma_write_reg(EDMA_REG_TPD_IDX_Q(i), data); + + /* update SW consumer index register */ + edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(i), hw_cons_idx); + + /* Set TPD ring size */ + edma_write_reg(EDMA_REG_TPD_RING_SIZE, + edma_cinfo->tx_ring_count & + EDMA_TPD_RING_SIZE_MASK); + } + + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + rfd_ring = edma_cinfo->rfd_ring[j]; + /* Update Receive Free descriptor ring base address */ + edma_write_reg(EDMA_REG_RFD_BASE_ADDR_Q(j), + (u32)(rfd_ring->dma)); + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + + data = edma_cinfo->rx_head_buffer_len; + if (edma_cinfo->page_mode) + data = edma_cinfo->rx_page_buffer_len; + + data &= EDMA_RX_BUF_SIZE_MASK; + data <<= EDMA_RX_BUF_SIZE_SHIFT; + + /* Update RFD ring size and RX buffer size */ + data |= (edma_cinfo->rx_ring_count & EDMA_RFD_RING_SIZE_MASK) + << EDMA_RFD_RING_SIZE_SHIFT; + + edma_write_reg(EDMA_REG_RX_DESC0, data); + + /* Disable TX FIFO low watermark and high watermark */ + edma_write_reg(EDMA_REG_TXF_WATER_MARK, 0); + + /* Load all of base address above */ + edma_read_reg(EDMA_REG_TX_SRAM_PART, &data); + data |= 1 << EDMA_LOAD_PTR_SHIFT; + edma_write_reg(EDMA_REG_TX_SRAM_PART, data); +} + +/* edma_receive_checksum + * Api to check checksum on receive packets + */ +static void edma_receive_checksum(struct edma_rx_return_desc *rd, + struct sk_buff *skb) +{ + skb_checksum_none_assert(skb); + + /* check the RRD IP/L4 checksum bit to see if + * its set, which in turn indicates checksum + * failure. 
+ */ + if (rd->rrd6 & EDMA_RRD_CSUM_FAIL_MASK) + return; + + skb->ip_summed = CHECKSUM_UNNECESSARY; +} + +/* edma_clean_rfd() + * clean up rx resourcers on error + */ +static void edma_clean_rfd(struct edma_rfd_desc_ring *erdr, u16 index) +{ + struct edma_rx_free_desc *rx_desc; + struct edma_sw_desc *sw_desc; + + rx_desc = (&((struct edma_rx_free_desc *)(erdr->hw_desc))[index]); + sw_desc = &erdr->sw_desc[index]; + if (sw_desc->skb) { + dev_kfree_skb_any(sw_desc->skb); + sw_desc->skb = NULL; + } + + memset(rx_desc, 0, sizeof(struct edma_rx_free_desc)); +} + +/* edma_rx_complete_fraglist() + * Complete Rx processing for fraglist skbs + */ +static void edma_rx_complete_stp_rstp(struct sk_buff *skb, int port_id, struct edma_rx_return_desc *rd) +{ + int i; + u32 priority; + u16 port_type; + u8 mac_addr[EDMA_ETH_HDR_LEN]; + + port_type = (rd->rrd1 >> EDMA_RRD_PORT_TYPE_SHIFT) + & EDMA_RRD_PORT_TYPE_MASK; + /* if port type is 0x4, then only proceed with + * other stp/rstp calculation + */ + if (port_type == EDMA_RX_ATH_HDR_RSTP_PORT_TYPE) { + u8 bpdu_mac[6] = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x00}; + + /* calculate the frame priority */ + priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT) + & EDMA_RRD_PRIORITY_MASK; + + for (i = 0; i < EDMA_ETH_HDR_LEN; i++) + mac_addr[i] = skb->data[i]; + + /* Check if destination mac addr is bpdu addr */ + if (!memcmp(mac_addr, bpdu_mac, 6)) { + /* destination mac address is BPDU + * destination mac address, then add + * atheros header to the packet. + */ + u16 athr_hdr = (EDMA_RX_ATH_HDR_VERSION << EDMA_RX_ATH_HDR_VERSION_SHIFT) | + (priority << EDMA_RX_ATH_HDR_PRIORITY_SHIFT) | + (EDMA_RX_ATH_HDR_RSTP_PORT_TYPE << EDMA_RX_ATH_PORT_TYPE_SHIFT) | port_id; + skb_push(skb, 4); + memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); + *(uint16_t *)&skb->data[12] = htons(edma_ath_eth_type); + *(uint16_t *)&skb->data[14] = htons(athr_hdr); + } + } +} + +/* + * edma_rx_complete_fraglist() + * Complete Rx processing for fraglist skbs + */ +static int edma_rx_complete_fraglist(struct sk_buff *skb, u16 num_rfds, u16 length, u32 sw_next_to_clean, + u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_hw *hw = &edma_cinfo->hw; + struct sk_buff *skb_temp; + struct edma_sw_desc *sw_desc; + int i; + u16 size_remaining; + + skb->data_len = 0; + skb->tail += (hw->rx_head_buff_size - 16); + skb->len = skb->truesize = length; + size_remaining = length - (hw->rx_head_buff_size - 16); + + /* clean-up all related sw_descs */ + for (i = 1; i < num_rfds; i++) { + struct sk_buff *skb_prev; + sw_desc = &erdr->sw_desc[sw_next_to_clean]; + skb_temp = sw_desc->skb; + + dma_unmap_single(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + + if (size_remaining < hw->rx_head_buff_size) + skb_put(skb_temp, size_remaining); + else + skb_put(skb_temp, hw->rx_head_buff_size); + + /* + * If we are processing the first rfd, we link + * skb->frag_list to the skb corresponding to the + * first RFD + */ + if (i == 1) + skb_shinfo(skb)->frag_list = skb_temp; + else + skb_prev->next = skb_temp; + skb_prev = skb_temp; + skb_temp->next = NULL; + + skb->data_len += skb_temp->len; + size_remaining -= skb_temp->len; + + /* Increment SW index */ + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); + (*cleaned_count)++; + } + + return sw_next_to_clean; +} + +/* edma_rx_complete_paged() + * Complete Rx processing for paged skbs + */ +static int edma_rx_complete_paged(struct sk_buff *skb, u16 
num_rfds, u16 length, u32 sw_next_to_clean, + u16 *cleaned_count, struct edma_rfd_desc_ring *erdr, struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct sk_buff *skb_temp; + struct edma_sw_desc *sw_desc; + int i; + u16 size_remaining; + + skb_frag_t *frag = &skb_shinfo(skb)->frags[0]; + + /* Setup skbuff fields */ + skb->len = length; + + if (likely(num_rfds <= 1)) { + skb->data_len = length; + skb->truesize += edma_cinfo->rx_page_buffer_len; + skb_fill_page_desc(skb, 0, skb_frag_page(frag), + 16, length); + } else { + skb_frag_size_sub(frag, 16); + skb->data_len = skb_frag_size(frag); + skb->truesize += edma_cinfo->rx_page_buffer_len; + size_remaining = length - skb_frag_size(frag); + + skb_fill_page_desc(skb, 0, skb_frag_page(frag), + 16, skb_frag_size(frag)); + + /* clean-up all related sw_descs */ + for (i = 1; i < num_rfds; i++) { + sw_desc = &erdr->sw_desc[sw_next_to_clean]; + skb_temp = sw_desc->skb; + frag = &skb_shinfo(skb_temp)->frags[0]; + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + + if (size_remaining < edma_cinfo->rx_page_buffer_len) + skb_frag_size_set(frag, size_remaining); + + skb_fill_page_desc(skb, i, skb_frag_page(frag), + 0, skb_frag_size(frag)); + + skb_shinfo(skb_temp)->nr_frags = 0; + dev_kfree_skb_any(skb_temp); + + skb->data_len += skb_frag_size(frag); + skb->truesize += edma_cinfo->rx_page_buffer_len; + size_remaining -= skb_frag_size(frag); + + /* Increment SW index */ + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); + (*cleaned_count)++; + } + } + + return sw_next_to_clean; +} + +/* + * edma_rx_complete() + * Main api called from the poll function to process rx packets. + */ +static u16 edma_rx_complete(struct edma_common_info *edma_cinfo, + int *work_done, int work_to_do, int queue_id, + struct napi_struct *napi) +{ + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_rfd_desc_ring *erdr = edma_cinfo->rfd_ring[queue_id]; + struct net_device *netdev; + struct edma_adapter *adapter; + struct edma_sw_desc *sw_desc; + struct sk_buff *skb; + struct edma_rx_return_desc *rd; + u16 hash_type, rrd[8], cleaned_count = 0, length = 0, num_rfds = 1, + sw_next_to_clean, hw_next_to_clean = 0, vlan = 0, ret_count = 0; + u32 data = 0; + u8 *vaddr; + int port_id, i, drop_count = 0; + u32 priority; + u16 count = erdr->count, rfd_avail; + u8 queue_to_rxid[8] = {0, 0, 1, 1, 2, 2, 3, 3}; + + cleaned_count = erdr->pending_fill; + sw_next_to_clean = erdr->sw_next_to_clean; + + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); + hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & + EDMA_RFD_CONS_IDX_MASK; + + do { + while (sw_next_to_clean != hw_next_to_clean) { + if (!work_to_do) + break; + + sw_desc = &erdr->sw_desc[sw_next_to_clean]; + skb = sw_desc->skb; + + /* Unmap the allocated buffer */ + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) + dma_unmap_single(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + else + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + + /* Get RRD */ + if (edma_cinfo->page_mode) { + vaddr = kmap_atomic(skb_frag_page(&skb_shinfo(skb)->frags[0])); + memcpy((uint8_t *)&rrd[0], vaddr, 16); + rd = (struct edma_rx_return_desc *)rrd; + kunmap_atomic(vaddr); + } else { + rd = (struct edma_rx_return_desc *)skb->data; + } + + /* Check if RRD is valid */ + if (!(rd->rrd7 & EDMA_RRD_DESC_VALID)) { + edma_clean_rfd(erdr, sw_next_to_clean); + sw_next_to_clean = (sw_next_to_clean + 1) & + (erdr->count 
- 1); + cleaned_count++; + continue; + } + + /* Get the number of RFDs from RRD */ + num_rfds = rd->rrd1 & EDMA_RRD_NUM_RFD_MASK; + + /* Get Rx port ID from switch */ + port_id = (rd->rrd1 >> EDMA_PORT_ID_SHIFT) & EDMA_PORT_ID_MASK; + if ((!port_id) || (port_id > EDMA_MAX_PORTID_SUPPORTED)) { + dev_err(&pdev->dev, "Invalid RRD source port bit set"); + for (i = 0; i < num_rfds; i++) { + edma_clean_rfd(erdr, sw_next_to_clean); + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); + cleaned_count++; + } + continue; + } + + /* check if we have a sink for the data we receive. + * If the interface isn't setup, we have to drop the + * incoming data for now. + */ + netdev = edma_cinfo->portid_netdev_lookup_tbl[port_id]; + if (!netdev) { + edma_clean_rfd(erdr, sw_next_to_clean); + sw_next_to_clean = (sw_next_to_clean + 1) & + (erdr->count - 1); + cleaned_count++; + continue; + } + adapter = netdev_priv(netdev); + + /* This code is added to handle a usecase where high + * priority stream and a low priority stream are + * received simultaneously on DUT. The problem occurs + * if one of the Rx rings is full and the corresponding + * core is busy with other stuff. This causes ESS CPU + * port to backpressure all incoming traffic including + * high priority one. We monitor free descriptor count + * on each CPU and whenever it reaches threshold (< 80), + * we drop all low priority traffic and let only high + * priotiy traffic pass through. We can hence avoid + * ESS CPU port to send backpressure on high priroity + * stream. + */ + priority = (rd->rrd1 >> EDMA_RRD_PRIORITY_SHIFT) + & EDMA_RRD_PRIORITY_MASK; + if (likely(!priority && !edma_cinfo->page_mode && (num_rfds <= 1))) { + rfd_avail = (count + sw_next_to_clean - hw_next_to_clean - 1) & (count - 1); + if (rfd_avail < EDMA_RFD_AVAIL_THR) { + sw_desc->flags = EDMA_SW_DESC_FLAG_SKB_REUSE; + sw_next_to_clean = (sw_next_to_clean + 1) & (erdr->count - 1); + adapter->stats.rx_dropped++; + cleaned_count++; + drop_count++; + if (drop_count == 3) { + work_to_do--; + (*work_done)++; + drop_count = 0; + } + if (cleaned_count >= EDMA_RX_BUFFER_WRITE) { + /* If buffer clean count reaches 16, we replenish HW buffers. */ + ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); + edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), + sw_next_to_clean); + cleaned_count = ret_count; + erdr->pending_fill = ret_count; + } + continue; + } + } + + work_to_do--; + (*work_done)++; + + /* Increment SW index */ + sw_next_to_clean = (sw_next_to_clean + 1) & + (erdr->count - 1); + + cleaned_count++; + + /* Get the packet size and allocate buffer */ + length = rd->rrd6 & EDMA_RRD_PKT_SIZE_MASK; + + if (edma_cinfo->page_mode) { + /* paged skb */ + sw_next_to_clean = edma_rx_complete_paged(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo); + if (!pskb_may_pull(skb, ETH_HLEN)) { + dev_kfree_skb_any(skb); + continue; + } + } else { + /* single or fraglist skb */ + + /* Addition of 16 bytes is required, as in the packet + * first 16 bytes are rrd descriptors, so actual data + * starts from an offset of 16. 
+ */ + skb_reserve(skb, 16); + if (likely((num_rfds <= 1) || !edma_cinfo->fraglist_mode)) { + skb_put(skb, length); + } else { + sw_next_to_clean = edma_rx_complete_fraglist(skb, num_rfds, length, sw_next_to_clean, &cleaned_count, erdr, edma_cinfo); + } + } + + if (edma_stp_rstp) { + edma_rx_complete_stp_rstp(skb, port_id, rd); + } + + skb->protocol = eth_type_trans(skb, netdev); + + /* Record Rx queue for RFS/RPS and fill flow hash from HW */ + skb_record_rx_queue(skb, queue_to_rxid[queue_id]); + if (netdev->features & NETIF_F_RXHASH) { + hash_type = (rd->rrd5 >> EDMA_HASH_TYPE_SHIFT); + if ((hash_type > EDMA_HASH_TYPE_START) && (hash_type < EDMA_HASH_TYPE_END)) + skb_set_hash(skb, rd->rrd2, PKT_HASH_TYPE_L4); + } + +#ifdef CONFIG_NF_FLOW_COOKIE + skb->flow_cookie = rd->rrd3 & EDMA_RRD_FLOW_COOKIE_MASK; +#endif + edma_receive_checksum(rd, skb); + + /* Process VLAN HW acceleration indication provided by HW */ + if (unlikely(adapter->default_vlan_tag != rd->rrd4)) { + vlan = rd->rrd4; + if (likely(rd->rrd7 & EDMA_RRD_CVLAN)) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vlan); + else if (rd->rrd1 & EDMA_RRD_SVLAN) + __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021AD), vlan); + } + + /* Update rx statistics */ + adapter->stats.rx_packets++; + adapter->stats.rx_bytes += length; + + /* Check if we reached refill threshold */ + if (cleaned_count >= EDMA_RX_BUFFER_WRITE) { + ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); + edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), + sw_next_to_clean); + cleaned_count = ret_count; + erdr->pending_fill = ret_count; + } + + /* At this point skb should go to stack */ + napi_gro_receive(napi, skb); + } + + /* Check if we still have NAPI budget */ + if (!work_to_do) + break; + + /* Read index once again since we still have NAPI budget */ + edma_read_reg(EDMA_REG_RFD_IDX_Q(queue_id), &data); + hw_next_to_clean = (data >> EDMA_RFD_CONS_IDX_SHIFT) & + EDMA_RFD_CONS_IDX_MASK; + } while (hw_next_to_clean != sw_next_to_clean); + + erdr->sw_next_to_clean = sw_next_to_clean; + + /* Refill here in case refill threshold wasn't reached */ + if (likely(cleaned_count)) { + ret_count = edma_alloc_rx_buf(edma_cinfo, erdr, cleaned_count, queue_id); + erdr->pending_fill = ret_count; + if (ret_count) { + if (net_ratelimit()) + dev_dbg(&pdev->dev, "Not all buffers was reallocated"); + } + + edma_write_reg(EDMA_REG_RX_SW_CONS_IDX_Q(queue_id), + erdr->sw_next_to_clean); + } + + return erdr->pending_fill; +} + +/* edma_delete_rfs_filter() + * Remove RFS filter from switch + */ +static int edma_delete_rfs_filter(struct edma_adapter *adapter, + struct edma_rfs_filter_node *filter_node) +{ + int res = -1; + + struct flow_keys *keys = &filter_node->keys; + + if (likely(adapter->set_rfs_rule)) + res = (*adapter->set_rfs_rule)(adapter->netdev, + flow_get_u32_src(keys), flow_get_u32_dst(keys), + keys->ports.src, keys->ports.dst, + keys->basic.ip_proto, filter_node->rq_id, 0); + + return res; +} + +/* edma_add_rfs_filter() + * Add RFS filter to switch + */ +static int edma_add_rfs_filter(struct edma_adapter *adapter, + struct flow_keys *keys, u16 rq, + struct edma_rfs_filter_node *filter_node) +{ + int res = -1; + + struct flow_keys *dest_keys = &filter_node->keys; + + memcpy(dest_keys, &filter_node->keys, sizeof(*dest_keys)); +/* + dest_keys->control = keys->control; + dest_keys->basic = keys->basic; + dest_keys->addrs = keys->addrs; + dest_keys->ports = keys->ports; + dest_keys.ip_proto = keys->ip_proto; +*/ + /* Call callback registered by ESS driver */ + if 
(likely(adapter->set_rfs_rule)) + res = (*adapter->set_rfs_rule)(adapter->netdev, flow_get_u32_src(keys), + flow_get_u32_dst(keys), keys->ports.src, keys->ports.dst, + keys->basic.ip_proto, rq, 1); + + return res; +} + +/* edma_rfs_key_search() + * Look for existing RFS entry + */ +static struct edma_rfs_filter_node *edma_rfs_key_search(struct hlist_head *h, + struct flow_keys *key) +{ + struct edma_rfs_filter_node *p; + + hlist_for_each_entry(p, h, node) + if (flow_get_u32_src(&p->keys) == flow_get_u32_src(key) && + flow_get_u32_dst(&p->keys) == flow_get_u32_dst(key) && + p->keys.ports.src == key->ports.src && + p->keys.ports.dst == key->ports.dst && + p->keys.basic.ip_proto == key->basic.ip_proto) + return p; + return NULL; +} + +/* edma_initialise_rfs_flow_table() + * Initialise EDMA RFS flow table + */ +static void edma_initialise_rfs_flow_table(struct edma_adapter *adapter) +{ + int i; + + spin_lock_init(&adapter->rfs.rfs_ftab_lock); + + /* Initialize EDMA flow hash table */ + for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) + INIT_HLIST_HEAD(&adapter->rfs.hlist_head[i]); + + adapter->rfs.max_num_filter = EDMA_RFS_FLOW_ENTRIES; + adapter->rfs.filter_available = adapter->rfs.max_num_filter; + adapter->rfs.hashtoclean = 0; + + /* Add timer to get periodic RFS updates from OS */ + timer_setup(&adapter->rfs.expire_rfs, edma_flow_may_expire, 0); + mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4); +} + +/* edma_free_rfs_flow_table() + * Free EDMA RFS flow table + */ +static void edma_free_rfs_flow_table(struct edma_adapter *adapter) +{ + int i; + + /* Remove sync timer */ + del_timer_sync(&adapter->rfs.expire_rfs); + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + + /* Free EDMA RFS table entries */ + adapter->rfs.filter_available = 0; + + /* Clean-up EDMA flow hash table */ + for (i = 0; i < EDMA_RFS_FLOW_ENTRIES; i++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct edma_rfs_filter_node *filter_node; + int res; + + hhead = &adapter->rfs.hlist_head[i]; + hlist_for_each_entry_safe(filter_node, tmp, hhead, node) { + res = edma_delete_rfs_filter(adapter, filter_node); + if (res < 0) + dev_warn(&adapter->netdev->dev, + "EDMA going down but RFS entry %d not allowed to be flushed by Switch", + filter_node->flow_id); + hlist_del(&filter_node->node); + kfree(filter_node); + } + } + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); +} + +/* edma_tx_unmap_and_free() + * clean TX buffer + */ +static inline void edma_tx_unmap_and_free(struct platform_device *pdev, + struct edma_sw_desc *sw_desc) +{ + struct sk_buff *skb = sw_desc->skb; + + if (likely((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD) || + (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAGLIST))) + /* unmap_single for skb head area */ + dma_unmap_single(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_TO_DEVICE); + else if (sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG) + /* unmap page for paged fragments */ + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_TO_DEVICE); + + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_LAST)) + dev_kfree_skb_any(skb); + + sw_desc->flags = 0; +} + +/* edma_tx_complete() + * Used to clean tx queues and update hardware and consumer index + */ +static void edma_tx_complete(struct edma_common_info *edma_cinfo, int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + struct edma_sw_desc *sw_desc; + struct platform_device *pdev = edma_cinfo->pdev; + int i; + + u16 sw_next_to_clean = etdr->sw_next_to_clean; + u16 hw_next_to_clean; + u32 data = 0; + + 
edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &data); + hw_next_to_clean = (data >> EDMA_TPD_CONS_IDX_SHIFT) & EDMA_TPD_CONS_IDX_MASK; + + /* clean the buffer here */ + while (sw_next_to_clean != hw_next_to_clean) { + sw_desc = &etdr->sw_desc[sw_next_to_clean]; + edma_tx_unmap_and_free(pdev, sw_desc); + sw_next_to_clean = (sw_next_to_clean + 1) & (etdr->count - 1); + } + + etdr->sw_next_to_clean = sw_next_to_clean; + + /* update the TPD consumer index register */ + edma_write_reg(EDMA_REG_TX_SW_CONS_IDX_Q(queue_id), sw_next_to_clean); + + /* Wake the queue if queue is stopped and netdev link is up */ + for (i = 0; i < EDMA_MAX_NETDEV_PER_QUEUE && etdr->nq[i] ; i++) { + if (netif_tx_queue_stopped(etdr->nq[i])) { + if ((etdr->netdev[i]) && netif_carrier_ok(etdr->netdev[i])) + netif_tx_wake_queue(etdr->nq[i]); + } + } +} + +/* edma_get_tx_buffer() + * Get sw_desc corresponding to the TPD + */ +static struct edma_sw_desc *edma_get_tx_buffer(struct edma_common_info *edma_cinfo, + struct edma_tx_desc *tpd, int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + return &etdr->sw_desc[tpd - (struct edma_tx_desc *)etdr->hw_desc]; +} + +/* edma_get_next_tpd() + * Return a TPD descriptor for transfer + */ +static struct edma_tx_desc *edma_get_next_tpd(struct edma_common_info *edma_cinfo, + int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + u16 sw_next_to_fill = etdr->sw_next_to_fill; + struct edma_tx_desc *tpd_desc = + (&((struct edma_tx_desc *)(etdr->hw_desc))[sw_next_to_fill]); + + etdr->sw_next_to_fill = (etdr->sw_next_to_fill + 1) & (etdr->count - 1); + + return tpd_desc; +} + +/* edma_tpd_available() + * Check number of free TPDs + */ +static inline u16 edma_tpd_available(struct edma_common_info *edma_cinfo, + int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + + u16 sw_next_to_fill; + u16 sw_next_to_clean; + u16 count = 0; + + sw_next_to_clean = etdr->sw_next_to_clean; + sw_next_to_fill = etdr->sw_next_to_fill; + + if (likely(sw_next_to_clean <= sw_next_to_fill)) + count = etdr->count; + + return count + sw_next_to_clean - sw_next_to_fill - 1; +} + +/* edma_tx_queue_get() + * Get the starting number of the queue + */ +static inline int edma_tx_queue_get(struct edma_adapter *adapter, + struct sk_buff *skb, int txq_id) +{ + /* skb->priority is used as an index to skb priority table + * and based on packet priority, correspong queue is assigned. 
+ */ + return adapter->tx_start_offset[txq_id] + edma_skb_priority_offset(skb); +} + +/* edma_tx_update_hw_idx() + * update the producer index for the ring transmitted + */ +static void edma_tx_update_hw_idx(struct edma_common_info *edma_cinfo, + struct sk_buff *skb, int queue_id) +{ + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[queue_id]; + u32 tpd_idx_data; + + /* Read and update the producer index */ + edma_read_reg(EDMA_REG_TPD_IDX_Q(queue_id), &tpd_idx_data); + tpd_idx_data &= ~EDMA_TPD_PROD_IDX_BITS; + tpd_idx_data |= (etdr->sw_next_to_fill & EDMA_TPD_PROD_IDX_MASK) + << EDMA_TPD_PROD_IDX_SHIFT; + + edma_write_reg(EDMA_REG_TPD_IDX_Q(queue_id), tpd_idx_data); +} + +/* edma_rollback_tx() + * Function to retrieve tx resources in case of error + */ +static void edma_rollback_tx(struct edma_adapter *adapter, + struct edma_tx_desc *start_tpd, int queue_id) +{ + struct edma_tx_desc_ring *etdr = adapter->edma_cinfo->tpd_ring[queue_id]; + struct edma_sw_desc *sw_desc; + struct edma_tx_desc *tpd = NULL; + u16 start_index, index; + + start_index = start_tpd - (struct edma_tx_desc *)(etdr->hw_desc); + + index = start_index; + while (index != etdr->sw_next_to_fill) { + tpd = (&((struct edma_tx_desc *)(etdr->hw_desc))[index]); + sw_desc = &etdr->sw_desc[index]; + edma_tx_unmap_and_free(adapter->pdev, sw_desc); + memset(tpd, 0, sizeof(struct edma_tx_desc)); + if (++index == etdr->count) + index = 0; + } + etdr->sw_next_to_fill = start_index; +} + +/* edma_tx_map_and_fill() + * gets called from edma_xmit_frame + * + * This is where the dma of the buffer to be transmitted + * gets mapped + */ +static int edma_tx_map_and_fill(struct edma_common_info *edma_cinfo, + struct edma_adapter *adapter, struct sk_buff *skb, int queue_id, + unsigned int flags_transmit, u16 from_cpu, u16 dp_bitmap, + bool packet_is_rstp, int nr_frags) +{ + struct edma_sw_desc *sw_desc = NULL; + struct platform_device *pdev = edma_cinfo->pdev; + struct edma_tx_desc *tpd = NULL, *start_tpd = NULL; + struct sk_buff *iter_skb; + int i = 0; + u32 word1 = 0, word3 = 0, lso_word1 = 0, svlan_tag = 0; + u16 buf_len, lso_desc_len = 0; + + /* It should either be a nr_frags skb or fraglist skb but not both */ + BUG_ON(nr_frags && skb_has_frag_list(skb)); + + if (skb_is_gso(skb)) { + /* TODO: What additional checks need to be performed here */ + if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) { + lso_word1 |= EDMA_TPD_IPV4_EN; + ip_hdr(skb)->check = 0; + tcp_hdr(skb)->check = ~csum_tcpudp_magic(ip_hdr(skb)->saddr, + ip_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) { + lso_word1 |= EDMA_TPD_LSO_V2_EN; + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = ~csum_ipv6_magic(&ipv6_hdr(skb)->saddr, + &ipv6_hdr(skb)->daddr, 0, IPPROTO_TCP, 0); + } else + return -EINVAL; + + lso_word1 |= EDMA_TPD_LSO_EN | ((skb_shinfo(skb)->gso_size & EDMA_TPD_MSS_MASK) << EDMA_TPD_MSS_SHIFT) | + (skb_transport_offset(skb) << EDMA_TPD_HDR_SHIFT); + } else if (flags_transmit & EDMA_HW_CHECKSUM) { + u8 css, cso; + cso = skb_checksum_start_offset(skb); + css = cso + skb->csum_offset; + + word1 |= (EDMA_TPD_CUSTOM_CSUM_EN); + word1 |= (cso >> 1) << EDMA_TPD_HDR_SHIFT; + word1 |= ((css >> 1) << EDMA_TPD_CUSTOM_CSUM_SHIFT); + } + + if (skb->protocol == htons(ETH_P_PPP_SES)) + word1 |= EDMA_TPD_PPPOE_EN; + + if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_FLAG) { + switch(skb->vlan_proto) { + case htons(ETH_P_8021Q): + word3 |= (1 << EDMA_TX_INS_CVLAN); + word3 |= skb_vlan_tag_get(skb) << EDMA_TX_CVLAN_TAG_SHIFT; + 
break; + case htons(ETH_P_8021AD): + word1 |= (1 << EDMA_TX_INS_SVLAN); + svlan_tag = skb_vlan_tag_get(skb) << EDMA_TX_SVLAN_TAG_SHIFT; + break; + default: + dev_err(&pdev->dev, "no ctag or stag present\n"); + goto vlan_tag_error; + } + } else if (flags_transmit & EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG) { + word3 |= (1 << EDMA_TX_INS_CVLAN); + word3 |= (adapter->default_vlan_tag) << EDMA_TX_CVLAN_TAG_SHIFT; + } + + if (packet_is_rstp) { + word3 |= dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; + word3 |= from_cpu << EDMA_TPD_FROM_CPU_SHIFT; + } else { + word3 |= adapter->dp_bitmap << EDMA_TPD_PORT_BITMAP_SHIFT; + } + + buf_len = skb_headlen(skb); + + if (lso_word1) { + if (lso_word1 & EDMA_TPD_LSO_V2_EN) { + + /* IPv6 LSOv2 descriptor */ + start_tpd = tpd = edma_get_next_tpd(edma_cinfo, queue_id); + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_NONE; + + /* LSOv2 descriptor overrides addr field to pass length */ + tpd->addr = cpu_to_le16(skb->len); + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + } + + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + if (!start_tpd) + start_tpd = tpd; + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + + /* The last buffer info contain the skb address, + * so skb will be freed after unmap + */ + sw_desc->length = lso_desc_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; + + sw_desc->dma = dma_map_single(&adapter->pdev->dev, + skb->data, buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + + /* The last buffer info contain the skb address, + * so it will be freed after unmap + */ + sw_desc->length = lso_desc_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; + + buf_len = 0; + } + + if (likely(buf_len)) { + + /* TODO Do not dequeue descriptor if there is a potential error */ + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + + if (!start_tpd) + start_tpd = tpd; + + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + + /* The last buffer info contain the skb address, + * so it will be free after unmap + */ + sw_desc->length = buf_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_HEAD; + sw_desc->dma = dma_map_single(&adapter->pdev->dev, + skb->data, buf_len, DMA_TO_DEVICE); + if (dma_mapping_error(&pdev->dev, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + } + + /* Walk through all paged fragments */ + while (nr_frags--) { + skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; + buf_len = skb_frag_size(frag); + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, queue_id); + sw_desc->length = buf_len; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAG; + + sw_desc->dma = skb_frag_dma_map(&pdev->dev, frag, 0, buf_len, DMA_TO_DEVICE); + + if (dma_mapping_error(NULL, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + i++; + } + + /* Walk through all fraglist skbs */ + skb_walk_frags(skb, iter_skb) { + buf_len = iter_skb->len; + tpd = edma_get_next_tpd(edma_cinfo, queue_id); + sw_desc = edma_get_tx_buffer(edma_cinfo, tpd, 
queue_id); + sw_desc->length = buf_len; + sw_desc->dma = dma_map_single(&adapter->pdev->dev, + iter_skb->data, buf_len, DMA_TO_DEVICE); + + if (dma_mapping_error(NULL, sw_desc->dma)) + goto dma_error; + + tpd->addr = cpu_to_le32(sw_desc->dma); + tpd->len = cpu_to_le16(buf_len); + tpd->svlan_tag = svlan_tag; + tpd->word1 = word1 | lso_word1; + tpd->word3 = word3; + sw_desc->flags |= EDMA_SW_DESC_FLAG_SKB_FRAGLIST; + } + + if (tpd) + tpd->word1 |= 1 << EDMA_TPD_EOP_SHIFT; + + sw_desc->skb = skb; + sw_desc->flags |= EDMA_SW_DESC_FLAG_LAST; + + return 0; + +dma_error: + edma_rollback_tx(adapter, start_tpd, queue_id); + dev_err(&pdev->dev, "TX DMA map failed\n"); +vlan_tag_error: + return -ENOMEM; +} + +/* edma_check_link() + * check Link status + */ +static int edma_check_link(struct edma_adapter *adapter) +{ + struct phy_device *phydev = adapter->phydev; + + if (!(adapter->poll_required)) + return __EDMA_LINKUP; + + if (phydev->link) + return __EDMA_LINKUP; + + return __EDMA_LINKDOWN; +} + +/* edma_adjust_link() + * check for edma link status + */ +void edma_adjust_link(struct net_device *netdev) +{ + int status; + struct edma_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = adapter->phydev; + + if (!test_bit(__EDMA_UP, &adapter->state_flags)) + return; + + status = edma_check_link(adapter); + + if (status == __EDMA_LINKUP && adapter->link_state == __EDMA_LINKDOWN) { + dev_info(&adapter->pdev->dev, "%s: GMAC Link is up with phy_speed=%d\n", netdev->name, phydev->speed); + adapter->link_state = __EDMA_LINKUP; + if (adapter->edma_cinfo->is_single_phy) { + ess_set_port_status_speed(adapter->edma_cinfo, phydev, + ffs(adapter->dp_bitmap) - 1); + } + netif_carrier_on(netdev); + if (netif_running(netdev)) + netif_tx_wake_all_queues(netdev); + } else if (status == __EDMA_LINKDOWN && adapter->link_state == __EDMA_LINKUP) { + dev_info(&adapter->pdev->dev, "%s: GMAC Link is down\n", netdev->name); + adapter->link_state = __EDMA_LINKDOWN; + netif_carrier_off(netdev); + netif_tx_stop_all_queues(netdev); + } +} + +/* edma_get_stats() + * Statistics api used to retreive the tx/rx statistics + */ +struct net_device_stats *edma_get_stats(struct net_device *netdev) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + return &adapter->stats; +} + +/* edma_xmit() + * Main api to be called by the core for packet transmission + */ +netdev_tx_t edma_xmit(struct sk_buff *skb, + struct net_device *net_dev) +{ + struct edma_adapter *adapter = netdev_priv(net_dev); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + struct edma_tx_desc_ring *etdr; + u16 from_cpu, dp_bitmap, txq_id; + int ret, nr_frags = 0, num_tpds_needed = 1, queue_id; + unsigned int flags_transmit = 0; + bool packet_is_rstp = false; + struct netdev_queue *nq = NULL; + + if (skb_shinfo(skb)->nr_frags) { + nr_frags = skb_shinfo(skb)->nr_frags; + num_tpds_needed += nr_frags; + } else if (skb_has_frag_list(skb)) { + struct sk_buff *iter_skb; + + skb_walk_frags(skb, iter_skb) + num_tpds_needed++; + } + + if (num_tpds_needed > EDMA_MAX_SKB_FRAGS) { + dev_err(&net_dev->dev, + "skb received with fragments %d which is more than %lu", + num_tpds_needed, EDMA_MAX_SKB_FRAGS); + dev_kfree_skb_any(skb); + adapter->stats.tx_errors++; + return NETDEV_TX_OK; + } + + if (edma_stp_rstp) { + u16 ath_hdr, ath_eth_type; + u8 mac_addr[EDMA_ETH_HDR_LEN]; + ath_eth_type = ntohs(*(uint16_t *)&skb->data[12]); + if (ath_eth_type == edma_ath_eth_type) { + packet_is_rstp = true; + ath_hdr = htons(*(uint16_t *)&skb->data[14]); + dp_bitmap = 
ath_hdr & EDMA_TX_ATH_HDR_PORT_BITMAP_MASK; + from_cpu = (ath_hdr & EDMA_TX_ATH_HDR_FROM_CPU_MASK) >> EDMA_TX_ATH_HDR_FROM_CPU_SHIFT; + memcpy(mac_addr, skb->data, EDMA_ETH_HDR_LEN); + + skb_pull(skb, 4); + + memcpy(skb->data, mac_addr, EDMA_ETH_HDR_LEN); + } + } + + /* this will be one of the 4 TX queues exposed to linux kernel */ + txq_id = skb_get_queue_mapping(skb); + queue_id = edma_tx_queue_get(adapter, skb, txq_id); + etdr = edma_cinfo->tpd_ring[queue_id]; + nq = netdev_get_tx_queue(net_dev, txq_id); + + local_bh_disable(); + /* Tx is not handled in bottom half context. Hence, we need to protect + * Tx from tasks and bottom half + */ + + if (num_tpds_needed > edma_tpd_available(edma_cinfo, queue_id)) { + /* not enough descriptor, just stop queue */ + netif_tx_stop_queue(nq); + local_bh_enable(); + dev_dbg(&net_dev->dev, "Not enough descriptors available"); + edma_cinfo->edma_ethstats.tx_desc_error++; + return NETDEV_TX_BUSY; + } + + /* Check and mark VLAN tag offload */ + if (unlikely(skb_vlan_tag_present(skb))) + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_FLAG; + else if (!adapter->edma_cinfo->is_single_phy && adapter->default_vlan_tag) + flags_transmit |= EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG; + + /* Check and mark checksum offload */ + if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) + flags_transmit |= EDMA_HW_CHECKSUM; + + /* Map and fill descriptor for Tx */ + ret = edma_tx_map_and_fill(edma_cinfo, adapter, skb, queue_id, + flags_transmit, from_cpu, dp_bitmap, packet_is_rstp, nr_frags); + if (ret) { + dev_kfree_skb_any(skb); + adapter->stats.tx_errors++; + goto netdev_okay; + } + + /* Update SW producer index */ + edma_tx_update_hw_idx(edma_cinfo, skb, queue_id); + + /* update tx statistics */ + adapter->stats.tx_packets++; + adapter->stats.tx_bytes += skb->len; + +netdev_okay: + local_bh_enable(); + return NETDEV_TX_OK; +} + +/* + * edma_flow_may_expire() + * Timer function called periodically to delete the node + */ +void edma_flow_may_expire(struct timer_list *t) +{ + struct edma_rfs_flow_table *table = from_timer(table, t, expire_rfs); + struct edma_adapter *adapter = + container_of(table, typeof(*adapter), rfs); + int j; + + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + for (j = 0; j < EDMA_RFS_EXPIRE_COUNT_PER_CALL; j++) { + struct hlist_head *hhead; + struct hlist_node *tmp; + struct edma_rfs_filter_node *n; + bool res; + + hhead = &adapter->rfs.hlist_head[adapter->rfs.hashtoclean++]; + hlist_for_each_entry_safe(n, tmp, hhead, node) { + res = rps_may_expire_flow(adapter->netdev, n->rq_id, + n->flow_id, n->filter_id); + if (res) { + int ret; + ret = edma_delete_rfs_filter(adapter, n); + if (ret < 0) + dev_dbg(&adapter->netdev->dev, + "RFS entry %d not allowed to be flushed by Switch", + n->flow_id); + else { + hlist_del(&n->node); + kfree(n); + adapter->rfs.filter_available++; + } + } + } + } + + adapter->rfs.hashtoclean = adapter->rfs.hashtoclean & (EDMA_RFS_FLOW_ENTRIES - 1); + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); + mod_timer(&adapter->rfs.expire_rfs, jiffies + HZ / 4); +} + +/* edma_rx_flow_steer() + * Called by core to to steer the flow to CPU + */ +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq, u32 flow_id) +{ + struct flow_keys keys; + struct edma_rfs_filter_node *filter_node; + struct edma_adapter *adapter = netdev_priv(dev); + u16 hash_tblid; + int res; + + if (skb->protocol == htons(ETH_P_IPV6)) { + dev_err(&adapter->pdev->dev, "IPv6 not supported\n"); + res = -EINVAL; + goto no_protocol_err; + } + + /* Dissect flow 
parameters + * We only support IPv4 + TCP/UDP + */ + res = skb_flow_dissect_flow_keys(skb, &keys, 0); + if (!((keys.basic.ip_proto == IPPROTO_TCP) || (keys.basic.ip_proto == IPPROTO_UDP))) { + res = -EPROTONOSUPPORT; + goto no_protocol_err; + } + + /* Check if table entry exists */ + hash_tblid = skb_get_hash_raw(skb) & EDMA_RFS_FLOW_ENTRIES_MASK; + + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + filter_node = edma_rfs_key_search(&adapter->rfs.hlist_head[hash_tblid], &keys); + + if (filter_node) { + if (rxq == filter_node->rq_id) { + res = -EEXIST; + goto out; + } else { + res = edma_delete_rfs_filter(adapter, filter_node); + if (res < 0) + dev_warn(&adapter->netdev->dev, + "Cannot steer flow %d to different queue", + filter_node->flow_id); + else { + adapter->rfs.filter_available++; + res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); + if (res < 0) { + dev_warn(&adapter->netdev->dev, + "Cannot steer flow %d to different queue", + filter_node->flow_id); + } else { + adapter->rfs.filter_available--; + filter_node->rq_id = rxq; + filter_node->filter_id = res; + } + } + } + } else { + if (adapter->rfs.filter_available == 0) { + res = -EBUSY; + goto out; + } + + filter_node = kmalloc(sizeof(*filter_node), GFP_ATOMIC); + if (!filter_node) { + res = -ENOMEM; + goto out; + } + + res = edma_add_rfs_filter(adapter, &keys, rxq, filter_node); + if (res < 0) { + kfree(filter_node); + goto out; + } + + adapter->rfs.filter_available--; + filter_node->rq_id = rxq; + filter_node->filter_id = res; + filter_node->flow_id = flow_id; + filter_node->keys = keys; + INIT_HLIST_NODE(&filter_node->node); + hlist_add_head(&filter_node->node, &adapter->rfs.hlist_head[hash_tblid]); + } + +out: + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); +no_protocol_err: + return res; +} + +/* edma_register_rfs_filter() + * Add RFS filter callback + */ +int edma_register_rfs_filter(struct net_device *netdev, + set_rfs_filter_callback_t set_filter) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + spin_lock_bh(&adapter->rfs.rfs_ftab_lock); + + if (adapter->set_rfs_rule) { + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); + return -1; + } + + adapter->set_rfs_rule = set_filter; + spin_unlock_bh(&adapter->rfs.rfs_ftab_lock); + + return 0; +} + +/* edma_alloc_tx_rings() + * Allocate rx rings + */ +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + int i, err = 0; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + err = edma_alloc_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); + if (err) { + dev_err(&pdev->dev, "Tx Queue alloc %u failed\n", i); + return err; + } + } + + return 0; +} + +/* edma_free_tx_rings() + * Free tx rings + */ +void edma_free_tx_rings(struct edma_common_info *edma_cinfo) +{ + int i; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) + edma_free_tx_ring(edma_cinfo, edma_cinfo->tpd_ring[i]); +} + +/* edma_free_tx_resources() + * Free buffers associated with tx rings + */ +void edma_free_tx_resources(struct edma_common_info *edma_cinfo) +{ + struct edma_tx_desc_ring *etdr; + struct edma_sw_desc *sw_desc; + struct platform_device *pdev = edma_cinfo->pdev; + int i, j; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + etdr = edma_cinfo->tpd_ring[i]; + for (j = 0; j < EDMA_TX_RING_SIZE; j++) { + sw_desc = &etdr->sw_desc[j]; + if (sw_desc->flags & (EDMA_SW_DESC_FLAG_SKB_HEAD | + EDMA_SW_DESC_FLAG_SKB_FRAG | EDMA_SW_DESC_FLAG_SKB_FRAGLIST)) + edma_tx_unmap_and_free(pdev, sw_desc); + } + } +} + +/* edma_alloc_rx_rings() + * 
Allocate rx rings + */ +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo) +{ + struct platform_device *pdev = edma_cinfo->pdev; + int i, j, err = 0; + + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + err = edma_alloc_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]); + if (err) { + dev_err(&pdev->dev, "Rx Queue alloc%u failed\n", i); + return err; + } + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + + return 0; +} + +/* edma_free_rx_rings() + * free rx rings + */ +void edma_free_rx_rings(struct edma_common_info *edma_cinfo) +{ + int i, j; + + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + edma_free_rx_ring(edma_cinfo, edma_cinfo->rfd_ring[j]); + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } +} + +/* edma_free_queues() + * Free the queues allocaated + */ +void edma_free_queues(struct edma_common_info *edma_cinfo) +{ + int i , j; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + if (edma_cinfo->tpd_ring[i]) + kfree(edma_cinfo->tpd_ring[i]); + edma_cinfo->tpd_ring[i] = NULL; + } + + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + if (edma_cinfo->rfd_ring[j]) + kfree(edma_cinfo->rfd_ring[j]); + edma_cinfo->rfd_ring[j] = NULL; + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + + edma_cinfo->num_rx_queues = 0; + edma_cinfo->num_tx_queues = 0; + + return; +} + +/* edma_free_rx_resources() + * Free buffers associated with tx rings + */ +void edma_free_rx_resources(struct edma_common_info *edma_cinfo) +{ + struct edma_rfd_desc_ring *erdr; + struct edma_sw_desc *sw_desc; + struct platform_device *pdev = edma_cinfo->pdev; + int i, j, k; + + for (i = 0, k = 0; i < edma_cinfo->num_rx_queues; i++) { + erdr = edma_cinfo->rfd_ring[k]; + for (j = 0; j < EDMA_RX_RING_SIZE; j++) { + sw_desc = &erdr->sw_desc[j]; + if (likely(sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_HEAD)) { + dma_unmap_single(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + edma_clean_rfd(erdr, j); + } else if ((sw_desc->flags & EDMA_SW_DESC_FLAG_SKB_FRAG)) { + dma_unmap_page(&pdev->dev, sw_desc->dma, + sw_desc->length, DMA_FROM_DEVICE); + edma_clean_rfd(erdr, j); + } + } + k += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + + } +} + +/* edma_alloc_queues_tx() + * Allocate memory for all rings + */ +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo) +{ + int i; + + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + struct edma_tx_desc_ring *etdr; + etdr = kzalloc(sizeof(struct edma_tx_desc_ring), GFP_KERNEL); + if (!etdr) + goto err; + etdr->count = edma_cinfo->tx_ring_count; + edma_cinfo->tpd_ring[i] = etdr; + } + + return 0; +err: + edma_free_queues(edma_cinfo); + return -1; +} + +/* edma_alloc_queues_rx() + * Allocate memory for all rings + */ +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo) +{ + int i, j; + + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + struct edma_rfd_desc_ring *rfd_ring; + rfd_ring = kzalloc(sizeof(struct edma_rfd_desc_ring), + GFP_KERNEL); + if (!rfd_ring) + goto err; + rfd_ring->count = edma_cinfo->rx_ring_count; + edma_cinfo->rfd_ring[j] = rfd_ring; + j += ((edma_cinfo->num_rx_queues == 4) ? 
2 : 1); + } + return 0; +err: + edma_free_queues(edma_cinfo); + return -1; +} + +/* edma_clear_irq_status() + * Clear interrupt status + */ +void edma_clear_irq_status() +{ + edma_write_reg(EDMA_REG_RX_ISR, 0xff); + edma_write_reg(EDMA_REG_TX_ISR, 0xffff); + edma_write_reg(EDMA_REG_MISC_ISR, 0x1fff); + edma_write_reg(EDMA_REG_WOL_ISR, 0x1); +}; + +/* edma_configure() + * Configure skb, edma interrupts and control register. + */ +int edma_configure(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + u32 intr_modrt_data; + u32 intr_ctrl_data = 0; + int i, j, ret_count; + + edma_read_reg(EDMA_REG_INTR_CTRL, &intr_ctrl_data); + intr_ctrl_data &= ~(1 << EDMA_INTR_SW_IDX_W_TYP_SHIFT); + intr_ctrl_data |= hw->intr_sw_idx_w << EDMA_INTR_SW_IDX_W_TYP_SHIFT; + edma_write_reg(EDMA_REG_INTR_CTRL, intr_ctrl_data); + + edma_clear_irq_status(); + + /* Clear any WOL status */ + edma_write_reg(EDMA_REG_WOL_CTRL, 0); + intr_modrt_data = (EDMA_TX_IMT << EDMA_IRQ_MODRT_TX_TIMER_SHIFT); + intr_modrt_data |= (EDMA_RX_IMT << EDMA_IRQ_MODRT_RX_TIMER_SHIFT); + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, intr_modrt_data); + edma_configure_tx(edma_cinfo); + edma_configure_rx(edma_cinfo); + + /* Allocate the RX buffer */ + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + struct edma_rfd_desc_ring *ring = edma_cinfo->rfd_ring[j]; + ret_count = edma_alloc_rx_buf(edma_cinfo, ring, ring->count, j); + if (ret_count) { + dev_dbg(&edma_cinfo->pdev->dev, "not all rx buffers allocated\n"); + } + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + + /* Configure descriptor Ring */ + edma_init_desc(edma_cinfo); + return 0; +} + +/* edma_irq_enable() + * Enable default interrupt generation settings + */ +void edma_irq_enable(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + int i, j; + + edma_write_reg(EDMA_REG_RX_ISR, 0xff); + for (i = 0, j = 0; i < edma_cinfo->num_rx_queues; i++) { + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(j), hw->rx_intr_mask); + j += ((edma_cinfo->num_rx_queues == 4) ? 2 : 1); + } + edma_write_reg(EDMA_REG_TX_ISR, 0xffff); + for (i = 0; i < edma_cinfo->num_tx_queues; i++) + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), hw->tx_intr_mask); +} + +/* edma_irq_disable() + * Disable Interrupt + */ +void edma_irq_disable(struct edma_common_info *edma_cinfo) +{ + int i; + + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(i), 0x0); + + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(i), 0x0); + edma_write_reg(EDMA_REG_MISC_IMR, 0); + edma_write_reg(EDMA_REG_WOL_IMR, 0); +} + +/* edma_free_irqs() + * Free All IRQs + */ +void edma_free_irqs(struct edma_adapter *adapter) +{ + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + int i, j; + int k = ((edma_cinfo->num_rx_queues == 4) ? 
1 : 2); + + for (i = 0; i < CONFIG_NR_CPUS; i++) { + for (j = edma_cinfo->edma_percpu_info[i].tx_start; j < (edma_cinfo->edma_percpu_info[i].tx_start + 4); j++) + free_irq(edma_cinfo->tx_irq[j], &edma_cinfo->edma_percpu_info[i]); + + for (j = edma_cinfo->edma_percpu_info[i].rx_start; j < (edma_cinfo->edma_percpu_info[i].rx_start + k); j++) + free_irq(edma_cinfo->rx_irq[j], &edma_cinfo->edma_percpu_info[i]); + } +} + +/* edma_enable_rx_ctrl() + * Enable RX queue control + */ +void edma_enable_rx_ctrl(struct edma_hw *hw) +{ + u32 data; + + edma_read_reg(EDMA_REG_RXQ_CTRL, &data); + data |= EDMA_RXQ_CTRL_EN; + edma_write_reg(EDMA_REG_RXQ_CTRL, data); +} + + +/* edma_enable_tx_ctrl() + * Enable TX queue control + */ +void edma_enable_tx_ctrl(struct edma_hw *hw) +{ + u32 data; + + edma_read_reg(EDMA_REG_TXQ_CTRL, &data); + data |= EDMA_TXQ_CTRL_TXQ_EN; + edma_write_reg(EDMA_REG_TXQ_CTRL, data); +} + +/* edma_stop_rx_tx() + * Disable RX/TQ Queue control + */ +void edma_stop_rx_tx(struct edma_hw *hw) +{ + u32 data; + + edma_read_reg(EDMA_REG_RXQ_CTRL, &data); + data &= ~EDMA_RXQ_CTRL_EN; + edma_write_reg(EDMA_REG_RXQ_CTRL, data); + edma_read_reg(EDMA_REG_TXQ_CTRL, &data); + data &= ~EDMA_TXQ_CTRL_TXQ_EN; + edma_write_reg(EDMA_REG_TXQ_CTRL, data); +} + +/* edma_reset() + * Reset the EDMA + */ +int edma_reset(struct edma_common_info *edma_cinfo) +{ + struct edma_hw *hw = &edma_cinfo->hw; + + edma_irq_disable(edma_cinfo); + + edma_clear_irq_status(); + + edma_stop_rx_tx(hw); + + return 0; +} + +/* edma_fill_netdev() + * Fill netdev for each etdr + */ +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int queue_id, + int dev, int txq_id) +{ + struct edma_tx_desc_ring *etdr; + int i = 0; + + etdr = edma_cinfo->tpd_ring[queue_id]; + + while (etdr->netdev[i]) + i++; + + if (i >= EDMA_MAX_NETDEV_PER_QUEUE) + return -1; + + /* Populate the netdev associated with the tpd ring */ + etdr->netdev[i] = edma_netdev[dev]; + etdr->nq[i] = netdev_get_tx_queue(edma_netdev[dev], txq_id); + + return 0; +} + +/* edma_set_mac() + * Change the Ethernet Address of the NIC + */ +int edma_set_mac_addr(struct net_device *netdev, void *p) +{ + struct sockaddr *addr = p; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EINVAL; + + if (netif_running(netdev)) + return -EBUSY; + + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + return 0; +} + +/* edma_set_stp_rstp() + * set stp/rstp + */ +void edma_set_stp_rstp(bool rstp) +{ + edma_stp_rstp = rstp; +} + +/* edma_assign_ath_hdr_type() + * assign atheros header eth type + */ +void edma_assign_ath_hdr_type(int eth_type) +{ + edma_ath_eth_type = eth_type & EDMA_ETH_TYPE_MASK; +} + +/* edma_get_default_vlan_tag() + * Used by other modules to get the default vlan tag + */ +int edma_get_default_vlan_tag(struct net_device *netdev) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + if (adapter->default_vlan_tag) + return adapter->default_vlan_tag; + + return 0; +} + +/* edma_open() + * gets called when netdevice is up, start the queue. 
+ */
+int edma_open(struct net_device *netdev)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+ struct platform_device *pdev = adapter->edma_cinfo->pdev;
+
+ netif_tx_start_all_queues(netdev);
+ edma_initialise_rfs_flow_table(adapter);
+ set_bit(__EDMA_UP, &adapter->state_flags);
+
+ /* if link polling is enabled, in our case enabled for WAN, then
+ * do a phy start, else always set link as UP
+ */
+ if (adapter->poll_required) {
+ if (!IS_ERR(adapter->phydev)) {
+ phy_start(adapter->phydev);
+ phy_start_aneg(adapter->phydev);
+ adapter->link_state = __EDMA_LINKDOWN;
+ } else {
+ dev_dbg(&pdev->dev, "Invalid PHY device for a link polled interface\n");
+ }
+ } else {
+ adapter->link_state = __EDMA_LINKUP;
+ netif_carrier_on(netdev);
+ }
+
+ return 0;
+}
+
+
+/* edma_close()
+ * gets called when netdevice is down, stops the queue.
+ */
+int edma_close(struct net_device *netdev)
+{
+ struct edma_adapter *adapter = netdev_priv(netdev);
+
+ edma_free_rfs_flow_table(adapter);
+ netif_carrier_off(netdev);
+ netif_tx_stop_all_queues(netdev);
+
+ if (adapter->poll_required) {
+ if (!IS_ERR(adapter->phydev))
+ phy_stop(adapter->phydev);
+ }
+
+ adapter->link_state = __EDMA_LINKDOWN;
+
+ /* Clear the GMAC UP state before link state is checked
+ */
+ clear_bit(__EDMA_UP, &adapter->state_flags);
+
+ return 0;
+}
+
+/* edma_poll
+ * polling function that gets called when the napi gets scheduled.
+ *
+ * Main sequence of tasks performed in this api
+ * is clear irq status -> clear_tx_irq -> clean_rx_irq ->
+ * enable interrupts.
+ */
+int edma_poll(struct napi_struct *napi, int budget)
+{
+ struct edma_per_cpu_queues_info *edma_percpu_info = container_of(napi,
+ struct edma_per_cpu_queues_info, napi);
+ struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo;
+ u32 reg_data;
+ u32 shadow_rx_status, shadow_tx_status;
+ int queue_id;
+ int i, work_done = 0;
+ u16 rx_pending_fill;
+
+ /* Store the Rx/Tx status by ANDing it with
+ * appropriate CPU RX/TX mask
+ */
+ edma_read_reg(EDMA_REG_RX_ISR, &reg_data);
+ edma_percpu_info->rx_status |= reg_data & edma_percpu_info->rx_mask;
+ shadow_rx_status = edma_percpu_info->rx_status;
+ edma_read_reg(EDMA_REG_TX_ISR, &reg_data);
+ edma_percpu_info->tx_status |= reg_data & edma_percpu_info->tx_mask;
+ shadow_tx_status = edma_percpu_info->tx_status;
+
+ /* Every core will have a start, which will be computed
+ * in probe and stored in edma_percpu_info->tx_start variable.
+ * We will shift the status bit by tx_start to obtain
+ * status bits for the core on which the current processing
+ * is happening. Since there are 4 tx queues per core,
+ * we will run the loop till we get the correct queue to clear.
+ */
+ while (edma_percpu_info->tx_status) {
+ queue_id = ffs(edma_percpu_info->tx_status) - 1;
+ edma_tx_complete(edma_cinfo, queue_id);
+ edma_percpu_info->tx_status &= ~(1 << queue_id);
+ }
+
+ /* Every core will also have an rx queue start, which will be
+ * computed in probe and stored in edma_percpu_info->rx_start variable.
+ * We will shift the status bit by rx_start to obtain
+ * status bits for the core on which the current processing
+ * is happening. Since there can be one or two rx queues per core, we
+ * will run the loop till we get the correct queue to clear.
+ */ + while (edma_percpu_info->rx_status) { + queue_id = ffs(edma_percpu_info->rx_status) - 1; + rx_pending_fill = edma_rx_complete(edma_cinfo, &work_done, + budget, queue_id, napi); + + if (likely(work_done < budget)) { + if (rx_pending_fill) { + /* reschedule poll() to refill rx buffer deficit */ + work_done = budget; + break; + } + edma_percpu_info->rx_status &= ~(1 << queue_id); + } else { + break; + } + } + + /* Clear the status register, to avoid the interrupts to + * reoccur.This clearing of interrupt status register is + * done here as writing to status register only takes place + * once the producer/consumer index has been updated to + * reflect that the packet transmission/reception went fine. + */ + edma_write_reg(EDMA_REG_RX_ISR, shadow_rx_status); + edma_write_reg(EDMA_REG_TX_ISR, shadow_tx_status); + + /* If budget not fully consumed, exit the polling mode */ + if (likely(work_done < budget)) { + napi_complete(napi); + + /* re-enable the interrupts */ + for (i = 0; i < edma_cinfo->num_rxq_per_core; i++) + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x1); + for (i = 0; i < edma_cinfo->num_txq_per_core; i++) + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x1); + } + + return work_done; +} + +/* edma interrupt() + * interrupt handler + */ +irqreturn_t edma_interrupt(int irq, void *dev) +{ + struct edma_per_cpu_queues_info *edma_percpu_info = (struct edma_per_cpu_queues_info *) dev; + struct edma_common_info *edma_cinfo = edma_percpu_info->edma_cinfo; + int i; + + /* Unmask the TX/RX interrupt register */ + for (i = 0; i < edma_cinfo->num_rxq_per_core; i++) + edma_write_reg(EDMA_REG_RX_INT_MASK_Q(edma_percpu_info->rx_start + i), 0x0); + + for (i = 0; i < edma_cinfo->num_txq_per_core; i++) + edma_write_reg(EDMA_REG_TX_INT_MASK_Q(edma_percpu_info->tx_start + i), 0x0); + + napi_schedule(&edma_percpu_info->napi); + + return IRQ_HANDLED; +} diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.h b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.h new file mode 100644 index 0000000000..015e5f5026 --- /dev/null +++ b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma.h @@ -0,0 +1,455 @@ +/* + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#ifndef _EDMA_H_ +#define _EDMA_H_ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "ess_edma.h" + +#define EDMA_CPU_CORES_SUPPORTED 4 +#define EDMA_MAX_PORTID_SUPPORTED 5 +#define EDMA_MAX_VLAN_SUPPORTED EDMA_MAX_PORTID_SUPPORTED +#define EDMA_MAX_PORTID_BITMAP_INDEX (EDMA_MAX_PORTID_SUPPORTED + 1) +#define EDMA_MAX_PORTID_BITMAP_SUPPORTED 0x1f /* 0001_1111 = 0x1f */ +#define EDMA_MAX_NETDEV_PER_QUEUE 4 /* 3 Netdev per queue, 1 space for indexing */ + +#define EDMA_MAX_RECEIVE_QUEUE 8 +#define EDMA_MAX_TRANSMIT_QUEUE 16 + +/* WAN/LAN adapter number */ +#define EDMA_WAN 0 +#define EDMA_LAN 1 + +/* VLAN tag */ +#define EDMA_LAN_DEFAULT_VLAN 1 +#define EDMA_WAN_DEFAULT_VLAN 2 + +#define EDMA_DEFAULT_GROUP1_VLAN 1 +#define EDMA_DEFAULT_GROUP2_VLAN 2 +#define EDMA_DEFAULT_GROUP3_VLAN 3 +#define EDMA_DEFAULT_GROUP4_VLAN 4 +#define EDMA_DEFAULT_GROUP5_VLAN 5 + +/* Queues exposed to linux kernel */ +#define EDMA_NETDEV_TX_QUEUE 4 +#define EDMA_NETDEV_RX_QUEUE 4 + +/* Number of queues per core */ +#define EDMA_NUM_TXQ_PER_CORE 4 +#define EDMA_NUM_RXQ_PER_CORE 2 + +#define EDMA_TPD_EOP_SHIFT 31 + +#define EDMA_PORT_ID_SHIFT 12 +#define EDMA_PORT_ID_MASK 0x7 + +/* tpd word 3 bit 18-28 */ +#define EDMA_TPD_PORT_BITMAP_SHIFT 18 + +#define EDMA_TPD_FROM_CPU_SHIFT 25 + +#define EDMA_FROM_CPU_MASK 0x80 +#define EDMA_SKB_PRIORITY_MASK 0x38 + +/* TX/RX descriptor ring count */ +/* should be a power of 2 */ +#define EDMA_RX_RING_SIZE 128 +#define EDMA_TX_RING_SIZE 128 + +/* Flags used in paged/non paged mode */ +#define EDMA_RX_HEAD_BUFF_SIZE_JUMBO 256 +#define EDMA_RX_HEAD_BUFF_SIZE 1540 + +/* MAX frame size supported by switch */ +#define EDMA_MAX_JUMBO_FRAME_SIZE 9216 + +/* Configurations */ +#define EDMA_INTR_CLEAR_TYPE 0 +#define EDMA_INTR_SW_IDX_W_TYPE 0 +#define EDMA_FIFO_THRESH_TYPE 0 +#define EDMA_RSS_TYPE 0 +#define EDMA_RX_IMT 0x0020 +#define EDMA_TX_IMT 0x0050 +#define EDMA_TPD_BURST 5 +#define EDMA_TXF_BURST 0x100 +#define EDMA_RFD_BURST 8 +#define EDMA_RFD_THR 16 +#define EDMA_RFD_LTHR 0 + +/* RX/TX per CPU based mask/shift */ +#define EDMA_TX_PER_CPU_MASK 0xF +#define EDMA_RX_PER_CPU_MASK 0x3 +#define EDMA_TX_PER_CPU_MASK_SHIFT 0x2 +#define EDMA_RX_PER_CPU_MASK_SHIFT 0x1 +#define EDMA_TX_CPU_START_SHIFT 0x2 +#define EDMA_RX_CPU_START_SHIFT 0x1 + +/* FLags used in transmit direction */ +#define EDMA_HW_CHECKSUM 0x00000001 +#define EDMA_VLAN_TX_TAG_INSERT_FLAG 0x00000002 +#define EDMA_VLAN_TX_TAG_INSERT_DEFAULT_FLAG 0x00000004 + +#define EDMA_SW_DESC_FLAG_LAST 0x1 +#define EDMA_SW_DESC_FLAG_SKB_HEAD 0x2 +#define EDMA_SW_DESC_FLAG_SKB_FRAG 0x4 +#define EDMA_SW_DESC_FLAG_SKB_FRAGLIST 0x8 +#define EDMA_SW_DESC_FLAG_SKB_NONE 0x10 +#define EDMA_SW_DESC_FLAG_SKB_REUSE 0x20 + + +#define EDMA_MAX_SKB_FRAGS (MAX_SKB_FRAGS + 1) + +/* Ethtool specific list of EDMA supported features */ +#define EDMA_SUPPORTED_FEATURES (SUPPORTED_10baseT_Half \ + | SUPPORTED_10baseT_Full \ + | SUPPORTED_100baseT_Half \ + | SUPPORTED_100baseT_Full \ + | SUPPORTED_1000baseT_Full) + +/* Recevie side atheros Header */ +#define EDMA_RX_ATH_HDR_VERSION 0x2 +#define EDMA_RX_ATH_HDR_VERSION_SHIFT 14 +#define EDMA_RX_ATH_HDR_PRIORITY_SHIFT 11 +#define EDMA_RX_ATH_PORT_TYPE_SHIFT 6 +#define EDMA_RX_ATH_HDR_RSTP_PORT_TYPE 0x4 + +/* Transmit side atheros Header */ +#define EDMA_TX_ATH_HDR_PORT_BITMAP_MASK 0x7F +#define 
EDMA_TX_ATH_HDR_FROM_CPU_MASK 0x80 +#define EDMA_TX_ATH_HDR_FROM_CPU_SHIFT 7 + +#define EDMA_TXQ_START_CORE0 8 +#define EDMA_TXQ_START_CORE1 12 +#define EDMA_TXQ_START_CORE2 0 +#define EDMA_TXQ_START_CORE3 4 + +#define EDMA_TXQ_IRQ_MASK_CORE0 0x0F00 +#define EDMA_TXQ_IRQ_MASK_CORE1 0xF000 +#define EDMA_TXQ_IRQ_MASK_CORE2 0x000F +#define EDMA_TXQ_IRQ_MASK_CORE3 0x00F0 + +#define EDMA_ETH_HDR_LEN 12 +#define EDMA_ETH_TYPE_MASK 0xFFFF + +#define EDMA_RX_BUFFER_WRITE 16 +#define EDMA_RFD_AVAIL_THR 80 + +#define EDMA_GMAC_NO_MDIO_PHY PHY_MAX_ADDR + +extern int ssdk_rfs_ipct_rule_set(__be32 ip_src, __be32 ip_dst, + __be16 sport, __be16 dport, + uint8_t proto, u16 loadbalance, bool action); +struct edma_ethtool_statistics { + u32 tx_q0_pkt; + u32 tx_q1_pkt; + u32 tx_q2_pkt; + u32 tx_q3_pkt; + u32 tx_q4_pkt; + u32 tx_q5_pkt; + u32 tx_q6_pkt; + u32 tx_q7_pkt; + u32 tx_q8_pkt; + u32 tx_q9_pkt; + u32 tx_q10_pkt; + u32 tx_q11_pkt; + u32 tx_q12_pkt; + u32 tx_q13_pkt; + u32 tx_q14_pkt; + u32 tx_q15_pkt; + u32 tx_q0_byte; + u32 tx_q1_byte; + u32 tx_q2_byte; + u32 tx_q3_byte; + u32 tx_q4_byte; + u32 tx_q5_byte; + u32 tx_q6_byte; + u32 tx_q7_byte; + u32 tx_q8_byte; + u32 tx_q9_byte; + u32 tx_q10_byte; + u32 tx_q11_byte; + u32 tx_q12_byte; + u32 tx_q13_byte; + u32 tx_q14_byte; + u32 tx_q15_byte; + u32 rx_q0_pkt; + u32 rx_q1_pkt; + u32 rx_q2_pkt; + u32 rx_q3_pkt; + u32 rx_q4_pkt; + u32 rx_q5_pkt; + u32 rx_q6_pkt; + u32 rx_q7_pkt; + u32 rx_q0_byte; + u32 rx_q1_byte; + u32 rx_q2_byte; + u32 rx_q3_byte; + u32 rx_q4_byte; + u32 rx_q5_byte; + u32 rx_q6_byte; + u32 rx_q7_byte; + u32 tx_desc_error; + u32 rx_alloc_fail_ctr; +}; + +struct edma_mdio_data { + struct mii_bus *mii_bus; + void __iomem *membase; + int phy_irq[PHY_MAX_ADDR]; +}; + +/* EDMA LINK state */ +enum edma_link_state { + __EDMA_LINKUP, /* Indicate link is UP */ + __EDMA_LINKDOWN /* Indicate link is down */ +}; + +/* EDMA GMAC state */ +enum edma_gmac_state { + __EDMA_UP /* use to indicate GMAC is up */ +}; + +/* edma transmit descriptor */ +struct edma_tx_desc { + __le16 len; /* full packet including CRC */ + __le16 svlan_tag; /* vlan tag */ + __le32 word1; /* byte 4-7 */ + __le32 addr; /* address of buffer */ + __le32 word3; /* byte 12 */ +}; + +/* edma receive return descriptor */ +struct edma_rx_return_desc { + u16 rrd0; + u16 rrd1; + u16 rrd2; + u16 rrd3; + u16 rrd4; + u16 rrd5; + u16 rrd6; + u16 rrd7; +}; + +/* RFD descriptor */ +struct edma_rx_free_desc { + __le32 buffer_addr; /* buffer address */ +}; + +/* edma hw specific data */ +struct edma_hw { + u32 __iomem *hw_addr; /* inner register address */ + struct edma_adapter *adapter; /* netdevice adapter */ + u32 rx_intr_mask; /*rx interrupt mask */ + u32 tx_intr_mask; /* tx interrupt nask */ + u32 misc_intr_mask; /* misc interrupt mask */ + u32 wol_intr_mask; /* wake on lan interrupt mask */ + bool intr_clear_type; /* interrupt clear */ + bool intr_sw_idx_w; /* interrupt software index */ + u32 rx_head_buff_size; /* Rx buffer size */ + u8 rss_type; /* rss protocol type */ +}; + +/* edma_sw_desc stores software descriptor + * SW descriptor has 1:1 map with HW descriptor + */ +struct edma_sw_desc { + struct sk_buff *skb; + dma_addr_t dma; /* dma address */ + u16 length; /* Tx/Rx buffer length */ + u32 flags; +}; + +/* per core related information */ +struct edma_per_cpu_queues_info { + struct napi_struct napi; /* napi associated with the core */ + u32 tx_mask; /* tx interrupt mask */ + u32 rx_mask; /* rx interrupt mask */ + u32 tx_status; /* tx interrupt status */ + u32 rx_status; /* rx 
interrupt status */ + u32 tx_start; /* tx queue start */ + u32 rx_start; /* rx queue start */ + struct edma_common_info *edma_cinfo; /* edma common info */ +}; + +/* edma specific common info */ +struct edma_common_info { + struct edma_tx_desc_ring *tpd_ring[16]; /* 16 Tx queues */ + struct edma_rfd_desc_ring *rfd_ring[8]; /* 8 Rx queues */ + struct platform_device *pdev; /* device structure */ + struct net_device *netdev[EDMA_MAX_PORTID_SUPPORTED]; + struct net_device *portid_netdev_lookup_tbl[EDMA_MAX_PORTID_BITMAP_INDEX]; + struct ctl_table_header *edma_ctl_table_hdr; + int num_gmac; + struct edma_ethtool_statistics edma_ethstats; /* ethtool stats */ + int num_rx_queues; /* number of rx queue */ + u32 num_tx_queues; /* number of tx queue */ + u32 tx_irq[16]; /* number of tx irq */ + u32 rx_irq[8]; /* number of rx irq */ + u32 from_cpu; /* from CPU TPD field */ + u32 num_rxq_per_core; /* Rx queues per core */ + u32 num_txq_per_core; /* Tx queues per core */ + u16 tx_ring_count; /* Tx ring count */ + u16 rx_ring_count; /* Rx ring*/ + u16 rx_head_buffer_len; /* rx buffer length */ + u16 rx_page_buffer_len; /* rx buffer length */ + u32 page_mode; /* Jumbo frame supported flag */ + u32 fraglist_mode; /* fraglist supported flag */ + struct edma_hw hw; /* edma hw specific structure */ + struct edma_per_cpu_queues_info edma_percpu_info[CONFIG_NR_CPUS]; /* per cpu information */ + spinlock_t stats_lock; /* protect edma stats area for updation */ + struct timer_list edma_stats_timer; + bool is_single_phy; + void __iomem *ess_hw_addr; + struct clk *ess_clk; +}; + +/* transimit packet descriptor (tpd) ring */ +struct edma_tx_desc_ring { + struct netdev_queue *nq[EDMA_MAX_NETDEV_PER_QUEUE]; /* Linux queue index */ + struct net_device *netdev[EDMA_MAX_NETDEV_PER_QUEUE]; + /* Array of netdevs associated with the tpd ring */ + void *hw_desc; /* descriptor ring virtual address */ + struct edma_sw_desc *sw_desc; /* buffer associated with ring */ + int netdev_bmp; /* Bitmap for per-ring netdevs */ + u32 size; /* descriptor ring length in bytes */ + u16 count; /* number of descriptors in the ring */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 sw_next_to_fill; /* next Tx descriptor to fill */ + u16 sw_next_to_clean; /* next Tx descriptor to clean */ +}; + +/* receive free descriptor (rfd) ring */ +struct edma_rfd_desc_ring { + void *hw_desc; /* descriptor ring virtual address */ + struct edma_sw_desc *sw_desc; /* buffer associated with ring */ + u16 size; /* bytes allocated to sw_desc */ + u16 count; /* number of descriptors in the ring */ + dma_addr_t dma; /* descriptor ring physical address */ + u16 sw_next_to_fill; /* next descriptor to fill */ + u16 sw_next_to_clean; /* next descriptor to clean */ + u16 pending_fill; /* fill pending from previous iteration */ +}; + +/* edma_rfs_flter_node - rfs filter node in hash table */ +struct edma_rfs_filter_node { + struct flow_keys keys; + u32 flow_id; /* flow_id of filter provided by kernel */ + u16 filter_id; /* filter id of filter returned by adaptor */ + u16 rq_id; /* desired rq index */ + struct hlist_node node; /* edma rfs list node */ +}; + +/* edma_rfs_flow_tbl - rfs flow table */ +struct edma_rfs_flow_table { + u16 max_num_filter; /* Maximum number of filters edma supports */ + u16 hashtoclean; /* hash table index to clean next */ + int filter_available; /* Number of free filters available */ + struct hlist_head hlist_head[EDMA_RFS_FLOW_ENTRIES]; + spinlock_t rfs_ftab_lock; + struct timer_list expire_rfs; /* timer function for 
edma_rps_may_expire_flow */ +}; + +/* EDMA net device structure */ +struct edma_adapter { + struct net_device *netdev; /* netdevice */ + struct platform_device *pdev; /* platform device */ + struct edma_common_info *edma_cinfo; /* edma common info */ + struct phy_device *phydev; /* Phy device */ + struct edma_rfs_flow_table rfs; /* edma rfs flow table */ + struct net_device_stats stats; /* netdev statistics */ + set_rfs_filter_callback_t set_rfs_rule; + u32 flags;/* status flags */ + unsigned long state_flags; /* GMAC up/down flags */ + u32 forced_speed; /* link force speed */ + u32 forced_duplex; /* link force duplex */ + u32 link_state; /* phy link state */ + u32 phy_mdio_addr; /* PHY device address on MII interface */ + u32 poll_required; /* check if link polling is required */ + u32 tx_start_offset[CONFIG_NR_CPUS]; /* tx queue start */ + u32 default_vlan_tag; /* vlan tag */ + u32 dp_bitmap; + uint8_t phy_id[MII_BUS_ID_SIZE + 3]; +}; + +int edma_alloc_queues_tx(struct edma_common_info *edma_cinfo); +int edma_alloc_queues_rx(struct edma_common_info *edma_cinfo); +int edma_open(struct net_device *netdev); +int edma_close(struct net_device *netdev); +void edma_free_tx_resources(struct edma_common_info *edma_c_info); +void edma_free_rx_resources(struct edma_common_info *edma_c_info); +int edma_alloc_tx_rings(struct edma_common_info *edma_cinfo); +int edma_alloc_rx_rings(struct edma_common_info *edma_cinfo); +void edma_free_tx_rings(struct edma_common_info *edma_cinfo); +void edma_free_rx_rings(struct edma_common_info *edma_cinfo); +void edma_free_queues(struct edma_common_info *edma_cinfo); +void edma_irq_disable(struct edma_common_info *edma_cinfo); +int edma_reset(struct edma_common_info *edma_cinfo); +int edma_poll(struct napi_struct *napi, int budget); +netdev_tx_t edma_xmit(struct sk_buff *skb, + struct net_device *netdev); +int edma_configure(struct edma_common_info *edma_cinfo); +void edma_irq_enable(struct edma_common_info *edma_cinfo); +void edma_enable_tx_ctrl(struct edma_hw *hw); +void edma_enable_rx_ctrl(struct edma_hw *hw); +void edma_stop_rx_tx(struct edma_hw *hw); +void edma_free_irqs(struct edma_adapter *adapter); +irqreturn_t edma_interrupt(int irq, void *dev); +void edma_write_reg(u16 reg_addr, u32 reg_value); +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value); +struct net_device_stats *edma_get_stats(struct net_device *netdev); +int edma_set_mac_addr(struct net_device *netdev, void *p); +int edma_rx_flow_steer(struct net_device *dev, const struct sk_buff *skb, + u16 rxq, u32 flow_id); +int edma_register_rfs_filter(struct net_device *netdev, + set_rfs_filter_callback_t set_filter); +void edma_flow_may_expire(struct timer_list *t); +void edma_set_ethtool_ops(struct net_device *netdev); +void edma_set_stp_rstp(bool tag); +void edma_assign_ath_hdr_type(int tag); +int edma_get_default_vlan_tag(struct net_device *netdev); +void edma_adjust_link(struct net_device *netdev); +int edma_fill_netdev(struct edma_common_info *edma_cinfo, int qid, int num, int txq_id); +void edma_read_append_stats(struct edma_common_info *edma_cinfo); +void edma_change_tx_coalesce(int usecs); +void edma_change_rx_coalesce(int usecs); +void edma_get_tx_rx_coalesce(u32 *reg_val); +void edma_clear_irq_status(void); +void ess_set_port_status_speed(struct edma_common_info *edma_cinfo, + struct phy_device *phydev, uint8_t port_id); +#endif /* _EDMA_H_ */ diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_axi.c 
b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_axi.c new file mode 100644 index 0000000000..50335b0d14 --- /dev/null +++ b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_axi.c @@ -0,0 +1,1359 @@ +/* + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include "edma.h" +#include "ess_edma.h" + +/* Weight round robin and virtual QID mask */ +#define EDMA_WRR_VID_SCTL_MASK 0xffff + +/* Weight round robin and virtual QID shift */ +#define EDMA_WRR_VID_SCTL_SHIFT 16 + +char edma_axi_driver_name[] = "ess_edma"; +static const u32 default_msg = NETIF_MSG_DRV | NETIF_MSG_PROBE | + NETIF_MSG_LINK | NETIF_MSG_TIMER | NETIF_MSG_IFDOWN | NETIF_MSG_IFUP; + +static u32 edma_hw_addr; + +char edma_tx_irq[16][64]; +char edma_rx_irq[8][64]; +struct net_device *edma_netdev[EDMA_MAX_PORTID_SUPPORTED]; +static u16 tx_start[4] = {EDMA_TXQ_START_CORE0, EDMA_TXQ_START_CORE1, + EDMA_TXQ_START_CORE2, EDMA_TXQ_START_CORE3}; +static u32 tx_mask[4] = {EDMA_TXQ_IRQ_MASK_CORE0, EDMA_TXQ_IRQ_MASK_CORE1, + EDMA_TXQ_IRQ_MASK_CORE2, EDMA_TXQ_IRQ_MASK_CORE3}; + +static u32 edma_default_ltag __read_mostly = EDMA_LAN_DEFAULT_VLAN; +static u32 edma_default_wtag __read_mostly = EDMA_WAN_DEFAULT_VLAN; +static u32 edma_default_group1_vtag __read_mostly = EDMA_DEFAULT_GROUP1_VLAN; +static u32 edma_default_group2_vtag __read_mostly = EDMA_DEFAULT_GROUP2_VLAN; +static u32 edma_default_group3_vtag __read_mostly = EDMA_DEFAULT_GROUP3_VLAN; +static u32 edma_default_group4_vtag __read_mostly = EDMA_DEFAULT_GROUP4_VLAN; +static u32 edma_default_group5_vtag __read_mostly = EDMA_DEFAULT_GROUP5_VLAN; +static u32 edma_rss_idt_val = EDMA_RSS_IDT_VALUE; +static u32 edma_rss_idt_idx; + +static int edma_weight_assigned_to_q __read_mostly; +static int edma_queue_to_virtual_q __read_mostly; +static bool edma_enable_rstp __read_mostly; +static int edma_athr_hdr_eth_type __read_mostly; + +static int page_mode; +module_param(page_mode, int, 0); +MODULE_PARM_DESC(page_mode, "enable page mode"); + +static int overwrite_mode; +module_param(overwrite_mode, int, 0); +MODULE_PARM_DESC(overwrite_mode, "overwrite default page_mode setting"); + +static int jumbo_mru = EDMA_RX_HEAD_BUFF_SIZE; +module_param(jumbo_mru, int, 0); +MODULE_PARM_DESC(jumbo_mru, "enable fraglist support"); + +static int num_rxq = 4; +module_param(num_rxq, int, 0); +MODULE_PARM_DESC(num_rxq, "change the number of rx queues"); + +void edma_write_reg(u16 reg_addr, u32 reg_value) +{ + writel(reg_value, ((void __iomem *)(edma_hw_addr + reg_addr))); +} + +void edma_read_reg(u16 reg_addr, volatile u32 *reg_value) +{ + *reg_value = readl((void __iomem *)(edma_hw_addr + reg_addr)); +} + +static void 
ess_write_reg(struct edma_common_info *edma, u16 reg_addr, u32 reg_value) +{ + writel(reg_value, ((void __iomem *) + ((unsigned long)edma->ess_hw_addr + reg_addr))); +} + +static void ess_read_reg(struct edma_common_info *edma, u16 reg_addr, + volatile u32 *reg_value) +{ + *reg_value = readl((void __iomem *) + ((unsigned long)edma->ess_hw_addr + reg_addr)); +} + +static int ess_reset(struct edma_common_info *edma) +{ + struct device_node *switch_node = NULL; + struct reset_control *ess_rst; + u32 regval; + + switch_node = of_find_node_by_name(NULL, "ess-switch"); + if (!switch_node) { + pr_err("switch-node not found\n"); + return -EINVAL; + } + + ess_rst = of_reset_control_get(switch_node, "ess_rst"); + of_node_put(switch_node); + + if (IS_ERR(ess_rst)) { + pr_err("failed to find ess_rst!\n"); + return -ENOENT; + } + + reset_control_assert(ess_rst); + msleep(10); + reset_control_deassert(ess_rst); + msleep(100); + reset_control_put(ess_rst); + + /* Enable only port 5 <--> port 0 + * bits 0:6 bitmap of ports it can fwd to */ +#define SET_PORT_BMP(r,v) \ + ess_read_reg(edma, r, ®val); \ + ess_write_reg(edma, r, ((regval & ~0x3F) | v)); + + SET_PORT_BMP(ESS_PORT0_LOOKUP_CTRL,0x20); + SET_PORT_BMP(ESS_PORT1_LOOKUP_CTRL,0x00); + SET_PORT_BMP(ESS_PORT2_LOOKUP_CTRL,0x00); + SET_PORT_BMP(ESS_PORT3_LOOKUP_CTRL,0x00); + SET_PORT_BMP(ESS_PORT4_LOOKUP_CTRL,0x00); + SET_PORT_BMP(ESS_PORT5_LOOKUP_CTRL,0x01); + ess_write_reg(edma, ESS_RGMII_CTRL, 0x400); + ess_write_reg(edma, ESS_PORT0_STATUS, ESS_PORT_1G_FDX); + ess_write_reg(edma, ESS_PORT5_STATUS, ESS_PORT_1G_FDX); + ess_write_reg(edma, ESS_PORT0_HEADER_CTRL, 0); +#undef SET_PORT_BMP + + /* forward multicast and broadcast frames to CPU */ + ess_write_reg(edma, ESS_FWD_CTRL1, + (ESS_PORTS_ALL << ESS_FWD_CTRL1_UC_FLOOD_S) | + (ESS_PORTS_ALL << ESS_FWD_CTRL1_MC_FLOOD_S) | + (ESS_PORTS_ALL << ESS_FWD_CTRL1_BC_FLOOD_S)); + + return 0; +} + +void ess_set_port_status_speed(struct edma_common_info *edma, + struct phy_device *phydev, uint8_t port_id) +{ + uint16_t reg_off = ESS_PORT0_STATUS + (4 * port_id); + uint32_t reg_val = 0; + + ess_read_reg(edma, reg_off, ®_val); + + /* reset the speed bits [0:1] */ + reg_val &= ~ESS_PORT_STATUS_SPEED_INV; + + /* set the new speed */ + switch(phydev->speed) { + case SPEED_1000: reg_val |= ESS_PORT_STATUS_SPEED_1000; break; + case SPEED_100: reg_val |= ESS_PORT_STATUS_SPEED_100; break; + case SPEED_10: reg_val |= ESS_PORT_STATUS_SPEED_10; break; + default: reg_val |= ESS_PORT_STATUS_SPEED_INV; break; + } + + /* check full/half duplex */ + if (phydev->duplex) { + reg_val |= ESS_PORT_STATUS_DUPLEX_MODE; + } else { + reg_val &= ~ESS_PORT_STATUS_DUPLEX_MODE; + } + + ess_write_reg(edma, reg_off, reg_val); +} + +/* edma_change_tx_coalesce() + * change tx interrupt moderation timer + */ +void edma_change_tx_coalesce(int usecs) +{ + u32 reg_value; + + /* Here, we right shift the value from the user by 1, this is + * done because IMT resolution timer is 2usecs. 1 count + * of this register corresponds to 2 usecs. + */ + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, ®_value); + reg_value = ((reg_value & 0xffff) | ((usecs >> 1) << 16)); + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value); +} + +/* edma_change_rx_coalesce() + * change rx interrupt moderation timer + */ +void edma_change_rx_coalesce(int usecs) +{ + u32 reg_value; + + /* Here, we right shift the value from the user by 1, this is + * done because IMT resolution timer is 2usecs. 1 count + * of this register corresponds to 2 usecs. 
+ */ + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, ®_value); + reg_value = ((reg_value & 0xffff0000) | (usecs >> 1)); + edma_write_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_value); +} + +/* edma_get_tx_rx_coalesce() + * Get tx/rx interrupt moderation value + */ +void edma_get_tx_rx_coalesce(u32 *reg_val) +{ + edma_read_reg(EDMA_REG_IRQ_MODRT_TIMER_INIT, reg_val); +} + +void edma_read_append_stats(struct edma_common_info *edma_cinfo) +{ + uint32_t *p; + int i; + u32 stat; + + spin_lock_bh(&edma_cinfo->stats_lock); + p = (uint32_t *)&(edma_cinfo->edma_ethstats); + + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) { + edma_read_reg(EDMA_REG_TX_STAT_PKT_Q(i), &stat); + *p += stat; + p++; + } + + for (i = 0; i < EDMA_MAX_TRANSMIT_QUEUE; i++) { + edma_read_reg(EDMA_REG_TX_STAT_BYTE_Q(i), &stat); + *p += stat; + p++; + } + + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) { + edma_read_reg(EDMA_REG_RX_STAT_PKT_Q(i), &stat); + *p += stat; + p++; + } + + for (i = 0; i < EDMA_MAX_RECEIVE_QUEUE; i++) { + edma_read_reg(EDMA_REG_RX_STAT_BYTE_Q(i), &stat); + *p += stat; + p++; + } + + spin_unlock_bh(&edma_cinfo->stats_lock); +} + +static void edma_statistics_timer(struct timer_list *t) +{ + struct edma_common_info *edma_cinfo = + from_timer(edma_cinfo, t, edma_stats_timer); + + edma_read_append_stats(edma_cinfo); + + mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ); +} + +static int edma_enable_stp_rstp(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) + edma_set_stp_rstp(edma_enable_rstp); + + return ret; +} + +static int edma_ath_hdr_eth_type(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) + edma_assign_ath_hdr_type(edma_athr_hdr_eth_type); + + return ret; +} + +static int edma_change_default_lan_vlan(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + int ret; + + if (!edma_netdev[1]) { + pr_err("Netdevice for default_lan does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[1]); + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_ltag; + + return ret; +} + +static int edma_change_default_wan_vlan(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + int ret; + + if (!edma_netdev[0]) { + pr_err("Netdevice for default_wan does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[0]); + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_wtag; + + return ret; +} + +static int edma_change_group1_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[0]) { + pr_err("Netdevice for Group 1 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[0]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group1_vtag; + + return ret; +} + +static int edma_change_group2_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + 
struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[1]) { + pr_err("Netdevice for Group 2 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[1]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group2_vtag; + + return ret; +} + +static int edma_change_group3_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[2]) { + pr_err("Netdevice for Group 3 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[2]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group3_vtag; + + return ret; +} + +static int edma_change_group4_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[3]) { + pr_err("Netdevice for Group 4 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[3]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group4_vtag; + + return ret; +} + +static int edma_change_group5_vtag(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + struct edma_adapter *adapter; + struct edma_common_info *edma_cinfo; + int ret; + + if (!edma_netdev[4]) { + pr_err("Netdevice for Group 5 does not exist\n"); + return -1; + } + + adapter = netdev_priv(edma_netdev[4]); + edma_cinfo = adapter->edma_cinfo; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + + if (write) + adapter->default_vlan_tag = edma_default_group5_vtag; + + return ret; +} + +static int edma_set_rss_idt_value(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write && !ret) + edma_write_reg(EDMA_REG_RSS_IDT(edma_rss_idt_idx), + edma_rss_idt_val); + return ret; +} + +static int edma_set_rss_idt_idx(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret; + u32 old_value = edma_rss_idt_idx; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (!write || ret) + return ret; + + if (edma_rss_idt_idx >= EDMA_NUM_IDT) { + pr_err("Invalid RSS indirection table index %d\n", + edma_rss_idt_idx); + edma_rss_idt_idx = old_value; + return -EINVAL; + } + return ret; +} + +static int edma_weight_assigned_to_queues(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret, queue_id, weight; + u32 reg_data, data, reg_addr; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) { + queue_id = edma_weight_assigned_to_q & EDMA_WRR_VID_SCTL_MASK; + if (queue_id < 0 || queue_id > 15) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + weight = edma_weight_assigned_to_q >> EDMA_WRR_VID_SCTL_SHIFT; + if (weight < 0 || weight > 0xF) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + data = weight << EDMA_WRR_SHIFT(queue_id); + + reg_addr = EDMA_REG_WRR_CTRL_Q0_Q3 + (queue_id & ~0x3); + edma_read_reg(reg_addr, ®_data); + reg_data &= ~(1 << 
EDMA_WRR_SHIFT(queue_id)); + edma_write_reg(reg_addr, data | reg_data); + } + + return ret; +} + +static int edma_queue_to_virtual_queue_map(struct ctl_table *table, int write, + void __user *buffer, size_t *lenp, + loff_t *ppos) +{ + int ret, queue_id, virtual_qid; + u32 reg_data, data, reg_addr; + + ret = proc_dointvec(table, write, buffer, lenp, ppos); + if (write) { + queue_id = edma_queue_to_virtual_q & EDMA_WRR_VID_SCTL_MASK; + if (queue_id < 0 || queue_id > 15) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + virtual_qid = edma_queue_to_virtual_q >> + EDMA_WRR_VID_SCTL_SHIFT; + if (virtual_qid < 0 || virtual_qid > 8) { + pr_err("queue_id not within desired range\n"); + return -EINVAL; + } + + data = virtual_qid << EDMA_VQ_ID_SHIFT(queue_id); + + reg_addr = EDMA_REG_VQ_CTRL0 + (queue_id & ~0x3); + edma_read_reg(reg_addr, ®_data); + reg_data &= ~(1 << EDMA_VQ_ID_SHIFT(queue_id)); + edma_write_reg(reg_addr, data | reg_data); + } + + return ret; +} + +static struct ctl_table edma_table[] = { + { + .procname = "default_lan_tag", + .data = &edma_default_ltag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_default_lan_vlan + }, + { + .procname = "default_wan_tag", + .data = &edma_default_wtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_default_wan_vlan + }, + { + .procname = "weight_assigned_to_queues", + .data = &edma_weight_assigned_to_q, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_weight_assigned_to_queues + }, + { + .procname = "queue_to_virtual_queue_map", + .data = &edma_queue_to_virtual_q, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_queue_to_virtual_queue_map + }, + { + .procname = "enable_stp_rstp", + .data = &edma_enable_rstp, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_enable_stp_rstp + }, + { + .procname = "athr_hdr_eth_type", + .data = &edma_athr_hdr_eth_type, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_ath_hdr_eth_type + }, + { + .procname = "default_group1_vlan_tag", + .data = &edma_default_group1_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group1_vtag + }, + { + .procname = "default_group2_vlan_tag", + .data = &edma_default_group2_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group2_vtag + }, + { + .procname = "default_group3_vlan_tag", + .data = &edma_default_group3_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group3_vtag + }, + { + .procname = "default_group4_vlan_tag", + .data = &edma_default_group4_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group4_vtag + }, + { + .procname = "default_group5_vlan_tag", + .data = &edma_default_group5_vtag, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_change_group5_vtag + }, + { + .procname = "edma_rss_idt_value", + .data = &edma_rss_idt_val, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_set_rss_idt_value + }, + { + .procname = "edma_rss_idt_idx", + .data = &edma_rss_idt_idx, + .maxlen = sizeof(int), + .mode = 0644, + .proc_handler = edma_set_rss_idt_idx + }, + {} +}; + +static int ess_parse(struct edma_common_info *edma) +{ + struct device_node *switch_node; + int ret = -EINVAL; + + switch_node = of_find_node_by_name(NULL, "ess-switch"); + if (!switch_node) { + pr_err("cannot find ess-switch node\n"); + goto out; + } + + edma->ess_hw_addr = of_io_request_and_map(switch_node, + 0, KBUILD_MODNAME); + if 
(!edma->ess_hw_addr) { + pr_err("%s ioremap fail.", __func__); + goto out; + } + + edma->ess_clk = of_clk_get_by_name(switch_node, "ess_clk"); + ret = clk_prepare_enable(edma->ess_clk); +out: + of_node_put(switch_node); + return ret; +} + +/* edma_axi_netdev_ops + * Describe the operations supported by registered netdevices + * + * static const struct net_device_ops edma_axi_netdev_ops = { + * .ndo_open = edma_open, + * .ndo_stop = edma_close, + * .ndo_start_xmit = edma_xmit_frame, + * .ndo_set_mac_address = edma_set_mac_addr, + * } + */ +static const struct net_device_ops edma_axi_netdev_ops = { + .ndo_open = edma_open, + .ndo_stop = edma_close, + .ndo_start_xmit = edma_xmit, + .ndo_set_mac_address = edma_set_mac_addr, +#ifdef CONFIG_RFS_ACCEL + .ndo_rx_flow_steer = edma_rx_flow_steer, + .ndo_register_rfs_filter = edma_register_rfs_filter, + .ndo_get_default_vlan_tag = edma_get_default_vlan_tag, +#endif + .ndo_get_stats = edma_get_stats, +}; + +/* edma_axi_probe() + * Initialise an adapter identified by a platform_device structure. + * + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur in the probe. + */ +static int edma_axi_probe(struct platform_device *pdev) +{ + struct edma_common_info *edma_cinfo; + struct edma_hw *hw; + struct edma_adapter *adapter[EDMA_MAX_PORTID_SUPPORTED]; + struct resource *res; + struct device_node *np = pdev->dev.of_node; + struct device_node *pnp; + struct device_node *mdio_node = NULL; + struct platform_device *mdio_plat = NULL; + struct mii_bus *miibus = NULL; + struct edma_mdio_data *mdio_data = NULL; + int i, j, k, err = 0; + int portid_bmp; + int idx = 0, idx_mac = 0; + + if (CONFIG_NR_CPUS != EDMA_CPU_CORES_SUPPORTED) { + dev_err(&pdev->dev, "Invalid CPU Cores\n"); + return -EINVAL; + } + + if ((num_rxq != 4) && (num_rxq != 8)) { + dev_err(&pdev->dev, "Invalid RX queue, edma probe failed\n"); + return -EINVAL; + } + edma_cinfo = kzalloc(sizeof(struct edma_common_info), GFP_KERNEL); + if (!edma_cinfo) { + err = -ENOMEM; + goto err_alloc; + } + + edma_cinfo->pdev = pdev; + + of_property_read_u32(np, "qcom,num_gmac", &edma_cinfo->num_gmac); + if (edma_cinfo->num_gmac > EDMA_MAX_PORTID_SUPPORTED) { + pr_err("Invalid DTSI Entry for qcom,num_gmac\n"); + err = -EINVAL; + goto err_cinfo; + } + + /* Initialize the netdev array before allocation + * to avoid double free + */ + for (i = 0 ; i < edma_cinfo->num_gmac ; i++) + edma_netdev[i] = NULL; + + for (i = 0 ; i < edma_cinfo->num_gmac ; i++) { + edma_netdev[i] = alloc_etherdev_mqs(sizeof(struct edma_adapter), + EDMA_NETDEV_TX_QUEUE, EDMA_NETDEV_RX_QUEUE); + + if (!edma_netdev[i]) { + dev_err(&pdev->dev, + "net device alloc fails for index=%d\n", i); + err = -ENODEV; + goto err_ioremap; + } + + SET_NETDEV_DEV(edma_netdev[i], &pdev->dev); + platform_set_drvdata(pdev, edma_netdev[i]); + edma_cinfo->netdev[i] = edma_netdev[i]; + } + + /* Fill ring details */ + edma_cinfo->num_tx_queues = EDMA_MAX_TRANSMIT_QUEUE; + edma_cinfo->num_txq_per_core = (EDMA_MAX_TRANSMIT_QUEUE / 4); + edma_cinfo->tx_ring_count = EDMA_TX_RING_SIZE; + + /* Update num rx queues based on module parameter */ + edma_cinfo->num_rx_queues = num_rxq; + edma_cinfo->num_rxq_per_core = ((num_rxq == 4) ? 
1 : 2); + + edma_cinfo->rx_ring_count = EDMA_RX_RING_SIZE; + + hw = &edma_cinfo->hw; + + /* Fill HW defaults */ + hw->tx_intr_mask = EDMA_TX_IMR_NORMAL_MASK; + hw->rx_intr_mask = EDMA_RX_IMR_NORMAL_MASK; + + of_property_read_u32(np, "qcom,page-mode", &edma_cinfo->page_mode); + of_property_read_u32(np, "qcom,rx_head_buf_size", + &hw->rx_head_buff_size); + + if (overwrite_mode) { + dev_info(&pdev->dev, "page mode overwritten"); + edma_cinfo->page_mode = page_mode; + } + + if (jumbo_mru) + edma_cinfo->fraglist_mode = 1; + + if (edma_cinfo->page_mode) + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE_JUMBO; + else if (edma_cinfo->fraglist_mode) + hw->rx_head_buff_size = jumbo_mru; + else if (!hw->rx_head_buff_size) + hw->rx_head_buff_size = EDMA_RX_HEAD_BUFF_SIZE; + + hw->misc_intr_mask = 0; + hw->wol_intr_mask = 0; + + hw->intr_clear_type = EDMA_INTR_CLEAR_TYPE; + hw->intr_sw_idx_w = EDMA_INTR_SW_IDX_W_TYPE; + + /* configure RSS type to the different protocol that can be + * supported + */ + hw->rss_type = EDMA_RSS_TYPE_IPV4TCP | EDMA_RSS_TYPE_IPV6_TCP | + EDMA_RSS_TYPE_IPV4_UDP | EDMA_RSS_TYPE_IPV6UDP | + EDMA_RSS_TYPE_IPV4 | EDMA_RSS_TYPE_IPV6; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + + edma_cinfo->hw.hw_addr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(edma_cinfo->hw.hw_addr)) { + err = PTR_ERR(edma_cinfo->hw.hw_addr); + goto err_ioremap; + } + + edma_hw_addr = (u32)edma_cinfo->hw.hw_addr; + + /* Parse tx queue interrupt number from device tree */ + for (i = 0; i < edma_cinfo->num_tx_queues; i++) + edma_cinfo->tx_irq[i] = platform_get_irq(pdev, i); + + /* Parse rx queue interrupt number from device tree + * Here we are setting j to point to the point where we + * left tx interrupt parsing(i.e 16) and run run the loop + * from 0 to 7 to parse rx interrupt number. + */ + for (i = 0, j = edma_cinfo->num_tx_queues, k = 0; + i < edma_cinfo->num_rx_queues; i++) { + edma_cinfo->rx_irq[k] = platform_get_irq(pdev, j); + k += ((num_rxq == 4) ? 2 : 1); + j += ((num_rxq == 4) ? 
2 : 1); + } + + edma_cinfo->rx_head_buffer_len = edma_cinfo->hw.rx_head_buff_size; + edma_cinfo->rx_page_buffer_len = PAGE_SIZE; + + err = edma_alloc_queues_tx(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of TX queue failed\n"); + goto err_tx_qinit; + } + + err = edma_alloc_queues_rx(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of RX queue failed\n"); + goto err_rx_qinit; + } + + err = edma_alloc_tx_rings(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of TX resources failed\n"); + goto err_tx_rinit; + } + + err = edma_alloc_rx_rings(edma_cinfo); + if (err) { + dev_err(&pdev->dev, "Allocation of RX resources failed\n"); + goto err_rx_rinit; + } + + /* Initialize netdev and netdev bitmap for transmit descriptor rings */ + for (i = 0; i < edma_cinfo->num_tx_queues; i++) { + struct edma_tx_desc_ring *etdr = edma_cinfo->tpd_ring[i]; + int j; + + etdr->netdev_bmp = 0; + for (j = 0; j < EDMA_MAX_NETDEV_PER_QUEUE; j++) { + etdr->netdev[j] = NULL; + etdr->nq[j] = NULL; + } + } + + if (of_property_read_bool(np, "qcom,mdio_supported")) { + mdio_node = of_find_compatible_node(NULL, NULL, + "qcom,ipq4019-mdio"); + if (!mdio_node) { + dev_err(&pdev->dev, "cannot find mdio node by phandle"); + err = -EIO; + goto err_mdiobus_init_fail; + } + + mdio_plat = of_find_device_by_node(mdio_node); + if (!mdio_plat) { + dev_err(&pdev->dev, + "cannot find platform device from mdio node"); + of_node_put(mdio_node); + err = -EIO; + goto err_mdiobus_init_fail; + } + + mdio_data = dev_get_drvdata(&mdio_plat->dev); + if (!mdio_data) { + dev_err(&pdev->dev, + "cannot get mii bus reference from device data"); + of_node_put(mdio_node); + err = -EIO; + goto err_mdiobus_init_fail; + } + + miibus = mdio_data->mii_bus; + } + + if (of_property_read_bool(np, "qcom,single-phy") && + edma_cinfo->num_gmac == 1) { + err = ess_parse(edma_cinfo); + if (!err) + err = ess_reset(edma_cinfo); + if (err) + goto err_single_phy_init; + else + edma_cinfo->is_single_phy = true; + } + + for_each_available_child_of_node(np, pnp) { + const char *mac_addr; + + /* this check is needed if parent and daughter dts have + * different number of gmac nodes + */ + if (idx_mac == edma_cinfo->num_gmac) { + of_node_put(np); + break; + } + + mac_addr = of_get_mac_address(pnp); + if (!IS_ERR(mac_addr)) + memcpy(edma_netdev[idx_mac]->dev_addr, mac_addr, ETH_ALEN); + + idx_mac++; + } + + /* Populate the adapter structure register the netdevice */ + for (i = 0; i < edma_cinfo->num_gmac; i++) { + int k, m; + + adapter[i] = netdev_priv(edma_netdev[i]); + adapter[i]->netdev = edma_netdev[i]; + adapter[i]->pdev = pdev; + for (j = 0; j < CONFIG_NR_CPUS; j++) { + m = i % 2; + adapter[i]->tx_start_offset[j] = + ((j << EDMA_TX_CPU_START_SHIFT) + (m << 1)); + /* Share the queues with available net-devices. + * For instance , with 5 net-devices + * eth0/eth2/eth4 will share q0,q1,q4,q5,q8,q9,q12,q13 + * and eth1/eth3 will get the remaining. 
+ */ + for (k = adapter[i]->tx_start_offset[j]; k < + (adapter[i]->tx_start_offset[j] + 2); k++) { + if (edma_fill_netdev(edma_cinfo, k, i, j)) { + pr_err("Netdev overflow Error\n"); + goto err_register; + } + } + } + + adapter[i]->edma_cinfo = edma_cinfo; + edma_netdev[i]->netdev_ops = &edma_axi_netdev_ops; + edma_netdev[i]->max_mtu = 9000; + edma_netdev[i]->features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM + | NETIF_F_HW_VLAN_CTAG_RX | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO; + edma_netdev[i]->hw_features = NETIF_F_HW_CSUM | NETIF_F_RXCSUM | + NETIF_F_HW_VLAN_CTAG_RX + | NETIF_F_SG | NETIF_F_TSO | NETIF_F_GRO; + edma_netdev[i]->vlan_features = NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO; + edma_netdev[i]->wanted_features = NETIF_F_HW_CSUM | NETIF_F_SG | + NETIF_F_TSO | NETIF_F_GRO; + +#ifdef CONFIG_RFS_ACCEL + edma_netdev[i]->features |= NETIF_F_NTUPLE; + edma_netdev[i]->hw_features |= NETIF_F_NTUPLE; + edma_netdev[i]->vlan_features |= NETIF_F_NTUPLE; + edma_netdev[i]->wanted_features |= NETIF_F_NTUPLE; +#endif + edma_set_ethtool_ops(edma_netdev[i]); + + /* This just fill in some default MAC address + */ + if (!is_valid_ether_addr(edma_netdev[i]->dev_addr)) { + random_ether_addr(edma_netdev[i]->dev_addr); + pr_info("EDMA using MAC@ - using"); + pr_info("%02x:%02x:%02x:%02x:%02x:%02x\n", + *(edma_netdev[i]->dev_addr), + *(edma_netdev[i]->dev_addr + 1), + *(edma_netdev[i]->dev_addr + 2), + *(edma_netdev[i]->dev_addr + 3), + *(edma_netdev[i]->dev_addr + 4), + *(edma_netdev[i]->dev_addr + 5)); + } + + err = register_netdev(edma_netdev[i]); + if (err) + goto err_register; + + /* carrier off reporting is important to + * ethtool even BEFORE open + */ + netif_carrier_off(edma_netdev[i]); + + /* Allocate reverse irq cpu mapping structure for + * receive queues + */ +#ifdef CONFIG_RFS_ACCEL + edma_netdev[i]->rx_cpu_rmap = + alloc_irq_cpu_rmap(EDMA_NETDEV_RX_QUEUE); + if (!edma_netdev[i]->rx_cpu_rmap) { + err = -ENOMEM; + goto err_rmap_alloc_fail; + } +#endif + } + + for (i = 0; i < EDMA_MAX_PORTID_BITMAP_INDEX; i++) + edma_cinfo->portid_netdev_lookup_tbl[i] = NULL; + + for_each_available_child_of_node(np, pnp) { + const uint32_t *vlan_tag = NULL; + int len; + + /* this check is needed if parent and daughter dts have + * different number of gmac nodes + */ + if (idx == edma_cinfo->num_gmac) + break; + + /* Populate port-id to netdev lookup table */ + vlan_tag = of_get_property(pnp, "vlan_tag", &len); + if (!vlan_tag) { + pr_err("Vlan tag parsing Failed.\n"); + goto err_rmap_alloc_fail; + } + + adapter[idx]->default_vlan_tag = of_read_number(vlan_tag, 1); + vlan_tag++; + portid_bmp = of_read_number(vlan_tag, 1); + adapter[idx]->dp_bitmap = portid_bmp; + + portid_bmp = portid_bmp >> 1; /* We ignore CPU Port bit 0 */ + while (portid_bmp) { + int port_bit = ffs(portid_bmp); + + if (port_bit > EDMA_MAX_PORTID_SUPPORTED) + goto err_rmap_alloc_fail; + edma_cinfo->portid_netdev_lookup_tbl[port_bit] = + edma_netdev[idx]; + portid_bmp &= ~(1 << (port_bit - 1)); + } + + if (!of_property_read_u32(pnp, "qcom,poll_required", + &adapter[idx]->poll_required)) { + if (adapter[idx]->poll_required) { + of_property_read_u32(pnp, "qcom,phy_mdio_addr", + &adapter[idx]->phy_mdio_addr); + of_property_read_u32(pnp, "qcom,forced_speed", + &adapter[idx]->forced_speed); + of_property_read_u32(pnp, "qcom,forced_duplex", + &adapter[idx]->forced_duplex); + + /* create a phyid using MDIO bus id + * and MDIO bus address + */ + snprintf(adapter[idx]->phy_id, + MII_BUS_ID_SIZE + 3, PHY_ID_FMT, + miibus->id, + 
adapter[idx]->phy_mdio_addr); + } + } else { + adapter[idx]->poll_required = 0; + adapter[idx]->forced_speed = SPEED_1000; + adapter[idx]->forced_duplex = DUPLEX_FULL; + } + + idx++; + } + + edma_cinfo->edma_ctl_table_hdr = register_net_sysctl(&init_net, + "net/edma", + edma_table); + if (!edma_cinfo->edma_ctl_table_hdr) { + dev_err(&pdev->dev, "edma sysctl table hdr not registered\n"); + goto err_unregister_sysctl_tbl; + } + + /* Disable all 16 Tx and 8 rx irqs */ + edma_irq_disable(edma_cinfo); + + err = edma_reset(edma_cinfo); + if (err) { + err = -EIO; + goto err_reset; + } + + /* populate per_core_info, do a napi_Add, request 16 TX irqs, + * 8 RX irqs, do a napi enable + */ + for (i = 0; i < CONFIG_NR_CPUS; i++) { + u8 rx_start; + + edma_cinfo->edma_percpu_info[i].napi.state = 0; + + netif_napi_add(edma_netdev[0], + &edma_cinfo->edma_percpu_info[i].napi, + edma_poll, 64); + napi_enable(&edma_cinfo->edma_percpu_info[i].napi); + edma_cinfo->edma_percpu_info[i].tx_mask = tx_mask[i]; + edma_cinfo->edma_percpu_info[i].rx_mask = EDMA_RX_PER_CPU_MASK + << (i << EDMA_RX_PER_CPU_MASK_SHIFT); + edma_cinfo->edma_percpu_info[i].tx_start = tx_start[i]; + edma_cinfo->edma_percpu_info[i].rx_start = + i << EDMA_RX_CPU_START_SHIFT; + rx_start = i << EDMA_RX_CPU_START_SHIFT; + edma_cinfo->edma_percpu_info[i].tx_status = 0; + edma_cinfo->edma_percpu_info[i].rx_status = 0; + edma_cinfo->edma_percpu_info[i].edma_cinfo = edma_cinfo; + + /* Request irq per core */ + for (j = edma_cinfo->edma_percpu_info[i].tx_start; + j < tx_start[i] + 4; j++) { + sprintf(&edma_tx_irq[j][0], "edma_eth_tx%d", j); + err = request_irq(edma_cinfo->tx_irq[j], + edma_interrupt, + 0, + &edma_tx_irq[j][0], + &edma_cinfo->edma_percpu_info[i]); + if (err) + goto err_reset; + } + + for (j = edma_cinfo->edma_percpu_info[i].rx_start; + j < (rx_start + + ((edma_cinfo->num_rx_queues == 4) ? 1 : 2)); + j++) { + sprintf(&edma_rx_irq[j][0], "edma_eth_rx%d", j); + err = request_irq(edma_cinfo->rx_irq[j], + edma_interrupt, + 0, + &edma_rx_irq[j][0], + &edma_cinfo->edma_percpu_info[i]); + if (err) + goto err_reset; + } + +#ifdef CONFIG_RFS_ACCEL + for (j = edma_cinfo->edma_percpu_info[i].rx_start; + j < rx_start + 2; j += 2) { + err = irq_cpu_rmap_add(edma_netdev[0]->rx_cpu_rmap, + edma_cinfo->rx_irq[j]); + if (err) + goto err_rmap_add_fail; + } +#endif + } + + /* Used to clear interrupt status, allocate rx buffer, + * configure edma descriptors registers + */ + err = edma_configure(edma_cinfo); + if (err) { + err = -EIO; + goto err_configure; + } + + /* Configure RSS indirection table. + * 128 hash will be configured in the following + * pattern: hash{0,1,2,3} = {Q0,Q2,Q4,Q6} respectively + * and so on + */ + for (i = 0; i < EDMA_NUM_IDT; i++) + edma_write_reg(EDMA_REG_RSS_IDT(i), EDMA_RSS_IDT_VALUE); + + /* Configure load balance mapping table. + * 4 table entry will be configured according to the + * following pattern: load_balance{0,1,2,3} = {Q0,Q1,Q3,Q4} + * respectively. 
+ */ + edma_write_reg(EDMA_REG_LB_RING, EDMA_LB_REG_VALUE); + + /* Configure Virtual queue for Tx rings + * User can also change this value runtime through + * a sysctl + */ + edma_write_reg(EDMA_REG_VQ_CTRL0, EDMA_VQ_REG_VALUE); + edma_write_reg(EDMA_REG_VQ_CTRL1, EDMA_VQ_REG_VALUE); + + /* Configure Max AXI Burst write size to 128 bytes*/ + edma_write_reg(EDMA_REG_AXIW_CTRL_MAXWRSIZE, + EDMA_AXIW_MAXWRSIZE_VALUE); + + /* Enable All 16 tx and 8 rx irq mask */ + edma_irq_enable(edma_cinfo); + edma_enable_tx_ctrl(&edma_cinfo->hw); + edma_enable_rx_ctrl(&edma_cinfo->hw); + + for (i = 0; i < edma_cinfo->num_gmac; i++) { + if (adapter[i]->poll_required) { + int phy_mode = of_get_phy_mode(np); + + if (phy_mode < 0) + phy_mode = PHY_INTERFACE_MODE_SGMII; + adapter[i]->phydev = + phy_connect(edma_netdev[i], + (const char *)adapter[i]->phy_id, + &edma_adjust_link, + phy_mode); + if (IS_ERR(adapter[i]->phydev)) { + dev_dbg(&pdev->dev, "PHY attach FAIL"); + err = -EIO; + goto edma_phy_attach_fail; + } else { + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + adapter[i]->phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + adapter[i]->phydev->advertising); + linkmode_set_bit(ETHTOOL_LINK_MODE_Pause_BIT, + adapter[i]->phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_Asym_Pause_BIT, + adapter[i]->phydev->supported); + } + } else { + adapter[i]->phydev = NULL; + } + } + + spin_lock_init(&edma_cinfo->stats_lock); + + timer_setup(&edma_cinfo->edma_stats_timer, edma_statistics_timer, 0); + mod_timer(&edma_cinfo->edma_stats_timer, jiffies + 1*HZ); + + return 0; + +edma_phy_attach_fail: + miibus = NULL; +err_configure: +#ifdef CONFIG_RFS_ACCEL + for (i = 0; i < edma_cinfo->num_gmac; i++) { + free_irq_cpu_rmap(adapter[i]->netdev->rx_cpu_rmap); + adapter[i]->netdev->rx_cpu_rmap = NULL; + } +#endif +err_rmap_add_fail: + edma_free_irqs(adapter[0]); + for (i = 0; i < CONFIG_NR_CPUS; i++) + napi_disable(&edma_cinfo->edma_percpu_info[i].napi); +err_reset: +err_unregister_sysctl_tbl: +err_rmap_alloc_fail: + for (i = 0; i < edma_cinfo->num_gmac; i++) + unregister_netdev(edma_netdev[i]); +err_register: +err_single_phy_init: + iounmap(edma_cinfo->ess_hw_addr); + clk_disable_unprepare(edma_cinfo->ess_clk); +err_mdiobus_init_fail: + edma_free_rx_rings(edma_cinfo); +err_rx_rinit: + edma_free_tx_rings(edma_cinfo); +err_tx_rinit: + edma_free_queues(edma_cinfo); +err_rx_qinit: +err_tx_qinit: + iounmap(edma_cinfo->hw.hw_addr); +err_ioremap: + for (i = 0; i < edma_cinfo->num_gmac; i++) { + if (edma_netdev[i]) + free_netdev(edma_netdev[i]); + } +err_cinfo: + kfree(edma_cinfo); +err_alloc: + return err; +} + +/* edma_axi_remove() + * Device Removal Routine + * + * edma_axi_remove is called by the platform subsystem to alert the driver + * that it should release a platform device. 
+ */ +static int edma_axi_remove(struct platform_device *pdev) +{ + struct edma_adapter *adapter = netdev_priv(edma_netdev[0]); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + struct edma_hw *hw = &edma_cinfo->hw; + int i; + + for (i = 0; i < edma_cinfo->num_gmac; i++) + unregister_netdev(edma_netdev[i]); + + edma_stop_rx_tx(hw); + for (i = 0; i < CONFIG_NR_CPUS; i++) + napi_disable(&edma_cinfo->edma_percpu_info[i].napi); + + edma_irq_disable(edma_cinfo); + edma_write_reg(EDMA_REG_RX_ISR, 0xff); + edma_write_reg(EDMA_REG_TX_ISR, 0xffff); +#ifdef CONFIG_RFS_ACCEL + for (i = 0; i < edma_cinfo->num_gmac; i++) { + free_irq_cpu_rmap(edma_netdev[i]->rx_cpu_rmap); + edma_netdev[i]->rx_cpu_rmap = NULL; + } +#endif + + for (i = 0; i < edma_cinfo->num_gmac; i++) { + struct edma_adapter *adapter = netdev_priv(edma_netdev[i]); + + if (adapter->phydev) + phy_disconnect(adapter->phydev); + } + + del_timer_sync(&edma_cinfo->edma_stats_timer); + edma_free_irqs(adapter); + unregister_net_sysctl_table(edma_cinfo->edma_ctl_table_hdr); + iounmap(edma_cinfo->ess_hw_addr); + clk_disable_unprepare(edma_cinfo->ess_clk); + edma_free_tx_resources(edma_cinfo); + edma_free_rx_resources(edma_cinfo); + edma_free_tx_rings(edma_cinfo); + edma_free_rx_rings(edma_cinfo); + edma_free_queues(edma_cinfo); + for (i = 0; i < edma_cinfo->num_gmac; i++) + free_netdev(edma_netdev[i]); + + kfree(edma_cinfo); + + return 0; +} + +static const struct of_device_id edma_of_mtable[] = { + {.compatible = "qcom,ess-edma" }, + {} +}; +MODULE_DEVICE_TABLE(of, edma_of_mtable); + +static struct platform_driver edma_axi_driver = { + .driver = { + .name = edma_axi_driver_name, + .of_match_table = edma_of_mtable, + }, + .probe = edma_axi_probe, + .remove = edma_axi_remove, +}; + +module_platform_driver(edma_axi_driver); + +MODULE_AUTHOR("Qualcomm Atheros Inc"); +MODULE_DESCRIPTION("QCA ESS EDMA driver"); +MODULE_LICENSE("GPL"); diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c new file mode 100644 index 0000000000..1270e20a90 --- /dev/null +++ b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/edma_ethtool.c @@ -0,0 +1,384 @@ +/* + * Copyright (c) 2015 - 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
+ */ + +#include +#include +#include +#include "edma.h" + +struct edma_ethtool_stats { + uint8_t stat_string[ETH_GSTRING_LEN]; + uint32_t stat_offset; +}; + +#define EDMA_STAT(m) offsetof(struct edma_ethtool_statistics, m) +#define DRVINFO_LEN 32 + +/* Array of strings describing statistics + */ +static const struct edma_ethtool_stats edma_gstrings_stats[] = { + {"tx_q0_pkt", EDMA_STAT(tx_q0_pkt)}, + {"tx_q1_pkt", EDMA_STAT(tx_q1_pkt)}, + {"tx_q2_pkt", EDMA_STAT(tx_q2_pkt)}, + {"tx_q3_pkt", EDMA_STAT(tx_q3_pkt)}, + {"tx_q4_pkt", EDMA_STAT(tx_q4_pkt)}, + {"tx_q5_pkt", EDMA_STAT(tx_q5_pkt)}, + {"tx_q6_pkt", EDMA_STAT(tx_q6_pkt)}, + {"tx_q7_pkt", EDMA_STAT(tx_q7_pkt)}, + {"tx_q8_pkt", EDMA_STAT(tx_q8_pkt)}, + {"tx_q9_pkt", EDMA_STAT(tx_q9_pkt)}, + {"tx_q10_pkt", EDMA_STAT(tx_q10_pkt)}, + {"tx_q11_pkt", EDMA_STAT(tx_q11_pkt)}, + {"tx_q12_pkt", EDMA_STAT(tx_q12_pkt)}, + {"tx_q13_pkt", EDMA_STAT(tx_q13_pkt)}, + {"tx_q14_pkt", EDMA_STAT(tx_q14_pkt)}, + {"tx_q15_pkt", EDMA_STAT(tx_q15_pkt)}, + {"tx_q0_byte", EDMA_STAT(tx_q0_byte)}, + {"tx_q1_byte", EDMA_STAT(tx_q1_byte)}, + {"tx_q2_byte", EDMA_STAT(tx_q2_byte)}, + {"tx_q3_byte", EDMA_STAT(tx_q3_byte)}, + {"tx_q4_byte", EDMA_STAT(tx_q4_byte)}, + {"tx_q5_byte", EDMA_STAT(tx_q5_byte)}, + {"tx_q6_byte", EDMA_STAT(tx_q6_byte)}, + {"tx_q7_byte", EDMA_STAT(tx_q7_byte)}, + {"tx_q8_byte", EDMA_STAT(tx_q8_byte)}, + {"tx_q9_byte", EDMA_STAT(tx_q9_byte)}, + {"tx_q10_byte", EDMA_STAT(tx_q10_byte)}, + {"tx_q11_byte", EDMA_STAT(tx_q11_byte)}, + {"tx_q12_byte", EDMA_STAT(tx_q12_byte)}, + {"tx_q13_byte", EDMA_STAT(tx_q13_byte)}, + {"tx_q14_byte", EDMA_STAT(tx_q14_byte)}, + {"tx_q15_byte", EDMA_STAT(tx_q15_byte)}, + {"rx_q0_pkt", EDMA_STAT(rx_q0_pkt)}, + {"rx_q1_pkt", EDMA_STAT(rx_q1_pkt)}, + {"rx_q2_pkt", EDMA_STAT(rx_q2_pkt)}, + {"rx_q3_pkt", EDMA_STAT(rx_q3_pkt)}, + {"rx_q4_pkt", EDMA_STAT(rx_q4_pkt)}, + {"rx_q5_pkt", EDMA_STAT(rx_q5_pkt)}, + {"rx_q6_pkt", EDMA_STAT(rx_q6_pkt)}, + {"rx_q7_pkt", EDMA_STAT(rx_q7_pkt)}, + {"rx_q0_byte", EDMA_STAT(rx_q0_byte)}, + {"rx_q1_byte", EDMA_STAT(rx_q1_byte)}, + {"rx_q2_byte", EDMA_STAT(rx_q2_byte)}, + {"rx_q3_byte", EDMA_STAT(rx_q3_byte)}, + {"rx_q4_byte", EDMA_STAT(rx_q4_byte)}, + {"rx_q5_byte", EDMA_STAT(rx_q5_byte)}, + {"rx_q6_byte", EDMA_STAT(rx_q6_byte)}, + {"rx_q7_byte", EDMA_STAT(rx_q7_byte)}, + {"tx_desc_error", EDMA_STAT(tx_desc_error)}, + {"rx_alloc_fail_ctr", EDMA_STAT(rx_alloc_fail_ctr)}, +}; + +#define EDMA_STATS_LEN ARRAY_SIZE(edma_gstrings_stats) + +/* edma_get_strset_count() + * Get strset count + */ +static int edma_get_strset_count(struct net_device *netdev, + int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return EDMA_STATS_LEN; + default: + netdev_dbg(netdev, "%s: Invalid string set", __func__); + return -EOPNOTSUPP; + } +} + + +/* edma_get_strings() + * get stats string + */ +static void edma_get_strings(struct net_device *netdev, uint32_t stringset, + uint8_t *data) +{ + uint8_t *p = data; + uint32_t i; + + switch (stringset) { + case ETH_SS_STATS: + for (i = 0; i < EDMA_STATS_LEN; i++) { + memcpy(p, edma_gstrings_stats[i].stat_string, + min((size_t)ETH_GSTRING_LEN, + strlen(edma_gstrings_stats[i].stat_string) + + 1)); + p += ETH_GSTRING_LEN; + } + break; + } +} + +/* edma_get_ethtool_stats() + * Get ethtool statistics + */ +static void edma_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, uint64_t *data) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + int i; + uint8_t *p = NULL; + + 
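+ /* edma_read_append_stats() refreshes the accumulated HW queue counters in edma_cinfo->edma_ethstats; the loop below copies them out in edma_gstrings_stats order */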
edma_read_append_stats(edma_cinfo); + + for(i = 0; i < EDMA_STATS_LEN; i++) { + p = (uint8_t *)&(edma_cinfo->edma_ethstats) + + edma_gstrings_stats[i].stat_offset; + data[i] = *(uint32_t *)p; + } +} + +/* edma_get_drvinfo() + * get edma driver info + */ +static void edma_get_drvinfo(struct net_device *dev, + struct ethtool_drvinfo *info) +{ + strlcpy(info->driver, "ess_edma", DRVINFO_LEN); + strlcpy(info->bus_info, "axi", ETHTOOL_BUSINFO_LEN); +} + +/* edma_nway_reset() + * Reset the phy, if available. + */ +static int edma_nway_reset(struct net_device *netdev) +{ + return -EINVAL; +} + +/* edma_get_wol() + * get wake on lan info + */ +static void edma_get_wol(struct net_device *netdev, + struct ethtool_wolinfo *wol) +{ + wol->supported = 0; + wol->wolopts = 0; +} + +/* edma_get_msglevel() + * get message level. + */ +static uint32_t edma_get_msglevel(struct net_device *netdev) +{ + return 0; +} + +/* edma_get_settings() + * Get edma settings + */ +static int edma_get_settings(struct net_device *netdev, + struct ethtool_link_ksettings *cmd) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + + if (adapter->poll_required) { + struct phy_device *phydev = NULL; + uint16_t phyreg; + + if ((adapter->forced_speed != SPEED_UNKNOWN) + && !(adapter->poll_required)) + return -EPERM; + + phydev = adapter->phydev; + + linkmode_copy(cmd->link_modes.advertising, phydev->advertising); + linkmode_copy(cmd->link_modes.supported, phydev->supported); + + cmd->base.autoneg = phydev->autoneg; + + if (adapter->link_state == __EDMA_LINKDOWN) { + cmd->base.speed = SPEED_UNKNOWN; + cmd->base.duplex = DUPLEX_UNKNOWN; + } else { + cmd->base.speed = phydev->speed; + cmd->base.duplex = phydev->duplex; + } + + cmd->base.phy_address = adapter->phy_mdio_addr; + + phyreg = (uint16_t)phy_read(adapter->phydev, MII_LPA); + if (phyreg & LPA_10HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Half_BIT, + cmd->link_modes.lp_advertising); + + if (phyreg & LPA_10FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_10baseT_Full_BIT, + cmd->link_modes.lp_advertising); + + if (phyreg & LPA_100HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Half_BIT, + cmd->link_modes.lp_advertising); + + if (phyreg & LPA_100FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_100baseT_Full_BIT, + cmd->link_modes.lp_advertising); + + phyreg = (uint16_t)phy_read(adapter->phydev, MII_STAT1000); + if (phyreg & LPA_1000HALF) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Half_BIT, + cmd->link_modes.lp_advertising); + + if (phyreg & LPA_1000FULL) + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, + cmd->link_modes.lp_advertising); + } else { + /* If the speed/duplex for this GMAC is forced and we + * are not polling for link state changes, return the + * values as specified by platform. This will be true + * for GMACs connected to switch, and interfaces that + * do not use a PHY. 
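+ * In practice the code below reports such ports as a fixed 1000 Mbit/s full-duplex link.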
+ */ + if (!(adapter->poll_required)) { + if (adapter->forced_speed != SPEED_UNKNOWN) { + /* set speed and duplex */ + cmd->base.speed = SPEED_1000; + cmd->base.duplex = DUPLEX_FULL; + + /* Populate capabilities advertised by self */ + linkmode_zero(cmd->link_modes.advertising); + cmd->base.autoneg = 0; + cmd->base.port = PORT_TP; + cmd->base.transceiver = XCVR_EXTERNAL; + } else { + /* non link polled and non + * forced speed/duplex interface + */ + return -EIO; + } + } + } + + return 0; +} + +/* edma_set_settings() + * Set EDMA settings + */ +static int edma_set_settings(struct net_device *netdev, + const struct ethtool_link_ksettings *cmd) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + struct phy_device *phydev = NULL; + + if ((adapter->forced_speed != SPEED_UNKNOWN) && + !adapter->poll_required) + return -EPERM; + + phydev = adapter->phydev; + linkmode_copy(phydev->advertising, cmd->link_modes.advertising); + linkmode_copy(phydev->supported, cmd->link_modes.supported); + phydev->autoneg = cmd->base.autoneg; + phydev->speed = cmd->base.speed; + phydev->duplex = cmd->base.duplex; + + genphy_config_aneg(phydev); + + return 0; +} + +/* edma_get_coalesce + * get interrupt mitigation + */ +static int edma_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + u32 reg_val; + + edma_get_tx_rx_coalesce(®_val); + + /* We read the Interrupt Moderation Timer(IMT) register value, + * use lower 16 bit for rx and higher 16 bit for Tx. We do a + * left shift by 1, because IMT resolution timer is 2usecs. + * Hence the value given by the register is multiplied by 2 to + * get the actual time in usecs. + */ + ec->tx_coalesce_usecs = (((reg_val >> 16) & 0xffff) << 1); + ec->rx_coalesce_usecs = ((reg_val & 0xffff) << 1); + + return 0; +} + +/* edma_set_coalesce + * set interrupt mitigation + */ +static int edma_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) +{ + if (ec->tx_coalesce_usecs) + edma_change_tx_coalesce(ec->tx_coalesce_usecs); + if (ec->rx_coalesce_usecs) + edma_change_rx_coalesce(ec->rx_coalesce_usecs); + + return 0; +} + +/* edma_set_priv_flags() + * Set EDMA private flags + */ +static int edma_set_priv_flags(struct net_device *netdev, u32 flags) +{ + return 0; +} + +/* edma_get_priv_flags() + * get edma driver flags + */ +static u32 edma_get_priv_flags(struct net_device *netdev) +{ + return 0; +} + +/* edma_get_ringparam() + * get ring size + */ +static void edma_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) +{ + struct edma_adapter *adapter = netdev_priv(netdev); + struct edma_common_info *edma_cinfo = adapter->edma_cinfo; + + ring->tx_max_pending = edma_cinfo->tx_ring_count; + ring->rx_max_pending = edma_cinfo->rx_ring_count; +} + +/* Ethtool operations + */ +static const struct ethtool_ops edma_ethtool_ops = { + .get_drvinfo = &edma_get_drvinfo, + .get_link = ðtool_op_get_link, + .get_msglevel = &edma_get_msglevel, + .nway_reset = &edma_nway_reset, + .get_wol = &edma_get_wol, + .get_link_ksettings = &edma_get_settings, + .set_link_ksettings = &edma_set_settings, + .get_strings = &edma_get_strings, + .get_sset_count = &edma_get_strset_count, + .get_ethtool_stats = &edma_get_ethtool_stats, + .get_coalesce = &edma_get_coalesce, + .set_coalesce = &edma_set_coalesce, + .get_priv_flags = edma_get_priv_flags, + .set_priv_flags = edma_set_priv_flags, + .get_ringparam = edma_get_ringparam, +}; + +/* edma_set_ethtool_ops + * Set ethtool operations + */ +void edma_set_ethtool_ops(struct net_device *netdev) +{ + 
netdev->ethtool_ops = &edma_ethtool_ops; +} diff --git a/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/ess_edma.h b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/ess_edma.h new file mode 100644 index 0000000000..021be98a38 --- /dev/null +++ b/target/linux/ipq40xx/files/drivers/net/ethernet/qualcomm/essedma/ess_edma.h @@ -0,0 +1,389 @@ +/* + * Copyright (c) 2014 - 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#ifndef _ESS_EDMA_H_ +#define _ESS_EDMA_H_ + +#include + +struct edma_adapter; +struct edma_hw; + +/* register definition */ +#define EDMA_REG_MAS_CTRL 0x0 +#define EDMA_REG_TIMEOUT_CTRL 0x004 +#define EDMA_REG_DBG0 0x008 +#define EDMA_REG_DBG1 0x00C +#define EDMA_REG_SW_CTRL0 0x100 +#define EDMA_REG_SW_CTRL1 0x104 + +/* Interrupt Status Register */ +#define EDMA_REG_RX_ISR 0x200 +#define EDMA_REG_TX_ISR 0x208 +#define EDMA_REG_MISC_ISR 0x210 +#define EDMA_REG_WOL_ISR 0x218 + +#define EDMA_MISC_ISR_RX_URG_Q(x) (1 << x) + +#define EDMA_MISC_ISR_AXIR_TIMEOUT 0x00000100 +#define EDMA_MISC_ISR_AXIR_ERR 0x00000200 +#define EDMA_MISC_ISR_TXF_DEAD 0x00000400 +#define EDMA_MISC_ISR_AXIW_ERR 0x00000800 +#define EDMA_MISC_ISR_AXIW_TIMEOUT 0x00001000 + +#define EDMA_WOL_ISR 0x00000001 + +/* Interrupt Mask Register */ +#define EDMA_REG_MISC_IMR 0x214 +#define EDMA_REG_WOL_IMR 0x218 + +#define EDMA_RX_IMR_NORMAL_MASK 0x1 +#define EDMA_TX_IMR_NORMAL_MASK 0x1 +#define EDMA_MISC_IMR_NORMAL_MASK 0x80001FFF +#define EDMA_WOL_IMR_NORMAL_MASK 0x1 + +/* Edma receive consumer index */ +#define EDMA_REG_RX_SW_CONS_IDX_Q(x) (0x220 + ((x) << 2)) /* x is the queue id */ +/* Edma transmit consumer index */ +#define EDMA_REG_TX_SW_CONS_IDX_Q(x) (0x240 + ((x) << 2)) /* x is the queue id */ + +/* IRQ Moderator Initial Timer Register */ +#define EDMA_REG_IRQ_MODRT_TIMER_INIT 0x280 +#define EDMA_IRQ_MODRT_TIMER_MASK 0xFFFF +#define EDMA_IRQ_MODRT_RX_TIMER_SHIFT 0 +#define EDMA_IRQ_MODRT_TX_TIMER_SHIFT 16 + +/* Interrupt Control Register */ +#define EDMA_REG_INTR_CTRL 0x284 +#define EDMA_INTR_CLR_TYP_SHIFT 0 +#define EDMA_INTR_SW_IDX_W_TYP_SHIFT 1 +#define EDMA_INTR_CLEAR_TYPE_W1 0 +#define EDMA_INTR_CLEAR_TYPE_R 1 + +/* RX Interrupt Mask Register */ +#define EDMA_REG_RX_INT_MASK_Q(x) (0x300 + ((x) << 2)) /* x = queue id */ + +/* TX Interrupt mask register */ +#define EDMA_REG_TX_INT_MASK_Q(x) (0x340 + ((x) << 2)) /* x = queue id */ + +/* Load Ptr Register + * Software sets this bit after the initialization of the head and tail + */ +#define EDMA_REG_TX_SRAM_PART 0x400 +#define EDMA_LOAD_PTR_SHIFT 16 + +/* TXQ Control Register */ +#define EDMA_REG_TXQ_CTRL 0x404 +#define EDMA_TXQ_CTRL_IP_OPTION_EN 0x10 +#define EDMA_TXQ_CTRL_TXQ_EN 0x20 +#define EDMA_TXQ_CTRL_ENH_MODE 0x40 +#define EDMA_TXQ_CTRL_LS_8023_EN 0x80 +#define 
EDMA_TXQ_CTRL_TPD_BURST_EN 0x100 +#define EDMA_TXQ_CTRL_LSO_BREAK_EN 0x200 +#define EDMA_TXQ_NUM_TPD_BURST_MASK 0xF +#define EDMA_TXQ_TXF_BURST_NUM_MASK 0xFFFF +#define EDMA_TXQ_NUM_TPD_BURST_SHIFT 0 +#define EDMA_TXQ_TXF_BURST_NUM_SHIFT 16 + +#define EDMA_REG_TXF_WATER_MARK 0x408 /* In 8-bytes */ +#define EDMA_TXF_WATER_MARK_MASK 0x0FFF +#define EDMA_TXF_LOW_WATER_MARK_SHIFT 0 +#define EDMA_TXF_HIGH_WATER_MARK_SHIFT 16 +#define EDMA_TXQ_CTRL_BURST_MODE_EN 0x80000000 + +/* WRR Control Register */ +#define EDMA_REG_WRR_CTRL_Q0_Q3 0x40c +#define EDMA_REG_WRR_CTRL_Q4_Q7 0x410 +#define EDMA_REG_WRR_CTRL_Q8_Q11 0x414 +#define EDMA_REG_WRR_CTRL_Q12_Q15 0x418 + +/* Weight round robin(WRR), it takes queue as input, and computes + * starting bits where we need to write the weight for a particular + * queue + */ +#define EDMA_WRR_SHIFT(x) (((x) * 5) % 20) + +/* Tx Descriptor Control Register */ +#define EDMA_REG_TPD_RING_SIZE 0x41C +#define EDMA_TPD_RING_SIZE_SHIFT 0 +#define EDMA_TPD_RING_SIZE_MASK 0xFFFF + +/* Transmit descriptor base address */ +#define EDMA_REG_TPD_BASE_ADDR_Q(x) (0x420 + ((x) << 2)) /* x = queue id */ + +/* TPD Index Register */ +#define EDMA_REG_TPD_IDX_Q(x) (0x460 + ((x) << 2)) /* x = queue id */ + +#define EDMA_TPD_PROD_IDX_BITS 0x0000FFFF +#define EDMA_TPD_CONS_IDX_BITS 0xFFFF0000 +#define EDMA_TPD_PROD_IDX_MASK 0xFFFF +#define EDMA_TPD_CONS_IDX_MASK 0xFFFF +#define EDMA_TPD_PROD_IDX_SHIFT 0 +#define EDMA_TPD_CONS_IDX_SHIFT 16 + +/* TX Virtual Queue Mapping Control Register */ +#define EDMA_REG_VQ_CTRL0 0x4A0 +#define EDMA_REG_VQ_CTRL1 0x4A4 + +/* Virtual QID shift, it takes queue as input, and computes + * Virtual QID position in virtual qid control register + */ +#define EDMA_VQ_ID_SHIFT(i) (((i) * 3) % 24) + +/* Virtual Queue Default Value */ +#define EDMA_VQ_REG_VALUE 0x240240 + +/* Tx side Port Interface Control Register */ +#define EDMA_REG_PORT_CTRL 0x4A8 +#define EDMA_PAD_EN_SHIFT 15 + +/* Tx side VLAN Configuration Register */ +#define EDMA_REG_VLAN_CFG 0x4AC + +#define EDMA_TX_CVLAN 16 +#define EDMA_TX_INS_CVLAN 17 +#define EDMA_TX_CVLAN_TAG_SHIFT 0 + +#define EDMA_TX_SVLAN 14 +#define EDMA_TX_INS_SVLAN 15 +#define EDMA_TX_SVLAN_TAG_SHIFT 16 + +/* Tx Queue Packet Statistic Register */ +#define EDMA_REG_TX_STAT_PKT_Q(x) (0x700 + ((x) << 3)) /* x = queue id */ + +#define EDMA_TX_STAT_PKT_MASK 0xFFFFFF + +/* Tx Queue Byte Statistic Register */ +#define EDMA_REG_TX_STAT_BYTE_Q(x) (0x704 + ((x) << 3)) /* x = queue id */ + +/* Load Balance Based Ring Offset Register */ +#define EDMA_REG_LB_RING 0x800 +#define EDMA_LB_RING_ENTRY_MASK 0xff +#define EDMA_LB_RING_ID_MASK 0x7 +#define EDMA_LB_RING_PROFILE_ID_MASK 0x3 +#define EDMA_LB_RING_ENTRY_BIT_OFFSET 8 +#define EDMA_LB_RING_ID_OFFSET 0 +#define EDMA_LB_RING_PROFILE_ID_OFFSET 3 +#define EDMA_LB_REG_VALUE 0x6040200 + +/* Load Balance Priority Mapping Register */ +#define EDMA_REG_LB_PRI_START 0x804 +#define EDMA_REG_LB_PRI_END 0x810 +#define EDMA_LB_PRI_REG_INC 4 +#define EDMA_LB_PRI_ENTRY_BIT_OFFSET 4 +#define EDMA_LB_PRI_ENTRY_MASK 0xf + +/* RSS Priority Mapping Register */ +#define EDMA_REG_RSS_PRI 0x820 +#define EDMA_RSS_PRI_ENTRY_MASK 0xf +#define EDMA_RSS_RING_ID_MASK 0x7 +#define EDMA_RSS_PRI_ENTRY_BIT_OFFSET 4 + +/* RSS Indirection Register */ +#define EDMA_REG_RSS_IDT(x) (0x840 + ((x) << 2)) /* x = No. 
of indirection table */ +#define EDMA_NUM_IDT 16 +#define EDMA_RSS_IDT_VALUE 0x64206420 + +/* Default RSS Ring Register */ +#define EDMA_REG_DEF_RSS 0x890 +#define EDMA_DEF_RSS_MASK 0x7 + +/* RSS Hash Function Type Register */ +#define EDMA_REG_RSS_TYPE 0x894 +#define EDMA_RSS_TYPE_NONE 0x01 +#define EDMA_RSS_TYPE_IPV4TCP 0x02 +#define EDMA_RSS_TYPE_IPV6_TCP 0x04 +#define EDMA_RSS_TYPE_IPV4_UDP 0x08 +#define EDMA_RSS_TYPE_IPV6UDP 0x10 +#define EDMA_RSS_TYPE_IPV4 0x20 +#define EDMA_RSS_TYPE_IPV6 0x40 +#define EDMA_RSS_HASH_MODE_MASK 0x7f + +#define EDMA_REG_RSS_HASH_VALUE 0x8C0 + +#define EDMA_REG_RSS_TYPE_RESULT 0x8C4 + +#define EDMA_HASH_TYPE_START 0 +#define EDMA_HASH_TYPE_END 5 +#define EDMA_HASH_TYPE_SHIFT 12 + +#define EDMA_RFS_FLOW_ENTRIES 1024 +#define EDMA_RFS_FLOW_ENTRIES_MASK (EDMA_RFS_FLOW_ENTRIES - 1) +#define EDMA_RFS_EXPIRE_COUNT_PER_CALL 128 + +/* RFD Base Address Register */ +#define EDMA_REG_RFD_BASE_ADDR_Q(x) (0x950 + ((x) << 2)) /* x = queue id */ + +/* RFD Index Register */ +#define EDMA_REG_RFD_IDX_Q(x) (0x9B0 + ((x) << 2)) + +#define EDMA_RFD_PROD_IDX_BITS 0x00000FFF +#define EDMA_RFD_CONS_IDX_BITS 0x0FFF0000 +#define EDMA_RFD_PROD_IDX_MASK 0xFFF +#define EDMA_RFD_CONS_IDX_MASK 0xFFF +#define EDMA_RFD_PROD_IDX_SHIFT 0 +#define EDMA_RFD_CONS_IDX_SHIFT 16 + +/* Rx Descriptor Control Register */ +#define EDMA_REG_RX_DESC0 0xA10 +#define EDMA_RFD_RING_SIZE_MASK 0xFFF +#define EDMA_RX_BUF_SIZE_MASK 0xFFFF +#define EDMA_RFD_RING_SIZE_SHIFT 0 +#define EDMA_RX_BUF_SIZE_SHIFT 16 + +#define EDMA_REG_RX_DESC1 0xA14 +#define EDMA_RXQ_RFD_BURST_NUM_MASK 0x3F +#define EDMA_RXQ_RFD_PF_THRESH_MASK 0x1F +#define EDMA_RXQ_RFD_LOW_THRESH_MASK 0xFFF +#define EDMA_RXQ_RFD_BURST_NUM_SHIFT 0 +#define EDMA_RXQ_RFD_PF_THRESH_SHIFT 8 +#define EDMA_RXQ_RFD_LOW_THRESH_SHIFT 16 + +/* RXQ Control Register */ +#define EDMA_REG_RXQ_CTRL 0xA18 +#define EDMA_FIFO_THRESH_TYPE_SHIF 0 +#define EDMA_FIFO_THRESH_128_BYTE 0x0 +#define EDMA_FIFO_THRESH_64_BYTE 0x1 +#define EDMA_RXQ_CTRL_RMV_VLAN 0x00000002 +#define EDMA_RXQ_CTRL_EN 0x0000FF00 + +/* AXI Burst Size Config */ +#define EDMA_REG_AXIW_CTRL_MAXWRSIZE 0xA1C +#define EDMA_AXIW_MAXWRSIZE_VALUE 0x0 + +/* Rx Statistics Register */ +#define EDMA_REG_RX_STAT_BYTE_Q(x) (0xA30 + ((x) << 2)) /* x = queue id */ +#define EDMA_REG_RX_STAT_PKT_Q(x) (0xA50 + ((x) << 2)) /* x = queue id */ + +/* WoL Pattern Length Register */ +#define EDMA_REG_WOL_PATTERN_LEN0 0xC00 +#define EDMA_WOL_PT_LEN_MASK 0xFF +#define EDMA_WOL_PT0_LEN_SHIFT 0 +#define EDMA_WOL_PT1_LEN_SHIFT 8 +#define EDMA_WOL_PT2_LEN_SHIFT 16 +#define EDMA_WOL_PT3_LEN_SHIFT 24 + +#define EDMA_REG_WOL_PATTERN_LEN1 0xC04 +#define EDMA_WOL_PT4_LEN_SHIFT 0 +#define EDMA_WOL_PT5_LEN_SHIFT 8 +#define EDMA_WOL_PT6_LEN_SHIFT 16 + +/* WoL Control Register */ +#define EDMA_REG_WOL_CTRL 0xC08 +#define EDMA_WOL_WK_EN 0x00000001 +#define EDMA_WOL_MG_EN 0x00000002 +#define EDMA_WOL_PT0_EN 0x00000004 +#define EDMA_WOL_PT1_EN 0x00000008 +#define EDMA_WOL_PT2_EN 0x00000010 +#define EDMA_WOL_PT3_EN 0x00000020 +#define EDMA_WOL_PT4_EN 0x00000040 +#define EDMA_WOL_PT5_EN 0x00000080 +#define EDMA_WOL_PT6_EN 0x00000100 + +/* MAC Control Register */ +#define EDMA_REG_MAC_CTRL0 0xC20 +#define EDMA_REG_MAC_CTRL1 0xC24 + +/* WoL Pattern Register */ +#define EDMA_REG_WOL_PATTERN_START 0x5000 +#define EDMA_PATTERN_PART_REG_OFFSET 0x40 + + +/* TX descriptor fields */ +#define EDMA_TPD_HDR_SHIFT 0 +#define EDMA_TPD_PPPOE_EN 0x00000100 +#define EDMA_TPD_IP_CSUM_EN 0x00000200 +#define EDMA_TPD_TCP_CSUM_EN 0x0000400 +#define 
EDMA_TPD_UDP_CSUM_EN 0x00000800 +#define EDMA_TPD_CUSTOM_CSUM_EN 0x00000C00 +#define EDMA_TPD_LSO_EN 0x00001000 +#define EDMA_TPD_LSO_V2_EN 0x00002000 +#define EDMA_TPD_IPV4_EN 0x00010000 +#define EDMA_TPD_MSS_MASK 0x1FFF +#define EDMA_TPD_MSS_SHIFT 18 +#define EDMA_TPD_CUSTOM_CSUM_SHIFT 18 + +/* RRD descriptor fields */ +#define EDMA_RRD_NUM_RFD_MASK 0x000F +#define EDMA_RRD_SVLAN 0x8000 +#define EDMA_RRD_FLOW_COOKIE_MASK 0x07FF; + +#define EDMA_RRD_PKT_SIZE_MASK 0x3FFF +#define EDMA_RRD_CSUM_FAIL_MASK 0xC000 +#define EDMA_RRD_CVLAN 0x0001 +#define EDMA_RRD_DESC_VALID 0x8000 + +#define EDMA_RRD_PRIORITY_SHIFT 4 +#define EDMA_RRD_PRIORITY_MASK 0x7 +#define EDMA_RRD_PORT_TYPE_SHIFT 7 +#define EDMA_RRD_PORT_TYPE_MASK 0x1F + +#define ESS_RGMII_CTRL 0x0004 + +/* Port status registers */ +#define ESS_PORT0_STATUS 0x007C +#define ESS_PORT1_STATUS 0x0080 +#define ESS_PORT2_STATUS 0x0084 +#define ESS_PORT3_STATUS 0x0088 +#define ESS_PORT4_STATUS 0x008C +#define ESS_PORT5_STATUS 0x0090 + +#define ESS_PORT_STATUS_HDX_FLOW_CTL 0x80 +#define ESS_PORT_STATUS_DUPLEX_MODE 0x40 +#define ESS_PORT_STATUS_RX_FLOW_EN 0x20 +#define ESS_PORT_STATUS_TX_FLOW_EN 0x10 +#define ESS_PORT_STATUS_RX_MAC_EN 0x08 +#define ESS_PORT_STATUS_TX_MAC_EN 0x04 +#define ESS_PORT_STATUS_SPEED_INV 0x03 +#define ESS_PORT_STATUS_SPEED_1000 0x02 +#define ESS_PORT_STATUS_SPEED_100 0x01 +#define ESS_PORT_STATUS_SPEED_10 0x00 + +#define ESS_PORT_1G_FDX (ESS_PORT_STATUS_DUPLEX_MODE | ESS_PORT_STATUS_RX_FLOW_EN | \ + ESS_PORT_STATUS_TX_FLOW_EN | ESS_PORT_STATUS_RX_MAC_EN | \ + ESS_PORT_STATUS_TX_MAC_EN | ESS_PORT_STATUS_SPEED_1000) + +#define PHY_STATUS_REG 0x11 +#define PHY_STATUS_SPEED 0xC000 +#define PHY_STATUS_SPEED_SHIFT 14 +#define PHY_STATUS_DUPLEX 0x2000 +#define PHY_STATUS_DUPLEX_SHIFT 13 +#define PHY_STATUS_SPEED_DUPLEX_RESOLVED 0x0800 +#define PHY_STATUS_CARRIER 0x0400 +#define PHY_STATUS_CARRIER_SHIFT 10 + +/* Port lookup control registers */ +#define ESS_PORT0_LOOKUP_CTRL 0x0660 +#define ESS_PORT1_LOOKUP_CTRL 0x066C +#define ESS_PORT2_LOOKUP_CTRL 0x0678 +#define ESS_PORT3_LOOKUP_CTRL 0x0684 +#define ESS_PORT4_LOOKUP_CTRL 0x0690 +#define ESS_PORT5_LOOKUP_CTRL 0x069C + +#define ESS_PORT0_HEADER_CTRL 0x009C + +#define ESS_PORTS_ALL 0x3f + +#define ESS_FWD_CTRL1 0x0624 +#define ESS_FWD_CTRL1_UC_FLOOD BITS(0, 7) +#define ESS_FWD_CTRL1_UC_FLOOD_S 0 +#define ESS_FWD_CTRL1_MC_FLOOD BITS(8, 7) +#define ESS_FWD_CTRL1_MC_FLOOD_S 8 +#define ESS_FWD_CTRL1_BC_FLOOD BITS(16, 7) +#define ESS_FWD_CTRL1_BC_FLOOD_S 16 +#define ESS_FWD_CTRL1_IGMP BITS(24, 7) +#define ESS_FWD_CTRL1_IGMP_S 24 + +#endif /* _ESS_EDMA_H_ */ diff --git a/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c new file mode 100644 index 0000000000..db21547a03 --- /dev/null +++ b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.c @@ -0,0 +1,2118 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "ar40xx.h" + +static struct ar40xx_priv *ar40xx_priv; + +#define MIB_DESC(_s , _o, _n) \ + { \ + .size = (_s), \ + .offset = (_o), \ + .name = (_n), \ + } + +static const struct ar40xx_mib_desc ar40xx_mibs[] = { + MIB_DESC(1, AR40XX_STATS_RXBROAD, "RxBroad"), + MIB_DESC(1, AR40XX_STATS_RXPAUSE, "RxPause"), + MIB_DESC(1, AR40XX_STATS_RXMULTI, "RxMulti"), + MIB_DESC(1, AR40XX_STATS_RXFCSERR, "RxFcsErr"), + MIB_DESC(1, AR40XX_STATS_RXALIGNERR, "RxAlignErr"), + MIB_DESC(1, AR40XX_STATS_RXRUNT, "RxRunt"), + MIB_DESC(1, AR40XX_STATS_RXFRAGMENT, "RxFragment"), + MIB_DESC(1, AR40XX_STATS_RX64BYTE, "Rx64Byte"), + MIB_DESC(1, AR40XX_STATS_RX128BYTE, "Rx128Byte"), + MIB_DESC(1, AR40XX_STATS_RX256BYTE, "Rx256Byte"), + MIB_DESC(1, AR40XX_STATS_RX512BYTE, "Rx512Byte"), + MIB_DESC(1, AR40XX_STATS_RX1024BYTE, "Rx1024Byte"), + MIB_DESC(1, AR40XX_STATS_RX1518BYTE, "Rx1518Byte"), + MIB_DESC(1, AR40XX_STATS_RXMAXBYTE, "RxMaxByte"), + MIB_DESC(1, AR40XX_STATS_RXTOOLONG, "RxTooLong"), + MIB_DESC(2, AR40XX_STATS_RXGOODBYTE, "RxGoodByte"), + MIB_DESC(2, AR40XX_STATS_RXBADBYTE, "RxBadByte"), + MIB_DESC(1, AR40XX_STATS_RXOVERFLOW, "RxOverFlow"), + MIB_DESC(1, AR40XX_STATS_FILTERED, "Filtered"), + MIB_DESC(1, AR40XX_STATS_TXBROAD, "TxBroad"), + MIB_DESC(1, AR40XX_STATS_TXPAUSE, "TxPause"), + MIB_DESC(1, AR40XX_STATS_TXMULTI, "TxMulti"), + MIB_DESC(1, AR40XX_STATS_TXUNDERRUN, "TxUnderRun"), + MIB_DESC(1, AR40XX_STATS_TX64BYTE, "Tx64Byte"), + MIB_DESC(1, AR40XX_STATS_TX128BYTE, "Tx128Byte"), + MIB_DESC(1, AR40XX_STATS_TX256BYTE, "Tx256Byte"), + MIB_DESC(1, AR40XX_STATS_TX512BYTE, "Tx512Byte"), + MIB_DESC(1, AR40XX_STATS_TX1024BYTE, "Tx1024Byte"), + MIB_DESC(1, AR40XX_STATS_TX1518BYTE, "Tx1518Byte"), + MIB_DESC(1, AR40XX_STATS_TXMAXBYTE, "TxMaxByte"), + MIB_DESC(1, AR40XX_STATS_TXOVERSIZE, "TxOverSize"), + MIB_DESC(2, AR40XX_STATS_TXBYTE, "TxByte"), + MIB_DESC(1, AR40XX_STATS_TXCOLLISION, "TxCollision"), + MIB_DESC(1, AR40XX_STATS_TXABORTCOL, "TxAbortCol"), + MIB_DESC(1, AR40XX_STATS_TXMULTICOL, "TxMultiCol"), + MIB_DESC(1, AR40XX_STATS_TXSINGLECOL, "TxSingleCol"), + MIB_DESC(1, AR40XX_STATS_TXEXCDEFER, "TxExcDefer"), + MIB_DESC(1, AR40XX_STATS_TXDEFER, "TxDefer"), + MIB_DESC(1, AR40XX_STATS_TXLATECOL, "TxLateCol"), +}; + +static u32 +ar40xx_read(struct ar40xx_priv *priv, int reg) +{ + return readl(priv->hw_addr + reg); +} + +static u32 +ar40xx_psgmii_read(struct ar40xx_priv *priv, int reg) +{ + return readl(priv->psgmii_hw_addr + reg); +} + +static void +ar40xx_write(struct ar40xx_priv *priv, int reg, u32 val) +{ + writel(val, priv->hw_addr + reg); +} + +static u32 +ar40xx_rmw(struct ar40xx_priv *priv, int reg, u32 mask, u32 val) +{ + u32 ret; + + ret = ar40xx_read(priv, reg); + ret &= ~mask; + ret |= val; + ar40xx_write(priv, reg, ret); + return ret; +} + +static void +ar40xx_psgmii_write(struct ar40xx_priv *priv, int reg, u32 val) +{ + writel(val, priv->psgmii_hw_addr + reg); +} + +static void +ar40xx_phy_dbg_write(struct ar40xx_priv *priv, int phy_addr, + u16 dbg_addr, u16 dbg_data) +{ + struct mii_bus *bus = priv->mii_bus; + + 
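+ /* The PHY debug registers are indirect: write the register number to the debug-address register, then the value to the debug-data register, holding the MDIO bus lock across both accesses */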
mutex_lock(&bus->mdio_lock); + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr); + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA, dbg_data); + mutex_unlock(&bus->mdio_lock); +} + +static void +ar40xx_phy_dbg_read(struct ar40xx_priv *priv, int phy_addr, + u16 dbg_addr, u16 *dbg_data) +{ + struct mii_bus *bus = priv->mii_bus; + + mutex_lock(&bus->mdio_lock); + bus->write(bus, phy_addr, AR40XX_MII_ATH_DBG_ADDR, dbg_addr); + *dbg_data = bus->read(bus, phy_addr, AR40XX_MII_ATH_DBG_DATA); + mutex_unlock(&bus->mdio_lock); +} + +static void +ar40xx_phy_mmd_write(struct ar40xx_priv *priv, u32 phy_id, + u16 mmd_num, u16 reg_id, u16 reg_val) +{ + struct mii_bus *bus = priv->mii_bus; + + mutex_lock(&bus->mdio_lock); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_ADDR, mmd_num); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_DATA, reg_id); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_ADDR, + 0x4000 | mmd_num); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_DATA, reg_val); + mutex_unlock(&bus->mdio_lock); +} + +static u16 +ar40xx_phy_mmd_read(struct ar40xx_priv *priv, u32 phy_id, + u16 mmd_num, u16 reg_id) +{ + u16 value; + struct mii_bus *bus = priv->mii_bus; + + mutex_lock(&bus->mdio_lock); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_ADDR, mmd_num); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_DATA, reg_id); + bus->write(bus, phy_id, + AR40XX_MII_ATH_MMD_ADDR, + 0x4000 | mmd_num); + value = bus->read(bus, phy_id, AR40XX_MII_ATH_MMD_DATA); + mutex_unlock(&bus->mdio_lock); + return value; +} + +/* Start of swconfig support */ + +static void +ar40xx_phy_poll_reset(struct ar40xx_priv *priv) +{ + u32 i, in_reset, retries = 500; + struct mii_bus *bus = priv->mii_bus; + + /* Assume RESET was recently issued to some or all of the phys */ + in_reset = GENMASK(AR40XX_NUM_PHYS - 1, 0); + + while (retries--) { + /* 1ms should be plenty of time. + * 802.3 spec allows for a max wait time of 500ms + */ + usleep_range(1000, 2000); + + for (i = 0; i < AR40XX_NUM_PHYS; i++) { + int val; + + /* skip devices which have completed reset */ + if (!(in_reset & BIT(i))) + continue; + + val = mdiobus_read(bus, i, MII_BMCR); + if (val < 0) + continue; + + /* mark when phy is no longer in reset state */ + if (!(val & BMCR_RESET)) + in_reset &= ~BIT(i); + } + + if (!in_reset) + return; + } + + dev_warn(&bus->dev, "Failed to reset all phys! 
(in_reset: 0x%x)\n", + in_reset); +} + +static void +ar40xx_phy_init(struct ar40xx_priv *priv) +{ + int i; + struct mii_bus *bus; + u16 val; + + bus = priv->mii_bus; + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) { + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val); + val &= ~AR40XX_PHY_MANU_CTRL_EN; + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val); + mdiobus_write(bus, i, + MII_ADVERTISE, ADVERTISE_ALL | + ADVERTISE_PAUSE_CAP | + ADVERTISE_PAUSE_ASYM); + mdiobus_write(bus, i, MII_CTRL1000, ADVERTISE_1000FULL); + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); + } + + ar40xx_phy_poll_reset(priv); +} + +static void +ar40xx_port_phy_linkdown(struct ar40xx_priv *priv) +{ + struct mii_bus *bus; + int i; + u16 val; + + bus = priv->mii_bus; + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) { + mdiobus_write(bus, i, MII_CTRL1000, 0); + mdiobus_write(bus, i, MII_ADVERTISE, 0); + mdiobus_write(bus, i, MII_BMCR, BMCR_RESET | BMCR_ANENABLE); + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_0, &val); + val |= AR40XX_PHY_MANU_CTRL_EN; + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_0, val); + /* disable transmit */ + ar40xx_phy_dbg_read(priv, i, AR40XX_PHY_DEBUG_2, &val); + val &= 0xf00f; + ar40xx_phy_dbg_write(priv, i, AR40XX_PHY_DEBUG_2, val); + } +} + +static void +ar40xx_set_mirror_regs(struct ar40xx_priv *priv) +{ + int port; + + /* reset all mirror registers */ + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0, + AR40XX_FWD_CTRL0_MIRROR_PORT, + (0xF << AR40XX_FWD_CTRL0_MIRROR_PORT_S)); + for (port = 0; port < AR40XX_NUM_PORTS; port++) { + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(port), + AR40XX_PORT_LOOKUP_ING_MIRROR_EN, 0); + + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(port), + AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN, 0); + } + + /* now enable mirroring if necessary */ + if (priv->source_port >= AR40XX_NUM_PORTS || + priv->monitor_port >= AR40XX_NUM_PORTS || + priv->source_port == priv->monitor_port) { + return; + } + + ar40xx_rmw(priv, AR40XX_REG_FWD_CTRL0, + AR40XX_FWD_CTRL0_MIRROR_PORT, + (priv->monitor_port << AR40XX_FWD_CTRL0_MIRROR_PORT_S)); + + if (priv->mirror_rx) + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(priv->source_port), 0, + AR40XX_PORT_LOOKUP_ING_MIRROR_EN); + + if (priv->mirror_tx) + ar40xx_rmw(priv, AR40XX_REG_PORT_HOL_CTRL1(priv->source_port), + 0, AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN); +} + +static int +ar40xx_sw_get_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + u8 ports = priv->vlan_table[val->port_vlan]; + int i; + + val->len = 0; + for (i = 0; i < dev->ports; i++) { + struct switch_port *p; + + if (!(ports & BIT(i))) + continue; + + p = &val->value.ports[val->len++]; + p->id = i; + if ((priv->vlan_tagged & BIT(i)) || + (priv->pvid[i] != val->port_vlan)) + p->flags = BIT(SWITCH_PORT_FLAG_TAGGED); + else + p->flags = 0; + } + return 0; +} + +static int +ar40xx_sw_set_ports(struct switch_dev *dev, struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + u8 *vt = &priv->vlan_table[val->port_vlan]; + int i; + + *vt = 0; + for (i = 0; i < val->len; i++) { + struct switch_port *p = &val->value.ports[i]; + + if (p->flags & BIT(SWITCH_PORT_FLAG_TAGGED)) { + if (val->port_vlan == priv->pvid[p->id]) + priv->vlan_tagged |= BIT(p->id); + } else { + priv->vlan_tagged &= ~BIT(p->id); + priv->pvid[p->id] = val->port_vlan; + } + + *vt |= BIT(p->id); + } + return 0; +} + +static int +ar40xx_reg_wait(struct ar40xx_priv *priv, u32 reg, u32 mask, u32 val, + unsigned timeout) +{ + int i; + + for (i = 0; i < 
timeout; i++) { + u32 t; + + t = ar40xx_read(priv, reg); + if ((t & mask) == val) + return 0; + + usleep_range(1000, 2000); + } + + return -ETIMEDOUT; +} + +static int +ar40xx_mib_op(struct ar40xx_priv *priv, u32 op) +{ + int ret; + + lockdep_assert_held(&priv->mib_lock); + + /* Capture the hardware statistics for all ports */ + ar40xx_rmw(priv, AR40XX_REG_MIB_FUNC, + AR40XX_MIB_FUNC, (op << AR40XX_MIB_FUNC_S)); + + /* Wait for the capturing to complete. */ + ret = ar40xx_reg_wait(priv, AR40XX_REG_MIB_FUNC, + AR40XX_MIB_BUSY, 0, 10); + + return ret; +} + +static void +ar40xx_mib_fetch_port_stat(struct ar40xx_priv *priv, int port, bool flush) +{ + unsigned int base; + u64 *mib_stats; + int i; + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs); + + WARN_ON(port >= priv->dev.ports); + + lockdep_assert_held(&priv->mib_lock); + + base = AR40XX_REG_PORT_STATS_START + + AR40XX_REG_PORT_STATS_LEN * port; + + mib_stats = &priv->mib_stats[port * num_mibs]; + if (flush) { + u32 len; + + len = num_mibs * sizeof(*mib_stats); + memset(mib_stats, 0, len); + return; + } + for (i = 0; i < num_mibs; i++) { + const struct ar40xx_mib_desc *mib; + u64 t; + + mib = &ar40xx_mibs[i]; + t = ar40xx_read(priv, base + mib->offset); + if (mib->size == 2) { + u64 hi; + + hi = ar40xx_read(priv, base + mib->offset + 4); + t |= hi << 32; + } + + mib_stats[i] += t; + } +} + +static int +ar40xx_mib_capture(struct ar40xx_priv *priv) +{ + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_CAPTURE); +} + +static int +ar40xx_mib_flush(struct ar40xx_priv *priv) +{ + return ar40xx_mib_op(priv, AR40XX_MIB_FUNC_FLUSH); +} + +static int +ar40xx_sw_set_reset_mibs(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + unsigned int len; + int ret; + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs); + + mutex_lock(&priv->mib_lock); + + len = priv->dev.ports * num_mibs * sizeof(*priv->mib_stats); + memset(priv->mib_stats, 0, len); + ret = ar40xx_mib_flush(priv); + + mutex_unlock(&priv->mib_lock); + return ret; +} + +static int +ar40xx_sw_set_vlan(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + priv->vlan = !!val->value.i; + return 0; +} + +static int +ar40xx_sw_get_vlan(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + val->value.i = priv->vlan; + return 0; +} + +static int +ar40xx_sw_set_mirror_rx_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + priv->mirror_rx = !!val->value.i; + ar40xx_set_mirror_regs(priv); + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int +ar40xx_sw_get_mirror_rx_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + val->value.i = priv->mirror_rx; + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static int +ar40xx_sw_set_mirror_tx_enable(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + priv->mirror_tx = !!val->value.i; + ar40xx_set_mirror_regs(priv); + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int +ar40xx_sw_get_mirror_tx_enable(struct switch_dev *dev, + const struct 
switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + val->value.i = priv->mirror_tx; + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static int +ar40xx_sw_set_mirror_monitor_port(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + priv->monitor_port = val->value.i; + ar40xx_set_mirror_regs(priv); + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int +ar40xx_sw_get_mirror_monitor_port(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + val->value.i = priv->monitor_port; + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static int +ar40xx_sw_set_mirror_source_port(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + priv->source_port = val->value.i; + ar40xx_set_mirror_regs(priv); + mutex_unlock(&priv->reg_mutex); + + return 0; +} + +static int +ar40xx_sw_get_mirror_source_port(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + mutex_lock(&priv->reg_mutex); + val->value.i = priv->source_port; + mutex_unlock(&priv->reg_mutex); + return 0; +} + +static int +ar40xx_sw_set_linkdown(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + if (val->value.i == 1) + ar40xx_port_phy_linkdown(priv); + else + ar40xx_phy_init(priv); + + return 0; +} + +static int +ar40xx_sw_set_port_reset_mib(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + int port; + int ret; + + port = val->port_vlan; + if (port >= dev->ports) + return -EINVAL; + + mutex_lock(&priv->mib_lock); + ret = ar40xx_mib_capture(priv); + if (ret) + goto unlock; + + ar40xx_mib_fetch_port_stat(priv, port, true); + +unlock: + mutex_unlock(&priv->mib_lock); + return ret; +} + +static int +ar40xx_sw_get_port_mib(struct switch_dev *dev, + const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + u64 *mib_stats; + int port; + int ret; + char *buf = priv->buf; + int i, len = 0; + u32 num_mibs = ARRAY_SIZE(ar40xx_mibs); + + port = val->port_vlan; + if (port >= dev->ports) + return -EINVAL; + + mutex_lock(&priv->mib_lock); + ret = ar40xx_mib_capture(priv); + if (ret) + goto unlock; + + ar40xx_mib_fetch_port_stat(priv, port, false); + + len += snprintf(buf + len, sizeof(priv->buf) - len, + "Port %d MIB counters\n", + port); + + mib_stats = &priv->mib_stats[port * num_mibs]; + for (i = 0; i < num_mibs; i++) + len += snprintf(buf + len, sizeof(priv->buf) - len, + "%-12s: %llu\n", + ar40xx_mibs[i].name, + mib_stats[i]); + + val->value.s = buf; + val->len = len; + +unlock: + mutex_unlock(&priv->mib_lock); + return ret; +} + +static int +ar40xx_sw_set_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + priv->vlan_id[val->port_vlan] = val->value.i; + return 0; +} + +static int +ar40xx_sw_get_vid(struct switch_dev *dev, const struct switch_attr *attr, + struct switch_val *val) +{ + struct 
ar40xx_priv *priv = swdev_to_ar40xx(dev); + + val->value.i = priv->vlan_id[val->port_vlan]; + return 0; +} + +static int +ar40xx_sw_get_pvid(struct switch_dev *dev, int port, int *vlan) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + *vlan = priv->pvid[port]; + return 0; +} + +static int +ar40xx_sw_set_pvid(struct switch_dev *dev, int port, int vlan) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + /* make sure no invalid PVIDs get set */ + if (vlan >= dev->vlans) + return -EINVAL; + + priv->pvid[port] = vlan; + return 0; +} + +static void +ar40xx_read_port_link(struct ar40xx_priv *priv, int port, + struct switch_port_link *link) +{ + u32 status; + u32 speed; + + memset(link, 0, sizeof(*link)); + + status = ar40xx_read(priv, AR40XX_REG_PORT_STATUS(port)); + + link->aneg = !!(status & AR40XX_PORT_AUTO_LINK_EN); + if (link->aneg || (port != AR40XX_PORT_CPU)) + link->link = !!(status & AR40XX_PORT_STATUS_LINK_UP); + else + link->link = true; + + if (!link->link) + return; + + link->duplex = !!(status & AR40XX_PORT_DUPLEX); + link->tx_flow = !!(status & AR40XX_PORT_STATUS_TXFLOW); + link->rx_flow = !!(status & AR40XX_PORT_STATUS_RXFLOW); + + speed = (status & AR40XX_PORT_SPEED) >> + AR40XX_PORT_STATUS_SPEED_S; + + switch (speed) { + case AR40XX_PORT_SPEED_10M: + link->speed = SWITCH_PORT_SPEED_10; + break; + case AR40XX_PORT_SPEED_100M: + link->speed = SWITCH_PORT_SPEED_100; + break; + case AR40XX_PORT_SPEED_1000M: + link->speed = SWITCH_PORT_SPEED_1000; + break; + default: + link->speed = SWITCH_PORT_SPEED_UNKNOWN; + break; + } +} + +static int +ar40xx_sw_get_port_link(struct switch_dev *dev, int port, + struct switch_port_link *link) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + + ar40xx_read_port_link(priv, port, link); + return 0; +} + +static const struct switch_attr ar40xx_sw_attr_globals[] = { + { + .type = SWITCH_TYPE_INT, + .name = "enable_vlan", + .description = "Enable VLAN mode", + .set = ar40xx_sw_set_vlan, + .get = ar40xx_sw_get_vlan, + .max = 1 + }, + { + .type = SWITCH_TYPE_NOVAL, + .name = "reset_mibs", + .description = "Reset all MIB counters", + .set = ar40xx_sw_set_reset_mibs, + }, + { + .type = SWITCH_TYPE_INT, + .name = "enable_mirror_rx", + .description = "Enable mirroring of RX packets", + .set = ar40xx_sw_set_mirror_rx_enable, + .get = ar40xx_sw_get_mirror_rx_enable, + .max = 1 + }, + { + .type = SWITCH_TYPE_INT, + .name = "enable_mirror_tx", + .description = "Enable mirroring of TX packets", + .set = ar40xx_sw_set_mirror_tx_enable, + .get = ar40xx_sw_get_mirror_tx_enable, + .max = 1 + }, + { + .type = SWITCH_TYPE_INT, + .name = "mirror_monitor_port", + .description = "Mirror monitor port", + .set = ar40xx_sw_set_mirror_monitor_port, + .get = ar40xx_sw_get_mirror_monitor_port, + .max = AR40XX_NUM_PORTS - 1 + }, + { + .type = SWITCH_TYPE_INT, + .name = "mirror_source_port", + .description = "Mirror source port", + .set = ar40xx_sw_set_mirror_source_port, + .get = ar40xx_sw_get_mirror_source_port, + .max = AR40XX_NUM_PORTS - 1 + }, + { + .type = SWITCH_TYPE_INT, + .name = "linkdown", + .description = "Link down all the PHYs", + .set = ar40xx_sw_set_linkdown, + .max = 1 + }, +}; + +static const struct switch_attr ar40xx_sw_attr_port[] = { + { + .type = SWITCH_TYPE_NOVAL, + .name = "reset_mib", + .description = "Reset single port MIB counters", + .set = ar40xx_sw_set_port_reset_mib, + }, + { + .type = SWITCH_TYPE_STRING, + .name = "mib", + .description = "Get port's MIB counters", + .set = NULL, + .get = ar40xx_sw_get_port_mib, + }, +}; + +const 
struct switch_attr ar40xx_sw_attr_vlan[] = { + { + .type = SWITCH_TYPE_INT, + .name = "vid", + .description = "VLAN ID (0-4094)", + .set = ar40xx_sw_set_vid, + .get = ar40xx_sw_get_vid, + .max = 4094, + }, +}; + +/* End of swconfig support */ + +static int +ar40xx_wait_bit(struct ar40xx_priv *priv, int reg, u32 mask, u32 val) +{ + int timeout = 20; + u32 t; + + while (1) { + t = ar40xx_read(priv, reg); + if ((t & mask) == val) + return 0; + + if (timeout-- <= 0) + break; + + usleep_range(10, 20); + } + + pr_err("ar40xx: timeout for reg %08x: %08x & %08x != %08x\n", + (unsigned int)reg, t, mask, val); + return -ETIMEDOUT; +} + +static int +ar40xx_atu_flush(struct ar40xx_priv *priv) +{ + int ret; + + ret = ar40xx_wait_bit(priv, AR40XX_REG_ATU_FUNC, + AR40XX_ATU_FUNC_BUSY, 0); + if (!ret) + ar40xx_write(priv, AR40XX_REG_ATU_FUNC, + AR40XX_ATU_FUNC_OP_FLUSH | + AR40XX_ATU_FUNC_BUSY); + + return ret; +} + +static void +ar40xx_ess_reset(struct ar40xx_priv *priv) +{ + reset_control_assert(priv->ess_rst); + mdelay(10); + reset_control_deassert(priv->ess_rst); + /* Waiting for all inner tables init done. + * It cost 5~10ms. + */ + mdelay(10); + + pr_info("ESS reset ok!\n"); +} + +/* Start of psgmii self test */ + +static void +ar40xx_malibu_psgmii_ess_reset(struct ar40xx_priv *priv) +{ + u32 n; + struct mii_bus *bus = priv->mii_bus; + /* reset phy psgmii */ + /* fix phy psgmii RX 20bit */ + mdiobus_write(bus, 5, 0x0, 0x005b); + /* reset phy psgmii */ + mdiobus_write(bus, 5, 0x0, 0x001b); + /* release reset phy psgmii */ + mdiobus_write(bus, 5, 0x0, 0x005b); + + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) { + u16 status; + + status = ar40xx_phy_mmd_read(priv, 5, 1, 0x28); + if (status & BIT(0)) + break; + /* Polling interval to check PSGMII PLL in malibu is ready + * the worst time is 8.67ms + * for 25MHz reference clock + * [512+(128+2048)*49]*80ns+100us + */ + mdelay(2); + } + + /*check malibu psgmii calibration done end..*/ + + /*freeze phy psgmii RX CDR*/ + mdiobus_write(bus, 5, 0x1a, 0x2230); + + ar40xx_ess_reset(priv); + + /*check psgmii calibration done start*/ + for (n = 0; n < AR40XX_PSGMII_CALB_NUM; n++) { + u32 status; + + status = ar40xx_psgmii_read(priv, 0xa0); + if (status & BIT(0)) + break; + /* Polling interval to check PSGMII PLL in ESS is ready */ + mdelay(2); + } + + /* check dakota psgmii calibration done end..*/ + + /* relesae phy psgmii RX CDR */ + mdiobus_write(bus, 5, 0x1a, 0x3230); + /* release phy psgmii RX 20bit */ + mdiobus_write(bus, 5, 0x0, 0x005f); +} + +static void +ar40xx_psgmii_single_phy_testing(struct ar40xx_priv *priv, int phy) +{ + int j; + u32 tx_ok, tx_error; + u32 rx_ok, rx_error; + u32 tx_ok_high16; + u32 rx_ok_high16; + u32 tx_all_ok, rx_all_ok; + struct mii_bus *bus = priv->mii_bus; + + mdiobus_write(bus, phy, 0x0, 0x9000); + mdiobus_write(bus, phy, 0x0, 0x4140); + + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) { + u16 status; + + status = mdiobus_read(bus, phy, 0x11); + if (status & AR40XX_PHY_SPEC_STATUS_LINK) + break; + /* the polling interval to check if the PHY link up or not + * maxwait_timer: 750 ms +/-10 ms + * minwait_timer : 1 us +/- 0.1us + * time resides in minwait_timer ~ maxwait_timer + * see IEEE 802.3 section 40.4.5.2 + */ + mdelay(8); + } + + /* enable check */ + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0000); + ar40xx_phy_mmd_write(priv, phy, 7, 0x8029, 0x0003); + + /* start traffic */ + ar40xx_phy_mmd_write(priv, phy, 7, 0x8020, 0xa000); + /* wait for all traffic end + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms + */ + mdelay(50); 
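+ /* The self test generates 0x1000 (4096) frames per PHY, so a clean run must read back exactly 0x1000 good frames and zero errors */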
+ + /* check counter */ + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e); + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d); + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f); + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b); + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a); + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c); + tx_all_ok = tx_ok + (tx_ok_high16 << 16); + rx_all_ok = rx_ok + (rx_ok_high16 << 16); + if (tx_all_ok == 0x1000 && tx_error == 0) { + /* success */ + priv->phy_t_status &= (~BIT(phy)); + } else { + pr_info("PHY %d single test PSGMII issue happen!\n", phy); + priv->phy_t_status |= BIT(phy); + } + + mdiobus_write(bus, phy, 0x0, 0x1840); +} + +static void +ar40xx_psgmii_all_phy_testing(struct ar40xx_priv *priv) +{ + int phy, j; + struct mii_bus *bus = priv->mii_bus; + + mdiobus_write(bus, 0x1f, 0x0, 0x9000); + mdiobus_write(bus, 0x1f, 0x0, 0x4140); + + for (j = 0; j < AR40XX_PSGMII_CALB_NUM; j++) { + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { + u16 status; + + status = mdiobus_read(bus, phy, 0x11); + if (!(status & BIT(10))) + break; + } + + if (phy >= (AR40XX_NUM_PORTS - 1)) + break; + /* The polling interva to check if the PHY link up or not */ + mdelay(8); + } + /* enable check */ + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0000); + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0003); + + /* start traffic */ + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0xa000); + /* wait for all traffic end + * 4096(pkt num)*1524(size)*8ns(125MHz)=49.9ms + */ + mdelay(50); + + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { + u32 tx_ok, tx_error; + u32 rx_ok, rx_error; + u32 tx_ok_high16; + u32 rx_ok_high16; + u32 tx_all_ok, rx_all_ok; + + /* check counter */ + tx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802e); + tx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802d); + tx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802f); + rx_ok = ar40xx_phy_mmd_read(priv, phy, 7, 0x802b); + rx_ok_high16 = ar40xx_phy_mmd_read(priv, phy, 7, 0x802a); + rx_error = ar40xx_phy_mmd_read(priv, phy, 7, 0x802c); + tx_all_ok = tx_ok + (tx_ok_high16<<16); + rx_all_ok = rx_ok + (rx_ok_high16<<16); + if (tx_all_ok == 0x1000 && tx_error == 0) { + /* success */ + priv->phy_t_status &= ~BIT(phy + 8); + } else { + pr_info("PHY%d test see issue!\n", phy); + priv->phy_t_status |= BIT(phy + 8); + } + } + + pr_debug("PHY all test 0x%x \r\n", priv->phy_t_status); +} + +void +ar40xx_psgmii_self_test(struct ar40xx_priv *priv) +{ + u32 i, phy; + struct mii_bus *bus = priv->mii_bus; + + ar40xx_malibu_psgmii_ess_reset(priv); + + /* switch to access MII reg for copper */ + mdiobus_write(bus, 4, 0x1f, 0x8500); + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { + /*enable phy mdio broadcast write*/ + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x801f); + } + /* force no link by power down */ + mdiobus_write(bus, 0x1f, 0x0, 0x1840); + /*packet number*/ + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x1000); + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8062, 0x05e0); + + /*fix mdi status */ + mdiobus_write(bus, 0x1f, 0x10, 0x6800); + for (i = 0; i < AR40XX_PSGMII_CALB_NUM; i++) { + priv->phy_t_status = 0; + + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1), + AR40XX_PORT_LOOKUP_LOOPBACK, + AR40XX_PORT_LOOKUP_LOOPBACK); + } + + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) + ar40xx_psgmii_single_phy_testing(priv, phy); + + ar40xx_psgmii_all_phy_testing(priv); + + if (priv->phy_t_status) + ar40xx_malibu_psgmii_ess_reset(priv); + 
else + break; + } + + if (i >= AR40XX_PSGMII_CALB_NUM) + pr_info("PSGMII cannot recover\n"); + else + pr_debug("PSGMII recovered after %d times reset\n", i); + + /* configuration recover */ + /* packet number */ + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8021, 0x0); + /* disable check */ + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8029, 0x0); + /* disable traffic */ + ar40xx_phy_mmd_write(priv, 0x1f, 7, 0x8020, 0x0); +} + +void +ar40xx_psgmii_self_test_clean(struct ar40xx_priv *priv) +{ + int phy; + struct mii_bus *bus = priv->mii_bus; + + /* disable phy internal loopback */ + mdiobus_write(bus, 0x1f, 0x10, 0x6860); + mdiobus_write(bus, 0x1f, 0x0, 0x9040); + + for (phy = 0; phy < AR40XX_NUM_PORTS - 1; phy++) { + /* disable mac loop back */ + ar40xx_rmw(priv, AR40XX_REG_PORT_LOOKUP(phy + 1), + AR40XX_PORT_LOOKUP_LOOPBACK, 0); + /* disable phy mdio broadcast write */ + ar40xx_phy_mmd_write(priv, phy, 7, 0x8028, 0x001f); + } + + /* clear fdb entry */ + ar40xx_atu_flush(priv); +} + +/* End of psgmii self test */ + +static void +ar40xx_mac_mode_init(struct ar40xx_priv *priv, u32 mode) +{ + if (mode == PORT_WRAPPER_PSGMII) { + ar40xx_psgmii_write(priv, AR40XX_PSGMII_MODE_CONTROL, 0x2200); + ar40xx_psgmii_write(priv, AR40XX_PSGMIIPHY_TX_CONTROL, 0x8380); + } +} + +static +int ar40xx_cpuport_setup(struct ar40xx_priv *priv) +{ + u32 t; + + t = AR40XX_PORT_STATUS_TXFLOW | + AR40XX_PORT_STATUS_RXFLOW | + AR40XX_PORT_TXHALF_FLOW | + AR40XX_PORT_DUPLEX | + AR40XX_PORT_SPEED_1000M; + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t); + usleep_range(10, 20); + + t |= AR40XX_PORT_TX_EN | + AR40XX_PORT_RX_EN; + ar40xx_write(priv, AR40XX_REG_PORT_STATUS(0), t); + + return 0; +} + +static void +ar40xx_init_port(struct ar40xx_priv *priv, int port) +{ + u32 t; + + ar40xx_rmw(priv, AR40XX_REG_PORT_STATUS(port), + AR40XX_PORT_AUTO_LINK_EN, 0); + + /* CPU port is setting headers to limit output ports */ + if (port == 0) + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0x8); + else + ar40xx_write(priv, AR40XX_REG_PORT_HEADER(port), 0); + + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), 0); + + t = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH << AR40XX_PORT_VLAN1_OUT_MODE_S; + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t); + + t = AR40XX_PORT_LOOKUP_LEARN; + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S; + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t); +} + +void +ar40xx_init_globals(struct ar40xx_priv *priv) +{ + u32 t; + + /* enable CPU port and disable mirror port */ + t = AR40XX_FWD_CTRL0_CPU_PORT_EN | + AR40XX_FWD_CTRL0_MIRROR_PORT; + ar40xx_write(priv, AR40XX_REG_FWD_CTRL0, t); + + /* forward multicast and broadcast frames to CPU */ + t = (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_UC_FLOOD_S) | + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_MC_FLOOD_S) | + (AR40XX_PORTS_ALL << AR40XX_FWD_CTRL1_BC_FLOOD_S); + ar40xx_write(priv, AR40XX_REG_FWD_CTRL1, t); + + /* enable jumbo frames */ + ar40xx_rmw(priv, AR40XX_REG_MAX_FRAME_SIZE, + AR40XX_MAX_FRAME_SIZE_MTU, 9018 + 8 + 2); + + /* Enable MIB counters */ + ar40xx_rmw(priv, AR40XX_REG_MODULE_EN, 0, + AR40XX_MODULE_EN_MIB); + + /* Disable AZ */ + ar40xx_write(priv, AR40XX_REG_EEE_CTRL, 0); + + /* set flowctrl thershold for cpu port */ + t = (AR40XX_PORT0_FC_THRESH_ON_DFLT << 16) | + AR40XX_PORT0_FC_THRESH_OFF_DFLT; + ar40xx_write(priv, AR40XX_REG_PORT_FLOWCTRL_THRESH(0), t); + + /* set service tag to 802.1q */ + t = ETH_P_8021Q | AR40XX_ESS_SERVICE_TAG_STAG; + ar40xx_write(priv, AR40XX_ESS_SERVICE_TAG, t); +} + +static void +ar40xx_malibu_init(struct ar40xx_priv 
*priv) +{ + int i; + struct mii_bus *bus; + u16 val; + + bus = priv->mii_bus; + + /* war to enable AZ transmitting ability */ + ar40xx_phy_mmd_write(priv, AR40XX_PSGMII_ID, 1, + AR40XX_MALIBU_PSGMII_MODE_CTRL, + AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL); + for (i = 0; i < AR40XX_NUM_PORTS - 1; i++) { + /* change malibu control_dac */ + val = ar40xx_phy_mmd_read(priv, i, 7, + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL); + val &= ~AR40XX_MALIBU_DAC_CTRL_MASK; + val |= AR40XX_MALIBU_DAC_CTRL_VALUE; + ar40xx_phy_mmd_write(priv, i, 7, + AR40XX_MALIBU_PHY_MMD7_DAC_CTRL, val); + if (i == AR40XX_MALIBU_PHY_LAST_ADDR) { + /* to avoid goes into hibernation */ + val = ar40xx_phy_mmd_read(priv, i, 3, + AR40XX_MALIBU_PHY_RLP_CTRL); + val &= (~(1<<1)); + ar40xx_phy_mmd_write(priv, i, 3, + AR40XX_MALIBU_PHY_RLP_CTRL, val); + } + } + + /* adjust psgmii serdes tx amp */ + mdiobus_write(bus, AR40XX_PSGMII_ID, AR40XX_PSGMII_TX_DRIVER_1_CTRL, + AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP); +} + +static int +ar40xx_hw_init(struct ar40xx_priv *priv) +{ + u32 i; + + ar40xx_ess_reset(priv); + + if (priv->mii_bus) + ar40xx_malibu_init(priv); + else + return -1; + + ar40xx_psgmii_self_test(priv); + ar40xx_psgmii_self_test_clean(priv); + + ar40xx_mac_mode_init(priv, priv->mac_mode); + + for (i = 0; i < priv->dev.ports; i++) + ar40xx_init_port(priv, i); + + ar40xx_init_globals(priv); + + return 0; +} + +/* Start of qm error WAR */ + +static +int ar40xx_force_1g_full(struct ar40xx_priv *priv, u32 port_id) +{ + u32 reg; + + if (port_id < 0 || port_id > 6) + return -1; + + reg = AR40XX_REG_PORT_STATUS(port_id); + return ar40xx_rmw(priv, reg, AR40XX_PORT_SPEED, + (AR40XX_PORT_SPEED_1000M | AR40XX_PORT_DUPLEX)); +} + +static +int ar40xx_get_qm_status(struct ar40xx_priv *priv, + u32 port_id, u32 *qm_buffer_err) +{ + u32 reg; + u32 qm_val; + + if (port_id < 1 || port_id > 5) { + *qm_buffer_err = 0; + return -1; + } + + if (port_id < 4) { + reg = AR40XX_REG_QM_PORT0_3_QNUM; + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg); + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE); + /* every 8 bits for each port */ + *qm_buffer_err = (qm_val >> (port_id * 8)) & 0xFF; + } else { + reg = AR40XX_REG_QM_PORT4_6_QNUM; + ar40xx_write(priv, AR40XX_REG_QM_DEBUG_ADDR, reg); + qm_val = ar40xx_read(priv, AR40XX_REG_QM_DEBUG_VALUE); + /* every 8 bits for each port */ + *qm_buffer_err = (qm_val >> ((port_id-4) * 8)) & 0xFF; + } + + return 0; +} + +static void +ar40xx_sw_mac_polling_task(struct ar40xx_priv *priv) +{ + static int task_count; + u32 i; + u32 reg, value; + u32 link, speed, duplex; + u32 qm_buffer_err; + u16 port_phy_status[AR40XX_NUM_PORTS]; + static u32 qm_err_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0}; + static u32 link_cnt[AR40XX_NUM_PORTS] = {0, 0, 0, 0, 0, 0}; + struct mii_bus *bus = NULL; + + if (!priv || !priv->mii_bus) + return; + + bus = priv->mii_bus; + + ++task_count; + + for (i = 1; i < AR40XX_NUM_PORTS; ++i) { + port_phy_status[i] = + mdiobus_read(bus, i-1, AR40XX_PHY_SPEC_STATUS); + speed = link = duplex = port_phy_status[i]; + speed &= AR40XX_PHY_SPEC_STATUS_SPEED; + speed >>= 14; + link &= AR40XX_PHY_SPEC_STATUS_LINK; + link >>= 10; + duplex &= AR40XX_PHY_SPEC_STATUS_DUPLEX; + duplex >>= 13; + + if (link != priv->ar40xx_port_old_link[i]) { + ++link_cnt[i]; + /* Up --> Down */ + if ((priv->ar40xx_port_old_link[i] == + AR40XX_PORT_LINK_UP) && + (link == AR40XX_PORT_LINK_DOWN)) { + /* LINK_EN disable(MAC force mode)*/ + reg = AR40XX_REG_PORT_STATUS(i); + ar40xx_rmw(priv, reg, + AR40XX_PORT_AUTO_LINK_EN, 0); + + /* 
Check queue buffer */ + qm_err_cnt[i] = 0; + ar40xx_get_qm_status(priv, i, &qm_buffer_err); + if (qm_buffer_err) { + priv->ar40xx_port_qm_buf[i] = + AR40XX_QM_NOT_EMPTY; + } else { + u16 phy_val = 0; + + priv->ar40xx_port_qm_buf[i] = + AR40XX_QM_EMPTY; + ar40xx_force_1g_full(priv, i); + /* Ref:QCA8337 Datasheet,Clearing + * MENU_CTRL_EN prevents phy to + * stuck in 100BT mode when + * bringing up the link + */ + ar40xx_phy_dbg_read(priv, i-1, + AR40XX_PHY_DEBUG_0, + &phy_val); + phy_val &= (~AR40XX_PHY_MANU_CTRL_EN); + ar40xx_phy_dbg_write(priv, i-1, + AR40XX_PHY_DEBUG_0, + phy_val); + } + priv->ar40xx_port_old_link[i] = link; + } else if ((priv->ar40xx_port_old_link[i] == + AR40XX_PORT_LINK_DOWN) && + (link == AR40XX_PORT_LINK_UP)) { + /* Down --> Up */ + if (priv->port_link_up[i] < 1) { + ++priv->port_link_up[i]; + } else { + /* Change port status */ + reg = AR40XX_REG_PORT_STATUS(i); + value = ar40xx_read(priv, reg); + priv->port_link_up[i] = 0; + + value &= ~(AR40XX_PORT_DUPLEX | + AR40XX_PORT_SPEED); + value |= speed | (duplex ? BIT(6) : 0); + ar40xx_write(priv, reg, value); + /* clock switch need such time + * to avoid glitch + */ + usleep_range(100, 200); + + value |= AR40XX_PORT_AUTO_LINK_EN; + ar40xx_write(priv, reg, value); + /* HW need such time to make sure link + * stable before enable MAC + */ + usleep_range(100, 200); + + if (speed == AR40XX_PORT_SPEED_100M) { + u16 phy_val = 0; + /* Enable @100M, if down to 10M + * clock will change smoothly + */ + ar40xx_phy_dbg_read(priv, i-1, + 0, + &phy_val); + phy_val |= + AR40XX_PHY_MANU_CTRL_EN; + ar40xx_phy_dbg_write(priv, i-1, + 0, + phy_val); + } + priv->ar40xx_port_old_link[i] = link; + } + } + } + + if (priv->ar40xx_port_qm_buf[i] == AR40XX_QM_NOT_EMPTY) { + /* Check QM */ + ar40xx_get_qm_status(priv, i, &qm_buffer_err); + if (qm_buffer_err) { + ++qm_err_cnt[i]; + } else { + priv->ar40xx_port_qm_buf[i] = + AR40XX_QM_EMPTY; + qm_err_cnt[i] = 0; + ar40xx_force_1g_full(priv, i); + } + } + } +} + +static void +ar40xx_qm_err_check_work_task(struct work_struct *work) +{ + struct ar40xx_priv *priv = container_of(work, struct ar40xx_priv, + qm_dwork.work); + + mutex_lock(&priv->qm_lock); + + ar40xx_sw_mac_polling_task(priv); + + mutex_unlock(&priv->qm_lock); + + schedule_delayed_work(&priv->qm_dwork, + msecs_to_jiffies(AR40XX_QM_WORK_DELAY)); +} + +static int +ar40xx_qm_err_check_work_start(struct ar40xx_priv *priv) +{ + mutex_init(&priv->qm_lock); + + INIT_DELAYED_WORK(&priv->qm_dwork, ar40xx_qm_err_check_work_task); + + schedule_delayed_work(&priv->qm_dwork, + msecs_to_jiffies(AR40XX_QM_WORK_DELAY)); + + return 0; +} + +/* End of qm error WAR */ + +static int +ar40xx_vlan_init(struct ar40xx_priv *priv) +{ + int port; + unsigned long bmp; + + /* By default Enable VLAN */ + priv->vlan = 1; + priv->vlan_table[AR40XX_LAN_VLAN] = priv->cpu_bmp | priv->lan_bmp; + priv->vlan_table[AR40XX_WAN_VLAN] = priv->cpu_bmp | priv->wan_bmp; + priv->vlan_tagged = priv->cpu_bmp; + bmp = priv->lan_bmp; + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS) + priv->pvid[port] = AR40XX_LAN_VLAN; + + bmp = priv->wan_bmp; + for_each_set_bit(port, &bmp, AR40XX_NUM_PORTS) + priv->pvid[port] = AR40XX_WAN_VLAN; + + return 0; +} + +static void +ar40xx_mib_work_func(struct work_struct *work) +{ + struct ar40xx_priv *priv; + int err; + + priv = container_of(work, struct ar40xx_priv, mib_work.work); + + mutex_lock(&priv->mib_lock); + + err = ar40xx_mib_capture(priv); + if (err) + goto next_port; + + ar40xx_mib_fetch_port_stat(priv, priv->mib_next_port, false); + 
+next_port: + priv->mib_next_port++; + if (priv->mib_next_port >= priv->dev.ports) + priv->mib_next_port = 0; + + mutex_unlock(&priv->mib_lock); + + schedule_delayed_work(&priv->mib_work, + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY)); +} + +static void +ar40xx_setup_port(struct ar40xx_priv *priv, int port, u32 members) +{ + u32 t; + u32 egress, ingress; + u32 pvid = priv->vlan_id[priv->pvid[port]]; + + if (priv->vlan) { + if (priv->vlan_tagged & BIT(port)) + egress = AR40XX_PORT_VLAN1_OUT_MODE_TAG; + else + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNMOD; + + ingress = AR40XX_IN_SECURE; + } else { + egress = AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH; + ingress = AR40XX_IN_PORT_ONLY; + } + + t = pvid << AR40XX_PORT_VLAN0_DEF_SVID_S; + t |= pvid << AR40XX_PORT_VLAN0_DEF_CVID_S; + ar40xx_write(priv, AR40XX_REG_PORT_VLAN0(port), t); + + t = egress << AR40XX_PORT_VLAN1_OUT_MODE_S; + + /* set CPU port to core port */ + if (port == 0) + t |= AR40XX_PORT_VLAN1_CORE_PORT; + + if (priv->vlan_tagged & BIT(port)) + t |= AR40XX_PORT_VLAN1_PORT_VLAN_PROP; + else + t |= AR40XX_PORT_VLAN1_PORT_TLS_MODE; + + ar40xx_write(priv, AR40XX_REG_PORT_VLAN1(port), t); + + t = members; + t |= AR40XX_PORT_LOOKUP_LEARN; + t |= ingress << AR40XX_PORT_LOOKUP_IN_MODE_S; + t |= AR40XX_PORT_STATE_FORWARD << AR40XX_PORT_LOOKUP_STATE_S; + ar40xx_write(priv, AR40XX_REG_PORT_LOOKUP(port), t); +} + +static void +ar40xx_vtu_op(struct ar40xx_priv *priv, u32 op, u32 val) +{ + if (ar40xx_wait_bit(priv, AR40XX_REG_VTU_FUNC1, + AR40XX_VTU_FUNC1_BUSY, 0)) + return; + + if ((op & AR40XX_VTU_FUNC1_OP) == AR40XX_VTU_FUNC1_OP_LOAD) + ar40xx_write(priv, AR40XX_REG_VTU_FUNC0, val); + + op |= AR40XX_VTU_FUNC1_BUSY; + ar40xx_write(priv, AR40XX_REG_VTU_FUNC1, op); +} + +static void +ar40xx_vtu_load_vlan(struct ar40xx_priv *priv, u32 vid, u32 port_mask) +{ + u32 op; + u32 val; + int i; + + op = AR40XX_VTU_FUNC1_OP_LOAD | (vid << AR40XX_VTU_FUNC1_VID_S); + val = AR40XX_VTU_FUNC0_VALID | AR40XX_VTU_FUNC0_IVL; + for (i = 0; i < AR40XX_NUM_PORTS; i++) { + u32 mode; + + if ((port_mask & BIT(i)) == 0) + mode = AR40XX_VTU_FUNC0_EG_MODE_NOT; + else if (priv->vlan == 0) + mode = AR40XX_VTU_FUNC0_EG_MODE_KEEP; + else if ((priv->vlan_tagged & BIT(i)) || + (priv->vlan_id[priv->pvid[i]] != vid)) + mode = AR40XX_VTU_FUNC0_EG_MODE_TAG; + else + mode = AR40XX_VTU_FUNC0_EG_MODE_UNTAG; + + val |= mode << AR40XX_VTU_FUNC0_EG_MODE_S(i); + } + ar40xx_vtu_op(priv, op, val); +} + +static void +ar40xx_vtu_flush(struct ar40xx_priv *priv) +{ + ar40xx_vtu_op(priv, AR40XX_VTU_FUNC1_OP_FLUSH, 0); +} + +static int +ar40xx_sw_hw_apply(struct switch_dev *dev) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + u8 portmask[AR40XX_NUM_PORTS]; + int i, j; + + mutex_lock(&priv->reg_mutex); + /* flush all vlan entries */ + ar40xx_vtu_flush(priv); + + memset(portmask, 0, sizeof(portmask)); + if (priv->vlan) { + for (j = 0; j < AR40XX_MAX_VLANS; j++) { + u8 vp = priv->vlan_table[j]; + + if (!vp) + continue; + + for (i = 0; i < dev->ports; i++) { + u8 mask = BIT(i); + + if (vp & mask) + portmask[i] |= vp & ~mask; + } + + ar40xx_vtu_load_vlan(priv, priv->vlan_id[j], + priv->vlan_table[j]); + } + } else { + /* 8021q vlan disabled */ + for (i = 0; i < dev->ports; i++) { + if (i == AR40XX_PORT_CPU) + continue; + + portmask[i] = BIT(AR40XX_PORT_CPU); + portmask[AR40XX_PORT_CPU] |= BIT(i); + } + } + + /* update the port destination mask registers and tag settings */ + for (i = 0; i < dev->ports; i++) + ar40xx_setup_port(priv, i, portmask[i]); + + ar40xx_set_mirror_regs(priv); + + 
mutex_unlock(&priv->reg_mutex); + return 0; +} + +static int +ar40xx_sw_reset_switch(struct switch_dev *dev) +{ + struct ar40xx_priv *priv = swdev_to_ar40xx(dev); + int i, rv; + + mutex_lock(&priv->reg_mutex); + memset(&priv->vlan, 0, sizeof(struct ar40xx_priv) - + offsetof(struct ar40xx_priv, vlan)); + + for (i = 0; i < AR40XX_MAX_VLANS; i++) + priv->vlan_id[i] = i; + + ar40xx_vlan_init(priv); + + priv->mirror_rx = false; + priv->mirror_tx = false; + priv->source_port = 0; + priv->monitor_port = 0; + + mutex_unlock(&priv->reg_mutex); + + rv = ar40xx_sw_hw_apply(dev); + return rv; +} + +static int +ar40xx_start(struct ar40xx_priv *priv) +{ + int ret; + + ret = ar40xx_hw_init(priv); + if (ret) + return ret; + + ret = ar40xx_sw_reset_switch(&priv->dev); + if (ret) + return ret; + + /* at last, setup cpu port */ + ret = ar40xx_cpuport_setup(priv); + if (ret) + return ret; + + schedule_delayed_work(&priv->mib_work, + msecs_to_jiffies(AR40XX_MIB_WORK_DELAY)); + + ar40xx_qm_err_check_work_start(priv); + + return 0; +} + +static const struct switch_dev_ops ar40xx_sw_ops = { + .attr_global = { + .attr = ar40xx_sw_attr_globals, + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_globals), + }, + .attr_port = { + .attr = ar40xx_sw_attr_port, + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_port), + }, + .attr_vlan = { + .attr = ar40xx_sw_attr_vlan, + .n_attr = ARRAY_SIZE(ar40xx_sw_attr_vlan), + }, + .get_port_pvid = ar40xx_sw_get_pvid, + .set_port_pvid = ar40xx_sw_set_pvid, + .get_vlan_ports = ar40xx_sw_get_ports, + .set_vlan_ports = ar40xx_sw_set_ports, + .apply_config = ar40xx_sw_hw_apply, + .reset_switch = ar40xx_sw_reset_switch, + .get_port_link = ar40xx_sw_get_port_link, +}; + +/* Start of phy driver support */ + +static const u32 ar40xx_phy_ids[] = { + 0x004dd0b1, + 0x004dd0b2, /* AR40xx */ +}; + +static bool +ar40xx_phy_match(u32 phy_id) +{ + int i; + + for (i = 0; i < ARRAY_SIZE(ar40xx_phy_ids); i++) + if (phy_id == ar40xx_phy_ids[i]) + return true; + + return false; +} + +static bool +is_ar40xx_phy(struct mii_bus *bus) +{ + unsigned i; + + for (i = 0; i < 4; i++) { + u32 phy_id; + + phy_id = mdiobus_read(bus, i, MII_PHYSID1) << 16; + phy_id |= mdiobus_read(bus, i, MII_PHYSID2); + if (!ar40xx_phy_match(phy_id)) + return false; + } + + return true; +} + +static int +ar40xx_phy_probe(struct phy_device *phydev) +{ + if (!is_ar40xx_phy(phydev->mdio.bus)) + return -ENODEV; + + ar40xx_priv->mii_bus = phydev->mdio.bus; + phydev->priv = ar40xx_priv; + if (phydev->mdio.addr == 0) + ar40xx_priv->phy = phydev; + + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->supported); + linkmode_set_bit(ETHTOOL_LINK_MODE_1000baseT_Full_BIT, phydev->advertising); + return 0; +} + +static void +ar40xx_phy_remove(struct phy_device *phydev) +{ + ar40xx_priv->mii_bus = NULL; + phydev->priv = NULL; +} + +static int +ar40xx_phy_config_init(struct phy_device *phydev) +{ + return 0; +} + +static int +ar40xx_phy_read_status(struct phy_device *phydev) +{ + if (phydev->mdio.addr != 0) + return genphy_read_status(phydev); + + return 0; +} + +static int +ar40xx_phy_config_aneg(struct phy_device *phydev) +{ + if (phydev->mdio.addr == 0) + return 0; + + return genphy_config_aneg(phydev); +} + +static struct phy_driver ar40xx_phy_driver = { + .phy_id = 0x004d0000, + .name = "QCA Malibu", + .phy_id_mask = 0xffff0000, + .features = PHY_GBIT_FEATURES, + .probe = ar40xx_phy_probe, + .remove = ar40xx_phy_remove, + .config_init = ar40xx_phy_config_init, + .config_aneg = ar40xx_phy_config_aneg, + .read_status = ar40xx_phy_read_status, +}; + 
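
Illustrative sketch (not part of the upstream patch): the nested loop in ar40xx_sw_hw_apply() above computes, for every port, the union of all *other* ports that share at least one VLAN with it, and that union becomes the port's lookup membership. The standalone program below restates just that loop; NUM_PORTS, build_portmask and the example vlan_table bitmaps are hypothetical names and values chosen for the demo (the real bitmaps come from the DTS cpu/lan/wan properties).

#include <stdint.h>
#include <stdio.h>

#define NUM_PORTS 6

/* Toy restatement of the portmask loop in ar40xx_sw_hw_apply(): for each
 * VLAN, every member port gets the other members OR'ed into its mask. */
static void build_portmask(const uint8_t *vlan_table, int num_vlans,
			   uint8_t portmask[NUM_PORTS])
{
	for (int j = 0; j < num_vlans; j++) {
		uint8_t vp = vlan_table[j];

		if (!vp)
			continue;

		for (int i = 0; i < NUM_PORTS; i++) {
			uint8_t mask = 1u << i;

			if (vp & mask)
				portmask[i] |= vp & ~mask;
		}
	}
}

int main(void)
{
	/* Example: VLAN 1 = CPU(0)+LAN(1..4) = 0x1f, VLAN 2 = CPU(0)+WAN(5)
	 * = 0x21, roughly what ar40xx_vlan_init() builds from typical
	 * bitmaps; index 0 is unused. */
	uint8_t vlan_table[3] = { 0x00, 0x1f, 0x21 };
	uint8_t portmask[NUM_PORTS] = { 0 };

	build_portmask(vlan_table, 3, portmask);
	for (int i = 0; i < NUM_PORTS; i++)
		printf("port %d members: 0x%02x\n", i, (unsigned)portmask[i]);
	return 0;
}

With these inputs the CPU port ends up with members 0x3e (all front ports) while each LAN port only sees the CPU port and its LAN peers, which is exactly the isolation the switch lookup registers then enforce.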
+static uint16_t ar40xx_gpio_get_phy(unsigned int offset) +{ + return offset / 4; +} + +static uint16_t ar40xx_gpio_get_reg(unsigned int offset) +{ + return 0x8074 + offset % 4; +} + +static void ar40xx_gpio_set(struct gpio_chip *gc, unsigned int offset, + int value) +{ + struct ar40xx_priv *priv = gpiochip_get_data(gc); + + ar40xx_phy_mmd_write(priv, ar40xx_gpio_get_phy(offset), 0x7, + ar40xx_gpio_get_reg(offset), + value ? 0xA000 : 0x8000); +} + +static int ar40xx_gpio_get(struct gpio_chip *gc, unsigned offset) +{ + struct ar40xx_priv *priv = gpiochip_get_data(gc); + + return ar40xx_phy_mmd_read(priv, ar40xx_gpio_get_phy(offset), 0x7, + ar40xx_gpio_get_reg(offset)) == 0xA000; +} + +static int ar40xx_gpio_get_dir(struct gpio_chip *gc, unsigned offset) +{ + return 0; /* only out direction */ +} + +static int ar40xx_gpio_dir_out(struct gpio_chip *gc, unsigned offset, + int value) +{ + /* + * the direction out value is used to set the initial value. + * support of this function is required by leds-gpio.c + */ + ar40xx_gpio_set(gc, offset, value); + return 0; +} + +static void ar40xx_register_gpio(struct device *pdev, + struct ar40xx_priv *priv, + struct device_node *switch_node) +{ + struct gpio_chip *gc; + int err; + + gc = devm_kzalloc(pdev, sizeof(*gc), GFP_KERNEL); + if (!gc) + return; + + gc->label = "ar40xx_gpio", + gc->base = -1, + gc->ngpio = 5 /* mmd 0 - 4 */ * 4 /* 0x8074 - 0x8077 */, + gc->parent = pdev; + gc->owner = THIS_MODULE; + + gc->get_direction = ar40xx_gpio_get_dir; + gc->direction_output = ar40xx_gpio_dir_out; + gc->get = ar40xx_gpio_get; + gc->set = ar40xx_gpio_set; + gc->can_sleep = true; + gc->label = priv->dev.name; + gc->of_node = switch_node; + + err = devm_gpiochip_add_data(pdev, gc, priv); + if (err != 0) + dev_err(pdev, "Failed to register gpio %d.\n", err); +} + +/* End of phy driver support */ + +/* Platform driver probe function */ + +static int ar40xx_probe(struct platform_device *pdev) +{ + struct device_node *switch_node; + struct device_node *psgmii_node; + const __be32 *mac_mode; + struct clk *ess_clk; + struct switch_dev *swdev; + struct ar40xx_priv *priv; + u32 len; + u32 num_mibs; + struct resource psgmii_base = {0}; + struct resource switch_base = {0}; + int ret; + + priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + platform_set_drvdata(pdev, priv); + ar40xx_priv = priv; + + switch_node = of_node_get(pdev->dev.of_node); + if (of_address_to_resource(switch_node, 0, &switch_base) != 0) + return -EIO; + + priv->hw_addr = devm_ioremap_resource(&pdev->dev, &switch_base); + if (IS_ERR(priv->hw_addr)) { + dev_err(&pdev->dev, "Failed to ioremap switch_base!\n"); + return PTR_ERR(priv->hw_addr); + } + + /*psgmii dts get*/ + psgmii_node = of_find_node_by_name(NULL, "ess-psgmii"); + if (!psgmii_node) { + dev_err(&pdev->dev, "Failed to find ess-psgmii node!\n"); + return -EINVAL; + } + + if (of_address_to_resource(psgmii_node, 0, &psgmii_base) != 0) + return -EIO; + + priv->psgmii_hw_addr = devm_ioremap_resource(&pdev->dev, &psgmii_base); + if (IS_ERR(priv->psgmii_hw_addr)) { + dev_err(&pdev->dev, "psgmii ioremap fail!\n"); + return PTR_ERR(priv->psgmii_hw_addr); + } + + mac_mode = of_get_property(switch_node, "switch_mac_mode", &len); + if (!mac_mode) { + dev_err(&pdev->dev, "Failed to read switch_mac_mode\n"); + return -EINVAL; + } + priv->mac_mode = be32_to_cpup(mac_mode); + + ess_clk = of_clk_get_by_name(switch_node, "ess_clk"); + if (ess_clk) + clk_prepare_enable(ess_clk); + + priv->ess_rst = 
devm_reset_control_get(&pdev->dev, "ess_rst"); + if (IS_ERR(priv->ess_rst)) { + dev_err(&pdev->dev, "Failed to get ess_rst control!\n"); + return PTR_ERR(priv->ess_rst); + } + + if (of_property_read_u32(switch_node, "switch_cpu_bmp", + &priv->cpu_bmp) || + of_property_read_u32(switch_node, "switch_lan_bmp", + &priv->lan_bmp) || + of_property_read_u32(switch_node, "switch_wan_bmp", + &priv->wan_bmp)) { + dev_err(&pdev->dev, "Failed to read port properties\n"); + return -EIO; + } + + ret = phy_driver_register(&ar40xx_phy_driver, THIS_MODULE); + if (ret) { + dev_err(&pdev->dev, "Failed to register ar40xx phy driver!\n"); + return -EIO; + } + + mutex_init(&priv->reg_mutex); + mutex_init(&priv->mib_lock); + INIT_DELAYED_WORK(&priv->mib_work, ar40xx_mib_work_func); + + /* register switch */ + swdev = &priv->dev; + + if (priv->mii_bus == NULL) { + dev_err(&pdev->dev, "Probe failed - Missing PHYs!\n"); + ret = -ENODEV; + goto err_missing_phy; + } + + swdev->alias = dev_name(&priv->mii_bus->dev); + + swdev->cpu_port = AR40XX_PORT_CPU; + swdev->name = "QCA AR40xx"; + swdev->vlans = AR40XX_MAX_VLANS; + swdev->ports = AR40XX_NUM_PORTS; + swdev->ops = &ar40xx_sw_ops; + ret = register_switch(swdev, NULL); + if (ret) + goto err_unregister_phy; + + num_mibs = ARRAY_SIZE(ar40xx_mibs); + len = priv->dev.ports * num_mibs * + sizeof(*priv->mib_stats); + priv->mib_stats = devm_kzalloc(&pdev->dev, len, GFP_KERNEL); + if (!priv->mib_stats) { + ret = -ENOMEM; + goto err_unregister_switch; + } + + ar40xx_start(priv); + + if (of_property_read_bool(switch_node, "gpio-controller")) + ar40xx_register_gpio(&pdev->dev, ar40xx_priv, switch_node); + + return 0; + +err_unregister_switch: + unregister_switch(&priv->dev); +err_unregister_phy: + phy_driver_unregister(&ar40xx_phy_driver); +err_missing_phy: + platform_set_drvdata(pdev, NULL); + return ret; +} + +static int ar40xx_remove(struct platform_device *pdev) +{ + struct ar40xx_priv *priv = platform_get_drvdata(pdev); + + cancel_delayed_work_sync(&priv->qm_dwork); + cancel_delayed_work_sync(&priv->mib_work); + + unregister_switch(&priv->dev); + + phy_driver_unregister(&ar40xx_phy_driver); + + return 0; +} + +static const struct of_device_id ar40xx_of_mtable[] = { + {.compatible = "qcom,ess-switch" }, + {} +}; + +struct platform_driver ar40xx_drv = { + .probe = ar40xx_probe, + .remove = ar40xx_remove, + .driver = { + .name = "ar40xx", + .of_match_table = ar40xx_of_mtable, + }, +}; + +module_platform_driver(ar40xx_drv); + +MODULE_DESCRIPTION("IPQ40XX ESS driver"); +MODULE_LICENSE("Dual BSD/GPL"); diff --git a/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h new file mode 100644 index 0000000000..7ba40ccf75 --- /dev/null +++ b/target/linux/ipq40xx/files/drivers/net/phy/ar40xx.h @@ -0,0 +1,342 @@ +/* + * Copyright (c) 2016, The Linux Foundation. All rights reserved. + * + * Permission to use, copy, modify, and/or distribute this software for + * any purpose with or without fee is hereby granted, provided that the + * above copyright notice and this permission notice appear in all copies. + * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES + * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF + * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR + * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES + * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN + * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT + * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. + */ + + #ifndef __AR40XX_H +#define __AR40XX_H + +#define AR40XX_MAX_VLANS 128 +#define AR40XX_NUM_PORTS 6 +#define AR40XX_NUM_PHYS 5 + +#define BITS(_s, _n) (((1UL << (_n)) - 1) << _s) + +struct ar40xx_priv { + struct switch_dev dev; + + u8 __iomem *hw_addr; + u8 __iomem *psgmii_hw_addr; + u32 mac_mode; + struct reset_control *ess_rst; + u32 cpu_bmp; + u32 lan_bmp; + u32 wan_bmp; + + struct mii_bus *mii_bus; + struct phy_device *phy; + + /* mutex for qm task */ + struct mutex qm_lock; + struct delayed_work qm_dwork; + u32 port_link_up[AR40XX_NUM_PORTS]; + u32 ar40xx_port_old_link[AR40XX_NUM_PORTS]; + u32 ar40xx_port_qm_buf[AR40XX_NUM_PORTS]; + + u32 phy_t_status; + + /* mutex for switch reg access */ + struct mutex reg_mutex; + + /* mutex for mib task */ + struct mutex mib_lock; + struct delayed_work mib_work; + int mib_next_port; + u64 *mib_stats; + + char buf[2048]; + + /* all fields below will be cleared on reset */ + bool vlan; + u16 vlan_id[AR40XX_MAX_VLANS]; + u8 vlan_table[AR40XX_MAX_VLANS]; + u8 vlan_tagged; + u16 pvid[AR40XX_NUM_PORTS]; + + /* mirror */ + bool mirror_rx; + bool mirror_tx; + int source_port; + int monitor_port; +}; + +#define AR40XX_PORT_LINK_UP 1 +#define AR40XX_PORT_LINK_DOWN 0 +#define AR40XX_QM_NOT_EMPTY 1 +#define AR40XX_QM_EMPTY 0 + +#define AR40XX_LAN_VLAN 1 +#define AR40XX_WAN_VLAN 2 + +enum ar40xx_port_wrapper_cfg { + PORT_WRAPPER_PSGMII = 0, +}; + +struct ar40xx_mib_desc { + u32 size; + u32 offset; + const char *name; +}; + +#define AR40XX_PORT_CPU 0 + +#define AR40XX_PSGMII_MODE_CONTROL 0x1b4 +#define AR40XX_PSGMII_ATHR_CSCO_MODE_25M BIT(0) + +#define AR40XX_PSGMIIPHY_TX_CONTROL 0x288 + +#define AR40XX_MII_ATH_MMD_ADDR 0x0d +#define AR40XX_MII_ATH_MMD_DATA 0x0e +#define AR40XX_MII_ATH_DBG_ADDR 0x1d +#define AR40XX_MII_ATH_DBG_DATA 0x1e + +#define AR40XX_STATS_RXBROAD 0x00 +#define AR40XX_STATS_RXPAUSE 0x04 +#define AR40XX_STATS_RXMULTI 0x08 +#define AR40XX_STATS_RXFCSERR 0x0c +#define AR40XX_STATS_RXALIGNERR 0x10 +#define AR40XX_STATS_RXRUNT 0x14 +#define AR40XX_STATS_RXFRAGMENT 0x18 +#define AR40XX_STATS_RX64BYTE 0x1c +#define AR40XX_STATS_RX128BYTE 0x20 +#define AR40XX_STATS_RX256BYTE 0x24 +#define AR40XX_STATS_RX512BYTE 0x28 +#define AR40XX_STATS_RX1024BYTE 0x2c +#define AR40XX_STATS_RX1518BYTE 0x30 +#define AR40XX_STATS_RXMAXBYTE 0x34 +#define AR40XX_STATS_RXTOOLONG 0x38 +#define AR40XX_STATS_RXGOODBYTE 0x3c +#define AR40XX_STATS_RXBADBYTE 0x44 +#define AR40XX_STATS_RXOVERFLOW 0x4c +#define AR40XX_STATS_FILTERED 0x50 +#define AR40XX_STATS_TXBROAD 0x54 +#define AR40XX_STATS_TXPAUSE 0x58 +#define AR40XX_STATS_TXMULTI 0x5c +#define AR40XX_STATS_TXUNDERRUN 0x60 +#define AR40XX_STATS_TX64BYTE 0x64 +#define AR40XX_STATS_TX128BYTE 0x68 +#define AR40XX_STATS_TX256BYTE 0x6c +#define AR40XX_STATS_TX512BYTE 0x70 +#define AR40XX_STATS_TX1024BYTE 0x74 +#define AR40XX_STATS_TX1518BYTE 0x78 +#define AR40XX_STATS_TXMAXBYTE 0x7c +#define AR40XX_STATS_TXOVERSIZE 0x80 +#define AR40XX_STATS_TXBYTE 0x84 +#define AR40XX_STATS_TXCOLLISION 0x8c +#define AR40XX_STATS_TXABORTCOL 0x90 +#define AR40XX_STATS_TXMULTICOL 0x94 +#define AR40XX_STATS_TXSINGLECOL 0x98 +#define AR40XX_STATS_TXEXCDEFER 0x9c +#define AR40XX_STATS_TXDEFER 0xa0 +#define 
AR40XX_STATS_TXLATECOL 0xa4 + +#define AR40XX_REG_MODULE_EN 0x030 +#define AR40XX_MODULE_EN_MIB BIT(0) + +#define AR40XX_REG_MIB_FUNC 0x034 +#define AR40XX_MIB_BUSY BIT(17) +#define AR40XX_MIB_CPU_KEEP BIT(20) +#define AR40XX_MIB_FUNC BITS(24, 3) +#define AR40XX_MIB_FUNC_S 24 +#define AR40XX_MIB_FUNC_NO_OP 0x0 +#define AR40XX_MIB_FUNC_FLUSH 0x1 + +#define AR40XX_ESS_SERVICE_TAG 0x48 +#define AR40XX_ESS_SERVICE_TAG_STAG BIT(17) + +#define AR40XX_REG_PORT_STATUS(_i) (0x07c + (_i) * 4) +#define AR40XX_PORT_SPEED BITS(0, 2) +#define AR40XX_PORT_STATUS_SPEED_S 0 +#define AR40XX_PORT_TX_EN BIT(2) +#define AR40XX_PORT_RX_EN BIT(3) +#define AR40XX_PORT_STATUS_TXFLOW BIT(4) +#define AR40XX_PORT_STATUS_RXFLOW BIT(5) +#define AR40XX_PORT_DUPLEX BIT(6) +#define AR40XX_PORT_TXHALF_FLOW BIT(7) +#define AR40XX_PORT_STATUS_LINK_UP BIT(8) +#define AR40XX_PORT_AUTO_LINK_EN BIT(9) +#define AR40XX_PORT_STATUS_FLOW_CONTROL BIT(12) + +#define AR40XX_REG_MAX_FRAME_SIZE 0x078 +#define AR40XX_MAX_FRAME_SIZE_MTU BITS(0, 14) + +#define AR40XX_REG_PORT_HEADER(_i) (0x09c + (_i) * 4) + +#define AR40XX_REG_EEE_CTRL 0x100 +#define AR40XX_EEE_CTRL_DISABLE_PHY(_i) BIT(4 + (_i) * 2) + +#define AR40XX_REG_PORT_VLAN0(_i) (0x420 + (_i) * 0x8) +#define AR40XX_PORT_VLAN0_DEF_SVID BITS(0, 12) +#define AR40XX_PORT_VLAN0_DEF_SVID_S 0 +#define AR40XX_PORT_VLAN0_DEF_CVID BITS(16, 12) +#define AR40XX_PORT_VLAN0_DEF_CVID_S 16 + +#define AR40XX_REG_PORT_VLAN1(_i) (0x424 + (_i) * 0x8) +#define AR40XX_PORT_VLAN1_CORE_PORT BIT(9) +#define AR40XX_PORT_VLAN1_PORT_TLS_MODE BIT(7) +#define AR40XX_PORT_VLAN1_PORT_VLAN_PROP BIT(6) +#define AR40XX_PORT_VLAN1_OUT_MODE BITS(12, 2) +#define AR40XX_PORT_VLAN1_OUT_MODE_S 12 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNMOD 0 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTAG 1 +#define AR40XX_PORT_VLAN1_OUT_MODE_TAG 2 +#define AR40XX_PORT_VLAN1_OUT_MODE_UNTOUCH 3 + +#define AR40XX_REG_VTU_FUNC0 0x0610 +#define AR40XX_VTU_FUNC0_EG_MODE BITS(4, 14) +#define AR40XX_VTU_FUNC0_EG_MODE_S(_i) (4 + (_i) * 2) +#define AR40XX_VTU_FUNC0_EG_MODE_KEEP 0 +#define AR40XX_VTU_FUNC0_EG_MODE_UNTAG 1 +#define AR40XX_VTU_FUNC0_EG_MODE_TAG 2 +#define AR40XX_VTU_FUNC0_EG_MODE_NOT 3 +#define AR40XX_VTU_FUNC0_IVL BIT(19) +#define AR40XX_VTU_FUNC0_VALID BIT(20) + +#define AR40XX_REG_VTU_FUNC1 0x0614 +#define AR40XX_VTU_FUNC1_OP BITS(0, 3) +#define AR40XX_VTU_FUNC1_OP_NOOP 0 +#define AR40XX_VTU_FUNC1_OP_FLUSH 1 +#define AR40XX_VTU_FUNC1_OP_LOAD 2 +#define AR40XX_VTU_FUNC1_OP_PURGE 3 +#define AR40XX_VTU_FUNC1_OP_REMOVE_PORT 4 +#define AR40XX_VTU_FUNC1_OP_GET_NEXT 5 +#define AR40XX7_VTU_FUNC1_OP_GET_ONE 6 +#define AR40XX_VTU_FUNC1_FULL BIT(4) +#define AR40XX_VTU_FUNC1_PORT BIT(8, 4) +#define AR40XX_VTU_FUNC1_PORT_S 8 +#define AR40XX_VTU_FUNC1_VID BIT(16, 12) +#define AR40XX_VTU_FUNC1_VID_S 16 +#define AR40XX_VTU_FUNC1_BUSY BIT(31) + +#define AR40XX_REG_FWD_CTRL0 0x620 +#define AR40XX_FWD_CTRL0_CPU_PORT_EN BIT(10) +#define AR40XX_FWD_CTRL0_MIRROR_PORT BITS(4, 4) +#define AR40XX_FWD_CTRL0_MIRROR_PORT_S 4 + +#define AR40XX_REG_FWD_CTRL1 0x624 +#define AR40XX_FWD_CTRL1_UC_FLOOD BITS(0, 7) +#define AR40XX_FWD_CTRL1_UC_FLOOD_S 0 +#define AR40XX_FWD_CTRL1_MC_FLOOD BITS(8, 7) +#define AR40XX_FWD_CTRL1_MC_FLOOD_S 8 +#define AR40XX_FWD_CTRL1_BC_FLOOD BITS(16, 7) +#define AR40XX_FWD_CTRL1_BC_FLOOD_S 16 +#define AR40XX_FWD_CTRL1_IGMP BITS(24, 7) +#define AR40XX_FWD_CTRL1_IGMP_S 24 + +#define AR40XX_REG_PORT_LOOKUP(_i) (0x660 + (_i) * 0xc) +#define AR40XX_PORT_LOOKUP_MEMBER BITS(0, 7) +#define AR40XX_PORT_LOOKUP_IN_MODE BITS(8, 2) +#define 
AR40XX_PORT_LOOKUP_IN_MODE_S 8 +#define AR40XX_PORT_LOOKUP_STATE BITS(16, 3) +#define AR40XX_PORT_LOOKUP_STATE_S 16 +#define AR40XX_PORT_LOOKUP_LEARN BIT(20) +#define AR40XX_PORT_LOOKUP_LOOPBACK BIT(21) +#define AR40XX_PORT_LOOKUP_ING_MIRROR_EN BIT(25) + +#define AR40XX_REG_ATU_FUNC 0x60c +#define AR40XX_ATU_FUNC_OP BITS(0, 4) +#define AR40XX_ATU_FUNC_OP_NOOP 0x0 +#define AR40XX_ATU_FUNC_OP_FLUSH 0x1 +#define AR40XX_ATU_FUNC_OP_LOAD 0x2 +#define AR40XX_ATU_FUNC_OP_PURGE 0x3 +#define AR40XX_ATU_FUNC_OP_FLUSH_LOCKED 0x4 +#define AR40XX_ATU_FUNC_OP_FLUSH_UNICAST 0x5 +#define AR40XX_ATU_FUNC_OP_GET_NEXT 0x6 +#define AR40XX_ATU_FUNC_OP_SEARCH_MAC 0x7 +#define AR40XX_ATU_FUNC_OP_CHANGE_TRUNK 0x8 +#define AR40XX_ATU_FUNC_BUSY BIT(31) + +#define AR40XX_REG_QM_DEBUG_ADDR 0x820 +#define AR40XX_REG_QM_DEBUG_VALUE 0x824 +#define AR40XX_REG_QM_PORT0_3_QNUM 0x1d +#define AR40XX_REG_QM_PORT4_6_QNUM 0x1e + +#define AR40XX_REG_PORT_HOL_CTRL1(_i) (0x974 + (_i) * 0x8) +#define AR40XX_PORT_HOL_CTRL1_EG_MIRROR_EN BIT(16) + +#define AR40XX_REG_PORT_FLOWCTRL_THRESH(_i) (0x9b0 + (_i) * 0x4) +#define AR40XX_PORT0_FC_THRESH_ON_DFLT 0x60 +#define AR40XX_PORT0_FC_THRESH_OFF_DFLT 0x90 + +#define AR40XX_PHY_DEBUG_0 0 +#define AR40XX_PHY_MANU_CTRL_EN BIT(12) + +#define AR40XX_PHY_DEBUG_2 2 + +#define AR40XX_PHY_SPEC_STATUS 0x11 +#define AR40XX_PHY_SPEC_STATUS_LINK BIT(10) +#define AR40XX_PHY_SPEC_STATUS_DUPLEX BIT(13) +#define AR40XX_PHY_SPEC_STATUS_SPEED BITS(14, 2) + +/* port forwarding state */ +enum { + AR40XX_PORT_STATE_DISABLED = 0, + AR40XX_PORT_STATE_BLOCK = 1, + AR40XX_PORT_STATE_LISTEN = 2, + AR40XX_PORT_STATE_LEARN = 3, + AR40XX_PORT_STATE_FORWARD = 4 +}; + +/* ingress 802.1q mode */ +enum { + AR40XX_IN_PORT_ONLY = 0, + AR40XX_IN_PORT_FALLBACK = 1, + AR40XX_IN_VLAN_ONLY = 2, + AR40XX_IN_SECURE = 3 +}; + +/* egress 802.1q mode */ +enum { + AR40XX_OUT_KEEP = 0, + AR40XX_OUT_STRIP_VLAN = 1, + AR40XX_OUT_ADD_VLAN = 2 +}; + +/* port speed */ +enum { + AR40XX_PORT_SPEED_10M = 0, + AR40XX_PORT_SPEED_100M = 1, + AR40XX_PORT_SPEED_1000M = 2, + AR40XX_PORT_SPEED_ERR = 3, +}; + +#define AR40XX_MIB_WORK_DELAY 2000 /* msecs */ + +#define AR40XX_QM_WORK_DELAY 100 + +#define AR40XX_MIB_FUNC_CAPTURE 0x3 + +#define AR40XX_REG_PORT_STATS_START 0x1000 +#define AR40XX_REG_PORT_STATS_LEN 0x100 + +#define AR40XX_PORTS_ALL 0x3f + +#define AR40XX_PSGMII_ID 5 +#define AR40XX_PSGMII_CALB_NUM 100 +#define AR40XX_MALIBU_PSGMII_MODE_CTRL 0x6d +#define AR40XX_MALIBU_PHY_PSGMII_MODE_CTRL_ADJUST_VAL 0x220c +#define AR40XX_MALIBU_PHY_MMD7_DAC_CTRL 0x801a +#define AR40XX_MALIBU_DAC_CTRL_MASK 0x380 +#define AR40XX_MALIBU_DAC_CTRL_VALUE 0x280 +#define AR40XX_MALIBU_PHY_RLP_CTRL 0x805a +#define AR40XX_PSGMII_TX_DRIVER_1_CTRL 0xb +#define AR40XX_MALIBU_PHY_PSGMII_REDUCE_SERDES_TX_AMP 0x8a +#define AR40XX_MALIBU_PHY_LAST_ADDR 4 + +static inline struct ar40xx_priv * +swdev_to_ar40xx(struct switch_dev *swdev) +{ + return container_of(swdev, struct ar40xx_priv, dev); +} + +#endif -- cgit v1.2.3
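
Illustrative sketch (appended after the patch, not part of it): the register-field convention in ar40xx.h pairs a BITS() mask with an _S shift constant, and the driver's read-modify-write helpers clear the mask before OR-ing in the shifted value. The snippet below mirrors the AR40XX_PORT_VLAN0 layout with prefix-stripped names; field_set() is a hypothetical stand-in for how ar40xx_rmw() is used. As an aside, AR40XX_VTU_FUNC1_PORT and AR40XX_VTU_FUNC1_VID are defined with BIT(x, y) rather than BITS(x, y); they do not appear to be expanded anywhere in ar40xx.c, but BITS looks like the intended macro.

#include <stdint.h>
#include <stdio.h>

/* Same helper as in ar40xx.h, with the shift argument parenthesized. */
#define BITS(_s, _n)	(((1UL << (_n)) - 1) << (_s))

/* Field layout mirroring AR40XX_PORT_VLAN0: default SVID and CVID. */
#define PORT_VLAN0_DEF_SVID	BITS(0, 12)
#define PORT_VLAN0_DEF_SVID_S	0
#define PORT_VLAN0_DEF_CVID	BITS(16, 12)
#define PORT_VLAN0_DEF_CVID_S	16

/* Read-modify-write of one field: clear the mask, OR in the shifted value. */
static uint32_t field_set(uint32_t reg, uint32_t mask, uint32_t shift,
			  uint32_t val)
{
	return (reg & ~mask) | ((val << shift) & mask);
}

int main(void)
{
	uint32_t v = 0;

	v = field_set(v, PORT_VLAN0_DEF_SVID, PORT_VLAN0_DEF_SVID_S, 1);
	v = field_set(v, PORT_VLAN0_DEF_CVID, PORT_VLAN0_DEF_CVID_S, 1);
	printf("PORT_VLAN0 = 0x%08x\n", v);	/* prints 0x00010001 */
	return 0;
}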