diff options
author | Zoltan Herpai <wigyori@uid0.hu> | 2016-07-04 22:17:33 +0200 |
---|---|---|
committer | GitHub <noreply@github.com> | 2016-07-04 22:17:33 +0200 |
commit | 70069b0f57bb2f1721fbf8393985c13c7800f66f (patch) | |
tree | 2d4bbb30414da84feb6222ef5a0110d01dcf3445 /target | |
parent | 131f8241ca664f8a16632c32e46132efbf2defda (diff) | |
parent | dd33f253baf68875f99498137f050c756f7427b6 (diff) | |
download | master-187ad058-70069b0f57bb2f1721fbf8393985c13c7800f66f.tar.gz master-187ad058-70069b0f57bb2f1721fbf8393985c13c7800f66f.tar.bz2 master-187ad058-70069b0f57bb2f1721fbf8393985c13c7800f66f.zip |
Merge pull request #20 from wigyori/master
bump mvebu to 4.4, swconfig / mt76 updates, other small fixes
Diffstat (limited to 'target')
74 files changed, 6000 insertions, 195 deletions
diff --git a/target/linux/bcm53xx/config-4.4 b/target/linux/bcm53xx/config-4.4 index 634786177a..5611cfe0d4 100644 --- a/target/linux/bcm53xx/config-4.4 +++ b/target/linux/bcm53xx/config-4.4 @@ -54,10 +54,10 @@ CONFIG_ARM_THUMB=y CONFIG_ARM_VIRT_EXT=y CONFIG_ATAGS=y CONFIG_AUTO_ZRELADDR=y -CONFIG_B53=y -# CONFIG_B53_MMAP_DRIVER is not set -# CONFIG_B53_PHY_DRIVER is not set -CONFIG_B53_SRAB_DRIVER=y +CONFIG_SWCONFIG_B53=y +# CONFIG_SWCONFIG_B53_MMAP_DRIVER is not set +# CONFIG_SWCONFIG_B53_PHY_DRIVER is not set +CONFIG_SWCONFIG_B53_SRAB_DRIVER=y CONFIG_BCM47XX_NVRAM=y CONFIG_BCM47XX_SPROM=y CONFIG_BCM47XX_WDT=y diff --git a/target/linux/brcm47xx/config-4.1 b/target/linux/brcm47xx/config-4.1 index 0b812d55a7..5cf8550045 100644 --- a/target/linux/brcm47xx/config-4.1 +++ b/target/linux/brcm47xx/config-4.1 @@ -12,11 +12,11 @@ CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_B53=y -# CONFIG_B53_MMAP_DRIVER is not set -CONFIG_B53_PHY_DRIVER=y -CONFIG_B53_PHY_FIXUP=y -# CONFIG_B53_SRAB_DRIVER is not set +CONFIG_SWCONFIG_B53=y +# CONFIG_SWCONFIG_B53_MMAP_DRIVER is not set +CONFIG_SWCONFIG_B53_PHY_DRIVER=y +CONFIG_SWCONFIG_B53_PHY_FIXUP=y +# CONFIG_SWCONFIG_B53_SRAB_DRIVER is not set CONFIG_BCM47XX=y CONFIG_BCM47XX_BCMA=y CONFIG_BCM47XX_SSB=y diff --git a/target/linux/brcm47xx/config-4.4 b/target/linux/brcm47xx/config-4.4 index b72a7248ff..492b12b45a 100644 --- a/target/linux/brcm47xx/config-4.4 +++ b/target/linux/brcm47xx/config-4.4 @@ -14,11 +14,11 @@ CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_ARCH_USE_BUILTIN_BSWAP=y CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_B53=y -# CONFIG_B53_MMAP_DRIVER is not set -CONFIG_B53_PHY_DRIVER=y -CONFIG_B53_PHY_FIXUP=y -# CONFIG_B53_SRAB_DRIVER is not set +CONFIG_SWCONFIG_B53=y +# CONFIG_SWCONFIG_B53_MMAP_DRIVER is not set +CONFIG_SWCONFIG_B53_PHY_DRIVER=y +CONFIG_SWCONFIG_B53_PHY_FIXUP=y +# CONFIG_SWCONFIG_B53_SRAB_DRIVER is not set CONFIG_BCM47XX=y CONFIG_BCM47XX_BCMA=y CONFIG_BCM47XX_NVRAM=y diff --git a/target/linux/brcm63xx/config-4.4 b/target/linux/brcm63xx/config-4.4 index 4739627fa3..fb448adc1a 100644 --- a/target/linux/brcm63xx/config-4.4 +++ b/target/linux/brcm63xx/config-4.4 @@ -13,12 +13,12 @@ CONFIG_ARCH_SUPPORTS_UPROBES=y CONFIG_ARCH_SUSPEND_POSSIBLE=y CONFIG_ARCH_USE_BUILTIN_BSWAP=y CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y -CONFIG_B53=y -CONFIG_B53_MMAP_DRIVER=y -CONFIG_B53_PHY_DRIVER=y -CONFIG_B53_PHY_FIXUP=y -CONFIG_B53_SPI_DRIVER=y -# CONFIG_B53_SRAB_DRIVER is not set +CONFIG_SWCONFIG_B53=y +CONFIG_SWCONFIG_B53_MMAP_DRIVER=y +CONFIG_SWCONFIG_B53_PHY_DRIVER=y +CONFIG_SWCONFIG_B53_PHY_FIXUP=y +CONFIG_SWCONFIG_B53_SPI_DRIVER=y +# CONFIG_SWCONFIG_B53_SRAB_DRIVER is not set CONFIG_BCM6345_EXT_IRQ=y CONFIG_BCM6345_PERIPH_IRQ=y CONFIG_BCM63XX=y diff --git a/target/linux/generic/config-3.18 b/target/linux/generic/config-3.18 index c2fedc53a8..3ed9a28df0 100644 --- a/target/linux/generic/config-3.18 +++ b/target/linux/generic/config-3.18 @@ -330,8 +330,8 @@ CONFIG_ATM_CLIP_NO_ICMP=y # CONFIG_B43 is not set # CONFIG_B43LEGACY is not set # CONFIG_B44 is not set -# CONFIG_B53 is not set -# CONFIG_B53_SPI_DRIVER is not set +# CONFIG_SWCONFIG_B53 is not set +# CONFIG_SWCONFIG_B53_SPI_DRIVER is not set # CONFIG_BACKLIGHT_BD6107 is not set # CONFIG_BACKLIGHT_GPIO is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set diff --git a/target/linux/generic/config-4.1 b/target/linux/generic/config-4.1 index 3247b24e90..abfa7dc0d0 100644 --- 
a/target/linux/generic/config-4.1 +++ b/target/linux/generic/config-4.1 @@ -344,8 +344,8 @@ CONFIG_ATM_CLIP_NO_ICMP=y # CONFIG_B43 is not set # CONFIG_B43LEGACY is not set # CONFIG_B44 is not set -# CONFIG_B53 is not set -# CONFIG_B53_SPI_DRIVER is not set +# CONFIG_SWCONFIG_B53 is not set +# CONFIG_SWCONFIG_B53_SPI_DRIVER is not set # CONFIG_BACKLIGHT_ADP8860 is not set # CONFIG_BACKLIGHT_ADP8870 is not set # CONFIG_BACKLIGHT_BD6107 is not set diff --git a/target/linux/generic/config-4.4 b/target/linux/generic/config-4.4 index 9bd304f692..1afaaca125 100644 --- a/target/linux/generic/config-4.4 +++ b/target/linux/generic/config-4.4 @@ -321,8 +321,8 @@ CONFIG_ATM_CLIP_NO_ICMP=y # CONFIG_B43 is not set # CONFIG_B43LEGACY is not set # CONFIG_B44 is not set -# CONFIG_B53 is not set -# CONFIG_B53_SPI_DRIVER is not set +# CONFIG_SWCONFIG_B53 is not set +# CONFIG_SWCONFIG_B53_SPI_DRIVER is not set # CONFIG_BACKLIGHT_ADP8860 is not set # CONFIG_BACKLIGHT_ADP8870 is not set # CONFIG_BACKLIGHT_BD6107 is not set @@ -1242,6 +1242,13 @@ CONFIG_GENERIC_NET_UTILS=y # CONFIG_HARDLOCKUP_DETECTOR is not set # CONFIG_HAVE_AOUT is not set # CONFIG_HAVE_ARM_ARCH_TIMER is not set +CONFIG_HAVE_KERNEL_BZIP2=y +CONFIG_HAVE_KERNEL_CAT=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_KPROBES=y CONFIG_HAVE_KRETPROBES=y # CONFIG_HCALL_STATS is not set @@ -1795,6 +1802,7 @@ CONFIG_JOLIET=y # CONFIG_KALLSYMS_UNCOMPRESSED is not set # CONFIG_KARMA_PARTITION is not set # CONFIG_KERNEL_BZIP2 is not set +# CONFIG_KERNEL_CAT is not set # CONFIG_KERNEL_GZIP is not set # CONFIG_KERNEL_LZ4 is not set # CONFIG_KERNEL_LZMA is not set diff --git a/target/linux/generic/files/drivers/net/phy/b53/Kconfig b/target/linux/generic/files/drivers/net/phy/b53/Kconfig index 67e053ea0f..08287e7adf 100644 --- a/target/linux/generic/files/drivers/net/phy/b53/Kconfig +++ b/target/linux/generic/files/drivers/net/phy/b53/Kconfig @@ -1,4 +1,4 @@ -menuconfig B53 +menuconfig SWCONFIG_B53 tristate "Broadcom bcm53xx managed switch support" depends on SWCONFIG help @@ -6,32 +6,32 @@ menuconfig B53 BCM5325E, BCM5365, BCM539x, BCM53115 and BCM53125 as well as BCM63XX integrated switches. -config B53_SPI_DRIVER +config SWCONFIG_B53_SPI_DRIVER tristate "B53 SPI connected switch driver" - depends on B53 && SPI + depends on SWCONFIG_B53 && SPI help Select to enable support for registering switches configured through SPI. -config B53_PHY_DRIVER +config SWCONFIG_B53_PHY_DRIVER tristate "B53 MDIO connected switch driver" - depends on B53 - select B53_PHY_FIXUP + depends on SWCONFIG_B53 + select SWCONFIG_B53_PHY_FIXUP help Select to enable support for registering switches configured through MDIO. -config B53_MMAP_DRIVER +config SWCONFIG_B53_MMAP_DRIVER tristate "B53 MMAP connected switch driver" - depends on B53 + depends on SWCONFIG_B53 help Select to enable support for memory-mapped switches like the BCM63XX integrated switches. 
-config B53_SRAB_DRIVER +config SWCONFIG_B53_SRAB_DRIVER tristate "B53 SRAB connected switch driver" - depends on B53 + depends on SWCONFIG_B53 help Select to enable support for memory-mapped Switch Register Access Bridge Registers (SRAB) like it is found on the BCM53010 -config B53_PHY_FIXUP +config SWCONFIG_B53_PHY_FIXUP bool diff --git a/target/linux/generic/files/drivers/net/phy/b53/Makefile b/target/linux/generic/files/drivers/net/phy/b53/Makefile index 7cc39c7628..13ff366448 100644 --- a/target/linux/generic/files/drivers/net/phy/b53/Makefile +++ b/target/linux/generic/files/drivers/net/phy/b53/Makefile @@ -1,10 +1,10 @@ -obj-$(CONFIG_B53) += b53_common.o +obj-$(CONFIG_SWCONFIG_B53) += b53_common.o -obj-$(CONFIG_B53_PHY_FIXUP) += b53_phy_fixup.o +obj-$(CONFIG_SWCONFIG_B53_PHY_FIXUP) += b53_phy_fixup.o -obj-$(CONFIG_B53_MMAP_DRIVER) += b53_mmap.o -obj-$(CONFIG_B53_SRAB_DRIVER) += b53_srab.o -obj-$(CONFIG_B53_PHY_DRIVER) += b53_mdio.o -obj-$(CONFIG_B53_SPI_DRIVER) += b53_spi.o +obj-$(CONFIG_SWCONFIG_B53_MMAP_DRIVER) += b53_mmap.o +obj-$(CONFIG_SWCONFIG_B53_SRAB_DRIVER) += b53_srab.o +obj-$(CONFIG_SWCONFIG_B53_PHY_DRIVER) += b53_mdio.o +obj-$(CONFIG_SWCONFIG_B53_SPI_DRIVER) += b53_spi.o -ccflags-y += -Werror +ccflags-y += -Werror diff --git a/target/linux/generic/files/drivers/net/phy/swconfig.c b/target/linux/generic/files/drivers/net/phy/swconfig.c index b556510aef..c70ca74cad 100644 --- a/target/linux/generic/files/drivers/net/phy/swconfig.c +++ b/target/linux/generic/files/drivers/net/phy/swconfig.c @@ -635,6 +635,9 @@ swconfig_set_attr(struct sk_buff *skb, struct genl_info *info) struct switch_val val; int err = -EINVAL; + if (!capable(CAP_NET_ADMIN)) + return -EPERM; + dev = swconfig_get_dev(info); if (!dev) return -EINVAL; @@ -1022,16 +1025,19 @@ static struct genl_ops swconfig_ops[] = { }, { .cmd = SWITCH_CMD_SET_GLOBAL, + .flags = GENL_ADMIN_PERM, .doit = swconfig_set_attr, .policy = switch_policy, }, { .cmd = SWITCH_CMD_SET_VLAN, + .flags = GENL_ADMIN_PERM, .doit = swconfig_set_attr, .policy = switch_policy, }, { .cmd = SWITCH_CMD_SET_PORT, + .flags = GENL_ADMIN_PERM, .doit = swconfig_set_attr, .policy = switch_policy, }, diff --git a/target/linux/generic/patches-3.18/730-phy_b53.patch b/target/linux/generic/patches-3.18/730-phy_b53.patch index c2dfcfa501..03fc369c9d 100644 --- a/target/linux/generic/patches-3.18/730-phy_b53.patch +++ b/target/linux/generic/patches-3.18/730-phy_b53.patch @@ -15,7 +15,7 @@ obj-$(CONFIG_RTL8367B_PHY) += rtl8367b.o obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o obj-$(CONFIG_PSB6970_PHY) += psb6970.o -+obj-$(CONFIG_B53) += b53/ ++obj-$(CONFIG_SWCONFIG_B53) += b53/ obj-$(CONFIG_FIXED_PHY) += fixed.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o diff --git a/target/linux/generic/patches-4.1/730-phy_b53.patch b/target/linux/generic/patches-4.1/730-phy_b53.patch index bdcfba6e86..ff166890d4 100644 --- a/target/linux/generic/patches-4.1/730-phy_b53.patch +++ b/target/linux/generic/patches-4.1/730-phy_b53.patch @@ -15,7 +15,7 @@ obj-$(CONFIG_RTL8367B_PHY) += rtl8367b.o obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o obj-$(CONFIG_PSB6970_PHY) += psb6970.o -+obj-$(CONFIG_B53) += b53/ ++obj-$(CONFIG_SWCONFIG_B53) += b53/ obj-$(CONFIG_FIXED_PHY) += fixed_phy.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o diff --git a/target/linux/generic/patches-4.4/070-0001-bgmac-fix-a-missing-check-for-build_skb.patch 
b/target/linux/generic/patches-4.4/070-0001-bgmac-fix-a-missing-check-for-build_skb.patch new file mode 100644 index 0000000000..7e6dc2035d --- /dev/null +++ b/target/linux/generic/patches-4.4/070-0001-bgmac-fix-a-missing-check-for-build_skb.patch @@ -0,0 +1,28 @@ +From f1640c3ddeec12804bc9a21feee85fc15aca95f6 Mon Sep 17 00:00:00 2001 +From: wangweidong <wangweidong1@huawei.com> +Date: Wed, 13 Jan 2016 11:06:41 +0800 +Subject: [PATCH] bgmac: fix a missing check for build_skb + +when build_skb failed, it may occure a NULL pointer. +So add a 'NULL check' for it. + +Signed-off-by: Weidong Wang <wangweidong1@huawei.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac.c | 5 +++++ + 1 file changed, 5 insertions(+) + +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -466,6 +466,11 @@ static int bgmac_dma_rx_read(struct bgma + len -= ETH_FCS_LEN; + + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); ++ if (unlikely(skb)) { ++ bgmac_err(bgmac, "build_skb failed\n"); ++ put_page(virt_to_head_page(buf)); ++ break; ++ } + skb_put(skb, BGMAC_RX_FRAME_OFFSET + + BGMAC_RX_BUF_OFFSET + len); + skb_pull(skb, BGMAC_RX_FRAME_OFFSET + diff --git a/target/linux/generic/patches-4.4/070-0002-bgmac-Fix-reversed-test-of-build_skb-return-value.patch b/target/linux/generic/patches-4.4/070-0002-bgmac-Fix-reversed-test-of-build_skb-return-value.patch new file mode 100644 index 0000000000..0b0a28d81a --- /dev/null +++ b/target/linux/generic/patches-4.4/070-0002-bgmac-Fix-reversed-test-of-build_skb-return-value.patch @@ -0,0 +1,22 @@ +From 750afbf8ee9c6a1c74a1fe5fc9852146b1d72687 Mon Sep 17 00:00:00 2001 +From: "David S. Miller" <davem@davemloft.net> +Date: Fri, 15 Jan 2016 16:07:13 -0500 +Subject: [PATCH] bgmac: Fix reversed test of build_skb() return value. + +Fixes: f1640c3ddeec ("bgmac: fix a missing check for build_skb") +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac.c | 2 +- + 1 file changed, 1 insertion(+), 1 deletion(-) + +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -466,7 +466,7 @@ static int bgmac_dma_rx_read(struct bgma + len -= ETH_FCS_LEN; + + skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE); +- if (unlikely(skb)) { ++ if (unlikely(!skb)) { + bgmac_err(bgmac, "build_skb failed\n"); + put_page(virt_to_head_page(buf)); + break; diff --git a/target/linux/generic/patches-4.4/070-bgmac-add-helper-checking-for-BCM4707-BCM53018-chip-.patch b/target/linux/generic/patches-4.4/071-0001-bgmac-add-helper-checking-for-BCM4707-BCM53018-chip-.patch index 63010c1450..4008c29963 100644 --- a/target/linux/generic/patches-4.4/070-bgmac-add-helper-checking-for-BCM4707-BCM53018-chip-.patch +++ b/target/linux/generic/patches-4.4/071-0001-bgmac-add-helper-checking-for-BCM4707-BCM53018-chip-.patch @@ -36,7 +36,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net> static bool bgmac_wait_value(struct bcma_device *core, u16 reg, u32 mask, u32 value, int timeout) { -@@ -982,11 +993,9 @@ static void bgmac_mac_speed(struct bgmac +@@ -987,11 +998,9 @@ static void bgmac_mac_speed(struct bgmac static void bgmac_miiconfig(struct bgmac *bgmac) { struct bcma_device *core = bgmac->core; @@ -49,7 +49,7 @@ Signed-off-by: David S. 
Miller <davem@davemloft.net> bcma_awrite32(core, BCMA_IOCTL, bcma_aread32(core, BCMA_IOCTL) | 0x40 | BGMAC_BCMA_IOCTL_SW_CLKEN); -@@ -1050,9 +1059,7 @@ static void bgmac_chip_reset(struct bgma +@@ -1055,9 +1064,7 @@ static void bgmac_chip_reset(struct bgma } /* Request Misc PLL for corerev > 2 */ @@ -60,7 +60,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net> bgmac_set(bgmac, BCMA_CLKCTLST, BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ); bgmac_wait_value(bgmac->core, BCMA_CLKCTLST, -@@ -1188,8 +1195,7 @@ static void bgmac_enable(struct bgmac *b +@@ -1193,8 +1200,7 @@ static void bgmac_enable(struct bgmac *b break; } @@ -70,7 +70,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net> rxq_ctl = bgmac_read(bgmac, BGMAC_RXQ_CTL); rxq_ctl &= ~BGMAC_RXQ_CTL_MDP_MASK; bp_clk = bcma_pmu_get_bus_clock(&bgmac->core->bus->drv_cc) / -@@ -1467,14 +1473,12 @@ static int bgmac_fixed_phy_register(stru +@@ -1472,14 +1478,12 @@ static int bgmac_fixed_phy_register(stru static int bgmac_mii_register(struct bgmac *bgmac) { @@ -86,7 +86,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net> return bgmac_fixed_phy_register(bgmac); mii_bus = mdiobus_alloc(); -@@ -1545,7 +1549,6 @@ static void bgmac_mii_unregister(struct +@@ -1550,7 +1554,6 @@ static void bgmac_mii_unregister(struct /* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipattach */ static int bgmac_probe(struct bcma_device *core) { @@ -94,7 +94,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net> struct net_device *net_dev; struct bgmac *bgmac; struct ssb_sprom *sprom = &core->bus->sprom; -@@ -1626,8 +1629,7 @@ static int bgmac_probe(struct bcma_devic +@@ -1631,8 +1634,7 @@ static int bgmac_probe(struct bcma_devic bgmac_chip_reset(bgmac); /* For Northstar, we have to take all GMAC core out of reset */ diff --git a/target/linux/generic/patches-4.4/071-bgmac-support-Ethernet-device-on-BCM47094-SoC.patch b/target/linux/generic/patches-4.4/071-0002-bgmac-support-Ethernet-device-on-BCM47094-SoC.patch index 88db7b2fff..514fe26f1b 100644 --- a/target/linux/generic/patches-4.4/071-bgmac-support-Ethernet-device-on-BCM47094-SoC.patch +++ b/target/linux/generic/patches-4.4/071-0002-bgmac-support-Ethernet-device-on-BCM47094-SoC.patch @@ -25,7 +25,7 @@ Signed-off-by: David S. Miller <davem@davemloft.net> case BCMA_CHIP_ID_BCM53018: return true; default: -@@ -1047,8 +1048,9 @@ static void bgmac_chip_reset(struct bgma +@@ -1052,8 +1053,9 @@ static void bgmac_chip_reset(struct bgma (ci->id == BCMA_CHIP_ID_BCM53572 && ci->pkg == BCMA_PKG_ID_BCM47188)) iost &= ~BGMAC_BCMA_IOST_ATTACHED; diff --git a/target/linux/generic/patches-4.4/072-bgmac-reset-enable-Ethernet-core-before-using-it.patch b/target/linux/generic/patches-4.4/071-0003-bgmac-reset-enable-Ethernet-core-before-using-it.patch index 1306072c84..4375868b16 100644 --- a/target/linux/generic/patches-4.4/072-bgmac-reset-enable-Ethernet-core-before-using-it.patch +++ b/target/linux/generic/patches-4.4/071-0003-bgmac-reset-enable-Ethernet-core-before-using-it.patch @@ -17,7 +17,7 @@ Signed-off-by: David S. 
Miller <davem@davemloft.net> --- a/drivers/net/ethernet/broadcom/bgmac.c +++ b/drivers/net/ethernet/broadcom/bgmac.c -@@ -1578,6 +1578,11 @@ static int bgmac_probe(struct bcma_devic +@@ -1583,6 +1583,11 @@ static int bgmac_probe(struct bcma_devic dev_warn(&core->dev, "Using random MAC: %pM\n", mac); } diff --git a/target/linux/generic/patches-4.4/073-bgmac-fix-MAC-soft-reset-bit-for-corerev-4.patch b/target/linux/generic/patches-4.4/071-0004-bgmac-fix-MAC-soft-reset-bit-for-corerev-4.patch index a5fcaab51c..a5fcaab51c 100644 --- a/target/linux/generic/patches-4.4/073-bgmac-fix-MAC-soft-reset-bit-for-corerev-4.patch +++ b/target/linux/generic/patches-4.4/071-0004-bgmac-fix-MAC-soft-reset-bit-for-corerev-4.patch diff --git a/target/linux/generic/patches-4.4/072-0001-bgmac-Bind-net_device-with-backing-device-structure.patch b/target/linux/generic/patches-4.4/072-0001-bgmac-Bind-net_device-with-backing-device-structure.patch new file mode 100644 index 0000000000..764b16801d --- /dev/null +++ b/target/linux/generic/patches-4.4/072-0001-bgmac-Bind-net_device-with-backing-device-structure.patch @@ -0,0 +1,25 @@ +From 2022e9d50798aa592887ccb5a7d045e537f3855f Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <f.fainelli@gmail.com> +Date: Tue, 7 Jun 2016 15:06:13 -0700 +Subject: [PATCH 1/3] bgmac: Bind net_device with backing device structure + +In preparation for allowing different helpers to be utilized against +network devices created by the bgmac driver, make sure that we bind the +net_device with core->dev. + +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac.c | 1 + + 1 file changed, 1 insertion(+) + +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -1599,6 +1599,7 @@ static int bgmac_probe(struct bcma_devic + bgmac->net_dev = net_dev; + bgmac->core = core; + bcma_set_drvdata(core, bgmac); ++ SET_NETDEV_DEV(net_dev, &core->dev); + + /* Defaults */ + memcpy(bgmac->net_dev->dev_addr, mac, ETH_ALEN); diff --git a/target/linux/generic/patches-4.4/072-0002-bgmac-Add-support-for-ethtool-statistics.patch b/target/linux/generic/patches-4.4/072-0002-bgmac-Add-support-for-ethtool-statistics.patch new file mode 100644 index 0000000000..8f6b6c80fc --- /dev/null +++ b/target/linux/generic/patches-4.4/072-0002-bgmac-Add-support-for-ethtool-statistics.patch @@ -0,0 +1,175 @@ +From f6613d4fa937fa8388f2c1cb4e69ccc25e9e2336 Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <f.fainelli@gmail.com> +Date: Tue, 7 Jun 2016 15:06:14 -0700 +Subject: [PATCH 2/3] bgmac: Add support for ethtool statistics + +Read the statistics from the BGMAC's builtin MAC and return them to +user-space using the standard ethtool helpers. + +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac.c | 124 ++++++++++++++++++++++++++++++++++ + drivers/net/ethernet/broadcom/bgmac.h | 4 +- + 2 files changed, 126 insertions(+), 2 deletions(-) + +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -1382,6 +1382,127 @@ static const struct net_device_ops bgmac + * ethtool_ops + **************************************************/ + ++struct bgmac_stat { ++ u8 size; ++ u32 offset; ++ const char *name; ++}; ++ ++static struct bgmac_stat bgmac_get_strings_stats[] = { ++ { 8, BGMAC_TX_GOOD_OCTETS, "tx_good_octets" }, ++ { 4, BGMAC_TX_GOOD_PKTS, "tx_good" }, ++ { 8, BGMAC_TX_OCTETS, "tx_octets" }, ++ { 4, BGMAC_TX_PKTS, "tx_pkts" }, ++ { 4, BGMAC_TX_BROADCAST_PKTS, "tx_broadcast" }, ++ { 4, BGMAC_TX_MULTICAST_PKTS, "tx_multicast" }, ++ { 4, BGMAC_TX_LEN_64, "tx_64" }, ++ { 4, BGMAC_TX_LEN_65_TO_127, "tx_65_127" }, ++ { 4, BGMAC_TX_LEN_128_TO_255, "tx_128_255" }, ++ { 4, BGMAC_TX_LEN_256_TO_511, "tx_256_511" }, ++ { 4, BGMAC_TX_LEN_512_TO_1023, "tx_512_1023" }, ++ { 4, BGMAC_TX_LEN_1024_TO_1522, "tx_1024_1522" }, ++ { 4, BGMAC_TX_LEN_1523_TO_2047, "tx_1523_2047" }, ++ { 4, BGMAC_TX_LEN_2048_TO_4095, "tx_2048_4095" }, ++ { 4, BGMAC_TX_LEN_4096_TO_8191, "tx_4096_8191" }, ++ { 4, BGMAC_TX_LEN_8192_TO_MAX, "tx_8192_max" }, ++ { 4, BGMAC_TX_JABBER_PKTS, "tx_jabber" }, ++ { 4, BGMAC_TX_OVERSIZE_PKTS, "tx_oversize" }, ++ { 4, BGMAC_TX_FRAGMENT_PKTS, "tx_fragment" }, ++ { 4, BGMAC_TX_UNDERRUNS, "tx_underruns" }, ++ { 4, BGMAC_TX_TOTAL_COLS, "tx_total_cols" }, ++ { 4, BGMAC_TX_SINGLE_COLS, "tx_single_cols" }, ++ { 4, BGMAC_TX_MULTIPLE_COLS, "tx_multiple_cols" }, ++ { 4, BGMAC_TX_EXCESSIVE_COLS, "tx_excessive_cols" }, ++ { 4, BGMAC_TX_LATE_COLS, "tx_late_cols" }, ++ { 4, BGMAC_TX_DEFERED, "tx_defered" }, ++ { 4, BGMAC_TX_CARRIER_LOST, "tx_carrier_lost" }, ++ { 4, BGMAC_TX_PAUSE_PKTS, "tx_pause" }, ++ { 4, BGMAC_TX_UNI_PKTS, "tx_unicast" }, ++ { 4, BGMAC_TX_Q0_PKTS, "tx_q0" }, ++ { 8, BGMAC_TX_Q0_OCTETS, "tx_q0_octets" }, ++ { 4, BGMAC_TX_Q1_PKTS, "tx_q1" }, ++ { 8, BGMAC_TX_Q1_OCTETS, "tx_q1_octets" }, ++ { 4, BGMAC_TX_Q2_PKTS, "tx_q2" }, ++ { 8, BGMAC_TX_Q2_OCTETS, "tx_q2_octets" }, ++ { 4, BGMAC_TX_Q3_PKTS, "tx_q3" }, ++ { 8, BGMAC_TX_Q3_OCTETS, "tx_q3_octets" }, ++ { 8, BGMAC_RX_GOOD_OCTETS, "rx_good_octets" }, ++ { 4, BGMAC_RX_GOOD_PKTS, "rx_good" }, ++ { 8, BGMAC_RX_OCTETS, "rx_octets" }, ++ { 4, BGMAC_RX_PKTS, "rx_pkts" }, ++ { 4, BGMAC_RX_BROADCAST_PKTS, "rx_broadcast" }, ++ { 4, BGMAC_RX_MULTICAST_PKTS, "rx_multicast" }, ++ { 4, BGMAC_RX_LEN_64, "rx_64" }, ++ { 4, BGMAC_RX_LEN_65_TO_127, "rx_65_127" }, ++ { 4, BGMAC_RX_LEN_128_TO_255, "rx_128_255" }, ++ { 4, BGMAC_RX_LEN_256_TO_511, "rx_256_511" }, ++ { 4, BGMAC_RX_LEN_512_TO_1023, "rx_512_1023" }, ++ { 4, BGMAC_RX_LEN_1024_TO_1522, "rx_1024_1522" }, ++ { 4, BGMAC_RX_LEN_1523_TO_2047, "rx_1523_2047" }, ++ { 4, BGMAC_RX_LEN_2048_TO_4095, "rx_2048_4095" }, ++ { 4, BGMAC_RX_LEN_4096_TO_8191, "rx_4096_8191" }, ++ { 4, BGMAC_RX_LEN_8192_TO_MAX, "rx_8192_max" }, ++ { 4, BGMAC_RX_JABBER_PKTS, "rx_jabber" }, ++ { 4, BGMAC_RX_OVERSIZE_PKTS, "rx_oversize" }, ++ { 4, BGMAC_RX_FRAGMENT_PKTS, "rx_fragment" }, ++ { 4, BGMAC_RX_MISSED_PKTS, "rx_missed" }, ++ { 4, BGMAC_RX_CRC_ALIGN_ERRS, "rx_crc_align" }, ++ { 4, BGMAC_RX_UNDERSIZE, "rx_undersize" }, ++ { 4, BGMAC_RX_CRC_ERRS, "rx_crc" }, ++ { 4, BGMAC_RX_ALIGN_ERRS, "rx_align" }, ++ { 4, BGMAC_RX_SYMBOL_ERRS, "rx_symbol" }, ++ { 4, BGMAC_RX_PAUSE_PKTS, "rx_pause" }, ++ { 4, BGMAC_RX_NONPAUSE_PKTS, 
"rx_nonpause" }, ++ { 4, BGMAC_RX_SACHANGES, "rx_sa_changes" }, ++ { 4, BGMAC_RX_UNI_PKTS, "rx_unicast" }, ++}; ++ ++#define BGMAC_STATS_LEN ARRAY_SIZE(bgmac_get_strings_stats) ++ ++static int bgmac_get_sset_count(struct net_device *dev, int string_set) ++{ ++ switch (string_set) { ++ case ETH_SS_STATS: ++ return BGMAC_STATS_LEN; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++static void bgmac_get_strings(struct net_device *dev, u32 stringset, ++ u8 *data) ++{ ++ int i; ++ ++ if (stringset != ETH_SS_STATS) ++ return; ++ ++ for (i = 0; i < BGMAC_STATS_LEN; i++) ++ strlcpy(data + i * ETH_GSTRING_LEN, ++ bgmac_get_strings_stats[i].name, ETH_GSTRING_LEN); ++} ++ ++static void bgmac_get_ethtool_stats(struct net_device *dev, ++ struct ethtool_stats *ss, uint64_t *data) ++{ ++ struct bgmac *bgmac = netdev_priv(dev); ++ const struct bgmac_stat *s; ++ unsigned int i; ++ u64 val; ++ ++ if (!netif_running(dev)) ++ return; ++ ++ for (i = 0; i < BGMAC_STATS_LEN; i++) { ++ s = &bgmac_get_strings_stats[i]; ++ val = 0; ++ if (s->size == 8) ++ val = (u64)bgmac_read(bgmac, s->offset + 4) << 32; ++ val |= bgmac_read(bgmac, s->offset); ++ data[i] = val; ++ } ++} ++ + static int bgmac_get_settings(struct net_device *net_dev, + struct ethtool_cmd *cmd) + { +@@ -1406,6 +1527,9 @@ static void bgmac_get_drvinfo(struct net + } + + static const struct ethtool_ops bgmac_ethtool_ops = { ++ .get_strings = bgmac_get_strings, ++ .get_sset_count = bgmac_get_sset_count, ++ .get_ethtool_stats = bgmac_get_ethtool_stats, + .get_settings = bgmac_get_settings, + .set_settings = bgmac_set_settings, + .get_drvinfo = bgmac_get_drvinfo, +--- a/drivers/net/ethernet/broadcom/bgmac.h ++++ b/drivers/net/ethernet/broadcom/bgmac.h +@@ -123,7 +123,7 @@ + #define BGMAC_TX_LEN_1024_TO_1522 0x334 + #define BGMAC_TX_LEN_1523_TO_2047 0x338 + #define BGMAC_TX_LEN_2048_TO_4095 0x33c +-#define BGMAC_TX_LEN_4095_TO_8191 0x340 ++#define BGMAC_TX_LEN_4096_TO_8191 0x340 + #define BGMAC_TX_LEN_8192_TO_MAX 0x344 + #define BGMAC_TX_JABBER_PKTS 0x348 /* Error */ + #define BGMAC_TX_OVERSIZE_PKTS 0x34c /* Error */ +@@ -166,7 +166,7 @@ + #define BGMAC_RX_LEN_1024_TO_1522 0x3e4 + #define BGMAC_RX_LEN_1523_TO_2047 0x3e8 + #define BGMAC_RX_LEN_2048_TO_4095 0x3ec +-#define BGMAC_RX_LEN_4095_TO_8191 0x3f0 ++#define BGMAC_RX_LEN_4096_TO_8191 0x3f0 + #define BGMAC_RX_LEN_8192_TO_MAX 0x3f4 + #define BGMAC_RX_JABBER_PKTS 0x3f8 /* Error */ + #define BGMAC_RX_OVERSIZE_PKTS 0x3fc /* Error */ diff --git a/target/linux/generic/patches-4.4/072-0003-bgmac-Maintain-some-netdev-statistics.patch b/target/linux/generic/patches-4.4/072-0003-bgmac-Maintain-some-netdev-statistics.patch new file mode 100644 index 0000000000..17a7ce060c --- /dev/null +++ b/target/linux/generic/patches-4.4/072-0003-bgmac-Maintain-some-netdev-statistics.patch @@ -0,0 +1,68 @@ +From 6d490f62a4c7f11c552591bdd08eda3636aa0db9 Mon Sep 17 00:00:00 2001 +From: Florian Fainelli <f.fainelli@gmail.com> +Date: Tue, 7 Jun 2016 15:06:15 -0700 +Subject: [PATCH 3/3] bgmac: Maintain some netdev statistics + +Add a few netdev statistics to report transmitted and received bytes and +packets and a few obvious errors. + +Signed-off-by: Florian Fainelli <f.fainelli@gmail.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + drivers/net/ethernet/broadcom/bgmac.c | 10 ++++++++++ + 1 file changed, 10 insertions(+) + +--- a/drivers/net/ethernet/broadcom/bgmac.c ++++ b/drivers/net/ethernet/broadcom/bgmac.c +@@ -246,6 +246,8 @@ err_dma_head: + + err_drop: + dev_kfree_skb(skb); ++ net_dev->stats.tx_dropped++; ++ net_dev->stats.tx_errors++; + return NETDEV_TX_OK; + } + +@@ -284,6 +286,8 @@ static void bgmac_dma_tx_free(struct bgm + DMA_TO_DEVICE); + + if (slot->skb) { ++ bgmac->net_dev->stats.tx_bytes += slot->skb->len; ++ bgmac->net_dev->stats.tx_packets++; + bytes_compl += slot->skb->len; + pkts_compl++; + +@@ -464,6 +468,7 @@ static int bgmac_dma_rx_read(struct bgma + bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n", + ring->start); + put_page(virt_to_head_page(buf)); ++ bgmac->net_dev->stats.rx_errors++; + break; + } + +@@ -471,6 +476,8 @@ static int bgmac_dma_rx_read(struct bgma + bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n", + ring->start); + put_page(virt_to_head_page(buf)); ++ bgmac->net_dev->stats.rx_length_errors++; ++ bgmac->net_dev->stats.rx_errors++; + break; + } + +@@ -481,6 +488,7 @@ static int bgmac_dma_rx_read(struct bgma + if (unlikely(!skb)) { + bgmac_err(bgmac, "build_skb failed\n"); + put_page(virt_to_head_page(buf)); ++ bgmac->net_dev->stats.rx_errors++; + break; + } + skb_put(skb, BGMAC_RX_FRAME_OFFSET + +@@ -490,6 +498,8 @@ static int bgmac_dma_rx_read(struct bgma + + skb_checksum_none_assert(skb); + skb->protocol = eth_type_trans(skb, bgmac->net_dev); ++ bgmac->net_dev->stats.rx_bytes += len; ++ bgmac->net_dev->stats.rx_packets++; + napi_gro_receive(&bgmac->napi, skb); + handled++; + } while (0); diff --git a/target/linux/generic/patches-4.4/090-MIPS-c-r4k-Use-IPI-calls-for-CM-indexed-cache-ops.patch b/target/linux/generic/patches-4.4/090-MIPS-c-r4k-Use-IPI-calls-for-CM-indexed-cache-ops.patch new file mode 100644 index 0000000000..0c1c0a4509 --- /dev/null +++ b/target/linux/generic/patches-4.4/090-MIPS-c-r4k-Use-IPI-calls-for-CM-indexed-cache-ops.patch @@ -0,0 +1,317 @@ +From: James Hogan <james.hogan@imgtec.com> +Date: Mon, 25 Jan 2016 21:30:00 +0000 +Subject: [PATCH] MIPS: c-r4k: Use IPI calls for CM indexed cache ops + +The Coherence Manager (CM) can propagate address-based ("hit") cache +operations to other cores in the coherent system, alleviating software +of the need to use IPI calls, however indexed cache operations are not +propagated since doing so makes no sense for separate caches. + +r4k_on_each_cpu() previously had a special case for CONFIG_MIPS_MT_SMP, +intended to avoid the IPIs when the only other CPUs in the system were +other VPEs in the same core, and hence sharing the same caches. This was +changed by commit cccf34e9411c ("MIPS: c-r4k: Fix cache flushing for MT +cores") to apparently handle multi-core multi-VPE systems, but it +focussed mainly on hit cache ops, so the IPI calls were still disabled +entirely for CM systems. + +This doesn't normally cause problems, but tests can be written to hit +these corner cases by using multiple threads, or changing task +affinities to force the process to migrate cores. For example the +failure of mprotect RW->RX to globally sync icaches (via +flush_cache_range) can be detected by modifying and mprotecting a code +page on one core, and migrating to a different core to execute from it. 
+ +Most of the functions called by r4k_on_each_cpu() perform cache +operations exclusively with a single addressing-type (virtual address vs +indexed), so add a type argument and modify the callers to pass in +R4K_USER (user virtual addressing), R4K_KERN (global kernel virtual +addressing) or R4K_INDEX (index into cache). + +local_r4k_flush_icache_range() is split up, to allow it to be called +from the rest of the kernel, or from r4k_flush_icache_range() where it +will choose either indexed or hit cache operations based on the size of +the range and the cache sizes. + +local_r4k_flush_kernel_vmap_range() is split into two functions, each of +which uses cache operations with a single addressing-type, with +r4k_flush_kernel_vmap_range() making the decision whether to use indexed +cache ops or not. + +Signed-off-by: James Hogan <james.hogan@imgtec.com> +Cc: Ralf Baechle <ralf@linux-mips.org> +Cc: Paul Burton <paul.burton@imgtec.com> +Cc: Leonid Yegoshin <leonid.yegoshin@imgtec.com> +Cc: linux-mips@linux-mips.org +--- + +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -40,6 +40,50 @@ + #include <asm/mips-cm.h> + + /* ++ * Bits describing what cache ops an IPI callback function may perform. ++ * ++ * R4K_USER - Virtual user address based cache operations. ++ * Ineffective on other CPUs. ++ * R4K_KERN - Virtual kernel address based cache operations (including kmap). ++ * Effective on other CPUs. ++ * R4K_INDEX - Index based cache operations. ++ * Effective on other CPUs. ++ */ ++ ++#define R4K_USER BIT(0) ++#define R4K_KERN BIT(1) ++#define R4K_INDEX BIT(2) ++ ++#ifdef CONFIG_SMP ++/* The Coherence manager propagates address-based cache ops to other cores */ ++#define r4k_hit_globalized mips_cm_present() ++#define r4k_index_globalized 0 ++#else ++/* If there's only 1 CPU, then all cache ops are globalized to that 1 CPU */ ++#define r4k_hit_globalized 1 ++#define r4k_index_globalized 1 ++#endif ++ ++/** ++ * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core. ++ * @type: Type of cache operations (R4K_USER, R4K_KERN or R4K_INDEX). ++ * ++ * Returns: 1 if the cache operation @type should be done on every core in ++ * the system. ++ * 0 if the cache operation @type is globalized and only needs to ++ * be performed on a simple CPU. ++ */ ++static inline bool r4k_op_needs_ipi(unsigned int type) ++{ ++ /* ++ * If hardware doesn't globalize the required cache ops we must use IPIs ++ * to do so. ++ */ ++ return (type & R4K_KERN && !r4k_hit_globalized) || ++ (type & R4K_INDEX && !r4k_index_globalized); ++} ++ ++/* + * Special Variant of smp_call_function for use by cache functions: + * + * o No return value +@@ -48,19 +92,11 @@ + * primary cache. + * o doesn't disable interrupts on the local CPU + */ +-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info) ++static inline void r4k_on_each_cpu(unsigned int type, ++ void (*func) (void *info), void *info) + { + preempt_disable(); +- +- /* +- * The Coherent Manager propagates address-based cache ops to other +- * cores but not index-based ops. However, r4k_on_each_cpu is used +- * in both cases so there is no easy way to tell what kind of op is +- * executed to the other cores. The best we can probably do is +- * to restrict that call when a CM is not present because both +- * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops. 
+- */ +- if (!mips_cm_present()) ++ if (r4k_op_needs_ipi(type)) + smp_call_function_many(&cpu_foreign_map, func, info, 1); + func(info); + preempt_enable(); +@@ -456,7 +492,7 @@ static inline void local_r4k___flush_cac + + static void r4k___flush_cache_all(void) + { +- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL); ++ r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL); + } + + static inline int has_valid_asid(const struct mm_struct *mm) +@@ -503,7 +539,7 @@ static void r4k_flush_cache_range(struct + int exec = vma->vm_flags & VM_EXEC; + + if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc)) +- r4k_on_each_cpu(local_r4k_flush_cache_range, vma); ++ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma); + } + + static inline void local_r4k_flush_cache_mm(void * args) +@@ -535,7 +571,7 @@ static void r4k_flush_cache_mm(struct mm + if (!cpu_has_dc_aliases) + return; + +- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm); ++ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm); + } + + struct flush_cache_page_args { +@@ -629,7 +665,7 @@ static void r4k_flush_cache_page(struct + args.addr = addr; + args.pfn = pfn; + +- r4k_on_each_cpu(local_r4k_flush_cache_page, &args); ++ r4k_on_each_cpu(R4K_KERN, local_r4k_flush_cache_page, &args); + } + + static inline void local_r4k_flush_data_cache_page(void * addr) +@@ -642,18 +678,23 @@ static void r4k_flush_data_cache_page(un + if (in_atomic()) + local_r4k_flush_data_cache_page((void *)addr); + else +- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr); ++ r4k_on_each_cpu(R4K_KERN, local_r4k_flush_data_cache_page, ++ (void *) addr); + } + + struct flush_icache_range_args { + unsigned long start; + unsigned long end; ++ unsigned int type; + }; + +-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end) ++static inline void __local_r4k_flush_icache_range(unsigned long start, ++ unsigned long end, ++ unsigned int type) + { + if (!cpu_has_ic_fills_f_dc) { +- if (end - start >= dcache_size) { ++ if (type == R4K_INDEX || ++ (type & R4K_INDEX && end - start >= dcache_size)) { + r4k_blast_dcache(); + } else { + R4600_HIT_CACHEOP_WAR_IMPL; +@@ -661,7 +702,8 @@ static inline void local_r4k_flush_icach + } + } + +- if (end - start > icache_size) ++ if (type == R4K_INDEX || ++ (type & R4K_INDEX && end - start > icache_size)) + r4k_blast_icache(); + else { + switch (boot_cpu_type()) { +@@ -687,23 +729,59 @@ static inline void local_r4k_flush_icach + #endif + } + ++static inline void local_r4k_flush_icache_range(unsigned long start, ++ unsigned long end) ++{ ++ __local_r4k_flush_icache_range(start, end, R4K_KERN | R4K_INDEX); ++} ++ + static inline void local_r4k_flush_icache_range_ipi(void *args) + { + struct flush_icache_range_args *fir_args = args; + unsigned long start = fir_args->start; + unsigned long end = fir_args->end; ++ unsigned int type = fir_args->type; + +- local_r4k_flush_icache_range(start, end); ++ __local_r4k_flush_icache_range(start, end, type); + } + + static void r4k_flush_icache_range(unsigned long start, unsigned long end) + { + struct flush_icache_range_args args; ++ unsigned long size, cache_size; + + args.start = start; + args.end = end; ++ args.type = R4K_KERN | R4K_INDEX; + +- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args); ++ if (in_atomic()) { ++ /* ++ * We can't do blocking IPI calls from atomic context, so fall ++ * back to pure address-based cache ops if they globalize. 
++ */ ++ if (!r4k_index_globalized && r4k_hit_globalized) { ++ args.type &= ~R4K_INDEX; ++ } else { ++ /* Just do it locally instead. */ ++ local_r4k_flush_icache_range(start, end); ++ instruction_hazard(); ++ return; ++ } ++ } else if (!r4k_index_globalized && r4k_hit_globalized) { ++ /* ++ * If address-based cache ops are globalized, then we may be ++ * able to avoid the IPI for small flushes. ++ */ ++ size = start - end; ++ cache_size = icache_size; ++ if (!cpu_has_ic_fills_f_dc) { ++ size *= 2; ++ cache_size += dcache_size; ++ } ++ if (size <= cache_size) ++ args.type &= ~R4K_INDEX; ++ } ++ r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args); + instruction_hazard(); + } + +@@ -823,7 +901,12 @@ static void local_r4k_flush_cache_sigtra + + static void r4k_flush_cache_sigtramp(unsigned long addr) + { +- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr); ++ /* ++ * FIXME this is a bit broken when !r4k_hit_globalized, since the user ++ * code probably won't be mapped on other CPUs, so if the process is ++ * migrated, it could end up hitting stale icache lines. ++ */ ++ r4k_on_each_cpu(R4K_USER, local_r4k_flush_cache_sigtramp, (void *)addr); + } + + static void r4k_flush_icache_all(void) +@@ -837,6 +920,15 @@ struct flush_kernel_vmap_range_args { + int size; + }; + ++static inline void local_r4k_flush_kernel_vmap_range_index(void *args) ++{ ++ /* ++ * Aliases only affect the primary caches so don't bother with ++ * S-caches or T-caches. ++ */ ++ r4k_blast_dcache(); ++} ++ + static inline void local_r4k_flush_kernel_vmap_range(void *args) + { + struct flush_kernel_vmap_range_args *vmra = args; +@@ -847,12 +939,8 @@ static inline void local_r4k_flush_kerne + * Aliases only affect the primary caches so don't bother with + * S-caches or T-caches. + */ +- if (cpu_has_safe_index_cacheops && size >= dcache_size) +- r4k_blast_dcache(); +- else { +- R4600_HIT_CACHEOP_WAR_IMPL; +- blast_dcache_range(vaddr, vaddr + size); +- } ++ R4600_HIT_CACHEOP_WAR_IMPL; ++ blast_dcache_range(vaddr, vaddr + size); + } + + static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size) +@@ -862,7 +950,12 @@ static void r4k_flush_kernel_vmap_range( + args.vaddr = (unsigned long) vaddr; + args.size = size; + +- r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args); ++ if (cpu_has_safe_index_cacheops && size >= dcache_size) ++ r4k_on_each_cpu(R4K_INDEX, ++ local_r4k_flush_kernel_vmap_range_index, NULL); ++ else ++ r4k_on_each_cpu(R4K_KERN, local_r4k_flush_kernel_vmap_range, ++ &args); + } + + static inline void rm7k_erratum31(void) diff --git a/target/linux/generic/patches-4.4/091-MIPS-c-r4k-Exclude-sibling-CPUs-in-SMP-calls.patch b/target/linux/generic/patches-4.4/091-MIPS-c-r4k-Exclude-sibling-CPUs-in-SMP-calls.patch new file mode 100644 index 0000000000..8d5030c84b --- /dev/null +++ b/target/linux/generic/patches-4.4/091-MIPS-c-r4k-Exclude-sibling-CPUs-in-SMP-calls.patch @@ -0,0 +1,37 @@ +From: James Hogan <james.hogan@imgtec.com> +Date: Thu, 3 Mar 2016 21:30:42 +0000 +Subject: [PATCH] MIPS: c-r4k: Exclude sibling CPUs in SMP calls + +When performing SMP calls to foreign cores, exclude sibling CPUs from +the provided map, as we already handle the local core on the current +CPU. This prevents an IPI call from for example core 0, VPE 1 to VPE 0 +on the same core. 
+ +Signed-off-by: James Hogan <james.hogan@imgtec.com> +Cc: Ralf Baechle <ralf@linux-mips.org> +Cc: Paul Burton <paul.burton@imgtec.com> +Cc: linux-mips@linux-mips.org +--- + +--- a/arch/mips/mm/c-r4k.c ++++ b/arch/mips/mm/c-r4k.c +@@ -96,8 +96,17 @@ static inline void r4k_on_each_cpu(unsig + void (*func) (void *info), void *info) + { + preempt_disable(); +- if (r4k_op_needs_ipi(type)) +- smp_call_function_many(&cpu_foreign_map, func, info, 1); ++ /* cpu_foreign_map and cpu_sibling_map[] undeclared when !CONFIG_SMP */ ++#ifdef CONFIG_SMP ++ if (r4k_op_needs_ipi(type)) { ++ struct cpumask mask; ++ ++ /* exclude sibling CPUs */ ++ cpumask_andnot(&mask, &cpu_foreign_map, ++ &cpu_sibling_map[smp_processor_id()]); ++ smp_call_function_many(&mask, func, info, 1); ++ } ++#endif + func(info); + preempt_enable(); + } diff --git a/target/linux/generic/patches-4.4/222-arm_zimage_none.patch b/target/linux/generic/patches-4.4/222-arm_zimage_none.patch new file mode 100644 index 0000000000..9add8158f9 --- /dev/null +++ b/target/linux/generic/patches-4.4/222-arm_zimage_none.patch @@ -0,0 +1,133 @@ +ARM: implement "uncompressed zImage" + +Based on RFC patch by Uwe Kleine-König +http://www.spinics.net/lists/arm-kernel/msg230153.html + +Signed-off-by: Felix Fietkau <nbd@nbd.name> +--- +--- a/arch/arm/boot/compressed/Makefile ++++ b/arch/arm/boot/compressed/Makefile +@@ -71,6 +71,7 @@ suffix_$(CONFIG_KERNEL_LZO) = lzo + suffix_$(CONFIG_KERNEL_LZMA) = lzma + suffix_$(CONFIG_KERNEL_XZ) = xzkern + suffix_$(CONFIG_KERNEL_LZ4) = lz4 ++suffix_$(CONFIG_KERNEL_CAT) = cat + + # Borrowed libfdt files for the ATAG compatibility mode + +@@ -95,7 +96,7 @@ targets := vmlinux vmlinux.lds \ + bswapsdi2.S font.o font.c head.o misc.o $(OBJS) + + # Make sure files are removed during clean +-extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 \ ++extra-y += piggy.gzip piggy.lzo piggy.lzma piggy.xzkern piggy.lz4 piggy.cat \ + lib1funcs.S ashldi3.S bswapsdi2.S $(libfdt) $(libfdt_hdrs) \ + hyp-stub.S + +--- a/arch/arm/boot/compressed/decompress.c ++++ b/arch/arm/boot/compressed/decompress.c +@@ -55,6 +55,10 @@ extern char * strstr(const char * s1, co + #include "../../../../lib/decompress_unlz4.c" + #endif + ++#ifdef CONFIG_KERNEL_CAT ++#include "../../../../lib/decompress_uncat.c" ++#endif ++ + int do_decompress(u8 *input, int len, u8 *output, void (*error)(char *x)) + { + return __decompress(input, len, NULL, NULL, output, 0, NULL, error); +--- /dev/null ++++ b/arch/arm/boot/compressed/piggy.cat.S +@@ -0,0 +1,6 @@ ++ .section .piggydata,#alloc ++ .globl input_data ++input_data: ++ .incbin "arch/arm/boot/compressed/piggy.cat" ++ .globl input_data_end ++input_data_end: +--- a/init/Kconfig ++++ b/init/Kconfig +@@ -115,6 +115,9 @@ config HAVE_KERNEL_LZO + config HAVE_KERNEL_LZ4 + bool + ++config HAVE_KERNEL_CAT ++ bool ++ + choice + prompt "Kernel compression mode" + default KERNEL_GZIP +@@ -181,9 +184,10 @@ config KERNEL_LZO + bool "LZO" + depends on HAVE_KERNEL_LZO + help +- Its compression ratio is the poorest among the choices. The kernel +- size is about 10% bigger than gzip; however its speed +- (both compression and decompression) is the fastest. ++ Its compression ratio is the poorest among the choices (apart from ++ uncompressed below). The kernel size is about 10% bigger than gzip; ++ however its speed (both compression and decompression) is the ++ fastest. + + config KERNEL_LZ4 + bool "LZ4" +@@ -197,6 +201,12 @@ config KERNEL_LZ4 + is about 8% bigger than LZO. 
But the decompression speed is + faster than LZO. + ++config KERNEL_CAT ++ bool "uncompressed" ++ depends on HAVE_KERNEL_CAT ++ help ++ Don't use compression at all. ++ + endchoice + + config DEFAULT_HOSTNAME +--- /dev/null ++++ b/lib/decompress_uncat.c +@@ -0,0 +1,17 @@ ++#include <linux/types.h> ++#include <linux/compiler.h> ++ ++#ifdef STATIC ++ ++STATIC int __decompress(unsigned char *buf, long in_len, ++ long (*fill)(void*, unsigned long), ++ long (*flush)(void*, unsigned long), ++ unsigned char *output, long out_len, ++ long *posp, ++ void (*error)(char *x)) ++{ ++ memmove(output, buf, in_len); ++ return 0; ++} ++ ++#endif +--- a/scripts/Makefile.lib ++++ b/scripts/Makefile.lib +@@ -337,6 +337,13 @@ cmd_lz4 = (cat $(filter-out FORCE,$^) | + lz4c -l -c1 stdin stdout && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ + (rm -f $@ ; false) + ++# uncompressed ++# --------------------------------------------------------------------------- ++quiet_cmd_cat = CAT $@ ++cmd_cat = (cat $(filter-out FORCE,$^) \ ++ && $(call size_append, $(filter-out FORCE,$^))) > $@ || \ ++ (rm -f $@ ; false) ++ + # U-Boot mkimage + # --------------------------------------------------------------------------- + +--- a/arch/arm/Kconfig ++++ b/arch/arm/Kconfig +@@ -59,6 +59,7 @@ config ARM + select HAVE_KERNEL_LZMA + select HAVE_KERNEL_LZO + select HAVE_KERNEL_XZ ++ select HAVE_KERNEL_CAT + select HAVE_KPROBES if !XIP_KERNEL && !CPU_ENDIAN_BE32 && !CPU_V7M + select HAVE_KRETPROBES if (HAVE_KPROBES) + select HAVE_MEMBLOCK diff --git a/target/linux/generic/patches-4.4/730-phy_b53.patch b/target/linux/generic/patches-4.4/730-phy_b53.patch index 71a53fded4..1a6de96726 100644 --- a/target/linux/generic/patches-4.4/730-phy_b53.patch +++ b/target/linux/generic/patches-4.4/730-phy_b53.patch @@ -15,7 +15,7 @@ obj-$(CONFIG_RTL8367B_PHY) += rtl8367b.o obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o obj-$(CONFIG_PSB6970_PHY) += psb6970.o -+obj-$(CONFIG_B53) += b53/ ++obj-$(CONFIG_SWCONFIG_B53) += b53/ obj-$(CONFIG_FIXED_PHY) += fixed_phy.o obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o diff --git a/target/linux/generic/patches-4.4/773-bgmac-add-srab-switch.patch b/target/linux/generic/patches-4.4/773-bgmac-add-srab-switch.patch index 8a68e7e401..4348574cbb 100644 --- a/target/linux/generic/patches-4.4/773-bgmac-add-srab-switch.patch +++ b/target/linux/generic/patches-4.4/773-bgmac-add-srab-switch.patch @@ -12,7 +12,7 @@ Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> #include <linux/bcm47xx_nvram.h> static const struct bcma_device_id bgmac_bcma_tbl[] = { -@@ -1544,6 +1545,17 @@ static void bgmac_mii_unregister(struct +@@ -1683,6 +1684,17 @@ static void bgmac_mii_unregister(struct mdiobus_free(mii_bus); } @@ -30,7 +30,7 @@ Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> /************************************************** * BCMA bus ops **************************************************/ -@@ -1688,6 +1700,14 @@ static int bgmac_probe(struct bcma_devic +@@ -1828,6 +1840,14 @@ static int bgmac_probe(struct bcma_devic net_dev->hw_features = net_dev->features; net_dev->vlan_features = net_dev->features; @@ -45,7 +45,7 @@ Signed-off-by: Hauke Mehrtens <hauke@hauke-m.de> err = register_netdev(bgmac->net_dev); if (err) { bgmac_err(bgmac, "Cannot register net device\n"); -@@ -1714,6 +1734,10 @@ static void bgmac_remove(struct bcma_dev +@@ -1854,6 +1874,10 @@ static void bgmac_remove(struct bcma_dev { struct bgmac *bgmac = bcma_get_drvdata(core); diff --git 
a/target/linux/generic/patches-4.4/930-crashlog.patch b/target/linux/generic/patches-4.4/930-crashlog.patch index 27a7f6ab35..936366e259 100644 --- a/target/linux/generic/patches-4.4/930-crashlog.patch +++ b/target/linux/generic/patches-4.4/930-crashlog.patch @@ -20,7 +20,7 @@ +#endif --- a/init/Kconfig +++ b/init/Kconfig -@@ -1286,6 +1286,10 @@ config RELAY +@@ -1296,6 +1296,10 @@ config RELAY If unsure, say N. diff --git a/target/linux/generic/patches-4.4/995-mangle_bootargs.patch b/target/linux/generic/patches-4.4/995-mangle_bootargs.patch index eb525618aa..2483e1bb98 100644 --- a/target/linux/generic/patches-4.4/995-mangle_bootargs.patch +++ b/target/linux/generic/patches-4.4/995-mangle_bootargs.patch @@ -40,7 +40,7 @@ setup_per_cpu_areas(); --- a/init/Kconfig +++ b/init/Kconfig -@@ -1638,6 +1638,15 @@ config EMBEDDED +@@ -1648,6 +1648,15 @@ config EMBEDDED an embedded system so certain expert options are available for configuration. diff --git a/target/linux/mvebu/Makefile b/target/linux/mvebu/Makefile index 54bb15d68e..bda69f64c7 100644 --- a/target/linux/mvebu/Makefile +++ b/target/linux/mvebu/Makefile @@ -14,12 +14,12 @@ CPU_TYPE:=cortex-a9 CPU_SUBTYPE:=vfpv3 MAINTAINER:=Imre Kaloz <kaloz@openwrt.org> -KERNEL_PATCHVER:=4.1 +KERNEL_PATCHVER:=4.4 include $(INCLUDE_DIR)/target.mk KERNELNAME:=zImage dtbs -DEFAULT_PACKAGES += uboot-envtools +DEFAULT_PACKAGES += uboot-envtools kmod-gpio-button-hotplug $(eval $(call BuildTarget)) diff --git a/target/linux/mvebu/base-files/etc/board.d/02_network b/target/linux/mvebu/base-files/etc/board.d/02_network index f81d0ac172..8f633c9bad 100755 --- a/target/linux/mvebu/base-files/etc/board.d/02_network +++ b/target/linux/mvebu/base-files/etc/board.d/02_network @@ -11,14 +11,12 @@ board_config_update board=$(mvebu_board_name) case "$board" in -armada-385-linksys-caiman) - ucidef_set_interfaces_lan_wan "eth1" "eth0" - ;; -armada-385-linksys-cobra) - ucidef_set_interfaces_lan_wan "eth1" "eth0" - ;; +armada-385-linksys-caiman|\ +armada-385-linksys-cobra|\ armada-385-linksys-shelby) ucidef_set_interfaces_lan_wan "eth1" "eth0" + ucidef_add_switch "switch0" \ + "0:lan:4" "1:lan:3" "2:lan:2" "3:lan:1" "6@eth1" "4:wan" "5@eth0" ;; armada-xp-linksys-mamba) ucidef_set_interfaces_lan_wan "eth0" "eth1" diff --git a/target/linux/mvebu/base-files/etc/diag.sh b/target/linux/mvebu/base-files/etc/diag.sh index 6534ac3f93..82760fa0de 100644 --- a/target/linux/mvebu/base-files/etc/diag.sh +++ b/target/linux/mvebu/base-files/etc/diag.sh @@ -6,6 +6,9 @@ get_status_led() { case $(mvebu_board_name) in + armada-385-linksys-shelby) + status_led="shelby:white:power" + ;; armada-xp-linksys-mamba) status_led="mamba:white:power" ;; diff --git a/target/linux/mvebu/config-4.4 b/target/linux/mvebu/config-4.4 index ee5c983f71..216e8da607 100644 --- a/target/linux/mvebu/config-4.4 +++ b/target/linux/mvebu/config-4.4 @@ -1,6 +1,7 @@ +CONFIG_AHCI_MVEBU=y CONFIG_ALIGNMENT_TRAP=y -CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y CONFIG_ARCH_HAS_SG_CHAIN=y CONFIG_ARCH_HAS_TICK_BROADCAST=y @@ -29,6 +30,7 @@ CONFIG_ARM=y CONFIG_ARMADA_370_CLK=y CONFIG_ARMADA_370_XP_TIMER=y CONFIG_ARMADA_38X_CLK=y +CONFIG_ARMADA_THERMAL=y CONFIG_ARMADA_XP_CLK=y CONFIG_ARM_APPENDED_DTB=y CONFIG_ARM_ATAG_DTB_COMPAT=y @@ -38,7 +40,7 @@ CONFIG_ARM_CPU_SUSPEND=y CONFIG_ARM_ERRATA_720789=y CONFIG_ARM_GIC=y CONFIG_ARM_HAS_SG_CHAIN=y -# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set +CONFIG_ARM_HEAVY_MB=y 
CONFIG_ARM_L1_CACHE_SHIFT=6 CONFIG_ARM_L1_CACHE_SHIFT_6=y # CONFIG_ARM_LPAE is not set @@ -46,8 +48,10 @@ CONFIG_ARM_MVEBU_V7_CPUIDLE=y CONFIG_ARM_PATCH_PHYS_VIRT=y CONFIG_ARM_THUMB=y # CONFIG_ARM_THUMBEE is not set +CONFIG_ARM_UNWIND=y CONFIG_ARM_VIRT_EXT=y CONFIG_ASYNC_TX_ENABLE_CHANNEL_SWITCH=y +CONFIG_ATA=y CONFIG_ATAGS=y CONFIG_AUTO_ZRELADDR=y CONFIG_BOUNCE=y @@ -56,6 +60,7 @@ CONFIG_CACHE_L2X0=y CONFIG_CLKDEV_LOOKUP=y CONFIG_CLKSRC_MMIO=y CONFIG_CLKSRC_OF=y +CONFIG_CLKSRC_PROBE=y CONFIG_CLONE_BACKWARDS=y CONFIG_COMMON_CLK=y CONFIG_CPUFREQ_DT=y @@ -70,11 +75,8 @@ CONFIG_CPU_COPY_V6=y CONFIG_CPU_CP15=y CONFIG_CPU_CP15_MMU=y CONFIG_CPU_FREQ=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y # CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set CONFIG_CPU_FREQ_GOV_COMMON=y # CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set CONFIG_CPU_FREQ_GOV_ONDEMAND=y @@ -82,7 +84,6 @@ CONFIG_CPU_FREQ_GOV_PERFORMANCE=y # CONFIG_CPU_FREQ_GOV_POWERSAVE is not set # CONFIG_CPU_FREQ_GOV_USERSPACE is not set CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_STAT_DETAILS is not set CONFIG_CPU_HAS_ASID=y # CONFIG_CPU_ICACHE_DISABLE is not set CONFIG_CPU_IDLE=y @@ -91,12 +92,19 @@ CONFIG_CPU_PABRT_V7=y CONFIG_CPU_PJ4B=y CONFIG_CPU_PM=y CONFIG_CPU_RMAP=y -# CONFIG_CPU_THERMAL is not set +CONFIG_CPU_THERMAL=y CONFIG_CPU_TLB_V7=y CONFIG_CPU_V7=y CONFIG_CRC16=y CONFIG_CRYPTO_DEFLATE=y +CONFIG_CRYPTO_DES=y +CONFIG_CRYPTO_DEV_MARVELL_CESA=y +CONFIG_CRYPTO_HASH=y +CONFIG_CRYPTO_HASH2=y +CONFIG_CRYPTO_HW=y CONFIG_CRYPTO_LZO=y +CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_WORKQUEUE=y CONFIG_CRYPTO_XZ=y CONFIG_DCACHE_WORD_ACCESS=y CONFIG_DEBUG_INFO=y @@ -118,29 +126,31 @@ CONFIG_DMA_ENGINE=y CONFIG_DMA_ENGINE_RAID=y CONFIG_DMA_OF=y CONFIG_DTC=y -# CONFIG_DW_DMAC_CORE is not set # CONFIG_DW_DMAC_PCI is not set CONFIG_EARLY_PRINTK=y +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y CONFIG_FIXED_PHY=y -CONFIG_FRAME_POINTER=y +CONFIG_FIX_EARLYCON_MEM=y CONFIG_GENERIC_ALLOCATOR=y CONFIG_GENERIC_BUG=y CONFIG_GENERIC_CLOCKEVENTS=y CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_GENERIC_IDLE_POLL_SETUP=y CONFIG_GENERIC_IO=y CONFIG_GENERIC_IRQ_CHIP=y CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y CONFIG_GENERIC_MSI_IRQ=y CONFIG_GENERIC_PCI_IOMAP=y +CONFIG_GENERIC_PHY=y CONFIG_GENERIC_SCHED_CLOCK=y CONFIG_GENERIC_SMP_IDLE_THREAD=y CONFIG_GENERIC_STRNCPY_FROM_USER=y CONFIG_GENERIC_STRNLEN_USER=y +CONFIG_GLOB=y CONFIG_GPIOLIB=y CONFIG_GPIO_DEVRES=y -CONFIG_GPIO_GENERIC=y CONFIG_GPIO_MVEBU=y CONFIG_GPIO_MVEBU_PWM=y CONFIG_GPIO_SYSFS=y @@ -184,6 +194,7 @@ CONFIG_HAVE_KERNEL_LZMA=y CONFIG_HAVE_KERNEL_LZO=y CONFIG_HAVE_KERNEL_XZ=y CONFIG_HAVE_MEMBLOCK=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y CONFIG_HAVE_NET_DSA=y CONFIG_HAVE_OPROFILE=y CONFIG_HAVE_OPTPROBES=y @@ -198,8 +209,14 @@ CONFIG_HAVE_UID16=y CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y CONFIG_HIGHMEM=y # CONFIG_HIGHPTE is not set +CONFIG_HWBM=y +CONFIG_HWMON=y CONFIG_HZ_FIXED=0 CONFIG_HZ_PERIODIC=y +CONFIG_I2C=y +CONFIG_I2C_BOARDINFO=y +CONFIG_I2C_CHARDEV=y +CONFIG_I2C_MV64XXX=y CONFIG_INITRAMFS_SOURCE="" CONFIG_IOMMU_HELPER=y CONFIG_IRQCHIP=y @@ -210,13 +227,12 @@ CONFIG_IRQ_FORCED_THREADING=y CONFIG_IRQ_WORK=y # CONFIG_IWMMXT is not set CONFIG_LEDS_GPIO=y -# CONFIG_LEDS_REGULATOR is not set -# CONFIG_LEDS_TLC59116 is not set +CONFIG_LEDS_PCA963X=y +CONFIG_LEDS_TLC591XX=y +CONFIG_LEDS_TRIGGER_USBDEV=y CONFIG_LIBFDT=y 
CONFIG_LOCK_SPIN_ON_OWNER=y CONFIG_LOG_BUF_SHIFT=14 -# CONFIG_LZ4_COMPRESS is not set -# CONFIG_LZ4_DECOMPRESS is not set CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y CONFIG_MACH_ARMADA_370=y @@ -234,6 +250,9 @@ CONFIG_MDIO_BOARDINFO=y CONFIG_MEMORY=y CONFIG_MIGHT_HAVE_CACHE_L2X0=y CONFIG_MIGHT_HAVE_PCI=y +CONFIG_MMC=y +CONFIG_MMC_MVSDIO=y +# CONFIG_MMC_TIFM_SD is not set CONFIG_MODULES_USE_ELF_REL=y CONFIG_MTD_CFI_STAA=y CONFIG_MTD_M25P80=y @@ -248,7 +267,6 @@ CONFIG_MTD_UBI_BLOCK=y # CONFIG_MTD_UBI_FASTMAP is not set # CONFIG_MTD_UBI_GLUEBI is not set CONFIG_MTD_UBI_WL_THRESHOLD=4096 -CONFIG_MTD_UIMAGE_SPLIT=y CONFIG_MULTI_IRQ_HANDLER=y CONFIG_MUTEX_SPIN_ON_OWNER=y CONFIG_MVEBU_CLK_COMMON=y @@ -258,11 +276,13 @@ CONFIG_MVEBU_DEVBUS=y CONFIG_MVEBU_MBUS=y CONFIG_MVMDIO=y CONFIG_MVNETA=y +CONFIG_MVNETA_BM=y CONFIG_MVSW61XX_PHY=y CONFIG_MV_XOR=y CONFIG_NEED_DMA_MAP_STATE=y # CONFIG_NEON is not set CONFIG_NET_FLOW_LIMIT=y +CONFIG_NLS=y CONFIG_NOP_USB_XCEIV=y CONFIG_NO_BOOTMEM=y CONFIG_NR_CPUS=4 @@ -281,15 +301,16 @@ CONFIG_OF_PCI_IRQ=y CONFIG_OF_RESERVED_MEM=y CONFIG_OLD_SIGACTION=y CONFIG_OLD_SIGSUSPEND3=y +CONFIG_ORION_WATCHDOG=y CONFIG_OUTER_CACHE=y CONFIG_OUTER_CACHE_SYNC=y -CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_PAGE_OFFSET=0xC0000000 CONFIG_PCI=y # CONFIG_PCI_DOMAINS_GENERIC is not set CONFIG_PCI_MSI=y CONFIG_PCI_MVEBU=y CONFIG_PERF_USE_VMALLOC=y +CONFIG_PGTABLE_LEVELS=2 CONFIG_PHYLIB=y CONFIG_PINCTRL=y CONFIG_PINCTRL_ARMADA_370=y @@ -305,24 +326,29 @@ CONFIG_PJ4B_ERRATA_4742=y CONFIG_PLAT_ORION=y CONFIG_PM_OPP=y CONFIG_PWM=y -# CONFIG_PWM_FSL_FTM is not set CONFIG_PWM_SYSFS=y +CONFIG_RATIONAL=y CONFIG_RCU_STALL_COMMON=y CONFIG_REGMAP=y +CONFIG_REGMAP_I2C=y CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set CONFIG_REGULATOR_FIXED_VOLTAGE=y -# CONFIG_REGULATOR_PWM is not set -# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set CONFIG_RFS_ACCEL=y CONFIG_RPS=y CONFIG_RTC_CLASS=y -# CONFIG_RTC_DRV_MV is not set +CONFIG_RTC_DRV_ARMADA38X=y +CONFIG_RTC_DRV_MV=y CONFIG_RWSEM_SPIN_ON_OWNER=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y +CONFIG_SATA_AHCI_PLATFORM=y +CONFIG_SATA_MV=y CONFIG_SCHED_HRTICK=y -# CONFIG_SCSI_DMA is not set +# CONFIG_SCHED_INFO is not set +CONFIG_SCSI=y +CONFIG_SENSORS_PWM_FAN=y +CONFIG_SENSORS_TMP421=y CONFIG_SERIAL_8250_DW=y +CONFIG_SERIAL_8250_FSL=y CONFIG_SMP=y CONFIG_SMP_ON_UP=y CONFIG_SOC_BUS=y @@ -330,12 +356,17 @@ CONFIG_SPARSE_IRQ=y CONFIG_SPI=y CONFIG_SPI_MASTER=y CONFIG_SPI_ORION=y +CONFIG_SRAM=y CONFIG_SRCU=y -CONFIG_STOP_MACHINE=y CONFIG_SWCONFIG=y CONFIG_SWIOTLB=y CONFIG_SWP_EMULATE=y CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_THERMAL=y +CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y +CONFIG_THERMAL_GOV_STEP_WISE=y +CONFIG_THERMAL_HWMON=y +CONFIG_THERMAL_OF=y # CONFIG_THUMB2_KERNEL is not set CONFIG_TICK_CPU_ACCOUNTING=y CONFIG_TIMER_STATS=y @@ -345,15 +376,26 @@ CONFIG_UBIFS_FS=y CONFIG_UBIFS_FS_LZO=y CONFIG_UBIFS_FS_XZ=y CONFIG_UBIFS_FS_ZLIB=y -CONFIG_UID16=y CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" +CONFIG_USB=y +CONFIG_USB_COMMON=y +CONFIG_USB_EHCI_HCD=y +CONFIG_USB_EHCI_HCD_ORION=y +CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_PCI=y CONFIG_USB_PHY=y +CONFIG_USB_STORAGE=y CONFIG_USB_SUPPORT=y +# CONFIG_USB_UHCI_HCD is not set +CONFIG_USB_XHCI_HCD=y +CONFIG_USB_XHCI_MVEBU=y +CONFIG_USB_XHCI_PCI=y +CONFIG_USB_XHCI_PLATFORM=y CONFIG_USE_OF=y CONFIG_VECTORS_BASE=0xffff0000 CONFIG_VFP=y CONFIG_VFPv3=y -# CONFIG_XEN is not set +CONFIG_WATCHDOG_CORE=y CONFIG_XPS=y CONFIG_XZ_DEC_ARM=y CONFIG_XZ_DEC_BCJ=y diff --git a/target/linux/mvebu/image/Makefile 
b/target/linux/mvebu/image/Makefile index cb73c3bea6..d866a9ae21 100644 --- a/target/linux/mvebu/image/Makefile +++ b/target/linux/mvebu/image/Makefile @@ -89,8 +89,8 @@ define NORProfile define Image/Build/Profile/$(1)/squashfs ( \ - dd if=$(KDIR)/uImage-$(2) bs=$(3) conv=sync; \ - dd if=$(KDIR)/root.squashfs bs=$(3) conv=sync; \ + dd if=$(KDIR)/uImage-$(2) bs=$(patsubst %KiB,%k,$(3)) conv=sync; \ + dd if=$(KDIR)/root.squashfs bs=$(patsubst %KiB,%k,$(3)) conv=sync; \ ) > $$(BIN_DIR)/$$(IMG_PREFIX)-$(2)-squashfs-firmware.bin endef @@ -171,7 +171,7 @@ $(eval $(call MMCProfile,Solidrun-Clearfog-A1,armada-388-clearfog)) define Image/Build/Profile/Caiman/squashfs $(call Image/Build/UbinizeImage,armada-385-linksys-caiman,,squashfs, -p 128KiB -m 2048 -s 512 -O 2048) ( \ - dd if=$(KDIR)/uImage-armada-385-linksys-caiman bs=6M conv=sync; \ + dd if=$(KDIR)/uImage-armada-385-linksys-caiman bs=6144k conv=sync; \ dd if=$(KDIR)/$(IMG_PREFIX)-armada-385-linksys-caiman-squashfs-ubinized.bin \ bs=2048 conv=sync; \ ) > $(BIN_DIR)/$(IMG_PREFIX)-armada-385-linksys-caiman-squashfs-factory.img @@ -181,7 +181,7 @@ endef define Image/Build/Profile/Cobra/squashfs $(call Image/Build/UbinizeImage,armada-385-linksys-cobra,,squashfs, -p 128KiB -m 2048 -s 512 -O 2048) ( \ - dd if=$(KDIR)/uImage-armada-385-linksys-cobra bs=6M conv=sync; \ + dd if=$(KDIR)/uImage-armada-385-linksys-cobra bs=6144k conv=sync; \ dd if=$(KDIR)/$(IMG_PREFIX)-armada-385-linksys-cobra-squashfs-ubinized.bin \ bs=2048 conv=sync; \ ) > $(BIN_DIR)/$(IMG_PREFIX)-armada-385-linksys-cobra-squashfs-factory.img @@ -201,7 +201,7 @@ endef define Image/Build/Profile/Shelby/squashfs $(call Image/Build/UbinizeImage,armada-385-linksys-shelby,,squashfs, -p 128KiB -m 2048 -s 512 -O 2048) ( \ - dd if=$(KDIR)/uImage-armada-385-linksys-shelby bs=6M conv=sync; \ + dd if=$(KDIR)/uImage-armada-385-linksys-shelby bs=6144k conv=sync; \ dd if=$(KDIR)/$(IMG_PREFIX)-armada-385-linksys-shelby-squashfs-ubinized.bin \ bs=2048 conv=sync; \ ) > $(BIN_DIR)/$(IMG_PREFIX)-armada-385-linksys-shelby-squashfs-factory.img diff --git a/target/linux/mvebu/patches-4.4/020-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch b/target/linux/mvebu/patches-4.4/020-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch new file mode 100644 index 0000000000..2e6709781f --- /dev/null +++ b/target/linux/mvebu/patches-4.4/020-mtd-nand-pxa3xx_nand-add-support-for-partial-chunks.patch @@ -0,0 +1,428 @@ +From: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +Date: Wed, 10 Feb 2016 14:54:21 +0100 +Subject: [PATCH] mtd: nand: pxa3xx_nand: add support for partial chunks + +This commit is needed to properly support the 8-bits ECC configuration +with 4KB pages. + +When pages larger than 2 KB are used on platforms using the PXA3xx +NAND controller, the reading/programming operations need to be split +in chunks of 2 KBs or less because the controller FIFO is limited to +about 2 KB (i.e a bit more than 2 KB to accommodate OOB data). Due to +this requirement, the data layout on NAND is a bit strange, with ECC +interleaved with data, at the end of each chunk. + +When a 4-bits ECC configuration is used with 4 KB pages, the physical +data layout on the NAND looks like this: + +| 2048 data | 32 spare | 30 ECC | 2048 data | 32 spare | 30 ECC | + +So the data chunks have an equal size, 2080 bytes for each chunk, +which the driver supports properly. 
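(A minimal illustrative sketch, not part of the upstream patch text: the chunk accounting this message describes, written out as plain C. The structure mirrors the pxa3xx_nand_info fields the patch adds, and the numbers for the two 4 KB-page layouts are the ones pxa_ecc_init() assigns further down.)

    /* Chunk accounting for the layouts described in this message;
     * field names follow the pxa3xx_nand_info members added below. */
    struct chunk_layout {
            unsigned int nfullchunks;     /* chunks of (chunk_size + spare_size) */
            unsigned int ntotalchunks;    /* nfullchunks, plus one partial chunk if any */
            unsigned int chunk_size;      /* data bytes per full chunk */
            unsigned int spare_size;      /* spare bytes per full chunk */
            unsigned int last_chunk_size; /* data bytes in the partial chunk */
            unsigned int last_spare_size; /* spare bytes in the partial chunk */
    };

    /* 4-bit ECC, 4 KB page: two equal 2080-byte chunks, no partial chunk */
    static const struct chunk_layout ecc4_4k = {
            .nfullchunks = 2, .ntotalchunks = 2,
            .chunk_size = 2048, .spare_size = 32,
    };

    /* 8-bit ECC, 4 KB page (described next): four 1024-byte data chunks,
     * then a partial chunk carrying only the 64-byte spare area */
    static const struct chunk_layout ecc8_4k = {
            .nfullchunks = 4, .ntotalchunks = 5,
            .chunk_size = 1024, .spare_size = 0,
            .last_chunk_size = 0, .last_spare_size = 64,
    };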
+ +When a 8-bits ECC configuration is used with 4KB pages, the physical +data layout on the NAND looks like this: + +| 1024 data | 30 ECC | 1024 data | 30 ECC | 1024 data | 30 ECC | 1024 data | 30 ECC | 64 spare | 30 ECC | + +So, the spare area is stored in its own chunk, which has a different +size than the other chunks. Since OOB is not used by UBIFS, the initial +implementation of the driver has chosen to not support reading this +additional "spare" chunk of data. + +Unfortunately, Marvell has chosen to store the BBT signature in the +OOB area. Therefore, if the driver doesn't read this spare area, Linux +has no way of finding the BBT. It thinks there is no BBT, and rewrites +one, which U-Boot does not recognize, causing compatibility problems +between the bootloader and the kernel in terms of NAND usage. + +To fix this, this commit implements the support for reading a partial +last chunk. This support is currently only useful for the case of 8 +bits ECC with 4 KB pages, but it will be useful in the future to +enable other configurations such as 12 bits and 16 bits ECC with 4 KB +pages, or 8 bits ECC with 8 KB pages, etc. All those configurations +have a "last" chunk that doesn't have the same size as the other +chunks. + +In order to implement reading of the last chunk, this commit: + + - Adds a number of new fields to the pxa3xx_nand_info to describe how + many full chunks and how many chunks we have, the size of full + chunks and partial chunks, both in terms of data area and spare + area. + + - Fills in the step_chunk_size and step_spare_size variables to + describe how much data and spare should be read/written for the + current read/program step. + + - Reworks the state machine to accommodate doing the additional read + or program step when a last partial chunk is used. + +This commit has been tested on a Marvell Armada 398 DB board, with a +4KB page NAND, tested in both 4 bits ECC and 8 bits ECC +configurations. Robert Jarzmik has tested on some PXA platforms. + +Signed-off-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +Tested-by: Robert Jarzmik <robert.jarzmik@free.fr> +Acked-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> +Signed-off-by: Brian Norris <computersforpeace@gmail.com> +--- + +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -228,15 +228,44 @@ struct pxa3xx_nand_info { + int use_spare; /* use spare ? */ + int need_wait; + +- unsigned int data_size; /* data to be read from FIFO */ +- unsigned int chunk_size; /* split commands chunk size */ +- unsigned int oob_size; ++ /* Amount of real data per full chunk */ ++ unsigned int chunk_size; ++ ++ /* Amount of spare data per full chunk */ + unsigned int spare_size; ++ ++ /* Number of full chunks (i.e chunk_size + spare_size) */ ++ unsigned int nfullchunks; ++ ++ /* ++ * Total number of chunks. If equal to nfullchunks, then there ++ * are only full chunks. Otherwise, there is one last chunk of ++ * size (last_chunk_size + last_spare_size) ++ */ ++ unsigned int ntotalchunks; ++ ++ /* Amount of real data in the last chunk */ ++ unsigned int last_chunk_size; ++ ++ /* Amount of spare data in the last chunk */ ++ unsigned int last_spare_size; ++ + unsigned int ecc_size; + unsigned int ecc_err_cnt; + unsigned int max_bitflips; + int retcode; + ++ /* ++ * Variables only valid during command ++ * execution. step_chunk_size and step_spare_size is the ++ * amount of real data and spare data in the current ++ * chunk. cur_chunk is the current chunk being ++ * read/programmed. 
++ */ ++ unsigned int step_chunk_size; ++ unsigned int step_spare_size; ++ unsigned int cur_chunk; ++ + /* cached register value */ + uint32_t reg_ndcr; + uint32_t ndtr0cs0; +@@ -531,25 +560,6 @@ static int pxa3xx_nand_init(struct pxa3x + return 0; + } + +-/* +- * Set the data and OOB size, depending on the selected +- * spare and ECC configuration. +- * Only applicable to READ0, READOOB and PAGEPROG commands. +- */ +-static void pxa3xx_set_datasize(struct pxa3xx_nand_info *info, +- struct mtd_info *mtd) +-{ +- int oob_enable = info->reg_ndcr & NDCR_SPARE_EN; +- +- info->data_size = mtd->writesize; +- if (!oob_enable) +- return; +- +- info->oob_size = info->spare_size; +- if (!info->use_ecc) +- info->oob_size += info->ecc_size; +-} +- + /** + * NOTE: it is a must to set ND_RUN firstly, then write + * command buffer, otherwise, it does not work. +@@ -665,28 +675,28 @@ static void drain_fifo(struct pxa3xx_nan + + static void handle_data_pio(struct pxa3xx_nand_info *info) + { +- unsigned int do_bytes = min(info->data_size, info->chunk_size); +- + switch (info->state) { + case STATE_PIO_WRITING: +- writesl(info->mmio_base + NDDB, +- info->data_buff + info->data_buff_pos, +- DIV_ROUND_UP(do_bytes, 4)); ++ if (info->step_chunk_size) ++ writesl(info->mmio_base + NDDB, ++ info->data_buff + info->data_buff_pos, ++ DIV_ROUND_UP(info->step_chunk_size, 4)); + +- if (info->oob_size > 0) ++ if (info->step_spare_size) + writesl(info->mmio_base + NDDB, + info->oob_buff + info->oob_buff_pos, +- DIV_ROUND_UP(info->oob_size, 4)); ++ DIV_ROUND_UP(info->step_spare_size, 4)); + break; + case STATE_PIO_READING: +- drain_fifo(info, +- info->data_buff + info->data_buff_pos, +- DIV_ROUND_UP(do_bytes, 4)); ++ if (info->step_chunk_size) ++ drain_fifo(info, ++ info->data_buff + info->data_buff_pos, ++ DIV_ROUND_UP(info->step_chunk_size, 4)); + +- if (info->oob_size > 0) ++ if (info->step_spare_size) + drain_fifo(info, + info->oob_buff + info->oob_buff_pos, +- DIV_ROUND_UP(info->oob_size, 4)); ++ DIV_ROUND_UP(info->step_spare_size, 4)); + break; + default: + dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, +@@ -695,9 +705,8 @@ static void handle_data_pio(struct pxa3x + } + + /* Update buffer pointers for multi-page read/write */ +- info->data_buff_pos += do_bytes; +- info->oob_buff_pos += info->oob_size; +- info->data_size -= do_bytes; ++ info->data_buff_pos += info->step_chunk_size; ++ info->oob_buff_pos += info->step_spare_size; + } + + static void pxa3xx_nand_data_dma_irq(void *data) +@@ -738,8 +747,9 @@ static void start_data_dma(struct pxa3xx + info->state); + BUG(); + } +- info->sg.length = info->data_size + +- (info->oob_size ? 
info->spare_size + info->ecc_size : 0); ++ info->sg.length = info->chunk_size; ++ if (info->use_spare) ++ info->sg.length += info->spare_size + info->ecc_size; + dma_map_sg(info->dma_chan->device->dev, &info->sg, 1, info->dma_dir); + + tx = dmaengine_prep_slave_sg(info->dma_chan, &info->sg, 1, direction, +@@ -900,9 +910,11 @@ static void prepare_start_command(struct + /* reset data and oob column point to handle data */ + info->buf_start = 0; + info->buf_count = 0; +- info->oob_size = 0; + info->data_buff_pos = 0; + info->oob_buff_pos = 0; ++ info->step_chunk_size = 0; ++ info->step_spare_size = 0; ++ info->cur_chunk = 0; + info->use_ecc = 0; + info->use_spare = 1; + info->retcode = ERR_NONE; +@@ -914,8 +926,6 @@ static void prepare_start_command(struct + case NAND_CMD_READ0: + case NAND_CMD_PAGEPROG: + info->use_ecc = 1; +- case NAND_CMD_READOOB: +- pxa3xx_set_datasize(info, mtd); + break; + case NAND_CMD_PARAM: + info->use_spare = 0; +@@ -974,6 +984,14 @@ static int prepare_set_command(struct px + if (command == NAND_CMD_READOOB) + info->buf_start += mtd->writesize; + ++ if (info->cur_chunk < info->nfullchunks) { ++ info->step_chunk_size = info->chunk_size; ++ info->step_spare_size = info->spare_size; ++ } else { ++ info->step_chunk_size = info->last_chunk_size; ++ info->step_spare_size = info->last_spare_size; ++ } ++ + /* + * Multiple page read needs an 'extended command type' field, + * which is either naked-read or last-read according to the +@@ -985,8 +1003,8 @@ static int prepare_set_command(struct px + info->ndcb0 |= NDCB0_DBC | (NAND_CMD_READSTART << 8) + | NDCB0_LEN_OVRD + | NDCB0_EXT_CMD_TYPE(ext_cmd_type); +- info->ndcb3 = info->chunk_size + +- info->oob_size; ++ info->ndcb3 = info->step_chunk_size + ++ info->step_spare_size; + } + + set_command_address(info, mtd->writesize, column, page_addr); +@@ -1006,8 +1024,6 @@ static int prepare_set_command(struct px + | NDCB0_EXT_CMD_TYPE(ext_cmd_type) + | addr_cycle + | command; +- /* No data transfer in this case */ +- info->data_size = 0; + exec_cmd = 1; + } + break; +@@ -1019,6 +1035,14 @@ static int prepare_set_command(struct px + break; + } + ++ if (info->cur_chunk < info->nfullchunks) { ++ info->step_chunk_size = info->chunk_size; ++ info->step_spare_size = info->spare_size; ++ } else { ++ info->step_chunk_size = info->last_chunk_size; ++ info->step_spare_size = info->last_spare_size; ++ } ++ + /* Second command setting for large pages */ + if (mtd->writesize > PAGE_CHUNK_SIZE) { + /* +@@ -1029,14 +1053,14 @@ static int prepare_set_command(struct px + info->ndcb0 |= NDCB0_CMD_TYPE(0x1) + | NDCB0_LEN_OVRD + | NDCB0_EXT_CMD_TYPE(ext_cmd_type); +- info->ndcb3 = info->chunk_size + +- info->oob_size; ++ info->ndcb3 = info->step_chunk_size + ++ info->step_spare_size; + + /* + * This is the command dispatch that completes a chunked + * page program operation. 
+ */ +- if (info->data_size == 0) { ++ if (info->cur_chunk == info->ntotalchunks) { + info->ndcb0 = NDCB0_CMD_TYPE(0x1) + | NDCB0_EXT_CMD_TYPE(ext_cmd_type) + | command; +@@ -1063,7 +1087,7 @@ static int prepare_set_command(struct px + | command; + info->ndcb1 = (column & 0xFF); + info->ndcb3 = INIT_BUFFER_SIZE; +- info->data_size = INIT_BUFFER_SIZE; ++ info->step_chunk_size = INIT_BUFFER_SIZE; + break; + + case NAND_CMD_READID: +@@ -1073,7 +1097,7 @@ static int prepare_set_command(struct px + | command; + info->ndcb1 = (column & 0xFF); + +- info->data_size = 8; ++ info->step_chunk_size = 8; + break; + case NAND_CMD_STATUS: + info->buf_count = 1; +@@ -1081,7 +1105,7 @@ static int prepare_set_command(struct px + | NDCB0_ADDR_CYC(1) + | command; + +- info->data_size = 8; ++ info->step_chunk_size = 8; + break; + + case NAND_CMD_ERASE1: +@@ -1220,6 +1244,7 @@ static void nand_cmdfunc_extended(struct + init_completion(&info->dev_ready); + do { + info->state = STATE_PREPARED; ++ + exec_cmd = prepare_set_command(info, command, ext_cmd_type, + column, page_addr); + if (!exec_cmd) { +@@ -1239,22 +1264,30 @@ static void nand_cmdfunc_extended(struct + break; + } + ++ /* Only a few commands need several steps */ ++ if (command != NAND_CMD_PAGEPROG && ++ command != NAND_CMD_READ0 && ++ command != NAND_CMD_READOOB) ++ break; ++ ++ info->cur_chunk++; ++ + /* Check if the sequence is complete */ +- if (info->data_size == 0 && command != NAND_CMD_PAGEPROG) ++ if (info->cur_chunk == info->ntotalchunks && command != NAND_CMD_PAGEPROG) + break; + + /* + * After a splitted program command sequence has issued + * the command dispatch, the command sequence is complete. + */ +- if (info->data_size == 0 && ++ if (info->cur_chunk == (info->ntotalchunks + 1) && + command == NAND_CMD_PAGEPROG && + ext_cmd_type == EXT_CMD_TYPE_DISPATCH) + break; + + if (command == NAND_CMD_READ0 || command == NAND_CMD_READOOB) { + /* Last read: issue a 'last naked read' */ +- if (info->data_size == info->chunk_size) ++ if (info->cur_chunk == info->ntotalchunks - 1) + ext_cmd_type = EXT_CMD_TYPE_LAST_RW; + else + ext_cmd_type = EXT_CMD_TYPE_NAKED_RW; +@@ -1264,7 +1297,7 @@ static void nand_cmdfunc_extended(struct + * the command dispatch must be issued to complete. 
+ */ + } else if (command == NAND_CMD_PAGEPROG && +- info->data_size == 0) { ++ info->cur_chunk == info->ntotalchunks) { + ext_cmd_type = EXT_CMD_TYPE_DISPATCH; + } + } while (1); +@@ -1514,6 +1547,8 @@ static int pxa_ecc_init(struct pxa3xx_na + int strength, int ecc_stepsize, int page_size) + { + if (strength == 1 && ecc_stepsize == 512 && page_size == 2048) { ++ info->nfullchunks = 1; ++ info->ntotalchunks = 1; + info->chunk_size = 2048; + info->spare_size = 40; + info->ecc_size = 24; +@@ -1522,6 +1557,8 @@ static int pxa_ecc_init(struct pxa3xx_na + ecc->strength = 1; + + } else if (strength == 1 && ecc_stepsize == 512 && page_size == 512) { ++ info->nfullchunks = 1; ++ info->ntotalchunks = 1; + info->chunk_size = 512; + info->spare_size = 8; + info->ecc_size = 8; +@@ -1535,6 +1572,8 @@ static int pxa_ecc_init(struct pxa3xx_na + */ + } else if (strength == 4 && ecc_stepsize == 512 && page_size == 2048) { + info->ecc_bch = 1; ++ info->nfullchunks = 1; ++ info->ntotalchunks = 1; + info->chunk_size = 2048; + info->spare_size = 32; + info->ecc_size = 32; +@@ -1545,6 +1584,8 @@ static int pxa_ecc_init(struct pxa3xx_na + + } else if (strength == 4 && ecc_stepsize == 512 && page_size == 4096) { + info->ecc_bch = 1; ++ info->nfullchunks = 2; ++ info->ntotalchunks = 2; + info->chunk_size = 2048; + info->spare_size = 32; + info->ecc_size = 32; +@@ -1559,8 +1600,12 @@ static int pxa_ecc_init(struct pxa3xx_na + */ + } else if (strength == 8 && ecc_stepsize == 512 && page_size == 4096) { + info->ecc_bch = 1; ++ info->nfullchunks = 4; ++ info->ntotalchunks = 5; + info->chunk_size = 1024; + info->spare_size = 0; ++ info->last_chunk_size = 0; ++ info->last_spare_size = 64; + info->ecc_size = 32; + ecc->mode = NAND_ECC_HW; + ecc->size = info->chunk_size; diff --git a/target/linux/mvebu/patches-4.4/021-mtd-pxa3xx_nand-Increase-the-initial-chunk-size.patch b/target/linux/mvebu/patches-4.4/021-mtd-pxa3xx_nand-Increase-the-initial-chunk-size.patch new file mode 100644 index 0000000000..0b0e047c02 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/021-mtd-pxa3xx_nand-Increase-the-initial-chunk-size.patch @@ -0,0 +1,42 @@ +From: =?UTF-8?q?Ezequiel=20Garc=C3=ADa?= <ezequiel@vanguardiasur.com.ar> +Date: Wed, 4 Nov 2015 13:13:41 -0300 +Subject: [PATCH] mtd: pxa3xx_nand: Increase the initial chunk size + +The chunk size represents the size of the data chunks, which +is used by the controllers that allow to split transfered data. + +However, the initial chunk size is used in a non-splitted way, +during device identification. Therefore, it must be large enough +for all the NAND commands issued during device identification. +This includes NAND_CMD_PARAM which was recently changed to +transfer up to 2048 bytes (for the redundant parameter pages). + +Thus, the initial chunk size should be 2048 as well. + +On Armada 370/XP platforms (NFCv2) booted without the keep-config +devicetree property, this commit fixes a timeout on the NAND_CMD_PARAM +command: + + [..] + pxa3xx-nand f10d0000.nand: This platform can't do DMA on this device + pxa3xx-nand f10d0000.nand: Wait time out!!! 
+ nand: device found, Manufacturer ID: 0x2c, Chip ID: 0x38 + nand: Micron MT29F8G08ABABAWP + nand: 1024 MiB, SLC, erase size: 512 KiB, page size: 4096, OOB size: 224 + +Signed-off-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> +Acked-by: Robert Jarzmik <robert.jarzmik@free.fr> +Signed-off-by: Brian Norris <computersforpeace@gmail.com> +--- + +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -1637,7 +1637,7 @@ static int pxa3xx_nand_scan(struct mtd_i + goto KEEP_CONFIG; + + /* Set a default chunk size */ +- info->chunk_size = 512; ++ info->chunk_size = PAGE_CHUNK_SIZE; + + ret = pxa3xx_nand_config_flash(info); + if (ret) diff --git a/target/linux/mvebu/patches-4.4/022-mtd-pxa3xx_nand-Fix-initial-controller-configuration.patch b/target/linux/mvebu/patches-4.4/022-mtd-pxa3xx_nand-Fix-initial-controller-configuration.patch new file mode 100644 index 0000000000..7d07fb9423 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/022-mtd-pxa3xx_nand-Fix-initial-controller-configuration.patch @@ -0,0 +1,104 @@ +From: =?UTF-8?q?Ezequiel=20Garc=C3=ADa?= <ezequiel@vanguardiasur.com.ar> +Date: Wed, 4 Nov 2015 13:13:42 -0300 +Subject: [PATCH] mtd: pxa3xx_nand: Fix initial controller configuration + +The Data Flash Control Register (NDCR) contains two types +of parameters: those that are needed for device identification, +and those that can only be set after device identification. + +Therefore, the driver can't set them all at once and instead +needs to configure the first group before nand_scan_ident() +and the second group later. + +Let's split pxa3xx_nand_config in two halves, and set the +parameters that depend on the device geometry once this is known. + +Signed-off-by: Ezequiel Garcia <ezequiel@vanguardiasur.com.ar> +Signed-off-by: Brian Norris <computersforpeace@gmail.com> +--- + +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -1420,34 +1420,43 @@ static int pxa3xx_nand_waitfunc(struct m + return NAND_STATUS_READY; + } + +-static int pxa3xx_nand_config_flash(struct pxa3xx_nand_info *info) ++static int pxa3xx_nand_config_ident(struct pxa3xx_nand_info *info) + { + struct platform_device *pdev = info->pdev; + struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); +- struct pxa3xx_nand_host *host = info->host[info->cs]; +- struct mtd_info *mtd = host->mtd; +- struct nand_chip *chip = mtd->priv; + +- /* configure default flash values */ ++ /* Configure default flash values */ ++ info->chunk_size = PAGE_CHUNK_SIZE; + info->reg_ndcr = 0x0; /* enable all interrupts */ + info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; + info->reg_ndcr |= NDCR_RD_ID_CNT(READ_ID_BYTES); +- info->reg_ndcr |= NDCR_SPARE_EN; /* enable spare by default */ ++ info->reg_ndcr |= NDCR_SPARE_EN; ++ ++ return 0; ++} ++ ++static void pxa3xx_nand_config_tail(struct pxa3xx_nand_info *info) ++{ ++ struct pxa3xx_nand_host *host = info->host[info->cs]; ++ struct mtd_info *mtd = host->mtd; ++ struct nand_chip *chip = mtd->priv; ++ + info->reg_ndcr |= (host->col_addr_cycles == 2) ? NDCR_RA_START : 0; + info->reg_ndcr |= (chip->page_shift == 6) ? NDCR_PG_PER_BLK : 0; + info->reg_ndcr |= (mtd->writesize == 2048) ? 
NDCR_PAGE_SZ : 0; +- +- return 0; + } + + static int pxa3xx_nand_detect_config(struct pxa3xx_nand_info *info) + { ++ struct platform_device *pdev = info->pdev; ++ struct pxa3xx_nand_platform_data *pdata = dev_get_platdata(&pdev->dev); + uint32_t ndcr = nand_readl(info, NDCR); + + /* Set an initial chunk size */ + info->chunk_size = ndcr & NDCR_PAGE_SZ ? 2048 : 512; + info->reg_ndcr = ndcr & + ~(NDCR_INT_MASK | NDCR_ND_ARB_EN | NFCV1_NDCR_ARB_CNTL); ++ info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; + info->ndtr0cs0 = nand_readl(info, NDTR0CS0); + info->ndtr1cs0 = nand_readl(info, NDTR1CS0); + return 0; +@@ -1636,10 +1645,7 @@ static int pxa3xx_nand_scan(struct mtd_i + if (pdata->keep_config && !pxa3xx_nand_detect_config(info)) + goto KEEP_CONFIG; + +- /* Set a default chunk size */ +- info->chunk_size = PAGE_CHUNK_SIZE; +- +- ret = pxa3xx_nand_config_flash(info); ++ ret = pxa3xx_nand_config_ident(info); + if (ret) + return ret; + +@@ -1652,7 +1658,6 @@ static int pxa3xx_nand_scan(struct mtd_i + } + + KEEP_CONFIG: +- info->reg_ndcr |= (pdata->enable_arbiter) ? NDCR_ND_ARB_EN : 0; + if (info->reg_ndcr & NDCR_DWIDTH_M) + chip->options |= NAND_BUSWIDTH_16; + +@@ -1737,6 +1742,10 @@ KEEP_CONFIG: + host->row_addr_cycles = 3; + else + host->row_addr_cycles = 2; ++ ++ if (!pdata->keep_config) ++ pxa3xx_nand_config_tail(info); ++ + return nand_scan_tail(mtd); + } + diff --git a/target/linux/mvebu/patches-4.4/023-bus-mvebu-mbus-provide-api-for-obtaining-IO-and-DRAM.patch b/target/linux/mvebu/patches-4.4/023-bus-mvebu-mbus-provide-api-for-obtaining-IO-and-DRAM.patch new file mode 100644 index 0000000000..58687f36ad --- /dev/null +++ b/target/linux/mvebu/patches-4.4/023-bus-mvebu-mbus-provide-api-for-obtaining-IO-and-DRAM.patch @@ -0,0 +1,94 @@ +From: Marcin Wojtas <mw@semihalf.com> +Date: Mon, 14 Mar 2016 09:39:02 +0100 +Subject: [PATCH] bus: mvebu-mbus: provide api for obtaining IO and DRAM window + information + +This commit enables finding appropriate mbus window and obtaining its +target id and attribute for given physical address in two separate +routines, both for IO and DRAM windows. This functionality +is needed for Armada XP/38x Network Controller's Buffer Manager and +PnC configuration. + +[gregory.clement@free-electrons.com: Fix size test for +mvebu_mbus_get_dram_win_info] + +Signed-off-by: Marcin Wojtas <mw@semihalf.com> +[DRAM window information reference in LKv3.10] +Signed-off-by: Evan Wang <xswang@marvell.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/bus/mvebu-mbus.c ++++ b/drivers/bus/mvebu-mbus.c +@@ -948,6 +948,58 @@ void mvebu_mbus_get_pcie_io_aperture(str + *res = mbus_state.pcie_io_aperture; + } + ++int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr) ++{ ++ const struct mbus_dram_target_info *dram; ++ int i; ++ ++ /* Get dram info */ ++ dram = mv_mbus_dram_info(); ++ if (!dram) { ++ pr_err("missing DRAM information\n"); ++ return -ENODEV; ++ } ++ ++ /* Try to find matching DRAM window for phyaddr */ ++ for (i = 0; i < dram->num_cs; i++) { ++ const struct mbus_dram_window *cs = dram->cs + i; ++ ++ if (cs->base <= phyaddr && ++ phyaddr <= (cs->base + cs->size - 1)) { ++ *target = dram->mbus_dram_target_id; ++ *attr = cs->mbus_attr; ++ return 0; ++ } ++ } ++ ++ pr_err("invalid dram address 0x%x\n", phyaddr); ++ return -EINVAL; ++} ++EXPORT_SYMBOL_GPL(mvebu_mbus_get_dram_win_info); ++ ++int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, ++ u8 *attr) ++{ ++ int win; ++ ++ for (win = 0; win < mbus_state.soc->num_wins; win++) { ++ u64 wbase; ++ int enabled; ++ ++ mvebu_mbus_read_window(&mbus_state, win, &enabled, &wbase, ++ size, target, attr, NULL); ++ ++ if (!enabled) ++ continue; ++ ++ if (wbase <= phyaddr && phyaddr <= wbase + *size) ++ return win; ++ } ++ ++ return -EINVAL; ++} ++EXPORT_SYMBOL_GPL(mvebu_mbus_get_io_win_info); ++ + static __init int mvebu_mbus_debugfs_init(void) + { + struct mvebu_mbus_state *s = &mbus_state; +--- a/include/linux/mbus.h ++++ b/include/linux/mbus.h +@@ -69,6 +69,9 @@ static inline const struct mbus_dram_tar + int mvebu_mbus_save_cpu_target(u32 *store_addr); + void mvebu_mbus_get_pcie_mem_aperture(struct resource *res); + void mvebu_mbus_get_pcie_io_aperture(struct resource *res); ++int mvebu_mbus_get_dram_win_info(phys_addr_t phyaddr, u8 *target, u8 *attr); ++int mvebu_mbus_get_io_win_info(phys_addr_t phyaddr, u32 *size, u8 *target, ++ u8 *attr); + int mvebu_mbus_add_window_remap_by_id(unsigned int target, + unsigned int attribute, + phys_addr_t base, size_t size, diff --git a/target/linux/mvebu/patches-4.4/024-mvebu-make-device-IO-strongly-ordered.patch b/target/linux/mvebu/patches-4.4/024-mvebu-make-device-IO-strongly-ordered.patch new file mode 100644 index 0000000000..dc669978ff --- /dev/null +++ b/target/linux/mvebu/patches-4.4/024-mvebu-make-device-IO-strongly-ordered.patch @@ -0,0 +1,47 @@ +On Cortex-A9 based Marvell SoCs, when HW I/O coherency is enabled, all +non-RAM space needs to be mapped strongly ordered. +In upstream this was added for PCIe I/O only, this change expands it +to cover all device memory. Fixes issues with CESA. +Based on patch from Thomas Petazzoni. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> + +--- a/arch/arm/mach-mvebu/coherency.c ++++ b/arch/arm/mach-mvebu/coherency.c +@@ -162,22 +162,16 @@ exit: + } + + /* +- * This ioremap hook is used on Armada 375/38x to ensure that PCIe ++ * This ioremap hook is used on Armada 375/38x to ensure that all non-RAM + * memory areas are mapped as MT_UNCACHED instead of MT_DEVICE. This +- * is needed as a workaround for a deadlock issue between the PCIe ++ * is needed as a workaround for a deadlock issue between the bus + * interface and the cache controller. 
+ */ + static void __iomem * +-armada_pcie_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, +- unsigned int mtype, void *caller) ++armada_wa_ioremap_caller(phys_addr_t phys_addr, size_t size, ++ unsigned int mtype, void *caller) + { +- struct resource pcie_mem; +- +- mvebu_mbus_get_pcie_mem_aperture(&pcie_mem); +- +- if (pcie_mem.start <= phys_addr && (phys_addr + size) <= pcie_mem.end) +- mtype = MT_UNCACHED; +- ++ mtype = MT_UNCACHED; + return __arm_ioremap_caller(phys_addr, size, mtype, caller); + } + +@@ -186,7 +180,7 @@ static void __init armada_375_380_cohere + struct device_node *cache_dn; + + coherency_cpu_base = of_iomap(np, 0); +- arch_ioremap_caller = armada_pcie_wa_ioremap_caller; ++ arch_ioremap_caller = armada_wa_ioremap_caller; + + /* + * We should switch the PL310 to I/O coherency mode only if diff --git a/target/linux/mvebu/patches-4.4/030-mvneta-consolidate-autoneg-enabling.patch b/target/linux/mvebu/patches-4.4/030-mvneta-consolidate-autoneg-enabling.patch new file mode 100644 index 0000000000..1d98c7decc --- /dev/null +++ b/target/linux/mvebu/patches-4.4/030-mvneta-consolidate-autoneg-enabling.patch @@ -0,0 +1,55 @@ +From: Stas Sergeev <stsp@list.ru> +Date: Wed, 2 Dec 2015 20:33:56 +0300 +Subject: [PATCH] mvneta: consolidate autoneg enabling + +This moves autoneg-related bit manipulations to the single place. + +CC: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +CC: netdev@vger.kernel.org +CC: linux-kernel@vger.kernel.org + +Signed-off-by: Stas Sergeev <stsp@users.sourceforge.net> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -1067,15 +1067,28 @@ static void mvneta_defaults_set(struct m + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN; + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); ++ + val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); + val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; + mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); ++ val |= MVNETA_GMAC2_INBAND_AN_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + } else { + val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); + val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | + MVNETA_GMAC_AN_SPEED_EN | + MVNETA_GMAC_AN_DUPLEX_EN); + mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); ++ val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); ++ val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); + } + + mvneta_set_ucast_table(pp, -1); +@@ -3230,9 +3243,6 @@ static int mvneta_port_power_up(struct m + return -EINVAL; + } + +- if (pp->use_inband_status) +- ctrl |= MVNETA_GMAC2_INBAND_AN_ENABLE; +- + /* Cancel Port Reset */ + ctrl &= ~MVNETA_GMAC2_PORT_RESET; + mvreg_write(pp, MVNETA_GMAC_CTRL_2, ctrl); diff --git a/target/linux/mvebu/patches-4.4/031-mvneta-implement-ethtool-autonegotiation-control.patch b/target/linux/mvebu/patches-4.4/031-mvneta-implement-ethtool-autonegotiation-control.patch new file mode 100644 index 0000000000..626b511c45 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/031-mvneta-implement-ethtool-autonegotiation-control.patch @@ -0,0 +1,165 @@ +From: Stas Sergeev <stsp@list.ru> +Date: Wed, 2 Dec 2015 20:35:11 +0300 +Subject: [PATCH] mvneta: implement ethtool autonegotiation control + +This patch allows to do +ethtool -s eth0 autoneg off +ethtool -s eth0 autoneg on +to 
disable or enable autonegotiation at run-time. +Without that functionality, the only way to control the autonegotiation +is to modify the device tree. + +This is needed if you plan to use the same kernel with +different ethernet switches, the ones that support the in-band +status and the ones that not. + +CC: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +CC: netdev@vger.kernel.org +CC: linux-kernel@vger.kernel.org + +Signed-off-by: Stas Sergeev <stsp@users.sourceforge.net> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -371,7 +371,7 @@ struct mvneta_port { + unsigned int duplex; + unsigned int speed; + unsigned int tx_csum_limit; +- int use_inband_status:1; ++ unsigned int use_inband_status:1; + + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; + }; +@@ -973,6 +973,44 @@ static void mvneta_set_other_mcast_table + mvreg_write(pp, MVNETA_DA_FILT_OTH_MCAST + offset, val); + } + ++static void mvneta_set_autoneg(struct mvneta_port *pp, int enable) ++{ ++ u32 val; ++ ++ if (enable) { ++ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); ++ val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | ++ MVNETA_GMAC_FORCE_LINK_DOWN | ++ MVNETA_GMAC_AN_FLOW_CTRL_EN); ++ val |= MVNETA_GMAC_INBAND_AN_ENABLE | ++ MVNETA_GMAC_AN_SPEED_EN | ++ MVNETA_GMAC_AN_DUPLEX_EN; ++ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); ++ val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); ++ val |= MVNETA_GMAC2_INBAND_AN_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); ++ } else { ++ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); ++ val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | ++ MVNETA_GMAC_AN_SPEED_EN | ++ MVNETA_GMAC_AN_DUPLEX_EN); ++ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); ++ val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); ++ ++ val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); ++ val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; ++ mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); ++ } ++} ++ + /* This method sets defaults to the NETA port: + * Clears interrupt Cause and Mask registers. + * Clears all MAC tables. 
+@@ -1058,39 +1096,7 @@ static void mvneta_defaults_set(struct m + val &= ~MVNETA_PHY_POLLING_ENABLE; + mvreg_write(pp, MVNETA_UNIT_CONTROL, val); + +- if (pp->use_inband_status) { +- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); +- val &= ~(MVNETA_GMAC_FORCE_LINK_PASS | +- MVNETA_GMAC_FORCE_LINK_DOWN | +- MVNETA_GMAC_AN_FLOW_CTRL_EN); +- val |= MVNETA_GMAC_INBAND_AN_ENABLE | +- MVNETA_GMAC_AN_SPEED_EN | +- MVNETA_GMAC_AN_DUPLEX_EN; +- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); +- +- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); +- val |= MVNETA_GMAC_1MS_CLOCK_ENABLE; +- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); +- +- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); +- val |= MVNETA_GMAC2_INBAND_AN_ENABLE; +- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); +- } else { +- val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); +- val &= ~(MVNETA_GMAC_INBAND_AN_ENABLE | +- MVNETA_GMAC_AN_SPEED_EN | +- MVNETA_GMAC_AN_DUPLEX_EN); +- mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); +- +- val = mvreg_read(pp, MVNETA_GMAC_CLOCK_DIVIDER); +- val &= ~MVNETA_GMAC_1MS_CLOCK_ENABLE; +- mvreg_write(pp, MVNETA_GMAC_CLOCK_DIVIDER, val); +- +- val = mvreg_read(pp, MVNETA_GMAC_CTRL_2); +- val &= ~MVNETA_GMAC2_INBAND_AN_ENABLE; +- mvreg_write(pp, MVNETA_GMAC_CTRL_2, val); +- } +- ++ mvneta_set_autoneg(pp, pp->use_inband_status); + mvneta_set_ucast_table(pp, -1); + mvneta_set_special_mcast_table(pp, -1); + mvneta_set_other_mcast_table(pp, -1); +@@ -2956,10 +2962,43 @@ int mvneta_ethtool_get_settings(struct n + int mvneta_ethtool_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) + { + struct mvneta_port *pp = netdev_priv(dev); ++ struct phy_device *phydev = pp->phy_dev; + +- if (!pp->phy_dev) ++ if (!phydev) + return -ENODEV; + ++ if ((cmd->autoneg == AUTONEG_ENABLE) != pp->use_inband_status) { ++ u32 val; ++ ++ mvneta_set_autoneg(pp, cmd->autoneg == AUTONEG_ENABLE); ++ ++ if (cmd->autoneg == AUTONEG_DISABLE) { ++ val = mvreg_read(pp, MVNETA_GMAC_AUTONEG_CONFIG); ++ val &= ~(MVNETA_GMAC_CONFIG_MII_SPEED | ++ MVNETA_GMAC_CONFIG_GMII_SPEED | ++ MVNETA_GMAC_CONFIG_FULL_DUPLEX); ++ ++ if (phydev->duplex) ++ val |= MVNETA_GMAC_CONFIG_FULL_DUPLEX; ++ ++ if (phydev->speed == SPEED_1000) ++ val |= MVNETA_GMAC_CONFIG_GMII_SPEED; ++ else if (phydev->speed == SPEED_100) ++ val |= MVNETA_GMAC_CONFIG_MII_SPEED; ++ ++ mvreg_write(pp, MVNETA_GMAC_AUTONEG_CONFIG, val); ++ } ++ ++ pp->use_inband_status = (cmd->autoneg == AUTONEG_ENABLE); ++ netdev_info(pp->dev, "autoneg status set to %i\n", ++ pp->use_inband_status); ++ ++ if (netif_running(dev)) { ++ mvneta_port_down(pp); ++ mvneta_port_up(pp); ++ } ++ } ++ + return phy_ethtool_sset(pp->phy_dev, cmd); + } + diff --git a/target/linux/mvebu/patches-4.4/032-net-mvneta-Make-the-default-queue-related-for-each-p.patch b/target/linux/mvebu/patches-4.4/032-net-mvneta-Make-the-default-queue-related-for-each-p.patch new file mode 100644 index 0000000000..54c93998f6 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/032-net-mvneta-Make-the-default-queue-related-for-each-p.patch @@ -0,0 +1,131 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Wed, 9 Dec 2015 18:23:48 +0100 +Subject: [PATCH] net: mvneta: Make the default queue related for each port + +Instead of using the same default queue for all the port. Move it in the +port struct. It will allow have a different default queue for each port. + +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -356,6 +356,7 @@ struct mvneta_port { + struct mvneta_tx_queue *txqs; + struct net_device *dev; + struct notifier_block cpu_notifier; ++ int rxq_def; + + /* Core clock */ + struct clk *clk; +@@ -819,7 +820,7 @@ static void mvneta_port_up(struct mvneta + mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + + /* Enable all initialized RXQs. */ +- mvreg_write(pp, MVNETA_RXQ_CMD, BIT(rxq_def)); ++ mvreg_write(pp, MVNETA_RXQ_CMD, BIT(pp->rxq_def)); + } + + /* Stop the Ethernet port activity */ +@@ -1067,7 +1068,7 @@ static void mvneta_defaults_set(struct m + mvreg_write(pp, MVNETA_ACC_MODE, val); + + /* Update val of portCfg register accordingly with all RxQueue types */ +- val = MVNETA_PORT_CONFIG_DEFL_VALUE(rxq_def); ++ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + val = 0; +@@ -2101,19 +2102,19 @@ static void mvneta_set_rx_mode(struct ne + if (dev->flags & IFF_PROMISC) { + /* Accept all: Multicast + Unicast */ + mvneta_rx_unicast_promisc_set(pp, 1); +- mvneta_set_ucast_table(pp, rxq_def); +- mvneta_set_special_mcast_table(pp, rxq_def); +- mvneta_set_other_mcast_table(pp, rxq_def); ++ mvneta_set_ucast_table(pp, pp->rxq_def); ++ mvneta_set_special_mcast_table(pp, pp->rxq_def); ++ mvneta_set_other_mcast_table(pp, pp->rxq_def); + } else { + /* Accept single Unicast */ + mvneta_rx_unicast_promisc_set(pp, 0); + mvneta_set_ucast_table(pp, -1); +- mvneta_mac_addr_set(pp, dev->dev_addr, rxq_def); ++ mvneta_mac_addr_set(pp, dev->dev_addr, pp->rxq_def); + + if (dev->flags & IFF_ALLMULTI) { + /* Accept all multicast */ +- mvneta_set_special_mcast_table(pp, rxq_def); +- mvneta_set_other_mcast_table(pp, rxq_def); ++ mvneta_set_special_mcast_table(pp, pp->rxq_def); ++ mvneta_set_other_mcast_table(pp, pp->rxq_def); + } else { + /* Accept only initialized multicast */ + mvneta_set_special_mcast_table(pp, -1); +@@ -2122,7 +2123,7 @@ static void mvneta_set_rx_mode(struct ne + if (!netdev_mc_empty(dev)) { + netdev_for_each_mc_addr(ha, dev) { + mvneta_mcast_addr_set(pp, ha->addr, +- rxq_def); ++ pp->rxq_def); + } + } + } +@@ -2205,7 +2206,7 @@ static int mvneta_poll(struct napi_struc + * RX packets + */ + cause_rx_tx |= port->cause_rx_tx; +- rx_done = mvneta_rx(pp, budget, &pp->rxqs[rxq_def]); ++ rx_done = mvneta_rx(pp, budget, &pp->rxqs[pp->rxq_def]); + budget -= rx_done; + + if (budget > 0) { +@@ -2418,17 +2419,17 @@ static void mvneta_cleanup_txqs(struct m + /* Cleanup all Rx queues */ + static void mvneta_cleanup_rxqs(struct mvneta_port *pp) + { +- mvneta_rxq_deinit(pp, &pp->rxqs[rxq_def]); ++ mvneta_rxq_deinit(pp, &pp->rxqs[pp->rxq_def]); + } + + + /* Init all Rx queues */ + static int mvneta_setup_rxqs(struct mvneta_port *pp) + { +- int err = mvneta_rxq_init(pp, &pp->rxqs[rxq_def]); ++ int err = mvneta_rxq_init(pp, &pp->rxqs[pp->rxq_def]); + if (err) { + netdev_err(pp->dev, "%s: can't create rxq=%d\n", +- __func__, rxq_def); ++ __func__, pp->rxq_def); + mvneta_cleanup_rxqs(pp); + return err; + } +@@ -2634,7 +2635,7 @@ static int mvneta_set_mac_addr(struct ne + mvneta_mac_addr_set(pp, dev->dev_addr, -1); + + /* Set new addr in hw */ +- mvneta_mac_addr_set(pp, sockaddr->sa_data, rxq_def); ++ mvneta_mac_addr_set(pp, sockaddr->sa_data, pp->rxq_def); + + eth_commit_mac_addr_change(dev, addr); + return 0; +@@ -2753,7 +2754,7 @@ static void mvneta_percpu_elect(struct m + { + int online_cpu_idx, cpu, i = 0; + +- online_cpu_idx = rxq_def % 
num_online_cpus(); ++ online_cpu_idx = pp->rxq_def % num_online_cpus(); + + for_each_online_cpu(cpu) { + if (i == online_cpu_idx) +@@ -3363,6 +3364,8 @@ static int mvneta_probe(struct platform_ + strcmp(managed, "in-band-status") == 0); + pp->cpu_notifier.notifier_call = mvneta_percpu_notifier; + ++ pp->rxq_def = rxq_def; ++ + pp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pp->clk)) { + err = PTR_ERR(pp->clk); diff --git a/target/linux/mvebu/patches-4.4/033-net-mvneta-Associate-RX-queues-with-each-CPU.patch b/target/linux/mvebu/patches-4.4/033-net-mvneta-Associate-RX-queues-with-each-CPU.patch new file mode 100644 index 0000000000..636c6cf364 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/033-net-mvneta-Associate-RX-queues-with-each-CPU.patch @@ -0,0 +1,278 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Wed, 9 Dec 2015 18:23:49 +0100 +Subject: [PATCH] net: mvneta: Associate RX queues with each CPU + +We enable the percpu interrupt for all the CPU and we just associate a +CPU to a few queue at the neta level. The mapping between the CPUs and +the queues is static. The queues are associated to the CPU module the +number of CPUs. However currently we only use on RX queue for a given +Ethernet port. + +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -110,9 +110,16 @@ + #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) + #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff + #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 ++#define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) + #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) + +-/* Exception Interrupt Port/Queue Cause register */ ++/* Exception Interrupt Port/Queue Cause register ++ * ++ * Their behavior depend of the mapping done using the PCPX2Q ++ * registers. For a given CPU if the bit associated to a queue is not ++ * set, then for the register a read from this CPU will always return ++ * 0 and a write won't do anything ++ */ + + #define MVNETA_INTR_NEW_CAUSE 0x25a0 + #define MVNETA_INTR_NEW_MASK 0x25a4 +@@ -820,7 +827,13 @@ static void mvneta_port_up(struct mvneta + mvreg_write(pp, MVNETA_TXQ_CMD, q_map); + + /* Enable all initialized RXQs. */ +- mvreg_write(pp, MVNETA_RXQ_CMD, BIT(pp->rxq_def)); ++ for (queue = 0; queue < rxq_number; queue++) { ++ struct mvneta_rx_queue *rxq = &pp->rxqs[queue]; ++ ++ if (rxq->descs != NULL) ++ q_map |= (1 << queue); ++ } ++ mvreg_write(pp, MVNETA_RXQ_CMD, q_map); + } + + /* Stop the Ethernet port activity */ +@@ -1026,6 +1039,7 @@ static void mvneta_defaults_set(struct m + int cpu; + int queue; + u32 val; ++ int max_cpu = num_present_cpus(); + + /* Clear all Cause registers */ + mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); +@@ -1041,13 +1055,23 @@ static void mvneta_defaults_set(struct m + /* Enable MBUS Retry bit16 */ + mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); + +- /* Set CPU queue access map - all CPUs have access to all RX +- * queues and to all TX queues ++ /* Set CPU queue access map. CPUs are assigned to the RX ++ * queues modulo their number and all the TX queues are ++ * assigned to the CPU associated to the default RX queue. 
+ */ +- for_each_present_cpu(cpu) +- mvreg_write(pp, MVNETA_CPU_MAP(cpu), +- (MVNETA_CPU_RXQ_ACCESS_ALL_MASK | +- MVNETA_CPU_TXQ_ACCESS_ALL_MASK)); ++ for_each_present_cpu(cpu) { ++ int rxq_map = 0, txq_map = 0; ++ int rxq; ++ ++ for (rxq = 0; rxq < rxq_number; rxq++) ++ if ((rxq % max_cpu) == cpu) ++ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); ++ ++ if (cpu == rxq_def) ++ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; ++ ++ mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); ++ } + + /* Reset RX and TX DMAs */ + mvreg_write(pp, MVNETA_PORT_RX_RESET, MVNETA_PORT_RX_DMA_RESET); +@@ -2174,6 +2198,7 @@ static int mvneta_poll(struct napi_struc + { + int rx_done = 0; + u32 cause_rx_tx; ++ int rx_queue; + struct mvneta_port *pp = netdev_priv(napi->dev); + struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); + +@@ -2205,8 +2230,15 @@ static int mvneta_poll(struct napi_struc + /* For the case where the last mvneta_poll did not process all + * RX packets + */ ++ rx_queue = fls(((cause_rx_tx >> 8) & 0xff)); ++ + cause_rx_tx |= port->cause_rx_tx; +- rx_done = mvneta_rx(pp, budget, &pp->rxqs[pp->rxq_def]); ++ ++ if (rx_queue) { ++ rx_queue = rx_queue - 1; ++ rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); ++ } ++ + budget -= rx_done; + + if (budget > 0) { +@@ -2419,19 +2451,27 @@ static void mvneta_cleanup_txqs(struct m + /* Cleanup all Rx queues */ + static void mvneta_cleanup_rxqs(struct mvneta_port *pp) + { +- mvneta_rxq_deinit(pp, &pp->rxqs[pp->rxq_def]); ++ int queue; ++ ++ for (queue = 0; queue < txq_number; queue++) ++ mvneta_rxq_deinit(pp, &pp->rxqs[queue]); + } + + + /* Init all Rx queues */ + static int mvneta_setup_rxqs(struct mvneta_port *pp) + { +- int err = mvneta_rxq_init(pp, &pp->rxqs[pp->rxq_def]); +- if (err) { +- netdev_err(pp->dev, "%s: can't create rxq=%d\n", +- __func__, pp->rxq_def); +- mvneta_cleanup_rxqs(pp); +- return err; ++ int queue; ++ ++ for (queue = 0; queue < rxq_number; queue++) { ++ int err = mvneta_rxq_init(pp, &pp->rxqs[queue]); ++ ++ if (err) { ++ netdev_err(pp->dev, "%s: can't create rxq=%d\n", ++ __func__, queue); ++ mvneta_cleanup_rxqs(pp); ++ return err; ++ } + } + + return 0; +@@ -2455,6 +2495,19 @@ static int mvneta_setup_txqs(struct mvne + return 0; + } + ++static void mvneta_percpu_unmask_interrupt(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ /* All the queue are unmasked, but actually only the ones ++ * maped to this CPU will be unmasked ++ */ ++ mvreg_write(pp, MVNETA_INTR_NEW_MASK, ++ MVNETA_RX_INTR_MASK_ALL | ++ MVNETA_TX_INTR_MASK_ALL | ++ MVNETA_MISCINTR_INTR_MASK); ++} ++ + static void mvneta_start_dev(struct mvneta_port *pp) + { + unsigned int cpu; +@@ -2472,11 +2525,10 @@ static void mvneta_start_dev(struct mvne + napi_enable(&port->napi); + } + +- /* Unmask interrupts */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, +- MVNETA_RX_INTR_MASK(rxq_number) | +- MVNETA_TX_INTR_MASK(txq_number) | +- MVNETA_MISCINTR_INTR_MASK); ++ /* Unmask interrupts. 
It has to be done from each CPU */ ++ for_each_online_cpu(cpu) ++ smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, ++ pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | +@@ -2752,22 +2804,35 @@ static void mvneta_percpu_disable(void * + + static void mvneta_percpu_elect(struct mvneta_port *pp) + { +- int online_cpu_idx, cpu, i = 0; ++ int online_cpu_idx, max_cpu, cpu, i = 0; + + online_cpu_idx = pp->rxq_def % num_online_cpus(); ++ max_cpu = num_present_cpus(); + + for_each_online_cpu(cpu) { +- if (i == online_cpu_idx) +- /* Enable per-CPU interrupt on the one CPU we +- * just elected ++ int rxq_map = 0, txq_map = 0; ++ int rxq; ++ ++ for (rxq = 0; rxq < rxq_number; rxq++) ++ if ((rxq % max_cpu) == cpu) ++ rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); ++ ++ if (i == online_cpu_idx) { ++ /* Map the default receive queue and transmit ++ * queue to the elected CPU + */ +- smp_call_function_single(cpu, mvneta_percpu_enable, +- pp, true); +- else +- /* Disable per-CPU interrupt on all the other CPU */ +- smp_call_function_single(cpu, mvneta_percpu_disable, +- pp, true); ++ rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); ++ txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; ++ } ++ mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); ++ ++ /* Update the interrupt mask on each CPU according the ++ * new mapping ++ */ ++ smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, ++ pp, true); + i++; ++ + } + }; + +@@ -2802,12 +2867,22 @@ static int mvneta_percpu_notifier(struct + mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); + napi_enable(&port->napi); + ++ ++ /* Enable per-CPU interrupts on the CPU that is ++ * brought up. ++ */ ++ smp_call_function_single(cpu, mvneta_percpu_enable, ++ pp, true); ++ + /* Enable per-CPU interrupt on the one CPU we care + * about. + */ + mvneta_percpu_elect(pp); + +- /* Unmask all ethernet port interrupts */ ++ /* Unmask all ethernet port interrupts, as this ++ * notifier is called for each CPU then the CPU to ++ * Queue mapping is applied ++ */ + mvreg_write(pp, MVNETA_INTR_NEW_MASK, + MVNETA_RX_INTR_MASK(rxq_number) | + MVNETA_TX_INTR_MASK(txq_number) | +@@ -2858,7 +2933,7 @@ static int mvneta_percpu_notifier(struct + static int mvneta_open(struct net_device *dev) + { + struct mvneta_port *pp = netdev_priv(dev); +- int ret; ++ int ret, cpu; + + pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); + pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + +@@ -2888,8 +2963,13 @@ static int mvneta_open(struct net_device + */ + mvneta_percpu_disable(pp); + +- /* Elect a CPU to handle our RX queue interrupt */ +- mvneta_percpu_elect(pp); ++ /* Enable per-CPU interrupt on all the CPU to handle our RX ++ * queue interrupts ++ */ ++ for_each_online_cpu(cpu) ++ smp_call_function_single(cpu, mvneta_percpu_enable, ++ pp, true); ++ + + /* Register a CPU notifier to handle the case where our CPU + * might be taken offline. diff --git a/target/linux/mvebu/patches-4.4/034-net-mvneta-Add-naive-RSS-support.patch b/target/linux/mvebu/patches-4.4/034-net-mvneta-Add-naive-RSS-support.patch new file mode 100644 index 0000000000..629c44c8a1 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/034-net-mvneta-Add-naive-RSS-support.patch @@ -0,0 +1,191 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Wed, 9 Dec 2015 18:23:50 +0100 +Subject: [PATCH] net: mvneta: Add naive RSS support + +This patch adds the support for the RSS related ethtool +function. 
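(An illustrative aside, not part of the patch text: with a one-entry lookup table every flow hashes to entry 0, so that single entry decides the port's default RX queue -- pp->rxq_def = pp->indir[0] in mvneta_config_rss() below -- and the static queue-to-CPU mapping from the earlier "Associate RX queues with each CPU" patch then decides which CPU services all RX for the port. The helper name in this sketch is made up for illustration. Assuming the standard ethtool -X "weight" syntax, steering everything to queue 2 would look like: ethtool -X eth0 weight 0 0 1.)

    /* Sketch: one-entry RSS table -> default rxq -> serving CPU */
    #define RSS_TABLE_SIZE 1        /* matches MVNETA_RSS_LU_TABLE_SIZE */

    static unsigned int rx_cpu_for(const unsigned int indir[RSS_TABLE_SIZE],
                                   unsigned int nr_cpus)
    {
            unsigned int rxq_def = indir[0]; /* the single entry wins */

            return rxq_def % nr_cpus;        /* rxq-to-CPU mapping is static */
    }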
Currently it only uses one entry in the indirection table which +allows associating an mvneta interface to a given CPU. + +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Tested-by: Marcin Wojtas <mw@semihalf.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -261,6 +261,11 @@ + + #define MVNETA_TX_MTU_MAX 0x3ffff + ++/* The RSS lookup table actually has 256 entries but we do not use ++ * them yet ++ */ ++#define MVNETA_RSS_LU_TABLE_SIZE 1 ++ + /* TSO header size */ + #define TSO_HEADER_SIZE 128 + +@@ -382,6 +387,8 @@ struct mvneta_port { + unsigned int use_inband_status:1; + + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; ++ ++ u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; + }; + + /* The mvneta_tx_desc and mvneta_rx_desc structures describe the +@@ -1067,7 +1074,7 @@ static void mvneta_defaults_set(struct m + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + +- if (cpu == rxq_def) ++ if (cpu == pp->rxq_def) + txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); +@@ -2508,6 +2515,18 @@ static void mvneta_percpu_unmask_interru + MVNETA_MISCINTR_INTR_MASK); + } + ++static void mvneta_percpu_mask_interrupt(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ /* All the queue are masked, but actually only the ones ++ * maped to this CPU will be masked ++ */ ++ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); ++ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); ++ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); ++} ++ + static void mvneta_start_dev(struct mvneta_port *pp) + { + unsigned int cpu; +@@ -3231,6 +3250,106 @@ static int mvneta_ethtool_get_sset_count + return -EOPNOTSUPP; + } + ++static u32 mvneta_ethtool_get_rxfh_indir_size(struct net_device *dev) ++{ ++ return MVNETA_RSS_LU_TABLE_SIZE; ++} ++ ++static int mvneta_ethtool_get_rxnfc(struct net_device *dev, ++ struct ethtool_rxnfc *info, ++ u32 *rules __always_unused) ++{ ++ switch (info->cmd) { ++ case ETHTOOL_GRXRINGS: ++ info->data = rxq_number; ++ return 0; ++ case ETHTOOL_GRXFH: ++ return -EOPNOTSUPP; ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ ++static int mvneta_config_rss(struct mvneta_port *pp) ++{ ++ int cpu; ++ u32 val; ++ ++ netif_tx_stop_all_queues(pp->dev); ++ ++ for_each_online_cpu(cpu) ++ smp_call_function_single(cpu, mvneta_percpu_mask_interrupt, ++ pp, true); ++ ++ /* We have to synchronise on the napi of each CPU */ ++ for_each_online_cpu(cpu) { ++ struct mvneta_pcpu_port *pcpu_port = ++ per_cpu_ptr(pp->ports, cpu); ++ ++ napi_synchronize(&pcpu_port->napi); ++ napi_disable(&pcpu_port->napi); ++ } ++ ++ pp->rxq_def = pp->indir[0]; ++ ++ /* Update unicast mapping */ ++ mvneta_set_rx_mode(pp->dev); ++ ++ /* Update val of portCfg register accordingly with all RxQueue types */ ++ val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); ++ mvreg_write(pp, MVNETA_PORT_CONFIG, val); ++ ++ /* Update the elected CPU matching the new rxq_def */ ++ mvneta_percpu_elect(pp); ++ ++ /* We have to synchronise on the napi of each CPU */ ++ for_each_online_cpu(cpu) { ++ struct mvneta_pcpu_port *pcpu_port = ++ per_cpu_ptr(pp->ports, cpu); ++ ++ napi_enable(&pcpu_port->napi); ++ } ++ ++ netif_tx_start_all_queues(pp->dev); ++ ++ return 0; ++} ++ ++static int mvneta_ethtool_set_rxfh(struct net_device *dev, const u32 *indir, ++ const u8 *key, const u8 hfunc) ++{ ++ struct mvneta_port *pp = netdev_priv(dev); ++ /* We require at least one supported parameter to be changed 
++ * and no change in any of the unsupported parameters ++ */ ++ if (key || ++ (hfunc != ETH_RSS_HASH_NO_CHANGE && hfunc != ETH_RSS_HASH_TOP)) ++ return -EOPNOTSUPP; ++ ++ if (!indir) ++ return 0; ++ ++ memcpy(pp->indir, indir, MVNETA_RSS_LU_TABLE_SIZE); ++ ++ return mvneta_config_rss(pp); ++} ++ ++static int mvneta_ethtool_get_rxfh(struct net_device *dev, u32 *indir, u8 *key, ++ u8 *hfunc) ++{ ++ struct mvneta_port *pp = netdev_priv(dev); ++ ++ if (hfunc) ++ *hfunc = ETH_RSS_HASH_TOP; ++ ++ if (!indir) ++ return 0; ++ ++ memcpy(indir, pp->indir, MVNETA_RSS_LU_TABLE_SIZE); ++ ++ return 0; ++} ++ + static const struct net_device_ops mvneta_netdev_ops = { + .ndo_open = mvneta_open, + .ndo_stop = mvneta_stop, +@@ -3255,6 +3374,10 @@ const struct ethtool_ops mvneta_eth_tool + .get_strings = mvneta_ethtool_get_strings, + .get_ethtool_stats = mvneta_ethtool_get_stats, + .get_sset_count = mvneta_ethtool_get_sset_count, ++ .get_rxfh_indir_size = mvneta_ethtool_get_rxfh_indir_size, ++ .get_rxnfc = mvneta_ethtool_get_rxnfc, ++ .get_rxfh = mvneta_ethtool_get_rxfh, ++ .set_rxfh = mvneta_ethtool_set_rxfh, + }; + + /* Initialize hw */ +@@ -3446,6 +3569,8 @@ static int mvneta_probe(struct platform_ + + pp->rxq_def = rxq_def; + ++ pp->indir[0] = rxq_def; ++ + pp->clk = devm_clk_get(&pdev->dev, NULL); + if (IS_ERR(pp->clk)) { + err = PTR_ERR(pp->clk); diff --git a/target/linux/mvebu/patches-4.4/035-net-mvneta-Configure-XPS-support.patch b/target/linux/mvebu/patches-4.4/035-net-mvneta-Configure-XPS-support.patch new file mode 100644 index 0000000000..5b35b6442b --- /dev/null +++ b/target/linux/mvebu/patches-4.4/035-net-mvneta-Configure-XPS-support.patch @@ -0,0 +1,124 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Wed, 9 Dec 2015 18:23:51 +0100 +Subject: [PATCH] net: mvneta: Configure XPS support + +With this patch each CPU is associated with its own set of TX queues. + +It also setup the XPS with an initial configuration which set the +affinity matching the hardware configuration. + +Suggested-by: Arnd Bergmann <arnd@arndb.de> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -111,6 +111,7 @@ + #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff + #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 + #define MVNETA_CPU_RXQ_ACCESS(rxq) BIT(rxq) ++#define MVNETA_CPU_TXQ_ACCESS(txq) BIT(txq + 8) + #define MVNETA_RXQ_TIME_COAL_REG(q) (0x2580 + ((q) << 2)) + + /* Exception Interrupt Port/Queue Cause register +@@ -514,6 +515,9 @@ struct mvneta_tx_queue { + + /* DMA address of TSO headers */ + dma_addr_t tso_hdrs_phys; ++ ++ /* Affinity mask for CPUs*/ ++ cpumask_t affinity_mask; + }; + + struct mvneta_rx_queue { +@@ -1062,20 +1066,30 @@ static void mvneta_defaults_set(struct m + /* Enable MBUS Retry bit16 */ + mvreg_write(pp, MVNETA_MBUS_RETRY, 0x20); + +- /* Set CPU queue access map. CPUs are assigned to the RX +- * queues modulo their number and all the TX queues are +- * assigned to the CPU associated to the default RX queue. ++ /* Set CPU queue access map. CPUs are assigned to the RX and ++ * TX queues modulo their number. If there is only one TX ++ * queue then it is assigned to the CPU associated to the ++ * default RX queue. 
+ */ + for_each_present_cpu(cpu) { + int rxq_map = 0, txq_map = 0; +- int rxq; ++ int rxq, txq; + + for (rxq = 0; rxq < rxq_number; rxq++) + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + +- if (cpu == pp->rxq_def) +- txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; ++ for (txq = 0; txq < txq_number; txq++) ++ if ((txq % max_cpu) == cpu) ++ txq_map |= MVNETA_CPU_TXQ_ACCESS(txq); ++ ++ /* With only one TX queue we configure a special case ++ * which will allow to get all the irq on a single ++ * CPU ++ */ ++ if (txq_number == 1) ++ txq_map = (cpu == pp->rxq_def) ? ++ MVNETA_CPU_TXQ_ACCESS(1) : 0; + + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + } +@@ -2362,6 +2376,8 @@ static void mvneta_rxq_deinit(struct mvn + static int mvneta_txq_init(struct mvneta_port *pp, + struct mvneta_tx_queue *txq) + { ++ int cpu; ++ + txq->size = pp->tx_ring_size; + + /* A queue must always have room for at least one skb. +@@ -2414,6 +2430,14 @@ static int mvneta_txq_init(struct mvneta + } + mvneta_tx_done_pkts_coal_set(pp, txq, txq->done_pkts_coal); + ++ /* Setup XPS mapping */ ++ if (txq_number > 1) ++ cpu = txq->id % num_present_cpus(); ++ else ++ cpu = pp->rxq_def % num_present_cpus(); ++ cpumask_set_cpu(cpu, &txq->affinity_mask); ++ netif_set_xps_queue(pp->dev, &txq->affinity_mask, txq->id); ++ + return 0; + } + +@@ -2836,13 +2860,23 @@ static void mvneta_percpu_elect(struct m + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + +- if (i == online_cpu_idx) { +- /* Map the default receive queue and transmit +- * queue to the elected CPU ++ if (i == online_cpu_idx) ++ /* Map the default receive queue queue to the ++ * elected CPU + */ + rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); +- txq_map = MVNETA_CPU_TXQ_ACCESS_ALL_MASK; +- } ++ ++ /* We update the TX queue map only if we have one ++ * queue. In this case we associate the TX queue to ++ * the CPU bound to the default RX queue ++ */ ++ if (txq_number == 1) ++ txq_map = (i == online_cpu_idx) ? ++ MVNETA_CPU_TXQ_ACCESS(1) : 0; ++ else ++ txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & ++ MVNETA_CPU_TXQ_ACCESS_ALL_MASK; ++ + mvreg_write(pp, MVNETA_CPU_MAP(cpu), rxq_map | txq_map); + + /* Update the interrupt mask on each CPU according the diff --git a/target/linux/mvebu/patches-4.4/036-net-mvneta-fix-trivial-cut-off-issue-in-mvneta_ethto.patch b/target/linux/mvebu/patches-4.4/036-net-mvneta-fix-trivial-cut-off-issue-in-mvneta_ethto.patch new file mode 100644 index 0000000000..2bd7a88032 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/036-net-mvneta-fix-trivial-cut-off-issue-in-mvneta_ethto.patch @@ -0,0 +1,46 @@ +From: Jisheng Zhang <jszhang@marvell.com> +Date: Wed, 20 Jan 2016 16:36:25 +0800 +Subject: [PATCH] net: mvneta: fix trivial cut-off issue in + mvneta_ethtool_update_stats + +When s->type is T_REG_64, the high 32bits are lost in val. This patch +fixes this trivial issue. + +Signed-off-by: Jisheng Zhang <jszhang@marvell.com> +Fixes: 9b0cdefa4cd5 ("net: mvneta: add ethtool statistics") +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -3242,26 +3242,25 @@ static void mvneta_ethtool_update_stats( + const struct mvneta_statistic *s; + void __iomem *base = pp->base; + u32 high, low, val; ++ u64 val64; + int i; + + for (i = 0, s = mvneta_statistics; + s < mvneta_statistics + ARRAY_SIZE(mvneta_statistics); + s++, i++) { +- val = 0; +- + switch (s->type) { + case T_REG_32: + val = readl_relaxed(base + s->offset); ++ pp->ethtool_stats[i] += val; + break; + case T_REG_64: + /* Docs say to read low 32-bit then high */ + low = readl_relaxed(base + s->offset); + high = readl_relaxed(base + s->offset + 4); +- val = (u64)high << 32 | low; ++ val64 = (u64)high << 32 | low; ++ pp->ethtool_stats[i] += val64; + break; + } +- +- pp->ethtool_stats[i] += val; + } + } + diff --git a/target/linux/mvebu/patches-4.4/037-net-mvneta-Fix-for_each_present_cpu-usage.patch b/target/linux/mvebu/patches-4.4/037-net-mvneta-Fix-for_each_present_cpu-usage.patch new file mode 100644 index 0000000000..9ded5b71fe --- /dev/null +++ b/target/linux/mvebu/patches-4.4/037-net-mvneta-Fix-for_each_present_cpu-usage.patch @@ -0,0 +1,55 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Thu, 4 Feb 2016 22:09:23 +0100 +Subject: [PATCH] net: mvneta: Fix for_each_present_cpu usage + +This patch converts the for_each_present loops into on_each_cpu: instead +of applying to the present CPUs, the calls are now applied only to the +online CPUs. This fixes a bug reported at +http://thread.gmane.org/gmane.linux.ports.arm.kernel/468173. + +Using the macro on_each_cpu (instead of a for_each_* loop) also ensures +that all the calls will be done all at once. + +Fixes: f86428854480 ("net: mvneta: Statically assign queues to CPUs") +Reported-by: Stefan Roese <stefan.roese@gmail.com> +Suggested-by: Jisheng Zhang <jszhang@marvell.com> +Suggested-by: Russell King <rmk+kernel@arm.linux.org.uk> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S.
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -2562,7 +2562,7 @@ static void mvneta_start_dev(struct mvne + mvneta_port_enable(pp); + + /* Enable polling on the port */ +- for_each_present_cpu(cpu) { ++ for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + + napi_enable(&port->napi); +@@ -2587,7 +2587,7 @@ static void mvneta_stop_dev(struct mvnet + + phy_stop(pp->phy_dev); + +- for_each_present_cpu(cpu) { ++ for_each_online_cpu(cpu) { + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + + napi_disable(&port->napi); +@@ -3055,13 +3055,11 @@ err_cleanup_rxqs: + static int mvneta_stop(struct net_device *dev) + { + struct mvneta_port *pp = netdev_priv(dev); +- int cpu; + + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + unregister_cpu_notifier(&pp->cpu_notifier); +- for_each_present_cpu(cpu) +- smp_call_function_single(cpu, mvneta_percpu_disable, pp, true); ++ on_each_cpu(mvneta_percpu_disable, pp, true); + free_percpu_irq(dev->irq, pp->ports); + mvneta_cleanup_rxqs(pp); + mvneta_cleanup_txqs(pp); diff --git a/target/linux/mvebu/patches-4.4/038-net-mvneta-Fix-the-CPU-choice-in-mvneta_percpu_elect.patch b/target/linux/mvebu/patches-4.4/038-net-mvneta-Fix-the-CPU-choice-in-mvneta_percpu_elect.patch new file mode 100644 index 0000000000..4fc9ccc76a --- /dev/null +++ b/target/linux/mvebu/patches-4.4/038-net-mvneta-Fix-the-CPU-choice-in-mvneta_percpu_elect.patch @@ -0,0 +1,57 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Thu, 4 Feb 2016 22:09:24 +0100 +Subject: [PATCH] net: mvneta: Fix the CPU choice in mvneta_percpu_elect + +When passing to the management of multiple RX queues, the +mvneta_percpu_elect function was broken. The use of the modulo can lead +to electing the wrong CPU. For example with rxq_def=2, if CPU 2 goes +offline and then online, we ended up with the third RX queue activated at +the same time on CPU 0 and CPU 2, which led to a kernel crash. + +With this fix, we don't try to get "the closest" CPU if the default CPU +is gone; now we just use CPU 0, which is always there. Thanks to this, +the code becomes more readable, easier to maintain and more predictable. + +Cc: stable@vger.kernel.org +Fixes: 2dcf75e2793c ("net: mvneta: Associate RX queues with each CPU") +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -2847,9 +2847,14 @@ static void mvneta_percpu_disable(void * + + static void mvneta_percpu_elect(struct mvneta_port *pp) + { +- int online_cpu_idx, max_cpu, cpu, i = 0; ++ int elected_cpu = 0, max_cpu, cpu, i = 0; ++ ++ /* Use the cpu associated to the rxq when it is online, in all ++ * the other cases, use the cpu 0 which can't be offline. ++ */ ++ if (cpu_online(pp->rxq_def)) ++ elected_cpu = pp->rxq_def; + +- online_cpu_idx = pp->rxq_def % num_online_cpus(); + max_cpu = num_present_cpus(); + + for_each_online_cpu(cpu) { +@@ -2860,7 +2865,7 @@ static void mvneta_percpu_elect(struct m + if ((rxq % max_cpu) == cpu) + rxq_map |= MVNETA_CPU_RXQ_ACCESS(rxq); + +- if (i == online_cpu_idx) ++ if (cpu == elected_cpu) + /* Map the default receive queue queue to the + * elected CPU + */ + rxq_map |= MVNETA_CPU_RXQ_ACCESS(pp->rxq_def); +@@ -2871,7 +2876,7 @@ static void mvneta_percpu_elect(struct m + * the CPU bound to the default RX queue + */ + if (txq_number == 1) +- txq_map = (i == online_cpu_idx) ?
++ txq_map = (cpu == elected_cpu) ? + MVNETA_CPU_TXQ_ACCESS(1) : 0; + else + txq_map = mvreg_read(pp, MVNETA_CPU_MAP(cpu)) & diff --git a/target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch b/target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch new file mode 100644 index 0000000000..76257a2a71 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/039-net-mvneta-Use-on_each_cpu-when-possible.patch @@ -0,0 +1,68 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Thu, 4 Feb 2016 22:09:25 +0100 +Subject: [PATCH] net: mvneta: Use on_each_cpu when possible + +Instead of using a for_each_* loop in which we just call the +smp_call_function_single macro, it is simpler to directly use the +on_each_cpu macro. Moreover, this macro ensures that the calls will be +done all at once. + +Suggested-by: Russell King <rmk+kernel@arm.linux.org.uk> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -2553,7 +2553,7 @@ static void mvneta_percpu_mask_interrupt + + static void mvneta_start_dev(struct mvneta_port *pp) + { +- unsigned int cpu; ++ int cpu; + + mvneta_max_rx_size_set(pp, pp->pkt_size); + mvneta_txq_max_tx_size_set(pp, pp->pkt_size); +@@ -2569,9 +2569,8 @@ static void mvneta_start_dev(struct mvne + } + + /* Unmask interrupts. It has to be done from each CPU */ +- for_each_online_cpu(cpu) +- smp_call_function_single(cpu, mvneta_percpu_unmask_interrupt, +- pp, true); ++ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); ++ + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | +@@ -2991,7 +2990,7 @@ static int mvneta_percpu_notifier(struct + static int mvneta_open(struct net_device *dev) + { + struct mvneta_port *pp = netdev_priv(dev); +- int ret, cpu; ++ int ret; + + pp->pkt_size = MVNETA_RX_PKT_SIZE(pp->dev->mtu); + pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + +@@ -3024,9 +3023,7 @@ static int mvneta_open(struct net_device + /* Enable per-CPU interrupt on all the CPU to handle our RX + * queue interrupts + */ +- for_each_online_cpu(cpu) +- smp_call_function_single(cpu, mvneta_percpu_enable, +- pp, true); ++ on_each_cpu(mvneta_percpu_enable, pp, true); + + + /* Register a CPU notifier to handle the case where our CPU +@@ -3313,9 +3310,7 @@ static int mvneta_config_rss(struct mvn + + netif_tx_stop_all_queues(pp->dev); + +- for_each_online_cpu(cpu) +- smp_call_function_single(cpu, mvneta_percpu_mask_interrupt, +- pp, true); ++ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { diff --git a/target/linux/mvebu/patches-4.4/040-net-mvneta-Modify-the-queue-related-fields-from-each.patch b/target/linux/mvebu/patches-4.4/040-net-mvneta-Modify-the-queue-related-fields-from-each.patch new file mode 100644 index 0000000000..b025777df9 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/040-net-mvneta-Modify-the-queue-related-fields-from-each.patch @@ -0,0 +1,179 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Thu, 4 Feb 2016 22:09:27 +0100 +Subject: [PATCH] net: mvneta: Modify the queue related fields from each cpu + +In the MVNETA_INTR_* registers, the queue-related fields are per CPU, +according to the datasheet (comments in [] are added by me): +"In a multi-CPU system, bits of RX[or
TX] queues for which the access by +the reading[or writing] CPU is disabled are read as 0, and cannot be +cleared[or written]." + +That means that each time we want to manipulate these bits we had to do +it on each cpu and not only on the current cpu. + +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -1036,6 +1036,43 @@ static void mvneta_set_autoneg(struct mv + } + } + ++static void mvneta_percpu_unmask_interrupt(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ /* All the queue are unmasked, but actually only the ones ++ * mapped to this CPU will be unmasked ++ */ ++ mvreg_write(pp, MVNETA_INTR_NEW_MASK, ++ MVNETA_RX_INTR_MASK_ALL | ++ MVNETA_TX_INTR_MASK_ALL | ++ MVNETA_MISCINTR_INTR_MASK); ++} ++ ++static void mvneta_percpu_mask_interrupt(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ /* All the queue are masked, but actually only the ones ++ * mapped to this CPU will be masked ++ */ ++ mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); ++ mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); ++ mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); ++} ++ ++static void mvneta_percpu_clear_intr_cause(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ /* All the queue are cleared, but actually only the ones ++ * mapped to this CPU will be cleared ++ */ ++ mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); ++ mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); ++ mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); ++} ++ + /* This method sets defaults to the NETA port: + * Clears interrupt Cause and Mask registers. + * Clears all MAC tables. +@@ -1053,14 +1090,10 @@ static void mvneta_defaults_set(struct m + int max_cpu = num_present_cpus(); + + /* Clear all Cause registers */ +- mvreg_write(pp, MVNETA_INTR_NEW_CAUSE, 0); +- mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); +- mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); ++ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); + + /* Mask all interrupts */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); ++ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_ENABLE, 0); + + /* Enable MBUS Retry bit16 */ +@@ -2526,31 +2559,6 @@ static int mvneta_setup_txqs(struct mvne + return 0; + } + +-static void mvneta_percpu_unmask_interrupt(void *arg) +-{ +- struct mvneta_port *pp = arg; +- +- /* All the queue are unmasked, but actually only the ones +- * maped to this CPU will be unmasked +- */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, +- MVNETA_RX_INTR_MASK_ALL | +- MVNETA_TX_INTR_MASK_ALL | +- MVNETA_MISCINTR_INTR_MASK); +-} +- +-static void mvneta_percpu_mask_interrupt(void *arg) +-{ +- struct mvneta_port *pp = arg; +- +- /* All the queue are masked, but actually only the ones +- * maped to this CPU will be masked +- */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); +-} +- + static void mvneta_start_dev(struct mvneta_port *pp) + { + int cpu; +@@ -2601,13 +2609,10 @@ static void mvneta_stop_dev(struct mvnet + mvneta_port_disable(pp); + + /* Clear all ethernet port interrupts */ +- mvreg_write(pp, MVNETA_INTR_MISC_CAUSE, 0); +- mvreg_write(pp, MVNETA_INTR_OLD_CAUSE, 0); ++ on_each_cpu(mvneta_percpu_clear_intr_cause, pp, true); + + /* Mask all ethernet port interrupts */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); +- 
mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); ++ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + + mvneta_tx_reset(pp); + mvneta_rx_reset(pp); +@@ -2919,9 +2924,7 @@ static int mvneta_percpu_notifier(struct + } + + /* Mask all ethernet port interrupts */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); ++ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + napi_enable(&port->napi); + + +@@ -2936,14 +2939,8 @@ static int mvneta_percpu_notifier(struct + */ + mvneta_percpu_elect(pp); + +- /* Unmask all ethernet port interrupts, as this +- * notifier is called for each CPU then the CPU to +- * Queue mapping is applied +- */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, +- MVNETA_RX_INTR_MASK(rxq_number) | +- MVNETA_TX_INTR_MASK(txq_number) | +- MVNETA_MISCINTR_INTR_MASK); ++ /* Unmask all ethernet port interrupts */ ++ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | +@@ -2954,9 +2951,7 @@ static int mvneta_percpu_notifier(struct + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + netif_tx_stop_all_queues(pp->dev); + /* Mask all ethernet port interrupts */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_OLD_MASK, 0); +- mvreg_write(pp, MVNETA_INTR_MISC_MASK, 0); ++ on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); + + napi_synchronize(&port->napi); + napi_disable(&port->napi); +@@ -2972,10 +2967,7 @@ static int mvneta_percpu_notifier(struct + /* Check if a new CPU must be elected now this on is down */ + mvneta_percpu_elect(pp); + /* Unmask all ethernet port interrupts */ +- mvreg_write(pp, MVNETA_INTR_NEW_MASK, +- MVNETA_RX_INTR_MASK(rxq_number) | +- MVNETA_TX_INTR_MASK(txq_number) | +- MVNETA_MISCINTR_INTR_MASK); ++ on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, + MVNETA_CAUSE_PHY_STATUS_CHANGE | + MVNETA_CAUSE_LINK_CHANGE | diff --git a/target/linux/mvebu/patches-4.4/041-net-mvneta-The-mvneta_percpu_elect-function-should-b.patch b/target/linux/mvebu/patches-4.4/041-net-mvneta-The-mvneta_percpu_elect-function-should-b.patch new file mode 100644 index 0000000000..a77bef44e4 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/041-net-mvneta-The-mvneta_percpu_elect-function-should-b.patch @@ -0,0 +1,68 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Thu, 4 Feb 2016 22:09:28 +0100 +Subject: [PATCH] net: mvneta: The mvneta_percpu_elect function should be + atomic + +Electing a CPU must be done in an atomic way: it should be done after or +before the removal/insertion of a CPU and this function is not reentrant. + +During the loop of mvneta_percpu_elect we associate the queues with the +CPUs; if there is a topology change during this loop, then the mapping +between the CPUs and the queues could be wrong. During this loop the +interrupt mask is also updated for each CPU; it should not be changed +at the same time by other parts of the driver. + +This patch adds a spinlock to create the needed critical sections. + +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S.
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -370,6 +370,10 @@ struct mvneta_port { + struct net_device *dev; + struct notifier_block cpu_notifier; + int rxq_def; ++ /* Protect the access to the percpu interrupt registers, ++ * ensuring that the configuration remains coherent. ++ */ ++ spinlock_t lock; + + /* Core clock */ + struct clk *clk; +@@ -2853,6 +2857,12 @@ static void mvneta_percpu_elect(struct m + { + int elected_cpu = 0, max_cpu, cpu, i = 0; + ++ /* Electing a CPU must be done in an atomic way: it should be ++ * done after or before the removal/insertion of a CPU and ++ * this function is not reentrant. ++ */ ++ spin_lock(&pp->lock); ++ + /* Use the cpu associated to the rxq when it is online, in all + * the other cases, use the cpu 0 which can't be offline. + */ +@@ -2896,6 +2906,7 @@ static void mvneta_percpu_elect(struct m + i++; + + } ++ spin_unlock(&pp->lock); + }; + + static int mvneta_percpu_notifier(struct notifier_block *nfb, +@@ -2950,8 +2961,13 @@ static int mvneta_percpu_notifier(struct + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: + netif_tx_stop_all_queues(pp->dev); ++ /* Thanks to this lock we are sure that any pending ++ * cpu election is done ++ */ ++ spin_lock(&pp->lock); + /* Mask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); ++ spin_unlock(&pp->lock); + + napi_synchronize(&port->napi); + napi_disable(&port->napi); diff --git a/target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch b/target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch new file mode 100644 index 0000000000..9936ebf320 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch @@ -0,0 +1,128 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Thu, 4 Feb 2016 22:09:29 +0100 +Subject: [PATCH] net: mvneta: Fix race condition during stopping + +When stopping the port, the CPU notifier is still there whereas the +mvneta_stop_dev function calls mvneta_percpu_disable() on each CPU. +It was possible to have a new CPU coming up at this point, which could +be racy. + +This patch adds a flag preventing the execution of the notifier code for +a new CPU when the port is stopping. It also uses the spinlock introduced +previously. To avoid the deadlock, the lock has been moved outside the +mvneta_percpu_elect function. + +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -374,6 +374,7 @@ struct mvneta_port { + * ensuring that the configuration remains coherent. + */ + spinlock_t lock; ++ bool is_stopped; + + /* Core clock */ + struct clk *clk; +@@ -2853,16 +2854,14 @@ static void mvneta_percpu_disable(void * + disable_percpu_irq(pp->dev->irq); + } + ++/* Electing a CPU must be done in an atomic way: it should be done ++ * after or before the removal/insertion of a CPU and this function is ++ * not reentrant. ++ */ + static void mvneta_percpu_elect(struct mvneta_port *pp) + { + int elected_cpu = 0, max_cpu, cpu, i = 0; + +- /* Electing a CPU must be done in an atomic way: it should be +- * done after or before the removal/insertion of a CPU and +- * this function is not reentrant.
+- */ +- spin_lock(&pp->lock); +- + /* Use the cpu associated to the rxq when it is online, in all + * the other cases, use the cpu 0 which can't be offline. + */ +@@ -2906,7 +2905,6 @@ static void mvneta_percpu_elect(struct m + i++; + + } +- spin_unlock(&pp->lock); + }; + + static int mvneta_percpu_notifier(struct notifier_block *nfb, +@@ -2920,6 +2918,14 @@ static int mvneta_percpu_notifier(struct + switch (action) { + case CPU_ONLINE: + case CPU_ONLINE_FROZEN: ++ spin_lock(&pp->lock); ++ /* Configuring the driver for a new CPU while the ++ * driver is stopping is racy, so just avoid it. ++ */ ++ if (pp->is_stopped) { ++ spin_unlock(&pp->lock); ++ break; ++ } + netif_tx_stop_all_queues(pp->dev); + + /* We have to synchronise on tha napi of each CPU +@@ -2957,6 +2963,7 @@ static int mvneta_percpu_notifier(struct + MVNETA_CAUSE_LINK_CHANGE | + MVNETA_CAUSE_PSC_SYNC_CHANGE); + netif_tx_start_all_queues(pp->dev); ++ spin_unlock(&pp->lock); + break; + case CPU_DOWN_PREPARE: + case CPU_DOWN_PREPARE_FROZEN: +@@ -2981,7 +2988,9 @@ static int mvneta_percpu_notifier(struct + case CPU_DEAD: + case CPU_DEAD_FROZEN: + /* Check if a new CPU must be elected now this on is down */ ++ spin_lock(&pp->lock); + mvneta_percpu_elect(pp); ++ spin_unlock(&pp->lock); + /* Unmask all ethernet port interrupts */ + on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true); + mvreg_write(pp, MVNETA_INTR_MISC_MASK, +@@ -3033,7 +3042,7 @@ static int mvneta_open(struct net_device + */ + on_each_cpu(mvneta_percpu_enable, pp, true); + +- ++ pp->is_stopped = false; + /* Register a CPU notifier to handle the case where our CPU + * might be taken offline. + */ +@@ -3066,9 +3075,18 @@ static int mvneta_stop(struct net_device + { + struct mvneta_port *pp = netdev_priv(dev); + ++ /* Inform that we are stopping so we don't want to setup the ++ * driver for new CPUs in the notifiers ++ */ ++ spin_lock(&pp->lock); ++ pp->is_stopped = true; + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + unregister_cpu_notifier(&pp->cpu_notifier); ++ /* Now that the notifier are unregistered, we can release le ++ * lock ++ */ ++ spin_unlock(&pp->lock); + on_each_cpu(mvneta_percpu_disable, pp, true); + free_percpu_irq(dev->irq, pp->ports); + mvneta_cleanup_rxqs(pp); +@@ -3339,7 +3357,9 @@ static int mvneta_config_rss(struct mvn + mvreg_write(pp, MVNETA_PORT_CONFIG, val); + + /* Update the elected CPU matching the new rxq_def */ ++ spin_lock(&pp->lock); + mvneta_percpu_elect(pp); ++ spin_unlock(&pp->lock); + + /* We have to synchronise on the napi of each CPU */ + for_each_online_cpu(cpu) { diff --git a/target/linux/mvebu/patches-4.4/043-net-mvneta-sort-the-headers-in-alphabetic-order.patch b/target/linux/mvebu/patches-4.4/043-net-mvneta-sort-the-headers-in-alphabetic-order.patch new file mode 100644 index 0000000000..502c2584d9 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/043-net-mvneta-sort-the-headers-in-alphabetic-order.patch @@ -0,0 +1,56 @@ +From: Jisheng Zhang <jszhang@marvell.com> +Date: Wed, 20 Jan 2016 19:27:22 +0800 +Subject: [PATCH] net: mvneta: sort the headers in alphabetic order + +Sorting the headers in alphabetic order will help to reduce the conflict +when adding new headers in the future. + +Signed-off-by: Jisheng Zhang <jszhang@marvell.com> +Acked-by: Thomas Petazzoni <thomas.petazzoni@free-electrons.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -11,28 +11,28 @@ + * warranty of any kind, whether express or implied. + */ + +-#include <linux/kernel.h> +-#include <linux/netdevice.h> ++#include <linux/clk.h> ++#include <linux/cpu.h> + #include <linux/etherdevice.h> +-#include <linux/platform_device.h> +-#include <linux/skbuff.h> ++#include <linux/if_vlan.h> + #include <linux/inetdevice.h> +-#include <linux/mbus.h> +-#include <linux/module.h> + #include <linux/interrupt.h> +-#include <linux/if_vlan.h> +-#include <net/ip.h> +-#include <net/ipv6.h> + #include <linux/io.h> +-#include <net/tso.h> ++#include <linux/kernel.h> ++#include <linux/mbus.h> ++#include <linux/module.h> ++#include <linux/netdevice.h> + #include <linux/of.h> ++#include <linux/of_address.h> + #include <linux/of_irq.h> + #include <linux/of_mdio.h> + #include <linux/of_net.h> +-#include <linux/of_address.h> + #include <linux/phy.h> +-#include <linux/clk.h> +-#include <linux/cpu.h> ++#include <linux/platform_device.h> ++#include <linux/skbuff.h> ++#include <net/ip.h> ++#include <net/ipv6.h> ++#include <net/tso.h> + + /* Registers */ + #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) diff --git a/target/linux/mvebu/patches-4.4/044-net-add-a-hardware-buffer-management-helper-API.patch b/target/linux/mvebu/patches-4.4/044-net-add-a-hardware-buffer-management-helper-API.patch new file mode 100644 index 0000000000..d4bc6a0088 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/044-net-add-a-hardware-buffer-management-helper-API.patch @@ -0,0 +1,159 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Mon, 14 Mar 2016 09:39:04 +0100 +Subject: [PATCH] net: add a hardware buffer management helper API + +This basic implementation allows sharing code between drivers using +hardware buffer management. As the code is hardware-agnostic, there are +few helpers; most of the optimization brought by an HW BM has to be +done at driver level. + +Tested-by: Sebastian Careba <nitroshift@yahoo.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S.
Miller <davem@davemloft.net> +--- + create mode 100644 include/net/hwbm.h + create mode 100644 net/core/hwbm.c + +--- /dev/null ++++ b/include/net/hwbm.h +@@ -0,0 +1,28 @@ ++#ifndef _HWBM_H ++#define _HWBM_H ++ ++struct hwbm_pool { ++ /* Capacity of the pool */ ++ int size; ++ /* Size of the buffers managed */ ++ int frag_size; ++ /* Number of buffers currently used by this pool */ ++ int buf_num; ++ /* constructor called during alocation */ ++ int (*construct)(struct hwbm_pool *bm_pool, void *buf); ++ /* protect acces to the buffer counter*/ ++ spinlock_t lock; ++ /* private data */ ++ void *priv; ++}; ++#ifdef CONFIG_HWBM ++void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf); ++int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp); ++int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp); ++#else ++void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) {} ++int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) { return 0; } ++int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) ++{ return 0; } ++#endif /* CONFIG_HWBM */ ++#endif /* _HWBM_H */ +--- a/net/Kconfig ++++ b/net/Kconfig +@@ -259,6 +259,9 @@ config XPS + depends on SMP + default y + ++config HWBM ++ bool ++ + config CGROUP_NET_PRIO + bool "Network priority cgroup" + depends on CGROUPS +--- a/net/core/Makefile ++++ b/net/core/Makefile +@@ -14,6 +14,7 @@ obj-y += dev.o ethtool.o dev_addr_ + obj-$(CONFIG_SOCK_DIAG) += sock_diag.o + obj-$(CONFIG_XFRM) += flow.o + obj-y += net-sysfs.o ++obj-$(CONFIG_HWBM) += hwbm.o + obj-$(CONFIG_PROC_FS) += net-procfs.o + obj-$(CONFIG_NET_PKTGEN) += pktgen.o + obj-$(CONFIG_NETPOLL) += netpoll.o +--- /dev/null ++++ b/net/core/hwbm.c +@@ -0,0 +1,87 @@ ++/* Support for hardware buffer manager. ++ * ++ * Copyright (C) 2016 Marvell ++ * ++ * Gregory CLEMENT <gregory.clement@free-electrons.com> ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++#include <linux/kernel.h> ++#include <linux/printk.h> ++#include <linux/skbuff.h> ++#include <net/hwbm.h> ++ ++void hwbm_buf_free(struct hwbm_pool *bm_pool, void *buf) ++{ ++ if (likely(bm_pool->frag_size <= PAGE_SIZE)) ++ skb_free_frag(buf); ++ else ++ kfree(buf); ++} ++EXPORT_SYMBOL_GPL(hwbm_buf_free); ++ ++/* Refill processing for HW buffer management */ ++int hwbm_pool_refill(struct hwbm_pool *bm_pool, gfp_t gfp) ++{ ++ int frag_size = bm_pool->frag_size; ++ void *buf; ++ ++ if (likely(frag_size <= PAGE_SIZE)) ++ buf = netdev_alloc_frag(frag_size); ++ else ++ buf = kmalloc(frag_size, gfp); ++ ++ if (!buf) ++ return -ENOMEM; ++ ++ if (bm_pool->construct) ++ if (bm_pool->construct(bm_pool, buf)) { ++ hwbm_buf_free(bm_pool, buf); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(hwbm_pool_refill); ++ ++int hwbm_pool_add(struct hwbm_pool *bm_pool, unsigned int buf_num, gfp_t gfp) ++{ ++ int err, i; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&bm_pool->lock, flags); ++ if (bm_pool->buf_num == bm_pool->size) { ++ pr_warn("pool already filled\n"); ++ return bm_pool->buf_num; ++ } ++ ++ if (buf_num + bm_pool->buf_num > bm_pool->size) { ++ pr_warn("cannot allocate %d buffers for pool\n", ++ buf_num); ++ return 0; ++ } ++ ++ if ((buf_num + bm_pool->buf_num) < bm_pool->buf_num) { ++ pr_warn("Adding %d buffers to the %d current buffers will overflow\n", ++ buf_num, bm_pool->buf_num); ++ return 0; ++ } ++ ++ for (i = 0; i < buf_num; i++) { ++ err = hwbm_pool_refill(bm_pool, gfp); ++ if (err < 0) ++ break; ++ } ++ ++ /* Update BM driver with number of buffers added to pool */ ++ bm_pool->buf_num += i; ++ ++ pr_debug("hwpm pool: %d of %d buffers added\n", i, buf_num); ++ spin_unlock_irqrestore(&bm_pool->lock, flags); ++ ++ return i; ++} ++EXPORT_SYMBOL_GPL(hwbm_pool_add); diff --git a/target/linux/mvebu/patches-4.4/045-net-mvneta-bm-add-support-for-hardware-buffer-manage.patch b/target/linux/mvebu/patches-4.4/045-net-mvneta-bm-add-support-for-hardware-buffer-manage.patch new file mode 100644 index 0000000000..d343b3983f --- /dev/null +++ b/target/linux/mvebu/patches-4.4/045-net-mvneta-bm-add-support-for-hardware-buffer-manage.patch @@ -0,0 +1,1684 @@ +From: Marcin Wojtas <mw@semihalf.com> +Date: Mon, 14 Mar 2016 09:39:03 +0100 +Subject: [PATCH] net: mvneta: bm: add support for hardware buffer management + +Buffer manager (BM) is a dedicated hardware unit that can be used by all +ethernet ports of Armada XP and 38x SoC's. It allows offloading the CPU on +the RX path by sparing DRAM access on refilling the buffer pool, +hardware-based filling of descriptor ring data and better memory +utilization due to HW arbitration for using 'short' pools for small +packets. + +Tests performed with A388 SoC working as a network bridge between two +packet generators showed an increase of maximum processed 64B packets by +~20k (~555k packets with BM enabled vs ~535k packets without BM). Also +when pushing 1500B-packets with a line rate achieved, CPU load decreased +from around 25% without BM to 20% with BM. + +BM comprises up to 4 buffer pointers' (BP) rings kept in DRAM, which +are called external BP pools - BPPE. Allocating and releasing buffer +pointers (BP) to/from BPPE is performed indirectly by write/read access +to a dedicated internal SRAM, where internal BP pools (BPPI) are placed. +BM hardware controls the status of BPPE automatically, as well as assigning +proper buffers to RX descriptors. For more details please refer to the +Functional Specification of Armada XP or 38x SoC.
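[A minimal sketch of the indirect BPPI access described above; the helper names and the one-32-bit-slot-per-pool layout are assumptions for illustration, not code from this patch. Reading a pool's slot in the BM SRAM window pops a free buffer pointer; writing pushes one back:]

#include <linux/io.h>
#include <linux/types.h>

/* Hypothetical accessors for the BM internal SRAM (BPPI) window */
static dma_addr_t bppi_get_bp(void __iomem *bppi_base, u8 pool_id)
{
	/* a read allocates: returns the next free buffer pointer */
	return (dma_addr_t)readl(bppi_base + (pool_id << 2));
}

static void bppi_put_bp(void __iomem *bppi_base, u8 pool_id,
			dma_addr_t buf_phys_addr)
{
	/* a write releases: returns the buffer pointer to the pool */
	writel((u32)buf_phys_addr, bppi_base + (pool_id << 2));
}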
+ +In order to enable support for a separate hardware block, common for all +ports, a new driver has to be implemented ('mvneta_bm'). It provides the +initialization sequence of address space, clocks, registers, SRAM and +empty pools' structures, and also obtains optional configuration +from DT (please refer to the device tree binding documentation). mvneta_bm +also exposes a necessary API to the mvneta driver, as well as a dedicated +structure with BM information (bm_priv), whose presence is used as a +flag notifying of BM usage by a port. It has to be ensured that the +mvneta_bm probe is executed prior to the ones in the ports' driver. In case +BM is not used or its probe fails, mvneta falls back to using software +buffer management. + +The sequence executed in the mvneta_probe function is modified in order to +have access to the needed resources before a port's possible BM +initialization is done. According to the port-pools mapping provided by DT, +the appropriate registers are configured and the buffer pools are filled. +The RX path is modified accordingly. Because the hardware allows a wide +variety of configuration options, the following assumptions are made: +* using BM mechanisms can be selectively disabled/enabled based + on DT configuration among the ports +* 'long' pool's single buffer size is tied to port's MTU +* using 'long' pool by port is obligatory and it cannot be shared +* using 'short' pool for smaller packets is optional +* one 'short' pool can be shared among all ports + +This commit enables hardware buffer management operation cooperating with +the existing mvneta driver. New device tree binding documentation is added +and that of mvneta is updated accordingly. + +[gregory.clement@free-electrons.com: removed the suspend/resume part] + +Signed-off-by: Marcin Wojtas <mw@semihalf.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + create mode 100644 Documentation/devicetree/bindings/net/marvell-neta-bm.txt + create mode 100644 drivers/net/ethernet/marvell/mvneta_bm.c + create mode 100644 drivers/net/ethernet/marvell/mvneta_bm.h + +--- a/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt ++++ b/Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt +@@ -13,15 +13,30 @@ Optional properties: + Value is presented in bytes. If not used, by default 1600B is set for + "marvell,armada-370-neta" and 9800B for others. + ++Optional properties (valid only for Armada XP/38x): ++ ++- buffer-manager: a phandle to a buffer manager node. Please refer to ++ Documentation/devicetree/bindings/net/marvell-neta-bm.txt ++- bm,pool-long: ID of a pool, that will accept all packets of a size ++ higher than 'short' pool's threshold (if set) and up to MTU value. ++ Obligatory, when the port is supposed to use hardware ++ buffer management. ++- bm,pool-short: ID of a pool, that will be used for accepting ++ packets of a size lower than given threshold. If not set, the port ++ will use a single 'long' pool for all packets, as defined above.
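[For illustration, a condensed C sketch of how a port driver consumes the two properties above, mirroring the mvneta_bm_port_init hunk later in this commit; the wrapper function name is made up:]

#include <linux/of.h>
#include <linux/errno.h>

/* bm,pool-long is obligatory for a BM-enabled port; if bm,pool-short
 * is absent the port falls back to a single pool for all packets.
 */
static int bm_pool_ids_from_dt(struct device_node *dn,
			       u32 *long_pool_id, u32 *short_pool_id)
{
	if (of_property_read_u32(dn, "bm,pool-long", long_pool_id))
		return -EINVAL;	/* port cannot use hardware BM */

	if (of_property_read_u32(dn, "bm,pool-short", short_pool_id))
		*short_pool_id = *long_pool_id;

	return 0;
}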
++ + Example: + +-ethernet@d0070000 { ++ethernet@70000 { + compatible = "marvell,armada-370-neta"; +- reg = <0xd0070000 0x2500>; ++ reg = <0x70000 0x2500>; + interrupts = <8>; + clocks = <&gate_clk 4>; + tx-csum-limit = <9800> + status = "okay"; + phy = <&phy0>; + phy-mode = "rgmii-id"; ++ buffer-manager = <&bm>; ++ bm,pool-long = <0>; ++ bm,pool-short = <1>; + }; +--- /dev/null ++++ b/Documentation/devicetree/bindings/net/marvell-neta-bm.txt +@@ -0,0 +1,49 @@ ++* Marvell Armada 380/XP Buffer Manager driver (BM) ++ ++Required properties: ++ ++- compatible: should be "marvell,armada-380-neta-bm". ++- reg: address and length of the register set for the device. ++- clocks: a pointer to the reference clock for this device. ++- internal-mem: a phandle to BM internal SRAM definition. ++ ++Optional properties (port): ++ ++- pool<0 : 3>,capacity: size of external buffer pointers' ring maintained ++ in DRAM. Can be set for each pool (id 0 : 3) separately. The value has ++ to be chosen between 128 and 16352 and it also has to be aligned to 32. ++ Otherwise the driver would adjust a given number or choose default if ++ not set. ++- pool<0 : 3>,pkt-size: maximum size of a packet accepted by a given buffer ++ pointers' pool (id 0 : 3). It will be taken into consideration only when pool ++ type is 'short'. For 'long' ones it would be overridden by port's MTU. ++ If not set a driver will choose a default value. ++ ++In order to see how to hook the BM to a given ethernet port, please ++refer to Documentation/devicetree/bindings/net/marvell-armada-370-neta.txt. ++ ++Example: ++ ++- main node: ++ ++bm: bm@c8000 { ++ compatible = "marvell,armada-380-neta-bm"; ++ reg = <0xc8000 0xac>; ++ clocks = <&gateclk 13>; ++ internal-mem = <&bm_bppi>; ++ status = "okay"; ++ pool2,capacity = <4096>; ++ pool1,pkt-size = <512>; ++}; ++ ++- internal SRAM node: ++ ++bm_bppi: bm-bppi { ++ compatible = "mmio-sram"; ++ reg = <MBUS_ID(0x0c, 0x04) 0 0x100000>; ++ ranges = <0 MBUS_ID(0x0c, 0x04) 0 0x100000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ clocks = <&gateclk 13>; ++ status = "okay"; ++}; +--- a/drivers/net/ethernet/marvell/Kconfig ++++ b/drivers/net/ethernet/marvell/Kconfig +@@ -40,6 +40,19 @@ config MVMDIO + + This driver is used by the MV643XX_ETH and MVNETA drivers. + ++config MVNETA_BM ++ tristate "Marvell Armada 38x/XP network interface BM support" ++ depends on MVNETA ++ ---help--- ++ This driver supports auxiliary block of the network ++ interface units in the Marvell ARMADA XP and ARMADA 38x SoC ++ family, which is called buffer manager. ++ ++ This driver, when enabled, strictly cooperates with mvneta ++ driver and is common for all network ports of the devices, ++ even for Armada 370 SoC, which doesn't support hardware ++ buffer management. 
++ + config MVNETA + tristate "Marvell Armada 370/38x/XP network interface support" + depends on PLAT_ORION +--- a/drivers/net/ethernet/marvell/Makefile ++++ b/drivers/net/ethernet/marvell/Makefile +@@ -4,6 +4,7 @@ + + obj-$(CONFIG_MVMDIO) += mvmdio.o + obj-$(CONFIG_MV643XX_ETH) += mv643xx_eth.o ++obj-$(CONFIG_MVNETA_BM) += mvneta_bm.o + obj-$(CONFIG_MVNETA) += mvneta.o + obj-$(CONFIG_MVPP2) += mvpp2.o + obj-$(CONFIG_PXA168_ETH) += pxa168_eth.o +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -30,6 +30,7 @@ + #include <linux/phy.h> + #include <linux/platform_device.h> + #include <linux/skbuff.h> ++#include "mvneta_bm.h" + #include <net/ip.h> + #include <net/ipv6.h> + #include <net/tso.h> +@@ -37,6 +38,10 @@ + /* Registers */ + #define MVNETA_RXQ_CONFIG_REG(q) (0x1400 + ((q) << 2)) + #define MVNETA_RXQ_HW_BUF_ALLOC BIT(0) ++#define MVNETA_RXQ_SHORT_POOL_ID_SHIFT 4 ++#define MVNETA_RXQ_SHORT_POOL_ID_MASK 0x30 ++#define MVNETA_RXQ_LONG_POOL_ID_SHIFT 6 ++#define MVNETA_RXQ_LONG_POOL_ID_MASK 0xc0 + #define MVNETA_RXQ_PKT_OFFSET_ALL_MASK (0xf << 8) + #define MVNETA_RXQ_PKT_OFFSET_MASK(offs) ((offs) << 8) + #define MVNETA_RXQ_THRESHOLD_REG(q) (0x14c0 + ((q) << 2)) +@@ -50,6 +55,9 @@ + #define MVNETA_RXQ_STATUS_UPDATE_REG(q) (0x1500 + ((q) << 2)) + #define MVNETA_RXQ_ADD_NON_OCCUPIED_SHIFT 16 + #define MVNETA_RXQ_ADD_NON_OCCUPIED_MAX 255 ++#define MVNETA_PORT_POOL_BUFFER_SZ_REG(pool) (0x1700 + ((pool) << 2)) ++#define MVNETA_PORT_POOL_BUFFER_SZ_SHIFT 3 ++#define MVNETA_PORT_POOL_BUFFER_SZ_MASK 0xfff8 + #define MVNETA_PORT_RX_RESET 0x1cc0 + #define MVNETA_PORT_RX_DMA_RESET BIT(0) + #define MVNETA_PHY_ADDR 0x2000 +@@ -107,6 +115,7 @@ + #define MVNETA_GMAC_CLOCK_DIVIDER 0x24f4 + #define MVNETA_GMAC_1MS_CLOCK_ENABLE BIT(31) + #define MVNETA_ACC_MODE 0x2500 ++#define MVNETA_BM_ADDRESS 0x2504 + #define MVNETA_CPU_MAP(cpu) (0x2540 + ((cpu) << 2)) + #define MVNETA_CPU_RXQ_ACCESS_ALL_MASK 0x000000ff + #define MVNETA_CPU_TXQ_ACCESS_ALL_MASK 0x0000ff00 +@@ -253,7 +262,10 @@ + #define MVNETA_CPU_D_CACHE_LINE_SIZE 32 + #define MVNETA_TX_CSUM_DEF_SIZE 1600 + #define MVNETA_TX_CSUM_MAX_SIZE 9800 +-#define MVNETA_ACC_MODE_EXT 1 ++#define MVNETA_ACC_MODE_EXT1 1 ++#define MVNETA_ACC_MODE_EXT2 2 ++ ++#define MVNETA_MAX_DECODE_WIN 6 + + /* Timeout constants */ + #define MVNETA_TX_DISABLE_TIMEOUT_MSEC 1000 +@@ -293,7 +305,8 @@ + ((addr >= txq->tso_hdrs_phys) && \ + (addr < txq->tso_hdrs_phys + txq->size * TSO_HEADER_SIZE)) + +-#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) ++#define MVNETA_RX_GET_BM_POOL_ID(rxd) \ ++ (((rxd)->status & MVNETA_RXD_BM_POOL_MASK) >> MVNETA_RXD_BM_POOL_SHIFT) + + struct mvneta_statistic { + unsigned short offset; +@@ -359,6 +372,7 @@ struct mvneta_pcpu_port { + }; + + struct mvneta_port { ++ u8 id; + struct mvneta_pcpu_port __percpu *ports; + struct mvneta_pcpu_stats __percpu *stats; + +@@ -392,6 +406,11 @@ struct mvneta_port { + unsigned int tx_csum_limit; + unsigned int use_inband_status:1; + ++ struct mvneta_bm *bm_priv; ++ struct mvneta_bm_pool *pool_long; ++ struct mvneta_bm_pool *pool_short; ++ int bm_win_id; ++ + u64 ethtool_stats[ARRAY_SIZE(mvneta_statistics)]; + + u32 indir[MVNETA_RSS_LU_TABLE_SIZE]; +@@ -417,6 +436,8 @@ struct mvneta_port { + #define MVNETA_TX_L4_CSUM_NOT BIT(31) + + #define MVNETA_RXD_ERR_CRC 0x0 ++#define MVNETA_RXD_BM_POOL_SHIFT 13 ++#define MVNETA_RXD_BM_POOL_MASK (BIT(13) | BIT(14)) + #define MVNETA_RXD_ERR_SUMMARY BIT(16) + #define MVNETA_RXD_ERR_OVERRUN BIT(17) + #define MVNETA_RXD_ERR_LEN 
BIT(18) +@@ -561,6 +582,9 @@ static int rxq_def; + + static int rx_copybreak __read_mostly = 256; + ++/* HW BM need that each port be identify by a unique ID */ ++static int global_port_id; ++ + #define MVNETA_DRIVER_NAME "mvneta" + #define MVNETA_DRIVER_VERSION "1.0" + +@@ -827,6 +851,214 @@ static void mvneta_rxq_bm_disable(struct + mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); + } + ++/* Enable buffer management (BM) */ ++static void mvneta_rxq_bm_enable(struct mvneta_port *pp, ++ struct mvneta_rx_queue *rxq) ++{ ++ u32 val; ++ ++ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); ++ val |= MVNETA_RXQ_HW_BUF_ALLOC; ++ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); ++} ++ ++/* Notify HW about port's assignment of pool for bigger packets */ ++static void mvneta_rxq_long_pool_set(struct mvneta_port *pp, ++ struct mvneta_rx_queue *rxq) ++{ ++ u32 val; ++ ++ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); ++ val &= ~MVNETA_RXQ_LONG_POOL_ID_MASK; ++ val |= (pp->pool_long->id << MVNETA_RXQ_LONG_POOL_ID_SHIFT); ++ ++ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); ++} ++ ++/* Notify HW about port's assignment of pool for smaller packets */ ++static void mvneta_rxq_short_pool_set(struct mvneta_port *pp, ++ struct mvneta_rx_queue *rxq) ++{ ++ u32 val; ++ ++ val = mvreg_read(pp, MVNETA_RXQ_CONFIG_REG(rxq->id)); ++ val &= ~MVNETA_RXQ_SHORT_POOL_ID_MASK; ++ val |= (pp->pool_short->id << MVNETA_RXQ_SHORT_POOL_ID_SHIFT); ++ ++ mvreg_write(pp, MVNETA_RXQ_CONFIG_REG(rxq->id), val); ++} ++ ++/* Set port's receive buffer size for assigned BM pool */ ++static inline void mvneta_bm_pool_bufsize_set(struct mvneta_port *pp, ++ int buf_size, ++ u8 pool_id) ++{ ++ u32 val; ++ ++ if (!IS_ALIGNED(buf_size, 8)) { ++ dev_warn(pp->dev->dev.parent, ++ "illegal buf_size value %d, round to %d\n", ++ buf_size, ALIGN(buf_size, 8)); ++ buf_size = ALIGN(buf_size, 8); ++ } ++ ++ val = mvreg_read(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id)); ++ val |= buf_size & MVNETA_PORT_POOL_BUFFER_SZ_MASK; ++ mvreg_write(pp, MVNETA_PORT_POOL_BUFFER_SZ_REG(pool_id), val); ++} ++ ++/* Configure MBUS window in order to enable access BM internal SRAM */ ++static int mvneta_mbus_io_win_set(struct mvneta_port *pp, u32 base, u32 wsize, ++ u8 target, u8 attr) ++{ ++ u32 win_enable, win_protect; ++ int i; ++ ++ win_enable = mvreg_read(pp, MVNETA_BASE_ADDR_ENABLE); ++ ++ if (pp->bm_win_id < 0) { ++ /* Find first not occupied window */ ++ for (i = 0; i < MVNETA_MAX_DECODE_WIN; i++) { ++ if (win_enable & (1 << i)) { ++ pp->bm_win_id = i; ++ break; ++ } ++ } ++ if (i == MVNETA_MAX_DECODE_WIN) ++ return -ENOMEM; ++ } else { ++ i = pp->bm_win_id; ++ } ++ ++ mvreg_write(pp, MVNETA_WIN_BASE(i), 0); ++ mvreg_write(pp, MVNETA_WIN_SIZE(i), 0); ++ ++ if (i < 4) ++ mvreg_write(pp, MVNETA_WIN_REMAP(i), 0); ++ ++ mvreg_write(pp, MVNETA_WIN_BASE(i), (base & 0xffff0000) | ++ (attr << 8) | target); ++ ++ mvreg_write(pp, MVNETA_WIN_SIZE(i), (wsize - 1) & 0xffff0000); ++ ++ win_protect = mvreg_read(pp, MVNETA_ACCESS_PROTECT_ENABLE); ++ win_protect |= 3 << (2 * i); ++ mvreg_write(pp, MVNETA_ACCESS_PROTECT_ENABLE, win_protect); ++ ++ win_enable &= ~(1 << i); ++ mvreg_write(pp, MVNETA_BASE_ADDR_ENABLE, win_enable); ++ ++ return 0; ++} ++ ++/* Assign and initialize pools for port. In case of fail ++ * buffer manager will remain disabled for current port. 
++ */ ++static int mvneta_bm_port_init(struct platform_device *pdev, ++ struct mvneta_port *pp) ++{ ++ struct device_node *dn = pdev->dev.of_node; ++ u32 long_pool_id, short_pool_id, wsize; ++ u8 target, attr; ++ int err; ++ ++ /* Get BM window information */ ++ err = mvebu_mbus_get_io_win_info(pp->bm_priv->bppi_phys_addr, &wsize, ++ &target, &attr); ++ if (err < 0) ++ return err; ++ ++ pp->bm_win_id = -1; ++ ++ /* Open NETA -> BM window */ ++ err = mvneta_mbus_io_win_set(pp, pp->bm_priv->bppi_phys_addr, wsize, ++ target, attr); ++ if (err < 0) { ++ netdev_info(pp->dev, "fail to configure mbus window to BM\n"); ++ return err; ++ } ++ ++ if (of_property_read_u32(dn, "bm,pool-long", &long_pool_id)) { ++ netdev_info(pp->dev, "missing long pool id\n"); ++ return -EINVAL; ++ } ++ ++ /* Create port's long pool depending on mtu */ ++ pp->pool_long = mvneta_bm_pool_use(pp->bm_priv, long_pool_id, ++ MVNETA_BM_LONG, pp->id, ++ MVNETA_RX_PKT_SIZE(pp->dev->mtu)); ++ if (!pp->pool_long) { ++ netdev_info(pp->dev, "fail to obtain long pool for port\n"); ++ return -ENOMEM; ++ } ++ ++ pp->pool_long->port_map |= 1 << pp->id; ++ ++ mvneta_bm_pool_bufsize_set(pp, pp->pool_long->buf_size, ++ pp->pool_long->id); ++ ++ /* If short pool id is not defined, assume using single pool */ ++ if (of_property_read_u32(dn, "bm,pool-short", &short_pool_id)) ++ short_pool_id = long_pool_id; ++ ++ /* Create port's short pool */ ++ pp->pool_short = mvneta_bm_pool_use(pp->bm_priv, short_pool_id, ++ MVNETA_BM_SHORT, pp->id, ++ MVNETA_BM_SHORT_PKT_SIZE); ++ if (!pp->pool_short) { ++ netdev_info(pp->dev, "fail to obtain short pool for port\n"); ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); ++ return -ENOMEM; ++ } ++ ++ if (short_pool_id != long_pool_id) { ++ pp->pool_short->port_map |= 1 << pp->id; ++ mvneta_bm_pool_bufsize_set(pp, pp->pool_short->buf_size, ++ pp->pool_short->id); ++ } ++ ++ return 0; ++} ++ ++/* Update settings of a pool for bigger packets */ ++static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) ++{ ++ struct mvneta_bm_pool *bm_pool = pp->pool_long; ++ int num; ++ ++ /* Release all buffers from long pool */ ++ mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); ++ if (bm_pool->buf_num) { ++ WARN(1, "cannot free all buffers in pool %d\n", ++ bm_pool->id); ++ goto bm_mtu_err; ++ } ++ ++ bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); ++ bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); ++ bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + ++ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); ++ ++ /* Fill entire long pool */ ++ num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size); ++ if (num != bm_pool->size) { ++ WARN(1, "pool %d: %d of %d allocated\n", ++ bm_pool->id, num, bm_pool->size); ++ goto bm_mtu_err; ++ } ++ mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); ++ ++ return; ++ ++bm_mtu_err: ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); ++ ++ pp->bm_priv = NULL; ++ mvreg_write(pp, MVNETA_ACC_MODE, MVNETA_ACC_MODE_EXT1); ++ netdev_info(pp->dev, "fail to update MTU, fall back to software BM\n"); ++} ++ + /* Start the Ethernet port RX and TX activity */ + static void mvneta_port_up(struct mvneta_port *pp) + { +@@ -1147,9 +1379,17 @@ static void mvneta_defaults_set(struct m + mvreg_write(pp, MVNETA_PORT_RX_RESET, 0); + + /* Set Port Acceleration Mode */ +- val = MVNETA_ACC_MODE_EXT; ++ if (pp->bm_priv) ++ /* HW buffer management + 
legacy parser */ ++ val = MVNETA_ACC_MODE_EXT2; ++ else ++ /* SW buffer management + legacy parser */ ++ val = MVNETA_ACC_MODE_EXT1; + mvreg_write(pp, MVNETA_ACC_MODE, val); + ++ if (pp->bm_priv) ++ mvreg_write(pp, MVNETA_BM_ADDRESS, pp->bm_priv->bppi_phys_addr); ++ + /* Update val of portCfg register accordingly with all RxQueue types */ + val = MVNETA_PORT_CONFIG_DEFL_VALUE(pp->rxq_def); + mvreg_write(pp, MVNETA_PORT_CONFIG, val); +@@ -1516,23 +1756,25 @@ static void mvneta_txq_done(struct mvnet + } + } + +-static void *mvneta_frag_alloc(const struct mvneta_port *pp) ++void *mvneta_frag_alloc(unsigned int frag_size) + { +- if (likely(pp->frag_size <= PAGE_SIZE)) +- return netdev_alloc_frag(pp->frag_size); ++ if (likely(frag_size <= PAGE_SIZE)) ++ return netdev_alloc_frag(frag_size); + else +- return kmalloc(pp->frag_size, GFP_ATOMIC); ++ return kmalloc(frag_size, GFP_ATOMIC); + } ++EXPORT_SYMBOL_GPL(mvneta_frag_alloc); + +-static void mvneta_frag_free(const struct mvneta_port *pp, void *data) ++void mvneta_frag_free(unsigned int frag_size, void *data) + { +- if (likely(pp->frag_size <= PAGE_SIZE)) ++ if (likely(frag_size <= PAGE_SIZE)) + skb_free_frag(data); + else + kfree(data); + } ++EXPORT_SYMBOL_GPL(mvneta_frag_free); + +-/* Refill processing */ ++/* Refill processing for SW buffer management */ + static int mvneta_rx_refill(struct mvneta_port *pp, + struct mvneta_rx_desc *rx_desc) + +@@ -1540,7 +1782,7 @@ static int mvneta_rx_refill(struct mvnet + dma_addr_t phys_addr; + void *data; + +- data = mvneta_frag_alloc(pp); ++ data = mvneta_frag_alloc(pp->frag_size); + if (!data) + return -ENOMEM; + +@@ -1548,7 +1790,7 @@ static int mvneta_rx_refill(struct mvnet + MVNETA_RX_BUF_SIZE(pp->pkt_size), + DMA_FROM_DEVICE); + if (unlikely(dma_mapping_error(pp->dev->dev.parent, phys_addr))) { +- mvneta_frag_free(pp, data); ++ mvneta_frag_free(pp->frag_size, data); + return -ENOMEM; + } + +@@ -1594,22 +1836,156 @@ static void mvneta_rxq_drop_pkts(struct + int rx_done, i; + + rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); ++ if (rx_done) ++ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); ++ ++ if (pp->bm_priv) { ++ for (i = 0; i < rx_done; i++) { ++ struct mvneta_rx_desc *rx_desc = ++ mvneta_rxq_next_desc_get(rxq); ++ u8 pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); ++ struct mvneta_bm_pool *bm_pool; ++ ++ bm_pool = &pp->bm_priv->bm_pools[pool_id]; ++ /* Return dropped buffer to the pool */ ++ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, ++ rx_desc->buf_phys_addr); ++ } ++ return; ++ } ++ + for (i = 0; i < rxq->size; i++) { + struct mvneta_rx_desc *rx_desc = rxq->descs + i; + void *data = (void *)rx_desc->buf_cookie; + + dma_unmap_single(pp->dev->dev.parent, rx_desc->buf_phys_addr, + MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); +- mvneta_frag_free(pp, data); ++ mvneta_frag_free(pp->frag_size, data); + } ++} + +- if (rx_done) +- mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); ++/* Main rx processing when using software buffer management */ ++static int mvneta_rx_swbm(struct mvneta_port *pp, int rx_todo, ++ struct mvneta_rx_queue *rxq) ++{ ++ struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); ++ struct net_device *dev = pp->dev; ++ int rx_done; ++ u32 rcvd_pkts = 0; ++ u32 rcvd_bytes = 0; ++ ++ /* Get number of received packets */ ++ rx_done = mvneta_rxq_busy_desc_num_get(pp, rxq); ++ ++ if (rx_todo > rx_done) ++ rx_todo = rx_done; ++ ++ rx_done = 0; ++ ++ /* Fairness NAPI loop */ ++ while (rx_done < rx_todo) { ++ struct mvneta_rx_desc *rx_desc = 
mvneta_rxq_next_desc_get(rxq); ++ struct sk_buff *skb; ++ unsigned char *data; ++ dma_addr_t phys_addr; ++ u32 rx_status, frag_size; ++ int rx_bytes, err; ++ ++ rx_done++; ++ rx_status = rx_desc->status; ++ rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); ++ data = (unsigned char *)rx_desc->buf_cookie; ++ phys_addr = rx_desc->buf_phys_addr; ++ ++ if (!mvneta_rxq_desc_is_first_last(rx_status) || ++ (rx_status & MVNETA_RXD_ERR_SUMMARY)) { ++err_drop_frame: ++ dev->stats.rx_errors++; ++ mvneta_rx_error(pp, rx_desc); ++ /* leave the descriptor untouched */ ++ continue; ++ } ++ ++ if (rx_bytes <= rx_copybreak) { ++ /* better copy a small frame and not unmap the DMA region */ ++ skb = netdev_alloc_skb_ip_align(dev, rx_bytes); ++ if (unlikely(!skb)) ++ goto err_drop_frame; ++ ++ dma_sync_single_range_for_cpu(dev->dev.parent, ++ rx_desc->buf_phys_addr, ++ MVNETA_MH_SIZE + NET_SKB_PAD, ++ rx_bytes, ++ DMA_FROM_DEVICE); ++ memcpy(skb_put(skb, rx_bytes), ++ data + MVNETA_MH_SIZE + NET_SKB_PAD, ++ rx_bytes); ++ ++ skb->protocol = eth_type_trans(skb, dev); ++ mvneta_rx_csum(pp, rx_status, skb); ++ napi_gro_receive(&port->napi, skb); ++ ++ rcvd_pkts++; ++ rcvd_bytes += rx_bytes; ++ ++ /* leave the descriptor and buffer untouched */ ++ continue; ++ } ++ ++ /* Refill processing */ ++ err = mvneta_rx_refill(pp, rx_desc); ++ if (err) { ++ netdev_err(dev, "Linux processing - Can't refill\n"); ++ rxq->missed++; ++ goto err_drop_frame; ++ } ++ ++ frag_size = pp->frag_size; ++ ++ skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); ++ ++ /* After refill old buffer has to be unmapped regardless ++ * the skb is successfully built or not. ++ */ ++ dma_unmap_single(dev->dev.parent, phys_addr, ++ MVNETA_RX_BUF_SIZE(pp->pkt_size), ++ DMA_FROM_DEVICE); ++ ++ if (!skb) ++ goto err_drop_frame; ++ ++ rcvd_pkts++; ++ rcvd_bytes += rx_bytes; ++ ++ /* Linux processing */ ++ skb_reserve(skb, MVNETA_MH_SIZE + NET_SKB_PAD); ++ skb_put(skb, rx_bytes); ++ ++ skb->protocol = eth_type_trans(skb, dev); ++ ++ mvneta_rx_csum(pp, rx_status, skb); ++ ++ napi_gro_receive(&port->napi, skb); ++ } ++ ++ if (rcvd_pkts) { ++ struct mvneta_pcpu_stats *stats = this_cpu_ptr(pp->stats); ++ ++ u64_stats_update_begin(&stats->syncp); ++ stats->rx_packets += rcvd_pkts; ++ stats->rx_bytes += rcvd_bytes; ++ u64_stats_update_end(&stats->syncp); ++ } ++ ++ /* Update rxq management counters */ ++ mvneta_rxq_desc_num_update(pp, rxq, rx_done, rx_done); ++ ++ return rx_done; + } + +-/* Main rx processing */ +-static int mvneta_rx(struct mvneta_port *pp, int rx_todo, +- struct mvneta_rx_queue *rxq) ++/* Main rx processing when using hardware buffer management */ ++static int mvneta_rx_hwbm(struct mvneta_port *pp, int rx_todo, ++ struct mvneta_rx_queue *rxq) + { + struct mvneta_pcpu_port *port = this_cpu_ptr(pp->ports); + struct net_device *dev = pp->dev; +@@ -1628,21 +2004,29 @@ static int mvneta_rx(struct mvneta_port + /* Fairness NAPI loop */ + while (rx_done < rx_todo) { + struct mvneta_rx_desc *rx_desc = mvneta_rxq_next_desc_get(rxq); ++ struct mvneta_bm_pool *bm_pool = NULL; + struct sk_buff *skb; + unsigned char *data; + dma_addr_t phys_addr; +- u32 rx_status; ++ u32 rx_status, frag_size; + int rx_bytes, err; ++ u8 pool_id; + + rx_done++; + rx_status = rx_desc->status; + rx_bytes = rx_desc->data_size - (ETH_FCS_LEN + MVNETA_MH_SIZE); + data = (unsigned char *)rx_desc->buf_cookie; + phys_addr = rx_desc->buf_phys_addr; ++ pool_id = MVNETA_RX_GET_BM_POOL_ID(rx_desc); ++ bm_pool = &pp->bm_priv->bm_pools[pool_id]; + + if 
(!mvneta_rxq_desc_is_first_last(rx_status) || + (rx_status & MVNETA_RXD_ERR_SUMMARY)) { +- err_drop_frame: ++err_drop_frame_ret_pool: ++ /* Return the buffer to the pool */ ++ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, ++ rx_desc->buf_phys_addr); ++err_drop_frame: + dev->stats.rx_errors++; + mvneta_rx_error(pp, rx_desc); + /* leave the descriptor untouched */ +@@ -1653,7 +2037,7 @@ static int mvneta_rx(struct mvneta_port + /* better copy a small frame and not unmap the DMA region */ + skb = netdev_alloc_skb_ip_align(dev, rx_bytes); + if (unlikely(!skb)) +- goto err_drop_frame; ++ goto err_drop_frame_ret_pool; + + dma_sync_single_range_for_cpu(dev->dev.parent, + rx_desc->buf_phys_addr, +@@ -1671,26 +2055,31 @@ static int mvneta_rx(struct mvneta_port + rcvd_pkts++; + rcvd_bytes += rx_bytes; + ++ /* Return the buffer to the pool */ ++ mvneta_bm_pool_put_bp(pp->bm_priv, bm_pool, ++ rx_desc->buf_phys_addr); ++ + /* leave the descriptor and buffer untouched */ + continue; + } + + /* Refill processing */ +- err = mvneta_rx_refill(pp, rx_desc); ++ err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; +- goto err_drop_frame; ++ goto err_drop_frame_ret_pool; + } + +- skb = build_skb(data, pp->frag_size > PAGE_SIZE ? 0 : pp->frag_size); ++ frag_size = bm_pool->frag_size; ++ ++ skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); + + /* After refill old buffer has to be unmapped regardless + * the skb is successfully built or not. + */ +- dma_unmap_single(dev->dev.parent, phys_addr, +- MVNETA_RX_BUF_SIZE(pp->pkt_size), DMA_FROM_DEVICE); +- ++ dma_unmap_single(&pp->bm_priv->pdev->dev, phys_addr, ++ bm_pool->buf_size, DMA_FROM_DEVICE); + if (!skb) + goto err_drop_frame; + +@@ -2295,7 +2684,10 @@ static int mvneta_poll(struct napi_struc + + if (rx_queue) { + rx_queue = rx_queue - 1; +- rx_done = mvneta_rx(pp, budget, &pp->rxqs[rx_queue]); ++ if (pp->bm_priv) ++ rx_done = mvneta_rx_hwbm(pp, budget, &pp->rxqs[rx_queue]); ++ else ++ rx_done = mvneta_rx_swbm(pp, budget, &pp->rxqs[rx_queue]); + } + + budget -= rx_done; +@@ -2384,9 +2776,17 @@ static int mvneta_rxq_init(struct mvneta + mvneta_rx_pkts_coal_set(pp, rxq, rxq->pkts_coal); + mvneta_rx_time_coal_set(pp, rxq, rxq->time_coal); + +- /* Fill RXQ with buffers from RX pool */ +- mvneta_rxq_buf_size_set(pp, rxq, MVNETA_RX_BUF_SIZE(pp->pkt_size)); +- mvneta_rxq_bm_disable(pp, rxq); ++ if (!pp->bm_priv) { ++ /* Fill RXQ with buffers from RX pool */ ++ mvneta_rxq_buf_size_set(pp, rxq, ++ MVNETA_RX_BUF_SIZE(pp->pkt_size)); ++ mvneta_rxq_bm_disable(pp, rxq); ++ } else { ++ mvneta_rxq_bm_enable(pp, rxq); ++ mvneta_rxq_long_pool_set(pp, rxq); ++ mvneta_rxq_short_pool_set(pp, rxq); ++ } ++ + mvneta_rxq_fill(pp, rxq, rxq->size); + + return 0; +@@ -2659,6 +3059,9 @@ static int mvneta_change_mtu(struct net_ + dev->mtu = mtu; + + if (!netif_running(dev)) { ++ if (pp->bm_priv) ++ mvneta_bm_update_mtu(pp, mtu); ++ + netdev_update_features(dev); + return 0; + } +@@ -2671,6 +3074,9 @@ static int mvneta_change_mtu(struct net_ + mvneta_cleanup_txqs(pp); + mvneta_cleanup_rxqs(pp); + ++ if (pp->bm_priv) ++ mvneta_bm_update_mtu(pp, mtu); ++ + pp->pkt_size = MVNETA_RX_PKT_SIZE(dev->mtu); + pp->frag_size = SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(pp->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); +@@ -3563,6 +3969,7 @@ static int mvneta_probe(struct platform_ + struct resource *res; + struct device_node *dn = pdev->dev.of_node; + struct device_node *phy_node; ++ struct 
device_node *bm_node; + struct mvneta_port *pp; + struct net_device *dev; + const char *dt_mac_addr; +@@ -3690,26 +4097,39 @@ static int mvneta_probe(struct platform_ + + pp->tx_csum_limit = tx_csum_limit; + ++ dram_target_info = mv_mbus_dram_info(); ++ if (dram_target_info) ++ mvneta_conf_mbus_windows(pp, dram_target_info); ++ + pp->tx_ring_size = MVNETA_MAX_TXD; + pp->rx_ring_size = MVNETA_MAX_RXD; + + pp->dev = dev; + SET_NETDEV_DEV(dev, &pdev->dev); + ++ pp->id = global_port_id++; ++ ++ /* Obtain access to BM resources if enabled and already initialized */ ++ bm_node = of_parse_phandle(dn, "buffer-manager", 0); ++ if (bm_node && bm_node->data) { ++ pp->bm_priv = bm_node->data; ++ err = mvneta_bm_port_init(pdev, pp); ++ if (err < 0) { ++ dev_info(&pdev->dev, "use SW buffer management\n"); ++ pp->bm_priv = NULL; ++ } ++ } ++ + err = mvneta_init(&pdev->dev, pp); + if (err < 0) +- goto err_free_stats; ++ goto err_netdev; + + err = mvneta_port_power_up(pp, phy_mode); + if (err < 0) { + dev_err(&pdev->dev, "can't power up port\n"); +- goto err_free_stats; ++ goto err_netdev; + } + +- dram_target_info = mv_mbus_dram_info(); +- if (dram_target_info) +- mvneta_conf_mbus_windows(pp, dram_target_info); +- + for_each_present_cpu(cpu) { + struct mvneta_pcpu_port *port = per_cpu_ptr(pp->ports, cpu); + +@@ -3744,6 +4164,13 @@ static int mvneta_probe(struct platform_ + + return 0; + ++err_netdev: ++ unregister_netdev(dev); ++ if (pp->bm_priv) { ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, ++ 1 << pp->id); ++ } + err_free_stats: + free_percpu(pp->stats); + err_free_ports: +@@ -3773,6 +4200,12 @@ static int mvneta_remove(struct platform + of_node_put(pp->phy_node); + free_netdev(dev); + ++ if (pp->bm_priv) { ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_long, 1 << pp->id); ++ mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, ++ 1 << pp->id); ++ } ++ + return 0; + } + +--- /dev/null ++++ b/drivers/net/ethernet/marvell/mvneta_bm.c +@@ -0,0 +1,546 @@ ++/* ++ * Driver for Marvell NETA network controller Buffer Manager. ++ * ++ * Copyright (C) 2015 Marvell ++ * ++ * Marcin Wojtas <mw@semihalf.com> ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. 
++ */ ++ ++#include <linux/kernel.h> ++#include <linux/genalloc.h> ++#include <linux/platform_device.h> ++#include <linux/netdevice.h> ++#include <linux/skbuff.h> ++#include <linux/mbus.h> ++#include <linux/module.h> ++#include <linux/io.h> ++#include <linux/of.h> ++#include <linux/clk.h> ++#include "mvneta_bm.h" ++ ++#define MVNETA_BM_DRIVER_NAME "mvneta_bm" ++#define MVNETA_BM_DRIVER_VERSION "1.0" ++ ++static void mvneta_bm_write(struct mvneta_bm *priv, u32 offset, u32 data) ++{ ++ writel(data, priv->reg_base + offset); ++} ++ ++static u32 mvneta_bm_read(struct mvneta_bm *priv, u32 offset) ++{ ++ return readl(priv->reg_base + offset); ++} ++ ++static void mvneta_bm_pool_enable(struct mvneta_bm *priv, int pool_id) ++{ ++ u32 val; ++ ++ val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); ++ val |= MVNETA_BM_POOL_ENABLE_MASK; ++ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); ++ ++ /* Clear BM cause register */ ++ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); ++} ++ ++static void mvneta_bm_pool_disable(struct mvneta_bm *priv, int pool_id) ++{ ++ u32 val; ++ ++ val = mvneta_bm_read(priv, MVNETA_BM_POOL_BASE_REG(pool_id)); ++ val &= ~MVNETA_BM_POOL_ENABLE_MASK; ++ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(pool_id), val); ++} ++ ++static inline void mvneta_bm_config_set(struct mvneta_bm *priv, u32 mask) ++{ ++ u32 val; ++ ++ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); ++ val |= mask; ++ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); ++} ++ ++static inline void mvneta_bm_config_clear(struct mvneta_bm *priv, u32 mask) ++{ ++ u32 val; ++ ++ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); ++ val &= ~mask; ++ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); ++} ++ ++static void mvneta_bm_pool_target_set(struct mvneta_bm *priv, int pool_id, ++ u8 target_id, u8 attr) ++{ ++ u32 val; ++ ++ val = mvneta_bm_read(priv, MVNETA_BM_XBAR_POOL_REG(pool_id)); ++ val &= ~MVNETA_BM_TARGET_ID_MASK(pool_id); ++ val &= ~MVNETA_BM_XBAR_ATTR_MASK(pool_id); ++ val |= MVNETA_BM_TARGET_ID_VAL(pool_id, target_id); ++ val |= MVNETA_BM_XBAR_ATTR_VAL(pool_id, attr); ++ ++ mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); ++} ++ ++/* Allocate skb for BM pool */ ++void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ dma_addr_t *buf_phys_addr) ++{ ++ void *buf; ++ dma_addr_t phys_addr; ++ ++ buf = mvneta_frag_alloc(bm_pool->frag_size); ++ if (!buf) ++ return NULL; ++ ++ /* In order to update buf_cookie field of RX descriptor properly, ++ * BM hardware expects buf virtual address to be placed in the ++ * first four bytes of mapped buffer. 
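++ * (This is how the driver later recovers the buffer's virtual address ++ * in the RX path - see the buf_cookie handling in mvneta_rx_hwbm() - ++ * because the BM pool itself only stores DMA addresses.)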
++ */ ++ *(u32 *)buf = (u32)buf; ++ phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, ++ DMA_FROM_DEVICE); ++ if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) { ++ mvneta_frag_free(bm_pool->frag_size, buf); ++ return NULL; ++ } ++ *buf_phys_addr = phys_addr; ++ ++ return buf; ++} ++ ++/* Refill processing for HW buffer management */ ++int mvneta_bm_pool_refill(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool) ++{ ++ dma_addr_t buf_phys_addr; ++ void *buf; ++ ++ buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr); ++ if (!buf) ++ return -ENOMEM; ++ ++ mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr); ++ ++ return 0; ++} ++EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill); ++ ++/* Allocate buffers for the pool */ ++int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ int buf_num) ++{ ++ int err, i; ++ ++ if (bm_pool->buf_num == bm_pool->size) { ++ dev_dbg(&priv->pdev->dev, "pool %d already filled\n", ++ bm_pool->id); ++ return bm_pool->buf_num; ++ } ++ ++ if (buf_num < 0 || ++ (buf_num + bm_pool->buf_num > bm_pool->size)) { ++ dev_err(&priv->pdev->dev, ++ "cannot allocate %d buffers for pool %d\n", ++ buf_num, bm_pool->id); ++ return 0; ++ } ++ ++ for (i = 0; i < buf_num; i++) { ++ err = mvneta_bm_pool_refill(priv, bm_pool); ++ if (err < 0) ++ break; ++ } ++ ++ /* Update BM driver with number of buffers added to pool */ ++ bm_pool->buf_num += i; ++ ++ dev_dbg(&priv->pdev->dev, ++ "%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n", ++ bm_pool->type == MVNETA_BM_SHORT ? "short" : "long", ++ bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size, ++ bm_pool->frag_size); ++ ++ dev_dbg(&priv->pdev->dev, ++ "%s pool %d: %d of %d buffers added\n", ++ bm_pool->type == MVNETA_BM_SHORT ? "short" : "long", ++ bm_pool->id, i, buf_num); ++ ++ return i; ++} ++EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add); ++ ++/* Create pool */ ++static int mvneta_bm_pool_create(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool) ++{ ++ struct platform_device *pdev = priv->pdev; ++ u8 target_id, attr; ++ int size_bytes, err; ++ ++ size_bytes = sizeof(u32) * bm_pool->size; ++ bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, ++ &bm_pool->phys_addr, ++ GFP_KERNEL); ++ if (!bm_pool->virt_addr) ++ return -ENOMEM; ++ ++ if (!IS_ALIGNED((u32)bm_pool->virt_addr, MVNETA_BM_POOL_PTR_ALIGN)) { ++ dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, ++ bm_pool->phys_addr); ++ dev_err(&pdev->dev, "BM pool %d is not %d bytes aligned\n", ++ bm_pool->id, MVNETA_BM_POOL_PTR_ALIGN); ++ return -ENOMEM; ++ } ++ ++ err = mvebu_mbus_get_dram_win_info(bm_pool->phys_addr, &target_id, ++ &attr); ++ if (err < 0) { ++ dma_free_coherent(&pdev->dev, size_bytes, bm_pool->virt_addr, ++ bm_pool->phys_addr); ++ return err; ++ } ++ ++ /* Set pool address */ ++ mvneta_bm_write(priv, MVNETA_BM_POOL_BASE_REG(bm_pool->id), ++ bm_pool->phys_addr); ++ ++ mvneta_bm_pool_target_set(priv, bm_pool->id, target_id, attr); ++ mvneta_bm_pool_enable(priv, bm_pool->id); ++ ++ return 0; ++} ++ ++/* Notify the driver that BM pool is being used as specific type and return the ++ * pool pointer on success ++ */ ++struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, ++ enum mvneta_bm_type type, u8 port_id, ++ int pkt_size) ++{ ++ struct mvneta_bm_pool *new_pool = &priv->bm_pools[pool_id]; ++ int num, err; ++ ++ if (new_pool->type == MVNETA_BM_LONG && ++ new_pool->port_map != 1 << port_id) { ++ dev_err(&priv->pdev->dev, ++ "long pool cannot be shared by the 
ports\n"); ++ return NULL; ++ } ++ ++ if (new_pool->type == MVNETA_BM_SHORT && new_pool->type != type) { ++ dev_err(&priv->pdev->dev, ++ "mixing pools' types between the ports is forbidden\n"); ++ return NULL; ++ } ++ ++ if (new_pool->pkt_size == 0 || type != MVNETA_BM_SHORT) ++ new_pool->pkt_size = pkt_size; ++ ++ /* Allocate buffers in case BM pool hasn't been used yet */ ++ if (new_pool->type == MVNETA_BM_FREE) { ++ new_pool->type = type; ++ new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); ++ new_pool->frag_size = ++ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + ++ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ ++ /* Create new pool */ ++ err = mvneta_bm_pool_create(priv, new_pool); ++ if (err) { ++ dev_err(&priv->pdev->dev, "fail to create pool %d\n", ++ new_pool->id); ++ return NULL; ++ } ++ ++ /* Allocate buffers for this pool */ ++ num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size); ++ if (num != new_pool->size) { ++ WARN(1, "pool %d: %d of %d allocated\n", ++ new_pool->id, num, new_pool->size); ++ return NULL; ++ } ++ } ++ ++ return new_pool; ++} ++EXPORT_SYMBOL_GPL(mvneta_bm_pool_use); ++ ++/* Free all buffers from the pool */ ++void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ u8 port_map) ++{ ++ int i; ++ ++ bm_pool->port_map &= ~port_map; ++ if (bm_pool->port_map) ++ return; ++ ++ mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); ++ ++ for (i = 0; i < bm_pool->buf_num; i++) { ++ dma_addr_t buf_phys_addr; ++ u32 *vaddr; ++ ++ /* Get buffer physical address (indirect access) */ ++ buf_phys_addr = mvneta_bm_pool_get_bp(priv, bm_pool); ++ ++ /* Work-around to the problems when destroying the pool, ++ * when it occurs that a read access to BPPI returns 0. ++ */ ++ if (buf_phys_addr == 0) ++ continue; ++ ++ vaddr = phys_to_virt(buf_phys_addr); ++ if (!vaddr) ++ break; ++ ++ dma_unmap_single(&priv->pdev->dev, buf_phys_addr, ++ bm_pool->buf_size, DMA_FROM_DEVICE); ++ mvneta_frag_free(bm_pool->frag_size, vaddr); ++ } ++ ++ mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); ++ ++ /* Update BM driver with number of buffers removed from pool */ ++ bm_pool->buf_num -= i; ++} ++EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); ++ ++/* Cleanup pool */ ++void mvneta_bm_pool_destroy(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool, u8 port_map) ++{ ++ bm_pool->port_map &= ~port_map; ++ if (bm_pool->port_map) ++ return; ++ ++ bm_pool->type = MVNETA_BM_FREE; ++ ++ mvneta_bm_bufs_free(priv, bm_pool, port_map); ++ if (bm_pool->buf_num) ++ WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); ++ ++ if (bm_pool->virt_addr) { ++ dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size, ++ bm_pool->virt_addr, bm_pool->phys_addr); ++ bm_pool->virt_addr = NULL; ++ } ++ ++ mvneta_bm_pool_disable(priv, bm_pool->id); ++} ++EXPORT_SYMBOL_GPL(mvneta_bm_pool_destroy); ++ ++static void mvneta_bm_pools_init(struct mvneta_bm *priv) ++{ ++ struct device_node *dn = priv->pdev->dev.of_node; ++ struct mvneta_bm_pool *bm_pool; ++ char prop[15]; ++ u32 size; ++ int i; ++ ++ /* Activate BM unit */ ++ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_START_MASK); ++ ++ /* Create all pools with maximum size */ ++ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { ++ bm_pool = &priv->bm_pools[i]; ++ bm_pool->id = i; ++ bm_pool->type = MVNETA_BM_FREE; ++ ++ /* Reset read pointer */ ++ mvneta_bm_write(priv, MVNETA_BM_POOL_READ_PTR_REG(i), 0); ++ ++ /* Reset write pointer */ ++ mvneta_bm_write(priv, MVNETA_BM_POOL_WRITE_PTR_REG(i), 0); 
++ ++ /* Configure pool size according to DT or use default value */ ++ sprintf(prop, "pool%d,capacity", i); ++ if (of_property_read_u32(dn, prop, &size)) { ++ size = MVNETA_BM_POOL_CAP_DEF; ++ } else if (size > MVNETA_BM_POOL_CAP_MAX) { ++ dev_warn(&priv->pdev->dev, ++ "Illegal pool %d capacity %d, set to %d\n", ++ i, size, MVNETA_BM_POOL_CAP_MAX); ++ size = MVNETA_BM_POOL_CAP_MAX; ++ } else if (size < MVNETA_BM_POOL_CAP_MIN) { ++ dev_warn(&priv->pdev->dev, ++ "Illegal pool %d capacity %d, set to %d\n", ++ i, size, MVNETA_BM_POOL_CAP_MIN); ++ size = MVNETA_BM_POOL_CAP_MIN; ++ } else if (!IS_ALIGNED(size, MVNETA_BM_POOL_CAP_ALIGN)) { ++ dev_warn(&priv->pdev->dev, ++ "Illegal pool %d capacity %d, round to %d\n", ++ i, size, ALIGN(size, ++ MVNETA_BM_POOL_CAP_ALIGN)); ++ size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); ++ } ++ bm_pool->size = size; ++ ++ mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), ++ bm_pool->size); ++ ++ /* Obtain custom pkt_size from DT */ ++ sprintf(prop, "pool%d,pkt-size", i); ++ if (of_property_read_u32(dn, prop, &bm_pool->pkt_size)) ++ bm_pool->pkt_size = 0; ++ } ++} ++ ++static void mvneta_bm_default_set(struct mvneta_bm *priv) ++{ ++ u32 val; ++ ++ /* Mask BM all interrupts */ ++ mvneta_bm_write(priv, MVNETA_BM_INTR_MASK_REG, 0); ++ ++ /* Clear BM cause register */ ++ mvneta_bm_write(priv, MVNETA_BM_INTR_CAUSE_REG, 0); ++ ++ /* Set BM configuration register */ ++ val = mvneta_bm_read(priv, MVNETA_BM_CONFIG_REG); ++ ++ /* Reduce MaxInBurstSize from 32 BPs to 16 BPs */ ++ val &= ~MVNETA_BM_MAX_IN_BURST_SIZE_MASK; ++ val |= MVNETA_BM_MAX_IN_BURST_SIZE_16BP; ++ mvneta_bm_write(priv, MVNETA_BM_CONFIG_REG, val); ++} ++ ++static int mvneta_bm_init(struct mvneta_bm *priv) ++{ ++ mvneta_bm_default_set(priv); ++ ++ /* Allocate and initialize BM pools structures */ ++ priv->bm_pools = devm_kcalloc(&priv->pdev->dev, MVNETA_BM_POOLS_NUM, ++ sizeof(struct mvneta_bm_pool), ++ GFP_KERNEL); ++ if (!priv->bm_pools) ++ return -ENOMEM; ++ ++ mvneta_bm_pools_init(priv); ++ ++ return 0; ++} ++ ++static int mvneta_bm_get_sram(struct device_node *dn, ++ struct mvneta_bm *priv) ++{ ++ priv->bppi_pool = of_gen_pool_get(dn, "internal-mem", 0); ++ if (!priv->bppi_pool) ++ return -ENOMEM; ++ ++ priv->bppi_virt_addr = gen_pool_dma_alloc(priv->bppi_pool, ++ MVNETA_BM_BPPI_SIZE, ++ &priv->bppi_phys_addr); ++ if (!priv->bppi_virt_addr) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++static void mvneta_bm_put_sram(struct mvneta_bm *priv) ++{ ++ gen_pool_free(priv->bppi_pool, priv->bppi_phys_addr, ++ MVNETA_BM_BPPI_SIZE); ++} ++ ++static int mvneta_bm_probe(struct platform_device *pdev) ++{ ++ struct device_node *dn = pdev->dev.of_node; ++ struct mvneta_bm *priv; ++ struct resource *res; ++ int err; ++ ++ priv = devm_kzalloc(&pdev->dev, sizeof(struct mvneta_bm), GFP_KERNEL); ++ if (!priv) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ priv->reg_base = devm_ioremap_resource(&pdev->dev, res); ++ if (IS_ERR(priv->reg_base)) ++ return PTR_ERR(priv->reg_base); ++ ++ priv->clk = devm_clk_get(&pdev->dev, NULL); ++ if (IS_ERR(priv->clk)) ++ return PTR_ERR(priv->clk); ++ err = clk_prepare_enable(priv->clk); ++ if (err < 0) ++ return err; ++ ++ err = mvneta_bm_get_sram(dn, priv); ++ if (err < 0) { ++ dev_err(&pdev->dev, "failed to allocate internal memory\n"); ++ goto err_clk; ++ } ++ ++ priv->pdev = pdev; ++ ++ /* Initialize buffer manager internals */ ++ err = mvneta_bm_init(priv); ++ if (err < 0) { ++ dev_err(&pdev->dev, "failed to initialize controller\n"); ++ goto err_sram; 
++ } ++ ++ dn->data = priv; ++ platform_set_drvdata(pdev, priv); ++ ++ dev_info(&pdev->dev, "Buffer Manager for network controller enabled\n"); ++ ++ return 0; ++ ++err_sram: ++ mvneta_bm_put_sram(priv); ++err_clk: ++ clk_disable_unprepare(priv->clk); ++ return err; ++} ++ ++static int mvneta_bm_remove(struct platform_device *pdev) ++{ ++ struct mvneta_bm *priv = platform_get_drvdata(pdev); ++ u8 all_ports_map = 0xff; ++ int i = 0; ++ ++ for (i = 0; i < MVNETA_BM_POOLS_NUM; i++) { ++ struct mvneta_bm_pool *bm_pool = &priv->bm_pools[i]; ++ ++ mvneta_bm_pool_destroy(priv, bm_pool, all_ports_map); ++ } ++ ++ mvneta_bm_put_sram(priv); ++ ++ /* Deactivate BM unit */ ++ mvneta_bm_write(priv, MVNETA_BM_COMMAND_REG, MVNETA_BM_STOP_MASK); ++ ++ clk_disable_unprepare(priv->clk); ++ ++ return 0; ++} ++ ++static const struct of_device_id mvneta_bm_match[] = { ++ { .compatible = "marvell,armada-380-neta-bm" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, mvneta_bm_match); ++ ++static struct platform_driver mvneta_bm_driver = { ++ .probe = mvneta_bm_probe, ++ .remove = mvneta_bm_remove, ++ .driver = { ++ .name = MVNETA_BM_DRIVER_NAME, ++ .of_match_table = mvneta_bm_match, ++ }, ++}; ++ ++module_platform_driver(mvneta_bm_driver); ++ ++MODULE_DESCRIPTION("Marvell NETA Buffer Manager Driver - www.marvell.com"); ++MODULE_AUTHOR("Marcin Wojtas <mw@semihalf.com>"); ++MODULE_LICENSE("GPL v2"); +--- /dev/null ++++ b/drivers/net/ethernet/marvell/mvneta_bm.h +@@ -0,0 +1,189 @@ ++/* ++ * Driver for Marvell NETA network controller Buffer Manager. ++ * ++ * Copyright (C) 2015 Marvell ++ * ++ * Marcin Wojtas <mw@semihalf.com> ++ * ++ * This file is licensed under the terms of the GNU General Public ++ * License version 2. This program is licensed "as is" without any ++ * warranty of any kind, whether express or implied. ++ */ ++ ++#ifndef _MVNETA_BM_H_ ++#define _MVNETA_BM_H_ ++ ++/* BM Configuration Register */ ++#define MVNETA_BM_CONFIG_REG 0x0 ++#define MVNETA_BM_STATUS_MASK 0x30 ++#define MVNETA_BM_ACTIVE_MASK BIT(4) ++#define MVNETA_BM_MAX_IN_BURST_SIZE_MASK 0x60000 ++#define MVNETA_BM_MAX_IN_BURST_SIZE_16BP BIT(18) ++#define MVNETA_BM_EMPTY_LIMIT_MASK BIT(19) ++ ++/* BM Activation Register */ ++#define MVNETA_BM_COMMAND_REG 0x4 ++#define MVNETA_BM_START_MASK BIT(0) ++#define MVNETA_BM_STOP_MASK BIT(1) ++#define MVNETA_BM_PAUSE_MASK BIT(2) ++ ++/* BM Xbar interface Register */ ++#define MVNETA_BM_XBAR_01_REG 0x8 ++#define MVNETA_BM_XBAR_23_REG 0xc ++#define MVNETA_BM_XBAR_POOL_REG(pool) \ ++ (((pool) < 2) ? MVNETA_BM_XBAR_01_REG : MVNETA_BM_XBAR_23_REG) ++#define MVNETA_BM_TARGET_ID_OFFS(pool) (((pool) & 1) ? 16 : 0) ++#define MVNETA_BM_TARGET_ID_MASK(pool) \ ++ (0xf << MVNETA_BM_TARGET_ID_OFFS(pool)) ++#define MVNETA_BM_TARGET_ID_VAL(pool, id) \ ++ ((id) << MVNETA_BM_TARGET_ID_OFFS(pool)) ++#define MVNETA_BM_XBAR_ATTR_OFFS(pool) (((pool) & 1) ?
20 : 4) ++#define MVNETA_BM_XBAR_ATTR_MASK(pool) \ ++ (0xff << MVNETA_BM_XBAR_ATTR_OFFS(pool)) ++#define MVNETA_BM_XBAR_ATTR_VAL(pool, attr) \ ++ ((attr) << MVNETA_BM_XBAR_ATTR_OFFS(pool)) ++ ++/* Address of External Buffer Pointers Pool Register */ ++#define MVNETA_BM_POOL_BASE_REG(pool) (0x10 + ((pool) << 4)) ++#define MVNETA_BM_POOL_ENABLE_MASK BIT(0) ++ ++/* External Buffer Pointers Pool RD pointer Register */ ++#define MVNETA_BM_POOL_READ_PTR_REG(pool) (0x14 + ((pool) << 4)) ++#define MVNETA_BM_POOL_SET_READ_PTR_MASK 0xfffc ++#define MVNETA_BM_POOL_GET_READ_PTR_OFFS 16 ++#define MVNETA_BM_POOL_GET_READ_PTR_MASK 0xfffc0000 ++ ++/* External Buffer Pointers Pool WR pointer */ ++#define MVNETA_BM_POOL_WRITE_PTR_REG(pool) (0x18 + ((pool) << 4)) ++#define MVNETA_BM_POOL_SET_WRITE_PTR_OFFS 0 ++#define MVNETA_BM_POOL_SET_WRITE_PTR_MASK 0xfffc ++#define MVNETA_BM_POOL_GET_WRITE_PTR_OFFS 16 ++#define MVNETA_BM_POOL_GET_WRITE_PTR_MASK 0xfffc0000 ++ ++/* External Buffer Pointers Pool Size Register */ ++#define MVNETA_BM_POOL_SIZE_REG(pool) (0x1c + ((pool) << 4)) ++#define MVNETA_BM_POOL_SIZE_MASK 0x3fff ++ ++/* BM Interrupt Cause Register */ ++#define MVNETA_BM_INTR_CAUSE_REG (0x50) ++ ++/* BM interrupt Mask Register */ ++#define MVNETA_BM_INTR_MASK_REG (0x54) ++ ++/* Other definitions */ ++#define MVNETA_BM_SHORT_PKT_SIZE 256 ++#define MVNETA_BM_POOLS_NUM 4 ++#define MVNETA_BM_POOL_CAP_MIN 128 ++#define MVNETA_BM_POOL_CAP_DEF 2048 ++#define MVNETA_BM_POOL_CAP_MAX \ ++ (16 * 1024 - MVNETA_BM_POOL_CAP_ALIGN) ++#define MVNETA_BM_POOL_CAP_ALIGN 32 ++#define MVNETA_BM_POOL_PTR_ALIGN 32 ++ ++#define MVNETA_BM_POOL_ACCESS_OFFS 8 ++ ++#define MVNETA_BM_BPPI_SIZE 0x100000 ++ ++#define MVNETA_RX_BUF_SIZE(pkt_size) ((pkt_size) + NET_SKB_PAD) ++ ++enum mvneta_bm_type { ++ MVNETA_BM_FREE, ++ MVNETA_BM_LONG, ++ MVNETA_BM_SHORT ++}; ++ ++struct mvneta_bm { ++ void __iomem *reg_base; ++ struct clk *clk; ++ struct platform_device *pdev; ++ ++ struct gen_pool *bppi_pool; ++ /* BPPI virtual base address */ ++ void __iomem *bppi_virt_addr; ++ /* BPPI physical base address */ ++ dma_addr_t bppi_phys_addr; ++ ++ /* BM pools */ ++ struct mvneta_bm_pool *bm_pools; ++}; ++ ++struct mvneta_bm_pool { ++ /* Pool number in the range 0-3 */ ++ u8 id; ++ enum mvneta_bm_type type; ++ ++ /* Buffer Pointers Pool External (BPPE) size in number of bytes */ ++ int size; ++ /* Number of buffers used by this pool */ ++ int buf_num; ++ /* Pool buffer size */ ++ int buf_size; ++ /* Packet size */ ++ int pkt_size; ++ /* Single frag size */ ++ u32 frag_size; ++ ++ /* BPPE virtual base address */ ++ u32 *virt_addr; ++ /* BPPE physical base address */ ++ dma_addr_t phys_addr; ++ ++ /* Ports using BM pool */ ++ u8 port_map; ++ ++ struct mvneta_bm *priv; ++}; ++ ++/* Declarations and definitions */ ++void *mvneta_frag_alloc(unsigned int frag_size); ++void mvneta_frag_free(unsigned int frag_size, void *data); ++ ++#if defined(CONFIG_MVNETA_BM) || defined(CONFIG_MVNETA_BM_MODULE) ++void mvneta_bm_pool_destroy(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool, u8 port_map); ++void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ u8 port_map); ++int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ int buf_num); ++int mvneta_bm_pool_refill(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool); ++struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, ++ enum mvneta_bm_type type, u8 port_id, ++ int pkt_size); ++ ++static inline void 
mvneta_bm_pool_put_bp(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool, ++ dma_addr_t buf_phys_addr) ++{ ++ writel_relaxed(buf_phys_addr, priv->bppi_virt_addr + ++ (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); ++} ++ ++static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool) ++{ ++ return readl_relaxed(priv->bppi_virt_addr + ++ (bm_pool->id << MVNETA_BM_POOL_ACCESS_OFFS)); ++} ++#else ++void mvneta_bm_pool_destroy(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool, u8 port_map) {} ++void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ u8 port_map) {} ++int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, ++ int buf_num) { return 0; } ++int mvneta_bm_pool_refill(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool) {return 0; } ++struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, ++ enum mvneta_bm_type type, u8 port_id, ++ int pkt_size) { return NULL; } ++ ++static inline void mvneta_bm_pool_put_bp(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool, ++ dma_addr_t buf_phys_addr) {} ++ ++static inline u32 mvneta_bm_pool_get_bp(struct mvneta_bm *priv, ++ struct mvneta_bm_pool *bm_pool) ++{ return 0; } ++#endif /* CONFIG_MVNETA_BM */ ++#endif diff --git a/target/linux/mvebu/patches-4.4/046-net-mvneta-Use-the-new-hwbm-framework.patch b/target/linux/mvebu/patches-4.4/046-net-mvneta-Use-the-new-hwbm-framework.patch new file mode 100644 index 0000000000..210504c39e --- /dev/null +++ b/target/linux/mvebu/patches-4.4/046-net-mvneta-Use-the-new-hwbm-framework.patch @@ -0,0 +1,359 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Mon, 14 Mar 2016 09:39:05 +0100 +Subject: [PATCH] net: mvneta: Use the new hwbm framework + +Now that the hardware buffer management framework had been introduced, +let's use it. + +Tested-by: Sebastian Careba <nitroshift@yahoo.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/Kconfig ++++ b/drivers/net/ethernet/marvell/Kconfig +@@ -43,6 +43,7 @@ config MVMDIO + config MVNETA_BM + tristate "Marvell Armada 38x/XP network interface BM support" + depends on MVNETA ++ select HWBM + ---help--- + This driver supports auxiliary block of the network + interface units in the Marvell ARMADA XP and ARMADA 38x SoC +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -30,6 +30,7 @@ + #include <linux/phy.h> + #include <linux/platform_device.h> + #include <linux/skbuff.h> ++#include <net/hwbm.h> + #include "mvneta_bm.h" + #include <net/ip.h> + #include <net/ipv6.h> +@@ -1024,11 +1025,12 @@ static int mvneta_bm_port_init(struct pl + static void mvneta_bm_update_mtu(struct mvneta_port *pp, int mtu) + { + struct mvneta_bm_pool *bm_pool = pp->pool_long; ++ struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; + int num; + + /* Release all buffers from long pool */ + mvneta_bm_bufs_free(pp->bm_priv, bm_pool, 1 << pp->id); +- if (bm_pool->buf_num) { ++ if (hwbm_pool->buf_num) { + WARN(1, "cannot free all buffers in pool %d\n", + bm_pool->id); + goto bm_mtu_err; +@@ -1036,14 +1038,14 @@ static void mvneta_bm_update_mtu(struct + + bm_pool->pkt_size = MVNETA_RX_PKT_SIZE(mtu); + bm_pool->buf_size = MVNETA_RX_BUF_SIZE(bm_pool->pkt_size); +- bm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + +- SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); ++ hwbm_pool->frag_size = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + ++ SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(bm_pool->pkt_size)); + + /* Fill entire long pool */ +- num = mvneta_bm_bufs_add(pp->bm_priv, bm_pool, bm_pool->size); +- if (num != bm_pool->size) { ++ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); ++ if (num != hwbm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", +- bm_pool->id, num, bm_pool->size); ++ bm_pool->id, num, hwbm_pool->size); + goto bm_mtu_err; + } + mvneta_bm_pool_bufsize_set(pp, bm_pool->buf_size, bm_pool->id); +@@ -2064,14 +2066,14 @@ err_drop_frame: + } + + /* Refill processing */ +- err = mvneta_bm_pool_refill(pp->bm_priv, bm_pool); ++ err = hwbm_pool_refill(&bm_pool->hwbm_pool, GFP_ATOMIC); + if (err) { + netdev_err(dev, "Linux processing - Can't refill\n"); + rxq->missed++; + goto err_drop_frame_ret_pool; + } + +- frag_size = bm_pool->frag_size; ++ frag_size = bm_pool->hwbm_pool.frag_size; + + skb = build_skb(data, frag_size > PAGE_SIZE ? 0 : frag_size); + +--- a/drivers/net/ethernet/marvell/mvneta_bm.c ++++ b/drivers/net/ethernet/marvell/mvneta_bm.c +@@ -10,16 +10,17 @@ + * warranty of any kind, whether express or implied. 
+ */ + +-#include <linux/kernel.h> ++#include <linux/clk.h> + #include <linux/genalloc.h> +-#include <linux/platform_device.h> +-#include <linux/netdevice.h> +-#include <linux/skbuff.h> ++#include <linux/io.h> ++#include <linux/kernel.h> + #include <linux/mbus.h> + #include <linux/module.h> +-#include <linux/io.h> ++#include <linux/netdevice.h> + #include <linux/of.h> +-#include <linux/clk.h> ++#include <linux/platform_device.h> ++#include <linux/skbuff.h> ++#include <net/hwbm.h> + #include "mvneta_bm.h" + + #define MVNETA_BM_DRIVER_NAME "mvneta_bm" +@@ -88,17 +89,13 @@ static void mvneta_bm_pool_target_set(st + mvneta_bm_write(priv, MVNETA_BM_XBAR_POOL_REG(pool_id), val); + } + +-/* Allocate skb for BM pool */ +-void *mvneta_buf_alloc(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, +- dma_addr_t *buf_phys_addr) ++int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) + { +- void *buf; ++ struct mvneta_bm_pool *bm_pool = ++ (struct mvneta_bm_pool *)hwbm_pool->priv; ++ struct mvneta_bm *priv = bm_pool->priv; + dma_addr_t phys_addr; + +- buf = mvneta_frag_alloc(bm_pool->frag_size); +- if (!buf) +- return NULL; +- + /* In order to update buf_cookie field of RX descriptor properly, + * BM hardware expects buf virtual address to be placed in the + * first four bytes of mapped buffer. +@@ -106,75 +103,13 @@ void *mvneta_buf_alloc(struct mvneta_bm + *(u32 *)buf = (u32)buf; + phys_addr = dma_map_single(&priv->pdev->dev, buf, bm_pool->buf_size, + DMA_FROM_DEVICE); +- if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) { +- mvneta_frag_free(bm_pool->frag_size, buf); +- return NULL; +- } +- *buf_phys_addr = phys_addr; +- +- return buf; +-} +- +-/* Refill processing for HW buffer management */ +-int mvneta_bm_pool_refill(struct mvneta_bm *priv, +- struct mvneta_bm_pool *bm_pool) +-{ +- dma_addr_t buf_phys_addr; +- void *buf; +- +- buf = mvneta_buf_alloc(priv, bm_pool, &buf_phys_addr); +- if (!buf) ++ if (unlikely(dma_mapping_error(&priv->pdev->dev, phys_addr))) + return -ENOMEM; + +- mvneta_bm_pool_put_bp(priv, bm_pool, buf_phys_addr); +- ++ mvneta_bm_pool_put_bp(priv, bm_pool, phys_addr); + return 0; + } +-EXPORT_SYMBOL_GPL(mvneta_bm_pool_refill); +- +-/* Allocate buffers for the pool */ +-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, +- int buf_num) +-{ +- int err, i; +- +- if (bm_pool->buf_num == bm_pool->size) { +- dev_dbg(&priv->pdev->dev, "pool %d already filled\n", +- bm_pool->id); +- return bm_pool->buf_num; +- } +- +- if (buf_num < 0 || +- (buf_num + bm_pool->buf_num > bm_pool->size)) { +- dev_err(&priv->pdev->dev, +- "cannot allocate %d buffers for pool %d\n", +- buf_num, bm_pool->id); +- return 0; +- } +- +- for (i = 0; i < buf_num; i++) { +- err = mvneta_bm_pool_refill(priv, bm_pool); +- if (err < 0) +- break; +- } +- +- /* Update BM driver with number of buffers added to pool */ +- bm_pool->buf_num += i; +- +- dev_dbg(&priv->pdev->dev, +- "%s pool %d: pkt_size=%4d, buf_size=%4d, frag_size=%4d\n", +- bm_pool->type == MVNETA_BM_SHORT ? "short" : "long", +- bm_pool->id, bm_pool->pkt_size, bm_pool->buf_size, +- bm_pool->frag_size); +- +- dev_dbg(&priv->pdev->dev, +- "%s pool %d: %d of %d buffers added\n", +- bm_pool->type == MVNETA_BM_SHORT ? 
"short" : "long", +- bm_pool->id, i, buf_num); +- +- return i; +-} +-EXPORT_SYMBOL_GPL(mvneta_bm_bufs_add); ++EXPORT_SYMBOL_GPL(mvneta_bm_construct); + + /* Create pool */ + static int mvneta_bm_pool_create(struct mvneta_bm *priv, +@@ -183,8 +118,7 @@ static int mvneta_bm_pool_create(struct + struct platform_device *pdev = priv->pdev; + u8 target_id, attr; + int size_bytes, err; +- +- size_bytes = sizeof(u32) * bm_pool->size; ++ size_bytes = sizeof(u32) * bm_pool->hwbm_pool.size; + bm_pool->virt_addr = dma_alloc_coherent(&pdev->dev, size_bytes, + &bm_pool->phys_addr, + GFP_KERNEL); +@@ -245,11 +179,16 @@ struct mvneta_bm_pool *mvneta_bm_pool_us + + /* Allocate buffers in case BM pool hasn't been used yet */ + if (new_pool->type == MVNETA_BM_FREE) { ++ struct hwbm_pool *hwbm_pool = &new_pool->hwbm_pool; ++ ++ new_pool->priv = priv; + new_pool->type = type; + new_pool->buf_size = MVNETA_RX_BUF_SIZE(new_pool->pkt_size); +- new_pool->frag_size = ++ hwbm_pool->frag_size = + SKB_DATA_ALIGN(MVNETA_RX_BUF_SIZE(new_pool->pkt_size)) + + SKB_DATA_ALIGN(sizeof(struct skb_shared_info)); ++ hwbm_pool->construct = mvneta_bm_construct; ++ hwbm_pool->priv = new_pool; + + /* Create new pool */ + err = mvneta_bm_pool_create(priv, new_pool); +@@ -260,10 +199,10 @@ struct mvneta_bm_pool *mvneta_bm_pool_us + } + + /* Allocate buffers for this pool */ +- num = mvneta_bm_bufs_add(priv, new_pool, new_pool->size); +- if (num != new_pool->size) { ++ num = hwbm_pool_add(hwbm_pool, hwbm_pool->size, GFP_ATOMIC); ++ if (num != hwbm_pool->size) { + WARN(1, "pool %d: %d of %d allocated\n", +- new_pool->id, num, new_pool->size); ++ new_pool->id, num, hwbm_pool->size); + return NULL; + } + } +@@ -284,7 +223,7 @@ void mvneta_bm_bufs_free(struct mvneta_b + + mvneta_bm_config_set(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + +- for (i = 0; i < bm_pool->buf_num; i++) { ++ for (i = 0; i < bm_pool->hwbm_pool.buf_num; i++) { + dma_addr_t buf_phys_addr; + u32 *vaddr; + +@@ -303,13 +242,13 @@ void mvneta_bm_bufs_free(struct mvneta_b + + dma_unmap_single(&priv->pdev->dev, buf_phys_addr, + bm_pool->buf_size, DMA_FROM_DEVICE); +- mvneta_frag_free(bm_pool->frag_size, vaddr); ++ hwbm_buf_free(&bm_pool->hwbm_pool, vaddr); + } + + mvneta_bm_config_clear(priv, MVNETA_BM_EMPTY_LIMIT_MASK); + + /* Update BM driver with number of buffers removed from pool */ +- bm_pool->buf_num -= i; ++ bm_pool->hwbm_pool.buf_num -= i; + } + EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); + +@@ -317,6 +256,7 @@ EXPORT_SYMBOL_GPL(mvneta_bm_bufs_free); + void mvneta_bm_pool_destroy(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool, u8 port_map) + { ++ struct hwbm_pool *hwbm_pool = &bm_pool->hwbm_pool; + bm_pool->port_map &= ~port_map; + if (bm_pool->port_map) + return; +@@ -324,11 +264,12 @@ void mvneta_bm_pool_destroy(struct mvnet + bm_pool->type = MVNETA_BM_FREE; + + mvneta_bm_bufs_free(priv, bm_pool, port_map); +- if (bm_pool->buf_num) ++ if (hwbm_pool->buf_num) + WARN(1, "cannot free all buffers in pool %d\n", bm_pool->id); + + if (bm_pool->virt_addr) { +- dma_free_coherent(&priv->pdev->dev, sizeof(u32) * bm_pool->size, ++ dma_free_coherent(&priv->pdev->dev, ++ sizeof(u32) * hwbm_pool->size, + bm_pool->virt_addr, bm_pool->phys_addr); + bm_pool->virt_addr = NULL; + } +@@ -381,10 +322,10 @@ static void mvneta_bm_pools_init(struct + MVNETA_BM_POOL_CAP_ALIGN)); + size = ALIGN(size, MVNETA_BM_POOL_CAP_ALIGN); + } +- bm_pool->size = size; ++ bm_pool->hwbm_pool.size = size; + + mvneta_bm_write(priv, MVNETA_BM_POOL_SIZE_REG(i), +- bm_pool->size); ++ bm_pool->hwbm_pool.size); 
+ + /* Obtain custom pkt_size from DT */ + sprintf(prop, "pool%d,pkt-size", i); +--- a/drivers/net/ethernet/marvell/mvneta_bm.h ++++ b/drivers/net/ethernet/marvell/mvneta_bm.h +@@ -108,20 +108,15 @@ struct mvneta_bm { + }; + + struct mvneta_bm_pool { ++ struct hwbm_pool hwbm_pool; + /* Pool number in the range 0-3 */ + u8 id; + enum mvneta_bm_type type; + +- /* Buffer Pointers Pool External (BPPE) size in number of bytes */ +- int size; +- /* Number of buffers used by this pool */ +- int buf_num; +- /* Pool buffer size */ +- int buf_size; + /* Packet size */ + int pkt_size; +- /* Single frag size */ +- u32 frag_size; ++ /* Size of the buffer accessed through DMA */ ++ u32 buf_size; + + /* BPPE virtual base address */ + u32 *virt_addr; +@@ -143,8 +138,7 @@ void mvneta_bm_pool_destroy(struct mvnet + struct mvneta_bm_pool *bm_pool, u8 port_map); + void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map); +-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, +- int buf_num); ++int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf); + int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool); + struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, +@@ -170,8 +164,7 @@ void mvneta_bm_pool_destroy(struct mvnet + struct mvneta_bm_pool *bm_pool, u8 port_map) {} + void mvneta_bm_bufs_free(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, + u8 port_map) {} +-int mvneta_bm_bufs_add(struct mvneta_bm *priv, struct mvneta_bm_pool *bm_pool, +- int buf_num) { return 0; } ++int mvneta_bm_construct(struct hwbm_pool *hwbm_pool, void *buf) { return 0; } + int mvneta_bm_pool_refill(struct mvneta_bm *priv, + struct mvneta_bm_pool *bm_pool) {return 0; } + struct mvneta_bm_pool *mvneta_bm_pool_use(struct mvneta_bm *priv, u8 pool_id, diff --git a/target/linux/mvebu/patches-4.4/047-net-mvneta-Fix-spinlock-usage.patch b/target/linux/mvebu/patches-4.4/047-net-mvneta-Fix-spinlock-usage.patch new file mode 100644 index 0000000000..a8e78df2e9 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/047-net-mvneta-Fix-spinlock-usage.patch @@ -0,0 +1,52 @@ +From: Gregory CLEMENT <gregory.clement@free-electrons.com> +Date: Sat, 12 Mar 2016 18:44:17 +0100 +Subject: [PATCH] net: mvneta: Fix spinlock usage + +In the previous patch, the spinlock was not initialized. While it didn't +cause any trouble yet, it could be a problem to use it uninitialized. + +The most annoying part was the critical section protected by the spinlock +in mvneta_stop(). Some of the functions could sleep, as pointed out when +CONFIG_DEBUG_ATOMIC_SLEEP is activated. Actually, in mvneta_stop() we only +need to protect the is_stopped flag; indeed, the code of the notifier +for CPU online is protected by the same spinlock, so when we get the +lock, the notifier work is done. + +Reported-by: Patrick Uiterwijk <patrick@puiterwijk.org> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -3484,17 +3484,17 @@ static int mvneta_stop(struct net_device + struct mvneta_port *pp = netdev_priv(dev); + + /* Inform that we are stopping so we don't want to setup the +- * driver for new CPUs in the notifiers ++ * driver for new CPUs in the notifiers. The code of the ++ * notifier for CPU online is protected by the same spinlock, ++ * so when we get the lock, the notifier work is done.
+ */ + spin_lock(&pp->lock); + pp->is_stopped = true; ++ spin_unlock(&pp->lock); ++ + mvneta_stop_dev(pp); + mvneta_mdio_remove(pp); + unregister_cpu_notifier(&pp->cpu_notifier); +- /* Now that the notifier are unregistered, we can release le +- * lock +- */ +- spin_unlock(&pp->lock); + on_each_cpu(mvneta_percpu_disable, pp, true); + free_percpu_irq(dev->irq, pp->ports); + mvneta_cleanup_rxqs(pp); +@@ -4027,6 +4027,7 @@ static int mvneta_probe(struct platform_ + dev->ethtool_ops = &mvneta_eth_tool_ops; + + pp = netdev_priv(dev); ++ spin_lock_init(&pp->lock); + pp->phy_node = phy_node; + pp->phy_interface = phy_mode; + diff --git a/target/linux/mvebu/patches-4.4/048-net-mvneta-fix-error-messages-in-mvneta_port_down-fu.patch b/target/linux/mvebu/patches-4.4/048-net-mvneta-fix-error-messages-in-mvneta_port_down-fu.patch new file mode 100644 index 0000000000..fd1f1ae203 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/048-net-mvneta-fix-error-messages-in-mvneta_port_down-fu.patch @@ -0,0 +1,33 @@ +From: Dmitri Epshtein <dima@marvell.com> +Date: Sat, 12 Mar 2016 18:44:19 +0100 +Subject: [PATCH] net: mvneta: fix error messages in mvneta_port_down function + +This commit corrects error printing when shutting down the port. + +[gregory.clement@free-electrons.com: split initial commit in two +individual changes] +Signed-off-by: Dmitri Epshtein <dima@marvell.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -1105,7 +1105,7 @@ static void mvneta_port_down(struct mvne + do { + if (count++ >= MVNETA_RX_DISABLE_TIMEOUT_MSEC) { + netdev_warn(pp->dev, +- "TIMEOUT for RX stopped ! rx_queue_cmd: 0x08%x\n", ++ "TIMEOUT for RX stopped ! rx_queue_cmd: 0x%08x\n", + val); + break; + } +@@ -1144,7 +1144,7 @@ static void mvneta_port_down(struct mvne + do { + if (count++ >= MVNETA_TX_FIFO_EMPTY_TIMEOUT) { + netdev_warn(pp->dev, +- "TX FIFO empty timeout status=0x08%x\n", ++ "TX FIFO empty timeout status=0x%08x\n", + val); + break; + } diff --git a/target/linux/mvebu/patches-4.4/049-net-mvneta-replace-MVNETA_CPU_D_CACHE_LINE_SIZE-with.patch b/target/linux/mvebu/patches-4.4/049-net-mvneta-replace-MVNETA_CPU_D_CACHE_LINE_SIZE-with.patch new file mode 100644 index 0000000000..c12d98a4e2 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/049-net-mvneta-replace-MVNETA_CPU_D_CACHE_LINE_SIZE-with.patch @@ -0,0 +1,56 @@ +From: Jisheng Zhang <jszhang@marvell.com> +Date: Wed, 30 Mar 2016 19:55:21 +0800 +Subject: [PATCH] net: mvneta: replace MVNETA_CPU_D_CACHE_LINE_SIZE with + L1_CACHE_BYTES + +The mvneta is also used in some Marvell berlin family SoCs which may +have 64bytes cacheline size. Replace the MVNETA_CPU_D_CACHE_LINE_SIZE +usage with L1_CACHE_BYTES. + +And since dma_alloc_coherent() is always cacheline size aligned, so +remove the align checks. + +Signed-off-by: Jisheng Zhang <jszhang@marvell.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -260,7 +260,6 @@ + + #define MVNETA_VLAN_TAG_LEN 4 + +-#define MVNETA_CPU_D_CACHE_LINE_SIZE 32 + #define MVNETA_TX_CSUM_DEF_SIZE 1600 + #define MVNETA_TX_CSUM_MAX_SIZE 9800 + #define MVNETA_ACC_MODE_EXT1 1 +@@ -300,7 +299,7 @@ + #define MVNETA_RX_PKT_SIZE(mtu) \ + ALIGN((mtu) + MVNETA_MH_SIZE + MVNETA_VLAN_TAG_LEN + \ + ETH_HLEN + ETH_FCS_LEN, \ +- MVNETA_CPU_D_CACHE_LINE_SIZE) ++ L1_CACHE_BYTES) + + #define IS_TSO_HEADER(txq, addr) \ + ((addr >= txq->tso_hdrs_phys) && \ +@@ -2762,9 +2761,6 @@ static int mvneta_rxq_init(struct mvneta + if (rxq->descs == NULL) + return -ENOMEM; + +- BUG_ON(rxq->descs != +- PTR_ALIGN(rxq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); +- + rxq->last_desc = rxq->size - 1; + + /* Set Rx descriptors queue starting address */ +@@ -2835,10 +2831,6 @@ static int mvneta_txq_init(struct mvneta + if (txq->descs == NULL) + return -ENOMEM; + +- /* Make sure descriptor address is cache line size aligned */ +- BUG_ON(txq->descs != +- PTR_ALIGN(txq->descs, MVNETA_CPU_D_CACHE_LINE_SIZE)); +- + txq->last_desc = txq->size - 1; + + /* Set maximum bandwidth for enabled TXQs */ diff --git a/target/linux/mvebu/patches-4.4/050-net-mvneta-fix-changing-MTU-when-using-per-cpu-proce.patch b/target/linux/mvebu/patches-4.4/050-net-mvneta-fix-changing-MTU-when-using-per-cpu-proce.patch new file mode 100644 index 0000000000..07862008a7 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/050-net-mvneta-fix-changing-MTU-when-using-per-cpu-proce.patch @@ -0,0 +1,75 @@ +From: Marcin Wojtas <mw@semihalf.com> +Date: Fri, 1 Apr 2016 15:21:18 +0200 +Subject: [PATCH] net: mvneta: fix changing MTU when using per-cpu processing + +After enabling per-cpu processing it appeared that under heavy load +changing MTU can result in blocking all port's interrupts and +transmitting data is not possible after the change. + +This commit fixes above issue by disabling percpu interrupts for the +time, when TXQs and RXQs are reconfigured. + +Signed-off-by: Marcin Wojtas <mw@semihalf.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/drivers/net/ethernet/marvell/mvneta.c ++++ b/drivers/net/ethernet/marvell/mvneta.c +@@ -3040,6 +3040,20 @@ static int mvneta_check_mtu_valid(struct + return mtu; + } + ++static void mvneta_percpu_enable(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); ++} ++ ++static void mvneta_percpu_disable(void *arg) ++{ ++ struct mvneta_port *pp = arg; ++ ++ disable_percpu_irq(pp->dev->irq); ++} ++ + /* Change the device mtu */ + static int mvneta_change_mtu(struct net_device *dev, int mtu) + { +@@ -3064,6 +3078,7 @@ static int mvneta_change_mtu(struct net_ + * reallocation of the queues + */ + mvneta_stop_dev(pp); ++ on_each_cpu(mvneta_percpu_disable, pp, true); + + mvneta_cleanup_txqs(pp); + mvneta_cleanup_rxqs(pp); +@@ -3087,6 +3102,7 @@ static int mvneta_change_mtu(struct net_ + return ret; + } + ++ on_each_cpu(mvneta_percpu_enable, pp, true); + mvneta_start_dev(pp); + mvneta_port_up(pp); + +@@ -3240,20 +3256,6 @@ static void mvneta_mdio_remove(struct mv + pp->phy_dev = NULL; + } + +-static void mvneta_percpu_enable(void *arg) +-{ +- struct mvneta_port *pp = arg; +- +- enable_percpu_irq(pp->dev->irq, IRQ_TYPE_NONE); +-} +- +-static void mvneta_percpu_disable(void *arg) +-{ +- struct mvneta_port *pp = arg; +- +- disable_percpu_irq(pp->dev->irq); +-} +- + /* Electing a CPU must be done in an atomic way: it should be done + * after or before the removal/insertion of a CPU and this function is + * not reentrant. diff --git a/target/linux/mvebu/patches-4.4/051-ARM-dts-armada-38x-add-buffer-manager-nodes.patch b/target/linux/mvebu/patches-4.4/051-ARM-dts-armada-38x-add-buffer-manager-nodes.patch new file mode 100644 index 0000000000..b56de94e3d --- /dev/null +++ b/target/linux/mvebu/patches-4.4/051-ARM-dts-armada-38x-add-buffer-manager-nodes.patch @@ -0,0 +1,53 @@ +From: Marcin Wojtas <mw@semihalf.com> +Date: Mon, 14 Mar 2016 09:38:57 +0100 +Subject: [PATCH] ARM: dts: armada-38x: add buffer manager nodes + +Armada 38x network controller supports hardware buffer management (BM). +Since it is now enabled in mvneta driver, appropriate nodes can be added +to armada-38x.dtsi - for the actual common BM unit (bm@c8000) and its +internal SRAM (bm-bppi), which is used for indirect access to buffer +pointer ring residing in DRAM. + +Pools - ports mapping, bm-bppi entry in 'soc' node's ranges and optional +parameters are supposed to be set in board files. + +Signed-off-by: Marcin Wojtas <mw@semihalf.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. 
Miller <davem@davemloft.net> +--- + +--- a/arch/arm/boot/dts/armada-38x.dtsi ++++ b/arch/arm/boot/dts/armada-38x.dtsi +@@ -540,6 +540,14 @@ + status = "disabled"; + }; + ++ bm: bm@c8000 { ++ compatible = "marvell,armada-380-neta-bm"; ++ reg = <0xc8000 0xac>; ++ clocks = <&gateclk 13>; ++ internal-mem = <&bm_bppi>; ++ status = "disabled"; ++ }; ++ + sata@e0000 { + compatible = "marvell,armada-380-ahci"; + reg = <0xe0000 0x2000>; +@@ -618,6 +626,17 @@ + #size-cells = <1>; + ranges = <0 MBUS_ID(0x09, 0x15) 0 0x800>; + }; ++ ++ bm_bppi: bm-bppi { ++ compatible = "mmio-sram"; ++ reg = <MBUS_ID(0x0c, 0x04) 0 0x100000>; ++ ranges = <0 MBUS_ID(0x0c, 0x04) 0 0x100000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ clocks = <&gateclk 13>; ++ no-memory-wc; ++ status = "disabled"; ++ }; + }; + + clocks { diff --git a/target/linux/mvebu/patches-4.4/052-ARM-dts-armada-xp-add-buffer-manager-nodes.patch b/target/linux/mvebu/patches-4.4/052-ARM-dts-armada-xp-add-buffer-manager-nodes.patch new file mode 100644 index 0000000000..65fafd7101 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/052-ARM-dts-armada-xp-add-buffer-manager-nodes.patch @@ -0,0 +1,53 @@ +From: Marcin Wojtas <mw@semihalf.com> +Date: Mon, 14 Mar 2016 09:38:59 +0100 +Subject: [PATCH] ARM: dts: armada-xp: add buffer manager nodes + +Armada XP network controller supports hardware buffer management (BM). +Since it is now enabled in mvneta driver, appropriate nodes can be added +to armada-xp.dtsi - for the actual common BM unit (bm@c0000) and its +internal SRAM (bm-bppi), which is used for indirect access to buffer +pointer ring residing in DRAM. + +Pools - ports mapping, bm-bppi entry in 'soc' node's ranges and optional +parameters are supposed to be set in board files. + +Signed-off-by: Marcin Wojtas <mw@semihalf.com> +Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com> +Signed-off-by: David S. Miller <davem@davemloft.net> +--- + +--- a/arch/arm/boot/dts/armada-xp.dtsi ++++ b/arch/arm/boot/dts/armada-xp.dtsi +@@ -253,6 +253,14 @@ + marvell,crypto-sram-size = <0x800>; + }; + ++ bm: bm@c0000 { ++ compatible = "marvell,armada-380-neta-bm"; ++ reg = <0xc0000 0xac>; ++ clocks = <&gateclk 13>; ++ internal-mem = <&bm_bppi>; ++ status = "disabled"; ++ }; ++ + xor@f0900 { + compatible = "marvell,orion-xor"; + reg = <0xF0900 0x100 +@@ -291,6 +299,17 @@ + #size-cells = <1>; + ranges = <0 MBUS_ID(0x09, 0x05) 0 0x800>; + }; ++ ++ bm_bppi: bm-bppi { ++ compatible = "mmio-sram"; ++ reg = <MBUS_ID(0x0c, 0x04) 0 0x100000>; ++ ranges = <0 MBUS_ID(0x0c, 0x04) 0 0x100000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ clocks = <&gateclk 13>; ++ no-memory-wc; ++ status = "disabled"; ++ }; + }; + + clocks { diff --git a/target/linux/mvebu/patches-4.4/103-remove-nand-driver-bug.patch b/target/linux/mvebu/patches-4.4/103-remove-nand-driver-bug.patch new file mode 100644 index 0000000000..e9cc027b3c --- /dev/null +++ b/target/linux/mvebu/patches-4.4/103-remove-nand-driver-bug.patch @@ -0,0 +1,13 @@ +Remove a BUG() call that would crash on a race condition that should +otherwise be harmless. 
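+ +As a sketch of the resulting behavior (assuming the surrounding switch in +handle_data_pio() is otherwise unchanged, as the hunk below suggests), the +default case now just logs the unexpected state instead of halting the +kernel: + + default: + dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, + info->state); + /* no BUG() anymore; the race is assumed to be harmless */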
+ +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -701,7 +701,6 @@ static void handle_data_pio(struct pxa3x + default: + dev_err(&info->pdev->dev, "%s: invalid state %d\n", __func__, + info->state); +- BUG(); + } + + /* Update buffer pointers for multi-page read/write */ diff --git a/target/linux/mvebu/patches-4.4/104-linksys_mamba_disable_keep_config.patch b/target/linux/mvebu/patches-4.4/104-linksys_mamba_disable_keep_config.patch new file mode 100644 index 0000000000..4c6b3115ed --- /dev/null +++ b/target/linux/mvebu/patches-4.4/104-linksys_mamba_disable_keep_config.patch @@ -0,0 +1,10 @@ +--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts ++++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +@@ -298,7 +298,6 @@ + nand@d0000 { + status = "okay"; + num-cs = <1>; +- marvell,nand-keep-config; + marvell,nand-enable-arbiter; + nand-on-flash-bbt; + nand-ecc-strength = <4>; diff --git a/target/linux/mvebu/patches-4.4/105-linksys_armada_385_fix_crypto_sram.patch b/target/linux/mvebu/patches-4.4/105-linksys_armada_385_fix_crypto_sram.patch new file mode 100644 index 0000000000..3963c352f7 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/105-linksys_armada_385_fix_crypto_sram.patch @@ -0,0 +1,13 @@ +--- a/arch/arm/boot/dts/armada-385-linksys.dtsi ++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi +@@ -58,8 +58,8 @@ + soc { + ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 + MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000 +- MBUS_ID(0x09, 0x09) 0 0xf1100000 0x10000 +- MBUS_ID(0x09, 0x05) 0 0xf1110000 0x10000>; ++ MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000 ++ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>; + + internal-regs { + diff --git a/target/linux/mvebu/patches-4.4/106-enable-bm-on-linksys-devices.patch b/target/linux/mvebu/patches-4.4/106-enable-bm-on-linksys-devices.patch new file mode 100644 index 0000000000..e80bc39a69 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/106-enable-bm-on-linksys-devices.patch @@ -0,0 +1,107 @@ +--- a/arch/arm/boot/dts/armada-385-linksys.dtsi ++++ b/arch/arm/boot/dts/armada-385-linksys.dtsi +@@ -59,7 +59,8 @@ + ranges = <MBUS_ID(0xf0, 0x01) 0 0xf1000000 0x100000 + MBUS_ID(0x01, 0x1d) 0 0xfff00000 0x100000 + MBUS_ID(0x09, 0x19) 0 0xf1100000 0x10000 +- MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000>; ++ MBUS_ID(0x09, 0x15) 0 0xf1110000 0x10000 ++ MBUS_ID(0x0c, 0x04) 0 0xf1200000 0x100000>; + + internal-regs { + +@@ -93,6 +94,9 @@ + ethernet@70000 { + status = "okay"; + phy-mode = "rgmii-id"; ++ buffer-manager = <&bm>; ++ bm,pool-long = <0>; ++ bm,pool-short = <3>; + fixed-link { + speed = <1000>; + full-duplex; +@@ -102,6 +106,9 @@ + ethernet@34000 { + status = "okay"; + phy-mode = "sgmii"; ++ buffer-manager = <&bm>; ++ bm,pool-long = <2>; ++ bm,pool-short = <3>; + fixed-link { + speed = <1000>; + full-duplex; +@@ -112,6 +119,10 @@ + status = "okay"; + }; + ++ bm@c8000 { ++ status = "okay"; ++ }; ++ + sata@a8000 { + status = "okay"; + }; +@@ -198,6 +209,10 @@ + }; + }; + ++ bm-bppi { ++ status = "okay"; ++ }; ++ + pcie-controller { + status = "okay"; + +--- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts ++++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +@@ -71,7 +71,8 @@ + ranges = <MBUS_ID(0xf0, 0x01) 0 0 0xf1000000 0x100000 + MBUS_ID(0x01, 0x1d) 0 0 0xfff00000 0x100000 + MBUS_ID(0x09, 0x09) 0 0 0xf1100000 0x10000 +- MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000>; ++ MBUS_ID(0x09, 0x05) 0 0 0xf1110000 0x10000 ++ MBUS_ID(0x0c, 0x04) 0 0 0xf1200000 0x100000>; + + pcie-controller { + status = "okay"; +@@ -205,6 +206,9 @@ + pinctrl-names = 
"default"; + status = "okay"; + phy-mode = "rgmii-id"; ++ buffer-manager = <&bm>; ++ bm,pool-long = <0>; ++ bm,pool-short = <3>; + fixed-link { + speed = <1000>; + full-duplex; +@@ -216,12 +220,19 @@ + pinctrl-names = "default"; + status = "okay"; + phy-mode = "rgmii-id"; ++ buffer-manager = <&bm>; ++ bm,pool-long = <1>; ++ bm,pool-short = <3>; + fixed-link { + speed = <1000>; + full-duplex; + }; + }; + ++ bm@c0000 { ++ status = "okay"; ++ }; ++ + /* USB part of the eSATA/USB 2.0 port */ + usb@50000 { + status = "okay"; +@@ -379,6 +390,10 @@ + }; + }; + }; ++ ++ bm-bppi { ++ status = "okay"; ++ }; + }; + + gpio_keys { diff --git a/target/linux/mvebu/patches-4.4/110-pxa3xxx_nand-handle-PIO-delays.patch b/target/linux/mvebu/patches-4.4/110-pxa3xxx_nand-handle-PIO-delays.patch new file mode 100644 index 0000000000..7fa56970c1 --- /dev/null +++ b/target/linux/mvebu/patches-4.4/110-pxa3xxx_nand-handle-PIO-delays.patch @@ -0,0 +1,36 @@ +Handle delays/excessive latency during flash command processing with PIO. + +Signed-off-by: Felix Fietkau <nbd@nbd.name> + +--- a/drivers/mtd/nand/pxa3xx_nand.c ++++ b/drivers/mtd/nand/pxa3xx_nand.c +@@ -227,6 +227,7 @@ struct pxa3xx_nand_info { + int use_dma; /* use DMA ? */ + int use_spare; /* use spare ? */ + int need_wait; ++ int pio_progress; + + /* Amount of real data per full chunk */ + unsigned int chunk_size; +@@ -769,6 +770,7 @@ static irqreturn_t pxa3xx_nand_irq_threa + { + struct pxa3xx_nand_info *info = data; + ++ info->pio_progress = 1; + handle_data_pio(info); + + info->state = STATE_CMD_DONE; +@@ -1175,8 +1177,13 @@ static void nand_cmdfunc(struct mtd_info + info->need_wait = 1; + pxa3xx_nand_start(info); + ++retry: ++ info->pio_progress = 0; + if (!wait_for_completion_timeout(&info->cmd_complete, + CHIP_DELAY_TIMEOUT)) { ++ if (info->pio_progress) ++ goto retry; ++ + dev_err(&info->pdev->dev, "Wait time out!!!\n"); + /* Stop State Machine for next command cycle */ + pxa3xx_nand_stop(info); diff --git a/target/linux/mvebu/patches-4.4/206-mvebu_wrt1900ac_use_pwm-fan_rather_than_gpio-fan.patch b/target/linux/mvebu/patches-4.4/206-mvebu_wrt1900ac_use_pwm-fan_rather_than_gpio-fan.patch index 740fdcf8cf..bff58e9b75 100644 --- a/target/linux/mvebu/patches-4.4/206-mvebu_wrt1900ac_use_pwm-fan_rather_than_gpio-fan.patch +++ b/target/linux/mvebu/patches-4.4/206-mvebu_wrt1900ac_use_pwm-fan_rather_than_gpio-fan.patch @@ -9,7 +9,7 @@ Signed-off-by: Andrew Lunn <andrew@lunn.ch> --- a/arch/arm/boot/dts/armada-xp-linksys-mamba.dts +++ b/arch/arm/boot/dts/armada-xp-linksys-mamba.dts -@@ -414,13 +414,11 @@ +@@ -428,13 +428,11 @@ }; }; diff --git a/target/linux/mvebu/profiles/000-Default.mk b/target/linux/mvebu/profiles/000-Default.mk index 56608368ee..7f71f3e01f 100644 --- a/target/linux/mvebu/profiles/000-Default.mk +++ b/target/linux/mvebu/profiles/000-Default.mk @@ -7,16 +7,7 @@ define Profile/Default NAME:=Default Profile (all drivers) - PACKAGES:= \ - kmod-mmc kmod-mvsdio swconfig \ - kmod-usb2 kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada \ - kmod-gpio-button-hotplug kmod-hwmon-tmp421 \ - kmod-hwmon-pwmfan kmod-leds-tlc59116 \ - kmod-ledtrig-usbdev kmod-mwlwifi wpad-mini \ - kmod-ata-mvebu-ahci + PACKAGES:= kmod-mwlwifi wpad-mini swconfig endef define Profile/Default/Description diff --git a/target/linux/mvebu/profiles/globalscale.mk b/target/linux/mvebu/profiles/globalscale.mk index 7938c35be3..1b9f780e79 100644 --- a/target/linux/mvebu/profiles/globalscale.mk 
+++ b/target/linux/mvebu/profiles/globalscale.mk @@ -7,11 +7,7 @@ define Profile/Mirabox NAME:=Globalscale Mirabox - PACKAGES:= \ - kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-rtc-marvell kmod-thermal-armada \ - kmod-gpio-button-hotplug kmod-mmc kmod-mvsdio + PACKAGES:= endef define Profile/Mirabox/Description diff --git a/target/linux/mvebu/profiles/linksys.mk b/target/linux/mvebu/profiles/linksys.mk index 9c954bd305..41f9c7d5bb 100644 --- a/target/linux/mvebu/profiles/linksys.mk +++ b/target/linux/mvebu/profiles/linksys.mk @@ -7,15 +7,7 @@ define Profile/Caiman NAME:=Linksys WRT1200AC (Caiman) - PACKAGES:= \ - kmod-usb2 kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-mvebu-ahci \ - kmod-rtc-armada38x kmod-thermal-armada \ - kmod-gpio-button-hotplug kmod-hwmon-tmp421 \ - kmod-leds-pca963x \ - kmod-ledtrig-usbdev kmod-mwlwifi wpad-mini \ - swconfig + PACKAGES:= kmod-mwlwifi wpad-mini swconfig endef define Profile/Caiman/Description @@ -27,15 +19,7 @@ $(eval $(call Profile,Caiman)) define Profile/Cobra NAME:=Linksys WRT1900ACv2 (Cobra) - PACKAGES:= \ - kmod-usb2 kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-mvebu-ahci \ - kmod-rtc-armada38x kmod-thermal-armada \ - kmod-gpio-button-hotplug kmod-hwmon-tmp421 \ - kmod-leds-pca963x \ - kmod-ledtrig-usbdev kmod-mwlwifi wpad-mini \ - swconfig + PACKAGES:= kmod-mwlwifi wpad-mini swconfig endef define Profile/Cobra/Description @@ -47,15 +31,7 @@ $(eval $(call Profile,Cobra)) define Profile/Mamba NAME:=Linksys WRT1900AC (Mamba) - PACKAGES:= \ - kmod-usb2 kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada \ - kmod-gpio-button-hotplug kmod-hwmon-tmp421 \ - kmod-hwmon-pwmfan kmod-leds-tlc591xx \ - kmod-ledtrig-usbdev kmod-mwlwifi wpad-mini \ - swconfig + PACKAGES:= kmod-mwlwifi wpad-mini swconfig endef define Profile/Mamba/Description @@ -67,15 +43,7 @@ $(eval $(call Profile,Mamba)) define Profile/Shelby NAME:=Linksys WRT1900ACS (Shelby) - PACKAGES:= \ - kmod-usb2 kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-mvebu-ahci \ - kmod-rtc-armada38x kmod-thermal-armada \ - kmod-gpio-button-hotplug kmod-hwmon-tmp421 \ - kmod-leds-pca963x \ - kmod-ledtrig-usbdev kmod-mwlwifi wpad-mini \ - swconfig + PACKAGES:= kmod-mwlwifi wpad-mini swconfig endef define Profile/Shelby/Description diff --git a/target/linux/mvebu/profiles/marvell.mk b/target/linux/mvebu/profiles/marvell.mk index e8ca9bd250..d4a1701c13 100644 --- a/target/linux/mvebu/profiles/marvell.mk +++ b/target/linux/mvebu/profiles/marvell.mk @@ -7,11 +7,7 @@ define Profile/370-DB NAME:=Marvell Armada 370 DB (DB-88F6710-BP-DDR3) - PACKAGES:= \ - kmod-mmc kmod-mvsdio kmod-usb2 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada + PACKAGES:= endef define Profile/370-DB/Description @@ -22,11 +18,7 @@ $(eval $(call Profile,370-DB)) define Profile/370-RD NAME:=Marvell Armada 370 RD (RD-88F6710-A1) - PACKAGES:= \ - kmod-mmc kmod-mvsdio kmod-usb2 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada + PACKAGES:= endef define Profile/370-RD/Description @@ -37,10 +29,7 @@ $(eval $(call Profile,370-RD)) define Profile/385-RD NAME:=Marvell Armada 385 RD (RD-88F6820-AP) - PACKAGES:= \ - kmod-mmc kmod-mvsdio kmod-usb3 
kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-thermal-armada + PACKAGES:= endef define Profile/385-RD/Description @@ -51,11 +40,7 @@ $(eval $(call Profile,385-RD)) define Profile/385-DB-AP NAME:=Marvell Armada 385 DB AP (DB-88F6820-AP) - PACKAGES:= \ - kmod-usb3 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-thermal-armada + PACKAGES:= endef define Profile/385-DB-AP/Description @@ -66,11 +51,7 @@ $(eval $(call Profile,385-DB-AP)) define Profile/XP-DB NAME:=Marvell Armada XP DB (DB-78460-BP) - PACKAGES:= \ - kmod-mmc kmod-mvsdio kmod-usb2 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada + PACKAGES:= endef define Profile/XP-DB/Description @@ -81,11 +62,7 @@ $(eval $(call Profile,XP-DB)) define Profile/XP-GP NAME:=Marvell Armada XP GP (DB-MV784MP-GP) - PACKAGES:= \ - kmod-mmc kmod-mvsdio kmod-usb2 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada + PACKAGES:= endef define Profile/XP-GP/Description diff --git a/target/linux/mvebu/profiles/plathome.mk b/target/linux/mvebu/profiles/plathome.mk index 63cdb71139..02503a190a 100644 --- a/target/linux/mvebu/profiles/plathome.mk +++ b/target/linux/mvebu/profiles/plathome.mk @@ -7,11 +7,7 @@ define Profile/OpenBlocks-AX-3-4 NAME:=Plat'Home OpenBlocks AX3 - PACKAGES:= \ - kmod-usb2 kmod-usb-storage \ - kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-rtc-marvell kmod-thermal-armada + PACKAGES:= endef define Profile/OpenBlocks-AX-3-4/Description diff --git a/target/linux/mvebu/profiles/solidrun.mk b/target/linux/mvebu/profiles/solidrun.mk index 5aa61e28a2..e66d2cbc7d 100644 --- a/target/linux/mvebu/profiles/solidrun.mk +++ b/target/linux/mvebu/profiles/solidrun.mk @@ -7,11 +7,7 @@ define Profile/Solidrun-Clearfog-A1 NAME:=SolidRun ClearFog A1 board - PACKAGES:= \ - kmod-usb3 kmod-usb2 kmod-usb-storage \ - kmod-of-i2c kmod-i2c-core kmod-i2c-mv64xxx \ - kmod-ata-core kmod-ata-marvell-sata \ - kmod-thermal-armada kmod-rtc-marvell + PACKAGES:= endef define Profile/Solidrun-Clearfog-A1/Description diff --git a/target/linux/sunxi/config-4.4 b/target/linux/sunxi/config-4.4 index 74b009a0a2..e97538de9c 100644 --- a/target/linux/sunxi/config-4.4 +++ b/target/linux/sunxi/config-4.4 @@ -57,11 +57,11 @@ CONFIG_AUDIT=y CONFIG_AUDIT_GENERIC=y CONFIG_AUTO_ZRELADDR=y CONFIG_AXP20X_POWER=y -CONFIG_B53=y -# CONFIG_B53_MMAP_DRIVER is not set -CONFIG_B53_PHY_DRIVER=y -CONFIG_B53_PHY_FIXUP=y -# CONFIG_B53_SRAB_DRIVER is not set +CONFIG_SWCONFIG_B53=y +# CONFIG_SWCONFIG_B53_MMAP_DRIVER is not set +CONFIG_SWCONFIG_B53_PHY_DRIVER=y +CONFIG_SWCONFIG_B53_PHY_FIXUP=y +# CONFIG_SWCONFIG_B53_SRAB_DRIVER is not set CONFIG_BACKLIGHT_CLASS_DEVICE=y CONFIG_BACKLIGHT_LCD_SUPPORT=y CONFIG_BACKLIGHT_PWM=y