Diffstat (limited to 'target/linux/ixp4xx/patches/100-npe_driver.patch')
-rw-r--r-- | target/linux/ixp4xx/patches/100-npe_driver.patch | 4881 |
1 file changed, 4881 insertions, 0 deletions
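
The two host tools added under Documentation/networking/ixp4xx by this patch
(the IxNpeMicrocode.h build helper and mc_grab.c) both walk the same NPE image
layout: a big-endian magic word 0xfeedf00d, an ID word, a length in 32-bit
words, and then the payload. A minimal decoder for one image header, sketched
from the tools' print routines below (the names here are illustrative, not
part of the patch):

	#include <stdio.h>

	struct npe_img_hdr {
		unsigned char magic[4];	/* 0xfe 0xed 0xf0 0x0d */
		unsigned char id[4];	/* device, NPE, function, revision */
		unsigned char size[4];	/* payload length in 32-bit words */
	};

	/* Byte 0 of the big-endian ID word is the most significant:
	 * high nibble = device (0 = IXP425, 1 = IXP465), low nibble =
	 * NPE index; the next bytes are function id and major.minor rev. */
	static void print_hdr(const struct npe_img_hdr *h)
	{
		const char *names[] = { "IXP425", "IXP465", "unknown" };
		unsigned dev = h->id[0] >> 4;

		printf("%s:NPE_%c func %02x rev %02x.%02x\n",
		       names[dev < 2 ? dev : 2], (h->id[0] & 0xf) + 'A',
		       h->id[1], h->id[2], h->id[3]);
	}
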
diff --git a/target/linux/ixp4xx/patches/100-npe_driver.patch b/target/linux/ixp4xx/patches/100-npe_driver.patch
new file mode 100644
index 0000000000..78fb119bcd
--- /dev/null
+++ b/target/linux/ixp4xx/patches/100-npe_driver.patch
@@ -0,0 +1,4881 @@
+diff --git a/Documentation/networking/ixp4xx/IxNpeMicrocode.h b/Documentation/networking/ixp4xx/IxNpeMicrocode.h
+new file mode 100644
+index 0000000..e5a4bd3
+Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/IxNpeMicrocode.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/IxNpeMicrocode.h	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,143 @@
++/*
++ * IxNpeMicrocode.h - Header file for compiling the Intel microcode C file
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ *
++ *
++ * Compile with
++ *
++ *  gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode
++ *
++ * Executing the resulting binary on your build host creates the
++ * "NPE-[ABC].xxxxxxxx" files containing the selected microcode
++ *
++ * Fetch IxNpeMicrocode.c from the Intel Access Library.
++ * It will include this header.
++ *
++ * Select an image for every NPE from the following list
++ * (C++ comments are used for easy uncommenting)
++ */
++
++// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_TSLOT_SWITCH
++// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
++// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
++// #define IX_NPEDL_NPEIMAGE_NPEA_ETH_LEARN_FILTER_SPAN_FIREWALL
++// #define IX_NPEDL_NPEIMAGE_NPEA_HSS_2_PORT
++// #define IX_NPEDL_NPEIMAGE_NPEA_DMA
++// #define IX_NPEDL_NPEIMAGE_NPEA_ATM_MPHY_12_PORT
++// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_MPHY_1_PORT
++// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0_ATM_SPHY_1_PORT
++// #define IX_NPEDL_NPEIMAGE_NPEA_HSS0
++// #define IX_NPEDL_NPEIMAGE_NPEA_WEP
++
++
++// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
++//#define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEB_DMA
++// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
++// #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
++ #define IX_NPEDL_NPEIMAGE_NPEB_ETH_LEARN_FILTER_SPAN_FIREWALL
++
++
++// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_MASK_FIREWALL_VLAN_QOS_HDR_CONV_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_VLAN_QOS_HDR_CONV_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_MASK_FIREWALL_VLAN_QOS_EXTMIB
++// #define IX_NPEDL_NPEIMAGE_NPEC_DMA
++// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_SPAN
++// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_ETH_LEARN_FILTER_FIREWALL
++ #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_AES_CCM_ETH
++// #define IX_NPEDL_NPEIMAGE_NPEC_CRYPTO_ETH_LEARN_FILTER_SPAN_FIREWALL
++// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_SPAN_FIREWALL_VLAN_QOS_HDR_CONV
++// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL_VLAN_QOS
++// #define IX_NPEDL_NPEIMAGE_NPEC_ETH_LEARN_FILTER_SPAN_FIREWALL
++
++
++#include <stdio.h>
++#include <unistd.h>
++#include <stdlib.h>
++#include <netinet/in.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <endian.h>
++#include <byteswap.h>
++#include <string.h>
++
++#if __BYTE_ORDER == __LITTLE_ENDIAN
++#define to_le32(x) (x)
++#define to_be32(x) bswap_32(x)
++#else
++#define to_be32(x) (x)
++#define to_le32(x) bswap_32(x)
++#endif
++
++struct dl_image {
++	unsigned magic;
++	unsigned id;
++	unsigned size;
++	unsigned data[0];
++};
++
++const unsigned IxNpeMicrocode_array[];
++
++int main(int argc, char *argv[])
++{
++	struct dl_image *image = (struct dl_image *)IxNpeMicrocode_array;
++	int imgsiz, i, fd, cnt;
++	const unsigned *arrayptr = IxNpeMicrocode_array;
++	const char *names[] = { "IXP425", "IXP465", "unknown" };
++	int bigendian = 1;
++
++	if (argc > 1) {
++		if (!strcmp(argv[1], "-le"))
++			bigendian = 0;
++		else if (!strcmp(argv[1], "-be"))
++			bigendian = 1;
++		else {
++			printf("Usage: %s <-le|-be>\n", argv[0]);
++			return EXIT_FAILURE;
++		}
++	}
++
++	for (image = (struct dl_image *)arrayptr, cnt=0;
++		(image->id != 0xfeedf00d) && (image->magic == 0xfeedf00d);
++		image = (struct dl_image *)(arrayptr), cnt++)
++	{
++		unsigned char field[4];
++		imgsiz = image->size + 3;
++		*(unsigned*)field = to_be32(image->id);
++		char filename[40], slnk[10];
++
++		sprintf(filename, "NPE-%c.%08x", (field[0] & 0xf) + 'A',
++			image->id);
++		sprintf(slnk, "NPE-%c", (field[0] & 0xf) + 'A');
++		printf("Writing image: %s.NPE_%c Func: %2x Rev: %02x.%02x "
++			"Size: %5d to: '%s'\n",
++			names[field[0] >> 4], (field[0] & 0xf) + 'A',
++			field[1], field[2], field[3], imgsiz*4, filename);
++		fd = open(filename, O_CREAT | O_RDWR | O_TRUNC, 0644);
++		if (fd >= 0) {
++			for (i=0; i<imgsiz; i++) {
++				*(unsigned*)field = bigendian ?
++					to_be32(arrayptr[i]) :
++					to_le32(arrayptr[i]);
++				write(fd, field, sizeof(field));
++			}
++			close(fd);
++			unlink(slnk);
++			symlink(filename, slnk);
++		} else {
++			perror(filename);
++		}
++		arrayptr += imgsiz;
++	}
++	close(fd);
++	return 0;
++}
+Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/README
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/README	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,62 @@
++Information about the networking driver using the IXP4XX CPU internal NPEs
++and queue manager.
++
++If this driver is used, the IAL (Intel Access Library) must not be loaded.
++However, the IAL may be loaded if these modules are unloaded:
++  ixp4xx_npe.ko, ixp4xx_qmgr.ko, ixp4xx_mac.ko
++
++This also means that HW crypto acceleration does NOT work when using this
++driver, unless I have finished my crypto driver for NPE-C.
++
++
++Adapting to your custom board:
++------------------------------
++Use "arch/arm/mach-ixp4xx/ixdp425-setup.c" as a template:
++
++In "static struct mac_plat_info", adapt the entry "phy_id" to your needs
++(ask your hardware designer about the PHY id).
++
++The order of "&mac0" and "&mac1" in the "struct platform_device"
++determines which of them becomes eth0 and which eth1.
++
++
++The Microcode:
++---------------
++Solution 1)
++  Configure "CONFIG_HOTPLUG" and "CONFIG_FW_LOADER" and configure
++  IXP4XX_NPE as a module.
++  The default hotplug script will load the firmware from
++  /usr/lib/hotplug/firmware/NPE-[ABC]
++  see Documentation/firmware_class/hotplug-script
++
++  You should take care that $ACTION is "add" and $SUBSYSTEM is "firmware"
++  to avoid unnecessary calls:
++  test $ACTION = "remove" -o $SUBSYSTEM != "firmware" && exit
++
++Solution 2)
++  Create a char-dev: "mknod /dev/misc/npe c 10 184"
++  cat the microcode into it:
++  cat /usr/lib/hotplug/firmware/NPE-* > /dev/misc/npe
++  This also works if the driver is linked into the kernel.
++
++  Having a mix of both (e.g. solution 1 for NPE-B and solution 2 for NPE-C)
++  is perfectly ok and works.
++
++  The state of the NPEs can be seen and changed at:
++  /sys/bus/platform/devices/ixp4xx_npe.X/state
++
++
++Obtaining the Microcode:
++------------------------
++1) IxNpeMicrocode.h in this directory:
++   Download IPL_IXP400NPELIBRARYWITHCRYPTO-2_1.ZIP from Intel.
++   Unpacking it yields the microcode source IxNpeMicrocode.c.
++   Read the licence!
++   Compile it with "gcc -Wall IxNpeMicrocode.c -o IxNpeMicrocode" on your host.
++   The resulting images can be moved to "/usr/lib/hotplug/firmware".
++
++2) mc_grab.c in this directory:
++   Compile and execute it either on the host or on the target
++   to grab the microcode from a binary image like the RedBoot bootloader.
++
++
+Index: linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/mc_grab.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/Documentation/networking/ixp4xx/mc_grab.c	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,97 @@
++/*
++ * mc_grab.c - grabs IXP4XX microcode from a binary data stream,
++ * e.g. the RedBoot bootloader.
++ *
++ * Usage: mc_grab 1010200 2010200 < /dev/mtd/0 > /dev/misc/npe
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++
++#include <stdlib.h>
++#include <stdio.h>
++#include <unistd.h>
++#include <netinet/in.h>
++#include <sys/types.h>
++#include <sys/stat.h>
++#include <fcntl.h>
++#include <errno.h>
++#include <string.h>
++
++#define MAX_IMG 6
++
++static void print_mc_info(unsigned id, int siz)
++{
++	unsigned char buf[sizeof(unsigned)];
++	*(unsigned*)buf = id;
++	unsigned idx;
++	const char *names[] = { "IXP425", "IXP465", "unknown" };
++
++	idx = (buf[0] >> 4) < 2 ?
(buf[0] >> 4) : 2; ++ ++ fprintf(stderr, "Device: %s:NPE_%c Func: %2x Rev: %02x.%02x " ++ "Size: %5d bytes ID:%08x\n", names[idx], (buf[0] & 0xf)+'A', ++ buf[1], buf[2], buf[3], siz*4, ntohl(id)); ++} ++ ++int main(int argc, char *argv[]) ++{ ++ int i,j; ++ unsigned char buf[sizeof(unsigned)]; ++ unsigned magic = htonl(0xfeedf00d); ++ unsigned id, my_ids[MAX_IMG+1], siz, sizbe; ++ int ret=1, verbose=0; ++ ++ for (i=0, j=0; i<argc-1 && j<MAX_IMG; i++) { ++ if (!strcmp(argv[i+1], "-v")) ++ verbose = 1; ++ else ++ my_ids[j++] = htonl(strtoul(argv[i+1], NULL, 16)); ++ } ++ my_ids[j] = 0; ++ if (my_ids[0] == 0 && !verbose) { ++ fprintf(stderr, "Usage: %s <-v> [ID1] [ID2] [IDn]\n", argv[0]); ++ return 1; ++ } ++ ++ while ((ret=read(0, buf, sizeof(unsigned))) == sizeof(unsigned)) { ++ if (*(unsigned*)buf != magic) ++ continue; ++ if ((ret=read(0, buf, sizeof(unsigned))) != sizeof(unsigned) ) ++ break; ++ id = *(unsigned*)buf; ++ ++ if (read(0, buf, sizeof(siz)) != sizeof(siz) ) ++ break; ++ sizbe = *(unsigned*)buf; ++ siz = ntohl(sizbe); ++ ++ if (verbose) ++ print_mc_info(id, siz); ++ ++ for(i=0; my_ids[i]; i++) ++ if (id == my_ids[i]) ++ break; ++ if (!my_ids[i]) ++ continue; ++ ++ if (!verbose) ++ print_mc_info(id, siz); ++ ++ write(1, &magic, sizeof(magic)); ++ write(1, &id, sizeof(id)); ++ write(1, &sizbe, sizeof(sizbe)); ++ for (i=0; i<siz; i++) { ++ if (read(0, buf, sizeof(unsigned)) != sizeof(unsigned)) ++ break; ++ write(1, buf, sizeof(unsigned)); ++ } ++ if (i != siz) ++ break; ++ } ++ if (ret) ++ fprintf(stderr, "Error reading Microcode\n"); ++ return ret; ++} +Index: linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/common.c +=================================================================== +--- linux-2.6.21-rc1-arm.orig/arch/arm/mach-ixp4xx/common.c 2007-02-21 02:24:18.000000000 -0800 ++++ linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/common.c 2007-02-21 02:24:35.000000000 -0800 +@@ -357,6 +357,90 @@ + &ixp46x_i2c_controller + }; + ++static struct npe_plat_data npea = { ++ .name = "NPE-A", ++ .data_size = 0x800, ++ .inst_size = 0x1000, ++ .id = 0, ++}; ++ ++static struct npe_plat_data npeb = { ++ .name = "NPE-B", ++ .data_size = 0x800, ++ .inst_size = 0x800, ++ .id = 1, ++}; ++ ++static struct npe_plat_data npec = { ++ .name = "NPE-C", ++ .data_size = 0x800, ++ .inst_size = 0x800, ++ .id = 2, ++}; ++ ++static struct resource res_npea = { ++ .start = IXP4XX_NPEA_BASE_PHYS, ++ .end = IXP4XX_NPEA_BASE_PHYS + 0xfff, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct resource res_npeb = { ++ .start = IXP4XX_NPEB_BASE_PHYS, ++ .end = IXP4XX_NPEB_BASE_PHYS + 0xfff, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct resource res_npec = { ++ .start = IXP4XX_NPEC_BASE_PHYS, ++ .end = IXP4XX_NPEC_BASE_PHYS + 0xfff, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct platform_device dev_npea = { ++ .name = "ixp4xx_npe", ++ .id = 0, ++ .dev.platform_data = &npea, ++ .num_resources = 1, ++ .resource = &res_npea, ++}; ++ ++static struct platform_device dev_npeb = { ++ .name = "ixp4xx_npe", ++ .id = 1, ++ .dev.platform_data = &npeb, ++ .num_resources = 1, ++ .resource = &res_npeb, ++}; ++ ++static struct platform_device dev_npec = { ++ .name = "ixp4xx_npe", ++ .id = 2, ++ .dev.platform_data = &npec, ++ .num_resources = 1, ++ .resource = &res_npec, ++}; ++ ++/* QMGR */ ++static struct resource res_qmgr[] = { ++{ ++ .start = IXP4XX_QMGR_BASE_PHYS, ++ .end = IXP4XX_QMGR_BASE_PHYS + IXP4XX_QMGR_REGION_SIZE -1, ++ .flags = IORESOURCE_MEM, ++}, { ++ .start = IRQ_IXP4XX_QM1, ++ .flags = IORESOURCE_IRQ, ++} }; ++ 
++static struct platform_device qmgr = { ++ .name = "ixp4xx_qmgr", ++ .id = 0, ++ .dev = { ++ .coherent_dma_mask = DMA_32BIT_MASK, ++ }, ++ .num_resources = ARRAY_SIZE(res_qmgr), ++ .resource = res_qmgr, ++}; ++ + unsigned long ixp4xx_exp_bus_size; + EXPORT_SYMBOL(ixp4xx_exp_bus_size); + +@@ -378,8 +462,19 @@ + break; + } + } ++ npeb.inst_size = 0x1000; ++ npec.inst_size = 0x1000; + } + ++ platform_device_register(&qmgr); ++ ++ if (ix_fuse() & IX_FUSE_NPEA) ++ platform_device_register(&dev_npea); ++ if (ix_fuse() & IX_FUSE_NPEB) ++ platform_device_register(&dev_npeb); ++ if (ix_fuse() & IX_FUSE_NPEC) ++ platform_device_register(&dev_npec); ++ + printk("IXP4xx: Using %luMiB expansion bus window size\n", + ixp4xx_exp_bus_size >> 20); + } +Index: linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/ixdp425-setup.c +=================================================================== +--- linux-2.6.21-rc1-arm.orig/arch/arm/mach-ixp4xx/ixdp425-setup.c 2007-02-21 02:24:18.000000000 -0800 ++++ linux-2.6.21-rc1-arm/arch/arm/mach-ixp4xx/ixdp425-setup.c 2007-02-21 02:24:35.000000000 -0800 +@@ -101,10 +101,59 @@ + .resource = ixdp425_uart_resources + }; + ++/* MACs */ ++static struct resource res_mac0 = { ++ .start = IXP4XX_EthB_BASE_PHYS, ++ .end = IXP4XX_EthB_BASE_PHYS + 0x1ff, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct resource res_mac1 = { ++ .start = IXP4XX_EthC_BASE_PHYS, ++ .end = IXP4XX_EthC_BASE_PHYS + 0x1ff, ++ .flags = IORESOURCE_MEM, ++}; ++ ++static struct mac_plat_info plat_mac0 = { ++ .npe_id = 1, ++ .phy_id = 0, ++ .eth_id = 0, ++ .rxq_id = 27, ++ .txq_id = 24, ++ .rxdoneq_id = 4, ++}; ++ ++static struct mac_plat_info plat_mac1 = { ++ .npe_id = 2, ++ .phy_id = 1, ++ .eth_id = 1, ++ .rxq_id = 28, ++ .txq_id = 25, ++ .rxdoneq_id = 5, ++}; ++ ++static struct platform_device mac0 = { ++ .name = "ixp4xx_mac", ++ .id = 0, ++ .dev.platform_data = &plat_mac0, ++ .num_resources = 1, ++ .resource = &res_mac0, ++}; ++ ++static struct platform_device mac1 = { ++ .name = "ixp4xx_mac", ++ .id = 1, ++ .dev.platform_data = &plat_mac1, ++ .num_resources = 1, ++ .resource = &res_mac1, ++}; ++ + static struct platform_device *ixdp425_devices[] __initdata = { + &ixdp425_i2c_controller, + &ixdp425_flash, +- &ixdp425_uart ++ &ixdp425_uart, ++ &mac0, ++ &mac1, + }; + + static void __init ixdp425_init(void) +Index: linux-2.6.21-rc1-arm/drivers/net/Kconfig +=================================================================== +--- linux-2.6.21-rc1-arm.orig/drivers/net/Kconfig 2007-02-21 02:24:18.000000000 -0800 ++++ linux-2.6.21-rc1-arm/drivers/net/Kconfig 2007-02-21 02:24:35.000000000 -0800 +@@ -201,6 +201,8 @@ + + source "drivers/net/arm/Kconfig" + ++source "drivers/net/ixp4xx/Kconfig" ++ + config MACE + tristate "MACE (Power Mac ethernet) support" + depends on NET_ETHERNET && PPC_PMAC && PPC32 +Index: linux-2.6.21-rc1-arm/drivers/net/Makefile +=================================================================== +--- linux-2.6.21-rc1-arm.orig/drivers/net/Makefile 2007-02-21 02:24:18.000000000 -0800 ++++ linux-2.6.21-rc1-arm/drivers/net/Makefile 2007-02-21 02:24:35.000000000 -0800 +@@ -212,6 +212,7 @@ + obj-$(CONFIG_IRDA) += irda/ + obj-$(CONFIG_ETRAX_ETHERNET) += cris/ + obj-$(CONFIG_ENP2611_MSF_NET) += ixp2000/ ++obj-$(CONFIG_IXP4XX_NPE) += ixp4xx/ + + obj-$(CONFIG_NETCONSOLE) += netconsole.o + +Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Kconfig +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ 
linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Kconfig	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,48 @@
++config IXP4XX_QMGR
++	tristate "IXP4xx Queue Manager support"
++	depends on ARCH_IXP4XX
++	depends on NET_ETHERNET
++	help
++	  The IXP4XX queue manager is a configurable hardware ring buffer.
++	  It is used by the NPEs to exchange data to and from the CPU.
++	  You can either use this OR the Intel Access Library (IAL).
++
++config IXP4XX_NPE
++	tristate "IXP4xx NPE support"
++	depends on ARCH_IXP4XX
++	depends on NET_ETHERNET
++	help
++	  The IXP4XX NPE driver supports the 3 CPU co-processors called
++	  "Network Processing Engines" (NPE). It adds support for downloading
++	  the microcode (firmware) via hotplug or a character special device.
++	  More about this at: Documentation/networking/ixp4xx/README.
++	  You can either use this OR the Intel Access Library (IAL).
++
++config IXP4XX_FW_LOAD
++	bool "Use Firmware hotplug for Microcode download"
++	depends on IXP4XX_NPE
++	select HOTPLUG
++	select FW_LOADER
++	help
++	  The default hotplug script will load the firmware from
++	  /usr/lib/hotplug/firmware/NPE-[ABC]
++	  see Documentation/firmware_class/hotplug-script
++
++config IXP4XX_MAC
++	tristate "IXP4xx MAC support"
++	depends on IXP4XX_NPE
++	depends on IXP4XX_QMGR
++	depends on NET_ETHERNET
++	select MII
++	help
++	  The IXP4XX MAC driver supports the MACs on the IXP4XX CPUs.
++	  There are 2 on ixp425 and up to 5 on ixdp465.
++	  You can either use this OR the Intel Access Library (IAL).
++
++config IXP4XX_CRYPTO
++	tristate "IXP4xx crypto support"
++	depends on IXP4XX_NPE
++	depends on IXP4XX_QMGR
++	help
++	  This driver is a generic NPE crypto access layer.
++	  You need additional code, for example in OCF.
+Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Makefile
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/Makefile	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,7 @@
++obj-$(CONFIG_IXP4XX_QMGR) += ixp4xx_qmgr.o
++obj-$(CONFIG_IXP4XX_NPE) += ixp4xx_npe.o
++obj-$(CONFIG_IXP4XX_MAC) += ixp4xx_mac.o
++obj-$(CONFIG_IXP4XX_CRYPTO) += ixp4xx_crypto.o
++
++ixp4xx_npe-objs := ucode_dl.o npe_mh.o npe.o
++ixp4xx_mac-objs := mac_driver.o phy.o
+Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_crypto.c	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,851 @@
++/*
++ * ixp4xx_crypto.c - interface to the HW crypto
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#include <linux/ixp_qmgr.h>
++#include <linux/ixp_npe.h>
++#include <linux/dma-mapping.h>
++#include <linux/dmapool.h>
++#include <linux/device.h>
++#include <linux/delay.h>
++#include <linux/slab.h>
++#include <linux/kernel.h>
++#include <linux/ixp_crypto.h>
++
++#define SEND_QID 29
++#define RECV_QID 30
++
++#define NPE_ID 2 /* NPE C */
++
++#define QUEUE_SIZE 64
++#define MY_VERSION "0.0.1"
++
++/* local head for all sa_ctx */
++static struct ix_sa_master sa_master;
++
++static const struct ix_hash_algo _hash_algos[] = {
++{
++	.name = "MD5",
++	.cfgword = 0xAA010004,
++	.digest_len = 16,
++	.icv = "\x01\x23\x45\x67\x89\xAB\xCD\xEF"
++		"\xFE\xDC\xBA\x98\x76\x54\x32\x10",
++	.type = HASH_TYPE_MD5,
++},{
++	.name = "SHA1",
++	.cfgword = 0x00000005,
++	.digest_len = 20,
++	.icv =
"\x67\x45\x23\x01\xEF\xCD\xAB\x89\x98\xBA" ++ "\xDC\xFE\x10\x32\x54\x76\xC3\xD2\xE1\xF0", ++ .type = HASH_TYPE_SHA1, ++#if 0 ++},{ ++ .name = "CBC MAC", ++ .digest_len = 64, ++ .aad_len = 48, ++ .type = HASH_TYPE_CBCMAC, ++#endif ++} }; ++ ++static const struct ix_cipher_algo _cipher_algos[] = { ++{ ++ .name = "DES ECB", ++ .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_ECB | KEYLEN_192, ++ .cfgword_dec = CIPH_DECR | MOD_DES | MOD_ECB | KEYLEN_192, ++ .block_len = 8, ++ .type = CIPHER_TYPE_DES, ++ .mode = CIPHER_MODE_ECB, ++},{ ++ .name = "DES CBC", ++ .cfgword_enc = CIPH_ENCR | MOD_DES | MOD_CBC_ENC | KEYLEN_192, ++ .cfgword_dec = CIPH_DECR | MOD_DES | MOD_CBC_DEC | KEYLEN_192, ++ .iv_len = 8, ++ .block_len = 8, ++ .type = CIPHER_TYPE_DES, ++ .mode = CIPHER_MODE_CBC, ++},{ ++ .name = "3DES ECB", ++ .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_ECB | KEYLEN_192, ++ .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_ECB | KEYLEN_192, ++ .block_len = 8, ++ .type = CIPHER_TYPE_3DES, ++ .mode = CIPHER_MODE_ECB, ++},{ ++ .name = "3DES CBC", ++ .cfgword_enc = CIPH_ENCR | MOD_TDEA3 | MOD_CBC_ENC | KEYLEN_192, ++ .cfgword_dec = CIPH_DECR | MOD_TDEA3 | MOD_CBC_DEC | KEYLEN_192, ++ .iv_len = 8, ++ .block_len = 8, ++ .type = CIPHER_TYPE_3DES, ++ .mode = CIPHER_MODE_CBC, ++},{ ++ .name = "AES ECB", ++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_ECB, ++ .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_ECB, ++ .block_len = 16, ++ .type = CIPHER_TYPE_AES, ++ .mode = CIPHER_MODE_ECB, ++},{ ++ .name = "AES CBC", ++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CBC_ENC, ++ .cfgword_dec = CIPH_DECR | ALGO_AES | MOD_CBC_DEC, ++ .block_len = 16, ++ .iv_len = 16, ++ .type = CIPHER_TYPE_AES, ++ .mode = CIPHER_MODE_CBC, ++},{ ++ .name = "AES CTR", ++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CTR, ++ .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CTR, ++ .block_len = 16, ++ .iv_len = 16, ++ .type = CIPHER_TYPE_AES, ++ .mode = CIPHER_MODE_CTR, ++#if 0 ++},{ ++ .name = "AES CCM", ++ .cfgword_enc = CIPH_ENCR | ALGO_AES | MOD_CCM_ENC, ++ .cfgword_dec = CIPH_ENCR | ALGO_AES | MOD_CCM_DEC, ++ .block_len = 16, ++ .iv_len = 16, ++ .type = CIPHER_TYPE_AES, ++ .mode = CIPHER_MODE_CCM, ++#endif ++} }; ++ ++const struct ix_hash_algo *ix_hash_by_id(int type) ++{ ++ int i; ++ ++ for(i=0; i<ARRAY_SIZE(_hash_algos); i++) { ++ if (_hash_algos[i].type == type) ++ return _hash_algos + i; ++ } ++ return NULL; ++} ++ ++const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode) ++{ ++ int i; ++ ++ for(i=0; i<ARRAY_SIZE(_cipher_algos); i++) { ++ if (_cipher_algos[i].type==type && _cipher_algos[i].mode==mode) ++ return _cipher_algos + i; ++ } ++ return NULL; ++} ++ ++static void irqcb_recv(struct qm_queue *queue); ++ ++static int init_sa_master(struct ix_sa_master *master) ++{ ++ struct npe_info *npe; ++ int ret = -ENODEV; ++ ++ if (! (ix_fuse() & (IX_FUSE_HASH | IX_FUSE_AES | IX_FUSE_DES))) { ++ printk(KERN_ERR "ixp_crypto: No HW crypto available\n"); ++ return ret; ++ } ++ memset(master, 0, sizeof(struct ix_sa_master)); ++ master->npe_dev = get_npe_by_id(NPE_ID); ++ if (! 
master->npe_dev)
++		goto err;
++
++	npe = dev_get_drvdata(master->npe_dev);
++
++	if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
++		switch (npe->img_info[1]) {
++		case 4:
++			printk(KERN_INFO "Crypto AES available\n");
++			break;
++		case 5:
++			printk(KERN_INFO "Crypto AES and CCM available\n");
++			break;
++		default:
++			printk(KERN_WARNING "Current microcode for %s has no"
++				" crypto capabilities\n", npe->plat->name);
++			break;
++		}
++	}
++	rwlock_init(&master->lock);
++	master->dmapool = dma_pool_create("ixp4xx_crypto", master->npe_dev,
++			sizeof(struct npe_crypt_cont), 32, 0);
++	if (!master->dmapool) {
++		ret = -ENOMEM;
++		goto err;
++	}
++	master->sendq = request_queue(SEND_QID, QUEUE_SIZE);
++	if (IS_ERR(master->sendq)) {
++		printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
++				SEND_QID);
++		ret = PTR_ERR(master->sendq);
++		goto err;
++	}
++	master->recvq = request_queue(RECV_QID, QUEUE_SIZE);
++	if (IS_ERR(master->recvq)) {
++		printk(KERN_ERR "ixp4xx_crypto: Error requesting Q: %d\n",
++				RECV_QID);
++		ret = PTR_ERR(master->recvq);
++		release_queue(master->sendq);
++		goto err;
++	}
++
++	master->recvq->irq_cb = irqcb_recv;
++	queue_set_watermarks(master->recvq, 0, 0);
++	queue_set_irq_src(master->recvq, Q_IRQ_ID_NOT_E);
++	queue_enable_irq(master->recvq);
++	printk(KERN_INFO "ixp4xx_crypto " MY_VERSION " registered successfully\n");
++
++	return 0;
++err:
++	if (master->dmapool)
++		dma_pool_destroy(master->dmapool);
++	if (master->npe_dev)
++		put_device(master->npe_dev);
++	return ret;
++
++}
++
++static void release_sa_master(struct ix_sa_master *master)
++{
++	struct npe_crypt_cont *cont;
++	unsigned long flags;
++
++	write_lock_irqsave(&master->lock, flags);
++	while (master->pool) {
++		cont = master->pool;
++		master->pool = cont->next;
++		dma_pool_free(master->dmapool, cont, cont->phys);
++		master->pool_size--;
++	}
++	write_unlock_irqrestore(&master->lock, flags);
++	if (master->pool_size) {
++		printk(KERN_ERR "ixp4xx_crypto: %d items lost from DMA pool\n",
++				master->pool_size);
++	}
++
++	dma_pool_destroy(master->dmapool);
++	release_queue(master->sendq);
++	release_queue(master->recvq);
++	return_npe_dev(master->npe_dev);
++}
++
++static struct npe_crypt_cont *ix_sa_get_cont(struct ix_sa_master *master)
++{
++	unsigned long flags;
++	struct npe_crypt_cont *cont;
++	dma_addr_t handle;
++
++	write_lock_irqsave(&master->lock, flags);
++	if (!master->pool) {
++		cont = dma_pool_alloc(master->dmapool, GFP_ATOMIC, &handle);
++		if (cont) {
++			master->pool_size++;
++			cont->phys = handle;
++			cont->virt = cont;
++		}
++	} else {
++		cont = master->pool;
++		master->pool = cont->next;
++	}
++	write_unlock_irqrestore(&master->lock, flags);
++	return cont;
++}
++
++static void
++ix_sa_return_cont(struct ix_sa_master *master,struct npe_crypt_cont *cont)
++{
++	unsigned long flags;
++
++	write_lock_irqsave(&master->lock, flags);
++	cont->next = master->pool;
++	master->pool = cont;
++	write_unlock_irqrestore(&master->lock, flags);
++}
++
++static void free_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir)
++{
++	memset(dir->npe_ctx, 0, NPE_CTX_LEN);
++	dma_pool_free(sa_ctx->master->dmapool, dir->npe_ctx,
++			dir->npe_ctx_phys);
++}
++
++static void ix_sa_ctx_destroy(struct ix_sa_ctx *sa_ctx)
++{
++	BUG_ON(sa_ctx->state != STATE_UNLOADING);
++	free_sa_dir(sa_ctx, &sa_ctx->encrypt);
++	free_sa_dir(sa_ctx, &sa_ctx->decrypt);
++	kfree(sa_ctx);
++	module_put(THIS_MODULE);
++}
++
++static void recv_pack(struct qm_queue *queue, u32 phys)
++{
++	struct ix_sa_ctx *sa_ctx;
++	struct
npe_crypt_cont *cr_cont; ++ struct npe_cont *cont; ++ int failed; ++ ++ failed = phys & 0x1; ++ phys &= ~0x3; ++ ++ cr_cont = dma_to_virt(queue->dev, phys); ++ cr_cont = cr_cont->virt; ++ sa_ctx = cr_cont->ctl.crypt.sa_ctx; ++ ++ phys = npe_to_cpu32(cr_cont->ctl.crypt.src_buf); ++ if (phys) { ++ cont = dma_to_virt(queue->dev, phys); ++ cont = cont->virt; ++ } else { ++ cont = NULL; ++ } ++ if (cr_cont->ctl.crypt.oper_type == OP_PERFORM) { ++ dma_unmap_single(sa_ctx->master->npe_dev, ++ cont->eth.phys_addr, ++ cont->eth.buf_len, ++ DMA_BIDIRECTIONAL); ++ if (sa_ctx->perf_cb) ++ sa_ctx->perf_cb(sa_ctx, cont->data, failed); ++ qmgr_return_cont(dev_get_drvdata(queue->dev), cont); ++ ix_sa_return_cont(sa_ctx->master, cr_cont); ++ if (atomic_dec_and_test(&sa_ctx->use_cnt)) ++ ix_sa_ctx_destroy(sa_ctx); ++ return; ++ } ++ ++ /* We are registering */ ++ switch (cr_cont->ctl.crypt.mode) { ++ case NPE_OP_HASH_GEN_ICV: ++ /* 1 out of 2 HMAC preparation operations completed */ ++ dma_unmap_single(sa_ctx->master->npe_dev, ++ cont->eth.phys_addr, ++ cont->eth.buf_len, ++ DMA_TO_DEVICE); ++ kfree(cont->data); ++ qmgr_return_cont(dev_get_drvdata(queue->dev), cont); ++ break; ++ case NPE_OP_ENC_GEN_KEY: ++ memcpy(sa_ctx->decrypt.npe_ctx + sizeof(u32), ++ sa_ctx->rev_aes->ctl.rev_aes_key + sizeof(u32), ++ sa_ctx->c_key.len); ++ /* REV AES data not needed anymore, free it */ ++ ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes); ++ sa_ctx->rev_aes = NULL; ++ break; ++ default: ++ printk(KERN_ERR "Unknown crypt-register mode: %x\n", ++ cr_cont->ctl.crypt.mode); ++ ++ } ++ if (cr_cont->ctl.crypt.oper_type == OP_REG_DONE) { ++ if (sa_ctx->state == STATE_UNREGISTERED) ++ sa_ctx->state = STATE_REGISTERED; ++ if (sa_ctx->reg_cb) ++ sa_ctx->reg_cb(sa_ctx, failed); ++ } ++ ix_sa_return_cont(sa_ctx->master, cr_cont); ++ if (atomic_dec_and_test(&sa_ctx->use_cnt)) ++ ix_sa_ctx_destroy(sa_ctx); ++} ++ ++static void irqcb_recv(struct qm_queue *queue) ++{ ++ u32 phys; ++ ++ queue_ack_irq(queue); ++ while ((phys = queue_get_entry(queue))) ++ recv_pack(queue, phys); ++} ++ ++static int init_sa_dir(struct ix_sa_ctx *sa_ctx, struct ix_sa_dir *dir) ++{ ++ dir->npe_ctx = dma_pool_alloc(sa_ctx->master->dmapool, ++ sa_ctx->gfp_flags, &dir->npe_ctx_phys); ++ if (!dir->npe_ctx) { ++ return 1; ++ } ++ memset(dir->npe_ctx, 0, NPE_CTX_LEN); ++ return 0; ++} ++ ++struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags) ++{ ++ struct ix_sa_ctx *sa_ctx; ++ struct ix_sa_master *master = &sa_master; ++ struct npe_info *npe = dev_get_drvdata(master->npe_dev); ++ ++ /* first check if Microcode was downloaded into this NPE */ ++ if (!( npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) { ++ printk(KERN_ERR "%s not running\n", npe->plat->name); ++ return NULL; ++ } ++ switch (npe->img_info[1]) { ++ case 4: ++ case 5: ++ break; ++ default: ++ /* No crypto Microcode */ ++ return NULL; ++ } ++ if (!try_module_get(THIS_MODULE)) { ++ return NULL; ++ } ++ ++ sa_ctx = kzalloc(sizeof(struct ix_sa_ctx) + priv_len, flags); ++ if (!sa_ctx) { ++ goto err_put; ++ } ++ ++ sa_ctx->master = master; ++ sa_ctx->gfp_flags = flags; ++ ++ if (init_sa_dir(sa_ctx, &sa_ctx->encrypt)) ++ goto err_free; ++ if (init_sa_dir(sa_ctx, &sa_ctx->decrypt)) { ++ free_sa_dir(sa_ctx, &sa_ctx->encrypt); ++ goto err_free; ++ } ++ if (priv_len) ++ sa_ctx->priv = sa_ctx + 1; ++ ++ atomic_set(&sa_ctx->use_cnt, 1); ++ return sa_ctx; ++ ++err_free: ++ kfree(sa_ctx); ++err_put: ++ module_put(THIS_MODULE); ++ return NULL; ++} ++ ++void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx) ++{ ++ 
sa_ctx->state = STATE_UNLOADING; ++ if (atomic_dec_and_test(&sa_ctx->use_cnt)) ++ ix_sa_ctx_destroy(sa_ctx); ++ else ++ printk("ix_sa_ctx_free -> delayed: %p %d\n", ++ sa_ctx, atomic_read(&sa_ctx->use_cnt)); ++} ++ ++/* http://www.ietf.org/rfc/rfc2104.txt */ ++#define HMAC_IPAD_VALUE 0x36 ++#define HMAC_OPAD_VALUE 0x5C ++#define PAD_BLOCKLEN 64 ++ ++static int register_chain_var(struct ix_sa_ctx *sa_ctx, ++ unsigned char *pad, u32 target, int init_len, u32 ctx_addr, int oper) ++{ ++ struct npe_crypt_cont *cr_cont; ++ struct npe_cont *cont; ++ ++ cr_cont = ix_sa_get_cont(sa_ctx->master); ++ if (!cr_cont) ++ return -ENOMEM; ++ ++ cr_cont->ctl.crypt.sa_ctx = sa_ctx; ++ cr_cont->ctl.crypt.auth_offs = 0; ++ cr_cont->ctl.crypt.auth_len =cpu_to_npe16(PAD_BLOCKLEN); ++ cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(ctx_addr); ++ ++ cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev)); ++ if (!cont) { ++ ix_sa_return_cont(sa_ctx->master, cr_cont); ++ return -ENOMEM; ++ } ++ ++ cont->data = pad; ++ cont->eth.next = 0; ++ cont->eth.buf_len = cpu_to_npe16(PAD_BLOCKLEN); ++ cont->eth.pkt_len = 0; ++ ++ cont->eth.phys_addr = cpu_to_npe32(dma_map_single( ++ sa_ctx->master->npe_dev, pad, PAD_BLOCKLEN, DMA_TO_DEVICE)); ++ ++ cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys); ++ cr_cont->ctl.crypt.oper_type = oper; ++ ++ cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(target); ++ cr_cont->ctl.crypt.mode = NPE_OP_HASH_GEN_ICV; ++ cr_cont->ctl.crypt.init_len = init_len; ++ ++ atomic_inc(&sa_ctx->use_cnt); ++ queue_put_entry(sa_ctx->master->sendq, cr_cont->phys); ++ if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */ ++ atomic_dec(&sa_ctx->use_cnt); ++ qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), ++ cont); ++ ix_sa_return_cont(sa_ctx->master, cr_cont); ++ return -ENOMEM; ++ } ++ return 0; ++} ++ ++/* Return value ++ * 0 if nothing registered, ++ * 1 if something registered and ++ * < 0 on error ++ */ ++static int ix_sa_ctx_setup_auth(struct ix_sa_ctx *sa_ctx, ++ const struct ix_hash_algo *algo, int len, int oper, int encrypt) ++{ ++ unsigned char *ipad, *opad; ++ u32 itarget, otarget, ctx_addr; ++ unsigned char *cinfo; ++ int init_len, i, ret = 0; ++ struct qm_qmgr *qmgr; ++ struct ix_sa_dir *dir; ++ u32 cfgword; ++ ++ dir = encrypt ? 
&sa_ctx->encrypt : &sa_ctx->decrypt; ++ cinfo = dir->npe_ctx + dir->npe_ctx_idx; ++ ++ qmgr = dev_get_drvdata(sa_ctx->master->sendq->dev); ++ ++ cinfo = dir->npe_ctx + dir->npe_ctx_idx; ++ sa_ctx->h_algo = algo; ++ ++ if (!algo) { ++ dir->npe_mode |= NPE_OP_HMAC_DISABLE; ++ return 0; ++ } ++ if (algo->type == HASH_TYPE_CBCMAC) { ++ dir->npe_mode |= NPE_OP_CCM_ENABLE | NPE_OP_HMAC_DISABLE; ++ return 0; ++ } ++ if (sa_ctx->h_key.len > 64 || sa_ctx->h_key.len < algo->digest_len) ++ return -EINVAL; ++ if (len > algo->digest_len || (len % 4)) ++ return -EINVAL; ++ if (!len) ++ len = algo->digest_len; ++ ++ sa_ctx->digest_len = len; ++ ++ /* write cfg word to cryptinfo */ ++ cfgword = algo->cfgword | ((len/4) << 8); ++ *(u32*)cinfo = cpu_to_be32(cfgword); ++ cinfo += sizeof(cfgword); ++ ++ /* write ICV to cryptinfo */ ++ memcpy(cinfo, algo->icv, algo->digest_len); ++ cinfo += algo->digest_len; ++ ++ itarget = dir->npe_ctx_phys + dir->npe_ctx_idx ++ + sizeof(algo->cfgword); ++ otarget = itarget + algo->digest_len; ++ ++ opad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA); ++ if (!opad) { ++ return -ENOMEM; ++ } ++ ipad = kzalloc(PAD_BLOCKLEN, sa_ctx->gfp_flags | GFP_DMA); ++ if (!ipad) { ++ kfree(opad); ++ return -ENOMEM; ++ } ++ memcpy(ipad, sa_ctx->h_key.key, sa_ctx->h_key.len); ++ memcpy(opad, sa_ctx->h_key.key, sa_ctx->h_key.len); ++ for (i = 0; i < PAD_BLOCKLEN; i++) { ++ ipad[i] ^= HMAC_IPAD_VALUE; ++ opad[i] ^= HMAC_OPAD_VALUE; ++ } ++ init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx); ++ ctx_addr = dir->npe_ctx_phys + dir->npe_ctx_idx; ++ ++ dir->npe_ctx_idx += init_len; ++ dir->npe_mode |= NPE_OP_HASH_ENABLE; ++ ++ if (!encrypt) ++ dir->npe_mode |= NPE_OP_HASH_VERIFY; ++ ++ /* register first chainvar */ ++ ret = register_chain_var(sa_ctx, opad, otarget, ++ init_len, ctx_addr, OP_REGISTER); ++ if (ret) { ++ kfree(ipad); ++ kfree(opad); ++ return ret; ++ } ++ ++ /* register second chainvar */ ++ ret = register_chain_var(sa_ctx, ipad, itarget, ++ init_len, ctx_addr, oper); ++ if (ret) { ++ kfree(ipad); ++ return ret; ++ } ++ ++ return 1; ++} ++ ++static int gen_rev_aes_key(struct ix_sa_ctx *sa_ctx, ++ u32 keylen_cfg, int cipher_op) ++{ ++ unsigned char *cinfo; ++ struct npe_crypt_cont *cr_cont; ++ ++ keylen_cfg |= CIPH_ENCR | ALGO_AES | MOD_ECB; ++ sa_ctx->rev_aes = ix_sa_get_cont(sa_ctx->master); ++ if (!sa_ctx->rev_aes) ++ return -ENOMEM; ++ ++ cinfo = sa_ctx->rev_aes->ctl.rev_aes_key; ++ *(u32*)cinfo = cpu_to_be32(keylen_cfg); ++ cinfo += sizeof(keylen_cfg); ++ ++ memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len); ++ ++ cr_cont = ix_sa_get_cont(sa_ctx->master); ++ if (!cr_cont) { ++ ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes); ++ sa_ctx->rev_aes = NULL; ++ return -ENOMEM; ++ } ++ cr_cont->ctl.crypt.sa_ctx = sa_ctx; ++ cr_cont->ctl.crypt.oper_type = cipher_op; ++ ++ cr_cont->ctl.crypt.crypt_offs = 0; ++ cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(AES_BLOCK128); ++ cr_cont->ctl.crypt.addr.rev_aes = cpu_to_npe32( ++ sa_ctx->rev_aes->phys + sizeof(keylen_cfg)); ++ ++ cr_cont->ctl.crypt.src_buf = 0; ++ cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(sa_ctx->rev_aes->phys); ++ cr_cont->ctl.crypt.mode = NPE_OP_ENC_GEN_KEY; ++ cr_cont->ctl.crypt.init_len = sa_ctx->decrypt.npe_ctx_idx; ++ ++ atomic_inc(&sa_ctx->use_cnt); ++ queue_put_entry(sa_ctx->master->sendq, cr_cont->phys); ++ if (queue_stat(sa_ctx->master->sendq) == 2) { /* overflow */ ++ atomic_dec(&sa_ctx->use_cnt); ++ ix_sa_return_cont(sa_ctx->master, cr_cont); ++ ix_sa_return_cont(sa_ctx->master, sa_ctx->rev_aes); ++ 
sa_ctx->rev_aes = NULL; ++ return -ENOMEM; ++ } ++ ++ return 1; ++} ++ ++/* Return value ++ * 0 if nothing registered, ++ * 1 if something registered and ++ * < 0 on error ++ */ ++static int ix_sa_ctx_setup_cipher(struct ix_sa_ctx *sa_ctx, ++ const struct ix_cipher_algo *algo, int cipher_op, int encrypt) ++{ ++ unsigned char *cinfo; ++ int keylen, init_len; ++ u32 cipher_cfg; ++ u32 keylen_cfg = 0; ++ struct ix_sa_dir *dir; ++ ++ dir = encrypt ? &sa_ctx->encrypt : &sa_ctx->decrypt; ++ cinfo = dir->npe_ctx + dir->npe_ctx_idx; ++ ++ sa_ctx->c_algo = algo; ++ ++ if (!algo) ++ return 0; ++ ++ if (algo->type == CIPHER_TYPE_DES && sa_ctx->c_key.len != 8) ++ return -EINVAL; ++ ++ if (algo->type == CIPHER_TYPE_3DES && sa_ctx->c_key.len != 24) ++ return -EINVAL; ++ ++ keylen = 24; ++ ++ if (encrypt) { ++ cipher_cfg = algo->cfgword_enc; ++ dir->npe_mode |= NPE_OP_CRYPT_ENCRYPT; ++ } else { ++ cipher_cfg = algo->cfgword_dec; ++ } ++ if (algo->type == CIPHER_TYPE_AES) { ++ switch (sa_ctx->c_key.len) { ++ case 16: keylen_cfg = MOD_AES128 | KEYLEN_128; break; ++ case 24: keylen_cfg = MOD_AES192 | KEYLEN_192; break; ++ case 32: keylen_cfg = MOD_AES256 | KEYLEN_256; break; ++ default: return -EINVAL; ++ } ++ keylen = sa_ctx->c_key.len; ++ cipher_cfg |= keylen_cfg; ++ } ++ ++ /* write cfg word to cryptinfo */ ++ *(u32*)cinfo = cpu_to_be32(cipher_cfg); ++ cinfo += sizeof(cipher_cfg); ++ ++ /* write cipher key to cryptinfo */ ++ memcpy(cinfo, sa_ctx->c_key.key, sa_ctx->c_key.len); ++ cinfo += keylen; ++ ++ init_len = cinfo - (dir->npe_ctx + dir->npe_ctx_idx); ++ dir->npe_ctx_idx += init_len; ++ ++ dir->npe_mode |= NPE_OP_CRYPT_ENABLE; ++ ++ if (algo->type == CIPHER_TYPE_AES && !encrypt) { ++ return gen_rev_aes_key(sa_ctx, keylen_cfg, cipher_op); ++ } ++ ++ return 0; ++} ++ ++/* returns 0 on OK, <0 on error and 1 on overflow */ ++int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr, ++ int datalen, int c_offs, int c_len, int a_offs, int a_len, ++ int hmac, char *iv, int encrypt) ++{ ++ struct npe_crypt_cont *cr_cont; ++ struct npe_cont *cont; ++ u32 data_phys; ++ int ret = -ENOMEM; ++ struct ix_sa_dir *dir; ++ ++ dir = encrypt ? 
&sa_ctx->encrypt : &sa_ctx->decrypt; ++ ++ if (sa_ctx->state != STATE_REGISTERED) ++ return -ENOENT; ++ ++ cr_cont = ix_sa_get_cont(sa_ctx->master); ++ if (!cr_cont) ++ return ret; ++ ++ cr_cont->ctl.crypt.sa_ctx = sa_ctx; ++ cr_cont->ctl.crypt.crypto_ctx = cpu_to_npe32(dir->npe_ctx_phys); ++ cr_cont->ctl.crypt.oper_type = OP_PERFORM; ++ cr_cont->ctl.crypt.mode = dir->npe_mode; ++ cr_cont->ctl.crypt.init_len = dir->npe_ctx_idx; ++ ++ if (sa_ctx->c_algo) { ++ cr_cont->ctl.crypt.crypt_offs = cpu_to_npe16(c_offs); ++ cr_cont->ctl.crypt.crypt_len = cpu_to_npe16(c_len); ++ if (sa_ctx->c_algo->iv_len) { ++ if (!iv) { ++ ret = -EINVAL; ++ goto err_cr; ++ } ++ memcpy(cr_cont->ctl.crypt.iv, iv, ++ sa_ctx->c_algo->iv_len); ++ } ++ } ++ ++ if (sa_ctx->h_algo) { ++ /* prepare hashing */ ++ cr_cont->ctl.crypt.auth_offs = cpu_to_npe16(a_offs); ++ cr_cont->ctl.crypt.auth_len = cpu_to_npe16(a_len); ++ } ++ ++ data_phys = dma_map_single(sa_ctx->master->npe_dev, ++ data, datalen, DMA_BIDIRECTIONAL); ++ if (hmac) ++ cr_cont->ctl.crypt.addr.icv = cpu_to_npe32(data_phys + hmac); ++ ++ /* Prepare the data ptr */ ++ cont = qmgr_get_cont(dev_get_drvdata(sa_ctx->master->sendq->dev)); ++ if (!cont) { ++ goto err_unmap; ++ } ++ ++ cont->data = ptr; ++ cont->eth.next = 0; ++ cont->eth.buf_len = cpu_to_npe16(datalen); ++ cont->eth.pkt_len = 0; ++ ++ cont->eth.phys_addr = cpu_to_npe32(data_phys); ++ cr_cont->ctl.crypt.src_buf = cpu_to_npe32(cont->phys); ++ ++ atomic_inc(&sa_ctx->use_cnt); ++ queue_put_entry(sa_ctx->master->sendq, cr_cont->phys); ++ if (queue_stat(sa_ctx->master->sendq) != 2) { ++ return 0; ++ } ++ ++ /* overflow */ ++ printk("%s: Overflow\n", __FUNCTION__); ++ ret = -EAGAIN; ++ atomic_dec(&sa_ctx->use_cnt); ++ qmgr_return_cont(dev_get_drvdata(sa_ctx->master->sendq->dev), cont); ++ ++err_unmap: ++ dma_unmap_single(sa_ctx->master->npe_dev, data_phys, datalen, ++ DMA_BIDIRECTIONAL); ++err_cr: ++ ix_sa_return_cont(sa_ctx->master, cr_cont); ++ ++ return ret; ++} ++ ++int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx, ++ const struct ix_cipher_algo *cipher, ++ const struct ix_hash_algo *auth, int len) ++{ ++ int ret = 0, sum = 0; ++ int cipher_op; ++ ++ if (sa_ctx->state != STATE_UNREGISTERED) ++ return -ENOENT; ++ ++ atomic_inc(&sa_ctx->use_cnt); ++ ++ cipher_op = auth ? OP_REGISTER : OP_REG_DONE; ++ if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, OP_REGISTER, 1)) < 0) ++ goto out; ++ sum += ret; ++ if ((ret = ix_sa_ctx_setup_cipher(sa_ctx, cipher, cipher_op, 0)) < 0) ++ goto out; ++ sum += ret; ++ if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REGISTER, 1)) < 0) ++ goto out; ++ sum += ret; ++ if ((ret = ix_sa_ctx_setup_auth(sa_ctx, auth, len, OP_REG_DONE, 0)) < 0) ++ goto out; ++ sum += ret; ++ ++ /* Nothing registered ? ++ * Ok, then we are done and call the callback here. 
++ */ ++ if (!sum) { ++ if (sa_ctx->state == STATE_UNREGISTERED) ++ sa_ctx->state = STATE_REGISTERED; ++ if (sa_ctx->reg_cb) ++ sa_ctx->reg_cb(sa_ctx, 0); ++ } ++out: ++ atomic_dec(&sa_ctx->use_cnt); ++ return ret; ++} ++ ++static int __init init_crypto(void) ++{ ++ return init_sa_master(&sa_master); ++} ++ ++static void __exit finish_crypto(void) ++{ ++ release_sa_master(&sa_master); ++} ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); ++ ++EXPORT_SYMBOL(ix_hash_by_id); ++EXPORT_SYMBOL(ix_cipher_by_id); ++ ++EXPORT_SYMBOL(ix_sa_ctx_new); ++EXPORT_SYMBOL(ix_sa_ctx_free); ++EXPORT_SYMBOL(ix_sa_ctx_setup_cipher_auth); ++EXPORT_SYMBOL(ix_sa_crypto_perform); ++ ++module_init(init_crypto); ++module_exit(finish_crypto); ++ +Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_qmgr.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ixp4xx_qmgr.c 2007-02-21 02:24:35.000000000 -0800 +@@ -0,0 +1,474 @@ ++/* ++ * qmgr.c - reimplementation of the queue configuration interface. ++ * ++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com> ++ * ++ * This file is released under the GPLv2 ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/fs.h> ++#include <linux/init.h> ++#include <linux/slab.h> ++#include <linux/dmapool.h> ++#include <linux/interrupt.h> ++#include <linux/err.h> ++#include <linux/delay.h> ++#include <asm/uaccess.h> ++#include <asm/io.h> ++ ++#include <linux/ixp_qmgr.h> ++#include <linux/ixp_npe.h> ++ ++#define IXQMGR_VERSION "IXP4XX Q Manager 0.2.1" ++ ++static struct device *qmgr_dev = NULL; ++ ++static int poll_freq = 4000; ++static int poll_enable = 0; ++static u32 timer_countup_ticks; ++ ++module_param(poll_freq, int, 0644); ++module_param(poll_enable, int, 0644); ++ ++int queue_len(struct qm_queue *queue) ++{ ++ struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev); ++ int diff, offs; ++ u32 val; ++ ++ offs = queue->id/8 + QUE_LOW_STAT0; ++ val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id); ++ ++ diff = (val - (val >> 7)) & 0x7f; ++ if (!diff) { ++ /* diff == 0 means either empty or full, must look at STAT0 */ ++ if ((*(qmgr->addr + offs) >> ((queue->id % 8)*4)) & 0x04) ++ diff = queue->len; ++ } ++ return diff; ++} ++ ++static int request_pool(struct device *dev, int count) ++{ ++ int i; ++ struct npe_cont *cont; ++ struct qm_qmgr *qmgr = dev_get_drvdata(dev); ++ dma_addr_t handle; ++ ++ for (i=0; i<count; i++) { ++ cont = dma_pool_alloc(qmgr->dmapool, GFP_KERNEL, &handle); ++ if (!cont) { ++ return -ENOMEM; ++ } ++ cont->phys = handle; ++ cont->virt = cont; ++ write_lock(&qmgr->lock); ++ cont->next = qmgr->pool; ++ qmgr->pool = cont; ++ write_unlock(&qmgr->lock); ++ } ++ return 0; ++} ++ ++static int free_pool(struct device *dev, int count) ++{ ++ int i; ++ struct npe_cont *cont; ++ struct qm_qmgr *qmgr = dev_get_drvdata(dev); ++ ++ for (i=0; i<count; i++) { ++ write_lock(&qmgr->lock); ++ cont = qmgr->pool; ++ if (!cont) { ++ write_unlock(&qmgr->lock); ++ return -1; ++ } ++ qmgr->pool = cont->next; ++ write_unlock(&qmgr->lock); ++ dma_pool_free(qmgr->dmapool, cont, cont->phys); ++ } ++ return 0; ++} ++ ++static int get_free_qspace(struct qm_qmgr *qmgr, int len) ++{ ++ int words = (qmgr->res->end - qmgr->res->start + 1) / 4 - ++ IX_QMGR_SRAM_SPACE; ++ int i,q; ++ ++ for (i=0; i<words; i+=len) { ++ for (q=0; q<MAX_QUEUES; q++) { ++ 
struct qm_queue *qu = qmgr->queues[q]; ++ if (!qu) ++ continue; ++ if ((qu->addr + qu->len > i) && (qu->addr < i + len)) ++ break; ++ } ++ if (q == MAX_QUEUES) { ++ /* we have a free address */ ++ return i; ++ } ++ } ++ return -1; ++} ++ ++static inline int _log2(int x) ++{ ++ int r=0; ++ while(x>>=1) ++ r++; ++ return r; ++} ++ ++/* ++ * 32bit Config registers at IX_QMGR_QUECONFIG_BASE_OFFSET[Qid] ++ * 0 - 6 WRPTR Word offset to baseaddr (index 0 .. BSIZE-1) ++ * 7 -13 RDPTR '' ++ * 14 -21 BADDR baseaddr = (offset to IX_QMGR_QUEBUFFER_SPACE_OFFSET) >> 6 ++ * 22 -23 ESIZE entrySizeInWords (always 00 because entrySizeInWords==1) ++ * 24 -25 BSIZE qSizeInWords 00=16,01=32,10=64,11=128 ++ * 26 -28 NE nearly empty ++ * 29 -31 NF nearly full ++ */ ++static int conf_q_regs(struct qm_queue *queue) ++{ ++ int bsize = _log2(queue->len/16); ++ int baddr = queue->addr + IX_QMGR_QCFG_SIZE; ++ ++ /* +2, because baddr is in words and not in bytes */ ++ queue_write_cfg_reg(queue, (bsize << 24) | (baddr<<(14-6+2)) ); ++ ++ return 0; ++} ++ ++static void pmu_timer_restart(void) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ __asm__(" mcr p14,0,%0,c1,c1,0\n" /* write current counter */ ++ : : "r" (timer_countup_ticks)); ++ ++ __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */ ++ " orr r1,r1,#1; " ++ " mcr p14,0,r1,c5,c1,0; " /* clear overflow */ ++ " mcr p14,0,r1,c4,c1,0\n" /* enable interrupts */ ++ : : : "r1"); ++ ++ local_irq_restore(flags); ++} ++ ++static void pmu_timer_init(void) ++{ ++ u32 controlRegisterMask = ++ BIT(0) | /* enable counters */ ++ BIT(2); /* reset clock counter; */ ++ ++ /* ++ * Compute the number of xscale cycles needed between each ++ * PMU IRQ. This is done from the result of an OS calibration loop. ++ * ++ * For 533MHz CPU, 533000000 tick/s / 4000 times/sec = 138250 ++ * 4000 times/sec = 37 mbufs/interrupt at line rate ++ * The pmu timer is reset to -138250 = 0xfffde3f6, to trigger an IRQ ++ * when this up counter overflows. ++ * ++ * The multiplication gives a number of instructions per second. ++ * which is close to the processor frequency, and then close to the ++ * PMU clock rate. ++ * ++ * 2 is the number of instructions per loop ++ * ++ */ ++ ++ timer_countup_ticks = - ((loops_per_jiffy * HZ * 2) / poll_freq); ++ ++ /* enable the CCNT (clock count) timer from the PMU */ ++ __asm__(" mcr p14,0,%0,c0,c1,0\n" ++ : : "r" (controlRegisterMask)); ++} ++ ++static void pmu_timer_disable(void) ++{ ++ unsigned long flags; ++ ++ local_irq_save(flags); ++ ++ __asm__(" mrc p14,0,r1,c4,c1,0; " /* get int enable register */ ++ " and r1,r1,#0x1e; " ++ " mcr p14,0,r1,c4,c1,0\n" /* disable interrupts */ ++ : : : "r1"); ++ local_irq_restore(flags); ++} ++ ++void queue_set_watermarks(struct qm_queue *queue, unsigned ne, unsigned nf) ++{ ++ u32 val; ++ /* calculate the register values ++ * 0->0, 1->1, 2->2, 4->3, 8->4 16->5...*/ ++ ne = _log2(ne<<1) & 0x7; ++ nf = _log2(nf<<1) & 0x7; ++ ++ /* Mask out old watermarks */ ++ val = queue_read_cfg_reg(queue) & ~0xfc000000; ++ queue_write_cfg_reg(queue, val | (ne << 26) | (nf << 29)); ++} ++ ++int queue_set_irq_src(struct qm_queue *queue, int flag) ++{ ++ struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev); ++ u32 reg; ++ int offs, bitoffs; ++ ++ /* Q 0-7 are in REG0, 8-15 are in REG1, etc. 
They occupy 4 bits/Q */ ++ offs = queue->id/8 + INT0_SRC_SELREG0; ++ bitoffs = (queue->id % 8)*4; ++ ++ reg = *(qmgr->addr + offs) & ~(0xf << bitoffs); ++ *(qmgr->addr + offs) = reg | (flag << bitoffs); ++ ++ return 0; ++} ++ ++static irqreturn_t irq_qm1(int irq, void *dev_id) ++{ ++ struct qm_qmgr *qmgr = dev_id; ++ int offs, reg; ++ struct qm_queue *queue; ++ ++ if (poll_enable) ++ pmu_timer_restart(); ++ ++ reg = *(qmgr->addr + QUE_INT_REG0); ++ while(reg) { ++ /* ++ * count leading zeros. "offs" gets ++ * the amount of leading 0 in "reg" ++ */ ++ asm ("clz %0, %1;" : "=r"(offs) : "r"(reg)); ++ offs = 31 - offs; ++ reg &= ~(1 << offs); ++ queue = qmgr->queues[offs]; ++ if (likely(queue)) { ++ if (likely(queue->irq_cb)) { ++ queue->irq_cb(queue); ++ } else { ++ printk(KERN_ERR "Missing callback for Q %d\n", ++ offs); ++ } ++ } else { ++ printk(KERN_ERR "IRQ for unregistered Q %d\n", offs); ++ } ++ } ++ return IRQ_HANDLED; ++} ++ ++struct qm_queue *request_queue(int qid, int len) ++{ ++ int ram; ++ struct qm_qmgr *qmgr; ++ struct qm_queue *queue; ++ ++ if (!qmgr_dev) ++ return ERR_PTR(-ENODEV); ++ ++ if ((qid < 0) || (qid > MAX_QUEUES)) ++ return ERR_PTR(-ERANGE); ++ ++ switch (len) { ++ case 16: ++ case 32: ++ case 64: ++ case 128: break; ++ default : return ERR_PTR(-EINVAL); ++ } ++ ++ qmgr = dev_get_drvdata(qmgr_dev); ++ ++ if (qmgr->queues[qid]) { ++ /* not an error, just in use already */ ++ return NULL; ++ } ++ if ((ram = get_free_qspace(qmgr, len)) < 0) { ++ printk(KERN_ERR "No free SRAM space for this queue\n"); ++ return ERR_PTR(-ENOMEM); ++ } ++ if (!(queue = kzalloc(sizeof(struct qm_queue), GFP_KERNEL))) ++ return ERR_PTR(-ENOMEM); ++ ++ if (!try_module_get(THIS_MODULE)) { ++ kfree(queue); ++ return ERR_PTR(-ENODEV); ++ } ++ ++ queue->addr = ram; ++ queue->len = len; ++ queue->id = qid; ++ queue->dev = get_device(qmgr_dev); ++ queue->acc_reg = qmgr->addr + (4 * qid); ++ qmgr->queues[qid] = queue; ++ if (request_pool(qmgr_dev, len)) { ++ printk(KERN_ERR "Failed to request DMA pool of Q %d\n", qid); ++ } ++ ++ conf_q_regs(queue); ++ return queue; ++} ++ ++void release_queue(struct qm_queue *queue) ++{ ++ struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev); ++ ++ BUG_ON(qmgr->queues[queue->id] != queue); ++ qmgr->queues[queue->id] = NULL; ++ ++ if (free_pool(queue->dev, queue->len)) { ++ printk(KERN_ERR "Failed to release DMA pool of Q %d\n", ++ queue->id); ++ } ++ queue_disable_irq(queue); ++ queue_write_cfg_reg(queue, 0); ++ ++ module_put(THIS_MODULE); ++ put_device(queue->dev); ++ kfree(queue); ++} ++ ++ ++ ++ ++static int qmgr_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct qm_qmgr *qmgr; ++ int size, ret=0, i; ++ ++ if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) ++ return -EIO; ++ ++ if ((i = platform_get_irq(pdev, 0)) < 0) ++ return -EIO; ++ ++ if (!(qmgr = kzalloc(sizeof(struct qm_qmgr), GFP_KERNEL))) ++ return -ENOMEM; ++ ++ qmgr->irq = i; ++ size = res->end - res->start +1; ++ qmgr->res = request_mem_region(res->start, size, "ixp_qmgr"); ++ if (!qmgr->res) { ++ ret = -EBUSY; ++ goto out_free; ++ } ++ ++ qmgr->addr = ioremap(res->start, size); ++ if (!qmgr->addr) { ++ ret = -ENOMEM; ++ goto out_rel; ++ } ++ ++ /* Reset Q registers */ ++ for (i=0; i<4; i++) ++ *(qmgr->addr + QUE_LOW_STAT0 +i) = 0x33333333; ++ for (i=0; i<10; i++) ++ *(qmgr->addr + QUE_UO_STAT0 +i) = 0x0; ++ for (i=0; i<4; i++) ++ *(qmgr->addr + INT0_SRC_SELREG0 +i) = 0x0; ++ for (i=0; i<2; i++) { ++ *(qmgr->addr + QUE_IE_REG0 +i) = 0x00; ++ *(qmgr->addr + QUE_INT_REG0 
+i) = 0xffffffff; ++ } ++ for (i=0; i<64; i++) { ++ *(qmgr->addr + IX_QMGR_QCFG_BASE + i) = 0x0; ++ } ++ ++ if (poll_enable) { ++ pmu_timer_init(); ++ qmgr->irq = IRQ_IXP4XX_XSCALE_PMU; ++ } ++ ret = request_irq(qmgr->irq, irq_qm1, SA_SHIRQ | SA_INTERRUPT, ++ "qmgr", qmgr); ++ if (ret) { ++ printk(KERN_ERR "Failed to request IRQ(%d)\n", qmgr->irq); ++ ret = -EIO; ++ goto out_rel; ++ } ++ if (poll_enable) ++ pmu_timer_restart(); ++ ++ rwlock_init(&qmgr->lock); ++ qmgr->dmapool = dma_pool_create("qmgr", &pdev->dev, ++ sizeof(struct npe_cont), 32, 0); ++ platform_set_drvdata(pdev, qmgr); ++ ++ qmgr_dev = &pdev->dev; ++ ++ printk(KERN_INFO IXQMGR_VERSION " initialized.\n"); ++ ++ return 0; ++ ++out_rel: ++ release_resource(qmgr->res); ++out_free: ++ kfree(qmgr); ++ return ret; ++} ++ ++static int qmgr_remove(struct platform_device *pdev) ++{ ++ struct qm_qmgr *qmgr = platform_get_drvdata(pdev); ++ int i; ++ ++ for (i=0; i<MAX_QUEUES; i++) { ++ if (qmgr->queues[i]) { ++ printk(KERN_ERR "WARNING Unreleased Q: %d\n", i); ++ release_queue(qmgr->queues[i]); ++ } ++ } ++ ++ if (poll_enable) ++ pmu_timer_disable(); ++ ++ synchronize_irq (qmgr->irq); ++ free_irq(qmgr->irq, qmgr); ++ ++ dma_pool_destroy(qmgr->dmapool); ++ iounmap(qmgr->addr); ++ release_resource(qmgr->res); ++ platform_set_drvdata(pdev, NULL); ++ qmgr_dev = NULL; ++ kfree(qmgr); ++ return 0; ++} ++ ++static struct platform_driver ixp4xx_qmgr = { ++ .driver.name = "ixp4xx_qmgr", ++ .probe = qmgr_probe, ++ .remove = qmgr_remove, ++}; ++ ++ ++static int __init init_qmgr(void) ++{ ++ return platform_driver_register(&ixp4xx_qmgr); ++} ++ ++static void __exit finish_qmgr(void) ++{ ++ platform_driver_unregister(&ixp4xx_qmgr); ++} ++ ++module_init(init_qmgr); ++module_exit(finish_qmgr); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); ++ ++EXPORT_SYMBOL(request_queue); ++EXPORT_SYMBOL(release_queue); ++EXPORT_SYMBOL(queue_set_irq_src); ++EXPORT_SYMBOL(queue_set_watermarks); ++EXPORT_SYMBOL(queue_len); +Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac.h +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac.h 2007-02-21 02:24:35.000000000 -0800 +@@ -0,0 +1,275 @@ ++/* ++ * Copyright (C) 2002-2006 Christian Hohnstaedt <chohnstaedt@innominate.com> ++ * ++ * This file is released under the GPLv2 ++ */ ++ ++#include <linux/resource.h> ++#include <linux/netdevice.h> ++#include <linux/io.h> ++#include <linux/mii.h> ++#include <linux/workqueue.h> ++#include <asm/hardware.h> ++#include <linux/ixp_qmgr.h> ++ ++/* 32 bit offsets to be added to u32 *pointers */ ++#define MAC_TX_CNTRL1 0x00 // 0x000 ++#define MAC_TX_CNTRL2 0x01 // 0x004 ++#define MAC_RX_CNTRL1 0x04 // 0x010 ++#define MAC_RX_CNTRL2 0x05 // 0x014 ++#define MAC_RANDOM_SEED 0x08 // 0x020 ++#define MAC_THRESH_P_EMPTY 0x0c // 0x030 ++#define MAC_THRESH_P_FULL 0x0e // 0x038 ++#define MAC_BUF_SIZE_TX 0x10 // 0x040 ++#define MAC_TX_DEFER 0x14 // 0x050 ++#define MAC_RX_DEFER 0x15 // 0x054 ++#define MAC_TX_TWO_DEFER_1 0x18 // 0x060 ++#define MAC_TX_TWO_DEFER_2 0x19 // 0x064 ++#define MAC_SLOT_TIME 0x1c // 0x070 ++#define MAC_MDIO_CMD 0x20 // 0x080 4 registers 0x20 - 0x23 ++#define MAC_MDIO_STS 0x24 // 0x090 4 registers 0x24 - 0x27 ++#define MAC_ADDR_MASK 0x28 // 0x0A0 6 registers 0x28 - 0x2d ++#define MAC_ADDR 0x30 // 0x0C0 6 registers 0x30 - 0x35 ++#define MAC_INT_CLK_THRESH 0x38 // 0x0E0 1 register ++#define MAC_UNI_ADDR 0x3c // 0x0F0 6 
registers 0x3c - 0x41 ++#define MAC_CORE_CNTRL 0x7f // 0x1fC ++ ++/* TX Control Register 1*/ ++ ++#define TX_CNTRL1_TX_EN BIT(0) ++#define TX_CNTRL1_DUPLEX BIT(1) ++#define TX_CNTRL1_RETRY BIT(2) ++#define TX_CNTRL1_PAD_EN BIT(3) ++#define TX_CNTRL1_FCS_EN BIT(4) ++#define TX_CNTRL1_2DEFER BIT(5) ++#define TX_CNTRL1_RMII BIT(6) ++ ++/* TX Control Register 2 */ ++#define TX_CNTRL2_RETRIES_MASK 0xf ++ ++/* RX Control Register 1 */ ++#define RX_CNTRL1_RX_EN BIT(0) ++#define RX_CNTRL1_PADSTRIP_EN BIT(1) ++#define RX_CNTRL1_CRC_EN BIT(2) ++#define RX_CNTRL1_PAUSE_EN BIT(3) ++#define RX_CNTRL1_LOOP_EN BIT(4) ++#define RX_CNTRL1_ADDR_FLTR_EN BIT(5) ++#define RX_CNTRL1_RX_RUNT_EN BIT(6) ++#define RX_CNTRL1_BCAST_DIS BIT(7) ++ ++/* RX Control Register 2 */ ++#define RX_CNTRL2_DEFER_EN BIT(0) ++ ++/* Core Control Register */ ++#define CORE_RESET BIT(0) ++#define CORE_RX_FIFO_FLUSH BIT(1) ++#define CORE_TX_FIFO_FLUSH BIT(2) ++#define CORE_SEND_JAM BIT(3) ++#define CORE_MDC_EN BIT(4) ++ ++/* Definitions for MII access routines*/ ++ ++#define MII_REG_SHL 16 ++#define MII_ADDR_SHL 21 ++ ++#define MII_GO BIT(31) ++#define MII_WRITE BIT(26) ++#define MII_READ_FAIL BIT(31) ++ ++#define MII_TIMEOUT_10TH_SECS 5 ++#define MII_10TH_SEC_IN_MILLIS 100 ++ ++/* ++ * ++ * Default values ++ * ++ */ ++ ++#define MAC_DEF_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK) ++ ++#define MAC_TX_CNTRL1_DEFAULT (\ ++ TX_CNTRL1_TX_EN | \ ++ TX_CNTRL1_RETRY | \ ++ TX_CNTRL1_FCS_EN | \ ++ TX_CNTRL1_2DEFER | \ ++ TX_CNTRL1_PAD_EN ) ++ ++#define MAC_TX_MAX_RETRIES_DEFAULT 0x0f ++ ++#define MAC_RX_CNTRL1_DEFAULT ( \ ++ RX_CNTRL1_PADSTRIP_EN | \ ++ RX_CNTRL1_CRC_EN | \ ++ RX_CNTRL1_RX_EN ) ++ ++#define MAC_RX_CNTRL2_DEFAULT 0x0 ++#define MAC_TX_CNTRL2_DEFAULT TX_CNTRL2_RETRIES_MASK ++ ++/* Thresholds determined by NPE firmware FS */ ++#define MAC_THRESH_P_EMPTY_DEFAULT 0x12 ++#define MAC_THRESH_P_FULL_DEFAULT 0x30 ++ ++/* Number of bytes that must be in the tx fifo before ++ * transmission commences */ ++#define MAC_BUF_SIZE_TX_DEFAULT 0x8 ++ ++/* One-part deferral values */ ++#define MAC_TX_DEFER_DEFAULT 0x15 ++#define MAC_RX_DEFER_DEFAULT 0x16 ++ ++/* Two-part deferral values... 
*/ ++#define MAC_TX_TWO_DEFER_1_DEFAULT 0x08 ++#define MAC_TX_TWO_DEFER_2_DEFAULT 0x07 ++ ++/* This value applies to MII */ ++#define MAC_SLOT_TIME_DEFAULT 0x80 ++ ++/* This value applies to RMII */ ++#define MAC_SLOT_TIME_RMII_DEFAULT 0xFF ++ ++#define MAC_ADDR_MASK_DEFAULT 0xFF ++ ++#define MAC_INT_CLK_THRESH_DEFAULT 0x1 ++/* The following is a value chosen at random */ ++#define MAC_RANDOM_SEED_DEFAULT 0x8 ++ ++/* By default we must configure the MAC to generate the MDC clock*/ ++#define CORE_DEFAULT (CORE_MDC_EN) ++ ++/* End of Intel provided register information */ ++ ++extern int ++mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg); ++extern void ++mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val); ++extern void init_mdio(struct net_device *dev, int phy_id); ++ ++struct mac_info { ++ u32 __iomem *addr; ++ struct resource *res; ++ struct device *npe_dev; ++ struct net_device *netdev; ++ struct qm_qmgr *qmgr; ++ struct qm_queue *rxq; ++ struct qm_queue *txq; ++ struct qm_queue *rxdoneq; ++ u32 irqflags; ++ struct net_device_stats stat; ++ struct mii_if_info mii; ++ struct delayed_work mdio_thread; ++ int rxq_pkt; ++ int txq_pkt; ++ int unloading; ++ struct mac_plat_info *plat; ++ int npe_stat_num; ++ spinlock_t rx_lock; ++ u32 msg_enable; ++}; ++ ++static inline void mac_write_reg(struct mac_info *mac, int offset, u32 val) ++{ ++ *(mac->addr + offset) = val; ++} ++static inline u32 mac_read_reg(struct mac_info *mac, int offset) ++{ ++ return *(mac->addr + offset); ++} ++static inline void mac_set_regbit(struct mac_info *mac, int offset, u32 bit) ++{ ++ mac_write_reg(mac, offset, mac_read_reg(mac, offset) | bit); ++} ++static inline void mac_reset_regbit(struct mac_info *mac, int offset, u32 bit) ++{ ++ mac_write_reg(mac, offset, mac_read_reg(mac, offset) & ~bit); ++} ++ ++static inline void mac_mdio_cmd_write(struct mac_info *mac, u32 cmd) ++{ ++ int i; ++ for(i=0; i<4; i++) { ++ mac_write_reg(mac, MAC_MDIO_CMD + i, cmd & 0xff); ++ cmd >>=8; ++ } ++} ++ ++#define mac_mdio_cmd_read(mac) mac_mdio_read((mac), MAC_MDIO_CMD) ++#define mac_mdio_status_read(mac) mac_mdio_read((mac), MAC_MDIO_STS) ++static inline u32 mac_mdio_read(struct mac_info *mac, int offset) ++{ ++ int i; ++ u32 data = 0; ++ for(i=0; i<4; i++) { ++ data |= (mac_read_reg(mac, offset + i) & 0xff) << (i*8); ++ } ++ return data; ++} ++ ++static inline u32 mdio_cmd(int phy_addr, int phy_reg) ++{ ++ return phy_addr << MII_ADDR_SHL | ++ phy_reg << MII_REG_SHL | ++ MII_GO; ++} ++ ++#define MAC_REG_LIST { \ ++ MAC_TX_CNTRL1, MAC_TX_CNTRL2, \ ++ MAC_RX_CNTRL1, MAC_RX_CNTRL2, \ ++ MAC_RANDOM_SEED, MAC_THRESH_P_EMPTY, MAC_THRESH_P_FULL, \ ++ MAC_BUF_SIZE_TX, MAC_TX_DEFER, MAC_RX_DEFER, \ ++ MAC_TX_TWO_DEFER_1, MAC_TX_TWO_DEFER_2, MAC_SLOT_TIME, \ ++ MAC_ADDR_MASK +0, MAC_ADDR_MASK +1, MAC_ADDR_MASK +2, \ ++ MAC_ADDR_MASK +3, MAC_ADDR_MASK +4, MAC_ADDR_MASK +5, \ ++ MAC_ADDR +0, MAC_ADDR +1, MAC_ADDR +2, \ ++ MAC_ADDR +3, MAC_ADDR +4, MAC_ADDR +5, \ ++ MAC_INT_CLK_THRESH, \ ++ MAC_UNI_ADDR +0, MAC_UNI_ADDR +1, MAC_UNI_ADDR +2, \ ++ MAC_UNI_ADDR +3, MAC_UNI_ADDR +4, MAC_UNI_ADDR +5, \ ++ MAC_CORE_CNTRL \ ++} ++ ++#define NPE_STAT_NUM 34 ++#define NPE_STAT_NUM_BASE 22 ++#define NPE_Q_STAT_NUM 4 ++ ++#define NPE_Q_STAT_STRINGS \ ++ {"RX ready to use queue len "}, \ ++ {"RX received queue len "}, \ ++ {"TX to be send queue len "}, \ ++ {"TX done queue len "}, ++ ++#define NPE_STAT_STRINGS \ ++ {"StatsAlignmentErrors "}, \ ++ {"StatsFCSErrors "}, \ ++ {"StatsInternalMacReceiveErrors "}, \ 
++ {"RxOverrunDiscards "}, \ ++ {"RxLearnedEntryDiscards "}, \ ++ {"RxLargeFramesDiscards "}, \ ++ {"RxSTPBlockedDiscards "}, \ ++ {"RxVLANTypeFilterDiscards "}, \ ++ {"RxVLANIdFilterDiscards "}, \ ++ {"RxInvalidSourceDiscards "}, \ ++ {"RxBlackListDiscards "}, \ ++ {"RxWhiteListDiscards "}, \ ++ {"RxUnderflowEntryDiscards "}, \ ++ {"StatsSingleCollisionFrames "}, \ ++ {"StatsMultipleCollisionFrames "}, \ ++ {"StatsDeferredTransmissions "}, \ ++ {"StatsLateCollisions "}, \ ++ {"StatsExcessiveCollsions "}, \ ++ {"StatsInternalMacTransmitErrors"}, \ ++ {"StatsCarrierSenseErrors "}, \ ++ {"TxLargeFrameDiscards "}, \ ++ {"TxVLANIdFilterDiscards "}, \ ++\ ++ {"RxValidFramesTotalOctets "}, \ ++ {"RxUcastPkts "}, \ ++ {"RxBcastPkts "}, \ ++ {"RxMcastPkts "}, \ ++ {"RxPkts64Octets "}, \ ++ {"RxPkts65to127Octets "}, \ ++ {"RxPkts128to255Octets "}, \ ++ {"RxPkts256to511Octets "}, \ ++ {"RxPkts512to1023Octets "}, \ ++ {"RxPkts1024to1518Octets "}, \ ++ {"RxInternalNPEReceiveErrors "}, \ ++ {"TxInternalNPETransmitErrors "} ++ +Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac_driver.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/mac_driver.c 2007-02-21 02:24:46.000000000 -0800 +@@ -0,0 +1,850 @@ ++/* ++ * mac_driver.c - provide a network interface for each MAC ++ * ++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com> ++ * ++ * This file is released under the GPLv2 ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/platform_device.h> ++#include <linux/netdevice.h> ++#include <linux/etherdevice.h> ++#include <linux/ethtool.h> ++#include <linux/slab.h> ++#include <linux/delay.h> ++#include <linux/err.h> ++#include <linux/dma-mapping.h> ++#include <linux/workqueue.h> ++#include <asm/io.h> ++#include <asm/irq.h> ++ ++ ++#include <linux/ixp_qmgr.h> ++#include <linux/ixp_npe.h> ++#include "mac.h" ++ ++#define MDIO_INTERVAL (3*HZ) ++#define RX_QUEUE_PREFILL 64 ++#define TX_QUEUE_PREFILL 16 ++ ++#define IXMAC_NAME "ixp4xx_mac" ++#define IXMAC_VERSION "0.3.1" ++ ++#define MAC_DEFAULT_REG(mac, name) \ ++ mac_write_reg(mac, MAC_ ## name, MAC_ ## name ## _DEFAULT) ++ ++#define TX_DONE_QID 31 ++ ++#define DMA_ALLOC_SIZE 2048 ++#define DMA_HDR_SIZE (sizeof(struct npe_cont)) ++#define DMA_BUF_SIZE (DMA_ALLOC_SIZE - DMA_HDR_SIZE) ++ ++/* Since the NPEs use 1 Return Q for sent frames, we need a device ++ * independent return Q. We call it tx_doneq. ++ * It will be initialized during module load and uninitialized ++ * during module unload. Evil hack, but there is no choice :-( ++ */ ++ ++static struct qm_queue *tx_doneq = NULL; ++static int debug = -1; ++module_param(debug, int, 0); ++ ++static int init_buffer(struct qm_queue *queue, int count) ++{ ++ int i; ++ struct npe_cont *cont; ++ ++ for (i=0; i<count; i++) { ++ cont = kmalloc(DMA_ALLOC_SIZE, GFP_KERNEL | GFP_DMA); ++ if (!cont) ++ goto err; ++ ++ cont->phys = dma_map_single(queue->dev, cont, DMA_ALLOC_SIZE, ++ DMA_BIDIRECTIONAL); ++ if (dma_mapping_error(cont->phys)) ++ goto err; ++ ++ cont->data = cont+1; ++ /* now the buffer is on a 32 bit boundary. 
++ * we add 2 bytes for good alignment to SKB */ ++ cont->data+=2; ++ cont->eth.next = 0; ++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE); ++ cont->eth.pkt_len = 0; ++ /* also add 2 alignment bytes from cont->data*/ ++ cont->eth.phys_addr = cpu_to_npe32(cont->phys+ DMA_HDR_SIZE+ 2); ++ ++ dma_sync_single(queue->dev, cont->phys, DMA_HDR_SIZE, ++ DMA_TO_DEVICE); ++ ++ queue_put_entry(queue, cont->phys); ++ if (queue_stat(queue) == 2) { /* overflow */ ++ dma_unmap_single(queue->dev, cont->phys, DMA_ALLOC_SIZE, ++ DMA_BIDIRECTIONAL); ++ goto err; ++ } ++ } ++ return i; ++err: ++ if (cont) ++ kfree(cont); ++ return i; ++} ++ ++static int destroy_buffer(struct qm_queue *queue, int count) ++{ ++ u32 phys; ++ int i; ++ struct npe_cont *cont; ++ ++ for (i=0; i<count; i++) { ++ phys = queue_get_entry(queue) & ~0xf; ++ if (!phys) ++ break; ++ dma_unmap_single(queue->dev, phys, DMA_ALLOC_SIZE, ++ DMA_BIDIRECTIONAL); ++ cont = dma_to_virt(queue->dev, phys); ++ kfree(cont); ++ } ++ return i; ++} ++ ++static void mac_init(struct mac_info *mac) ++{ ++ MAC_DEFAULT_REG(mac, TX_CNTRL2); ++ MAC_DEFAULT_REG(mac, RANDOM_SEED); ++ MAC_DEFAULT_REG(mac, THRESH_P_EMPTY); ++ MAC_DEFAULT_REG(mac, THRESH_P_FULL); ++ MAC_DEFAULT_REG(mac, TX_DEFER); ++ MAC_DEFAULT_REG(mac, TX_TWO_DEFER_1); ++ MAC_DEFAULT_REG(mac, TX_TWO_DEFER_2); ++ MAC_DEFAULT_REG(mac, SLOT_TIME); ++ MAC_DEFAULT_REG(mac, INT_CLK_THRESH); ++ MAC_DEFAULT_REG(mac, BUF_SIZE_TX); ++ MAC_DEFAULT_REG(mac, TX_CNTRL1); ++ MAC_DEFAULT_REG(mac, RX_CNTRL1); ++} ++ ++static void mac_set_uniaddr(struct net_device *dev) ++{ ++ int i; ++ struct mac_info *mac = netdev_priv(dev); ++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev); ++ ++ /* check for multicast */ ++ if (dev->dev_addr[0] & 1) ++ return; ++ ++ npe_mh_setportaddr(npe, mac->plat, dev->dev_addr); ++ npe_mh_disable_firewall(npe, mac->plat); ++ for (i=0; i<dev->addr_len; i++) ++ mac_write_reg(mac, MAC_UNI_ADDR + i, dev->dev_addr[i]); ++} ++ ++static void update_duplex_mode(struct net_device *dev) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ if (netif_msg_link(mac)) { ++ printk(KERN_DEBUG "Link of %s is %s-duplex\n", dev->name, ++ mac->mii.full_duplex ? 
"full" : "half"); ++ } ++ if (mac->mii.full_duplex) { ++ mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX); ++ } else { ++ mac_set_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_DUPLEX); ++ } ++} ++ ++static int media_check(struct net_device *dev, int init) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ ++ if (mii_check_media(&mac->mii, netif_msg_link(mac), init)) { ++ update_duplex_mode(dev); ++ return 1; ++ } ++ return 0; ++} ++ ++static void get_npe_stats(struct mac_info *mac, u32 *buf, int len, int reset) ++{ ++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev); ++ u32 phys; ++ ++ memset(buf, len, 0); ++ phys = dma_map_single(mac->npe_dev, buf, len, DMA_BIDIRECTIONAL); ++ npe_mh_get_stats(npe, mac->plat, phys, reset); ++ dma_unmap_single(mac->npe_dev, phys, len, DMA_BIDIRECTIONAL); ++} ++ ++static void irqcb_recv(struct qm_queue *queue) ++{ ++ struct net_device *dev = queue->cb_data; ++ ++ queue_ack_irq(queue); ++ queue_disable_irq(queue); ++ if (netif_running(dev)) ++ netif_rx_schedule(dev); ++} ++ ++int ix_recv(struct net_device *dev, int *budget, struct qm_queue *queue) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ struct sk_buff *skb; ++ u32 phys; ++ struct npe_cont *cont; ++ ++ while (*budget > 0 && netif_running(dev) ) { ++ int len; ++ phys = queue_get_entry(queue) & ~0xf; ++ if (!phys) ++ break; ++ dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, ++ DMA_FROM_DEVICE); ++ cont = dma_to_virt(queue->dev, phys); ++ len = npe_to_cpu16(cont->eth.pkt_len) -4; /* strip FCS */ ++ ++ if (unlikely(netif_msg_rx_status(mac))) { ++ printk(KERN_DEBUG "%s: RX packet size: %u\n", ++ dev->name, len); ++ queue_state(mac->rxq); ++ queue_state(mac->rxdoneq); ++ } ++ skb = dev_alloc_skb(len + 2); ++ if (likely(skb)) { ++ skb->dev = dev; ++ skb_reserve(skb, 2); ++ dma_sync_single(queue->dev, cont->eth.phys_addr, len, ++ DMA_FROM_DEVICE); ++#ifdef CONFIG_NPE_ADDRESS_COHERENT ++ /* swap the payload of the SKB */ ++ { ++ u32 *t = (u32*)(skb->data-2); ++ u32 *s = (u32*)(cont->data-2); ++ int i, j = (len+5)/4; ++ for (i=0; i<j; i++) ++ t[i] = cpu_to_be32(s[i]); ++ } ++#else ++ eth_copy_and_sum(skb, cont->data, len, 0); ++#endif ++ skb_put(skb, len); ++ skb->protocol = eth_type_trans(skb, dev); ++ dev->last_rx = jiffies; ++ netif_receive_skb(skb); ++ mac->stat.rx_packets++; ++ mac->stat.rx_bytes += skb->len; ++ } else { ++ mac->stat.rx_dropped++; ++ } ++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE); ++ cont->eth.pkt_len = 0; ++ dma_sync_single(queue->dev, phys, DMA_HDR_SIZE, DMA_TO_DEVICE); ++ queue_put_entry(mac->rxq, phys); ++ dev->quota--; ++ (*budget)--; ++ } ++ ++ return !budget; ++} ++ ++static int ix_poll(struct net_device *dev, int *budget) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ struct qm_queue *queue = mac->rxdoneq; ++ ++ for (;;) { ++ if (ix_recv(dev, budget, queue)) ++ return 1; ++ netif_rx_complete(dev); ++ queue_enable_irq(queue); ++ if (!queue_len(queue)) ++ break; ++ queue_disable_irq(queue); ++ if (netif_rx_reschedule(dev, 0)) ++ break; ++ } ++ return 0; ++} ++ ++static void ixmac_set_rx_mode (struct net_device *dev) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ struct dev_mc_list *mclist; ++ u8 aset[dev->addr_len], aclear[dev->addr_len]; ++ int i,j; ++ ++ if (dev->flags & IFF_PROMISC) { ++ mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN); ++ } else { ++ mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN); ++ ++ mclist = dev->mc_list; ++ memset(aset, 0xff, dev->addr_len); ++ memset(aclear, 0x00, dev->addr_len); ++ for (i = 0; mclist && i < 
++static void ixmac_set_rx_mode (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct dev_mc_list *mclist;
++ u8 aset[dev->addr_len], aclear[dev->addr_len];
++ int i,j;
++
++ if (dev->flags & IFF_PROMISC) {
++ mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
++ } else {
++ mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_ADDR_FLTR_EN);
++
++ mclist = dev->mc_list;
++ memset(aset, 0xff, dev->addr_len);
++ memset(aclear, 0x00, dev->addr_len);
++ for (i = 0; mclist && i < dev->mc_count; i++) {
++ for (j=0; j< dev->addr_len; j++) {
++ aset[j] &= mclist->dmi_addr[j];
++ aclear[j] |= mclist->dmi_addr[j];
++ }
++ mclist = mclist->next;
++ }
++ for (j=0; j< dev->addr_len; j++) {
++ aclear[j] = aset[j] | ~aclear[j];
++ }
++ for (i=0; i<dev->addr_len; i++) {
++ mac_write_reg(mac, MAC_ADDR + i, aset[i]);
++ mac_write_reg(mac, MAC_ADDR_MASK + i, aclear[i]);
++ }
++ }
++}
++
++static int ixmac_open (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
++ u32 buf[NPE_STAT_NUM];
++ int i;
++ u32 phys;
++
++ /* first check if the NPE is up and running */
++ if (!(npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN)) {
++ printk(KERN_ERR "%s: %s not running\n", dev->name,
++ npe->plat->name);
++ return -EIO;
++ }
++ if (npe_mh_status(npe)) {
++ printk(KERN_ERR "%s: %s not responding\n", dev->name,
++ npe->plat->name);
++ return -EIO;
++ }
++ mac->txq_pkt += init_buffer(mac->txq, TX_QUEUE_PREFILL - mac->txq_pkt);
++ mac->rxq_pkt += init_buffer(mac->rxq, RX_QUEUE_PREFILL - mac->rxq_pkt);
++
++ queue_enable_irq(mac->rxdoneq);
++
++ /* drain all buffers from the RX-done-q to make the IRQ happen */
++ while ((phys = queue_get_entry(mac->rxdoneq) & ~0xf)) {
++ struct npe_cont *cont;
++ cont = dma_to_virt(mac->rxdoneq->dev, phys);
++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
++ cont->eth.pkt_len = 0;
++ dma_sync_single(mac->rxdoneq->dev, phys, DMA_HDR_SIZE,
++ DMA_TO_DEVICE);
++ queue_put_entry(mac->rxq, phys);
++ }
++ mac_init(mac);
++ npe_mh_set_rxqid(npe, mac->plat, mac->plat->rxdoneq_id);
++ get_npe_stats(mac, buf, sizeof(buf), 1); /* reset stats */
++ get_npe_stats(mac, buf, sizeof(buf), 0);
++ /*
++ * if the extended stats contain random values,
++ * the NPE image lacks the extended statistics counters
++ */
++ for (i=NPE_STAT_NUM_BASE; i<NPE_STAT_NUM; i++) {
++ if (buf[i] > 10000)
++ break;
++ }
++ mac->npe_stat_num = i<NPE_STAT_NUM ? NPE_STAT_NUM_BASE : NPE_STAT_NUM;
++ mac->npe_stat_num += NPE_Q_STAT_NUM;
++
++ mac_set_uniaddr(dev);
++ media_check(dev, 1);
++ ixmac_set_rx_mode(dev);
++ netif_start_queue(dev);
++ schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL);
++ if (netif_msg_ifup(mac)) {
++ printk(KERN_DEBUG "%s: open " IXMAC_NAME
++ " RX queue %d bufs, TX queue %d bufs\n",
++ dev->name, mac->rxq_pkt, mac->txq_pkt);
++ }
++ return 0;
++}
++
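++/*
++ * Buffer circulation as configured by ixmac_open() above - a sketch
++ * inferred from this driver's code, not from NPE documentation:
++ *
++ *	RX:  rxq ---(empty npe_cont)---> NPE ---(filled)---> rxdoneq
++ *	     ix_recv() copies the data out and recycles the container
++ *	     from rxdoneq back to rxq
++ *
++ *	TX:  tx_doneq ---(free container)---> ixmac_start_xmit()
++ *	     ixmac_start_xmit() ---(frame)---> txq ---> NPE ---> tx_doneq
++ *	     (TX_DONE_QID 31, shared by all ports)
++ */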
++static int ixmac_start_xmit (struct sk_buff *skb, struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_cont *cont;
++ u32 phys;
++ struct qm_queue *queue = mac->txq;
++
++ if (unlikely(skb->len > DMA_BUF_SIZE)) {
++ dev_kfree_skb(skb);
++ mac->stat.tx_errors++;
++ return NETDEV_TX_OK;
++ }
++ phys = queue_get_entry(tx_doneq) & ~0xf;
++ if (!phys)
++ goto busy;
++ cont = dma_to_virt(queue->dev, phys);
++#ifdef CONFIG_NPE_ADDRESS_COHERENT
++ /* swap the payload of the SKB */
++ {
++ u32 *s = (u32*)(skb->data-2);
++ u32 *t = (u32*)(cont->data-2);
++ int i,j = (skb->len+5) / 4;
++ for (i=0; i<j; i++)
++ t[i] = cpu_to_be32(s[i]);
++ }
++#else
++ //skb_copy_and_csum_dev(skb, cont->data);
++ memcpy(cont->data, skb->data, skb->len);
++#endif
++ cont->eth.buf_len = cpu_to_npe16(DMA_BUF_SIZE);
++ cont->eth.pkt_len = cpu_to_npe16(skb->len);
++ /* disable VLAN functions in NPE image for now */
++ cont->eth.flags = 0;
++ dma_sync_single(queue->dev, phys, skb->len + DMA_HDR_SIZE,
++ DMA_TO_DEVICE);
++ queue_put_entry(queue, phys);
++ if (queue_stat(queue) == 2) { /* overflow */
++ queue_put_entry(tx_doneq, phys);
++ goto busy;
++ }
++
++ mac->stat.tx_packets++;
++ mac->stat.tx_bytes += skb->len;
++ dev->trans_start = jiffies;
++ if (netif_msg_tx_queued(mac)) {
++ printk(KERN_DEBUG "%s: TX packet size %u\n",
++ dev->name, skb->len);
++ queue_state(mac->txq);
++ queue_state(tx_doneq);
++ }
++ /* skb->len is used above, so the skb may only be freed here */
++ dev_kfree_skb(skb);
++ return NETDEV_TX_OK;
++busy:
++ return NETDEV_TX_BUSY;
++}
++
++static int ixmac_close (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++
++ netif_stop_queue (dev);
++ queue_disable_irq(mac->rxdoneq);
++
++ mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt);
++ mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt);
++
++ cancel_rearming_delayed_work(&(mac->mdio_thread));
++
++ if (netif_msg_ifdown(mac)) {
++ printk(KERN_DEBUG "%s: close " IXMAC_NAME
++ " RX queue %d bufs, TX queue %d bufs\n",
++ dev->name, mac->rxq_pkt, mac->txq_pkt);
++ }
++ return 0;
++}
++
++static int ixmac_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ int rc, duplex_changed;
++
++ if (!netif_running(dev))
++ return -EINVAL;
++ if (!try_module_get(THIS_MODULE))
++ return -ENODEV;
++ rc = generic_mii_ioctl(&mac->mii, if_mii(rq), cmd, &duplex_changed);
++ module_put(THIS_MODULE);
++ if (duplex_changed)
++ update_duplex_mode(dev);
++ return rc;
++}
++
++static struct net_device_stats *ixmac_stats (struct net_device *dev)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ return &mac->stat;
++}
++
++static void ixmac_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ struct npe_info *npe = dev_get_drvdata(mac->npe_dev);
++
++ strcpy(info->driver, IXMAC_NAME);
++ strcpy(info->version, IXMAC_VERSION);
++ if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) {
++ snprintf(info->fw_version, 32, "%d.%d func [%d]",
++ npe->img_info[2], npe->img_info[3], npe->img_info[1]);
++ }
++ strncpy(info->bus_info, npe->plat->name,
ETHTOOL_BUSINFO_LEN); ++} ++ ++static int ixmac_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ mii_ethtool_gset(&mac->mii, cmd); ++ return 0; ++} ++ ++static int ixmac_set_settings(struct net_device *dev, struct ethtool_cmd *cmd) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ int rc; ++ rc = mii_ethtool_sset(&mac->mii, cmd); ++ return rc; ++} ++ ++static int ixmac_nway_reset(struct net_device *dev) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ return mii_nway_restart(&mac->mii); ++} ++ ++static u32 ixmac_get_link(struct net_device *dev) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ return mii_link_ok(&mac->mii); ++} ++ ++static const int mac_reg_list[] = MAC_REG_LIST; ++ ++static int ixmac_get_regs_len(struct net_device *dev) ++{ ++ return ARRAY_SIZE(mac_reg_list); ++} ++ ++static void ++ixmac_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *regbuf) ++{ ++ int i; ++ struct mac_info *mac = netdev_priv(dev); ++ u8 *buf = regbuf; ++ ++ for (i=0; i<regs->len; i++) { ++ buf[i] = mac_read_reg(mac, mac_reg_list[i]); ++ } ++} ++ ++static struct { ++ const char str[ETH_GSTRING_LEN]; ++} ethtool_stats_keys[NPE_STAT_NUM + NPE_Q_STAT_NUM] = { ++ NPE_Q_STAT_STRINGS ++ NPE_STAT_STRINGS ++}; ++ ++static void ixmac_get_strings(struct net_device *dev, u32 stringset, u8 *data) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ memcpy(data, ethtool_stats_keys, mac->npe_stat_num * ETH_GSTRING_LEN); ++} ++ ++static int ixmac_get_stats_count(struct net_device *dev) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ return mac->npe_stat_num; ++} ++ ++static u32 ixmac_get_msglevel(struct net_device *dev) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ return mac->msg_enable; ++} ++ ++static void ixmac_set_msglevel(struct net_device *dev, u32 datum) ++{ ++ struct mac_info *mac = netdev_priv(dev); ++ mac->msg_enable = datum; ++} ++ ++static void ixmac_get_ethtool_stats(struct net_device *dev, ++ struct ethtool_stats *stats, u64 *data) ++{ ++ int i; ++ struct mac_info *mac = netdev_priv(dev); ++ u32 buf[NPE_STAT_NUM]; ++ ++ data[0] = queue_len(mac->rxq); ++ data[1] = queue_len(mac->rxdoneq); ++ data[2] = queue_len(mac->txq); ++ data[3] = queue_len(tx_doneq); ++ ++ get_npe_stats(mac, buf, sizeof(buf), 0); ++ ++ for (i=0; i<stats->n_stats-4; i++) { ++ data[i+4] = npe_to_cpu32(buf[i]); ++ } ++} ++ ++static struct ethtool_ops ixmac_ethtool_ops = { ++ .get_drvinfo = ixmac_get_drvinfo, ++ .get_settings = ixmac_get_settings, ++ .set_settings = ixmac_set_settings, ++ .nway_reset = ixmac_nway_reset, ++ .get_link = ixmac_get_link, ++ .get_msglevel = ixmac_get_msglevel, ++ .set_msglevel = ixmac_set_msglevel, ++ .get_regs_len = ixmac_get_regs_len, ++ .get_regs = ixmac_get_regs, ++ .get_perm_addr = ethtool_op_get_perm_addr, ++ .get_strings = ixmac_get_strings, ++ .get_stats_count = ixmac_get_stats_count, ++ .get_ethtool_stats = ixmac_get_ethtool_stats, ++}; ++ ++static void mac_mdio_thread(struct work_struct *work) ++{ ++ struct mac_info *mac = container_of(work, struct mac_info, ++ mdio_thread.work); ++ struct net_device *dev = mac->netdev; ++ ++ media_check(dev, 0); ++ schedule_delayed_work(&mac->mdio_thread, MDIO_INTERVAL); ++} ++ ++static int mac_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct mac_info *mac; ++ struct net_device *dev; ++ struct npe_info *npe; ++ struct mac_plat_info *plat = pdev->dev.platform_data; ++ int size, ret; ++ ++ if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) { 
++ return -EIO;
++ }
++ if (!(dev = alloc_etherdev (sizeof(struct mac_info)))) {
++ return -ENOMEM;
++ }
++ SET_MODULE_OWNER(dev);
++ SET_NETDEV_DEV(dev, &pdev->dev);
++ mac = netdev_priv(dev);
++ mac->netdev = dev;
++
++ size = res->end - res->start + 1;
++ mac->res = request_mem_region(res->start, size, IXMAC_NAME);
++ if (!mac->res) {
++ ret = -EBUSY;
++ goto out_free;
++ }
++
++ mac->addr = ioremap(res->start, size);
++ if (!mac->addr) {
++ ret = -ENOMEM;
++ goto out_rel;
++ }
++
++ dev->open = ixmac_open;
++ dev->hard_start_xmit = ixmac_start_xmit;
++ dev->poll = ix_poll;
++ dev->stop = ixmac_close;
++ dev->get_stats = ixmac_stats;
++ dev->do_ioctl = ixmac_ioctl;
++ dev->set_multicast_list = ixmac_set_rx_mode;
++ dev->ethtool_ops = &ixmac_ethtool_ops;
++
++ dev->weight = 16;
++ dev->tx_queue_len = 100;
++
++ mac->npe_dev = get_npe_by_id(plat->npe_id);
++ if (!mac->npe_dev) {
++ ret = -EIO;
++ goto out_unmap;
++ }
++ npe = dev_get_drvdata(mac->npe_dev);
++
++ mac->rxq = request_queue(plat->rxq_id, 128);
++ if (IS_ERR(mac->rxq)) {
++ printk(KERN_ERR "Error requesting Q: %d\n", plat->rxq_id);
++ ret = -EBUSY;
++ goto out_putmod;
++ }
++ mac->txq = request_queue(plat->txq_id, 128);
++ if (IS_ERR(mac->txq)) {
++ printk(KERN_ERR "Error requesting Q: %d\n", plat->txq_id);
++ ret = -EBUSY;
++ goto out_putmod;
++ }
++ mac->rxdoneq = request_queue(plat->rxdoneq_id, 128);
++ if (IS_ERR(mac->rxdoneq)) {
++ printk(KERN_ERR "Error requesting Q: %d\n", plat->rxdoneq_id);
++ ret = -EBUSY;
++ goto out_putmod;
++ }
++ mac->rxdoneq->irq_cb = irqcb_recv;
++ mac->rxdoneq->cb_data = dev;
++ queue_set_watermarks(mac->rxdoneq, 0, 0);
++ queue_set_irq_src(mac->rxdoneq, Q_IRQ_ID_NOT_E);
++
++ mac->qmgr = dev_get_drvdata(mac->rxq->dev);
++ if (register_netdev (dev)) {
++ ret = -EIO;
++ goto out_putmod;
++ }
++
++ mac->plat = plat;
++ mac->npe_stat_num = NPE_STAT_NUM_BASE;
++ mac->msg_enable = netif_msg_init(debug, MAC_DEF_MSG_ENABLE);
++
++ platform_set_drvdata(pdev, dev);
++
++ mac_write_reg(mac, MAC_CORE_CNTRL, CORE_RESET);
++ udelay(500);
++ mac_write_reg(mac, MAC_CORE_CNTRL, CORE_MDC_EN);
++
++ init_mdio(dev, plat->phy_id);
++
++ INIT_DELAYED_WORK(&mac->mdio_thread, mac_mdio_thread);
++
++ /* The place of the MAC address is very system dependent.
++ * Here we use a random one to be replaced by one of the
++ * following commands:
++ * "ip link set address 02:03:04:04:04:01 dev eth0"
++ * "ifconfig eth0 hw ether 02:03:04:04:04:07"
++ */
++
++ if (is_zero_ether_addr(plat->hwaddr)) {
++ random_ether_addr(dev->dev_addr);
++ dev->dev_addr[5] = plat->phy_id;
++ }
++ else
++ memcpy(dev->dev_addr, plat->hwaddr, 6);
++
++ printk(KERN_INFO IXMAC_NAME " driver " IXMAC_VERSION
++ ": %s on %s with PHY[%d] initialized\n",
++ dev->name, npe->plat->name, plat->phy_id);
++
++ return 0;
++
++out_putmod:
++ if (mac->rxq && !IS_ERR(mac->rxq))
++ release_queue(mac->rxq);
++ if (mac->txq && !IS_ERR(mac->txq))
++ release_queue(mac->txq);
++ if (mac->rxdoneq && !IS_ERR(mac->rxdoneq))
++ release_queue(mac->rxdoneq);
++ return_npe_dev(mac->npe_dev);
++out_unmap:
++ iounmap(mac->addr);
++out_rel:
++ release_resource(mac->res);
++out_free:
++ free_netdev(dev);
++ return ret;
++}
++
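++/*
++ * Note on the "& ~0xf" masking seen in drain_npe() below and in the
++ * RX/TX paths: a queue entry is the physical address of an npe_cont
++ * block, and the low four bits appear to be used by the NPE for
++ * per-entry flags (an assumption from usage; the containers are
++ * aligned well past 16 bytes, so these bits carry no address
++ * information).
++ */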
++ */ ++ ++ if (mac->txq_pkt <2) ++ mac->txq_pkt += init_buffer(tx_doneq, 5); ++ ++ if (npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN) { ++ mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_MDC_EN); ++ mac_set_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN); ++ ++ npe_mh_npe_loopback_mode(npe, mac->plat, 1); ++ mdelay(200); ++ ++ while (mac->rxq_pkt && loop++ < 2000 ) { ++ phys = queue_get_entry(tx_doneq) & ~0xf; ++ if (!phys) ++ break; ++ cont = dma_to_virt(queue->dev, phys); ++ /* actually the packets should never leave the system, ++ * but if they do, they shall contain 0s instead of ++ * intresting random data.... ++ */ ++ memset(cont->data, 0, 64); ++ cont->eth.pkt_len = 64; ++ dma_sync_single(mac->txq->dev, phys, 64 + DMA_HDR_SIZE, ++ DMA_TO_DEVICE); ++ queue_put_entry(mac->txq, phys); ++ if (queue_stat(mac->txq) == 2) { /* overflow */ ++ queue_put_entry(tx_doneq, phys); ++ break; ++ } ++ mdelay(1); ++ mac->rxq_pkt -= destroy_buffer(mac->rxdoneq, ++ mac->rxq_pkt); ++ } ++ npe_mh_npe_loopback_mode(npe, mac->plat, 0); ++ } ++ /* Flush MAC TX fifo to drain the bogus packages */ ++ mac_set_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH); ++ mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_RX_EN); ++ mac_reset_regbit(mac, MAC_TX_CNTRL1, TX_CNTRL1_TX_EN); ++ mac_reset_regbit(mac, MAC_RX_CNTRL1, RX_CNTRL1_LOOP_EN); ++ mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH); ++ mac_reset_regbit(mac, MAC_CORE_CNTRL, CORE_TX_FIFO_FLUSH); ++} ++ ++static int mac_remove(struct platform_device *pdev) ++{ ++ struct net_device* dev = platform_get_drvdata(pdev); ++ struct mac_info *mac = netdev_priv(dev); ++ ++ unregister_netdev(dev); ++ ++ mac->rxq_pkt -= destroy_buffer(mac->rxq, mac->rxq_pkt); ++ if (mac->rxq_pkt) ++ drain_npe(mac); ++ ++ mac->txq_pkt -= destroy_buffer(mac->txq, mac->txq_pkt); ++ mac->txq_pkt -= destroy_buffer(tx_doneq, mac->txq_pkt); ++ ++ if (mac->rxq_pkt || mac->txq_pkt) ++ printk("Buffers lost in NPE: RX:%d, TX:%d\n", ++ mac->rxq_pkt, mac->txq_pkt); ++ ++ release_queue(mac->txq); ++ release_queue(mac->rxq); ++ release_queue(mac->rxdoneq); ++ ++ flush_scheduled_work(); ++ return_npe_dev(mac->npe_dev); ++ ++ iounmap(mac->addr); ++ release_resource(mac->res); ++ platform_set_drvdata(pdev, NULL); ++ free_netdev(dev); ++ return 0; ++} ++ ++static struct platform_driver ixp4xx_mac = { ++ .driver.name = IXMAC_NAME, ++ .probe = mac_probe, ++ .remove = mac_remove, ++}; ++ ++static int __init init_mac(void) ++{ ++ /* The TX done Queue handles skbs sent out by the NPE */ ++ tx_doneq = request_queue(TX_DONE_QID, 128); ++ if (IS_ERR(tx_doneq)) { ++ printk(KERN_ERR "Error requesting Q: %d\n", TX_DONE_QID); ++ return -EBUSY; ++ } ++ return platform_driver_register(&ixp4xx_mac); ++} ++ ++static void __exit finish_mac(void) ++{ ++ platform_driver_unregister(&ixp4xx_mac); ++ if (tx_doneq) { ++ release_queue(tx_doneq); ++ } ++} ++ ++module_init(init_mac); ++module_exit(finish_mac); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); ++ +Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe.c 2007-02-21 02:24:35.000000000 -0800 +@@ -0,0 +1,291 @@ ++ ++#include <linux/ixp_npe.h> ++#include <asm/hardware.h> ++ ++#define RESET_NPE_PARITY 0x0800 ++#define PARITY_BIT_MASK 0x3F00FFFF ++#define CONFIG_CTRL_REG_MASK 0x3F3FFFFF ++#define MAX_RETRIES 1000000 ++#define NPE_PHYS_REG 32 ++#define 
RESET_MBST_VAL 0x0000F0F0 ++#define NPE_REGMAP 0x0000001E ++#define INSTR_WR_REG_SHORT 0x0000C000 ++#define INSTR_WR_REG_BYTE 0x00004000 ++#define MASK_ECS_REG_0_NEXTPC 0x1FFF0000 ++ ++#define INSTR_RD_FIFO 0x0F888220 ++#define INSTR_RESET_MBOX 0x0FAC8210 ++ ++#define ECS_REG_0_LDUR 8 ++#define ECS_REG_1_CCTXT 16 ++#define ECS_REG_1_SELCTXT 0 ++ ++#define ECS_BG_CTXT_REG_0 0x00 ++#define ECS_BG_CTXT_REG_1 0x01 ++#define ECS_BG_CTXT_REG_2 0x02 ++#define ECS_PRI_1_CTXT_REG_0 0x04 ++#define ECS_PRI_1_CTXT_REG_1 0x05 ++#define ECS_PRI_1_CTXT_REG_2 0x06 ++#define ECS_PRI_2_CTXT_REG_0 0x08 ++#define ECS_PRI_2_CTXT_REG_1 0x09 ++#define ECS_PRI_2_CTXT_REG_2 0x0A ++#define ECS_DBG_CTXT_REG_0 0x0C ++#define ECS_DBG_CTXT_REG_1 0x0D ++#define ECS_DBG_CTXT_REG_2 0x0E ++#define ECS_INSTRUCT_REG 0x11 ++ ++#define ECS_BG_CTXT_REG_0_RESET 0xA0000000 ++#define ECS_BG_CTXT_REG_1_RESET 0x01000000 ++#define ECS_BG_CTXT_REG_2_RESET 0x00008000 ++#define ECS_PRI_1_CTXT_REG_0_RESET 0x20000080 ++#define ECS_PRI_1_CTXT_REG_1_RESET 0x01000000 ++#define ECS_PRI_1_CTXT_REG_2_RESET 0x00008000 ++#define ECS_PRI_2_CTXT_REG_0_RESET 0x20000080 ++#define ECS_PRI_2_CTXT_REG_1_RESET 0x01000000 ++#define ECS_PRI_2_CTXT_REG_2_RESET 0x00008000 ++#define ECS_DBG_CTXT_REG_0_RESET 0x20000000 ++#define ECS_DBG_CTXT_REG_1_RESET 0x00000000 ++#define ECS_DBG_CTXT_REG_2_RESET 0x001E0000 ++#define ECS_INSTRUCT_REG_RESET 0x1003C00F ++ ++static struct { u32 reg; u32 val; } ecs_reset[] = ++{ ++ { ECS_BG_CTXT_REG_0, ECS_BG_CTXT_REG_0_RESET }, ++ { ECS_BG_CTXT_REG_1, ECS_BG_CTXT_REG_1_RESET }, ++ { ECS_BG_CTXT_REG_2, ECS_BG_CTXT_REG_2_RESET }, ++ { ECS_PRI_1_CTXT_REG_0, ECS_PRI_1_CTXT_REG_0_RESET }, ++ { ECS_PRI_1_CTXT_REG_1, ECS_PRI_1_CTXT_REG_1_RESET }, ++ { ECS_PRI_1_CTXT_REG_2, ECS_PRI_1_CTXT_REG_2_RESET }, ++ { ECS_PRI_2_CTXT_REG_0, ECS_PRI_2_CTXT_REG_0_RESET }, ++ { ECS_PRI_2_CTXT_REG_1, ECS_PRI_2_CTXT_REG_1_RESET }, ++ { ECS_PRI_2_CTXT_REG_2, ECS_PRI_2_CTXT_REG_2_RESET }, ++ { ECS_DBG_CTXT_REG_0, ECS_DBG_CTXT_REG_0_RESET }, ++ { ECS_DBG_CTXT_REG_1, ECS_DBG_CTXT_REG_1_RESET }, ++ { ECS_DBG_CTXT_REG_2, ECS_DBG_CTXT_REG_2_RESET }, ++ { ECS_INSTRUCT_REG, ECS_INSTRUCT_REG_RESET } ++}; ++ ++/* actually I have no idea what I'm doing here !! ++ * I only rewrite the "reset" sequence the way Intel does it. 
++ */ ++ ++static void npe_debg_preexec(struct npe_info *npe) ++{ ++ u32 r = IX_NPEDL_MASK_ECS_DBG_REG_2_IF | IX_NPEDL_MASK_ECS_DBG_REG_2_IE; ++ ++ npe->exec_count = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCT); ++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, 0); ++ npe->ctx_reg2 = npe_read_ecs_reg(npe, ECS_DBG_CTXT_REG_2); ++ npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2 | r); ++} ++ ++static void npe_debg_postexec(struct npe_info *npe) ++{ ++ npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0, 0); ++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); ++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCT, npe->exec_count); ++ npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_2, npe->ctx_reg2); ++} ++ ++static int ++npe_debg_inst_exec(struct npe_info *npe, u32 instr, u32 ctx, u32 ldur) ++{ ++ u32 regval, wc; ++ int c = 0; ++ ++ regval = IX_NPEDL_MASK_ECS_REG_0_ACTIVE | ++ (ldur << ECS_REG_0_LDUR); ++ npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_0 , regval); ++ /* set CCTXT at ECS DEBUG L3 to specify in which context ++ * to execute the instruction ++ */ ++ regval = (ctx << ECS_REG_1_CCTXT) | ++ (ctx << ECS_REG_1_SELCTXT); ++ npe_write_ecs_reg(npe, ECS_DBG_CTXT_REG_1, regval); ++ ++ /* clear the pipeline */ ++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE); ++ ++ /* load NPE instruction into the instruction register */ ++ npe_write_ecs_reg(npe, ECS_INSTRUCT_REG, instr); ++ /* we need this value later to wait for ++ * completion of NPE execution step ++ */ ++ wc = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC); ++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STEP); ++ ++ /* Watch Count register increments when NPE completes an instruction */ ++ while (wc == npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WC) && ++ ++c < MAX_RETRIES); ++ ++ if (c >= MAX_RETRIES) { ++ printk(KERN_ERR "%s reset:npe_debg_inst_exec(): Timeout\n", ++ npe->plat->name); ++ return 1; ++ } ++ return 0; ++} ++ ++static int npe_logical_reg_write8(struct npe_info *npe, u32 addr, u32 val) ++{ ++ u32 instr; ++ val &= 0xff; ++ /* here we build the NPE assembler instruction: ++ * mov8 d0, #0 */ ++ instr = INSTR_WR_REG_BYTE | /* OpCode */ ++ addr << 9 | /* base Operand */ ++ (val & 0x1f) << 4 | /* lower 5 bits to immediate data */ ++ (val & ~0x1f) << (18-5);/* higher 3 bits to CoProc instr. */ ++ /* and execute it */ ++ return npe_debg_inst_exec(npe, instr, 0, 1); ++} ++ ++static int npe_logical_reg_write16(struct npe_info *npe, u32 addr, u32 val) ++{ ++ u32 instr; ++ /* here we build the NPE assembler instruction: ++ * mov16 d0, #0 */ ++ val &= 0xffff; ++ instr = INSTR_WR_REG_SHORT | /* OpCode */ ++ addr << 9 | /* base Operand */ ++ (val & 0x1f) << 4 | /* lower 5 bits to immediate data */ ++ (val & ~0x1f) << (18-5);/* higher 11 bits to CoProc instr. 
*/ ++ /* and execute it */ ++ return npe_debg_inst_exec(npe, instr, 0, 1); ++} ++ ++static int npe_logical_reg_write32(struct npe_info *npe, u32 addr, u32 val) ++{ ++ /* write in 16 bit steps first the high and then the low value */ ++ npe_logical_reg_write16(npe, addr, val >> 16); ++ return npe_logical_reg_write16(npe, addr+2, val & 0xffff); ++} ++ ++void npe_reset(struct npe_info *npe) ++{ ++ u32 reg, cfg_ctrl; ++ int i; ++ struct { u32 reset; int addr; int size; } ctx_reg[] = { ++ { 0x80, 0x1b, 8 }, ++ { 0, 0x1c, 16 }, ++ { 0x820, 0x1e, 16 }, ++ { 0, 0x1f, 8 } ++ }, *cr; ++ ++ cfg_ctrl = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL); ++ cfg_ctrl |= 0x3F000000; ++ /* disable the parity interrupt */ ++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, cfg_ctrl & PARITY_BIT_MASK); ++ ++ npe_debg_preexec(npe); ++ ++ /* clear the FIFOs */ ++ while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_WFIFO) == ++ IX_NPEDL_MASK_WFIFO_VALID); ++ while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) == ++ IX_NPEDL_MASK_STAT_OFNE) ++ { ++ u32 reg; ++ reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_FIFO); ++ printk("%s reset: Read FIFO:=%x\n", npe->plat->name, reg); ++ } ++ while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) == ++ IX_NPEDL_MASK_STAT_IFNE) { ++ npe_debg_inst_exec(npe, INSTR_RD_FIFO, 0, 0); ++ } ++ ++ /* Reset the mailbox reg */ ++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_MBST, RESET_MBST_VAL); ++ npe_debg_inst_exec(npe, INSTR_RESET_MBOX, 0, 0); ++ ++ /* Reset the physical registers in the NPE register file */ ++ for (i=0; i<NPE_PHYS_REG; i++) { ++ npe_logical_reg_write16(npe, NPE_REGMAP, i >> 1); ++ npe_logical_reg_write32(npe, (i&1) *4, 0); ++ } ++ ++ /* Reset the context store. Iterate over the 16 ctx s */ ++ for(i=0; i<16; i++) { ++ for (reg=0; reg<4; reg++) { ++ /* There is no (STEVT) register for Context 0. ++ * ignore if register=0 and ctx=0 */ ++ if (!(reg || i)) ++ continue; ++ /* Context 0 has no STARTPC. 
Instead, this value is
++ * used to set NextPC for Background ECS,
++ * to set where NPE starts executing code
++ */
++ cr = ctx_reg + reg;
++ if (!i && reg==1) {
++ u32 r;
++ r = npe_read_ecs_reg(npe, ECS_BG_CTXT_REG_0);
++ r &= ~MASK_ECS_REG_0_NEXTPC;
++ r |= (cr->reset << 16) & MASK_ECS_REG_0_NEXTPC;
++ npe_write_ecs_reg(npe, ECS_BG_CTXT_REG_0, r);
++ continue;
++ }
++ switch (cr->size) {
++ case 8:
++ npe_logical_reg_write8(npe, cr->addr,
++ cr->reset);
++ break;
++ case 16:
++ npe_logical_reg_write16(npe, cr->addr,
++ cr->reset);
++ }
++ }
++ }
++ npe_debg_postexec(npe);
++
++ for (i=0; i< ARRAY_SIZE(ecs_reset); i++) {
++ npe_write_ecs_reg(npe, ecs_reset[i].reg, ecs_reset[i].val);
++ }
++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT);
++
++ for (i=IX_NPEDL_REG_OFFSET_EXCT; i<=IX_NPEDL_REG_OFFSET_AP3; i+=4) {
++ npe_reg_write(npe, i, 0);
++ }
++
++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_WC, 0);
++
++ reg = *IXP4XX_EXP_CFG2;
++ reg |= 0x800 << npe->plat->id; /* IX_FUSE_NPE[ABC] */
++ *IXP4XX_EXP_CFG2 = reg;
++ reg &= ~(0x800 << npe->plat->id); /* IX_FUSE_NPE[ABC] */
++ *IXP4XX_EXP_CFG2 = reg;
++
++ npe_stop(npe);
++
++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL,
++ cfg_ctrl & CONFIG_CTRL_REG_MASK);
++ npe->loaded = 0;
++}
++
++
++void npe_stop(struct npe_info *npe)
++{
++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_STOP);
++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
++}
++
++static void npe_reset_active(struct npe_info *npe, u32 reg)
++{
++ u32 regval;
++
++ regval = npe_read_ecs_reg(npe, reg);
++ regval &= ~IX_NPEDL_MASK_ECS_REG_0_ACTIVE;
++ npe_write_ecs_reg(npe, reg, regval);
++}
++
++void npe_start(struct npe_info *npe)
++{
++ npe_reset_active(npe, IX_NPEDL_ECS_PRI_1_CTXT_REG_0);
++ npe_reset_active(npe, IX_NPEDL_ECS_PRI_2_CTXT_REG_0);
++ npe_reset_active(npe, IX_NPEDL_ECS_DBG_CTXT_REG_0);
++
++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE);
++ npe_write_exctl(npe, IX_NPEDL_EXCTL_CMD_NPE_START);
++}
++
++EXPORT_SYMBOL(npe_stop);
++EXPORT_SYMBOL(npe_start);
++EXPORT_SYMBOL(npe_reset);
+Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe_mh.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/npe_mh.c 2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,170 @@
++/*
++ * npe_mh.c - NPE message handler.
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#include <linux/ixp_npe.h>
++#include <linux/slab.h>
++
++#define MAX_RETRY 200
++
++struct npe_mh_msg {
++ union {
++ u8 byte[8]; /* Very descriptive name, I know ... */
++ u32 data[2];
++ } u;
++};
++
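++/*
++ * Wire format of the 8-byte message as used by the helpers below
++ * (a sketch derived from this file; the CMD/PORT/MAC byte indices
++ * are defined further down):
++ *
++ *	byte[0]   command id, e.g. IX_ETHNPE_GETSTATS
++ *	byte[1]   port: logical_id(mp) or, for SETPORTADDRESS, eth_id
++ *	byte[2-7] command specific arguments
++ *
++ * A GETSTATS request, for example, is built as:
++ *
++ *	msg.u.byte[CMD] = IX_ETHNPE_GETSTATS;
++ *	msg.u.byte[PORT] = logical_id(mp);
++ *	msg.u.data[1] = cpu_to_npe32(cpu_to_be32(phys));
++ */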
++/*
++ * The whole code in this function must be reworked.
++ * It is in a state that works but is not rock solid
++ */
++static int send_message(struct npe_info *npe, struct npe_mh_msg *msg)
++{
++ int i,j;
++ u32 send[2], recv[2];
++
++ for (i=0; i<2; i++)
++ send[i] = be32_to_cpu(msg->u.data[i]);
++
++ if ((npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
++ IX_NPEMH_NPE_STAT_IFNE))
++ return -1;
++
++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[0]);
++ for(i=0; i<MAX_RETRY; i++) {
++ /* if the IFNF status bit is unset then the inFIFO is full */
++ if (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
++ IX_NPEMH_NPE_STAT_IFNF)
++ break;
++ }
++ if (i >= MAX_RETRY)
++ return -1;
++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_FIFO, send[1]);
++ i=0;
++ while (!(npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
++ IX_NPEMH_NPE_STAT_OFNE)) {
++ if (i++ > MAX_RETRY) {
++ printk(KERN_ERR "Waiting for Output FIFO NotEmpty failed\n");
++ return -1;
++ }
++ }
++ //printk("Output FIFO Not Empty. Loops: %d\n", i);
++ j=0;
++ while (npe_reg_read(npe, IX_NPEDL_REG_OFFSET_STAT) &
++ IX_NPEMH_NPE_STAT_OFNE) {
++ recv[j&1] = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_FIFO);
++ j++;
++ }
++ if ((recv[0] != send[0]) || (recv[1] != send[1])) {
++ if (send[0] || send[1]) {
++ /* all CMDs return the complete message as the answer;
++ * only GETSTATUS returns the ImageID of the NPE
++ */
++ printk(KERN_WARNING "Unexpected answer: "
++ "Send %08x:%08x Ret %08x:%08x\n",
++ send[0], send[1], recv[0], recv[1]);
++ }
++ }
++ return 0;
++}
++
++#define CMD 0
++#define PORT 1
++#define MAC 2
++
++#define IX_ETHNPE_NPE_GETSTATUS 0x00
++#define IX_ETHNPE_EDB_SETPORTADDRESS 0x01
++#define IX_ETHNPE_GETSTATS 0x04
++#define IX_ETHNPE_RESETSTATS 0x05
++#define IX_ETHNPE_FW_SETFIREWALLMODE 0x0E
++#define IX_ETHNPE_VLAN_SETRXQOSENTRY 0x0B
++#define IX_ETHNPE_SETLOOPBACK_MODE 0x12
++
++#define logical_id(mp) (((mp)->npe_id << 4) | ((mp)->port_id & 0xf))
++
++int npe_mh_status(struct npe_info *npe)
++{
++ struct npe_mh_msg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.u.byte[CMD] = IX_ETHNPE_NPE_GETSTATUS;
++ return send_message(npe, &msg);
++}
++
++int npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp,
++ u8 *macaddr)
++{
++ struct npe_mh_msg msg;
++
++ msg.u.byte[CMD] = IX_ETHNPE_EDB_SETPORTADDRESS;
++ msg.u.byte[PORT] = mp->eth_id;
++ memcpy(msg.u.byte + MAC, macaddr, 6);
++
++ return send_message(npe, &msg);
++}
++
++int npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp)
++{
++ struct npe_mh_msg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.u.byte[CMD] = IX_ETHNPE_FW_SETFIREWALLMODE;
++ msg.u.byte[PORT] = logical_id(mp);
++
++ return send_message(npe, &msg);
++}
++
++int npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp,
++ int enable)
++{
++ struct npe_mh_msg msg;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.u.byte[CMD] = IX_ETHNPE_SETLOOPBACK_MODE;
++ msg.u.byte[PORT] = logical_id(mp);
++ msg.u.byte[3] = enable ?
1 : 0;
++
++ return send_message(npe, &msg);
++}
++
++int npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid)
++{
++ struct npe_mh_msg msg;
++ int i, ret;
++
++ memset(&msg, 0, sizeof(msg));
++ msg.u.byte[CMD] = IX_ETHNPE_VLAN_SETRXQOSENTRY;
++ msg.u.byte[PORT] = logical_id(mp);
++ msg.u.byte[5] = qid | 0x80;
++ msg.u.byte[7] = qid<<4;
++ for(i=0; i<8; i++) {
++ msg.u.byte[3] = i;
++ if ((ret = send_message(npe, &msg)))
++ return ret;
++ }
++ return 0;
++}
++
++int npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys,
++ int reset)
++{
++ struct npe_mh_msg msg;
++ memset(&msg, 0, sizeof(msg));
++ msg.u.byte[CMD] = reset ? IX_ETHNPE_RESETSTATS : IX_ETHNPE_GETSTATS;
++ msg.u.byte[PORT] = logical_id(mp);
++ msg.u.data[1] = cpu_to_npe32(cpu_to_be32(phys));
++
++ return send_message(npe, &msg);
++}
++
++
++EXPORT_SYMBOL(npe_mh_status);
++EXPORT_SYMBOL(npe_mh_setportaddr);
++EXPORT_SYMBOL(npe_mh_disable_firewall);
++EXPORT_SYMBOL(npe_mh_set_rxqid);
++EXPORT_SYMBOL(npe_mh_npe_loopback_mode);
++EXPORT_SYMBOL(npe_mh_get_stats);
+Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/phy.c
+===================================================================
+--- /dev/null 1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/phy.c 2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,113 @@
++/*
++ * phy.c - MDIO functions and mii initialisation
++ *
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++
++#include <linux/mutex.h>
++#include "mac.h"
++
++#define MAX_PHYS (1<<5)
++
++/*
++ * We must always use the same MAC for accessing the MDIO
++ * We may not use each MAC for its PHY :-(
++ */
++
++static struct net_device *phy_dev = NULL;
++static struct mutex mtx;
++
++/* here we remember if the PHY is alive, to avoid log dumping */
++static int phy_works[MAX_PHYS];
++
++int mdio_read_register(struct net_device *dev, int phy_addr, int phy_reg)
++{
++ struct mac_info *mac;
++ u32 cmd, reg;
++ int cnt = 0;
++
++ if (!phy_dev)
++ return 0;
++
++ mac = netdev_priv(phy_dev);
++ cmd = mdio_cmd(phy_addr, phy_reg);
++ mutex_lock(&mtx);
++ mac_mdio_cmd_write(mac, cmd);
++ while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
++ if (++cnt >= 100) {
++ printk(KERN_WARNING "%s: PHY[%d] access failed\n",
++ dev->name, phy_addr);
++ break;
++ }
++ schedule();
++ }
++ reg = mac_mdio_status_read(mac);
++ mutex_unlock(&mtx);
++ if (reg & MII_READ_FAIL) {
++ if (phy_works[phy_addr]) {
++ printk(KERN_WARNING "%s: PHY[%d] unresponsive\n",
++ dev->name, phy_addr);
++ }
++ reg = 0;
++ phy_works[phy_addr] = 0;
++ } else {
++ if (!phy_works[phy_addr]) {
++ printk(KERN_INFO "%s: PHY[%d] responsive again\n",
++ dev->name, phy_addr);
++ }
++ phy_works[phy_addr] = 1;
++ }
++ return reg & 0xffff;
++}
++
++void
++mdio_write_register(struct net_device *dev, int phy_addr, int phy_reg, int val)
++{
++ struct mac_info *mac;
++ u32 cmd;
++ int cnt=0;
++
++ if (!phy_dev)
++ return;
++
++ mac = netdev_priv(phy_dev);
++ cmd = mdio_cmd(phy_addr, phy_reg) | MII_WRITE | val;
++
++ mutex_lock(&mtx);
++ mac_mdio_cmd_write(mac, cmd);
++ while((cmd = mac_mdio_cmd_read(mac)) & MII_GO) {
++ if (++cnt >= 100) {
++ printk(KERN_WARNING "%s: PHY[%d] access failed\n",
++ dev->name, phy_addr);
++ break;
++ }
++ schedule();
++ }
++ mutex_unlock(&mtx);
++}
++
++void init_mdio(struct net_device *dev, int phy_id)
++{
++ struct mac_info *mac = netdev_priv(dev);
++ int i;
++
++ /* All phy operations should use the same MAC
++ * (my experience)
++
*/ ++ if (mac->plat->eth_id == 0) { ++ mutex_init(&mtx); ++ phy_dev = dev; ++ for (i=0; i<MAX_PHYS; i++) ++ phy_works[i] = 1; ++ } ++ mac->mii.dev = dev; ++ mac->mii.phy_id = phy_id; ++ mac->mii.phy_id_mask = MAX_PHYS - 1; ++ mac->mii.reg_num_mask = 0x1f; ++ mac->mii.mdio_read = mdio_read_register; ++ mac->mii.mdio_write = mdio_write_register; ++} ++ +Index: linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ucode_dl.c +=================================================================== +--- /dev/null 1970-01-01 00:00:00.000000000 +0000 ++++ linux-2.6.21-rc1-arm/drivers/net/ixp4xx/ucode_dl.c 2007-02-21 02:24:35.000000000 -0800 +@@ -0,0 +1,479 @@ ++/* ++ * ucode_dl.c - provide an NPE device and a char-dev for microcode download ++ * ++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com> ++ * ++ * This file is released under the GPLv2 ++ */ ++ ++#include <linux/kernel.h> ++#include <linux/module.h> ++#include <linux/miscdevice.h> ++#include <linux/platform_device.h> ++#include <linux/fs.h> ++#include <linux/init.h> ++#include <linux/slab.h> ++#include <linux/firmware.h> ++#include <linux/dma-mapping.h> ++#include <linux/byteorder/swab.h> ++#include <asm/uaccess.h> ++#include <asm/io.h> ++ ++#include <linux/ixp_npe.h> ++ ++#define IXNPE_VERSION "IXP4XX NPE driver Version 0.3.0" ++ ++#define DL_MAGIC 0xfeedf00d ++#define DL_MAGIC_SWAP 0x0df0edfe ++ ++#define EOF_BLOCK 0xf ++#define IMG_SIZE(image) (((image)->size * sizeof(u32)) + \ ++ sizeof(struct dl_image)) ++ ++#define BT_INSTR 0 ++#define BT_DATA 1 ++ ++enum blk_type { ++ instruction, ++ data, ++}; ++ ++struct dl_block { ++ u32 type; ++ u32 offset; ++}; ++ ++struct dl_image { ++ u32 magic; ++ u32 id; ++ u32 size; ++ union { ++ u32 data[0]; ++ struct dl_block block[0]; ++ } u; ++}; ++ ++struct dl_codeblock { ++ u32 npe_addr; ++ u32 size; ++ u32 data[0]; ++}; ++ ++static struct platform_driver ixp4xx_npe_driver; ++ ++static int match_by_npeid(struct device *dev, void *id) ++{ ++ struct npe_info *npe = dev_get_drvdata(dev); ++ if (!npe->plat) ++ return 0; ++ return (npe->plat->id == *(int*)id); ++} ++ ++struct device *get_npe_by_id(int id) ++{ ++ struct device *dev = driver_find_device(&ixp4xx_npe_driver.driver, ++ NULL, &id, match_by_npeid); ++ if (dev) { ++ struct npe_info *npe = dev_get_drvdata(dev); ++ if (!try_module_get(THIS_MODULE)) { ++ put_device(dev); ++ return NULL; ++ } ++ npe->usage++; ++ } ++ return dev; ++} ++ ++void return_npe_dev(struct device *dev) ++{ ++ struct npe_info *npe = dev_get_drvdata(dev); ++ put_device(dev); ++ module_put(THIS_MODULE); ++ npe->usage--; ++} ++ ++static int ++download_block(struct npe_info *npe, struct dl_codeblock *cb, unsigned type) ++{ ++ int i; ++ int cmd; ++ ++ switch (type) { ++ case BT_DATA: ++ cmd = IX_NPEDL_EXCTL_CMD_WR_DATA_MEM; ++ if (cb->npe_addr + cb->size > npe->plat->data_size) { ++ printk(KERN_INFO "Data size too large: %d+%d > %d\n", ++ cb->npe_addr, cb->size, npe->plat->data_size); ++ return -EIO; ++ } ++ break; ++ case BT_INSTR: ++ cmd = IX_NPEDL_EXCTL_CMD_WR_INS_MEM; ++ if (cb->npe_addr + cb->size > npe->plat->inst_size) { ++ printk(KERN_INFO "Instr size too large: %d+%d > %d\n", ++ cb->npe_addr, cb->size, npe->plat->inst_size); ++ return -EIO; ++ } ++ break; ++ default: ++ printk(KERN_INFO "Unknown CMD: %d\n", type); ++ return -EIO; ++ } ++ ++ for (i=0; i < cb->size; i++) { ++ npe_write_cmd(npe, cb->npe_addr + i, cb->data[i], cmd); ++ } ++ ++ return 0; ++} ++ ++static int store_npe_image(struct dl_image *image, struct device *dev) ++{ ++ struct dl_block *blk; ++ struct 
dl_codeblock *cb; ++ struct npe_info *npe; ++ int ret=0; ++ ++ if (!dev) { ++ dev = get_npe_by_id( (image->id >> 24) & 0xf); ++ return_npe_dev(dev); ++ } ++ if (!dev) ++ return -ENODEV; ++ ++ npe = dev_get_drvdata(dev); ++ if (npe->loaded && (npe->usage > 0)) { ++ printk(KERN_INFO "Cowardly refusing to reload an Image " ++ "into the used and running %s\n", npe->plat->name); ++ return 0; /* indicate success anyway... */ ++ } ++ if (!cpu_is_ixp46x() && ((image->id >> 28) & 0xf)) { ++ printk(KERN_INFO "IXP46x NPE image ignored on IXP42x\n"); ++ return -EIO; ++ } ++ ++ npe_stop(npe); ++ npe_reset(npe); ++ ++ for (blk = image->u.block; blk->type != EOF_BLOCK; blk++) { ++ if (blk->offset > image->size) { ++ printk(KERN_INFO "Block offset out of range\n"); ++ return -EIO; ++ } ++ cb = (struct dl_codeblock*)&image->u.data[blk->offset]; ++ if (blk->offset + cb->size + 2 > image->size) { ++ printk(KERN_INFO "Codeblock size out of range\n"); ++ return -EIO; ++ } ++ if ((ret = download_block(npe, cb, blk->type))) ++ return ret; ++ } ++ *(u32*)npe->img_info = cpu_to_be32(image->id); ++ npe_start(npe); ++ ++ printk(KERN_INFO "Image loaded to %s Func:%x, Rel: %x:%x, Status: %x\n", ++ npe->plat->name, npe->img_info[1], npe->img_info[2], ++ npe->img_info[3], npe_status(npe)); ++ if (npe_mh_status(npe)) { ++ printk(KERN_ERR "%s not responding\n", npe->plat->name); ++ } ++ npe->loaded = 1; ++ return 0; ++} ++ ++static int ucode_open(struct inode *inode, struct file *file) ++{ ++ file->private_data = kmalloc(sizeof(struct dl_image), GFP_KERNEL); ++ if (!file->private_data) ++ return -ENOMEM; ++ return 0; ++} ++ ++static int ucode_close(struct inode *inode, struct file *file) ++{ ++ kfree(file->private_data); ++ return 0; ++} ++ ++static ssize_t ucode_write(struct file *file, const char __user *buf, ++ size_t count, loff_t *ppos) ++{ ++ union { ++ char *data; ++ struct dl_image *image; ++ } u; ++ const char __user *cbuf = buf; ++ ++ u.data = file->private_data; ++ ++ while (count) { ++ int len; ++ if (*ppos < sizeof(struct dl_image)) { ++ len = sizeof(struct dl_image) - *ppos; ++ len = len > count ? count : len; ++ if (copy_from_user(u.data + *ppos, cbuf, len)) ++ return -EFAULT; ++ count -= len; ++ *ppos += len; ++ cbuf += len; ++ continue; ++ } else if (*ppos == sizeof(struct dl_image)) { ++ void *data; ++ if (u.image->magic == DL_MAGIC_SWAP) { ++ printk(KERN_INFO "swapped image found\n"); ++ u.image->id = swab32(u.image->id); ++ u.image->size = swab32(u.image->size); ++ } else if (u.image->magic != DL_MAGIC) { ++ printk(KERN_INFO "Bad magic:%x\n", ++ u.image->magic); ++ return -EFAULT; ++ } ++ len = IMG_SIZE(u.image); ++ data = kmalloc(len, GFP_KERNEL); ++ if (!data) ++ return -ENOMEM; ++ memcpy(data, u.data, *ppos); ++ kfree(u.data); ++ u.data = (char*)data; ++ file->private_data = data; ++ } ++ len = IMG_SIZE(u.image) - *ppos; ++ len = len > count ? 
count : len;
++ if (copy_from_user(u.data + *ppos, cbuf, len))
++ return -EFAULT;
++ count -= len;
++ *ppos += len;
++ cbuf += len;
++ if (*ppos == IMG_SIZE(u.image)) {
++ int ret, i;
++ *ppos = 0;
++ if (u.image->magic == DL_MAGIC_SWAP) {
++ for (i=0; i<u.image->size; i++) {
++ u.image->u.data[i] =
++ swab32(u.image->u.data[i]);
++ }
++ u.image->magic = swab32(u.image->magic);
++ }
++ ret = store_npe_image(u.image, NULL);
++ if (ret) {
++ printk(KERN_INFO "Error in NPE image: %x\n",
++ u.image->id);
++ return ret;
++ }
++ }
++ }
++ return (cbuf-buf);
++}
++
++static void npe_firmware_probe(struct device *dev)
++{
++#if (defined(CONFIG_FW_LOADER) || defined(CONFIG_FW_LOADER_MODULE)) \
++ && defined(MODULE)
++ const struct firmware *fw_entry;
++ struct npe_info *npe = dev_get_drvdata(dev);
++ struct dl_image *image;
++ int ret = -1, i;
++
++ if (request_firmware(&fw_entry, npe->plat->name, dev) != 0) {
++ return;
++ }
++ image = (struct dl_image*)fw_entry->data;
++ /* Sanity checks */
++ if (fw_entry->size < sizeof(struct dl_image)) {
++ printk(KERN_ERR "Firmware error: too small\n");
++ goto out;
++ }
++ if (image->magic == DL_MAGIC_SWAP) {
++ printk(KERN_INFO "swapped image found\n");
++ image->id = swab32(image->id);
++ image->size = swab32(image->size);
++ } else if (image->magic != DL_MAGIC) {
++ printk(KERN_ERR "Bad magic:%x\n", image->magic);
++ goto out;
++ }
++ if (IMG_SIZE(image) != fw_entry->size) {
++ printk(KERN_ERR "Firmware error: bad size\n");
++ goto out;
++ }
++ if (((image->id >> 24) & 0xf) != npe->plat->id) {
++ printk(KERN_ERR "NPE id mismatch\n");
++ goto out;
++ }
++ if (image->magic == DL_MAGIC_SWAP) {
++ for (i=0; i<image->size; i++) {
++ image->u.data[i] = swab32(image->u.data[i]);
++ }
++ image->magic = swab32(image->magic);
++ }
++
++ ret = store_npe_image(image, dev);
++out:
++ if (ret) {
++ printk(KERN_ERR "Error downloading Firmware for %s\n",
++ npe->plat->name);
++ }
++ release_firmware(fw_entry);
++#endif
++}
++
++static void disable_npe_irq(struct npe_info *npe)
++{
++ u32 reg;
++ reg = npe_reg_read(npe, IX_NPEDL_REG_OFFSET_CTL);
++ reg &= ~(IX_NPEMH_NPE_CTL_OFE | IX_NPEMH_NPE_CTL_IFE);
++ reg |= IX_NPEMH_NPE_CTL_OFEWE | IX_NPEMH_NPE_CTL_IFEWE;
++ npe_reg_write(npe, IX_NPEDL_REG_OFFSET_CTL, reg);
++}
++
++static ssize_t show_npe_state(struct device *dev, struct device_attribute *attr,
++ char *buf)
++{
++ struct npe_info *npe = dev_get_drvdata(dev);
++
++ strcpy(buf, npe_status(npe) & IX_NPEDL_EXCTL_STATUS_RUN ?
++ "start\n" : "stop\n"); ++ return strlen(buf); ++} ++ ++static ssize_t set_npe_state(struct device *dev, struct device_attribute *attr, ++ const char *buf, size_t count) ++{ ++ struct npe_info *npe = dev_get_drvdata(dev); ++ ++ if (npe->usage) { ++ printk("%s in use: read-only\n", npe->plat->name); ++ return count; ++ } ++ if (!strncmp(buf, "start", 5)) { ++ npe_start(npe); ++ } ++ if (!strncmp(buf, "stop", 4)) { ++ npe_stop(npe); ++ } ++ if (!strncmp(buf, "reset", 5)) { ++ npe_stop(npe); ++ npe_reset(npe); ++ } ++ return count; ++} ++ ++static DEVICE_ATTR(state, S_IRUGO | S_IWUSR, show_npe_state, set_npe_state); ++ ++static int npe_probe(struct platform_device *pdev) ++{ ++ struct resource *res; ++ struct npe_info *npe; ++ struct npe_plat_data *plat = pdev->dev.platform_data; ++ int err, size, ret=0; ++ ++ if (!(res = platform_get_resource(pdev, IORESOURCE_MEM, 0))) ++ return -EIO; ++ ++ if (!(npe = kzalloc(sizeof(struct npe_info), GFP_KERNEL))) ++ return -ENOMEM; ++ ++ size = res->end - res->start +1; ++ npe->res = request_mem_region(res->start, size, plat->name); ++ if (!npe->res) { ++ ret = -EBUSY; ++ printk(KERN_ERR "Failed to get memregion(%x, %x)\n", ++ res->start, size); ++ goto out_free; ++ } ++ ++ npe->addr = ioremap(res->start, size); ++ if (!npe->addr) { ++ ret = -ENOMEM; ++ printk(KERN_ERR "Failed to ioremap(%x, %x)\n", ++ res->start, size); ++ goto out_rel; ++ } ++ ++ pdev->dev.coherent_dma_mask = DMA_32BIT_MASK; ++ ++ platform_set_drvdata(pdev, npe); ++ ++ err = device_create_file(&pdev->dev, &dev_attr_state); ++ if (err) ++ goto out_rel; ++ ++ npe->plat = plat; ++ disable_npe_irq(npe); ++ npe->usage = 0; ++ npe_reset(npe); ++ npe_firmware_probe(&pdev->dev); ++ ++ return 0; ++ ++out_rel: ++ release_resource(npe->res); ++out_free: ++ kfree(npe); ++ return ret; ++} ++ ++static struct file_operations ucode_dl_fops = { ++ .owner = THIS_MODULE, ++ .write = ucode_write, ++ .open = ucode_open, ++ .release = ucode_close, ++}; ++ ++static struct miscdevice ucode_dl_dev = { ++ .minor = MICROCODE_MINOR, ++ .name = "ixp4xx_ucode", ++ .fops = &ucode_dl_fops, ++}; ++ ++static int npe_remove(struct platform_device *pdev) ++{ ++ struct npe_info *npe = platform_get_drvdata(pdev); ++ ++ device_remove_file(&pdev->dev, &dev_attr_state); ++ ++ iounmap(npe->addr); ++ release_resource(npe->res); ++ kfree(npe); ++ return 0; ++} ++ ++static struct platform_driver ixp4xx_npe_driver = { ++ .driver = { ++ .name = "ixp4xx_npe", ++ .owner = THIS_MODULE, ++ }, ++ .probe = npe_probe, ++ .remove = npe_remove, ++}; ++ ++static int __init init_npedriver(void) ++{ ++ int ret; ++ if ((ret = misc_register(&ucode_dl_dev))){ ++ printk(KERN_ERR "Failed to register misc device %d\n", ++ MICROCODE_MINOR); ++ return ret; ++ } ++ if ((ret = platform_driver_register(&ixp4xx_npe_driver))) ++ misc_deregister(&ucode_dl_dev); ++ else ++ printk(KERN_INFO IXNPE_VERSION " initialized\n"); ++ ++ return ret; ++ ++} ++ ++static void __exit finish_npedriver(void) ++{ ++ misc_deregister(&ucode_dl_dev); ++ platform_driver_unregister(&ixp4xx_npe_driver); ++} ++ ++module_init(init_npedriver); ++module_exit(finish_npedriver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Christian Hohnstaedt <chohnstaedt@innominate.com>"); ++ ++EXPORT_SYMBOL(get_npe_by_id); ++EXPORT_SYMBOL(return_npe_dev); +Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h +=================================================================== +--- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h 2007-02-21 02:24:18.000000000 -0800 
+Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h
+===================================================================
+--- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h	2007-02-21 02:24:18.000000000 -0800
++++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/ixp4xx-regs.h	2007-02-21 02:24:35.000000000 -0800
+@@ -22,6 +22,8 @@
+ #ifndef _ASM_ARM_IXP4XX_H_
+ #define _ASM_ARM_IXP4XX_H_
+ 
++#include "npe_regs.h"
++
+ /*
+  * IXP4xx Linux Memory Map:
+  *
+@@ -44,6 +46,12 @@
+  */
+ 
+ /*
++ * PCI Memory Space
++ */
++#define IXP4XX_PCIMEM_BASE_PHYS	(0x48000000)
++#define IXP4XX_PCIMEM_REGION_SIZE	(0x04000000)
++#define IXP4XX_PCIMEM_BAR_SIZE	(0x01000000)
++/*
+  * Queue Manager
+  */
+ #define IXP4XX_QMGR_BASE_PHYS	(0x60000000)
+@@ -322,7 +330,13 @@
+ #define PCI_ATPDMA0_LENADDR_OFFSET	0x48
+ #define PCI_ATPDMA1_AHBADDR_OFFSET	0x4C
+ #define PCI_ATPDMA1_PCIADDR_OFFSET	0x50
+-#define PCI_ATPDMA1_LENADDR_OFFSET	0x54
++#define PCI_ATPDMA1_LENADDR_OFFSET	0x54
++#define PCI_PTADMA0_AHBADDR_OFFSET	0x58
++#define PCI_PTADMA0_PCIADDR_OFFSET	0x5c
++#define PCI_PTADMA0_LENADDR_OFFSET	0x60
++#define PCI_PTADMA1_AHBADDR_OFFSET	0x64
++#define PCI_PTADMA1_PCIADDR_OFFSET	0x68
++#define PCI_PTADMA1_LENADDR_OFFSET	0x6c
+ 
+ /*
+  * PCI Control/Status Registers
+@@ -351,6 +365,12 @@
+ #define PCI_ATPDMA1_AHBADDR	IXP4XX_PCI_CSR(PCI_ATPDMA1_AHBADDR_OFFSET)
+ #define PCI_ATPDMA1_PCIADDR	IXP4XX_PCI_CSR(PCI_ATPDMA1_PCIADDR_OFFSET)
+ #define PCI_ATPDMA1_LENADDR	IXP4XX_PCI_CSR(PCI_ATPDMA1_LENADDR_OFFSET)
++#define PCI_PTADMA0_AHBADDR	IXP4XX_PCI_CSR(PCI_PTADMA0_AHBADDR_OFFSET)
++#define PCI_PTADMA0_PCIADDR	IXP4XX_PCI_CSR(PCI_PTADMA0_PCIADDR_OFFSET)
++#define PCI_PTADMA0_LENADDR	IXP4XX_PCI_CSR(PCI_PTADMA0_LENADDR_OFFSET)
++#define PCI_PTADMA1_AHBADDR	IXP4XX_PCI_CSR(PCI_PTADMA1_AHBADDR_OFFSET)
++#define PCI_PTADMA1_PCIADDR	IXP4XX_PCI_CSR(PCI_PTADMA1_PCIADDR_OFFSET)
++#define PCI_PTADMA1_LENADDR	IXP4XX_PCI_CSR(PCI_PTADMA1_LENADDR_OFFSET)
+ 
+ /*
+  * PCI register values and bit definitions
+@@ -607,6 +627,34 @@
+ 
+ #define DCMD_LENGTH	0x01fff		/* length mask (max = 8K - 1) */
+ 
++
++/* Fuse Bits of IXP_EXP_CFG2 */
++#define IX_FUSE_RCOMP	(1 << 0)
++#define IX_FUSE_USB	(1 << 1)
++#define IX_FUSE_HASH	(1 << 2)
++#define IX_FUSE_AES	(1 << 3)
++#define IX_FUSE_DES	(1 << 4)
++#define IX_FUSE_HDLC	(1 << 5)
++#define IX_FUSE_AAL	(1 << 6)
++#define IX_FUSE_HSS	(1 << 7)
++#define IX_FUSE_UTOPIA	(1 << 8)
++#define IX_FUSE_ETH0	(1 << 9)
++#define IX_FUSE_ETH1	(1 << 10)
++#define IX_FUSE_NPEA	(1 << 11)
++#define IX_FUSE_NPEB	(1 << 12)
++#define IX_FUSE_NPEC	(1 << 13)
++#define IX_FUSE_PCI	(1 << 14)
++#define IX_FUSE_ECC	(1 << 15)
++#define IX_FUSE_UTOPIA_PHY_LIMIT	(3 << 16)
++#define IX_FUSE_USB_HOST	(1 << 18)
++#define IX_FUSE_NPEA_ETH	(1 << 19)
++#define IX_FUSE_NPEB_ETH	(1 << 20)
++#define IX_FUSE_RSA	(1 << 21)
++#define IX_FUSE_XSCALE_MAX_FREQ	(3 << 22)
++
++#define IX_FUSE_IXP46X_ONLY	(IX_FUSE_XSCALE_MAX_FREQ | IX_FUSE_RSA | \
++	IX_FUSE_NPEB_ETH | IX_FUSE_NPEA_ETH | IX_FUSE_USB_HOST | IX_FUSE_ECC)
++
+ #ifndef __ASSEMBLY__
+ static inline int cpu_is_ixp46x(void)
+ {
+@@ -620,6 +668,15 @@
+ #endif
+ 	return 0;
+ }
++
++static inline u32 ix_fuse(void)
++{
++	unsigned int fuses = ~(*IXP4XX_EXP_CFG2);
++	if (!cpu_is_ixp46x())
++		fuses &= ~IX_FUSE_IXP46X_ONLY;
++
++	return fuses;
++}
+ #endif
+ 
+ #endif
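Note that IX_FUSE_IXP46X_ONLY is parenthesized above so that ~IX_FUSE_IXP46X_ONLY negates the whole mask; without the parentheses the complement would apply only to the first term. Since ix_fuse() inverts the IXP4XX_EXP_CFG2 word, a set bit means the unit is present and enabled, and the IXP46x-only capabilities are masked off on other parts. For illustration, a driver probe might gate itself on the fuses roughly like this (the function below is an invented example, not part of the patch):

	#include <linux/errno.h>
	#include <asm/hardware.h>	/* pulls in the IX_FUSE_* bits and ix_fuse() */

	/* Hypothetical capability check: require the AES unit and NPE-C */
	static int example_check_crypto_fuses(void)
	{
		u32 fuses = ix_fuse();

		if (!(fuses & IX_FUSE_AES) || !(fuses & IX_FUSE_NPEC))
			return -ENODEV;	/* fused off on this part */
		return 0;
	}
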
+Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/npe_regs.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/npe_regs.h	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,82 @@
++#ifndef NPE_REGS_H
++#define NPE_REGS_H
++
++/* Execution Address */
++#define IX_NPEDL_REG_OFFSET_EXAD	0x00
++/* Execution Data */
++#define IX_NPEDL_REG_OFFSET_EXDATA	0x04
++/* Execution Control */
++#define IX_NPEDL_REG_OFFSET_EXCTL	0x08
++/* Execution Count */
++#define IX_NPEDL_REG_OFFSET_EXCT	0x0C
++/* Action Point 0 */
++#define IX_NPEDL_REG_OFFSET_AP0	0x10
++/* Action Point 1 */
++#define IX_NPEDL_REG_OFFSET_AP1	0x14
++/* Action Point 2 */
++#define IX_NPEDL_REG_OFFSET_AP2	0x18
++/* Action Point 3 */
++#define IX_NPEDL_REG_OFFSET_AP3	0x1C
++/* Watchpoint FIFO */
++#define IX_NPEDL_REG_OFFSET_WFIFO	0x20
++/* Watch Count */
++#define IX_NPEDL_REG_OFFSET_WC	0x24
++/* Profile Count */
++#define IX_NPEDL_REG_OFFSET_PROFCT	0x28
++
++/* Messaging Status */
++#define IX_NPEDL_REG_OFFSET_STAT	0x2C
++/* Messaging Control */
++#define IX_NPEDL_REG_OFFSET_CTL	0x30
++/* Mailbox Status */
++#define IX_NPEDL_REG_OFFSET_MBST	0x34
++/* messaging in/out FIFO */
++#define IX_NPEDL_REG_OFFSET_FIFO	0x38
++
++
++#define IX_NPEDL_MASK_ECS_DBG_REG_2_IF	0x00100000
++#define IX_NPEDL_MASK_ECS_DBG_REG_2_IE	0x00080000
++#define IX_NPEDL_MASK_ECS_REG_0_ACTIVE	0x80000000
++
++#define IX_NPEDL_EXCTL_CMD_NPE_STEP	0x01
++#define IX_NPEDL_EXCTL_CMD_NPE_START	0x02
++#define IX_NPEDL_EXCTL_CMD_NPE_STOP	0x03
++#define IX_NPEDL_EXCTL_CMD_NPE_CLR_PIPE	0x04
++#define IX_NPEDL_EXCTL_CMD_CLR_PROFILE_CNT	0x0C
++#define IX_NPEDL_EXCTL_CMD_RD_INS_MEM	0x10
++#define IX_NPEDL_EXCTL_CMD_WR_INS_MEM	0x11
++#define IX_NPEDL_EXCTL_CMD_RD_DATA_MEM	0x12
++#define IX_NPEDL_EXCTL_CMD_WR_DATA_MEM	0x13
++#define IX_NPEDL_EXCTL_CMD_RD_ECS_REG	0x14
++#define IX_NPEDL_EXCTL_CMD_WR_ECS_REG	0x15
++
++#define IX_NPEDL_EXCTL_STATUS_RUN	0x80000000
++#define IX_NPEDL_EXCTL_STATUS_STOP	0x40000000
++#define IX_NPEDL_EXCTL_STATUS_CLEAR	0x20000000
++
++#define IX_NPEDL_MASK_WFIFO_VALID	0x80000000
++#define IX_NPEDL_MASK_STAT_OFNE	0x00010000
++#define IX_NPEDL_MASK_STAT_IFNE	0x00080000
++
++#define IX_NPEDL_ECS_DBG_CTXT_REG_0	0x0C
++#define IX_NPEDL_ECS_PRI_1_CTXT_REG_0	0x04
++#define IX_NPEDL_ECS_PRI_2_CTXT_REG_0	0x08
++
++/* NPE control register bit definitions */
++#define IX_NPEMH_NPE_CTL_OFE	(1 << 16)	/**< OutFifoEnable */
++#define IX_NPEMH_NPE_CTL_IFE	(1 << 17)	/**< InFifoEnable */
++#define IX_NPEMH_NPE_CTL_OFEWE	(1 << 24)	/**< OutFifoEnableWriteEnable */
++#define IX_NPEMH_NPE_CTL_IFEWE	(1 << 25)	/**< InFifoEnableWriteEnable */
++
++/* NPE status register bit definitions */
++#define IX_NPEMH_NPE_STAT_OFNE	(1 << 16)	/**< OutFifoNotEmpty */
++#define IX_NPEMH_NPE_STAT_IFNF	(1 << 17)	/**< InFifoNotFull */
++#define IX_NPEMH_NPE_STAT_OFNF	(1 << 18)	/**< OutFifoNotFull */
++#define IX_NPEMH_NPE_STAT_IFNE	(1 << 19)	/**< InFifoNotEmpty */
++#define IX_NPEMH_NPE_STAT_MBINT	(1 << 20)	/**< Mailbox interrupt */
++#define IX_NPEMH_NPE_STAT_IFINT	(1 << 21)	/**< InFifo interrupt */
++#define IX_NPEMH_NPE_STAT_OFINT	(1 << 22)	/**< OutFifo interrupt */
++#define IX_NPEMH_NPE_STAT_WFINT	(1 << 23)	/**< WatchFifo interrupt */
++
++#endif
++
+Index: linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/platform.h
+===================================================================
+--- linux-2.6.21-rc1-arm.orig/include/asm-arm/arch-ixp4xx/platform.h	2007-02-21 02:24:18.000000000 -0800
++++ linux-2.6.21-rc1-arm/include/asm-arm/arch-ixp4xx/platform.h	2007-02-21 02:24:35.000000000 -0800
+@@ -86,6 +86,25 @@
+ 	unsigned long scl_pin;
+ };
+ 
++struct npe_plat_data {
++	const char *name;
++	int data_size;
++	int inst_size;
++	int id;		/* Node ID */
++};
++
++struct mac_plat_info {
++	int npe_id;	/* Node ID of the NPE for this port */
++	int port_id;	/* Port ID for NPE-B @ ixp465 */
++	int eth_id;	/* Physical ID */
++	int phy_id;	/* ID of the connected PHY (PCB/platform dependent) */
++	int rxq_id;	/* Queue ID of the RX-free q */
++	int rxdoneq_id;	/* where incoming packets are returned */
++	int txq_id;	/* Where to push the outgoing packets */
++	unsigned char hwaddr[6];	/* Desired hardware address */
++
++};
++
+ /*
+  * This structure provides a means for the board setup code
+  * to give information to the pata_ixp4xx driver. It is
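struct mac_plat_info above is what a board file hands to the MAC driver. To make the field roles concrete, a hypothetical board-setup fragment follows; every value in it (NPE node, queue and PHY IDs, MAC address) is board-specific and invented here, not taken from the patch:

	/* Hypothetical board-file fragment, for illustration only */
	static struct mac_plat_info example_mac0 = {
		.npe_id		= 1,	/* Ethernet microcode runs on NPE-B */
		.port_id	= 0,
		.eth_id		= 0,	/* becomes eth0 */
		.phy_id		= 0,	/* MII address of the attached PHY */
		.rxq_id		= 27,	/* queue feeding free RX buffers to the NPE */
		.rxdoneq_id	= 4,	/* queue returning received frames */
		.txq_id		= 24,	/* queue taking frames to transmit */
		.hwaddr		= { 0x00, 0x02, 0xb3, 0x00, 0x00, 0x01 },
	};
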
+Index: linux-2.6.21-rc1-arm/include/linux/ixp_crypto.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/include/linux/ixp_crypto.h	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,192 @@
++
++#ifndef IX_CRYPTO_H
++#define IX_CRYPTO_H
++
++#define MAX_KEYLEN	64
++#define NPE_CTX_LEN	80
++#define AES_BLOCK128	16
++
++#define NPE_OP_HASH_GEN_ICV	0x50
++#define NPE_OP_ENC_GEN_KEY	0xc9
++
++
++#define NPE_OP_HASH_VERIFY	0x01
++#define NPE_OP_CCM_ENABLE	0x04
++#define NPE_OP_CRYPT_ENABLE	0x08
++#define NPE_OP_HASH_ENABLE	0x10
++#define NPE_OP_NOT_IN_PLACE	0x20
++#define NPE_OP_HMAC_DISABLE	0x40
++#define NPE_OP_CRYPT_ENCRYPT	0x80
++
++#define MOD_ECB		0x0000
++#define MOD_CTR		0x1000
++#define MOD_CBC_ENC	0x2000
++#define MOD_CBC_DEC	0x3000
++#define MOD_CCM_ENC	0x4000
++#define MOD_CCM_DEC	0x5000
++
++#define ALGO_AES	0x0800
++#define CIPH_DECR	0x0000
++#define CIPH_ENCR	0x0400
++
++#define MOD_DES		0x0000
++#define MOD_TDEA2	0x0100
++#define MOD_TDEA3	0x0200
++#define MOD_AES128	0x0000
++#define MOD_AES192	0x0100
++#define MOD_AES256	0x0200
++
++#define KEYLEN_128	4
++#define KEYLEN_192	6
++#define KEYLEN_256	8
++
++#define CIPHER_TYPE_NULL	0
++#define CIPHER_TYPE_DES		1
++#define CIPHER_TYPE_3DES	2
++#define CIPHER_TYPE_AES		3
++
++#define CIPHER_MODE_ECB	1
++#define CIPHER_MODE_CTR	2
++#define CIPHER_MODE_CBC	3
++#define CIPHER_MODE_CCM	4
++
++#define HASH_TYPE_NULL		0
++#define HASH_TYPE_MD5		1
++#define HASH_TYPE_SHA1		2
++#define HASH_TYPE_CBCMAC	3
++
++#define OP_REG_DONE	1
++#define OP_REGISTER	2
++#define OP_PERFORM	3
++
++#define STATE_UNREGISTERED	0
++#define STATE_REGISTERED	1
++#define STATE_UNLOADING		2
++
++struct crypt_ctl {
++#ifndef CONFIG_NPE_ADDRESS_COHERENT
++	u8 mode;	/* NPE operation */
++	u8 init_len;
++	u16 reserved;
++#else
++	u16 reserved;
++	u8 init_len;
++	u8 mode;	/* NPE operation */
++#endif
++	u8 iv[16];	/* IV for CBC mode or CTR IV for CTR mode */
++	union {
++		u32 icv;
++		u32 rev_aes;
++	} addr;
++	u32 src_buf;
++	u32 dest_buf;
++#ifndef CONFIG_NPE_ADDRESS_COHERENT
++	u16 auth_offs;	/* Authentication start offset */
++	u16 auth_len;	/* Authentication data length */
++	u16 crypt_offs;	/* Cryption start offset */
++	u16 crypt_len;	/* Cryption data length */
++#else
++	u16 auth_len;	/* Authentication data length */
++	u16 auth_offs;	/* Authentication start offset */
++	u16 crypt_len;	/* Cryption data length */
++	u16 crypt_offs;	/* Cryption start offset */
++#endif
++	u32 aadAddr;	/* Additional Auth Data Addr for CCM mode */
++	u32 crypto_ctx;	/* NPE Crypto Param structure address */
++
++	/* Used by Host */
++	struct ix_sa_ctx *sa_ctx;
++	int oper_type;
++};
++
++struct npe_crypt_cont {
++	union {
++		struct crypt_ctl crypt;
++		u8 rev_aes_key[NPE_CTX_LEN];
++	} ctl;
++	struct npe_crypt_cont *next;
++	struct npe_crypt_cont *virt;
++	dma_addr_t phys;
++};
++
++struct ix_hash_algo {
++	char *name;
++	u32 cfgword;
++	int digest_len;
++	int aad_len;
++	unsigned char *icv;
++	int type;
++};
++
++struct ix_cipher_algo {
++	char *name;
++	u32 cfgword_enc;
++	u32 cfgword_dec;
++	int block_len;
++	int iv_len;
++	int type;
++	int mode;
++};
++
++struct ix_key {
++	u8 key[MAX_KEYLEN];
++	int len;
++};
++
++struct ix_sa_master {
++	struct device *npe_dev;
++	struct qm_queue *sendq;
++	struct qm_queue *recvq;
++	struct dma_pool *dmapool;
++	struct npe_crypt_cont *pool;
++	int pool_size;
++	rwlock_t lock;
++};
++
++struct ix_sa_dir {
++	unsigned char *npe_ctx;
++	dma_addr_t npe_ctx_phys;
++	int npe_ctx_idx;
++	u8 npe_mode;
++};
++
++struct ix_sa_ctx {
++	struct list_head list;
++	struct ix_sa_master *master;
++
++	const struct ix_hash_algo *h_algo;
++	const struct ix_cipher_algo *c_algo;
++	struct ix_key c_key;
++	struct ix_key h_key;
++
++	int digest_len;
++
++	struct ix_sa_dir encrypt;
++	struct ix_sa_dir decrypt;
++
++	struct npe_crypt_cont *rev_aes;
++	gfp_t gfp_flags;
++
++	int state;
++	void *priv;
++
++	void (*reg_cb)(struct ix_sa_ctx *, int);
++	void (*perf_cb)(struct ix_sa_ctx *, void *, int);
++	atomic_t use_cnt;
++};
++
++const struct ix_hash_algo *ix_hash_by_id(int type);
++const struct ix_cipher_algo *ix_cipher_by_id(int type, int mode);
++
++struct ix_sa_ctx *ix_sa_ctx_new(int priv_len, gfp_t flags);
++void ix_sa_ctx_free(struct ix_sa_ctx *sa_ctx);
++
++int ix_sa_crypto_perform(struct ix_sa_ctx *sa_ctx, u8 *data, void *ptr,
++		int datalen, int c_offs, int c_len, int a_offs, int a_len,
++		int hmac, char *iv, int encrypt);
++
++int ix_sa_ctx_setup_cipher_auth(struct ix_sa_ctx *sa_ctx,
++		const struct ix_cipher_algo *cipher,
++		const struct ix_hash_algo *auth, int len);
++
++#endif
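The prototypes above imply the following client flow: allocate a context with ix_sa_ctx_new(), fill in the keys, bind cipher and hash algorithms (completion is reported asynchronously through reg_cb), then submit work with ix_sa_crypto_perform(). A rough sketch under those assumptions; the keys are placeholders, and reading the final argument of ix_sa_ctx_setup_cipher_auth() as the truncated digest length (12 bytes for HMAC-SHA1-96) is the editor's interpretation, not stated by the patch:

	#include <linux/string.h>
	#include <linux/ixp_crypto.h>

	static const u8 example_aes_key[16];	/* placeholder AES-128 key */
	static const u8 example_hmac_key[20];	/* placeholder HMAC-SHA1 key */

	static void example_reg_done(struct ix_sa_ctx *ctx, int ret)
	{
		/* presumably invoked once the NPE has accepted the keys
		 * for both the encrypt and decrypt directions */
	}

	/* Illustrative only; error handling trimmed */
	static int example_setup(void)
	{
		struct ix_sa_ctx *ctx = ix_sa_ctx_new(0, GFP_KERNEL);

		if (!ctx)
			return -ENOMEM;
		ctx->reg_cb = example_reg_done;
		memcpy(ctx->c_key.key, example_aes_key, 16);
		ctx->c_key.len = 16;
		memcpy(ctx->h_key.key, example_hmac_key, 20);
		ctx->h_key.len = 20;
		return ix_sa_ctx_setup_cipher_auth(ctx,
				ix_cipher_by_id(CIPHER_TYPE_AES, CIPHER_MODE_CBC),
				ix_hash_by_id(HASH_TYPE_SHA1), 12);
	}
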
+Index: linux-2.6.21-rc1-arm/include/linux/ixp_npe.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/include/linux/ixp_npe.h	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,117 @@
++/*
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#ifndef NPE_DEVICE_H
++#define NPE_DEVICE_H
++
++#include <linux/miscdevice.h>
++#include <asm/hardware.h>
++
++#ifdef __ARMEB__
++#undef CONFIG_NPE_ADDRESS_COHERENT
++#else
++#define CONFIG_NPE_ADDRESS_COHERENT
++#endif
++
++#if defined(__ARMEB__) || defined(CONFIG_NPE_ADDRESS_COHERENT)
++#define npe_to_cpu32(x) (x)
++#define npe_to_cpu16(x) (x)
++#define cpu_to_npe32(x) (x)
++#define cpu_to_npe16(x) (x)
++#else
++#error NPE_DATA_COHERENT
++#define NPE_DATA_COHERENT
++#define npe_to_cpu32(x) be32_to_cpu(x)
++#define npe_to_cpu16(x) be16_to_cpu(x)
++#define cpu_to_npe32(x) cpu_to_be32(x)
++#define cpu_to_npe16(x) cpu_to_be16(x)
++#endif
++
++
++struct npe_info {
++	struct resource *res;
++	void __iomem *addr;
++	struct npe_plat_data *plat;
++	u8 img_info[4];
++	int usage;
++	int loaded;
++	u32 exec_count;
++	u32 ctx_reg2;
++};
++
++
++static inline void npe_reg_write(struct npe_info *npe, u32 reg, u32 val)
++{
++	*(volatile u32 *)((u8 *)(npe->addr) + reg) = val;
++}
++
++static inline u32 npe_reg_read(struct npe_info *npe, u32 reg)
++{
++	return *(volatile u32 *)((u8 *)(npe->addr) + reg);
++}
++
++static inline u32 npe_status(struct npe_info *npe)
++{
++	return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXCTL);
++}
++
++/* ixNpeDlNpeMgrCommandIssue */
++static inline void npe_write_exctl(struct npe_info *npe, u32 cmd)
++{
++	npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
++}
++/* ixNpeDlNpeMgrWriteCommandIssue */
++static inline void
++npe_write_cmd(struct npe_info *npe, u32 addr, u32 data, int cmd)
++{
++	npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXDATA, data);
++	npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
++	npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
++}
++/* ixNpeDlNpeMgrReadCommandIssue */
++static inline u32
++npe_read_cmd(struct npe_info *npe, u32 addr, int cmd)
++{
++	npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXAD, addr);
++	npe_reg_write(npe, IX_NPEDL_REG_OFFSET_EXCTL, cmd);
++	/* Intel reads the data twice - so do we... */
++	npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
++	return npe_reg_read(npe, IX_NPEDL_REG_OFFSET_EXDATA);
++}
++
++/* ixNpeDlNpeMgrExecAccRegWrite */
++static inline void npe_write_ecs_reg(struct npe_info *npe, u32 addr, u32 data)
++{
++	npe_write_cmd(npe, addr, data, IX_NPEDL_EXCTL_CMD_WR_ECS_REG);
++}
++/* ixNpeDlNpeMgrExecAccRegRead */
++static inline u32 npe_read_ecs_reg(struct npe_info *npe, u32 addr)
++{
++	return npe_read_cmd(npe, addr, IX_NPEDL_EXCTL_CMD_RD_ECS_REG);
++}
++
++extern void npe_stop(struct npe_info *npe);
++extern void npe_start(struct npe_info *npe);
++extern void npe_reset(struct npe_info *npe);
++
++extern struct device *get_npe_by_id(int id);
++extern void return_npe_dev(struct device *dev);
++
++/* NPE Messages */
++extern int
++npe_mh_status(struct npe_info *npe);
++extern int
++npe_mh_setportaddr(struct npe_info *npe, struct mac_plat_info *mp, u8 *macaddr);
++extern int
++npe_mh_disable_firewall(struct npe_info *npe, struct mac_plat_info *mp);
++extern int
++npe_mh_set_rxqid(struct npe_info *npe, struct mac_plat_info *mp, int qid);
++extern int
++npe_mh_npe_loopback_mode(struct npe_info *npe, struct mac_plat_info *mp, int enable);
++extern int
++npe_mh_get_stats(struct npe_info *npe, struct mac_plat_info *mp, u32 phys, int reset);
++
++#endif
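The ownership model implied by these declarations: a client claims an NPE by node ID with get_npe_by_id(), retrieves the npe_info that npe_probe() stored as driver data, issues mailbox messages, and drops the reference with return_npe_dev(). An illustrative sketch under those assumptions (the queue ID is invented):

	#include <linux/ixp_npe.h>

	/* Hypothetical client: claim the NPE named in mp, point its RX
	 * traffic at queue 4, and release the reference on failure. */
	static int example_attach(struct mac_plat_info *mp)
	{
		struct device *dev = get_npe_by_id(mp->npe_id);
		struct npe_info *npe;
		int err;

		if (!dev)
			return -ENODEV;
		npe = dev_get_drvdata(dev);	/* set by npe_probe() */
		err = npe_mh_set_rxqid(npe, mp, 4);
		if (err)
			return_npe_dev(dev);	/* keep the ref on success */
		return err;
	}
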
+Index: linux-2.6.21-rc1-arm/include/linux/ixp_qmgr.h
+===================================================================
+--- /dev/null	1970-01-01 00:00:00.000000000 +0000
++++ linux-2.6.21-rc1-arm/include/linux/ixp_qmgr.h	2007-02-21 02:24:35.000000000 -0800
+@@ -0,0 +1,202 @@
++/*
++ * Copyright (C) 2006 Christian Hohnstaedt <chohnstaedt@innominate.com>
++ *
++ * This file is released under the GPLv2
++ */
++
++#ifndef IX_QMGR_H
++#define IX_QMGR_H
++
++#include <linux/skbuff.h>
++#include <linux/list.h>
++#include <linux/if_ether.h>
++#include <linux/spinlock.h>
++#include <linux/platform_device.h>
++#include <linux/ixp_npe.h>
++#include <asm/atomic.h>
++
++/* All offsets are in 32bit words */
++#define QUE_LOW_STAT0	0x100	/* 4x Status of the 32 lower queues 0-31 */
++#define QUE_UO_STAT0	0x104	/* 2x Underflow/Overflow status bits */
++#define QUE_UPP_STAT0	0x106	/* 2x Status of the 32 upper queues 32-63 */
++#define INT0_SRC_SELREG0	0x108	/* 4x */
++#define QUE_IE_REG0	0x10c	/* 2x */
++#define QUE_INT_REG0	0x10e	/* 2x IRQ reg, write 1 to reset IRQ */
++
++#define IX_QMGR_QCFG_BASE	0x800
++#define IX_QMGR_QCFG_SIZE	0x40
++#define IX_QMGR_SRAM_SPACE	(IX_QMGR_QCFG_BASE + IX_QMGR_QCFG_SIZE)
++
++#define MAX_QUEUES	32	/* first, we only support the lower 32 queues */
++#define MAX_NPES	3
++
++enum {
++	Q_IRQ_ID_E = 0,	/* Queue Empty due to last read */
++	Q_IRQ_ID_NE,	/* Queue Nearly Empty due to last read */
++	Q_IRQ_ID_NF,	/* Queue Nearly Full due to last write */
++	Q_IRQ_ID_F,	/* Queue Full due to last write */
++	Q_IRQ_ID_NOT_E,	/* Queue Not Empty due to last write */
++	Q_IRQ_ID_NOT_NE,	/* Queue Not Nearly Empty due to last write */
++	Q_IRQ_ID_NOT_NF,	/* Queue Not Nearly Full due to last read */
++	Q_IRQ_ID_NOT_F	/* Queue Not Full due to last read */
++};
++
++extern struct qm_queue *request_queue(int qid, int len);
++extern void release_queue(struct qm_queue *queue);
++extern int queue_set_irq_src(struct qm_queue *queue, int flag);
++extern void queue_set_watermarks(struct qm_queue *, unsigned ne, unsigned nf);
++extern int queue_len(struct qm_queue *queue);
++
++struct qm_qmgr;
++struct qm_queue;
++
++typedef void (*queue_cb)(struct qm_queue *);
++
++struct qm_queue {
++	int addr;	/* word offset from IX_QMGR_SRAM_SPACE */
++	int len;	/* size in words */
++	int id;		/* Q Id */
++	u32 __iomem *acc_reg;
++	struct device *dev;
++	atomic_t use;
++	queue_cb irq_cb;
++	void *cb_data;
++};
++
++#ifndef CONFIG_NPE_ADDRESS_COHERENT
++struct eth_ctl {
++	u32 next;
++	u16 buf_len;
++	u16 pkt_len;
++	u32 phys_addr;
++	u8 dest_id;
++	u8 src_id;
++	u16 flags;
++	u8 qos;
++	u8 padlen;
++	u16 vlan_tci;
++	u8 dest_mac[ETH_ALEN];
++	u8 src_mac[ETH_ALEN];
++};
++
++#else
++struct eth_ctl {
++	u32 next;
++	u16 pkt_len;
++	u16 buf_len;
++	u32 phys_addr;
++	u16 flags;
++	u8 src_id;
++	u8 dest_id;
++	u16 vlan_tci;
++	u8 padlen;
++	u8 qos;
++	u8 dest_mac[ETH_ALEN];
++	u8 src_mac[ETH_ALEN];
++};
++#endif
++
++struct npe_cont {
++	struct eth_ctl eth;
++	void *data;
++	struct npe_cont *next;
++	struct npe_cont *virt;
++	dma_addr_t phys;
++};
++
++struct qm_qmgr {
++	u32 __iomem *addr;
++	struct resource *res;
++	struct qm_queue *queues[MAX_QUEUES];
++	rwlock_t lock;
++	struct npe_cont *pool;
++	struct dma_pool *dmapool;
++	int irq;
++};
++
++static inline void queue_write_cfg_reg(struct qm_queue *queue, u32 val)
++{
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++	*(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id) = val;
++}
++static inline u32 queue_read_cfg_reg(struct qm_queue *queue)
++{
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++	return *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
++}
++
++static inline void queue_ack_irq(struct qm_queue *queue)
++{
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++	*(qmgr->addr + QUE_INT_REG0) = 1 << queue->id;
++}
++
++static inline void queue_enable_irq(struct qm_queue *queue)
++{
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++	*(qmgr->addr + QUE_IE_REG0) |= 1 << queue->id;
++}
++
++static inline void queue_disable_irq(struct qm_queue *queue)
++{
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++	*(qmgr->addr + QUE_IE_REG0) &= ~(1 << queue->id);
++}
++
++static inline void queue_put_entry(struct qm_queue *queue, u32 entry)
++{
++	*(queue->acc_reg) = cpu_to_npe32(entry);
++}
++
++static inline u32 queue_get_entry(struct qm_queue *queue)
++{
++	return npe_to_cpu32(*queue->acc_reg);
++}
++
++static inline struct npe_cont *qmgr_get_cont(struct qm_qmgr *qmgr)
++{
++	unsigned long flags;
++	struct npe_cont *cont;
++
++	if (!qmgr->pool)
++		return NULL;
++	write_lock_irqsave(&qmgr->lock, flags);
++	cont = qmgr->pool;
++	qmgr->pool = cont->next;
++	write_unlock_irqrestore(&qmgr->lock, flags);
++	return cont;
++}
++
++static inline void qmgr_return_cont(struct qm_qmgr *qmgr, struct npe_cont *cont)
++{
++	unsigned long flags;
++
++	write_lock_irqsave(&qmgr->lock, flags);
++	cont->next = qmgr->pool;
++	qmgr->pool = cont;
++	write_unlock_irqrestore(&qmgr->lock, flags);
++}
++
++static inline int queue_stat(struct qm_queue *queue)
++{
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++	u32 reg = *(qmgr->addr + QUE_UO_STAT0 + (queue->id >> 4));
++	return (reg >> ((queue->id & 0xf) << 1)) & 3;
++}
++
++/* Prints the queue state, which is very, very helpful for debugging */
++static inline void queue_state(struct qm_queue *queue)
++{
++	u32 val = 0, lstat = 0;
++	int offs;
++	struct qm_qmgr *qmgr = dev_get_drvdata(queue->dev);
++
++	offs = queue->id / 8 + QUE_LOW_STAT0;
++	val = *(qmgr->addr + IX_QMGR_QCFG_BASE + queue->id);
++	lstat = (*(qmgr->addr + offs) >> ((queue->id % 8) * 4)) & 0x0f;
++
++	printk("Qid[%02d]: Wptr=%4x, Rptr=%4x, diff=%4x, Stat:%x\n", queue->id,
++		val & 0x7f, (val >> 7) & 0x7f, (val - (val >> 7)) & 0x7f, lstat);
++}
++
++#endif
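To close, an illustrative sketch of how a client would tie the queue-manager API above together: request a queue, hook a callback to an IRQ condition, and drain 32-bit entries (physical addresses of npe_cont descriptors) through the access register. The queue ID, length and watermarks are invented, and both the "request_queue() returns NULL on failure" convention and the "a drained queue reads back 0" behaviour are the editor's assumptions, not stated by the patch:

	#include <linux/ixp_qmgr.h>

	/* Hypothetical RX-done handler */
	static void example_rxdone_cb(struct qm_queue *queue)
	{
		u32 entry;

		/* assumes the access register reads 0 once the queue is empty */
		while ((entry = queue_get_entry(queue)))
			;	/* look up the npe_cont behind 'entry' and process it */
		queue_ack_irq(queue);
	}

	static int example_open_rxdone(void)
	{
		struct qm_queue *q = request_queue(4, 128);

		if (!q)	/* failure convention assumed */
			return -EBUSY;
		q->irq_cb = example_rxdone_cb;
		queue_set_watermarks(q, 2, 2);
		queue_set_irq_src(q, Q_IRQ_ID_NOT_E);	/* fire while entries pending */
		queue_enable_irq(q);
		return 0;
	}
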