author     Koen Vandeputte <koen.vandeputte@ncentric.com>    2019-06-11 18:25:36 +0200
committer  Koen Vandeputte <koen.vandeputte@ncentric.com>    2019-06-12 15:04:09 +0200
commit     a7e68927d047c5c979a2bf7e9203e9da72ee80e7
tree       4becc0c792e4d9419b8f6259d4800aa817770358
parent     f2f7cc67e546c6f93a821d25b18c095996a27547
kernel: bump 4.14 to 4.14.125 (FS#2305 FS#2297)
Refreshed all patches.
This bump contains upstream commits which seem to avoid (rather than
properly fix) the errors seen in FS#2305 and FS#2297.
Altered patches:
- 403-net-mvneta-convert-to-phylink.patch
- 410-sfp-hack-allow-marvell-10G-phy-support-to-use-SFP.patch
Compile-tested on: ar71xx, cns3xxx, imx6, mvebu, x86_64
Runtime-tested on: ar71xx, cns3xxx, imx6, x86_64
Signed-off-by: Koen Vandeputte <koen.vandeputte@ncentric.com>
27 files changed, 352 insertions(+), 1103 deletions(-)
diff --git a/include/kernel-version.mk b/include/kernel-version.mk index db792826af..bfdd17d449 100644 --- a/include/kernel-version.mk +++ b/include/kernel-version.mk @@ -7,11 +7,11 @@ ifdef CONFIG_TESTING_KERNEL endif LINUX_VERSION-4.9 = .181 -LINUX_VERSION-4.14 = .123 +LINUX_VERSION-4.14 = .125 LINUX_VERSION-4.19 = .48 LINUX_KERNEL_HASH-4.9.181 = 8fcd223e11cba322801bc38cdb8b581d64c0115f585dcb6604de8561b574fced -LINUX_KERNEL_HASH-4.14.123 = 25f58cb56bde388ac9bcee984f5f2d0ca094b0a8af6b92ad1f5b2fd0e6725b85 +LINUX_KERNEL_HASH-4.14.125 = 3e3dbc20215a28385bf46e2d0b8d9019df38ef1ee677e5e8870c7c3cde2ab318 LINUX_KERNEL_HASH-4.19.48 = 01d0db4f10cc8e384241a605e29413e32c442aa6853c116877538b07c16893fa remove_uri_prefix=$(subst git://,,$(subst http://,,$(subst https://,,$(1)))) diff --git a/target/linux/apm821xx/patches-4.14/802-usb-xhci-force-msi-renesas-xhci.patch b/target/linux/apm821xx/patches-4.14/802-usb-xhci-force-msi-renesas-xhci.patch index ed14505e18..47ae33062b 100644 --- a/target/linux/apm821xx/patches-4.14/802-usb-xhci-force-msi-renesas-xhci.patch +++ b/target/linux/apm821xx/patches-4.14/802-usb-xhci-force-msi-renesas-xhci.patch @@ -24,7 +24,7 @@ produce a noisy warning. --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c -@@ -372,10 +372,14 @@ static int xhci_try_enable_msi(struct us +@@ -370,10 +370,14 @@ static int xhci_try_enable_msi(struct us free_irq(hcd->irq, hcd); hcd->irq = 0; diff --git a/target/linux/ar71xx/patches-4.14/509-MIPS-ath79-process-board-kernel-option.patch b/target/linux/ar71xx/patches-4.14/509-MIPS-ath79-process-board-kernel-option.patch index 139b09c1b0..3091ab69b6 100644 --- a/target/linux/ar71xx/patches-4.14/509-MIPS-ath79-process-board-kernel-option.patch +++ b/target/linux/ar71xx/patches-4.14/509-MIPS-ath79-process-board-kernel-option.patch @@ -1,6 +1,6 @@ --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c -@@ -277,6 +277,8 @@ void __init plat_time_init(void) +@@ -283,6 +283,8 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_clk_rate / 2; } diff --git a/target/linux/ar71xx/patches-4.14/604-MIPS-ath79-no-of.patch b/target/linux/ar71xx/patches-4.14/604-MIPS-ath79-no-of.patch index 2e7d5d8232..5be33f9b08 100644 --- a/target/linux/ar71xx/patches-4.14/604-MIPS-ath79-no-of.patch +++ b/target/linux/ar71xx/patches-4.14/604-MIPS-ath79-no-of.patch @@ -10,7 +10,7 @@ --- a/arch/mips/ath79/setup.c +++ b/arch/mips/ath79/setup.c -@@ -190,16 +190,20 @@ unsigned int get_c0_compare_int(void) +@@ -196,16 +196,20 @@ unsigned int get_c0_compare_int(void) void __init plat_mem_setup(void) { @@ -31,7 +31,7 @@ if (mips_machtype != ATH79_MACH_GENERIC_OF) { ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE, -@@ -295,17 +299,21 @@ static int __init ath79_setup(void) +@@ -301,17 +305,21 @@ static int __init ath79_setup(void) arch_initcall(ath79_setup); diff --git a/target/linux/ar71xx/patches-4.14/910-unaligned_access_hacks.patch b/target/linux/ar71xx/patches-4.14/910-unaligned_access_hacks.patch index 4164c1e593..9778e37af0 100644 --- a/target/linux/ar71xx/patches-4.14/910-unaligned_access_hacks.patch +++ b/target/linux/ar71xx/patches-4.14/910-unaligned_access_hacks.patch @@ -457,7 +457,7 @@ memcpy(p, foc->val, foc->len); --- a/net/ipv4/igmp.c +++ b/net/ipv4/igmp.c -@@ -537,7 +537,7 @@ static struct sk_buff *add_grec(struct s +@@ -548,7 +548,7 @@ static struct sk_buff *add_grec(struct s if (!skb) return NULL; psrc = skb_put(skb, sizeof(__be32)); diff --git a/target/linux/ath79/patches-4.14/0027-MIPS-ath79-drop-legacy-IRQ-code.patch 
b/target/linux/ath79/patches-4.14/0027-MIPS-ath79-drop-legacy-IRQ-code.patch index 6586f08431..79f003d2ea 100644 --- a/target/linux/ath79/patches-4.14/0027-MIPS-ath79-drop-legacy-IRQ-code.patch +++ b/target/linux/ath79/patches-4.14/0027-MIPS-ath79-drop-legacy-IRQ-code.patch @@ -208,7 +208,7 @@ Signed-off-by: John Crispin <john@phrozen.org> #include <asm/bootinfo.h> #include <asm/idle.h> -@@ -305,6 +306,11 @@ void __init plat_time_init(void) +@@ -311,6 +312,11 @@ void __init plat_time_init(void) mips_hpt_frequency = cpu_clk_rate / 2; } diff --git a/target/linux/ath79/patches-4.14/0028-MIPS-ath79-drop-machfiles.patch b/target/linux/ath79/patches-4.14/0028-MIPS-ath79-drop-machfiles.patch index bb5acde8bb..2d87f70074 100644 --- a/target/linux/ath79/patches-4.14/0028-MIPS-ath79-drop-machfiles.patch +++ b/target/linux/ath79/patches-4.14/0028-MIPS-ath79-drop-machfiles.patch @@ -938,7 +938,7 @@ Signed-off-by: John Crispin <john@phrozen.org> #define ATH79_SYS_TYPE_LEN 64 -@@ -230,25 +229,21 @@ void __init plat_mem_setup(void) +@@ -236,25 +235,21 @@ void __init plat_mem_setup(void) else if (fw_passed_dtb) __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb)); @@ -973,7 +973,7 @@ Signed-off-by: John Crispin <john@phrozen.org> { struct device_node *np; struct clk *clk; -@@ -278,66 +273,12 @@ static void __init ath79_of_plat_time_in +@@ -284,66 +279,12 @@ static void __init ath79_of_plat_time_in clk_put(clk); } diff --git a/target/linux/bcm53xx/patches-4.14/180-usb-xhci-add-support-for-performing-fake-doorbell.patch b/target/linux/bcm53xx/patches-4.14/180-usb-xhci-add-support-for-performing-fake-doorbell.patch index 0108427464..5784f5e5ab 100644 --- a/target/linux/bcm53xx/patches-4.14/180-usb-xhci-add-support-for-performing-fake-doorbell.patch +++ b/target/linux/bcm53xx/patches-4.14/180-usb-xhci-add-support-for-performing-fake-doorbell.patch @@ -40,7 +40,7 @@ it on BCM4708 family. /* called during probe() after chip reset completes */ --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c -@@ -168,6 +168,49 @@ int xhci_start(struct xhci_hcd *xhci) +@@ -166,6 +166,49 @@ int xhci_start(struct xhci_hcd *xhci) return ret; } @@ -90,7 +90,7 @@ it on BCM4708 family. /* * Reset a halted HC. * -@@ -551,10 +594,20 @@ static int xhci_init(struct usb_hcd *hcd +@@ -549,10 +592,20 @@ static int xhci_init(struct usb_hcd *hcd static int xhci_run_finished(struct xhci_hcd *xhci) { @@ -114,7 +114,7 @@ it on BCM4708 family. 
xhci->shared_hcd->state = HC_STATE_RUNNING; xhci->cmd_ring_state = CMD_RING_STATE_RUNNING; -@@ -564,6 +617,10 @@ static int xhci_run_finished(struct xhci +@@ -562,6 +615,10 @@ static int xhci_run_finished(struct xhci xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Finished xhci_run for USB3 roothub"); return 0; diff --git a/target/linux/brcm2708/patches-4.14/950-0424-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch b/target/linux/brcm2708/patches-4.14/950-0424-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch index 0c93a14dc0..cdab03c604 100644 --- a/target/linux/brcm2708/patches-4.14/950-0424-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch +++ b/target/linux/brcm2708/patches-4.14/950-0424-vchiq_2835_arm-Implement-a-DMA-pool-for-small-bulk-t.patch @@ -95,7 +95,7 @@ Signed-off-by: Oliver Gjoneski <ogjoneski@gmail.com> size_t pagelist_size; struct scatterlist *scatterlist, *sg; int dma_buffers; -@@ -417,10 +436,16 @@ create_pagelist(char __user *buf, size_t +@@ -426,10 +445,16 @@ create_pagelist(char __user *buf, size_t /* Allocate enough storage to hold the page pointers and the page ** list */ @@ -116,7 +116,7 @@ Signed-off-by: Oliver Gjoneski <ogjoneski@gmail.com> vchiq_log_trace(vchiq_arm_log_level, "create_pagelist - %pK", pagelist); -@@ -441,6 +466,7 @@ create_pagelist(char __user *buf, size_t +@@ -450,6 +475,7 @@ create_pagelist(char __user *buf, size_t pagelistinfo->pagelist = pagelist; pagelistinfo->pagelist_buffer_size = pagelist_size; pagelistinfo->dma_addr = dma_addr; diff --git a/target/linux/generic/backport-4.14/012-kbuild-add-macro-for-controlling-warnings-to-linux-c.patch b/target/linux/generic/backport-4.14/012-kbuild-add-macro-for-controlling-warnings-to-linux-c.patch index 8d91266e24..8993b6376c 100644 --- a/target/linux/generic/backport-4.14/012-kbuild-add-macro-for-controlling-warnings-to-linux-c.patch +++ b/target/linux/generic/backport-4.14/012-kbuild-add-macro-for-controlling-warnings-to-linux-c.patch @@ -84,7 +84,7 @@ Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> --- a/include/linux/compiler-gcc.h +++ b/include/linux/compiler-gcc.h -@@ -362,3 +362,30 @@ +@@ -366,3 +366,30 @@ #if GCC_VERSION >= 50100 #define COMPILER_HAS_GENERIC_BUILTIN_OVERFLOW 1 #endif @@ -117,7 +117,7 @@ Signed-off-by: Masahiro Yamada <yamada.masahiro@socionext.com> + --- a/include/linux/compiler_types.h +++ b/include/linux/compiler_types.h -@@ -283,4 +283,22 @@ struct ftrace_likely_data { +@@ -287,4 +287,22 @@ struct ftrace_likely_data { # define __native_word(t) (sizeof(t) == sizeof(char) || sizeof(t) == sizeof(short) || sizeof(t) == sizeof(int) || sizeof(t) == sizeof(long)) #endif diff --git a/target/linux/generic/backport-4.14/380-v5.3-net-sched-Introduce-act_ctinfo-action.patch b/target/linux/generic/backport-4.14/380-v5.3-net-sched-Introduce-act_ctinfo-action.patch index 26063985c0..cd729a93b1 100644 --- a/target/linux/generic/backport-4.14/380-v5.3-net-sched-Introduce-act_ctinfo-action.patch +++ b/target/linux/generic/backport-4.14/380-v5.3-net-sched-Introduce-act_ctinfo-action.patch @@ -55,9 +55,6 @@ Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk> create mode 100644 include/uapi/linux/tc_act/tc_ctinfo.h create mode 100644 net/sched/act_ctinfo.c -diff --git a/include/net/tc_act/tc_ctinfo.h b/include/net/tc_act/tc_ctinfo.h -new file mode 100644 -index 000000000000..d6a688571672 --- /dev/null +++ b/include/net/tc_act/tc_ctinfo.h @@ -0,0 +1,28 @@ @@ -89,8 +86,6 @@ index 000000000000..d6a688571672 +#define to_ctinfo(a) ((struct 
tcf_ctinfo *)a) + +#endif /* __NET_TC_CTINFO_H */ -diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h -index 46c506615f4a..408b02fbb34a 100644 --- a/include/uapi/linux/pkt_cls.h +++ b/include/uapi/linux/pkt_cls.h @@ -66,7 +66,8 @@ enum { @@ -103,9 +98,6 @@ index 46c506615f4a..408b02fbb34a 100644 }; #define TCA_ID_MAX __TCA_ID_MAX -diff --git a/include/uapi/linux/tc_act/tc_ctinfo.h b/include/uapi/linux/tc_act/tc_ctinfo.h -new file mode 100644 -index 000000000000..da803e05a89b --- /dev/null +++ b/include/uapi/linux/tc_act/tc_ctinfo.h @@ -0,0 +1,34 @@ @@ -143,8 +135,6 @@ index 000000000000..da803e05a89b +}; + +#endif -diff --git a/net/sched/Kconfig b/net/sched/Kconfig -index e70ed26485a2..962d90f72f54 100644 --- a/net/sched/Kconfig +++ b/net/sched/Kconfig @@ -808,6 +808,19 @@ config NET_ACT_CONNMARK @@ -167,8 +157,6 @@ index e70ed26485a2..962d90f72f54 100644 config NET_ACT_SKBMOD tristate "skb data modification action" depends on NET_CLS_ACT -diff --git a/net/sched/Makefile b/net/sched/Makefile -index 9e43a4721ef8..44ee5b87b895 100644 --- a/net/sched/Makefile +++ b/net/sched/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_NET_ACT_CSUM) += act_csum.o @@ -179,9 +167,6 @@ index 9e43a4721ef8..44ee5b87b895 100644 obj-$(CONFIG_NET_ACT_SKBMOD) += act_skbmod.o obj-$(CONFIG_NET_ACT_IFE) += act_ife.o obj-$(CONFIG_NET_IFE_SKBMARK) += act_meta_mark.o -diff --git a/net/sched/act_ctinfo.c b/net/sched/act_ctinfo.c -new file mode 100644 -index 000000000000..e65344e32801 --- /dev/null +++ b/net/sched/act_ctinfo.c @@ -0,0 +1,394 @@ @@ -579,6 +564,3 @@ index 000000000000..e65344e32801 +MODULE_AUTHOR("Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>"); +MODULE_DESCRIPTION("Conntrack mark to DSCP restoring"); +MODULE_LICENSE("GPL"); --- -2.20.1 (Apple Git-117) - diff --git a/target/linux/generic/config-4.14 b/target/linux/generic/config-4.14 index 8572992be3..1e8497b503 100644 --- a/target/linux/generic/config-4.14 +++ b/target/linux/generic/config-4.14 @@ -3344,6 +3344,7 @@ CONFIG_NMI_LOG_BUF_SHIFT=13 # CONFIG_NOP_USB_XCEIV is not set # CONFIG_NORTEL_HERMES is not set # CONFIG_NOTIFIER_ERROR_INJECTION is not set +# CONFIG_NOUVEAU_LEGACY_CTX_SUPPORT is not set # CONFIG_NOZOMI is not set # CONFIG_NO_BOOTMEM is not set # CONFIG_NO_HZ is not set diff --git a/target/linux/generic/pending-4.14/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch b/target/linux/generic/pending-4.14/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch index 4754554823..e3ae02a067 100644 --- a/target/linux/generic/pending-4.14/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch +++ b/target/linux/generic/pending-4.14/342-powerpc-Enable-kernel-XZ-compression-option-on-PPC_8.patch @@ -12,8 +12,6 @@ Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com> arch/powerpc/Kconfig | 1 + 1 file changed, 1 insertion(+) -diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig -index de3b07c7be30..53f87983fb42 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -199,6 +199,7 @@ config PPC @@ -24,6 +22,3 @@ index de3b07c7be30..53f87983fb42 100644 select HAVE_KPROBES select HAVE_KPROBES_ON_FTRACE select HAVE_KRETPROBES --- -2.20.1 - diff --git a/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch b/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch index 9ccacba3ae..1656ddf3e8 100644 --- a/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch +++ 
b/target/linux/layerscape/patches-4.14/702-dpaa2-ethernet-support-layerscape.patch @@ -3166,7 +3166,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> err = dpni_enable(priv->mc_io, 0, priv->mc_token); if (err < 0) { netdev_err(net_dev, "dpni_enable() failed\n"); -@@ -1047,51 +1355,20 @@ static int dpaa2_eth_open(struct net_dev +@@ -1047,48 +1355,17 @@ static int dpaa2_eth_open(struct net_dev link_state_err: enable_err: @@ -3181,8 +3181,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> - */ -static u32 drain_channel(struct dpaa2_eth_priv *priv, - struct dpaa2_eth_channel *ch) -+static int dpaa2_eth_stop(struct net_device *net_dev) - { +-{ - u32 drained = 0, total = 0; - - do { @@ -3193,11 +3192,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> - - return total; -} -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int dpni_enabled = 0; -+ int retries = 10, i; -+ int err = 0; - +- -static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) -{ - struct dpaa2_eth_channel *ch; @@ -3212,20 +3207,18 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> - return drained; -} - --static int dpaa2_eth_stop(struct net_device *net_dev) --{ -- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); + static int dpaa2_eth_stop(struct net_device *net_dev) + { + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); - int dpni_enabled; - int retries = 10; - u32 drained; -- -- netif_tx_stop_all_queues(net_dev); -- netif_carrier_off(net_dev); -+ netif_tx_stop_all_queues(net_dev); -+ netif_carrier_off(net_dev); ++ int dpni_enabled = 0; ++ int retries = 10, i; ++ int err = 0; - /* Loop while dpni_disable() attempts to drain the egress FQs - * and confirm them back to us. + netif_tx_stop_all_queues(net_dev); + netif_carrier_off(net_dev); @@ -1105,56 +1382,24 @@ static int dpaa2_eth_stop(struct net_dev } while (dpni_enabled && --retries); if (!retries) { @@ -4496,7 +4489,7 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { dev_err(dev, "error adding key extraction rule, too many rules?\n"); -@@ -2020,49 +2966,107 @@ static int dpaa2_eth_set_hash(struct net +@@ -2020,12 +2966,10 @@ static int dpaa2_eth_set_hash(struct net } key->type = DPKG_EXTRACT_FROM_HDR; @@ -4506,27 +4499,37 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> - key->extract.from_hdr.field = hash_fields[i].cls_field; + key->extract.from_hdr.field = dist_fields[i].cls_field; cls_cfg.num_extracts++; -+ } -+ -+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); +- +- priv->rx_hash_fields |= hash_fields[i].rxnfc_field; + } + + dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); +@@ -2035,36 +2979,96 @@ static int dpaa2_eth_set_hash(struct net + err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); + if (err) { + dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); +- goto err_prep_key; + goto free_key; -+ } -+ -+ /* Prepare for setting the rx dist */ + } + +- memset(&dist_cfg, 0, sizeof(dist_cfg)); +- + /* Prepare for setting the rx dist */ +- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, +- DPAA2_CLASSIFIER_DMA_SIZE, +- DMA_TO_DEVICE); +- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { + key_iova = dma_map_single(dev, dma_mem, DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (dma_mapping_error(dev, key_iova)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ err = -ENOMEM; + dev_err(dev, "DMA mapping failed\n"); + err = -ENOMEM; 
+- goto err_dma_map; + goto free_key; -+ } -+ + } + +- dist_cfg.dist_size = dpaa2_eth_queue_count(priv); +- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; + if (type == DPAA2_ETH_RX_DIST_HASH) { + if (dpaa2_eth_has_legacy_dist(priv)) + err = config_legacy_hash_key(priv, key_iova); @@ -4535,17 +4538,24 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> + } else { + err = config_cls_key(priv, key_iova); + } -+ + +- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); +- dma_unmap_single(dev, dist_cfg.key_cfg_iova, +- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); +- if (err) +- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); + dma_unmap_single(dev, key_iova, DPAA2_CLASSIFIER_DMA_SIZE, + DMA_TO_DEVICE); + if (!err && type == DPAA2_ETH_RX_DIST_HASH) + priv->rx_hash_fields = rx_hash_fields; -+ + +-err_dma_map: +-err_prep_key: +free_key: -+ kfree(dma_mem); -+ return err; -+} -+ + kfree(dma_mem); + return err; + } + +int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) +{ + struct dpaa2_eth_priv *priv = netdev_priv(net_dev); @@ -4571,70 +4581,43 @@ Signed-off-by: Yangbo Lu <yangbo.lu@nxp.com> +{ + struct device *dev = priv->net_dev->dev.parent; + int err; - -- priv->rx_hash_fields |= hash_fields[i].rxnfc_field; ++ + /* Check if we actually support Rx flow classification */ + if (dpaa2_eth_has_legacy_dist(priv)) { + dev_dbg(dev, "Rx cls not supported by current MC version\n"); + return -EOPNOTSUPP; - } - -- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_KERNEL); -- if (!dma_mem) -- return -ENOMEM; -- -- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); -- if (err) { -- dev_err(dev, "dpni_prepare_key_cfg error %d\n", err); -- goto err_prep_key; ++ } ++ + if (!dpaa2_eth_fs_enabled(priv)) { + dev_dbg(dev, "Rx cls disabled in DPNI options\n"); + return -EOPNOTSUPP; - } - -- memset(&dist_cfg, 0, sizeof(dist_cfg)); -- -- /* Prepare for setting the rx dist */ -- dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, -- DPAA2_CLASSIFIER_DMA_SIZE, -- DMA_TO_DEVICE); -- if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { -- dev_err(dev, "DMA mapping failed\n"); -- err = -ENOMEM; -- goto err_dma_map; ++ } ++ + if (!dpaa2_eth_hash_enabled(priv)) { + dev_dbg(dev, "Rx cls disabled for single queue DPNIs\n"); + return -EOPNOTSUPP; - } - -- dist_cfg.dist_size = dpaa2_eth_queue_count(priv); -- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; ++ } ++ + /* If there is no support for masking in the classification table, + * we don't set a default key, as it will depend on the rules + * added by the user at runtime. 
+ */ + if (!dpaa2_eth_fs_mask_enabled(priv)) + goto out; - -- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); -- dma_unmap_single(dev, dist_cfg.key_cfg_iova, -- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); ++ + err = dpaa2_eth_set_cls(priv->net_dev, DPAA2_ETH_DIST_ALL); - if (err) -- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); ++ if (err) + return err; - --err_dma_map: --err_prep_key: -- kfree(dma_mem); -- return err; ++ +out: + priv->rx_cls_enabled = 1; + + return 0; - } - ++} ++ /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, + * frame queues and channels + */ @@ -2080,6 +3084,7 @@ static int bind_dpni(struct dpaa2_eth_pr pools_params.num_dpbp = 1; pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; diff --git a/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch b/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch index ea39a1433c..988afe10ca 100644 --- a/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.14/807-usb-support-layerscape.patch @@ -1312,7 +1312,7 @@ Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com> xhci->quirks |= XHCI_BROKEN_PORT_PED; --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c -@@ -1972,10 +1972,12 @@ static int finish_td(struct xhci_hcd *xh +@@ -1976,10 +1976,12 @@ static int finish_td(struct xhci_hcd *xh union xhci_trb *ep_trb, struct xhci_transfer_event *event, struct xhci_virt_ep *ep, int *status) { @@ -1325,7 +1325,7 @@ Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com> u32 trb_comp_code; int ep_index; -@@ -1998,14 +2000,30 @@ static int finish_td(struct xhci_hcd *xh +@@ -2002,14 +2004,30 @@ static int finish_td(struct xhci_hcd *xh if (trb_comp_code == COMP_STALL_ERROR || xhci_requires_manual_halt_cleanup(xhci, ep_ctx, trb_comp_code)) { @@ -1363,7 +1363,7 @@ Signed-off-by: Zhao Chenhui <chenhui.zhao@freescale.com> while (ep_ring->dequeue != td->last_trb) --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c -@@ -1597,13 +1597,38 @@ static int xhci_urb_dequeue(struct usb_h +@@ -1595,13 +1595,38 @@ static int xhci_urb_dequeue(struct usb_h ret = -ENOMEM; goto done; } diff --git a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch index 0d26aca797..95e6894b93 100644 --- a/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch +++ b/target/linux/layerscape/patches-4.14/820-sec-support-layerscape.patch @@ -3839,17 +3839,12 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> ctx->cdata.keylen = keys.enckeylen; ret = aead_set_sh_desc(aead); -@@ -258,55 +284,139 @@ badkey: +@@ -258,6 +284,468 @@ badkey: return -EINVAL; } --static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, -- const u8 *key, unsigned int keylen) +static int tls_set_sh_desc(struct crypto_aead *tls) - { -- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); -- struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); -- const char *alg_name = crypto_tfm_alg_name(tfm); ++{ + struct caam_ctx *ctx = crypto_aead_ctx(tls); + unsigned int ivsize = crypto_aead_ivsize(tls); + unsigned int blocksize = crypto_aead_blocksize(tls); @@ -3919,45 +3914,26 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + unsigned int keylen) +{ + struct caam_ctx *ctx = crypto_aead_ctx(tls); - struct device *jrdev = ctx->jrdev; -- unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); -- u32 ctx1_iv_off = 
0; -- const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == -- OP_ALG_AAI_CTR_MOD128); -- const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); ++ struct device *jrdev = ctx->jrdev; + struct caam_drv_private *ctrlpriv = dev_get_drvdata(jrdev->parent); + struct crypto_authenc_keys keys; - int ret = 0; - -- memcpy(ctx->key, key, keylen); ++ int ret = 0; ++ + if (crypto_authenc_extractkeys(&keys, key, keylen) != 0) + goto badkey; + - #ifdef DEBUG ++#ifdef DEBUG + dev_err(jrdev, "keylen %d enckeylen %d authkeylen %d\n", + keys.authkeylen + keys.enckeylen, keys.enckeylen, + keys.authkeylen); - print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", - DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); - #endif -- /* -- * AES-CTR needs to load IV in CONTEXT1 reg -- * at an offset of 128bits (16bytes) -- * CONTEXT1[255:128] = IV -- */ -- if (ctr_mode) -- ctx1_iv_off = 16; - - /* -- * RFC3686 specific: -- * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} -- * | *key = {KEY, NONCE} ++ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", ++ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); ++#endif ++ ++ /* + * If DKP is supported, use it in the shared descriptor to generate + * the split key. - */ -- if (is_rfc3686) { -- ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; -- keylen -= CTR_RFC3686_NONCE_SIZE; ++ */ + if (ctrlpriv->era >= 6) { + ctx->adata.keylen = keys.authkeylen; + ctx->adata.keylen_pad = split_key_len(ctx->adata.algtype & @@ -3973,25 +3949,14 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + ctx->adata.keylen_pad + + keys.enckeylen, ctx->dir); + goto skip_split_key; - } - -- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); -- ctx->cdata.keylen = keylen; -- ctx->cdata.key_virt = ctx->key; -- ctx->cdata.key_inline = true; ++ } ++ + ret = gen_split_key(jrdev, ctx->key, &ctx->adata, keys.authkey, + keys.authkeylen, CAAM_MAX_KEY_SIZE - + keys.enckeylen); + if (ret) + goto badkey; - -- /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ -- cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, -- is_rfc3686, ctx1_iv_off); -- cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, -- is_rfc3686, ctx1_iv_off); -- cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata, -- ivsize, is_rfc3686, ctx1_iv_off); ++ + /* postpend encryption key to auth split key */ + memcpy(ctx->key + ctx->adata.keylen_pad, keys.enckey, keys.enckeylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, ctx->adata.keylen_pad + @@ -4011,44 +3976,39 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + ret = tls_set_sh_desc(tls); + if (ret) + goto badkey; - - /* Now update the driver contexts with the new shared descriptor */ - if (ctx->drv_ctx[ENCRYPT]) { -@@ -327,42 +437,84 @@ static int ablkcipher_setkey(struct cryp - } - } - -- if (ctx->drv_ctx[GIVENCRYPT]) { -- ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT], -- ctx->sh_desc_givenc); -- if (ret) { -- dev_err(jrdev, "driver givenc context update failed\n"); -- goto badkey; -- } -- } -- - return ret; - badkey: -- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); ++ ++ /* Now update the driver contexts with the new shared descriptor */ ++ if (ctx->drv_ctx[ENCRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ++ ctx->sh_desc_enc); ++ if (ret) { ++ dev_err(jrdev, "driver enc context update failed\n"); ++ goto badkey; ++ } ++ } ++ ++ if (ctx->drv_ctx[DECRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ++ ctx->sh_desc_dec); ++ if (ret) { ++ 
dev_err(jrdev, "driver dec context update failed\n"); ++ goto badkey; ++ } ++ } ++ ++ return ret; ++badkey: + crypto_aead_set_flags(tls, CRYPTO_TFM_RES_BAD_KEY_LEN); - return -EINVAL; - } - --static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, -- const u8 *key, unsigned int keylen) ++ return -EINVAL; ++} ++ +static int gcm_set_sh_desc(struct crypto_aead *aead) - { -- struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); -- struct device *jrdev = ctx->jrdev; -- int ret = 0; ++{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; - -- if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { -- dev_err(jrdev, "key size mismatch\n"); -- goto badkey; ++ + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + @@ -4077,8 +4037,8 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; - } - ++ } ++ + cnstr_shdsc_gcm_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + ctx->authsize, true); + @@ -4107,129 +4067,62 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); +#endif + - memcpy(ctx->key, key, keylen); -- dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); ++ memcpy(ctx->key, key, keylen); + dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, ctx->dir); - ctx->cdata.keylen = keylen; -- ctx->cdata.key_virt = ctx->key; -- ctx->cdata.key_inline = true; - -- /* xts ablkcipher encrypt, decrypt shared descriptors */ -- cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata); -- cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata); ++ ctx->cdata.keylen = keylen; ++ + ret = gcm_set_sh_desc(aead); + if (ret) + return ret; - - /* Now update the driver contexts with the new shared descriptor */ - if (ctx->drv_ctx[ENCRYPT]) { -@@ -370,7 +522,7 @@ static int xts_ablkcipher_setkey(struct - ctx->sh_desc_enc); - if (ret) { - dev_err(jrdev, "driver enc context update failed\n"); -- goto badkey; ++ ++ /* Now update the driver contexts with the new shared descriptor */ ++ if (ctx->drv_ctx[ENCRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], ++ ctx->sh_desc_enc); ++ if (ret) { ++ dev_err(jrdev, "driver enc context update failed\n"); + return ret; - } - } - -@@ -379,151 +531,829 @@ static int xts_ablkcipher_setkey(struct - ctx->sh_desc_dec); - if (ret) { - dev_err(jrdev, "driver dec context update failed\n"); -- goto badkey; ++ } ++ } ++ ++ if (ctx->drv_ctx[DECRYPT]) { ++ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], ++ ctx->sh_desc_dec); ++ if (ret) { ++ dev_err(jrdev, "driver dec context update failed\n"); + return ret; - } - } - -- return ret; --badkey: -- crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); -- return -EINVAL; ++ } ++ } ++ + return 0; - } - --/* -- * aead_edesc - s/w-extended aead descriptor -- * @src_nents: number of segments in input scatterlist -- * @dst_nents: number of segments in output scatterlist -- * @iv_dma: dma address of iv for checking continuity and link table -- * @qm_sg_bytes: length of dma mapped h/w link table -- * @qm_sg_dma: bus physical mapped address of h/w link table -- * @assoclen: associated data length, in CAAM endianness -- * @assoclen_dma: bus physical mapped address of req->assoclen -- * @drv_req: driver-specific request structure -- * @sgt: the h/w link table, followed by IV -- */ --struct aead_edesc { -- int src_nents; 
-- int dst_nents; -- dma_addr_t iv_dma; -- int qm_sg_bytes; -- dma_addr_t qm_sg_dma; -- unsigned int assoclen; -- dma_addr_t assoclen_dma; -- struct caam_drv_req drv_req; -- struct qm_sg_entry sgt[0]; --}; ++} ++ +static int rfc4106_set_sh_desc(struct crypto_aead *aead) +{ + struct caam_ctx *ctx = crypto_aead_ctx(aead); + unsigned int ivsize = crypto_aead_ivsize(aead); + int rem_bytes = CAAM_DESC_BYTES_MAX - DESC_JOB_IO_LEN - + ctx->cdata.keylen; - --/* -- * ablkcipher_edesc - s/w-extended ablkcipher descriptor -- * @src_nents: number of segments in input scatterlist -- * @dst_nents: number of segments in output scatterlist -- * @iv_dma: dma address of iv for checking continuity and link table -- * @qm_sg_bytes: length of dma mapped h/w link table -- * @qm_sg_dma: bus physical mapped address of h/w link table -- * @drv_req: driver-specific request structure -- * @sgt: the h/w link table, followed by IV -- */ --struct ablkcipher_edesc { -- int src_nents; -- int dst_nents; -- dma_addr_t iv_dma; -- int qm_sg_bytes; -- dma_addr_t qm_sg_dma; -- struct caam_drv_req drv_req; -- struct qm_sg_entry sgt[0]; --}; ++ + if (!ctx->cdata.keylen || !ctx->authsize) + return 0; + + ctx->cdata.key_virt = ctx->key; - --static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, -- enum optype type) --{ - /* -- * This function is called on the fast path with values of 'type' -- * known at compile time. Invalid arguments are not expected and -- * thus no checks are made. ++ ++ /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer - */ -- struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; -- u32 *desc; ++ */ + if (rem_bytes >= DESC_QI_RFC4106_ENC_LEN) { + ctx->cdata.key_inline = true; + } else { + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } - -- if (unlikely(!drv_ctx)) { -- spin_lock(&ctx->lock); ++ + cnstr_shdsc_rfc4106_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, + ctx->authsize, true); - -- /* Read again to check if some other core init drv_ctx */ -- drv_ctx = ctx->drv_ctx[type]; -- if (!drv_ctx) { -- int cpu; ++ + /* + * Job Descriptor and Shared Descriptor + * must fit into the 64-word Descriptor h/w Buffer @@ -4240,13 +4133,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + ctx->cdata.key_inline = false; + ctx->cdata.key_dma = ctx->key_dma; + } - -- if (type == ENCRYPT) -- desc = ctx->sh_desc_enc; -- else if (type == DECRYPT) -- desc = ctx->sh_desc_dec; -- else /* (type == GIVENCRYPT) */ -- desc = ctx->sh_desc_givenc; ++ + cnstr_shdsc_rfc4106_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, + ctx->authsize, true); + @@ -4418,162 +4305,13 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + return 0; +} + -+static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, -+ const u8 *key, unsigned int keylen) -+{ -+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); -+ struct crypto_tfm *tfm = crypto_ablkcipher_tfm(ablkcipher); -+ const char *alg_name = crypto_tfm_alg_name(tfm); -+ struct device *jrdev = ctx->jrdev; -+ unsigned int ivsize = crypto_ablkcipher_ivsize(ablkcipher); -+ u32 ctx1_iv_off = 0; -+ const bool ctr_mode = ((ctx->cdata.algtype & OP_ALG_AAI_MASK) == -+ OP_ALG_AAI_CTR_MOD128); -+ const bool is_rfc3686 = (ctr_mode && strstr(alg_name, "rfc3686")); -+ int ret = 0; -+ -+ memcpy(ctx->key, key, keylen); -+#ifdef DEBUG -+ print_hex_dump(KERN_ERR, "key in @" __stringify(__LINE__)": ", -+ DUMP_PREFIX_ADDRESS, 16, 4, key, keylen, 1); -+#endif -+ /* -+ * AES-CTR needs to load IV in CONTEXT1 reg -+ * at an offset of 128bits 
(16bytes) -+ * CONTEXT1[255:128] = IV -+ */ -+ if (ctr_mode) -+ ctx1_iv_off = 16; -+ -+ /* -+ * RFC3686 specific: -+ * | CONTEXT1[255:128] = {NONCE, IV, COUNTER} -+ * | *key = {KEY, NONCE} -+ */ -+ if (is_rfc3686) { -+ ctx1_iv_off = 16 + CTR_RFC3686_NONCE_SIZE; -+ keylen -= CTR_RFC3686_NONCE_SIZE; -+ } -+ -+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); -+ ctx->cdata.keylen = keylen; -+ ctx->cdata.key_virt = ctx->key; -+ ctx->cdata.key_inline = true; -+ -+ /* ablkcipher encrypt, decrypt, givencrypt shared descriptors */ -+ cnstr_shdsc_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata, ivsize, -+ is_rfc3686, ctx1_iv_off); -+ cnstr_shdsc_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata, ivsize, -+ is_rfc3686, ctx1_iv_off); -+ cnstr_shdsc_ablkcipher_givencap(ctx->sh_desc_givenc, &ctx->cdata, -+ ivsize, is_rfc3686, ctx1_iv_off); -+ -+ /* Now update the driver contexts with the new shared descriptor */ -+ if (ctx->drv_ctx[ENCRYPT]) { -+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], -+ ctx->sh_desc_enc); -+ if (ret) { -+ dev_err(jrdev, "driver enc context update failed\n"); -+ goto badkey; -+ } -+ } -+ -+ if (ctx->drv_ctx[DECRYPT]) { -+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], -+ ctx->sh_desc_dec); -+ if (ret) { -+ dev_err(jrdev, "driver dec context update failed\n"); -+ goto badkey; -+ } -+ } -+ -+ if (ctx->drv_ctx[GIVENCRYPT]) { -+ ret = caam_drv_ctx_update(ctx->drv_ctx[GIVENCRYPT], -+ ctx->sh_desc_givenc); -+ if (ret) { -+ dev_err(jrdev, "driver givenc context update failed\n"); -+ goto badkey; -+ } -+ } -+ -+ return ret; -+badkey: -+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); -+ return -EINVAL; -+} -+ -+static int xts_ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, -+ const u8 *key, unsigned int keylen) -+{ -+ struct caam_ctx *ctx = crypto_ablkcipher_ctx(ablkcipher); -+ struct device *jrdev = ctx->jrdev; -+ int ret = 0; -+ -+ if (keylen != 2 * AES_MIN_KEY_SIZE && keylen != 2 * AES_MAX_KEY_SIZE) { -+ dev_err(jrdev, "key size mismatch\n"); -+ goto badkey; -+ } -+ -+ memcpy(ctx->key, key, keylen); -+ dma_sync_single_for_device(jrdev, ctx->key_dma, keylen, DMA_TO_DEVICE); -+ ctx->cdata.keylen = keylen; -+ ctx->cdata.key_virt = ctx->key; -+ ctx->cdata.key_inline = true; -+ -+ /* xts ablkcipher encrypt, decrypt shared descriptors */ -+ cnstr_shdsc_xts_ablkcipher_encap(ctx->sh_desc_enc, &ctx->cdata); -+ cnstr_shdsc_xts_ablkcipher_decap(ctx->sh_desc_dec, &ctx->cdata); -+ -+ /* Now update the driver contexts with the new shared descriptor */ -+ if (ctx->drv_ctx[ENCRYPT]) { -+ ret = caam_drv_ctx_update(ctx->drv_ctx[ENCRYPT], -+ ctx->sh_desc_enc); -+ if (ret) { -+ dev_err(jrdev, "driver enc context update failed\n"); -+ goto badkey; -+ } -+ } -+ -+ if (ctx->drv_ctx[DECRYPT]) { -+ ret = caam_drv_ctx_update(ctx->drv_ctx[DECRYPT], -+ ctx->sh_desc_dec); -+ if (ret) { -+ dev_err(jrdev, "driver dec context update failed\n"); -+ goto badkey; -+ } -+ } -+ -+ return ret; -+badkey: -+ crypto_ablkcipher_set_flags(ablkcipher, CRYPTO_TFM_RES_BAD_KEY_LEN); -+ return -EINVAL; -+} -+ -+/* -+ * aead_edesc - s/w-extended aead descriptor -+ * @src_nents: number of segments in input scatterlist -+ * @dst_nents: number of segments in output scatterlist -+ * @iv_dma: dma address of iv for checking continuity and link table -+ * @qm_sg_bytes: length of dma mapped h/w link table -+ * @qm_sg_dma: bus physical mapped address of h/w link table -+ * @assoclen: associated data length, in CAAM endianness -+ * @assoclen_dma: bus physical mapped address of 
req->assoclen -+ * @drv_req: driver-specific request structure -+ * @sgt: the h/w link table, followed by IV -+ */ -+struct aead_edesc { -+ int src_nents; -+ int dst_nents; -+ dma_addr_t iv_dma; -+ int qm_sg_bytes; -+ dma_addr_t qm_sg_dma; -+ unsigned int assoclen; -+ dma_addr_t assoclen_dma; -+ struct caam_drv_req drv_req; -+ struct qm_sg_entry sgt[0]; -+}; -+ -+/* + static int ablkcipher_setkey(struct crypto_ablkcipher *ablkcipher, + const u8 *key, unsigned int keylen) + { +@@ -414,6 +902,29 @@ struct aead_edesc { + }; + + /* + * tls_edesc - s/w-extended tls descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist @@ -4597,100 +4335,13 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> +}; + +/* -+ * ablkcipher_edesc - s/w-extended ablkcipher descriptor -+ * @src_nents: number of segments in input scatterlist -+ * @dst_nents: number of segments in output scatterlist -+ * @iv_dma: dma address of iv for checking continuity and link table -+ * @qm_sg_bytes: length of dma mapped h/w link table -+ * @qm_sg_dma: bus physical mapped address of h/w link table -+ * @drv_req: driver-specific request structure -+ * @sgt: the h/w link table, followed by IV -+ */ -+struct ablkcipher_edesc { -+ int src_nents; -+ int dst_nents; -+ dma_addr_t iv_dma; -+ int qm_sg_bytes; -+ dma_addr_t qm_sg_dma; -+ struct caam_drv_req drv_req; -+ struct qm_sg_entry sgt[0]; -+}; -+ -+static struct caam_drv_ctx *get_drv_ctx(struct caam_ctx *ctx, -+ enum optype type) -+{ -+ /* -+ * This function is called on the fast path with values of 'type' -+ * known at compile time. Invalid arguments are not expected and -+ * thus no checks are made. -+ */ -+ struct caam_drv_ctx *drv_ctx = ctx->drv_ctx[type]; -+ u32 *desc; -+ -+ if (unlikely(!drv_ctx)) { -+ spin_lock(&ctx->lock); -+ -+ /* Read again to check if some other core init drv_ctx */ -+ drv_ctx = ctx->drv_ctx[type]; -+ if (!drv_ctx) { -+ int cpu; -+ -+ if (type == ENCRYPT) -+ desc = ctx->sh_desc_enc; -+ else if (type == DECRYPT) -+ desc = ctx->sh_desc_dec; -+ else /* (type == GIVENCRYPT) */ -+ desc = ctx->sh_desc_givenc; -+ -+ cpu = smp_processor_id(); -+ drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); -+ if (likely(!IS_ERR_OR_NULL(drv_ctx))) -+ drv_ctx->op_type = type; -+ -+ ctx->drv_ctx[type] = drv_ctx; -+ } -+ -+ spin_unlock(&ctx->lock); -+ } -+ -+ return drv_ctx; -+} -+ -+static void caam_unmap(struct device *dev, struct scatterlist *src, -+ struct scatterlist *dst, int src_nents, -+ int dst_nents, dma_addr_t iv_dma, int ivsize, -+ enum optype op_type, dma_addr_t qm_sg_dma, -+ int qm_sg_bytes) -+{ -+ if (dst != src) { -+ if (src_nents) -+ dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); -+ dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); -+ } else { -+ dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); -+ } -+ -+ if (iv_dma) -+ dma_unmap_single(dev, iv_dma, ivsize, -+ op_type == GIVENCRYPT ? 
DMA_FROM_DEVICE : -+ DMA_TO_DEVICE); -+ if (qm_sg_bytes) -+ dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); -+} -+ -+static void aead_unmap(struct device *dev, -+ struct aead_edesc *edesc, -+ struct aead_request *req) -+{ -+ struct crypto_aead *aead = crypto_aead_reqtfm(req); -+ int ivsize = crypto_aead_ivsize(aead); -+ -+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, -+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, -+ edesc->qm_sg_dma, edesc->qm_sg_bytes); -+ dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); -+} -+ + * ablkcipher_edesc - s/w-extended ablkcipher descriptor + * @src_nents: number of segments in input scatterlist + * @dst_nents: number of segments in output scatterlist +@@ -508,6 +1019,19 @@ static void aead_unmap(struct device *de + dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); + } + +static void tls_unmap(struct device *dev, + struct tls_edesc *edesc, + struct aead_request *req) @@ -4704,22 +4355,80 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + edesc->qm_sg_bytes); +} + -+static void ablkcipher_unmap(struct device *dev, -+ struct ablkcipher_edesc *edesc, -+ struct ablkcipher_request *req) + static void ablkcipher_unmap(struct device *dev, + struct ablkcipher_edesc *edesc, + struct ablkcipher_request *req) +@@ -532,8 +1056,18 @@ static void aead_done(struct caam_drv_re + qidev = caam_ctx->qidev; + + if (unlikely(status)) { ++ u32 ssrc = status & JRSTA_SSRC_MASK; ++ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; ++ + caam_jr_strstatus(qidev, status); +- ecode = -EIO; ++ /* ++ * verify hw auth check passed else return -EBADMSG ++ */ ++ if (ssrc == JRSTA_SSRC_CCB_ERROR && ++ err_id == JRSTA_CCBERR_ERRID_ICVCHK) ++ ecode = -EBADMSG; ++ else ++ ecode = -EIO; + } + + edesc = container_of(drv_req, typeof(*edesc), drv_req); +@@ -647,9 +1181,24 @@ static struct aead_edesc *aead_edesc_all + /* + * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. + * Input is not contiguous. ++ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond ++ * the end of the table by allocating more S/G entries. Logic: ++ * if (src != dst && output S/G) ++ * pad output S/G, if needed ++ * else if (src == dst && S/G) ++ * overlapping S/Gs; pad one of them ++ * else if (input S/G) ... ++ * pad input S/G, if needed + */ +- qm_sg_ents = 1 + !!ivsize + mapped_src_nents + +- (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); ++ qm_sg_ents = 1 + !!ivsize + mapped_src_nents; ++ if (mapped_dst_nents > 1) ++ qm_sg_ents += ALIGN(mapped_dst_nents, 4); ++ else if ((req->src == req->dst) && (mapped_src_nents > 1)) ++ qm_sg_ents = max(ALIGN(qm_sg_ents, 4), ++ 1 + !!ivsize + ALIGN(mapped_src_nents, 4)); ++ else ++ qm_sg_ents = ALIGN(qm_sg_ents, 4); ++ + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); + if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > +@@ -785,6 +1334,260 @@ static int aead_decrypt(struct aead_requ + return aead_crypt(req, false); + } + ++static int ipsec_gcm_encrypt(struct aead_request *req) +{ -+ struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); -+ int ivsize = crypto_ablkcipher_ivsize(ablkcipher); ++ if (req->assoclen < 8) ++ return -EINVAL; + -+ caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, -+ edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, -+ edesc->qm_sg_dma, edesc->qm_sg_bytes); ++ return aead_crypt(req, true); +} + -+static void aead_done(struct caam_drv_req *drv_req, u32 status) ++static int ipsec_gcm_decrypt(struct aead_request *req) ++{ ++ if (req->assoclen < 8) ++ return -EINVAL; ++ ++ return aead_crypt(req, false); ++} ++ ++static void tls_done(struct caam_drv_req *drv_req, u32 status) +{ + struct device *qidev; -+ struct aead_edesc *edesc; ++ struct tls_edesc *edesc; + struct aead_request *aead_req = drv_req->app_ctx; + struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); + struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); @@ -4728,56 +4437,56 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + qidev = caam_ctx->qidev; + + if (unlikely(status)) { -+ u32 ssrc = status & JRSTA_SSRC_MASK; -+ u8 err_id = status & JRSTA_CCBERR_ERRID_MASK; -+ + caam_jr_strstatus(qidev, status); -+ /* -+ * verify hw auth check passed else return -EBADMSG -+ */ -+ if (ssrc == JRSTA_SSRC_CCB_ERROR && -+ err_id == JRSTA_CCBERR_ERRID_ICVCHK) -+ ecode = -EBADMSG; -+ else -+ ecode = -EIO; ++ ecode = -EIO; + } + + edesc = container_of(drv_req, typeof(*edesc), drv_req); -+ aead_unmap(qidev, edesc, aead_req); ++ tls_unmap(qidev, edesc, aead_req); + + aead_request_complete(aead_req, ecode); + qi_cache_free(edesc); +} + +/* -+ * allocate and map the aead extended descriptor ++ * allocate and map the tls extended descriptor + */ -+static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, -+ bool encrypt) ++static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt) +{ + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); ++ unsigned int blocksize = crypto_aead_blocksize(aead); ++ unsigned int padsize, authsize; + struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), + typeof(*alg), aead); + struct device *qidev = ctx->qidev; + gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? -+ GFP_KERNEL : GFP_ATOMIC; ++ GFP_KERNEL : GFP_ATOMIC; + int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; -+ struct aead_edesc *edesc; ++ struct tls_edesc *edesc; + dma_addr_t qm_sg_dma, iv_dma = 0; + int ivsize = 0; -+ unsigned int authsize = ctx->authsize; -+ int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; ++ u8 *iv; ++ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes; + int in_len, out_len; + struct qm_sg_entry *sg_table, *fd_sgt; + struct caam_drv_ctx *drv_ctx; + enum optype op_type = encrypt ? 
ENCRYPT : DECRYPT; ++ struct scatterlist *dst; ++ ++ if (encrypt) { ++ padsize = blocksize - ((req->cryptlen + ctx->authsize) % ++ blocksize); ++ authsize = ctx->authsize + padsize; ++ } else { ++ authsize = ctx->authsize; ++ } + + drv_ctx = get_drv_ctx(ctx, op_type); + if (unlikely(IS_ERR_OR_NULL(drv_ctx))) -+ return (struct aead_edesc *)drv_ctx; ++ return (struct tls_edesc *)drv_ctx; + -+ /* allocate space for base edesc and hw desc commands, link tables */ ++ /* allocate space for base edesc, link tables and IV */ + edesc = qi_cache_alloc(GFP_DMA | flags); + if (unlikely(!edesc)) { + dev_err(qidev, "could not allocate extended descriptor\n"); @@ -4787,7 +4496,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + if (likely(req->src == req->dst)) { + src_nents = sg_nents_for_len(req->src, req->assoclen + + req->cryptlen + -+ (encrypt ? authsize : 0)); ++ (encrypt ? authsize : 0)); + if (unlikely(src_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", + req->assoclen + req->cryptlen + @@ -4803,6 +4512,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } ++ dst = req->dst; + } else { + src_nents = sg_nents_for_len(req->src, req->assoclen + + req->cryptlen); @@ -4813,14 +4523,13 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + return ERR_PTR(src_nents); + } + -+ dst_nents = sg_nents_for_len(req->dst, req->assoclen + -+ req->cryptlen + -+ (encrypt ? authsize : -+ (-authsize))); ++ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen); ++ dst_nents = sg_nents_for_len(dst, req->cryptlen + ++ (encrypt ? authsize : 0)); + if (unlikely(dst_nents < 0)) { + dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", -+ req->assoclen + req->cryptlen + -+ (encrypt ? authsize : (-authsize))); ++ req->cryptlen + ++ (encrypt ? authsize : 0)); + qi_cache_free(edesc); + return ERR_PTR(dst_nents); + } @@ -4837,7 +4546,7 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + mapped_src_nents = 0; + } + -+ mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, ++ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents, + DMA_FROM_DEVICE); + if (unlikely(!mapped_dst_nents)) { + dev_err(qidev, "unable to map destination\n"); @@ -4847,95 +4556,51 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + } + } + -+ if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) -+ ivsize = crypto_aead_ivsize(aead); -+ + /* -+ * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. ++ * Create S/G table: IV, src, dst. + * Input is not contiguous. -+ * HW reads 4 S/G entries at a time; make sure the reads don't go beyond -+ * the end of the table by allocating more S/G entries. Logic: -+ * if (src != dst && output S/G) -+ * pad output S/G, if needed -+ * else if (src == dst && S/G) -+ * overlapping S/Gs; pad one of them -+ * else if (input S/G) ... -+ * pad input S/G, if needed + */ -+ qm_sg_ents = 1 + !!ivsize + mapped_src_nents; -+ if (mapped_dst_nents > 1) -+ qm_sg_ents += ALIGN(mapped_dst_nents, 4); -+ else if ((req->src == req->dst) && (mapped_src_nents > 1)) -+ qm_sg_ents = max(ALIGN(qm_sg_ents, 4), -+ 1 + !!ivsize + ALIGN(mapped_src_nents, 4)); -+ else -+ qm_sg_ents = ALIGN(qm_sg_ents, 4); -+ ++ qm_sg_ents = 1 + mapped_src_nents + ++ (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); + sg_table = &edesc->sgt[0]; + qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); -+ if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > -+ CAAM_QI_MEMCACHE_SIZE)) { -+ dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", -+ qm_sg_ents, ivsize); -+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, -+ 0, 0, 0, 0); ++ ++ ivsize = crypto_aead_ivsize(aead); ++ iv = (u8 *)(sg_table + qm_sg_ents); ++ /* Make sure IV is located in a DMAable area */ ++ memcpy(iv, req->iv, ivsize); ++ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); ++ if (dma_mapping_error(qidev, iv_dma)) { ++ dev_err(qidev, "unable to map IV\n"); ++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0, ++ 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } + -+ if (ivsize) { -+ u8 *iv = (u8 *)(sg_table + qm_sg_ents); -+ -+ /* Make sure IV is located in a DMAable area */ -+ memcpy(iv, req->iv, ivsize); -+ -+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); -+ if (dma_mapping_error(qidev, iv_dma)) { -+ dev_err(qidev, "unable to map IV\n"); -+ caam_unmap(qidev, req->src, req->dst, src_nents, -+ dst_nents, 0, 0, 0, 0, 0); -+ qi_cache_free(edesc); -+ return ERR_PTR(-ENOMEM); -+ } -+ } -+ + edesc->src_nents = src_nents; + edesc->dst_nents = dst_nents; ++ edesc->dst = dst; + edesc->iv_dma = iv_dma; + edesc->drv_req.app_ctx = req; -+ edesc->drv_req.cbk = aead_done; ++ edesc->drv_req.cbk = tls_done; + edesc->drv_req.drv_ctx = drv_ctx; + -+ edesc->assoclen = cpu_to_caam32(req->assoclen); -+ edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(qidev, edesc->assoclen_dma)) { -+ dev_err(qidev, "unable to map assoclen\n"); -+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, -+ iv_dma, ivsize, op_type, 0, 0); -+ qi_cache_free(edesc); -+ return ERR_PTR(-ENOMEM); -+ } ++ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); ++ qm_sg_index = 1; + -+ dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); -+ qm_sg_index++; -+ if (ivsize) { -+ dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); -+ qm_sg_index++; -+ } + sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); + qm_sg_index += mapped_src_nents; + + if (mapped_dst_nents > 1) -+ sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + ++ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table + + qm_sg_index, 0); + + qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); + if (dma_mapping_error(qidev, qm_sg_dma)) { + dev_err(qidev, "unable to map S/G table\n"); -+ dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); -+ caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, -+ iv_dma, ivsize, op_type, 0, 0); ++ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma, ++ ivsize, op_type, 0, 0); + qi_cache_free(edesc); + return ERR_PTR(-ENOMEM); + } @@ -4943,431 +4608,64 @@ Signed-off-by: Zhao Qiang <qiang.zhao@nxp.com> + edesc->qm_sg_dma = qm_sg_dma; + edesc->qm_sg_bytes = qm_sg_bytes; + -+ out_len = req->assoclen + req->cryptlen + -+ (encrypt ? ctx->authsize : (-ctx->authsize)); -+ in_len = 4 + ivsize + req->assoclen + req->cryptlen; ++ out_len = req->cryptlen + (encrypt ? 
authsize : 0); ++ in_len = ivsize + req->assoclen + req->cryptlen; + + fd_sgt = &edesc->drv_req.fd_sgt[0]; ++ + dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); + -+ if (req->dst == req->src) { -+ if (mapped_src_nents == 1) -+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), -+ out_len, 0); -+ else -+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + -+ (1 + !!ivsize) * sizeof(*sg_table), -+ out_len, 0); -+ } else if (mapped_dst_nents == 1) { -+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, -+ 0); -+ } else { ++ if (req->dst == req->src) ++ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + ++ (sg_nents_for_len(req->src, req->assoclen) + ++ 1) * sizeof(*sg_table), out_len, 0); ++ else if (mapped_dst_nents == 1) ++ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0); ++ else + dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * + qm_sg_index, out_len, 0); -+ } + + return edesc; +} - -- cpu = smp_processor_id(); -- drv_ctx = caam_drv_ctx_init(ctx->qidev, &cpu, desc); -- if (likely(!IS_ERR_OR_NULL(drv_ctx))) -- drv_ctx->op_type = type; -+static inline int aead_crypt(struct aead_request *req, bool encrypt) ++ ++static int tls_crypt(struct aead_request *req, bool encrypt) +{ -+ struct aead_edesc *edesc; ++ struct tls_edesc *edesc; + struct crypto_aead *aead = crypto_aead_reqtfm(req); + struct caam_ctx *ctx = crypto_aead_ctx(aead); + int ret; - -- ctx->drv_ctx[type] = drv_ctx; -- } ++ + if (unlikely(caam_congested)) + return -EAGAIN; - -- spin_unlock(&ctx->lock); -+ /* allocate extended descriptor */ -+ edesc = aead_edesc_alloc(req, encrypt); ++ ++ edesc = tls_edesc_alloc(req, encrypt); + if (IS_ERR_OR_NULL(edesc)) + return PTR_ERR(edesc); + -+ /* Create and submit job descriptor */ + ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); + if (!ret) { + ret = -EINPROGRESS; + } else { -+ aead_unmap(ctx->qidev, edesc, req); ++ tls_unmap(ctx->qidev, edesc, req); + qi_cache_free(edesc); - } - -- return drv_ctx; -+ return ret; - } - --static void caam_unmap(struct device *dev, struct scatterlist *src, -- struct scatterlist *dst, int src_nents, -- int dst_nents, dma_addr_t iv_dma, int ivsize, -- enum optype op_type, dma_addr_t qm_sg_dma, -- int qm_sg_bytes) -+static int aead_encrypt(struct aead_request *req) - { -- if (dst != src) { -- if (src_nents) -- dma_unmap_sg(dev, src, src_nents, DMA_TO_DEVICE); -- dma_unmap_sg(dev, dst, dst_nents, DMA_FROM_DEVICE); -- } else { -- dma_unmap_sg(dev, src, src_nents, DMA_BIDIRECTIONAL); -- } -+ return aead_crypt(req, true); -+} - -- if (iv_dma) -- dma_unmap_single(dev, iv_dma, ivsize, -- op_type == GIVENCRYPT ? 
DMA_FROM_DEVICE : -- DMA_TO_DEVICE); -- if (qm_sg_bytes) -- dma_unmap_single(dev, qm_sg_dma, qm_sg_bytes, DMA_TO_DEVICE); -+static int aead_decrypt(struct aead_request *req) -+{ -+ return aead_crypt(req, false); - } - --static void aead_unmap(struct device *dev, -- struct aead_edesc *edesc, -- struct aead_request *req) -+static int ipsec_gcm_encrypt(struct aead_request *req) - { -- struct crypto_aead *aead = crypto_aead_reqtfm(req); -- int ivsize = crypto_aead_ivsize(aead); -+ if (req->assoclen < 8) -+ return -EINVAL; - -- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, -- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, -- edesc->qm_sg_dma, edesc->qm_sg_bytes); -- dma_unmap_single(dev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); -+ return aead_crypt(req, true); - } - --static void ablkcipher_unmap(struct device *dev, -- struct ablkcipher_edesc *edesc, -- struct ablkcipher_request *req) -+static int ipsec_gcm_decrypt(struct aead_request *req) - { -- struct crypto_ablkcipher *ablkcipher = crypto_ablkcipher_reqtfm(req); -- int ivsize = crypto_ablkcipher_ivsize(ablkcipher); -+ if (req->assoclen < 8) -+ return -EINVAL; - -- caam_unmap(dev, req->src, req->dst, edesc->src_nents, edesc->dst_nents, -- edesc->iv_dma, ivsize, edesc->drv_req.drv_ctx->op_type, -- edesc->qm_sg_dma, edesc->qm_sg_bytes); -+ return aead_crypt(req, false); - } - --static void aead_done(struct caam_drv_req *drv_req, u32 status) -+static void tls_done(struct caam_drv_req *drv_req, u32 status) - { - struct device *qidev; -- struct aead_edesc *edesc; -+ struct tls_edesc *edesc; - struct aead_request *aead_req = drv_req->app_ctx; - struct crypto_aead *aead = crypto_aead_reqtfm(aead_req); - struct caam_ctx *caam_ctx = crypto_aead_ctx(aead); -@@ -537,41 +1367,51 @@ static void aead_done(struct caam_drv_re - } - - edesc = container_of(drv_req, typeof(*edesc), drv_req); -- aead_unmap(qidev, edesc, aead_req); -+ tls_unmap(qidev, edesc, aead_req); - - aead_request_complete(aead_req, ecode); - qi_cache_free(edesc); - } - - /* -- * allocate and map the aead extended descriptor -+ * allocate and map the tls extended descriptor - */ --static struct aead_edesc *aead_edesc_alloc(struct aead_request *req, -- bool encrypt) -+static struct tls_edesc *tls_edesc_alloc(struct aead_request *req, bool encrypt) - { - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); -+ unsigned int blocksize = crypto_aead_blocksize(aead); -+ unsigned int padsize, authsize; - struct caam_aead_alg *alg = container_of(crypto_aead_alg(aead), - typeof(*alg), aead); - struct device *qidev = ctx->qidev; - gfp_t flags = (req->base.flags & CRYPTO_TFM_REQ_MAY_SLEEP) ? -- GFP_KERNEL : GFP_ATOMIC; -+ GFP_KERNEL : GFP_ATOMIC; - int src_nents, mapped_src_nents, dst_nents = 0, mapped_dst_nents = 0; -- struct aead_edesc *edesc; -+ struct tls_edesc *edesc; - dma_addr_t qm_sg_dma, iv_dma = 0; - int ivsize = 0; -- unsigned int authsize = ctx->authsize; -- int qm_sg_index = 0, qm_sg_ents = 0, qm_sg_bytes; -+ u8 *iv; -+ int qm_sg_index, qm_sg_ents = 0, qm_sg_bytes; - int in_len, out_len; - struct qm_sg_entry *sg_table, *fd_sgt; - struct caam_drv_ctx *drv_ctx; - enum optype op_type = encrypt ? 
ENCRYPT : DECRYPT; -+ struct scatterlist *dst; -+ -+ if (encrypt) { -+ padsize = blocksize - ((req->cryptlen + ctx->authsize) % -+ blocksize); -+ authsize = ctx->authsize + padsize; -+ } else { -+ authsize = ctx->authsize; + } - - drv_ctx = get_drv_ctx(ctx, op_type); - if (unlikely(IS_ERR_OR_NULL(drv_ctx))) -- return (struct aead_edesc *)drv_ctx; -+ return (struct tls_edesc *)drv_ctx; - -- /* allocate space for base edesc and hw desc commands, link tables */ -+ /* allocate space for base edesc, link tables and IV */ - edesc = qi_cache_alloc(GFP_DMA | flags); - if (unlikely(!edesc)) { - dev_err(qidev, "could not allocate extended descriptor\n"); -@@ -581,7 +1421,7 @@ static struct aead_edesc *aead_edesc_all - if (likely(req->src == req->dst)) { - src_nents = sg_nents_for_len(req->src, req->assoclen + - req->cryptlen + -- (encrypt ? authsize : 0)); -+ (encrypt ? authsize : 0)); - if (unlikely(src_nents < 0)) { - dev_err(qidev, "Insufficient bytes (%d) in src S/G\n", - req->assoclen + req->cryptlen + -@@ -597,6 +1437,7 @@ static struct aead_edesc *aead_edesc_all - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } -+ dst = req->dst; - } else { - src_nents = sg_nents_for_len(req->src, req->assoclen + - req->cryptlen); -@@ -607,14 +1448,13 @@ static struct aead_edesc *aead_edesc_all - return ERR_PTR(src_nents); - } - -- dst_nents = sg_nents_for_len(req->dst, req->assoclen + -- req->cryptlen + -- (encrypt ? authsize : -- (-authsize))); -+ dst = scatterwalk_ffwd(edesc->tmp, req->dst, req->assoclen); -+ dst_nents = sg_nents_for_len(dst, req->cryptlen + -+ (encrypt ? authsize : 0)); - if (unlikely(dst_nents < 0)) { - dev_err(qidev, "Insufficient bytes (%d) in dst S/G\n", -- req->assoclen + req->cryptlen + -- (encrypt ? authsize : (-authsize))); -+ req->cryptlen + -+ (encrypt ? authsize : 0)); - qi_cache_free(edesc); - return ERR_PTR(dst_nents); - } -@@ -631,7 +1471,7 @@ static struct aead_edesc *aead_edesc_all - mapped_src_nents = 0; - } - -- mapped_dst_nents = dma_map_sg(qidev, req->dst, dst_nents, -+ mapped_dst_nents = dma_map_sg(qidev, dst, dst_nents, - DMA_FROM_DEVICE); - if (unlikely(!mapped_dst_nents)) { - dev_err(qidev, "unable to map destination\n"); -@@ -641,80 +1481,51 @@ static struct aead_edesc *aead_edesc_all - } - } - -- if ((alg->caam.rfc3686 && encrypt) || !alg->caam.geniv) -- ivsize = crypto_aead_ivsize(aead); -- - /* -- * Create S/G table: req->assoclen, [IV,] req->src [, req->dst]. -+ * Create S/G table: IV, src, dst. - * Input is not contiguous. - */ -- qm_sg_ents = 1 + !!ivsize + mapped_src_nents + -+ qm_sg_ents = 1 + mapped_src_nents + - (mapped_dst_nents > 1 ? 
mapped_dst_nents : 0); - sg_table = &edesc->sgt[0]; - qm_sg_bytes = qm_sg_ents * sizeof(*sg_table); -- if (unlikely(offsetof(struct aead_edesc, sgt) + qm_sg_bytes + ivsize > -- CAAM_QI_MEMCACHE_SIZE)) { -- dev_err(qidev, "No space for %d S/G entries and/or %dB IV\n", -- qm_sg_ents, ivsize); -- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, 0, -- 0, 0, 0, 0); + -+ ivsize = crypto_aead_ivsize(aead); -+ iv = (u8 *)(sg_table + qm_sg_ents); -+ /* Make sure IV is located in a DMAable area */ -+ memcpy(iv, req->iv, ivsize); -+ iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); -+ if (dma_mapping_error(qidev, iv_dma)) { -+ dev_err(qidev, "unable to map IV\n"); -+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, 0, 0, 0, -+ 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } - -- if (ivsize) { -- u8 *iv = (u8 *)(sg_table + qm_sg_ents); -- -- /* Make sure IV is located in a DMAable area */ -- memcpy(iv, req->iv, ivsize); -- -- iv_dma = dma_map_single(qidev, iv, ivsize, DMA_TO_DEVICE); -- if (dma_mapping_error(qidev, iv_dma)) { -- dev_err(qidev, "unable to map IV\n"); -- caam_unmap(qidev, req->src, req->dst, src_nents, -- dst_nents, 0, 0, 0, 0, 0); -- qi_cache_free(edesc); -- return ERR_PTR(-ENOMEM); -- } -- } -- - edesc->src_nents = src_nents; - edesc->dst_nents = dst_nents; -+ edesc->dst = dst; - edesc->iv_dma = iv_dma; - edesc->drv_req.app_ctx = req; -- edesc->drv_req.cbk = aead_done; -+ edesc->drv_req.cbk = tls_done; - edesc->drv_req.drv_ctx = drv_ctx; - -- edesc->assoclen = cpu_to_caam32(req->assoclen); -- edesc->assoclen_dma = dma_map_single(qidev, &edesc->assoclen, 4, -- DMA_TO_DEVICE); -- if (dma_mapping_error(qidev, edesc->assoclen_dma)) { -- dev_err(qidev, "unable to map assoclen\n"); -- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, -- iv_dma, ivsize, op_type, 0, 0); -- qi_cache_free(edesc); -- return ERR_PTR(-ENOMEM); -- } -+ dma_to_qm_sg_one(sg_table, iv_dma, ivsize, 0); -+ qm_sg_index = 1; - -- dma_to_qm_sg_one(sg_table, edesc->assoclen_dma, 4, 0); -- qm_sg_index++; -- if (ivsize) { -- dma_to_qm_sg_one(sg_table + qm_sg_index, iv_dma, ivsize, 0); -- qm_sg_index++; -- } - sg_to_qm_sg_last(req->src, mapped_src_nents, sg_table + qm_sg_index, 0); - qm_sg_index += mapped_src_nents; - - if (mapped_dst_nents > 1) -- sg_to_qm_sg_last(req->dst, mapped_dst_nents, sg_table + -+ sg_to_qm_sg_last(dst, mapped_dst_nents, sg_table + - qm_sg_index, 0); - - qm_sg_dma = dma_map_single(qidev, sg_table, qm_sg_bytes, DMA_TO_DEVICE); - if (dma_mapping_error(qidev, qm_sg_dma)) { - dev_err(qidev, "unable to map S/G table\n"); -- dma_unmap_single(qidev, edesc->assoclen_dma, 4, DMA_TO_DEVICE); -- caam_unmap(qidev, req->src, req->dst, src_nents, dst_nents, -- iv_dma, ivsize, op_type, 0, 0); -+ caam_unmap(qidev, req->src, dst, src_nents, dst_nents, iv_dma, -+ ivsize, op_type, 0, 0); - qi_cache_free(edesc); - return ERR_PTR(-ENOMEM); - } -@@ -722,35 +1533,29 @@ static struct aead_edesc *aead_edesc_all - edesc->qm_sg_dma = qm_sg_dma; - edesc->qm_sg_bytes = qm_sg_bytes; - -- out_len = req->assoclen + req->cryptlen + -- (encrypt ? ctx->authsize : (-ctx->authsize)); -- in_len = 4 + ivsize + req->assoclen + req->cryptlen; -+ out_len = req->cryptlen + (encrypt ? 
authsize : 0); -+ in_len = ivsize + req->assoclen + req->cryptlen; - - fd_sgt = &edesc->drv_req.fd_sgt[0]; ++ return ret; ++} + - dma_to_qm_sg_one_last_ext(&fd_sgt[1], qm_sg_dma, in_len, 0); - -- if (req->dst == req->src) { -- if (mapped_src_nents == 1) -- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->src), -- out_len, 0); -- else -- dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + -- (1 + !!ivsize) * sizeof(*sg_table), -- out_len, 0); -- } else if (mapped_dst_nents == 1) { -- dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(req->dst), out_len, -- 0); -- } else { -+ if (req->dst == req->src) -+ dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + -+ (sg_nents_for_len(req->src, req->assoclen) + -+ 1) * sizeof(*sg_table), out_len, 0); -+ else if (mapped_dst_nents == 1) -+ dma_to_qm_sg_one(&fd_sgt[0], sg_dma_address(dst), out_len, 0); -+ else - dma_to_qm_sg_one_ext(&fd_sgt[0], qm_sg_dma + sizeof(*sg_table) * - qm_sg_index, out_len, 0); -- } - - return edesc; - } - --static inline int aead_crypt(struct aead_request *req, bool encrypt) -+static int tls_crypt(struct aead_request *req, bool encrypt) - { -- struct aead_edesc *edesc; -+ struct tls_edesc *edesc; - struct crypto_aead *aead = crypto_aead_reqtfm(req); - struct caam_ctx *ctx = crypto_aead_ctx(aead); - int ret; -@@ -758,31 +1563,29 @@ static inline int aead_crypt(struct aead - if (unlikely(caam_congested)) - return -EAGAIN; - -- /* allocate extended descriptor */ -- edesc = aead_edesc_alloc(req, encrypt); -+ edesc = tls_edesc_alloc(req, encrypt); - if (IS_ERR_OR_NULL(edesc)) - return PTR_ERR(edesc); - -- /* Create and submit job descriptor */ - ret = caam_qi_enqueue(ctx->qidev, &edesc->drv_req); - if (!ret) { - ret = -EINPROGRESS; - } else { -- aead_unmap(ctx->qidev, edesc, req); -+ tls_unmap(ctx->qidev, edesc, req); - qi_cache_free(edesc); - } - - return ret; - } - --static int aead_encrypt(struct aead_request *req) +static int tls_encrypt(struct aead_request *req) - { -- return aead_crypt(req, true); ++{ + return tls_crypt(req, true); - } - --static int aead_decrypt(struct aead_request *req) ++} ++ +static int tls_decrypt(struct aead_request *req) - { -- return aead_crypt(req, false); ++{ + return tls_crypt(req, false); - } - ++} ++ static void ablkcipher_done(struct caam_drv_req *drv_req, u32 status) + { + struct ablkcipher_edesc *edesc; @@ -900,7 +1703,24 @@ static struct ablkcipher_edesc *ablkciph qm_sg_ents = 1 + mapped_src_nents; dst_sg_idx = qm_sg_ents; diff --git a/target/linux/mediatek/patches-4.14/0191-usb-xhci-allow-imod-interval-to-be-configurable.patch b/target/linux/mediatek/patches-4.14/0191-usb-xhci-allow-imod-interval-to-be-configurable.patch index 2a6f5f25d4..d3cb19150d 100644 --- a/target/linux/mediatek/patches-4.14/0191-usb-xhci-allow-imod-interval-to-be-configurable.patch +++ b/target/linux/mediatek/patches-4.14/0191-usb-xhci-allow-imod-interval-to-be-configurable.patch @@ -112,7 +112,7 @@ Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org> ret = PTR_ERR(hcd->usb_phy); --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c -@@ -612,11 +612,7 @@ int xhci_run(struct usb_hcd *hcd) +@@ -610,11 +610,7 @@ int xhci_run(struct usb_hcd *hcd) "// Set the interrupt modulation register"); temp = readl(&xhci->ir_set->irq_control); temp &= ~ER_IRQ_INTERVAL_MASK; diff --git a/target/linux/mpc85xx/patches-4.14/100-powerpc-85xx-tl-wdr4900-v1-support.patch b/target/linux/mpc85xx/patches-4.14/100-powerpc-85xx-tl-wdr4900-v1-support.patch index f74ed6fd8a..86b6a57735 100644 --- 
a/target/linux/mpc85xx/patches-4.14/100-powerpc-85xx-tl-wdr4900-v1-support.patch +++ b/target/linux/mpc85xx/patches-4.14/100-powerpc-85xx-tl-wdr4900-v1-support.patch @@ -17,11 +17,9 @@ Signed-off-by: Pawel Dembicki <paweldembicki@gmail.com> arch/powerpc/platforms/85xx/Makefile | 1 + 4 files changed, 19 insertions(+), 1 deletion(-) -diff --git a/arch/powerpc/boot/Makefile b/arch/powerpc/boot/Makefile -index e2a5a932c24a..925140f8350b 100644 --- a/arch/powerpc/boot/Makefile +++ b/arch/powerpc/boot/Makefile -@@ -156,6 +156,7 @@ src-plat-$(CONFIG_PPC_PSERIES) += pseries-head.S +@@ -156,6 +156,7 @@ src-plat-$(CONFIG_PPC_PSERIES) += pserie src-plat-$(CONFIG_PPC_POWERNV) += pseries-head.S src-plat-$(CONFIG_PPC_IBM_CELL_BLADE) += pseries-head.S src-plat-$(CONFIG_MVME7100) += motload-head.S mvme7100.c @@ -29,7 +27,7 @@ index e2a5a932c24a..925140f8350b 100644 src-wlib := $(sort $(src-wlib-y)) src-plat := $(sort $(src-plat-y)) -@@ -335,7 +336,7 @@ image-$(CONFIG_TQM8555) += cuImage.tqm8555 +@@ -335,7 +336,7 @@ image-$(CONFIG_TQM8555) += cuImage.tqm image-$(CONFIG_TQM8560) += cuImage.tqm8560 image-$(CONFIG_SBC8548) += cuImage.sbc8548 image-$(CONFIG_KSI8560) += cuImage.ksi8560 @@ -38,8 +36,6 @@ index e2a5a932c24a..925140f8350b 100644 # Board ports in arch/powerpc/platform/86xx/Kconfig image-$(CONFIG_MVME7100) += dtbImage.mvme7100 -diff --git a/arch/powerpc/boot/wrapper b/arch/powerpc/boot/wrapper -index 76fe3ccfd381..96fd853a1ed0 100755 --- a/arch/powerpc/boot/wrapper +++ b/arch/powerpc/boot/wrapper @@ -302,6 +302,11 @@ adder875-redboot) @@ -54,8 +50,6 @@ index 76fe3ccfd381..96fd853a1ed0 100755 simpleboot-virtex405-*) platformo="$object/virtex405-head.o $object/simpleboot.o $object/virtex.o" binary=y -diff --git a/arch/powerpc/platforms/85xx/Kconfig b/arch/powerpc/platforms/85xx/Kconfig -index 68920d42b4bc..477782740f0e 100644 --- a/arch/powerpc/platforms/85xx/Kconfig +++ b/arch/powerpc/platforms/85xx/Kconfig @@ -170,6 +170,17 @@ config STX_GP3 @@ -76,11 +70,9 @@ index 68920d42b4bc..477782740f0e 100644 config TQM8540 bool "TQ Components TQM8540" help -diff --git a/arch/powerpc/platforms/85xx/Makefile b/arch/powerpc/platforms/85xx/Makefile -index d1dd0dca5ebf..d0dab29a33eb 100644 --- a/arch/powerpc/platforms/85xx/Makefile +++ b/arch/powerpc/platforms/85xx/Makefile -@@ -26,6 +26,7 @@ obj-$(CONFIG_CORENET_GENERIC) += corenet_generic.o +@@ -26,6 +26,7 @@ obj-$(CONFIG_CORENET_GENERIC) += coren obj-$(CONFIG_FB_FSL_DIU) += t1042rdb_diu.o obj-$(CONFIG_STX_GP3) += stx_gp3.o obj-$(CONFIG_TQM85xx) += tqm85xx.o @@ -88,6 +80,3 @@ index d1dd0dca5ebf..d0dab29a33eb 100644 obj-$(CONFIG_SBC8548) += sbc8548.o obj-$(CONFIG_PPA8548) += ppa8548.o obj-$(CONFIG_SOCRATES) += socrates.o socrates_fpga_pic.o --- -2.20.1 - diff --git a/target/linux/mpc85xx/patches-4.14/102-powerpc-add-cmdline-override.patch b/target/linux/mpc85xx/patches-4.14/102-powerpc-add-cmdline-override.patch index 6a16a34a18..69dd1305d7 100644 --- a/target/linux/mpc85xx/patches-4.14/102-powerpc-add-cmdline-override.patch +++ b/target/linux/mpc85xx/patches-4.14/102-powerpc-add-cmdline-override.patch @@ -1,6 +1,6 @@ --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig -@@ -835,6 +835,14 @@ config CMDLINE_FORCE +@@ -836,6 +836,14 @@ config CMDLINE_FORCE This is useful if you cannot or don't want to change the command-line options your boot loader passes to the kernel. 
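For readers unfamiliar with the cmdline-override pattern that the Kconfig hunk above extends: when such an option is enabled, the kernel simply discards whatever command line the boot loader handed over and uses its built-in CONFIG_CMDLINE string instead. The sketch below is illustrative only — it is not the body of 102-powerpc-add-cmdline-override.patch, and the names OVERRIDE, builtin_cmdline and setup_cmdline are hypothetical stand-ins for the real kernel symbols.

/*
 * Illustrative userspace sketch of the override logic, NOT kernel code.
 * "OVERRIDE" stands in for a CONFIG_CMDLINE_FORCE-style option and
 * "builtin_cmdline" for the CONFIG_CMDLINE string (both hypothetical here).
 */
#include <stdio.h>
#include <string.h>

#define COMMAND_LINE_SIZE 512
#define OVERRIDE 1   /* assume the override option is enabled */

static const char builtin_cmdline[] = "console=ttyS0,115200 rootfstype=squashfs";

/* Pick the effective command line: bootloader-provided unless overridden or empty. */
static void setup_cmdline(char *cmdline, const char *from_bootloader)
{
	strncpy(cmdline, from_bootloader, COMMAND_LINE_SIZE - 1);
	cmdline[COMMAND_LINE_SIZE - 1] = '\0';

	if (OVERRIDE || cmdline[0] == '\0') {
		strncpy(cmdline, builtin_cmdline, COMMAND_LINE_SIZE - 1);
		cmdline[COMMAND_LINE_SIZE - 1] = '\0';
	}
}

int main(void)
{
	char cmdline[COMMAND_LINE_SIZE];

	setup_cmdline(cmdline, "root=/dev/sda1 quiet");
	printf("effective cmdline: %s\n", cmdline);
	return 0;
}

Built as an ordinary userspace program, this prints the built-in string whenever OVERRIDE is set, mirroring the behaviour described in the CMDLINE help text quoted in the hunk above.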
diff --git a/target/linux/mvebu/patches-4.14/402-sfp-display-SFP-module-information.patch b/target/linux/mvebu/patches-4.14/402-sfp-display-SFP-module-information.patch index eeb6b1ef19..ccc9896b50 100644 --- a/target/linux/mvebu/patches-4.14/402-sfp-display-SFP-module-information.patch +++ b/target/linux/mvebu/patches-4.14/402-sfp-display-SFP-module-information.patch @@ -10,7 +10,7 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c -@@ -248,6 +248,184 @@ static unsigned int sfp_check(void *buf, +@@ -264,6 +264,184 @@ static unsigned int sfp_check(void *buf, return check; } @@ -195,7 +195,7 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> /* Helpers */ static void sfp_module_tx_disable(struct sfp *sfp) { -@@ -416,6 +594,7 @@ static int sfp_sm_mod_probe(struct sfp * +@@ -432,6 +610,7 @@ static int sfp_sm_mod_probe(struct sfp * char sn[17]; char date[9]; char rev[5]; @@ -203,7 +203,7 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> u8 check; int err; -@@ -459,10 +638,83 @@ static int sfp_sm_mod_probe(struct sfp * +@@ -475,10 +654,83 @@ static int sfp_sm_mod_probe(struct sfp * rev[4] = '\0'; memcpy(sn, sfp->id.ext.vendor_sn, 16); sn[16] = '\0'; diff --git a/target/linux/mvebu/patches-4.14/403-net-mvneta-convert-to-phylink.patch b/target/linux/mvebu/patches-4.14/403-net-mvneta-convert-to-phylink.patch index ea51cc2ef0..577317a84d 100644 --- a/target/linux/mvebu/patches-4.14/403-net-mvneta-convert-to-phylink.patch +++ b/target/linux/mvebu/patches-4.14/403-net-mvneta-convert-to-phylink.patch @@ -928,10 +928,10 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> return 0; err_netdev: -@@ -4386,16 +4492,14 @@ err_netdev: +@@ -4384,16 +4490,14 @@ err_netdev: + mvneta_bm_pool_destroy(pp->bm_priv, pp->pool_short, 1 << pp->id); } - err_free_stats: + if (pp->phylink) + phylink_destroy(pp->phylink); free_percpu(pp->stats); @@ -947,7 +947,7 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> err_free_irq: irq_dispose_mapping(dev->irq); err_free_netdev: -@@ -4407,7 +4511,6 @@ err_free_netdev: +@@ -4405,7 +4509,6 @@ err_free_netdev: static int mvneta_remove(struct platform_device *pdev) { struct net_device *dev = platform_get_drvdata(pdev); @@ -955,7 +955,7 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> struct mvneta_port *pp = netdev_priv(dev); unregister_netdev(dev); -@@ -4415,10 +4518,8 @@ static int mvneta_remove(struct platform +@@ -4413,10 +4516,8 @@ static int mvneta_remove(struct platform clk_disable_unprepare(pp->clk); free_percpu(pp->ports); free_percpu(pp->stats); @@ -967,7 +967,7 @@ Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk> free_netdev(dev); if (pp->bm_priv) { -@@ -4470,9 +4571,6 @@ static int mvneta_resume(struct device * +@@ -4468,9 +4569,6 @@ static int mvneta_resume(struct device * return err; } diff --git a/target/linux/mvebu/patches-4.14/408-sfp-move-module-eeprom-ethtool-access-into-netdev-co.patch b/target/linux/mvebu/patches-4.14/408-sfp-move-module-eeprom-ethtool-access-into-netdev-co.patch index 1de8caab8b..3fd07d231f 100644 --- a/target/linux/mvebu/patches-4.14/408-sfp-move-module-eeprom-ethtool-access-into-netdev-co.patch +++ b/target/linux/mvebu/patches-4.14/408-sfp-move-module-eeprom-ethtool-access-into-netdev-co.patch @@ -159,7 +159,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> #include <linux/slab.h> #include <linux/rtnetlink.h> #include <linux/sched/signal.h> -@@ -2201,6 +2202,9 @@ static int __ethtool_get_module_info(str +@@ 
-2212,6 +2213,9 @@ static int __ethtool_get_module_info(str const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; @@ -169,7 +169,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> if (phydev && phydev->drv && phydev->drv->module_info) return phydev->drv->module_info(phydev, modinfo); -@@ -2235,6 +2239,9 @@ static int __ethtool_get_module_eeprom(s +@@ -2246,6 +2250,9 @@ static int __ethtool_get_module_eeprom(s const struct ethtool_ops *ops = dev->ethtool_ops; struct phy_device *phydev = dev->phydev; diff --git a/target/linux/mvebu/patches-4.14/410-sfp-hack-allow-marvell-10G-phy-support-to-use-SFP.patch b/target/linux/mvebu/patches-4.14/410-sfp-hack-allow-marvell-10G-phy-support-to-use-SFP.patch index 6ca43043fa..d6e5fbf33f 100644 --- a/target/linux/mvebu/patches-4.14/410-sfp-hack-allow-marvell-10G-phy-support-to-use-SFP.patch +++ b/target/linux/mvebu/patches-4.14/410-sfp-hack-allow-marvell-10G-phy-support-to-use-SFP.patch @@ -25,8 +25,8 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> +#include <linux/sfp.h> enum { - MV_PCS_BASE_T = 0x0000, -@@ -38,6 +40,11 @@ enum { + MV_PMA_BOOT = 0xc050, +@@ -41,6 +43,11 @@ enum { MV_AN_RESULT_SPD_10000 = BIT(15), }; @@ -38,7 +38,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> static int mv3310_modify(struct phy_device *phydev, int devad, u16 reg, u16 mask, u16 bits) { -@@ -56,17 +63,52 @@ static int mv3310_modify(struct phy_devi +@@ -59,8 +66,25 @@ static int mv3310_modify(struct phy_devi return ret < 0 ? ret : 1; } @@ -62,10 +62,11 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> { + struct mv3310_priv *priv; u32 mmd_mask = MDIO_DEVS_PMAPMD | MDIO_DEVS_AN; + int ret; - if (!phydev->is_c45 || - (phydev->c45_ids.devices_in_package & mmd_mask) != mmd_mask) +@@ -78,9 +102,27 @@ static int mv3310_probe(struct phy_devic return -ENODEV; + } + priv = devm_kzalloc(&phydev->mdio.dev, sizeof(*priv), GFP_KERNEL); + if (!priv) @@ -91,7 +92,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> /* * Resetting the MV88X3310 causes it to become non-responsive. Avoid * setting the reset bit(s). 
-@@ -78,6 +120,7 @@ static int mv3310_soft_reset(struct phy_ +@@ -92,6 +134,7 @@ static int mv3310_soft_reset(struct phy_ static int mv3310_config_init(struct phy_device *phydev) { @@ -99,7 +100,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> __ETHTOOL_DECLARE_LINK_MODE_MASK(supported) = { 0, }; u32 mask; int val; -@@ -166,6 +209,14 @@ static int mv3310_config_init(struct phy +@@ -180,6 +223,14 @@ static int mv3310_config_init(struct phy phydev->supported &= mask; phydev->advertising &= phydev->supported; @@ -114,7 +115,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> return 0; } -@@ -349,12 +400,13 @@ static struct phy_driver mv3310_drivers[ +@@ -363,12 +414,13 @@ static struct phy_driver mv3310_drivers[ SUPPORTED_FIBRE | SUPPORTED_10000baseT_Full | SUPPORTED_Backplane, diff --git a/target/linux/mvebu/patches-4.14/411-sfp-add-sfp-compatible.patch b/target/linux/mvebu/patches-4.14/411-sfp-add-sfp-compatible.patch index 4cbd30a6e2..d2e7d22ce5 100644 --- a/target/linux/mvebu/patches-4.14/411-sfp-add-sfp-compatible.patch +++ b/target/linux/mvebu/patches-4.14/411-sfp-add-sfp-compatible.patch @@ -14,7 +14,7 @@ Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk> --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c -@@ -1148,6 +1148,7 @@ static int sfp_remove(struct platform_de +@@ -1164,6 +1164,7 @@ static int sfp_remove(struct platform_de static const struct of_device_id sfp_of_match[] = { { .compatible = "sff,sfp", }, diff --git a/target/linux/mvebu/patches-4.14/450-reprobe_sfp_phy.patch b/target/linux/mvebu/patches-4.14/450-reprobe_sfp_phy.patch index e05d5ad47c..19c3d68ee5 100644 --- a/target/linux/mvebu/patches-4.14/450-reprobe_sfp_phy.patch +++ b/target/linux/mvebu/patches-4.14/450-reprobe_sfp_phy.patch @@ -13,7 +13,7 @@ Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com> --- a/drivers/net/phy/sfp.c +++ b/drivers/net/phy/sfp.c -@@ -489,7 +489,7 @@ static void sfp_sm_phy_detach(struct sfp +@@ -505,7 +505,7 @@ static void sfp_sm_phy_detach(struct sfp sfp->mod_phy = NULL; } @@ -22,7 +22,7 @@ Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com> { struct phy_device *phy; int err; -@@ -499,11 +499,11 @@ static void sfp_sm_probe_phy(struct sfp +@@ -515,11 +515,11 @@ static void sfp_sm_probe_phy(struct sfp phy = mdiobus_scan(sfp->i2c_mii, SFP_PHY_ADDR); if (phy == ERR_PTR(-ENODEV)) { dev_info(sfp->dev, "no PHY detected\n"); @@ -36,7 +36,7 @@ Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com> } err = sfp_add_phy(sfp->sfp_bus, phy); -@@ -511,11 +511,13 @@ static void sfp_sm_probe_phy(struct sfp +@@ -527,11 +527,13 @@ static void sfp_sm_probe_phy(struct sfp phy_device_remove(phy); phy_device_free(phy); dev_err(sfp->dev, "sfp_add_phy failed: %d\n", err); @@ -51,7 +51,7 @@ Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com> } static void sfp_sm_link_up(struct sfp *sfp) -@@ -561,14 +563,9 @@ static void sfp_sm_fault(struct sfp *sfp +@@ -577,14 +579,9 @@ static void sfp_sm_fault(struct sfp *sfp static void sfp_sm_mod_init(struct sfp *sfp) { @@ -68,7 +68,7 @@ Signed-off-by: Jonas Gorski <jonas.gorski@gmail.com> /* Setting the serdes link mode is guesswork: there's no * field in the EEPROM which indicates what mode should -@@ -582,7 +579,22 @@ static void sfp_sm_mod_init(struct sfp * +@@ -598,7 +595,22 @@ static void sfp_sm_mod_init(struct sfp * if (sfp->id.base.e1000_base_t || sfp->id.base.e100_base_lx || sfp->id.base.e100_base_fx) diff --git a/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch 
b/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch index e10f3a31a1..5536e3f61c 100644 --- a/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch +++ b/target/linux/ramips/patches-4.14/0034-NET-multi-phy-support.patch @@ -11,7 +11,7 @@ Signed-off-by: John Crispin <blogic@openwrt.org> --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c -@@ -980,7 +980,10 @@ void phy_state_machine(struct work_struc +@@ -913,7 +913,10 @@ void phy_state_machine(struct work_struc /* If the link is down, give up on negotiation for now */ if (!phydev->link) { phydev->state = PHY_NOLINK; @@ -23,7 +23,7 @@ Signed-off-by: John Crispin <blogic@openwrt.org> break; } -@@ -1067,7 +1070,10 @@ void phy_state_machine(struct work_struc +@@ -1000,7 +1003,10 @@ void phy_state_machine(struct work_struc phy_link_up(phydev); } else { phydev->state = PHY_NOLINK; @@ -35,7 +35,7 @@ Signed-off-by: John Crispin <blogic@openwrt.org> } if (phy_interrupt_is_valid(phydev)) -@@ -1077,7 +1083,10 @@ void phy_state_machine(struct work_struc +@@ -1010,7 +1016,10 @@ void phy_state_machine(struct work_struc case PHY_HALTED: if (phydev->link) { phydev->link = 0; diff --git a/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch b/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch index d238ed9728..2ad1f6f9f8 100644 --- a/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch +++ b/target/linux/ramips/patches-4.14/0051-serial-add-ugly-custom-baud-rate-hack.patch @@ -10,7 +10,7 @@ Signed-off-by: John Crispin <blogic@openwrt.org> --- a/drivers/tty/serial/serial_core.c +++ b/drivers/tty/serial/serial_core.c -@@ -431,6 +431,9 @@ uart_get_baud_rate(struct uart_port *por +@@ -428,6 +428,9 @@ uart_get_baud_rate(struct uart_port *por break; } |