From f97fbd8a9adb481d195cecbcf0b1b19322dc9709 Mon Sep 17 00:00:00 2001
From: Daniel Golle
Date: Thu, 8 Sep 2022 21:14:32 +0100
Subject: mediatek: rename some patches

To ease maintenance, rename patches to contain the kernel version they
first appeared in.

Signed-off-by: Daniel Golle
---
 ...td-nand-make-mtk_ecc.c-a-separated-module.patch | 1383 ------------------
 ...td-nand-make-mtk_ecc.c-a-separated-module.patch | 1383 ++++++++++++++++++
 ...d-driver-for-MTK-SPI-NAND-Flash-Interface.patch | 1537 --------------------
 ...d-driver-for-MTK-SPI-NAND-Flash-Interface.patch | 1537 ++++++++++++++++++++
 ...k-ecc-also-parse-nand-ecc-engine-if-avail.patch |   30 -
 ...k-ecc-also-parse-nand-ecc-engine-if-avail.patch |   30 +
 ...rm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch |   35 -
 ...rm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch |   35 +
 8 files changed, 2985 insertions(+), 2985 deletions(-)
 delete mode 100644 target/linux/mediatek/patches-5.15/120-11-mtd-nand-make-mtk_ecc.c-a-separated-module.patch
 create mode 100644 target/linux/mediatek/patches-5.15/120-11-v5.19-mtd-nand-make-mtk_ecc.c-a-separated-module.patch
 delete mode 100644 target/linux/mediatek/patches-5.15/120-12-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch
 create mode 100644 target/linux/mediatek/patches-5.15/120-12-v5.19-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch
 delete mode 100644 target/linux/mediatek/patches-5.15/120-13-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch
 create mode 100644 target/linux/mediatek/patches-5.15/120-13-v5.19-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch
 delete mode 100644 target/linux/mediatek/patches-5.15/120-14-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch
 create mode 100644 target/linux/mediatek/patches-5.15/120-14-v5.19-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch

diff --git a/target/linux/mediatek/patches-5.15/120-11-mtd-nand-make-mtk_ecc.c-a-separated-module.patch b/target/linux/mediatek/patches-5.15/120-11-mtd-nand-make-mtk_ecc.c-a-separated-module.patch
deleted file mode 100644
index fd9098eae9..0000000000
--- a/target/linux/mediatek/patches-5.15/120-11-mtd-nand-make-mtk_ecc.c-a-separated-module.patch
+++ /dev/null
@@ -1,1383 +0,0 @@
-From ebb9653d4a87c64fb679e4c339e867556dada719 Mon Sep 17 00:00:00 2001
-From: Chuanhong Guo
-Date: Tue, 22 Mar 2022 18:44:21 +0800
-Subject: [PATCH 11/15] mtd: nand: make mtk_ecc.c a separated module
-
-this code will be used in mediatek snfi spi-mem controller with
-pipelined ECC engine.
-
-Signed-off-by: Chuanhong Guo
-(cherry picked from commit 316f47cec4ce5b81aa8006de202d8769c117a52d)
----
- drivers/mtd/nand/Kconfig                                   | 7 +++++++
- drivers/mtd/nand/Makefile                                  | 1 +
- drivers/mtd/nand/{raw/mtk_ecc.c => ecc-mtk.c}              | 3 +--
- drivers/mtd/nand/raw/Kconfig                               | 1 +
- drivers/mtd/nand/raw/Makefile                              | 2 +-
- drivers/mtd/nand/raw/mtk_nand.c                            | 2 +-
- .../nand/raw/mtk_ecc.h => include/linux/mtd/nand-ecc-mtk.h | 0
- 7 files changed, 12 insertions(+), 4 deletions(-)
- rename drivers/mtd/nand/{raw/mtk_ecc.c => ecc-mtk.c} (99%)
- rename drivers/mtd/nand/raw/mtk_ecc.h => include/linux/mtd/nand-ecc-mtk.h (100%)
-
---- a/drivers/mtd/nand/Kconfig
-+++ b/drivers/mtd/nand/Kconfig
-@@ -50,6 +50,13 @@ config MTD_NAND_MTK_BMT
- 	bool "Support MediaTek NAND Bad-block Management Table"
- 	default n
- 
-+config MTD_NAND_ECC_MEDIATEK
-+	tristate "Mediatek hardware ECC engine"
-+	depends on HAS_IOMEM
-+	select MTD_NAND_ECC
-+	help
-+	  This enables support for the hardware ECC engine from Mediatek.
-+ - endmenu - - endmenu ---- a/drivers/mtd/nand/Makefile -+++ b/drivers/mtd/nand/Makefile -@@ -3,6 +3,7 @@ - nandcore-objs := core.o bbt.o - obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o - obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o -+obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o - - obj-y += onenand/ - obj-y += raw/ ---- a/drivers/mtd/nand/raw/mtk_ecc.c -+++ /dev/null -@@ -1,599 +0,0 @@ --// SPDX-License-Identifier: GPL-2.0 OR MIT --/* -- * MTK ECC controller driver. -- * Copyright (C) 2016 MediaTek Inc. -- * Authors: Xiaolei Li -- * Jorge Ramirez-Ortiz -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#include "mtk_ecc.h" -- --#define ECC_IDLE_MASK BIT(0) --#define ECC_IRQ_EN BIT(0) --#define ECC_PG_IRQ_SEL BIT(1) --#define ECC_OP_ENABLE (1) --#define ECC_OP_DISABLE (0) -- --#define ECC_ENCCON (0x00) --#define ECC_ENCCNFG (0x04) --#define ECC_MS_SHIFT (16) --#define ECC_ENCDIADDR (0x08) --#define ECC_ENCIDLE (0x0C) --#define ECC_DECCON (0x100) --#define ECC_DECCNFG (0x104) --#define DEC_EMPTY_EN BIT(31) --#define DEC_CNFG_CORRECT (0x3 << 12) --#define ECC_DECIDLE (0x10C) --#define ECC_DECENUM0 (0x114) -- --#define ECC_TIMEOUT (500000) -- --#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) --#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? ECC_ENCCON : ECC_DECCON) -- --struct mtk_ecc_caps { -- u32 err_mask; -- u32 err_shift; -- const u8 *ecc_strength; -- const u32 *ecc_regs; -- u8 num_ecc_strength; -- u8 ecc_mode_shift; -- u32 parity_bits; -- int pg_irq_sel; --}; -- --struct mtk_ecc { -- struct device *dev; -- const struct mtk_ecc_caps *caps; -- void __iomem *regs; -- struct clk *clk; -- -- struct completion done; -- struct mutex lock; -- u32 sectors; -- -- u8 *eccdata; --}; -- --/* ecc strength that each IP supports */ --static const u8 ecc_strength_mt2701[] = { -- 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, -- 40, 44, 48, 52, 56, 60 --}; -- --static const u8 ecc_strength_mt2712[] = { -- 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, -- 40, 44, 48, 52, 56, 60, 68, 72, 80 --}; -- --static const u8 ecc_strength_mt7622[] = { -- 4, 6, 8, 10, 12 --}; -- --enum mtk_ecc_regs { -- ECC_ENCPAR00, -- ECC_ENCIRQ_EN, -- ECC_ENCIRQ_STA, -- ECC_DECDONE, -- ECC_DECIRQ_EN, -- ECC_DECIRQ_STA, --}; -- --static int mt2701_ecc_regs[] = { -- [ECC_ENCPAR00] = 0x10, -- [ECC_ENCIRQ_EN] = 0x80, -- [ECC_ENCIRQ_STA] = 0x84, -- [ECC_DECDONE] = 0x124, -- [ECC_DECIRQ_EN] = 0x200, -- [ECC_DECIRQ_STA] = 0x204, --}; -- --static int mt2712_ecc_regs[] = { -- [ECC_ENCPAR00] = 0x300, -- [ECC_ENCIRQ_EN] = 0x80, -- [ECC_ENCIRQ_STA] = 0x84, -- [ECC_DECDONE] = 0x124, -- [ECC_DECIRQ_EN] = 0x200, -- [ECC_DECIRQ_STA] = 0x204, --}; -- --static int mt7622_ecc_regs[] = { -- [ECC_ENCPAR00] = 0x10, -- [ECC_ENCIRQ_EN] = 0x30, -- [ECC_ENCIRQ_STA] = 0x34, -- [ECC_DECDONE] = 0x11c, -- [ECC_DECIRQ_EN] = 0x140, -- [ECC_DECIRQ_STA] = 0x144, --}; -- --static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, -- enum mtk_ecc_operation op) --{ -- struct device *dev = ecc->dev; -- u32 val; -- int ret; -- -- ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, -- val & ECC_IDLE_MASK, -- 10, ECC_TIMEOUT); -- if (ret) -- dev_warn(dev, "%s NOT idle\n", -- op == ECC_ENCODE ? 
"encoder" : "decoder"); --} -- --static irqreturn_t mtk_ecc_irq(int irq, void *id) --{ -- struct mtk_ecc *ecc = id; -- u32 dec, enc; -- -- dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]) -- & ECC_IRQ_EN; -- if (dec) { -- dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); -- if (dec & ecc->sectors) { -- /* -- * Clear decode IRQ status once again to ensure that -- * there will be no extra IRQ. -- */ -- readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]); -- ecc->sectors = 0; -- complete(&ecc->done); -- } else { -- return IRQ_HANDLED; -- } -- } else { -- enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA]) -- & ECC_IRQ_EN; -- if (enc) -- complete(&ecc->done); -- else -- return IRQ_NONE; -- } -- -- return IRQ_HANDLED; --} -- --static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) --{ -- u32 ecc_bit, dec_sz, enc_sz; -- u32 reg, i; -- -- for (i = 0; i < ecc->caps->num_ecc_strength; i++) { -- if (ecc->caps->ecc_strength[i] == config->strength) -- break; -- } -- -- if (i == ecc->caps->num_ecc_strength) { -- dev_err(ecc->dev, "invalid ecc strength %d\n", -- config->strength); -- return -EINVAL; -- } -- -- ecc_bit = i; -- -- if (config->op == ECC_ENCODE) { -- /* configure ECC encoder (in bits) */ -- enc_sz = config->len << 3; -- -- reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); -- reg |= (enc_sz << ECC_MS_SHIFT); -- writel(reg, ecc->regs + ECC_ENCCNFG); -- -- if (config->mode != ECC_NFI_MODE) -- writel(lower_32_bits(config->addr), -- ecc->regs + ECC_ENCDIADDR); -- -- } else { -- /* configure ECC decoder (in bits) */ -- dec_sz = (config->len << 3) + -- config->strength * ecc->caps->parity_bits; -- -- reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); -- reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; -- reg |= DEC_EMPTY_EN; -- writel(reg, ecc->regs + ECC_DECCNFG); -- -- if (config->sectors) -- ecc->sectors = 1 << (config->sectors - 1); -- } -- -- return 0; --} -- --void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, -- int sectors) --{ -- u32 offset, i, err; -- u32 bitflips = 0; -- -- stats->corrected = 0; -- stats->failed = 0; -- -- for (i = 0; i < sectors; i++) { -- offset = (i >> 2) << 2; -- err = readl(ecc->regs + ECC_DECENUM0 + offset); -- err = err >> ((i % 4) * ecc->caps->err_shift); -- err &= ecc->caps->err_mask; -- if (err == ecc->caps->err_mask) { -- /* uncorrectable errors */ -- stats->failed++; -- continue; -- } -- -- stats->corrected += err; -- bitflips = max_t(u32, bitflips, err); -- } -- -- stats->bitflips = bitflips; --} --EXPORT_SYMBOL(mtk_ecc_get_stats); -- --void mtk_ecc_release(struct mtk_ecc *ecc) --{ -- clk_disable_unprepare(ecc->clk); -- put_device(ecc->dev); --} --EXPORT_SYMBOL(mtk_ecc_release); -- --static void mtk_ecc_hw_init(struct mtk_ecc *ecc) --{ -- mtk_ecc_wait_idle(ecc, ECC_ENCODE); -- writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); -- -- mtk_ecc_wait_idle(ecc, ECC_DECODE); -- writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); --} -- --static struct mtk_ecc *mtk_ecc_get(struct device_node *np) --{ -- struct platform_device *pdev; -- struct mtk_ecc *ecc; -- -- pdev = of_find_device_by_node(np); -- if (!pdev) -- return ERR_PTR(-EPROBE_DEFER); -- -- ecc = platform_get_drvdata(pdev); -- if (!ecc) { -- put_device(&pdev->dev); -- return ERR_PTR(-EPROBE_DEFER); -- } -- -- clk_prepare_enable(ecc->clk); -- mtk_ecc_hw_init(ecc); -- -- return ecc; --} -- --struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) --{ -- struct mtk_ecc *ecc = NULL; -- struct device_node *np; -- 
-- np = of_parse_phandle(of_node, "ecc-engine", 0); -- if (np) { -- ecc = mtk_ecc_get(np); -- of_node_put(np); -- } -- -- return ecc; --} --EXPORT_SYMBOL(of_mtk_ecc_get); -- --int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) --{ -- enum mtk_ecc_operation op = config->op; -- u16 reg_val; -- int ret; -- -- ret = mutex_lock_interruptible(&ecc->lock); -- if (ret) { -- dev_err(ecc->dev, "interrupted when attempting to lock\n"); -- return ret; -- } -- -- mtk_ecc_wait_idle(ecc, op); -- -- ret = mtk_ecc_config(ecc, config); -- if (ret) { -- mutex_unlock(&ecc->lock); -- return ret; -- } -- -- if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) { -- init_completion(&ecc->done); -- reg_val = ECC_IRQ_EN; -- /* -- * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it -- * means this chip can only generate one ecc irq during page -- * read / write. If is 0, generate one ecc irq each ecc step. -- */ -- if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) -- reg_val |= ECC_PG_IRQ_SEL; -- if (op == ECC_ENCODE) -- writew(reg_val, ecc->regs + -- ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); -- else -- writew(reg_val, ecc->regs + -- ecc->caps->ecc_regs[ECC_DECIRQ_EN]); -- } -- -- writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); -- -- return 0; --} --EXPORT_SYMBOL(mtk_ecc_enable); -- --void mtk_ecc_disable(struct mtk_ecc *ecc) --{ -- enum mtk_ecc_operation op = ECC_ENCODE; -- -- /* find out the running operation */ -- if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) -- op = ECC_DECODE; -- -- /* disable it */ -- mtk_ecc_wait_idle(ecc, op); -- if (op == ECC_DECODE) { -- /* -- * Clear decode IRQ status in case there is a timeout to wait -- * decode IRQ. -- */ -- readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); -- writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]); -- } else { -- writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); -- } -- -- writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); -- -- mutex_unlock(&ecc->lock); --} --EXPORT_SYMBOL(mtk_ecc_disable); -- --int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) --{ -- int ret; -- -- ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); -- if (!ret) { -- dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n", -- (op == ECC_ENCODE) ? 
"encoder" : "decoder"); -- return -ETIMEDOUT; -- } -- -- return 0; --} --EXPORT_SYMBOL(mtk_ecc_wait_done); -- --int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, -- u8 *data, u32 bytes) --{ -- dma_addr_t addr; -- u32 len; -- int ret; -- -- addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); -- ret = dma_mapping_error(ecc->dev, addr); -- if (ret) { -- dev_err(ecc->dev, "dma mapping error\n"); -- return -EINVAL; -- } -- -- config->op = ECC_ENCODE; -- config->addr = addr; -- ret = mtk_ecc_enable(ecc, config); -- if (ret) { -- dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); -- return ret; -- } -- -- ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); -- if (ret) -- goto timeout; -- -- mtk_ecc_wait_idle(ecc, ECC_ENCODE); -- -- /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ -- len = (config->strength * ecc->caps->parity_bits + 7) >> 3; -- -- /* write the parity bytes generated by the ECC back to temp buffer */ -- __ioread32_copy(ecc->eccdata, -- ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00], -- round_up(len, 4)); -- -- /* copy into possibly unaligned OOB region with actual length */ -- memcpy(data + bytes, ecc->eccdata, len); --timeout: -- -- dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); -- mtk_ecc_disable(ecc); -- -- return ret; --} --EXPORT_SYMBOL(mtk_ecc_encode); -- --void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p) --{ -- const u8 *ecc_strength = ecc->caps->ecc_strength; -- int i; -- -- for (i = 0; i < ecc->caps->num_ecc_strength; i++) { -- if (*p <= ecc_strength[i]) { -- if (!i) -- *p = ecc_strength[i]; -- else if (*p != ecc_strength[i]) -- *p = ecc_strength[i - 1]; -- return; -- } -- } -- -- *p = ecc_strength[ecc->caps->num_ecc_strength - 1]; --} --EXPORT_SYMBOL(mtk_ecc_adjust_strength); -- --unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) --{ -- return ecc->caps->parity_bits; --} --EXPORT_SYMBOL(mtk_ecc_get_parity_bits); -- --static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { -- .err_mask = 0x3f, -- .err_shift = 8, -- .ecc_strength = ecc_strength_mt2701, -- .ecc_regs = mt2701_ecc_regs, -- .num_ecc_strength = 20, -- .ecc_mode_shift = 5, -- .parity_bits = 14, -- .pg_irq_sel = 0, --}; -- --static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { -- .err_mask = 0x7f, -- .err_shift = 8, -- .ecc_strength = ecc_strength_mt2712, -- .ecc_regs = mt2712_ecc_regs, -- .num_ecc_strength = 23, -- .ecc_mode_shift = 5, -- .parity_bits = 14, -- .pg_irq_sel = 1, --}; -- --static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { -- .err_mask = 0x1f, -- .err_shift = 5, -- .ecc_strength = ecc_strength_mt7622, -- .ecc_regs = mt7622_ecc_regs, -- .num_ecc_strength = 5, -- .ecc_mode_shift = 4, -- .parity_bits = 13, -- .pg_irq_sel = 0, --}; -- --static const struct of_device_id mtk_ecc_dt_match[] = { -- { -- .compatible = "mediatek,mt2701-ecc", -- .data = &mtk_ecc_caps_mt2701, -- }, { -- .compatible = "mediatek,mt2712-ecc", -- .data = &mtk_ecc_caps_mt2712, -- }, { -- .compatible = "mediatek,mt7622-ecc", -- .data = &mtk_ecc_caps_mt7622, -- }, -- {}, --}; -- --static int mtk_ecc_probe(struct platform_device *pdev) --{ -- struct device *dev = &pdev->dev; -- struct mtk_ecc *ecc; -- struct resource *res; -- u32 max_eccdata_size; -- int irq, ret; -- -- ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); -- if (!ecc) -- return -ENOMEM; -- -- ecc->caps = of_device_get_match_data(dev); -- -- max_eccdata_size = ecc->caps->num_ecc_strength - 1; -- max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; -- max_eccdata_size = 
(max_eccdata_size * ecc->caps->parity_bits + 7) >> 3; -- max_eccdata_size = round_up(max_eccdata_size, 4); -- ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); -- if (!ecc->eccdata) -- return -ENOMEM; -- -- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -- ecc->regs = devm_ioremap_resource(dev, res); -- if (IS_ERR(ecc->regs)) -- return PTR_ERR(ecc->regs); -- -- ecc->clk = devm_clk_get(dev, NULL); -- if (IS_ERR(ecc->clk)) { -- dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); -- return PTR_ERR(ecc->clk); -- } -- -- irq = platform_get_irq(pdev, 0); -- if (irq < 0) -- return irq; -- -- ret = dma_set_mask(dev, DMA_BIT_MASK(32)); -- if (ret) { -- dev_err(dev, "failed to set DMA mask\n"); -- return ret; -- } -- -- ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); -- if (ret) { -- dev_err(dev, "failed to request irq\n"); -- return -EINVAL; -- } -- -- ecc->dev = dev; -- mutex_init(&ecc->lock); -- platform_set_drvdata(pdev, ecc); -- dev_info(dev, "probed\n"); -- -- return 0; --} -- --#ifdef CONFIG_PM_SLEEP --static int mtk_ecc_suspend(struct device *dev) --{ -- struct mtk_ecc *ecc = dev_get_drvdata(dev); -- -- clk_disable_unprepare(ecc->clk); -- -- return 0; --} -- --static int mtk_ecc_resume(struct device *dev) --{ -- struct mtk_ecc *ecc = dev_get_drvdata(dev); -- int ret; -- -- ret = clk_prepare_enable(ecc->clk); -- if (ret) { -- dev_err(dev, "failed to enable clk\n"); -- return ret; -- } -- -- return 0; --} -- --static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); --#endif -- --MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); -- --static struct platform_driver mtk_ecc_driver = { -- .probe = mtk_ecc_probe, -- .driver = { -- .name = "mtk-ecc", -- .of_match_table = of_match_ptr(mtk_ecc_dt_match), --#ifdef CONFIG_PM_SLEEP -- .pm = &mtk_ecc_pm_ops, --#endif -- }, --}; -- --module_platform_driver(mtk_ecc_driver); -- --MODULE_AUTHOR("Xiaolei Li "); --MODULE_DESCRIPTION("MTK Nand ECC Driver"); --MODULE_LICENSE("Dual MIT/GPL"); ---- /dev/null -+++ b/drivers/mtd/nand/ecc-mtk.c -@@ -0,0 +1,598 @@ -+// SPDX-License-Identifier: GPL-2.0 OR MIT -+/* -+ * MTK ECC controller driver. -+ * Copyright (C) 2016 MediaTek Inc. -+ * Authors: Xiaolei Li -+ * Jorge Ramirez-Ortiz -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define ECC_IDLE_MASK BIT(0) -+#define ECC_IRQ_EN BIT(0) -+#define ECC_PG_IRQ_SEL BIT(1) -+#define ECC_OP_ENABLE (1) -+#define ECC_OP_DISABLE (0) -+ -+#define ECC_ENCCON (0x00) -+#define ECC_ENCCNFG (0x04) -+#define ECC_MS_SHIFT (16) -+#define ECC_ENCDIADDR (0x08) -+#define ECC_ENCIDLE (0x0C) -+#define ECC_DECCON (0x100) -+#define ECC_DECCNFG (0x104) -+#define DEC_EMPTY_EN BIT(31) -+#define DEC_CNFG_CORRECT (0x3 << 12) -+#define ECC_DECIDLE (0x10C) -+#define ECC_DECENUM0 (0x114) -+ -+#define ECC_TIMEOUT (500000) -+ -+#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) -+#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? 
ECC_ENCCON : ECC_DECCON) -+ -+struct mtk_ecc_caps { -+ u32 err_mask; -+ u32 err_shift; -+ const u8 *ecc_strength; -+ const u32 *ecc_regs; -+ u8 num_ecc_strength; -+ u8 ecc_mode_shift; -+ u32 parity_bits; -+ int pg_irq_sel; -+}; -+ -+struct mtk_ecc { -+ struct device *dev; -+ const struct mtk_ecc_caps *caps; -+ void __iomem *regs; -+ struct clk *clk; -+ -+ struct completion done; -+ struct mutex lock; -+ u32 sectors; -+ -+ u8 *eccdata; -+}; -+ -+/* ecc strength that each IP supports */ -+static const u8 ecc_strength_mt2701[] = { -+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, -+ 40, 44, 48, 52, 56, 60 -+}; -+ -+static const u8 ecc_strength_mt2712[] = { -+ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, -+ 40, 44, 48, 52, 56, 60, 68, 72, 80 -+}; -+ -+static const u8 ecc_strength_mt7622[] = { -+ 4, 6, 8, 10, 12 -+}; -+ -+enum mtk_ecc_regs { -+ ECC_ENCPAR00, -+ ECC_ENCIRQ_EN, -+ ECC_ENCIRQ_STA, -+ ECC_DECDONE, -+ ECC_DECIRQ_EN, -+ ECC_DECIRQ_STA, -+}; -+ -+static int mt2701_ecc_regs[] = { -+ [ECC_ENCPAR00] = 0x10, -+ [ECC_ENCIRQ_EN] = 0x80, -+ [ECC_ENCIRQ_STA] = 0x84, -+ [ECC_DECDONE] = 0x124, -+ [ECC_DECIRQ_EN] = 0x200, -+ [ECC_DECIRQ_STA] = 0x204, -+}; -+ -+static int mt2712_ecc_regs[] = { -+ [ECC_ENCPAR00] = 0x300, -+ [ECC_ENCIRQ_EN] = 0x80, -+ [ECC_ENCIRQ_STA] = 0x84, -+ [ECC_DECDONE] = 0x124, -+ [ECC_DECIRQ_EN] = 0x200, -+ [ECC_DECIRQ_STA] = 0x204, -+}; -+ -+static int mt7622_ecc_regs[] = { -+ [ECC_ENCPAR00] = 0x10, -+ [ECC_ENCIRQ_EN] = 0x30, -+ [ECC_ENCIRQ_STA] = 0x34, -+ [ECC_DECDONE] = 0x11c, -+ [ECC_DECIRQ_EN] = 0x140, -+ [ECC_DECIRQ_STA] = 0x144, -+}; -+ -+static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, -+ enum mtk_ecc_operation op) -+{ -+ struct device *dev = ecc->dev; -+ u32 val; -+ int ret; -+ -+ ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, -+ val & ECC_IDLE_MASK, -+ 10, ECC_TIMEOUT); -+ if (ret) -+ dev_warn(dev, "%s NOT idle\n", -+ op == ECC_ENCODE ? "encoder" : "decoder"); -+} -+ -+static irqreturn_t mtk_ecc_irq(int irq, void *id) -+{ -+ struct mtk_ecc *ecc = id; -+ u32 dec, enc; -+ -+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]) -+ & ECC_IRQ_EN; -+ if (dec) { -+ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); -+ if (dec & ecc->sectors) { -+ /* -+ * Clear decode IRQ status once again to ensure that -+ * there will be no extra IRQ. 
-+ */ -+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]); -+ ecc->sectors = 0; -+ complete(&ecc->done); -+ } else { -+ return IRQ_HANDLED; -+ } -+ } else { -+ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA]) -+ & ECC_IRQ_EN; -+ if (enc) -+ complete(&ecc->done); -+ else -+ return IRQ_NONE; -+ } -+ -+ return IRQ_HANDLED; -+} -+ -+static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) -+{ -+ u32 ecc_bit, dec_sz, enc_sz; -+ u32 reg, i; -+ -+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) { -+ if (ecc->caps->ecc_strength[i] == config->strength) -+ break; -+ } -+ -+ if (i == ecc->caps->num_ecc_strength) { -+ dev_err(ecc->dev, "invalid ecc strength %d\n", -+ config->strength); -+ return -EINVAL; -+ } -+ -+ ecc_bit = i; -+ -+ if (config->op == ECC_ENCODE) { -+ /* configure ECC encoder (in bits) */ -+ enc_sz = config->len << 3; -+ -+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); -+ reg |= (enc_sz << ECC_MS_SHIFT); -+ writel(reg, ecc->regs + ECC_ENCCNFG); -+ -+ if (config->mode != ECC_NFI_MODE) -+ writel(lower_32_bits(config->addr), -+ ecc->regs + ECC_ENCDIADDR); -+ -+ } else { -+ /* configure ECC decoder (in bits) */ -+ dec_sz = (config->len << 3) + -+ config->strength * ecc->caps->parity_bits; -+ -+ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); -+ reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; -+ reg |= DEC_EMPTY_EN; -+ writel(reg, ecc->regs + ECC_DECCNFG); -+ -+ if (config->sectors) -+ ecc->sectors = 1 << (config->sectors - 1); -+ } -+ -+ return 0; -+} -+ -+void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, -+ int sectors) -+{ -+ u32 offset, i, err; -+ u32 bitflips = 0; -+ -+ stats->corrected = 0; -+ stats->failed = 0; -+ -+ for (i = 0; i < sectors; i++) { -+ offset = (i >> 2) << 2; -+ err = readl(ecc->regs + ECC_DECENUM0 + offset); -+ err = err >> ((i % 4) * ecc->caps->err_shift); -+ err &= ecc->caps->err_mask; -+ if (err == ecc->caps->err_mask) { -+ /* uncorrectable errors */ -+ stats->failed++; -+ continue; -+ } -+ -+ stats->corrected += err; -+ bitflips = max_t(u32, bitflips, err); -+ } -+ -+ stats->bitflips = bitflips; -+} -+EXPORT_SYMBOL(mtk_ecc_get_stats); -+ -+void mtk_ecc_release(struct mtk_ecc *ecc) -+{ -+ clk_disable_unprepare(ecc->clk); -+ put_device(ecc->dev); -+} -+EXPORT_SYMBOL(mtk_ecc_release); -+ -+static void mtk_ecc_hw_init(struct mtk_ecc *ecc) -+{ -+ mtk_ecc_wait_idle(ecc, ECC_ENCODE); -+ writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); -+ -+ mtk_ecc_wait_idle(ecc, ECC_DECODE); -+ writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); -+} -+ -+static struct mtk_ecc *mtk_ecc_get(struct device_node *np) -+{ -+ struct platform_device *pdev; -+ struct mtk_ecc *ecc; -+ -+ pdev = of_find_device_by_node(np); -+ if (!pdev) -+ return ERR_PTR(-EPROBE_DEFER); -+ -+ ecc = platform_get_drvdata(pdev); -+ if (!ecc) { -+ put_device(&pdev->dev); -+ return ERR_PTR(-EPROBE_DEFER); -+ } -+ -+ clk_prepare_enable(ecc->clk); -+ mtk_ecc_hw_init(ecc); -+ -+ return ecc; -+} -+ -+struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) -+{ -+ struct mtk_ecc *ecc = NULL; -+ struct device_node *np; -+ -+ np = of_parse_phandle(of_node, "ecc-engine", 0); -+ if (np) { -+ ecc = mtk_ecc_get(np); -+ of_node_put(np); -+ } -+ -+ return ecc; -+} -+EXPORT_SYMBOL(of_mtk_ecc_get); -+ -+int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) -+{ -+ enum mtk_ecc_operation op = config->op; -+ u16 reg_val; -+ int ret; -+ -+ ret = mutex_lock_interruptible(&ecc->lock); -+ if (ret) { -+ dev_err(ecc->dev, 
"interrupted when attempting to lock\n"); -+ return ret; -+ } -+ -+ mtk_ecc_wait_idle(ecc, op); -+ -+ ret = mtk_ecc_config(ecc, config); -+ if (ret) { -+ mutex_unlock(&ecc->lock); -+ return ret; -+ } -+ -+ if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) { -+ init_completion(&ecc->done); -+ reg_val = ECC_IRQ_EN; -+ /* -+ * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it -+ * means this chip can only generate one ecc irq during page -+ * read / write. If is 0, generate one ecc irq each ecc step. -+ */ -+ if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) -+ reg_val |= ECC_PG_IRQ_SEL; -+ if (op == ECC_ENCODE) -+ writew(reg_val, ecc->regs + -+ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); -+ else -+ writew(reg_val, ecc->regs + -+ ecc->caps->ecc_regs[ECC_DECIRQ_EN]); -+ } -+ -+ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); -+ -+ return 0; -+} -+EXPORT_SYMBOL(mtk_ecc_enable); -+ -+void mtk_ecc_disable(struct mtk_ecc *ecc) -+{ -+ enum mtk_ecc_operation op = ECC_ENCODE; -+ -+ /* find out the running operation */ -+ if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) -+ op = ECC_DECODE; -+ -+ /* disable it */ -+ mtk_ecc_wait_idle(ecc, op); -+ if (op == ECC_DECODE) { -+ /* -+ * Clear decode IRQ status in case there is a timeout to wait -+ * decode IRQ. -+ */ -+ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); -+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]); -+ } else { -+ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); -+ } -+ -+ writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); -+ -+ mutex_unlock(&ecc->lock); -+} -+EXPORT_SYMBOL(mtk_ecc_disable); -+ -+int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) -+{ -+ int ret; -+ -+ ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); -+ if (!ret) { -+ dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n", -+ (op == ECC_ENCODE) ? 
"encoder" : "decoder"); -+ return -ETIMEDOUT; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL(mtk_ecc_wait_done); -+ -+int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, -+ u8 *data, u32 bytes) -+{ -+ dma_addr_t addr; -+ u32 len; -+ int ret; -+ -+ addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); -+ ret = dma_mapping_error(ecc->dev, addr); -+ if (ret) { -+ dev_err(ecc->dev, "dma mapping error\n"); -+ return -EINVAL; -+ } -+ -+ config->op = ECC_ENCODE; -+ config->addr = addr; -+ ret = mtk_ecc_enable(ecc, config); -+ if (ret) { -+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); -+ return ret; -+ } -+ -+ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); -+ if (ret) -+ goto timeout; -+ -+ mtk_ecc_wait_idle(ecc, ECC_ENCODE); -+ -+ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ -+ len = (config->strength * ecc->caps->parity_bits + 7) >> 3; -+ -+ /* write the parity bytes generated by the ECC back to temp buffer */ -+ __ioread32_copy(ecc->eccdata, -+ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00], -+ round_up(len, 4)); -+ -+ /* copy into possibly unaligned OOB region with actual length */ -+ memcpy(data + bytes, ecc->eccdata, len); -+timeout: -+ -+ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); -+ mtk_ecc_disable(ecc); -+ -+ return ret; -+} -+EXPORT_SYMBOL(mtk_ecc_encode); -+ -+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p) -+{ -+ const u8 *ecc_strength = ecc->caps->ecc_strength; -+ int i; -+ -+ for (i = 0; i < ecc->caps->num_ecc_strength; i++) { -+ if (*p <= ecc_strength[i]) { -+ if (!i) -+ *p = ecc_strength[i]; -+ else if (*p != ecc_strength[i]) -+ *p = ecc_strength[i - 1]; -+ return; -+ } -+ } -+ -+ *p = ecc_strength[ecc->caps->num_ecc_strength - 1]; -+} -+EXPORT_SYMBOL(mtk_ecc_adjust_strength); -+ -+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) -+{ -+ return ecc->caps->parity_bits; -+} -+EXPORT_SYMBOL(mtk_ecc_get_parity_bits); -+ -+static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { -+ .err_mask = 0x3f, -+ .err_shift = 8, -+ .ecc_strength = ecc_strength_mt2701, -+ .ecc_regs = mt2701_ecc_regs, -+ .num_ecc_strength = 20, -+ .ecc_mode_shift = 5, -+ .parity_bits = 14, -+ .pg_irq_sel = 0, -+}; -+ -+static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { -+ .err_mask = 0x7f, -+ .err_shift = 8, -+ .ecc_strength = ecc_strength_mt2712, -+ .ecc_regs = mt2712_ecc_regs, -+ .num_ecc_strength = 23, -+ .ecc_mode_shift = 5, -+ .parity_bits = 14, -+ .pg_irq_sel = 1, -+}; -+ -+static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { -+ .err_mask = 0x1f, -+ .err_shift = 5, -+ .ecc_strength = ecc_strength_mt7622, -+ .ecc_regs = mt7622_ecc_regs, -+ .num_ecc_strength = 5, -+ .ecc_mode_shift = 4, -+ .parity_bits = 13, -+ .pg_irq_sel = 0, -+}; -+ -+static const struct of_device_id mtk_ecc_dt_match[] = { -+ { -+ .compatible = "mediatek,mt2701-ecc", -+ .data = &mtk_ecc_caps_mt2701, -+ }, { -+ .compatible = "mediatek,mt2712-ecc", -+ .data = &mtk_ecc_caps_mt2712, -+ }, { -+ .compatible = "mediatek,mt7622-ecc", -+ .data = &mtk_ecc_caps_mt7622, -+ }, -+ {}, -+}; -+ -+static int mtk_ecc_probe(struct platform_device *pdev) -+{ -+ struct device *dev = &pdev->dev; -+ struct mtk_ecc *ecc; -+ struct resource *res; -+ u32 max_eccdata_size; -+ int irq, ret; -+ -+ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); -+ if (!ecc) -+ return -ENOMEM; -+ -+ ecc->caps = of_device_get_match_data(dev); -+ -+ max_eccdata_size = ecc->caps->num_ecc_strength - 1; -+ max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; -+ max_eccdata_size = 
(max_eccdata_size * ecc->caps->parity_bits + 7) >> 3; -+ max_eccdata_size = round_up(max_eccdata_size, 4); -+ ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); -+ if (!ecc->eccdata) -+ return -ENOMEM; -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ ecc->regs = devm_ioremap_resource(dev, res); -+ if (IS_ERR(ecc->regs)) -+ return PTR_ERR(ecc->regs); -+ -+ ecc->clk = devm_clk_get(dev, NULL); -+ if (IS_ERR(ecc->clk)) { -+ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); -+ return PTR_ERR(ecc->clk); -+ } -+ -+ irq = platform_get_irq(pdev, 0); -+ if (irq < 0) -+ return irq; -+ -+ ret = dma_set_mask(dev, DMA_BIT_MASK(32)); -+ if (ret) { -+ dev_err(dev, "failed to set DMA mask\n"); -+ return ret; -+ } -+ -+ ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); -+ if (ret) { -+ dev_err(dev, "failed to request irq\n"); -+ return -EINVAL; -+ } -+ -+ ecc->dev = dev; -+ mutex_init(&ecc->lock); -+ platform_set_drvdata(pdev, ecc); -+ dev_info(dev, "probed\n"); -+ -+ return 0; -+} -+ -+#ifdef CONFIG_PM_SLEEP -+static int mtk_ecc_suspend(struct device *dev) -+{ -+ struct mtk_ecc *ecc = dev_get_drvdata(dev); -+ -+ clk_disable_unprepare(ecc->clk); -+ -+ return 0; -+} -+ -+static int mtk_ecc_resume(struct device *dev) -+{ -+ struct mtk_ecc *ecc = dev_get_drvdata(dev); -+ int ret; -+ -+ ret = clk_prepare_enable(ecc->clk); -+ if (ret) { -+ dev_err(dev, "failed to enable clk\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); -+#endif -+ -+MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); -+ -+static struct platform_driver mtk_ecc_driver = { -+ .probe = mtk_ecc_probe, -+ .driver = { -+ .name = "mtk-ecc", -+ .of_match_table = of_match_ptr(mtk_ecc_dt_match), -+#ifdef CONFIG_PM_SLEEP -+ .pm = &mtk_ecc_pm_ops, -+#endif -+ }, -+}; -+ -+module_platform_driver(mtk_ecc_driver); -+ -+MODULE_AUTHOR("Xiaolei Li "); -+MODULE_DESCRIPTION("MTK Nand ECC Driver"); -+MODULE_LICENSE("Dual MIT/GPL"); ---- a/drivers/mtd/nand/raw/Kconfig -+++ b/drivers/mtd/nand/raw/Kconfig -@@ -360,6 +360,7 @@ config MTD_NAND_QCOM - - config MTD_NAND_MTK - tristate "MTK NAND controller" -+ depends on MTD_NAND_ECC_MEDIATEK - depends on ARCH_MEDIATEK || COMPILE_TEST - depends on HAS_IOMEM - help ---- a/drivers/mtd/nand/raw/Makefile -+++ b/drivers/mtd/nand/raw/Makefile -@@ -48,7 +48,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_n - obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o - obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ - obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o --obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o -+obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o - obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o - obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o - obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o ---- a/drivers/mtd/nand/raw/mtk_nand.c -+++ b/drivers/mtd/nand/raw/mtk_nand.c -@@ -17,7 +17,7 @@ - #include - #include - #include --#include "mtk_ecc.h" -+#include - - /* NAND controller register definition */ - #define NFI_CNFG (0x00) ---- a/drivers/mtd/nand/raw/mtk_ecc.h -+++ /dev/null -@@ -1,47 +0,0 @@ --/* SPDX-License-Identifier: GPL-2.0 OR MIT */ --/* -- * MTK SDG1 ECC controller -- * -- * Copyright (c) 2016 Mediatek -- * Authors: Xiaolei Li -- * Jorge Ramirez-Ortiz -- */ -- --#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ --#define __DRIVERS_MTD_NAND_MTK_ECC_H__ -- --#include -- --enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; --enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; -- --struct device_node; --struct mtk_ecc; 
-- --struct mtk_ecc_stats { -- u32 corrected; -- u32 bitflips; -- u32 failed; --}; -- --struct mtk_ecc_config { -- enum mtk_ecc_operation op; -- enum mtk_ecc_mode mode; -- dma_addr_t addr; -- u32 strength; -- u32 sectors; -- u32 len; --}; -- --int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); --void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); --int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); --int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); --void mtk_ecc_disable(struct mtk_ecc *); --void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p); --unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc); -- --struct mtk_ecc *of_mtk_ecc_get(struct device_node *); --void mtk_ecc_release(struct mtk_ecc *); -- --#endif ---- /dev/null -+++ b/include/linux/mtd/nand-ecc-mtk.h -@@ -0,0 +1,47 @@ -+/* SPDX-License-Identifier: GPL-2.0 OR MIT */ -+/* -+ * MTK SDG1 ECC controller -+ * -+ * Copyright (c) 2016 Mediatek -+ * Authors: Xiaolei Li -+ * Jorge Ramirez-Ortiz -+ */ -+ -+#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ -+#define __DRIVERS_MTD_NAND_MTK_ECC_H__ -+ -+#include -+ -+enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; -+enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; -+ -+struct device_node; -+struct mtk_ecc; -+ -+struct mtk_ecc_stats { -+ u32 corrected; -+ u32 bitflips; -+ u32 failed; -+}; -+ -+struct mtk_ecc_config { -+ enum mtk_ecc_operation op; -+ enum mtk_ecc_mode mode; -+ dma_addr_t addr; -+ u32 strength; -+ u32 sectors; -+ u32 len; -+}; -+ -+int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); -+void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); -+int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); -+int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); -+void mtk_ecc_disable(struct mtk_ecc *); -+void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p); -+unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc); -+ -+struct mtk_ecc *of_mtk_ecc_get(struct device_node *); -+void mtk_ecc_release(struct mtk_ecc *); -+ -+#endif diff --git a/target/linux/mediatek/patches-5.15/120-11-v5.19-mtd-nand-make-mtk_ecc.c-a-separated-module.patch b/target/linux/mediatek/patches-5.15/120-11-v5.19-mtd-nand-make-mtk_ecc.c-a-separated-module.patch new file mode 100644 index 0000000000..fd9098eae9 --- /dev/null +++ b/target/linux/mediatek/patches-5.15/120-11-v5.19-mtd-nand-make-mtk_ecc.c-a-separated-module.patch @@ -0,0 +1,1383 @@ +From ebb9653d4a87c64fb679e4c339e867556dada719 Mon Sep 17 00:00:00 2001 +From: Chuanhong Guo +Date: Tue, 22 Mar 2022 18:44:21 +0800 +Subject: [PATCH 11/15] mtd: nand: make mtk_ecc.c a separated module + +this code will be used in mediatek snfi spi-mem controller with +pipelined ECC engine. 
+ +Signed-off-by: Chuanhong Guo +(cherry picked from commit 316f47cec4ce5b81aa8006de202d8769c117a52d) +--- + drivers/mtd/nand/Kconfig | 7 +++++++ + drivers/mtd/nand/Makefile | 1 + + drivers/mtd/nand/{raw/mtk_ecc.c => ecc-mtk.c} | 3 +-- + drivers/mtd/nand/raw/Kconfig | 1 + + drivers/mtd/nand/raw/Makefile | 2 +- + drivers/mtd/nand/raw/mtk_nand.c | 2 +- + .../nand/raw/mtk_ecc.h => include/linux/mtd/nand-ecc-mtk.h | 0 + 7 files changed, 12 insertions(+), 4 deletions(-) + rename drivers/mtd/nand/{raw/mtk_ecc.c => ecc-mtk.c} (99%) + rename drivers/mtd/nand/raw/mtk_ecc.h => include/linux/mtd/nand-ecc-mtk.h (100%) + +--- a/drivers/mtd/nand/Kconfig ++++ b/drivers/mtd/nand/Kconfig +@@ -50,6 +50,13 @@ config MTD_NAND_MTK_BMT + bool "Support MediaTek NAND Bad-block Management Table" + default n + ++config MTD_NAND_ECC_MEDIATEK ++ tristate "Mediatek hardware ECC engine" ++ depends on HAS_IOMEM ++ select MTD_NAND_ECC ++ help ++ This enables support for the hardware ECC engine from Mediatek. ++ + endmenu + + endmenu +--- a/drivers/mtd/nand/Makefile ++++ b/drivers/mtd/nand/Makefile +@@ -3,6 +3,7 @@ + nandcore-objs := core.o bbt.o + obj-$(CONFIG_MTD_NAND_CORE) += nandcore.o + obj-$(CONFIG_MTD_NAND_MTK_BMT) += mtk_bmt.o mtk_bmt_v2.o mtk_bmt_bbt.o mtk_bmt_nmbm.o ++obj-$(CONFIG_MTD_NAND_ECC_MEDIATEK) += ecc-mtk.o + + obj-y += onenand/ + obj-y += raw/ +--- a/drivers/mtd/nand/raw/mtk_ecc.c ++++ /dev/null +@@ -1,599 +0,0 @@ +-// SPDX-License-Identifier: GPL-2.0 OR MIT +-/* +- * MTK ECC controller driver. +- * Copyright (C) 2016 MediaTek Inc. +- * Authors: Xiaolei Li +- * Jorge Ramirez-Ortiz +- */ +- +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +-#include +- +-#include "mtk_ecc.h" +- +-#define ECC_IDLE_MASK BIT(0) +-#define ECC_IRQ_EN BIT(0) +-#define ECC_PG_IRQ_SEL BIT(1) +-#define ECC_OP_ENABLE (1) +-#define ECC_OP_DISABLE (0) +- +-#define ECC_ENCCON (0x00) +-#define ECC_ENCCNFG (0x04) +-#define ECC_MS_SHIFT (16) +-#define ECC_ENCDIADDR (0x08) +-#define ECC_ENCIDLE (0x0C) +-#define ECC_DECCON (0x100) +-#define ECC_DECCNFG (0x104) +-#define DEC_EMPTY_EN BIT(31) +-#define DEC_CNFG_CORRECT (0x3 << 12) +-#define ECC_DECIDLE (0x10C) +-#define ECC_DECENUM0 (0x114) +- +-#define ECC_TIMEOUT (500000) +- +-#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) +-#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? 
ECC_ENCCON : ECC_DECCON) +- +-struct mtk_ecc_caps { +- u32 err_mask; +- u32 err_shift; +- const u8 *ecc_strength; +- const u32 *ecc_regs; +- u8 num_ecc_strength; +- u8 ecc_mode_shift; +- u32 parity_bits; +- int pg_irq_sel; +-}; +- +-struct mtk_ecc { +- struct device *dev; +- const struct mtk_ecc_caps *caps; +- void __iomem *regs; +- struct clk *clk; +- +- struct completion done; +- struct mutex lock; +- u32 sectors; +- +- u8 *eccdata; +-}; +- +-/* ecc strength that each IP supports */ +-static const u8 ecc_strength_mt2701[] = { +- 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, +- 40, 44, 48, 52, 56, 60 +-}; +- +-static const u8 ecc_strength_mt2712[] = { +- 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, +- 40, 44, 48, 52, 56, 60, 68, 72, 80 +-}; +- +-static const u8 ecc_strength_mt7622[] = { +- 4, 6, 8, 10, 12 +-}; +- +-enum mtk_ecc_regs { +- ECC_ENCPAR00, +- ECC_ENCIRQ_EN, +- ECC_ENCIRQ_STA, +- ECC_DECDONE, +- ECC_DECIRQ_EN, +- ECC_DECIRQ_STA, +-}; +- +-static int mt2701_ecc_regs[] = { +- [ECC_ENCPAR00] = 0x10, +- [ECC_ENCIRQ_EN] = 0x80, +- [ECC_ENCIRQ_STA] = 0x84, +- [ECC_DECDONE] = 0x124, +- [ECC_DECIRQ_EN] = 0x200, +- [ECC_DECIRQ_STA] = 0x204, +-}; +- +-static int mt2712_ecc_regs[] = { +- [ECC_ENCPAR00] = 0x300, +- [ECC_ENCIRQ_EN] = 0x80, +- [ECC_ENCIRQ_STA] = 0x84, +- [ECC_DECDONE] = 0x124, +- [ECC_DECIRQ_EN] = 0x200, +- [ECC_DECIRQ_STA] = 0x204, +-}; +- +-static int mt7622_ecc_regs[] = { +- [ECC_ENCPAR00] = 0x10, +- [ECC_ENCIRQ_EN] = 0x30, +- [ECC_ENCIRQ_STA] = 0x34, +- [ECC_DECDONE] = 0x11c, +- [ECC_DECIRQ_EN] = 0x140, +- [ECC_DECIRQ_STA] = 0x144, +-}; +- +-static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, +- enum mtk_ecc_operation op) +-{ +- struct device *dev = ecc->dev; +- u32 val; +- int ret; +- +- ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, +- val & ECC_IDLE_MASK, +- 10, ECC_TIMEOUT); +- if (ret) +- dev_warn(dev, "%s NOT idle\n", +- op == ECC_ENCODE ? "encoder" : "decoder"); +-} +- +-static irqreturn_t mtk_ecc_irq(int irq, void *id) +-{ +- struct mtk_ecc *ecc = id; +- u32 dec, enc; +- +- dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]) +- & ECC_IRQ_EN; +- if (dec) { +- dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); +- if (dec & ecc->sectors) { +- /* +- * Clear decode IRQ status once again to ensure that +- * there will be no extra IRQ. 
+- */ +- readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]); +- ecc->sectors = 0; +- complete(&ecc->done); +- } else { +- return IRQ_HANDLED; +- } +- } else { +- enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA]) +- & ECC_IRQ_EN; +- if (enc) +- complete(&ecc->done); +- else +- return IRQ_NONE; +- } +- +- return IRQ_HANDLED; +-} +- +-static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) +-{ +- u32 ecc_bit, dec_sz, enc_sz; +- u32 reg, i; +- +- for (i = 0; i < ecc->caps->num_ecc_strength; i++) { +- if (ecc->caps->ecc_strength[i] == config->strength) +- break; +- } +- +- if (i == ecc->caps->num_ecc_strength) { +- dev_err(ecc->dev, "invalid ecc strength %d\n", +- config->strength); +- return -EINVAL; +- } +- +- ecc_bit = i; +- +- if (config->op == ECC_ENCODE) { +- /* configure ECC encoder (in bits) */ +- enc_sz = config->len << 3; +- +- reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); +- reg |= (enc_sz << ECC_MS_SHIFT); +- writel(reg, ecc->regs + ECC_ENCCNFG); +- +- if (config->mode != ECC_NFI_MODE) +- writel(lower_32_bits(config->addr), +- ecc->regs + ECC_ENCDIADDR); +- +- } else { +- /* configure ECC decoder (in bits) */ +- dec_sz = (config->len << 3) + +- config->strength * ecc->caps->parity_bits; +- +- reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); +- reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; +- reg |= DEC_EMPTY_EN; +- writel(reg, ecc->regs + ECC_DECCNFG); +- +- if (config->sectors) +- ecc->sectors = 1 << (config->sectors - 1); +- } +- +- return 0; +-} +- +-void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, +- int sectors) +-{ +- u32 offset, i, err; +- u32 bitflips = 0; +- +- stats->corrected = 0; +- stats->failed = 0; +- +- for (i = 0; i < sectors; i++) { +- offset = (i >> 2) << 2; +- err = readl(ecc->regs + ECC_DECENUM0 + offset); +- err = err >> ((i % 4) * ecc->caps->err_shift); +- err &= ecc->caps->err_mask; +- if (err == ecc->caps->err_mask) { +- /* uncorrectable errors */ +- stats->failed++; +- continue; +- } +- +- stats->corrected += err; +- bitflips = max_t(u32, bitflips, err); +- } +- +- stats->bitflips = bitflips; +-} +-EXPORT_SYMBOL(mtk_ecc_get_stats); +- +-void mtk_ecc_release(struct mtk_ecc *ecc) +-{ +- clk_disable_unprepare(ecc->clk); +- put_device(ecc->dev); +-} +-EXPORT_SYMBOL(mtk_ecc_release); +- +-static void mtk_ecc_hw_init(struct mtk_ecc *ecc) +-{ +- mtk_ecc_wait_idle(ecc, ECC_ENCODE); +- writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); +- +- mtk_ecc_wait_idle(ecc, ECC_DECODE); +- writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); +-} +- +-static struct mtk_ecc *mtk_ecc_get(struct device_node *np) +-{ +- struct platform_device *pdev; +- struct mtk_ecc *ecc; +- +- pdev = of_find_device_by_node(np); +- if (!pdev) +- return ERR_PTR(-EPROBE_DEFER); +- +- ecc = platform_get_drvdata(pdev); +- if (!ecc) { +- put_device(&pdev->dev); +- return ERR_PTR(-EPROBE_DEFER); +- } +- +- clk_prepare_enable(ecc->clk); +- mtk_ecc_hw_init(ecc); +- +- return ecc; +-} +- +-struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) +-{ +- struct mtk_ecc *ecc = NULL; +- struct device_node *np; +- +- np = of_parse_phandle(of_node, "ecc-engine", 0); +- if (np) { +- ecc = mtk_ecc_get(np); +- of_node_put(np); +- } +- +- return ecc; +-} +-EXPORT_SYMBOL(of_mtk_ecc_get); +- +-int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) +-{ +- enum mtk_ecc_operation op = config->op; +- u16 reg_val; +- int ret; +- +- ret = mutex_lock_interruptible(&ecc->lock); +- if (ret) { +- dev_err(ecc->dev, 
"interrupted when attempting to lock\n"); +- return ret; +- } +- +- mtk_ecc_wait_idle(ecc, op); +- +- ret = mtk_ecc_config(ecc, config); +- if (ret) { +- mutex_unlock(&ecc->lock); +- return ret; +- } +- +- if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) { +- init_completion(&ecc->done); +- reg_val = ECC_IRQ_EN; +- /* +- * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it +- * means this chip can only generate one ecc irq during page +- * read / write. If is 0, generate one ecc irq each ecc step. +- */ +- if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) +- reg_val |= ECC_PG_IRQ_SEL; +- if (op == ECC_ENCODE) +- writew(reg_val, ecc->regs + +- ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); +- else +- writew(reg_val, ecc->regs + +- ecc->caps->ecc_regs[ECC_DECIRQ_EN]); +- } +- +- writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); +- +- return 0; +-} +-EXPORT_SYMBOL(mtk_ecc_enable); +- +-void mtk_ecc_disable(struct mtk_ecc *ecc) +-{ +- enum mtk_ecc_operation op = ECC_ENCODE; +- +- /* find out the running operation */ +- if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) +- op = ECC_DECODE; +- +- /* disable it */ +- mtk_ecc_wait_idle(ecc, op); +- if (op == ECC_DECODE) { +- /* +- * Clear decode IRQ status in case there is a timeout to wait +- * decode IRQ. +- */ +- readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); +- writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]); +- } else { +- writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); +- } +- +- writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); +- +- mutex_unlock(&ecc->lock); +-} +-EXPORT_SYMBOL(mtk_ecc_disable); +- +-int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) +-{ +- int ret; +- +- ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); +- if (!ret) { +- dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n", +- (op == ECC_ENCODE) ? 
"encoder" : "decoder"); +- return -ETIMEDOUT; +- } +- +- return 0; +-} +-EXPORT_SYMBOL(mtk_ecc_wait_done); +- +-int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, +- u8 *data, u32 bytes) +-{ +- dma_addr_t addr; +- u32 len; +- int ret; +- +- addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); +- ret = dma_mapping_error(ecc->dev, addr); +- if (ret) { +- dev_err(ecc->dev, "dma mapping error\n"); +- return -EINVAL; +- } +- +- config->op = ECC_ENCODE; +- config->addr = addr; +- ret = mtk_ecc_enable(ecc, config); +- if (ret) { +- dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); +- return ret; +- } +- +- ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); +- if (ret) +- goto timeout; +- +- mtk_ecc_wait_idle(ecc, ECC_ENCODE); +- +- /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ +- len = (config->strength * ecc->caps->parity_bits + 7) >> 3; +- +- /* write the parity bytes generated by the ECC back to temp buffer */ +- __ioread32_copy(ecc->eccdata, +- ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00], +- round_up(len, 4)); +- +- /* copy into possibly unaligned OOB region with actual length */ +- memcpy(data + bytes, ecc->eccdata, len); +-timeout: +- +- dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); +- mtk_ecc_disable(ecc); +- +- return ret; +-} +-EXPORT_SYMBOL(mtk_ecc_encode); +- +-void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p) +-{ +- const u8 *ecc_strength = ecc->caps->ecc_strength; +- int i; +- +- for (i = 0; i < ecc->caps->num_ecc_strength; i++) { +- if (*p <= ecc_strength[i]) { +- if (!i) +- *p = ecc_strength[i]; +- else if (*p != ecc_strength[i]) +- *p = ecc_strength[i - 1]; +- return; +- } +- } +- +- *p = ecc_strength[ecc->caps->num_ecc_strength - 1]; +-} +-EXPORT_SYMBOL(mtk_ecc_adjust_strength); +- +-unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) +-{ +- return ecc->caps->parity_bits; +-} +-EXPORT_SYMBOL(mtk_ecc_get_parity_bits); +- +-static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { +- .err_mask = 0x3f, +- .err_shift = 8, +- .ecc_strength = ecc_strength_mt2701, +- .ecc_regs = mt2701_ecc_regs, +- .num_ecc_strength = 20, +- .ecc_mode_shift = 5, +- .parity_bits = 14, +- .pg_irq_sel = 0, +-}; +- +-static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { +- .err_mask = 0x7f, +- .err_shift = 8, +- .ecc_strength = ecc_strength_mt2712, +- .ecc_regs = mt2712_ecc_regs, +- .num_ecc_strength = 23, +- .ecc_mode_shift = 5, +- .parity_bits = 14, +- .pg_irq_sel = 1, +-}; +- +-static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { +- .err_mask = 0x1f, +- .err_shift = 5, +- .ecc_strength = ecc_strength_mt7622, +- .ecc_regs = mt7622_ecc_regs, +- .num_ecc_strength = 5, +- .ecc_mode_shift = 4, +- .parity_bits = 13, +- .pg_irq_sel = 0, +-}; +- +-static const struct of_device_id mtk_ecc_dt_match[] = { +- { +- .compatible = "mediatek,mt2701-ecc", +- .data = &mtk_ecc_caps_mt2701, +- }, { +- .compatible = "mediatek,mt2712-ecc", +- .data = &mtk_ecc_caps_mt2712, +- }, { +- .compatible = "mediatek,mt7622-ecc", +- .data = &mtk_ecc_caps_mt7622, +- }, +- {}, +-}; +- +-static int mtk_ecc_probe(struct platform_device *pdev) +-{ +- struct device *dev = &pdev->dev; +- struct mtk_ecc *ecc; +- struct resource *res; +- u32 max_eccdata_size; +- int irq, ret; +- +- ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); +- if (!ecc) +- return -ENOMEM; +- +- ecc->caps = of_device_get_match_data(dev); +- +- max_eccdata_size = ecc->caps->num_ecc_strength - 1; +- max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; +- max_eccdata_size = 
(max_eccdata_size * ecc->caps->parity_bits + 7) >> 3; +- max_eccdata_size = round_up(max_eccdata_size, 4); +- ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); +- if (!ecc->eccdata) +- return -ENOMEM; +- +- res = platform_get_resource(pdev, IORESOURCE_MEM, 0); +- ecc->regs = devm_ioremap_resource(dev, res); +- if (IS_ERR(ecc->regs)) +- return PTR_ERR(ecc->regs); +- +- ecc->clk = devm_clk_get(dev, NULL); +- if (IS_ERR(ecc->clk)) { +- dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); +- return PTR_ERR(ecc->clk); +- } +- +- irq = platform_get_irq(pdev, 0); +- if (irq < 0) +- return irq; +- +- ret = dma_set_mask(dev, DMA_BIT_MASK(32)); +- if (ret) { +- dev_err(dev, "failed to set DMA mask\n"); +- return ret; +- } +- +- ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); +- if (ret) { +- dev_err(dev, "failed to request irq\n"); +- return -EINVAL; +- } +- +- ecc->dev = dev; +- mutex_init(&ecc->lock); +- platform_set_drvdata(pdev, ecc); +- dev_info(dev, "probed\n"); +- +- return 0; +-} +- +-#ifdef CONFIG_PM_SLEEP +-static int mtk_ecc_suspend(struct device *dev) +-{ +- struct mtk_ecc *ecc = dev_get_drvdata(dev); +- +- clk_disable_unprepare(ecc->clk); +- +- return 0; +-} +- +-static int mtk_ecc_resume(struct device *dev) +-{ +- struct mtk_ecc *ecc = dev_get_drvdata(dev); +- int ret; +- +- ret = clk_prepare_enable(ecc->clk); +- if (ret) { +- dev_err(dev, "failed to enable clk\n"); +- return ret; +- } +- +- return 0; +-} +- +-static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); +-#endif +- +-MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); +- +-static struct platform_driver mtk_ecc_driver = { +- .probe = mtk_ecc_probe, +- .driver = { +- .name = "mtk-ecc", +- .of_match_table = of_match_ptr(mtk_ecc_dt_match), +-#ifdef CONFIG_PM_SLEEP +- .pm = &mtk_ecc_pm_ops, +-#endif +- }, +-}; +- +-module_platform_driver(mtk_ecc_driver); +- +-MODULE_AUTHOR("Xiaolei Li "); +-MODULE_DESCRIPTION("MTK Nand ECC Driver"); +-MODULE_LICENSE("Dual MIT/GPL"); +--- /dev/null ++++ b/drivers/mtd/nand/ecc-mtk.c +@@ -0,0 +1,598 @@ ++// SPDX-License-Identifier: GPL-2.0 OR MIT ++/* ++ * MTK ECC controller driver. ++ * Copyright (C) 2016 MediaTek Inc. ++ * Authors: Xiaolei Li ++ * Jorge Ramirez-Ortiz ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define ECC_IDLE_MASK BIT(0) ++#define ECC_IRQ_EN BIT(0) ++#define ECC_PG_IRQ_SEL BIT(1) ++#define ECC_OP_ENABLE (1) ++#define ECC_OP_DISABLE (0) ++ ++#define ECC_ENCCON (0x00) ++#define ECC_ENCCNFG (0x04) ++#define ECC_MS_SHIFT (16) ++#define ECC_ENCDIADDR (0x08) ++#define ECC_ENCIDLE (0x0C) ++#define ECC_DECCON (0x100) ++#define ECC_DECCNFG (0x104) ++#define DEC_EMPTY_EN BIT(31) ++#define DEC_CNFG_CORRECT (0x3 << 12) ++#define ECC_DECIDLE (0x10C) ++#define ECC_DECENUM0 (0x114) ++ ++#define ECC_TIMEOUT (500000) ++ ++#define ECC_IDLE_REG(op) ((op) == ECC_ENCODE ? ECC_ENCIDLE : ECC_DECIDLE) ++#define ECC_CTL_REG(op) ((op) == ECC_ENCODE ? 
ECC_ENCCON : ECC_DECCON) ++ ++struct mtk_ecc_caps { ++ u32 err_mask; ++ u32 err_shift; ++ const u8 *ecc_strength; ++ const u32 *ecc_regs; ++ u8 num_ecc_strength; ++ u8 ecc_mode_shift; ++ u32 parity_bits; ++ int pg_irq_sel; ++}; ++ ++struct mtk_ecc { ++ struct device *dev; ++ const struct mtk_ecc_caps *caps; ++ void __iomem *regs; ++ struct clk *clk; ++ ++ struct completion done; ++ struct mutex lock; ++ u32 sectors; ++ ++ u8 *eccdata; ++}; ++ ++/* ecc strength that each IP supports */ ++static const u8 ecc_strength_mt2701[] = { ++ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, ++ 40, 44, 48, 52, 56, 60 ++}; ++ ++static const u8 ecc_strength_mt2712[] = { ++ 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 28, 32, 36, ++ 40, 44, 48, 52, 56, 60, 68, 72, 80 ++}; ++ ++static const u8 ecc_strength_mt7622[] = { ++ 4, 6, 8, 10, 12 ++}; ++ ++enum mtk_ecc_regs { ++ ECC_ENCPAR00, ++ ECC_ENCIRQ_EN, ++ ECC_ENCIRQ_STA, ++ ECC_DECDONE, ++ ECC_DECIRQ_EN, ++ ECC_DECIRQ_STA, ++}; ++ ++static int mt2701_ecc_regs[] = { ++ [ECC_ENCPAR00] = 0x10, ++ [ECC_ENCIRQ_EN] = 0x80, ++ [ECC_ENCIRQ_STA] = 0x84, ++ [ECC_DECDONE] = 0x124, ++ [ECC_DECIRQ_EN] = 0x200, ++ [ECC_DECIRQ_STA] = 0x204, ++}; ++ ++static int mt2712_ecc_regs[] = { ++ [ECC_ENCPAR00] = 0x300, ++ [ECC_ENCIRQ_EN] = 0x80, ++ [ECC_ENCIRQ_STA] = 0x84, ++ [ECC_DECDONE] = 0x124, ++ [ECC_DECIRQ_EN] = 0x200, ++ [ECC_DECIRQ_STA] = 0x204, ++}; ++ ++static int mt7622_ecc_regs[] = { ++ [ECC_ENCPAR00] = 0x10, ++ [ECC_ENCIRQ_EN] = 0x30, ++ [ECC_ENCIRQ_STA] = 0x34, ++ [ECC_DECDONE] = 0x11c, ++ [ECC_DECIRQ_EN] = 0x140, ++ [ECC_DECIRQ_STA] = 0x144, ++}; ++ ++static inline void mtk_ecc_wait_idle(struct mtk_ecc *ecc, ++ enum mtk_ecc_operation op) ++{ ++ struct device *dev = ecc->dev; ++ u32 val; ++ int ret; ++ ++ ret = readl_poll_timeout_atomic(ecc->regs + ECC_IDLE_REG(op), val, ++ val & ECC_IDLE_MASK, ++ 10, ECC_TIMEOUT); ++ if (ret) ++ dev_warn(dev, "%s NOT idle\n", ++ op == ECC_ENCODE ? "encoder" : "decoder"); ++} ++ ++static irqreturn_t mtk_ecc_irq(int irq, void *id) ++{ ++ struct mtk_ecc *ecc = id; ++ u32 dec, enc; ++ ++ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]) ++ & ECC_IRQ_EN; ++ if (dec) { ++ dec = readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); ++ if (dec & ecc->sectors) { ++ /* ++ * Clear decode IRQ status once again to ensure that ++ * there will be no extra IRQ. 
++ */ ++ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_STA]); ++ ecc->sectors = 0; ++ complete(&ecc->done); ++ } else { ++ return IRQ_HANDLED; ++ } ++ } else { ++ enc = readl(ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_STA]) ++ & ECC_IRQ_EN; ++ if (enc) ++ complete(&ecc->done); ++ else ++ return IRQ_NONE; ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static int mtk_ecc_config(struct mtk_ecc *ecc, struct mtk_ecc_config *config) ++{ ++ u32 ecc_bit, dec_sz, enc_sz; ++ u32 reg, i; ++ ++ for (i = 0; i < ecc->caps->num_ecc_strength; i++) { ++ if (ecc->caps->ecc_strength[i] == config->strength) ++ break; ++ } ++ ++ if (i == ecc->caps->num_ecc_strength) { ++ dev_err(ecc->dev, "invalid ecc strength %d\n", ++ config->strength); ++ return -EINVAL; ++ } ++ ++ ecc_bit = i; ++ ++ if (config->op == ECC_ENCODE) { ++ /* configure ECC encoder (in bits) */ ++ enc_sz = config->len << 3; ++ ++ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); ++ reg |= (enc_sz << ECC_MS_SHIFT); ++ writel(reg, ecc->regs + ECC_ENCCNFG); ++ ++ if (config->mode != ECC_NFI_MODE) ++ writel(lower_32_bits(config->addr), ++ ecc->regs + ECC_ENCDIADDR); ++ ++ } else { ++ /* configure ECC decoder (in bits) */ ++ dec_sz = (config->len << 3) + ++ config->strength * ecc->caps->parity_bits; ++ ++ reg = ecc_bit | (config->mode << ecc->caps->ecc_mode_shift); ++ reg |= (dec_sz << ECC_MS_SHIFT) | DEC_CNFG_CORRECT; ++ reg |= DEC_EMPTY_EN; ++ writel(reg, ecc->regs + ECC_DECCNFG); ++ ++ if (config->sectors) ++ ecc->sectors = 1 << (config->sectors - 1); ++ } ++ ++ return 0; ++} ++ ++void mtk_ecc_get_stats(struct mtk_ecc *ecc, struct mtk_ecc_stats *stats, ++ int sectors) ++{ ++ u32 offset, i, err; ++ u32 bitflips = 0; ++ ++ stats->corrected = 0; ++ stats->failed = 0; ++ ++ for (i = 0; i < sectors; i++) { ++ offset = (i >> 2) << 2; ++ err = readl(ecc->regs + ECC_DECENUM0 + offset); ++ err = err >> ((i % 4) * ecc->caps->err_shift); ++ err &= ecc->caps->err_mask; ++ if (err == ecc->caps->err_mask) { ++ /* uncorrectable errors */ ++ stats->failed++; ++ continue; ++ } ++ ++ stats->corrected += err; ++ bitflips = max_t(u32, bitflips, err); ++ } ++ ++ stats->bitflips = bitflips; ++} ++EXPORT_SYMBOL(mtk_ecc_get_stats); ++ ++void mtk_ecc_release(struct mtk_ecc *ecc) ++{ ++ clk_disable_unprepare(ecc->clk); ++ put_device(ecc->dev); ++} ++EXPORT_SYMBOL(mtk_ecc_release); ++ ++static void mtk_ecc_hw_init(struct mtk_ecc *ecc) ++{ ++ mtk_ecc_wait_idle(ecc, ECC_ENCODE); ++ writew(ECC_OP_DISABLE, ecc->regs + ECC_ENCCON); ++ ++ mtk_ecc_wait_idle(ecc, ECC_DECODE); ++ writel(ECC_OP_DISABLE, ecc->regs + ECC_DECCON); ++} ++ ++static struct mtk_ecc *mtk_ecc_get(struct device_node *np) ++{ ++ struct platform_device *pdev; ++ struct mtk_ecc *ecc; ++ ++ pdev = of_find_device_by_node(np); ++ if (!pdev) ++ return ERR_PTR(-EPROBE_DEFER); ++ ++ ecc = platform_get_drvdata(pdev); ++ if (!ecc) { ++ put_device(&pdev->dev); ++ return ERR_PTR(-EPROBE_DEFER); ++ } ++ ++ clk_prepare_enable(ecc->clk); ++ mtk_ecc_hw_init(ecc); ++ ++ return ecc; ++} ++ ++struct mtk_ecc *of_mtk_ecc_get(struct device_node *of_node) ++{ ++ struct mtk_ecc *ecc = NULL; ++ struct device_node *np; ++ ++ np = of_parse_phandle(of_node, "ecc-engine", 0); ++ if (np) { ++ ecc = mtk_ecc_get(np); ++ of_node_put(np); ++ } ++ ++ return ecc; ++} ++EXPORT_SYMBOL(of_mtk_ecc_get); ++ ++int mtk_ecc_enable(struct mtk_ecc *ecc, struct mtk_ecc_config *config) ++{ ++ enum mtk_ecc_operation op = config->op; ++ u16 reg_val; ++ int ret; ++ ++ ret = mutex_lock_interruptible(&ecc->lock); ++ if (ret) { ++ dev_err(ecc->dev, 
"interrupted when attempting to lock\n"); ++ return ret; ++ } ++ ++ mtk_ecc_wait_idle(ecc, op); ++ ++ ret = mtk_ecc_config(ecc, config); ++ if (ret) { ++ mutex_unlock(&ecc->lock); ++ return ret; ++ } ++ ++ if (config->mode != ECC_NFI_MODE || op != ECC_ENCODE) { ++ init_completion(&ecc->done); ++ reg_val = ECC_IRQ_EN; ++ /* ++ * For ECC_NFI_MODE, if ecc->caps->pg_irq_sel is 1, then it ++ * means this chip can only generate one ecc irq during page ++ * read / write. If is 0, generate one ecc irq each ecc step. ++ */ ++ if (ecc->caps->pg_irq_sel && config->mode == ECC_NFI_MODE) ++ reg_val |= ECC_PG_IRQ_SEL; ++ if (op == ECC_ENCODE) ++ writew(reg_val, ecc->regs + ++ ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); ++ else ++ writew(reg_val, ecc->regs + ++ ecc->caps->ecc_regs[ECC_DECIRQ_EN]); ++ } ++ ++ writew(ECC_OP_ENABLE, ecc->regs + ECC_CTL_REG(op)); ++ ++ return 0; ++} ++EXPORT_SYMBOL(mtk_ecc_enable); ++ ++void mtk_ecc_disable(struct mtk_ecc *ecc) ++{ ++ enum mtk_ecc_operation op = ECC_ENCODE; ++ ++ /* find out the running operation */ ++ if (readw(ecc->regs + ECC_CTL_REG(op)) != ECC_OP_ENABLE) ++ op = ECC_DECODE; ++ ++ /* disable it */ ++ mtk_ecc_wait_idle(ecc, op); ++ if (op == ECC_DECODE) { ++ /* ++ * Clear decode IRQ status in case there is a timeout to wait ++ * decode IRQ. ++ */ ++ readw(ecc->regs + ecc->caps->ecc_regs[ECC_DECDONE]); ++ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_DECIRQ_EN]); ++ } else { ++ writew(0, ecc->regs + ecc->caps->ecc_regs[ECC_ENCIRQ_EN]); ++ } ++ ++ writew(ECC_OP_DISABLE, ecc->regs + ECC_CTL_REG(op)); ++ ++ mutex_unlock(&ecc->lock); ++} ++EXPORT_SYMBOL(mtk_ecc_disable); ++ ++int mtk_ecc_wait_done(struct mtk_ecc *ecc, enum mtk_ecc_operation op) ++{ ++ int ret; ++ ++ ret = wait_for_completion_timeout(&ecc->done, msecs_to_jiffies(500)); ++ if (!ret) { ++ dev_err(ecc->dev, "%s timeout - interrupt did not arrive)\n", ++ (op == ECC_ENCODE) ? 
"encoder" : "decoder"); ++ return -ETIMEDOUT; ++ } ++ ++ return 0; ++} ++EXPORT_SYMBOL(mtk_ecc_wait_done); ++ ++int mtk_ecc_encode(struct mtk_ecc *ecc, struct mtk_ecc_config *config, ++ u8 *data, u32 bytes) ++{ ++ dma_addr_t addr; ++ u32 len; ++ int ret; ++ ++ addr = dma_map_single(ecc->dev, data, bytes, DMA_TO_DEVICE); ++ ret = dma_mapping_error(ecc->dev, addr); ++ if (ret) { ++ dev_err(ecc->dev, "dma mapping error\n"); ++ return -EINVAL; ++ } ++ ++ config->op = ECC_ENCODE; ++ config->addr = addr; ++ ret = mtk_ecc_enable(ecc, config); ++ if (ret) { ++ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); ++ return ret; ++ } ++ ++ ret = mtk_ecc_wait_done(ecc, ECC_ENCODE); ++ if (ret) ++ goto timeout; ++ ++ mtk_ecc_wait_idle(ecc, ECC_ENCODE); ++ ++ /* Program ECC bytes to OOB: per sector oob = FDM + ECC + SPARE */ ++ len = (config->strength * ecc->caps->parity_bits + 7) >> 3; ++ ++ /* write the parity bytes generated by the ECC back to temp buffer */ ++ __ioread32_copy(ecc->eccdata, ++ ecc->regs + ecc->caps->ecc_regs[ECC_ENCPAR00], ++ round_up(len, 4)); ++ ++ /* copy into possibly unaligned OOB region with actual length */ ++ memcpy(data + bytes, ecc->eccdata, len); ++timeout: ++ ++ dma_unmap_single(ecc->dev, addr, bytes, DMA_TO_DEVICE); ++ mtk_ecc_disable(ecc); ++ ++ return ret; ++} ++EXPORT_SYMBOL(mtk_ecc_encode); ++ ++void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p) ++{ ++ const u8 *ecc_strength = ecc->caps->ecc_strength; ++ int i; ++ ++ for (i = 0; i < ecc->caps->num_ecc_strength; i++) { ++ if (*p <= ecc_strength[i]) { ++ if (!i) ++ *p = ecc_strength[i]; ++ else if (*p != ecc_strength[i]) ++ *p = ecc_strength[i - 1]; ++ return; ++ } ++ } ++ ++ *p = ecc_strength[ecc->caps->num_ecc_strength - 1]; ++} ++EXPORT_SYMBOL(mtk_ecc_adjust_strength); ++ ++unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc) ++{ ++ return ecc->caps->parity_bits; ++} ++EXPORT_SYMBOL(mtk_ecc_get_parity_bits); ++ ++static const struct mtk_ecc_caps mtk_ecc_caps_mt2701 = { ++ .err_mask = 0x3f, ++ .err_shift = 8, ++ .ecc_strength = ecc_strength_mt2701, ++ .ecc_regs = mt2701_ecc_regs, ++ .num_ecc_strength = 20, ++ .ecc_mode_shift = 5, ++ .parity_bits = 14, ++ .pg_irq_sel = 0, ++}; ++ ++static const struct mtk_ecc_caps mtk_ecc_caps_mt2712 = { ++ .err_mask = 0x7f, ++ .err_shift = 8, ++ .ecc_strength = ecc_strength_mt2712, ++ .ecc_regs = mt2712_ecc_regs, ++ .num_ecc_strength = 23, ++ .ecc_mode_shift = 5, ++ .parity_bits = 14, ++ .pg_irq_sel = 1, ++}; ++ ++static const struct mtk_ecc_caps mtk_ecc_caps_mt7622 = { ++ .err_mask = 0x1f, ++ .err_shift = 5, ++ .ecc_strength = ecc_strength_mt7622, ++ .ecc_regs = mt7622_ecc_regs, ++ .num_ecc_strength = 5, ++ .ecc_mode_shift = 4, ++ .parity_bits = 13, ++ .pg_irq_sel = 0, ++}; ++ ++static const struct of_device_id mtk_ecc_dt_match[] = { ++ { ++ .compatible = "mediatek,mt2701-ecc", ++ .data = &mtk_ecc_caps_mt2701, ++ }, { ++ .compatible = "mediatek,mt2712-ecc", ++ .data = &mtk_ecc_caps_mt2712, ++ }, { ++ .compatible = "mediatek,mt7622-ecc", ++ .data = &mtk_ecc_caps_mt7622, ++ }, ++ {}, ++}; ++ ++static int mtk_ecc_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct mtk_ecc *ecc; ++ struct resource *res; ++ u32 max_eccdata_size; ++ int irq, ret; ++ ++ ecc = devm_kzalloc(dev, sizeof(*ecc), GFP_KERNEL); ++ if (!ecc) ++ return -ENOMEM; ++ ++ ecc->caps = of_device_get_match_data(dev); ++ ++ max_eccdata_size = ecc->caps->num_ecc_strength - 1; ++ max_eccdata_size = ecc->caps->ecc_strength[max_eccdata_size]; ++ max_eccdata_size = 
(max_eccdata_size * ecc->caps->parity_bits + 7) >> 3; ++ max_eccdata_size = round_up(max_eccdata_size, 4); ++ ecc->eccdata = devm_kzalloc(dev, max_eccdata_size, GFP_KERNEL); ++ if (!ecc->eccdata) ++ return -ENOMEM; ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ecc->regs = devm_ioremap_resource(dev, res); ++ if (IS_ERR(ecc->regs)) ++ return PTR_ERR(ecc->regs); ++ ++ ecc->clk = devm_clk_get(dev, NULL); ++ if (IS_ERR(ecc->clk)) { ++ dev_err(dev, "failed to get clock: %ld\n", PTR_ERR(ecc->clk)); ++ return PTR_ERR(ecc->clk); ++ } ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq < 0) ++ return irq; ++ ++ ret = dma_set_mask(dev, DMA_BIT_MASK(32)); ++ if (ret) { ++ dev_err(dev, "failed to set DMA mask\n"); ++ return ret; ++ } ++ ++ ret = devm_request_irq(dev, irq, mtk_ecc_irq, 0x0, "mtk-ecc", ecc); ++ if (ret) { ++ dev_err(dev, "failed to request irq\n"); ++ return -EINVAL; ++ } ++ ++ ecc->dev = dev; ++ mutex_init(&ecc->lock); ++ platform_set_drvdata(pdev, ecc); ++ dev_info(dev, "probed\n"); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++static int mtk_ecc_suspend(struct device *dev) ++{ ++ struct mtk_ecc *ecc = dev_get_drvdata(dev); ++ ++ clk_disable_unprepare(ecc->clk); ++ ++ return 0; ++} ++ ++static int mtk_ecc_resume(struct device *dev) ++{ ++ struct mtk_ecc *ecc = dev_get_drvdata(dev); ++ int ret; ++ ++ ret = clk_prepare_enable(ecc->clk); ++ if (ret) { ++ dev_err(dev, "failed to enable clk\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static SIMPLE_DEV_PM_OPS(mtk_ecc_pm_ops, mtk_ecc_suspend, mtk_ecc_resume); ++#endif ++ ++MODULE_DEVICE_TABLE(of, mtk_ecc_dt_match); ++ ++static struct platform_driver mtk_ecc_driver = { ++ .probe = mtk_ecc_probe, ++ .driver = { ++ .name = "mtk-ecc", ++ .of_match_table = of_match_ptr(mtk_ecc_dt_match), ++#ifdef CONFIG_PM_SLEEP ++ .pm = &mtk_ecc_pm_ops, ++#endif ++ }, ++}; ++ ++module_platform_driver(mtk_ecc_driver); ++ ++MODULE_AUTHOR("Xiaolei Li "); ++MODULE_DESCRIPTION("MTK Nand ECC Driver"); ++MODULE_LICENSE("Dual MIT/GPL"); +--- a/drivers/mtd/nand/raw/Kconfig ++++ b/drivers/mtd/nand/raw/Kconfig +@@ -360,6 +360,7 @@ config MTD_NAND_QCOM + + config MTD_NAND_MTK + tristate "MTK NAND controller" ++ depends on MTD_NAND_ECC_MEDIATEK + depends on ARCH_MEDIATEK || COMPILE_TEST + depends on HAS_IOMEM + help +--- a/drivers/mtd/nand/raw/Makefile ++++ b/drivers/mtd/nand/raw/Makefile +@@ -48,7 +48,7 @@ obj-$(CONFIG_MTD_NAND_SUNXI) += sunxi_n + obj-$(CONFIG_MTD_NAND_HISI504) += hisi504_nand.o + obj-$(CONFIG_MTD_NAND_BRCMNAND) += brcmnand/ + obj-$(CONFIG_MTD_NAND_QCOM) += qcom_nandc.o +-obj-$(CONFIG_MTD_NAND_MTK) += mtk_ecc.o mtk_nand.o ++obj-$(CONFIG_MTD_NAND_MTK) += mtk_nand.o + obj-$(CONFIG_MTD_NAND_MXIC) += mxic_nand.o + obj-$(CONFIG_MTD_NAND_TEGRA) += tegra_nand.o + obj-$(CONFIG_MTD_NAND_STM32_FMC2) += stm32_fmc2_nand.o +--- a/drivers/mtd/nand/raw/mtk_nand.c ++++ b/drivers/mtd/nand/raw/mtk_nand.c +@@ -17,7 +17,7 @@ + #include + #include + #include +-#include "mtk_ecc.h" ++#include + + /* NAND controller register definition */ + #define NFI_CNFG (0x00) +--- a/drivers/mtd/nand/raw/mtk_ecc.h ++++ /dev/null +@@ -1,47 +0,0 @@ +-/* SPDX-License-Identifier: GPL-2.0 OR MIT */ +-/* +- * MTK SDG1 ECC controller +- * +- * Copyright (c) 2016 Mediatek +- * Authors: Xiaolei Li +- * Jorge Ramirez-Ortiz +- */ +- +-#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ +-#define __DRIVERS_MTD_NAND_MTK_ECC_H__ +- +-#include +- +-enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; +-enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; +- +-struct device_node; +-struct mtk_ecc; 
+- +-struct mtk_ecc_stats { +- u32 corrected; +- u32 bitflips; +- u32 failed; +-}; +- +-struct mtk_ecc_config { +- enum mtk_ecc_operation op; +- enum mtk_ecc_mode mode; +- dma_addr_t addr; +- u32 strength; +- u32 sectors; +- u32 len; +-}; +- +-int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); +-void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); +-int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); +-int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); +-void mtk_ecc_disable(struct mtk_ecc *); +-void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p); +-unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc); +- +-struct mtk_ecc *of_mtk_ecc_get(struct device_node *); +-void mtk_ecc_release(struct mtk_ecc *); +- +-#endif +--- /dev/null ++++ b/include/linux/mtd/nand-ecc-mtk.h +@@ -0,0 +1,47 @@ ++/* SPDX-License-Identifier: GPL-2.0 OR MIT */ ++/* ++ * MTK SDG1 ECC controller ++ * ++ * Copyright (c) 2016 Mediatek ++ * Authors: Xiaolei Li ++ * Jorge Ramirez-Ortiz ++ */ ++ ++#ifndef __DRIVERS_MTD_NAND_MTK_ECC_H__ ++#define __DRIVERS_MTD_NAND_MTK_ECC_H__ ++ ++#include ++ ++enum mtk_ecc_mode {ECC_DMA_MODE = 0, ECC_NFI_MODE = 1}; ++enum mtk_ecc_operation {ECC_ENCODE, ECC_DECODE}; ++ ++struct device_node; ++struct mtk_ecc; ++ ++struct mtk_ecc_stats { ++ u32 corrected; ++ u32 bitflips; ++ u32 failed; ++}; ++ ++struct mtk_ecc_config { ++ enum mtk_ecc_operation op; ++ enum mtk_ecc_mode mode; ++ dma_addr_t addr; ++ u32 strength; ++ u32 sectors; ++ u32 len; ++}; ++ ++int mtk_ecc_encode(struct mtk_ecc *, struct mtk_ecc_config *, u8 *, u32); ++void mtk_ecc_get_stats(struct mtk_ecc *, struct mtk_ecc_stats *, int); ++int mtk_ecc_wait_done(struct mtk_ecc *, enum mtk_ecc_operation); ++int mtk_ecc_enable(struct mtk_ecc *, struct mtk_ecc_config *); ++void mtk_ecc_disable(struct mtk_ecc *); ++void mtk_ecc_adjust_strength(struct mtk_ecc *ecc, u32 *p); ++unsigned int mtk_ecc_get_parity_bits(struct mtk_ecc *ecc); ++ ++struct mtk_ecc *of_mtk_ecc_get(struct device_node *); ++void mtk_ecc_release(struct mtk_ecc *); ++ ++#endif diff --git a/target/linux/mediatek/patches-5.15/120-12-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch b/target/linux/mediatek/patches-5.15/120-12-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch deleted file mode 100644 index b77b4ad4c4..0000000000 --- a/target/linux/mediatek/patches-5.15/120-12-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch +++ /dev/null @@ -1,1537 +0,0 @@ -From 8170bafa8936e9fbfdce992932a63bd20eca3bc3 Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Sat, 2 Apr 2022 10:16:11 +0800 -Subject: [PATCH v6 2/5] spi: add driver for MTK SPI NAND Flash Interface - -This driver implements support for the SPI-NAND mode of MTK NAND Flash -Interface as a SPI-MEM controller with pipelined ECC capability. 
- -Signed-off-by: Chuanhong Guo -Tested-by: Daniel Golle ---- -Change since v1: - fix CI warnings - -Changes since v2: - use streamed DMA api to avoid an extra memory copy during read - make ECC engine config a per-nand context - take user-requested ECC strength into account - -Change since v3: none -Changes since v4: - fix missing OOB write - print page format with dev_dbg - replace uint*_t copied from vendor driver with u* - -Changes since v5: - add missing nfi mode register configuration in probe - fix an off-by-one bug in mtk_snand_mac_io - - drivers/spi/Kconfig | 10 + - drivers/spi/Makefile | 1 + - drivers/spi/spi-mtk-snfi.c | 1470 ++++++++++++++++++++++++++++++++++++ - 3 files changed, 1481 insertions(+) - create mode 100644 drivers/spi/spi-mtk-snfi.c - ---- a/drivers/spi/Kconfig -+++ b/drivers/spi/Kconfig -@@ -530,6 +530,16 @@ config SPI_MTK_NOR - SPI interface as well as several SPI NOR specific instructions - via SPI MEM interface. - -+config SPI_MTK_SNFI -+ tristate "MediaTek SPI NAND Flash Interface" -+ depends on ARCH_MEDIATEK || COMPILE_TEST -+ depends on MTD_NAND_ECC_MEDIATEK -+ help -+ This enables support for SPI-NAND mode on the MediaTek NAND -+ Flash Interface found on MediaTek ARM SoCs. This controller -+ is implemented as a SPI-MEM controller with pipelined ECC -+ capcability. -+ - config SPI_NPCM_FIU - tristate "Nuvoton NPCM FLASH Interface Unit" - depends on ARCH_NPCM || COMPILE_TEST ---- a/drivers/spi/Makefile -+++ b/drivers/spi/Makefile -@@ -71,6 +71,7 @@ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52x - obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o - obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o - obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o -+obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o - obj-$(CONFIG_SPI_MXIC) += spi-mxic.o - obj-$(CONFIG_SPI_MXS) += spi-mxs.o - obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o ---- /dev/null -+++ b/drivers/spi/spi-mtk-snfi.c -@@ -0,0 +1,1470 @@ -+// SPDX-License-Identifier: GPL-2.0 -+// -+// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface -+// -+// Copyright (c) 2022 Chuanhong Guo -+// -+// This driver is based on the SPI-NAND mtd driver from Mediatek SDK: -+// -+// Copyright (C) 2020 MediaTek Inc. -+// Author: Weijie Gao -+// -+// This controller organize the page data as several interleaved sectors -+// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size) -+// +---------+------+------+---------+------+------+-----+ -+// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... | -+// +---------+------+------+---------+------+------+-----+ -+// With auto-format turned on, DMA only returns this part: -+// +---------+---------+-----+ -+// | Sector1 | Sector2 | ... | -+// +---------+---------+-----+ -+// The FDM data will be filled to the registers, and ECC parity data isn't -+// accessible. -+// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA -+// in it's original order shown in the first table. ECC can't be turned on when -+// auto-format is off. -+// -+// However, Linux SPI-NAND driver expects the data returned as: -+// +------+-----+ -+// | Page | OOB | -+// +------+-----+ -+// where the page data is continuously stored instead of interleaved. -+// So we assume all instructions matching the page_op template between ECC -+// prepare_io_req and finish_io_req are for page cache r/w. -+// Here's how this spi-mem driver operates when reading: -+// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off). -+// 2. 
Perform page ops and let the controller fill the DMA bounce buffer with
-+// de-interleaved sector data and set FDM registers.
-+// 3. Return the data as:
-+// +---------+---------+-----+------+------+-----+
-+// | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
-+// +---------+---------+-----+------+------+-----+
-+// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
-+// read the data with auto-format off into the bounce buffer and copy
-+// needed data to the buffer specified in the request.
-+//
-+// Write requests operate in a similar manner.
-+// As a limitation of this strategy, we won't be able to access any ECC parity
-+// data at all in Linux.
-+//
-+// Here's the bad block mark situation on MTK chips:
-+// In older chips like mt7622, MTK uses the first FDM byte in the first sector
-+// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
-+// in the returned data, which is the BBM position expected by the kernel.
-+// However, the conventional bad block mark is the first byte of the OOB, which
-+// is part of the last sector data in the interleaved layout. Instead of fixing
-+// their hardware, MTK decided to address this inconsistency in software. On
-+// these later chips, the BootROM expects the following:
-+// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
-+// (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
-+// 2. The original byte stored at that position in the DMA buffer will be stored
-+// as the first byte of the FDM section in the last sector.
-+// We can't disagree with the BootROM, so after de-interleaving, we need to
-+// perform the following swaps in read:
-+// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
-+// which is the BBM position expected by the kernel.
-+// 2. Store the page data byte at [pagesize + (nsectors-1) * fdm] back to
-+// [page_size - (nsectors - 1) * spare_size].
-+// Similarly, when writing, we need to perform swaps in the other direction.
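-+//
-+// For illustration (assumed geometry, not a hardware requirement): with a
-+// 2048-byte page, 4 sectors, a 16-byte spare area and an 8-byte FDM, the
-+// two offsets swapped on read are 2048 - 3 * 16 = 2000 (where the raw-page
-+// BBM lands in the de-interleaved buffer) and 2048 + 3 * 8 = 2072 (the
-+// first FDM byte of the last sector in the linearized page + OOB layout).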
-+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+// NFI registers -+#define NFI_CNFG 0x000 -+#define CNFG_OP_MODE_S 12 -+#define CNFG_OP_MODE_CUST 6 -+#define CNFG_OP_MODE_PROGRAM 3 -+#define CNFG_AUTO_FMT_EN BIT(9) -+#define CNFG_HW_ECC_EN BIT(8) -+#define CNFG_DMA_BURST_EN BIT(2) -+#define CNFG_READ_MODE BIT(1) -+#define CNFG_DMA_MODE BIT(0) -+ -+#define NFI_PAGEFMT 0x0004 -+#define NFI_SPARE_SIZE_LS_S 16 -+#define NFI_FDM_ECC_NUM_S 12 -+#define NFI_FDM_NUM_S 8 -+#define NFI_SPARE_SIZE_S 4 -+#define NFI_SEC_SEL_512 BIT(2) -+#define NFI_PAGE_SIZE_S 0 -+#define NFI_PAGE_SIZE_512_2K 0 -+#define NFI_PAGE_SIZE_2K_4K 1 -+#define NFI_PAGE_SIZE_4K_8K 2 -+#define NFI_PAGE_SIZE_8K_16K 3 -+ -+#define NFI_CON 0x008 -+#define CON_SEC_NUM_S 12 -+#define CON_BWR BIT(9) -+#define CON_BRD BIT(8) -+#define CON_NFI_RST BIT(1) -+#define CON_FIFO_FLUSH BIT(0) -+ -+#define NFI_INTR_EN 0x010 -+#define NFI_INTR_STA 0x014 -+#define NFI_IRQ_INTR_EN BIT(31) -+#define NFI_IRQ_CUS_READ BIT(8) -+#define NFI_IRQ_CUS_PG BIT(7) -+ -+#define NFI_CMD 0x020 -+#define NFI_CMD_DUMMY_READ 0x00 -+#define NFI_CMD_DUMMY_WRITE 0x80 -+ -+#define NFI_STRDATA 0x040 -+#define STR_DATA BIT(0) -+ -+#define NFI_STA 0x060 -+#define NFI_NAND_FSM GENMASK(28, 24) -+#define NFI_FSM GENMASK(19, 16) -+#define READ_EMPTY BIT(12) -+ -+#define NFI_FIFOSTA 0x064 -+#define FIFO_WR_REMAIN_S 8 -+#define FIFO_RD_REMAIN_S 0 -+ -+#define NFI_ADDRCNTR 0x070 -+#define SEC_CNTR GENMASK(16, 12) -+#define SEC_CNTR_S 12 -+#define NFI_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S) -+ -+#define NFI_STRADDR 0x080 -+ -+#define NFI_BYTELEN 0x084 -+#define BUS_SEC_CNTR(val) (((val)&SEC_CNTR) >> SEC_CNTR_S) -+ -+#define NFI_FDM0L 0x0a0 -+#define NFI_FDM0M 0x0a4 -+#define NFI_FDML(n) (NFI_FDM0L + (n)*8) -+#define NFI_FDMM(n) (NFI_FDM0M + (n)*8) -+ -+#define NFI_DEBUG_CON1 0x220 -+#define WBUF_EN BIT(2) -+ -+#define NFI_MASTERSTA 0x224 -+#define MAS_ADDR GENMASK(11, 9) -+#define MAS_RD GENMASK(8, 6) -+#define MAS_WR GENMASK(5, 3) -+#define MAS_RDDLY GENMASK(2, 0) -+#define NFI_MASTERSTA_MASK_7622 (MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY) -+ -+// SNFI registers -+#define SNF_MAC_CTL 0x500 -+#define MAC_XIO_SEL BIT(4) -+#define SF_MAC_EN BIT(3) -+#define SF_TRIG BIT(2) -+#define WIP_READY BIT(1) -+#define WIP BIT(0) -+ -+#define SNF_MAC_OUTL 0x504 -+#define SNF_MAC_INL 0x508 -+ -+#define SNF_RD_CTL2 0x510 -+#define DATA_READ_DUMMY_S 8 -+#define DATA_READ_MAX_DUMMY 0xf -+#define DATA_READ_CMD_S 0 -+ -+#define SNF_RD_CTL3 0x514 -+ -+#define SNF_PG_CTL1 0x524 -+#define PG_LOAD_CMD_S 8 -+ -+#define SNF_PG_CTL2 0x528 -+ -+#define SNF_MISC_CTL 0x538 -+#define SW_RST BIT(28) -+#define FIFO_RD_LTC_S 25 -+#define PG_LOAD_X4_EN BIT(20) -+#define DATA_READ_MODE_S 16 -+#define DATA_READ_MODE GENMASK(18, 16) -+#define DATA_READ_MODE_X1 0 -+#define DATA_READ_MODE_X2 1 -+#define DATA_READ_MODE_X4 2 -+#define DATA_READ_MODE_DUAL 5 -+#define DATA_READ_MODE_QUAD 6 -+#define PG_LOAD_CUSTOM_EN BIT(7) -+#define DATARD_CUSTOM_EN BIT(6) -+#define CS_DESELECT_CYC_S 0 -+ -+#define SNF_MISC_CTL2 0x53c -+#define PROGRAM_LOAD_BYTE_NUM_S 16 -+#define READ_DATA_BYTE_NUM_S 11 -+ -+#define SNF_DLY_CTL3 0x548 -+#define SFCK_SAM_DLY_S 0 -+ -+#define SNF_STA_CTL1 0x550 -+#define CUS_PG_DONE BIT(28) -+#define CUS_READ_DONE BIT(27) -+#define SPI_STATE_S 0 -+#define SPI_STATE GENMASK(3, 0) -+ -+#define SNF_CFG 0x55c -+#define SPI_MODE BIT(0) -+ -+#define SNF_GPRAM 0x800 -+#define SNF_GPRAM_SIZE 
0xa0 -+ -+#define SNFI_POLL_INTERVAL 1000000 -+ -+static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 }; -+ -+struct mtk_snand_caps { -+ u16 sector_size; -+ u16 max_sectors; -+ u16 fdm_size; -+ u16 fdm_ecc_size; -+ u16 fifo_size; -+ -+ bool bbm_swap; -+ bool empty_page_check; -+ u32 mastersta_mask; -+ -+ const u8 *spare_sizes; -+ u32 num_spare_size; -+}; -+ -+static const struct mtk_snand_caps mt7622_snand_caps = { -+ .sector_size = 512, -+ .max_sectors = 8, -+ .fdm_size = 8, -+ .fdm_ecc_size = 1, -+ .fifo_size = 32, -+ .bbm_swap = false, -+ .empty_page_check = false, -+ .mastersta_mask = NFI_MASTERSTA_MASK_7622, -+ .spare_sizes = mt7622_spare_sizes, -+ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes) -+}; -+ -+static const struct mtk_snand_caps mt7629_snand_caps = { -+ .sector_size = 512, -+ .max_sectors = 8, -+ .fdm_size = 8, -+ .fdm_ecc_size = 1, -+ .fifo_size = 32, -+ .bbm_swap = true, -+ .empty_page_check = false, -+ .mastersta_mask = NFI_MASTERSTA_MASK_7622, -+ .spare_sizes = mt7622_spare_sizes, -+ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes) -+}; -+ -+struct mtk_snand_conf { -+ size_t page_size; -+ size_t oob_size; -+ u8 nsectors; -+ u8 spare_size; -+}; -+ -+struct mtk_snand { -+ struct spi_controller *ctlr; -+ struct device *dev; -+ struct clk *nfi_clk; -+ struct clk *pad_clk; -+ void __iomem *nfi_base; -+ int irq; -+ struct completion op_done; -+ const struct mtk_snand_caps *caps; -+ struct mtk_ecc_config *ecc_cfg; -+ struct mtk_ecc *ecc; -+ struct mtk_snand_conf nfi_cfg; -+ struct mtk_ecc_stats ecc_stats; -+ struct nand_ecc_engine ecc_eng; -+ bool autofmt; -+ u8 *buf; -+ size_t buf_len; -+}; -+ -+static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand) -+{ -+ struct nand_ecc_engine *eng = nand->ecc.engine; -+ -+ return container_of(eng, struct mtk_snand, ecc_eng); -+} -+ -+static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size) -+{ -+ if (snf->buf_len >= size) -+ return 0; -+ kfree(snf->buf); -+ snf->buf = kmalloc(size, GFP_KERNEL); -+ if (!snf->buf) -+ return -ENOMEM; -+ snf->buf_len = size; -+ memset(snf->buf, 0xff, snf->buf_len); -+ return 0; -+} -+ -+static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg) -+{ -+ return readl(snf->nfi_base + reg); -+} -+ -+static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val) -+{ -+ writel(val, snf->nfi_base + reg); -+} -+ -+static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val) -+{ -+ writew(val, snf->nfi_base + reg); -+} -+ -+static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set) -+{ -+ u32 val; -+ -+ val = readl(snf->nfi_base + reg); -+ val &= ~clr; -+ val |= set; -+ writel(val, snf->nfi_base + reg); -+} -+ -+static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len) -+{ -+ u32 i, val = 0, es = sizeof(u32); -+ -+ for (i = reg; i < reg + len; i++) { -+ if (i == reg || i % es == 0) -+ val = nfi_read32(snf, i & ~(es - 1)); -+ -+ *data++ = (u8)(val >> (8 * (i % es))); -+ } -+} -+ -+static int mtk_nfi_reset(struct mtk_snand *snf) -+{ -+ u32 val, fifo_mask; -+ int ret; -+ -+ nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); -+ -+ ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val, -+ !(val & snf->caps->mastersta_mask), 0, -+ SNFI_POLL_INTERVAL); -+ if (ret) { -+ dev_err(snf->dev, "NFI master is still busy after reset\n"); -+ return ret; -+ } -+ -+ ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val, -+ !(val & (NFI_FSM | NFI_NAND_FSM)), 0, -+ SNFI_POLL_INTERVAL); -+ if (ret) { -+ dev_err(snf->dev, "Failed 
to reset NFI\n"); -+ return ret; -+ } -+ -+ fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) | -+ ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S); -+ ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val, -+ !(val & fifo_mask), 0, SNFI_POLL_INTERVAL); -+ if (ret) { -+ dev_err(snf->dev, "NFI FIFOs are not empty\n"); -+ return ret; -+ } -+ -+ return 0; -+} -+ -+static int mtk_snand_mac_reset(struct mtk_snand *snf) -+{ -+ int ret; -+ u32 val; -+ -+ nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST); -+ -+ ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val, -+ !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL); -+ if (ret) -+ dev_err(snf->dev, "Failed to reset SNFI MAC\n"); -+ -+ nfi_write32(snf, SNF_MISC_CTL, -+ (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S)); -+ -+ return ret; -+} -+ -+static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen) -+{ -+ int ret; -+ u32 val; -+ -+ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN); -+ nfi_write32(snf, SNF_MAC_OUTL, outlen); -+ nfi_write32(snf, SNF_MAC_INL, inlen); -+ -+ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG); -+ -+ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, -+ val & WIP_READY, 0, SNFI_POLL_INTERVAL); -+ if (ret) { -+ dev_err(snf->dev, "Timed out waiting for WIP_READY\n"); -+ goto cleanup; -+ } -+ -+ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP), -+ 0, SNFI_POLL_INTERVAL); -+ if (ret) -+ dev_err(snf->dev, "Timed out waiting for WIP cleared\n"); -+ -+cleanup: -+ nfi_write32(snf, SNF_MAC_CTL, 0); -+ -+ return ret; -+} -+ -+static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op) -+{ -+ u32 rx_len = 0; -+ u32 reg_offs = 0; -+ u32 val = 0; -+ const u8 *tx_buf = NULL; -+ u8 *rx_buf = NULL; -+ int i, ret; -+ u8 b; -+ -+ if (op->data.dir == SPI_MEM_DATA_IN) { -+ rx_len = op->data.nbytes; -+ rx_buf = op->data.buf.in; -+ } else { -+ tx_buf = op->data.buf.out; -+ } -+ -+ mtk_snand_mac_reset(snf); -+ -+ for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) { -+ b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff; -+ val |= b << (8 * (reg_offs % 4)); -+ if (reg_offs % 4 == 3) { -+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); -+ val = 0; -+ } -+ } -+ -+ for (i = 0; i < op->addr.nbytes; i++, reg_offs++) { -+ b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff; -+ val |= b << (8 * (reg_offs % 4)); -+ if (reg_offs % 4 == 3) { -+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); -+ val = 0; -+ } -+ } -+ -+ for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) { -+ if (reg_offs % 4 == 3) { -+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); -+ val = 0; -+ } -+ } -+ -+ if (op->data.dir == SPI_MEM_DATA_OUT) { -+ for (i = 0; i < op->data.nbytes; i++, reg_offs++) { -+ val |= tx_buf[i] << (8 * (reg_offs % 4)); -+ if (reg_offs % 4 == 3) { -+ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); -+ val = 0; -+ } -+ } -+ } -+ -+ if (reg_offs % 4) -+ nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val); -+ -+ for (i = 0; i < reg_offs; i += 4) -+ dev_dbg(snf->dev, "%d: %08X", i, -+ nfi_read32(snf, SNF_GPRAM + i)); -+ -+ dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len); -+ -+ ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len); -+ if (ret) -+ return ret; -+ -+ if (!rx_len) -+ return 0; -+ -+ nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len); -+ return 0; -+} -+ -+static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size, -+ u32 oob_size) -+{ -+ int spare_idx = -1; -+ u32 spare_size, spare_size_shift, pagesize_idx; -+ u32 sector_size_512; -+ u8 
nsectors; -+ int i; -+ -+ // skip if it's already configured as required. -+ if (snf->nfi_cfg.page_size == page_size && -+ snf->nfi_cfg.oob_size == oob_size) -+ return 0; -+ -+ nsectors = page_size / snf->caps->sector_size; -+ if (nsectors > snf->caps->max_sectors) { -+ dev_err(snf->dev, "too many sectors required.\n"); -+ goto err; -+ } -+ -+ if (snf->caps->sector_size == 512) { -+ sector_size_512 = NFI_SEC_SEL_512; -+ spare_size_shift = NFI_SPARE_SIZE_S; -+ } else { -+ sector_size_512 = 0; -+ spare_size_shift = NFI_SPARE_SIZE_LS_S; -+ } -+ -+ switch (page_size) { -+ case SZ_512: -+ pagesize_idx = NFI_PAGE_SIZE_512_2K; -+ break; -+ case SZ_2K: -+ if (snf->caps->sector_size == 512) -+ pagesize_idx = NFI_PAGE_SIZE_2K_4K; -+ else -+ pagesize_idx = NFI_PAGE_SIZE_512_2K; -+ break; -+ case SZ_4K: -+ if (snf->caps->sector_size == 512) -+ pagesize_idx = NFI_PAGE_SIZE_4K_8K; -+ else -+ pagesize_idx = NFI_PAGE_SIZE_2K_4K; -+ break; -+ case SZ_8K: -+ if (snf->caps->sector_size == 512) -+ pagesize_idx = NFI_PAGE_SIZE_8K_16K; -+ else -+ pagesize_idx = NFI_PAGE_SIZE_4K_8K; -+ break; -+ case SZ_16K: -+ pagesize_idx = NFI_PAGE_SIZE_8K_16K; -+ break; -+ default: -+ dev_err(snf->dev, "unsupported page size.\n"); -+ goto err; -+ } -+ -+ spare_size = oob_size / nsectors; -+ // If we're using the 1KB sector size, HW will automatically double the -+ // spare size. We should only use half of the value in this case. -+ if (snf->caps->sector_size == 1024) -+ spare_size /= 2; -+ -+ for (i = snf->caps->num_spare_size - 1; i >= 0; i--) { -+ if (snf->caps->spare_sizes[i] <= spare_size) { -+ spare_size = snf->caps->spare_sizes[i]; -+ if (snf->caps->sector_size == 1024) -+ spare_size *= 2; -+ spare_idx = i; -+ break; -+ } -+ } -+ -+ if (spare_idx < 0) { -+ dev_err(snf->dev, "unsupported spare size: %u\n", spare_size); -+ goto err; -+ } -+ -+ nfi_write32(snf, NFI_PAGEFMT, -+ (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) | -+ (snf->caps->fdm_size << NFI_FDM_NUM_S) | -+ (spare_idx << spare_size_shift) | -+ (pagesize_idx << NFI_PAGE_SIZE_S) | -+ sector_size_512); -+ -+ snf->nfi_cfg.page_size = page_size; -+ snf->nfi_cfg.oob_size = oob_size; -+ snf->nfi_cfg.nsectors = nsectors; -+ snf->nfi_cfg.spare_size = spare_size; -+ -+ dev_dbg(snf->dev, "page format: (%u + %u) * %u\n", -+ snf->caps->sector_size, spare_size, nsectors); -+ return snand_prepare_bouncebuf(snf, page_size + oob_size); -+err: -+ dev_err(snf->dev, "page size %u + %u is not supported\n", page_size, -+ oob_size); -+ return -EOPNOTSUPP; -+} -+ -+static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *oobecc) -+{ -+ // ECC area is not accessible -+ return -ERANGE; -+} -+ -+static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section, -+ struct mtd_oob_region *oobfree) -+{ -+ struct nand_device *nand = mtd_to_nanddev(mtd); -+ struct mtk_snand *ms = nand_to_mtk_snand(nand); -+ -+ if (section >= ms->nfi_cfg.nsectors) -+ return -ERANGE; -+ -+ oobfree->length = ms->caps->fdm_size - 1; -+ oobfree->offset = section * ms->caps->fdm_size + 1; -+ return 0; -+} -+ -+static const struct mtd_ooblayout_ops mtk_snand_ooblayout = { -+ .ecc = mtk_snand_ooblayout_ecc, -+ .free = mtk_snand_ooblayout_free, -+}; -+ -+static int mtk_snand_ecc_init_ctx(struct nand_device *nand) -+{ -+ struct mtk_snand *snf = nand_to_mtk_snand(nand); -+ struct nand_ecc_props *conf = &nand->ecc.ctx.conf; -+ struct nand_ecc_props *reqs = &nand->ecc.requirements; -+ struct nand_ecc_props *user = &nand->ecc.user_conf; -+ struct mtd_info *mtd = 
nanddev_to_mtd(nand); -+ int step_size = 0, strength = 0, desired_correction = 0, steps; -+ bool ecc_user = false; -+ int ret; -+ u32 parity_bits, max_ecc_bytes; -+ struct mtk_ecc_config *ecc_cfg; -+ -+ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize, -+ nand->memorg.oobsize); -+ if (ret) -+ return ret; -+ -+ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); -+ if (!ecc_cfg) -+ return -ENOMEM; -+ -+ nand->ecc.ctx.priv = ecc_cfg; -+ -+ if (user->step_size && user->strength) { -+ step_size = user->step_size; -+ strength = user->strength; -+ ecc_user = true; -+ } else if (reqs->step_size && reqs->strength) { -+ step_size = reqs->step_size; -+ strength = reqs->strength; -+ } -+ -+ if (step_size && strength) { -+ steps = mtd->writesize / step_size; -+ desired_correction = steps * strength; -+ strength = desired_correction / snf->nfi_cfg.nsectors; -+ } -+ -+ ecc_cfg->mode = ECC_NFI_MODE; -+ ecc_cfg->sectors = snf->nfi_cfg.nsectors; -+ ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size; -+ -+ // calculate the max possible strength under current page format -+ parity_bits = mtk_ecc_get_parity_bits(snf->ecc); -+ max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size; -+ ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits; -+ mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength); -+ -+ // if there's a user requested strength, find the minimum strength that -+ // meets the requirement. Otherwise use the maximum strength which is -+ // expected by BootROM. -+ if (ecc_user && strength) { -+ u32 s_next = ecc_cfg->strength - 1; -+ -+ while (1) { -+ mtk_ecc_adjust_strength(snf->ecc, &s_next); -+ if (s_next >= ecc_cfg->strength) -+ break; -+ if (s_next < strength) -+ break; -+ s_next = ecc_cfg->strength - 1; -+ } -+ } -+ -+ mtd_set_ooblayout(mtd, &mtk_snand_ooblayout); -+ -+ conf->step_size = snf->caps->sector_size; -+ conf->strength = ecc_cfg->strength; -+ -+ if (ecc_cfg->strength < strength) -+ dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n", -+ strength); -+ dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n", -+ ecc_cfg->strength, snf->caps->sector_size); -+ -+ return 0; -+} -+ -+static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand) -+{ -+ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand); -+ -+ kfree(ecc_cfg); -+} -+ -+static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand, -+ struct nand_page_io_req *req) -+{ -+ struct mtk_snand *snf = nand_to_mtk_snand(nand); -+ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand); -+ int ret; -+ -+ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize, -+ nand->memorg.oobsize); -+ if (ret) -+ return ret; -+ snf->autofmt = true; -+ snf->ecc_cfg = ecc_cfg; -+ return 0; -+} -+ -+static int mtk_snand_ecc_finish_io_req(struct nand_device *nand, -+ struct nand_page_io_req *req) -+{ -+ struct mtk_snand *snf = nand_to_mtk_snand(nand); -+ struct mtd_info *mtd = nanddev_to_mtd(nand); -+ -+ snf->ecc_cfg = NULL; -+ snf->autofmt = false; -+ if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ)) -+ return 0; -+ -+ if (snf->ecc_stats.failed) -+ mtd->ecc_stats.failed += snf->ecc_stats.failed; -+ mtd->ecc_stats.corrected += snf->ecc_stats.corrected; -+ return snf->ecc_stats.failed ? 
-EBADMSG : snf->ecc_stats.bitflips; -+} -+ -+static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = { -+ .init_ctx = mtk_snand_ecc_init_ctx, -+ .cleanup_ctx = mtk_snand_ecc_cleanup_ctx, -+ .prepare_io_req = mtk_snand_ecc_prepare_io_req, -+ .finish_io_req = mtk_snand_ecc_finish_io_req, -+}; -+ -+static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf) -+{ -+ u32 vall, valm; -+ u8 *oobptr = buf; -+ int i, j; -+ -+ for (i = 0; i < snf->nfi_cfg.nsectors; i++) { -+ vall = nfi_read32(snf, NFI_FDML(i)); -+ valm = nfi_read32(snf, NFI_FDMM(i)); -+ -+ for (j = 0; j < snf->caps->fdm_size; j++) -+ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8); -+ -+ oobptr += snf->caps->fdm_size; -+ } -+} -+ -+static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf) -+{ -+ u32 fdm_size = snf->caps->fdm_size; -+ const u8 *oobptr = buf; -+ u32 vall, valm; -+ int i, j; -+ -+ for (i = 0; i < snf->nfi_cfg.nsectors; i++) { -+ vall = 0; -+ valm = 0; -+ -+ for (j = 0; j < 8; j++) { -+ if (j < 4) -+ vall |= (j < fdm_size ? oobptr[j] : 0xff) -+ << (j * 8); -+ else -+ valm |= (j < fdm_size ? oobptr[j] : 0xff) -+ << ((j - 4) * 8); -+ } -+ -+ nfi_write32(snf, NFI_FDML(i), vall); -+ nfi_write32(snf, NFI_FDMM(i), valm); -+ -+ oobptr += fdm_size; -+ } -+} -+ -+static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf) -+{ -+ u32 buf_bbm_pos, fdm_bbm_pos; -+ -+ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) -+ return; -+ -+ // swap [pagesize] byte on nand with the first fdm byte -+ // in the last sector. -+ buf_bbm_pos = snf->nfi_cfg.page_size - -+ (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size; -+ fdm_bbm_pos = snf->nfi_cfg.page_size + -+ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size; -+ -+ swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]); -+} -+ -+static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf) -+{ -+ u32 fdm_bbm_pos1, fdm_bbm_pos2; -+ -+ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) -+ return; -+ -+ // swap the first fdm byte in the first and the last sector. -+ fdm_bbm_pos1 = snf->nfi_cfg.page_size; -+ fdm_bbm_pos2 = snf->nfi_cfg.page_size + -+ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size; -+ swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]); -+} -+ -+static int mtk_snand_read_page_cache(struct mtk_snand *snf, -+ const struct spi_mem_op *op) -+{ -+ u8 *buf = snf->buf; -+ u8 *buf_fdm = buf + snf->nfi_cfg.page_size; -+ // the address part to be sent by the controller -+ u32 op_addr = op->addr.val; -+ // where to start copying data from bounce buffer -+ u32 rd_offset = 0; -+ u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth); -+ u32 op_mode = 0; -+ u32 dma_len = snf->buf_len; -+ int ret = 0; -+ u32 rd_mode, rd_bytes, val; -+ dma_addr_t buf_dma; -+ -+ if (snf->autofmt) { -+ u32 last_bit; -+ u32 mask; -+ -+ dma_len = snf->nfi_cfg.page_size; -+ op_mode = CNFG_AUTO_FMT_EN; -+ if (op->data.ecc) -+ op_mode |= CNFG_HW_ECC_EN; -+ // extract the plane bit: -+ // Find the highest bit set in (pagesize+oobsize). -+ // Bits higher than that in op->addr are kept and sent over SPI -+ // Lower bits are used as an offset for copying data from DMA -+ // bounce buffer. 
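-+		// Worked example (assumed numbers, not from this driver): for
-+		// page_size = 2048 and oob_size = 64, fls(2112) = 12 and
-+		// mask = 0xfff, so op_addr = 0x1000 (a plane bit) yields
-+		// rd_offset = 0 and 0x1000 is sent over SPI unchanged.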
-+ last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size); -+ mask = (1 << last_bit) - 1; -+ rd_offset = op_addr & mask; -+ op_addr &= ~mask; -+ -+ // check if we can dma to the caller memory -+ if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size) -+ buf = op->data.buf.in; -+ } -+ mtk_snand_mac_reset(snf); -+ mtk_nfi_reset(snf); -+ -+ // command and dummy cycles -+ nfi_write32(snf, SNF_RD_CTL2, -+ (dummy_clk << DATA_READ_DUMMY_S) | -+ (op->cmd.opcode << DATA_READ_CMD_S)); -+ -+ // read address -+ nfi_write32(snf, SNF_RD_CTL3, op_addr); -+ -+ // Set read op_mode -+ if (op->data.buswidth == 4) -+ rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD : -+ DATA_READ_MODE_X4; -+ else if (op->data.buswidth == 2) -+ rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL : -+ DATA_READ_MODE_X2; -+ else -+ rd_mode = DATA_READ_MODE_X1; -+ rd_mode <<= DATA_READ_MODE_S; -+ nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE, -+ rd_mode | DATARD_CUSTOM_EN); -+ -+ // Set bytes to read -+ rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) * -+ snf->nfi_cfg.nsectors; -+ nfi_write32(snf, SNF_MISC_CTL2, -+ (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes); -+ -+ // NFI read prepare -+ nfi_write16(snf, NFI_CNFG, -+ (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN | -+ CNFG_READ_MODE | CNFG_DMA_MODE | op_mode); -+ -+ nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S)); -+ -+ buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE); -+ if (dma_mapping_error(snf->dev, buf_dma)) { -+ dev_err(snf->dev, "DMA mapping failed.\n"); -+ goto cleanup; -+ } -+ nfi_write32(snf, NFI_STRADDR, buf_dma); -+ if (op->data.ecc) { -+ snf->ecc_cfg->op = ECC_DECODE; -+ ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg); -+ if (ret) -+ goto cleanup_dma; -+ } -+ // Prepare for custom read interrupt -+ nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ); -+ reinit_completion(&snf->op_done); -+ -+ // Trigger NFI into custom mode -+ nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ); -+ -+ // Start DMA read -+ nfi_rmw32(snf, NFI_CON, 0, CON_BRD); -+ nfi_write16(snf, NFI_STRDATA, STR_DATA); -+ -+ if (!wait_for_completion_timeout( -+ &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) { -+ dev_err(snf->dev, "DMA timed out for reading from cache.\n"); -+ ret = -ETIMEDOUT; -+ goto cleanup; -+ } -+ -+ // Wait for BUS_SEC_CNTR returning expected value -+ ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val, -+ BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0, -+ SNFI_POLL_INTERVAL); -+ if (ret) { -+ dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n"); -+ goto cleanup2; -+ } -+ -+ // Wait for bus becoming idle -+ ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val, -+ !(val & snf->caps->mastersta_mask), 0, -+ SNFI_POLL_INTERVAL); -+ if (ret) { -+ dev_err(snf->dev, "Timed out waiting for bus becoming idle\n"); -+ goto cleanup2; -+ } -+ -+ if (op->data.ecc) { -+ ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE); -+ if (ret) { -+ dev_err(snf->dev, "wait ecc done timeout\n"); -+ goto cleanup2; -+ } -+ // save status before disabling ecc -+ mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats, -+ snf->nfi_cfg.nsectors); -+ } -+ -+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE); -+ -+ if (snf->autofmt) { -+ mtk_snand_read_fdm(snf, buf_fdm); -+ if (snf->caps->bbm_swap) { -+ mtk_snand_bm_swap(snf, buf); -+ mtk_snand_fdm_bm_swap(snf); -+ } -+ } -+ -+ // copy data back -+ if (nfi_read32(snf, NFI_STA) & READ_EMPTY) { -+ memset(op->data.buf.in, 0xff, op->data.nbytes); -+ 
snf->ecc_stats.bitflips = 0; -+ snf->ecc_stats.failed = 0; -+ snf->ecc_stats.corrected = 0; -+ } else { -+ if (buf == op->data.buf.in) { -+ u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size; -+ u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size; -+ -+ if (req_left) -+ memcpy(op->data.buf.in + snf->nfi_cfg.page_size, -+ buf_fdm, -+ cap_len < req_left ? cap_len : req_left); -+ } else if (rd_offset < snf->buf_len) { -+ u32 cap_len = snf->buf_len - rd_offset; -+ -+ if (op->data.nbytes < cap_len) -+ cap_len = op->data.nbytes; -+ memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len); -+ } -+ } -+cleanup2: -+ if (op->data.ecc) -+ mtk_ecc_disable(snf->ecc); -+cleanup_dma: -+ // unmap dma only if any error happens. (otherwise it's done before -+ // data copying) -+ if (ret) -+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE); -+cleanup: -+ // Stop read -+ nfi_write32(snf, NFI_CON, 0); -+ nfi_write16(snf, NFI_CNFG, 0); -+ -+ // Clear SNF done flag -+ nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE); -+ nfi_write32(snf, SNF_STA_CTL1, 0); -+ -+ // Disable interrupt -+ nfi_read32(snf, NFI_INTR_STA); -+ nfi_write32(snf, NFI_INTR_EN, 0); -+ -+ nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0); -+ return ret; -+} -+ -+static int mtk_snand_write_page_cache(struct mtk_snand *snf, -+ const struct spi_mem_op *op) -+{ -+ // the address part to be sent by the controller -+ u32 op_addr = op->addr.val; -+ // where to start copying data from bounce buffer -+ u32 wr_offset = 0; -+ u32 op_mode = 0; -+ int ret = 0; -+ u32 wr_mode = 0; -+ u32 dma_len = snf->buf_len; -+ u32 wr_bytes, val; -+ size_t cap_len; -+ dma_addr_t buf_dma; -+ -+ if (snf->autofmt) { -+ u32 last_bit; -+ u32 mask; -+ -+ dma_len = snf->nfi_cfg.page_size; -+ op_mode = CNFG_AUTO_FMT_EN; -+ if (op->data.ecc) -+ op_mode |= CNFG_HW_ECC_EN; -+ -+ last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size); -+ mask = (1 << last_bit) - 1; -+ wr_offset = op_addr & mask; -+ op_addr &= ~mask; -+ } -+ mtk_snand_mac_reset(snf); -+ mtk_nfi_reset(snf); -+ -+ if (wr_offset) -+ memset(snf->buf, 0xff, wr_offset); -+ -+ cap_len = snf->buf_len - wr_offset; -+ if (op->data.nbytes < cap_len) -+ cap_len = op->data.nbytes; -+ memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len); -+ if (snf->autofmt) { -+ if (snf->caps->bbm_swap) { -+ mtk_snand_fdm_bm_swap(snf); -+ mtk_snand_bm_swap(snf, snf->buf); -+ } -+ mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size); -+ } -+ -+ // Command -+ nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S)); -+ -+ // write address -+ nfi_write32(snf, SNF_PG_CTL2, op_addr); -+ -+ // Set read op_mode -+ if (op->data.buswidth == 4) -+ wr_mode = PG_LOAD_X4_EN; -+ -+ nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN, -+ wr_mode | PG_LOAD_CUSTOM_EN); -+ -+ // Set bytes to write -+ wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) * -+ snf->nfi_cfg.nsectors; -+ nfi_write32(snf, SNF_MISC_CTL2, -+ (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes); -+ -+ // NFI write prepare -+ nfi_write16(snf, NFI_CNFG, -+ (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) | -+ CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode); -+ -+ nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S)); -+ buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE); -+ if (dma_mapping_error(snf->dev, buf_dma)) { -+ dev_err(snf->dev, "DMA mapping failed.\n"); -+ goto cleanup; -+ } -+ nfi_write32(snf, NFI_STRADDR, buf_dma); -+ if (op->data.ecc) { -+ snf->ecc_cfg->op = ECC_ENCODE; -+ ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg); -+ 
if (ret) -+ goto cleanup_dma; -+ } -+ // Prepare for custom write interrupt -+ nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG); -+ reinit_completion(&snf->op_done); -+ ; -+ -+ // Trigger NFI into custom mode -+ nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE); -+ -+ // Start DMA write -+ nfi_rmw32(snf, NFI_CON, 0, CON_BWR); -+ nfi_write16(snf, NFI_STRDATA, STR_DATA); -+ -+ if (!wait_for_completion_timeout( -+ &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) { -+ dev_err(snf->dev, "DMA timed out for program load.\n"); -+ ret = -ETIMEDOUT; -+ goto cleanup_ecc; -+ } -+ -+ // Wait for NFI_SEC_CNTR returning expected value -+ ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val, -+ NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0, -+ SNFI_POLL_INTERVAL); -+ if (ret) -+ dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n"); -+ -+cleanup_ecc: -+ if (op->data.ecc) -+ mtk_ecc_disable(snf->ecc); -+cleanup_dma: -+ dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE); -+cleanup: -+ // Stop write -+ nfi_write32(snf, NFI_CON, 0); -+ nfi_write16(snf, NFI_CNFG, 0); -+ -+ // Clear SNF done flag -+ nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE); -+ nfi_write32(snf, SNF_STA_CTL1, 0); -+ -+ // Disable interrupt -+ nfi_read32(snf, NFI_INTR_STA); -+ nfi_write32(snf, NFI_INTR_EN, 0); -+ -+ nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0); -+ -+ return ret; -+} -+ -+/** -+ * mtk_snand_is_page_ops() - check if the op is a controller supported page op. -+ * @op spi-mem op to check -+ * -+ * Check whether op can be executed with read_from_cache or program_load -+ * mode in the controller. -+ * This controller can execute typical Read From Cache and Program Load -+ * instructions found on SPI-NAND with 2-byte address. -+ * DTR and cmd buswidth & nbytes should be checked before calling this. 
-+ * -+ * Return: true if the op matches the instruction template -+ */ -+static bool mtk_snand_is_page_ops(const struct spi_mem_op *op) -+{ -+ if (op->addr.nbytes != 2) -+ return false; -+ -+ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && -+ op->addr.buswidth != 4) -+ return false; -+ -+ // match read from page instructions -+ if (op->data.dir == SPI_MEM_DATA_IN) { -+ // check dummy cycle first -+ if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth > -+ DATA_READ_MAX_DUMMY) -+ return false; -+ // quad io / quad out -+ if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) && -+ op->data.buswidth == 4) -+ return true; -+ -+ // dual io / dual out -+ if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) && -+ op->data.buswidth == 2) -+ return true; -+ -+ // standard spi -+ if (op->addr.buswidth == 1 && op->data.buswidth == 1) -+ return true; -+ } else if (op->data.dir == SPI_MEM_DATA_OUT) { -+ // check dummy cycle first -+ if (op->dummy.nbytes) -+ return false; -+ // program load quad out -+ if (op->addr.buswidth == 1 && op->data.buswidth == 4) -+ return true; -+ // standard spi -+ if (op->addr.buswidth == 1 && op->data.buswidth == 1) -+ return true; -+ } -+ return false; -+} -+ -+static bool mtk_snand_supports_op(struct spi_mem *mem, -+ const struct spi_mem_op *op) -+{ -+ if (!spi_mem_default_supports_op(mem, op)) -+ return false; -+ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1) -+ return false; -+ if (mtk_snand_is_page_ops(op)) -+ return true; -+ return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) && -+ (op->dummy.nbytes == 0 || op->dummy.buswidth == 1) && -+ (op->data.nbytes == 0 || op->data.buswidth == 1)); -+} -+ -+static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) -+{ -+ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master); -+ // page ops transfer size must be exactly ((sector_size + spare_size) * -+ // nsectors). Limit the op size if the caller requests more than that. -+ // exec_op will read more than needed and discard the leftover if the -+ // caller requests less data. 
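-+	// For instance (assumed geometry): with 512-byte sectors, a 16-byte
-+	// spare area and 4 sectors, every page op moves (512 + 16) * 4 = 2112
-+	// bytes; a larger request is clamped to 2112 here, while a smaller
-+	// one reads the same 2112 bytes and discards the excess in exec_op.
-+	// Other ops are capped by the 0xa0-byte SNF_GPRAM window minus the
-+	// cmd/addr/dummy header length.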
-+ if (mtk_snand_is_page_ops(op)) { -+ size_t l; -+ // skip adjust_op_size for page ops -+ if (ms->autofmt) -+ return 0; -+ l = ms->caps->sector_size + ms->nfi_cfg.spare_size; -+ l *= ms->nfi_cfg.nsectors; -+ if (op->data.nbytes > l) -+ op->data.nbytes = l; -+ } else { -+ size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; -+ -+ if (hl >= SNF_GPRAM_SIZE) -+ return -EOPNOTSUPP; -+ if (op->data.nbytes > SNF_GPRAM_SIZE - hl) -+ op->data.nbytes = SNF_GPRAM_SIZE - hl; -+ } -+ return 0; -+} -+ -+static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) -+{ -+ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master); -+ -+ dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, -+ op->addr.val, op->addr.buswidth, op->addr.nbytes, -+ op->data.buswidth, op->data.nbytes); -+ if (mtk_snand_is_page_ops(op)) { -+ if (op->data.dir == SPI_MEM_DATA_IN) -+ return mtk_snand_read_page_cache(ms, op); -+ else -+ return mtk_snand_write_page_cache(ms, op); -+ } else { -+ return mtk_snand_mac_io(ms, op); -+ } -+} -+ -+static const struct spi_controller_mem_ops mtk_snand_mem_ops = { -+ .adjust_op_size = mtk_snand_adjust_op_size, -+ .supports_op = mtk_snand_supports_op, -+ .exec_op = mtk_snand_exec_op, -+}; -+ -+static const struct spi_controller_mem_caps mtk_snand_mem_caps = { -+ .ecc = true, -+}; -+ -+static irqreturn_t mtk_snand_irq(int irq, void *id) -+{ -+ struct mtk_snand *snf = id; -+ u32 sta, ien; -+ -+ sta = nfi_read32(snf, NFI_INTR_STA); -+ ien = nfi_read32(snf, NFI_INTR_EN); -+ -+ if (!(sta & ien)) -+ return IRQ_NONE; -+ -+ nfi_write32(snf, NFI_INTR_EN, 0); -+ complete(&snf->op_done); -+ return IRQ_HANDLED; -+} -+ -+static const struct of_device_id mtk_snand_ids[] = { -+ { .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps }, -+ { .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps }, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, mtk_snand_ids); -+ -+static int mtk_snand_enable_clk(struct mtk_snand *ms) -+{ -+ int ret; -+ -+ ret = clk_prepare_enable(ms->nfi_clk); -+ if (ret) { -+ dev_err(ms->dev, "unable to enable nfi clk\n"); -+ return ret; -+ } -+ ret = clk_prepare_enable(ms->pad_clk); -+ if (ret) { -+ dev_err(ms->dev, "unable to enable pad clk\n"); -+ goto err1; -+ } -+ return 0; -+err1: -+ clk_disable_unprepare(ms->nfi_clk); -+ return ret; -+} -+ -+static void mtk_snand_disable_clk(struct mtk_snand *ms) -+{ -+ clk_disable_unprepare(ms->pad_clk); -+ clk_disable_unprepare(ms->nfi_clk); -+} -+ -+static int mtk_snand_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ const struct of_device_id *dev_id; -+ struct spi_controller *ctlr; -+ struct mtk_snand *ms; -+ int ret; -+ -+ dev_id = of_match_node(mtk_snand_ids, np); -+ if (!dev_id) -+ return -EINVAL; -+ -+ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms)); -+ if (!ctlr) -+ return -ENOMEM; -+ platform_set_drvdata(pdev, ctlr); -+ -+ ms = spi_controller_get_devdata(ctlr); -+ -+ ms->ctlr = ctlr; -+ ms->caps = dev_id->data; -+ -+ ms->ecc = of_mtk_ecc_get(np); -+ if (IS_ERR(ms->ecc)) -+ return PTR_ERR(ms->ecc); -+ else if (!ms->ecc) -+ return -ENODEV; -+ -+ ms->nfi_base = devm_platform_ioremap_resource(pdev, 0); -+ if (IS_ERR(ms->nfi_base)) { -+ ret = PTR_ERR(ms->nfi_base); -+ goto release_ecc; -+ } -+ -+ ms->dev = &pdev->dev; -+ -+ ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk"); -+ if (IS_ERR(ms->nfi_clk)) { -+ ret = PTR_ERR(ms->nfi_clk); -+ dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret); -+ goto release_ecc; -+ } 
-+ -+ ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk"); -+ if (IS_ERR(ms->pad_clk)) { -+ ret = PTR_ERR(ms->pad_clk); -+ dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret); -+ goto release_ecc; -+ } -+ -+ ret = mtk_snand_enable_clk(ms); -+ if (ret) -+ goto release_ecc; -+ -+ init_completion(&ms->op_done); -+ -+ ms->irq = platform_get_irq(pdev, 0); -+ if (ms->irq < 0) { -+ ret = ms->irq; -+ goto disable_clk; -+ } -+ ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0, -+ "mtk-snand", ms); -+ if (ret) { -+ dev_err(ms->dev, "failed to request snfi irq\n"); -+ goto disable_clk; -+ } -+ -+ ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32)); -+ if (ret) { -+ dev_err(ms->dev, "failed to set dma mask\n"); -+ goto disable_clk; -+ } -+ -+ // switch to SNFI mode -+ nfi_write32(ms, SNF_CFG, SPI_MODE); -+ -+ // setup an initial page format for ops matching page_cache_op template -+ // before ECC is called. -+ ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size, -+ ms->caps->spare_sizes[0]); -+ if (ret) { -+ dev_err(ms->dev, "failed to set initial page format\n"); -+ goto disable_clk; -+ } -+ -+ // setup ECC engine -+ ms->ecc_eng.dev = &pdev->dev; -+ ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; -+ ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops; -+ ms->ecc_eng.priv = ms; -+ -+ ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng); -+ if (ret) { -+ dev_err(&pdev->dev, "failed to register ecc engine.\n"); -+ goto disable_clk; -+ } -+ -+ ctlr->num_chipselect = 1; -+ ctlr->mem_ops = &mtk_snand_mem_ops; -+ ctlr->mem_caps = &mtk_snand_mem_caps; -+ ctlr->bits_per_word_mask = SPI_BPW_MASK(8); -+ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; -+ ctlr->dev.of_node = pdev->dev.of_node; -+ ret = spi_register_controller(ctlr); -+ if (ret) { -+ dev_err(&pdev->dev, "spi_register_controller failed.\n"); -+ goto disable_clk; -+ } -+ -+ return 0; -+disable_clk: -+ mtk_snand_disable_clk(ms); -+release_ecc: -+ mtk_ecc_release(ms->ecc); -+ return ret; -+} -+ -+static int mtk_snand_remove(struct platform_device *pdev) -+{ -+ struct spi_controller *ctlr = platform_get_drvdata(pdev); -+ struct mtk_snand *ms = spi_controller_get_devdata(ctlr); -+ -+ spi_unregister_controller(ctlr); -+ mtk_snand_disable_clk(ms); -+ mtk_ecc_release(ms->ecc); -+ kfree(ms->buf); -+ return 0; -+} -+ -+static struct platform_driver mtk_snand_driver = { -+ .probe = mtk_snand_probe, -+ .remove = mtk_snand_remove, -+ .driver = { -+ .name = "mtk-snand", -+ .of_match_table = mtk_snand_ids, -+ }, -+}; -+ -+module_platform_driver(mtk_snand_driver); -+ -+MODULE_LICENSE("GPL"); -+MODULE_AUTHOR("Chuanhong Guo "); -+MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver"); diff --git a/target/linux/mediatek/patches-5.15/120-12-v5.19-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch b/target/linux/mediatek/patches-5.15/120-12-v5.19-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch new file mode 100644 index 0000000000..b77b4ad4c4 --- /dev/null +++ b/target/linux/mediatek/patches-5.15/120-12-v5.19-spi-add-driver-for-MTK-SPI-NAND-Flash-Interface.patch @@ -0,0 +1,1537 @@ +From 8170bafa8936e9fbfdce992932a63bd20eca3bc3 Mon Sep 17 00:00:00 2001 +From: Chuanhong Guo +Date: Sat, 2 Apr 2022 10:16:11 +0800 +Subject: [PATCH v6 2/5] spi: add driver for MTK SPI NAND Flash Interface + +This driver implements support for the SPI-NAND mode of MTK NAND Flash +Interface as a SPI-MEM controller with pipelined ECC capability. 
+
+Signed-off-by: Chuanhong Guo
+Tested-by: Daniel Golle
+---
+Change since v1:
+  fix CI warnings
+
+Changes since v2:
+  use streamed DMA api to avoid an extra memory copy during read
+  make ECC engine config a per-nand context
+  take user-requested ECC strength into account
+
+Change since v3: none
+Changes since v4:
+  fix missing OOB write
+  print page format with dev_dbg
+  replace uint*_t copied from vendor driver with u*
+
+Changes since v5:
+  add missing nfi mode register configuration in probe
+  fix an off-by-one bug in mtk_snand_mac_io
+
+ drivers/spi/Kconfig        |   10 +
+ drivers/spi/Makefile       |    1 +
+ drivers/spi/spi-mtk-snfi.c | 1470 ++++++++++++++++++++++++++++++++++++
+ 3 files changed, 1481 insertions(+)
+ create mode 100644 drivers/spi/spi-mtk-snfi.c
+
+--- a/drivers/spi/Kconfig
++++ b/drivers/spi/Kconfig
+@@ -530,6 +530,16 @@ config SPI_MTK_NOR
+ 	  SPI interface as well as several SPI NOR specific instructions
+ 	  via SPI MEM interface.
+ 
++config SPI_MTK_SNFI
++	tristate "MediaTek SPI NAND Flash Interface"
++	depends on ARCH_MEDIATEK || COMPILE_TEST
++	depends on MTD_NAND_ECC_MEDIATEK
++	help
++	  This enables support for SPI-NAND mode on the MediaTek NAND
++	  Flash Interface found on MediaTek ARM SoCs. This controller
++	  is implemented as a SPI-MEM controller with pipelined ECC
++	  capability.
++
+ config SPI_NPCM_FIU
+ 	tristate "Nuvoton NPCM FLASH Interface Unit"
+ 	depends on ARCH_NPCM || COMPILE_TEST
+--- a/drivers/spi/Makefile
++++ b/drivers/spi/Makefile
+@@ -71,6 +71,7 @@ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52x
+ obj-$(CONFIG_SPI_MT65XX)		+= spi-mt65xx.o
+ obj-$(CONFIG_SPI_MT7621)		+= spi-mt7621.o
+ obj-$(CONFIG_SPI_MTK_NOR)		+= spi-mtk-nor.o
++obj-$(CONFIG_SPI_MTK_SNFI)		+= spi-mtk-snfi.o
+ obj-$(CONFIG_SPI_MXIC)			+= spi-mxic.o
+ obj-$(CONFIG_SPI_MXS)			+= spi-mxs.o
+ obj-$(CONFIG_SPI_NPCM_FIU)		+= spi-npcm-fiu.o
+--- /dev/null
++++ b/drivers/spi/spi-mtk-snfi.c
+@@ -0,0 +1,1470 @@
++// SPDX-License-Identifier: GPL-2.0
++//
++// Driver for the SPI-NAND mode of Mediatek NAND Flash Interface
++//
++// Copyright (c) 2022 Chuanhong Guo
++//
++// This driver is based on the SPI-NAND mtd driver from Mediatek SDK:
++//
++//   Copyright (C) 2020 MediaTek Inc.
++//   Author: Weijie Gao
++//
++// This controller organizes the page data as several interleaved sectors
++// like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
++// +---------+------+------+---------+------+------+-----+
++// | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... |
++// +---------+------+------+---------+------+------+-----+
++// With auto-format turned on, DMA only returns this part:
++// +---------+---------+-----+
++// | Sector1 | Sector2 | ... |
++// +---------+---------+-----+
++// The FDM data will be filled to the registers, and ECC parity data isn't
++// accessible.
++// With auto-format off, all ((Sector+FDM+ECC)*nsectors) will be read over DMA
++// in its original order shown in the first table. ECC can't be turned on when
++// auto-format is off.
++//
++// However, Linux SPI-NAND driver expects the data returned as:
++// +------+-----+
++// | Page | OOB |
++// +------+-----+
++// where the page data is continuously stored instead of interleaved.
++// So we assume all instructions matching the page_op template between ECC
++// prepare_io_req and finish_io_req are for page cache r/w.
++// Here's how this spi-mem driver operates when reading:
++// 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
++// 2. Perform page ops and let the controller fill the DMA bounce buffer with
++//    de-interleaved sector data and set FDM registers.
++// 3. Return the data as:
++//    +---------+---------+-----+------+------+-----+
++//    | Sector1 | Sector2 | ... | FDM1 | FDM2 | ... |
++//    +---------+---------+-----+------+------+-----+
++// 4. For other matching spi_mem ops outside a prepare/finish_io_req pair,
++//    read the data with auto-format off into the bounce buffer and copy
++//    needed data to the buffer specified in the request.
++//
++// Write requests operate in a similar manner.
++// As a limitation of this strategy, we won't be able to access any ECC parity
++// data at all in Linux.
++//
++// Here's the bad block mark situation on MTK chips:
++// In older chips like mt7622, MTK uses the first FDM byte in the first sector
++// as the bad block mark. After de-interleaving, this byte appears at [pagesize]
++// in the returned data, which is the BBM position expected by kernel. However,
++// the conventional bad block mark is the first byte of the OOB, which is part
++// of the last sector data in the interleaved layout. Instead of fixing their
++// hardware, MTK decided to address this inconsistency in software. On newer
++// chips, the BootROM expects the following:
++// 1. The [pagesize] byte on a nand page is used as BBM, which will appear at
++//    (page_size - (nsectors - 1) * spare_size) in the DMA buffer.
++// 2. The original byte stored at that position in the DMA buffer will be
++//    stored as the first byte of the FDM section in the last sector.
++// We can't disagree with the BootROM, so after de-interleaving, we need to
++// perform the following swaps in read:
++// 1. Store the BBM at [page_size - (nsectors - 1) * spare_size] to [page_size],
++//    which is the expected BBM position by kernel.
++// 2. Store the page data byte at [pagesize + (nsectors - 1) * fdm] back to
++//    [page_size - (nsectors - 1) * spare_size].
++// Similarly, when writing, we need to perform swaps in the other direction.
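++//
++// A worked example of these positions (illustrative only, not part of the
++// original comment; it assumes a hypothetical 2048 B page with 64 B OOB,
++// i.e. 4 sectors of 512 B with a 16 B spare area each, and an 8 B FDM):
++//   BBM position in the DMA buffer: 2048 - (4 - 1) * 16 = 2000
++//   displaced page byte stored at:  2048 + (4 - 1) * 8  = 2072
++// so a read swaps the bytes at offsets 2000 and 2072 after de-interleaving,
++// and a write swaps them back before the data is loaded.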
++
++#include <linux/kernel.h>
++#include <linux/module.h>
++#include <linux/init.h>
++#include <linux/device.h>
++#include <linux/mutex.h>
++#include <linux/clk.h>
++#include <linux/interrupt.h>
++#include <linux/dma-mapping.h>
++#include <linux/iopoll.h>
++#include <linux/of_platform.h>
++#include <linux/mtd/nand-ecc-mtk.h>
++#include <linux/spi/spi.h>
++#include <linux/spi/spi-mem.h>
++#include <linux/mtd/nand.h>
++
++// NFI registers
++#define NFI_CNFG		0x000
++#define CNFG_OP_MODE_S		12
++#define CNFG_OP_MODE_CUST	6
++#define CNFG_OP_MODE_PROGRAM	3
++#define CNFG_AUTO_FMT_EN	BIT(9)
++#define CNFG_HW_ECC_EN		BIT(8)
++#define CNFG_DMA_BURST_EN	BIT(2)
++#define CNFG_READ_MODE		BIT(1)
++#define CNFG_DMA_MODE		BIT(0)
++
++#define NFI_PAGEFMT		0x0004
++#define NFI_SPARE_SIZE_LS_S	16
++#define NFI_FDM_ECC_NUM_S	12
++#define NFI_FDM_NUM_S		8
++#define NFI_SPARE_SIZE_S	4
++#define NFI_SEC_SEL_512		BIT(2)
++#define NFI_PAGE_SIZE_S		0
++#define NFI_PAGE_SIZE_512_2K	0
++#define NFI_PAGE_SIZE_2K_4K	1
++#define NFI_PAGE_SIZE_4K_8K	2
++#define NFI_PAGE_SIZE_8K_16K	3
++
++#define NFI_CON			0x008
++#define CON_SEC_NUM_S		12
++#define CON_BWR			BIT(9)
++#define CON_BRD			BIT(8)
++#define CON_NFI_RST		BIT(1)
++#define CON_FIFO_FLUSH		BIT(0)
++
++#define NFI_INTR_EN		0x010
++#define NFI_INTR_STA		0x014
++#define NFI_IRQ_INTR_EN		BIT(31)
++#define NFI_IRQ_CUS_READ	BIT(8)
++#define NFI_IRQ_CUS_PG		BIT(7)
++
++#define NFI_CMD			0x020
++#define NFI_CMD_DUMMY_READ	0x00
++#define NFI_CMD_DUMMY_WRITE	0x80
++
++#define NFI_STRDATA		0x040
++#define STR_DATA		BIT(0)
++
++#define NFI_STA			0x060
++#define NFI_NAND_FSM		GENMASK(28, 24)
++#define NFI_FSM			GENMASK(19, 16)
++#define READ_EMPTY		BIT(12)
++
++#define NFI_FIFOSTA		0x064
++#define FIFO_WR_REMAIN_S	8
++#define FIFO_RD_REMAIN_S	0
++
++#define NFI_ADDRCNTR		0x070
++#define SEC_CNTR		GENMASK(16, 12)
++#define SEC_CNTR_S		12
++#define NFI_SEC_CNTR(val)	(((val)&SEC_CNTR) >> SEC_CNTR_S)
++
++#define NFI_STRADDR		0x080
++
++#define NFI_BYTELEN		0x084
++#define BUS_SEC_CNTR(val)	(((val)&SEC_CNTR) >> SEC_CNTR_S)
++
++#define NFI_FDM0L		0x0a0
++#define NFI_FDM0M		0x0a4
++#define NFI_FDML(n)		(NFI_FDM0L + (n)*8)
++#define NFI_FDMM(n)		(NFI_FDM0M + (n)*8)
++
++#define NFI_DEBUG_CON1		0x220
++#define WBUF_EN			BIT(2)
++
++#define NFI_MASTERSTA		0x224
++#define MAS_ADDR		GENMASK(11, 9)
++#define MAS_RD			GENMASK(8, 6)
++#define MAS_WR			GENMASK(5, 3)
++#define MAS_RDDLY		GENMASK(2, 0)
++#define NFI_MASTERSTA_MASK_7622	(MAS_ADDR | MAS_RD | MAS_WR | MAS_RDDLY)
++
++// SNFI registers
++#define SNF_MAC_CTL		0x500
++#define MAC_XIO_SEL		BIT(4)
++#define SF_MAC_EN		BIT(3)
++#define SF_TRIG			BIT(2)
++#define WIP_READY		BIT(1)
++#define WIP			BIT(0)
++
++#define SNF_MAC_OUTL		0x504
++#define SNF_MAC_INL		0x508
++
++#define SNF_RD_CTL2		0x510
++#define DATA_READ_DUMMY_S	8
++#define DATA_READ_MAX_DUMMY	0xf
++#define DATA_READ_CMD_S		0
++
++#define SNF_RD_CTL3		0x514
++
++#define SNF_PG_CTL1		0x524
++#define PG_LOAD_CMD_S		8
++
++#define SNF_PG_CTL2		0x528
++
++#define SNF_MISC_CTL		0x538
++#define SW_RST			BIT(28)
++#define FIFO_RD_LTC_S		25
++#define PG_LOAD_X4_EN		BIT(20)
++#define DATA_READ_MODE_S	16
++#define DATA_READ_MODE		GENMASK(18, 16)
++#define DATA_READ_MODE_X1	0
++#define DATA_READ_MODE_X2	1
++#define DATA_READ_MODE_X4	2
++#define DATA_READ_MODE_DUAL	5
++#define DATA_READ_MODE_QUAD	6
++#define PG_LOAD_CUSTOM_EN	BIT(7)
++#define DATARD_CUSTOM_EN	BIT(6)
++#define CS_DESELECT_CYC_S	0
++
++#define SNF_MISC_CTL2		0x53c
++#define PROGRAM_LOAD_BYTE_NUM_S	16
++#define READ_DATA_BYTE_NUM_S	11
++
++#define SNF_DLY_CTL3		0x548
++#define SFCK_SAM_DLY_S		0
++
++#define SNF_STA_CTL1		0x550
++#define CUS_PG_DONE		BIT(28)
++#define CUS_READ_DONE		BIT(27)
++#define SPI_STATE_S		0
++#define SPI_STATE		GENMASK(3, 0)
++
++#define SNF_CFG			0x55c
++#define SPI_MODE		BIT(0)
++
++#define SNF_GPRAM		0x800
++#define SNF_GPRAM_SIZE
0xa0 ++ ++#define SNFI_POLL_INTERVAL 1000000 ++ ++static const u8 mt7622_spare_sizes[] = { 16, 26, 27, 28 }; ++ ++struct mtk_snand_caps { ++ u16 sector_size; ++ u16 max_sectors; ++ u16 fdm_size; ++ u16 fdm_ecc_size; ++ u16 fifo_size; ++ ++ bool bbm_swap; ++ bool empty_page_check; ++ u32 mastersta_mask; ++ ++ const u8 *spare_sizes; ++ u32 num_spare_size; ++}; ++ ++static const struct mtk_snand_caps mt7622_snand_caps = { ++ .sector_size = 512, ++ .max_sectors = 8, ++ .fdm_size = 8, ++ .fdm_ecc_size = 1, ++ .fifo_size = 32, ++ .bbm_swap = false, ++ .empty_page_check = false, ++ .mastersta_mask = NFI_MASTERSTA_MASK_7622, ++ .spare_sizes = mt7622_spare_sizes, ++ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes) ++}; ++ ++static const struct mtk_snand_caps mt7629_snand_caps = { ++ .sector_size = 512, ++ .max_sectors = 8, ++ .fdm_size = 8, ++ .fdm_ecc_size = 1, ++ .fifo_size = 32, ++ .bbm_swap = true, ++ .empty_page_check = false, ++ .mastersta_mask = NFI_MASTERSTA_MASK_7622, ++ .spare_sizes = mt7622_spare_sizes, ++ .num_spare_size = ARRAY_SIZE(mt7622_spare_sizes) ++}; ++ ++struct mtk_snand_conf { ++ size_t page_size; ++ size_t oob_size; ++ u8 nsectors; ++ u8 spare_size; ++}; ++ ++struct mtk_snand { ++ struct spi_controller *ctlr; ++ struct device *dev; ++ struct clk *nfi_clk; ++ struct clk *pad_clk; ++ void __iomem *nfi_base; ++ int irq; ++ struct completion op_done; ++ const struct mtk_snand_caps *caps; ++ struct mtk_ecc_config *ecc_cfg; ++ struct mtk_ecc *ecc; ++ struct mtk_snand_conf nfi_cfg; ++ struct mtk_ecc_stats ecc_stats; ++ struct nand_ecc_engine ecc_eng; ++ bool autofmt; ++ u8 *buf; ++ size_t buf_len; ++}; ++ ++static struct mtk_snand *nand_to_mtk_snand(struct nand_device *nand) ++{ ++ struct nand_ecc_engine *eng = nand->ecc.engine; ++ ++ return container_of(eng, struct mtk_snand, ecc_eng); ++} ++ ++static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size) ++{ ++ if (snf->buf_len >= size) ++ return 0; ++ kfree(snf->buf); ++ snf->buf = kmalloc(size, GFP_KERNEL); ++ if (!snf->buf) ++ return -ENOMEM; ++ snf->buf_len = size; ++ memset(snf->buf, 0xff, snf->buf_len); ++ return 0; ++} ++ ++static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg) ++{ ++ return readl(snf->nfi_base + reg); ++} ++ ++static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val) ++{ ++ writel(val, snf->nfi_base + reg); ++} ++ ++static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val) ++{ ++ writew(val, snf->nfi_base + reg); ++} ++ ++static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set) ++{ ++ u32 val; ++ ++ val = readl(snf->nfi_base + reg); ++ val &= ~clr; ++ val |= set; ++ writel(val, snf->nfi_base + reg); ++} ++ ++static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len) ++{ ++ u32 i, val = 0, es = sizeof(u32); ++ ++ for (i = reg; i < reg + len; i++) { ++ if (i == reg || i % es == 0) ++ val = nfi_read32(snf, i & ~(es - 1)); ++ ++ *data++ = (u8)(val >> (8 * (i % es))); ++ } ++} ++ ++static int mtk_nfi_reset(struct mtk_snand *snf) ++{ ++ u32 val, fifo_mask; ++ int ret; ++ ++ nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST); ++ ++ ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val, ++ !(val & snf->caps->mastersta_mask), 0, ++ SNFI_POLL_INTERVAL); ++ if (ret) { ++ dev_err(snf->dev, "NFI master is still busy after reset\n"); ++ return ret; ++ } ++ ++ ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val, ++ !(val & (NFI_FSM | NFI_NAND_FSM)), 0, ++ SNFI_POLL_INTERVAL); ++ if (ret) { ++ dev_err(snf->dev, "Failed 
to reset NFI\n"); ++ return ret; ++ } ++ ++ fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) | ++ ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S); ++ ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val, ++ !(val & fifo_mask), 0, SNFI_POLL_INTERVAL); ++ if (ret) { ++ dev_err(snf->dev, "NFI FIFOs are not empty\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int mtk_snand_mac_reset(struct mtk_snand *snf) ++{ ++ int ret; ++ u32 val; ++ ++ nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST); ++ ++ ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val, ++ !(val & SPI_STATE), 0, SNFI_POLL_INTERVAL); ++ if (ret) ++ dev_err(snf->dev, "Failed to reset SNFI MAC\n"); ++ ++ nfi_write32(snf, SNF_MISC_CTL, ++ (2 << FIFO_RD_LTC_S) | (10 << CS_DESELECT_CYC_S)); ++ ++ return ret; ++} ++ ++static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen) ++{ ++ int ret; ++ u32 val; ++ ++ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN); ++ nfi_write32(snf, SNF_MAC_OUTL, outlen); ++ nfi_write32(snf, SNF_MAC_INL, inlen); ++ ++ nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG); ++ ++ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, ++ val & WIP_READY, 0, SNFI_POLL_INTERVAL); ++ if (ret) { ++ dev_err(snf->dev, "Timed out waiting for WIP_READY\n"); ++ goto cleanup; ++ } ++ ++ ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP), ++ 0, SNFI_POLL_INTERVAL); ++ if (ret) ++ dev_err(snf->dev, "Timed out waiting for WIP cleared\n"); ++ ++cleanup: ++ nfi_write32(snf, SNF_MAC_CTL, 0); ++ ++ return ret; ++} ++ ++static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op) ++{ ++ u32 rx_len = 0; ++ u32 reg_offs = 0; ++ u32 val = 0; ++ const u8 *tx_buf = NULL; ++ u8 *rx_buf = NULL; ++ int i, ret; ++ u8 b; ++ ++ if (op->data.dir == SPI_MEM_DATA_IN) { ++ rx_len = op->data.nbytes; ++ rx_buf = op->data.buf.in; ++ } else { ++ tx_buf = op->data.buf.out; ++ } ++ ++ mtk_snand_mac_reset(snf); ++ ++ for (i = 0; i < op->cmd.nbytes; i++, reg_offs++) { ++ b = (op->cmd.opcode >> ((op->cmd.nbytes - i - 1) * 8)) & 0xff; ++ val |= b << (8 * (reg_offs % 4)); ++ if (reg_offs % 4 == 3) { ++ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); ++ val = 0; ++ } ++ } ++ ++ for (i = 0; i < op->addr.nbytes; i++, reg_offs++) { ++ b = (op->addr.val >> ((op->addr.nbytes - i - 1) * 8)) & 0xff; ++ val |= b << (8 * (reg_offs % 4)); ++ if (reg_offs % 4 == 3) { ++ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); ++ val = 0; ++ } ++ } ++ ++ for (i = 0; i < op->dummy.nbytes; i++, reg_offs++) { ++ if (reg_offs % 4 == 3) { ++ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); ++ val = 0; ++ } ++ } ++ ++ if (op->data.dir == SPI_MEM_DATA_OUT) { ++ for (i = 0; i < op->data.nbytes; i++, reg_offs++) { ++ val |= tx_buf[i] << (8 * (reg_offs % 4)); ++ if (reg_offs % 4 == 3) { ++ nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val); ++ val = 0; ++ } ++ } ++ } ++ ++ if (reg_offs % 4) ++ nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val); ++ ++ for (i = 0; i < reg_offs; i += 4) ++ dev_dbg(snf->dev, "%d: %08X", i, ++ nfi_read32(snf, SNF_GPRAM + i)); ++ ++ dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len); ++ ++ ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len); ++ if (ret) ++ return ret; ++ ++ if (!rx_len) ++ return 0; ++ ++ nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len); ++ return 0; ++} ++ ++static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size, ++ u32 oob_size) ++{ ++ int spare_idx = -1; ++ u32 spare_size, spare_size_shift, pagesize_idx; ++ u32 sector_size_512; ++ u8 
nsectors; ++ int i; ++ ++ // skip if it's already configured as required. ++ if (snf->nfi_cfg.page_size == page_size && ++ snf->nfi_cfg.oob_size == oob_size) ++ return 0; ++ ++ nsectors = page_size / snf->caps->sector_size; ++ if (nsectors > snf->caps->max_sectors) { ++ dev_err(snf->dev, "too many sectors required.\n"); ++ goto err; ++ } ++ ++ if (snf->caps->sector_size == 512) { ++ sector_size_512 = NFI_SEC_SEL_512; ++ spare_size_shift = NFI_SPARE_SIZE_S; ++ } else { ++ sector_size_512 = 0; ++ spare_size_shift = NFI_SPARE_SIZE_LS_S; ++ } ++ ++ switch (page_size) { ++ case SZ_512: ++ pagesize_idx = NFI_PAGE_SIZE_512_2K; ++ break; ++ case SZ_2K: ++ if (snf->caps->sector_size == 512) ++ pagesize_idx = NFI_PAGE_SIZE_2K_4K; ++ else ++ pagesize_idx = NFI_PAGE_SIZE_512_2K; ++ break; ++ case SZ_4K: ++ if (snf->caps->sector_size == 512) ++ pagesize_idx = NFI_PAGE_SIZE_4K_8K; ++ else ++ pagesize_idx = NFI_PAGE_SIZE_2K_4K; ++ break; ++ case SZ_8K: ++ if (snf->caps->sector_size == 512) ++ pagesize_idx = NFI_PAGE_SIZE_8K_16K; ++ else ++ pagesize_idx = NFI_PAGE_SIZE_4K_8K; ++ break; ++ case SZ_16K: ++ pagesize_idx = NFI_PAGE_SIZE_8K_16K; ++ break; ++ default: ++ dev_err(snf->dev, "unsupported page size.\n"); ++ goto err; ++ } ++ ++ spare_size = oob_size / nsectors; ++ // If we're using the 1KB sector size, HW will automatically double the ++ // spare size. We should only use half of the value in this case. ++ if (snf->caps->sector_size == 1024) ++ spare_size /= 2; ++ ++ for (i = snf->caps->num_spare_size - 1; i >= 0; i--) { ++ if (snf->caps->spare_sizes[i] <= spare_size) { ++ spare_size = snf->caps->spare_sizes[i]; ++ if (snf->caps->sector_size == 1024) ++ spare_size *= 2; ++ spare_idx = i; ++ break; ++ } ++ } ++ ++ if (spare_idx < 0) { ++ dev_err(snf->dev, "unsupported spare size: %u\n", spare_size); ++ goto err; ++ } ++ ++ nfi_write32(snf, NFI_PAGEFMT, ++ (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) | ++ (snf->caps->fdm_size << NFI_FDM_NUM_S) | ++ (spare_idx << spare_size_shift) | ++ (pagesize_idx << NFI_PAGE_SIZE_S) | ++ sector_size_512); ++ ++ snf->nfi_cfg.page_size = page_size; ++ snf->nfi_cfg.oob_size = oob_size; ++ snf->nfi_cfg.nsectors = nsectors; ++ snf->nfi_cfg.spare_size = spare_size; ++ ++ dev_dbg(snf->dev, "page format: (%u + %u) * %u\n", ++ snf->caps->sector_size, spare_size, nsectors); ++ return snand_prepare_bouncebuf(snf, page_size + oob_size); ++err: ++ dev_err(snf->dev, "page size %u + %u is not supported\n", page_size, ++ oob_size); ++ return -EOPNOTSUPP; ++} ++ ++static int mtk_snand_ooblayout_ecc(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobecc) ++{ ++ // ECC area is not accessible ++ return -ERANGE; ++} ++ ++static int mtk_snand_ooblayout_free(struct mtd_info *mtd, int section, ++ struct mtd_oob_region *oobfree) ++{ ++ struct nand_device *nand = mtd_to_nanddev(mtd); ++ struct mtk_snand *ms = nand_to_mtk_snand(nand); ++ ++ if (section >= ms->nfi_cfg.nsectors) ++ return -ERANGE; ++ ++ oobfree->length = ms->caps->fdm_size - 1; ++ oobfree->offset = section * ms->caps->fdm_size + 1; ++ return 0; ++} ++ ++static const struct mtd_ooblayout_ops mtk_snand_ooblayout = { ++ .ecc = mtk_snand_ooblayout_ecc, ++ .free = mtk_snand_ooblayout_free, ++}; ++ ++static int mtk_snand_ecc_init_ctx(struct nand_device *nand) ++{ ++ struct mtk_snand *snf = nand_to_mtk_snand(nand); ++ struct nand_ecc_props *conf = &nand->ecc.ctx.conf; ++ struct nand_ecc_props *reqs = &nand->ecc.requirements; ++ struct nand_ecc_props *user = &nand->ecc.user_conf; ++ struct mtd_info *mtd = 
nanddev_to_mtd(nand); ++ int step_size = 0, strength = 0, desired_correction = 0, steps; ++ bool ecc_user = false; ++ int ret; ++ u32 parity_bits, max_ecc_bytes; ++ struct mtk_ecc_config *ecc_cfg; ++ ++ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize, ++ nand->memorg.oobsize); ++ if (ret) ++ return ret; ++ ++ ecc_cfg = kzalloc(sizeof(*ecc_cfg), GFP_KERNEL); ++ if (!ecc_cfg) ++ return -ENOMEM; ++ ++ nand->ecc.ctx.priv = ecc_cfg; ++ ++ if (user->step_size && user->strength) { ++ step_size = user->step_size; ++ strength = user->strength; ++ ecc_user = true; ++ } else if (reqs->step_size && reqs->strength) { ++ step_size = reqs->step_size; ++ strength = reqs->strength; ++ } ++ ++ if (step_size && strength) { ++ steps = mtd->writesize / step_size; ++ desired_correction = steps * strength; ++ strength = desired_correction / snf->nfi_cfg.nsectors; ++ } ++ ++ ecc_cfg->mode = ECC_NFI_MODE; ++ ecc_cfg->sectors = snf->nfi_cfg.nsectors; ++ ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size; ++ ++ // calculate the max possible strength under current page format ++ parity_bits = mtk_ecc_get_parity_bits(snf->ecc); ++ max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size; ++ ecc_cfg->strength = max_ecc_bytes * 8 / parity_bits; ++ mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength); ++ ++ // if there's a user requested strength, find the minimum strength that ++ // meets the requirement. Otherwise use the maximum strength which is ++ // expected by BootROM. ++ if (ecc_user && strength) { ++ u32 s_next = ecc_cfg->strength - 1; ++ ++ while (1) { ++ mtk_ecc_adjust_strength(snf->ecc, &s_next); ++ if (s_next >= ecc_cfg->strength) ++ break; ++ if (s_next < strength) ++ break; ++ s_next = ecc_cfg->strength - 1; ++ } ++ } ++ ++ mtd_set_ooblayout(mtd, &mtk_snand_ooblayout); ++ ++ conf->step_size = snf->caps->sector_size; ++ conf->strength = ecc_cfg->strength; ++ ++ if (ecc_cfg->strength < strength) ++ dev_warn(snf->dev, "unable to fulfill ECC of %u bits.\n", ++ strength); ++ dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n", ++ ecc_cfg->strength, snf->caps->sector_size); ++ ++ return 0; ++} ++ ++static void mtk_snand_ecc_cleanup_ctx(struct nand_device *nand) ++{ ++ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand); ++ ++ kfree(ecc_cfg); ++} ++ ++static int mtk_snand_ecc_prepare_io_req(struct nand_device *nand, ++ struct nand_page_io_req *req) ++{ ++ struct mtk_snand *snf = nand_to_mtk_snand(nand); ++ struct mtk_ecc_config *ecc_cfg = nand_to_ecc_ctx(nand); ++ int ret; ++ ++ ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize, ++ nand->memorg.oobsize); ++ if (ret) ++ return ret; ++ snf->autofmt = true; ++ snf->ecc_cfg = ecc_cfg; ++ return 0; ++} ++ ++static int mtk_snand_ecc_finish_io_req(struct nand_device *nand, ++ struct nand_page_io_req *req) ++{ ++ struct mtk_snand *snf = nand_to_mtk_snand(nand); ++ struct mtd_info *mtd = nanddev_to_mtd(nand); ++ ++ snf->ecc_cfg = NULL; ++ snf->autofmt = false; ++ if ((req->mode == MTD_OPS_RAW) || (req->type != NAND_PAGE_READ)) ++ return 0; ++ ++ if (snf->ecc_stats.failed) ++ mtd->ecc_stats.failed += snf->ecc_stats.failed; ++ mtd->ecc_stats.corrected += snf->ecc_stats.corrected; ++ return snf->ecc_stats.failed ? 
-EBADMSG : snf->ecc_stats.bitflips; ++} ++ ++static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_ops = { ++ .init_ctx = mtk_snand_ecc_init_ctx, ++ .cleanup_ctx = mtk_snand_ecc_cleanup_ctx, ++ .prepare_io_req = mtk_snand_ecc_prepare_io_req, ++ .finish_io_req = mtk_snand_ecc_finish_io_req, ++}; ++ ++static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf) ++{ ++ u32 vall, valm; ++ u8 *oobptr = buf; ++ int i, j; ++ ++ for (i = 0; i < snf->nfi_cfg.nsectors; i++) { ++ vall = nfi_read32(snf, NFI_FDML(i)); ++ valm = nfi_read32(snf, NFI_FDMM(i)); ++ ++ for (j = 0; j < snf->caps->fdm_size; j++) ++ oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8); ++ ++ oobptr += snf->caps->fdm_size; ++ } ++} ++ ++static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf) ++{ ++ u32 fdm_size = snf->caps->fdm_size; ++ const u8 *oobptr = buf; ++ u32 vall, valm; ++ int i, j; ++ ++ for (i = 0; i < snf->nfi_cfg.nsectors; i++) { ++ vall = 0; ++ valm = 0; ++ ++ for (j = 0; j < 8; j++) { ++ if (j < 4) ++ vall |= (j < fdm_size ? oobptr[j] : 0xff) ++ << (j * 8); ++ else ++ valm |= (j < fdm_size ? oobptr[j] : 0xff) ++ << ((j - 4) * 8); ++ } ++ ++ nfi_write32(snf, NFI_FDML(i), vall); ++ nfi_write32(snf, NFI_FDMM(i), valm); ++ ++ oobptr += fdm_size; ++ } ++} ++ ++static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf) ++{ ++ u32 buf_bbm_pos, fdm_bbm_pos; ++ ++ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) ++ return; ++ ++ // swap [pagesize] byte on nand with the first fdm byte ++ // in the last sector. ++ buf_bbm_pos = snf->nfi_cfg.page_size - ++ (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size; ++ fdm_bbm_pos = snf->nfi_cfg.page_size + ++ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size; ++ ++ swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]); ++} ++ ++static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf) ++{ ++ u32 fdm_bbm_pos1, fdm_bbm_pos2; ++ ++ if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1) ++ return; ++ ++ // swap the first fdm byte in the first and the last sector. ++ fdm_bbm_pos1 = snf->nfi_cfg.page_size; ++ fdm_bbm_pos2 = snf->nfi_cfg.page_size + ++ (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size; ++ swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]); ++} ++ ++static int mtk_snand_read_page_cache(struct mtk_snand *snf, ++ const struct spi_mem_op *op) ++{ ++ u8 *buf = snf->buf; ++ u8 *buf_fdm = buf + snf->nfi_cfg.page_size; ++ // the address part to be sent by the controller ++ u32 op_addr = op->addr.val; ++ // where to start copying data from bounce buffer ++ u32 rd_offset = 0; ++ u32 dummy_clk = (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth); ++ u32 op_mode = 0; ++ u32 dma_len = snf->buf_len; ++ int ret = 0; ++ u32 rd_mode, rd_bytes, val; ++ dma_addr_t buf_dma; ++ ++ if (snf->autofmt) { ++ u32 last_bit; ++ u32 mask; ++ ++ dma_len = snf->nfi_cfg.page_size; ++ op_mode = CNFG_AUTO_FMT_EN; ++ if (op->data.ecc) ++ op_mode |= CNFG_HW_ECC_EN; ++ // extract the plane bit: ++ // Find the highest bit set in (pagesize+oobsize). ++ // Bits higher than that in op->addr are kept and sent over SPI ++ // Lower bits are used as an offset for copying data from DMA ++ // bounce buffer. 
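++		// For instance (an illustrative example, not part of the
++		// original comment; it assumes the hypothetical 2048 B page
++		// + 64 B OOB format): fls(2048 + 64) = 12, so mask = 0xfff.
++		// An op address of 0x1000 then yields rd_offset = 0, while
++		// bit 12 is kept in op_addr as the plane bit sent over SPI.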
++		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
++		mask = (1 << last_bit) - 1;
++		rd_offset = op_addr & mask;
++		op_addr &= ~mask;
++
++		// check if we can dma to the caller memory
++		if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
++			buf = op->data.buf.in;
++	}
++	mtk_snand_mac_reset(snf);
++	mtk_nfi_reset(snf);
++
++	// command and dummy cycles
++	nfi_write32(snf, SNF_RD_CTL2,
++		    (dummy_clk << DATA_READ_DUMMY_S) |
++			    (op->cmd.opcode << DATA_READ_CMD_S));
++
++	// read address
++	nfi_write32(snf, SNF_RD_CTL3, op_addr);
++
++	// Set read op_mode
++	if (op->data.buswidth == 4)
++		rd_mode = op->addr.buswidth == 4 ? DATA_READ_MODE_QUAD :
++						   DATA_READ_MODE_X4;
++	else if (op->data.buswidth == 2)
++		rd_mode = op->addr.buswidth == 2 ? DATA_READ_MODE_DUAL :
++						   DATA_READ_MODE_X2;
++	else
++		rd_mode = DATA_READ_MODE_X1;
++	rd_mode <<= DATA_READ_MODE_S;
++	nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
++		  rd_mode | DATARD_CUSTOM_EN);
++
++	// Set bytes to read
++	rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
++		   snf->nfi_cfg.nsectors;
++	nfi_write32(snf, SNF_MISC_CTL2,
++		    (rd_bytes << PROGRAM_LOAD_BYTE_NUM_S) | rd_bytes);
++
++	// NFI read prepare
++	nfi_write16(snf, NFI_CNFG,
++		    (CNFG_OP_MODE_CUST << CNFG_OP_MODE_S) | CNFG_DMA_BURST_EN |
++			    CNFG_READ_MODE | CNFG_DMA_MODE | op_mode);
++
++	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
++
++	buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
++	if (dma_mapping_error(snf->dev, buf_dma)) {
++		dev_err(snf->dev, "DMA mapping failed.\n");
++		ret = -ENOMEM;
++		goto cleanup;
++	}
++	nfi_write32(snf, NFI_STRADDR, buf_dma);
++	if (op->data.ecc) {
++		snf->ecc_cfg->op = ECC_DECODE;
++		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
++		if (ret)
++			goto cleanup_dma;
++	}
++	// Prepare for custom read interrupt
++	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
++	reinit_completion(&snf->op_done);
++
++	// Trigger NFI into custom mode
++	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
++
++	// Start DMA read
++	nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
++	nfi_write16(snf, NFI_STRDATA, STR_DATA);
++
++	if (!wait_for_completion_timeout(
++		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
++		dev_err(snf->dev, "DMA timed out for reading from cache.\n");
++		ret = -ETIMEDOUT;
++		goto cleanup;
++	}
++
++	// Wait for BUS_SEC_CNTR returning expected value
++	ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
++				 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
++				 SNFI_POLL_INTERVAL);
++	if (ret) {
++		dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
++		goto cleanup2;
++	}
++
++	// Wait for bus becoming idle
++	ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
++				 !(val & snf->caps->mastersta_mask), 0,
++				 SNFI_POLL_INTERVAL);
++	if (ret) {
++		dev_err(snf->dev, "Timed out waiting for bus becoming idle\n");
++		goto cleanup2;
++	}
++
++	if (op->data.ecc) {
++		ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
++		if (ret) {
++			dev_err(snf->dev, "wait ecc done timeout\n");
++			goto cleanup2;
++		}
++		// save status before disabling ecc
++		mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
++				  snf->nfi_cfg.nsectors);
++	}
++
++	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
++
++	if (snf->autofmt) {
++		mtk_snand_read_fdm(snf, buf_fdm);
++		if (snf->caps->bbm_swap) {
++			mtk_snand_bm_swap(snf, buf);
++			mtk_snand_fdm_bm_swap(snf);
++		}
++	}
++
++	// copy data back
++	if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
++		memset(op->data.buf.in, 0xff, op->data.nbytes);
++		snf->ecc_stats.bitflips = 0;
++		snf->ecc_stats.failed = 0;
++		snf->ecc_stats.corrected = 0;
++	} else {
++		if (buf == op->data.buf.in) {
++			u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
++			u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
++
++			if (req_left)
++				memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
++				       buf_fdm,
++				       cap_len < req_left ? cap_len : req_left);
++		} else if (rd_offset < snf->buf_len) {
++			u32 cap_len = snf->buf_len - rd_offset;
++
++			if (op->data.nbytes < cap_len)
++				cap_len = op->data.nbytes;
++			memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
++		}
++	}
++cleanup2:
++	if (op->data.ecc)
++		mtk_ecc_disable(snf->ecc);
++cleanup_dma:
++	// unmap dma only if any error happens. (otherwise it's done before
++	// data copying)
++	if (ret)
++		dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
++cleanup:
++	// Stop read
++	nfi_write32(snf, NFI_CON, 0);
++	nfi_write16(snf, NFI_CNFG, 0);
++
++	// Clear SNF done flag
++	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
++	nfi_write32(snf, SNF_STA_CTL1, 0);
++
++	// Disable interrupt
++	nfi_read32(snf, NFI_INTR_STA);
++	nfi_write32(snf, NFI_INTR_EN, 0);
++
++	nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
++	return ret;
++}
++
++static int mtk_snand_write_page_cache(struct mtk_snand *snf,
++				      const struct spi_mem_op *op)
++{
++	// the address part to be sent by the controller
++	u32 op_addr = op->addr.val;
++	// where to start copying data from bounce buffer
++	u32 wr_offset = 0;
++	u32 op_mode = 0;
++	int ret = 0;
++	u32 wr_mode = 0;
++	u32 dma_len = snf->buf_len;
++	u32 wr_bytes, val;
++	size_t cap_len;
++	dma_addr_t buf_dma;
++
++	if (snf->autofmt) {
++		u32 last_bit;
++		u32 mask;
++
++		dma_len = snf->nfi_cfg.page_size;
++		op_mode = CNFG_AUTO_FMT_EN;
++		if (op->data.ecc)
++			op_mode |= CNFG_HW_ECC_EN;
++
++		last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
++		mask = (1 << last_bit) - 1;
++		wr_offset = op_addr & mask;
++		op_addr &= ~mask;
++	}
++	mtk_snand_mac_reset(snf);
++	mtk_nfi_reset(snf);
++
++	if (wr_offset)
++		memset(snf->buf, 0xff, wr_offset);
++
++	cap_len = snf->buf_len - wr_offset;
++	if (op->data.nbytes < cap_len)
++		cap_len = op->data.nbytes;
++	memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
++	if (snf->autofmt) {
++		if (snf->caps->bbm_swap) {
++			mtk_snand_fdm_bm_swap(snf);
++			mtk_snand_bm_swap(snf, snf->buf);
++		}
++		mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
++	}
++
++	// Command
++	nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
++
++	// write address
++	nfi_write32(snf, SNF_PG_CTL2, op_addr);
++
++	// Set write op_mode
++	if (op->data.buswidth == 4)
++		wr_mode = PG_LOAD_X4_EN;
++
++	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
++		  wr_mode | PG_LOAD_CUSTOM_EN);
++
++	// Set bytes to write
++	wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
++		   snf->nfi_cfg.nsectors;
++	nfi_write32(snf, SNF_MISC_CTL2,
++		    (wr_bytes << PROGRAM_LOAD_BYTE_NUM_S) | wr_bytes);
++
++	// NFI write prepare
++	nfi_write16(snf, NFI_CNFG,
++		    (CNFG_OP_MODE_PROGRAM << CNFG_OP_MODE_S) |
++			    CNFG_DMA_BURST_EN | CNFG_DMA_MODE | op_mode);
++
++	nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
++	buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
++	if (dma_mapping_error(snf->dev, buf_dma)) {
++		dev_err(snf->dev, "DMA mapping failed.\n");
++		ret = -ENOMEM;
++		goto cleanup;
++	}
++	nfi_write32(snf, NFI_STRADDR, buf_dma);
++	if (op->data.ecc) {
++		snf->ecc_cfg->op = ECC_ENCODE;
++		ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
++		if (ret)
++			goto cleanup_dma;
++	}
++	// Prepare for custom write interrupt
++	nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
++	reinit_completion(&snf->op_done);
++
++	// Trigger NFI into custom mode
++	nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
++
++	// Start DMA write
++	nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
++	nfi_write16(snf, NFI_STRDATA, STR_DATA);
++
++	if (!wait_for_completion_timeout(
++		    &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
++		dev_err(snf->dev, "DMA timed out for program load.\n");
++		ret = -ETIMEDOUT;
++		goto cleanup_ecc;
++	}
++
++	// Wait for NFI_SEC_CNTR returning expected value
++	ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
++				 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
++				 SNFI_POLL_INTERVAL);
++	if (ret)
++		dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
++
++cleanup_ecc:
++	if (op->data.ecc)
++		mtk_ecc_disable(snf->ecc);
++cleanup_dma:
++	dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
++cleanup:
++	// Stop write
++	nfi_write32(snf, NFI_CON, 0);
++	nfi_write16(snf, NFI_CNFG, 0);
++
++	// Clear SNF done flag
++	nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
++	nfi_write32(snf, SNF_STA_CTL1, 0);
++
++	// Disable interrupt
++	nfi_read32(snf, NFI_INTR_STA);
++	nfi_write32(snf, NFI_INTR_EN, 0);
++
++	nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
++
++	return ret;
++}
++
++/**
++ * mtk_snand_is_page_ops() - check if the op is a controller supported page op.
++ * @op: spi-mem op to check
++ *
++ * Check whether op can be executed with read_from_cache or program_load
++ * mode in the controller.
++ * This controller can execute typical Read From Cache and Program Load
++ * instructions found on SPI-NAND with 2-byte address.
++ * DTR and cmd buswidth & nbytes should be checked before calling this.
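++ * An op such as {cmd 0x6b, 2-byte x1 address, 1 dummy byte, quad data in}
++ * is one example that matches this template (purely illustrative; this
++ * sentence is not part of the original kerneldoc).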
++ * ++ * Return: true if the op matches the instruction template ++ */ ++static bool mtk_snand_is_page_ops(const struct spi_mem_op *op) ++{ ++ if (op->addr.nbytes != 2) ++ return false; ++ ++ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 && ++ op->addr.buswidth != 4) ++ return false; ++ ++ // match read from page instructions ++ if (op->data.dir == SPI_MEM_DATA_IN) { ++ // check dummy cycle first ++ if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth > ++ DATA_READ_MAX_DUMMY) ++ return false; ++ // quad io / quad out ++ if ((op->addr.buswidth == 4 || op->addr.buswidth == 1) && ++ op->data.buswidth == 4) ++ return true; ++ ++ // dual io / dual out ++ if ((op->addr.buswidth == 2 || op->addr.buswidth == 1) && ++ op->data.buswidth == 2) ++ return true; ++ ++ // standard spi ++ if (op->addr.buswidth == 1 && op->data.buswidth == 1) ++ return true; ++ } else if (op->data.dir == SPI_MEM_DATA_OUT) { ++ // check dummy cycle first ++ if (op->dummy.nbytes) ++ return false; ++ // program load quad out ++ if (op->addr.buswidth == 1 && op->data.buswidth == 4) ++ return true; ++ // standard spi ++ if (op->addr.buswidth == 1 && op->data.buswidth == 1) ++ return true; ++ } ++ return false; ++} ++ ++static bool mtk_snand_supports_op(struct spi_mem *mem, ++ const struct spi_mem_op *op) ++{ ++ if (!spi_mem_default_supports_op(mem, op)) ++ return false; ++ if (op->cmd.nbytes != 1 || op->cmd.buswidth != 1) ++ return false; ++ if (mtk_snand_is_page_ops(op)) ++ return true; ++ return ((op->addr.nbytes == 0 || op->addr.buswidth == 1) && ++ (op->dummy.nbytes == 0 || op->dummy.buswidth == 1) && ++ (op->data.nbytes == 0 || op->data.buswidth == 1)); ++} ++ ++static int mtk_snand_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op) ++{ ++ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master); ++ // page ops transfer size must be exactly ((sector_size + spare_size) * ++ // nsectors). Limit the op size if the caller requests more than that. ++ // exec_op will read more than needed and discard the leftover if the ++ // caller requests less data. 
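++	// For example (illustrative numbers only, assuming 512 B sectors
++	// with a 16 B spare area and 4 sectors): a page op requesting 4096
++	// bytes would be clamped to (512 + 16) * 4 = 2112 bytes here.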
++ if (mtk_snand_is_page_ops(op)) { ++ size_t l; ++ // skip adjust_op_size for page ops ++ if (ms->autofmt) ++ return 0; ++ l = ms->caps->sector_size + ms->nfi_cfg.spare_size; ++ l *= ms->nfi_cfg.nsectors; ++ if (op->data.nbytes > l) ++ op->data.nbytes = l; ++ } else { ++ size_t hl = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes; ++ ++ if (hl >= SNF_GPRAM_SIZE) ++ return -EOPNOTSUPP; ++ if (op->data.nbytes > SNF_GPRAM_SIZE - hl) ++ op->data.nbytes = SNF_GPRAM_SIZE - hl; ++ } ++ return 0; ++} ++ ++static int mtk_snand_exec_op(struct spi_mem *mem, const struct spi_mem_op *op) ++{ ++ struct mtk_snand *ms = spi_controller_get_devdata(mem->spi->master); ++ ++ dev_dbg(ms->dev, "OP %02x ADDR %08llX@%d:%u DATA %d:%u", op->cmd.opcode, ++ op->addr.val, op->addr.buswidth, op->addr.nbytes, ++ op->data.buswidth, op->data.nbytes); ++ if (mtk_snand_is_page_ops(op)) { ++ if (op->data.dir == SPI_MEM_DATA_IN) ++ return mtk_snand_read_page_cache(ms, op); ++ else ++ return mtk_snand_write_page_cache(ms, op); ++ } else { ++ return mtk_snand_mac_io(ms, op); ++ } ++} ++ ++static const struct spi_controller_mem_ops mtk_snand_mem_ops = { ++ .adjust_op_size = mtk_snand_adjust_op_size, ++ .supports_op = mtk_snand_supports_op, ++ .exec_op = mtk_snand_exec_op, ++}; ++ ++static const struct spi_controller_mem_caps mtk_snand_mem_caps = { ++ .ecc = true, ++}; ++ ++static irqreturn_t mtk_snand_irq(int irq, void *id) ++{ ++ struct mtk_snand *snf = id; ++ u32 sta, ien; ++ ++ sta = nfi_read32(snf, NFI_INTR_STA); ++ ien = nfi_read32(snf, NFI_INTR_EN); ++ ++ if (!(sta & ien)) ++ return IRQ_NONE; ++ ++ nfi_write32(snf, NFI_INTR_EN, 0); ++ complete(&snf->op_done); ++ return IRQ_HANDLED; ++} ++ ++static const struct of_device_id mtk_snand_ids[] = { ++ { .compatible = "mediatek,mt7622-snand", .data = &mt7622_snand_caps }, ++ { .compatible = "mediatek,mt7629-snand", .data = &mt7629_snand_caps }, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, mtk_snand_ids); ++ ++static int mtk_snand_enable_clk(struct mtk_snand *ms) ++{ ++ int ret; ++ ++ ret = clk_prepare_enable(ms->nfi_clk); ++ if (ret) { ++ dev_err(ms->dev, "unable to enable nfi clk\n"); ++ return ret; ++ } ++ ret = clk_prepare_enable(ms->pad_clk); ++ if (ret) { ++ dev_err(ms->dev, "unable to enable pad clk\n"); ++ goto err1; ++ } ++ return 0; ++err1: ++ clk_disable_unprepare(ms->nfi_clk); ++ return ret; ++} ++ ++static void mtk_snand_disable_clk(struct mtk_snand *ms) ++{ ++ clk_disable_unprepare(ms->pad_clk); ++ clk_disable_unprepare(ms->nfi_clk); ++} ++ ++static int mtk_snand_probe(struct platform_device *pdev) ++{ ++ struct device_node *np = pdev->dev.of_node; ++ const struct of_device_id *dev_id; ++ struct spi_controller *ctlr; ++ struct mtk_snand *ms; ++ int ret; ++ ++ dev_id = of_match_node(mtk_snand_ids, np); ++ if (!dev_id) ++ return -EINVAL; ++ ++ ctlr = devm_spi_alloc_master(&pdev->dev, sizeof(*ms)); ++ if (!ctlr) ++ return -ENOMEM; ++ platform_set_drvdata(pdev, ctlr); ++ ++ ms = spi_controller_get_devdata(ctlr); ++ ++ ms->ctlr = ctlr; ++ ms->caps = dev_id->data; ++ ++ ms->ecc = of_mtk_ecc_get(np); ++ if (IS_ERR(ms->ecc)) ++ return PTR_ERR(ms->ecc); ++ else if (!ms->ecc) ++ return -ENODEV; ++ ++ ms->nfi_base = devm_platform_ioremap_resource(pdev, 0); ++ if (IS_ERR(ms->nfi_base)) { ++ ret = PTR_ERR(ms->nfi_base); ++ goto release_ecc; ++ } ++ ++ ms->dev = &pdev->dev; ++ ++ ms->nfi_clk = devm_clk_get(&pdev->dev, "nfi_clk"); ++ if (IS_ERR(ms->nfi_clk)) { ++ ret = PTR_ERR(ms->nfi_clk); ++ dev_err(&pdev->dev, "unable to get nfi_clk, err = %d\n", ret); ++ goto release_ecc; ++ } 
++ ++ ms->pad_clk = devm_clk_get(&pdev->dev, "pad_clk"); ++ if (IS_ERR(ms->pad_clk)) { ++ ret = PTR_ERR(ms->pad_clk); ++ dev_err(&pdev->dev, "unable to get pad_clk, err = %d\n", ret); ++ goto release_ecc; ++ } ++ ++ ret = mtk_snand_enable_clk(ms); ++ if (ret) ++ goto release_ecc; ++ ++ init_completion(&ms->op_done); ++ ++ ms->irq = platform_get_irq(pdev, 0); ++ if (ms->irq < 0) { ++ ret = ms->irq; ++ goto disable_clk; ++ } ++ ret = devm_request_irq(ms->dev, ms->irq, mtk_snand_irq, 0x0, ++ "mtk-snand", ms); ++ if (ret) { ++ dev_err(ms->dev, "failed to request snfi irq\n"); ++ goto disable_clk; ++ } ++ ++ ret = dma_set_mask(ms->dev, DMA_BIT_MASK(32)); ++ if (ret) { ++ dev_err(ms->dev, "failed to set dma mask\n"); ++ goto disable_clk; ++ } ++ ++ // switch to SNFI mode ++ nfi_write32(ms, SNF_CFG, SPI_MODE); ++ ++ // setup an initial page format for ops matching page_cache_op template ++ // before ECC is called. ++ ret = mtk_snand_setup_pagefmt(ms, ms->caps->sector_size, ++ ms->caps->spare_sizes[0]); ++ if (ret) { ++ dev_err(ms->dev, "failed to set initial page format\n"); ++ goto disable_clk; ++ } ++ ++ // setup ECC engine ++ ms->ecc_eng.dev = &pdev->dev; ++ ms->ecc_eng.integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED; ++ ms->ecc_eng.ops = &mtk_snfi_ecc_engine_ops; ++ ms->ecc_eng.priv = ms; ++ ++ ret = nand_ecc_register_on_host_hw_engine(&ms->ecc_eng); ++ if (ret) { ++ dev_err(&pdev->dev, "failed to register ecc engine.\n"); ++ goto disable_clk; ++ } ++ ++ ctlr->num_chipselect = 1; ++ ctlr->mem_ops = &mtk_snand_mem_ops; ++ ctlr->mem_caps = &mtk_snand_mem_caps; ++ ctlr->bits_per_word_mask = SPI_BPW_MASK(8); ++ ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_DUAL | SPI_TX_QUAD; ++ ctlr->dev.of_node = pdev->dev.of_node; ++ ret = spi_register_controller(ctlr); ++ if (ret) { ++ dev_err(&pdev->dev, "spi_register_controller failed.\n"); ++ goto disable_clk; ++ } ++ ++ return 0; ++disable_clk: ++ mtk_snand_disable_clk(ms); ++release_ecc: ++ mtk_ecc_release(ms->ecc); ++ return ret; ++} ++ ++static int mtk_snand_remove(struct platform_device *pdev) ++{ ++ struct spi_controller *ctlr = platform_get_drvdata(pdev); ++ struct mtk_snand *ms = spi_controller_get_devdata(ctlr); ++ ++ spi_unregister_controller(ctlr); ++ mtk_snand_disable_clk(ms); ++ mtk_ecc_release(ms->ecc); ++ kfree(ms->buf); ++ return 0; ++} ++ ++static struct platform_driver mtk_snand_driver = { ++ .probe = mtk_snand_probe, ++ .remove = mtk_snand_remove, ++ .driver = { ++ .name = "mtk-snand", ++ .of_match_table = mtk_snand_ids, ++ }, ++}; ++ ++module_platform_driver(mtk_snand_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Chuanhong Guo "); ++MODULE_DESCRIPTION("MeidaTek SPI-NAND Flash Controller Driver"); diff --git a/target/linux/mediatek/patches-5.15/120-13-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch b/target/linux/mediatek/patches-5.15/120-13-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch deleted file mode 100644 index 01d9d12088..0000000000 --- a/target/linux/mediatek/patches-5.15/120-13-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch +++ /dev/null @@ -1,30 +0,0 @@ -From 433b76fa0f3ca2865841abc21538dd8077ca3edd Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Mon, 4 Apr 2022 00:05:38 +0800 -Subject: [PATCH 13/15] mtd: nand: mtk-ecc: also parse nand-ecc-engine if - available - -The recently added ECC engine support introduced a generic property -named nand-ecc-engine for ecc engine phandle. This patch adds support -for this new property. 
- -Signed-off-by: Chuanhong Guo -(cherry picked from commit a41f25feb6e47c1c4d8d3279ae990ccbd8dfab54) ---- - drivers/mtd/nand/ecc-mtk.c | 5 ++++- - 1 file changed, 4 insertions(+), 1 deletion(-) - ---- a/drivers/mtd/nand/ecc-mtk.c -+++ b/drivers/mtd/nand/ecc-mtk.c -@@ -279,7 +279,10 @@ struct mtk_ecc *of_mtk_ecc_get(struct de - struct mtk_ecc *ecc = NULL; - struct device_node *np; - -- np = of_parse_phandle(of_node, "ecc-engine", 0); -+ np = of_parse_phandle(of_node, "nand-ecc-engine", 0); -+ /* for backward compatibility */ -+ if (!np) -+ np = of_parse_phandle(of_node, "ecc-engine", 0); - if (np) { - ecc = mtk_ecc_get(np); - of_node_put(np); diff --git a/target/linux/mediatek/patches-5.15/120-13-v5.19-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch b/target/linux/mediatek/patches-5.15/120-13-v5.19-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch new file mode 100644 index 0000000000..01d9d12088 --- /dev/null +++ b/target/linux/mediatek/patches-5.15/120-13-v5.19-mtd-nand-mtk-ecc-also-parse-nand-ecc-engine-if-avail.patch @@ -0,0 +1,30 @@ +From 433b76fa0f3ca2865841abc21538dd8077ca3edd Mon Sep 17 00:00:00 2001 +From: Chuanhong Guo +Date: Mon, 4 Apr 2022 00:05:38 +0800 +Subject: [PATCH 13/15] mtd: nand: mtk-ecc: also parse nand-ecc-engine if + available + +The recently added ECC engine support introduced a generic property +named nand-ecc-engine for ecc engine phandle. This patch adds support +for this new property. + +Signed-off-by: Chuanhong Guo +(cherry picked from commit a41f25feb6e47c1c4d8d3279ae990ccbd8dfab54) +--- + drivers/mtd/nand/ecc-mtk.c | 5 ++++- + 1 file changed, 4 insertions(+), 1 deletion(-) + +--- a/drivers/mtd/nand/ecc-mtk.c ++++ b/drivers/mtd/nand/ecc-mtk.c +@@ -279,7 +279,10 @@ struct mtk_ecc *of_mtk_ecc_get(struct de + struct mtk_ecc *ecc = NULL; + struct device_node *np; + +- np = of_parse_phandle(of_node, "ecc-engine", 0); ++ np = of_parse_phandle(of_node, "nand-ecc-engine", 0); ++ /* for backward compatibility */ ++ if (!np) ++ np = of_parse_phandle(of_node, "ecc-engine", 0); + if (np) { + ecc = mtk_ecc_get(np); + of_node_put(np); diff --git a/target/linux/mediatek/patches-5.15/120-14-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch b/target/linux/mediatek/patches-5.15/120-14-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch deleted file mode 100644 index d5285676fe..0000000000 --- a/target/linux/mediatek/patches-5.15/120-14-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch +++ /dev/null @@ -1,35 +0,0 @@ -From 9ba7c246063ae43baf2e53ccc8c8b5f8d025aaaa Mon Sep 17 00:00:00 2001 -From: Chuanhong Guo -Date: Sun, 3 Apr 2022 10:19:29 +0800 -Subject: [PATCH 15/15] arm64: dts: mediatek: add mtk-snfi for mt7622 - -This patch adds a device-tree node for the MTK SPI-NAND Flash Interface -for MT7622 device tree. 
- -Signed-off-by: Chuanhong Guo -(cherry picked from commit 2e022641709011ef0843d0416b0f264b5fc217af) ---- - arch/arm64/boot/dts/mediatek/mt7622.dtsi | 12 ++++++++++++ - 1 file changed, 12 insertions(+) - ---- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi -+++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi -@@ -552,6 +552,18 @@ - status = "disabled"; - }; - -+ snfi: spi@1100d000 { -+ compatible = "mediatek,mt7622-snand"; -+ reg = <0 0x1100d000 0 0x1000>; -+ interrupts = ; -+ clocks = <&pericfg CLK_PERI_NFI_PD>, <&pericfg CLK_PERI_SNFI_PD>; -+ clock-names = "nfi_clk", "pad_clk"; -+ nand-ecc-engine = <&bch>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ status = "disabled"; -+ }; -+ - bch: ecc@1100e000 { - compatible = "mediatek,mt7622-ecc"; - reg = <0 0x1100e000 0 0x1000>; diff --git a/target/linux/mediatek/patches-5.15/120-14-v5.19-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch b/target/linux/mediatek/patches-5.15/120-14-v5.19-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch new file mode 100644 index 0000000000..d5285676fe --- /dev/null +++ b/target/linux/mediatek/patches-5.15/120-14-v5.19-arm64-dts-mediatek-add-mtk-snfi-for-mt7622.patch @@ -0,0 +1,35 @@ +From 9ba7c246063ae43baf2e53ccc8c8b5f8d025aaaa Mon Sep 17 00:00:00 2001 +From: Chuanhong Guo +Date: Sun, 3 Apr 2022 10:19:29 +0800 +Subject: [PATCH 15/15] arm64: dts: mediatek: add mtk-snfi for mt7622 + +This patch adds a device-tree node for the MTK SPI-NAND Flash Interface +for MT7622 device tree. + +Signed-off-by: Chuanhong Guo +(cherry picked from commit 2e022641709011ef0843d0416b0f264b5fc217af) +--- + arch/arm64/boot/dts/mediatek/mt7622.dtsi | 12 ++++++++++++ + 1 file changed, 12 insertions(+) + +--- a/arch/arm64/boot/dts/mediatek/mt7622.dtsi ++++ b/arch/arm64/boot/dts/mediatek/mt7622.dtsi +@@ -552,6 +552,18 @@ + status = "disabled"; + }; + ++ snfi: spi@1100d000 { ++ compatible = "mediatek,mt7622-snand"; ++ reg = <0 0x1100d000 0 0x1000>; ++ interrupts = ; ++ clocks = <&pericfg CLK_PERI_NFI_PD>, <&pericfg CLK_PERI_SNFI_PD>; ++ clock-names = "nfi_clk", "pad_clk"; ++ nand-ecc-engine = <&bch>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ status = "disabled"; ++ }; ++ + bch: ecc@1100e000 { + compatible = "mediatek,mt7622-ecc"; + reg = <0 0x1100e000 0 0x1000>; -- cgit v1.2.3