[RFC,v4,3/5] spi: mtk: Add mediatek SPI Nand Flash interface driver
Miquel Raynal
miquel.raynal at bootlin.com
Thu Dec 9 02:20:30 PST 2021
Hi Xiangsheng,
xiangsheng.hou at mediatek.com wrote on Tue, 30 Nov 2021 16:32:00 +0800:
> The SPI NAND Flash interface driver works together with the MediaTek
> pipelined HW ECC engine.
>
> Signed-off-by: Xiangsheng Hou <xiangsheng.hou at mediatek.com>
> ---
> drivers/spi/Kconfig | 11 +
> drivers/spi/Makefile | 1 +
> drivers/spi/spi-mtk-snfi.c | 1117 ++++++++++++++++++++++++++++++++++++
> 3 files changed, 1129 insertions(+)
> create mode 100644 drivers/spi/spi-mtk-snfi.c
>
> diff --git a/drivers/spi/Kconfig b/drivers/spi/Kconfig
> index 596705d24400..9cb6a173b1ef 100644
> --- a/drivers/spi/Kconfig
> +++ b/drivers/spi/Kconfig
> @@ -535,6 +535,17 @@ config SPI_MT65XX
> say Y or M here.If you are not sure, say N.
> SPI drivers for Mediatek MT65XX and MT81XX series ARM SoCs.
>
> +config SPI_MTK_SNFI
> + tristate "MediaTek SPI NAND interface"
> + depends on MTD
> + select MTD_SPI_NAND
> + select MTD_NAND_ECC_MTK
> + help
> + This selects the SPI NAND Flash interface (SNFI),
> + which can be found on MediaTek SoCs.
> + Say Y or M here. If you are not sure, say N.
> + Note that parallel NAND and SPI NAND are mutually
> + exclusive on MediaTek SoCs.
> +
> config SPI_MT7621
> tristate "MediaTek MT7621 SPI Controller"
> depends on RALINK || COMPILE_TEST
> diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
> index dd7393a6046f..57d11eecf662 100644
> --- a/drivers/spi/Makefile
> +++ b/drivers/spi/Makefile
> @@ -73,6 +73,7 @@ obj-$(CONFIG_SPI_MPC52xx) += spi-mpc52xx.o
> obj-$(CONFIG_SPI_MT65XX) += spi-mt65xx.o
> obj-$(CONFIG_SPI_MT7621) += spi-mt7621.o
> obj-$(CONFIG_SPI_MTK_NOR) += spi-mtk-nor.o
> +obj-$(CONFIG_SPI_MTK_SNFI) += spi-mtk-snfi.o
> obj-$(CONFIG_SPI_MXIC) += spi-mxic.o
> obj-$(CONFIG_SPI_MXS) += spi-mxs.o
> obj-$(CONFIG_SPI_NPCM_FIU) += spi-npcm-fiu.o
> diff --git a/drivers/spi/spi-mtk-snfi.c b/drivers/spi/spi-mtk-snfi.c
> new file mode 100644
> index 000000000000..b4dce6d78176
> --- /dev/null
> +++ b/drivers/spi/spi-mtk-snfi.c
> @@ -0,0 +1,1117 @@
> +// SPDX-License-Identifier: GPL-2.0
> +/*
> + * Driver for MediaTek SPI Nand Flash interface
> + *
> + * Copyright (C) 2021 MediaTek Inc.
> + * Authors: Xiangsheng Hou <xiangsheng.hou at mediatek.com>
> + */
> +
> +#include <linux/clk.h>
> +#include <linux/dma-mapping.h>
> +#include <linux/interrupt.h>
> +#include <linux/iopoll.h>
> +#include <linux/module.h>
> +#include <linux/mtd/nand.h>
> +#include <linux/mtd/nand-ecc-mtk.h>
> +#include <linux/of.h>
> +#include <linux/of_device.h>
> +#include <linux/platform_device.h>
> +#include <linux/pm_runtime.h>
> +#include <linux/spi/spi.h>
> +#include <linux/spi/spi-mem.h>
> +
> +/* Registers used by the driver */
> +#define NFI_CNFG (0x00)
> +#define CNFG_DMA BIT(0)
> +#define CNFG_READ_EN BIT(1)
> +#define CNFG_DMA_BURST_EN BIT(2)
> +#define CNFG_HW_ECC_EN BIT(8)
> +#define CNFG_AUTO_FMT_EN BIT(9)
> +#define CNFG_OP_CUST GENMASK(14, 13)
> +#define NFI_PAGEFMT (0x04)
> +#define PAGEFMT_512_2K (0)
> +#define PAGEFMT_2K_4K (1)
> +#define PAGEFMT_4K_8K (2)
> +#define PAGEFMT_8K_16K (3)
> +#define PAGEFMT_PAGE_MASK GENMASK(1, 0)
> +#define PAGEFMT_SEC_SEL_512 BIT(2)
> +#define PAGEFMT_FDM_SHIFT (8)
> +#define PAGEFMT_FDM_ECC_SHIFT (12)
> +#define PAGEFMT_SPARE_SHIFT (16)
> +#define PAGEFMT_SPARE_MASK GENMASK(21, 16)
> +#define NFI_CON (0x08)
> +#define CON_FIFO_FLUSH BIT(0)
> +#define CON_NFI_RST BIT(1)
> +#define CON_BRD BIT(8)
> +#define CON_BWR BIT(9)
> +#define CON_SEC_SHIFT (12)
> +#define CON_SEC_MASK GENMASK(16, 12)
> +#define NFI_INTR_EN (0x10)
> +#define INTR_CUS_PROG_EN BIT(7)
> +#define INTR_CUS_READ_EN BIT(8)
> +#define INTR_IRQ_EN BIT(31)
> +#define NFI_INTR_STA (0x14)
> +#define NFI_CMD (0x20)
> +#define CMD_DUMMY (0x00)
> +#define NFI_STRDATA (0x40)
> +#define STAR_EN BIT(0)
> +#define NFI_STA (0x60)
> +#define NFI_FSM_MASK GENMASK(19, 16)
> +#define STA_EMP_PAGE BIT(12)
> +#define NFI_ADDRCNTR (0x70)
> +#define CNTR_MASK GENMASK(16, 12)
> +#define ADDRCNTR_SEC_SHIFT (12)
> +#define ADDRCNTR_SEC(val) \
> + (((val) & CNTR_MASK) >> ADDRCNTR_SEC_SHIFT)
> +#define NFI_STRADDR (0x80)
> +#define NFI_BYTELEN (0x84)
> +#define NFI_FDML(x) (0xA0 + (x) * sizeof(u32) * 2)
> +#define NFI_FDMM(x) (0xA4 + (x) * sizeof(u32) * 2)
> +#define NFI_MASTERSTA (0x224)
> +#define AHB_BUS_BUSY GENMASK(1, 0)
> +#define SNFI_MAC_CTL (0x500)
> +#define MAC_WIP BIT(0)
> +#define MAC_WIP_READY BIT(1)
> +#define MAC_TRIG BIT(2)
> +#define MAC_EN BIT(3)
> +#define MAC_SIO_SEL BIT(4)
> +#define SNFI_MAC_OUTL (0x504)
> +#define SNFI_MAC_INL (0x508)
> +#define SNFI_RD_CTL2 (0x510)
> +#define RD_CMD_MASK GENMASK(7, 0)
> +#define RD_DUMMY_SHIFT (8)
> +#define SNFI_RD_CTL3 (0x514)
> +#define RD_ADDR_MASK GENMASK(16, 0)
> +#define SNFI_PG_CTL1 (0x524)
> +#define WR_LOAD_CMD_MASK GENMASK(15, 8)
> +#define WR_LOAD_CMD_SHIFT (8)
> +#define SNFI_PG_CTL2 (0x528)
> +#define WR_LOAD_ADDR_MASK GENMASK(15, 0)
> +#define SNFI_MISC_CTL (0x538)
> +#define RD_CUSTOM_EN BIT(6)
> +#define WR_CUSTOM_EN BIT(7)
> +#define LATCH_LAT_SHIFT (8)
> +#define LATCH_LAT_MASK GENMASK(9, 8)
> +#define RD_MODE_X2 BIT(16)
> +#define RD_MODE_X4 BIT(17)
> +#define RD_MODE_DQUAL BIT(18)
> +#define RD_MODE_MASK GENMASK(18, 16)
> +#define WR_X4_EN BIT(20)
> +#define SW_RST BIT(28)
> +#define SNFI_MISC_CTL2 (0x53c)
> +#define WR_LEN_SHIFT (16)
> +#define SNFI_DLY_CTL3 (0x548)
> +#define SAM_DLY_MASK GENMASK(5, 0)
> +#define SNFI_STA_CTL1 (0x550)
> +#define SPI_STATE GENMASK(3, 0)
> +#define CUS_READ_DONE BIT(27)
> +#define CUS_PROG_DONE BIT(28)
> +#define SNFI_CNFG (0x55c)
> +#define SNFI_MODE_EN BIT(0)
> +#define SNFI_GPRAM_DATA (0x800)
> +#define SNFI_GPRAM_MAX_LEN (160)
> +
> +#define MTK_SNFI_TIMEOUT (500000)
> +#define MTK_SNFI_RESET_TIMEOUT (1000000)
> +#define MTK_SNFI_AUTOSUSPEND_DELAY (1000)
> +#define KB(x) ((x) * 1024UL)
> +
> +struct mtk_snfi_caps {
> + u8 pageformat_spare_shift;
> +};
> +
> +struct mtk_snfi {
> + struct device *dev;
> + struct completion done;
> + void __iomem *regs;
> + const struct mtk_snfi_caps *caps;
> +
> + struct clk *nfi_clk;
> + struct clk *snfi_clk;
> + struct clk *hclk;
> +
> + struct nand_ecc_engine *engine;
> +
> + u32 sample_delay;
> + u32 read_latency;
> +
> + void *tx_buf;
> + dma_addr_t dma_addr;
> +};
> +
> +static struct mtk_ecc_engine *mtk_snfi_to_ecc_engine(struct mtk_snfi *snfi)
> +{
> + return snfi->engine->priv;
> +}
> +
> +static void mtk_snfi_mac_enable(struct mtk_snfi *snfi)
> +{
> + u32 val;
> +
> + val = readl(snfi->regs + SNFI_MAC_CTL);
> + val &= ~MAC_SIO_SEL;
> + val |= MAC_EN;
> +
> + writel(val, snfi->regs + SNFI_MAC_CTL);
> +}
> +
> +static int mtk_snfi_mac_trigger(struct mtk_snfi *snfi)
> +{
> + int ret;
> + u32 val;
> +
> + val = readl(snfi->regs + SNFI_MAC_CTL);
> + val |= MAC_TRIG;
> + writel(val, snfi->regs + SNFI_MAC_CTL);
> +
> + ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL,
> + val, val & MAC_WIP_READY,
> + 0, MTK_SNFI_TIMEOUT);
> + if (ret < 0) {
> + dev_err(snfi->dev, "wait for wip ready timeout\n");
> + return -EIO;
> + }
> +
> + ret = readl_poll_timeout_atomic(snfi->regs + SNFI_MAC_CTL,
> + val, !(val & MAC_WIP), 0,
> + MTK_SNFI_TIMEOUT);
> + if (ret < 0) {
> + dev_err(snfi->dev, "command write timeout\n");
> + return -EIO;
> + }
> +
> + return 0;
> +}
> +
> +static void mtk_snfi_mac_disable(struct mtk_snfi *snfi)
> +{
> + u32 val;
> +
> + val = readl(snfi->regs + SNFI_MAC_CTL);
> + val &= ~(MAC_TRIG | MAC_EN);
> + writel(val, snfi->regs + SNFI_MAC_CTL);
> +}
> +
> +static int mtk_snfi_mac_op(struct mtk_snfi *snfi)
> +{
> + int ret;
> +
> + mtk_snfi_mac_enable(snfi);
> + ret = mtk_snfi_mac_trigger(snfi);
> + mtk_snfi_mac_disable(snfi);
> +
> + return ret;
> +}
> +
> +static inline void mtk_snfi_read_oob_free(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + u8 *oobptr = op->data.buf.in;
> + u32 vall, valm;
> + int i, j;
> +
> + oobptr += eng->section_size * eng->nsteps;
> + for (i = 0; i < eng->nsteps; i++) {
> + vall = readl(snfi->regs + NFI_FDML(i));
> + valm = readl(snfi->regs + NFI_FDMM(i));
> +
> + for (j = 0; j < eng->oob_free; j++)
> + oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8);
> +
> + oobptr += eng->oob_free;
> + }
> +}
> +
> +static inline void mtk_snfi_write_oob_free(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + const u8 *oobptr = op->data.buf.out;
> + u32 vall, valm;
> + int i, j;
> +
> + oobptr += eng->section_size * eng->nsteps;
> + for (i = 0; i < eng->nsteps; i++) {
> + vall = 0;
> + valm = 0;
> + for (j = 0; j < 8; j++) {
> + if (j < 4)
> + vall |= (j < eng->oob_free ? oobptr[j] : 0xff)
> + << (j * 8);
> + else
> + valm |= (j < eng->oob_free ? oobptr[j] : 0xff)
> + << ((j - 4) * 8);
> + }
> +
> + writel(vall, snfi->regs + NFI_FDML(i));
> + writel(valm, snfi->regs + NFI_FDMM(i));
> + oobptr += eng->oob_free;
> + }
> +}
> +
> +static irqreturn_t mtk_snfi_irq(int irq, void *id)
> +{
> + struct mtk_snfi *snfi = id;
> + u32 sta, ien;
> +
> + sta = readl(snfi->regs + NFI_INTR_STA);
> + ien = readl(snfi->regs + NFI_INTR_EN);
> +
> + if (!(sta & ien))
> + return IRQ_NONE;
> +
> + writel(0, snfi->regs + NFI_INTR_EN);
> + complete(&snfi->done);
> +
> + return IRQ_HANDLED;
> +}
> +
> +static int mtk_snfi_enable_clk(struct device *dev, struct mtk_snfi *snfi)
> +{
> + int ret;
> +
> + ret = clk_prepare_enable(snfi->nfi_clk);
> + if (ret) {
> + dev_err(dev, "failed to enable nfi clk\n");
> + return ret;
> + }
> +
> + ret = clk_prepare_enable(snfi->snfi_clk);
> + if (ret) {
> + dev_err(dev, "failed to enable snfi clk\n");
> + clk_disable_unprepare(snfi->nfi_clk);
> + return ret;
> + }
> +
> + ret = clk_prepare_enable(snfi->hclk);
> + if (ret) {
> + dev_err(dev, "failed to enable hclk\n");
> + clk_disable_unprepare(snfi->nfi_clk);
> + clk_disable_unprepare(snfi->snfi_clk);
This error unwinding definitely deserves goto statements :)
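Something along these lines would read better (untested sketch, just to
illustrate the unwind ladder):

	ret = clk_prepare_enable(snfi->snfi_clk);
	if (ret) {
		dev_err(dev, "failed to enable snfi clk\n");
		goto err_disable_nfi;
	}

	ret = clk_prepare_enable(snfi->hclk);
	if (ret) {
		dev_err(dev, "failed to enable hclk\n");
		goto err_disable_snfi;
	}

	return 0;

err_disable_snfi:
	clk_disable_unprepare(snfi->snfi_clk);
err_disable_nfi:
	clk_disable_unprepare(snfi->nfi_clk);
	return ret;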
> + return ret;
> + }
> +
> + return 0;
> +}
> +
> +static void mtk_snfi_disable_clk(struct mtk_snfi *snfi)
> +{
> + clk_disable_unprepare(snfi->nfi_clk);
> + clk_disable_unprepare(snfi->snfi_clk);
> + clk_disable_unprepare(snfi->hclk);
> +}
> +
> +static int mtk_snfi_reset(struct mtk_snfi *snfi)
> +{
> + u32 val;
> + int ret;
> +
> + val = readl(snfi->regs + SNFI_MISC_CTL) | SW_RST;
> + writel(val, snfi->regs + SNFI_MISC_CTL);
> +
> + ret = readw_poll_timeout(snfi->regs + SNFI_STA_CTL1, val,
> + !(val & SPI_STATE), 0,
> + MTK_SNFI_RESET_TIMEOUT);
> + if (ret) {
> + dev_warn(snfi->dev, "wait spi idle timeout 0x%x\n", val);
> + return ret;
> + }
> +
> + val = readl(snfi->regs + SNFI_MISC_CTL);
> + val &= ~SW_RST;
> + writel(val, snfi->regs + SNFI_MISC_CTL);
> +
> + writew(CON_FIFO_FLUSH | CON_NFI_RST, snfi->regs + NFI_CON);
> + ret = readw_poll_timeout(snfi->regs + NFI_STA, val,
> + !(val & NFI_FSM_MASK), 0,
> + MTK_SNFI_RESET_TIMEOUT);
> + if (ret) {
> + dev_warn(snfi->dev, "wait nfi fsm idle timeout 0x%x\n", val);
> + return ret;
> + }
> +
> + val = readl(snfi->regs + NFI_STRDATA);
> + val &= ~STAR_EN;
> + writew(val, snfi->regs + NFI_STRDATA);
> +
> + return 0;
> +}
> +
> +static int mtk_snfi_init(struct mtk_snfi *snfi)
> +{
> + int ret;
> + u32 val;
> +
> + ret = mtk_snfi_reset(snfi);
> + if (ret)
> + return ret;
> +
> + writel(SNFI_MODE_EN, snfi->regs + SNFI_CNFG);
> +
> + if (snfi->sample_delay) {
> + val = readl(snfi->regs + SNFI_DLY_CTL3);
> + val &= ~SAM_DLY_MASK;
> + val |= snfi->sample_delay;
> + writel(val, snfi->regs + SNFI_DLY_CTL3);
> + }
> +
> + if (snfi->read_latency) {
> + val = readl(snfi->regs + SNFI_MISC_CTL);
> + val &= ~LATCH_LAT_MASK;
> + val |= (snfi->read_latency << LATCH_LAT_SHIFT);
> + writel(val, snfi->regs + SNFI_MISC_CTL);
> + }
> +
> + return 0;
> +}
> +
> +static void mtk_snfi_prepare_for_tx(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + u32 val;
> +
> + val = readl(snfi->regs + SNFI_PG_CTL1);
> + val &= ~WR_LOAD_CMD_MASK;
> + val |= op->cmd.opcode << WR_LOAD_CMD_SHIFT;
> + writel(val, snfi->regs + SNFI_PG_CTL1);
> +
> + writel(op->addr.val & WR_LOAD_ADDR_MASK,
> + snfi->regs + SNFI_PG_CTL2);
> +
> + val = readl(snfi->regs + SNFI_MISC_CTL);
> + val |= WR_CUSTOM_EN;
> + if (op->data.buswidth == 4)
> + val |= WR_X4_EN;
> + writel(val, snfi->regs + SNFI_MISC_CTL);
> +
> + val = eng->nsteps * (eng->oob_per_section + eng->section_size);
> + writel(val << WR_LEN_SHIFT, snfi->regs + SNFI_MISC_CTL2);
> +
> + writel(INTR_CUS_PROG_EN | INTR_IRQ_EN, snfi->regs + NFI_INTR_EN);
> +}
> +
> +static void mtk_snfi_prepare_for_rx(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + u32 val, dummy_cycle;
> +
> + dummy_cycle = (op->dummy.nbytes << 3) >>
> + (ffs(op->dummy.buswidth) - 1);
> + val = (op->cmd.opcode & RD_CMD_MASK) |
> + (dummy_cycle << RD_DUMMY_SHIFT);
> + writel(val, snfi->regs + SNFI_RD_CTL2);
> +
> + writel(op->addr.val & RD_ADDR_MASK,
> + snfi->regs + SNFI_RD_CTL3);
> +
> + val = readl(snfi->regs + SNFI_MISC_CTL);
> + val |= RD_CUSTOM_EN;
> + val &= ~RD_MODE_MASK;
> + if (op->data.buswidth == 4)
> + val |= RD_MODE_X4;
> + else if (op->data.buswidth == 2)
> + val |= RD_MODE_X2;
> +
> + if (op->addr.buswidth != 1)
> + val |= RD_MODE_DQUAL;
> +
> + writel(val, snfi->regs + SNFI_MISC_CTL);
> +
> + val = eng->nsteps * (eng->oob_per_section + eng->section_size);
> + writel(val, snfi->regs + SNFI_MISC_CTL2);
> +
> + writel(INTR_CUS_READ_EN | INTR_IRQ_EN, snfi->regs + NFI_INTR_EN);
> +}
> +
> +static int mtk_snfi_prepare(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op, bool rx)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + dma_addr_t addr;
> + int ret;
> + u32 val;
> +
> + addr = dma_map_single(snfi->dev,
> + op->data.buf.in, op->data.nbytes,
> + rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
> + ret = dma_mapping_error(snfi->dev, addr);
> + if (ret) {
> + dev_err(snfi->dev, "dma mapping error\n");
> + return -EINVAL;
> + }
> +
> + snfi->dma_addr = addr;
> + writel(lower_32_bits(addr), snfi->regs + NFI_STRADDR);
> +
> + if (op->ecc_en && !rx)
> + mtk_snfi_write_oob_free(snfi, op);
> +
> + val = readw(snfi->regs + NFI_CNFG);
> + val |= CNFG_DMA | CNFG_DMA_BURST_EN | CNFG_OP_CUST;
> + val |= rx ? CNFG_READ_EN : 0;
> +
> + if (op->ecc_en)
> + val |= CNFG_HW_ECC_EN | CNFG_AUTO_FMT_EN;
> +
> + writew(val, snfi->regs + NFI_CNFG);
> +
> + writel(eng->nsteps << CON_SEC_SHIFT, snfi->regs + NFI_CON);
> +
> + init_completion(&snfi->done);
> +
> + /* trigger state machine to custom op mode */
> + writel(CMD_DUMMY, snfi->regs + NFI_CMD);
> +
> + if (rx)
> + mtk_snfi_prepare_for_rx(snfi, op);
> + else
> + mtk_snfi_prepare_for_tx(snfi, op);
> +
> + return 0;
> +}
> +
> +static void mtk_snfi_trigger(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op, bool rx)
> +{
> + u32 val;
> +
> + val = readl(snfi->regs + NFI_CON);
> + val |= rx ? CON_BRD : CON_BWR;
> + writew(val, snfi->regs + NFI_CON);
> +
> + writew(STAR_EN, snfi->regs + NFI_STRDATA);
> +}
> +
> +static int mtk_snfi_wait_done(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op, bool rx)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + struct device *dev = snfi->dev;
> + u32 val;
> + int ret;
> +
> + ret = wait_for_completion_timeout(&snfi->done, msecs_to_jiffies(500));
> + if (!ret) {
> + dev_err(dev, "wait for %d completion done timeout\n", rx);
> + return -ETIMEDOUT;
> + }
> +
> + if (rx) {
> + ret = readl_poll_timeout_atomic(snfi->regs + NFI_BYTELEN,
> + val,
> + ADDRCNTR_SEC(val) >=
> + eng->nsteps,
> + 0, MTK_SNFI_TIMEOUT);
> + if (ret) {
> + dev_err(dev, "wait for rx section count timeout\n");
> + return -ETIMEDOUT;
> + }
> +
> + ret = readl_poll_timeout_atomic(snfi->regs + NFI_MASTERSTA,
> + val,
> + !(val & AHB_BUS_BUSY),
> + 0, MTK_SNFI_TIMEOUT);
> + if (ret) {
> + dev_err(dev, "wait for bus busy timeout\n");
> + return -ETIMEDOUT;
> + }
> + } else {
> + ret = readl_poll_timeout_atomic(snfi->regs + NFI_ADDRCNTR,
> + val,
> + ADDRCNTR_SEC(val) >=
> + eng->nsteps,
> + 0, MTK_SNFI_TIMEOUT);
> + if (ret) {
> + dev_err(dev, "wait for tx section count timeout\n");
> + return -ETIMEDOUT;
> + }
> + }
> +
> + return 0;
> +}
> +
> +static void mtk_snfi_complete(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op, bool rx)
> +{
> + u32 val;
> +
> + dma_unmap_single(snfi->dev,
> + snfi->dma_addr, op->data.nbytes,
> + rx ? DMA_FROM_DEVICE : DMA_TO_DEVICE);
> +
> + if (op->ecc_en && rx)
> + mtk_snfi_read_oob_free(snfi, op);
> +
> + val = readl(snfi->regs + SNFI_MISC_CTL);
> + val &= rx ? ~RD_CUSTOM_EN : ~WR_CUSTOM_EN;
> + writel(val, snfi->regs + SNFI_MISC_CTL);
> +
> + val = readl(snfi->regs + SNFI_STA_CTL1);
> + val |= rx ? CUS_READ_DONE : CUS_PROG_DONE;
> + writew(val, snfi->regs + SNFI_STA_CTL1);
> + val &= rx ? ~CUS_READ_DONE : ~CUS_PROG_DONE;
> + writew(val, snfi->regs + SNFI_STA_CTL1);
> +
> + /* Disable interrupt */
> + val = readl(snfi->regs + NFI_INTR_EN);
> + val &= rx ? ~INTR_CUS_READ_EN : ~INTR_CUS_PROG_EN;
> + writew(val, snfi->regs + NFI_INTR_EN);
> +
> + writew(0, snfi->regs + NFI_CNFG);
> + writew(0, snfi->regs + NFI_CON);
> +}
> +
> +static int mtk_snfi_transfer_dma(struct mtk_snfi *snfi,
> + const struct spi_mem_op *op, bool rx)
> +{
> + int ret;
> +
> + ret = mtk_snfi_prepare(snfi, op, rx);
> + if (ret)
> + return ret;
> +
> + mtk_snfi_trigger(snfi, op, rx);
> +
> + ret = mtk_snfi_wait_done(snfi, op, rx);
> +
> + mtk_snfi_complete(snfi, op, rx);
> +
> + return ret;
> +}
> +
> +static int mtk_snfi_transfer_mac(struct mtk_snfi *snfi,
> + const u8 *txbuf, u8 *rxbuf,
> + const u32 txlen, const u32 rxlen)
> +{
> + u32 i, j, val, tmp;
> + u8 *p_tmp = (u8 *)(&tmp);
> + u32 offset = 0;
> + int ret = 0;
> +
> + /* Move tx data to gpram in snfi mac mode */
> + for (i = 0; i < txlen; ) {
> + for (j = 0, tmp = 0; i < txlen && j < 4; i++, j++)
> + p_tmp[j] = txbuf[i];
> +
> + writel(tmp, snfi->regs + SNFI_GPRAM_DATA + offset);
> + offset += 4;
> + }
> +
> + writel(txlen, snfi->regs + SNFI_MAC_OUTL);
> + writel(rxlen, snfi->regs + SNFI_MAC_INL);
> +
> + ret = mtk_snfi_mac_op(snfi);
> + if (ret) {
> + dev_warn(snfi->dev, "snfi mac operation fail\n");
> + return ret;
> + }
> +
> + /* Get rx data from gpram in snfi mac mode */
> + if (rxlen)
> + for (i = 0, offset = rounddown(txlen, 4); i < rxlen; ) {
> + val = readl(snfi->regs +
> + SNFI_GPRAM_DATA + offset);
> + for (j = 0; i < rxlen && j < 4; i++, j++, rxbuf++) {
> + if (i == 0)
> + j = txlen % 4;
> + *rxbuf = (val >> (j * 8)) & 0xff;
> + }
> + offset += 4;
> + }
> +
> + return ret;
> +}
> +
> +static int mtk_snfi_exec_op(struct spi_mem *mem,
> + const struct spi_mem_op *op)
> +{
> + struct mtk_snfi *snfi = spi_controller_get_devdata(mem->spi->master);
> + u8 *buf, *txbuf = snfi->tx_buf, *rxbuf = NULL;
> + u32 txlen = 0, rxlen = 0;
> + int i, ret = 0;
> + bool rx;
> +
> + rx = op->data.dir == SPI_MEM_DATA_IN;
> +
> + ret = mtk_snfi_reset(snfi);
> + if (ret) {
> + dev_warn(snfi->dev, "snfi reset fail\n");
> + return ret;
> + }
> +
> + /*
> + * If tx/rx data buswidth is not 0/1, use snfi DMA mode.
> + * Otherwise, use snfi mac mode.
> + */
> + if (op->data.buswidth != 1 && op->data.buswidth != 0) {
> + ret = mtk_snfi_transfer_dma(snfi, op, rx);
> + if (ret)
> + dev_warn(snfi->dev, "snfi dma transfer %d fail %d\n",
> + rx, ret);
> + return ret;
> + }
> +
> + txbuf[txlen++] = op->cmd.opcode;
> +
> + if (op->addr.nbytes)
> + for (i = 0; i < op->addr.nbytes; i++)
> + txbuf[txlen++] = op->addr.val >>
> + (8 * (op->addr.nbytes - i - 1));
> +
> + txlen += op->dummy.nbytes;
> +
> + if (op->data.dir == SPI_MEM_DATA_OUT) {
> + buf = (u8 *)op->data.buf.out;
> + for (i = 0; i < op->data.nbytes; i++)
> + txbuf[txlen++] = buf[i];
> + }
> +
> + if (op->data.dir == SPI_MEM_DATA_IN) {
> + rxbuf = (u8 *)op->data.buf.in;
> + rxlen = op->data.nbytes;
> + }
> +
> + ret = mtk_snfi_transfer_mac(snfi, txbuf, rxbuf, txlen, rxlen);
> + if (ret)
> + dev_warn(snfi->dev, "snfi mac transfer %d fail %d\n",
> + op->data.dir, ret);
> +
> + return ret;
> +}
> +
> +static int mtk_snfi_check_buswidth(u8 width)
> +{
> + switch (width) {
> + case 1:
> + case 2:
> + case 4:
> + return 0;
> +
> + default:
> + break;
> + }
> +
> + return -EOPNOTSUPP;
> +}
> +
> +static bool mtk_snfi_supports_op(struct spi_mem *mem,
> + const struct spi_mem_op *op)
> +{
> + int ret = 0;
> +
> + if (!spi_mem_default_supports_op(mem, op))
With the integration properly set, this call would always return false
when ecc_en = true. You should switch to
spi_mem_generic_supports_op(mem, op, false, true);
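i.e. the check would become (untested):

	if (!spi_mem_generic_supports_op(mem, op, false, true))
		return false;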
> + return false;
> +
> + if (op->cmd.buswidth != 1)
> + return false;
> +
> + /*
> + * An operation with data buswidth 0/1 will use snfi mac
> + * mode. However, the HW ECC engine cannot be used in
> + * mac mode.
> + */
> + if (op->ecc_en && op->data.buswidth == 1 &&
> + op->data.nbytes >= SNFI_GPRAM_MAX_LEN)
> + return false;
> +
> + switch (op->data.dir) {
> + /* For spi mem data in, can support 1/2/4 buswidth */
> + case SPI_MEM_DATA_IN:
> + if (op->addr.nbytes)
> + ret |= mtk_snfi_check_buswidth(op->addr.buswidth);
> +
> + if (op->dummy.nbytes)
> + ret |= mtk_snfi_check_buswidth(op->dummy.buswidth);
> +
> + if (op->data.nbytes)
> + ret |= mtk_snfi_check_buswidth(op->data.buswidth);
> +
> + if (ret)
> + return false;
> +
> + break;
> + case SPI_MEM_DATA_OUT:
> + /*
> + * For spi mem data out, can support 0/1 buswidth
> + * for addr/dummy and 1/4 buswidth for data.
> + */
> + if (op->addr.buswidth != 0 && op->addr.buswidth != 1)
> + return false;
> +
> + if (op->dummy.buswidth != 0 && op->dummy.buswidth != 1)
> + return false;
> +
> + if (op->data.buswidth != 1 && op->data.buswidth != 4)
> + return false;
> +
> + break;
> + default:
> + break;
> + }
> +
> + return true;
> +}
> +
> +static int mtk_snfi_adjust_op_size(struct spi_mem *mem,
> + struct spi_mem_op *op)
> +{
> + u32 len, max_len;
> +
> + /*
> + * Ops with data buswidth 0/1 go through snfi mac mode,
> + * which is limited to SNFI_GPRAM_MAX_LEN bytes per op.
> + * Otherwise, the snfi supports at most 16KB.
> + */
> + if (op->data.buswidth == 1 || op->data.buswidth == 0)
> + max_len = SNFI_GPRAM_MAX_LEN;
> + else
> + max_len = KB(16);
> +
> + len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;
> + if (len > max_len)
> + return -EOPNOTSUPP;
> +
> + if ((len + op->data.nbytes) > max_len)
> + op->data.nbytes = max_len - len;
> +
> + return 0;
> +}
> +
> +static const struct mtk_snfi_caps mtk_snfi_caps_mt7622 = {
> + .pageformat_spare_shift = 16,
> +};
> +
> +static const struct spi_controller_mem_ops mtk_snfi_ops = {
> + .adjust_op_size = mtk_snfi_adjust_op_size,
> + .supports_op = mtk_snfi_supports_op,
> + .exec_op = mtk_snfi_exec_op,
> +};
> +
> +static const struct of_device_id mtk_snfi_id_table[] = {
> + { .compatible = "mediatek,mt7622-snfi",
> + .data = &mtk_snfi_caps_mt7622,
> + },
> + { /* sentinel */ }
> +};
> +
> +/* ECC wrapper */
> +static struct mtk_snfi *mtk_nand_to_spi(struct nand_device *nand)
> +{
> + struct device *dev = nand->ecc.engine->dev;
> + struct spi_master *master = dev_get_drvdata(dev);
> + struct mtk_snfi *snfi = spi_master_get_devdata(master);
> +
> + return snfi;
> +}
> +
> +static int mtk_snfi_config(struct nand_device *nand,
> + struct mtk_snfi *snfi)
> +{
> + struct mtk_ecc_engine *eng = mtk_snfi_to_ecc_engine(snfi);
> + u32 val;
> +
> + switch (nanddev_page_size(nand)) {
> + case 512:
> + val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
> + break;
> + case KB(2):
> + if (eng->section_size == 512)
> + val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
> + else
> + val = PAGEFMT_512_2K;
> + break;
> + case KB(4):
> + if (eng->section_size == 512)
> + val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
> + else
> + val = PAGEFMT_2K_4K;
> + break;
> + case KB(8):
> + if (eng->section_size == 512)
> + val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
> + else
> + val = PAGEFMT_4K_8K;
> + break;
> + case KB(16):
> + val = PAGEFMT_8K_16K;
> + break;
> + default:
> + dev_err(snfi->dev, "invalid page len: %d\n",
> + nanddev_page_size(nand));
> + return -EINVAL;
> + }
> +
> + val |= eng->oob_per_section_idx << PAGEFMT_SPARE_SHIFT;
> + val |= eng->oob_free << PAGEFMT_FDM_SHIFT;
> + val |= eng->oob_free_protected << PAGEFMT_FDM_ECC_SHIFT;
> + writel(val, snfi->regs + NFI_PAGEFMT);
Shouldn't this be calculated only once?
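For instance, compute the PAGEFMT value once when the geometry is known
(e.g. at ->init_ctx() time) and only write the register per request
(hypothetical 'pagefmt' field in struct mtk_snfi, untested):

	/* cached once, when the ECC context is initialized */
	snfi->pagefmt = val;

	/* mtk_snfi_config() then boils down to */
	writel(snfi->pagefmt, snfi->regs + NFI_PAGEFMT);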
> +
> + return 0;
> +}
> +
> +static int mtk_snfi_ecc_init_ctx(struct nand_device *nand)
> +{
> + struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> +
> + return ops->init_ctx(nand);
> +}
> +
> +static void mtk_snfi_ecc_cleanup_ctx(struct nand_device *nand)
> +{
> + struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> +
> + ops->cleanup_ctx(nand);
> +}
> +
> +static int mtk_snfi_ecc_prepare_io_req(struct nand_device *nand,
> + struct nand_page_io_req *req)
> +{
> + struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> + struct mtk_snfi *snfi = mtk_nand_to_spi(nand);
> + int ret;
> +
> + ret = mtk_snfi_config(nand, snfi);
> + if (ret)
> + return ret;
> +
> + return ops->prepare_io_req(nand, req);
> +}
> +
> +static int mtk_snfi_ecc_finish_io_req(struct nand_device *nand,
> + struct nand_page_io_req *req)
> +{
> + struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> + struct mtk_ecc_engine *eng = nand_to_ecc_ctx(nand);
> + struct mtk_snfi *snfi = mtk_nand_to_spi(nand);
> +
> + if (req->mode != MTD_OPS_RAW)
> + eng->read_empty = readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
> +
> + return ops->finish_io_req(nand, req);
> +}
> +
> +static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_pipelined_ops = {
> + .init_ctx = mtk_snfi_ecc_init_ctx,
> + .cleanup_ctx = mtk_snfi_ecc_cleanup_ctx,
> + .prepare_io_req = mtk_snfi_ecc_prepare_io_req,
> + .finish_io_req = mtk_snfi_ecc_finish_io_req,
> +};
> +
> +static int mtk_snfi_ecc_probe(struct platform_device *pdev,
> + struct mtk_snfi *snfi)
> +{
> + struct nand_ecc_engine *ecceng;
> +
> + if (!mtk_ecc_get_pipelined_ops())
> + return -EOPNOTSUPP;
> +
> + ecceng = devm_kzalloc(&pdev->dev, sizeof(*ecceng), GFP_KERNEL);
> + if (!ecceng)
> + return -ENOMEM;
> +
> + ecceng->dev = &pdev->dev;
> + ecceng->ops = &mtk_snfi_ecc_engine_pipelined_ops;
You need to tell the core that this is a pipelined engine (look at the
integration entry).
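Something like (assuming the integration enum from the ECC engine
core):

	ecceng->integration = NAND_ECC_ENGINE_INTEGRATION_PIPELINED;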
> +
> + nand_ecc_register_on_host_hw_engine(ecceng);
> +
> + snfi->engine = ecceng;
> +
> + return 0;
> +}
> +
> +static int mtk_snfi_probe(struct platform_device *pdev)
> +{
> + struct device_node *np = pdev->dev.of_node;
> + struct spi_controller *ctlr;
> + struct mtk_snfi *snfi;
> + struct resource *res;
> + int ret, irq;
> + u32 val = 0;
> +
> + ctlr = spi_alloc_master(&pdev->dev, sizeof(*snfi));
> + if (!ctlr)
> + return -ENOMEM;
> +
> + snfi = spi_controller_get_devdata(ctlr);
> + snfi->dev = &pdev->dev;
> +
> + res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
> + snfi->regs = devm_ioremap_resource(snfi->dev, res);
> + if (IS_ERR(snfi->regs)) {
> + ret = PTR_ERR(snfi->regs);
> + goto err_put_master;
> + }
> +
> + ret = of_property_read_u32(np, "sample-delay", &val);
> + if (!ret)
> + snfi->sample_delay = val;
> +
> + ret = of_property_read_u32(np, "read-latency", &val);
> + if (!ret)
> + snfi->read_latency = val;
> +
> + snfi->nfi_clk = devm_clk_get(snfi->dev, "nfi_clk");
> + if (IS_ERR(snfi->nfi_clk)) {
> + dev_err(snfi->dev, "not found nfi clk\n");
> + ret = PTR_ERR(snfi->nfi_clk);
> + goto err_put_master;
> + }
> +
> + snfi->snfi_clk = devm_clk_get(snfi->dev, "snfi_clk");
> + if (IS_ERR(snfi->snfi_clk)) {
> + dev_err(snfi->dev, "not found snfi clk\n");
> + ret = PTR_ERR(snfi->snfi_clk);
> + goto err_put_master;
> + }
> +
> + snfi->hclk = devm_clk_get(snfi->dev, "hclk");
> + if (IS_ERR(snfi->hclk)) {
> + dev_err(snfi->dev, "not found hclk\n");
> + ret = PTR_ERR(snfi->hclk);
> + goto err_put_master;
> + }
> +
> + ret = mtk_snfi_enable_clk(snfi->dev, snfi);
> + if (ret)
> + goto err_put_master;
> +
> + snfi->caps = of_device_get_match_data(snfi->dev);
> +
> + irq = platform_get_irq(pdev, 0);
> + if (irq < 0) {
> + dev_err(snfi->dev, "not found snfi irq resource\n");
> + ret = -EINVAL;
> + goto clk_disable;
> + }
> +
> + ret = devm_request_irq(snfi->dev, irq, mtk_snfi_irq,
> + 0, "mtk-snfi", snfi);
> + if (ret) {
> + dev_err(snfi->dev, "failed to request snfi irq\n");
> + goto clk_disable;
> + }
> +
> + ret = dma_set_mask(snfi->dev, DMA_BIT_MASK(32));
> + if (ret) {
> + dev_err(snfi->dev, "failed to set dma mask\n");
> + goto clk_disable;
> + }
> +
> + snfi->tx_buf = kzalloc(SNFI_GPRAM_MAX_LEN, GFP_KERNEL);
> + if (!snfi->tx_buf) {
> + ret = -ENOMEM;
> + goto clk_disable;
> + }
> +
> + ctlr->dev.of_node = np;
> + ctlr->mem_ops = &mtk_snfi_ops;
> + ctlr->mode_bits = SPI_RX_DUAL | SPI_RX_QUAD | SPI_TX_QUAD;
> + ctlr->auto_runtime_pm = true;
> +
> + dev_set_drvdata(snfi->dev, ctlr);
> +
> + ret = mtk_snfi_init(snfi);
> + if (ret) {
> + dev_err(snfi->dev, "failed to init snfi\n");
> + goto free_buf;
> + }
> +
> + ret = mtk_snfi_ecc_probe(pdev, snfi);
> + if (ret) {
> + dev_warn(snfi->dev, "ECC engine not available\n");
> + goto free_buf;
> + }
> +
> + pm_runtime_enable(snfi->dev);
> +
> + ret = devm_spi_register_master(snfi->dev, ctlr);
> + if (ret) {
> + dev_err(snfi->dev, "failed to register spi master\n");
> + goto disable_pm_runtime;
> + }
> +
> + return 0;
> +
> +disable_pm_runtime:
> + pm_runtime_disable(snfi->dev);
> +
> +free_buf:
> + kfree(snfi->tx_buf);
> +
> +clk_disable:
> + mtk_snfi_disable_clk(snfi);
> +
> +err_put_master:
> + spi_master_put(ctlr);
> +
> + return ret;
> +}
> +
> +static int mtk_snfi_remove(struct platform_device *pdev)
> +{
> + struct spi_controller *ctlr = dev_get_drvdata(&pdev->dev);
> + struct mtk_snfi *snfi = spi_controller_get_devdata(ctlr);
> + struct nand_ecc_engine *eng = snfi->engine;
> +
> + pm_runtime_disable(snfi->dev);
> + nand_ecc_unregister_on_host_hw_engine(eng);
> + kfree(snfi->tx_buf);
> + spi_master_put(ctlr);
> +
> + return 0;
> +}
> +
> +#ifdef CONFIG_PM
> +static int mtk_snfi_runtime_suspend(struct device *dev)
> +{
> + struct spi_controller *ctlr = dev_get_drvdata(dev);
> + struct mtk_snfi *snfi = spi_controller_get_devdata(ctlr);
> +
> + mtk_snfi_disable_clk(snfi);
> +
> + return 0;
> +}
> +
> +static int mtk_snfi_runtime_resume(struct device *dev)
> +{
> + struct spi_controller *ctlr = dev_get_drvdata(dev);
> + struct mtk_snfi *snfi = spi_controller_get_devdata(ctlr);
> + int ret;
> +
> + ret = mtk_snfi_enable_clk(dev, snfi);
> + if (ret)
> + return ret;
> +
> + ret = mtk_snfi_init(snfi);
> + if (ret)
> + dev_err(dev, "failed to init snfi\n");
> +
> + return ret;
> +}
> +#endif /* CONFIG_PM */
> +
> +static const struct dev_pm_ops mtk_snfi_pm_ops = {
> + SET_RUNTIME_PM_OPS(mtk_snfi_runtime_suspend,
> + mtk_snfi_runtime_resume, NULL)
> + SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
> + pm_runtime_force_resume)
> +};
> +
> +static struct platform_driver mtk_snfi_driver = {
> + .driver = {
> + .name = "mtk-snfi",
> + .of_match_table = mtk_snfi_id_table,
> + .pm = &mtk_snfi_pm_ops,
> + },
> + .probe = mtk_snfi_probe,
> + .remove = mtk_snfi_remove,
> +};
> +
> +module_platform_driver(mtk_snfi_driver);
> +
> +MODULE_LICENSE("GPL v2");
> +MODULE_AUTHOR("Xiangsheng Hou <xiangsheng.hou at mediatek.com>");
> +MODULE_DESCRIPTION("Mediatek SPI Nand Flash interface driver");
Otherwise looks good, I believe you can drop the RFC prefix now.
Thanks,
Miquèl