[RFC,v3 3/5] spi: add Mediatek SPI Nand controller driver

Miquel Raynal miquel.raynal at bootlin.com
Tue Nov 9 03:46:47 PST 2021


Hi Xiangsheng,

xiangsheng.hou at mediatek.com wrote on Fri, 22 Oct 2021 10:40:19 +0800:

> In this version, the SPI driver works with the MTK pipelined
> HW ECC engine.
> 
> Signed-off-by: Xiangsheng Hou <xiangsheng.hou at mediatek.com>
> ---

[...]

> +/* ECC wrapper */
> +static struct mtk_snfi *mtk_nand_to_spi(struct nand_device *nand)
> +{
> +	struct device *dev = nand->ecc.engine->dev;
> +	struct spi_master *master = dev_get_drvdata(dev);
> +	struct mtk_snfi *snfi = spi_master_get_devdata(master);
> +
> +	return snfi;
> +}
> +
> +static int mtk_snfi_set_spare_per_sector(struct nand_device *nand,
> +				struct mtk_snfi *snfi, u32 *sps, u32 *idx)
> +{
> +	struct mtk_ecc_engine *eng = nand->ecc.ctx.priv;
> +	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
> +	const u8 *spare = snfi->caps->spare_size;
> +	u32 i, closest_spare = 0;
> +
> +	eng->nsteps = nand->memorg.pagesize / conf->step_size;
> +	*sps = nand->memorg.oobsize / eng->nsteps;
> +
> +	if (conf->step_size == 1024)
> +		*sps >>= 1;
> +
> +	if ((*sps < snfi->fdm_size) || (*sps < MTK_NFI_MIN_SPARE))
> +		return -EINVAL;
> +
> +	for (i = 0; i < snfi->caps->num_spare_size; i++) {
> +		if (*sps >= spare[i] && spare[i] >= spare[closest_spare]) {
> +			closest_spare = i;
> +			if (*sps == spare[i])
> +				break;
> +		}
> +	}
> +
> +	*sps = spare[closest_spare];
> +	*idx = closest_spare;
> +
> +	if (conf->step_size == 1024)
> +		*sps <<= 1;
> +
> +	return 0;
> +}
> +
> +static int mtk_snfi_ecc_init(struct nand_device *nand)
> +{
> +	struct nand_ecc_props *reqs = &nand->ecc.requirements;
> +	struct nand_ecc_props *user = &nand->ecc.user_conf;
> +	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
> +	struct mtk_snfi *snfi = mtk_nand_to_spi(nand);
> +	struct mtk_ecc_engine *eng;
> +	u32 spare, idx;
> +	int ret;
> +
> +	eng = kzalloc(sizeof(*eng), GFP_KERNEL);
> +	if (!eng)
> +		return -ENOMEM;
> +
> +	nand->ecc.ctx.priv = eng;
> +	nand->ecc.engine->priv = eng;
> +
> +	/* Configure the correction depending on the NAND device topology */
> +	if (user->step_size && user->strength) {
> +		conf->step_size = user->step_size;
> +		conf->strength = user->strength;
> +	} else if (reqs->step_size && reqs->strength) {
> +		conf->step_size = reqs->step_size;
> +		conf->strength = reqs->strength;
> +	}
> +
> +	/*
> +	 * Align eccstrength and eccsize.
> +	 * The MTK HW ECC engine only supports 512 and 1024 byte eccsize.
> +	 */
> +	if (conf->step_size < 1024) {
> +		if (nand->memorg.pagesize > 512 &&
> +			    snfi->caps->max_sector_size > 512) {
> +			conf->step_size = 1024;
> +			conf->strength <<= 1;
> +		} else {
> +			conf->step_size = 512;
> +		}
> +	} else {
> +		conf->step_size = 1024;
> +	}
> +
> +	ret = mtk_snfi_set_spare_per_sector(nand, snfi, &spare, &idx);
> +
> +	/* These will be used by the snfi driver */
> +	snfi->ecc.page_size = nand->memorg.pagesize;
> +	snfi->ecc.spare_per_sector = spare;
> +	snfi->ecc.spare_idx = idx;
> +	snfi->ecc.sectors = nand->memorg.pagesize / conf->step_size;
> +
> +	/* These will be used by HW ECC engine */
> +	eng->oob_per_sector = spare;
> +	eng->nsteps = snfi->ecc.sectors;

I believe most of this function should move into mtk_ecc.c.
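
For instance, something along these lines (just a rough sketch with a
made-up mtk_ecc_pick_conf() helper living in mtk_ecc.c, the exact split
is up to you):

	/*
	 * mtk_ecc.c: let the engine pick the correction settings so that
	 * every controller using the pipelined engine shares this logic
	 * instead of duplicating it.
	 */
	void mtk_ecc_pick_conf(struct nand_device *nand, u32 max_sector_size)
	{
		struct nand_ecc_props *reqs = &nand->ecc.requirements;
		struct nand_ecc_props *user = &nand->ecc.user_conf;
		struct nand_ecc_props *conf = &nand->ecc.ctx.conf;

		/* User settings take precedence over chip requirements */
		if (user->step_size && user->strength) {
			conf->step_size = user->step_size;
			conf->strength = user->strength;
		} else if (reqs->step_size && reqs->strength) {
			conf->step_size = reqs->step_size;
			conf->strength = reqs->strength;
		}

		/* The engine only supports 512 and 1024 byte steps */
		if (conf->step_size < 1024) {
			if (nand->memorg.pagesize > 512 &&
			    max_sector_size > 512) {
				conf->step_size = 1024;
				conf->strength <<= 1;
			} else {
				conf->step_size = 512;
			}
		} else {
			conf->step_size = 1024;
		}
	}

The controller driver would then only derive its spare/FDM register
layout from nand->ecc.ctx.conf instead of duplicating the selection.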

> +
> +	return ret;
> +}
> +
> +static int mtk_snfi_ecc_init_ctx(struct nand_device *nand)
> +{
> +	struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> +	int ret;
> +
> +	ret = mtk_snfi_ecc_init(nand);
> +	if (ret) {
> +		pr_info("mtk snfi ecc init fail!\n");
> +		return ret;
> +	}
> +
> +	return ops->init_ctx(nand);
> +}
> +
> +static void mtk_snfi_ecc_cleanup_ctx(struct nand_device *nand)
> +{
> +	struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> +
> +	ops->cleanup_ctx(nand);
> +}
> +
> +static int mtk_snfi_prepare_for_ecc(struct nand_device *nand,
> +					struct mtk_snfi *snfi)
> +{
> +	struct nand_ecc_props *conf = &nand->ecc.ctx.conf;
> +	struct mtk_ecc_engine *eng = nand->ecc.ctx.priv;
> +	u32 val;
> +
> +	switch (nand->memorg.pagesize) {
> +	case 512:
> +		val = PAGEFMT_512_2K | PAGEFMT_SEC_SEL_512;
> +		break;
> +	case KB(2):
> +		if (conf->step_size == 512)
> +			val = PAGEFMT_2K_4K | PAGEFMT_SEC_SEL_512;
> +		else
> +			val = PAGEFMT_512_2K;
> +		break;
> +	case KB(4):
> +		if (conf->step_size == 512)
> +			val = PAGEFMT_4K_8K | PAGEFMT_SEC_SEL_512;
> +		else
> +			val = PAGEFMT_2K_4K;
> +		break;
> +	case KB(8):
> +		if (conf->step_size == 512)
> +			val = PAGEFMT_8K_16K | PAGEFMT_SEC_SEL_512;
> +		else
> +			val = PAGEFMT_4K_8K;
> +		break;
> +	case KB(16):
> +		val = PAGEFMT_8K_16K;
> +		break;
> +	default:
> +		dev_err(snfi->dev, "invalid page len: %d\n",
> +				nand->memorg.pagesize);
> +		return -EINVAL;
> +	}
> +
> +	snfi->fdm_size = eng->fdm_size;
> +	snfi->fdm_ecc_size = eng->fdm_ecc_size;
> +
> +	val |= snfi->ecc.spare_idx << PAGEFMT_SPARE_SHIFT;
> +	val |= snfi->fdm_size << PAGEFMT_FDM_SHIFT;
> +	val |= snfi->fdm_ecc_size << PAGEFMT_FDM_ECC_SHIFT;
> +	writel(val, snfi->regs + NFI_PAGEFMT);
> +
> +	return 0;
> +}
> +
> +static int mtk_snfi_ecc_prepare_io_req(struct nand_device *nand,
> +						struct nand_page_io_req *req)
> +{
> +	struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> +	struct mtk_snfi *snfi = mtk_nand_to_spi(nand);
> +	int ret;
> +
> +	snfi->ecc.enabled = (req->mode != MTD_OPS_RAW);

Shouldn't you check snfi->ecc.enabled before calling
mtk_snfi_prepare_for_ecc()?
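
i.e. something like (untested, and assuming the NFI_PAGEFMT setup is
only needed when correction is actually used):

	snfi->ecc.enabled = (req->mode != MTD_OPS_RAW);
	if (snfi->ecc.enabled) {
		ret = mtk_snfi_prepare_for_ecc(nand, snfi);
		if (ret)
			return ret;
	}

	return ops->prepare_io_req(nand, req);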

> +	ret = mtk_snfi_prepare_for_ecc(nand, snfi);
> +	if (ret)
> +		return ret;
> +
> +	return ops->prepare_io_req(nand, req);
> +}
> +
> +static int mtk_snfi_ecc_finish_io_req(struct nand_device *nand,
> +						struct nand_page_io_req *req)
> +{
> +	struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
> +	struct mtk_ecc_engine *eng = nand->ecc.ctx.priv;
> +	struct mtk_snfi *snfi = mtk_nand_to_spi(nand);
> +
> +	if (snfi->ecc.enabled) {

I am currently looking at a better way to keep this information,
one that is safer against concurrent accesses. So far parallel
operations are not supported, so this is safe, but we might improve
the core in a little while and I don't want this to become an issue.
My next iteration on the Macronix engine will solve this.
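
In the meantime, one way to avoid the controller-wide flag could be to
re-derive the state from the request itself, e.g. (rough sketch, not
necessarily what the core rework will end up looking like):

	static int mtk_snfi_ecc_finish_io_req(struct nand_device *nand,
					      struct nand_page_io_req *req)
	{
		struct nand_ecc_engine_ops *ops = mtk_ecc_get_pipelined_ops();
		struct mtk_ecc_engine *eng = nand->ecc.ctx.priv;
		struct mtk_snfi *snfi = mtk_nand_to_spi(nand);

		/* The request mode already tells whether correction was on */
		if (req->mode != MTD_OPS_RAW)
			eng->read_empty = readl(snfi->regs + NFI_STA) &
					  STA_EMP_PAGE;

		return ops->finish_io_req(nand, req);
	}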

> +		eng->read_empty =
> +			readl(snfi->regs + NFI_STA) & STA_EMP_PAGE;
> +		snfi->ecc.enabled = false;
> +	}
> +
> +	return ops->finish_io_req(nand, req);
> +}
> +
> +static struct nand_ecc_engine_ops mtk_snfi_ecc_engine_pipelined_ops = {
> +	.init_ctx = mtk_snfi_ecc_init_ctx,
> +	.cleanup_ctx = mtk_snfi_ecc_cleanup_ctx,
> +	.prepare_io_req = mtk_snfi_ecc_prepare_io_req,
> +	.finish_io_req = mtk_snfi_ecc_finish_io_req,
> +};
> +
> +static int mtk_snfi_ecc_probe(struct platform_device *pdev,
> +				struct mtk_snfi *snfi)
> +{
> +	struct nand_ecc_engine *ecceng;
> +
> +	if (!mtk_ecc_get_pipelined_ops())
> +		return -EOPNOTSUPP;
> +
> +	ecceng = devm_kzalloc(&pdev->dev, sizeof(*ecceng), GFP_KERNEL);
> +	if (!ecceng)
> +		return -ENOMEM;
> +
> +	ecceng->dev = &pdev->dev;
> +	ecceng->ops = &mtk_snfi_ecc_engine_pipelined_ops;
> +
> +	nand_ecc_register_on_host_hw_engine(ecceng);
> +	snfi->ecc.engine = ecceng;
> +
> +	return 0;
> +}
> +

Thanks,
Miquèl


