[PATCH 20/26] mtd: nand: Update to Linux-5.9
Sascha Hauer
s.hauer at pengutronix.de
Fri Nov 6 08:38:54 EST 2020
This updates the barebox NAND layer and parts of the mtd layer to
Linux-5.9.
This patch is huge, but the barebox NAND layer has diverged so far from
the Linux NAND layer that a step-by-step update would have taken ages.
Unlike Linux, barebox has functions to mark a block as good; this
feature has been preserved. barebox also used to make NAND write support
optional; that feature is lost with this update for the sake of better
compatibility with the Linux NAND layer.
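For reference, marking a block good stays reachable through the mtd core
(mtd_block_markgood() in drivers/mtd/core.c). A minimal, hypothetical
caller could look like the sketch below; the helper name and the error
message are illustrations only and are not part of this patch:

    #include <common.h>
    #include <linux/mtd/mtd.h>

    /*
     * Sketch only: clear the bad block marker of the block containing
     * @ofs. Assumes @mtd points to a valid, writable NAND device.
     */
    static int example_mark_block_good(struct mtd_info *mtd, loff_t ofs)
    {
            int ret;

            ret = mtd_block_markgood(mtd, ofs);
            if (ret)
                    pr_err("marking block at 0x%08llx good failed: %d\n",
                           (unsigned long long)ofs, ret);

            return ret;
    }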
This patch has been tested with:
- GPMI aka nand_mxs on i.MX6
- nand_imx on i.MX25
- nand_omap_gpmc on AM335x
- atmel_nand on Atmel sama5d3
- nand_denali on SoCFPGA
Currently untested:
- nand_orion
- nand_mrvl_nfc
- nand_s3c24xx
The nand_denali driver was tested together with its update to Linux-5.9,
which follows in the next patch.
I could only test the drivers with the NAND chips found on my boards, so
there is still plenty of room for regressions, especially since the NAND
drivers themselves are mostly not updated. With the NAND layer now up to
date with Linux, it should hopefully be easy to update the drivers to
their Linux counterparts as well if necessary.
Signed-off-by: Sascha Hauer <s.hauer at pengutronix.de>
---
arch/arm/boards/sama5d3_xplained/init.c | 1 +
arch/arm/configs/nhk8815_defconfig | 2 +-
arch/arm/mach-imx/external-nand-boot.c | 1 +
arch/arm/mach-imx/xload-imx-nand.c | 1 +
commands/nand-bitflip.c | 21 +-
drivers/bus/omap-gpmc.c | 1 +
drivers/mtd/Kconfig | 2 -
drivers/mtd/core.c | 557 +-
drivers/mtd/mtdraw.c | 1 -
drivers/mtd/nand/Kconfig | 53 +-
drivers/mtd/nand/Makefile | 11 +-
drivers/mtd/nand/atmel_nand.c | 95 +-
drivers/mtd/nand/bbt.c | 132 +
drivers/mtd/nand/core.c | 275 +
drivers/mtd/nand/denali.h | 1 +
drivers/mtd/nand/internals.h | 170 +
drivers/mtd/nand/nand_amd.c | 53 +
drivers/mtd/nand/nand_base.c | 7755 ++++++++++++++---------
drivers/mtd/nand/nand_bbt.c | 317 +-
drivers/mtd/nand/nand_bch.c | 88 +-
drivers/mtd/nand/nand_denali.c | 67 +-
drivers/mtd/nand/nand_ecc.c | 573 +-
drivers/mtd/nand/nand_esmt.c | 54 +
drivers/mtd/nand/nand_hynix.c | 716 +++
drivers/mtd/nand/nand_ids.c | 258 +-
drivers/mtd/nand/nand_imx.c | 46 +-
drivers/mtd/nand/nand_jedec.c | 135 +
drivers/mtd/nand/nand_legacy.c | 629 ++
drivers/mtd/nand/nand_macronix.c | 334 +
drivers/mtd/nand/nand_micron.c | 595 ++
drivers/mtd/nand/nand_mrvl_nfc.c | 33 +-
drivers/mtd/nand/nand_mxs.c | 37 +-
drivers/mtd/nand/nand_omap_gpmc.c | 60 +-
drivers/mtd/nand/nand_onfi.c | 326 +
drivers/mtd/nand/nand_orion.c | 3 +-
drivers/mtd/nand/nand_s3c24xx.c | 7 +-
drivers/mtd/nand/nand_samsung.c | 135 +
drivers/mtd/nand/nand_timings.c | 574 +-
drivers/mtd/nand/nand_toshiba.c | 274 +
drivers/mtd/nand/nomadik_nand.c | 7 +-
drivers/mtd/partition.c | 1 +
drivers/mtd/peb.c | 1 -
drivers/of/of_mtd.c | 3 +
include/linux/mtd/jedec.h | 91 +
include/linux/mtd/mtd-abi.h | 1 +
include/linux/mtd/mtd.h | 98 +-
include/linux/mtd/nand.h | 1354 ++--
include/linux/mtd/nand_bch.h | 22 +-
include/linux/mtd/nand_ecc.h | 35 +-
include/linux/mtd/onfi.h | 178 +
include/linux/mtd/partitions.h | 115 +
include/linux/mtd/rawnand.h | 1464 +++++
52 files changed, 13209 insertions(+), 4554 deletions(-)
create mode 100644 drivers/mtd/nand/bbt.c
create mode 100644 drivers/mtd/nand/core.c
create mode 100644 drivers/mtd/nand/internals.h
create mode 100644 drivers/mtd/nand/nand_amd.c
create mode 100644 drivers/mtd/nand/nand_esmt.c
create mode 100644 drivers/mtd/nand/nand_hynix.c
create mode 100644 drivers/mtd/nand/nand_jedec.c
create mode 100644 drivers/mtd/nand/nand_legacy.c
create mode 100644 drivers/mtd/nand/nand_macronix.c
create mode 100644 drivers/mtd/nand/nand_micron.c
create mode 100644 drivers/mtd/nand/nand_onfi.c
create mode 100644 drivers/mtd/nand/nand_samsung.c
create mode 100644 drivers/mtd/nand/nand_toshiba.c
create mode 100644 include/linux/mtd/jedec.h
create mode 100644 include/linux/mtd/onfi.h
create mode 100644 include/linux/mtd/partitions.h
create mode 100644 include/linux/mtd/rawnand.h
diff --git a/arch/arm/boards/sama5d3_xplained/init.c b/arch/arm/boards/sama5d3_xplained/init.c
index ccddd01dea..b648d71722 100644
--- a/arch/arm/boards/sama5d3_xplained/init.c
+++ b/arch/arm/boards/sama5d3_xplained/init.c
@@ -23,6 +23,7 @@
#include <mach/at91_pmc.h>
#include <mach/at91_rstc.h>
#include <mach/at91sam9x5_matrix.h>
+#include <linux/mtd/rawnand.h>
#include <readkey.h>
#include <poller.h>
#include <linux/clk.h>
diff --git a/arch/arm/configs/nhk8815_defconfig b/arch/arm/configs/nhk8815_defconfig
index a29e38fad9..bc19301249 100644
--- a/arch/arm/configs/nhk8815_defconfig
+++ b/arch/arm/configs/nhk8815_defconfig
@@ -44,7 +44,7 @@ CONFIG_DRIVER_NET_SMC91111=y
# CONFIG_SPI is not set
CONFIG_MTD=y
CONFIG_NAND=y
-CONFIG_MTD_NAND_ECC_SMC=y
+CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC=y
CONFIG_MTD_NAND_NOMADIK=y
CONFIG_FS_CRAMFS=y
CONFIG_FS_TFTP=y
diff --git a/arch/arm/mach-imx/external-nand-boot.c b/arch/arm/mach-imx/external-nand-boot.c
index 123589c071..893bfdb77f 100644
--- a/arch/arm/mach-imx/external-nand-boot.c
+++ b/arch/arm/mach-imx/external-nand-boot.c
@@ -15,6 +15,7 @@
#include <init.h>
#include <io.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <asm/cache.h>
#include <asm/sections.h>
#include <asm/barebox-arm.h>
diff --git a/arch/arm/mach-imx/xload-imx-nand.c b/arch/arm/mach-imx/xload-imx-nand.c
index 22e41fac77..ff54941a0c 100644
--- a/arch/arm/mach-imx/xload-imx-nand.c
+++ b/arch/arm/mach-imx/xload-imx-nand.c
@@ -15,6 +15,7 @@
#include <common.h>
#include <init.h>
#include <io.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <mach/imx-nand.h>
#include <mach/generic.h>
diff --git a/commands/nand-bitflip.c b/commands/nand-bitflip.c
index 7335f77379..217a243f66 100644
--- a/commands/nand-bitflip.c
+++ b/commands/nand-bitflip.c
@@ -10,6 +10,25 @@
#include <linux/mtd/mtd.h>
#include <mtd/mtd-peb.h>
+static int bitflip_mtd_read(struct mtd_info *mtd, loff_t from, size_t len,
+ size_t *retlen, void *buf)
+{
+ int ret_code;
+
+ if (mtd->_read_oob) {
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+
+ ret_code = mtd->_read_oob(mtd, from, &ops);
+ } else {
+ ret_code = mtd->_read(mtd, from, len, retlen, buf);
+ }
+
+ return ret_code;
+}
+
static int do_nand_bitflip(int argc, char *argv[])
{
int opt, ret, fd;
@@ -77,7 +96,7 @@ static int do_nand_bitflip(int argc, char *argv[])
buf = xzalloc(meminfo.writesize);
roffset = (loff_t)block * meminfo.mtd->erasesize + offset;
- ret = meminfo.mtd->read(meminfo.mtd, roffset, meminfo.writesize, &r, buf);
+ ret = bitflip_mtd_read(meminfo.mtd, roffset, meminfo.writesize, &r, buf);
if (ret > 0) {
printf("page at block %d, offset 0x%08llx has %d bitflips%s\n",
block, offset, ret,
diff --git a/drivers/bus/omap-gpmc.c b/drivers/bus/omap-gpmc.c
index 8fd7a91740..cd5b6d5e03 100644
--- a/drivers/bus/omap-gpmc.c
+++ b/drivers/bus/omap-gpmc.c
@@ -18,6 +18,7 @@
#include <of_mtd.h>
#include <linux/clk.h>
#include <linux/err.h>
+#include <linux/mtd/rawnand.h>
#include <mach/gpmc_nand.h>
#include <mach/gpmc.h>
diff --git a/drivers/mtd/Kconfig b/drivers/mtd/Kconfig
index db9c287b45..36dbe9f825 100644
--- a/drivers/mtd/Kconfig
+++ b/drivers/mtd/Kconfig
@@ -10,13 +10,11 @@ config MTD_WRITE
config MTD_OOB_DEVICE
bool
- select NAND_READ_OOB if NAND
default y
prompt "Create a device for reading the OOB data"
config MTD_RAW_DEVICE
bool
- select NAND_READ_OOB if NAND
default n
prompt "mtdraw device to read/write both data+oob"
diff --git a/drivers/mtd/core.c b/drivers/mtd/core.c
index 8d018fde57..22ed8d2d54 100644
--- a/drivers/mtd/core.c
+++ b/drivers/mtd/core.c
@@ -15,6 +15,7 @@
#include <common.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/partitions.h>
#include <mtd/mtd-peb.h>
#include <mtd/ubi-user.h>
#include <cmdlinepart.h>
@@ -252,9 +253,7 @@ int mtd_ioctl(struct cdev *cdev, int request, void *buf)
int ret = 0;
struct mtd_info *mtd = cdev->priv;
struct mtd_info_user *user = buf;
-#if (defined(CONFIG_NAND_ECC_HW) || defined(CONFIG_NAND_ECC_SOFT))
struct mtd_ecc_stats *ecc = buf;
-#endif
struct region_info_user *reg = buf;
#ifdef CONFIG_MTD_WRITE
struct erase_info_user *ei = buf;
@@ -292,14 +291,12 @@ int mtd_ioctl(struct cdev *cdev, int request, void *buf)
user->ecctype = -1;
user->eccsize = 0;
break;
-#if (defined(CONFIG_NAND_ECC_HW) || defined(CONFIG_NAND_ECC_SOFT))
case ECCGETSTATS:
ecc->corrected = mtd->ecc_stats.corrected;
ecc->failed = mtd->ecc_stats.failed;
ecc->badblocks = mtd->ecc_stats.badblocks;
ecc->bbtblocks = mtd->ecc_stats.bbtblocks;
break;
-#endif
case MEMGETREGIONINFO:
if (cdev->mtd) {
unsigned long size = cdev->size;
@@ -377,42 +374,66 @@ int mtd_block_markgood(struct mtd_info *mtd, loff_t ofs)
}
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
- u_char *buf)
+ u_char *buf)
{
- int ret_code;
- *retlen = 0;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = buf,
+ };
+ int ret;
if (from < 0 || from >= mtd->size || len > mtd->size - from)
return -EINVAL;
if (!len)
return 0;
- /*
- * In the absence of an error, drivers return a non-negative integer
- * representing the maximum number of bitflips that were corrected on
- * any one ecc region (if applicable; zero otherwise).
- */
- ret_code = mtd->_read(mtd, from, len, retlen, buf);
- if (unlikely(ret_code < 0))
- return ret_code;
- if (mtd->ecc_strength == 0)
- return 0; /* device lacks ecc */
- return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
+ ret = mtd_read_oob(mtd, from, &ops);
+ *retlen = ops.retlen;
+
+ return ret;
+}
+
+int mtd_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ int ret;
+
+ ops->retlen = ops->oobretlen = 0;
+
+ if (!(mtd->flags & MTD_WRITEABLE))
+ return -EROFS;
+
+ /* Check the validity of a potential fallback on mtd->_write */
+ if (!mtd->_write_oob && (!mtd->_write || ops->oobbuf))
+ return -EOPNOTSUPP;
+
+ if (mtd->_write_oob)
+ ret = mtd->_write_oob(mtd, to, ops);
+ else
+ ret = mtd->_write(mtd, to, ops->len, &ops->retlen,
+ ops->datbuf);
+
+ return ret;
}
int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
- const u_char *buf)
+ const u_char *buf)
{
- *retlen = 0;
+ struct mtd_oob_ops ops = {
+ .len = len,
+ .datbuf = (u8 *)buf,
+ };
+ int ret;
if (to < 0 || to >= mtd->size || len > mtd->size - to)
return -EINVAL;
- if (!mtd->_write || !(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
if (!len)
return 0;
- return mtd->_write(mtd, to, len, retlen, buf);
+ ret = mtd_write_oob(mtd, to, &ops);
+ *retlen = ops.retlen;
+
+ return ret;
}
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr)
@@ -433,15 +454,23 @@ int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops)
int ret_code;
ops->retlen = ops->oobretlen = 0;
- if (!mtd->_read_oob)
+
+ /* Check the validity of a potential fallback on mtd->_read */
+ if (!mtd->_read_oob && (!mtd->_read || ops->oobbuf))
return -EOPNOTSUPP;
+
/*
* In cases where ops->datbuf != NULL, mtd->_read_oob() has semantics
* similar to mtd->_read(), returning a non-negative integer
* representing max bitflips. In other cases, mtd->_read_oob() may
* return -EUCLEAN. In all cases, perform similar logic to mtd_read().
*/
- ret_code = mtd->_read_oob(mtd, from, ops);
+ if (mtd->_read_oob)
+ ret_code = mtd->_read_oob(mtd, from, ops);
+ else
+ ret_code = mtd->_read(mtd, from, ops->len, &ops->retlen,
+ ops->datbuf);
+
if (unlikely(ret_code < 0))
return ret_code;
if (mtd->ecc_strength == 0)
@@ -742,3 +771,481 @@ const char *mtd_type_str(struct mtd_info *mtd)
return "unknown";
}
}
+
+/**
+ * mtd_ooblayout_ecc - Get the OOB region definition of a specific ECC section
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ * bytes stored in a single contiguous section, or one section
+ * per ECC chunk (and sometime several sections for a single ECC
+ * ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ * information
+ *
+ * This function returns ECC section information in the OOB area. If you want
+ * to get all the ECC bytes information, then you should call
+ * mtd_ooblayout_ecc(mtd, section++, oobecc) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ memset(oobecc, 0, sizeof(*oobecc));
+
+ if (section < 0)
+ return -EINVAL;
+
+ if (!mtd->ooblayout || !mtd->ooblayout->ecc)
+ return -ENOTSUPP;
+
+ return mtd->ooblayout->ecc(mtd, section, oobecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_ecc);
+
+/**
+ * mtd_ooblayout_free - Get the OOB region definition of a specific free
+ * section
+ * @mtd: MTD device structure
+ * @section: Free section you are interested in. Depending on the layout
+ * you may have all the free bytes stored in a single contiguous
+ * section, or one section per ECC chunk plus an extra section
+ * for the remaining bytes (or other funky layout).
+ * @oobfree: OOB region struct filled with the appropriate free position
+ * information
+ *
+ * This function returns free bytes position in the OOB area. If you want
+ * to get all the free bytes information, then you should call
+ * mtd_ooblayout_free(mtd, section++, oobfree) until it returns -ERANGE.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ memset(oobfree, 0, sizeof(*oobfree));
+
+ if (section < 0)
+ return -EINVAL;
+
+ if (!mtd->ooblayout || !mtd->ooblayout->free)
+ return -ENOTSUPP;
+
+ return mtd->ooblayout->free(mtd, section, oobfree);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_free);
+
+/**
+ * mtd_ooblayout_find_region - Find the region attached to a specific byte
+ * @mtd: mtd info structure
+ * @byte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: used to retrieve the ECC position
+ * @iter: iterator function. Should be either mtd_ooblayout_free or
+ * mtd_ooblayout_ecc depending on the region type you're searching for
+ *
+ * This function returns the section id and oobregion information of a
+ * specific byte. For example, say you want to know where the 4th ECC byte is
+ * stored, you'll use:
+ *
+ * mtd_ooblayout_find_region(mtd, 3, &section, &oobregion, mtd_ooblayout_ecc);
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_find_region(struct mtd_info *mtd, int byte,
+ int *sectionp, struct mtd_oob_region *oobregion,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ int pos = 0, ret, section = 0;
+
+ memset(oobregion, 0, sizeof(*oobregion));
+
+ while (1) {
+ ret = iter(mtd, section, oobregion);
+ if (ret)
+ return ret;
+
+ if (pos + oobregion->length > byte)
+ break;
+
+ pos += oobregion->length;
+ section++;
+ }
+
+ /*
+ * Adjust region info to make it start at the beginning at the
+ * 'start' ECC byte.
+ */
+ oobregion->offset += byte - pos;
+ oobregion->length -= byte - pos;
+ *sectionp = section;
+
+ return 0;
+}
+
+/**
+ * mtd_ooblayout_find_eccregion - Find the ECC region attached to a specific
+ * ECC byte
+ * @mtd: mtd info structure
+ * @eccbyte: the byte we are searching for
+ * @sectionp: pointer where the section id will be stored
+ * @oobregion: OOB region information
+ *
+ * Works like mtd_ooblayout_find_region() except it searches for a specific ECC
+ * byte.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion)
+{
+ return mtd_ooblayout_find_region(mtd, eccbyte, section, oobregion,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_find_eccregion);
+
+/**
+ * mtd_ooblayout_get_bytes - Extract OOB bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @buf: destination buffer to store OOB bytes
+ * @oobbuf: OOB buffer
+ * @start: first byte to retrieve
+ * @nbytes: number of bytes to retrieve
+ * @iter: section iterator
+ *
+ * Extract bytes attached to a specific category (ECC or free)
+ * from the OOB buffer and copy them into buf.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_get_bytes(struct mtd_info *mtd, u8 *buf,
+ const u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = min_t(int, nbytes, oobregion.length);
+ memcpy(buf, oobbuf + oobregion.offset, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_set_bytes - put OOB bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @buf: source buffer to get OOB bytes from
+ * @oobbuf: OOB buffer
+ * @start: first OOB byte to set
+ * @nbytes: number of OOB bytes to set
+ * @iter: section iterator
+ *
+ * Fill the OOB buffer with data provided in buf. The category (ECC or free)
+ * is selected by passing the appropriate iterator.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_set_bytes(struct mtd_info *mtd, const u8 *buf,
+ u8 *oobbuf, int start, int nbytes,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section, ret;
+
+ ret = mtd_ooblayout_find_region(mtd, start, &section,
+ &oobregion, iter);
+
+ while (!ret) {
+ int cnt;
+
+ cnt = min_t(int, nbytes, oobregion.length);
+ memcpy(oobbuf + oobregion.offset, buf, cnt);
+ buf += cnt;
+ nbytes -= cnt;
+
+ if (!nbytes)
+ break;
+
+ ret = iter(mtd, ++section, &oobregion);
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_count_bytes - count the number of bytes in a OOB category
+ * @mtd: mtd info structure
+ * @iter: category iterator
+ *
+ * Count the number of bytes in a given category.
+ *
+ * Returns a positive value on success, a negative error code otherwise.
+ */
+static int mtd_ooblayout_count_bytes(struct mtd_info *mtd,
+ int (*iter)(struct mtd_info *,
+ int section,
+ struct mtd_oob_region *oobregion))
+{
+ struct mtd_oob_region oobregion;
+ int section = 0, ret, nbytes = 0;
+
+ while (1) {
+ ret = iter(mtd, section++, &oobregion);
+ if (ret) {
+ if (ret == -ERANGE)
+ ret = nbytes;
+ break;
+ }
+
+ nbytes += oobregion.length;
+ }
+
+ return ret;
+}
+
+/**
+ * mtd_ooblayout_get_eccbytes - extract ECC bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_eccbytes);
+
+/**
+ * mtd_ooblayout_set_eccbytes - set ECC bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @eccbuf: source buffer to get ECC bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, eccbuf, oobbuf, start, nbytes,
+ mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_eccbytes);
+
+/**
+ * mtd_ooblayout_get_databytes - extract data bytes from the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: destination buffer to store ECC bytes
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to retrieve
+ * @nbytes: number of ECC bytes to retrieve
+ *
+ * Works like mtd_ooblayout_get_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_get_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_get_databytes);
+
+/**
+ * mtd_ooblayout_set_databytes - set data bytes into the oob buffer
+ * @mtd: mtd info structure
+ * @databuf: source buffer to get data bytes from
+ * @oobbuf: OOB buffer
+ * @start: first ECC byte to set
+ * @nbytes: number of ECC bytes to set
+ *
+ * Works like mtd_ooblayout_set_bytes(), except it acts on free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes)
+{
+ return mtd_ooblayout_set_bytes(mtd, databuf, oobbuf, start, nbytes,
+ mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_set_databytes);
+
+/**
+ * mtd_ooblayout_count_freebytes - count the number of free bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it count free bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_free);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_freebytes);
+
+/**
+ * mtd_ooblayout_count_eccbytes - count the number of ECC bytes in OOB
+ * @mtd: mtd info structure
+ *
+ * Works like mtd_ooblayout_count_bytes(), except it count ECC bytes.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd)
+{
+ return mtd_ooblayout_count_bytes(mtd, mtd_ooblayout_ecc);
+}
+EXPORT_SYMBOL_GPL(mtd_ooblayout_count_eccbytes);
+
+
+/**
+ * mtd_ecclayout_ecc - Default ooblayout_ecc iterator implementation
+ * @mtd: MTD device structure
+ * @section: ECC section. Depending on the layout you may have all the ECC
+ * bytes stored in a single contiguous section, or one section
+ * per ECC chunk (and sometime several sections for a single ECC
+ * ECC chunk)
+ * @oobecc: OOB region struct filled with the appropriate ECC position
+ * information
+ *
+ * This function is just a wrapper around the mtd->ecclayout field and is
+ * here to ease the transition to the mtd_ooblayout_ops approach.
+ * All it does is convert the layout->eccpos information into proper oob
+ * region definitions.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ecclayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc)
+{
+ int eccbyte = 0, cursection = 0, length = 0, eccpos = 0;
+
+ if (!mtd->ecclayout)
+ return -ENOTSUPP;
+
+ /*
+ * This logic allows us to reuse the ->ecclayout information and
+ * expose them as ECC regions (as done for the OOB free regions).
+ *
+ * TODO: this should be dropped as soon as we get rid of the
+ * ->ecclayout field.
+ */
+ for (eccbyte = 0; eccbyte < mtd->ecclayout->eccbytes; eccbyte++) {
+ eccpos = mtd->ecclayout->eccpos[eccbyte];
+
+ if (eccbyte < mtd->ecclayout->eccbytes - 1) {
+ int neccpos = mtd->ecclayout->eccpos[eccbyte + 1];
+
+ if (eccpos + 1 == neccpos) {
+ length++;
+ continue;
+ }
+ }
+
+ if (section == cursection)
+ break;
+
+ length = 0;
+ cursection++;
+ }
+
+ if (cursection != section || eccbyte >= mtd->ecclayout->eccbytes)
+ return -ERANGE;
+
+ oobecc->length = length + 1;
+ oobecc->offset = eccpos - length;
+
+ return 0;
+}
+
+/**
+ * mtd_ecclayout_free - Default ooblayout_free iterator implementation
+ * @mtd: MTD device structure
+ * @section: Free section. Depending on the layout you may have all the free
+ * bytes stored in a single contiguous section, or one section
+ * per ECC chunk (and sometime several sections for a single ECC
+ * ECC chunk)
+ * @oobfree: OOB region struct filled with the appropriate free position
+ * information
+ *
+ * This function is just a wrapper around the mtd->ecclayout field and is
+ * here to ease the transition to the mtd_ooblayout_ops approach.
+ * All it does is convert the layout->oobfree information into proper oob
+ * region definitions.
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+static int mtd_ecclayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree)
+{
+ struct nand_ecclayout *layout = mtd->ecclayout;
+
+ if (!layout)
+ return -ENOTSUPP;
+
+ if (section >= MTD_MAX_OOBFREE_ENTRIES_LARGE ||
+ !layout->oobfree[section].length)
+ return -ERANGE;
+
+ oobfree->offset = layout->oobfree[section].offset;
+ oobfree->length = layout->oobfree[section].length;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops mtd_ecclayout_wrapper_ops = {
+ .ecc = mtd_ecclayout_ecc,
+ .free = mtd_ecclayout_free,
+};
+
+/**
+ * mtd_set_ecclayout - Attach an ecclayout to an MTD device
+ * @mtd: MTD device structure
+ * @ecclayout: The ecclayout to attach to the device
+ *
+ * Returns zero on success, a negative error code otherwise.
+ */
+void mtd_set_ecclayout(struct mtd_info *mtd, struct nand_ecclayout *ecclayout)
+{
+ if (!mtd || !ecclayout)
+ return;
+
+ mtd->ecclayout = ecclayout;
+ mtd_set_ooblayout(mtd, &mtd_ecclayout_wrapper_ops);
+}
+EXPORT_SYMBOL_GPL(mtd_set_ecclayout);
diff --git a/drivers/mtd/mtdraw.c b/drivers/mtd/mtdraw.c
index 19a24cc650..57630647b7 100644
--- a/drivers/mtd/mtdraw.c
+++ b/drivers/mtd/mtdraw.c
@@ -247,7 +247,6 @@ static int mtdraw_erase(struct cdev *cdev, loff_t count, loff_t offset)
count = mtdraw_raw_to_mtd_offset(mtdraw, count);
memset(&erase, 0, sizeof(erase));
- erase.mtd = mtd;
erase.addr = offset;
erase.len = mtd->erasesize;
diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig
index b81e72d6b7..6a162f81f6 100644
--- a/drivers/mtd/nand/Kconfig
+++ b/drivers/mtd/nand/Kconfig
@@ -1,6 +1,5 @@
menuconfig NAND
bool "NAND support"
- select MTD_NAND_IDS
help
This enables support for accessing all type of NAND flash
devices. For further information see
@@ -8,55 +7,11 @@ menuconfig NAND
if NAND
-config NAND_ECC_SOFT
- bool
- default y
- prompt "Support software ecc"
-
-config NAND_ECC_BCH
+config MTD_NAND_ECC_SW_BCH
select BCH
bool
prompt "Support software BCH ecc"
-config NAND_ECC_HW
- bool
- default y
- prompt "Support hardware ecc"
-
-config NAND_ECC_HW_OOB_FIRST
- bool
- prompt "Support hardware ecc (oob first)"
-
-config NAND_ECC_HW_SYNDROME
- bool
- default y
- prompt "Support syndrome hardware ecc controllers"
-
-config NAND_ECC_HW_NONE
- bool
- default y
- prompt "Support skipping ecc support"
-
-config NAND_INFO
- bool
- default y
- prompt "Nand vendor/size information"
- help
- Show informational strings about the vendor and nand flash type
- during startup
-
-config NAND_READ_OOB
- bool
-
-config NAND_BBT
- bool
- select NAND_READ_OOB
- default y
- prompt "support bad block tables"
- help
- Say y here to include support for bad block tables. This speeds
- up the process of checking for bad blocks
-
config NAND_ALLOW_ERASE_BAD
bool
depends on MTD_WRITE
@@ -119,7 +74,6 @@ config NAND_ATMEL_PMECC
bool
prompt "PMECC support"
depends on NAND_ATMEL || COMPILE_TEST
- select NAND_ECC_HW
help
Support for PMECC present on the SoC sam9x5 and sam9n12
@@ -130,16 +84,13 @@ config NAND_S3C24XX
help
Add support for processor's NAND device controller.
-config MTD_NAND_ECC_SMC
+config MTD_NAND_ECC_SW_HAMMING_SMC
bool "NAND ECC Smart Media byte order"
default n
help
Software ECC according to the Smart Media Specification.
The original Linux implementation had byte 0 and 1 swapped.
-config MTD_NAND_IDS
- tristate
-
config MTD_NAND_NOMADIK
tristate "ST Nomadik 8815 NAND support"
depends on ARCH_NOMADIK
diff --git a/drivers/mtd/nand/Makefile b/drivers/mtd/nand/Makefile
index 274bc29ee7..f6e5b41e94 100644
--- a/drivers/mtd/nand/Makefile
+++ b/drivers/mtd/nand/Makefile
@@ -1,10 +1,14 @@
# Generic NAND options
obj-$(CONFIG_NAND) += nand_ecc.o
-obj-$(CONFIG_NAND_ECC_BCH) += nand_bch.o
-obj-$(CONFIG_MTD_NAND_IDS) += nand_ids.o
+obj-$(CONFIG_MTD_NAND_ECC_SW_BCH) += nand_bch.o
+obj-$(CONFIG_NAND) += nand_ids.o
obj-$(CONFIG_NAND) += nand_base.o nand-bb.o nand_timings.o
-obj-$(CONFIG_NAND_BBT) += nand_bbt.o
+obj-$(CONFIG_NAND) += nand_legacy.o nand_onfi.o nand_amd.o
+obj-$(CONFIG_NAND) += nand_esmt.o nand_hynix.o nand_macronix.o
+obj-$(CONFIG_NAND) += nand_micron.o nand_samsung.o nand_toshiba.o
+obj-$(CONFIG_NAND) += nand_jedec.o core.o bbt.o
+obj-$(CONFIG_NAND) += nand_bbt.o
obj-$(CONFIG_MTD_NAND_NOMADIK) += nomadik_nand.o
obj-$(CONFIG_NAND_IMX) += nand_imx.o
@@ -18,4 +22,3 @@ pbl-$(CONFIG_NAND_S3C24XX) += nand_s3c24xx.o
obj-$(CONFIG_NAND_MXS) += nand_mxs.o
obj-$(CONFIG_MTD_NAND_DENALI) += nand_denali.o
obj-$(CONFIG_MTD_NAND_DENALI_DT) += nand_denali_dt.o
-
diff --git a/drivers/mtd/nand/atmel_nand.c b/drivers/mtd/nand/atmel_nand.c
index d86498c86e..6903fb4c9d 100644
--- a/drivers/mtd/nand/atmel_nand.c
+++ b/drivers/mtd/nand/atmel_nand.c
@@ -33,6 +33,7 @@
#include <of_mtd.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <linux/err.h>
@@ -102,6 +103,8 @@ struct atmel_nand_host {
int *pmecc_mu;
int *pmecc_dmu;
int *pmecc_delta;
+ struct nand_ecclayout *ecclayout;
+ void *ecc_code;
};
static struct nand_ecclayout atmel_pmecc_oobinfo;
@@ -535,7 +538,7 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct atmel_nand_host *host = nand_chip->priv;
int i = 0;
- int byte_pos, bit_pos, sector_size, pos;
+ int byte_pos, bit_pos, sector_size;
uint32_t tmp;
uint8_t err_byte;
@@ -552,20 +555,12 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
if (byte_pos < sector_size) {
err_byte = *(buf + byte_pos);
*(buf + byte_pos) ^= (1 << bit_pos);
-
- pos = sector_num * host->board->pmecc_sector_size + byte_pos;
- dev_info(host->dev, "Bit flip in data area, byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
- pos, bit_pos, err_byte, *(buf + byte_pos));
} else {
/* Bit flip in OOB area */
tmp = sector_num * host->pmecc_bytes_per_sector
+ (byte_pos - sector_size);
err_byte = ecc[tmp];
ecc[tmp] ^= (1 << bit_pos);
-
- pos = tmp + nand_chip->ecc.layout->eccpos[0];
- dev_info(host->dev, "Bit flip in OOB, oob_byte_pos: %d, bit_pos: %d, 0x%02x -> 0x%02x\n",
- pos, bit_pos, err_byte, ecc[tmp]);
}
i++;
@@ -575,17 +570,21 @@ static void pmecc_correct_data(struct mtd_info *mtd, uint8_t *buf, uint8_t *ecc,
return;
}
-static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf,
- u8 *ecc)
+static int pmecc_correction(struct mtd_info *mtd, u32 pmecc_stat, uint8_t *buf)
{
struct nand_chip *nand_chip = mtd_to_nand(mtd);
struct atmel_nand_host *host = nand_chip->priv;
- int i, err_nbr, eccbytes;
+ int i, err_nbr, ret;
uint8_t *buf_pos;
+ uint8_t *ecc_code = host->ecc_code;
- eccbytes = nand_chip->ecc.bytes;
- for (i = 0; i < eccbytes; i++)
- if (ecc[i] != 0xff)
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, nand_chip->oob_poi, 0,
+ nand_chip->ecc.total);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < nand_chip->ecc.bytes; i++)
+ if (ecc_code[i] != 0xff)
goto normal_check;
/* Erased page, return OK */
return 0;
@@ -606,7 +605,7 @@ normal_check:
mtd->ecc_stats.failed++;
return -EIO;
} else {
- pmecc_correct_data(mtd, buf_pos, ecc, i,
+ pmecc_correct_data(mtd, buf_pos, ecc_code, i,
host->pmecc_bytes_per_sector, err_nbr);
mtd->ecc_stats.corrected += err_nbr;
}
@@ -623,8 +622,6 @@ static int atmel_nand_pmecc_read_page(struct nand_chip *chip, uint8_t *buf,
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand_host *host = chip->priv;
int eccsize = chip->ecc.size;
- uint8_t *oob = chip->oob_poi;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
uint32_t stat;
int ret;
@@ -636,8 +633,10 @@ static int atmel_nand_pmecc_read_page(struct nand_chip *chip, uint8_t *buf,
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_ENABLE);
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DATA);
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
chip->legacy.read_buf(chip, buf, eccsize);
- chip->legacy.read_buf(chip, oob, mtd->oobsize);
+ chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
ret = wait_on_timeout(PMECC_MAX_TIMEOUT_MS,
!(pmecc_readl_relaxed(host->ecc, SR) & PMECC_SR_BUSY));
@@ -648,20 +647,24 @@ static int atmel_nand_pmecc_read_page(struct nand_chip *chip, uint8_t *buf,
stat = pmecc_readl_relaxed(host->ecc, ISR);
if (stat != 0)
- if (pmecc_correction(mtd, stat, buf, &oob[eccpos[0]]) != 0)
+ if (pmecc_correction(mtd, stat, buf) != 0)
return -EIO;
return 0;
}
static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct atmel_nand_host *host = chip->priv;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = host->ecc_code;
int i, j, ret;
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_RST);
pmecc_writel(host->ecc, CTRL, PMECC_CTRL_DISABLE);
@@ -685,13 +688,22 @@ static int atmel_nand_pmecc_write_page(struct nand_chip *chip, const uint8_t *bu
int pos;
pos = i * host->pmecc_bytes_per_sector + j;
- chip->oob_poi[eccpos[pos]] =
- pmecc_readb_ecc_relaxed(host->ecc, i, j);
+ ecc_calc[pos] = pmecc_readb_ecc_relaxed(host->ecc, i, j);
}
}
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc,
+ chip->oob_poi, 0, chip->ecc.total);
+ if (ret)
+ return ret;
+
chip->legacy.write_buf(chip, chip->oob_poi, mtd->oobsize);
- return 0;
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
static void atmel_pmecc_core_init(struct mtd_info *mtd)
@@ -746,7 +758,7 @@ static void atmel_pmecc_core_init(struct mtd_info *mtd)
| PMECC_CFG_AUTO_DISABLE);
pmecc_writel(host->ecc, CFG, val);
- ecc_layout = nand_chip->ecc.layout;
+ ecc_layout = host->ecclayout;
pmecc_writel(host->ecc, SAREA, mtd->oobsize - 1);
pmecc_writel(host->ecc, SADDR, ecc_layout->eccpos[0]);
pmecc_writel(host->ecc, EADDR,
@@ -853,7 +865,7 @@ static int __init atmel_pmecc_nand_init_params(struct device_d *dev,
{
struct resource *iores;
struct nand_chip *nand_chip = &host->nand_chip;
- struct mtd_info *mtd = &nand_chip->mtd;
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
int cap, sector_size, err_no;
int ret;
@@ -930,7 +942,8 @@ static int __init atmel_pmecc_nand_init_params(struct device_d *dev,
pmecc_config_ecc_layout(&atmel_pmecc_oobinfo,
mtd->oobsize,
nand_chip->ecc.bytes);
- nand_chip->ecc.layout = &atmel_pmecc_oobinfo;
+ host->ecclayout = &atmel_pmecc_oobinfo;
+ mtd_set_ecclayout(mtd, host->ecclayout);
break;
case 512:
case 1024:
@@ -1002,14 +1015,17 @@ static int atmel_nand_read_page(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
+ struct atmel_nand_host *host = chip->priv;
int eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint32_t *eccpos = host->ecclayout->eccpos;
uint8_t *p = buf;
uint8_t *oob = chip->oob_poi;
uint8_t *ecc_pos;
int stat;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
/*
* Errata: ALE is incorrectly wired up to the ECC controller
* on the AP7000, so it will include the address cycles in the
@@ -1241,7 +1257,7 @@ static int atmel_hw_nand_init_params(struct device_d *dev,
{
struct resource *iores;
struct nand_chip *nand_chip = &host->nand_chip;
- struct mtd_info *mtd = &nand_chip->mtd;
+ struct mtd_info *mtd = nand_to_mtd(nand_chip);
iores = dev_request_mem_resource(dev, 1);
if (IS_ERR(iores))
@@ -1258,19 +1274,23 @@ static int atmel_hw_nand_init_params(struct device_d *dev,
/* set ECC page size and oob layout */
switch (mtd->writesize) {
case 512:
- nand_chip->ecc.layout = &atmel_oobinfo_small;
+ host->ecclayout = &atmel_oobinfo_small;
+ mtd_set_ecclayout(mtd, host->ecclayout);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_528);
break;
case 1024:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
+ host->ecclayout = &atmel_oobinfo_large;
+ mtd_set_ecclayout(mtd, host->ecclayout);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_1056);
break;
case 2048:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
+ host->ecclayout = &atmel_oobinfo_large;
+ mtd_set_ecclayout(mtd, host->ecclayout);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_2112);
break;
case 4096:
- nand_chip->ecc.layout = &atmel_oobinfo_large;
+ host->ecclayout = &atmel_oobinfo_large;
+ mtd_set_ecclayout(mtd, host->ecclayout);
ecc_writel(host->ecc, MR, ATMEL_ECC_PAGESIZE_4224);
break;
default:
@@ -1318,7 +1338,7 @@ static int __init atmel_nand_probe(struct device_d *dev)
host->io_base = IOMEM(iores->start);
nand_chip = &host->nand_chip;
- mtd = &nand_chip->mtd;
+ mtd = nand_to_mtd(nand_chip);
host->board = pdata;
host->dev = dev;
@@ -1437,6 +1457,8 @@ static int __init atmel_nand_probe(struct device_d *dev)
goto err_scan_ident;
}
+ host->ecc_code = xmalloc(mtd->oobsize);
+
if (IS_ENABLED(CONFIG_NAND_ECC_HW) &&
nand_chip->ecc.mode == NAND_ECC_HW) {
if (IS_ENABLED(CONFIG_NAND_ATMEL_PMECC) && pdata->has_pmecc)
@@ -1454,12 +1476,11 @@ static int __init atmel_nand_probe(struct device_d *dev)
goto err_scan_tail;
}
- add_mtd_nand_device(nand_chip, "nand");
+ add_mtd_nand_device(mtd, "nand");
if (!res)
return res;
- nand_release(nand_chip);
err_scan_tail:
err_hw_ecc:
err_scan_ident:
diff --git a/drivers/mtd/nand/bbt.c b/drivers/mtd/nand/bbt.c
new file mode 100644
index 0000000000..172ab5ffbc
--- /dev/null
+++ b/drivers/mtd/nand/bbt.c
@@ -0,0 +1,132 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon at free-electrons.com>
+ * Peter Pan <peterpandong at micron.com>
+ */
+
+#define pr_fmt(fmt) "nand-bbt: " fmt
+
+#include <common.h>
+#include <linux/mtd/nand.h>
+#include <linux/slab.h>
+
+/**
+ * nanddev_bbt_init() - Initialize the BBT (Bad Block Table)
+ * @nand: NAND device
+ *
+ * Initialize the in-memory BBT.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_bbt_init(struct nand_device *nand)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned int nblocks = nanddev_neraseblocks(nand);
+ unsigned int nwords = DIV_ROUND_UP(nblocks * bits_per_block,
+ BITS_PER_LONG);
+
+ nand->bbt.cache = kcalloc(nwords, sizeof(*nand->bbt.cache),
+ GFP_KERNEL);
+ if (!nand->bbt.cache)
+ return -ENOMEM;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_init);
+
+/**
+ * nanddev_bbt_cleanup() - Cleanup the BBT (Bad Block Table)
+ * @nand: NAND device
+ *
+ * Undoes what has been done in nanddev_bbt_init()
+ */
+void nanddev_bbt_cleanup(struct nand_device *nand)
+{
+ kfree(nand->bbt.cache);
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_cleanup);
+
+/**
+ * nanddev_bbt_update() - Update a BBT
+ * @nand: nand device
+ *
+ * Update the BBT. Currently a NOP function since on-flash bbt is not yet
+ * supported.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_bbt_update(struct nand_device *nand)
+{
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_update);
+
+/**
+ * nanddev_bbt_get_block_status() - Return the status of an eraseblock
+ * @nand: nand device
+ * @entry: the BBT entry
+ *
+ * Return: a positive number nand_bbt_block_status status or -%ERANGE if @entry
+ * is bigger than the BBT size.
+ */
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned long *pos = nand->bbt.cache +
+ ((entry * bits_per_block) / BITS_PER_LONG);
+ unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
+ unsigned long status;
+
+ if (entry >= nanddev_neraseblocks(nand))
+ return -ERANGE;
+
+ status = pos[0] >> offs;
+ if (bits_per_block + offs > BITS_PER_LONG)
+ status |= pos[1] << (BITS_PER_LONG - offs);
+
+ return status & GENMASK(bits_per_block - 1, 0);
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_get_block_status);
+
+/**
+ * nanddev_bbt_set_block_status() - Update the status of an eraseblock in the
+ * in-memory BBT
+ * @nand: nand device
+ * @entry: the BBT entry to update
+ * @status: the new status
+ *
+ * Update an entry of the in-memory BBT. If you want to push the updated BBT
+ * the NAND you should call nanddev_bbt_update().
+ *
+ * Return: 0 in case of success or -%ERANGE if @entry is bigger than the BBT
+ * size.
+ */
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status)
+{
+ unsigned int bits_per_block = fls(NAND_BBT_BLOCK_NUM_STATUS);
+ unsigned long *pos = nand->bbt.cache +
+ ((entry * bits_per_block) / BITS_PER_LONG);
+ unsigned int offs = (entry * bits_per_block) % BITS_PER_LONG;
+ unsigned long val = status & GENMASK(bits_per_block - 1, 0);
+
+ if (entry >= nanddev_neraseblocks(nand))
+ return -ERANGE;
+
+ pos[0] &= ~GENMASK(offs + bits_per_block - 1, offs);
+ pos[0] |= val << offs;
+
+ if (bits_per_block + offs > BITS_PER_LONG) {
+ unsigned int rbits = bits_per_block + offs - BITS_PER_LONG;
+
+ pos[1] &= ~GENMASK(rbits - 1, 0);
+ pos[1] |= val >> rbits;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_bbt_set_block_status);
diff --git a/drivers/mtd/nand/core.c b/drivers/mtd/nand/core.c
new file mode 100644
index 0000000000..ba22662e2f
--- /dev/null
+++ b/drivers/mtd/nand/core.c
@@ -0,0 +1,275 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2017 Free Electrons
+ *
+ * Authors:
+ * Boris Brezillon <boris.brezillon at free-electrons.com>
+ * Peter Pan <peterpandong at micron.com>
+ */
+
+#define pr_fmt(fmt) "nand: " fmt
+
+#include <common.h>
+#include <linux/mtd/nand.h>
+
+/**
+ * nanddev_isbad() - Check if a block is bad
+ * @nand: NAND device
+ * @pos: position pointing to the block we want to check
+ *
+ * Return: true if the block is bad, false otherwise.
+ */
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ if (nanddev_bbt_is_initialized(nand)) {
+ unsigned int entry;
+ int status;
+
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ status = nanddev_bbt_get_block_status(nand, entry);
+ /* Lazy block status retrieval */
+ if (status == NAND_BBT_BLOCK_STATUS_UNKNOWN) {
+ if (nand->ops->isbad(nand, pos))
+ status = NAND_BBT_BLOCK_FACTORY_BAD;
+ else
+ status = NAND_BBT_BLOCK_GOOD;
+
+ nanddev_bbt_set_block_status(nand, entry, status);
+ }
+
+ if (status == NAND_BBT_BLOCK_WORN ||
+ status == NAND_BBT_BLOCK_FACTORY_BAD)
+ return true;
+
+ return false;
+ }
+
+ return nand->ops->isbad(nand, pos);
+}
+EXPORT_SYMBOL_GPL(nanddev_isbad);
+
+/**
+ * nanddev_markbad() - Mark a block as bad
+ * @nand: NAND device
+ * @pos: position of the block to mark bad
+ *
+ * Mark a block bad. This function is updating the BBT if available and
+ * calls the low-level markbad hook (nand->ops->markbad()).
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ unsigned int entry;
+ int ret = 0;
+
+ if (nanddev_isbad(nand, pos))
+ return 0;
+
+ ret = nand->ops->markbad(nand, pos);
+ if (ret)
+ pr_warn("failed to write BBM to block @%llx (err = %d)\n",
+ nanddev_pos_to_offs(nand, pos), ret);
+
+ if (!nanddev_bbt_is_initialized(nand))
+ goto out;
+
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ ret = nanddev_bbt_set_block_status(nand, entry, NAND_BBT_BLOCK_WORN);
+ if (ret)
+ goto out;
+
+ ret = nanddev_bbt_update(nand);
+
+out:
+ if (!ret)
+ mtd->ecc_stats.badblocks++;
+
+ return ret;
+}
+EXPORT_SYMBOL_GPL(nanddev_markbad);
+
+/**
+ * nanddev_isreserved() - Check whether an eraseblock is reserved or not
+ * @nand: NAND device
+ * @pos: NAND position to test
+ *
+ * Checks whether the eraseblock pointed by @pos is reserved or not.
+ *
+ * Return: true if the eraseblock is reserved, false otherwise.
+ */
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos)
+{
+ unsigned int entry;
+ int status;
+
+ if (!nanddev_bbt_is_initialized(nand))
+ return false;
+
+ /* Return info from the table */
+ entry = nanddev_bbt_pos_to_entry(nand, pos);
+ status = nanddev_bbt_get_block_status(nand, entry);
+ return status == NAND_BBT_BLOCK_RESERVED;
+}
+EXPORT_SYMBOL_GPL(nanddev_isreserved);
+
+/**
+ * nanddev_erase() - Erase a NAND portion
+ * @nand: NAND device
+ * @pos: position of the block to erase
+ *
+ * Erases the block if it's not bad.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ if (nanddev_isbad(nand, pos) || nanddev_isreserved(nand, pos)) {
+ pr_warn("attempt to erase a bad/reserved block @%llx\n",
+ nanddev_pos_to_offs(nand, pos));
+ return -EIO;
+ }
+
+ return nand->ops->erase(nand, pos);
+}
+EXPORT_SYMBOL_GPL(nanddev_erase);
+
+/**
+ * nanddev_mtd_erase() - Generic mtd->_erase() implementation for NAND devices
+ * @mtd: MTD device
+ * @einfo: erase request
+ *
+ * This is a simple mtd->_erase() implementation iterating over all blocks
+ * concerned by @einfo and calling nand->ops->erase() on each of them.
+ *
+ * Note that mtd->_erase should not be directly assigned to this helper,
+ * because there's no locking here. NAND specialized layers should instead
+ * implement there own wrapper around nanddev_mtd_erase() taking the
+ * appropriate lock before calling nanddev_mtd_erase().
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, last;
+ int ret;
+
+ nanddev_offs_to_pos(nand, einfo->addr, &pos);
+ nanddev_offs_to_pos(nand, einfo->addr + einfo->len - 1, &last);
+ while (nanddev_pos_cmp(&pos, &last) <= 0) {
+ ret = nanddev_erase(nand, &pos);
+ if (ret) {
+ einfo->fail_addr = nanddev_pos_to_offs(nand, &pos);
+
+ return ret;
+ }
+
+ nanddev_pos_next_eraseblock(nand, &pos);
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_erase);
+
+/**
+ * nanddev_mtd_max_bad_blocks() - Get the maximum number of bad eraseblock on
+ * a specific region of the NAND device
+ * @mtd: MTD device
+ * @offs: offset of the NAND region
+ * @len: length of the NAND region
+ *
+ * Default implementation for mtd->_max_bad_blocks(). Only works if
+ * nand->memorg.max_bad_eraseblocks_per_lun is > 0.
+ *
+ * Return: a positive number encoding the maximum number of eraseblocks on a
+ * portion of memory, a negative error code otherwise.
+ */
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len)
+{
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ struct nand_pos pos, end;
+ unsigned int max_bb = 0;
+
+ if (!nand->memorg.max_bad_eraseblocks_per_lun)
+ return -ENOTSUPP;
+
+ nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_offs_to_pos(nand, offs + len, &end);
+
+ for (nanddev_offs_to_pos(nand, offs, &pos);
+ nanddev_pos_cmp(&pos, &end) < 0;
+ nanddev_pos_next_lun(nand, &pos))
+ max_bb += nand->memorg.max_bad_eraseblocks_per_lun;
+
+ return max_bb;
+}
+EXPORT_SYMBOL_GPL(nanddev_mtd_max_bad_blocks);
+
+/**
+ * nanddev_init() - Initialize a NAND device
+ * @nand: NAND device
+ * @ops: NAND device operations
+ * @owner: NAND device owner
+ *
+ * Initializes a NAND device object. Consistency checks are done on @ops and
+ * @nand->memorg. Also takes care of initializing the BBT.
+ *
+ * Return: 0 in case of success, a negative error code otherwise.
+ */
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner)
+{
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+ struct nand_memory_organization *memorg = nanddev_get_memorg(nand);
+
+ if (!nand || !ops)
+ return -EINVAL;
+
+ if (!ops->erase || !ops->markbad || !ops->isbad)
+ return -EINVAL;
+
+ if (!memorg->bits_per_cell || !memorg->pagesize ||
+ !memorg->pages_per_eraseblock || !memorg->eraseblocks_per_lun ||
+ !memorg->planes_per_lun || !memorg->luns_per_target ||
+ !memorg->ntargets)
+ return -EINVAL;
+
+ nand->rowconv.eraseblock_addr_shift =
+ fls(memorg->pages_per_eraseblock - 1);
+ nand->rowconv.lun_addr_shift = fls(memorg->eraseblocks_per_lun - 1) +
+ nand->rowconv.eraseblock_addr_shift;
+
+ nand->ops = ops;
+
+ mtd->type = memorg->bits_per_cell == 1 ?
+ MTD_NANDFLASH : MTD_MLCNANDFLASH;
+ mtd->flags = MTD_CAP_NANDFLASH;
+ mtd->erasesize = memorg->pagesize * memorg->pages_per_eraseblock;
+ mtd->writesize = memorg->pagesize;
+ mtd->writebufsize = memorg->pagesize;
+ mtd->oobsize = memorg->oobsize;
+ mtd->size = nanddev_size(nand);
+ mtd->owner = owner;
+
+ return nanddev_bbt_init(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_init);
+
+/**
+ * nanddev_cleanup() - Release resources allocated in nanddev_init()
+ * @nand: NAND device
+ *
+ * Basically undoes what has been done in nanddev_init().
+ */
+void nanddev_cleanup(struct nand_device *nand)
+{
+ if (nanddev_bbt_is_initialized(nand))
+ nanddev_bbt_cleanup(nand);
+}
+EXPORT_SYMBOL_GPL(nanddev_cleanup);
+
+MODULE_DESCRIPTION("Generic NAND framework");
+MODULE_AUTHOR("Boris Brezillon <boris.brezillon at free-electrons.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/mtd/nand/denali.h b/drivers/mtd/nand/denali.h
index 7698b59720..699e6ec6b4 100644
--- a/drivers/mtd/nand/denali.h
+++ b/drivers/mtd/nand/denali.h
@@ -21,6 +21,7 @@
#define __DENALI_H__
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/spinlock.h>
#define DEVICE_RESET 0x0
diff --git a/drivers/mtd/nand/internals.h b/drivers/mtd/nand/internals.h
new file mode 100644
index 0000000000..7716470a56
--- /dev/null
+++ b/drivers/mtd/nand/internals.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2018 - Bootlin
+ *
+ * Author: Boris Brezillon <boris.brezillon at bootlin.com>
+ *
+ * Header containing internal definitions to be used only by core files.
+ * NAND controller drivers should not include this file.
+ */
+
+#ifndef __LINUX_RAWNAND_INTERNALS
+#define __LINUX_RAWNAND_INTERNALS
+
+#include <linux/mtd/rawnand.h>
+
+/*
+ * NAND Flash Manufacturer ID Codes
+ */
+#define NAND_MFR_AMD 0x01
+#define NAND_MFR_ATO 0x9b
+#define NAND_MFR_EON 0x92
+#define NAND_MFR_ESMT 0xc8
+#define NAND_MFR_FUJITSU 0x04
+#define NAND_MFR_HYNIX 0xad
+#define NAND_MFR_INTEL 0x89
+#define NAND_MFR_MACRONIX 0xc2
+#define NAND_MFR_MICRON 0x2c
+#define NAND_MFR_NATIONAL 0x8f
+#define NAND_MFR_RENESAS 0x07
+#define NAND_MFR_SAMSUNG 0xec
+#define NAND_MFR_SANDISK 0x45
+#define NAND_MFR_STMICRO 0x20
+/* Kioxia is new name of Toshiba memory. */
+#define NAND_MFR_TOSHIBA 0x98
+#define NAND_MFR_WINBOND 0xef
+
+/**
+ * struct nand_manufacturer_ops - NAND Manufacturer operations
+ * @detect: detect the NAND memory organization and capabilities
+ * @init: initialize all vendor specific fields (like the ->read_retry()
+ * implementation) if any.
+ * @cleanup: the ->init() function may have allocated resources, ->cleanup()
+ * is here to let vendor specific code release those resources.
+ * @fixup_onfi_param_page: apply vendor specific fixups to the ONFI parameter
+ * page. This is called after the checksum is verified.
+ */
+struct nand_manufacturer_ops {
+ void (*detect)(struct nand_chip *chip);
+ int (*init)(struct nand_chip *chip);
+ void (*cleanup)(struct nand_chip *chip);
+ void (*fixup_onfi_param_page)(struct nand_chip *chip,
+ struct nand_onfi_params *p);
+};
+
+/**
+ * struct nand_manufacturer_desc - NAND Flash Manufacturer descriptor
+ * @name: Manufacturer name
+ * @id: manufacturer ID code of device.
+ * @ops: manufacturer operations
+ */
+struct nand_manufacturer_desc {
+ int id;
+ char *name;
+ const struct nand_manufacturer_ops *ops;
+};
+
+
+extern struct nand_flash_dev nand_flash_ids[];
+
+extern const struct nand_manufacturer_ops amd_nand_manuf_ops;
+extern const struct nand_manufacturer_ops esmt_nand_manuf_ops;
+extern const struct nand_manufacturer_ops hynix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops macronix_nand_manuf_ops;
+extern const struct nand_manufacturer_ops micron_nand_manuf_ops;
+extern const struct nand_manufacturer_ops samsung_nand_manuf_ops;
+extern const struct nand_manufacturer_ops toshiba_nand_manuf_ops;
+
+/* MLC pairing schemes */
+extern const struct mtd_pairing_scheme dist3_pairing_scheme;
+
+/* Core functions */
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id);
+int nand_bbm_get_next_page(struct nand_chip *chip, int page);
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs);
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt);
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode);
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings);
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_sdr_timings *spec_timings);
+const struct nand_interface_config *nand_get_reset_interface_config(void);
+int nand_get_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_set_features(struct nand_chip *chip, int addr, u8 *subfeature_param);
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page);
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page);
+int nand_exit_status_op(struct nand_chip *chip);
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len);
+void nand_decode_ext_id(struct nand_chip *chip);
+void panic_nand_wait(struct nand_chip *chip, unsigned long timeo);
+void sanitize_string(uint8_t *s, size_t len);
+
+static inline bool nand_has_exec_op(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->exec_op)
+ return false;
+
+ return true;
+}
+
+static inline int nand_check_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return 0;
+
+ return chip->controller->ops->exec_op(chip, op, true);
+}
+
+static inline int nand_exec_op(struct nand_chip *chip,
+ const struct nand_operation *op)
+{
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
+
+ if (WARN_ON(op->cs >= nanddev_ntargets(&chip->base)))
+ return -EINVAL;
+
+ return chip->controller->ops->exec_op(chip, op, false);
+}
+
+static inline bool nand_controller_can_setup_interface(struct nand_chip *chip)
+{
+ if (!chip->controller || !chip->controller->ops ||
+ !chip->controller->ops->setup_interface)
+ return false;
+
+ if (chip->options & NAND_KEEP_TIMINGS)
+ return false;
+
+ return true;
+}
+
+/* BBT functions */
+int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
+int nand_markgood_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isreserved_bbt(struct nand_chip *chip, loff_t offs);
+int nand_isbad_bbt(struct nand_chip *chip, loff_t offs, int allowbbt);
+
+/* Legacy */
+void nand_legacy_set_defaults(struct nand_chip *chip);
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip);
+int nand_legacy_check_hooks(struct nand_chip *chip);
+
+/* ONFI functions */
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len);
+int nand_onfi_detect(struct nand_chip *chip);
+
+/* JEDEC functions */
+int nand_jedec_detect(struct nand_chip *chip);
+
+#endif /* __LINUX_RAWNAND_INTERNALS */
diff --git a/drivers/mtd/nand/nand_amd.c b/drivers/mtd/nand/nand_amd.c
new file mode 100644
index 0000000000..c3d4dae3cd
--- /dev/null
+++ b/drivers/mtd/nand/nand_amd.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon at free-electrons.com>
+ */
+
+#include "internals.h"
+
+static void amd_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ nand_decode_ext_id(chip);
+
+ /*
+ * Check for Spansion/AMD ID + repeating 5th, 6th byte since
+ * some Spansion chips have erasesize that conflicts with size
+ * listed in nand_ids table.
+ * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
+ */
+ if (chip->id.data[4] != 0x00 && chip->id.data[5] == 0x00 &&
+ chip->id.data[6] == 0x00 && chip->id.data[7] == 0x00 &&
+ memorg->pagesize == 512) {
+ memorg->pages_per_eraseblock = 256;
+ memorg->pages_per_eraseblock <<= ((chip->id.data[3] & 0x03) << 1);
+ mtd->erasesize = memorg->pages_per_eraseblock *
+ memorg->pagesize;
+ }
+}
+
+static int amd_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ /*
+ * According to the datasheet of some Cypress SLC NANDs,
+ * the bad block markers can be in the first, second or last
+ * page of a block. So let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops amd_nand_manuf_ops = {
+ .detect = amd_nand_decode_id,
+ .init = amd_nand_init,
+};
diff --git a/drivers/mtd/nand/nand_base.c b/drivers/mtd/nand/nand_base.c
index 1816822e78..e9c3d7e7c8 100644
--- a/drivers/mtd/nand/nand_base.c
+++ b/drivers/mtd/nand/nand_base.c
@@ -1,6 +1,5 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * drivers/mtd/nand.c
- *
* Overview:
* This is the generic MTD driver for NAND flash devices. It should be
* capable of working with almost all NAND chips currently available.
@@ -22,14 +21,9 @@
* Check, if mtd->ecctype should be set to MTD_ECC_HW
* if we have HW ECC support.
* BBT table is not serialized, has to be fixed
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
-#define pr_fmt(fmt) "nand: " fmt
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <common.h>
#include <errno.h>
@@ -45,68 +39,180 @@
#include <module.h>
#include <of_mtd.h>
+#include "internals.h"
+
/* Define default oob placement schemes for large and small page devices */
-static struct nand_ecclayout nand_oob_8 = {
- .eccbytes = 3,
- .eccpos = {0, 1, 2},
- .oobfree = {
- {.offset = 3,
- .length = 2},
- {.offset = 6,
- .length = 2} }
-};
+static int nand_ooblayout_ecc_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
-static struct nand_ecclayout nand_oob_16 = {
- .eccbytes = 6,
- .eccpos = {0, 1, 2, 3, 6, 7},
- .oobfree = {
- {.offset = 8,
- . length = 8} }
-};
+ if (section > 1)
+ return -ERANGE;
+
+ if (!section) {
+ oobregion->offset = 0;
+ if (mtd->oobsize == 16)
+ oobregion->length = 4;
+ else
+ oobregion->length = 3;
+ } else {
+ if (mtd->oobsize == 8)
+ return -ERANGE;
+
+ oobregion->offset = 6;
+ oobregion->length = ecc->total - 4;
+ }
+
+ return 0;
+}
-static struct nand_ecclayout nand_oob_64 = {
- .eccbytes = 24,
- .eccpos = {
- 40, 41, 42, 43, 44, 45, 46, 47,
- 48, 49, 50, 51, 52, 53, 54, 55,
- 56, 57, 58, 59, 60, 61, 62, 63},
- .oobfree = {
- {.offset = 2,
- .length = 38} }
+static int nand_ooblayout_free_sp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->oobsize == 16) {
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 8;
+ oobregion->offset = 8;
+ } else {
+ oobregion->length = 2;
+ if (!section)
+ oobregion->offset = 3;
+ else
+ oobregion->offset = 6;
+ }
+
+ return 0;
+}
+
+const struct mtd_ooblayout_ops nand_ooblayout_sp_ops = {
+ .ecc = nand_ooblayout_ecc_sp,
+ .free = nand_ooblayout_free_sp,
};
+EXPORT_SYMBOL_GPL(nand_ooblayout_sp_ops);
+
+static int nand_ooblayout_ecc_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section || !ecc->total)
+ return -ERANGE;
+
+ oobregion->length = ecc->total;
+ oobregion->offset = mtd->oobsize - oobregion->length;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = mtd->oobsize - ecc->total - 2;
+ oobregion->offset = 2;
+
+ return 0;
+}
-static struct nand_ecclayout nand_oob_128 = {
- .eccbytes = 48,
- .eccpos = {
- 80, 81, 82, 83, 84, 85, 86, 87,
- 88, 89, 90, 91, 92, 93, 94, 95,
- 96, 97, 98, 99, 100, 101, 102, 103,
- 104, 105, 106, 107, 108, 109, 110, 111,
- 112, 113, 114, 115, 116, 117, 118, 119,
- 120, 121, 122, 123, 124, 125, 126, 127},
- .oobfree = {
- {.offset = 2,
- .length = 78} }
+const struct mtd_ooblayout_ops nand_ooblayout_lp_ops = {
+ .ecc = nand_ooblayout_ecc_lp,
+ .free = nand_ooblayout_free_lp,
};
+EXPORT_SYMBOL_GPL(nand_ooblayout_lp_ops);
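
(Aside, not part of the diff: with mtd_ooblayout_ops the ECC and free OOB
byte positions are queried section by section instead of being read from a
static nand_ecclayout. A short sketch of how a consumer walks the layout,
assuming the mtd_ooblayout_ecc() accessor from the mtd core:)

  static void example_dump_ecc_layout(struct mtd_info *mtd)
  {
      struct mtd_oob_region region;
      int section = 0;

      /* the accessor returns -ERANGE once past the last section */
      while (!mtd_ooblayout_ecc(mtd, section, &region)) {
          pr_info("ECC section %d: offset %u, length %u\n",
                  section, region.offset, region.length);
          section++;
      }
  }
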
+
+/*
+ * Support the old "large page" layout used for 1-bit Hamming ECC where ECC
+ * are placed at a fixed offset.
+ */
+static int nand_ooblayout_ecc_lp_hamming(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
-static int nand_get_device(struct nand_chip *chip, int new_state);
+ if (section)
+ return -ERANGE;
-static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
- struct mtd_oob_ops *ops);
+ switch (mtd->oobsize) {
+ case 64:
+ oobregion->offset = 40;
+ break;
+ case 128:
+ oobregion->offset = 80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ oobregion->length = ecc->total;
+ if (oobregion->offset + oobregion->length > mtd->oobsize)
+ return -ERANGE;
+
+ return 0;
+}
+
+static int nand_ooblayout_free_lp_hamming(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ecc_offset = 0;
+
+ if (section < 0 || section > 1)
+ return -ERANGE;
+
+ switch (mtd->oobsize) {
+ case 64:
+ ecc_offset = 40;
+ break;
+ case 128:
+ ecc_offset = 80;
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (section == 0) {
+ oobregion->offset = 2;
+ oobregion->length = ecc_offset - 2;
+ } else {
+ oobregion->offset = ecc_offset + ecc->total;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops nand_ooblayout_lp_hamming_ops = {
+ .ecc = nand_ooblayout_ecc_lp_hamming,
+ .free = nand_ooblayout_free_lp_hamming,
+};
-static int check_offs_len(struct nand_chip *chip,
- loff_t ofs, uint64_t len)
+static int check_offs_len(struct nand_chip *chip, loff_t ofs, uint64_t len)
{
int ret = 0;
/* Start address must align on block boundary */
- if (ofs & ((1 << chip->phys_erase_shift) - 1)) {
+ if (ofs & ((1ULL << chip->phys_erase_shift) - 1)) {
pr_debug("%s: unaligned address\n", __func__);
ret = -EINVAL;
}
/* Length must align on block boundary */
- if (len & ((1 << chip->phys_erase_shift) - 1)) {
+ if (len & ((1ULL << chip->phys_erase_shift) - 1)) {
pr_debug("%s: length not block aligned\n", __func__);
ret = -EINVAL;
}
@@ -115,220 +221,348 @@ static int check_offs_len(struct nand_chip *chip,
}
/**
- * nand_release_device - [GENERIC] release chip
- * @mtd: MTD device structure
+ * nand_extract_bits - Copy unaligned bits from one buffer to another one
+ * @dst: destination buffer
+ * @dst_off: bit offset at which the writing starts
+ * @src: source buffer
+ * @src_off: bit offset at which the reading starts
+ * @nbits: number of bits to copy from @src to @dst
*
- * Release chip lock and wake up anyone waiting on the device.
+ * Copy bits from one memory region to another (overlap authorized).
*/
-static void nand_release_device(struct nand_chip *chip)
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits)
{
- /* Release the controller and the chip */
- chip->controller->active = NULL;
- chip->state = FL_READY;
+ unsigned int tmp, n;
+
+ dst += dst_off / 8;
+ dst_off %= 8;
+ src += src_off / 8;
+ src_off %= 8;
+
+ while (nbits) {
+ n = min3(8 - dst_off, 8 - src_off, nbits);
+
+ tmp = (*src >> src_off) & GENMASK(n - 1, 0);
+ *dst &= ~GENMASK(n - 1 + dst_off, dst_off);
+ *dst |= tmp << dst_off;
+
+ dst_off += n;
+ if (dst_off >= 8) {
+ dst++;
+ dst_off -= 8;
+ }
+
+ src_off += n;
+ if (src_off >= 8) {
+ src++;
+ src_off -= 8;
+ }
+
+ nbits -= n;
+ }
}
+EXPORT_SYMBOL_GPL(nand_extract_bits);
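
(Aside, not part of the diff: a small worked example of the unaligned bit
copy above:)

  static void example_extract_bits(void)
  {
      u8 src[2] = { 0xff, 0x0f };
      u8 dst[2] = { 0x00, 0x00 };

      /* copy src bits 3..12 into dst bits 6..15 */
      nand_extract_bits(dst, 6, src, 3, 10);

      /* dst is now { 0xc0, 0x7f }; all other dst bits are untouched */
  }
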
/**
- * nand_read_byte - [DEFAULT] read one byte from the chip
- * @mtd: MTD device structure
+ * nand_select_target() - Select a NAND target (A.K.A. die)
+ * @chip: NAND chip object
+ * @cs: the CS line to select. Note that this CS id is always from the chip
+ * PoV, not the controller one
*
- * Default read function for 8bit buswidth
+ * Select a NAND target so that further operations executed on @chip go to the
+ * selected NAND target.
*/
-static uint8_t nand_read_byte(struct nand_chip *chip)
+void nand_select_target(struct nand_chip *chip, unsigned int cs)
{
- return readb(chip->legacy.IO_ADDR_R);
+ /*
+ * cs should always lie between 0 and nanddev_ntargets(), when that's
+ * not the case it's a bug and the caller should be fixed.
+ */
+ if (WARN_ON(cs > nanddev_ntargets(&chip->base)))
+ return;
+
+ chip->cur_cs = cs;
+
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, cs);
}
+EXPORT_SYMBOL_GPL(nand_select_target);
/**
- * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
- * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
- * @mtd: MTD device structure
- *
- * Default read function for 16bit buswidth with endianness conversion.
+ * nand_deselect_target() - Deselect the currently selected target
+ * @chip: NAND chip object
*
+ * Deselect the currently selected NAND target. The result of operations
+ * executed on @chip after the target has been deselected is undefined.
*/
-static uint8_t nand_read_byte16(struct nand_chip *chip)
+void nand_deselect_target(struct nand_chip *chip)
{
- return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
+ if (chip->legacy.select_chip)
+ chip->legacy.select_chip(chip, -1);
+
+ chip->cur_cs = -1;
}
+EXPORT_SYMBOL_GPL(nand_deselect_target);
/**
- * nand_read_word - [DEFAULT] read one word from the chip
- * @mtd: MTD device structure
+ * nand_release_device - [GENERIC] release chip
+ * @chip: NAND chip object
*
- * Default read function for 16bit buswidth without endianness conversion.
+ * Release chip lock and wake up anyone waiting on the device.
*/
-static u16 nand_read_word(struct nand_chip *chip)
+static void nand_release_device(struct nand_chip *chip)
{
- return readw(chip->legacy.IO_ADDR_R);
+ /* Release the controller and the chip */
+ mutex_unlock(&chip->controller->lock);
+ mutex_unlock(&chip->lock);
}
/**
- * nand_select_chip - [DEFAULT] control CE line
- * @mtd: MTD device structure
- * @chipnr: chipnumber to select, -1 for deselect
+ * nand_bbm_get_next_page - Get the next page for bad block markers
+ * @chip: NAND chip object
+ * @page: First page to start checking for bad block marker usage
*
- * Default select function for 1 chip devices.
+ * Returns an integer that corresponds to the page offset within a block, for
+ * a page that is used to store bad block markers. If no more pages are
+ * available, -EINVAL is returned.
*/
-static void nand_select_chip(struct nand_chip *chip, int chipnr)
+int nand_bbm_get_next_page(struct nand_chip *chip, int page)
{
- switch (chipnr) {
- case -1:
- chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, 0 | NAND_CTRL_CHANGE);
- break;
- case 0:
- break;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int last_page = ((mtd->erasesize - mtd->writesize) >>
+ chip->page_shift) & chip->pagemask;
+ unsigned int bbm_flags = NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE
+ | NAND_BBM_LASTPAGE;
- default:
- BUG();
- }
+ if (page == 0 && !(chip->options & bbm_flags))
+ return 0;
+ if (page == 0 && chip->options & NAND_BBM_FIRSTPAGE)
+ return 0;
+ if (page <= 1 && chip->options & NAND_BBM_SECONDPAGE)
+ return 1;
+ if (page <= last_page && chip->options & NAND_BBM_LASTPAGE)
+ return last_page;
+
+ return -EINVAL;
}
/**
- * nand_write_buf - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
+ * nand_block_bad - [DEFAULT] Read bad block marker from the chip
+ * @chip: NAND chip object
+ * @ofs: offset from device start
*
- * Default write function for 8bit buswidth.
+ * Check, if the block is bad.
*/
-static __maybe_unused void nand_write_buf(struct nand_chip *chip,
- const uint8_t *buf, int len)
+static int nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
- int i;
+ int first_page, page_offset;
+ int res;
+ u8 bad;
- for (i = 0; i < len; i++)
- writeb(buf[i], chip->legacy.IO_ADDR_W);
+ first_page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = chip->ecc.read_oob(chip, first_page + page_offset);
+ if (res < 0)
+ return res;
+
+ bad = chip->oob_poi[chip->badblockpos];
+
+ if (likely(chip->badblockbits == 8))
+ res = bad != 0xFF;
+ else
+ res = hweight8(bad) < chip->badblockbits;
+ if (res)
+ return res;
+
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
+
+ return 0;
+}
+
+static int nand_isbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->options & NAND_NO_BBM_QUIRK)
+ return 0;
+
+ if (chip->legacy.block_bad)
+ return chip->legacy.block_bad(chip, ofs);
+
+ return nand_block_bad(chip, ofs);
}
/**
- * nand_read_buf - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
+ * nand_get_device - [GENERIC] Get chip for selected access
+ * @chip: NAND chip structure
*
- * Default read function for 8bit buswidth.
+ * Lock the device and its controller for exclusive access
+ *
+ * Return: -EBUSY if the chip has been suspended, 0 otherwise
*/
-static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+static int nand_get_device(struct nand_chip *chip)
{
- int i;
+ mutex_lock(&chip->lock);
+ if (chip->suspended) {
+ mutex_unlock(&chip->lock);
+ return -EBUSY;
+ }
+ mutex_lock(&chip->controller->lock);
- for (i = 0; i < len; i++)
- buf[i] = readb(chip->legacy.IO_ADDR_R);
+ return 0;
}
/**
- * nand_write_buf16 - [DEFAULT] write buffer to chip
- * @mtd: MTD device structure
- * @buf: data buffer
- * @len: number of bytes to write
+ * nand_check_wp - [GENERIC] check if the chip is write protected
+ * @chip: NAND chip object
*
- * Default write function for 16bit buswidth.
+ * Check, if the device is write protected. The function expects, that the
+ * device is already selected.
*/
-static __maybe_unused void nand_write_buf16(struct nand_chip *chip,
- const uint8_t *buf, int len)
+static int nand_check_wp(struct nand_chip *chip)
{
- int i;
- u16 *p = (u16 *) buf;
- len >>= 1;
+ u8 status;
+ int ret;
- for (i = 0; i < len; i++)
- writew(p[i], chip->legacy.IO_ADDR_W);
+ /* Broken xD cards report WP despite being writable */
+ if (chip->options & NAND_BROKEN_XD)
+ return 0;
+
+ /* Check the WP bit */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ return status & NAND_STATUS_WP ? 0 : 1;
}
/**
- * nand_read_buf16 - [DEFAULT] read chip data into buffer
- * @mtd: MTD device structure
- * @buf: buffer to store date
- * @len: number of bytes to read
- *
- * Default read function for 16bit buswidth.
+ * nand_fill_oob - [INTERN] Transfer client buffer to oob
+ * @chip: NAND chip object
+ * @oob: oob data buffer
+ * @len: oob data write length
+ * @ops: oob ops structure
*/
-static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
+ struct mtd_oob_ops *ops)
{
- int i;
- u16 *p = (u16 *) buf;
- len >>= 1;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
- for (i = 0; i < len; i++)
- p[i] = readw(chip->legacy.IO_ADDR_R);
+ /*
+ * Initialise to all 0xFF, to avoid the possibility of left over OOB
+ * data from a previous OOB read.
+ */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+
+ switch (ops->mode) {
+
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_RAW:
+ memcpy(chip->oob_poi + ops->ooboffs, oob, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_set_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
+
+ default:
+ BUG();
+ }
+ return NULL;
}
/**
- * nand_block_bad - [DEFAULT] Read bad block marker from the chip
- * @mtd: MTD device structure
- * @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
+ * nand_do_write_oob - [MTD Interface] NAND write out-of-band
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operation description structure
*
- * Check, if the block is bad.
+ * NAND write out-of-band.
*/
-static int nand_block_bad(struct nand_chip *chip, loff_t ofs, int getchip)
+static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
{
- int page, chipnr, res = 0, i = 0;
- u16 bad;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, page, status, len, ret;
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += chip->mtd.erasesize - chip->mtd.writesize;
+ pr_debug("%s: to = 0x%08x, len = %i\n",
+ __func__, (unsigned int)to, (int)ops->ooblen);
- page = (int)(ofs >> chip->page_shift) & chip->pagemask;
+ len = mtd_oobavail(mtd, ops);
- if (getchip) {
- chipnr = (int)(ofs >> chip->chip_shift);
+ /* Do not allow write past end of page */
+ if ((ops->ooboffs + ops->ooblen) > len) {
+ pr_debug("%s: attempt to write past end of page\n",
+ __func__);
+ return -EINVAL;
+ }
- nand_get_device(chip, FL_READING);
+ chipnr = (int)(to >> chip->chip_shift);
- /* Select the NAND device */
- chip->legacy.select_chip(chip, chipnr);
- }
+ /*
+ * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
+ * of my DiskOnChip 2000 test units) will clear the whole data page too
+ * if we don't do this. I have no clue why, but I seem to have 'fixed'
+ * it in the doc2000 driver in August 1999. dwmw2.
+ */
+ ret = nand_reset(chip, chipnr);
+ if (ret)
+ return ret;
- do {
- if (chip->options & NAND_BUSWIDTH_16) {
- chip->legacy.cmdfunc(chip, NAND_CMD_READOOB,
- chip->badblockpos & 0xFE, page);
- bad = cpu_to_le16(chip->legacy.read_word(chip));
- if (chip->badblockpos & 0x1)
- bad >>= 8;
- else
- bad &= 0xFF;
- } else {
- chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, chip->badblockpos,
- page);
- bad = chip->legacy.read_byte(chip);
- }
+ nand_select_target(chip, chipnr);
- if (likely(chip->badblockbits == 8))
- res = bad != 0xFF;
- else
- res = hweight8(bad) < chip->badblockbits;
- ofs += chip->mtd.writesize;
- page = (int)(ofs >> chip->page_shift) & chip->pagemask;
- i++;
- } while (!res && i < 2 && (chip->bbt_options & NAND_BBT_SCAN2NDPAGE));
+ /* Shift to get page */
+ page = (int)(to >> chip->page_shift);
- if (getchip) {
- chip->legacy.select_chip(chip, -1);
- nand_release_device(chip);
+ /* Check, if it is write protected */
+ if (nand_check_wp(chip)) {
+ nand_deselect_target(chip);
+ return -EROFS;
}
- return res;
+ /* Invalidate the page cache, if we write to the cached page */
+ if (page == chip->pagecache.page)
+ chip->pagecache.page = -1;
+
+ nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+
+ if (ops->mode == MTD_OPS_RAW)
+ status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
+ else
+ status = chip->ecc.write_oob(chip, page & chip->pagemask);
+
+ nand_deselect_target(chip);
+
+ if (status)
+ return status;
+
+ ops->oobretlen = ops->ooblen;
+
+ return 0;
}
/**
* nand_default_block_markbad - [DEFAULT] mark a block bad via bad block marker
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
*
* This is the default implementation, which can be overridden by a hardware
* specific driver. It provides the details for writing a bad block marker to a
* block.
*/
-static __maybe_unused int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
+static int nand_default_block_markbad(struct nand_chip *chip, loff_t ofs)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
struct mtd_oob_ops ops;
uint8_t buf[2] = { 0, 0 };
- int ret = 0, res, i = 0;
+ int ret = 0, res, page_offset;
- ops.datbuf = NULL;
+ memset(&ops, 0, sizeof(ops));
ops.oobbuf = buf;
ops.ooboffs = chip->badblockpos;
if (chip->options & NAND_BUSWIDTH_16) {
@@ -339,24 +573,38 @@ static __maybe_unused int nand_default_block_markbad(struct nand_chip *chip, lof
}
ops.mode = MTD_OPS_PLACE_OOB;
- /* Write to first/last page(s) if necessary */
- if (chip->bbt_options & NAND_BBT_SCANLASTPAGE)
- ofs += chip->mtd.erasesize - chip->mtd.writesize;
- do {
- res = nand_do_write_oob(chip, ofs, &ops);
- if (!ret)
+ page_offset = nand_bbm_get_next_page(chip, 0);
+
+ while (page_offset >= 0) {
+ res = nand_do_write_oob(chip,
+ ofs + (page_offset * mtd->writesize),
+ &ops);
+
+ if (!ret)
ret = res;
- i++;
- ofs += chip->mtd.writesize;
- } while ((chip->bbt_options & NAND_BBT_SCAN2NDPAGE) && i < 2);
+ page_offset = nand_bbm_get_next_page(chip, page_offset + 1);
+ }
return ret;
}
+/**
+ * nand_markbad_bbm - mark a block by updating the BBM
+ * @chip: NAND chip object
+ * @ofs: offset of the block to mark bad
+ */
+int nand_markbad_bbm(struct nand_chip *chip, loff_t ofs)
+{
+ if (chip->legacy.block_markbad)
+ return chip->legacy.block_markbad(chip, ofs);
+
+ return nand_default_block_markbad(chip, ofs);
+}
+
/**
* nand_block_markbad_lowlevel - mark a block bad
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
*
* This function performs the generic NAND bad block marking steps (i.e., bad
@@ -364,15 +612,18 @@ static __maybe_unused int nand_default_block_markbad(struct nand_chip *chip, lof
* specify how to write bad block markers to OOB (chip->legacy.block_markbad).
*
* We try operations in the following order:
+ *
* (1) erase the affected block, to allow OOB marker to be written cleanly
* (2) write bad block marker to OOB area of affected block (unless flag
* NAND_BBT_NO_OOB_BBM is present)
* (3) update the BBT
+ *
* Note that we retain the first error encountered in (2) or (3), finish the
* procedures, and dump the error in the end.
- */
+ */
static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
int res, ret = 0;
if (!(chip->bbt_options & NAND_BBT_NO_OOB_BBM)) {
@@ -380,26 +631,28 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
/* Attempt erase before marking OOB */
memset(&einfo, 0, sizeof(einfo));
- einfo.mtd = &chip->mtd;
einfo.addr = ofs;
- einfo.len = 1 << chip->phys_erase_shift;
+ einfo.len = 1ULL << chip->phys_erase_shift;
nand_erase_nand(chip, &einfo, 0);
/* Write bad block marker to OOB */
- nand_get_device(chip, FL_WRITING);
- ret = chip->legacy.block_markbad(chip, ofs);
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
+ ret = nand_markbad_bbm(chip, ofs);
nand_release_device(chip);
}
/* Mark block bad in BBT */
- if (IS_ENABLED(CONFIG_NAND_BBT) && chip->bbt) {
+ if (chip->bbt) {
res = nand_markbad_bbt(chip, ofs);
if (!ret)
ret = res;
}
if (!ret)
- chip->mtd.ecc_stats.badblocks++;
+ mtd->ecc_stats.badblocks++;
return ret;
}
@@ -416,6 +669,7 @@ static int nand_block_markbad_lowlevel(struct nand_chip *chip, loff_t ofs)
*/
static int nand_block_markgood_lowlevel(struct nand_chip *chip, loff_t ofs)
{
+ struct mtd_info *mtd = nand_to_mtd(chip);
bool allow_erasebad;
int ret;
@@ -423,22 +677,14 @@ static int nand_block_markgood_lowlevel(struct nand_chip *chip, loff_t ofs)
struct erase_info einfo;
/* Attempt erase possibly bad block */
- allow_erasebad = chip->mtd.allow_erasebad;
- chip->mtd.allow_erasebad = true;
+ allow_erasebad = mtd->allow_erasebad;
+ mtd->allow_erasebad = true;
memset(&einfo, 0, sizeof(einfo));
- einfo.mtd = &chip->mtd;
+ einfo.mtd = mtd;
einfo.addr = ofs;
einfo.len = 1 << chip->phys_erase_shift;
nand_erase_nand(chip, &einfo, 0);
- chip->mtd.allow_erasebad = allow_erasebad;
-
- /*
- * Verify erase succeded. We need to select chip again,
- * as nand_erase_nand deselected it.
- */
- ret = chip->legacy.block_bad(chip, ofs, 1);
- if (ret)
- return ret;
+ mtd->allow_erasebad = allow_erasebad;
}
/* Mark block good in BBT */
@@ -448,3111 +694,4790 @@ static int nand_block_markgood_lowlevel(struct nand_chip *chip, loff_t ofs)
return ret;
}
- if (chip->mtd.ecc_stats.badblocks > 0)
- chip->mtd.ecc_stats.badblocks--;
+ if (mtd->ecc_stats.badblocks > 0)
+ mtd->ecc_stats.badblocks--;
return 0;
}
-/**
- * nand_check_wp - [GENERIC] check if the chip is write protected
- * @mtd: MTD device structure
- *
- * Check, if the device is write protected. The function expects, that the
- * device is already selected.
- */
-static int nand_check_wp(struct nand_chip *chip)
-{
- /* Broken xD cards report WP despite being writable */
- if (chip->options & NAND_BROKEN_XD)
- return 0;
-
- /* Check the WP bit */
- chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
- return (chip->legacy.read_byte(chip) & NAND_STATUS_WP) ? 0 : 1;
-}
-
/**
* nand_block_checkbad - [GENERIC] Check if a block is marked bad
- * @mtd: MTD device structure
+ * @chip: NAND chip object
* @ofs: offset from device start
- * @getchip: 0, if the chip is already selected
* @allowbbt: 1, if its allowed to access the bbt area
*
* Check, if the block is bad. Either by reading the bad block table or
* calling of the scan function.
*/
-static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int getchip,
- int allowbbt)
+static int nand_block_checkbad(struct nand_chip *chip, loff_t ofs, int allowbbt)
{
- if (IS_ENABLED(CONFIG_NAND_BBT) && chip->bbt) {
- /* Return info from the table */
+ /* Return info from the table */
+ if (chip->bbt)
return nand_isbad_bbt(chip, ofs, allowbbt);
- }
-
- return chip->legacy.block_bad(chip, ofs, getchip);
-}
-/* Wait for the ready pin, after a command. The timeout is caught later. */
-void nand_wait_ready(struct nand_chip *chip)
-{
- uint64_t start = get_time_ns();
-
- /* wait until command is processed or timeout occures */
- do {
- if (chip->legacy.dev_ready(chip))
- break;
- } while (!is_timeout(start, SECOND * 2));
+ return nand_isbad_bbm(chip, ofs);
}
/**
- * nand_command - [DEFAULT] Send command to NAND device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * nand_soft_waitrdy - Poll STATUS reg until RDY bit is set to 1
+ * @chip: NAND chip structure
+ * @timeout_ms: Timeout in ms
+ *
+ * Poll the STATUS register using ->exec_op() until the RDY bit becomes 1.
+ * If that does not happen within the specified timeout, -ETIMEDOUT is
+ * returned.
+ *
+ * This helper is intended to be used when the controller does not have access
+ * to the NAND R/B pin.
+ *
+ * Be aware that calling this helper from an ->exec_op() implementation means
+ * ->exec_op() must be re-entrant.
*
- * Send command to NAND device. This function is used for small page devices
- * (512 Bytes per page).
+ * Return 0 if the NAND chip is ready, a negative error otherwise.
*/
-static void nand_command(struct nand_chip *chip, unsigned int command,
- int column, int page_addr)
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms)
{
- int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+ const struct nand_sdr_timings *timings;
+ u8 status = 0;
+ int ret;
+ uint64_t start;
- /* Write out the command to the device */
- if (IS_ENABLED(CONFIG_MTD_WRITE) && command == NAND_CMD_SEQIN) {
- int readcmd;
+ if (!nand_has_exec_op(chip))
+ return -ENOTSUPP;
- if (column >= chip->mtd.writesize) {
- /* OOB area */
- column -= chip->mtd.writesize;
- readcmd = NAND_CMD_READOOB;
- } else if (column < 256) {
- /* First 256 bytes --> READ0 */
- readcmd = NAND_CMD_READ0;
- } else {
- column -= 256;
- readcmd = NAND_CMD_READ1;
- }
- chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- }
- chip->legacy.cmd_ctrl(chip, command, ctrl);
-
- /* Address cycle, when necessary */
- ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16)
- column >>= 1;
- chip->legacy.cmd_ctrl(chip, column, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- }
- if (page_addr != -1) {
- chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
- /* One more address cycle for devices > 32MiB */
- if (chip->chipsize > (32 << 20))
- chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
- }
- chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ /* Wait tWB before polling the STATUS reg. */
+ timings = nand_get_sdr_timings(nand_get_interface_config(chip));
+ ndelay(PSEC_TO_NSEC(timings->tWB_max));
- /*
- * Program and erase have their own busy handlers status and sequential
- * in needs no delay
- */
- switch (command) {
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_STATUS:
- return;
+ start = get_time_ns();
+ do {
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
+ if (ret)
+ break;
- case NAND_CMD_RESET:
- if (chip->legacy.dev_ready)
+ if (status & NAND_STATUS_READY)
break;
- udelay(chip->legacy.chip_delay);
- chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
- NAND_CTRL_CLE | NAND_CTRL_CHANGE);
- chip->legacy.cmd_ctrl(chip,
- NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->legacy.read_byte(chip) & NAND_STATUS_READY))
- ;
- return;
- /* This applies to read commands */
- default:
/*
- * If we don't have access to the busy pin, we apply the given
- * command delay
+ * Typical lowest execution time for a tR on most NANDs is 10us,
+ * use this as polling delay before doing something smarter (ie.
+ * deriving a delay from the timeout value, timeout_ms/ratio).
*/
- if (!chip->legacy.dev_ready) {
- udelay(chip->legacy.chip_delay);
- return;
- }
- }
+ udelay(10);
+ } while (!is_timeout(start, timeout_ms * MSECOND));
+
/*
- * Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine.
+ * We have to exit READ_STATUS mode in order to read real data on the
+ * bus in case the WAITRDY instruction is preceding a DATA_IN
+ * instruction.
*/
- ndelay(100);
+ nand_exit_status_op(chip);
+
+ if (ret)
+ return ret;
+
+ return status & NAND_STATUS_READY ? 0 : -ETIMEDOUT;
+}
+EXPORT_SYMBOL_GPL(nand_soft_waitrdy);
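
(Aside, not part of the diff: nand_soft_waitrdy() is meant to be called
from a controller ->exec_op() that has no R/B line, when it reaches a
wait-ready instruction. Rough sketch only; example_issue_instr() is a
made-up placeholder for the driver's own command/address/data handling:)

  static int example_exec_op(struct nand_chip *chip,
                             const struct nand_operation *op,
                             bool check_only)
  {
      unsigned int i;
      int ret = 0;

      if (check_only)
          return 0;

      for (i = 0; i < op->ninstrs; i++) {
          const struct nand_op_instr *instr = &op->instrs[i];

          if (instr->type == NAND_OP_WAITRDY_INSTR)
              /* no R/B pin: poll the STATUS register instead */
              ret = nand_soft_waitrdy(chip,
                                      instr->ctx.waitrdy.timeout_ms);
          else
              ret = example_issue_instr(chip, instr);

          if (ret)
              break;
      }

      return ret;
  }
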
+
+static bool nand_supports_get_features(struct nand_chip *chip, int addr)
+{
+ return (chip->parameters.supports_set_get_features &&
+ test_bit(addr, chip->parameters.get_feature_list));
+}
- nand_wait_ready(chip);
+bool nand_supports_set_features(struct nand_chip *chip, int addr)
+{
+ return (chip->parameters.supports_set_get_features &&
+ test_bit(addr, chip->parameters.set_feature_list));
}
/**
- * nand_command_lp - [DEFAULT] Send command to NAND large page device
- * @mtd: MTD device structure
- * @command: the command to be sent
- * @column: the column address for this command, -1 if none
- * @page_addr: the page address for this command, -1 if none
+ * nand_reset_interface - Reset data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
*
- * Send command to NAND device. This is the version for the new large page
- * devices. We don't have the separate regions as we have in the small page
- * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ * Reset the Data interface and timings to ONFI mode 0.
+ *
+ * Returns 0 for success or negative error code otherwise.
*/
-static void nand_command_lp(struct nand_chip *chip, unsigned int command,
- int column, int page_addr)
+static int nand_reset_interface(struct nand_chip *chip, int chipnr)
{
- /* Emulate NAND_CMD_READOOB */
- if (command == NAND_CMD_READOOB) {
- column += chip->mtd.writesize;
- command = NAND_CMD_READ0;
- }
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
- /* Command latch cycle */
- chip->legacy.cmd_ctrl(chip, command, NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ /*
+ * The ONFI specification says:
+ * "
+ * To transition from NV-DDR or NV-DDR2 to the SDR data
+ * interface, the host shall use the Reset (FFh) command
+ * using SDR timing mode 0. A device in any timing mode is
+ * required to recognize Reset (FFh) command issued in SDR
+ * timing mode 0.
+ * "
+ *
+ * Configure the data interface in SDR mode and set the
+ * timings to timing mode 0.
+ */
- if (column != -1 || page_addr != -1) {
- int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+ chip->current_interface_config = nand_get_reset_interface_config();
+ ret = ops->setup_interface(chip, chipnr,
+ chip->current_interface_config);
+ if (ret)
+ pr_err("Failed to configure data interface to SDR timing mode 0\n");
- /* Serially input address */
- if (column != -1) {
- /* Adjust columns for 16 bit buswidth */
- if (chip->options & NAND_BUSWIDTH_16)
- column >>= 1;
- chip->legacy.cmd_ctrl(chip, column, ctrl);
- ctrl &= ~NAND_CTRL_CHANGE;
- chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
- }
- if (page_addr != -1) {
- chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
- chip->legacy.cmd_ctrl(chip, page_addr >> 8,
- NAND_NCE | NAND_ALE);
- /* One more address cycle for devices > 128MiB */
- if (chip->chipsize > (128 << 20))
- chip->legacy.cmd_ctrl(chip, page_addr >> 16,
- NAND_NCE | NAND_ALE);
- }
- }
- chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE, NAND_NCE | NAND_CTRL_CHANGE);
+ return ret;
+}
+
+/**
+ * nand_setup_interface - Setup the best data interface and timings
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Configure what has been reported to be the best data interface and NAND
+ * timings supported by the chip and the driver.
+ *
+ * Returns 0 for success or negative error code otherwise.
+ */
+static int nand_setup_interface(struct nand_chip *chip, int chipnr)
+{
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ u8 tmode_param[ONFI_SUBFEATURE_PARAM_LEN] = { };
+ int ret;
+
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
/*
- * Program and erase have their own busy handlers status, sequential
- * in, and deplete1 need no delay.
+ * A nand_reset_interface() put both the NAND chip and the NAND
+ * controller in timings mode 0. If the default mode for this chip is
+ * also 0, no need to proceed to the change again. Plus, at probe time,
+ * nand_setup_interface() uses ->set/get_features() which would
+ * fail anyway as the parameter page is not available yet.
*/
- switch (command) {
-
- case NAND_CMD_CACHEDPROG:
- case NAND_CMD_PAGEPROG:
- case NAND_CMD_ERASE1:
- case NAND_CMD_ERASE2:
- case NAND_CMD_SEQIN:
- case NAND_CMD_RNDIN:
- case NAND_CMD_STATUS:
- return;
+ if (!chip->best_interface_config)
+ return 0;
- case NAND_CMD_RESET:
- if (chip->legacy.dev_ready)
- break;
- udelay(chip->legacy.chip_delay);
- chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
- chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
- while (!(chip->legacy.read_byte(chip) & NAND_STATUS_READY))
- ;
- return;
+ tmode_param[0] = chip->best_interface_config->timings.mode;
- case NAND_CMD_RNDOUT:
- /* No ready / busy check necessary */
- chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
- chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
- return;
+ /* Change the mode on the chip side (if supported by the NAND chip) */
+ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
+ nand_select_target(chip, chipnr);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ nand_deselect_target(chip);
+ if (ret)
+ return ret;
+ }
- case NAND_CMD_READ0:
- chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
- NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
- chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
- NAND_NCE | NAND_CTRL_CHANGE);
+ /* Change the mode on the controller side */
+ ret = ops->setup_interface(chip, chipnr, chip->best_interface_config);
+ if (ret)
+ return ret;
- /* This applies to read commands */
- default:
- /*
- * If we don't have access to the busy pin, we apply the given
- * command delay.
- */
- if (!chip->legacy.dev_ready) {
- udelay(chip->legacy.chip_delay);
- return;
- }
+ /* Check the mode has been accepted by the chip, if supported */
+ if (!nand_supports_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE))
+ goto update_interface_config;
+
+ memset(tmode_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ nand_select_target(chip, chipnr);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
+ tmode_param);
+ nand_deselect_target(chip);
+ if (ret)
+ goto err_reset_chip;
+
+ if (tmode_param[0] != chip->best_interface_config->timings.mode) {
+ pr_warn("timing mode %d not acknowledged by the NAND chip\n",
+ chip->best_interface_config->timings.mode);
+ goto err_reset_chip;
}
+update_interface_config:
+ chip->current_interface_config = chip->best_interface_config;
+
+ return 0;
+
+err_reset_chip:
/*
- * Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine.
+ * Fallback to mode 0 if the chip explicitly did not ack the chosen
+ * timing mode.
*/
- ndelay(100);
+ nand_reset_interface(chip, chipnr);
+ nand_select_target(chip, chipnr);
+ nand_reset_op(chip);
+ nand_deselect_target(chip);
- nand_wait_ready(chip);
+ return ret;
}
/**
- * nand_get_device - [GENERIC] Get chip for selected access
- * @mtd: MTD device structure
- * @new_state: the state which is requested
+ * nand_choose_best_sdr_timings - Pick up the best SDR timings that both the
+ * NAND controller and the NAND chip support
+ * @chip: the NAND chip
+ * @iface: the interface configuration (can eventually be updated)
+ * @spec_timings: specific timings, when not fitting the ONFI specification
*
- * Get the device and lock it for exclusive access
+ * If specific timings are provided, use them. Otherwise, retrieve supported
+ * timing modes from ONFI information.
*/
-static int
-nand_get_device(struct nand_chip *chip, int new_state)
+int nand_choose_best_sdr_timings(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ struct nand_sdr_timings *spec_timings)
{
-retry:
+ const struct nand_controller_ops *ops = chip->controller->ops;
+ int best_mode = 0, mode, ret;
- /* Hardware controller shared among independent devices */
- if (!chip->controller->active)
- chip->controller->active = chip;
+ iface->type = NAND_SDR_IFACE;
- if (chip->controller->active == chip && chip->state == FL_READY) {
- chip->state = new_state;
- return 0;
- }
- if (new_state == FL_PM_SUSPENDED) {
- if (chip->controller->active->state == FL_PM_SUSPENDED) {
- chip->state = FL_PM_SUSPENDED;
- return 0;
+ if (spec_timings) {
+ iface->timings.sdr = *spec_timings;
+ iface->timings.mode = onfi_find_closest_sdr_mode(spec_timings);
+
+ /* Verify the controller supports the requested interface */
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret) {
+ chip->best_interface_config = iface;
+ return ret;
}
+
+ /* Fallback to slower modes */
+ best_mode = iface->timings.mode;
+ } else if (chip->parameters.onfi) {
+ best_mode = fls(chip->parameters.onfi->async_timing_mode) - 1;
+ }
+
+ for (mode = best_mode; mode >= 0; mode--) {
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);
+
+ ret = ops->setup_interface(chip, NAND_DATA_IFACE_CHECK_ONLY,
+ iface);
+ if (!ret)
+ break;
}
- goto retry;
+
+ chip->best_interface_config = iface;
+
+ return 0;
}
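
(Aside, not part of the diff: the NAND_DATA_IFACE_CHECK_ONLY probing above
relies on the controller's ->setup_interface() validating a configuration
without touching hardware. Rough sketch of such a hook; the 20ns cycle
floor and example_apply_timings() are made up for illustration:)

  static int example_setup_interface(struct nand_chip *chip, int chipnr,
                                     const struct nand_interface_config *conf)
  {
      const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

      if (IS_ERR(sdr))
          return PTR_ERR(sdr);

      /* example policy only: refuse modes faster than a 20ns cycle (ps) */
      if (sdr->tRC_min < 20000)
          return -ENOTSUPP;

      /* only validate, do not program any registers yet */
      if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
          return 0;

      return example_apply_timings(chip, sdr);
  }
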
/**
- * nand_wait - [DEFAULT] wait until the command is done
- * @mtd: MTD device structure
- * @chip: NAND chip structure
+ * nand_choose_interface_config - find the best data interface and timings
+ * @chip: The NAND chip
+ *
+ * Find the best data interface and NAND timings supported by the chip
+ * and the driver. Eventually let the NAND manufacturer driver propose his own
+ * set of timings.
+ *
+ * After this function nand_chip->interface_config is initialized with the best
+ * timing mode available.
*
- * Wait for command done. This applies to erase and program only
- * Erase can take up to 400ms and program up to 20ms according to
- * general NAND and SmartMedia specs
+ * Returns 0 for success or negative error code otherwise.
*/
-static int nand_wait(struct nand_chip *chip)
+static int nand_choose_interface_config(struct nand_chip *chip)
{
+ struct nand_interface_config *iface;
+ int ret;
- uint64_t start = get_time_ns();
- uint64_t timeo;
- int status, state = chip->state;
-
- if (state == FL_ERASING)
- timeo = 400 * MSECOND;
- else
- timeo = 20 * MSECOND;
+ if (!nand_controller_can_setup_interface(chip))
+ return 0;
- /* Apply this short delay always to ensure that we do wait tWB in
- * any case on any machine. */
- ndelay(100);
+ iface = kzalloc(sizeof(*iface), GFP_KERNEL);
+ if (!iface)
+ return -ENOMEM;
- chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
+ if (chip->ops.choose_interface_config)
+ ret = chip->ops.choose_interface_config(chip, iface);
+ else
+ ret = nand_choose_best_sdr_timings(chip, iface, NULL);
- while (!is_timeout(start, timeo)) {
- if (chip->legacy.dev_ready) {
- if (chip->legacy.dev_ready(chip))
- break;
- } else {
- if (chip->legacy.read_byte(chip) & NAND_STATUS_READY)
- break;
- }
- }
+ if (ret)
+ kfree(iface);
- status = (int)chip->legacy.read_byte(chip);
- return status;
+ return ret;
}
/**
- * __nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- * @mtd: mtd info
- * @ofs: offset to start unlock from
- * @len: length to unlock
- * @invert: when = 0, unlock the range of blocks within the lower and
- * upper boundary address
- * when = 1, unlock the range of blocks outside the boundaries
- * of the lower and upper boundary address
+ * nand_fill_column_cycles - fill the column cycles of an address
+ * @chip: The NAND chip
+ * @addrs: Array of address cycles to fill
+ * @offset_in_page: The offset in the page
+ *
+ * Fills the first or the first two bytes of the @addrs field depending
+ * on the NAND bus width and the page size.
*
- * Returs unlock status.
+ * Returns the number of cycles needed to encode the column, or a negative
+ * error code in case one of the arguments is invalid.
*/
-static int __nand_unlock(struct nand_chip *chip, loff_t ofs,
- uint64_t len, int invert)
+static int nand_fill_column_cycles(struct nand_chip *chip, u8 *addrs,
+ unsigned int offset_in_page)
{
- int ret = 0;
- int status, page;
-
- /* Submit address of first page to unlock */
- page = ofs >> chip->page_shift;
- chip->legacy.cmdfunc(chip, NAND_CMD_UNLOCK1, -1, page & chip->pagemask);
-
- /* Submit address of last page to unlock */
- page = (ofs + len) >> chip->page_shift;
- chip->legacy.cmdfunc(chip, NAND_CMD_UNLOCK2, -1,
- (page | invert) & chip->pagemask);
-
- /* Call wait ready function */
- status = chip->legacy.waitfunc(chip);
- /* See if device thinks it succeeded */
- if (status & NAND_STATUS_FAIL) {
- pr_debug("%s: error status = 0x%08x\n",
- __func__, status);
- ret = -EIO;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Make sure the offset is less than the actual page size. */
+ if (offset_in_page > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ /*
+ * On small page NANDs, there's a dedicated command to access the OOB
+ * area, and the column address is relative to the start of the OOB
+ * area, not the start of the page. Adjust the address accordingly.
+ */
+ if (mtd->writesize <= 512 && offset_in_page >= mtd->writesize)
+ offset_in_page -= mtd->writesize;
+
+ /*
+ * The offset in page is expressed in bytes, if the NAND bus is 16-bit
+ * wide, then it must be divided by 2.
+ */
+ if (chip->options & NAND_BUSWIDTH_16) {
+ if (WARN_ON(offset_in_page % 2))
+ return -EINVAL;
+
+ offset_in_page /= 2;
}
- return ret;
+ addrs[0] = offset_in_page;
+
+ /*
+ * Small page NANDs use 1 cycle for the columns, while large page NANDs
+ * need 2
+ */
+ if (mtd->writesize <= 512)
+ return 1;
+
+ addrs[1] = offset_in_page >> 8;
+
+ return 2;
}
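
(Aside, not part of the diff: a concrete illustration of the column
encoding done above:)

  /*
   * On a 2048+64 byte large page device with an 8-bit bus,
   * offset_in_page = 2050 is encoded in two column cycles:
   *   addrs[0] = 2050 & 0xff = 0x02
   *   addrs[1] = 2050 >> 8   = 0x08
   * and the helper returns 2. On a 512+16 byte small page device only
   * one cycle is returned, and OOB offsets are first made relative to
   * the OOB start since the access goes through the dedicated READOOB
   * command.
   */
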
-/**
- * nand_unlock - [REPLACEABLE] unlocks specified locked blocks
- * @mtd: mtd info
- * @ofs: offset to start unlock from
- * @len: length to unlock
- *
- * Returns unlock status.
- */
-int nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+static int nand_sp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
struct mtd_info *mtd = nand_to_mtd(chip);
- int ret = 0;
- int chipnr;
+ u8 addrs[4];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(3, addrs, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
- pr_debug("%s: start = 0x%012llx, len = %llu\n",
- __func__, (unsigned long long)ofs, len);
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
- if (check_offs_len(chip, ofs, len))
- ret = -EINVAL;
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
- /* Align to last block address if size addresses end of the device */
- if (ofs + len == mtd->size)
- len -= mtd->erasesize;
+ addrs[1] = page;
+ addrs[2] = page >> 8;
- nand_get_device(chip, FL_UNLOCKING);
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[3] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
- /* Shift to get chip number */
- chipnr = ofs >> chip->chip_shift;
+ return nand_exec_op(chip, &op);
+}
- chip->legacy.select_chip(chip, chipnr);
+static int nand_lp_exec_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[5];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_ADDR(4, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_READSTART, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
- /*
- * Reset the chip.
- * If we want to check the WP through READ STATUS and check the bit 7
- * we must reset the chip
- * some operation can also clear the bit 7 of status register
- * eg. erase/program a locked block
- */
- chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
- /* Check, if it is write protected */
- if (nand_check_wp(chip)) {
- pr_debug("%s: device is write protected!\n",
- __func__);
- ret = -EIO;
- goto out;
- }
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
- ret = __nand_unlock(chip, ofs, len, 0);
+ addrs[2] = page;
+ addrs[3] = page >> 8;
-out:
- chip->legacy.select_chip(chip, -1);
- nand_release_device(chip);
+ if (chip->options & NAND_ROW_ADDR_3) {
+ addrs[4] = page >> 16;
+ instrs[1].ctx.addr.naddrs++;
+ }
- return ret;
+ return nand_exec_op(chip, &op);
}
-EXPORT_SYMBOL(nand_unlock);
/**
- * nand_lock - [REPLACEABLE] locks all blocks present in the device
- * @mtd: mtd info
- * @ofs: offset to start unlock from
- * @len: length to unlock
+ * nand_read_page_op - Do a READ PAGE operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
*
- * This feature is not supported in many NAND parts. 'Micron' NAND parts do
- * have this feature, but it allows only to lock all blocks, not for specified
- * range for block. Implementing 'lock' feature by making use of 'unlock', for
- * now.
+ * This function issues a READ PAGE operation.
+ * This function does not select/unselect the CS line.
*
- * Returns lock status.
+ * Returns 0 on success, a negative error code otherwise.
*/
-int nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len)
{
- int ret = 0;
- int chipnr, status, page;
+ struct mtd_info *mtd = nand_to_mtd(chip);
- pr_debug("%s: start = 0x%012llx, len = %llu\n",
- __func__, (unsigned long long)ofs, len);
+ if (len && !buf)
+ return -EINVAL;
- if (check_offs_len(chip, ofs, len))
- ret = -EINVAL;
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- nand_get_device(chip, FL_LOCKING);
+ if (nand_has_exec_op(chip)) {
+ if (mtd->writesize > 512)
+ return nand_lp_exec_read_page_op(chip, page,
+ offset_in_page, buf,
+ len);
- /* Shift to get chip number */
- chipnr = ofs >> chip->chip_shift;
+ return nand_sp_exec_read_page_op(chip, page, offset_in_page,
+ buf, len);
+ }
- chip->legacy.select_chip(chip, chipnr);
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, offset_in_page, page);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
- /*
- * Reset the chip.
- * If we want to check the WP through READ STATUS and check the bit 7
- * we must reset the chip
- * some operation can also clear the bit 7 of status register
- * eg. erase/program a locked block
- */
- chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_page_op);
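
(Aside, not part of the diff: nand_read_page_op() is the building block
raw page reads are composed from. A minimal sketch of a driver-style raw
read using only the helpers introduced here:)

  static int example_read_page_raw(struct nand_chip *chip, u8 *buf,
                                   int oob_required, int page)
  {
      struct mtd_info *mtd = nand_to_mtd(chip);
      int ret;

      ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
      if (ret)
          return ret;

      if (oob_required)
          /* keep reading into the OOB area of the same page */
          ret = nand_read_data_op(chip, chip->oob_poi,
                                  mtd->oobsize, false, false);

      return ret;
  }
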
- /* Check, if it is write protected */
- if (nand_check_wp(chip)) {
- pr_debug("%s: device is write protected!\n",
- __func__);
- ret = -EIO;
- goto out;
- }
+/**
+ * nand_read_param_page_op - Do a READ PARAMETER PAGE operation
+ * @chip: The NAND chip
+ * @page: parameter page to read
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ *
+ * This function issues a READ PARAMETER PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_read_param_page_op(struct nand_chip *chip, u8 page, void *buf,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *p = buf;
- /* Submit address of first page to lock */
- page = ofs >> chip->page_shift;
- chip->legacy.cmdfunc(chip, NAND_CMD_LOCK, -1, page & chip->pagemask);
+ if (len && !buf)
+ return -EINVAL;
- /* Call wait ready function */
- status = chip->legacy.waitfunc(chip);
- /* See if device thinks it succeeded */
- if (status & NAND_STATUS_FAIL) {
- pr_debug("%s: error status = 0x%08x\n",
- __func__, status);
- ret = -EIO;
- goto out;
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PARAM, 0),
+ NAND_OP_ADDR(1, &page, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tR_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
}
- ret = __nand_unlock(chip, ofs, len, 0x1);
-
-out:
- chip->legacy.select_chip(chip, -1);
- nand_release_device(chip);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, page, -1);
+ for (i = 0; i < len; i++)
+ p[i] = chip->legacy.read_byte(chip);
- return ret;
+ return 0;
}
-EXPORT_SYMBOL(nand_lock);
/**
- * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
- * @buf: buffer to test
- * @len: buffer length
- * @bitflips_threshold: maximum number of bitflips
+ * nand_change_read_column_op - Do a CHANGE READ COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
*
- * Check if a buffer contains only 0xff, which means the underlying region
- * has been erased and is ready to be programmed.
- * The bitflips_threshold specify the maximum number of bitflips before
- * considering the region is not erased.
- * Note: The logic of this function has been extracted from the memweight
- * implementation, except that nand_check_erased_buf function exit before
- * testing the whole buffer if the number of bitflips exceed the
- * bitflips_threshold value.
+ * This function issues a CHANGE READ COLUMN operation.
+ * This function does not select/unselect the CS line.
*
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold.
+ * Returns 0 on success, a negative error code otherwise.
*/
-int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit)
{
- const unsigned char *bitmap = buf;
- int bitflips = 0;
- int weight;
+ struct mtd_info *mtd = nand_to_mtd(chip);
- for (; len && ((uintptr_t)bitmap) % sizeof(long);
- len--, bitmap++) {
- weight = hweight8(*bitmap);
- bitflips += BITS_PER_BYTE - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
+ if (len && !buf)
+ return -EINVAL;
- for (; len >= sizeof(long);
- len -= sizeof(long), bitmap += sizeof(long)) {
- weight = hweight_long(*((unsigned long *)bitmap));
- bitflips += BITS_PER_LONG - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
- }
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- for (; len > 0; len--, bitmap++) {
- weight = hweight8(*bitmap);
- bitflips += BITS_PER_BYTE - weight;
- if (unlikely(bitflips > bitflips_threshold))
- return -EBADMSG;
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[2] = {};
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDOUT, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_RNDOUTSTART,
+ PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ instrs[3].ctx.data.force_8bit = force_8bit;
+
+ return nand_exec_op(chip, &op);
}
- return bitflips;
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, offset_in_page, -1);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_change_read_column_op);
/**
- * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
- * 0xff data
- * @data: data buffer to test
- * @datalen: data length
- * @ecc: ECC buffer
- * @ecclen: ECC length
- * @extraoob: extra OOB buffer
- * @extraooblen: extra OOB length
- * @bitflips_threshold: maximum number of bitflips
- *
- * Check if a data buffer and its associated ECC and OOB data contains only
- * 0xff pattern, which means the underlying region has been erased and is
- * ready to be programmed.
- * The bitflips_threshold specify the maximum number of bitflips before
- * considering the region as not erased.
+ * nand_read_oob_op - Do a READ OOB operation
+ * @chip: The NAND chip
+ * @page: page to read
+ * @offset_in_oob: offset within the OOB area
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
*
- * Note:
- * 1/ ECC algorithms are working on pre-defined block sizes which are usually
- * different from the NAND page size. When fixing bitflips, ECC engines will
- * report the number of errors per chunk, and the NAND core infrastructure
- * expect you to return the maximum number of bitflips for the whole page.
- * This is why you should always use this function on a single chunk and
- * not on the whole page. After checking each chunk you should update your
- * max_bitflips value accordingly.
- * 2/ When checking for bitflips in erased pages you should not only check
- * the payload data but also their associated ECC data, because a user might
- * have programmed almost all bits to 1 but a few. In this case, we
- * shouldn't consider the chunk as erased, and checking ECC bytes prevent
- * this case.
- * 3/ The extraoob argument is optional, and should be used if some of your OOB
- * data are protected by the ECC engine.
- * It could also be used if you support subpages and want to attach some
- * extra OOB data to an ECC chunk.
+ * This function issues a READ OOB operation.
+ * This function does not select/unselect the CS line.
*
- * Returns a positive number of bitflips less than or equal to
- * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
- * threshold. In case of success, the passed buffers are filled with 0xff.
+ * Returns 0 on success, a negative error code otherwise.
*/
-int nand_check_erased_ecc_chunk(void *data, int datalen,
- void *ecc, int ecclen,
- void *extraoob, int extraooblen,
- int bitflips_threshold)
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_oob, void *buf, unsigned int len)
{
- int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
+ struct mtd_info *mtd = nand_to_mtd(chip);
- data_bitflips = nand_check_erased_buf(data, datalen,
- bitflips_threshold);
- if (data_bitflips < 0)
- return data_bitflips;
+ if (len && !buf)
+ return -EINVAL;
- bitflips_threshold -= data_bitflips;
+ if (offset_in_oob + len > mtd->oobsize)
+ return -EINVAL;
- ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
- if (ecc_bitflips < 0)
- return ecc_bitflips;
+ if (nand_has_exec_op(chip))
+ return nand_read_page_op(chip, page,
+ mtd->writesize + offset_in_oob,
+ buf, len);
- bitflips_threshold -= ecc_bitflips;
+ chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, offset_in_oob, page);
+ if (len)
+ chip->legacy.read_buf(chip, buf, len);
- extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
- bitflips_threshold);
- if (extraoob_bitflips < 0)
- return extraoob_bitflips;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_oob_op);
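
A minimal usage sketch for the helper above (not part of the diff): the function name is invented, and nand_select_target()/nand_deselect_target() are the CS helpers used elsewhere in this file.

static int example_block_has_bbm(struct nand_chip *chip, unsigned int page)
{
        u8 bbm[2];
        int ret;

        /* Select die 0 and read the first two OOB bytes of the page. */
        nand_select_target(chip, 0);
        ret = nand_read_oob_op(chip, page, 0, bbm, sizeof(bbm));
        nand_deselect_target(chip);
        if (ret)
                return ret;

        /* A non-0xff marker in the first OOB bytes flags a factory bad block. */
        return bbm[0] != 0xff || bbm[1] != 0xff;
}
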
- if (data_bitflips)
- memset(data, 0xff, datalen);
+static int nand_exec_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool prog)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ u8 addrs[5] = {};
+ struct nand_op_instr instrs[] = {
+ /*
+ * The first instruction will be dropped if we're dealing
+ * with a large page NAND and adjusted if we're dealing
+ * with a small page NAND and the page offset is > 255.
+ */
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ NAND_OP_CMD(NAND_CMD_SEQIN, 0),
+ NAND_OP_ADDR(0, addrs, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ NAND_OP_CMD(NAND_CMD_PAGEPROG, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int naddrs = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ int ret;
+ u8 status;
- if (ecc_bitflips)
- memset(ecc, 0xff, ecclen);
+ if (naddrs < 0)
+ return naddrs;
- if (extraoob_bitflips)
- memset(extraoob, 0xff, extraooblen);
+ addrs[naddrs++] = page;
+ addrs[naddrs++] = page >> 8;
+ if (chip->options & NAND_ROW_ADDR_3)
+ addrs[naddrs++] = page >> 16;
- return data_bitflips + ecc_bitflips + extraoob_bitflips;
+ instrs[2].ctx.addr.naddrs = naddrs;
+
+ /* Drop the last two instructions if we're not programming the page. */
+ if (!prog) {
+ op.ninstrs -= 2;
+ /* Also drop the DATA_OUT instruction if empty. */
+ if (!len)
+ op.ninstrs--;
+ }
+
+ if (mtd->writesize <= 512) {
+ /*
+ * Small pages need some more tweaking: we have to adjust the
+ * first instruction depending on the page offset we're trying
+ * to access.
+ */
+ if (offset_in_page >= mtd->writesize)
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READOOB;
+ else if (offset_in_page >= 256 &&
+ !(chip->options & NAND_BUSWIDTH_16))
+ instrs[0].ctx.cmd.opcode = NAND_CMD_READ1;
+ } else {
+ /*
+ * Drop the first command if we're dealing with a large page
+ * NAND.
+ */
+ op.instrs++;
+ op.ninstrs--;
+ }
+
+ ret = nand_exec_op(chip, &op);
+ if (!prog || ret)
+ return ret;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ return status;
}
-EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
/**
- * nand_read_page_raw - [INTERN] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
+ * nand_prog_page_begin_op - starts a PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
*
- * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ * This function issues the first half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_page_raw(struct nand_chip *chip,
- uint8_t *buf, int oob_required,
- int page)
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
- chip->legacy.read_buf(chip, buf, chip->mtd.writesize);
- if (oob_required)
- chip->legacy.read_buf(chip, chip->oob_poi, chip->mtd.oobsize);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (len && !buf)
+ return -EINVAL;
+
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
+
+ if (nand_has_exec_op(chip))
+ return nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, false);
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page, page);
+
+ if (buf)
+ chip->legacy.write_buf(chip, buf, len);
+
return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_begin_op);
/**
- * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
+ * nand_prog_page_end_op - ends a PROG PAGE operation
+ * @chip: The NAND chip
*
- * We need a special oob layout and handling even when OOB isn't used.
+ * This function issues the second half of a PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_page_raw_syndrome(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+int nand_prog_page_end_op(struct nand_chip *chip)
{
- int eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- uint8_t *oob = chip->oob_poi;
- int steps, size;
-
- for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->legacy.read_buf(chip, buf, eccsize);
- buf += eccsize;
-
- if (chip->ecc.prepad) {
- chip->legacy.read_buf(chip, oob, chip->ecc.prepad);
- oob += chip->ecc.prepad;
- }
+ int ret;
+ u8 status;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_PAGEPROG,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tPROG_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
- chip->legacy.read_buf(chip, oob, eccbytes);
- oob += eccbytes;
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
- if (chip->ecc.postpad) {
- chip->legacy.read_buf(chip, oob, chip->ecc.postpad);
- oob += chip->ecc.postpad;
- }
+ status = ret;
}
- size = chip->mtd.oobsize - (oob - chip->oob_poi);
- if (size)
- chip->legacy.read_buf(chip, oob, size);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_end_op);
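
Taken together, the begin/end helpers let a raw page write be expressed roughly as below; this mirrors what the generic raw write path does, the helper name is invented, and nand_write_data_op() is the data helper introduced further down in this file.

static int example_write_page_raw(struct nand_chip *chip, const u8 *buf,
                                  int oob_required, int page)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        /* SEQIN + address cycles + page data, but no PAGEPROG yet. */
        ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
        if (ret)
                return ret;

        /* Optionally append the OOB bytes before closing the operation. */
        if (oob_required) {
                ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
                                         false);
                if (ret)
                        return ret;
        }

        /* PAGEPROG + status check. */
        return nand_prog_page_end_op(chip);
}
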
/**
- * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
+ * nand_prog_page_op - Do a full PROG PAGE operation
+ * @chip: The NAND chip
+ * @page: page to write
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to write to the page
+ * @len: length of the buffer
+ *
+ * This function issues a full PROG PAGE operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_page_swecc(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- unsigned int max_bitflips = 0;
-
- chip->ecc.read_page_raw(chip, buf, 1, page);
+ int status;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ if (!len || !buf)
+ return -EINVAL;
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- eccsteps = chip->ecc.steps;
- p = buf;
+ if (nand_has_exec_op(chip)) {
+ status = nand_exec_prog_page_op(chip, page, offset_in_page, buf,
+ len, true);
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, offset_in_page,
+ page);
+ chip->legacy.write_buf(chip, buf, len);
+ chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
+ status = chip->legacy.waitfunc(chip);
+ }
- for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
- stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
- }
- return max_bitflips;
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_prog_page_op);
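
When nothing has to be interleaved between SEQIN and PAGEPROG, the one-shot helper is enough; a hypothetical caller could be as small as this.

static int example_prog_full_page(struct nand_chip *chip, unsigned int page,
                                  const void *buf)
{
        struct mtd_info *mtd = nand_to_mtd(chip);

        /* Status is checked internally; -EIO signals a failed program. */
        return nand_prog_page_op(chip, page, 0, buf, mtd->writesize);
}
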
/**
- * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @data_offs: offset of requested data within the page
- * @readlen: data length
- * @bufpoi: buffer to store read data
+ * nand_change_write_column_op - Do a CHANGE WRITE COLUMN operation
+ * @chip: The NAND chip
+ * @offset_in_page: offset within the page
+ * @buf: buffer containing the data to send to the NAND
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function issues a CHANGE WRITE COLUMN operation.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_subpage(struct nand_chip *chip,
- uint32_t data_offs, uint32_t readlen,
- uint8_t *bufpoi, int page)
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page,
+ const void *buf, unsigned int len,
+ bool force_8bit)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int start_step, end_step, num_steps;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- uint8_t *p;
- int data_col_addr, i, gaps = 0;
- int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
- int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
- int index = 0;
- unsigned int max_bitflips = 0;
- /*
- * Currently we have no users in barebox, so disable this for now
- */
- return -ENOTSUPP;
+ if (len && !buf)
+ return -EINVAL;
- /* Column address within the page aligned to ECC size (256bytes) */
- start_step = data_offs / chip->ecc.size;
- end_step = (data_offs + readlen - 1) / chip->ecc.size;
- num_steps = end_step - start_step + 1;
+ if (offset_in_page + len > mtd->writesize + mtd->oobsize)
+ return -EINVAL;
- /* Data size aligned to ECC ecc.size */
- datafrag_len = num_steps * chip->ecc.size;
- eccfrag_len = num_steps * chip->ecc.bytes;
+ /* Small page NANDs do not support column change. */
+ if (mtd->writesize <= 512)
+ return -ENOTSUPP;
- data_col_addr = start_step * chip->ecc.size;
- /* If we read not a page aligned data */
- if (data_col_addr != 0)
- chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, data_col_addr, -1);
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[2];
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RNDIN, 0),
+ NAND_OP_ADDR(2, addrs, PSEC_TO_NSEC(sdr->tCCS_min)),
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+ int ret;
+
+ ret = nand_fill_column_cycles(chip, addrs, offset_in_page);
+ if (ret < 0)
+ return ret;
- p = bufpoi + data_col_addr;
- chip->legacy.read_buf(chip, p, datafrag_len);
+ instrs[2].ctx.data.force_8bit = force_8bit;
- /* Calculate ECC */
- for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
- chip->ecc.calculate(chip, p, &chip->buffers->ecccalc[i]);
+ /* Drop the DATA_OUT instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
- /*
- * The performance is faster if we position offsets according to
- * ecc.pos. Let's make sure that there are no gaps in ECC positions.
- */
- for (i = 0; i < eccfrag_len - 1; i++) {
- if (eccpos[i + start_step * chip->ecc.bytes] + 1 !=
- eccpos[i + start_step * chip->ecc.bytes + 1]) {
- gaps = 1;
- break;
- }
+ return nand_exec_op(chip, &op);
}
- if (gaps) {
- chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, mtd->writesize, -1);
- chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
- } else {
- /*
- * Send the command to read the particular ECC bytes take care
- * about buswidth alignment in read_buf.
- */
- index = start_step * chip->ecc.bytes;
- aligned_pos = eccpos[index] & ~(busw - 1);
- aligned_len = eccfrag_len;
- if (eccpos[index] & (busw - 1))
- aligned_len++;
- if (eccpos[index + (num_steps * chip->ecc.bytes)] & (busw - 1))
- aligned_len++;
+ chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, offset_in_page, -1);
+ if (len)
+ chip->legacy.write_buf(chip, buf, len);
- chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT,
- mtd->writesize + aligned_pos, -1);
- chip->legacy.read_buf(chip, &chip->oob_poi[aligned_pos], aligned_len);
- }
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_change_write_column_op);
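
A sketch of how the column-change helper combines with the begin/end helpers to program page data and OOB in one operation; the name is invented and a large-page chip is assumed, since small pages reject column changes.

static int example_prog_data_and_oob(struct nand_chip *chip, unsigned int page,
                                     const void *data, const void *oob,
                                     unsigned int ooblen)
{
        struct mtd_info *mtd = nand_to_mtd(chip);
        int ret;

        ret = nand_prog_page_begin_op(chip, page, 0, data, mtd->writesize);
        if (ret)
                return ret;

        /* Move the write pointer to the OOB area without restarting. */
        ret = nand_change_write_column_op(chip, mtd->writesize, oob, ooblen,
                                          false);
        if (ret)
                return ret;

        return nand_prog_page_end_op(chip);
}
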
- for (i = 0; i < eccfrag_len; i++)
- chip->buffers->ecccode[i] = chip->oob_poi[eccpos[i + index]];
+/**
+ * nand_readid_op - Do a READID operation
+ * @chip: The NAND chip
+ * @addr: address cycle to pass after the READID command
+ * @buf: buffer used to store the ID
+ * @len: length of the buffer
+ *
+ * This function sends a READID command and reads back the ID returned by the
+ * NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len)
+{
+ unsigned int i;
+ u8 *id = buf;
- p = bufpoi + data_col_addr;
- for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
- int stat;
+ if (len && !buf)
+ return -EINVAL;
- stat = chip->ecc.correct(chip, p,
- &chip->buffers->ecccode[i], &chip->buffers->ecccalc[i]);
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READID, 0),
+ NAND_OP_ADDR(1, &addr, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ /* Drop the DATA_IN instruction if len is set to 0. */
+ if (!len)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
}
- return max_bitflips;
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READID, addr, -1);
+
+ for (i = 0; i < len; i++)
+ id[i] = chip->legacy.read_byte(chip);
+
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_readid_op);
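
Typical use is fetching the two JEDEC ID bytes during detection, roughly as in this made-up helper.

static int example_read_id(struct nand_chip *chip, u8 *maf_id, u8 *dev_id)
{
        u8 id[2];
        int ret;

        /* READID with address cycle 0x00 returns manufacturer/device IDs. */
        ret = nand_readid_op(chip, 0, id, sizeof(id));
        if (ret)
                return ret;

        *maf_id = id[0];
        *dev_id = id[1];

        return 0;
}
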
/**
- * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
+ * nand_status_op - Do a STATUS operation
+ * @chip: The NAND chip
+ * @status: out variable to store the NAND status
*
- * Not for syndrome calculating ECC controllers which need a special oob layout.
+ * This function sends a STATUS command and reads back the status returned by
+ * the NAND.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_page_hwecc(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+int nand_status_op(struct nand_chip *chip, u8 *status)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- unsigned int max_bitflips = 0;
-
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(chip, NAND_ECC_READ);
- chip->legacy.read_buf(chip, p, eccsize);
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_STATUS,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(1, status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ if (!status)
+ op.ninstrs--;
+
+ return nand_exec_op(chip, &op);
}
- chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ chip->legacy.cmdfunc(chip, NAND_CMD_STATUS, -1, -1);
+ if (status)
+ *status = chip->legacy.read_byte(chip);
- eccsteps = chip->ecc.steps;
- p = buf;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_status_op);
- for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
+/**
+ * nand_exit_status_op - Exit a STATUS operation
+ * @chip: The NAND chip
+ *
+ * This function sends a READ0 command to cancel the effect of the STATUS
+ * command to avoid reading only the status until a new read command is sent.
+ *
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_exit_status_op(struct nand_chip *chip)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_READ0, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
- stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
+ return nand_exec_op(chip, &op);
}
- return max_bitflips;
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_READ0, -1, -1);
+
+ return 0;
}
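
The two helpers are meant to be paired when a driver polls the status register by hand. A rough sketch, assuming the caller already has the target selected:

static int example_poll_until_ready(struct nand_chip *chip)
{
        u8 status;
        int ret;

        do {
                ret = nand_status_op(chip, &status);
                if (ret)
                        return ret;
        } while (!(status & NAND_STATUS_READY));

        /* Leave status mode so the next data read returns array data. */
        return nand_exit_status_op(chip);
}
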
/**
- * nand_read_page_hwecc_oob_first - [REPLACEABLE] hw ecc, read oob first
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
+ * nand_erase_op - Do an erase operation
+ * @chip: The NAND chip
+ * @eraseblock: block to erase
+ *
+ * This function sends an ERASE command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
*
- * Hardware ECC for large page chips, require OOB to be read first. For this
- * ECC mode, the write_page method is re-used from ECC_HW. These methods
- * read/write ECC from the OOB area, unlike the ECC_HW_SYNDROME support with
- * multiple ECC steps, follows the "infix ECC" scheme and reads/writes ECC from
- * the data area, by overwriting the NAND manufacturer bad block markings.
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_page_hwecc_oob_first(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- unsigned int max_bitflips = 0;
+ unsigned int page = eraseblock <<
+ (chip->phys_erase_shift - chip->page_shift);
+ int ret;
+ u8 status;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ u8 addrs[3] = { page, page >> 8, page >> 16 };
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_ERASE1, 0),
+ NAND_OP_ADDR(2, addrs, 0),
+ NAND_OP_CMD(NAND_CMD_ERASE2,
+ PSEC_TO_MSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tBERS_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ if (chip->options & NAND_ROW_ADDR_3)
+ instrs[1].ctx.addr.naddrs++;
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
- /* Read the OOB area first */
- chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, 0, page);
- chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
- chip->legacy.cmdfunc(chip, NAND_CMD_READ0, 0, page);
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+ } else {
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
+ chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
+ status = ret;
+ }
- chip->ecc.hwctl(chip, NAND_ECC_READ);
- chip->legacy.read_buf(chip, p, eccsize);
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ if (status & NAND_STATUS_FAIL)
+ return -EIO;
- stat = chip->ecc.correct(chip, p, &ecc_code[i], NULL);
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
- }
- return max_bitflips;
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_erase_op);
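
Erasing a single block then becomes a short sequence (helper name invented); -EIO from nand_erase_op() means the chip reported a failed erase.

static int example_erase_block(struct nand_chip *chip, unsigned int block)
{
        int ret;

        nand_select_target(chip, 0);
        ret = nand_erase_op(chip, block);
        nand_deselect_target(chip);

        return ret;
}
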
/**
- * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: buffer to store read data
- * @oob_required: caller requires OOB data read to chip->oob_poi
- * @page: page number to read
+ * nand_set_features_op - Do a SET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
*
- * The hw generator calculates the error syndrome automatically. Therefore we
- * need a special oob layout and handling.
+ * This function sends a SET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_read_page_syndrome(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page)
+static int nand_set_features_op(struct nand_chip *chip, u8 feature,
+ const void *data)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *p = buf;
- uint8_t *oob = chip->oob_poi;
- unsigned int max_bitflips = 0;
+ const u8 *params = data;
+ int i, ret;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_SET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_OUT(ONFI_SUBFEATURE_PARAM_LEN, data,
+ PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- int stat;
+ chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ chip->legacy.write_byte(chip, params[i]);
- chip->ecc.hwctl(chip, NAND_ECC_READ);
- chip->legacy.read_buf(chip, p, eccsize);
+ ret = chip->legacy.waitfunc(chip);
+ if (ret < 0)
+ return ret;
- if (chip->ecc.prepad) {
- chip->legacy.read_buf(chip, oob, chip->ecc.prepad);
- oob += chip->ecc.prepad;
- }
+ if (ret & NAND_STATUS_FAIL)
+ return -EIO;
- chip->ecc.hwctl(chip, NAND_ECC_READSYN);
- chip->legacy.read_buf(chip, oob, eccbytes);
- stat = chip->ecc.correct(chip, p, oob, NULL);
+ return 0;
+}
- if (stat < 0) {
- mtd->ecc_stats.failed++;
- } else {
- mtd->ecc_stats.corrected += stat;
- max_bitflips = max_t(unsigned int, max_bitflips, stat);
- }
+/**
+ * nand_get_features_op - Do a GET FEATURES operation
+ * @chip: The NAND chip
+ * @feature: feature id
+ * @data: 4 bytes of data
+ *
+ * This function sends a GET FEATURES command and waits for the NAND to be
+ * ready before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+static int nand_get_features_op(struct nand_chip *chip, u8 feature,
+ void *data)
+{
+ u8 *params = data;
+ int i;
- oob += eccbytes;
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_GET_FEATURES, 0),
+ NAND_OP_ADDR(1, &feature, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tFEAT_max),
+ PSEC_TO_NSEC(sdr->tRR_min)),
+ NAND_OP_8BIT_DATA_IN(ONFI_SUBFEATURE_PARAM_LEN,
+ data, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
- if (chip->ecc.postpad) {
- chip->legacy.read_buf(chip, oob, chip->ecc.postpad);
- oob += chip->ecc.postpad;
- }
+ chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, feature, -1);
+ for (i = 0; i < ONFI_SUBFEATURE_PARAM_LEN; ++i)
+ params[i] = chip->legacy.read_byte(chip);
+
+ return 0;
+}
+
+static int nand_wait_rdy_op(struct nand_chip *chip, unsigned int timeout_ms,
+ unsigned int delay_ns)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(timeout_ms),
+ PSEC_TO_NSEC(delay_ns)),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
}
- /* Calculate remaining oob bytes */
- i = mtd->oobsize - (oob - chip->oob_poi);
- if (i)
- chip->legacy.read_buf(chip, oob, i);
+ /* Apply delay or wait for ready/busy pin */
+ if (!chip->legacy.dev_ready)
+ udelay(chip->legacy.chip_delay);
+ else
+ nand_wait_ready(chip);
- return max_bitflips;
+ return 0;
}
/**
- * nand_transfer_oob - [INTERN] Transfer oob to client buffer
- * @chip: nand chip structure
- * @oob: oob destination address
- * @ops: oob ops structure
- * @len: size of oob to transfer
+ * nand_reset_op - Do a reset operation
+ * @chip: The NAND chip
+ *
+ * This function sends a RESET command and waits for the NAND to be ready
+ * before returning.
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
- struct mtd_oob_ops *ops, size_t len)
+int nand_reset_op(struct nand_chip *chip)
{
- switch (ops->mode) {
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
+ NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
- case MTD_OPS_PLACE_OOB:
- case MTD_OPS_RAW:
- memcpy(oob, chip->oob_poi + ops->ooboffs, len);
- return oob + len;
+ chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
- case MTD_OPS_AUTO_OOB: {
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
- uint32_t boffs = 0, roffs = ops->ooboffs;
- size_t bytes = 0;
-
- for (; free->length && len; free++, len -= bytes) {
- /* Read request not from offset 0? */
- if (unlikely(roffs)) {
- if (roffs >= free->length) {
- roffs -= free->length;
- continue;
- }
- boffs = free->offset + roffs;
- bytes = min_t(size_t, len,
- (free->length - roffs));
- roffs = 0;
- } else {
- bytes = min_t(size_t, len, free->length);
- boffs = free->offset;
- }
- memcpy(oob, chip->oob_poi + boffs, bytes);
- oob += bytes;
- }
- return oob;
- }
- default:
- BUG();
- }
- return NULL;
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset_op);
/**
- * nand_do_read_ops - [INTERN] Read data with ECC
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob ops structure
+ * nand_read_data_op - Read data from the NAND
+ * @chip: The NAND chip
+ * @buf: buffer used to store the data
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ * @check_only: do not actually run the command, only check if the
+ * controller driver supports it
*
- * Internal function. Called with chip held.
+ * This function does a raw data read on the bus. Usually used after launching
+ * another NAND operation like nand_read_page_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
- struct mtd_oob_ops *ops)
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only)
{
- int chipnr, page, realpage, col, bytes, aligned, oob_required;
- struct mtd_info *mtd = nand_to_mtd(chip);
- struct mtd_ecc_stats stats;
- int ret = 0;
- uint32_t readlen = ops->len;
- uint32_t oobreadlen = ops->ooblen;
- uint32_t max_oobsize = ops->mode == MTD_OPS_AUTO_OOB ?
- chip->mtd.oobavail : chip->mtd.oobsize;
-
- uint8_t *bufpoi, *oob, *buf;
- unsigned int max_bitflips = 0;
+ if (!len || !buf)
+ return -EINVAL;
- stats = mtd->ecc_stats;
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_IN(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
- chipnr = (int)(from >> chip->chip_shift);
- chip->legacy.select_chip(chip, chipnr);
+ instrs[0].ctx.data.force_8bit = force_8bit;
- realpage = (int)(from >> chip->page_shift);
- page = realpage & chip->pagemask;
+ if (check_only)
+ return nand_check_op(chip, &op);
- col = (int)(from & (mtd->writesize - 1));
+ return nand_exec_op(chip, &op);
+ }
- buf = ops->datbuf;
- oob = ops->oobbuf;
- oob_required = oob ? 1 : 0;
+ if (check_only)
+ return 0;
- while (1) {
- bytes = min(mtd->writesize - col, readlen);
- aligned = (bytes == mtd->writesize);
+ if (force_8bit) {
+ u8 *p = buf;
+ unsigned int i;
- /* Is the current page in the buffer? */
- if (realpage != chip->pagebuf || oob) {
- bufpoi = aligned ? buf : chip->buffers->databuf;
+ for (i = 0; i < len; i++)
+ p[i] = chip->legacy.read_byte(chip);
+ } else {
+ chip->legacy.read_buf(chip, buf, len);
+ }
- chip->legacy.cmdfunc(chip, NAND_CMD_READ0, 0x00, page);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_read_data_op);
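
A sketch of the usual pattern: start the page read with nand_read_page_op() and no data, then pull the bytes off the bus separately. The helper name is invented.

static int example_read_page_in_chunks(struct nand_chip *chip,
                                       unsigned int page, void *buf,
                                       unsigned int len)
{
        int ret;

        /* READ0 + address cycles + wait for ready, no data transfer yet. */
        ret = nand_read_page_op(chip, page, 0, NULL, 0);
        if (ret)
                return ret;

        /* Now fetch the payload (this could also be split into chunks). */
        return nand_read_data_op(chip, buf, len, false, false);
}
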
- /*
- * Now read the page into the buffer. Absent an error,
- * the read methods return max bitflips per ecc step.
- */
- if (unlikely(ops->mode == MTD_OPS_RAW))
- ret = chip->ecc.read_page_raw(chip, bufpoi,
- oob_required,
- page);
- else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
- !oob)
- ret = chip->ecc.read_subpage(chip,
- col, bytes, bufpoi, page);
- else
- ret = chip->ecc.read_page(chip, bufpoi,
- oob_required, page);
- if (ret < 0) {
- if (!aligned)
- /* Invalidate page cache */
- chip->pagebuf = -1;
- break;
- }
+/**
+ * nand_write_data_op - Write data to the NAND
+ * @chip: The NAND chip
+ * @buf: buffer containing the data to send on the bus
+ * @len: length of the buffer
+ * @force_8bit: force 8-bit bus access
+ *
+ * This function does a raw data write on the bus. Usually used after launching
+ * another NAND operation like nand_prog_page_begin_op().
+ * This function does not select/unselect the CS line.
+ *
+ * Returns 0 on success, a negative error code otherwise.
+ */
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit)
+{
+ if (!len || !buf)
+ return -EINVAL;
- max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_DATA_OUT(len, buf, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
- /* Transfer not aligned data */
- if (!aligned) {
- if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
- !(mtd->ecc_stats.failed - stats.failed) &&
- (ops->mode != MTD_OPS_RAW)) {
- chip->pagebuf = realpage;
- chip->pagebuf_bitflips = ret;
- } else {
- /* Invalidate page cache */
- chip->pagebuf = -1;
- }
- memcpy(buf, chip->buffers->databuf + col, bytes);
- }
+ instrs[0].ctx.data.force_8bit = force_8bit;
- buf += bytes;
+ return nand_exec_op(chip, &op);
+ }
- if (unlikely(oob)) {
- int toread = min(oobreadlen, max_oobsize);
+ if (force_8bit) {
+ const u8 *p = buf;
+ unsigned int i;
- if (toread) {
- oob = nand_transfer_oob(chip,
- oob, ops, toread);
- oobreadlen -= toread;
- }
- }
-
- if (chip->options & NAND_NEED_READRDY) {
- /* Apply delay or wait for ready/busy pin */
- if (!chip->legacy.dev_ready)
- udelay(chip->legacy.chip_delay);
- else
- nand_wait_ready(chip);
- }
- } else {
- memcpy(buf, chip->buffers->databuf + col, bytes);
- buf += bytes;
- max_bitflips = max_t(unsigned int, max_bitflips,
- chip->pagebuf_bitflips);
- }
-
- readlen -= bytes;
-
- if (!readlen)
- break;
-
- /* For subsequent reads align to page boundary */
- col = 0;
- /* Increment page address */
- realpage++;
-
- page = realpage & chip->pagemask;
- /* Check, if we cross a chip boundary */
- if (!page) {
- chipnr++;
- chip->legacy.select_chip(chip, -1);
- chip->legacy.select_chip(chip, chipnr);
- }
+ for (i = 0; i < len; i++)
+ chip->legacy.write_byte(chip, p[i]);
+ } else {
+ chip->legacy.write_buf(chip, buf, len);
}
- chip->legacy.select_chip(chip, -1);
-
- ops->retlen = ops->len - (size_t) readlen;
- if (oob)
- ops->oobretlen = ops->ooblen - oobreadlen;
-
- if (ret < 0)
- return ret;
-
- if (mtd->ecc_stats.failed - stats.failed)
- return -EBADMSG;
- return max_bitflips;
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_write_data_op);
/**
- * nand_read - [MTD Interface] MTD compatibility function for nand_do_read_ecc
- * @mtd: MTD device structure
- * @from: offset to read from
- * @len: number of bytes to read
- * @retlen: pointer to variable to store the number of read bytes
- * @buf: the databuffer to put data
+ * struct nand_op_parser_ctx - Context used by the parser
+ * @instrs: array of all the instructions that must be addressed
+ * @ninstrs: length of the @instrs array
+ * @subop: Sub-operation to be passed to the NAND controller
*
- * Get hold of the chip and call nand_do_read.
+ * This structure is used by the core to split NAND operations into
+ * sub-operations that can be handled by the NAND controller.
*/
-static int nand_read(struct mtd_info *mtd, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct mtd_oob_ops ops;
- int ret;
-
- nand_get_device(chip, FL_READING);
- ops.len = len;
- ops.datbuf = buf;
- ops.ooblen = 0;
- ops.oobbuf = NULL;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_read_ops(chip, from, &ops);
- *retlen = ops.retlen;
- nand_release_device(chip);
- return ret;
-}
+struct nand_op_parser_ctx {
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ struct nand_subop subop;
+};
/**
- * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
+ * nand_op_parser_must_split_instr - Checks if an instruction must be split
+ * @pat: the parser pattern element that matches @instr
+ * @instr: pointer to the instruction to check
+ * @start_offset: this is an in/out parameter. If @instr has already been
+ * split, then @start_offset is the offset from which to start
+ * (either an address cycle or an offset in the data buffer).
+ * Conversely, if the function returns true (ie. instr must be
+ * split), this parameter is updated to point to the first
+ * data/address cycle that has not been taken care of.
+ *
+ * Some NAND controllers are limited and cannot send X address cycles in a
+ * single operation, or cannot read/write more than Y bytes at the same time.
+ * In this case, split the instruction that does not fit in a single
+ * controller-operation into two or more chunks.
+ *
+ * Returns true if the instruction must be split, false otherwise.
+ * The @start_offset parameter is also updated to the offset at which the next
+ * bundle of instruction must start (if an address or a data instruction).
*/
-static __maybe_unused int nand_read_oob_std(struct nand_chip *chip, int page)
+static bool
+nand_op_parser_must_split_instr(const struct nand_op_parser_pattern_elem *pat,
+ const struct nand_op_instr *instr,
+ unsigned int *start_offset)
{
- if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
- return -ENOTSUPP;
+ switch (pat->type) {
+ case NAND_OP_ADDR_INSTR:
+ if (!pat->ctx.addr.maxcycles)
+ break;
- chip->legacy.cmdfunc(chip, NAND_CMD_READOOB, 0, page);
- chip->legacy.read_buf(chip, chip->oob_poi, chip->mtd.oobsize);
- return 0;
-}
+ if (instr->ctx.addr.naddrs - *start_offset >
+ pat->ctx.addr.maxcycles) {
+ *start_offset += pat->ctx.addr.maxcycles;
+ return true;
+ }
+ break;
-/**
- * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
- * with syndromes
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to read
- */
-static __maybe_unused int nand_read_oob_syndrome(struct nand_chip *chip,
- int page)
-{
- uint8_t *buf = chip->oob_poi;
- int length = chip->mtd.oobsize;
- int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
- int eccsize = chip->ecc.size;
- uint8_t *bufpoi = buf;
- int i, toread, sndrnd = 0, pos;
+ case NAND_OP_DATA_IN_INSTR:
+ case NAND_OP_DATA_OUT_INSTR:
+ if (!pat->ctx.data.maxlen)
+ break;
- if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
- return -ENOTSUPP;
+ if (instr->ctx.data.len - *start_offset >
+ pat->ctx.data.maxlen) {
+ *start_offset += pat->ctx.data.maxlen;
+ return true;
+ }
+ break;
- chip->legacy.cmdfunc(chip, NAND_CMD_READ0, chip->ecc.size, page);
- for (i = 0; i < chip->ecc.steps; i++) {
- if (sndrnd) {
- pos = eccsize + i * (eccsize + chunk);
- if (chip->mtd.writesize > 512)
- chip->legacy.cmdfunc(chip, NAND_CMD_RNDOUT, pos, -1);
- else
- chip->legacy.cmdfunc(chip, NAND_CMD_READ0, pos, page);
- } else
- sndrnd = 1;
- toread = min_t(int, length, chunk);
- chip->legacy.read_buf(chip, bufpoi, toread);
- bufpoi += toread;
- length -= toread;
+ default:
+ break;
}
- if (length > 0)
- chip->legacy.read_buf(chip, bufpoi, length);
- return 0;
+ return false;
}
/**
- * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
+ * nand_op_parser_match_pat - Checks if a pattern matches the instructions
+ * remaining in the parser context
+ * @pat: the pattern to test
+ * @ctx: the parser context structure to match with the pattern @pat
+ *
+ * Check if @pat matches the set or a sub-set of instructions remaining in @ctx.
+ * Returns true if this is the case, false otherwise. When true is returned,
+ * @ctx->subop is updated with the set of instructions to be passed to the
+ * controller driver.
*/
-static __maybe_unused int nand_write_oob_std(struct nand_chip *chip, int page)
+static bool
+nand_op_parser_match_pat(const struct nand_op_parser_pattern *pat,
+ struct nand_op_parser_ctx *ctx)
{
- int status = 0;
- const uint8_t *buf = chip->oob_poi;
- int length = chip->mtd.oobsize;
+ unsigned int instr_offset = ctx->subop.first_instr_start_off;
+ const struct nand_op_instr *end = ctx->instrs + ctx->ninstrs;
+ const struct nand_op_instr *instr = ctx->subop.instrs;
+ unsigned int i, ninstrs;
- if (!IS_ENABLED(CONFIG_NAND_READ_OOB) || !IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
-
- chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, chip->mtd.writesize, page);
- chip->legacy.write_buf(chip, buf, length);
- /* Send command to program the OOB data */
- chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
-
- status = chip->legacy.waitfunc(chip);
+ for (i = 0, ninstrs = 0; i < pat->nelems && instr < end; i++) {
+ /*
+ * The pattern instruction does not match the operation
+ * instruction. If the instruction is marked optional in the
+ * pattern definition, we skip the pattern element and continue
+ * to the next one. If the element is mandatory, there's no
+ * match and we can return false directly.
+ */
+ if (instr->type != pat->elems[i].type) {
+ if (!pat->elems[i].optional)
+ return false;
- return status & NAND_STATUS_FAIL ? -EIO : 0;
-}
+ continue;
+ }
-/**
- * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
- * with syndrome - only for large page flash
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @page: page number to write
- */
-static __maybe_unused int nand_write_oob_syndrome(struct nand_chip *chip, int page)
-{
- int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
- int eccsize = chip->ecc.size, length = chip->mtd.oobsize;
- int i, len, pos, status = 0, sndcmd = 0, steps = chip->ecc.steps;
- const uint8_t *bufpoi = chip->oob_poi;
+ /*
+ * Now check the pattern element constraints. If the pattern is
+ * not able to handle the whole instruction in a single step,
+ * we have to split it.
+ * The last_instr_end_off value comes back updated to point to
+ * the position where we have to split the instruction (the
+ * start of the next subop chunk).
+ */
+ if (nand_op_parser_must_split_instr(&pat->elems[i], instr,
+ &instr_offset)) {
+ ninstrs++;
+ i++;
+ break;
+ }
- if (!IS_ENABLED(CONFIG_NAND_READ_OOB) || !IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ instr++;
+ ninstrs++;
+ instr_offset = 0;
+ }
/*
- * data-ecc-data-ecc ... ecc-oob
- * or
- * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ * This can happen if all instructions of a pattern are optional.
+ * Still, if there's not at least one instruction handled by this
+ * pattern, this is not a match, and we should try the next one (if
+ * any).
*/
- if (!chip->ecc.prepad && !chip->ecc.postpad) {
- pos = steps * (eccsize + chunk);
- steps = 0;
- } else
- pos = eccsize;
-
- chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, pos, page);
- for (i = 0; i < steps; i++) {
- if (sndcmd) {
- if (chip->mtd.writesize <= 512) {
- uint32_t fill = 0xFFFFFFFF;
+ if (!ninstrs)
+ return false;
- len = eccsize;
- while (len > 0) {
- int num = min_t(int, len, 4);
- chip->legacy.write_buf(chip, (uint8_t *)&fill,
- num);
- len -= num;
- }
- } else {
- pos = eccsize + i * (eccsize + chunk);
- chip->legacy.cmdfunc(chip, NAND_CMD_RNDIN, pos, -1);
- }
- } else
- sndcmd = 1;
- len = min_t(int, length, chunk);
- chip->legacy.write_buf(chip, bufpoi, len);
- bufpoi += len;
- length -= len;
+ /*
+ * We had a match on the pattern head, but the pattern may be longer
+ * than the instructions we're asked to execute. We need to make sure
+ * there are no mandatory elements in the pattern tail.
+ */
+ for (; i < pat->nelems; i++) {
+ if (!pat->elems[i].optional)
+ return false;
}
- if (length > 0)
- chip->legacy.write_buf(chip, bufpoi, length);
- chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->legacy.waitfunc(chip);
+ /*
+ * We have a match: update the subop structure accordingly and return
+ * true.
+ */
+ ctx->subop.ninstrs = ninstrs;
+ ctx->subop.last_instr_end_off = instr_offset;
- return status & NAND_STATUS_FAIL ? -EIO : 0;
+ return true;
}
-/**
- * nand_do_read_oob - [INTERN] NAND read out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operations description structure
- *
- * NAND read out-of-band data from the spare area.
- */
-static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
- struct mtd_oob_ops *ops)
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
- int page, realpage, chipnr;
- struct mtd_ecc_stats stats;
- int readlen = ops->ooblen;
- int len;
- uint8_t *buf = ops->oobbuf;
- int ret = 0;
+ const struct nand_op_instr *instr;
+ char *prefix = " ";
+ unsigned int i;
- if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
- return -ENOTSUPP;
+ pr_debug("executing subop (CS%d):\n", ctx->subop.cs);
- pr_debug("%s: from = 0x%08Lx, len = %i\n",
- __func__, (unsigned long long)from, readlen);
+ for (i = 0; i < ctx->ninstrs; i++) {
+ instr = &ctx->instrs[i];
- stats = mtd->ecc_stats;
+ if (instr == &ctx->subop.instrs[0])
+ prefix = " ->";
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
+ nand_op_trace(prefix, instr);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start read outside oob\n",
- __func__);
- return -EINVAL;
+ if (instr == &ctx->subop.instrs[ctx->subop.ninstrs - 1])
+ prefix = " ";
}
+}
+#else
+static void nand_op_parser_trace(const struct nand_op_parser_ctx *ctx)
+{
+ /* NOP */
+}
+#endif
- /* Do not allow reads past end of device */
- if (unlikely(from >= mtd->size ||
- ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) -
- (from >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
+static int nand_op_parser_cmp_ctx(const struct nand_op_parser_ctx *a,
+ const struct nand_op_parser_ctx *b)
+{
+ if (a->subop.ninstrs < b->subop.ninstrs)
+ return -1;
+ else if (a->subop.ninstrs > b->subop.ninstrs)
+ return 1;
- chipnr = (int)(from >> chip->chip_shift);
- chip->legacy.select_chip(chip, chipnr);
+ if (a->subop.last_instr_end_off < b->subop.last_instr_end_off)
+ return -1;
+ else if (a->subop.last_instr_end_off > b->subop.last_instr_end_off)
+ return 1;
- /* Shift to get page */
- realpage = (int)(from >> chip->page_shift);
- page = realpage & chip->pagemask;
+ return 0;
+}
- while (1) {
- if (ops->mode == MTD_OPS_RAW)
- ret = chip->ecc.read_oob_raw(chip, page);
- else
- ret = chip->ecc.read_oob(chip, page);
+/**
+ * nand_op_parser_exec_op - exec_op parser
+ * @chip: the NAND chip
+ * @parser: patterns description provided by the controller driver
+ * @op: the NAND operation to address
+ * @check_only: when true, the function only checks if @op can be handled but
+ * does not execute the operation
+ *
+ * Helper function designed to ease integration of NAND controller drivers that
+ * only support a limited set of instruction sequences. The supported sequences
+ * are described in @parser, and the framework takes care of splitting @op into
+ * multiple sub-operations (if required) and passing them back to the ->exec()
+ * callback of the matching pattern if @check_only is set to false.
+ *
+ * NAND controller drivers should call this function from their own ->exec_op()
+ * implementation.
+ *
+ * Returns 0 on success, a negative error code otherwise. A failure can be
+ * caused by an unsupported operation (none of the supported patterns is able
+ * to handle the requested operation), or an error returned by one of the
+ * matching pattern->exec() hook.
+ */
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only)
+{
+ struct nand_op_parser_ctx ctx = {
+ .subop.cs = op->cs,
+ .subop.instrs = op->instrs,
+ .instrs = op->instrs,
+ .ninstrs = op->ninstrs,
+ };
+ unsigned int i;
+
+ while (ctx.subop.instrs < op->instrs + op->ninstrs) {
+ const struct nand_op_parser_pattern *pattern;
+ struct nand_op_parser_ctx best_ctx;
+ int ret, best_pattern = -1;
+
+ for (i = 0; i < parser->npatterns; i++) {
+ struct nand_op_parser_ctx test_ctx = ctx;
+
+ pattern = &parser->patterns[i];
+ if (!nand_op_parser_match_pat(pattern, &test_ctx))
+ continue;
+
+ if (best_pattern >= 0 &&
+ nand_op_parser_cmp_ctx(&test_ctx, &best_ctx) <= 0)
+ continue;
+
+ best_pattern = i;
+ best_ctx = test_ctx;
+ }
- if (ret < 0)
- break;
+ if (best_pattern < 0) {
+ pr_debug("->exec_op() parser: pattern not found!\n");
+ return -ENOTSUPP;
+ }
- len = min(len, readlen);
- buf = nand_transfer_oob(chip, buf, ops, len);
+ ctx = best_ctx;
+ nand_op_parser_trace(&ctx);
- if (chip->options & NAND_NEED_READRDY) {
- /* Apply delay or wait for ready/busy pin */
- if (!chip->legacy.dev_ready)
- udelay(chip->legacy.chip_delay);
- else
- nand_wait_ready(chip);
+ if (!check_only) {
+ pattern = &parser->patterns[best_pattern];
+ ret = pattern->exec(chip, &ctx.subop);
+ if (ret)
+ return ret;
}
- readlen -= len;
- if (!readlen)
- break;
-
- /* Increment page address */
- realpage++;
+ /*
+ * Update the context structure by pointing to the start of the
+ * next subop.
+ */
+ ctx.subop.instrs = ctx.subop.instrs + ctx.subop.ninstrs;
+ if (ctx.subop.last_instr_end_off)
+ ctx.subop.instrs -= 1;
- page = realpage & chip->pagemask;
- /* Check, if we cross a chip boundary */
- if (!page) {
- chipnr++;
- chip->legacy.select_chip(chip, -1);
- chip->legacy.select_chip(chip, chipnr);
- }
+ ctx.subop.first_instr_start_off = ctx.subop.last_instr_end_off;
}
- chip->legacy.select_chip(chip, -1);
- ops->oobretlen = ops->ooblen - readlen;
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nand_op_parser_exec_op);
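
For a controller driver the expected wiring looks roughly like the sketch below. The foo_* names are invented; the NAND_OP_PARSER* macros come from the imported rawnand.h, and the maximum address-cycle count and data length are arbitrary examples.

static int foo_exec_pattern(struct nand_chip *chip,
                            const struct nand_subop *subop);

static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
        NAND_OP_PARSER_PATTERN(foo_exec_pattern,
                               NAND_OP_PARSER_PAT_CMD_ELEM(false),
                               NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
                               NAND_OP_PARSER_PAT_CMD_ELEM(true),
                               NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
                               NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));

static int foo_exec_op(struct nand_chip *chip,
                       const struct nand_operation *op, bool check_only)
{
        /* Let the core split op into chunks foo_exec_pattern() can handle. */
        return nand_op_parser_exec_op(chip, &foo_op_parser, op, check_only);
}
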
- if (ret < 0)
- return ret;
+static bool nand_instr_is_data(const struct nand_op_instr *instr)
+{
+ return instr && (instr->type == NAND_OP_DATA_IN_INSTR ||
+ instr->type == NAND_OP_DATA_OUT_INSTR);
+}
- if (mtd->ecc_stats.failed - stats.failed)
- return -EBADMSG;
+static bool nand_subop_instr_is_valid(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ return subop && instr_idx < subop->ninstrs;
+}
- return mtd->ecc_stats.corrected - stats.corrected ? -EUCLEAN : 0;
+static unsigned int nand_subop_get_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ if (instr_idx)
+ return 0;
+
+ return subop->first_instr_start_off;
}
/**
- * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
- * @mtd: MTD device structure
- * @from: offset to read from
- * @ops: oob operation description structure
+ * nand_subop_get_addr_start_off - Get the start offset in an address array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
*
- * NAND read data and/or out-of-band data.
+ * During driver development, one could be tempted to directly use the
+ * ->addr.addrs field of address instructions. This is wrong as address
+ * instructions might be split.
+ *
+ * Given an address instruction, returns the offset of the first cycle to issue.
*/
-static int nand_read_oob(struct mtd_info *mtd, loff_t from,
- struct mtd_oob_ops *ops)
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret = -ENOTSUPP;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
- if (!IS_ENABLED(CONFIG_NAND_READ_OOB))
- return -ENOTSUPP;
+ return nand_subop_get_start_off(subop, instr_idx);
+}
+EXPORT_SYMBOL_GPL(nand_subop_get_addr_start_off);
- ops->retlen = 0;
+/**
+ * nand_subop_get_num_addr_cyc - Get the remaining address cycles to assert
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
+ *
+ * During driver development, one could be tempted to directly use the
+ * ->addr->naddrs field of an address instruction. This is wrong as instructions
+ * might be split.
+ *
+ * Given an address instruction, returns the number of address cycles to issue.
+ */
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int instr_idx)
+{
+ int start_off, end_off;
- /* Do not allow reads past end of device */
- if (ops->datbuf && (from + ops->len) > chip->mtd.size) {
- pr_debug("%s: attempt to read beyond end of device\n",
- __func__);
- return -EINVAL;
- }
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ subop->instrs[instr_idx].type != NAND_OP_ADDR_INSTR))
+ return 0;
- nand_get_device(chip, FL_READING);
+ start_off = nand_subop_get_addr_start_off(subop, instr_idx);
- switch (ops->mode) {
- case MTD_OPS_PLACE_OOB:
- case MTD_OPS_AUTO_OOB:
- case MTD_OPS_RAW:
- break;
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.addr.naddrs;
- default:
- goto out;
- }
-
- if (!ops->datbuf)
- ret = nand_do_read_oob(chip, from, ops);
- else
- ret = nand_do_read_ops(chip, from, ops);
-
-out:
- nand_release_device(chip);
- return ret;
+ return end_off - start_off;
}
-
+EXPORT_SYMBOL_GPL(nand_subop_get_num_addr_cyc);
/**
- * nand_write_page_raw - [INTERN] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
+ * nand_subop_get_data_start_off - Get the start offset in a data array
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
*
- * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ * During driver development, one could be tempted to directly use the
+ * ->data->buf.{in,out} field of data instructions. This is wrong as data
+ * instructions might be split.
+ *
+ * Given a data instruction, returns the offset to start from.
*/
-static __maybe_unused int nand_write_page_raw(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
-
- chip->legacy.write_buf(chip, buf, chip->mtd.writesize);
- if (oob_required)
- chip->legacy.write_buf(chip, chip->oob_poi, chip->mtd.oobsize);
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
- return 0;
+ return nand_subop_get_start_off(subop, instr_idx);
}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_start_off);
/**
- * nand_write_page_raw_syndrome - [INTERN] raw page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
+ * nand_subop_get_data_len - Get the number of bytes to retrieve
+ * @subop: The entire sub-operation
+ * @instr_idx: Index of the instruction inside the sub-operation
*
- * We need a special oob layout and handling even when ECC isn't checked.
+ * During driver development, one could be tempted to directly use the
+ * ->data->len field of a data instruction. This is wrong as data instructions
+ * might be split.
+ *
+ * Returns the length of the chunk of data to send/receive.
*/
-static __maybe_unused int nand_write_page_raw_syndrome(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int instr_idx)
{
- int eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- uint8_t *oob = chip->oob_poi;
- int steps, size;
-
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
-
- for (steps = chip->ecc.steps; steps > 0; steps--) {
- chip->legacy.write_buf(chip, buf, eccsize);
- buf += eccsize;
-
- if (chip->ecc.prepad) {
- chip->legacy.write_buf(chip, oob, chip->ecc.prepad);
- oob += chip->ecc.prepad;
- }
+ int start_off = 0, end_off;
- chip->legacy.read_buf(chip, oob, eccbytes);
- oob += eccbytes;
+ if (WARN_ON(!nand_subop_instr_is_valid(subop, instr_idx) ||
+ !nand_instr_is_data(&subop->instrs[instr_idx])))
+ return 0;
- if (chip->ecc.postpad) {
- chip->legacy.write_buf(chip, oob, chip->ecc.postpad);
- oob += chip->ecc.postpad;
- }
- }
+ start_off = nand_subop_get_data_start_off(subop, instr_idx);
- size = chip->mtd.oobsize - (oob - chip->oob_poi);
- if (size)
- chip->legacy.write_buf(chip, oob, size);
+ if (instr_idx == subop->ninstrs - 1 &&
+ subop->last_instr_end_off)
+ end_off = subop->last_instr_end_off;
+ else
+ end_off = subop->instrs[instr_idx].ctx.data.len;
- return 0;
+ return end_off - start_off;
}
+EXPORT_SYMBOL_GPL(nand_subop_get_data_len);
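
Continuing the earlier foo_* sketch, a pattern exec callback should always go through these accessors instead of the raw instruction fields; the controller accesses themselves are only hinted at in comments.

static int foo_exec_pattern(struct nand_chip *chip,
                            const struct nand_subop *subop)
{
        unsigned int i;

        for (i = 0; i < subop->ninstrs; i++) {
                const struct nand_op_instr *instr = &subop->instrs[i];

                if (instr->type == NAND_OP_ADDR_INSTR) {
                        unsigned int off = nand_subop_get_addr_start_off(subop, i);
                        unsigned int cycles = nand_subop_get_num_addr_cyc(subop, i);

                        /* A real driver would issue 'cycles' address cycles
                         * starting at instr->ctx.addr.addrs[off]. */
                        pr_debug("addr: %u cycles from offset %u\n", cycles, off);
                } else if (instr->type == NAND_OP_DATA_IN_INSTR) {
                        unsigned int off = nand_subop_get_data_start_off(subop, i);
                        unsigned int len = nand_subop_get_data_len(subop, i);

                        /* A real driver would transfer 'len' bytes into
                         * instr->ctx.data.buf.in + off. */
                        pr_debug("data-in: %u bytes at offset %u\n", len, off);
                }
        }

        return 0;
}
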
+
/**
- * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
+ * nand_reset - Reset and initialize a NAND device
+ * @chip: The NAND chip
+ * @chipnr: Internal die id
+ *
+ * Save the timings data structure, then apply SDR timings mode 0 (see
+ * nand_reset_interface for details), do the reset operation, and finally
+ * restore the previous timings.
+ *
+ * Returns 0 on success, a negative error code otherwise.
*/
-static __maybe_unused int nand_write_page_swecc(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+int nand_reset(struct nand_chip *chip, int chipnr)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- const uint8_t *p = buf;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ int ret;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ ret = nand_reset_interface(chip, chipnr);
+ if (ret)
+ return ret;
- /* Software ECC calculation */
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ /*
+ * The CS line has to be released before we can apply the new NAND
+ * interface settings, hence this weird nand_select_target()
+ * nand_deselect_target() dance.
+ */
+ nand_select_target(chip, chipnr);
+ ret = nand_reset_op(chip);
+ nand_deselect_target(chip);
+ if (ret)
+ return ret;
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ ret = nand_setup_interface(chip, chipnr);
+ if (ret)
+ return ret;
- return chip->ecc.write_page_raw(chip, buf, 1);
+ return 0;
}
+EXPORT_SYMBOL_GPL(nand_reset);
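A minimal sketch of the typical caller-side pattern, assuming the nanddev_ntargets() helper from the generic NAND core (the wrapper name is a hypothetical placeholder):

/* Sketch: reset every die of a (possibly multi-die) package */
static int my_reset_all_dies(struct nand_chip *chip)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
		ret = nand_reset(chip, i);
		if (ret)
			return ret;
	}

	return 0;
}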
/**
- * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
+ * nand_get_features - wrapper to perform a GET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four bytes array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
*/
-static __maybe_unused int nand_write_page_hwecc(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+int nand_get_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- const uint8_t *p = buf;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
-
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ if (!nand_supports_get_features(chip, addr))
return -ENOTSUPP;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
- chip->ecc.hwctl(chip, NAND_ECC_WRITE);
- chip->legacy.write_buf(chip, p, eccsize);
- chip->ecc.calculate(chip, p, &ecc_calc[i]);
- }
-
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ if (chip->legacy.get_features)
+ return chip->legacy.get_features(chip, addr, subfeature_param);
- chip->legacy.write_buf(chip, chip->oob_poi, chip->mtd.oobsize);
-
- return 0;
+ return nand_get_features_op(chip, addr, subfeature_param);
}
-
/**
- * nand_write_subpage_hwecc - [REPLACABLE] hardware ECC based subpage write
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @column: column address of subpage within the page
- * @data_len: data length
- * @oob_required: must write chip->oob_poi to OOB
+ * nand_set_features - wrapper to perform a SET_FEATURE
+ * @chip: NAND chip info structure
+ * @addr: feature address
+ * @subfeature_param: the subfeature parameters, a four bytes array
+ *
+ * Returns 0 for success, a negative error otherwise. Returns -ENOTSUPP if the
+ * operation cannot be handled.
*/
-static __maybe_unused int nand_write_subpage_hwecc(struct nand_chip *chip,
- uint32_t offset,
- uint32_t data_len, const uint8_t *data_buf,
- int oob_required)
+int nand_set_features(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
{
- uint8_t *oob_buf = chip->oob_poi;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- int ecc_size = chip->ecc.size;
- int ecc_bytes = chip->ecc.bytes;
- int ecc_steps = chip->ecc.steps;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
- uint32_t start_step = offset / ecc_size;
- uint32_t end_step = (offset + data_len - 1) / ecc_size;
- int oob_bytes = chip->mtd.oobsize / ecc_steps;
- int step, i;
-
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
+ if (!nand_supports_set_features(chip, addr))
return -ENOTSUPP;
- for (step = 0; step < ecc_steps; step++) {
- /* configure controller for WRITE access */
- chip->ecc.hwctl(chip, NAND_ECC_WRITE);
-
- /* write data (untouched subpages already masked by 0xFF) */
- chip->legacy.write_buf(chip, data_buf, ecc_size);
+ if (chip->legacy.set_features)
+ return chip->legacy.set_features(chip, addr, subfeature_param);
- /* mask ECC of un-touched subpages by padding 0xFF */
- if ((step < start_step) || (step > end_step))
- memset(ecc_calc, 0xff, ecc_bytes);
- else
- chip->ecc.calculate(chip, data_buf, ecc_calc);
+ return nand_set_features_op(chip, addr, subfeature_param);
+}
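A minimal usage sketch of the two wrappers from vendor code, assuming the ONFI constants from the newly imported include/linux/mtd/onfi.h; the function name and the choice of timing mode are hypothetical:

#include <linux/mtd/onfi.h>

/* Sketch: switch die 0 to ONFI timing mode 3 and read the setting back */
static int my_vendor_set_timing_mode3(struct nand_chip *chip)
{
	u8 param[ONFI_SUBFEATURE_PARAM_LEN] = { 3, };
	int ret;

	nand_select_target(chip, 0);
	ret = nand_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE, param);
	if (!ret)
		ret = nand_get_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE,
					param);
	nand_deselect_target(chip);

	return ret;
}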
- /* mask OOB of un-touched subpages by padding 0xFF */
- /* if oob_required, preserve OOB metadata of written subpage */
- if (!oob_required || (step < start_step) || (step > end_step))
- memset(oob_buf, 0xff, oob_bytes);
+/**
+ * nand_check_erased_buf - check if a buffer contains (almost) only 0xff data
+ * @buf: buffer to test
+ * @len: buffer length
+ * @bitflips_threshold: maximum number of bitflips
+ *
+ * Check if a buffer contains only 0xff, which means the underlying region
+ * has been erased and is ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ * Note: The logic of this function has been extracted from the memweight
+ * implementation, except that nand_check_erased_buf() exits before
+ * testing the whole buffer if the number of bitflips exceeds the
+ * bitflips_threshold value.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold.
+ */
+int nand_check_erased_buf(void *buf, int len, int bitflips_threshold)
+{
+ const unsigned char *bitmap = buf;
+ int bitflips = 0;
+ int weight;
- data_buf += ecc_size;
- ecc_calc += ecc_bytes;
- oob_buf += oob_bytes;
+ for (; len && ((uintptr_t)bitmap) % sizeof(long);
+ len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
}
- /* copy calculated ECC for whole page to chip->buffer->oob */
- /* this include masked-value(0xFF) for unwritten subpages */
- ecc_calc = chip->buffers->ecccalc;
- for (i = 0; i < chip->ecc.total; i++)
- chip->oob_poi[eccpos[i]] = ecc_calc[i];
+ for (; len >= sizeof(long);
+ len -= sizeof(long), bitmap += sizeof(long)) {
+ unsigned long d = *((unsigned long *)bitmap);
+ if (d == ~0UL)
+ continue;
+ weight = hweight_long(d);
+ bitflips += BITS_PER_LONG - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
- /* write OOB buffer to NAND device */
- chip->legacy.write_buf(chip, chip->oob_poi, chip->mtd.oobsize);
+ for (; len > 0; len--, bitmap++) {
+ weight = hweight8(*bitmap);
+ bitflips += BITS_PER_BYTE - weight;
+ if (unlikely(bitflips > bitflips_threshold))
+ return -EBADMSG;
+ }
- return 0;
+ return bitflips;
}
-
/**
- * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
- * @mtd: mtd info structure
- * @chip: nand chip info structure
- * @buf: data buffer
- * @oob_required: must write chip->oob_poi to OOB
+ * nand_check_erased_ecc_chunk - check if an ECC chunk contains (almost) only
+ * 0xff data
+ * @data: data buffer to test
+ * @datalen: data length
+ * @ecc: ECC buffer
+ * @ecclen: ECC length
+ * @extraoob: extra OOB buffer
+ * @extraooblen: extra OOB length
+ * @bitflips_threshold: maximum number of bitflips
*
- * The hw generator calculates the error syndrome automatically. Therefore we
- * need a special oob layout and handling.
+ * Check if a data buffer and its associated ECC and OOB data contains only
+ * 0xff pattern, which means the underlying region has been erased and is
+ * ready to be programmed.
+ * The bitflips_threshold specifies the maximum number of bitflips before
+ * considering the region as not erased.
+ *
+ * Note:
+ * 1/ ECC algorithms work on pre-defined block sizes which are usually
+ * different from the NAND page size. When fixing bitflips, ECC engines will
+ * report the number of errors per chunk, and the NAND core infrastructure
+ * expects you to return the maximum number of bitflips for the whole page.
+ * This is why you should always use this function on a single chunk and
+ * not on the whole page. After checking each chunk you should update your
+ * max_bitflips value accordingly.
+ * 2/ When checking for bitflips in erased pages you should not only check
+ * the payload data but also the associated ECC data, because a user might
+ * have programmed almost all bits to 1 except a few. In this case, we
+ * shouldn't consider the chunk as erased, and checking the ECC bytes prevents
+ * this.
+ * 3/ The extraoob argument is optional, and should be used if some of your OOB
+ * data are protected by the ECC engine.
+ * It could also be used if you support subpages and want to attach some
+ * extra OOB data to an ECC chunk.
+ *
+ * Returns a positive number of bitflips less than or equal to
+ * bitflips_threshold, or -ERROR_CODE for bitflips in excess of the
+ * threshold. In case of success, the passed buffers are filled with 0xff.
*/
-static __maybe_unused int nand_write_page_syndrome(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int bitflips_threshold)
{
- int i, eccsize = chip->ecc.size;
- int eccbytes = chip->ecc.bytes;
- int eccsteps = chip->ecc.steps;
- const uint8_t *p = buf;
- uint8_t *oob = chip->oob_poi;
+ int data_bitflips = 0, ecc_bitflips = 0, extraoob_bitflips = 0;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ data_bitflips = nand_check_erased_buf(data, datalen,
+ bitflips_threshold);
+ if (data_bitflips < 0)
+ return data_bitflips;
- for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ bitflips_threshold -= data_bitflips;
- chip->ecc.hwctl(chip, NAND_ECC_WRITE);
- chip->legacy.write_buf(chip, p, eccsize);
+ ecc_bitflips = nand_check_erased_buf(ecc, ecclen, bitflips_threshold);
+ if (ecc_bitflips < 0)
+ return ecc_bitflips;
- if (chip->ecc.prepad) {
- chip->legacy.write_buf(chip, oob, chip->ecc.prepad);
- oob += chip->ecc.prepad;
- }
+ bitflips_threshold -= ecc_bitflips;
- chip->ecc.calculate(chip, p, oob);
- chip->legacy.write_buf(chip, oob, eccbytes);
- oob += eccbytes;
+ extraoob_bitflips = nand_check_erased_buf(extraoob, extraooblen,
+ bitflips_threshold);
+ if (extraoob_bitflips < 0)
+ return extraoob_bitflips;
- if (chip->ecc.postpad) {
- chip->legacy.write_buf(chip, oob, chip->ecc.postpad);
- oob += chip->ecc.postpad;
- }
- }
+ if (data_bitflips)
+ memset(data, 0xff, datalen);
- /* Calculate remaining oob bytes */
- i = chip->mtd.oobsize - (oob - chip->oob_poi);
- if (i)
- chip->legacy.write_buf(chip, oob, i);
+ if (ecc_bitflips)
+ memset(ecc, 0xff, ecclen);
- return 0;
+ if (extraoob_bitflips)
+ memset(extraoob, 0xff, extraooblen);
+
+ return data_bitflips + ecc_bitflips + extraoob_bitflips;
}
+EXPORT_SYMBOL(nand_check_erased_ecc_chunk);
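To illustrate notes 1/ and 2/ above, a minimal sketch of the per-chunk pattern a driver's read_page() implementation is expected to follow; my_read_and_correct_chunk() is a hypothetical driver helper and the ECC bytes are assumed to be contiguous in OOB for brevity:

/* Hypothetical helper: read + correct one ECC chunk, -EBADMSG on failure */
extern int my_read_and_correct_chunk(struct nand_chip *chip, int page,
				     int step, u8 *data, u8 *ecc);

static int my_read_page(struct nand_chip *chip, u8 *buf, int oob_required,
			int page)
{
	struct mtd_info *mtd = nand_to_mtd(chip);
	unsigned int max_bitflips = 0;
	int step;

	for (step = 0; step < chip->ecc.steps; step++) {
		u8 *data = buf + step * chip->ecc.size;
		u8 *ecc = chip->oob_poi + step * chip->ecc.bytes;
		int stat;

		stat = my_read_and_correct_chunk(chip, page, step, data, ecc);
		if (stat == -EBADMSG)
			/* Check data *and* ECC bytes of this chunk only */
			stat = nand_check_erased_ecc_chunk(data, chip->ecc.size,
							   ecc, chip->ecc.bytes,
							   NULL, 0,
							   chip->ecc.strength);

		if (stat < 0) {
			mtd->ecc_stats.failed++;
		} else {
			mtd->ecc_stats.corrected += stat;
			max_bitflips = max_t(unsigned int, max_bitflips, stat);
		}
	}

	return max_bitflips;
}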
/**
- * nand_write_page - [REPLACEABLE] write one page
- * @mtd: MTD device structure
- * @chip: NAND chip descriptor
- * @offset: address offset within the page
- * @data_len: length of actual data to be written
- * @buf: the data to write
- * @oob_required: must write chip->oob_poi to OOB
- * @page: page number to write
- * @cached: cached programming
- * @raw: use _raw version of write_page
+ * nand_read_page_raw_notsupp - dummy read raw page function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Returns -ENOTSUPP unconditionally.
*/
-static int nand_write_page(struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
+int nand_read_page_raw_notsupp(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
- int status, subpage;
-
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
-
- if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
- chip->ecc.write_subpage)
- subpage = offset || (data_len < chip->mtd.writesize);
- else
- subpage = 0;
-
- chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, 0x00, page);
-
- if (unlikely(raw))
- status = chip->ecc.write_page_raw(chip, buf,
- oob_required);
- else if (subpage)
- status = chip->ecc.write_subpage(chip, offset, data_len,
- buf, oob_required);
- else
- status = chip->ecc.write_page(chip, buf, oob_required);
-
- if (status < 0)
- return status;
-
- /*
- * Cached progamming disabled for now. Not sure if it's worth the
- * trouble. The speed gain is not very impressive. (2.3->2.6Mib/s).
- */
- cached = 0;
-
- if (!cached || !NAND_HAS_CACHEPROG(chip)) {
-
- chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
- status = chip->legacy.waitfunc(chip);
-
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- } else {
- chip->legacy.cmdfunc(chip, NAND_CMD_CACHEDPROG, -1, -1);
- status = chip->legacy.waitfunc(chip);
- }
-
- return 0;
+ return -ENOTSUPP;
}
/**
- * nand_fill_oob - [INTERN] Transfer client buffer to oob
- * @mtd: MTD device structure
- * @oob: oob data buffer
- * @len: oob data write length
- * @ops: oob ops structure
+ * nand_read_page_raw - [INTERN] read raw page data without ecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
*/
-static uint8_t *nand_fill_oob(struct nand_chip *chip, uint8_t *oob, size_t len,
- struct mtd_oob_ops *ops)
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page)
{
- /*
- * Initialise to all 0xFF, to avoid the possibility of left over OOB
- * data from a previous OOB read.
- */
- memset(chip->oob_poi, 0xff, chip->mtd.oobsize);
-
- switch (ops->mode) {
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
- case MTD_OPS_PLACE_OOB:
- case MTD_OPS_RAW:
- memcpy(chip->oob_poi + ops->ooboffs, oob, len);
- return oob + len;
+ ret = nand_read_page_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
- case MTD_OPS_AUTO_OOB: {
- struct nand_oobfree *free = chip->ecc.layout->oobfree;
- uint32_t boffs = 0, woffs = ops->ooboffs;
- size_t bytes = 0;
-
- for (; free->length && len; free++, len -= bytes) {
- /* Write request not from offset 0? */
- if (unlikely(woffs)) {
- if (woffs >= free->length) {
- woffs -= free->length;
- continue;
- }
- boffs = free->offset + woffs;
- bytes = min_t(size_t, len,
- (free->length - woffs));
- woffs = 0;
- } else {
- bytes = min_t(size_t, len, free->length);
- boffs = free->offset;
- }
- memcpy(chip->oob_poi + boffs, oob, bytes);
- oob += bytes;
- }
- return oob;
- }
- default:
- BUG();
+ if (oob_required) {
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false, false);
+ if (ret)
+ return ret;
}
- return NULL;
-}
-#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+ return 0;
+}
+EXPORT_SYMBOL(nand_read_page_raw);
/**
- * nand_do_write_ops - [INTERN] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operations description structure
+ * nand_monolithic_read_page_raw - Monolithic page read in raw mode
+ * @chip: NAND chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*
- * NAND write with ECC.
+ * This is a raw page read, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus,
+ * optionally, the OOB) to be loaded in the NAND cache and sent over the
+ * bus (from the NAND chip to the NAND controller) in a single
+ * operation. This is an alternative to nand_read_page_raw(), which
+ * first reads the main data, and if the OOB data is requested too,
+ * then reads more data on the bus.
*/
-static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
- struct mtd_oob_ops *ops)
+int nand_monolithic_read_page_raw(struct nand_chip *chip, u8 *buf,
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int chipnr, realpage, page, blockmask, column;
- uint32_t writelen = ops->len;
-
- uint32_t oobwritelen = ops->ooblen;
- uint32_t oobmaxlen = ops->mode == MTD_OPS_AUTO_OOB ?
- mtd->oobavail : mtd->oobsize;
-
- uint8_t *oob = ops->oobbuf;
- uint8_t *buf = ops->datbuf;
+ unsigned int size = mtd->writesize;
+ u8 *read_buf = buf;
int ret;
- int oob_required = oob ? 1 : 0;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ if (oob_required) {
+ size += mtd->oobsize;
- ops->retlen = 0;
- if (!writelen)
- return 0;
+ if (buf != chip->data_buf)
+ read_buf = nand_get_data_buf(chip);
+ }
- column = to & (mtd->writesize - 1);
+ ret = nand_read_page_op(chip, page, 0, read_buf, size);
+ if (ret)
+ return ret;
- chipnr = (int)(to >> chip->chip_shift);
- chip->legacy.select_chip(chip, chipnr);
+ if (buf != chip->data_buf)
+ memcpy(buf, read_buf, mtd->writesize);
- /* Check, if it is write protected */
- if (nand_check_wp(chip)) {
- ret = -EIO;
- goto err_out;
- }
+ return 0;
+}
+EXPORT_SYMBOL(nand_monolithic_read_page_raw);
- realpage = (int)(to >> chip->page_shift);
- page = realpage & chip->pagemask;
- blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1;
+/**
+ * nand_read_page_raw_syndrome - [INTERN] read raw page data without ecc
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * We need a special oob layout and handling even when OOB isn't used.
+ */
+static int nand_read_page_raw_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
- /* Invalidate the page cache, when we write to the cached page */
- if (to <= (chip->pagebuf << chip->page_shift) &&
- (chip->pagebuf << chip->page_shift) < (to + ops->len))
- chip->pagebuf = -1;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- /* Don't allow multipage oob writes with offset */
- if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
- ret = -EINVAL;
- goto err_out;
- }
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_read_data_op(chip, buf, eccsize, false, false);
+ if (ret)
+ return ret;
- while (1) {
- int bytes = mtd->writesize;
- int cached = writelen > bytes && page != blockmask;
- uint8_t *wbuf = buf;
+ buf += eccsize;
- /* Partial page write? */
- if (unlikely(column || writelen < (mtd->writesize - 1))) {
- cached = 0;
- bytes = min_t(int, bytes - column, (int) writelen);
- chip->pagebuf = -1;
- memset(chip->buffers->databuf, 0xff, mtd->writesize);
- memcpy(&chip->buffers->databuf[column], buf, bytes);
- wbuf = chip->buffers->databuf;
- }
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false, false);
+ if (ret)
+ return ret;
- if (unlikely(oob)) {
- size_t len = min(oobwritelen, oobmaxlen);
- oob = nand_fill_oob(chip, oob, len, ops);
- oobwritelen -= len;
- } else {
- /* We still need to erase leftover OOB data */
- memset(chip->oob_poi, 0xff, mtd->oobsize);
+ oob += chip->ecc.prepad;
}
- if (oob || !mtd_buf_all_ff(wbuf, mtd->writesize)) {
- ret = chip->write_page(chip, column, bytes, wbuf,
- oob_required, page, cached,
- (ops->mode == MTD_OPS_RAW));
- if (ret)
- break;
- }
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
+ if (ret)
+ return ret;
- writelen -= bytes;
- if (!writelen)
- break;
+ oob += eccbytes;
- column = 0;
- buf += bytes;
- realpage++;
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false, false);
+ if (ret)
+ return ret;
- page = realpage & chip->pagemask;
- /* Check, if we cross a chip boundary */
- if (!page) {
- chipnr++;
- chip->legacy.select_chip(chip, -1);
- chip->legacy.select_chip(chip, chipnr);
+ oob += chip->ecc.postpad;
}
}
- ops->retlen = ops->len - writelen;
- if (unlikely(oob))
- ops->oobretlen = ops->ooblen;
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_read_data_op(chip, oob, size, false, false);
+ if (ret)
+ return ret;
+ }
-err_out:
- chip->legacy.select_chip(chip, -1);
- return ret;
+ return 0;
}
/**
- * nand_write - [MTD Interface] NAND write with ECC
- * @mtd: MTD device structure
- * @to: offset to write to
- * @len: number of bytes to write
- * @retlen: pointer to variable to store the number of written bytes
- * @buf: the data to write
- *
- * NAND write with ECC.
+ * nand_read_page_swecc - [REPLACEABLE] software ECC based page read function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
*/
-static int nand_write(struct mtd_info *mtd, loff_t to, size_t len,
- size_t *retlen, const uint8_t *buf)
+static int nand_read_page_swecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- struct mtd_oob_ops ops;
- int ret;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ chip->ecc.read_page_raw(chip, buf, 1, page);
- nand_get_device(chip, FL_WRITING);
- ops.len = len;
- ops.datbuf = (uint8_t *)buf;
- ops.oobbuf = NULL;
- ops.mode = MTD_OPS_PLACE_OOB;
- ret = nand_do_write_ops(chip, to, &ops);
- *retlen = ops.retlen;
- nand_release_device(chip);
- return ret;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
}
/**
- * nand_do_write_oob - [MTD Interface] NAND write out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
- *
- * NAND write out-of-band.
+ * nand_read_subpage - [REPLACEABLE] ECC based sub-page read function
+ * @chip: nand chip info structure
+ * @data_offs: offset of requested data within the page
+ * @readlen: data length
+ * @bufpoi: buffer to store read data
+ * @page: page number to read
*/
-static int nand_do_write_oob(struct nand_chip *chip, loff_t to,
- struct mtd_oob_ops *ops)
+static int nand_read_subpage(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int chipnr, page, status, len;
+ int start_step, end_step, num_steps, ret;
+ uint8_t *p;
+ int data_col_addr, i, gaps = 0;
+ int datafrag_len, eccfrag_len, aligned_len, aligned_pos;
+ int busw = (chip->options & NAND_BUSWIDTH_16) ? 2 : 1;
+ int index, section = 0;
+ unsigned int max_bitflips = 0;
+ struct mtd_oob_region oobregion = { };
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+	/* Column address within the page, aligned to the ECC size (256 bytes) */
+ start_step = data_offs / chip->ecc.size;
+ end_step = (data_offs + readlen - 1) / chip->ecc.size;
+ num_steps = end_step - start_step + 1;
+ index = start_step * chip->ecc.bytes;
- pr_debug("%s: to = 0x%08x, len = %i\n",
- __func__, (unsigned int)to, (int)ops->ooblen);
+	/* Data size aligned to the ECC step size (ecc.size) */
+ datafrag_len = num_steps * chip->ecc.size;
+ eccfrag_len = num_steps * chip->ecc.bytes;
- if (ops->mode == MTD_OPS_AUTO_OOB)
- len = chip->ecc.layout->oobavail;
- else
- len = mtd->oobsize;
+ data_col_addr = start_step * chip->ecc.size;
+	/* The data to read may not start at the beginning of the page */
+ p = bufpoi + data_col_addr;
+ ret = nand_read_page_op(chip, page, data_col_addr, p, datafrag_len);
+ if (ret)
+ return ret;
- /* Do not allow write past end of page */
- if ((ops->ooboffs + ops->ooblen) > len) {
- pr_debug("%s: attempt to write past end of page\n",
- __func__);
- return -EINVAL;
- }
+ /* Calculate ECC */
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size)
+ chip->ecc.calculate(chip, p, &chip->ecc.calc_buf[i]);
- if (unlikely(ops->ooboffs >= len)) {
- pr_debug("%s: attempt to start write outside oob\n",
- __func__);
- return -EINVAL;
- }
+ /*
+	 * Performance is better if we fetch the ECC bytes at their actual
+	 * positions in the OOB layout, so make sure there are no gaps in the
+	 * ECC positions first.
+ */
+	ret = mtd_ooblayout_find_eccregion(mtd, index, &section, &oobregion);
+ if (ret)
+ return ret;
- /* Do not allow write past end of device */
- if (unlikely(to >= mtd->size ||
- ops->ooboffs + ops->ooblen >
- ((mtd->size >> chip->page_shift) -
- (to >> chip->page_shift)) * len)) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
+ if (oobregion.length < eccfrag_len)
+ gaps = 1;
+
+ if (gaps) {
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ } else {
+ /*
+		 * Send the command to read the particular ECC bytes, taking
+		 * care of buswidth alignment in read_buf.
+ */
+ aligned_pos = oobregion.offset & ~(busw - 1);
+ aligned_len = eccfrag_len;
+ if (oobregion.offset & (busw - 1))
+ aligned_len++;
+ if ((oobregion.offset + (num_steps * chip->ecc.bytes)) &
+ (busw - 1))
+ aligned_len++;
+
+ ret = nand_change_read_column_op(chip,
+ mtd->writesize + aligned_pos,
+ &chip->oob_poi[aligned_pos],
+ aligned_len, false);
+ if (ret)
+ return ret;
}
- chipnr = (int)(to >> chip->chip_shift);
- chip->legacy.select_chip(chip, chipnr);
+ ret = mtd_ooblayout_get_eccbytes(mtd, chip->ecc.code_buf,
+ chip->oob_poi, index, eccfrag_len);
+ if (ret)
+ return ret;
- /* Shift to get page */
- page = (int)(to >> chip->page_shift);
+ p = bufpoi + data_col_addr;
+ for (i = 0; i < eccfrag_len ; i += chip->ecc.bytes, p += chip->ecc.size) {
+ int stat;
- /*
- * Reset the chip. Some chips (like the Toshiba TC5832DC found in one
- * of my DiskOnChip 2000 test units) will clear the whole data page too
- * if we don't do this. I have no clue why, but I seem to have 'fixed'
- * it in the doc2000 driver in August 1999. dwmw2.
- */
- chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+ stat = chip->ecc.correct(chip, p, &chip->ecc.code_buf[i],
+ &chip->ecc.calc_buf[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ &chip->ecc.code_buf[i],
+ chip->ecc.bytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
- /* Check, if it is write protected */
- if (nand_check_wp(chip)) {
- chip->legacy.select_chip(chip, -1);
- return -EROFS;
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
}
+ return max_bitflips;
+}
- /* Invalidate the page cache, if we write to the cached page */
- if (page == chip->pagebuf)
- chip->pagebuf = -1;
+/**
+ * nand_read_page_hwecc - [REPLACEABLE] hardware ECC based page read function
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * Not for syndrome calculating ECC controllers which need a special oob layout.
+ */
+static int nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *p = buf;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
+ unsigned int max_bitflips = 0;
- nand_fill_oob(chip, ops->oobbuf, ops->ooblen, ops);
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- if (ops->mode == MTD_OPS_RAW)
- status = chip->ecc.write_oob_raw(chip, page & chip->pagemask);
- else
- status = chip->ecc.write_oob(chip, page & chip->pagemask);
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
- chip->legacy.select_chip(chip, -1);
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
- if (status)
- return status;
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ }
- ops->oobretlen = ops->ooblen;
+ ret = nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ false);
+ if (ret)
+ return ret;
- return 0;
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ eccsteps = chip->ecc.steps;
+ p = buf;
+
+ for (i = 0 ; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
+
+ stat = chip->ecc.correct(chip, p, &ecc_code[i], &ecc_calc[i]);
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, eccsize,
+ &ecc_code[i], eccbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+ return max_bitflips;
}
/**
- * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
- * @mtd: MTD device structure
- * @to: offset to write to
- * @ops: oob operation description structure
+ * nand_read_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page read
+ * @chip: nand chip info structure
+ * @buf: buffer to store read data
+ * @oob_required: caller requires OOB data read to chip->oob_poi
+ * @page: page number to read
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
*/
-static int nand_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
+static int nand_read_page_syndrome(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret = -ENOTSUPP;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret, i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ int eccpadbytes = eccbytes + chip->ecc.prepad + chip->ecc.postpad;
+ uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ unsigned int max_bitflips = 0;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
- ops->retlen = 0;
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ int stat;
- /* Do not allow writes past end of device */
- if (ops->datbuf && (to + ops->len) > mtd->size) {
- pr_debug("%s: attempt to write beyond end of device\n",
- __func__);
- return -EINVAL;
+ chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ ret = nand_read_data_op(chip, p, eccsize, false, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.prepad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.hwctl(chip, NAND_ECC_READSYN);
+
+ ret = nand_read_data_op(chip, oob, eccbytes, false, false);
+ if (ret)
+ return ret;
+
+ stat = chip->ecc.correct(chip, p, oob, NULL);
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_read_data_op(chip, oob, chip->ecc.postpad,
+ false, false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+
+ if (stat == -EBADMSG &&
+ (chip->ecc.options & NAND_ECC_GENERIC_ERASED_CHECK)) {
+ /* check for empty pages with bitflips */
+ stat = nand_check_erased_ecc_chunk(p, chip->ecc.size,
+ oob - eccpadbytes,
+ eccpadbytes,
+ NULL, 0,
+ chip->ecc.strength);
+ }
+
+ if (stat < 0) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += stat;
+ max_bitflips = max_t(unsigned int, max_bitflips, stat);
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_read_data_op(chip, oob, i, false, false);
+ if (ret)
+ return ret;
}
- nand_get_device(chip, FL_WRITING);
+ return max_bitflips;
+}
+
+/**
+ * nand_transfer_oob - [INTERN] Transfer oob to client buffer
+ * @chip: NAND chip object
+ * @oob: oob destination address
+ * @ops: oob ops structure
+ * @len: size of oob to transfer
+ */
+static uint8_t *nand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
+ struct mtd_oob_ops *ops, size_t len)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
switch (ops->mode) {
+
case MTD_OPS_PLACE_OOB:
- case MTD_OPS_AUTO_OOB:
case MTD_OPS_RAW:
- break;
+ memcpy(oob, chip->oob_poi + ops->ooboffs, len);
+ return oob + len;
+
+ case MTD_OPS_AUTO_OOB:
+ ret = mtd_ooblayout_get_databytes(mtd, oob, chip->oob_poi,
+ ops->ooboffs, len);
+ BUG_ON(ret);
+ return oob + len;
default:
- goto out;
+ BUG();
+ }
+ return NULL;
+}
+
+/**
+ * nand_setup_read_retry - [INTERN] Set the READ RETRY mode
+ * @chip: NAND chip object
+ * @retry_mode: the retry mode to use
+ *
+ * Some vendors supply a special command to shift the Vt threshold, to be used
+ * when there are too many bitflips in a page (i.e., ECC error). After setting
+ * a new threshold, the host should retry reading the page.
+ */
+static int nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ pr_debug("setting READ RETRY mode %d\n", retry_mode);
+
+ if (retry_mode >= chip->read_retries)
+ return -EINVAL;
+
+ if (!chip->ops.setup_read_retry)
+ return -EOPNOTSUPP;
+
+ return chip->ops.setup_read_retry(chip, retry_mode);
+}
+
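For context, the hook used above is meant to be filled in by vendor-specific code during detection. A minimal sketch, with a hypothetical vendor feature address and function names:

#define MY_VENDOR_READ_RETRY_ADDR	0x89	/* placeholder value */

/* Sketch: vendor SET_FEATURES based read-retry implementation */
static int my_vendor_setup_read_retry(struct nand_chip *chip, int retry_mode)
{
	u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { retry_mode, };

	return nand_set_features(chip, MY_VENDOR_READ_RETRY_ADDR, feature);
}

static void my_vendor_init(struct nand_chip *chip)
{
	chip->read_retries = 8;		/* number of supported retry modes */
	chip->ops.setup_read_retry = my_vendor_setup_read_retry;
}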
+static void nand_wait_readrdy(struct nand_chip *chip)
+{
+ const struct nand_sdr_timings *sdr;
+
+ if (!(chip->options & NAND_NEED_READRDY))
+ return;
+
+ sdr = nand_get_sdr_timings(nand_get_interface_config(chip));
+ WARN_ON(nand_wait_rdy_op(chip, PSEC_TO_MSEC(sdr->tR_max), 0));
+}
+
+/**
+ * nand_do_read_ops - [INTERN] Read data with ECC
+ * @chip: NAND chip object
+ * @from: offset to read from
+ * @ops: oob ops structure
+ *
+ * Internal function. Called with chip held.
+ */
+static int nand_do_read_ops(struct nand_chip *chip, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ int chipnr, page, realpage, col, bytes, aligned, oob_required;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret = 0;
+ uint32_t readlen = ops->len;
+ uint32_t oobreadlen = ops->ooblen;
+ uint32_t max_oobsize = mtd_oobavail(mtd, ops);
+
+ uint8_t *bufpoi, *oob, *buf;
+ int use_bounce_buf;
+ unsigned int max_bitflips = 0;
+ int retry_mode = 0;
+ bool ecc_fail = false;
+
+ chipnr = (int)(from >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ col = (int)(from & (mtd->writesize - 1));
+
+ buf = ops->datbuf;
+ oob = ops->oobbuf;
+ oob_required = oob ? 1 : 0;
+
+ while (1) {
+ struct mtd_ecc_stats ecc_stats = mtd->ecc_stats;
+
+ bytes = min(mtd->writesize - col, readlen);
+ aligned = (bytes == mtd->writesize);
+
+ if (!aligned)
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bounce_buf = 0;
+
+ /* Is the current page in the buffer? */
+ if (realpage != chip->pagecache.page || oob) {
+ bufpoi = use_bounce_buf ? chip->data_buf : buf;
+
+ if (use_bounce_buf && aligned)
+ pr_debug("%s: using read bounce buffer for buf@%p\n",
+ __func__, buf);
+
+read_retry:
+ /*
+ * Now read the page into the buffer. Absent an error,
+ * the read methods return max bitflips per ecc step.
+ */
+ if (unlikely(ops->mode == MTD_OPS_RAW))
+ ret = chip->ecc.read_page_raw(chip, bufpoi,
+ oob_required,
+ page);
+ else if (!aligned && NAND_HAS_SUBPAGE_READ(chip) &&
+ !oob)
+ ret = chip->ecc.read_subpage(chip, col, bytes,
+ bufpoi, page);
+ else
+ ret = chip->ecc.read_page(chip, bufpoi,
+ oob_required, page);
+ if (ret < 0) {
+ if (use_bounce_buf)
+ /* Invalidate page cache */
+ chip->pagecache.page = -1;
+ break;
+ }
+
+ /*
+ * Copy back the data in the initial buffer when reading
+ * partial pages or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
+ if (!NAND_HAS_SUBPAGE_READ(chip) && !oob &&
+ !(mtd->ecc_stats.failed - ecc_stats.failed) &&
+ (ops->mode != MTD_OPS_RAW)) {
+ chip->pagecache.page = realpage;
+ chip->pagecache.bitflips = ret;
+ } else {
+ /* Invalidate page cache */
+ chip->pagecache.page = -1;
+ }
+ memcpy(buf, bufpoi + col, bytes);
+ }
+
+ if (unlikely(oob)) {
+ int toread = min(oobreadlen, max_oobsize);
+
+ if (toread) {
+ oob = nand_transfer_oob(chip, oob, ops,
+ toread);
+ oobreadlen -= toread;
+ }
+ }
+
+ nand_wait_readrdy(chip);
+
+ if (mtd->ecc_stats.failed - ecc_stats.failed) {
+ if (retry_mode + 1 < chip->read_retries) {
+ retry_mode++;
+ ret = nand_setup_read_retry(chip,
+ retry_mode);
+ if (ret < 0)
+ break;
+
+ /* Reset ecc_stats; retry */
+ mtd->ecc_stats = ecc_stats;
+ goto read_retry;
+ } else {
+ /* No more retry modes; real failure */
+ ecc_fail = true;
+ }
+ }
+
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+ } else {
+ memcpy(buf, chip->data_buf + col, bytes);
+ buf += bytes;
+ max_bitflips = max_t(unsigned int, max_bitflips,
+ chip->pagecache.bitflips);
+ }
+
+ readlen -= bytes;
+
+ /* Reset to retry mode 0 */
+ if (retry_mode) {
+ ret = nand_setup_read_retry(chip, 0);
+ if (ret < 0)
+ break;
+ retry_mode = 0;
+ }
+
+ if (!readlen)
+ break;
+
+ /* For subsequent reads align to page boundary */
+ col = 0;
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+ nand_deselect_target(chip);
+
+ ops->retlen = ops->len - (size_t) readlen;
+ if (oob)
+ ops->oobretlen = ops->ooblen - oobreadlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (ecc_fail)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read_oob_std - [REPLACEABLE] the most common OOB data read function
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+int nand_read_oob_std(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_read_oob_op(chip, page, 0, chip->oob_poi, mtd->oobsize);
+}
+EXPORT_SYMBOL(nand_read_oob_std);
+
+/**
+ * nand_read_oob_syndrome - [REPLACEABLE] OOB data read function for HW ECC
+ * with syndromes
+ * @chip: nand chip info structure
+ * @page: page number to read
+ */
+static int nand_read_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int length = mtd->oobsize;
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size;
+ uint8_t *bufpoi = chip->oob_poi;
+ int i, toread, sndrnd = 0, pos, ret;
+
+ ret = nand_read_page_op(chip, page, chip->ecc.size, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ if (sndrnd) {
+ int ret;
+
+ pos = eccsize + i * (eccsize + chunk);
+ if (mtd->writesize > 512)
+ ret = nand_change_read_column_op(chip, pos,
+ NULL, 0,
+ false);
+ else
+ ret = nand_read_page_op(chip, page, pos, NULL,
+ 0);
+
+ if (ret)
+ return ret;
+ } else
+ sndrnd = 1;
+ toread = min_t(int, length, chunk);
+
+ ret = nand_read_data_op(chip, bufpoi, toread, false, false);
+ if (ret)
+ return ret;
+
+ bufpoi += toread;
+ length -= toread;
+ }
+ if (length > 0) {
+ ret = nand_read_data_op(chip, bufpoi, length, false, false);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+/**
+ * nand_write_oob_std - [REPLACEABLE] the most common OOB data write function
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+int nand_write_oob_std(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ return nand_prog_page_op(chip, page, mtd->writesize, chip->oob_poi,
+ mtd->oobsize);
+}
+EXPORT_SYMBOL(nand_write_oob_std);
+
+/**
+ * nand_write_oob_syndrome - [REPLACEABLE] OOB data write function for HW ECC
+ * with syndrome - only for large page flash
+ * @chip: nand chip info structure
+ * @page: page number to write
+ */
+static int nand_write_oob_syndrome(struct nand_chip *chip, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chunk = chip->ecc.bytes + chip->ecc.prepad + chip->ecc.postpad;
+ int eccsize = chip->ecc.size, length = mtd->oobsize;
+ int ret, i, len, pos, sndcmd = 0, steps = chip->ecc.steps;
+ const uint8_t *bufpoi = chip->oob_poi;
+
+ /*
+ * data-ecc-data-ecc ... ecc-oob
+ * or
+ * data-pad-ecc-pad-data-pad .... ecc-pad-oob
+ */
+ if (!chip->ecc.prepad && !chip->ecc.postpad) {
+ pos = steps * (eccsize + chunk);
+ steps = 0;
+ } else
+ pos = eccsize;
+
+ ret = nand_prog_page_begin_op(chip, page, pos, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < steps; i++) {
+ if (sndcmd) {
+ if (mtd->writesize <= 512) {
+ uint32_t fill = 0xFFFFFFFF;
+
+ len = eccsize;
+ while (len > 0) {
+ int num = min_t(int, len, 4);
+
+ ret = nand_write_data_op(chip, &fill,
+ num, false);
+ if (ret)
+ return ret;
+
+ len -= num;
+ }
+ } else {
+ pos = eccsize + i * (eccsize + chunk);
+ ret = nand_change_write_column_op(chip, pos,
+ NULL, 0,
+ false);
+ if (ret)
+ return ret;
+ }
+ } else
+ sndcmd = 1;
+ len = min_t(int, length, chunk);
+
+ ret = nand_write_data_op(chip, bufpoi, len, false);
+ if (ret)
+ return ret;
+
+ bufpoi += len;
+ length -= len;
+ }
+ if (length > 0) {
+ ret = nand_write_data_op(chip, bufpoi, length, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_do_read_oob - [INTERN] NAND read out-of-band
+ * @chip: NAND chip object
+ * @from: offset to read from
+ * @ops: oob operations description structure
+ *
+ * NAND read out-of-band data from the spare area.
+ */
+static int nand_do_read_oob(struct nand_chip *chip, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int max_bitflips = 0;
+ int page, realpage, chipnr;
+ struct mtd_ecc_stats stats;
+ int readlen = ops->ooblen;
+ int len;
+ uint8_t *buf = ops->oobbuf;
+ int ret = 0;
+
+ pr_debug("%s: from = 0x%08Lx, len = %i\n",
+ __func__, (unsigned long long)from, readlen);
+
+ stats = mtd->ecc_stats;
+
+ len = mtd_oobavail(mtd, ops);
+
+ chipnr = (int)(from >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ /* Shift to get page */
+ realpage = (int)(from >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ while (1) {
+ if (ops->mode == MTD_OPS_RAW)
+ ret = chip->ecc.read_oob_raw(chip, page);
+ else
+ ret = chip->ecc.read_oob(chip, page);
+
+ if (ret < 0)
+ break;
+
+ len = min(len, readlen);
+ buf = nand_transfer_oob(chip, buf, ops, len);
+
+ nand_wait_readrdy(chip);
+
+ max_bitflips = max_t(unsigned int, max_bitflips, ret);
+
+ readlen -= len;
+ if (!readlen)
+ break;
+
+ /* Increment page address */
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+ nand_deselect_target(chip);
+
+ ops->oobretlen = ops->ooblen - readlen;
+
+ if (ret < 0)
+ return ret;
+
+ if (mtd->ecc_stats.failed - stats.failed)
+ return -EBADMSG;
+
+ return max_bitflips;
+}
+
+/**
+ * nand_read_oob - [MTD Interface] NAND read data and/or out-of-band
+ * @mtd: MTD device structure
+ * @from: offset to read from
+ * @ops: oob operation description structure
+ *
+ * NAND read data and/or out-of-band data.
+ */
+static int nand_read_oob(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
+
+ ops->retlen = 0;
+
+ if (ops->mode != MTD_OPS_PLACE_OOB &&
+ ops->mode != MTD_OPS_AUTO_OOB &&
+ ops->mode != MTD_OPS_RAW)
+ return -ENOTSUPP;
+
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
+ if (!ops->datbuf)
+ ret = nand_do_read_oob(chip, from, ops);
+ else
+ ret = nand_do_read_ops(chip, from, ops);
+
+ nand_release_device(chip);
+ return ret;
+}
+
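nand_read_oob() is what the MTD-level mtd_read_oob() wrapper is expected to end up calling; a minimal caller-side sketch, assuming that wrapper, with a hypothetical function name:

/* Sketch: read only the ECC-free OOB bytes of the page at offset offs */
static int my_dump_free_oob(struct mtd_info *mtd, loff_t offs, u8 *oobbuf)
{
	struct mtd_oob_ops ops = {
		.mode	= MTD_OPS_AUTO_OOB,	/* auto-place: free bytes only */
		.ooblen	= mtd->oobavail,
		.oobbuf	= oobbuf,
		/* .datbuf left NULL: takes the nand_do_read_oob() path */
	};

	return mtd_read_oob(mtd, offs, &ops);
}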
+/**
+ * nand_write_page_raw_notsupp - dummy raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Returns -ENOTSUPP unconditionally.
+ */
+int nand_write_page_raw_notsupp(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ return -ENOTSUPP;
+}
+
+/**
+ * nand_write_page_raw - [INTERN] raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * Not for syndrome calculating ECC controllers, which use a special oob layout.
+ */
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, buf, mtd->writesize);
+ if (ret)
+ return ret;
+
+ if (oob_required) {
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize,
+ false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+EXPORT_SYMBOL(nand_write_page_raw);
+
+/**
+ * nand_monolithic_write_page_raw - Monolithic page write in raw mode
+ * @chip: NAND chip info structure
+ * @buf: data buffer to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * This is a raw page write, i.e. without any error detection/correction.
+ * Monolithic means we are requesting all the relevant data (main plus,
+ * optionally, the OOB) to be sent over the bus and effectively programmed
+ * into the NAND chip arrays in a single operation. This is an
+ * alternative to nand_write_page_raw(), which first sends the main
+ * data, then, if needed, sends the OOB data by latching more data
+ * cycles on the NAND bus, and finally sends the program command to
+ * synchronize the NAND chip cache.
+ */
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const u8 *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int size = mtd->writesize;
+ u8 *write_buf = (u8 *)buf;
+
+ if (oob_required) {
+ size += mtd->oobsize;
+
+ if (buf != chip->data_buf) {
+ write_buf = nand_get_data_buf(chip);
+ memcpy(write_buf, buf, mtd->writesize);
+ }
+ }
+
+ return nand_prog_page_op(chip, page, 0, write_buf, size);
+}
+EXPORT_SYMBOL(nand_monolithic_write_page_raw);
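The two monolithic helpers are meant for controllers that cannot split the main-data and OOB transfers. A minimal sketch of how such a driver could wire them up in its init path (the function name is a hypothetical placeholder):

/* Sketch: use the single-transfer raw accessors for this controller */
static int my_controller_init_ecc(struct nand_chip *chip)
{
	chip->ecc.read_page_raw = nand_monolithic_read_page_raw;
	chip->ecc.write_page_raw = nand_monolithic_write_page_raw;

	return 0;
}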
+
+/**
+ * nand_write_page_raw_syndrome - [INTERN] raw page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * We need a special oob layout and handling even when ECC isn't checked.
+ */
+static int nand_write_page_raw_syndrome(struct nand_chip *chip,
+ const uint8_t *buf, int oob_required,
+ int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ uint8_t *oob = chip->oob_poi;
+ int steps, size, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (steps = chip->ecc.steps; steps > 0; steps--) {
+ ret = nand_write_data_op(chip, buf, eccsize, false);
+ if (ret)
+ return ret;
+
+ buf += eccsize;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ size = mtd->oobsize - (oob - chip->oob_poi);
+ if (size) {
+ ret = nand_write_data_op(chip, oob, size, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page_swecc - [REPLACEABLE] software ECC based page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_swecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ const uint8_t *p = buf;
+
+ /* Software ECC calculation */
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize)
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ return chip->ecc.write_page_raw(chip, buf, 1, page);
+}
+
+/**
+ * nand_write_page_hwecc - [REPLACEABLE] hardware ECC based page write function
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size, ret;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ const uint8_t *p = buf;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ chip->ecc.calculate(chip, p, &ecc_calc[i]);
+ }
+
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_subpage_hwecc - [REPLACEABLE] hardware ECC based subpage write
+ * @chip: nand chip info structure
+ * @offset: column address of subpage within the page
+ * @data_len: data length
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ */
+static int nand_write_subpage_hwecc(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ uint8_t *oob_buf = chip->oob_poi;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ int ecc_size = chip->ecc.size;
+ int ecc_bytes = chip->ecc.bytes;
+ int ecc_steps = chip->ecc.steps;
+ uint32_t start_step = offset / ecc_size;
+ uint32_t end_step = (offset + data_len - 1) / ecc_size;
+ int oob_bytes = mtd->oobsize / ecc_steps;
+ int step, ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < ecc_steps; step++) {
+ /* configure controller for WRITE access */
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ /* write data (untouched subpages already masked by 0xFF) */
+ ret = nand_write_data_op(chip, buf, ecc_size, false);
+ if (ret)
+ return ret;
+
+ /* mask ECC of un-touched subpages by padding 0xFF */
+ if ((step < start_step) || (step > end_step))
+ memset(ecc_calc, 0xff, ecc_bytes);
+ else
+ chip->ecc.calculate(chip, buf, ecc_calc);
+
+ /* mask OOB of un-touched subpages by padding 0xFF */
+ /* if oob_required, preserve OOB metadata of written subpage */
+ if (!oob_required || (step < start_step) || (step > end_step))
+ memset(oob_buf, 0xff, oob_bytes);
+
+ buf += ecc_size;
+ ecc_calc += ecc_bytes;
+ oob_buf += oob_bytes;
+ }
+
+	/* copy the calculated ECC for the whole page to chip->oob_poi */
+	/* this includes the masked value (0xFF) for unwritten subpages */
+ ecc_calc = chip->ecc.calc_buf;
+ ret = mtd_ooblayout_set_eccbytes(mtd, ecc_calc, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
+
+ /* write OOB buffer to NAND device */
+ ret = nand_write_data_op(chip, chip->oob_poi, mtd->oobsize, false);
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page_syndrome - [REPLACEABLE] hardware ECC syndrome based page write
+ * @chip: nand chip info structure
+ * @buf: data buffer
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ *
+ * The hw generator calculates the error syndrome automatically. Therefore we
+ * need a special oob layout and handling.
+ */
+static int nand_write_page_syndrome(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int i, eccsize = chip->ecc.size;
+ int eccbytes = chip->ecc.bytes;
+ int eccsteps = chip->ecc.steps;
+ const uint8_t *p = buf;
+ uint8_t *oob = chip->oob_poi;
+ int ret;
+
+ ret = nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+ if (ret)
+ return ret;
+
+ for (i = 0; eccsteps; eccsteps--, i += eccbytes, p += eccsize) {
+ chip->ecc.hwctl(chip, NAND_ECC_WRITE);
+
+ ret = nand_write_data_op(chip, p, eccsize, false);
+ if (ret)
+ return ret;
+
+ if (chip->ecc.prepad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.prepad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.prepad;
+ }
+
+ chip->ecc.calculate(chip, p, oob);
+
+ ret = nand_write_data_op(chip, oob, eccbytes, false);
+ if (ret)
+ return ret;
+
+ oob += eccbytes;
+
+ if (chip->ecc.postpad) {
+ ret = nand_write_data_op(chip, oob, chip->ecc.postpad,
+ false);
+ if (ret)
+ return ret;
+
+ oob += chip->ecc.postpad;
+ }
+ }
+
+ /* Calculate remaining oob bytes */
+ i = mtd->oobsize - (oob - chip->oob_poi);
+ if (i) {
+ ret = nand_write_data_op(chip, oob, i, false);
+ if (ret)
+ return ret;
+ }
+
+ return nand_prog_page_end_op(chip);
+}
+
+/**
+ * nand_write_page - write one page
+ * @chip: NAND chip descriptor
+ * @offset: address offset within the page
+ * @data_len: length of actual data to be written
+ * @buf: the data to write
+ * @oob_required: must write chip->oob_poi to OOB
+ * @page: page number to write
+ * @raw: use _raw version of write_page
+ */
+static int nand_write_page(struct nand_chip *chip, uint32_t offset,
+ int data_len, const uint8_t *buf, int oob_required,
+ int page, int raw)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int status, subpage;
+
+ if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
+ chip->ecc.write_subpage)
+ subpage = offset || (data_len < mtd->writesize);
+ else
+ subpage = 0;
+
+ if (unlikely(raw))
+ status = chip->ecc.write_page_raw(chip, buf, oob_required,
+ page);
+ else if (subpage)
+ status = chip->ecc.write_subpage(chip, offset, data_len, buf,
+ oob_required, page);
+ else
+ status = chip->ecc.write_page(chip, buf, oob_required, page);
+
+ if (status < 0)
+ return status;
+
+ return 0;
+}
+
+#define NOTALIGNED(x) ((x & (chip->subpagesize - 1)) != 0)
+
+/**
+ * nand_do_write_ops - [INTERN] NAND write with ECC
+ * @chip: NAND chip object
+ * @to: offset to write to
+ * @ops: oob operations description structure
+ *
+ * NAND write with ECC.
+ */
+static int nand_do_write_ops(struct nand_chip *chip, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int chipnr, realpage, page, column;
+ uint32_t writelen = ops->len;
+
+ uint32_t oobwritelen = ops->ooblen;
+ uint32_t oobmaxlen = mtd_oobavail(mtd, ops);
+
+ uint8_t *oob = ops->oobbuf;
+ uint8_t *buf = ops->datbuf;
+ int ret;
+ int oob_required = oob ? 1 : 0;
+
+ ops->retlen = 0;
+ if (!writelen)
+ return 0;
+
+	/* Reject writes that are not page aligned */
+ if (NOTALIGNED(to) || NOTALIGNED(ops->len)) {
+ pr_notice("%s: attempt to write non page aligned data\n",
+ __func__);
+ return -EINVAL;
+ }
+
+ column = to & (mtd->writesize - 1);
+
+ chipnr = (int)(to >> chip->chip_shift);
+ nand_select_target(chip, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(chip)) {
+ ret = -EIO;
+ goto err_out;
+ }
+
+ realpage = (int)(to >> chip->page_shift);
+ page = realpage & chip->pagemask;
+
+ /* Invalidate the page cache, when we write to the cached page */
+ if (to <= ((loff_t)chip->pagecache.page << chip->page_shift) &&
+ ((loff_t)chip->pagecache.page << chip->page_shift) < (to + ops->len))
+ chip->pagecache.page = -1;
+
+ /* Don't allow multipage oob writes with offset */
+ if (oob && ops->ooboffs && (ops->ooboffs + ops->ooblen > oobmaxlen)) {
+ ret = -EINVAL;
+ goto err_out;
+ }
+
+ while (1) {
+ int bytes = mtd->writesize;
+ uint8_t *wbuf = buf;
+ int use_bounce_buf;
+ int part_pagewr = (column || writelen < mtd->writesize);
+
+ if (part_pagewr)
+ use_bounce_buf = 1;
+ else if (chip->options & NAND_USES_DMA)
+ use_bounce_buf = !IS_ALIGNED((unsigned long)buf,
+ chip->buf_align);
+ else
+ use_bounce_buf = 0;
+
+ /*
+ * Copy the data from the initial buffer when doing partial page
+ * writes or when a bounce buffer is required.
+ */
+ if (use_bounce_buf) {
+ pr_debug("%s: using write bounce buffer for buf@%p\n",
+ __func__, buf);
+ if (part_pagewr)
+ bytes = min_t(int, bytes - column, writelen);
+ wbuf = nand_get_data_buf(chip);
+ memset(wbuf, 0xff, mtd->writesize);
+ memcpy(&wbuf[column], buf, bytes);
+ }
+
+ if (unlikely(oob)) {
+ size_t len = min(oobwritelen, oobmaxlen);
+ oob = nand_fill_oob(chip, oob, len, ops);
+ oobwritelen -= len;
+ } else {
+ /* We still need to erase leftover OOB data */
+ memset(chip->oob_poi, 0xff, mtd->oobsize);
+ }
+
+ ret = nand_write_page(chip, column, bytes, wbuf,
+ oob_required, page,
+ (ops->mode == MTD_OPS_RAW));
+ if (ret)
+ break;
+
+ writelen -= bytes;
+ if (!writelen)
+ break;
+
+ column = 0;
+ buf += bytes;
+ realpage++;
+
+ page = realpage & chip->pagemask;
+ /* Check, if we cross a chip boundary */
+ if (!page) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+
+ ops->retlen = ops->len - writelen;
+ if (unlikely(oob))
+ ops->oobretlen = ops->ooblen;
+
+err_out:
+ nand_deselect_target(chip);
+ return ret;
+}
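
For illustration of the alignment check and bounce-buffer padding above, a small
standalone sketch (the geometry and offsets are made up: 2048-byte pages with
512-byte subpages):

#include <stdio.h>

int main(void)
{
	unsigned int writesize = 2048, subpagesize = 512;
	unsigned long long to = 0x900;		/* not subpage aligned */

	if (to & (subpagesize - 1))
		printf("offset 0x%llx rejected (not subpage aligned)\n", to);

	to = 0xa00;				/* subpage aligned: allowed */
	printf("offset 0x%llx -> column %llu, rest of the page padded with 0xff\n",
	       to, to & (writesize - 1));
	return 0;
}
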
+
+/**
+ * nand_write_oob - [MTD Interface] NAND write data and/or out-of-band
+ * @mtd: MTD device structure
+ * @to: offset to write to
+ * @ops: oob operation description structure
+ */
+static int nand_write_oob(struct mtd_info *mtd, loff_t to,
+ struct mtd_oob_ops *ops)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int ret;
+
+ ops->retlen = 0;
+
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
+ switch (ops->mode) {
+ case MTD_OPS_PLACE_OOB:
+ case MTD_OPS_AUTO_OOB:
+ case MTD_OPS_RAW:
+ break;
+
+ default:
+ goto out;
+ }
+
+ if (!ops->datbuf)
+ ret = nand_do_write_oob(chip, to, ops);
+ else
+ ret = nand_do_write_ops(chip, to, ops);
+
+out:
+ nand_release_device(chip);
+ return ret;
+}
+
+/**
+ * nand_erase - [MTD Interface] erase block(s)
+ * @mtd: MTD device structure
+ * @instr: erase instruction
+ *
+ * Erase one or more blocks.
+ */
+static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+{
+ return nand_erase_nand(mtd_to_nand(mtd), instr, 0);
+}
+
+/**
+ * nand_erase_nand - [INTERN] erase block(s)
+ * @chip: NAND chip object
+ * @instr: erase instruction
+ * @allowbbt: allow erasing the bbt area
+ *
+ * Erase one or more blocks.
+ */
+int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
+ int allowbbt)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ int page, pages_per_block, ret, chipnr;
+ loff_t len;
+
+ pr_debug("%s: start = 0x%012llx, len = %llu\n",
+ __func__, (unsigned long long)instr->addr,
+ (unsigned long long)instr->len);
+
+ if (check_offs_len(chip, instr->addr, instr->len))
+ return -EINVAL;
+
+ /* Grab the lock and see if the device is available */
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
+ /* Shift to get first page */
+ page = (int)(instr->addr >> chip->page_shift);
+ chipnr = (int)(instr->addr >> chip->chip_shift);
+
+ /* Calculate pages in each block */
+ pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+
+ /* Select the NAND device */
+ nand_select_target(chip, chipnr);
+
+ /* Check, if it is write protected */
+ if (nand_check_wp(chip)) {
+ pr_debug("%s: device is write protected!\n",
+ __func__);
+ ret = -EIO;
+ goto erase_exit;
+ }
+
+ /* Loop through the pages */
+ len = instr->len;
+
+ while (len) {
+ /* Check if we have a bad block, we do not erase bad blocks! */
+ if (!mtd->allow_erasebad &&
+ nand_block_checkbad(chip, ((loff_t) page) <<
+ chip->page_shift, allowbbt)) {
+ pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
+ __func__, page);
+ ret = -EIO;
+ goto erase_exit;
+ }
+
+ /*
+ * Invalidate the page cache, if we erase the block which
+ * contains the current cached page.
+ */
+ if (page <= chip->pagecache.page && chip->pagecache.page <
+ (page + pages_per_block))
+ chip->pagecache.page = -1;
+
+ ret = nand_erase_op(chip, (page & chip->pagemask) >>
+ (chip->phys_erase_shift - chip->page_shift));
+ if (ret) {
+ pr_debug("%s: failed erase, page 0x%08x\n",
+ __func__, page);
+ instr->fail_addr =
+ ((loff_t)page << chip->page_shift);
+ goto erase_exit;
+ }
+
+ /* Increment page address and decrement length */
+ len -= (1ULL << chip->phys_erase_shift);
+ page += pages_per_block;
+
+ /* Check, if we cross a chip boundary */
+ if (len && !(page & chip->pagemask)) {
+ chipnr++;
+ nand_deselect_target(chip);
+ nand_select_target(chip, chipnr);
+ }
+ }
+
+ ret = 0;
+erase_exit:
+
+ /* Deselect and wake up anyone waiting on the device */
+ nand_deselect_target(chip);
+ nand_release_device(chip);
+
+ /* Return more or less happy */
+ return ret;
+}
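
The page/block arithmetic used in the erase loop reduces to a few shifts; a
quick standalone sketch with an arbitrarily chosen geometry (2 KiB pages,
128 KiB blocks):

#include <stdio.h>

int main(void)
{
	int page_shift = 11, phys_erase_shift = 17;	/* 2 KiB pages, 128 KiB blocks */
	unsigned long long addr = 0x40000;		/* start of the third block */
	int pages_per_block = 1 << (phys_erase_shift - page_shift);
	int page = (int)(addr >> page_shift);

	printf("pages per block: %d\n", pages_per_block);	/* 64 */
	printf("first page: %d, erase block index: %d\n",
	       page, page >> (phys_erase_shift - page_shift));	/* 128, 2 */
	return 0;
}
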
+
+/**
+ * nand_sync - [MTD Interface] sync
+ * @mtd: MTD device structure
+ *
+ * Sync is actually a wait for chip ready function.
+ */
+static void nand_sync(struct mtd_info *mtd)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ pr_debug("%s: called\n", __func__);
+
+ /* Grab the lock and see if the device is available */
+ WARN_ON(nand_get_device(chip));
+ /* Release it and go back */
+ nand_release_device(chip);
+}
+
+/**
+ * nand_block_isbad - [MTD Interface] Check if block at offset is bad
+ * @mtd: MTD device structure
+ * @offs: offset relative to mtd start
+ */
+static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+ int chipnr = (int)(offs >> chip->chip_shift);
+ int ret;
+
+ /* Select the NAND device */
+ ret = nand_get_device(chip);
+ if (ret)
+ return ret;
+
+ nand_select_target(chip, chipnr);
+
+ ret = nand_block_checkbad(chip, offs, 0);
+
+ nand_deselect_target(chip);
+ nand_release_device(chip);
+
+ return ret;
+}
+
+/**
+ * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret) {
+ /* If it was bad already, return success and do nothing */
+ if (ret > 0)
+ return 0;
+ return ret;
+ }
+
+ return nand_block_markbad_lowlevel(mtd_to_nand(mtd), ofs);
+}
+
+/**
+ * nand_block_markgood - [MTD Interface] Mark block at the given offset as good
+ * @mtd: MTD device structure
+ * @ofs: offset relative to mtd start
+ */
+static int nand_block_markgood(struct mtd_info *mtd, loff_t ofs)
+{
+ int ret;
+
+ ret = nand_block_isbad(mtd, ofs);
+ if (ret < 0)
+ return ret;
+
+ if (!ret)
+ /* If it was good already, return success and do nothing */
+ return 0;
+
+ return nand_block_markgood_lowlevel(mtd_to_nand(mtd), ofs);
+}
+
+/**
+ * nand_lock - [MTD Interface] Lock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to lock (must be a multiple of block/page size)
+ */
+static int nand_lock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->ops.lock_area)
+ return -ENOTSUPP;
+
+ return chip->ops.lock_area(chip, ofs, len);
+}
+
+/**
+ * nand_unlock - [MTD Interface] Unlock the NAND flash
+ * @mtd: MTD device structure
+ * @ofs: offset byte address
+ * @len: number of bytes to unlock (must be a multiple of block/page size)
+ */
+static int nand_unlock(struct mtd_info *mtd, loff_t ofs, size_t len)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (!chip->ops.unlock_area)
+ return -ENOTSUPP;
+
+ return chip->ops.unlock_area(chip, ofs, len);
+}
+
+/* Set default functions */
+static void nand_set_defaults(struct nand_chip *chip)
+{
+ /* If no controller is provided, use the dummy, legacy one. */
+ if (!chip->controller) {
+ chip->controller = &chip->legacy.dummy_controller;
+ nand_controller_init(chip->controller);
+ }
+
+ nand_legacy_set_defaults(chip);
+
+ if (!chip->buf_align)
+ chip->buf_align = 1;
+}
+
+/* Sanitize ONFI strings so we can safely print them */
+void sanitize_string(uint8_t *s, size_t len)
+{
+ ssize_t i;
+
+ /* Null terminate */
+ s[len - 1] = 0;
+
+ /* Remove non printable chars */
+ for (i = 0; i < len - 1; i++) {
+ if (s[i] < ' ' || s[i] > 127)
+ s[i] = '?';
+ }
+
+ /* Remove trailing spaces */
+ strim(s);
+}
+
+/*
+ * nand_id_has_period - Check if an ID string has a given wraparound period
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ * @period: the period of repetition
+ *
+ * Check if an ID string is repeated within a given sequence of bytes at
+ * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
+ * period of 3). This is a helper function for nand_id_len(). Returns non-zero
+ * if the repetition has a period of @period; otherwise, returns zero.
+ */
+static int nand_id_has_period(u8 *id_data, int arrlen, int period)
+{
+ int i, j;
+ for (i = 0; i < period; i++)
+ for (j = i + period; j < arrlen; j += period)
+ if (id_data[i] != id_data[j])
+ return 0;
+ return 1;
+}
+
+/*
+ * nand_id_len - Get the length of an ID string returned by CMD_READID
+ * @id_data: the ID string
+ * @arrlen: the length of the @id_data array
+ *
+ * Returns the length of the ID string, according to known wraparound/trailing
+ * zero patterns. If no pattern exists, returns the length of the array.
+ */
+static int nand_id_len(u8 *id_data, int arrlen)
+{
+ int last_nonzero, period;
+
+ /* Find last non-zero byte */
+ for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
+ if (id_data[last_nonzero])
+ break;
+
+ /* All zeros */
+ if (last_nonzero < 0)
+ return 0;
+
+ /* Calculate wraparound period */
+ for (period = 1; period < arrlen; period++)
+ if (nand_id_has_period(id_data, arrlen, period))
+ break;
+
+ /* There's a repeated pattern */
+ if (period < arrlen)
+ return period;
+
+ /* There are trailing zeros */
+ if (last_nonzero < arrlen - 1)
+ return last_nonzero + 1;
+
+ /* No pattern detected */
+ return arrlen;
+}
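
The wraparound logic above can be exercised standalone; the ID bytes below are
invented to show a period-3 repetition, and the inner check is copied from
nand_id_has_period() for the demo:

#include <stdio.h>

/* same check as nand_id_has_period() above, copied for a standalone demo */
static int has_period(const unsigned char *id, int arrlen, int period)
{
	int i, j;

	for (i = 0; i < period; i++)
		for (j = i + period; j < arrlen; j += period)
			if (id[i] != id[j])
				return 0;
	return 1;
}

int main(void)
{
	/* invented ID string: repeats with period 3, so nand_id_len() -> 3 */
	unsigned char id[8] = { 0x20, 0x01, 0x7f, 0x20, 0x01, 0x7f, 0x20, 0x01 };
	int period;

	for (period = 1; period < 8; period++)
		if (has_period(id, 8, period))
			break;
	printf("detected period: %d\n", period);	/* prints 3 */
	return 0;
}
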
+
+/* Extract the number of bits per cell from the 3rd byte of the extended ID */
+static int nand_get_bits_per_cell(u8 cellinfo)
+{
+ int bits;
+
+ bits = cellinfo & NAND_CI_CELLTYPE_MSK;
+ bits >>= NAND_CI_CELLTYPE_SHIFT;
+ return bits + 1;
+}
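
For a hypothetical third ID byte of 0x14, and using the mask/shift values from
rawnand.h (0x0C and 2), the extraction works out as follows:

#include <stdio.h>

int main(void)
{
	unsigned int cellinfo = 0x14;	/* hypothetical 3rd ID byte */
	/* mask/shift values as defined in rawnand.h */
	unsigned int bits = ((cellinfo & 0x0C) >> 2) + 1;

	printf("%u bit(s) per cell (%s)\n", bits, bits == 1 ? "SLC" : "MLC");
	return 0;
}
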
+
+/*
+ * Many newer NAND chips share similar device ID codes, which represent the size
+ * of the chip. The rest of the parameters must be decoded according to generic or
+ * manufacturer-specific "extended ID" decoding patterns.
+ */
+void nand_decode_ext_id(struct nand_chip *chip)
+{
+ struct nand_memory_organization *memorg;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int extid;
+ u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* The 3rd id byte holds MLC / multichip data */
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ /* The 4th id byte is the important one */
+ extid = id_data[3];
+
+ /* Calc pagesize */
+ memorg->pagesize = 1024 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
+ extid >>= 2;
+ /* Calc oobsize */
+ memorg->oobsize = (8 << (extid & 0x01)) * (mtd->writesize >> 9);
+ mtd->oobsize = memorg->oobsize;
+ extid >>= 2;
+ /* Calc blocksize. Blocksize is multiples of 64KiB */
+ memorg->pages_per_eraseblock = ((64 * 1024) << (extid & 0x03)) /
+ memorg->pagesize;
+ mtd->erasesize = (64 * 1024) << (extid & 0x03);
+ extid >>= 2;
+ /* Get buswidth information */
+ if (extid & 0x1)
+ chip->options |= NAND_BUSWIDTH_16;
+}
+EXPORT_SYMBOL_GPL(nand_decode_ext_id);
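
As a worked example of the generic decode above (the ID byte 0x95 is only an
illustration of the bit fields, not tied to a particular part):

#include <stdio.h>

int main(void)
{
	unsigned int extid = 0x95;	/* hypothetical 4th ID byte */
	unsigned int pagesize, oobsize, erasesize;

	pagesize = 1024 << (extid & 0x03);			/* 2048 */
	extid >>= 2;
	oobsize = (8 << (extid & 0x01)) * (pagesize >> 9);	/* 64 */
	extid >>= 2;
	erasesize = (64 * 1024) << (extid & 0x03);		/* 128 KiB */
	extid >>= 2;

	printf("page %u, oob %u, block %u, %s bus\n", pagesize, oobsize,
	       erasesize, (extid & 0x01) ? "16-bit" : "8-bit");
	return 0;
}
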
+
+/*
+ * Old devices have chip data hardcoded in the device ID table. nand_decode_id
+ * decodes a matching ID table entry and assigns the MTD size parameters for
+ * the chip.
+ */
+static void nand_decode_id(struct nand_chip *chip, struct nand_flash_dev *type)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ memorg->pages_per_eraseblock = type->erasesize / type->pagesize;
+ mtd->erasesize = type->erasesize;
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->oobsize = memorg->pagesize / 32;
+ mtd->oobsize = memorg->oobsize;
+
+ /* All legacy ID NAND are small-page, SLC */
+ memorg->bits_per_cell = 1;
+}
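
For the legacy table entries this reduces to simple arithmetic; e.g. a
hypothetical 512-byte-page, 16 KiB-block entry:

#include <stdio.h>

int main(void)
{
	unsigned int pagesize = 512, erasesize = 16 * 1024;	/* from the ID table */

	printf("pages per block: %u, oob size: %u\n",
	       erasesize / pagesize, pagesize / 32);	/* 32 and 16 */
	return 0;
}
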
+
+/*
+ * Set the bad block marker/indicator (BBM/BBI) patterns according to some
+ * heuristic patterns using various detected parameters (e.g., manufacturer,
+ * page size, cell-type information).
+ */
+static void nand_decode_bbm_options(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Set the bad block position */
+ if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
+ chip->badblockpos = NAND_BBM_POS_LARGE;
+ else
+ chip->badblockpos = NAND_BBM_POS_SMALL;
+}
+
+static inline bool is_full_id_nand(struct nand_flash_dev *type)
+{
+ return type->id_len;
+}
+
+static bool find_full_id_nand(struct nand_chip *chip,
+ struct nand_flash_dev *type)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ u8 *id_data = chip->id.data;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ if (!strncmp(type->id, id_data, type->id_len)) {
+ memorg->pagesize = type->pagesize;
+ mtd->writesize = memorg->pagesize;
+ memorg->pages_per_eraseblock = type->erasesize /
+ type->pagesize;
+ mtd->erasesize = type->erasesize;
+ memorg->oobsize = type->oobsize;
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
+ chip->options |= type->options;
+ chip->base.eccreq.strength = NAND_ECC_STRENGTH(type);
+ chip->base.eccreq.step_size = NAND_ECC_STEP(type);
+
+ chip->parameters.model = strdup(type->name);
+ if (!chip->parameters.model)
+ return false;
+
+ return true;
}
+ return false;
+}
- if (!ops->datbuf)
- ret = nand_do_write_oob(chip, to, ops);
- else
- ret = nand_do_write_ops(chip, to, ops);
+/*
+ * Manufacturer detection. Only used when the NAND is not ONFI or JEDEC
+ * compliant and does not have a full-id or legacy-id entry in the nand_ids
+ * table.
+ */
+static void nand_manufacturer_detect(struct nand_chip *chip)
+{
+ /*
+ * Try manufacturer detection if available and use
+ * nand_decode_ext_id() otherwise.
+ */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->detect) {
+ struct nand_memory_organization *memorg;
-out:
- nand_release_device(chip);
- return ret;
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* The 3rd id byte holds MLC / multichip data */
+ memorg->bits_per_cell = nand_get_bits_per_cell(chip->id.data[2]);
+ chip->manufacturer.desc->ops->detect(chip);
+ } else {
+ nand_decode_ext_id(chip);
+ }
}
-/**
- * single_erase_cmd - [GENERIC] NAND standard block erase command function
- * @mtd: MTD device structure
- * @page: the page address of the block which will be erased
- *
- * Standard erase command for NAND chips.
+/*
+ * Manufacturer initialization. This function is called for all NANDs including
+ * ONFI and JEDEC compliant ones.
+ * Manufacturer drivers should put all their specific initialization code in
+ * their ->init() hook.
*/
-static void single_erase_cmd(struct nand_chip *chip, int page)
+static int nand_manufacturer_init(struct nand_chip *chip)
{
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return;
+ if (!chip->manufacturer.desc || !chip->manufacturer.desc->ops ||
+ !chip->manufacturer.desc->ops->init)
+ return 0;
- /* Send commands to erase a block */
- chip->legacy.cmdfunc(chip, NAND_CMD_ERASE1, -1, page);
- chip->legacy.cmdfunc(chip, NAND_CMD_ERASE2, -1, -1);
+ return chip->manufacturer.desc->ops->init(chip);
}
-/**
- * nand_erase - [MTD Interface] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- *
- * Erase one ore more blocks.
+/*
+ * Manufacturer cleanup. This function is called for all NANDs including
+ * ONFI and JEDEC compliant ones.
+ * Manufacturer drivers should put all their specific cleanup code in their
+ * ->cleanup() hook.
*/
-static int nand_erase(struct mtd_info *mtd, struct erase_info *instr)
+static void nand_manufacturer_cleanup(struct nand_chip *chip)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
-
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ /* Release manufacturer private data */
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->cleanup)
+ chip->manufacturer.desc->ops->cleanup(chip);
+}
- return nand_erase_nand(chip, instr, 0);
+static const char *
+nand_manufacturer_name(const struct nand_manufacturer_desc *manufacturer_desc)
+{
+ return manufacturer_desc ? manufacturer_desc->name : "Unknown";
}
-/**
- * nand_erase_nand - [INTERN] erase block(s)
- * @mtd: MTD device structure
- * @instr: erase instruction
- * @allowbbt: allow erasing the bbt area
- *
- * Erase one ore more blocks.
+/*
+ * Get the flash and manufacturer ID and look up if the type is supported.
*/
-int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
- int allowbbt)
+static int nand_detect(struct nand_chip *chip, struct nand_flash_dev *type)
{
- int page, status, pages_per_block, ret, chipnr;
- loff_t len;
+ const struct nand_manufacturer_desc *manufacturer_desc;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ int busw, ret;
+ u8 *id_data = chip->id.data;
+ u8 maf_id, dev_id;
+ u64 targetsize;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ /*
+ * Let's start by initializing memorg fields that might be left
+ * unassigned by the ID-based detection logic.
+ */
+ memorg = nanddev_get_memorg(&chip->base);
+ memorg->planes_per_lun = 1;
+ memorg->luns_per_target = 1;
- pr_debug("%s: start = 0x%012llx, len = %llu\n",
- __func__, (unsigned long long)instr->addr,
- (unsigned long long)instr->len);
+ /*
+ * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
+ * after power-up.
+ */
+ ret = nand_reset(chip, 0);
+ if (ret)
+ return ret;
- if (check_offs_len(chip, instr->addr, instr->len))
- return -EINVAL;
+ /* Select the device */
+ nand_select_target(chip, 0);
- /* Grab the lock and see if the device is available */
- nand_get_device(chip, FL_ERASING);
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id_data, 2);
+ if (ret)
+ return ret;
- /* Shift to get first page */
- page = (int)(instr->addr >> chip->page_shift);
- chipnr = (int)(instr->addr >> chip->chip_shift);
+ /* Read manufacturer and device IDs */
+ maf_id = id_data[0];
+ dev_id = id_data[1];
- /* Calculate pages in each block */
- pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift);
+ /*
+ * Try again to make sure, as on some systems bus-hold or other
+ * interface concerns can cause random data which looks like a
+ * possibly credible NAND flash to appear. If the two results do
+ * not match, ignore the device completely.
+ */
- /* Select the NAND device */
- chip->legacy.select_chip(chip, chipnr);
+ /* Read entire ID string */
+ ret = nand_readid_op(chip, 0, id_data, sizeof(chip->id.data));
+ if (ret)
+ return ret;
- /* Check, if it is write protected */
- if (nand_check_wp(chip)) {
- pr_debug("%s: device is write protected!\n",
- __func__);
- ret = -EIO;
- goto erase_exit;
+ if (id_data[0] != maf_id || id_data[1] != dev_id) {
+ pr_info("second ID read did not match %02x,%02x against %02x,%02x\n",
+ maf_id, dev_id, id_data[0], id_data[1]);
+ return -ENODEV;
}
- /* Loop through the pages */
- len = instr->len;
+ chip->id.len = nand_id_len(id_data, ARRAY_SIZE(chip->id.data));
- while (len) {
- /* Check if we have a bad block, we do not erase bad blocks! */
- if (!chip->mtd.allow_erasebad &&
- nand_block_checkbad(chip, ((loff_t) page) <<
- chip->page_shift, 0, allowbbt)) {
- pr_warn("%s: attempt to erase a bad block at page 0x%08x\n",
- __func__, page);
- ret = -EIO;
- goto erase_exit;
- }
+ /* Try to identify manufacturer */
+ manufacturer_desc = nand_get_manufacturer_desc(maf_id);
+ chip->manufacturer.desc = manufacturer_desc;
- /*
- * Invalidate the page cache, if we erase the block which
- * contains the current cached page.
- */
- if (page <= chip->pagebuf && chip->pagebuf <
- (page + pages_per_block))
- chip->pagebuf = -1;
+ if (!type)
+ type = nand_flash_ids;
- single_erase_cmd(chip, page & chip->pagemask);
+ /*
+ * Save the NAND_BUSWIDTH_16 flag before letting auto-detection logic
+ * override it.
+ * This is required to make sure initial NAND bus width set by the
+ * NAND controller driver is coherent with the real NAND bus width
+ * (extracted by auto-detection code).
+ */
+ busw = chip->options & NAND_BUSWIDTH_16;
- status = chip->legacy.waitfunc(chip);
+ /*
+ * The flag is only set (never cleared), reset it to its default value
+ * before starting auto-detection.
+ */
+ chip->options &= ~NAND_BUSWIDTH_16;
- /* See if block erase succeeded */
- if (status & NAND_STATUS_FAIL) {
- pr_debug("%s: failed erase, page 0x%08x\n",
- __func__, page);
- ret = -EIO;
- instr->fail_addr =
- ((loff_t)page << chip->page_shift);
- goto erase_exit;
+ for (; type->name != NULL; type++) {
+ if (is_full_id_nand(type)) {
+ if (find_full_id_nand(chip, type))
+ goto ident_done;
+ } else if (dev_id == type->dev_id) {
+ break;
}
+ }
- /* Increment page address and decrement length */
- len -= (1 << chip->phys_erase_shift);
- page += pages_per_block;
+ if (!type->name || !type->pagesize) {
+ /* Check if the chip is ONFI compliant */
+ ret = nand_onfi_detect(chip);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto ident_done;
- /* Check, if we cross a chip boundary */
- if (len && !(page & chip->pagemask)) {
- chipnr++;
- chip->legacy.select_chip(chip, -1);
- chip->legacy.select_chip(chip, chipnr);
- }
+ /* Check if the chip is JEDEC compliant */
+ ret = nand_jedec_detect(chip);
+ if (ret < 0)
+ return ret;
+ else if (ret)
+ goto ident_done;
}
- ret = 0;
+ if (!type->name)
+ return -ENODEV;
-erase_exit:
- /* Deselect and wake up anyone waiting on the device */
- chip->legacy.select_chip(chip, -1);
- nand_release_device(chip);
+ chip->parameters.model = strdup(type->name);
+ if (!chip->parameters.model)
+ return -ENOMEM;
- /* Return more or less happy */
- return ret;
-}
+ if (!type->pagesize)
+ nand_manufacturer_detect(chip);
+ else
+ nand_decode_id(chip, type);
-/**
- * nand_sync - [MTD Interface] sync
- * @mtd: MTD device structure
- *
- * Sync is actually a wait for chip ready function.
- */
-static void nand_sync(struct mtd_info *mtd)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ /* Get chip options */
+ chip->options |= type->options;
- pr_debug("%s: called\n", __func__);
+ memorg->eraseblocks_per_lun =
+ DIV_ROUND_DOWN_ULL((u64)type->chipsize << 20,
+ memorg->pagesize *
+ memorg->pages_per_eraseblock);
- /* Grab the lock and see if the device is available */
- nand_get_device(chip, FL_SYNCING);
- /* Release it and go back */
- nand_release_device(chip);
-}
+ident_done:
+ if (!mtd->name)
+ mtd->name = strdup(chip->parameters.model);
-/**
- * nand_block_isbad - [MTD Interface] Check if block at offset is bad
- * @mtd: MTD device structure
- * @offs: offset relative to mtd start
- */
-static int nand_block_isbad(struct mtd_info *mtd, loff_t offs)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
+ if (chip->options & NAND_BUSWIDTH_AUTO) {
+ WARN_ON(busw & NAND_BUSWIDTH_16);
+ nand_set_defaults(chip);
+ } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
+ /*
+ * Check if the bus width is correct. Hardware drivers should set
+ * up the chip correctly!
+ */
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ maf_id, dev_id);
+ pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
+ mtd->name);
+ pr_warn("bus width %d instead of %d bits\n", busw ? 16 : 8,
+ (chip->options & NAND_BUSWIDTH_16) ? 16 : 8);
+ ret = -EINVAL;
- return nand_block_checkbad(chip, offs, 1, 0);
-}
+ goto free_detect_allocation;
+ }
-/**
- * nand_block_markbad - [MTD Interface] Mark block at the given offset as bad
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- */
-static int nand_block_markbad(struct mtd_info *mtd, loff_t ofs)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
+ nand_decode_bbm_options(chip);
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ /* Calculate the address shift from the page size */
+ chip->page_shift = ffs(mtd->writesize) - 1;
+ /* Convert chipsize to number of pages per chip -1 */
+ targetsize = nanddev_target_size(&chip->base);
+ chip->pagemask = (targetsize >> chip->page_shift) - 1;
- ret = nand_block_isbad(mtd, ofs);
- if (ret) {
- /* If it was bad already, return success and do nothing */
- if (ret > 0)
- return 0;
- return ret;
+ chip->bbt_erase_shift = chip->phys_erase_shift =
+ ffs(mtd->erasesize) - 1;
+ if (targetsize & 0xffffffff)
+ chip->chip_shift = ffs((unsigned)targetsize) - 1;
+ else {
+ chip->chip_shift = ffs((unsigned)(targetsize >> 32));
+ chip->chip_shift += 32 - 1;
}
- return nand_block_markbad_lowlevel(chip, ofs);
-}
+ if (chip->chip_shift - chip->page_shift > 16)
+ chip->options |= NAND_ROW_ADDR_3;
-/**
- * nand_block_markgood - [MTD Interface] Mark block at the given offset as good
- * @mtd: MTD device structure
- * @ofs: offset relative to mtd start
- */
-static int nand_block_markgood(struct mtd_info *mtd, loff_t ofs)
-{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ret;
+ chip->badblockbits = 8;
- if (!IS_ENABLED(CONFIG_MTD_WRITE))
- return -ENOTSUPP;
+ nand_legacy_adjust_cmdfunc(chip);
- ret = nand_block_isbad(mtd, ofs);
- if (ret < 0)
- return ret;
+ pr_info("device found, Manufacturer ID: 0x%02x, Chip ID: 0x%02x\n",
+ maf_id, dev_id);
+ pr_info("%s %s\n", nand_manufacturer_name(manufacturer_desc),
+ chip->parameters.model);
+ pr_info("%d MiB, %s, erase size: %d KiB, page size: %d, OOB size: %d\n",
+ (int)(targetsize >> 20), nand_is_slc(chip) ? "SLC" : "MLC",
+ mtd->erasesize >> 10, mtd->writesize, mtd->oobsize);
+ return 0;
- /* If it was good already, return success and do nothing */
- if (!ret)
- return 0;
+free_detect_allocation:
+ kfree(chip->parameters.model);
- return nand_block_markgood_lowlevel(chip, ofs);
+ return ret;
}
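
The shift calculations at the end of nand_detect() boil down to the following
arithmetic (example geometry chosen here: 2 KiB pages, 128 KiB blocks, a
256 MiB target; ffs() returns the 1-based index of the least significant set
bit, as in the kernel):

#include <stdio.h>
#include <strings.h>	/* ffs() */

int main(void)
{
	unsigned long long targetsize = 256ULL << 20;	/* 256 MiB */
	unsigned int writesize = 2048, erasesize = 128 * 1024;
	int page_shift = ffs(writesize) - 1;			/* 11 */
	int phys_erase_shift = ffs(erasesize) - 1;		/* 17 */
	unsigned int pagemask = (targetsize >> page_shift) - 1;	/* 0x1ffff */
	int chip_shift = ffs((unsigned)targetsize) - 1;		/* 28 */

	printf("page_shift %d, erase_shift %d, pagemask 0x%x, chip_shift %d\n",
	       page_shift, phys_erase_shift, pagemask, chip_shift);
	printf("row address needs %s cycles\n",
	       (chip_shift - page_shift > 16) ? "3" : "2");
	return 0;
}
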
-/**
- * nand_onfi_set_features- [REPLACEABLE] set features for ONFI nand
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four bytes array.
- */
-static int nand_onfi_set_features(struct nand_chip *chip,
- int addr, uint8_t *subfeature_param)
+static const char * const nand_ecc_algos[] = {
+ [NAND_ECC_HAMMING] = "hamming",
+ [NAND_ECC_BCH] = "bch",
+ [NAND_ECC_RS] = "rs",
+};
+
+static enum nand_ecc_algo of_get_nand_ecc_algo(struct device_node *np)
{
- int status;
+ enum nand_ecc_algo ecc_algo;
+ const char *pm;
+ int err;
+
+ err = of_property_read_string(np, "nand-ecc-algo", &pm);
+ if (!err) {
+ for (ecc_algo = NAND_ECC_HAMMING;
+ ecc_algo < ARRAY_SIZE(nand_ecc_algos);
+ ecc_algo++) {
+ if (!strcasecmp(pm, nand_ecc_algos[ecc_algo]))
+ return ecc_algo;
+ }
+ }
- if (!chip->onfi_version ||
- !(le16_to_cpu(chip->onfi_params.opt_cmd)
- & ONFI_OPT_CMD_SET_GET_FEATURES))
- return -EINVAL;
+ /*
+ * For backward compatibility we also read "nand-ecc-mode", checking
+ * for some obsolete values that used to specify the ECC algorithm.
+ */
+ err = of_property_read_string(np, "nand-ecc-mode", &pm);
+ if (!err) {
+ if (!strcasecmp(pm, "soft"))
+ return NAND_ECC_HAMMING;
+ else if (!strcasecmp(pm, "soft_bch"))
+ return NAND_ECC_BCH;
+ }
- chip->legacy.cmdfunc(chip, NAND_CMD_SET_FEATURES, addr, -1);
- chip->legacy.write_buf(chip, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
- status = chip->legacy.waitfunc(chip);
- if (status & NAND_STATUS_FAIL)
- return -EIO;
- return 0;
+ return NAND_ECC_UNKNOWN;
}
-/**
- * nand_onfi_get_features- [REPLACEABLE] get features for ONFI nand
- * @mtd: MTD device structure
- * @chip: nand chip info structure
- * @addr: feature address.
- * @subfeature_param: the subfeature parameters, a four bytes array.
- */
-static int nand_onfi_get_features(struct nand_chip *chip,
- int addr, uint8_t *subfeature_param)
+static int nand_dt_init(struct nand_chip *chip)
{
- if (!chip->onfi_version ||
- !(le16_to_cpu(chip->onfi_params.opt_cmd)
- & ONFI_OPT_CMD_SET_GET_FEATURES))
- return -EINVAL;
+ struct device_node *dn = nand_get_flash_node(chip);
+ enum nand_ecc_algo ecc_algo;
+ int ecc_mode, ecc_strength, ecc_step;
- /* clear the sub feature parameters */
- memset(subfeature_param, 0, ONFI_SUBFEATURE_PARAM_LEN);
+ if (!dn)
+ return 0;
- chip->legacy.cmdfunc(chip, NAND_CMD_GET_FEATURES, addr, -1);
- chip->legacy.read_buf(chip, subfeature_param, ONFI_SUBFEATURE_PARAM_LEN);
- return 0;
-}
+ if (of_get_nand_bus_width(dn) == 16)
+ chip->options |= NAND_BUSWIDTH_16;
-/* Set default functions */
-static void nand_set_defaults(struct nand_chip *chip, int busw)
-{
- /* check for proper chip_delay setup, set 20us if not */
- if (!chip->legacy.chip_delay)
- chip->legacy.chip_delay = 20;
-
- /* check, if a user supplied command function given */
- if (chip->legacy.cmdfunc == NULL)
- chip->legacy.cmdfunc = nand_command;
-
- /* check, if a user supplied wait function given */
- if (chip->legacy.waitfunc == NULL)
- chip->legacy.waitfunc = nand_wait;
-
- if (!chip->legacy.select_chip)
- chip->legacy.select_chip = nand_select_chip;
-
- /* set for ONFI nand */
- if (!chip->legacy.set_features)
- chip->legacy.set_features = nand_onfi_set_features;
- if (!chip->legacy.get_features)
- chip->legacy.get_features = nand_onfi_get_features;
-
- if (!chip->legacy.read_byte)
- chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
- if (!chip->legacy.read_word)
- chip->legacy.read_word = nand_read_word;
- if (!chip->legacy.block_bad)
- chip->legacy.block_bad = nand_block_bad;
-#ifdef CONFIG_MTD_WRITE
- if (!chip->legacy.block_markbad)
- chip->legacy.block_markbad = nand_default_block_markbad;
- if (!chip->legacy.write_buf)
- chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
-#endif
- if (!chip->legacy.read_buf)
- chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
-#ifdef CONFIG_NAND_BBT
- if (!chip->scan_bbt)
- chip->scan_bbt = nand_default_bbt;
-#endif
- if (!chip->controller) {
- chip->controller = &chip->hwcontrol;
- }
+ if (of_property_read_bool(dn, "nand-is-boot-medium"))
+ chip->options |= NAND_IS_BOOT_MEDIUM;
-}
+ if (of_get_nand_on_flash_bbt(dn))
+ chip->bbt_options |= NAND_BBT_USE_FLASH;
-/* Sanitize ONFI strings so we can safely print them */
-static void sanitize_string(uint8_t *s, size_t len)
-{
- ssize_t i;
+ ecc_mode = of_get_nand_ecc_mode(dn);
+ ecc_algo = of_get_nand_ecc_algo(dn);
+ ecc_strength = of_get_nand_ecc_strength(dn);
+ ecc_step = of_get_nand_ecc_step_size(dn);
- /* Null terminate */
- s[len - 1] = 0;
+ if (ecc_mode >= 0)
+ chip->ecc.mode = ecc_mode;
- /* Remove non printable chars */
- for (i = 0; i < len - 1; i++) {
- if (s[i] < ' ' || s[i] > 127)
- s[i] = '?';
- }
+ if (ecc_algo != NAND_ECC_UNKNOWN)
+ chip->ecc.algo = ecc_algo;
- /* Remove trailing spaces */
- strim(s);
-}
+ if (ecc_strength >= 0)
+ chip->ecc.strength = ecc_strength;
-static u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
-{
- int i;
- while (len--) {
- crc ^= *p++ << 8;
- for (i = 0; i < 8; i++)
- crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
- }
+ if (ecc_step > 0)
+ chip->ecc.size = ecc_step;
+
+ if (of_property_read_bool(dn, "nand-ecc-maximize"))
+ chip->ecc.options |= NAND_ECC_MAXIMIZE;
- return crc;
+ return 0;
}
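
The properties consumed by nand_dt_init() correspond to the generic NAND
bindings; a hypothetical node could look like the fragment below (shown as a
comment, values purely illustrative):

/*
 *	nand@0 {
 *		nand-bus-width = <16>;
 *		nand-on-flash-bbt;
 *		nand-is-boot-medium;
 *		nand-ecc-mode = "hw";
 *		nand-ecc-algo = "bch";
 *		nand-ecc-strength = <8>;
 *		nand-ecc-step-size = <512>;
 *	};
 */
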
-/*
- * Check if the NAND chip is ONFI compliant, returns 1 if it is, 0 otherwise.
+/**
+ * nand_scan_ident - Scan for the NAND device
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for
+ * @table: alternative NAND ID table
+ *
+ * This is the first phase of the normal nand_scan() function. It reads the
+ * flash ID and sets up MTD fields accordingly.
+ *
+ * This helper used to be called directly from controller drivers that needed
+ * to tweak some ECC-related parameters before nand_scan_tail(). This separation
+ * prevented dynamic allocations during this phase which was inconvenient and
+ * has been banned for the benefit of the ->init_ecc()/cleanup_ecc() hooks.
*/
-static int nand_flash_detect_onfi(struct nand_chip *chip, int *busw)
+int nand_scan_ident(struct nand_chip *chip, unsigned int maxchips,
+ struct nand_flash_dev *table)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- struct nand_onfi_params *p = &chip->onfi_params;
- int i, j;
- int val;
+ struct nand_memory_organization *memorg;
+ int nand_maf_id, nand_dev_id;
+ unsigned int i;
+ int ret;
- /* Try ONFI for unknown chip or LP */
- chip->legacy.cmdfunc(chip, NAND_CMD_READID, 0x20, -1);
- if (chip->legacy.read_byte(chip) != 'O' || chip->legacy.read_byte(chip) != 'N' ||
- chip->legacy.read_byte(chip) != 'F' || chip->legacy.read_byte(chip) != 'I')
- return 0;
+ memorg = nanddev_get_memorg(&chip->base);
- chip->legacy.cmdfunc(chip, NAND_CMD_PARAM, 0, -1);
- for (i = 0; i < 3; i++) {
- for (j = 0; j < sizeof(*p); j++)
- ((uint8_t *)p)[j] = chip->legacy.read_byte(chip);
- if (onfi_crc16(ONFI_CRC_BASE, (uint8_t *)p, 254) ==
- le16_to_cpu(p->crc)) {
- break;
- }
- }
+ /* Assume all dies are deselected when we enter nand_scan_ident(). */
+ chip->cur_cs = -1;
- if (i == 3) {
- pr_err("Could not find valid ONFI parameter page; aborting\n");
- return 0;
- }
+ mutex_init(&chip->lock);
- /* Check version */
- val = le16_to_cpu(p->revision);
- if (val & (1 << 5))
- chip->onfi_version = 23;
- else if (val & (1 << 4))
- chip->onfi_version = 22;
- else if (val & (1 << 3))
- chip->onfi_version = 21;
- else if (val & (1 << 2))
- chip->onfi_version = 20;
- else if (val & (1 << 1))
- chip->onfi_version = 10;
-
- if (!chip->onfi_version) {
- pr_info("%s: unsupported ONFI version: %d\n", __func__, val);
- return 0;
+ /* Enforce the right timings for reset/detection */
+ chip->current_interface_config = nand_get_reset_interface_config();
+
+ if (IS_ENABLED(CONFIG_OFTREE)) {
+ ret = nand_dt_init(chip);
+ if (ret)
+ return ret;
}
- sanitize_string(p->manufacturer, sizeof(p->manufacturer));
- sanitize_string(p->model, sizeof(p->model));
- if (!mtd->name)
- mtd->name = p->model;
- mtd->writesize = le32_to_cpu(p->byte_per_page);
- mtd->erasesize = le32_to_cpu(p->pages_per_block) * mtd->writesize;
- mtd->oobsize = le16_to_cpu(p->spare_bytes_per_page);
- chip->chipsize = le32_to_cpu(p->blocks_per_lun);
- chip->chipsize *= (uint64_t)mtd->erasesize * p->lun_count;
- chip->bits_per_cell = p->bits_per_cell;
-
- *busw = 0;
- if (le16_to_cpu(p->features) & 1)
- *busw = NAND_BUSWIDTH_16;
-
- pr_info("ONFI flash detected\n");
- return 1;
-}
+ if (!mtd->name && mtd->dev.parent)
+ mtd->name = strdup(dev_name(mtd->dev.parent));
-/*
- * nand_id_has_period - Check if an ID string has a given wraparound period
- * @id_data: the ID string
- * @arrlen: the length of the @id_data array
- * @period: the period of repitition
- *
- * Check if an ID string is repeated within a given sequence of bytes at
- * specific repetition interval period (e.g., {0x20,0x01,0x7F,0x20} has a
- * period of 3). This is a helper function for nand_id_len(). Returns non-zero
- * if the repetition has a period of @period; otherwise, returns zero.
- */
-static int nand_id_has_period(u8 *id_data, int arrlen, int period)
-{
- int i, j;
- for (i = 0; i < period; i++)
- for (j = i + period; j < arrlen; j += period)
- if (id_data[i] != id_data[j])
- return 0;
- return 1;
-}
+ /* Set the default functions */
+ nand_set_defaults(chip);
-/*
- * nand_id_len - Get the length of an ID string returned by CMD_READID
- * @id_data: the ID string
- * @arrlen: the length of the @id_data array
+ ret = nand_legacy_check_hooks(chip);
+ if (ret)
+ return ret;
- * Returns the length of the ID string, according to known wraparound/trailing
- * zero patterns. If no pattern exists, returns the length of the array.
- */
-static int nand_id_len(u8 *id_data, int arrlen)
-{
- int last_nonzero, period;
+ memorg->ntargets = maxchips;
- /* Find last non-zero byte */
- for (last_nonzero = arrlen - 1; last_nonzero >= 0; last_nonzero--)
- if (id_data[last_nonzero])
- break;
+ /* Read the flash type */
+ ret = nand_detect(chip, table);
+ if (ret) {
+ if (!(chip->options & NAND_SCAN_SILENT_NODEV))
+ pr_warn("No NAND device found\n");
+ nand_deselect_target(chip);
+ return ret;
+ }
- /* All zeros */
- if (last_nonzero < 0)
- return 0;
+ nand_maf_id = chip->id.data[0];
+ nand_dev_id = chip->id.data[1];
- /* Calculate wraparound period */
- for (period = 1; period < arrlen; period++)
- if (nand_id_has_period(id_data, arrlen, period))
+ nand_deselect_target(chip);
+
+ /* Check for a chip array */
+ for (i = 1; i < maxchips; i++) {
+ u8 id[2];
+
+ /* See comment in nand_get_flash_type for reset */
+ ret = nand_reset(chip, i);
+ if (ret)
break;
- /* There's a repeated pattern */
- if (period < arrlen)
- return period;
+ nand_select_target(chip, i);
+ /* Send the command for reading device ID */
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ break;
+ /* Read manufacturer and device IDs */
+ if (nand_maf_id != id[0] || nand_dev_id != id[1]) {
+ nand_deselect_target(chip);
+ break;
+ }
+ nand_deselect_target(chip);
+ }
+ if (i > 1)
+ pr_info("%d chips detected\n", i);
- /* There are trailing zeros */
- if (last_nonzero < arrlen - 1)
- return last_nonzero + 1;
+ /* Store the number of chips and calc total size for mtd */
+ memorg->ntargets = i;
+ mtd->size = i * nanddev_target_size(&chip->base);
- /* No pattern detected */
- return arrlen;
+ return 0;
}
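
A controller driver is not expected to call nand_scan_ident() directly anymore;
a minimal probe sketch, assuming the Linux-style nand_scan() wrapper and with
priv, my_controller_ops and the registration step left hypothetical:

static int my_nand_probe(struct device_d *dev)
{
	struct nand_chip *chip = &priv->chip;	/* hypothetical driver data */
	int ret;

	chip->controller = &priv->controller;
	chip->controller->ops = &my_controller_ops;

	/* nand_scan() runs nand_scan_ident() + nand_scan_tail() */
	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	/* ...followed by registering nand_to_mtd(chip) with the MTD core */
	return 0;
}
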
-/* Extract the bits of per cell from the 3rd byte of the extended ID */
-static int nand_get_bits_per_cell(u8 cellinfo)
+static void nand_scan_ident_cleanup(struct nand_chip *chip)
{
- int bits;
-
- bits = cellinfo & NAND_CI_CELLTYPE_MSK;
- bits >>= NAND_CI_CELLTYPE_SHIFT;
- return bits + 1;
+ kfree(chip->parameters.model);
+ kfree(chip->parameters.onfi);
}
-/*
- * Many new NAND share similar device ID codes, which represent the size of the
- * chip. The rest of the parameters must be decoded according to generic or
- * manufacturer-specific "extended ID" decoding patterns.
- */
-static void nand_decode_ext_id(struct mtd_info *mtd, struct nand_chip *chip,
- u8 id_data[8], int *busw)
+static int nand_set_ecc_soft_ops(struct nand_chip *chip)
{
- int extid, id_len;
- /* The 3rd id byte holds MLC / multichip data */
- chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
- /* The 4th id byte is the important one */
- extid = id_data[3];
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
- id_len = nand_id_len(id_data, 8);
+ if (WARN_ON(ecc->mode != NAND_ECC_SOFT))
+ return -EINVAL;
- /*
- * Field definitions are in the following datasheets:
- * Old style (4,5 byte ID): Samsung K9GAG08U0M (p.32)
- * New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44)
- * Hynix MLC (6 byte ID): Hynix H27UBG8T2B (p.22)
- *
- * Check for ID length, non-zero 6th byte, cell type, and Hynix/Samsung
- * ID to decide what to do.
- */
- if (id_len == 6 && id_data[0] == NAND_MFR_SAMSUNG &&
- !nand_is_slc(chip) && id_data[5] != 0x00) {
- /* Calc pagesize */
- mtd->writesize = 2048 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
- case 1:
- mtd->oobsize = 128;
- break;
- case 2:
- mtd->oobsize = 218;
- break;
- case 3:
- mtd->oobsize = 400;
- break;
- case 4:
- mtd->oobsize = 436;
- break;
- case 5:
- mtd->oobsize = 512;
- break;
- case 6:
- default: /* Other cases are "reserved" (unknown) */
- mtd->oobsize = 640;
- break;
+ switch (ecc->algo) {
+ case NAND_ECC_HAMMING:
+ ecc->calculate = nand_calculate_ecc;
+ ecc->correct = nand_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->size)
+ ecc->size = 256;
+ ecc->bytes = 3;
+ ecc->strength = 1;
+
+ if (IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_HAMMING_SMC))
+ ecc->options |= NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ return 0;
+ case NAND_ECC_BCH:
+ if (!mtd_nand_has_bch()) {
+ WARN(1, "CONFIG_MTD_NAND_ECC_SW_BCH not enabled\n");
+ return -EINVAL;
}
- extid >>= 2;
- /* Calc blocksize */
- mtd->erasesize = (128 * 1024) <<
- (((extid >> 1) & 0x04) | (extid & 0x03));
- *busw = 0;
- } else if (id_len == 6 && id_data[0] == NAND_MFR_HYNIX &&
- !nand_is_slc(chip)) {
- unsigned int tmp;
-
- /* Calc pagesize */
- mtd->writesize = 2048 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- switch (((extid >> 2) & 0x04) | (extid & 0x03)) {
- case 0:
- mtd->oobsize = 128;
- break;
- case 1:
- mtd->oobsize = 224;
- break;
- case 2:
- mtd->oobsize = 448;
- break;
- case 3:
- mtd->oobsize = 64;
- break;
- case 4:
- mtd->oobsize = 32;
- break;
- case 5:
- mtd->oobsize = 16;
- break;
- default:
- mtd->oobsize = 640;
- break;
+ ecc->calculate = nand_bch_calculate_ecc;
+ ecc->correct = nand_bch_correct_data;
+ ecc->read_page = nand_read_page_swecc;
+ ecc->read_subpage = nand_read_subpage;
+ ecc->write_page = nand_write_page_swecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->write_oob = nand_write_oob_std;
+
+ /*
+ * Board driver should supply ecc.size and ecc.strength
+ * values to select how many bits are correctable.
+ * Otherwise, default to 4 bits for large page devices.
+ */
+ if (!ecc->size && (mtd->oobsize >= 64)) {
+ ecc->size = 512;
+ ecc->strength = 4;
}
- extid >>= 2;
- /* Calc blocksize */
- tmp = ((extid >> 1) & 0x04) | (extid & 0x03);
- if (tmp < 0x03)
- mtd->erasesize = (128 * 1024) << tmp;
- else if (tmp == 0x03)
- mtd->erasesize = 768 * 1024;
- else
- mtd->erasesize = (64 * 1024) << tmp;
- *busw = 0;
- } else {
- /* Calc pagesize */
- mtd->writesize = 1024 << (extid & 0x03);
- extid >>= 2;
- /* Calc oobsize */
- mtd->oobsize = (8 << (extid & 0x01)) *
- (mtd->writesize >> 9);
- extid >>= 2;
- /* Calc blocksize. Blocksize is multiples of 64KiB */
- mtd->erasesize = (64 * 1024) << (extid & 0x03);
- extid >>= 2;
- /* Get buswidth information */
- *busw = (extid & 0x01) ? NAND_BUSWIDTH_16 : 0;
+
/*
- * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
- * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
- * follows:
- * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
- * 110b -> 24nm
- * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ * If no ECC placement scheme was provided, pick up the default
+ * large page one.
*/
- if (id_len >= 6 && id_data[0] == NAND_MFR_TOSHIBA &&
- nand_is_slc(chip) &&
- (id_data[5] & 0x7) == 0x6 /* 24nm */ &&
- !(id_data[4] & 0x80) /* !BENAND */) {
- mtd->oobsize = 32 * mtd->writesize >> 9;
+ if (!mtd->ooblayout) {
+ /* handle large page devices only */
+ if (mtd->oobsize < 64) {
+ WARN(1, "OOB layout is required when using software BCH on small pages\n");
+ return -EINVAL;
+ }
+
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+
}
- }
-}
-/*
- * Old devices have chip data hardcoded in the device ID table. nand_decode_id
- * decodes a matching ID table entry and assigns the MTD size parameters for
- * the chip.
- */
-static void nand_decode_id(struct mtd_info *mtd, struct nand_chip *chip,
- struct nand_flash_dev *type, u8 id_data[8],
- int *busw)
-{
- int maf_id = id_data[0];
+ /*
+ * We can only maximize ECC config when the default layout is
+ * used, otherwise we don't know how many bytes can really be
+ * used.
+ */
+ if (mtd->ooblayout == &nand_ooblayout_lp_ops &&
+ ecc->options & NAND_ECC_MAXIMIZE) {
+ int steps, bytes;
- mtd->erasesize = type->erasesize;
- mtd->writesize = type->pagesize;
- mtd->oobsize = mtd->writesize / 32;
- *busw = type->options & NAND_BUSWIDTH_16;
+ /* Always prefer 1k blocks over 512bytes ones */
+ ecc->size = 1024;
+ steps = mtd->writesize / ecc->size;
- /* All legacy ID NAND are small-page, SLC */
- chip->bits_per_cell = 1;
+ /* Reserve 2 bytes for the BBM */
+ bytes = (mtd->oobsize - 2) / steps;
+ ecc->strength = bytes * 8 / fls(8 * ecc->size);
+ }
- /*
- * Check for Spansion/AMD ID + repeating 5th, 6th byte since
- * some Spansion chips have erasesize that conflicts with size
- * listed in nand_ids table.
- * Data sheet (5 byte ID): Spansion S30ML-P ORNAND (p.39)
- */
- if (maf_id == NAND_MFR_AMD && id_data[4] != 0x00 && id_data[5] == 0x00
- && id_data[6] == 0x00 && id_data[7] == 0x00
- && mtd->writesize == 512) {
- mtd->erasesize = 128 * 1024;
- mtd->erasesize <<= ((id_data[3] & 0x03) << 1);
+ /* See nand_bch_init() for details. */
+ ecc->bytes = 0;
+ ecc->priv = nand_bch_init(mtd);
+ if (!ecc->priv) {
+ WARN(1, "BCH ECC initialization failed!\n");
+ return -EINVAL;
+ }
+ return 0;
+ default:
+ WARN(1, "Unsupported ECC algorithm!\n");
+ return -EINVAL;
}
}
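
The NAND_ECC_MAXIMIZE branch above derives the strength purely from the free
OOB; for instance with a hypothetical 4096+224 page (fls() returns the index of
the highest set bit, as in the kernel, re-implemented here for the demo):

#include <stdio.h>

static int fls_(unsigned int x)	/* same result as the kernel's fls() */
{
	int r = 0;

	while (x) {
		r++;
		x >>= 1;
	}
	return r;
}

int main(void)
{
	unsigned int writesize = 4096, oobsize = 224;	/* hypothetical geometry */
	unsigned int size = 1024;
	unsigned int steps = writesize / size;		/* 4 */
	unsigned int bytes = (oobsize - 2) / steps;	/* 55 (2 bytes kept for BBM) */
	unsigned int strength = bytes * 8 / fls_(8 * size);	/* 440 / 14 = 31 */

	printf("ecc.size %u, steps %u, ecc.bytes %u, ecc.strength %u\n",
	       size, steps, bytes, strength);
	return 0;
}
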
-/*
- * Set the bad block marker/indicator (BBM/BBI) patterns according to some
- * heuristic patterns using various detected parameters (e.g., manufacturer,
- * page size, cell-type information).
+/**
+ * nand_check_ecc_caps - check the sanity of preset ECC settings
+ * @chip: nand chip info structure
+ * @caps: ECC caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * When ECC step size and strength are already set, check if they are supported
+ * by the controller and the calculated ECC bytes fit within the chip's OOB.
+ * On success, the calculated number of ECC bytes is set.
*/
-static void nand_decode_bbm_options(struct mtd_info *mtd,
- struct nand_chip *chip, u8 id_data[8])
+static int
+nand_check_ecc_caps(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
- int maf_id = id_data[0];
-
- /* Set the bad block position */
- if (mtd->writesize > 512 || (chip->options & NAND_BUSWIDTH_16))
- chip->badblockpos = NAND_LARGE_BADBLOCK_POS;
- else
- chip->badblockpos = NAND_SMALL_BADBLOCK_POS;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int preset_step = chip->ecc.size;
+ int preset_strength = chip->ecc.strength;
+ int ecc_bytes, nsteps = mtd->writesize / preset_step;
+ int i, j;
- /*
- * Bad block marker is stored in the last page of each block on Samsung
- * and Hynix MLC devices; stored in first two pages of each block on
- * Micron devices with 2KiB pages and on SLC Samsung, Hynix, Toshiba,
- * AMD/Spansion, and Macronix. All others scan only the first page.
- */
- if (!nand_is_slc(chip) &&
- (maf_id == NAND_MFR_SAMSUNG ||
- maf_id == NAND_MFR_HYNIX))
- chip->bbt_options |= NAND_BBT_SCANLASTPAGE;
- else if ((nand_is_slc(chip) &&
- (maf_id == NAND_MFR_SAMSUNG ||
- maf_id == NAND_MFR_HYNIX ||
- maf_id == NAND_MFR_TOSHIBA ||
- maf_id == NAND_MFR_AMD ||
- maf_id == NAND_MFR_MACRONIX)) ||
- (mtd->writesize == 2048 &&
- maf_id == NAND_MFR_MICRON))
- chip->bbt_options |= NAND_BBT_SCAN2NDPAGE;
-}
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
-static inline bool is_full_id_nand(struct nand_flash_dev *type)
-{
- return type->id_len;
-}
+ if (stepinfo->stepsize != preset_step)
+ continue;
-static bool find_full_id_nand(struct nand_chip *chip,
- struct nand_flash_dev *type, u8 *id_data, int *busw)
-{
- struct mtd_info *mtd = nand_to_mtd(chip);
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ if (stepinfo->strengths[j] != preset_strength)
+ continue;
- if (!strncmp(type->id, id_data, type->id_len)) {
- mtd->writesize = type->pagesize;
- mtd->erasesize = type->erasesize;
- mtd->oobsize = type->oobsize;
+ ecc_bytes = caps->calc_ecc_bytes(preset_step,
+ preset_strength);
+ if (WARN_ONCE(ecc_bytes < 0, "%s: eccbytes < 0\n", __func__))
+ return ecc_bytes;
- chip->bits_per_cell = nand_get_bits_per_cell(id_data[2]);
- chip->chipsize = (uint64_t)type->chipsize << 20;
- chip->options |= type->options;
+ if (ecc_bytes * nsteps > oobavail) {
+ pr_err("ECC (step, strength) = (%d, %d) does not fit in OOB",
+ preset_step, preset_strength);
+ return -ENOSPC;
+ }
- *busw = type->options & NAND_BUSWIDTH_16;
+ chip->ecc.bytes = ecc_bytes;
- return true;
+ return 0;
+ }
}
- return false;
+
+ pr_err("ECC (step, strength) = (%d, %d) not supported on this controller",
+ preset_step, preset_strength);
+
+ return -ENOTSUPP;
}
-/*
- * Get the flash and manufacturer id and lookup if the type is supported.
+/**
+ * nand_match_ecc_req - meet the chip's requirement with least ECC bytes
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * If a chip's ECC requirement is provided, try to meet it with the least
+ * number of ECC bytes (i.e. with the largest number of OOB-free bytes).
+ * On success, the chosen ECC settings are set.
*/
-static struct nand_flash_dev *nand_get_flash_type(struct nand_chip *chip,
- int busw,
- int *maf_id, int *dev_id,
- struct nand_flash_dev *type)
+static int
+nand_match_ecc_req(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int i, maf_idx;
- u8 id_data[8];
+ const struct nand_ecc_step_info *stepinfo;
+ int req_step = chip->base.eccreq.step_size;
+ int req_strength = chip->base.eccreq.strength;
+ int req_corr, step_size, strength, nsteps, ecc_bytes, ecc_bytes_total;
+ int best_step, best_strength, best_ecc_bytes;
+ int best_ecc_bytes_total = INT_MAX;
+ int i, j;
- /* Select the device */
- chip->legacy.select_chip(chip, 0);
+ /* No information provided by the NAND chip */
+ if (!req_step || !req_strength)
+ return -ENOTSUPP;
- /*
- * Reset the chip, required by some chips (e.g. Micron MT29FxGxxxxx)
- * after power-up.
- */
- chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
+ /* number of correctable bits the chip requires in a page */
+ req_corr = mtd->writesize / req_step * req_strength;
- /* Send the command for reading device ID */
- chip->legacy.cmdfunc(chip, NAND_CMD_READID, 0x00, -1);
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
- /* Read manufacturer and device IDs */
- *maf_id = chip->legacy.read_byte(chip);
- *dev_id = chip->legacy.read_byte(chip);
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
- /*
- * Try again to make sure, as some systems the bus-hold or other
- * interface concerns can cause random data which looks like a
- * possibly credible NAND flash to appear. If the two results do
- * not match, ignore the device completely.
- */
+ /*
+ * If both step size and strength are smaller than the
+ * chip's requirement, it is not easy to compare the
+ * resulting reliability.
+ */
+ if (step_size < req_step && strength < req_strength)
+ continue;
- chip->legacy.cmdfunc(chip, NAND_CMD_READID, 0x00, -1);
+ if (mtd->writesize % step_size)
+ continue;
- /* Read entire ID string */
- for (i = 0; i < 8; i++)
- id_data[i] = chip->legacy.read_byte(chip);
+ nsteps = mtd->writesize / step_size;
- if (id_data[0] != *maf_id || id_data[1] != *dev_id) {
- pr_info("%s: second ID read did not match "
- "%02x,%02x against %02x,%02x\n", __func__,
- *maf_id, *dev_id, id_data[0], id_data[1]);
- return ERR_PTR(-ENODEV);
- }
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ONCE(ecc_bytes < 0, "%s: eccbytes < 0\n", __func__))
+ continue;
+ ecc_bytes_total = ecc_bytes * nsteps;
- if (!type)
- type = nand_flash_ids;
+ if (ecc_bytes_total > oobavail ||
+ strength * nsteps < req_corr)
+ continue;
- for (; type->name != NULL; type++) {
- if (is_full_id_nand(type)) {
- if (find_full_id_nand(chip, type, id_data, &busw))
- goto ident_done;
- } else if (*dev_id == type->dev_id) {
- break;
+ /*
+ * We assume the best is to meet the chip's requirement
+ * with the least number of ECC bytes.
+ */
+ if (ecc_bytes_total < best_ecc_bytes_total) {
+ best_ecc_bytes_total = ecc_bytes_total;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
}
}
- chip->onfi_version = 0;
- if (!type->name || !type->pagesize) {
- /* Check is chip is ONFI compliant */
- if (nand_flash_detect_onfi(chip, &busw))
- goto ident_done;
- }
+ if (best_ecc_bytes_total == INT_MAX)
+ return -ENOTSUPP;
+
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
+
+ return 0;
+}
- if (!type->name)
- return ERR_PTR(-ENODEV);
+/**
+ * nand_maximize_ecc - choose the max ECC strength available
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the max ECC strength that is supported on the controller, and can fit
+ * within the chip's OOB. On success, the chosen ECC settings are set.
+ */
+static int
+nand_maximize_ecc(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ const struct nand_ecc_step_info *stepinfo;
+ int step_size, strength, nsteps, ecc_bytes, corr;
+ int best_corr = 0;
+ int best_step = 0;
+ int best_strength, best_ecc_bytes;
+ int i, j;
- if (!mtd->name)
- mtd->name = type->name;
+ for (i = 0; i < caps->nstepinfos; i++) {
+ stepinfo = &caps->stepinfos[i];
+ step_size = stepinfo->stepsize;
- chip->chipsize = (uint64_t)type->chipsize << 20;
+ /* If chip->ecc.size is already set, respect it */
+ if (chip->ecc.size && step_size != chip->ecc.size)
+ continue;
- if (!type->pagesize) {
- /* Decode parameters from extended ID */
- nand_decode_ext_id(mtd, chip, id_data, &busw);
- } else {
- nand_decode_id(mtd, chip, type, id_data, &busw);
- }
- /* Get chip options */
- chip->options |= type->options;
+ for (j = 0; j < stepinfo->nstrengths; j++) {
+ strength = stepinfo->strengths[j];
- /*
- * Check if chip is not a Samsung device. Do not clear the
- * options for chips which do not have an extended id.
- */
- if (*maf_id != NAND_MFR_SAMSUNG && !type->pagesize)
- chip->options &= ~NAND_SAMSUNG_LP_OPTIONS;
-ident_done:
+ if (mtd->writesize % step_size)
+ continue;
- /* Try to identify manufacturer */
- for (maf_idx = 0; nand_manuf_ids[maf_idx].id != 0x0; maf_idx++) {
- if (nand_manuf_ids[maf_idx].id == *maf_id)
- break;
- }
+ nsteps = mtd->writesize / step_size;
- if (chip->options & NAND_BUSWIDTH_AUTO) {
- WARN_ON(chip->options & NAND_BUSWIDTH_16);
- chip->options |= busw;
- nand_set_defaults(chip, busw);
- } else if (busw != (chip->options & NAND_BUSWIDTH_16)) {
- /*
- * Check, if buswidth is correct. Hardware drivers should set
- * chip correct!
- */
- pr_info("NAND device: Manufacturer ID:"
- " 0x%02x, Chip ID: 0x%02x (%s %s)\n", *maf_id,
- *dev_id, nand_manuf_ids[maf_idx].name, mtd->name);
- pr_warn("NAND bus width %d instead %d bit\n",
- (chip->options & NAND_BUSWIDTH_16) ? 16 : 8,
- busw ? 16 : 8);
- return ERR_PTR(-EINVAL);
- }
+ ecc_bytes = caps->calc_ecc_bytes(step_size, strength);
+ if (WARN_ONCE(ecc_bytes < 0, "%s: eccbytes < 0\n", __func__))
+ continue;
- nand_decode_bbm_options(mtd, chip, id_data);
+ if (ecc_bytes * nsteps > oobavail)
+ continue;
- /* Calculate the address shift from the page size */
- chip->page_shift = ffs(mtd->writesize) - 1;
- /* Convert chipsize to number of pages per chip -1 */
- chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;
+ corr = strength * nsteps;
- chip->bbt_erase_shift = chip->phys_erase_shift =
- ffs(mtd->erasesize) - 1;
- if (chip->chipsize & 0xffffffff)
- chip->chip_shift = ffs((unsigned)chip->chipsize) - 1;
- else {
- chip->chip_shift = ffs((unsigned)(chip->chipsize >> 32));
- chip->chip_shift += 32 - 1;
+ /*
+ * If the number of correctable bits is the same,
+ * bigger step_size has more reliability.
+ */
+ if (corr > best_corr ||
+ (corr == best_corr && step_size > best_step)) {
+ best_corr = corr;
+ best_step = step_size;
+ best_strength = strength;
+ best_ecc_bytes = ecc_bytes;
+ }
+ }
}
- chip->badblockbits = 8;
-
- /* Do not replace user supplied command function! */
- if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
- chip->legacy.cmdfunc = nand_command_lp;
+ if (!best_corr)
+ return -ENOTSUPP;
- pr_info("NAND device: Manufacturer ID: 0x%02x, Chip ID: 0x%02x (%s %s),"
- " %dMiB, page size: %d, OOB size: %d\n",
- *maf_id, *dev_id, nand_manuf_ids[maf_idx].name,
- chip->onfi_version ? chip->onfi_params.model : type->name,
- (int)(chip->chipsize >> 20), mtd->writesize, mtd->oobsize);
+ chip->ecc.size = best_step;
+ chip->ecc.strength = best_strength;
+ chip->ecc.bytes = best_ecc_bytes;
- return type;
+ return 0;
}
/**
- * nand_of_parse_node - parse generic NAND properties
- * @mtd: MTD device structure
- * @np: Device node to read information from
+ * nand_ecc_choose_conf - Set the ECC strength and ECC step size
+ * @chip: nand chip info structure
+ * @caps: ECC engine caps info structure
+ * @oobavail: OOB size that the ECC engine can use
+ *
+ * Choose the ECC configuration according to the following logic:
*
- * This parses device tree properties generic to NAND controllers and fills in
- * the various fields in struct nand_chip.
+ * 1. If both ECC step size and ECC strength are already set (usually by DT)
+ * then check if it is supported by this controller.
+ * 2. If NAND_ECC_MAXIMIZE is set, then select maximum ECC strength.
+ * 3. Otherwise, try to match the ECC step size and ECC strength closest
+ * to the chip's requirement. If the available OOB size can't fit the chip's
+ * requirement, then fall back to the maximum ECC step size and ECC strength.
+ *
+ * On success, the chosen ECC settings are set.
*/
-void nand_of_parse_node(struct mtd_info *mtd, struct device_node *np)
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail)
{
- struct nand_chip *chip = mtd_to_nand(mtd);
- int ecc_strength, ecc_size;
+ struct mtd_info *mtd = nand_to_mtd(chip);
- if (!IS_ENABLED(CONFIG_OFDEVICE))
- return;
+ if (WARN_ON(oobavail < 0 || oobavail > mtd->oobsize))
+ return -EINVAL;
- ecc_strength = of_get_nand_ecc_strength(np);
- ecc_size = of_get_nand_ecc_step_size(np);
+ if (chip->ecc.size && chip->ecc.strength)
+ return nand_check_ecc_caps(chip, caps, oobavail);
- if (ecc_strength >= 0)
- chip->ecc.strength = ecc_strength;
+ if (chip->ecc.options & NAND_ECC_MAXIMIZE)
+ return nand_maximize_ecc(chip, caps, oobavail);
+
+ if (!nand_match_ecc_req(chip, caps, oobavail))
+ return 0;
- if (ecc_size >= 0)
- chip->ecc.size = ecc_size;
+ return nand_maximize_ecc(chip, caps, oobavail);
}
+EXPORT_SYMBOL_GPL(nand_ecc_choose_conf);
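
For illustration only (not part of this patch; the foo_* names and the ECC-bytes
formula are made up): a controller driver typically describes the step sizes and
strengths it supports in a struct nand_ecc_caps and calls nand_ecc_choose_conf()
from its ->attach_chip() hook, roughly like this:

static int foo_calc_ecc_bytes(int step_size, int strength)
{
	/* hypothetical BCH cost: 13 (512B steps) or 14 (1024B steps) bits per corrected bit */
	return DIV_ROUND_UP(strength * (step_size == 512 ? 13 : 14), 8);
}

static const int foo_strengths[] = { 4, 8, 16 };

static const struct nand_ecc_step_info foo_stepinfos[] = {
	{ .stepsize = 512,  .strengths = foo_strengths, .nstrengths = ARRAY_SIZE(foo_strengths) },
	{ .stepsize = 1024, .strengths = foo_strengths, .nstrengths = ARRAY_SIZE(foo_strengths) },
};

static const struct nand_ecc_caps foo_ecc_caps = {
	.stepinfos = foo_stepinfos,
	.nstepinfos = ARRAY_SIZE(foo_stepinfos),
	.calc_ecc_bytes = foo_calc_ecc_bytes,
};

static int foo_attach_chip(struct nand_chip *chip)
{
	struct mtd_info *mtd = nand_to_mtd(chip);

	/* leave 2 OOB bytes to the bad block marker (driver specific) */
	return nand_ecc_choose_conf(chip, &foo_ecc_caps, mtd->oobsize - 2);
}

nand_check_ecc_caps(), nand_match_ecc_req() and nand_maximize_ecc() above then
pick a (step size, strength) pair that fits into the advertised OOB space.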
-/**
- * nand_scan_ident - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: number of chips to scan for
- * @table: alternative NAND ID table
+/*
+ * Check if the chip configuration meets the datasheet requirements.
+ *
+ * If our configuration corrects A bits per B bytes and the minimum
+ * required correction level is X bits per Y bytes, then we must ensure
+ * both of the following are true:
*
- * This is the first phase of the normal nand_scan() function. It reads the
- * flash ID and sets up MTD fields accordingly.
+ * (1) A / B >= X / Y
+ * (2) A >= X
*
- * The mtd->owner field must be set to the module of the caller.
+ * Requirement (1) ensures we can correct for the required bitflip density.
+ * Requirement (2) ensures we can correct even when all bitflips are clumped
+ * in the same sector.
*/
-int nand_scan_ident(struct nand_chip *chip, int maxchips,
- struct nand_flash_dev *table)
+static bool nand_ecc_strength_good(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int i, busw, nand_maf_id, nand_dev_id;
- struct nand_flash_dev *type;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int corr, ds_corr;
- /* Get buswidth to select the correct functions */
- busw = chip->options & NAND_BUSWIDTH_16;
- /* Set the default functions */
- nand_set_defaults(chip, busw);
+ if (ecc->size == 0 || chip->base.eccreq.step_size == 0)
+ /* Not enough information */
+ return true;
- /* Read the flash type */
- type = nand_get_flash_type(chip, busw,
- &nand_maf_id, &nand_dev_id, table);
+ /*
+ * We get the number of corrected bits per page to compare
+ * the correction density.
+ */
+ corr = (mtd->writesize * ecc->strength) / ecc->size;
+ ds_corr = (mtd->writesize * chip->base.eccreq.strength) /
+ chip->base.eccreq.step_size;
- if (IS_ERR(type)) {
- if (!(chip->options & NAND_SCAN_SILENT_NODEV))
- pr_warn("No NAND device found\n");
- chip->legacy.select_chip(chip, -1);
- return PTR_ERR(type);
- }
+ return corr >= ds_corr && ecc->strength >= chip->base.eccreq.strength;
+}
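
A worked example of the two checks above (illustrative numbers only):

/*
 * chip requires X = 8 bits per Y = 512 bytes, page size 4096 bytes
 *   -> ds_corr = 4096 * 8 / 512   = 64 correctable bits per page
 * config A: A = 16 bits per B = 1024 bytes
 *   -> corr = 4096 * 16 / 1024 = 64 >= 64 and 16 >= 8  -> strong enough
 * config B: A = 4 bits per B = 256 bytes
 *   -> corr = 4096 * 4 / 256   = 64 >= 64 but 4 < 8    -> rejected, since
 *      the required 8 bitflips could all land in a single 256-byte step
 */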
+
+static int rawnand_erase(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ unsigned int eb = nanddev_pos_to_row(nand, pos);
+ int ret;
- chip->legacy.select_chip(chip, -1);
+ eb >>= nand->rowconv.eraseblock_addr_shift;
- /* Check for a chip array */
- for (i = 1; i < maxchips; i++) {
- chip->legacy.select_chip(chip, i);
- /* See comment in nand_get_flash_type for reset */
- chip->legacy.cmdfunc(chip, NAND_CMD_RESET, -1, -1);
- /* Send the command for reading device ID */
- chip->legacy.cmdfunc(chip, NAND_CMD_READID, 0x00, -1);
- /* Read manufacturer and device IDs */
- if (nand_maf_id != chip->legacy.read_byte(chip) ||
- nand_dev_id != chip->legacy.read_byte(chip)) {
- chip->legacy.select_chip(chip, -1);
- break;
- }
- chip->legacy.select_chip(chip, -1);
- }
- if (i > 1)
- pr_info("%d NAND chips detected\n", i);
+ nand_select_target(chip, pos->target);
+ ret = nand_erase_op(chip, eb);
+ nand_deselect_target(chip);
- /* Store the number of chips and calc total size for mtd */
- chip->numchips = i;
- mtd->size = i * chip->chipsize;
+ return ret;
+}
- return 0;
+static int rawnand_markbad(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+
+ return nand_markbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+}
+
+static bool rawnand_isbad(struct nand_device *nand, const struct nand_pos *pos)
+{
+ struct nand_chip *chip = container_of(nand, struct nand_chip,
+ base);
+ int ret;
+
+ nand_select_target(chip, pos->target);
+ ret = nand_isbad_bbm(chip, nanddev_pos_to_offs(nand, pos));
+ nand_deselect_target(chip);
+
+ return ret;
}
-EXPORT_SYMBOL(nand_scan_ident);
+static const struct nand_ops rawnand_ops = {
+ .erase = rawnand_erase,
+ .markbad = rawnand_markbad,
+ .isbad = rawnand_isbad,
+};
/**
- * nand_scan_tail - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
+ * nand_scan_tail - Scan for the NAND device
+ * @chip: NAND chip object
*
* This is the second phase of the normal nand_scan() function. It fills out
* all the uninitialized function pointers with the defaults and scans for a
@@ -3561,218 +5486,224 @@ EXPORT_SYMBOL(nand_scan_ident);
int nand_scan_tail(struct nand_chip *chip)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ struct nand_ecc_ctrl *ecc = &chip->ecc;
+ int ret, i;
/* New bad blocks should be marked in OOB, flash-based BBT, or both */
- BUG_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
- !(chip->bbt_options & NAND_BBT_USE_FLASH));
+ if (WARN_ON((chip->bbt_options & NAND_BBT_NO_OOB_BBM) &&
+ !(chip->bbt_options & NAND_BBT_USE_FLASH))) {
+ return -EINVAL;
+ }
- if (!(chip->options & NAND_OWN_BUFFERS))
- chip->buffers = kmalloc(sizeof(*chip->buffers), GFP_KERNEL);
- if (!chip->buffers)
+ chip->data_buf = kmalloc(mtd->writesize + mtd->oobsize, GFP_KERNEL);
+ if (!chip->data_buf)
return -ENOMEM;
+ /*
+ * FIXME: some NAND manufacturer drivers expect the first die to be
+ * selected when manufacturer->init() is called. They should be fixed
+ * to explicitly select the relevant die when interacting with the NAND
+ * chip.
+ */
+ nand_select_target(chip, 0);
+ ret = nand_manufacturer_init(chip);
+ nand_deselect_target(chip);
+ if (ret)
+ goto err_free_buf;
+
/* Set the internal oob buffer location, just after the page data */
- chip->oob_poi = chip->buffers->databuf + mtd->writesize;
+ chip->oob_poi = chip->data_buf + mtd->writesize;
/*
* If no default placement scheme is given, select an appropriate one.
*/
- if (!chip->ecc.layout && (chip->ecc.mode != NAND_ECC_SOFT_BCH)) {
+ if (!mtd->ooblayout &&
+ !(ecc->mode == NAND_ECC_SOFT && ecc->algo == NAND_ECC_BCH)) {
switch (mtd->oobsize) {
case 8:
- chip->ecc.layout = &nand_oob_8;
- break;
case 16:
- chip->ecc.layout = &nand_oob_16;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_sp_ops);
break;
case 64:
- chip->ecc.layout = &nand_oob_64;
- break;
case 128:
- chip->ecc.layout = &nand_oob_128;
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_hamming_ops);
break;
default:
- pr_warn("No oob scheme defined for oobsize %d\n",
- mtd->oobsize);
- BUG();
+ /*
+ * Expose the whole OOB area to users if ECC_NONE
+ * is passed. We could do that for all kinds of
+ * ->oobsize, but we must keep the old large/small
+ * page with ECC layout when ->oobsize <= 128 for
+ * compatibility reasons.
+ */
+ if (ecc->mode == NAND_ECC_NONE) {
+ mtd_set_ooblayout(mtd,
+ &nand_ooblayout_lp_ops);
+ break;
+ }
+
+ WARN(1, "No oob scheme defined for oobsize %d\n",
+ mtd->oobsize);
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
}
}
- if (!chip->write_page)
- chip->write_page = nand_write_page;
-
/*
* Check ECC mode, default to software if 3byte/512byte hardware ECC is
* selected and we have 256 byte pagesize fallback to software ECC
*/
- switch (chip->ecc.mode) {
-#ifdef CONFIG_NAND_ECC_HW_OOB_FIRST
- case NAND_ECC_HW_OOB_FIRST:
- /* Similar to NAND_ECC_HW, but a separate read_page handle */
- if (!chip->ecc.calculate || !chip->ecc.correct ||
- !chip->ecc.hwctl) {
- pr_warn("No ECC functions supplied; hardware ECC not possible\n");
- BUG();
- }
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc_oob_first;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_hwecc;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_std;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.read_subpage)
- chip->ecc.read_subpage = nand_read_subpage;
- if (!chip->ecc.write_subpage)
- chip->ecc.write_subpage = nand_write_subpage_hwecc;
- break;
-#endif
-#ifdef CONFIG_NAND_ECC_HW
+ switch (ecc->mode) {
case NAND_ECC_HW:
/* Use standard hwecc read page function? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_hwecc;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_hwecc;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_std;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.read_subpage)
- chip->ecc.read_subpage = nand_read_subpage;
- if (!chip->ecc.write_subpage)
- chip->ecc.write_subpage = nand_write_subpage_hwecc;
- break;
-#endif
-#ifdef CONFIG_NAND_ECC_HW_SYNDROME
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_hwecc;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_hwecc;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
+ if (!ecc->read_subpage)
+ ecc->read_subpage = nand_read_subpage;
+ if (!ecc->write_subpage && ecc->hwctl && ecc->calculate)
+ ecc->write_subpage = nand_write_subpage_hwecc;
case NAND_ECC_HW_SYNDROME:
+ if ((!ecc->calculate || !ecc->correct || !ecc->hwctl) &&
+ (!ecc->read_page ||
+ ecc->read_page == nand_read_page_hwecc ||
+ !ecc->write_page ||
+ ecc->write_page == nand_write_page_hwecc)) {
+ WARN(1, "No ECC functions supplied; hardware ECC not possible\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
/* Use standard syndrome read/write page function? */
- if (!chip->ecc.read_page)
- chip->ecc.read_page = nand_read_page_syndrome;
- if (!chip->ecc.write_page)
- chip->ecc.write_page = nand_write_page_syndrome;
- if (!chip->ecc.read_page_raw)
- chip->ecc.read_page_raw = nand_read_page_raw_syndrome;
- if (!chip->ecc.write_page_raw)
- chip->ecc.write_page_raw = nand_write_page_raw_syndrome;
- if (!chip->ecc.read_oob)
- chip->ecc.read_oob = nand_read_oob_syndrome;
- if (!chip->ecc.write_oob)
- chip->ecc.write_oob = nand_write_oob_syndrome;
- break;
-#endif
-#ifdef CONFIG_NAND_ECC_SOFT
- case NAND_ECC_SOFT:
- chip->ecc.calculate = nand_calculate_ecc;
- chip->ecc.correct = nand_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_subpage = nand_read_subpage;
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.write_oob = nand_write_oob_std;
- if (!chip->ecc.size)
- chip->ecc.size = 256;
- chip->ecc.bytes = 3;
- chip->ecc.strength = 1;
- break;
-#endif
-#ifdef CONFIG_NAND_ECC_BCH
- case NAND_ECC_SOFT_BCH:
- if (!mtd_nand_has_bch()) {
- pr_warn("CONFIG_MTD_ECC_BCH not enabled\n");
- BUG();
+ if (!ecc->read_page)
+ ecc->read_page = nand_read_page_syndrome;
+ if (!ecc->write_page)
+ ecc->write_page = nand_write_page_syndrome;
+ if (!ecc->read_page_raw)
+ ecc->read_page_raw = nand_read_page_raw_syndrome;
+ if (!ecc->write_page_raw)
+ ecc->write_page_raw = nand_write_page_raw_syndrome;
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_syndrome;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_syndrome;
+
+ if (mtd->writesize >= ecc->size) {
+ if (!ecc->strength) {
+ WARN(1, "Driver must set ecc.strength when using hardware ECC\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ break;
}
- chip->ecc.calculate = nand_bch_calculate_ecc;
- chip->ecc.correct = nand_bch_correct_data;
- chip->ecc.read_page = nand_read_page_swecc;
- chip->ecc.read_subpage = nand_read_subpage;
- chip->ecc.write_page = nand_write_page_swecc;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.write_oob = nand_write_oob_std;
- /*
- * Board driver should supply ecc.size and ecc.strength values
- * to select how many bits are correctable. Otherwise, default
- * to 4 bits for large page devices.
- */
- if (!chip->ecc.size && (mtd->oobsize >= 64)) {
- chip->ecc.size = 512;
- chip->ecc.strength = 4;
+ pr_warn("%d byte HW ECC not possible on %d byte page size, fallback to SW ECC\n",
+ ecc->size, mtd->writesize);
+ ecc->mode = NAND_ECC_SOFT;
+ ecc->algo = NAND_ECC_HAMMING;
+ case NAND_ECC_SOFT:
+ ret = nand_set_ecc_soft_ops(chip);
+ if (ret) {
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
}
+ break;
- /* See nand_bch_init() for details. */
- chip->ecc.bytes = 0;
- chip->ecc.priv = nand_bch_init(chip);
- if (!chip->ecc.priv) {
- pr_warn("BCH ECC initialization failed!\n");
- BUG();
+ case NAND_ECC_ON_DIE:
+ if (!ecc->read_page || !ecc->write_page) {
+ WARN(1, "No ECC functions supplied; on-die ECC not possible\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
}
+ if (!ecc->read_oob)
+ ecc->read_oob = nand_read_oob_std;
+ if (!ecc->write_oob)
+ ecc->write_oob = nand_write_oob_std;
break;
-#endif
-#ifdef CONFIG_NAND_ECC_NONE
+
case NAND_ECC_NONE:
pr_warn("NAND_ECC_NONE selected by board driver. This is not recommended!\n");
- chip->ecc.read_page = nand_read_page_raw;
- chip->ecc.write_page = nand_write_page_raw;
- chip->ecc.read_oob = nand_read_oob_std;
- chip->ecc.read_page_raw = nand_read_page_raw;
- chip->ecc.write_page_raw = nand_write_page_raw;
- chip->ecc.write_oob = nand_write_oob_std;
- chip->ecc.size = mtd->writesize;
- chip->ecc.bytes = 0;
- chip->ecc.strength = 0;
+ ecc->read_page = nand_read_page_raw;
+ ecc->write_page = nand_write_page_raw;
+ ecc->read_oob = nand_read_oob_std;
+ ecc->read_page_raw = nand_read_page_raw;
+ ecc->write_page_raw = nand_write_page_raw;
+ ecc->write_oob = nand_write_oob_std;
+ ecc->size = mtd->writesize;
+ ecc->bytes = 0;
+ ecc->strength = 0;
break;
-#endif
+
default:
- pr_warn("Invalid NAND_ECC_MODE %d\n", chip->ecc.mode);
- BUG();
+ WARN(1, "Invalid NAND_ECC_MODE %d\n", ecc->mode);
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+
+ if (ecc->correct || ecc->calculate) {
+ ecc->calc_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ ecc->code_buf = kmalloc(mtd->oobsize, GFP_KERNEL);
+ if (!ecc->calc_buf || !ecc->code_buf) {
+ ret = -ENOMEM;
+ goto err_nand_manuf_cleanup;
+ }
}
/* For many systems, the standard OOB write also works for raw */
- if (!chip->ecc.read_oob_raw)
- chip->ecc.read_oob_raw = chip->ecc.read_oob;
- if (!chip->ecc.write_oob_raw)
- chip->ecc.write_oob_raw = chip->ecc.write_oob;
+ if (!ecc->read_oob_raw)
+ ecc->read_oob_raw = ecc->read_oob;
+ if (!ecc->write_oob_raw)
+ ecc->write_oob_raw = ecc->write_oob;
- /*
- * The number of bytes available for a client to place data into
- * the out of band area.
- */
- chip->ecc.layout->oobavail = 0;
- for (i = 0; chip->ecc.layout->oobfree[i].length
- && i < ARRAY_SIZE(chip->ecc.layout->oobfree); i++)
- chip->ecc.layout->oobavail +=
- chip->ecc.layout->oobfree[i].length;
- mtd->oobavail = chip->ecc.layout->oobavail;
+ /* propagate ecc info to mtd_info */
+ mtd->ecc_strength = ecc->strength;
+ mtd->ecc_step_size = ecc->size;
/*
* Set the number of read / write steps for one page depending on ECC
* mode.
*/
- chip->ecc.steps = mtd->writesize / chip->ecc.size;
- if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
- pr_warn("Invalid ECC parameters\n");
- BUG();
+ ecc->steps = mtd->writesize / ecc->size;
+ if (ecc->steps * ecc->size != mtd->writesize) {
+ WARN(1, "Invalid ECC parameters\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
+ }
+ ecc->total = ecc->steps * ecc->bytes;
+ if (ecc->total > mtd->oobsize) {
+ WARN(1, "Total number of ECC bytes exceeded oobsize\n");
+ ret = -EINVAL;
+ goto err_nand_manuf_cleanup;
}
- chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;
+
+ /*
+ * The number of bytes available for a client to place data into
+ * the out of band area.
+ */
+ ret = mtd_ooblayout_count_freebytes(mtd);
+ if (ret < 0)
+ ret = 0;
+
+ mtd->oobavail = ret;
+
+ /* ECC sanity check: warn if it's too weak */
+ if (!nand_ecc_strength_good(chip))
+ pr_warn("WARNING: %s: the ECC used on your system (%db/%dB) is too weak compared to the one required by the NAND chip (%db/%dB)\n",
+ mtd->name, chip->ecc.strength, chip->ecc.size,
+ chip->base.eccreq.strength,
+ chip->base.eccreq.step_size);
/* Allow subpage writes up to ecc.steps. Not possible for MLC flash */
if (!(chip->options & NAND_NO_SUBPAGE_WRITE) && nand_is_slc(chip)) {
- switch (chip->ecc.steps) {
+ switch (ecc->steps) {
case 2:
mtd->subpage_sft = 1;
break;
@@ -3785,36 +5716,39 @@ int nand_scan_tail(struct nand_chip *chip)
}
chip->subpagesize = mtd->writesize >> mtd->subpage_sft;
- /* Initialize state */
- chip->state = FL_READY;
-
/* Invalidate the pagebuffer reference */
- chip->pagebuf = -1;
+ chip->pagecache.page = -1;
/* Large page NAND with SOFT_ECC should support subpage reads */
- if ((chip->ecc.mode == NAND_ECC_SOFT) && (chip->page_shift > 9))
- chip->options |= NAND_SUBPAGE_READ;
+ switch (ecc->mode) {
+ case NAND_ECC_SOFT:
+ if (chip->page_shift > 9)
+ chip->options |= NAND_SUBPAGE_READ;
+ break;
+
+ default:
+ break;
+ }
+
+ ret = nanddev_init(&chip->base, &rawnand_ops, mtd->owner);
+ if (ret)
+ goto err_nand_manuf_cleanup;
+
+ /* Adjust the MTD_CAP_ flags when NAND_ROM is set. */
+ if (chip->options & NAND_ROM)
+ mtd->flags = MTD_CAP_ROM;
/* Fill in remaining MTD driver data */
- mtd->type = MTD_NANDFLASH;
- mtd->flags = (chip->options & NAND_ROM) ? MTD_CAP_ROM :
- MTD_CAP_NANDFLASH;
mtd->_erase = nand_erase;
- mtd->_read = nand_read;
- mtd->_write = nand_write;
mtd->_read_oob = nand_read_oob;
mtd->_write_oob = nand_write_oob;
mtd->_sync = nand_sync;
- mtd->_lock = NULL;
- mtd->_unlock = NULL;
+ mtd->_lock = nand_lock;
+ mtd->_unlock = nand_unlock;
mtd->_block_isbad = nand_block_isbad;
mtd->_block_markbad = nand_block_markbad;
mtd->_block_markgood = nand_block_markgood;
- mtd->writebufsize = mtd->writesize;
- /* propagate ecc info to mtd_info */
- mtd->ecclayout = chip->ecc.layout;
- mtd->ecc_strength = chip->ecc.strength;
/*
* Initialize bitflip_threshold to its default prior scan_bbt() call.
* scan_bbt() might invoke mtd_read(), thus bitflip_threshold must be
@@ -3823,82 +5757,139 @@ int nand_scan_tail(struct nand_chip *chip)
if (!mtd->bitflip_threshold)
mtd->bitflip_threshold = DIV_ROUND_UP(mtd->ecc_strength * 3, 4);
+ /* Find the fastest data interface for this chip */
+ ret = nand_choose_interface_config(chip);
+ if (ret)
+ goto err_nanddev_cleanup;
+
+ /* Enter fastest possible mode on all dies. */
+ for (i = 0; i < nanddev_ntargets(&chip->base); i++) {
+ ret = nand_setup_interface(chip, i);
+ if (ret)
+ goto err_free_interface_config;
+ }
+
/* Check, if we should skip the bad block table scan */
if (chip->options & NAND_SKIP_BBTSCAN)
return 0;
- if (!IS_ENABLED(CONFIG_NAND_BBT))
- return 0;
-
/* Build bad block table */
- return chip->scan_bbt(chip);
+ ret = nand_create_bbt(chip);
+ if (ret)
+ goto err_free_interface_config;
+
+ return 0;
+
+err_free_interface_config:
+ kfree(chip->best_interface_config);
+
+err_nanddev_cleanup:
+ nanddev_cleanup(&chip->base);
+
+err_nand_manuf_cleanup:
+ nand_manufacturer_cleanup(chip);
+
+err_free_buf:
+ kfree(chip->data_buf);
+ kfree(ecc->code_buf);
+ kfree(ecc->calc_buf);
+
+ return ret;
+}
+
+static int nand_attach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->attach_chip)
+ return chip->controller->ops->attach_chip(chip);
+
+ return 0;
+}
+
+static void nand_detach(struct nand_chip *chip)
+{
+ if (chip->controller->ops && chip->controller->ops->detach_chip)
+ chip->controller->ops->detach_chip(chip);
}
-EXPORT_SYMBOL(nand_scan_tail);
/**
- * nand_scan - [NAND Interface] Scan for the NAND device
- * @mtd: MTD device structure
- * @maxchips: number of chips to scan for
+ * nand_scan_with_ids - [NAND Interface] Scan for the NAND device
+ * @chip: NAND chip object
+ * @maxchips: number of chips to scan for.
+ * @ids: optional flash IDs table
*
* This fills out all the uninitialized function pointers with the defaults.
* The flash ID is read and the mtd/chip structures are filled with the
- * appropriate values. The mtd->owner field must be set to the module of the
- * caller.
+ * appropriate values.
*/
-int nand_scan(struct nand_chip *chip, int maxchips)
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int maxchips,
+ struct nand_flash_dev *ids)
{
int ret;
- ret = nand_scan_ident(chip, maxchips, NULL);
- if (!ret)
- ret = nand_scan_tail(chip);
+ if (!maxchips)
+ return -EINVAL;
+
+ ret = nand_scan_ident(chip, maxchips, ids);
+ if (ret)
+ return ret;
+
+ ret = nand_attach(chip);
+ if (ret)
+ goto cleanup_ident;
+
+ ret = nand_scan_tail(chip);
+ if (ret)
+ goto detach_chip;
+
+ return 0;
+
+detach_chip:
+ nand_detach(chip);
+cleanup_ident:
+ nand_scan_ident_cleanup(chip);
+
return ret;
}
-EXPORT_SYMBOL(nand_scan);
+EXPORT_SYMBOL(nand_scan_with_ids);
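
As a usage sketch (hypothetical driver, foo_* names made up; the
nand_controller_ops structure is assumed to come from the rawnand.h imported by
this patch): ECC configuration that depends on the detected chip now lives in
->attach_chip(), and registration pairs nand_scan_with_ids() with nand_cleanup()
on the error path:

static int foo_attach_chip(struct nand_chip *chip)
{
	/* Runs after nand_scan_ident(): chip geometry and ONFI data are known. */
	chip->ecc.mode = NAND_ECC_HW;
	chip->ecc.size = 512;
	chip->ecc.strength = 8;

	/*
	 * A real driver must also provide ecc.bytes plus either
	 * hwctl/calculate/correct or its own read_page/write_page,
	 * otherwise nand_scan_tail() above rejects the configuration.
	 */
	return 0;
}

static const struct nand_controller_ops foo_controller_ops = {
	.attach_chip = foo_attach_chip,
};

static int foo_register(struct nand_chip *chip)
{
	int ret;

	/* chip->controller is assumed to already point at the driver's controller */
	chip->controller->ops = &foo_controller_ops;

	/* ident -> ->attach_chip() -> nand_scan_tail(), cleaned up on error */
	ret = nand_scan_with_ids(chip, 1, NULL);
	if (ret)
		return ret;

	ret = add_mtd_nand_device(nand_to_mtd(chip), "nand");
	if (ret)
		nand_cleanup(chip);

	return ret;
}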
/**
- * nand_release - [NAND Interface] Free resources held by the NAND device
- * @mtd: MTD device structure
+ * nand_cleanup - [NAND Interface] Free resources held by the NAND device
+ * @chip: NAND chip object
*/
-void nand_release(struct nand_chip *chip)
+void nand_cleanup(struct nand_chip *chip)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
-
- if (chip->ecc.mode == NAND_ECC_SOFT_BCH)
+ if (chip->ecc.mode == NAND_ECC_SOFT &&
+ chip->ecc.algo == NAND_ECC_BCH)
nand_bch_free((struct nand_bch_control *)chip->ecc.priv);
- del_mtd_device(mtd);
+ nanddev_cleanup(&chip->base);
/* Free bad block table memory */
kfree(chip->bbt);
- if (!(chip->options & NAND_OWN_BUFFERS))
- kfree(chip->buffers);
+ kfree(chip->data_buf);
+ kfree(chip->ecc.code_buf);
+ kfree(chip->ecc.calc_buf);
/* Free bad block descriptor memory */
if (chip->badblock_pattern && chip->badblock_pattern->options
& NAND_BBT_DYNAMICSTRUCT)
kfree(chip->badblock_pattern);
-}
-EXPORT_SYMBOL_GPL(nand_release);
-
-static int mtd_set_erasebad(struct param_d *param, void *priv)
-{
- struct mtd_info *mtd = priv;
- if (!mtd->p_allow_erasebad) {
- mtd->allow_erasebad = false;
- return 0;
- }
+ /* Free the data interface */
+ kfree(chip->best_interface_config);
- if (!mtd->allow_erasebad)
- dev_warn(&mtd->dev,
- "Allowing to erase bad blocks. This may be dangerous!\n");
+ /* Free manufacturer priv data. */
+ nand_manufacturer_cleanup(chip);
- mtd->allow_erasebad = true;
+ /* Free controller specific allocations after chip identification */
+ nand_detach(chip);
- return 0;
+ /* Free identification phase allocations */
+ nand_scan_ident_cleanup(chip);
}
+EXPORT_SYMBOL_GPL(nand_cleanup);
+
enum bbt_type {
BBT_TYPE_NONE = 0,
BBT_TYPE_FLASHBASED,
@@ -3930,9 +5921,27 @@ static int mtd_get_bbt_type(struct param_d *p, void *priv)
return 0;
}
-int add_mtd_nand_device(struct nand_chip *chip, char *devname)
+static int mtd_set_erasebad(struct param_d *param, void *priv)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
+ struct mtd_info *mtd = priv;
+
+ if (!mtd->p_allow_erasebad) {
+ mtd->allow_erasebad = false;
+ return 0;
+ }
+
+ if (!mtd->allow_erasebad)
+ dev_warn(&mtd->dev,
+ "Allowing to erase bad blocks. This may be dangerous!\n");
+
+ mtd->allow_erasebad = true;
+
+ return 0;
+}
+
+int add_mtd_nand_device(struct mtd_info *mtd, char *devname)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
int ret;
ret = add_mtd_device(mtd, devname, DEVICE_ID_DYNAMIC);
@@ -3945,8 +5954,7 @@ int add_mtd_nand_device(struct nand_chip *chip, char *devname)
dev_add_param_enum(&mtd->dev, "bbt", NULL, mtd_get_bbt_type,
&chip->bbt_type, bbt_type_strings,
- ARRAY_SIZE(bbt_type_strings),
- mtd);
+ ARRAY_SIZE(bbt_type_strings), mtd);
dev_add_param_uint32_ro(&mtd->dev, "ecc.bytes", &chip->ecc.bytes, "%u");
dev_add_param_uint32_ro(&mtd->dev, "ecc.strength", &chip->ecc.strength, "%u");
@@ -3954,3 +5962,8 @@ int add_mtd_nand_device(struct nand_chip *chip, char *devname)
return ret;
}
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Steven J. Hill <sjhill at realitydiluted.com>");
+MODULE_AUTHOR("Thomas Gleixner <tglx at linutronix.de>");
+MODULE_DESCRIPTION("Generic NAND flash driver code");
diff --git a/drivers/mtd/nand/nand_bbt.c b/drivers/mtd/nand/nand_bbt.c
index 7d04c89d76..f582799636 100644
--- a/drivers/mtd/nand/nand_bbt.c
+++ b/drivers/mtd/nand/nand_bbt.c
@@ -1,15 +1,10 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * drivers/mtd/nand_bbt.c
- *
* Overview:
* Bad block table support for the NAND driver
*
* Copyright © 2004 Thomas Gleixner (tglx at linutronix.de)
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* Description:
*
* When nand_scan_bbt is called, then it tries to find the bad block table
@@ -56,18 +51,16 @@
* Following assumptions are made:
* - bbts start at a page boundary, if autolocated on a block boundary
* - the space necessary for a bbt in FLASH does not exceed a block boundary
- *
*/
-#include <common.h>
#include <linux/types.h>
#include <linux/mtd/mtd.h>
-#include <linux/mtd/nand.h>
-#include <linux/mtd/nand_ecc.h>
+#include <linux/mtd/bbm.h>
#include <linux/bitops.h>
-#include <clock.h>
-#include <errno.h>
-#include <malloc.h>
+#include <linux/export.h>
+#include <linux/string.h>
+
+#include "internals.h"
#define BBT_BLOCK_GOOD 0x00
#define BBT_BLOCK_WORN 0x01
@@ -164,7 +157,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
/**
* read_bbt - [GENERIC] Read the bad block table starting from page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @page: the starting page
* @num: the number of bbt descriptors to read
@@ -174,7 +167,7 @@ static u32 add_marker_len(struct nand_bbt_descr *td)
* Read the bad block table starting from page.
*/
static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
- struct nand_bbt_descr *td, int offs)
+ struct nand_bbt_descr *td, int offs)
{
struct mtd_info *mtd = nand_to_mtd(this);
int res, ret = 0, i, j, act = 0;
@@ -232,7 +225,11 @@ static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
mtd->ecc_stats.bbtblocks++;
continue;
}
- pr_debug("nand_read_bbt: bad block at 0x%012llx\n",
+ /*
+ * Leave it for now; once this has matured we can
+ * move this message to pr_debug.
+ */
+ pr_info("nand_read_bbt: bad block at 0x%012llx\n",
(loff_t)(offs + act) <<
this->bbt_erase_shift);
/* Factory marked bad or worn out? */
@@ -253,7 +250,7 @@ static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
/**
* read_abs_bbt - [GENERIC] Read the bad block table starting at a given page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @chip: read the table for a specific chip, -1 read all chips; applies only if
@@ -262,22 +259,23 @@ static int read_bbt(struct nand_chip *this, uint8_t *buf, int page, int num,
* Read the bad block table for all chips starting at a given page. We assume
* that the bbt bits are in consecutive order.
*/
-static int read_abs_bbt(struct nand_chip *this, uint8_t *buf, struct nand_bbt_descr *td,
- int chip)
+static int read_abs_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td, int chip)
{
struct mtd_info *mtd = nand_to_mtd(this);
+ u64 targetsize = nanddev_target_size(&this->base);
int res = 0, i;
if (td->options & NAND_BBT_PERCHIP) {
int offs = 0;
- for (i = 0; i < this->numchips; i++) {
+ for (i = 0; i < nanddev_ntargets(&this->base); i++) {
if (chip == -1 || chip == i)
res = read_bbt(this, buf, td->pages[i],
- this->chipsize >> this->bbt_erase_shift,
+ targetsize >> this->bbt_erase_shift,
td, offs);
if (res)
return res;
- offs += this->chipsize >> this->bbt_erase_shift;
+ offs += targetsize >> this->bbt_erase_shift;
}
} else {
res = read_bbt(this, buf, td->pages[0],
@@ -290,7 +288,7 @@ static int read_abs_bbt(struct nand_chip *this, uint8_t *buf, struct nand_bbt_de
/* BBT marker is in the first page, no OOB */
static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
- struct nand_bbt_descr *td)
+ struct nand_bbt_descr *td)
{
struct mtd_info *mtd = nand_to_mtd(this);
size_t retlen;
@@ -305,7 +303,7 @@ static int scan_read_data(struct nand_chip *this, uint8_t *buf, loff_t offs,
/**
* scan_read_oob - [GENERIC] Scan data+OOB region to buffer
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @offs: offset at which to scan
* @len: length of data region to read
@@ -346,7 +344,7 @@ static int scan_read_oob(struct nand_chip *this, uint8_t *buf, loff_t offs,
}
static int scan_read(struct nand_chip *this, uint8_t *buf, loff_t offs,
- size_t len, struct nand_bbt_descr *td)
+ size_t len, struct nand_bbt_descr *td)
{
if (td->options & NAND_BBT_NO_OOB)
return scan_read_data(this, buf, offs, td);
@@ -383,7 +381,7 @@ static u32 bbt_get_ver_offs(struct nand_chip *this, struct nand_bbt_descr *td)
/**
* read_abs_bbts - [GENERIC] Read the bad block table(s) for all chips starting at a given page
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -399,7 +397,7 @@ static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
/* Read the primary version, if available */
if (td->options & NAND_BBT_VERSION) {
scan_read(this, buf, (loff_t)td->pages[0] << this->page_shift,
- mtd->writesize, td);
+ mtd->writesize, td);
td->version[0] = buf[bbt_get_ver_offs(this, td)];
pr_info("Bad block table at page %d, version 0x%02X\n",
td->pages[0], td->version[0]);
@@ -408,7 +406,7 @@ static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
/* Read the mirror version, if available */
if (md && (md->options & NAND_BBT_VERSION)) {
scan_read(this, buf, (loff_t)md->pages[0] << this->page_shift,
- mtd->writesize, md);
+ mtd->writesize, md);
md->version[0] = buf[bbt_get_ver_offs(this, md)];
pr_info("Bad block table at page %d, version 0x%02X\n",
md->pages[0], md->version[0]);
@@ -417,11 +415,12 @@ static void read_abs_bbts(struct nand_chip *this, uint8_t *buf,
/* Scan a given block partially */
static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
- loff_t offs, uint8_t *buf, int numpages)
+ loff_t offs, uint8_t *buf)
{
struct mtd_info *mtd = nand_to_mtd(this);
+
struct mtd_oob_ops ops;
- int j, ret;
+ int ret, page_offset;
ops.ooblen = mtd->oobsize;
ops.oobbuf = buf;
@@ -429,12 +428,15 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
ops.datbuf = NULL;
ops.mode = MTD_OPS_PLACE_OOB;
- for (j = 0; j < numpages; j++) {
+ page_offset = nand_bbm_get_next_page(this, 0);
+
+ while (page_offset >= 0) {
/*
* Read the full oob until read_oob is fixed to handle single
* byte reads for 16 bit buswidth.
*/
- ret = mtd_read_oob(mtd, offs, &ops);
+ ret = mtd_read_oob(mtd, offs + (page_offset * mtd->writesize),
+ &ops);
/* Ignore ECC errors when checking for BBM */
if (ret && !mtd_is_bitflip_or_eccerr(ret))
return ret;
@@ -442,14 +444,15 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
if (check_short_pattern(buf, bd))
return 1;
- offs += mtd->writesize;
+ page_offset = nand_bbm_get_next_page(this, page_offset + 1);
}
+
return 0;
}
/**
* create_bbt - [GENERIC] Create a bad block table by scanning the device
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
* @chip: create the table for a specific chip, -1 read all chips; applies only
@@ -459,45 +462,37 @@ static int scan_block_fast(struct nand_chip *this, struct nand_bbt_descr *bd,
* identify pattern.
*/
static int create_bbt(struct nand_chip *this, uint8_t *buf,
- struct nand_bbt_descr *bd, int chip)
+ struct nand_bbt_descr *bd, int chip)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
- int i, numblocks, numpages;
- int startblock;
+ int i, numblocks, startblock;
loff_t from;
pr_info("Scanning device for bad blocks\n");
- if (bd->options & NAND_BBT_SCAN2NDPAGE)
- numpages = 2;
- else
- numpages = 1;
-
if (chip == -1) {
numblocks = mtd->size >> this->bbt_erase_shift;
startblock = 0;
from = 0;
} else {
- if (chip >= this->numchips) {
+ if (chip >= nanddev_ntargets(&this->base)) {
pr_warn("create_bbt(): chipnr (%d) > available chips (%d)\n",
- chip + 1, this->numchips);
+ chip + 1, nanddev_ntargets(&this->base));
return -EINVAL;
}
- numblocks = this->chipsize >> this->bbt_erase_shift;
+ numblocks = targetsize >> this->bbt_erase_shift;
startblock = chip * numblocks;
numblocks += startblock;
from = (loff_t)startblock << this->bbt_erase_shift;
}
- if (this->bbt_options & NAND_BBT_SCANLASTPAGE)
- from += mtd->erasesize - (mtd->writesize * numpages);
-
for (i = startblock; i < numblocks; i++) {
int ret;
BUG_ON(bd->options & NAND_BBT_NO_OOB);
- ret = scan_block_fast(this, bd, from, buf, numpages);
+ ret = scan_block_fast(this, bd, from, buf);
if (ret < 0)
return ret;
@@ -515,7 +510,7 @@ static int create_bbt(struct nand_chip *this, uint8_t *buf,
/**
* search_bbt - [GENERIC] scan the device for a specific bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
*
@@ -528,8 +523,10 @@ static int create_bbt(struct nand_chip *this, uint8_t *buf,
*
* The bbt ident pattern resides in the oob area of the first page in a block.
*/
-static int search_bbt(struct nand_chip *this, uint8_t *buf, struct nand_bbt_descr *td)
+static int search_bbt(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *td)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
int i, chips;
int startblock, block, dir;
@@ -548,8 +545,8 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf, struct nand_bbt_desc
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- bbtblocks = this->chipsize >> this->bbt_erase_shift;
+ chips = nanddev_ntargets(&this->base);
+ bbtblocks = targetsize >> this->bbt_erase_shift;
startblock &= bbtblocks - 1;
} else {
chips = 1;
@@ -577,7 +574,7 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf, struct nand_bbt_desc
break;
}
}
- startblock += this->chipsize >> this->bbt_erase_shift;
+ startblock += targetsize >> this->bbt_erase_shift;
}
/* Check, if we found a bbt for each requested chip */
for (i = 0; i < chips; i++) {
@@ -592,7 +589,7 @@ static int search_bbt(struct nand_chip *this, uint8_t *buf, struct nand_bbt_desc
/**
* search_read_bbts - [GENERIC] scan the device for bad block table(s)
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -627,6 +624,7 @@ static void search_read_bbts(struct nand_chip *this, uint8_t *buf,
static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
struct nand_bbt_descr *md, int chip)
{
+ u64 targetsize = nanddev_target_size(&this->base);
int startblock, dir, page, numblocks, i;
/*
@@ -638,9 +636,9 @@ static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
return td->pages[chip] >>
(this->bbt_erase_shift - this->page_shift);
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
if (!(td->options & NAND_BBT_PERCHIP))
- numblocks *= this->numchips;
+ numblocks *= nanddev_ntargets(&this->base);
/*
* Automatic placement of the bad block table. Search direction
@@ -676,7 +674,7 @@ static int get_bbt_block(struct nand_chip *this, struct nand_bbt_descr *td,
/**
* mark_bbt_block_bad - Mark one of the block reserved for BBT bad
- * @mtd: the MTD device
+ * @this: the NAND device
* @td: the BBT description
* @chip: the CHIP selector
* @block: the BBT block to mark
@@ -696,7 +694,7 @@ static void mark_bbt_block_bad(struct nand_chip *this,
bbt_mark_entry(this, block, BBT_BLOCK_WORN);
to = (loff_t)block << this->bbt_erase_shift;
- res = this->legacy.block_markbad(this, to);
+ res = nand_markbad_bbm(this, to);
if (res)
pr_warn("nand_bbt: error %d while marking block %d bad\n",
res, block);
@@ -706,7 +704,7 @@ static void mark_bbt_block_bad(struct nand_chip *this,
/**
* write_bbt - [GENERIC] (Re)write the bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @buf: temporary buffer
* @td: descriptor for the bad block table
* @md: descriptor for the bad block table mirror
@@ -718,6 +716,7 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
struct nand_bbt_descr *td, struct nand_bbt_descr *md,
int chipsel)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
struct erase_info einfo;
int i, res, chip = 0;
@@ -738,10 +737,10 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
rcode = 0xff;
/* Write bad block table per chip rather than per device? */
if (td->options & NAND_BBT_PERCHIP) {
- numblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ numblocks = (int)(targetsize >> this->bbt_erase_shift);
/* Full device write or specific chip? */
if (chipsel == -1) {
- nrchips = this->numchips;
+ nrchips = nanddev_ntargets(&this->base);
} else {
nrchips = chipsel + 1;
chip = chipsel;
@@ -792,7 +791,7 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
/* Must we save the block contents? */
if (td->options & NAND_BBT_SAVECONTENT) {
/* Make it block aligned */
- to &= ~((loff_t)((1 << this->bbt_erase_shift) - 1));
+ to &= ~(((loff_t)1 << this->bbt_erase_shift) - 1);
len = 1 << this->bbt_erase_shift;
res = mtd_read(mtd, to, len, &retlen, buf);
if (res < 0) {
@@ -858,7 +857,6 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
}
memset(&einfo, 0, sizeof(einfo));
- einfo.mtd = mtd;
einfo.addr = to;
einfo.len = 1 << this->bbt_erase_shift;
res = nand_erase_nand(this, &einfo, 1);
@@ -870,8 +868,8 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
}
res = scan_write_bbt(this, to, len, buf,
- td->options & NAND_BBT_NO_OOB ? NULL :
- &buf[len]);
+ td->options & NAND_BBT_NO_OOB ?
+ NULL : &buf[len]);
if (res < 0) {
pr_warn("nand_bbt: error while writing BBT block %d\n",
res);
@@ -894,20 +892,23 @@ static int write_bbt(struct nand_chip *this, uint8_t *buf,
/**
* nand_memory_bbt - [GENERIC] create a memory based bad block table
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @bd: descriptor for the good/bad block search pattern
*
* The function creates a memory based bbt by scanning the device for
* manufacturer / software marked good / bad blocks.
*/
-static inline int nand_memory_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
+static inline int nand_memory_bbt(struct nand_chip *this,
+ struct nand_bbt_descr *bd)
{
- return create_bbt(this, this->buffers->databuf, bd, -1);
+ u8 *pagebuf = nand_get_data_buf(this);
+
+ return create_bbt(this, pagebuf, bd, -1);
}
/**
* check_create - [GENERIC] create and write bbt(s) if necessary
- * @mtd: MTD device structure
+ * @this: the NAND device
* @buf: temporary buffer
* @bd: descriptor for the good/bad block search pattern
*
@@ -916,7 +917,8 @@ static inline int nand_memory_bbt(struct nand_chip *this, struct nand_bbt_descr
* for the chip/device. Update is necessary if one of the tables is missing or
* the version nr. of one table is less than the other.
*/
-static int check_create(struct nand_chip *this, uint8_t *buf, struct nand_bbt_descr *bd)
+static int check_create(struct nand_chip *this, uint8_t *buf,
+ struct nand_bbt_descr *bd)
{
int i, chips, writeops, create, chipsel, res, res2;
struct nand_bbt_descr *td = this->bbt_td;
@@ -925,7 +927,7 @@ static int check_create(struct nand_chip *this, uint8_t *buf, struct nand_bbt_de
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP)
- chips = this->numchips;
+ chips = nanddev_ntargets(&this->base);
else
chips = 1;
@@ -1032,9 +1034,64 @@ static int check_create(struct nand_chip *this, uint8_t *buf, struct nand_bbt_de
return 0;
}
+/**
+ * nand_update_bbt - update bad block table(s)
+ * @this: the NAND device
+ * @offs: the offset of the newly marked block
+ *
+ * The function updates the bad block table(s).
+ */
+int nand_update_bbt(struct nand_chip *this, loff_t offs)
+{
+ struct mtd_info *mtd = nand_to_mtd(this);
+ int len, res = 0;
+ int chip, chipsel;
+ uint8_t *buf;
+ struct nand_bbt_descr *td = this->bbt_td;
+ struct nand_bbt_descr *md = this->bbt_md;
+
+ if (!this->bbt || !td)
+ return -EINVAL;
+
+ /* Allocate a temporary buffer for one eraseblock incl. oob */
+ len = (1 << this->bbt_erase_shift);
+ len += (len >> this->page_shift) * mtd->oobsize;
+ buf = kmalloc(len, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Do we have a bbt per chip? */
+ if (td->options & NAND_BBT_PERCHIP) {
+ chip = (int)(offs >> this->chip_shift);
+ chipsel = chip;
+ } else {
+ chip = 0;
+ chipsel = -1;
+ }
+
+ td->version[chip]++;
+ if (md)
+ md->version[chip]++;
+
+ /* Write the bad block table to the device? */
+ if (td->options & NAND_BBT_WRITE) {
+ res = write_bbt(this, buf, td, md, chipsel);
+ if (res < 0)
+ goto out;
+ }
+ /* Write the mirror bad block table to the device? */
+ if (md && (md->options & NAND_BBT_WRITE)) {
+ res = write_bbt(this, buf, md, td, chipsel);
+ }
+
+ out:
+ kfree(buf);
+ return res;
+}
+
/**
* mark_bbt_regions - [GENERIC] mark the bad block table regions
- * @mtd: MTD device structure
+ * @this: the NAND device
* @td: bad block table descriptor
*
* The bad block table regions are marked as "bad" to prevent accidental
@@ -1042,16 +1099,18 @@ static int check_create(struct nand_chip *this, uint8_t *buf, struct nand_bbt_de
*/
static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
{
+ u64 targetsize = nanddev_target_size(&this->base);
+ struct mtd_info *mtd = nand_to_mtd(this);
int i, j, chips, block, nrblocks, update;
uint8_t oldval;
/* Do we have a bbt per chip? */
if (td->options & NAND_BBT_PERCHIP) {
- chips = this->numchips;
- nrblocks = (int)(this->chipsize >> this->bbt_erase_shift);
+ chips = nanddev_ntargets(&this->base);
+ nrblocks = (int)(targetsize >> this->bbt_erase_shift);
} else {
chips = 1;
- nrblocks = (int)(this->mtd.size >> this->bbt_erase_shift);
+ nrblocks = (int)(mtd->size >> this->bbt_erase_shift);
}
for (i = 0; i < chips; i++) {
@@ -1093,7 +1152,7 @@ static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
/**
* verify_bbt_descr - verify the bad block description
- * @mtd: MTD device structure
+ * @this: the NAND device
* @bd: the table to verify
*
* This functions performs a few sanity checks on the bad block description
@@ -1101,6 +1160,7 @@ static void mark_bbt_region(struct nand_chip *this, struct nand_bbt_descr *td)
*/
static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
{
+ u64 targetsize = nanddev_target_size(&this->base);
struct mtd_info *mtd = nand_to_mtd(this);
u32 pattern_len;
u32 bits;
@@ -1129,7 +1189,7 @@ static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
}
if (bd->options & NAND_BBT_PERCHIP)
- table_size = this->chipsize >> this->bbt_erase_shift;
+ table_size = targetsize >> this->bbt_erase_shift;
else
table_size = mtd->size >> this->bbt_erase_shift;
table_size >>= 3;
@@ -1141,7 +1201,7 @@ static void verify_bbt_descr(struct nand_chip *this, struct nand_bbt_descr *bd)
/**
* nand_scan_bbt - [NAND Interface] scan, find, read and maybe create bad block table(s)
- * @mtd: MTD device structure
+ * @this: the NAND device
* @bd: descriptor for the good/bad block search pattern
*
* The function checks, if a bad block table(s) is/are already available. If
@@ -1159,7 +1219,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
struct nand_bbt_descr *td = this->bbt_td;
struct nand_bbt_descr *md = this->bbt_md;
- len = mtd->size >> (this->bbt_erase_shift + 2);
+ len = (mtd->size >> (this->bbt_erase_shift + 2)) ? : 1;
/*
* Allocate memory (2bit per block) and clear the memory bad block
* table.
@@ -1169,13 +1229,13 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
return -ENOMEM;
/*
- * If no primary table decriptor is given, scan the device to build a
+ * If no primary table descriptor is given, scan the device to build a
* memory based bad block table.
*/
if (!td) {
if ((res = nand_memory_bbt(this, bd))) {
pr_err("nand_bbt: can't scan flash and build the RAM-based BBT\n");
- goto err;
+ goto err_free_bbt;
}
return 0;
}
@@ -1188,7 +1248,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
buf = vmalloc(len);
if (!buf) {
res = -ENOMEM;
- goto err;
+ goto err_free_bbt;
}
/* Is the bbt at a given page? */
@@ -1201,7 +1261,7 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
res = check_create(this, buf, bd);
if (res)
- goto err;
+ goto err_free_buf;
/* Prevent the bbt regions from erasing / writing */
mark_bbt_region(this, td);
@@ -1211,66 +1271,14 @@ static int nand_scan_bbt(struct nand_chip *this, struct nand_bbt_descr *bd)
vfree(buf);
return 0;
-err:
+err_free_buf:
+ vfree(buf);
+err_free_bbt:
kfree(this->bbt);
this->bbt = NULL;
return res;
}
-/**
- * nand_update_bbt - update bad block table(s)
- * @mtd: MTD device structure
- * @offs: the offset of the newly marked block
- *
- * The function updates the bad block table(s).
- */
-int nand_update_bbt(struct nand_chip *this, loff_t offs)
-{
- int len, res = 0;
- int chip, chipsel;
- uint8_t *buf;
- struct nand_bbt_descr *td = this->bbt_td;
- struct nand_bbt_descr *md = this->bbt_md;
-
- if (!this->bbt || !td)
- return -EINVAL;
-
- /* Allocate a temporary buffer for one eraseblock incl. oob */
- len = (1 << this->bbt_erase_shift);
- len += (len >> this->page_shift) * this->mtd.oobsize;
- buf = kmalloc(len, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
-
- /* Do we have a bbt per chip? */
- if (td->options & NAND_BBT_PERCHIP) {
- chip = (int)(offs >> this->chip_shift);
- chipsel = chip;
- } else {
- chip = 0;
- chipsel = -1;
- }
-
- td->version[chip]++;
- if (md)
- md->version[chip]++;
-
- /* Write the bad block table to the device? */
- if (td->options & NAND_BBT_WRITE) {
- res = write_bbt(this, buf, td, md, chipsel);
- if (res < 0)
- goto out;
- }
- /* Write the mirror bad block table to the device? */
- if (md && (md->options & NAND_BBT_WRITE)) {
- res = write_bbt(this, buf, md, td, chipsel);
- }
-
- out:
- kfree(buf);
- return res;
-}
-
/*
* Define some generic bad / good block scan pattern which are used
* while scanning a device for factory marked good / bad blocks.
@@ -1351,13 +1359,13 @@ static int nand_create_badblock_pattern(struct nand_chip *this)
}
/**
- * nand_default_bbt - [NAND Interface] Select a default bad block table for the device
- * @mtd: MTD device structure
+ * nand_create_bbt - [NAND Interface] Select a default bad block table for the device
+ * @this: NAND chip object
*
* This function selects the default bad block table support for the device and
* calls the nand_scan_bbt function.
*/
-int nand_default_bbt(struct nand_chip *this)
+int nand_create_bbt(struct nand_chip *this)
{
int ret;
@@ -1386,17 +1394,30 @@ int nand_default_bbt(struct nand_chip *this)
return nand_scan_bbt(this, this->badblock_pattern);
}
+EXPORT_SYMBOL(nand_create_bbt);
+
+/**
+ * nand_isreserved_bbt - [NAND Interface] Check if a block is reserved
+ * @this: NAND chip object
+ * @offs: offset in the device
+ */
+int nand_isreserved_bbt(struct nand_chip *this, loff_t offs)
+{
+ int block;
+
+ block = (int)(offs >> this->bbt_erase_shift);
+ return bbt_get_entry(this, block) == BBT_BLOCK_RESERVED;
+}
/**
* nand_isbad_bbt - [NAND Interface] Check if a block is bad
- * @mtd: MTD device structure
+ * @this: NAND chip object
* @offs: offset in the device
* @allowbbt: allow access to bad block table region
*/
int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
{
- int block;
- uint8_t res;
+ int block, res;
block = (int)(offs >> this->bbt_erase_shift);
res = bbt_get_entry(this, block);
@@ -1404,7 +1425,7 @@ int nand_isbad_bbt(struct nand_chip *this, loff_t offs, int allowbbt)
pr_debug("nand_isbad_bbt(): bbt info for offs 0x%08x: (block %d) 0x%02x\n",
(unsigned int)offs, block, res);
- switch ((int)res) {
+ switch (res) {
case BBT_BLOCK_GOOD:
return 0;
case BBT_BLOCK_WORN:
@@ -1450,7 +1471,3 @@ int nand_markgood_bbt(struct nand_chip *this, loff_t offs)
{
return nand_mark_bbt(this, offs, BBT_BLOCK_GOOD);
}
-
-EXPORT_SYMBOL(nand_scan_bbt);
-EXPORT_SYMBOL(nand_default_bbt);
-EXPORT_SYMBOL_GPL(nand_update_bbt);
diff --git a/drivers/mtd/nand/nand_bch.c b/drivers/mtd/nand/nand_bch.c
index 42ffa1b21a..0d636d9608 100644
--- a/drivers/mtd/nand/nand_bch.c
+++ b/drivers/mtd/nand/nand_bch.c
@@ -1,19 +1,9 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
* This file provides ECC correction for more than 1 bit per block of data,
* using binary BCH codes. It relies on the generic BCH library lib/bch.c.
*
* Copyright © 2011 Ivan Djelic <ivan.djelic at parrot.com>
- *
- * This file is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 or (at your option) any
- * later version.
- *
- * This file is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
*/
#include <common.h>
@@ -22,6 +12,7 @@
#include <linux/kernel.h>
#include <linux/bitops.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <linux/mtd/nand_bch.h>
#include <linux/bch.h>
@@ -29,20 +20,18 @@
/**
* struct nand_bch_control - private NAND BCH control structure
* @bch: BCH control structure
- * @ecclayout: private ecc layout for this BCH configuration
* @errloc: error location array
* @eccmask: XOR ecc mask, allows erased pages to be decoded as valid
*/
struct nand_bch_control {
struct bch_control *bch;
- struct nand_ecclayout ecclayout;
unsigned int *errloc;
unsigned char *eccmask;
};
/**
* nand_bch_calculate_ecc - [NAND Interface] Calculate ECC for data block
- * @mtd: MTD block structure
+ * @chip: NAND chip object
* @buf: input buffer with raw data
* @code: output buffer with ECC
*/
@@ -65,7 +54,7 @@ EXPORT_SYMBOL(nand_bch_calculate_ecc);
/**
* nand_bch_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
+ * @chip: NAND chip object
* @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
@@ -92,8 +81,8 @@ int nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
errloc[i]);
}
} else if (count < 0) {
- printk(KERN_ERR "ecc unrecoverable error\n");
- count = -1;
+ pr_err("ecc unrecoverable error\n");
+ count = -EBADMSG;
}
return count;
}
@@ -115,11 +104,10 @@ EXPORT_SYMBOL(nand_bch_correct_data);
* @eccsize = 512 (thus, m=13 is the smallest integer such that 2^m-1 > 512*8)
* @eccbytes = 7 (7 bytes are required to store m*t = 13*4 = 52 bits)
*/
-struct nand_bch_control *nand_bch_init(struct nand_chip *nand)
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
- struct mtd_info *mtd = nand_to_mtd(nand);
+ struct nand_chip *nand = mtd_to_nand(mtd);
unsigned int m, t, eccsteps, i;
- struct nand_ecclayout *layout = nand->ecc.layout;
struct nand_bch_control *nbc = NULL;
unsigned char *erased_page;
unsigned int eccsize = nand->ecc.size;
@@ -132,7 +120,7 @@ struct nand_bch_control *nand_bch_init(struct nand_chip *nand)
}
if (!eccsize || !eccbytes) {
- printk(KERN_WARNING "ecc parameters not supplied\n");
+ pr_warn("ecc parameters not supplied\n");
goto fail;
}
@@ -149,55 +137,42 @@ struct nand_bch_control *nand_bch_init(struct nand_chip *nand)
/* verify that eccbytes has the expected value */
if (nbc->bch->ecc_bytes != eccbytes) {
- printk(KERN_WARNING "invalid eccbytes %u, should be %u\n",
- eccbytes, nbc->bch->ecc_bytes);
+ pr_warn("invalid eccbytes %u, should be %u\n",
+ eccbytes, nbc->bch->ecc_bytes);
goto fail;
}
eccsteps = mtd->writesize/eccsize;
- /* if no ecc placement scheme was provided, build one */
- if (!layout) {
-
- /* handle large page devices only */
- if (mtd->oobsize < 64) {
- printk(KERN_WARNING "must provide an oob scheme for "
- "oobsize %d\n", mtd->oobsize);
- goto fail;
- }
-
- layout = &nbc->ecclayout;
- layout->eccbytes = eccsteps*eccbytes;
-
- /* reserve 2 bytes for bad block marker */
- if (layout->eccbytes+2 > mtd->oobsize) {
- printk(KERN_WARNING "no suitable oob scheme available "
- "for oobsize %d eccbytes %u\n", mtd->oobsize,
- eccbytes);
- goto fail;
- }
- /* put ecc bytes at oob tail */
- for (i = 0; i < layout->eccbytes; i++)
- layout->eccpos[i] = mtd->oobsize-layout->eccbytes+i;
-
- layout->oobfree[0].offset = 2;
- layout->oobfree[0].length = mtd->oobsize-2-layout->eccbytes;
-
- nand->ecc.layout = layout;
+ /* Check that we have an oob layout description. */
+ if (!mtd->ooblayout) {
+ pr_warn("missing oob scheme");
+ goto fail;
}
/* sanity checks */
if (8*(eccsize+eccbytes) >= (1 << m)) {
- printk(KERN_WARNING "eccsize %u is too large\n", eccsize);
+ pr_warn("eccsize %u is too large\n", eccsize);
goto fail;
}
- if (layout->eccbytes != (eccsteps*eccbytes)) {
- printk(KERN_WARNING "invalid ecc layout\n");
+
+ /*
+ * ecc->steps and ecc->total might be used by mtd->ooblayout->ecc(),
+ * which is called by mtd_ooblayout_count_eccbytes().
+ * Make sure they are properly initialized before calling
+ * mtd_ooblayout_count_eccbytes().
+ * FIXME: we should probably rework the sequencing in nand_scan_tail()
+ * to avoid setting those fields twice.
+ */
+ nand->ecc.steps = eccsteps;
+ nand->ecc.total = eccsteps * eccbytes;
+ if (mtd_ooblayout_count_eccbytes(mtd) != (eccsteps*eccbytes)) {
+ pr_warn("invalid ecc layout\n");
goto fail;
}
- nbc->eccmask = kmalloc(eccbytes, GFP_KERNEL);
- nbc->errloc = kmalloc(t*sizeof(*nbc->errloc), GFP_KERNEL);
+ nbc->eccmask = kzalloc(eccbytes, GFP_KERNEL);
+ nbc->errloc = kmalloc_array(t, sizeof(*nbc->errloc), GFP_KERNEL);
if (!nbc->eccmask || !nbc->errloc)
goto fail;
/*
@@ -208,7 +183,6 @@ struct nand_bch_control *nand_bch_init(struct nand_chip *nand)
goto fail;
memset(erased_page, 0xff, eccsize);
- memset(nbc->eccmask, 0, eccbytes);
encode_bch(nbc->bch, erased_page, eccsize, nbc->eccmask);
kfree(erased_page);
diff --git a/drivers/mtd/nand/nand_denali.c b/drivers/mtd/nand/nand_denali.c
index bffcbd8a7f..49028bf082 100644
--- a/drivers/mtd/nand/nand_denali.c
+++ b/drivers/mtd/nand/nand_denali.c
@@ -23,6 +23,7 @@
#include <malloc.h>
#include <init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <io.h>
#include <clock.h>
@@ -888,6 +889,8 @@ static bool is_erased(uint8_t *buf, int len)
static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
uint32_t irq_status, unsigned int *max_bitflips)
{
+ struct nand_chip *chip = &denali->nand;
+ struct mtd_info *mtd = nand_to_mtd(chip);
bool check_erased_page = false;
unsigned int bitflips = 0;
@@ -933,7 +936,7 @@ static bool handle_ecc(struct denali_nand_info *denali, uint8_t *buf,
err_device;
/* correct the ECC error */
buf[offset] ^= err_correction_value;
- denali->nand.mtd.ecc_stats.corrected++;
+ mtd->ecc_stats.corrected++;
bitflips++;
}
} else {
@@ -995,7 +998,8 @@ static void denali_setup_dma(struct denali_nand_info *denali, int op)
* writes a page. user specifies type, and this function handles the
* configuration details.
*/
-static int write_page(struct nand_chip *chip, const uint8_t *buf, bool raw_xfer)
+static int write_page(struct nand_chip *chip, const uint8_t *buf, bool raw_xfer,
+ int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = nand_to_denali(chip);
@@ -1012,6 +1016,8 @@ static int write_page(struct nand_chip *chip, const uint8_t *buf, bool raw_xfer)
*/
setup_ecc_for_xfer(denali, !raw_xfer, raw_xfer);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
/* copy buffer into DMA buffer */
memcpy(denali->buf.buf, buf, mtd->writesize);
@@ -1041,7 +1047,7 @@ static int write_page(struct nand_chip *chip, const uint8_t *buf, bool raw_xfer)
denali_enable_dma(denali, false);
dma_sync_single_for_cpu(addr, size, DMA_TO_DEVICE);
- return 0;
+ return nand_prog_page_end_op(chip);
}
/* NAND core entry points */
@@ -1052,13 +1058,13 @@ static int write_page(struct nand_chip *chip, const uint8_t *buf, bool raw_xfer)
* by write_page above.
*/
static int denali_write_page(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
/*
* for regular page writes, we let HW handle all the ECC
* data written to the device.
*/
- return write_page(chip, buf, false);
+ return write_page(chip, buf, false, page);
}
/*
@@ -1067,13 +1073,13 @@ static int denali_write_page(struct nand_chip *chip,
* write_page() function above.
*/
static int denali_write_page_raw(struct nand_chip *chip,
- const uint8_t *buf, int oob_required)
+ const uint8_t *buf, int oob_required, int page)
{
/*
* for raw page writes, we want to disable ECC and simply write
* whatever data is in the buffer.
*/
- return write_page(chip, buf, true);
+ return write_page(chip, buf, true, page);
}
static int denali_write_oob(struct nand_chip *chip, int page)
@@ -1104,15 +1110,10 @@ static int denali_read_page(struct nand_chip *chip,
(INTR_STATUS__ECC_TRANSACTION_DONE | INTR_STATUS__ECC_ERR);
bool check_erased_page = false;
- if (page != denali->page) {
- dev_err(denali->dev,
- "IN %s: page %d is not equal to denali->page %d",
- __func__, page, denali->page);
- BUG();
- }
-
setup_ecc_for_xfer(denali, true, false);
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
denali_enable_dma(denali, true);
dma_sync_single_for_device(addr, size, DMA_FROM_DEVICE);
@@ -1161,6 +1162,8 @@ static int denali_read_page_raw(struct nand_chip *chip,
size_t size = mtd->writesize + mtd->oobsize;
uint32_t irq_mask = INTR_STATUS__DMA_CMD_COMP;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
if (page != denali->page) {
dev_err(denali->dev,
"IN %s: page %d is not equal to denali->page %d",
@@ -1227,11 +1230,8 @@ static int denali_waitfunc(struct nand_chip *chip)
static void denali_cmdfunc(struct nand_chip *chip, unsigned int cmd, int col,
int page)
{
- struct mtd_info *mtd = nand_to_mtd(chip);
struct denali_nand_info *denali = nand_to_denali(chip);
uint32_t addr, id;
- uint32_t pages_per_block;
- uint32_t block;
int i;
switch (cmd) {
@@ -1296,18 +1296,6 @@ static void denali_cmdfunc(struct nand_chip *chip, unsigned int cmd, int col,
case NAND_CMD_READOOB:
/* TODO: Read OOB data */
break;
- case NAND_CMD_UNLOCK1:
- pages_per_block = mtd->erasesize / mtd->writesize;
- block = page / pages_per_block;
- addr = (uint32_t)MODE_10 | (block * pages_per_block);
- index_addr(denali, addr, 0x10);
- break;
- case NAND_CMD_UNLOCK2:
- pages_per_block = mtd->erasesize / mtd->writesize;
- block = (page+pages_per_block-1) / pages_per_block;
- addr = (uint32_t)MODE_10 | (block * pages_per_block);
- index_addr(denali, addr, 0x11);
- break;
case NAND_CMD_ERASE1:
case NAND_CMD_ERASE2:
addr = MODE_10 | BANK(denali->flash_bank) | page;
@@ -1376,9 +1364,10 @@ static void denali_drv_init(struct denali_nand_info *denali)
int denali_init(struct denali_nand_info *denali)
{
struct nand_chip *nand = &denali->nand;
- struct mtd_info *mtd = &nand->mtd;
+ struct mtd_info *mtd = nand_to_mtd(nand);
int ret = 0;
uint32_t val;
+ struct nand_ecclayout *ecclayout;
if (denali->platform == INTEL_CE4100) {
/*
@@ -1477,7 +1466,7 @@ int denali_init(struct denali_nand_info *denali)
ECC_SECTOR_SIZE)))) {
/* if MLC OOB size is large enough, use 15bit ECC*/
nand->ecc.strength = 15;
- nand->ecc.layout = &nand_15bit_oob;
+ ecclayout = &nand_15bit_oob;
nand->ecc.bytes = ECC_15BITS;
iowrite32(15, denali->flash_reg + ECC_CORRECTION);
} else if (mtd->oobsize < (denali->bbtskipbytes +
@@ -1487,24 +1476,26 @@ int denali_init(struct denali_nand_info *denali)
goto failed_req_irq;
} else {
nand->ecc.strength = 8;
- nand->ecc.layout = &nand_8bit_oob;
+ ecclayout = &nand_8bit_oob;
nand->ecc.bytes = ECC_8BITS;
iowrite32(8, denali->flash_reg + ECC_CORRECTION);
}
- nand->ecc.layout->oobfree[0].offset =
- denali->bbtskipbytes + nand->ecc.layout->eccbytes;
- nand->ecc.layout->oobfree[0].length =
- mtd->oobsize - nand->ecc.layout->eccbytes -
+ ecclayout->oobfree[0].offset =
+ denali->bbtskipbytes + ecclayout->eccbytes;
+ ecclayout->oobfree[0].length =
+ mtd->oobsize - ecclayout->eccbytes -
denali->bbtskipbytes;
+ mtd_set_ecclayout(mtd, ecclayout);
+
/*
* Let driver know the total blocks number and how many blocks
* contained by each nand chip. blksperchip will help driver to
* know how many blocks is taken by FW.
*/
denali->totalblks = mtd->size >> nand->phys_erase_shift;
- denali->blksperchip = denali->totalblks / nand->numchips;
+ denali->blksperchip = denali->totalblks;
/* override the default read operations */
nand->ecc.size = ECC_SECTOR_SIZE;
@@ -1534,7 +1525,7 @@ int denali_init(struct denali_nand_info *denali)
goto failed_req_irq;
}
- return add_mtd_nand_device(nand, "nand");
+ return add_mtd_nand_device(mtd, "nand");
failed_req_irq:
denali_irq_cleanup(denali->irq, denali);
diff --git a/drivers/mtd/nand/nand_ecc.c b/drivers/mtd/nand/nand_ecc.c
index e488130f73..58fb335bb4 100644
--- a/drivers/mtd/nand/nand_ecc.c
+++ b/drivers/mtd/nand/nand_ecc.c
@@ -1,194 +1,483 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
/*
- * This file contains an ECC algorithm from Toshiba that detects and
- * corrects 1 bit errors in a 256 byte block of data.
+ * This file contains an ECC algorithm that detects and corrects 1 bit
+ * errors in a 256 byte block of data.
*
- * drivers/mtd/nand/nand_ecc.c
+ * Copyright © 2008 Koninklijke Philips Electronics NV.
+ * Author: Frans Meulenbroeks
*
- * Copyright (C) 2000-2004 Steven J. Hill (sjhill at realitydiluted.com)
- * Toshiba America Electronics Components, Inc.
+ * Completely replaces the previous ECC implementation which was written by:
+ * Steven J. Hill (sjhill at realitydiluted.com)
+ * Thomas Gleixner (tglx at linutronix.de)
*
- * Copyright (C) 2006 Thomas Gleixner <tglx at linutronix.de>
- *
- * $Id: nand_ecc.c,v 1.15 2005/11/07 11:14:30 gleixner Exp $
- *
- * This file is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 or (at your option) any
- * later version.
- *
- * This file is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * for more details.
- *
- * As a special exception, if other files instantiate templates or use
- * macros or inline functions from these files, or you compile these
- * files and link them with other works to produce a work based on these
- * files, these files do not by themselves cause the resulting work to be
- * covered by the GNU General Public License. However the source code for
- * these files must still be made available in accordance with section (3)
- * of the GNU General Public License.
- *
- * This exception does not invalidate any other reasons why a work based on
- * this file might be covered by the GNU General Public License.
+ * Information on how this algorithm works and how it was developed
+ * can be found in Documentation/driver-api/mtd/nand_ecc.rst
*/
#include <linux/types.h>
-#include <common.h>
-#include <errno.h>
+#include <linux/kernel.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
+#include <asm/byteorder.h>
+
+/*
+ * invparity is a 256 byte table that contains the odd parity
+ * for each byte. So if the number of bits in a byte is even,
+ * the array element is 1, and when the number of bits is odd
+ * the array element is 0.
+ */
+static const char invparity[256] = {
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 0, 1, 0, 1, 1, 0,
+ 1, 0, 0, 1, 0, 1, 1, 0, 0, 1, 1, 0, 1, 0, 0, 1
+};
+
+/*
+ * bitsperbyte contains the number of bits per byte
+ * this is only used for testing and repairing parity
+ * (a precalculated value slightly improves performance)
+ */
+static const char bitsperbyte[256] = {
+ 0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 1, 2, 2, 3, 2, 3, 3, 4, 2, 3, 3, 4, 3, 4, 4, 5,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 2, 3, 3, 4, 3, 4, 4, 5, 3, 4, 4, 5, 4, 5, 5, 6,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 3, 4, 4, 5, 4, 5, 5, 6, 4, 5, 5, 6, 5, 6, 6, 7,
+ 4, 5, 5, 6, 5, 6, 6, 7, 5, 6, 6, 7, 6, 7, 7, 8,
+};
/*
- * Pre-calculated 256-way 1 byte column parity
+ * addressbits is a lookup table to filter out the bits from the xor-ed
+ * ECC data that identify the faulty location.
+ * this is only used for repairing parity
+ * see the comments in nand_correct_data for more details
*/
-static const u_char nand_ecc_precalc_table[] = {
- 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00,
- 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
- 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
- 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
- 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
- 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
- 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
- 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
- 0x6a, 0x3f, 0x3c, 0x69, 0x33, 0x66, 0x65, 0x30, 0x30, 0x65, 0x66, 0x33, 0x69, 0x3c, 0x3f, 0x6a,
- 0x0f, 0x5a, 0x59, 0x0c, 0x56, 0x03, 0x00, 0x55, 0x55, 0x00, 0x03, 0x56, 0x0c, 0x59, 0x5a, 0x0f,
- 0x0c, 0x59, 0x5a, 0x0f, 0x55, 0x00, 0x03, 0x56, 0x56, 0x03, 0x00, 0x55, 0x0f, 0x5a, 0x59, 0x0c,
- 0x69, 0x3c, 0x3f, 0x6a, 0x30, 0x65, 0x66, 0x33, 0x33, 0x66, 0x65, 0x30, 0x6a, 0x3f, 0x3c, 0x69,
- 0x03, 0x56, 0x55, 0x00, 0x5a, 0x0f, 0x0c, 0x59, 0x59, 0x0c, 0x0f, 0x5a, 0x00, 0x55, 0x56, 0x03,
- 0x66, 0x33, 0x30, 0x65, 0x3f, 0x6a, 0x69, 0x3c, 0x3c, 0x69, 0x6a, 0x3f, 0x65, 0x30, 0x33, 0x66,
- 0x65, 0x30, 0x33, 0x66, 0x3c, 0x69, 0x6a, 0x3f, 0x3f, 0x6a, 0x69, 0x3c, 0x66, 0x33, 0x30, 0x65,
- 0x00, 0x55, 0x56, 0x03, 0x59, 0x0c, 0x0f, 0x5a, 0x5a, 0x0f, 0x0c, 0x59, 0x03, 0x56, 0x55, 0x00
+static const char addressbits[256] = {
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x00, 0x00, 0x01, 0x01, 0x00, 0x00, 0x01, 0x01,
+ 0x02, 0x02, 0x03, 0x03, 0x02, 0x02, 0x03, 0x03,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x04, 0x04, 0x05, 0x05, 0x04, 0x04, 0x05, 0x05,
+ 0x06, 0x06, 0x07, 0x07, 0x06, 0x06, 0x07, 0x07,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x08, 0x08, 0x09, 0x09, 0x08, 0x08, 0x09, 0x09,
+ 0x0a, 0x0a, 0x0b, 0x0b, 0x0a, 0x0a, 0x0b, 0x0b,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f,
+ 0x0c, 0x0c, 0x0d, 0x0d, 0x0c, 0x0c, 0x0d, 0x0d,
+ 0x0e, 0x0e, 0x0f, 0x0f, 0x0e, 0x0e, 0x0f, 0x0f
};
/**
- * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256-byte block
- * @mtd: MTD block structure
- * @dat: raw data
- * @ecc_code: buffer for ECC
+ * __nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @buf: input buffer with raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ * @code: output buffer with ECC
+ * @sm_order: Smart Media byte ordering
*/
-int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
- u_char *ecc_code)
+void __nand_calculate_ecc(const unsigned char *buf, unsigned int eccsize,
+ unsigned char *code, bool sm_order)
{
- uint8_t idx, reg1, reg2, reg3, tmp1, tmp2;
int i;
+ const uint32_t *bp = (uint32_t *)buf;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
+ uint32_t cur; /* current value in buffer */
+ /* rp0..rp15..rp17 are the various accumulated parities (per byte) */
+ uint32_t rp0, rp1, rp2, rp3, rp4, rp5, rp6, rp7;
+ uint32_t rp8, rp9, rp10, rp11, rp12, rp13, rp14, rp15, rp16;
+ uint32_t rp17 = 0;
+ uint32_t par; /* the cumulative parity for all data */
+ uint32_t tmppar; /* the cumulative parity for this iteration;
+ for rp12, rp14 and rp16 at the end of the
+ loop */
- /* Initialize variables */
- reg1 = reg2 = reg3 = 0;
+ par = 0;
+ rp4 = 0;
+ rp6 = 0;
+ rp8 = 0;
+ rp10 = 0;
+ rp12 = 0;
+ rp14 = 0;
+ rp16 = 0;
- /* Build up column parity */
- for(i = 0; i < 256; i++) {
- /* Get CP0 - CP5 from table */
- idx = nand_ecc_precalc_table[*dat++];
- reg1 ^= (idx & 0x3f);
+ /*
+ * The loop is unrolled a number of times;
+ * This avoids if statements to decide on which rp value to update
+ * Also we process the data by longwords.
+ * Note: passing unaligned data might give a performance penalty.
+ * It is assumed that the buffers are aligned.
+ * tmppar is the cumulative sum of this iteration.
+ * needed for calculating rp12, rp14, rp16 and par
+ * also used as a performance improvement for rp6, rp8 and rp10
+ */
+ for (i = 0; i < eccsize_mult << 2; i++) {
+ cur = *bp++;
+ tmppar = cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= tmppar;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= tmppar;
- /* All bit XOR = 1 ? */
- if (idx & 0x40) {
- reg3 ^= (uint8_t) i;
- reg2 ^= ~((uint8_t) i);
- }
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp10 ^= tmppar;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp8 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp8 ^= cur;
+
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp6 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+ rp4 ^= cur;
+ cur = *bp++;
+ tmppar ^= cur;
+
+ par ^= tmppar;
+ if ((i & 0x1) == 0)
+ rp12 ^= tmppar;
+ if ((i & 0x2) == 0)
+ rp14 ^= tmppar;
+ if (eccsize_mult == 2 && (i & 0x4) == 0)
+ rp16 ^= tmppar;
}
- /* Create non-inverted ECC code from line parity */
- tmp1 = (reg3 & 0x80) >> 0; /* B7 -> B7 */
- tmp1 |= (reg2 & 0x80) >> 1; /* B7 -> B6 */
- tmp1 |= (reg3 & 0x40) >> 1; /* B6 -> B5 */
- tmp1 |= (reg2 & 0x40) >> 2; /* B6 -> B4 */
- tmp1 |= (reg3 & 0x20) >> 2; /* B5 -> B3 */
- tmp1 |= (reg2 & 0x20) >> 3; /* B5 -> B2 */
- tmp1 |= (reg3 & 0x10) >> 3; /* B4 -> B1 */
- tmp1 |= (reg2 & 0x10) >> 4; /* B4 -> B0 */
-
- tmp2 = (reg3 & 0x08) << 4; /* B3 -> B7 */
- tmp2 |= (reg2 & 0x08) << 3; /* B3 -> B6 */
- tmp2 |= (reg3 & 0x04) << 3; /* B2 -> B5 */
- tmp2 |= (reg2 & 0x04) << 2; /* B2 -> B4 */
- tmp2 |= (reg3 & 0x02) << 2; /* B1 -> B3 */
- tmp2 |= (reg2 & 0x02) << 1; /* B1 -> B2 */
- tmp2 |= (reg3 & 0x01) << 1; /* B0 -> B1 */
- tmp2 |= (reg2 & 0x01) << 0; /* B7 -> B0 */
-
- /* Calculate final ECC code */
-#ifdef CONFIG_MTD_NAND_ECC_SMC
- ecc_code[0] = ~tmp2;
- ecc_code[1] = ~tmp1;
+ /*
+ * Handle the fact that we use longword operations: bring
+ * rp4..rp14..rp16 back to single byte entities by shifting and
+ * xoring. First fold the upper and lower 16 bits,
+ * then the upper and lower 8 bits.
+ */
+ rp4 ^= (rp4 >> 16);
+ rp4 ^= (rp4 >> 8);
+ rp4 &= 0xff;
+ rp6 ^= (rp6 >> 16);
+ rp6 ^= (rp6 >> 8);
+ rp6 &= 0xff;
+ rp8 ^= (rp8 >> 16);
+ rp8 ^= (rp8 >> 8);
+ rp8 &= 0xff;
+ rp10 ^= (rp10 >> 16);
+ rp10 ^= (rp10 >> 8);
+ rp10 &= 0xff;
+ rp12 ^= (rp12 >> 16);
+ rp12 ^= (rp12 >> 8);
+ rp12 &= 0xff;
+ rp14 ^= (rp14 >> 16);
+ rp14 ^= (rp14 >> 8);
+ rp14 &= 0xff;
+ if (eccsize_mult == 2) {
+ rp16 ^= (rp16 >> 16);
+ rp16 ^= (rp16 >> 8);
+ rp16 &= 0xff;
+ }
+
+ /*
+ * we also need to calculate the row parity for rp0..rp3
+ * This is present in par, because par is now
+ * rp3 rp3 rp2 rp2 in little endian and
+ * rp2 rp2 rp3 rp3 in big endian
+ * as well as
+ * rp1 rp0 rp1 rp0 in little endian and
+ * rp0 rp1 rp0 rp1 in big endian
+ * First calculate rp2 and rp3
+ */
+#ifdef __BIG_ENDIAN
+ rp2 = (par >> 16);
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
+ rp3 = par & 0xffff;
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
#else
- ecc_code[0] = ~tmp1;
- ecc_code[1] = ~tmp2;
+ rp3 = (par >> 16);
+ rp3 ^= (rp3 >> 8);
+ rp3 &= 0xff;
+ rp2 = par & 0xffff;
+ rp2 ^= (rp2 >> 8);
+ rp2 &= 0xff;
#endif
- ecc_code[2] = ((~reg1) << 2) | 0x03;
- return 0;
+ /* reduce par to 16 bits then calculate rp1 and rp0 */
+ par ^= (par >> 16);
+#ifdef __BIG_ENDIAN
+ rp0 = (par >> 8) & 0xff;
+ rp1 = (par & 0xff);
+#else
+ rp1 = (par >> 8) & 0xff;
+ rp0 = (par & 0xff);
+#endif
+
+ /* finally reduce par to 8 bits */
+ par ^= (par >> 8);
+ par &= 0xff;
+
+ /*
+ * and calculate rp5..rp15..rp17
+ * note that par = rp4 ^ rp5 and due to the commutative property
+ * of the ^ operator we can say:
+ * rp5 = (par ^ rp4);
+ * The & 0xff seems superfluous, but benchmarking learned that
+ * leaving it out gives slightly worse results. No idea why, probably
+ * it has to do with the way the pipeline in pentium is organized.
+ */
+ rp5 = (par ^ rp4) & 0xff;
+ rp7 = (par ^ rp6) & 0xff;
+ rp9 = (par ^ rp8) & 0xff;
+ rp11 = (par ^ rp10) & 0xff;
+ rp13 = (par ^ rp12) & 0xff;
+ rp15 = (par ^ rp14) & 0xff;
+ if (eccsize_mult == 2)
+ rp17 = (par ^ rp16) & 0xff;
+
+ /*
+ * Finally calculate the ECC bits.
+ * Again here it might seem that there are performance optimisations
+ * possible, but benchmarks showed that on the system this is developed
+ * the code below is the fastest
+ */
+ if (sm_order) {
+ code[0] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[1] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ } else {
+ code[1] = (invparity[rp7] << 7) | (invparity[rp6] << 6) |
+ (invparity[rp5] << 5) | (invparity[rp4] << 4) |
+ (invparity[rp3] << 3) | (invparity[rp2] << 2) |
+ (invparity[rp1] << 1) | (invparity[rp0]);
+ code[0] = (invparity[rp15] << 7) | (invparity[rp14] << 6) |
+ (invparity[rp13] << 5) | (invparity[rp12] << 4) |
+ (invparity[rp11] << 3) | (invparity[rp10] << 2) |
+ (invparity[rp9] << 1) | (invparity[rp8]);
+ }
+
+ if (eccsize_mult == 1)
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ 3;
+ else
+ code[2] =
+ (invparity[par & 0xf0] << 7) |
+ (invparity[par & 0x0f] << 6) |
+ (invparity[par & 0xcc] << 5) |
+ (invparity[par & 0x33] << 4) |
+ (invparity[par & 0xaa] << 3) |
+ (invparity[par & 0x55] << 2) |
+ (invparity[rp17] << 1) |
+ (invparity[rp16] << 0);
}
-EXPORT_SYMBOL(nand_calculate_ecc);
+EXPORT_SYMBOL(__nand_calculate_ecc);
-static inline int countbits(uint32_t byte)
+/**
+ * nand_calculate_ecc - [NAND Interface] Calculate 3-byte ECC for 256/512-byte
+ * block
+ * @chip: NAND chip object
+ * @buf: input buffer with raw data
+ * @code: output buffer with ECC
+ */
+int nand_calculate_ecc(struct nand_chip *chip, const unsigned char *buf,
+ unsigned char *code)
{
- int res = 0;
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ __nand_calculate_ecc(buf, chip->ecc.size, code, sm_order);
- for (;byte; byte >>= 1)
- res += byte & 0x01;
- return res;
+ return 0;
}
+EXPORT_SYMBOL(nand_calculate_ecc);
/**
- * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
- * @mtd: MTD block structure
- * @dat: raw data read from the chip
+ * __nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @buf: raw data read from the chip
* @read_ecc: ECC from the chip
* @calc_ecc: the ECC calculated from raw data
+ * @eccsize: data bytes per ECC step (256 or 512)
+ * @sm_order: Smart Media byte order
*
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for eccsize byte block
*/
-int nand_correct_data(struct nand_chip *chip, u_char *dat,
- u_char *read_ecc, u_char *calc_ecc)
+int __nand_correct_data(unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc,
+ unsigned int eccsize, bool sm_order)
{
- uint8_t s0, s1, s2;
+ unsigned char b0, b1, b2, bit_addr;
+ unsigned int byte_addr;
+ /* 256 or 512 bytes/ecc */
+ const uint32_t eccsize_mult = eccsize >> 8;
-#ifdef CONFIG_MTD_NAND_ECC_SMC
- s0 = calc_ecc[0] ^ read_ecc[0];
- s1 = calc_ecc[1] ^ read_ecc[1];
- s2 = calc_ecc[2] ^ read_ecc[2];
-#else
- s1 = calc_ecc[0] ^ read_ecc[0];
- s0 = calc_ecc[1] ^ read_ecc[1];
- s2 = calc_ecc[2] ^ read_ecc[2];
-#endif
- if ((s0 | s1 | s2) == 0)
- return 0;
-
- /* Check for a single bit error */
- if( ((s0 ^ (s0 >> 1)) & 0x55) == 0x55 &&
- ((s1 ^ (s1 >> 1)) & 0x55) == 0x55 &&
- ((s2 ^ (s2 >> 1)) & 0x54) == 0x54) {
-
- uint32_t byteoffs, bitnum;
+ /*
+ * b0 to b2 indicate which bit is faulty (if any)
+ * we might need the xor result more than once,
+ * so keep them in a local var
+ */
+ if (sm_order) {
+ b0 = read_ecc[0] ^ calc_ecc[0];
+ b1 = read_ecc[1] ^ calc_ecc[1];
+ } else {
+ b0 = read_ecc[1] ^ calc_ecc[1];
+ b1 = read_ecc[0] ^ calc_ecc[0];
+ }
- byteoffs = (s1 << 0) & 0x80;
- byteoffs |= (s1 << 1) & 0x40;
- byteoffs |= (s1 << 2) & 0x20;
- byteoffs |= (s1 << 3) & 0x10;
+ b2 = read_ecc[2] ^ calc_ecc[2];
- byteoffs |= (s0 >> 4) & 0x08;
- byteoffs |= (s0 >> 3) & 0x04;
- byteoffs |= (s0 >> 2) & 0x02;
- byteoffs |= (s0 >> 1) & 0x01;
+ /* check if there are any bitfaults */
- bitnum = (s2 >> 5) & 0x04;
- bitnum |= (s2 >> 4) & 0x02;
- bitnum |= (s2 >> 3) & 0x01;
+ /* repeated if statements are slightly more efficient than switch ... */
+ /* ordered in order of likelihood */
- dat[byteoffs] ^= (1 << bitnum);
+ if ((b0 | b1 | b2) == 0)
+ return 0; /* no error */
+ if ((((b0 ^ (b0 >> 1)) & 0x55) == 0x55) &&
+ (((b1 ^ (b1 >> 1)) & 0x55) == 0x55) &&
+ ((eccsize_mult == 1 && ((b2 ^ (b2 >> 1)) & 0x54) == 0x54) ||
+ (eccsize_mult == 2 && ((b2 ^ (b2 >> 1)) & 0x55) == 0x55))) {
+ /* single bit error */
+ /*
+ * rp17/rp15/13/11/9/7/5/3/1 indicate which byte is the faulty
+ * byte, cp 5/3/1 indicate the faulty bit.
+ * A lookup table (called addressbits) is used to filter
+ * the bits from the byte they are in.
+ * A marginal optimisation is possible by having three
+ * different lookup tables.
+ * One as we have now (for b0), one for b2
+ * (that would avoid the >> 1), and one for b1 (with all values
+ * << 4). However it was felt that introducing two more tables
+ * hardly justifies the gain.
+ *
+ * The b2 shift is there to get rid of the lowest two bits.
+ * We could also do addressbits[b2] >> 1 but for the
+ * performance it does not make any difference
+ */
+ if (eccsize_mult == 1)
+ byte_addr = (addressbits[b1] << 4) + addressbits[b0];
+ else
+ byte_addr = (addressbits[b2 & 0x3] << 8) +
+ (addressbits[b1] << 4) + addressbits[b0];
+ bit_addr = addressbits[b2 >> 2];
+ /* flip the bit */
+ buf[byte_addr] ^= (1 << bit_addr);
return 1;
- }
- if(countbits(s0 | ((uint32_t)s1 << 8) | ((uint32_t)s2 <<16)) == 1)
- return 1;
+ }
+ /* count nr of bits; use table lookup, faster than calculating it */
+ if ((bitsperbyte[b0] + bitsperbyte[b1] + bitsperbyte[b2]) == 1)
+ return 1; /* error in ECC data; no action needed */
+ pr_err("%s: uncorrectable ECC error\n", __func__);
return -EBADMSG;
}
+EXPORT_SYMBOL(__nand_correct_data);
+
+/**
+ * nand_correct_data - [NAND Interface] Detect and correct bit error(s)
+ * @chip: NAND chip object
+ * @buf: raw data read from the chip
+ * @read_ecc: ECC from the chip
+ * @calc_ecc: the ECC calculated from raw data
+ *
+ * Detect and correct a 1 bit error for 256/512 byte block
+ */
+int nand_correct_data(struct nand_chip *chip, unsigned char *buf,
+ unsigned char *read_ecc, unsigned char *calc_ecc)
+{
+ bool sm_order = chip->ecc.options & NAND_ECC_SOFT_HAMMING_SM_ORDER;
+
+ return __nand_correct_data(buf, read_ecc, calc_ecc, chip->ecc.size,
+ sm_order);
+}
EXPORT_SYMBOL(nand_correct_data);
MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Steven J. Hill <sjhill at realitydiluted.com>");
+MODULE_AUTHOR("Frans Meulenbroeks <fransmeulenbroeks at gmail.com>");
MODULE_DESCRIPTION("Generic NAND ECC support");
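Side note, not part of the patch: a minimal sketch of how the exported pair above is typically used per 256/512-byte ECC step, assuming the prototypes from <linux/mtd/nand_ecc.h> as updated by this series (the helper name is made up):

	#include <linux/mtd/nand_ecc.h>

	/*
	 * Illustration only: recompute the ECC over the data that was read
	 * and let __nand_correct_data() compare it against the ECC bytes
	 * stored in the OOB area. Returns 0 if the chunk is clean, 1 if a
	 * single bitflip was fixed (or the error was in the ECC bytes
	 * themselves), -EBADMSG otherwise.
	 */
	static int correct_chunk(unsigned char *data, unsigned char *read_ecc,
				 unsigned int eccsize, bool sm_order)
	{
		unsigned char calc_ecc[3];

		__nand_calculate_ecc(data, eccsize, calc_ecc, sm_order);
		return __nand_correct_data(data, read_ecc, calc_ecc, eccsize,
					   sm_order);
	}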
diff --git a/drivers/mtd/nand/nand_esmt.c b/drivers/mtd/nand/nand_esmt.c
new file mode 100644
index 0000000000..3338c68aaa
--- /dev/null
+++ b/drivers/mtd/nand/nand_esmt.c
@@ -0,0 +1,54 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Toradex AG
+ *
+ * Author: Marcel Ziswiler <marcel.ziswiler at toradex.com>
+ */
+
+#include <linux/mtd/rawnand.h>
+#include "internals.h"
+
+static void esmt_nand_decode_id(struct nand_chip *chip)
+{
+ nand_decode_ext_id(chip);
+
+ /* Extract ECC requirements from 5th id byte. */
+ if (chip->id.len >= 5 && nand_is_slc(chip)) {
+ chip->base.eccreq.step_size = 512;
+ switch (chip->id.data[4] & 0x3) {
+ case 0x0:
+ chip->base.eccreq.strength = 4;
+ break;
+ case 0x1:
+ chip->base.eccreq.strength = 2;
+ break;
+ case 0x2:
+ chip->base.eccreq.strength = 1;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ chip->base.eccreq.step_size = 0;
+ break;
+ }
+ }
+}
+
+static int esmt_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ /*
+ * It is known that some ESMT SLC NANDs have been shipped
+ * with the factory bad block markers in the first or last page
+ * of the block, instead of the first or second page. To be on
+ * the safe side, let's check all three locations.
+ */
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE |
+ NAND_BBM_LASTPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops esmt_nand_manuf_ops = {
+ .detect = esmt_nand_decode_id,
+ .init = esmt_nand_init,
+};
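Side note, not part of the patch: a standalone restatement of the SLC mapping applied by esmt_nand_decode_id() above; bits [1:0] of the 5th ID byte select the required correction strength per 512-byte step (function name made up for illustration):

	int esmt_id_byte4_to_strength(unsigned char id4)
	{
		static const int strength[4] = { 4, 2, 1, -1 };	/* -1: unknown */

		return strength[id4 & 0x3];
	}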
diff --git a/drivers/mtd/nand/nand_hynix.c b/drivers/mtd/nand/nand_hynix.c
new file mode 100644
index 0000000000..0422ed53aa
--- /dev/null
+++ b/drivers/mtd/nand/nand_hynix.c
@@ -0,0 +1,716 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon at free-electrons.com>
+ */
+
+#include <linux/sizes.h>
+
+#include "internals.h"
+
+#define NAND_HYNIX_CMD_SET_PARAMS 0x36
+#define NAND_HYNIX_CMD_APPLY_PARAMS 0x16
+
+#define NAND_HYNIX_1XNM_RR_REPEAT 8
+
+/**
+ * struct hynix_read_retry - read-retry data
+ * @nregs: number of registers to set when applying a new read-retry mode
+ * @regs: register offsets (NAND chip dependent)
+ * @values: array of values to set in registers. The array size is equal to
+ * (nregs * nmodes)
+ */
+struct hynix_read_retry {
+ int nregs;
+ const u8 *regs;
+ u8 values[];
+};
+
+/**
+ * struct hynix_nand - private Hynix NAND struct
+ * @read_retry: read-retry information
+ */
+struct hynix_nand {
+ const struct hynix_read_retry *read_retry;
+};
+
+/**
+ * struct hynix_read_retry_otp - structure describing the read-retry OTP
+ * area
+ * @nregs: number of hynix private registers to set before reading the
+ * OTP area
+ * @regs: registers that should be configured
+ * @values: values that should be set in regs
+ * @page: the address to pass to the READ_PAGE command. Depends on the NAND
+ * chip
+ * @size: size of the read-retry OTP section
+ */
+struct hynix_read_retry_otp {
+ int nregs;
+ const u8 *regs;
+ const u8 *values;
+ int page;
+ int size;
+};
+
+static bool hynix_nand_has_valid_jedecid(struct nand_chip *chip)
+{
+ u8 jedecid[5] = { };
+ int ret;
+
+ ret = nand_readid_op(chip, 0x40, jedecid, sizeof(jedecid));
+ if (ret)
+ return false;
+
+ return !strncmp("JEDEC", jedecid, sizeof(jedecid));
+}
+
+static int hynix_nand_cmd_op(struct nand_chip *chip, u8 cmd)
+{
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(cmd, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, cmd, -1, -1);
+
+ return 0;
+}
+
+static int hynix_nand_reg_write_op(struct nand_chip *chip, u8 addr, u8 val)
+{
+ u16 column = ((u16)addr << 8) | addr;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_ADDR(1, &addr, 0),
+ NAND_OP_8BIT_DATA_OUT(1, &val, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ chip->legacy.cmdfunc(chip, NAND_CMD_NONE, column, -1);
+ chip->legacy.write_byte(chip, val);
+
+ return 0;
+}
+
+static int hynix_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+ const u8 *values;
+ int i, ret;
+
+ values = hynix->read_retry->values +
+ (retry_mode * hynix->read_retry->nregs);
+
+ /* Enter 'Set Hynix Parameters' mode */
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ /*
+ * Configure the NAND in the requested read-retry mode.
+ * This is done by setting pre-defined values in internal NAND
+ * registers.
+ *
+ * The set of registers is NAND specific, and the values are either
+ * predefined or extracted from an OTP area on the NAND (values are
+ * probably tweaked at production in this case).
+ */
+ for (i = 0; i < hynix->read_retry->nregs; i++) {
+ ret = hynix_nand_reg_write_op(chip, hynix->read_retry->regs[i],
+ values[i]);
+ if (ret)
+ return ret;
+ }
+
+ /* Apply the new settings. */
+ return hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+}
+
+/**
+ * hynix_get_majority - get the value that is occurring the most in a given
+ * set of values
+ * @in: the array of values to test
+ * @repeat: the size of the in array
+ * @out: pointer used to store the output value
+ *
+ * This function implements the 'majority check' logic that is supposed to
+ * overcome the unreliability of MLC NANDs when reading the OTP area storing
+ * the read-retry parameters.
+ *
+ * It's based on a pretty simple assumption: if we repeat the same value
+ * several times and then take the one that is occurring the most, we should
+ * find the correct value.
+ * Let's hope this dummy algorithm prevents us from losing the read-retry
+ * parameters.
+ */
+static int hynix_get_majority(const u8 *in, int repeat, u8 *out)
+{
+ int i, j, half = repeat / 2;
+
+ /*
+ * We only test the first half of the in array because we must ensure
+ * that the value is at least occurring repeat / 2 times.
+ *
+ * This loop is suboptimal since we may count the occurrences of the
+ * same value several times, but we are doing that on small sets, which
+ * makes it acceptable.
+ */
+ for (i = 0; i < half; i++) {
+ int cnt = 0;
+ u8 val = in[i];
+
+ /* Count all values that are matching the one at index i. */
+ for (j = i + 1; j < repeat; j++) {
+ if (in[j] == val)
+ cnt++;
+ }
+
+ /* We found a value occurring more than repeat / 2. */
+ if (cnt > half) {
+ *out = val;
+ return 0;
+ }
+ }
+
+ return -EIO;
+}
+
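Side note, not part of the patch: a standalone demo of the majority check implemented by hynix_get_majority() above. Out of 8 noisy copies of the same OTP byte, it picks a value that still occurs in more than repeat / 2 of the remaining copies:

	#include <stdio.h>

	int majority(const unsigned char *in, int repeat, unsigned char *out)
	{
		int i, j, half = repeat / 2;

		for (i = 0; i < half; i++) {
			int cnt = 0;

			/* count later copies matching the candidate at index i */
			for (j = i + 1; j < repeat; j++)
				if (in[j] == in[i])
					cnt++;

			if (cnt > half) {
				*out = in[i];
				return 0;
			}
		}

		return -1;
	}

	int main(void)
	{
		/* six of the eight copies agree on 0x52 */
		unsigned char copies[8] = { 0x52, 0x52, 0x13, 0x52,
					    0x52, 0x52, 0x52, 0x7f };
		unsigned char val;

		if (!majority(copies, 8, &val))
			printf("majority value: 0x%02x\n", val);	/* prints 0x52 */

		return 0;
	}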
+static int hynix_read_rr_otp(struct nand_chip *chip,
+ const struct hynix_read_retry_otp *info,
+ void *buf)
+{
+ int i, ret;
+
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < info->nregs; i++) {
+ ret = hynix_nand_reg_write_op(chip, info->regs[i],
+ info->values[i]);
+ if (ret)
+ return ret;
+ }
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ /* Sequence to enter OTP mode? */
+ ret = hynix_nand_cmd_op(chip, 0x17);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x4);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, 0x19);
+ if (ret)
+ return ret;
+
+ /* Now read the page */
+ ret = nand_read_page_op(chip, info->page, 0, buf, info->size);
+ if (ret)
+ return ret;
+
+ /* Put everything back to normal */
+ ret = nand_reset_op(chip);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_SET_PARAMS);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_reg_write_op(chip, 0x38, 0);
+ if (ret)
+ return ret;
+
+ ret = hynix_nand_cmd_op(chip, NAND_HYNIX_CMD_APPLY_PARAMS);
+ if (ret)
+ return ret;
+
+ return nand_read_page_op(chip, 0, 0, NULL, 0);
+}
+
+#define NAND_HYNIX_1XNM_RR_COUNT_OFFS 0
+#define NAND_HYNIX_1XNM_RR_REG_COUNT_OFFS 8
+#define NAND_HYNIX_1XNM_RR_SET_OFFS(x, setsize, inv) \
+ (16 + ((((x) * 2) + ((inv) ? 1 : 0)) * (setsize)))
+
+static int hynix_mlc_1xnm_rr_value(const u8 *buf, int nmodes, int nregs,
+ int mode, int reg, bool inv, u8 *val)
+{
+ u8 tmp[NAND_HYNIX_1XNM_RR_REPEAT];
+ int val_offs = (mode * nregs) + reg;
+ int set_size = nmodes * nregs;
+ int i, ret;
+
+ for (i = 0; i < NAND_HYNIX_1XNM_RR_REPEAT; i++) {
+ int set_offs = NAND_HYNIX_1XNM_RR_SET_OFFS(i, set_size, inv);
+
+ tmp[i] = buf[val_offs + set_offs];
+ }
+
+ ret = hynix_get_majority(tmp, NAND_HYNIX_1XNM_RR_REPEAT, val);
+ if (ret)
+ return ret;
+
+ if (inv)
+ *val = ~*val;
+
+ return 0;
+}
+
+static u8 hynix_1xnm_mlc_read_retry_regs[] = {
+ 0xcc, 0xbf, 0xaa, 0xab, 0xcd, 0xad, 0xae, 0xaf
+};
+
+static int hynix_mlc_1xnm_rr_init(struct nand_chip *chip,
+ const struct hynix_read_retry_otp *info)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+ struct hynix_read_retry *rr = NULL;
+ int ret, i, j;
+ u8 nregs, nmodes;
+ u8 *buf;
+
+ buf = kmalloc(info->size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = hynix_read_rr_otp(chip, info, buf);
+ if (ret)
+ goto out;
+
+ ret = hynix_get_majority(buf, NAND_HYNIX_1XNM_RR_REPEAT,
+ &nmodes);
+ if (ret)
+ goto out;
+
+ ret = hynix_get_majority(buf + NAND_HYNIX_1XNM_RR_REPEAT,
+ NAND_HYNIX_1XNM_RR_REPEAT,
+ &nregs);
+ if (ret)
+ goto out;
+
+ rr = kzalloc(sizeof(*rr) + (nregs * nmodes), GFP_KERNEL);
+ if (!rr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ for (i = 0; i < nmodes; i++) {
+ for (j = 0; j < nregs; j++) {
+ u8 *val = rr->values + (i * nregs);
+
+ ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
+ false, val);
+ if (!ret)
+ continue;
+
+ ret = hynix_mlc_1xnm_rr_value(buf, nmodes, nregs, i, j,
+ true, val);
+ if (ret)
+ goto out;
+ }
+ }
+
+ rr->nregs = nregs;
+ rr->regs = hynix_1xnm_mlc_read_retry_regs;
+ hynix->read_retry = rr;
+ chip->ops.setup_read_retry = hynix_nand_setup_read_retry;
+ chip->read_retries = nmodes;
+
+out:
+ kfree(buf);
+
+ if (ret)
+ kfree(rr);
+
+ return ret;
+}
+
+static const u8 hynix_mlc_1xnm_rr_otp_regs[] = { 0x38 };
+static const u8 hynix_mlc_1xnm_rr_otp_values[] = { 0x52 };
+
+static const struct hynix_read_retry_otp hynix_mlc_1xnm_rr_otps[] = {
+ {
+ .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
+ .regs = hynix_mlc_1xnm_rr_otp_regs,
+ .values = hynix_mlc_1xnm_rr_otp_values,
+ .page = 0x21f,
+ .size = 784
+ },
+ {
+ .nregs = ARRAY_SIZE(hynix_mlc_1xnm_rr_otp_regs),
+ .regs = hynix_mlc_1xnm_rr_otp_regs,
+ .values = hynix_mlc_1xnm_rr_otp_values,
+ .page = 0x200,
+ .size = 528,
+ },
+};
+
+static int hynix_nand_rr_init(struct nand_chip *chip)
+{
+ int i, ret = 0;
+ bool valid_jedecid;
+
+ valid_jedecid = hynix_nand_has_valid_jedecid(chip);
+
+ /*
+ * We only support read-retry for 1xnm NANDs, and those NANDs all
+ * expose a valid JEDEC ID.
+ */
+ if (valid_jedecid) {
+ u8 nand_tech = chip->id.data[5] >> 4;
+
+ /* 1xnm technology */
+ if (nand_tech == 4) {
+ for (i = 0; i < ARRAY_SIZE(hynix_mlc_1xnm_rr_otps);
+ i++) {
+ /*
+ * FIXME: Hynix recommends copying the
+ * read-retry OTP area into a normal page.
+ */
+ ret = hynix_mlc_1xnm_rr_init(chip,
+ hynix_mlc_1xnm_rr_otps);
+ if (!ret)
+ break;
+ }
+ }
+ }
+
+ if (ret)
+ pr_warn("failed to initialize read-retry infrastructure");
+
+ return 0;
+}
+
+static void hynix_nand_extract_oobsize(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ u8 oobsize;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ oobsize = ((chip->id.data[3] >> 2) & 0x3) |
+ ((chip->id.data[3] >> 4) & 0x4);
+
+ if (valid_jedecid) {
+ switch (oobsize) {
+ case 0:
+ memorg->oobsize = 2048;
+ break;
+ case 1:
+ memorg->oobsize = 1664;
+ break;
+ case 2:
+ memorg->oobsize = 1024;
+ break;
+ case 3:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size");
+ break;
+ }
+ } else {
+ switch (oobsize) {
+ case 0:
+ memorg->oobsize = 128;
+ break;
+ case 1:
+ memorg->oobsize = 224;
+ break;
+ case 2:
+ memorg->oobsize = 448;
+ break;
+ case 3:
+ memorg->oobsize = 64;
+ break;
+ case 4:
+ memorg->oobsize = 32;
+ break;
+ case 5:
+ memorg->oobsize = 16;
+ break;
+ case 6:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size");
+ break;
+ }
+
+ /*
+ * The datasheet of H27UCG8T2BTR mentions that the "Redundant
+ * Area Size" is encoded "per 8KB" (page size). This chip uses
+ * a page size of 16KiB. The datasheet mentions an OOB size of
+ * 1,280 bytes, but the OOB size encoded in the ID bytes (using
+ * the existing logic above) is 640 bytes.
+ * Update the OOB size for this chip by taking the value
+ * determined above and scaling it to the actual page size (so
+ * the actual OOB size for this chip is: 640 * 16k / 8k).
+ */
+ if (chip->id.data[1] == 0xde)
+ memorg->oobsize *= memorg->pagesize / SZ_8K;
+ }
+
+ mtd->oobsize = memorg->oobsize;
+}
+
+static void hynix_nand_extract_ecc_requirements(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ u8 ecc_level = (chip->id.data[4] >> 4) & 0x7;
+
+ if (valid_jedecid) {
+ /* Reference: H27UCG8T2E datasheet */
+ chip->base.eccreq.step_size = 1024;
+
+ switch (ecc_level) {
+ case 0:
+ chip->base.eccreq.step_size = 0;
+ chip->base.eccreq.strength = 0;
+ break;
+ case 1:
+ chip->base.eccreq.strength = 4;
+ break;
+ case 2:
+ chip->base.eccreq.strength = 24;
+ break;
+ case 3:
+ chip->base.eccreq.strength = 32;
+ break;
+ case 4:
+ chip->base.eccreq.strength = 40;
+ break;
+ case 5:
+ chip->base.eccreq.strength = 50;
+ break;
+ case 6:
+ chip->base.eccreq.strength = 60;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid ECC requirements");
+ }
+ } else {
+ /*
+ * The meaning of the ECC requirements field depends on the
+ * NAND technology.
+ */
+ u8 nand_tech = chip->id.data[5] & 0x7;
+
+ if (nand_tech < 3) {
+ /* > 26nm, reference: H27UBG8T2A datasheet */
+ if (ecc_level < 5) {
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << ecc_level;
+ } else if (ecc_level < 7) {
+ if (ecc_level == 5)
+ chip->base.eccreq.step_size = 2048;
+ else
+ chip->base.eccreq.step_size = 1024;
+ chip->base.eccreq.strength = 24;
+ } else {
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Hynix decided
+ * to use a different extended ID format, and
+ * we should find a way to support it.
+ */
+ WARN(1, "Invalid ECC requirements");
+ }
+ } else {
+ /* <= 26nm, reference: H27UBG8T2B datasheet */
+ if (!ecc_level) {
+ chip->base.eccreq.step_size = 0;
+ chip->base.eccreq.strength = 0;
+ } else if (ecc_level < 5) {
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << (ecc_level - 1);
+ } else {
+ chip->base.eccreq.step_size = 1024;
+ chip->base.eccreq.strength = 24 +
+ (8 * (ecc_level - 5));
+ }
+ }
+ }
+}
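Side note, not part of the patch: the valid-JEDEC-ID branch of hynix_nand_extract_ecc_requirements() above boils down to a lookup on ID[4][6:4] selecting the required strength per 1024-byte step (function name made up for illustration):

	int hynix_jedec_ecc_strength(unsigned char id4)
	{
		/* levels 0..6 from the switch above, -1 for the invalid case */
		static const int strength[8] = { 0, 4, 24, 32, 40, 50, 60, -1 };

		return strength[(id4 >> 4) & 0x7];
	}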
+
+static void hynix_nand_extract_scrambling_requirements(struct nand_chip *chip,
+ bool valid_jedecid)
+{
+ u8 nand_tech;
+
+ /* We need scrambling on all TLC NANDs */
+ if (nanddev_bits_per_cell(&chip->base) > 2)
+ chip->options |= NAND_NEED_SCRAMBLING;
+
+ /* And on MLC NANDs with sub-3xnm process */
+ if (valid_jedecid) {
+ nand_tech = chip->id.data[5] >> 4;
+
+ /* < 3xnm */
+ if (nand_tech > 0)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ } else {
+ nand_tech = chip->id.data[5] & 0x7;
+
+ /* < 32nm */
+ if (nand_tech > 2)
+ chip->options |= NAND_NEED_SCRAMBLING;
+ }
+}
+
+static void hynix_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ bool valid_jedecid;
+ u8 tmp;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /*
+ * Exclude all SLC NANDs from this advanced detection scheme.
+ * According to the ranges defined in several datasheets, it might
+ * appear that even SLC NANDs could fall in this extended ID scheme.
+ * If that is the case, rework the test to let SLC NANDs go through the
+ * detection process.
+ */
+ if (chip->id.len < 6 || nand_is_slc(chip)) {
+ nand_decode_ext_id(chip);
+ return;
+ }
+
+ /* Extract pagesize */
+ memorg->pagesize = 2048 << (chip->id.data[3] & 0x03);
+ mtd->writesize = memorg->pagesize;
+
+ tmp = (chip->id.data[3] >> 4) & 0x3;
+ /*
+ * When bit7 is set that means we start counting at 1MiB, otherwise
+ * we start counting at 128KiB and shift this value by the content of
+ * ID[3][4:5].
+ * The only exception is when ID[3][4:5] == 3 and ID[3][7] == 0, in
+ * this case the erasesize is set to 768KiB.
+ */
+ if (chip->id.data[3] & 0x80) {
+ memorg->pages_per_eraseblock = (SZ_1M << tmp) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_1M << tmp;
+ } else if (tmp == 3) {
+ memorg->pages_per_eraseblock = (SZ_512K + SZ_256K) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_512K + SZ_256K;
+ } else {
+ memorg->pages_per_eraseblock = (SZ_128K << tmp) /
+ memorg->pagesize;
+ mtd->erasesize = SZ_128K << tmp;
+ }
+
+ /*
+ * Modern Toggle DDR NANDs have a valid JEDECID even though they are
+ * not exposing a valid JEDEC parameter table.
+ * These NANDs use a different NAND ID scheme.
+ */
+ valid_jedecid = hynix_nand_has_valid_jedecid(chip);
+
+ hynix_nand_extract_oobsize(chip, valid_jedecid);
+ hynix_nand_extract_ecc_requirements(chip, valid_jedecid);
+ hynix_nand_extract_scrambling_requirements(chip, valid_jedecid);
+}
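Side note, not part of the patch: a standalone sketch of the geometry decoding performed by hynix_nand_decode_id() above. ID[3][1:0] selects the page size, ID[3][7] and ID[3][5:4] the eraseblock size, with the 768KiB special case when ID[3][5:4] == 3 and ID[3][7] == 0 (function name made up for illustration):

	void hynix_decode_geometry(unsigned char id3, unsigned int *pagesize,
				   unsigned int *erasesize)
	{
		unsigned char tmp = (id3 >> 4) & 0x3;

		*pagesize = 2048 << (id3 & 0x3);

		if (id3 & 0x80)
			*erasesize = (1024 * 1024) << tmp;	/* 1MiB << tmp */
		else if (tmp == 3)
			*erasesize = 768 * 1024;		/* 512KiB + 256KiB */
		else
			*erasesize = (128 * 1024) << tmp;	/* 128KiB << tmp */
	}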
+
+static void hynix_nand_cleanup(struct nand_chip *chip)
+{
+ struct hynix_nand *hynix = nand_get_manufacturer_data(chip);
+
+ if (!hynix)
+ return;
+
+ kfree(hynix->read_retry);
+ kfree(hynix);
+ nand_set_manufacturer_data(chip, NULL);
+}
+
+static int
+h27ucg8t2atrbc_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int hynix_nand_init(struct nand_chip *chip)
+{
+ struct hynix_nand *hynix;
+ int ret;
+
+ if (!nand_is_slc(chip))
+ chip->options |= NAND_BBM_LASTPAGE;
+ else
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ hynix = kzalloc(sizeof(*hynix), GFP_KERNEL);
+ if (!hynix)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, hynix);
+
+ if (!strncmp("H27UCG8T2ATR-BC", chip->parameters.model,
+ sizeof("H27UCG8T2ATR-BC") - 1))
+ chip->ops.choose_interface_config =
+ h27ucg8t2atrbc_choose_interface_config;
+
+ ret = hynix_nand_rr_init(chip);
+ if (ret)
+ hynix_nand_cleanup(chip);
+
+ return ret;
+}
+
+const struct nand_manufacturer_ops hynix_nand_manuf_ops = {
+ .detect = hynix_nand_decode_id,
+ .init = hynix_nand_init,
+ .cleanup = hynix_nand_cleanup,
+};
diff --git a/drivers/mtd/nand/nand_ids.c b/drivers/mtd/nand/nand_ids.c
index 863e8d49ab..b9945791a9 100644
--- a/drivers/mtd/nand/nand_ids.c
+++ b/drivers/mtd/nand/nand_ids.c
@@ -1,24 +1,13 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
- * drivers/mtd/nandids.c
- *
* Copyright (C) 2002 Thomas Gleixner (tglx at linutronix.de)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
-#include <common.h>
+
#include <linux/sizes.h>
-#include <linux/mtd/nand.h>
-#ifdef CONFIG_NAND_INFO
-#define __STR(str) str
-#else
-#define __STR(str) ""
-#endif
+#include "internals.h"
-#define LP_OPTIONS NAND_SAMSUNG_LP_OPTIONS
+#define LP_OPTIONS 0
#define LP_OPTIONS16 (LP_OPTIONS | NAND_BUSWIDTH_16)
#define SP_OPTIONS NAND_NEED_READRDY
@@ -37,49 +26,65 @@ struct nand_flash_dev nand_flash_ids[] = {
* listed by full ID. We list them first so that we can easily identify
* the most specific match.
*/
- {__STR("TC58NVG2S0F 4G 3.3V 8-bit"),
+ {"TC58NVG0S3E 1G 3.3V 8-bit",
+ { .id = {0x98, 0xd1, 0x90, 0x15, 0x76, 0x14, 0x01, 0x00} },
+ SZ_2K, SZ_128, SZ_128K, 0, 8, 64, NAND_ECC_INFO(1, SZ_512), },
+ {"TC58NVG2S0F 4G 3.3V 8-bit",
{ .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x15, 0x01, 0x08} },
- SZ_4K, SZ_512, SZ_256K, 0, 8, 224},
- {__STR("TC58NVG3S0F 8G 3.3V 8-bit"),
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 224, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG2S0H 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x90, 0x26, 0x76, 0x16, 0x08, 0x00} },
+ SZ_4K, SZ_512, SZ_256K, 0, 8, 256, NAND_ECC_INFO(8, SZ_512) },
+ {"TC58NVG3S0F 8G 3.3V 8-bit",
{ .id = {0x98, 0xd3, 0x90, 0x26, 0x76, 0x15, 0x02, 0x08} },
- SZ_4K, SZ_1K, SZ_256K, 0, 8, 232},
- {__STR("TC58NVG5D2 32G 3.3V 8-bit"),
+ SZ_4K, SZ_1K, SZ_256K, 0, 8, 232, NAND_ECC_INFO(4, SZ_512) },
+ {"TC58NVG5D2 32G 3.3V 8-bit",
{ .id = {0x98, 0xd7, 0x94, 0x32, 0x76, 0x56, 0x09, 0x00} },
- SZ_8K, SZ_4K, SZ_1M, 0, 8, 640},
- {__STR("TC58NVG6D2 64G 3.3V 8-bit"),
+ SZ_8K, SZ_4K, SZ_1M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"TC58NVG6D2 64G 3.3V 8-bit",
{ .id = {0x98, 0xde, 0x94, 0x82, 0x76, 0x56, 0x04, 0x20} },
- SZ_8K, SZ_8K, SZ_2M, 0, 8, 640},
-
- LEGACY_ID_NAND(__STR("NAND 4MiB 5V 8-bit"), 0x6B, 4, SZ_8K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 4MiB 3,3V 8-bit"), 0xE3, 4, SZ_8K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 4MiB 3,3V 8-bit"), 0xE5, 4, SZ_8K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 8MiB 3,3V 8-bit"), 0xD6, 8, SZ_8K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 8MiB 3,3V 8-bit"), 0xE6, 8, SZ_8K, SP_OPTIONS),
-
- LEGACY_ID_NAND(__STR("NAND 16MiB 1,8V 8-bit"), 0x33, 16, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 16MiB 3,3V 8-bit"), 0x73, 16, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 16MiB 1,8V 16-bit"), 0x43, 16, SZ_16K, SP_OPTIONS16),
- LEGACY_ID_NAND(__STR("NAND 16MiB 3,3V 16-bit"), 0x53, 16, SZ_16K, SP_OPTIONS16),
-
- LEGACY_ID_NAND(__STR("NAND 32MiB 1,8V 8-bit"), 0x35, 32, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 32MiB 3,3V 8-bit"), 0x75, 32, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 32MiB 1,8V 16-bit"), 0x45, 32, SZ_16K, SP_OPTIONS16),
- LEGACY_ID_NAND(__STR("NAND 32MiB 3,3V 16-bit"), 0x55, 32, SZ_16K, SP_OPTIONS16),
-
- LEGACY_ID_NAND(__STR("NAND 64MiB 1,8V 8-bit"), 0x36, 64, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0x76, 64, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 64MiB 1,8V 16-bit"), 0x46, 64, SZ_16K, SP_OPTIONS16),
- LEGACY_ID_NAND(__STR("NAND 64MiB 3,3V 16-bit"), 0x56, 64, SZ_16K, SP_OPTIONS16),
-
- LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 8-bit"), 0x78, 128, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 8-bit"), 0x39, 128, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 128MiB 3,3V 8-bit"), 0x79, 128, SZ_16K, SP_OPTIONS),
- LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0x72, 128, SZ_16K, SP_OPTIONS16),
- LEGACY_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0x49, 128, SZ_16K, SP_OPTIONS16),
- LEGACY_ID_NAND(__STR("NAND 128MiB 3,3V 16-bit"), 0x74, 128, SZ_16K, SP_OPTIONS16),
- LEGACY_ID_NAND(__STR("NAND 128MiB 3,3V 16-bit"), 0x59, 128, SZ_16K, SP_OPTIONS16),
-
- LEGACY_ID_NAND(__STR("NAND 256MiB 3,3V 8-bit"), 0x71, 256, SZ_16K, SP_OPTIONS),
+ SZ_8K, SZ_8K, SZ_2M, 0, 8, 640, NAND_ECC_INFO(40, SZ_1K) },
+ {"SDTNRGAMA 64G 3.3V 8-bit",
+ { .id = {0x45, 0xde, 0x94, 0x93, 0x76, 0x50} },
+ SZ_16K, SZ_8K, SZ_4M, 0, 6, 1280, NAND_ECC_INFO(40, SZ_1K) },
+ {"H27UCG8T2ATR-BC 64G 3.3V 8-bit",
+ { .id = {0xad, 0xde, 0x94, 0xda, 0x74, 0xc4} },
+ SZ_8K, SZ_8K, SZ_2M, NAND_NEED_SCRAMBLING, 6, 640,
+ NAND_ECC_INFO(40, SZ_1K) },
+ {"TH58NVG2S3HBAI4 4G 3.3V 8-bit",
+ { .id = {0x98, 0xdc, 0x91, 0x15, 0x76} },
+ SZ_2K, SZ_512, SZ_128K, 0, 5, 128, NAND_ECC_INFO(8, SZ_512) },
+
+ LEGACY_ID_NAND("NAND 4MiB 5V 8-bit", 0x6B, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE3, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 4MiB 3,3V 8-bit", 0xE5, 4, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xD6, 8, SZ_8K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 8MiB 3,3V 8-bit", 0xE6, 8, SZ_8K, SP_OPTIONS),
+
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 8-bit", 0x33, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 8-bit", 0x73, 16, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 16MiB 1,8V 16-bit", 0x43, 16, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 16MiB 3,3V 16-bit", 0x53, 16, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 8-bit", 0x35, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 32MiB 1,8V 16-bit", 0x45, 32, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 32MiB 3,3V 16-bit", 0x55, 32, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 8-bit", 0x36, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 8-bit", 0x76, 64, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 64MiB 1,8V 16-bit", 0x46, 64, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 64MiB 3,3V 16-bit", 0x56, 64, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x78, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 8-bit", 0x39, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 8-bit", 0x79, 128, SZ_16K, SP_OPTIONS),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x72, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 1,8V 16-bit", 0x49, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x74, 128, SZ_16K, SP_OPTIONS16),
+ LEGACY_ID_NAND("NAND 128MiB 3,3V 16-bit", 0x59, 128, SZ_16K, SP_OPTIONS16),
+
+ LEGACY_ID_NAND("NAND 256MiB 3,3V 8-bit", 0x71, 256, SZ_16K, SP_OPTIONS),
/*
* These are the new chips with large page size. Their page size and
@@ -87,101 +92,116 @@ struct nand_flash_dev nand_flash_ids[] = {
*/
/* 512 Megabit */
- EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 8-bit"), 0xA2, 64, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 8-bit"), 0xA0, 64, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0xF2, 64, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0xD0, 64, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 8-bit"), 0xF0, 64, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 16-bit"), 0xB2, 64, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 1,8V 16-bit"), 0xB0, 64, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 16-bit"), 0xC2, 64, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 64MiB 3,3V 16-bit"), 0xC0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 8-bit", 0xA0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF2, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xD0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 8-bit", 0xF0, 64, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 1,8V 16-bit", 0xB0, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC2, 64, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64MiB 3,3V 16-bit", 0xC0, 64, LP_OPTIONS16),
/* 1 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 128MiB 1,8V 8-bit"), 0xA1, 128, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 128MiB 3,3V 8-bit"), 0xF1, 128, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 128MiB 3,3V 8-bit"), 0xD1, 128, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0xB1, 128, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 128MiB 3,3V 16-bit"), 0xC1, 128, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 128MiB 1,8V 16-bit"), 0xAD, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 8-bit", 0xA1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xF1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xD1, 128, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xB1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 3,3V 16-bit", 0xC1, 128, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 128MiB 1,8V 16-bit", 0xAD, 128, LP_OPTIONS16),
/* 2 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 256MiB 1,8V 8-bit"), 0xAA, 256, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 256MiB 3,3V 8-bit"), 0xDA, 256, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 256MiB 1,8V 16-bit"), 0xBA, 256, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 256MiB 3,3V 16-bit"), 0xCA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 8-bit", 0xAA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 8-bit", 0xDA, 256, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 256MiB 1,8V 16-bit", 0xBA, 256, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 256MiB 3,3V 16-bit", 0xCA, 256, LP_OPTIONS16),
/* 4 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 512MiB 1,8V 8-bit"), 0xAC, 512, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 512MiB 3,3V 8-bit"), 0xDC, 512, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 512MiB 1,8V 16-bit"), 0xBC, 512, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 512MiB 3,3V 16-bit"), 0xCC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 8-bit", 0xAC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 8-bit", 0xDC, 512, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 512MiB 1,8V 16-bit", 0xBC, 512, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 512MiB 3,3V 16-bit", 0xCC, 512, LP_OPTIONS16),
/* 8 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 1GiB 1,8V 8-bit"), 0xA3, 1024, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 1GiB 3,3V 8-bit"), 0xD3, 1024, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 1GiB 1,8V 16-bit"), 0xB3, 1024, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 1GiB 3,3V 16-bit"), 0xC3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 8-bit", 0xA3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 8-bit", 0xD3, 1024, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 1GiB 1,8V 16-bit", 0xB3, 1024, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 1GiB 3,3V 16-bit", 0xC3, 1024, LP_OPTIONS16),
/* 16 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 2GiB 1,8V 8-bit"), 0xA5, 2048, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 2GiB 3,3V 8-bit"), 0xD5, 2048, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 2GiB 1,8V 16-bit"), 0xB5, 2048, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 2GiB 3,3V 16-bit"), 0xC5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 8-bit", 0xA5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 8-bit", 0xD5, 2048, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 2GiB 1,8V 16-bit", 0xB5, 2048, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 2GiB 3,3V 16-bit", 0xC5, 2048, LP_OPTIONS16),
/* 32 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 4GiB 1,8V 8-bit"), 0xA7, 4096, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 4GiB 3,3V 8-bit"), 0xD7, 4096, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 4GiB 1,8V 16-bit"), 0xB7, 4096, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 4GiB 3,3V 16-bit"), 0xC7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 8-bit", 0xA7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 8-bit", 0xD7, 4096, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 4GiB 1,8V 16-bit", 0xB7, 4096, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 4GiB 3,3V 16-bit", 0xC7, 4096, LP_OPTIONS16),
/* 64 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 8GiB 1,8V 8-bit"), 0xAE, 8192, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 8GiB 3,3V 8-bit"), 0xDE, 8192, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 8GiB 1,8V 16-bit"), 0xBE, 8192, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 8GiB 3,3V 16-bit"), 0xCE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 8-bit", 0xAE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 8-bit", 0xDE, 8192, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 8GiB 1,8V 16-bit", 0xBE, 8192, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 8GiB 3,3V 16-bit", 0xCE, 8192, LP_OPTIONS16),
/* 128 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 16GiB 1,8V 8-bit"), 0x1A, 16384, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 16GiB 3,3V 8-bit"), 0x3A, 16384, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 16GiB 1,8V 16-bit"), 0x2A, 16384, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 16GiB 3,3V 16-bit"), 0x4A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 8-bit", 0x1A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 8-bit", 0x3A, 16384, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 16GiB 1,8V 16-bit", 0x2A, 16384, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 16GiB 3,3V 16-bit", 0x4A, 16384, LP_OPTIONS16),
/* 256 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 32GiB 1,8V 8-bit"), 0x1C, 32768, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 32GiB 3,3V 8-bit"), 0x3C, 32768, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 32GiB 1,8V 16-bit"), 0x2C, 32768, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 32GiB 3,3V 16-bit"), 0x4C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 8-bit", 0x1C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 8-bit", 0x3C, 32768, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 32GiB 1,8V 16-bit", 0x2C, 32768, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 32GiB 3,3V 16-bit", 0x4C, 32768, LP_OPTIONS16),
/* 512 Gigabit */
- EXTENDED_ID_NAND(__STR("NAND 64GiB 1,8V 8-bit"), 0x1E, 65536, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64GiB 3,3V 8-bit"), 0x3E, 65536, LP_OPTIONS),
- EXTENDED_ID_NAND(__STR("NAND 64GiB 1,8V 16-bit"), 0x2E, 65536, LP_OPTIONS16),
- EXTENDED_ID_NAND(__STR("NAND 64GiB 3,3V 16-bit"), 0x4E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 8-bit", 0x1E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 8-bit", 0x3E, 65536, LP_OPTIONS),
+ EXTENDED_ID_NAND("NAND 64GiB 1,8V 16-bit", 0x2E, 65536, LP_OPTIONS16),
+ EXTENDED_ID_NAND("NAND 64GiB 3,3V 16-bit", 0x4E, 65536, LP_OPTIONS16),
{NULL}
};
/* Manufacturer IDs */
-struct nand_manufacturers nand_manuf_ids[] = {
- {NAND_MFR_TOSHIBA, "Toshiba"},
- {NAND_MFR_SAMSUNG, "Samsung"},
+static const struct nand_manufacturer_desc nand_manufacturer_descs[] = {
+ {NAND_MFR_AMD, "AMD/Spansion", &amd_nand_manuf_ops},
+ {NAND_MFR_ATO, "ATO"},
+ {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_ESMT, "ESMT", &esmt_nand_manuf_ops},
{NAND_MFR_FUJITSU, "Fujitsu"},
+ {NAND_MFR_HYNIX, "Hynix", &hynix_nand_manuf_ops},
+ {NAND_MFR_INTEL, "Intel"},
+ {NAND_MFR_MACRONIX, "Macronix", &macronix_nand_manuf_ops},
+ {NAND_MFR_MICRON, "Micron", &micron_nand_manuf_ops},
{NAND_MFR_NATIONAL, "National"},
{NAND_MFR_RENESAS, "Renesas"},
+ {NAND_MFR_SAMSUNG, "Samsung", &samsung_nand_manuf_ops},
+ {NAND_MFR_SANDISK, "SanDisk"},
{NAND_MFR_STMICRO, "ST Micro"},
- {NAND_MFR_HYNIX, "Hynix"},
- {NAND_MFR_MICRON, "Micron"},
- {NAND_MFR_AMD, "AMD/Spansion"},
- {NAND_MFR_MACRONIX, "Macronix"},
- {NAND_MFR_EON, "Eon"},
+ {NAND_MFR_TOSHIBA, "Toshiba", &toshiba_nand_manuf_ops},
{NAND_MFR_WINBOND, "Winbond"},
- {0x0, "Unknown"}
};
-EXPORT_SYMBOL(nand_manuf_ids);
-EXPORT_SYMBOL(nand_flash_ids);
+/**
+ * nand_get_manufacturer_desc - Get manufacturer information from the
+ * manufacturer ID
+ * @id: manufacturer ID
+ *
+ * Returns a nand_manufacturer_desc object if the manufacturer is defined
+ * in the NAND manufacturers database, NULL otherwise.
+ */
+const struct nand_manufacturer_desc *nand_get_manufacturer_desc(u8 id)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(nand_manufacturer_descs); i++)
+ if (nand_manufacturer_descs[i].id == id)
+ return &nand_manufacturer_descs[i];
-MODULE_LICENSE("GPL");
-MODULE_AUTHOR("Thomas Gleixner <tglx at linutronix.de>");
-MODULE_DESCRIPTION("Nand device & manufacturer IDs");
+ return NULL;
+}
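The replacement table pairs each manufacturer ID with optional vendor hooks, and nand_get_manufacturer_desc() is nothing more than a linear scan over it. Below is a minimal stand-alone sketch of that lookup pattern; the struct layout and ops values are illustrative only, not the barebox definitions.

#include <stddef.h>
#include <stdio.h>

struct manuf_desc {
	unsigned char id;
	const char *name;
	const void *ops;		/* vendor hooks, may be NULL */
};

/* IDs follow the usual JEDEC assignments; ops pointers are dummies */
static const struct manuf_desc descs[] = {
	{ 0x2c, "Micron",   (const void *)0x1 },
	{ 0xec, "Samsung",  (const void *)0x1 },
	{ 0x20, "ST Micro", NULL },
};

static const struct manuf_desc *find_desc(unsigned char id)
{
	size_t i;

	for (i = 0; i < sizeof(descs) / sizeof(descs[0]); i++)
		if (descs[i].id == id)
			return &descs[i];

	return NULL;			/* unknown manufacturer */
}

int main(void)
{
	const struct manuf_desc *d = find_desc(0x2c);

	printf("%s\n", d ? d->name : "Unknown");
	return 0;
}

Unknown IDs now simply yield NULL instead of matching a catch-all entry, which is why the old {0x0, "Unknown"} terminator is gone from the table.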
diff --git a/drivers/mtd/nand/nand_imx.c b/drivers/mtd/nand/nand_imx.c
index 993fb2e249..5e4504a48e 100644
--- a/drivers/mtd/nand/nand_imx.c
+++ b/drivers/mtd/nand/nand_imx.c
@@ -24,6 +24,7 @@
#include <init.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/clk.h>
#include <mach/generic.h>
#include <mach/imx-nand.h>
@@ -885,20 +886,18 @@ static void preset_v3(struct nand_chip *chip)
}
static int imx_nand_write_page(struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw)
+ const uint8_t *buf, bool ecc, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct imx_nand_host *host = chip->priv;
int status;
- host->enable_hwecc(chip, !raw);
+ host->enable_hwecc(chip, ecc);
chip->legacy.cmdfunc(chip, NAND_CMD_SEQIN, 0x00, page);
memcpy32(host->main_area0, buf, mtd->writesize);
- if (oob_required)
- copy_spare(chip, 0, chip->oob_poi);
+ copy_spare(chip, 0, chip->oob_poi);
host->send_page(host, NFC_INPUT);
chip->legacy.cmdfunc(chip, NAND_CMD_PAGEPROG, -1, -1);
@@ -910,12 +909,26 @@ static int imx_nand_write_page(struct nand_chip *chip,
return 0;
}
+static int imx_nand_write_page_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ return imx_nand_write_page(chip, buf, true, page);
+}
+
+static int imx_nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ return imx_nand_write_page(chip, buf, false, page);
+}
+
static void imx_nand_do_read_page(struct nand_chip *chip, uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct imx_nand_host *host = chip->priv;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
host->send_page(host, NFC_OUTPUT);
memcpy32(buf, host->main_area0, mtd->writesize);
@@ -931,7 +944,7 @@ static int imx_nand_read_page(struct nand_chip *chip, uint8_t *buf,
host->enable_hwecc(chip, true);
- imx_nand_do_read_page(chip, buf, oob_required);
+ imx_nand_do_read_page(chip, buf, oob_required, page);
return host->correct(chip);
}
@@ -943,7 +956,7 @@ static int imx_nand_read_page_raw(struct nand_chip *chip, uint8_t *buf,
host->enable_hwecc(chip, false);
- imx_nand_do_read_page(chip, buf, oob_required);
+ imx_nand_do_read_page(chip, buf, oob_required, page);
return 0;
}
@@ -1205,7 +1218,7 @@ static int imxnd_create_bbt(struct nand_chip *chip)
if (ret)
return ret;
- ret = nand_default_bbt(chip);
+ ret = nand_create_bbt(chip);
if (ret)
return ret;
@@ -1336,7 +1349,7 @@ static int __init imxnd_probe(struct device_d *dev)
/* structures must be linked */
this = &host->nand;
- mtd = &this->mtd;
+ mtd = nand_to_mtd(this);
mtd->dev.parent = dev;
mtd->name = "imx_nand";
@@ -1351,7 +1364,8 @@ static int __init imxnd_probe(struct device_d *dev)
this->legacy.read_word = imx_nand_read_word;
this->legacy.write_buf = imx_nand_write_buf;
this->legacy.read_buf = imx_nand_read_buf;
- this->write_page = imx_nand_write_page;
+ this->ecc.write_page = imx_nand_write_page_ecc;
+ this->ecc.write_page_raw = imx_nand_write_page_raw;
if (host->hw_ecc) {
this->ecc.calculate = imx_nand_calculate_ecc;
@@ -1372,12 +1386,12 @@ static int __init imxnd_probe(struct device_d *dev)
this->ecc.mode = NAND_ECC_SOFT;
}
- this->ecc.layout = oob_smallpage;
+ mtd_set_ecclayout(mtd, oob_smallpage);
/* NAND bus width determines access functions used by upper layer */
if (host->data_width == 2) {
this->options |= NAND_BUSWIDTH_16;
- this->ecc.layout = &nandv1_hw_eccoob_smallpage;
+ mtd_set_ecclayout(mtd, &nandv1_hw_eccoob_smallpage);
imx_nand_set_layout(0, 16);
}
@@ -1405,9 +1419,9 @@ static int __init imxnd_probe(struct device_d *dev)
"You will loose factory bad block markers!\n");
if (mtd->writesize == 2048)
- this->ecc.layout = oob_largepage;
+ mtd_set_ecclayout(mtd, oob_largepage);
else
- this->ecc.layout = oob_4kpage;
+ mtd_set_ecclayout(mtd, oob_4kpage);
host->pagesize_2k = 1;
if (nfc_is_v21())
writew(NFC_V2_SPAS_SPARESIZE(64), host->regs + NFC_V2_SPAS);
@@ -1437,7 +1451,7 @@ static int __init imxnd_probe(struct device_d *dev)
err = 0;
}
- add_mtd_nand_device(this, "nand");
+ add_mtd_nand_device(mtd, "nand");
dev->priv = host;
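The write-path conversion above follows a simple wrapper pattern: one worker keeps the ecc on/off parameter, and the two ecc.write_page callbacks become thin shims around it. A trimmed stand-alone illustration of that shape follows; the names are invented for the sketch, not the driver's symbols.

#include <stdbool.h>
#include <stdio.h>

/* single worker, parameterized on whether hardware ECC is engaged */
static int write_page(const unsigned char *buf, bool ecc, int page)
{
	(void)buf;
	printf("page %d written %s ECC\n", page, ecc ? "with" : "without");
	return 0;
}

/* thin wrappers matching the two callback slots */
static int write_page_ecc(const unsigned char *buf, int page)
{
	return write_page(buf, true, page);
}

static int write_page_raw(const unsigned char *buf, int page)
{
	return write_page(buf, false, page);
}

int main(void)
{
	unsigned char dummy[64] = { 0 };

	write_page_ecc(dummy, 0);
	write_page_raw(dummy, 1);
	return 0;
}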
diff --git a/drivers/mtd/nand/nand_jedec.c b/drivers/mtd/nand/nand_jedec.c
new file mode 100644
index 0000000000..5632d2c73f
--- /dev/null
+++ b/drivers/mtd/nand/nand_jedec.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill at realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx at linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all JEDEC related helpers.
+ */
+
+#include <common.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+#define JEDEC_PARAM_PAGES 3
+
+/*
+ * Check if the NAND chip is JEDEC compliant, returns 1 if it is, 0 otherwise.
+ */
+int nand_jedec_detect(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ struct nand_jedec_params *p;
+ struct jedec_ecc_info *ecc;
+ bool use_datain = false;
+ int jedec_version = 0;
+ char id[5];
+ int i, val, ret;
+ u16 crc;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Try JEDEC for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x40, id, sizeof(id));
+ if (ret || strncmp(id, "JEDEC", sizeof(id)))
+ return 0;
+
+ /* JEDEC chip: allocate a buffer to hold its parameter page */
+ p = kzalloc(sizeof(*p), GFP_KERNEL);
+ if (!p)
+ return -ENOMEM;
+
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, p, sizeof(*p), true, true))
+ use_datain = true;
+
+ for (i = 0; i < JEDEC_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0x40, p,
+ sizeof(*p));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, p, sizeof(*p), true,
+ false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*p) * i,
+ p, sizeof(*p), true);
+ if (ret) {
+ ret = 0;
+ goto free_jedec_param_page;
+ }
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)p, 510);
+ if (crc == le16_to_cpu(p->crc))
+ break;
+ }
+
+ if (i == JEDEC_PARAM_PAGES) {
+ pr_err("Could not find valid JEDEC parameter page; aborting\n");
+ goto free_jedec_param_page;
+ }
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & (1 << 2))
+ jedec_version = 10;
+ else if (val & (1 << 1))
+ jedec_version = 1; /* vendor specific version */
+
+ if (!jedec_version) {
+ pr_info("unsupported JEDEC version: %d\n", val);
+ goto free_jedec_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = strdup(p->model);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_jedec_param_page;
+ }
+
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
+
+ /* Please refer to the comment for nand_flash_detect_onfi. */
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->multi_plane_addr;
+
+ /* Please refer to the comment for nand_flash_detect_onfi. */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & JEDEC_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ /* ECC info */
+ ecc = &p->ecc_info[0];
+
+ if (ecc->codeword_size >= 9) {
+ chip->base.eccreq.strength = ecc->ecc_bits;
+ chip->base.eccreq.step_size = 1 << ecc->codeword_size;
+ } else {
+ pr_warn("Invalid codeword size\n");
+ }
+
+ ret = 1;
+
+free_jedec_param_page:
+ kfree(p);
+ return ret;
+}
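The detection loop above is easier to follow with the CRC step isolated: up to three copies of the parameter page are read, and a copy is only accepted when the CRC-16 over its first 510 bytes matches the stored value. Here is a self-contained sketch of that check, assuming the ONFI polynomial 0x8005 and seed 0x4F4E used by onfi_crc16(); the buffer contents are made up.

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* ONFI CRC-16: polynomial 0x8005, seed 0x4F4E, MSB first */
static uint16_t crc16_onfi(uint16_t crc, const uint8_t *p, size_t len)
{
	while (len--) {
		int i;

		crc ^= (uint16_t)*p++ << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}

	return crc;
}

int main(void)
{
	uint8_t page[512];
	uint16_t crc, stored;

	/* fake parameter page: 510 payload bytes plus 2 CRC bytes (LE) */
	memset(page, 0xa5, sizeof(page));
	crc = crc16_onfi(0x4F4E, page, 510);
	page[510] = crc & 0xff;
	page[511] = crc >> 8;

	/* the per-copy validation done in nand_jedec_detect() */
	stored = page[510] | (page[511] << 8);
	printf("crc %s\n", crc16_onfi(0x4F4E, page, 510) == stored ?
	       "ok" : "mismatch");
	return 0;
}

On real hardware the redundant copies may come back corrupted, which is why the loop retries via nand_change_read_column_op() rather than giving up after the first bad CRC.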
diff --git a/drivers/mtd/nand/nand_legacy.c b/drivers/mtd/nand/nand_legacy.c
new file mode 100644
index 0000000000..0fcafe38f9
--- /dev/null
+++ b/drivers/mtd/nand/nand_legacy.c
@@ -0,0 +1,629 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill at realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx at linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all legacy helpers/code that should be removed
+ * at some point.
+ */
+
+#include <common.h>
+#include <errno.h>
+#include <clock.h>
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/nand_bch.h>
+#include <linux/mtd/nand_ecc.h>
+#include <linux/err.h>
+#include <asm/byteorder.h>
+#include <io.h>
+#include <malloc.h>
+#include <module.h>
+
+#include "internals.h"
+
+/**
+ * nand_read_byte - [DEFAULT] read one byte from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 8bit buswidth
+ */
+static uint8_t nand_read_byte(struct nand_chip *chip)
+{
+ return readb(chip->legacy.IO_ADDR_R);
+}
+
+/**
+ * nand_read_byte16 - [DEFAULT] read one byte endianness aware from the chip
+ * @chip: NAND chip object
+ *
+ * Default read function for 16bit buswidth with endianness conversion.
+ *
+ */
+static uint8_t nand_read_byte16(struct nand_chip *chip)
+{
+ return (uint8_t) cpu_to_le16(readw(chip->legacy.IO_ADDR_R));
+}
+
+/**
+ * nand_select_chip - [DEFAULT] control CE line
+ * @chip: NAND chip object
+ * @chipnr: chipnumber to select, -1 for deselect
+ *
+ * Default select function for 1 chip devices.
+ */
+static void nand_select_chip(struct nand_chip *chip, int chipnr)
+{
+ switch (chipnr) {
+ case -1:
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ 0 | NAND_CTRL_CHANGE);
+ break;
+ case 0:
+ break;
+
+ default:
+ BUG();
+ }
+}
+
+/**
+ * nand_write_byte - [DEFAULT] write single byte to chip
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0]
+ */
+static void nand_write_byte(struct nand_chip *chip, uint8_t byte)
+{
+ chip->legacy.write_buf(chip, &byte, 1);
+}
+
+/**
+ * nand_write_byte16 - [DEFAULT] write single byte to a chip with width 16
+ * @chip: NAND chip object
+ * @byte: value to write
+ *
+ * Default function to write a byte to I/O[7:0] on a 16-bit wide chip.
+ */
+static void nand_write_byte16(struct nand_chip *chip, uint8_t byte)
+{
+ uint16_t word = byte;
+
+ /*
+ * It's not entirely clear what should happen to I/O[15:8] when writing
+ * a byte. The ONFi spec (Revision 3.1; 2012-09-19, Section 2.16) reads:
+ *
+ * When the host supports a 16-bit bus width, only data is
+ * transferred at the 16-bit width. All address and command line
+ * transfers shall use only the lower 8-bits of the data bus. During
+ * command transfers, the host may place any value on the upper
+ * 8-bits of the data bus. During address transfers, the host shall
+ * set the upper 8-bits of the data bus to 00h.
+ *
+ * One user of the write_byte callback is nand_set_features. The
+ * four parameters are specified to be written to I/O[7:0], but this is
+ * neither an address nor a command transfer. Let's assume a 0 on the
+ * upper I/O lines is OK.
+ */
+ chip->legacy.write_buf(chip, (uint8_t *)&word, 2);
+}
+
+/**
+ * nand_write_buf - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 8bit buswidth.
+ */
+static void nand_write_buf(struct nand_chip *chip, const uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ writeb(buf[i], chip->legacy.IO_ADDR_W);
+}
+
+/**
+ * nand_read_buf - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 8bit buswidth.
+ */
+static void nand_read_buf(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ int i;
+
+ for (i = 0; i < len; i++)
+ buf[i] = readb(chip->legacy.IO_ADDR_R);
+}
+
+/**
+ * nand_write_buf16 - [DEFAULT] write buffer to chip
+ * @chip: NAND chip object
+ * @buf: data buffer
+ * @len: number of bytes to write
+ *
+ * Default write function for 16bit buswidth.
+ */
+static void nand_write_buf16(struct nand_chip *chip, const uint8_t *buf,
+ int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ writew(p[i], chip->legacy.IO_ADDR_W);
+}
+
+/**
+ * nand_read_buf16 - [DEFAULT] read chip data into buffer
+ * @chip: NAND chip object
+ * @buf: buffer to store data
+ * @len: number of bytes to read
+ *
+ * Default read function for 16bit buswidth.
+ */
+static void nand_read_buf16(struct nand_chip *chip, uint8_t *buf, int len)
+{
+ int i;
+ u16 *p = (u16 *) buf;
+ len >>= 1;
+
+ for (i = 0; i < len; i++)
+ p[i] = readw(chip->legacy.IO_ADDR_R);
+}
+
+/**
+ * nand_wait_ready - [GENERIC] Wait for the ready pin after commands.
+ * @chip: NAND chip object
+ *
+ * Wait for the ready pin after a command, and warn if a timeout occurs.
+ */
+void nand_wait_ready(struct nand_chip *chip)
+{
+ uint64_t start = get_time_ns();
+
+ /* Wait until command is processed or timeout occurs */
+ do {
+ if (chip->legacy.dev_ready(chip))
+ return;
+ } while (!is_timeout(start, 400 * MSECOND));
+
+ if (!chip->legacy.dev_ready(chip))
+ pr_warn("timeout while waiting for chip to become ready\n");
+}
+EXPORT_SYMBOL_GPL(nand_wait_ready);
+
+/**
+ * nand_wait_status_ready - [GENERIC] Wait for the ready status after commands.
+ * @chip: NAND chip object
+ * @timeo: Timeout in ms
+ *
+ * Wait for status ready (i.e. command done) or timeout.
+ */
+static void nand_wait_status_ready(struct nand_chip *chip, unsigned long timeo)
+{
+ uint64_t start = get_time_ns();
+ int ret;
+
+ do {
+ u8 status;
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true,
+ false);
+ if (ret)
+ return;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ } while (!is_timeout(start, timeo * MSECOND));
+}
+
+/**
+ * nand_command - [DEFAULT] Send command to NAND device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This function is used for small page devices
+ * (512 Bytes per page).
+ */
+static void nand_command(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ctrl = NAND_CTRL_CLE | NAND_CTRL_CHANGE;
+
+ /* Write out the command to the device */
+ if (command == NAND_CMD_SEQIN) {
+ int readcmd;
+
+ if (column >= mtd->writesize) {
+ /* OOB area */
+ column -= mtd->writesize;
+ readcmd = NAND_CMD_READOOB;
+ } else if (column < 256) {
+ /* First 256 bytes --> READ0 */
+ readcmd = NAND_CMD_READ0;
+ } else {
+ column -= 256;
+ readcmd = NAND_CMD_READ1;
+ }
+ chip->legacy.cmd_ctrl(chip, readcmd, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command, ctrl);
+
+ /* Address cycle, when necessary */
+ ctrl = NAND_CTRL_ALE | NAND_CTRL_CHANGE;
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8, ctrl);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16, ctrl);
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and sequential
+ * in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_CTRL_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(chip, 250);
+ return;
+
+ /* This applies to read commands */
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that we should not wait for the
+ * device to be ready.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+static void nand_ccs_delay(struct nand_chip *chip)
+{
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+
+ /*
+ * The controller already takes care of waiting for tCCS when the RNDIN
+ * or RNDOUT command is sent, return directly.
+ */
+ if (!(chip->options & NAND_WAIT_TCCS))
+ return;
+
+ /*
+ * Wait tCCS_min if it is correctly defined, otherwise wait 500ns
+ * (which should be safe for all NANDs).
+ */
+ if (nand_controller_can_setup_interface(chip))
+ ndelay(sdr->tCCS_min / 1000);
+ else
+ ndelay(500);
+}
+
+/**
+ * nand_command_lp - [DEFAULT] Send command to NAND large page device
+ * @chip: NAND chip object
+ * @command: the command to be sent
+ * @column: the column address for this command, -1 if none
+ * @page_addr: the page address for this command, -1 if none
+ *
+ * Send command to NAND device. This is the version for the new large page
+ * devices. We don't have the separate regions as we have in the small page
+ * devices. We must emulate NAND_CMD_READOOB to keep the code compatible.
+ */
+static void nand_command_lp(struct nand_chip *chip, unsigned int command,
+ int column, int page_addr)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Emulate NAND_CMD_READOOB */
+ if (command == NAND_CMD_READOOB) {
+ column += mtd->writesize;
+ command = NAND_CMD_READ0;
+ }
+
+ /* Command latch cycle */
+ if (command != NAND_CMD_NONE)
+ chip->legacy.cmd_ctrl(chip, command,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+
+ if (column != -1 || page_addr != -1) {
+ int ctrl = NAND_CTRL_CHANGE | NAND_NCE | NAND_ALE;
+
+ /* Serially input address */
+ if (column != -1) {
+ /* Adjust columns for 16 bit buswidth */
+ if (chip->options & NAND_BUSWIDTH_16 &&
+ !nand_opcode_8bits(command))
+ column >>= 1;
+ chip->legacy.cmd_ctrl(chip, column, ctrl);
+ ctrl &= ~NAND_CTRL_CHANGE;
+
+ /* Only output a single addr cycle for 8bits opcodes. */
+ if (!nand_opcode_8bits(command))
+ chip->legacy.cmd_ctrl(chip, column >> 8, ctrl);
+ }
+ if (page_addr != -1) {
+ chip->legacy.cmd_ctrl(chip, page_addr, ctrl);
+ chip->legacy.cmd_ctrl(chip, page_addr >> 8,
+ NAND_NCE | NAND_ALE);
+ if (chip->options & NAND_ROW_ADDR_3)
+ chip->legacy.cmd_ctrl(chip, page_addr >> 16,
+ NAND_NCE | NAND_ALE);
+ }
+ }
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ /*
+ * Program and erase have their own busy handlers; status and sequential
+ * in need no delay.
+ */
+ switch (command) {
+
+ case NAND_CMD_NONE:
+ case NAND_CMD_CACHEDPROG:
+ case NAND_CMD_PAGEPROG:
+ case NAND_CMD_ERASE1:
+ case NAND_CMD_ERASE2:
+ case NAND_CMD_SEQIN:
+ case NAND_CMD_STATUS:
+ case NAND_CMD_READID:
+ case NAND_CMD_SET_FEATURES:
+ return;
+
+ case NAND_CMD_RNDIN:
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_RESET:
+ if (chip->legacy.dev_ready)
+ break;
+ udelay(chip->legacy.chip_delay);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_STATUS,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ /* EZ-NAND can take up to 250ms as per ONFi v4.0 */
+ nand_wait_status_ready(chip, 250);
+ return;
+
+ case NAND_CMD_RNDOUT:
+ /* No ready / busy check necessary */
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_RNDOUTSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+
+ nand_ccs_delay(chip);
+ return;
+
+ case NAND_CMD_READ0:
+ /*
+ * READ0 is sometimes used to exit GET STATUS mode. When this
+ * is the case no address cycles are requested, and we can use
+ * this information to detect that READSTART should not be
+ * issued.
+ */
+ if (column == -1 && page_addr == -1)
+ return;
+
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_READSTART,
+ NAND_NCE | NAND_CLE | NAND_CTRL_CHANGE);
+ chip->legacy.cmd_ctrl(chip, NAND_CMD_NONE,
+ NAND_NCE | NAND_CTRL_CHANGE);
+ default:
+ /*
+ * If we don't have access to the busy pin, we apply the given
+ * command delay.
+ */
+ if (!chip->legacy.dev_ready) {
+ udelay(chip->legacy.chip_delay);
+ return;
+ }
+ }
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in
+ * any case on any machine.
+ */
+ ndelay(100);
+
+ nand_wait_ready(chip);
+}
+
+/**
+ * nand_get_set_features_notsupp - set/get features stub returning -ENOTSUPP
+ * @chip: nand chip info structure
+ * @addr: feature address.
+ * @subfeature_param: the subfeature parameters, a four bytes array.
+ *
+ * Should be used by NAND controller drivers that do not support the SET/GET
+ * FEATURES operations.
+ */
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param)
+{
+ return -ENOTSUPP;
+}
+EXPORT_SYMBOL(nand_get_set_features_notsupp);
+
+/**
+ * nand_wait - [DEFAULT] wait until the command is done
+ * @chip: NAND chip structure
+ *
+ * Wait for command done. This applies to erase and program only.
+ */
+static int nand_wait(struct nand_chip *chip)
+{
+
+ u8 status;
+ int ret;
+ uint64_t start = get_time_ns();
+
+ /*
+ * Apply this short delay always to ensure that we do wait tWB in any
+ * case on any machine.
+ */
+ ndelay(100);
+
+ ret = nand_status_op(chip, NULL);
+ if (ret)
+ return ret;
+
+ do {
+ if (chip->legacy.dev_ready) {
+ if (chip->legacy.dev_ready(chip))
+ break;
+ } else {
+ ret = nand_read_data_op(chip, &status,
+ sizeof(status), true,
+ false);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_READY)
+ break;
+ }
+ } while (!is_timeout(start, 400 * MSECOND));
+
+ ret = nand_read_data_op(chip, &status, sizeof(status), true, false);
+ if (ret)
+ return ret;
+
+ /* This can happen in case of a timeout or a buggy dev_ready */
+ WARN_ON(!(status & NAND_STATUS_READY));
+ return status;
+}
+
+void nand_legacy_set_defaults(struct nand_chip *chip)
+{
+ unsigned int busw = chip->options & NAND_BUSWIDTH_16;
+
+ if (nand_has_exec_op(chip))
+ return;
+
+ /* check for proper chip_delay setup, set 20us if not */
+ if (!chip->legacy.chip_delay)
+ chip->legacy.chip_delay = 20;
+
+ /* check if a user-supplied command function was given */
+ if (!chip->legacy.cmdfunc)
+ chip->legacy.cmdfunc = nand_command;
+
+ /* check if a user-supplied wait function was given */
+ if (chip->legacy.waitfunc == NULL)
+ chip->legacy.waitfunc = nand_wait;
+
+ if (!chip->legacy.select_chip)
+ chip->legacy.select_chip = nand_select_chip;
+
+ /* If called twice, pointers that depend on busw may need to be reset */
+ if (!chip->legacy.read_byte || chip->legacy.read_byte == nand_read_byte)
+ chip->legacy.read_byte = busw ? nand_read_byte16 : nand_read_byte;
+ if (!chip->legacy.write_buf || chip->legacy.write_buf == nand_write_buf)
+ chip->legacy.write_buf = busw ? nand_write_buf16 : nand_write_buf;
+ if (!chip->legacy.write_byte || chip->legacy.write_byte == nand_write_byte)
+ chip->legacy.write_byte = busw ? nand_write_byte16 : nand_write_byte;
+ if (!chip->legacy.read_buf || chip->legacy.read_buf == nand_read_buf)
+ chip->legacy.read_buf = busw ? nand_read_buf16 : nand_read_buf;
+}
+
+void nand_legacy_adjust_cmdfunc(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /* Do not replace user supplied command function! */
+ if (mtd->writesize > 512 && chip->legacy.cmdfunc == nand_command)
+ chip->legacy.cmdfunc = nand_command_lp;
+}
+
+int nand_legacy_check_hooks(struct nand_chip *chip)
+{
+ /*
+ * ->legacy.cmdfunc() is legacy and will only be used if ->exec_op() is
+ * not populated.
+ */
+ if (nand_has_exec_op(chip))
+ return 0;
+
+ /*
+ * Default functions assigned for ->legacy.cmdfunc() and
+ * ->legacy.select_chip() both expect ->legacy.cmd_ctrl() to be
+ * populated.
+ */
+ if ((!chip->legacy.cmdfunc || !chip->legacy.select_chip) &&
+ !chip->legacy.cmd_ctrl) {
+ pr_err("->legacy.cmd_ctrl() should be provided\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
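Both nand_wait_ready() and nand_wait() above use the same barebox idiom: poll a ready predicate against a get_time_ns()/is_timeout() deadline and only complain once the budget is spent. A rough user-space approximation of that loop follows, assuming the 400 ms budget of nand_wait_ready() and an invented ready() stub.

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

/* stand-in for chip->legacy.dev_ready(); becomes ready after some polls */
static bool dev_ready(void)
{
	static int calls;

	return ++calls > 1000;
}

static void wait_ready(long timeout_ms)
{
	clock_t start = clock();
	clock_t budget = (clock_t)timeout_ms * CLOCKS_PER_SEC / 1000;

	/* poll until ready or until the deadline passes */
	do {
		if (dev_ready())
			return;
	} while (clock() - start < budget);

	if (!dev_ready())
		fprintf(stderr, "timeout waiting for ready\n");
}

int main(void)
{
	wait_ready(400);
	printf("done\n");
	return 0;
}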
diff --git a/drivers/mtd/nand/nand_macronix.c b/drivers/mtd/nand/nand_macronix.c
new file mode 100644
index 0000000000..bfedc789fb
--- /dev/null
+++ b/drivers/mtd/nand/nand_macronix.c
@@ -0,0 +1,334 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon at free-electrons.com>
+ */
+
+#include <linux/bitmap.h>
+#include "internals.h"
+
+#define MACRONIX_READ_RETRY_BIT BIT(0)
+#define MACRONIX_NUM_READ_RETRY_MODES 6
+
+#define ONFI_FEATURE_ADDR_MXIC_PROTECTION 0xA0
+#define MXIC_BLOCK_PROTECTION_ALL_LOCK 0x38
+#define MXIC_BLOCK_PROTECTION_ALL_UNLOCK 0x0
+
+#define ONFI_FEATURE_ADDR_MXIC_RANDOMIZER 0xB0
+#define MACRONIX_RANDOMIZER_BIT BIT(1)
+#define MACRONIX_RANDOMIZER_ENPGM BIT(0)
+#define MACRONIX_RANDOMIZER_RANDEN BIT(1)
+#define MACRONIX_RANDOMIZER_RANDOPT BIT(2)
+#define MACRONIX_RANDOMIZER_MODE_ENTER \
+ (MACRONIX_RANDOMIZER_ENPGM | \
+ MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+#define MACRONIX_RANDOMIZER_MODE_EXIT \
+ (MACRONIX_RANDOMIZER_RANDEN | \
+ MACRONIX_RANDOMIZER_RANDOPT)
+
+#define MXIC_CMD_POWER_DOWN 0xB9
+
+struct nand_onfi_vendor_macronix {
+ u8 reserved;
+ u8 reliability_func;
+} __packed;
+
+static int macronix_nand_setup_read_retry(struct nand_chip *chip, int mode)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+
+ if (!chip->parameters.supports_set_get_features ||
+ !test_bit(ONFI_FEATURE_ADDR_READ_RETRY,
+ chip->parameters.set_feature_list))
+ return -ENOTSUPP;
+
+ feature[0] = mode;
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
+static int macronix_nand_randomizer_check_enable(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ if (feature[0])
+ return feature[0];
+
+ feature[0] = MACRONIX_RANDOMIZER_MODE_ENTER;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ /* RANDEN and RANDOPT OTP bits are programmed */
+ feature[0] = 0x0;
+ ret = nand_prog_page_op(chip, 0, 0, feature, 1);
+ if (ret < 0)
+ return ret;
+
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ feature[0] &= MACRONIX_RANDOMIZER_MODE_EXIT;
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ feature);
+ if (ret < 0)
+ return ret;
+
+ return 0;
+}
+
+static void macronix_nand_onfi_init(struct nand_chip *chip)
+{
+ struct nand_parameters *p = &chip->parameters;
+ struct nand_onfi_vendor_macronix *mxic;
+ struct device_node *dn = nand_get_flash_node(chip);
+ int rand_otp = 0;
+ int ret;
+
+ if (!p->onfi)
+ return;
+
+ if (of_find_property(dn, "mxic,enable-randomizer-otp", NULL))
+ rand_otp = 1;
+
+ mxic = (struct nand_onfi_vendor_macronix *)p->onfi->vendor;
+ /* Subpage write is prohibited in randomizer operation */
+ if (rand_otp && chip->options & NAND_NO_SUBPAGE_WRITE &&
+ mxic->reliability_func & MACRONIX_RANDOMIZER_BIT) {
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER, 1);
+ ret = macronix_nand_randomizer_check_enable(chip);
+ if (ret < 0) {
+ bitmap_clear(p->set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ bitmap_clear(p->get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_RANDOMIZER,
+ 1);
+ pr_info("Macronix NAND randomizer failed\n");
+ } else {
+ pr_info("Macronix NAND randomizer enabled\n");
+ }
+ }
+ }
+
+ if ((mxic->reliability_func & MACRONIX_READ_RETRY_BIT) == 0)
+ return;
+
+ chip->read_retries = MACRONIX_NUM_READ_RETRY_MODES;
+ chip->ops.setup_read_retry = macronix_nand_setup_read_retry;
+
+ if (p->supports_set_get_features) {
+ bitmap_set(p->set_feature_list,
+ ONFI_FEATURE_ADDR_READ_RETRY, 1);
+ bitmap_set(p->get_feature_list,
+ ONFI_FEATURE_ADDR_READ_RETRY, 1);
+ }
+}
+
+/*
+ * The Macronix AC series does not support using SET/GET_FEATURES to
+ * change the timings, unlike what is declared in the parameter page.
+ * Unflag this feature to avoid unnecessary downturns.
+ */
+static void macronix_nand_fix_broken_get_timings(struct nand_chip *chip)
+{
+ int i;
+ static const char * const broken_get_timings[] = {
+ "MX30LF1G18AC",
+ "MX30LF1G28AC",
+ "MX30LF2G18AC",
+ "MX30LF2G28AC",
+ "MX30LF4G18AC",
+ "MX30LF4G28AC",
+ "MX60LF8G18AC",
+ "MX30UF1G18AC",
+ "MX30UF1G16AC",
+ "MX30UF2G18AC",
+ "MX30UF2G16AC",
+ "MX30UF4G18AC",
+ "MX30UF4G16AC",
+ "MX30UF4G28AC",
+ };
+
+ if (!chip->parameters.supports_set_get_features)
+ return;
+
+ i = match_string(broken_get_timings, ARRAY_SIZE(broken_get_timings),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_clear(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+}
+
+/*
+ * Macronix NAND supports Block Protection by a Protection (PT) pin;
+ * it is active high at power-on and protects the entire chip even when
+ * #WP is disabled. The protection area to lock/unlock can be partitioned
+ * according to the protection bits, i.e. upper 1/2, upper 1/4 and so on.
+ */
+static int mxic_nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_LOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static int mxic_nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_set_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret)
+ pr_err("%s all blocks failed\n", __func__);
+
+ return ret;
+}
+
+static void macronix_nand_block_protection_support(struct nand_chip *chip)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN];
+ int ret;
+
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ feature[0] = MXIC_BLOCK_PROTECTION_ALL_UNLOCK;
+ nand_select_target(chip, 0);
+ ret = nand_get_features(chip, ONFI_FEATURE_ADDR_MXIC_PROTECTION,
+ feature);
+ nand_deselect_target(chip);
+ if (ret || feature[0] != MXIC_BLOCK_PROTECTION_ALL_LOCK) {
+ if (ret)
+ pr_err("Block protection check failed\n");
+
+ bitmap_clear(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+ return;
+ }
+
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_MXIC_PROTECTION, 1);
+
+ chip->ops.lock_area = mxic_nand_lock;
+ chip->ops.unlock_area = mxic_nand_unlock;
+}
+
+static int nand_power_down_op(struct nand_chip *chip)
+{
+ int ret;
+
+ if (nand_has_exec_op(chip)) {
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(MXIC_CMD_POWER_DOWN, 0),
+ };
+
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ ret = nand_exec_op(chip, &op);
+ if (ret)
+ return ret;
+
+ } else {
+ chip->legacy.cmdfunc(chip, MXIC_CMD_POWER_DOWN, -1, -1);
+ }
+
+ return 0;
+}
+
+static int mxic_nand_suspend(struct nand_chip *chip)
+{
+ int ret;
+
+ nand_select_target(chip, 0);
+ ret = nand_power_down_op(chip);
+ if (ret < 0)
+ pr_err("Suspending MXIC NAND chip failed (%d)\n", ret);
+ nand_deselect_target(chip);
+
+ return ret;
+}
+
+static void mxic_nand_resume(struct nand_chip *chip)
+{
+ /*
+ * Toggle the #CS pin to resume the NAND device, regardless of
+ * the state of the other pins (CLE, #WE, #RE).
+ * A NAND controller ensures it is able to assert/de-assert #CS
+ * by sending any byte over the NAND bus, e.g. a
+ * NAND power-down command or a reset command without R/B#
+ * status checking.
+ */
+ nand_select_target(chip, 0);
+ nand_power_down_op(chip);
+ /* The minimum recovery time tRDP is 35 us */
+ udelay(35);
+ nand_deselect_target(chip);
+}
+
+static void macronix_nand_deep_power_down_support(struct nand_chip *chip)
+{
+ int i;
+ static const char * const deep_power_down_dev[] = {
+ "MX30UF1G28AD",
+ "MX30UF2G28AD",
+ "MX30UF4G28AD",
+ };
+
+ i = match_string(deep_power_down_dev, ARRAY_SIZE(deep_power_down_dev),
+ chip->parameters.model);
+ if (i < 0)
+ return;
+
+ chip->ops.suspend = mxic_nand_suspend;
+ chip->ops.resume = mxic_nand_resume;
+}
+
+static int macronix_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ macronix_nand_fix_broken_get_timings(chip);
+ macronix_nand_onfi_init(chip);
+ macronix_nand_block_protection_support(chip);
+ macronix_nand_deep_power_down_support(chip);
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops macronix_nand_manuf_ops = {
+ .init = macronix_nand_init,
+};
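Several hooks in this file (the broken GET_FEATURES timings and the deep power-down support) gate themselves on the detected model string via a static quirk table and match_string(). A compact stand-alone version of that step is shown below; the model names are copied from the table above, and the match_string() here is a minimal reimplementation for the sketch, not the kernel helper.

#include <stdio.h>
#include <string.h>

static const char * const deep_power_down_dev[] = {
	"MX30UF1G28AD",
	"MX30UF2G28AD",
	"MX30UF4G28AD",
};

/* minimal stand-in for the kernel's match_string() helper */
static int match_string(const char * const *arr, size_t n, const char *s)
{
	size_t i;

	for (i = 0; i < n; i++)
		if (!strcmp(arr[i], s))
			return (int)i;

	return -1;
}

int main(void)
{
	const char *model = "MX30UF2G28AD";
	int i = match_string(deep_power_down_dev, 3, model);

	if (i >= 0)
		printf("%s: install suspend/resume hooks\n", model);
	else
		printf("%s: no deep power-down quirk\n", model);
	return 0;
}

Only when a model matches does the init hook install the suspend/resume ops or clear the affected feature-bitmap bits.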
diff --git a/drivers/mtd/nand/nand_micron.c b/drivers/mtd/nand/nand_micron.c
new file mode 100644
index 0000000000..d59be7ca7b
--- /dev/null
+++ b/drivers/mtd/nand/nand_micron.c
@@ -0,0 +1,595 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon at free-electrons.com>
+ */
+
+#include <common.h>
+#include <linux/slab.h>
+
+#include "internals.h"
+
+/*
+ * Special Micron status bit 3 indicates that the block has been
+ * corrected by on-die ECC and should be rewritten.
+ */
+#define NAND_ECC_STATUS_WRITE_RECOMMENDED BIT(3)
+
+/*
+ * On chips with 8-bit ECC, an additional bit can be used to distinguish
+ * cases where errors were corrected without needing a rewrite.
+ *
+ * Bit 4 Bit 3 Bit 0 Description
+ * ----- ----- ----- -----------
+ * 0 0 0 No Errors
+ * 0 0 1 Multiple uncorrected errors
+ * 0 1 0 4 - 6 errors corrected, recommend rewrite
+ * 0 1 1 Reserved
+ * 1 0 0 1 - 3 errors corrected
+ * 1 0 1 Reserved
+ * 1 1 0 7 - 8 errors corrected, recommend rewrite
+ */
+#define NAND_ECC_STATUS_MASK (BIT(4) | BIT(3) | BIT(0))
+#define NAND_ECC_STATUS_UNCORRECTABLE BIT(0)
+#define NAND_ECC_STATUS_4_6_CORRECTED BIT(3)
+#define NAND_ECC_STATUS_1_3_CORRECTED BIT(4)
+#define NAND_ECC_STATUS_7_8_CORRECTED (BIT(4) | BIT(3))
+
+struct nand_onfi_vendor_micron {
+ u8 two_plane_read;
+ u8 read_cache;
+ u8 read_unique_id;
+ u8 dq_imped;
+ u8 dq_imped_num_settings;
+ u8 dq_imped_feat_addr;
+ u8 rb_pulldown_strength;
+ u8 rb_pulldown_strength_feat_addr;
+ u8 rb_pulldown_strength_num_settings;
+ u8 otp_mode;
+ u8 otp_page_start;
+ u8 otp_data_prot_addr;
+ u8 otp_num_pages;
+ u8 otp_feat_addr;
+ u8 read_retry_options;
+ u8 reserved[72];
+ u8 param_revision;
+} __packed;
+
+struct micron_on_die_ecc {
+ bool forced;
+ bool enabled;
+ void *rawbuf;
+};
+
+struct micron_nand {
+ struct micron_on_die_ecc ecc;
+};
+
+static int micron_nand_setup_read_retry(struct nand_chip *chip, int retry_mode)
+{
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = {retry_mode};
+
+ return nand_set_features(chip, ONFI_FEATURE_ADDR_READ_RETRY, feature);
+}
+
+/*
+ * Configure chip properties from Micron vendor-specific ONFI table
+ */
+static int micron_nand_onfi_init(struct nand_chip *chip)
+{
+ struct nand_parameters *p = &chip->parameters;
+
+ if (p->onfi) {
+ struct nand_onfi_vendor_micron *micron = (void *)p->onfi->vendor;
+
+ chip->read_retries = micron->read_retry_options;
+ chip->ops.setup_read_retry = micron_nand_setup_read_retry;
+ }
+
+ if (p->supports_set_get_features) {
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->set_feature_list);
+ set_bit(ONFI_FEATURE_ADDR_READ_RETRY, p->get_feature_list);
+ set_bit(ONFI_FEATURE_ON_DIE_ECC, p->get_feature_list);
+ }
+
+ return 0;
+}
+
+static int micron_nand_on_die_4_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 8;
+ oobregion->length = 8;
+
+ return 0;
+}
+
+static int micron_nand_on_die_4_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section >= 4)
+ return -ERANGE;
+
+ oobregion->offset = (section * 16) + 2;
+ oobregion->length = 6;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_4_ooblayout_ops = {
+ .ecc = micron_nand_on_die_4_ooblayout_ecc,
+ .free = micron_nand_on_die_4_ooblayout_free,
+};
+
+static int micron_nand_on_die_8_ooblayout_ecc(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = mtd->oobsize - chip->ecc.total;
+ oobregion->length = chip->ecc.total;
+
+ return 0;
+}
+
+static int micron_nand_on_die_8_ooblayout_free(struct mtd_info *mtd,
+ int section,
+ struct mtd_oob_region *oobregion)
+{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
+ if (section)
+ return -ERANGE;
+
+ oobregion->offset = 2;
+ oobregion->length = mtd->oobsize - chip->ecc.total - 2;
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops micron_nand_on_die_8_ooblayout_ops = {
+ .ecc = micron_nand_on_die_8_ooblayout_ecc,
+ .free = micron_nand_on_die_8_ooblayout_free,
+};
+
+static int micron_nand_on_die_ecc_setup(struct nand_chip *chip, bool enable)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ u8 feature[ONFI_SUBFEATURE_PARAM_LEN] = { 0, };
+ int ret;
+
+ if (micron->ecc.forced)
+ return 0;
+
+ if (micron->ecc.enabled == enable)
+ return 0;
+
+ if (enable)
+ feature[0] |= ONFI_FEATURE_ON_DIE_ECC_EN;
+
+ ret = nand_set_features(chip, ONFI_FEATURE_ON_DIE_ECC, feature);
+ if (!ret)
+ micron->ecc.enabled = enable;
+
+ return ret;
+}
+
+static int micron_nand_on_die_ecc_status_4(struct nand_chip *chip, u8 status,
+ void *buf, int page,
+ int oob_required)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ unsigned int step, max_bitflips = 0;
+ bool use_datain = false;
+ int ret;
+
+ if (!(status & NAND_ECC_STATUS_WRITE_RECOMMENDED)) {
+ if (status & NAND_STATUS_FAIL)
+ mtd->ecc_stats.failed++;
+
+ return 0;
+ }
+
+ /*
+ * The internal ECC doesn't tell us the number of bitflips that have
+ * been corrected, but tells us if it recommends rewriting the block.
+ * If that is the case, we need to read the page in raw mode and compare
+ * its content to the corrected version to extract the actual number of
+ * bitflips.
+ * But before we do that, we must make sure we have all OOB bytes read
+ * in non-raw mode, even if the user did not request those bytes.
+ */
+ if (!oob_required) {
+ /*
+ * We first check which operation is supported by the controller
+ * before running it. This trick makes it possible to support
+ * all controllers, even the most constrained ones, with almost
+ * no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check
+ * over and over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, chip->oob_poi, mtd->oobsize, false,
+ true))
+ use_datain = true;
+
+ if (use_datain)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ else
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ if (ret)
+ return ret;
+ }
+
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ ret = nand_read_page_op(chip, page, 0, micron->ecc.rawbuf,
+ mtd->writesize + mtd->oobsize);
+ if (ret)
+ return ret;
+
+ for (step = 0; step < chip->ecc.steps; step++) {
+ unsigned int offs, i, nbitflips = 0;
+ u8 *rawbuf, *corrbuf;
+
+ offs = step * chip->ecc.size;
+ rawbuf = micron->ecc.rawbuf + offs;
+ corrbuf = buf + offs;
+
+ for (i = 0; i < chip->ecc.size; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ offs = (step * 16) + 4;
+ rawbuf = micron->ecc.rawbuf + mtd->writesize + offs;
+ corrbuf = chip->oob_poi + offs;
+
+ for (i = 0; i < chip->ecc.bytes + 4; i++)
+ nbitflips += hweight8(corrbuf[i] ^ rawbuf[i]);
+
+ if (WARN_ON(nbitflips > chip->ecc.strength))
+ return -EINVAL;
+
+ max_bitflips = max(nbitflips, max_bitflips);
+ mtd->ecc_stats.corrected += nbitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int micron_nand_on_die_ecc_status_8(struct nand_chip *chip, u8 status)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * With 8/512 we have more information but still don't know precisely
+ * how many bit-flips were seen.
+ */
+ switch (status & NAND_ECC_STATUS_MASK) {
+ case NAND_ECC_STATUS_UNCORRECTABLE:
+ mtd->ecc_stats.failed++;
+ return 0;
+ case NAND_ECC_STATUS_1_3_CORRECTED:
+ mtd->ecc_stats.corrected += 3;
+ return 3;
+ case NAND_ECC_STATUS_4_6_CORRECTED:
+ mtd->ecc_stats.corrected += 6;
+ /* rewrite recommended */
+ return 6;
+ case NAND_ECC_STATUS_7_8_CORRECTED:
+ mtd->ecc_stats.corrected += 8;
+ /* rewrite recommended */
+ return 8;
+ default:
+ return 0;
+ }
+}
+
+static int
+micron_nand_read_page_on_die_ecc(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ bool use_datain = false;
+ u8 status;
+ int ret, max_bitflips = 0;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_read_page_op(chip, page, 0, NULL, 0);
+ if (ret)
+ goto out;
+
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ goto out;
+
+ /*
+ * We first check which operation is supported by the controller before
+ * running it. This trick makes it possible to support all controllers,
+ * even the most constrained ones, with almost no performance hit.
+ *
+ * TODO: could be enhanced to avoid repeating the same check over and
+ * over in the fast path.
+ */
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, buf, mtd->writesize, false, true))
+ use_datain = true;
+
+ if (use_datain) {
+ ret = nand_exit_status_op(chip);
+ if (ret)
+ goto out;
+
+ ret = nand_read_data_op(chip, buf, mtd->writesize, false,
+ false);
+ if (!ret && oob_required)
+ ret = nand_read_data_op(chip, chip->oob_poi,
+ mtd->oobsize, false, false);
+ } else {
+ ret = nand_change_read_column_op(chip, 0, buf, mtd->writesize,
+ false);
+ if (!ret && oob_required)
+ ret = nand_change_read_column_op(chip, mtd->writesize,
+ chip->oob_poi,
+ mtd->oobsize, false);
+ }
+
+ if (chip->ecc.strength == 4)
+ max_bitflips = micron_nand_on_die_ecc_status_4(chip, status,
+ buf, page,
+ oob_required);
+ else
+ max_bitflips = micron_nand_on_die_ecc_status_8(chip, status);
+
+out:
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return ret ? ret : max_bitflips;
+}
+
+static int
+micron_nand_write_page_on_die_ecc(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return ret;
+
+ ret = nand_write_page_raw(chip, buf, oob_required, page);
+ micron_nand_on_die_ecc_setup(chip, false);
+
+ return ret;
+}
+
+enum {
+ /* The NAND flash doesn't support on-die ECC */
+ MICRON_ON_DIE_UNSUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC and it can be
+ * enabled/disabled by a set features command.
+ */
+ MICRON_ON_DIE_SUPPORTED,
+
+ /*
+ * The NAND flash supports on-die ECC, and it cannot be
+ * disabled.
+ */
+ MICRON_ON_DIE_MANDATORY,
+};
+
+#define MICRON_ID_INTERNAL_ECC_MASK GENMASK(1, 0)
+#define MICRON_ID_ECC_ENABLED BIT(7)
+
+/*
+ * Try to detect if the NAND supports on-die ECC. To do this, we enable
+ * the feature, and read back if it has been enabled as expected. We
+ * also check if it can be disabled, because some Micron NANDs do not
+ * allow disabling the on-die ECC and we don't support such NANDs for
+ * now.
+ *
+ * This function also has the side effect of disabling on-die ECC if
+ * it had been left enabled by the firmware/bootloader.
+ */
+static int micron_supports_on_die_ecc(struct nand_chip *chip)
+{
+ u8 id[5];
+ int ret;
+
+ if (!chip->parameters.onfi)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (nanddev_bits_per_cell(&chip->base) != 1)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /* 0x2 means on-die ECC is available. */
+ if (chip->id.len != 5 ||
+ (chip->id.data[4] & MICRON_ID_INTERNAL_ECC_MASK) != 0x2)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ /*
+ * It seems that there are devices which do not support ECC officially.
+ * At least the MT29F2G08ABAGA / MT29F2G08ABBGA devices support
+ * enabling the ECC feature but don't reflect that in the READ_ID table.
+ * So we have to guarantee that we disable the ECC feature directly
+ * after we did the READ_ID table command. Later we can evaluate the
+ * ECC_ENABLE support.
+ */
+ ret = micron_nand_on_die_ecc_setup(chip, true);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = micron_nand_on_die_ecc_setup(chip, false);
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (!(id[4] & MICRON_ID_ECC_ENABLED))
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ ret = nand_readid_op(chip, 0, id, sizeof(id));
+ if (ret)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ if (id[4] & MICRON_ID_ECC_ENABLED)
+ return MICRON_ON_DIE_MANDATORY;
+
+ /*
+ * We only support on-die ECC of 4/512 or 8/512
+ */
+ if (chip->base.eccreq.strength != 4 && chip->base.eccreq.strength != 8)
+ return MICRON_ON_DIE_UNSUPPORTED;
+
+ return MICRON_ON_DIE_SUPPORTED;
+}
+
+static int micron_nand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct micron_nand *micron;
+ int ondie;
+ int ret;
+
+ micron = kzalloc(sizeof(*micron), GFP_KERNEL);
+ if (!micron)
+ return -ENOMEM;
+
+ nand_set_manufacturer_data(chip, micron);
+
+ ret = micron_nand_onfi_init(chip);
+ if (ret)
+ goto err_free_manuf_data;
+
+ chip->options |= NAND_BBM_FIRSTPAGE;
+
+ if (mtd->writesize == 2048)
+ chip->options |= NAND_BBM_SECONDPAGE;
+
+ ondie = micron_supports_on_die_ecc(chip);
+
+ if (ondie == MICRON_ON_DIE_MANDATORY &&
+ chip->ecc.mode != NAND_ECC_ON_DIE) {
+ pr_err("On-die ECC forcefully enabled, not supported\n");
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (chip->ecc.mode == NAND_ECC_ON_DIE) {
+ if (ondie == MICRON_ON_DIE_UNSUPPORTED) {
+ pr_err("On-die ECC selected but not supported\n");
+ ret = -EINVAL;
+ goto err_free_manuf_data;
+ }
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ micron->ecc.forced = true;
+ micron->ecc.enabled = true;
+ }
+
+ /*
+ * In case of 4-bit on-die ECC, we need a buffer to store a
+ * page dumped in raw mode so that we can compare its content
+ * to the same page after ECC correction happened and extract
+ * the real number of bitflips from this comparison.
+ * That's not needed for 8-bit ECC, because the status exposes
+ * a better approximation of the number of bitflips in a page.
+ */
+ if (chip->base.eccreq.strength == 4) {
+ micron->ecc.rawbuf = kmalloc(mtd->writesize +
+ mtd->oobsize,
+ GFP_KERNEL);
+ if (!micron->ecc.rawbuf) {
+ ret = -ENOMEM;
+ goto err_free_manuf_data;
+ }
+ }
+
+ if (chip->base.eccreq.strength == 4)
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_4_ooblayout_ops);
+ else
+ mtd_set_ooblayout(mtd,
+ &micron_nand_on_die_8_ooblayout_ops);
+
+ chip->ecc.bytes = chip->base.eccreq.strength * 2;
+ chip->ecc.size = 512;
+ chip->ecc.strength = chip->base.eccreq.strength;
+ chip->ecc.algo = NAND_ECC_BCH;
+ chip->ecc.read_page = micron_nand_read_page_on_die_ecc;
+ chip->ecc.write_page = micron_nand_write_page_on_die_ecc;
+
+ if (ondie == MICRON_ON_DIE_MANDATORY) {
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+ } else {
+ if (!chip->ecc.read_page_raw)
+ chip->ecc.read_page_raw = nand_read_page_raw;
+ if (!chip->ecc.write_page_raw)
+ chip->ecc.write_page_raw = nand_write_page_raw;
+ }
+ }
+
+ return 0;
+
+err_free_manuf_data:
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+
+ return ret;
+}
+
+static void micron_nand_cleanup(struct nand_chip *chip)
+{
+ struct micron_nand *micron = nand_get_manufacturer_data(chip);
+
+ kfree(micron->ecc.rawbuf);
+ kfree(micron);
+}
+
+static void micron_fixup_onfi_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ /*
+ * MT29F1G08ABAFAWP-ITE:F and possibly others report 00 00 for the
+ * revision number field of the ONFI parameter page. Assume ONFI
+ * version 1.0 if the revision number is 00 00.
+ */
+ if (le16_to_cpu(p->revision) == 0)
+ p->revision = cpu_to_le16(ONFI_VERSION_1_0);
+}
+
+const struct nand_manufacturer_ops micron_nand_manuf_ops = {
+ .init = micron_nand_init,
+ .cleanup = micron_nand_cleanup,
+ .fixup_onfi_param_page = micron_fixup_onfi_param_page,
+};
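The 8-bit on-die ECC read path relies entirely on the status-bit table quoted near the top of this file. Here is a small stand-alone decoder for those bits, mirroring what micron_nand_on_die_ecc_status_8() reports; the sample status value is arbitrary.

#include <stdio.h>

#define BIT(n)			(1u << (n))
#define ECC_STATUS_MASK		(BIT(4) | BIT(3) | BIT(0))
#define ECC_UNCORRECTABLE	BIT(0)
#define ECC_4_6_CORRECTED	BIT(3)
#define ECC_1_3_CORRECTED	BIT(4)
#define ECC_7_8_CORRECTED	(BIT(4) | BIT(3))

/* returns max bitflips to report, -1 for an uncorrectable page */
static int decode_status(unsigned int status)
{
	switch (status & ECC_STATUS_MASK) {
	case ECC_UNCORRECTABLE:
		return -1;
	case ECC_1_3_CORRECTED:
		return 3;
	case ECC_4_6_CORRECTED:
		return 6;	/* rewrite recommended */
	case ECC_7_8_CORRECTED:
		return 8;	/* rewrite recommended */
	default:
		return 0;	/* no errors */
	}
}

int main(void)
{
	unsigned int status = BIT(3);	/* 4-6 errors corrected */
	int flips = decode_status(status);

	if (flips < 0)
		printf("uncorrectable\n");
	else
		printf("up to %d bitflips corrected\n", flips);
	return 0;
}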
diff --git a/drivers/mtd/nand/nand_mrvl_nfc.c b/drivers/mtd/nand/nand_mrvl_nfc.c
index 892ce507a5..1f3e152375 100644
--- a/drivers/mtd/nand/nand_mrvl_nfc.c
+++ b/drivers/mtd/nand/nand_mrvl_nfc.c
@@ -20,6 +20,7 @@
#include <init.h>
#include <io.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <linux/types.h>
#include <linux/clk.h>
@@ -398,7 +399,7 @@ static int mrvl_nand_ready(struct nand_chip *chip)
* Thus, this function is only called when we want *all* blocks to look good,
* so it *always* return success.
*/
-static int mrvl_nand_block_bad(struct nand_chip *chip, loff_t ofs, int getchip)
+static int mrvl_nand_block_bad(struct nand_chip *chip, loff_t ofs)
{
return 0;
}
@@ -799,11 +800,13 @@ static void mrvl_nand_cmdfunc(struct nand_chip *chip, unsigned command,
* Returns 0
*/
static int mrvl_nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mrvl_nand_host *host = nand_to_host(chip);
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
memcpy(host->data_buff, buf, mtd->writesize);
if (oob_required)
memcpy(host->data_buff + mtd->writesize, chip->oob_poi,
@@ -812,7 +815,8 @@ static int mrvl_nand_write_page_hwecc(struct nand_chip *chip, const uint8_t *buf
memset(host->data_buff + mtd->writesize, 0xff, mtd->oobsize);
dev_dbg(host->dev, "%s(buf=%p, oob_required=%d) => 0\n",
__func__, buf, oob_required);
- return 0;
+
+ return nand_prog_page_end_op(chip);
}
static int mrvl_nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
@@ -823,6 +827,8 @@ static int mrvl_nand_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
u32 ndsr;
int ret = 0;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
chip->legacy.read_buf(chip, buf, mtd->writesize);
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
ndsr = nand_readl(host, NDSR);
@@ -920,6 +926,9 @@ static void mrvl_nand_config_flash(struct mrvl_nand_host *host)
static int pxa_ecc_strength1(struct mrvl_nand_host *host,
struct nand_ecc_ctrl *ecc, int ecc_stepsize, int page_size)
{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
if (ecc_stepsize == 512 && page_size == 2048) {
host->chunk_size = 2048;
host->spare_size = 40;
@@ -928,7 +937,7 @@ static int pxa_ecc_strength1(struct mrvl_nand_host *host,
ecc->mode = NAND_ECC_HW;
ecc->size = 512;
ecc->strength = 1;
- ecc->layout = &ecc_layout_2KB_hwecc;
+ mtd_set_ecclayout(mtd, &ecc_layout_2KB_hwecc);
return 0;
}
@@ -939,7 +948,7 @@ static int pxa_ecc_strength1(struct mrvl_nand_host *host,
host->ecc_bch = 0;
ecc->mode = NAND_ECC_HW;
ecc->size = 512;
- ecc->layout = &ecc_layout_512B_hwecc;
+ mtd_set_ecclayout(mtd, &ecc_layout_512B_hwecc);
ecc->strength = 1;
return 0;
}
@@ -950,6 +959,9 @@ static int pxa_ecc_strength1(struct mrvl_nand_host *host,
static int pxa_ecc_strength4(struct mrvl_nand_host *host,
struct nand_ecc_ctrl *ecc, int ecc_stepsize, int page_size)
{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
if (!(host->hwflags & HWFLAGS_ECC_BCH))
return -ENODEV;
@@ -964,7 +976,7 @@ static int pxa_ecc_strength4(struct mrvl_nand_host *host,
host->ecc_bch = 1;
ecc->mode = NAND_ECC_HW;
ecc->size = 2048;
- ecc->layout = &ecc_layout_2KB_bch4bit;
+ mtd_set_ecclayout(mtd, &ecc_layout_2KB_bch4bit);
ecc->strength = 16;
return 0;
}
@@ -976,7 +988,7 @@ static int pxa_ecc_strength4(struct mrvl_nand_host *host,
host->ecc_bch = 1;
ecc->mode = NAND_ECC_HW;
ecc->size = 2048;
- ecc->layout = &ecc_layout_4KB_bch4bit;
+ mtd_set_ecclayout(mtd, &ecc_layout_4KB_bch4bit);
ecc->strength = 16;
return 0;
}
@@ -987,6 +999,9 @@ static int pxa_ecc_strength4(struct mrvl_nand_host *host,
static int pxa_ecc_strength8(struct mrvl_nand_host *host,
struct nand_ecc_ctrl *ecc, int ecc_stepsize, int page_size)
{
+ struct nand_chip *chip = &host->chip;
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
if (!(host->hwflags & HWFLAGS_ECC_BCH))
return -ENODEV;
@@ -1001,7 +1016,7 @@ static int pxa_ecc_strength8(struct mrvl_nand_host *host,
host->ecc_bch = 1;
ecc->mode = NAND_ECC_HW;
ecc->size = 1024;
- ecc->layout = &ecc_layout_4KB_bch8bit;
+ mtd_set_ecclayout(mtd, &ecc_layout_4KB_bch8bit);
ecc->strength = 16;
return 0;
}
@@ -1234,7 +1249,7 @@ static int mrvl_nand_probe(struct device_d *dev)
return -ENODEV;
}
- ret = add_mtd_nand_device(&host->chip, "nand");
+ ret = add_mtd_nand_device(mtd, "nand");
return ret;
}
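The write-path conversion above follows a pattern repeated in the other drivers of this series: the ->write_page() hooks now receive the target page and bracket the driver-specific transfer with nand_prog_page_begin_op()/nand_prog_page_end_op(), while the read hooks start with nand_read_page_op(). A sketch of that shape, where push_page_to_controller() is a made-up placeholder for a driver's own data path:

	static int example_write_page_hwecc(struct nand_chip *chip, const u8 *buf,
					    int oob_required, int page)
	{
		int ret;

		/* Send the program-page command and the page address. */
		nand_prog_page_begin_op(chip, page, 0, NULL, 0);

		/* Driver-specific transfer of data (and OOB) to the controller. */
		ret = push_page_to_controller(chip, buf, oob_required);
		if (ret)
			return ret;

		/* Confirm the program operation and check the chip status. */
		return nand_prog_page_end_op(chip);
	}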
diff --git a/drivers/mtd/nand/nand_mxs.c b/drivers/mtd/nand/nand_mxs.c
index 7504909faa..310ce92944 100644
--- a/drivers/mtd/nand/nand_mxs.c
+++ b/drivers/mtd/nand/nand_mxs.c
@@ -20,6 +20,7 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_mxs.h>
#include <linux/types.h>
#include <linux/clk.h>
@@ -235,8 +236,6 @@ struct mxs_nand_info {
int bb_mark_bit_offset;
};
-static struct nand_ecclayout fake_ecc_layout;
-
static inline int mxs_nand_is_imx6(struct mxs_nand_info *info)
{
return info->type == GPMI_IMX6;
@@ -338,8 +337,6 @@ static int mxs_nand_calc_geo(struct nand_chip *chip)
int gf_len = 13; /* length of Galois Field for non-DDR nand */
int max_ecc_strength;
- nand_of_parse_node(mtd, mtd->dev.parent->device_node);
-
max_ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
/ (gf_len * ecc_chunk_count);
/* We need the minor even number. */
@@ -750,6 +747,8 @@ static int __mxs_nand_ecc_read_page(struct nand_chip *chip,
unsigned int max_bitflips = 0;
int i, ret, readtotal, nchunks;
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
readlen = roundup(readlen, MXS_NAND_CHUNK_DATA_CHUNK_SIZE);
nchunks = mxs_nand_ecc_chunk_cnt(readlen);
readtotal = MXS_NAND_METADATA_SIZE;
@@ -959,7 +958,7 @@ static int gpmi_ecc_read_subpage(struct nand_chip *chip,
* Write a page to NAND.
*/
static int mxs_nand_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
- int oob_required)
+ int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
struct mxs_nand_info *nand_info = chip->priv;
@@ -967,6 +966,8 @@ static int mxs_nand_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
uint32_t channel = nand_info->dma_channel_base + nand_info->cur_chip;
int ret = 0;
+ nand_prog_page_begin_op(chip, page, 0, NULL, 0);
+
memcpy(nand_info->data_buf, buf, mtd->writesize);
memcpy(nand_info->oob_buf, chip->oob_poi, mtd->oobsize);
@@ -1014,7 +1015,10 @@ static int mxs_nand_ecc_write_page(struct nand_chip *chip, const uint8_t *buf,
rtn:
mxs_nand_return_dma_descs(nand_info);
- return ret;
+ if (ret)
+ return ret;
+
+ return nand_prog_page_end_op(chip);
}
/*
@@ -1216,7 +1220,7 @@ static int mxs_nand_ecc_write_oob(struct nand_chip *chip, int page)
* Thus, this function is only called when we want *all* blocks to look good,
* so it *always* return success.
*/
-static int mxs_nand_block_bad(struct nand_chip *chip , loff_t ofs, int getchip)
+static int mxs_nand_block_bad(struct nand_chip *chip , loff_t ofs)
{
return 0;
}
@@ -1273,7 +1277,7 @@ static int mxs_nand_scan_bbt(struct nand_chip *chip)
}
/* We use the reference implementation for bad block management. */
- return nand_default_bbt(chip);
+ return nand_create_bbt(chip);
}
/*
@@ -1474,11 +1478,11 @@ static int mxs_nand_compute_hardware_timing(struct mxs_nand_info *info,
* If there are multiple chips, we need to relax the timings to allow
* for signal distortion due to higher capacitance.
*/
- if (chip->numchips > 2) {
+ if (nanddev_ntargets(&chip->base) > 2) {
target.data_setup_in_ns += 10;
target.data_hold_in_ns += 10;
target.address_setup_in_ns += 10;
- } else if (chip->numchips > 1) {
+ } else if (nanddev_ntargets(&chip->base) > 1) {
target.data_setup_in_ns += 5;
target.data_hold_in_ns += 5;
target.address_setup_in_ns += 5;
@@ -2031,7 +2035,7 @@ static int mxs_nand_enable_edo_mode(struct mxs_nand_info *info)
if (!mxs_nand_is_imx6(info))
return -ENODEV;
- if (!chip->onfi_version)
+ if (!chip->parameters.onfi)
return -ENOENT;
mode = onfi_get_async_timing_mode(chip);
@@ -2046,8 +2050,7 @@ static int mxs_nand_enable_edo_mode(struct mxs_nand_info *info)
chip->legacy.select_chip(chip, 0);
- if (le16_to_cpu(chip->onfi_params.opt_cmd)
- & ONFI_OPT_CMD_SET_GET_FEATURES) {
+ if (nand_supports_set_features(chip, ONFI_FEATURE_ADDR_TIMING_MODE)) {
/* [1] send SET FEATURE commond to NAND */
feature[0] = mode;
@@ -2200,7 +2203,6 @@ static int mxs_nand_probe(struct device_d *dev)
chip->legacy.dev_ready = mxs_nand_device_ready;
chip->legacy.select_chip = mxs_nand_select_chip;
chip->legacy.block_bad = mxs_nand_block_bad;
- chip->scan_bbt = mxs_nand_scan_bbt;
chip->legacy.read_byte = mxs_nand_read_byte;
@@ -2212,7 +2214,6 @@ static int mxs_nand_probe(struct device_d *dev)
chip->ecc.read_oob = mxs_nand_ecc_read_oob;
chip->ecc.write_oob = mxs_nand_ecc_write_oob;
- chip->ecc.layout = &fake_ecc_layout;
chip->ecc.mode = NAND_ECC_HW;
/* first scan to find the device and get the page size */
@@ -2229,7 +2230,7 @@ static int mxs_nand_probe(struct device_d *dev)
chip->options |= NAND_SUBPAGE_READ;
}
- chip->options |= NAND_NO_SUBPAGE_WRITE;
+ chip->options |= NAND_NO_SUBPAGE_WRITE | NAND_SKIP_BBTSCAN;
mxs_nand_setup_timing(nand_info);
@@ -2238,7 +2239,9 @@ static int mxs_nand_probe(struct device_d *dev)
if (err)
goto err2;
- err = add_mtd_nand_device(chip, "nand");
+ mxs_nand_scan_bbt(chip);
+
+ err = add_mtd_nand_device(mtd, "nand");
if (err)
goto err2;
diff --git a/drivers/mtd/nand/nand_omap_gpmc.c b/drivers/mtd/nand/nand_omap_gpmc.c
index d51a8a8a9e..e3d36a1cf4 100644
--- a/drivers/mtd/nand/nand_omap_gpmc.c
+++ b/drivers/mtd/nand/nand_omap_gpmc.c
@@ -66,6 +66,7 @@
#include <clock.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand_ecc.h>
#include <io.h>
#include <mach/gpmc.h>
@@ -682,15 +683,15 @@ static int omap_gpmc_read_page_bch_rom_mode(struct nand_chip *chip, uint8_t *buf
struct gpmc_nand_info *oinfo = chip->priv;
int dev_width = chip->options & NAND_BUSWIDTH_16 ? GPMC_ECC_CONFIG_ECC16B : 0;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
int eccsize = chip->ecc.size;
unsigned int max_bitflips = 0;
- int stat, i, j;
+ int stat, i, j, ret;
+ nand_read_page_op(chip, page, 0, NULL, 0);
writel(GPMC_ECC_SIZE_CONFIG_ECCSIZE1(0) |
GPMC_ECC_SIZE_CONFIG_ECCSIZE0(64),
@@ -725,8 +726,10 @@ static int omap_gpmc_read_page_bch_rom_mode(struct nand_chip *chip, uint8_t *buf
p += omap_gpmc_read_buf_manual(chip, p, 6, 5);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
p = buf;
@@ -950,19 +953,23 @@ static int gpmc_read_page_hwecc_elm(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int i;
+ int i, ret;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
chip->legacy.read_buf(chip, buf, mtd->writesize);
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
eccsteps = chip->ecc.steps;
@@ -976,21 +983,25 @@ static int gpmc_read_page_hwecc(struct nand_chip *chip, uint8_t *buf,
int oob_required, int page)
{
struct mtd_info *mtd = nand_to_mtd(chip);
- int i, eccsize = chip->ecc.size;
+ int i, ret, eccsize = chip->ecc.size;
int eccbytes = chip->ecc.bytes;
int eccsteps = chip->ecc.steps;
uint8_t *p = buf;
- uint8_t *ecc_calc = chip->buffers->ecccalc;
- uint8_t *ecc_code = chip->buffers->ecccode;
- uint32_t *eccpos = chip->ecc.layout->eccpos;
+ uint8_t *ecc_calc = chip->ecc.calc_buf;
+ uint8_t *ecc_code = chip->ecc.code_buf;
unsigned int max_bitflips = 0;
chip->ecc.hwctl(chip, NAND_ECC_READ);
+
+ nand_read_page_op(chip, page, 0, NULL, 0);
+
chip->legacy.read_buf(chip, p, mtd->writesize);
chip->legacy.read_buf(chip, chip->oob_poi, mtd->oobsize);
- for (i = 0; i < chip->ecc.total; i++)
- ecc_code[i] = chip->oob_poi[eccpos[i]];
+ ret = mtd_ooblayout_get_eccbytes(mtd, ecc_code, chip->oob_poi, 0,
+ chip->ecc.total);
+ if (ret)
+ return ret;
eccsteps = chip->ecc.steps;
p = buf;
@@ -1014,7 +1025,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
enum gpmc_ecc_mode mode)
{
struct nand_chip *nand = &oinfo->nand;
- struct mtd_info *minfo = &nand->mtd;
+ struct mtd_info *minfo = nand_to_mtd(nand);
int offset, err;
int i, j;
@@ -1029,7 +1040,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
offset = 1;
if (mode != OMAP_ECC_SOFT) {
- nand->ecc.layout = &omap_oobinfo;
+ mtd_set_ecclayout(minfo, &omap_oobinfo);
nand->ecc.calculate = omap_calculate_ecc;
nand->ecc.hwctl = omap_enable_hwecc;
nand->ecc.correct = omap_correct_data;
@@ -1117,7 +1128,7 @@ static int omap_gpmc_eccmode(struct gpmc_nand_info *oinfo,
break;
case OMAP_ECC_SOFT:
- nand->ecc.layout = NULL;
+ minfo->ecclayout = NULL;
nand->ecc.mode = NAND_ECC_SOFT;
oinfo->nand.ecc.strength = 1;
break;
@@ -1191,7 +1202,7 @@ static int gpmc_nand_probe(struct device_d *pdev)
nand = &oinfo->nand;
nand->priv = (void *)oinfo;
- minfo = &nand->mtd;
+ minfo = nand_to_mtd(nand);
minfo->dev.parent = pdev;
if (pdata->cs >= GPMC_NUM_CS) {
@@ -1263,9 +1274,6 @@ static int gpmc_nand_probe(struct device_d *pdev)
/* Dont do a bbt scan at the start */
nand->options |= NAND_SKIP_BBTSCAN;
- nand->options |= NAND_OWN_BUFFERS;
- nand->buffers = xzalloc(sizeof(*nand->buffers));
-
/* All information is ready.. now lets call setup, if present */
if (pdata->nand_setup) {
err = pdata->nand_setup(pdata);
@@ -1307,7 +1315,7 @@ static int gpmc_nand_probe(struct device_d *pdev)
omap_gpmc_eccmode(oinfo, oinfo->ecc_mode);
/* We are all set to register with the system now! */
- err = add_mtd_nand_device(nand, "nand");
+ err = add_mtd_nand_device(minfo, "nand");
if (err) {
dev_dbg(pdev, "device registration failed\n");
goto out_release_mem;
diff --git a/drivers/mtd/nand/nand_onfi.c b/drivers/mtd/nand/nand_onfi.c
new file mode 100644
index 0000000000..5f4c5c3437
--- /dev/null
+++ b/drivers/mtd/nand/nand_onfi.c
@@ -0,0 +1,326 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2000 Steven J. Hill (sjhill at realitydiluted.com)
+ * 2002-2006 Thomas Gleixner (tglx at linutronix.de)
+ *
+ * Credits:
+ * David Woodhouse for adding multichip support
+ *
+ * Aleph One Ltd. and Toby Churchill Ltd. for supporting the
+ * rework for 2K page size chips
+ *
+ * This file contains all ONFI helpers.
+ */
+
+#include <common.h>
+#include <linux/bitmap.h>
+
+#include "internals.h"
+
+#define ONFI_PARAM_PAGES 3
+
+u16 onfi_crc16(u16 crc, u8 const *p, size_t len)
+{
+ int i;
+ while (len--) {
+ crc ^= *p++ << 8;
+ for (i = 0; i < 8; i++)
+ crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
+ }
+
+ return crc;
+}
+
+/* Parse the Extended Parameter Page. */
+static int nand_flash_detect_ext_param_page(struct nand_chip *chip,
+ struct nand_onfi_params *p)
+{
+ struct onfi_ext_param_page *ep;
+ struct onfi_ext_section *s;
+ struct onfi_ext_ecc_info *ecc;
+ uint8_t *cursor;
+ int ret;
+ int len;
+ int i;
+
+ len = le16_to_cpu(p->ext_param_page_length) * 16;
+ ep = kmalloc(len, GFP_KERNEL);
+ if (!ep)
+ return -ENOMEM;
+
+ /*
+ * Use the Change Read Column command to skip the ONFI param pages and
+ * ensure we read at the right location.
+ */
+ ret = nand_change_read_column_op(chip,
+ sizeof(*p) * p->num_of_param_pages,
+ ep, len, true);
+ if (ret)
+ goto ext_out;
+
+ ret = -EINVAL;
+ if ((onfi_crc16(ONFI_CRC_BASE, ((uint8_t *)ep) + 2, len - 2)
+ != le16_to_cpu(ep->crc))) {
+ pr_debug("fail in the CRC.\n");
+ goto ext_out;
+ }
+
+ /*
+ * Check the signature.
+ * We do not strictly follow the ONFI spec here; this may change in the future.
+ */
+ if (strncmp(ep->sig, "EPPS", 4)) {
+ pr_debug("The signature is invalid.\n");
+ goto ext_out;
+ }
+
+ /* find the ECC section. */
+ cursor = (uint8_t *)(ep + 1);
+ for (i = 0; i < ONFI_EXT_SECTION_MAX; i++) {
+ s = ep->sections + i;
+ if (s->type == ONFI_SECTION_TYPE_2)
+ break;
+ cursor += s->length * 16;
+ }
+ if (i == ONFI_EXT_SECTION_MAX) {
+ pr_debug("We can not find the ECC section.\n");
+ goto ext_out;
+ }
+
+ /* get the info we want. */
+ ecc = (struct onfi_ext_ecc_info *)cursor;
+
+ if (!ecc->codeword_size) {
+ pr_debug("Invalid codeword size\n");
+ goto ext_out;
+ }
+
+ chip->base.eccreq.strength = ecc->ecc_bits;
+ chip->base.eccreq.step_size = 1 << ecc->codeword_size;
+ ret = 0;
+
+ext_out:
+ kfree(ep);
+ return ret;
+}
+
+/*
+ * Recover data with bit-wise majority
+ */
+static void nand_bit_wise_majority(const void **srcbufs,
+ unsigned int nsrcbufs,
+ void *dstbuf,
+ unsigned int bufsize)
+{
+ int i, j, k;
+
+ for (i = 0; i < bufsize; i++) {
+ u8 val = 0;
+
+ for (j = 0; j < 8; j++) {
+ unsigned int cnt = 0;
+
+ for (k = 0; k < nsrcbufs; k++) {
+ const u8 *srcbuf = srcbufs[k];
+
+ if (srcbuf[i] & BIT(j))
+ cnt++;
+ }
+
+ if (cnt > nsrcbufs / 2)
+ val |= BIT(j);
+ }
+
+ ((u8 *)dstbuf)[i] = val;
+ }
+}
+
+/*
+ * Check if the NAND chip is ONFI compliant; returns 1 if it is, 0 otherwise.
+ */
+int nand_onfi_detect(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+ struct nand_onfi_params *p = NULL, *pbuf;
+ struct onfi_params *onfi;
+ bool use_datain = false;
+ int onfi_version = 0;
+ char id[4];
+ int i, ret, val;
+ u16 crc;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* Try ONFI for unknown chip or LP */
+ ret = nand_readid_op(chip, 0x20, id, sizeof(id));
+ if (ret || strncmp(id, "ONFI", 4))
+ return 0;
+
+ /* ONFI chip: allocate a buffer to hold its parameter page */
+ pbuf = kzalloc((sizeof(*pbuf) * ONFI_PARAM_PAGES), GFP_KERNEL);
+ if (!pbuf)
+ return -ENOMEM;
+
+ if (!nand_has_exec_op(chip) ||
+ !nand_read_data_op(chip, &pbuf[0], sizeof(*pbuf), true, true))
+ use_datain = true;
+
+ for (i = 0; i < ONFI_PARAM_PAGES; i++) {
+ if (!i)
+ ret = nand_read_param_page_op(chip, 0, &pbuf[i],
+ sizeof(*pbuf));
+ else if (use_datain)
+ ret = nand_read_data_op(chip, &pbuf[i], sizeof(*pbuf),
+ true, false);
+ else
+ ret = nand_change_read_column_op(chip, sizeof(*pbuf) * i,
+ &pbuf[i], sizeof(*pbuf),
+ true);
+ if (ret) {
+ ret = 0;
+ goto free_onfi_param_page;
+ }
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)&pbuf[i], 254);
+ if (crc == le16_to_cpu(pbuf[i].crc)) {
+ p = &pbuf[i];
+ break;
+ }
+ }
+
+ if (i == ONFI_PARAM_PAGES) {
+ const void *srcbufs[ONFI_PARAM_PAGES];
+ unsigned int j;
+
+ for (j = 0; j < ONFI_PARAM_PAGES; j++)
+ srcbufs[j] = pbuf + j;
+
+ pr_warn("Could not find a valid ONFI parameter page, trying bit-wise majority to recover it\n");
+ nand_bit_wise_majority(srcbufs, ONFI_PARAM_PAGES, pbuf,
+ sizeof(*pbuf));
+
+ crc = onfi_crc16(ONFI_CRC_BASE, (u8 *)pbuf, 254);
+ if (crc != le16_to_cpu(pbuf->crc)) {
+ pr_err("ONFI parameter recovery failed, aborting\n");
+ goto free_onfi_param_page;
+ }
+ p = pbuf;
+ }
+
+ if (chip->manufacturer.desc && chip->manufacturer.desc->ops &&
+ chip->manufacturer.desc->ops->fixup_onfi_param_page)
+ chip->manufacturer.desc->ops->fixup_onfi_param_page(chip, p);
+
+ /* Check version */
+ val = le16_to_cpu(p->revision);
+ if (val & ONFI_VERSION_2_3)
+ onfi_version = 23;
+ else if (val & ONFI_VERSION_2_2)
+ onfi_version = 22;
+ else if (val & ONFI_VERSION_2_1)
+ onfi_version = 21;
+ else if (val & ONFI_VERSION_2_0)
+ onfi_version = 20;
+ else if (val & ONFI_VERSION_1_0)
+ onfi_version = 10;
+
+ if (!onfi_version) {
+ pr_info("unsupported ONFI version: %d\n", val);
+ goto free_onfi_param_page;
+ }
+
+ sanitize_string(p->manufacturer, sizeof(p->manufacturer));
+ sanitize_string(p->model, sizeof(p->model));
+ chip->parameters.model = strdup(p->model);
+ if (!chip->parameters.model) {
+ ret = -ENOMEM;
+ goto free_onfi_param_page;
+ }
+
+ memorg->pagesize = le32_to_cpu(p->byte_per_page);
+ mtd->writesize = memorg->pagesize;
+
+ /*
+ * pages_per_block and blocks_per_lun may not be a power-of-2 size
+ * (don't ask me who thought of this...). MTD assumes that these
+ * dimensions will be power-of-2, so just truncate the remaining area.
+ */
+ memorg->pages_per_eraseblock =
+ 1 << (fls(le32_to_cpu(p->pages_per_block)) - 1);
+ mtd->erasesize = memorg->pages_per_eraseblock * memorg->pagesize;
+
+ memorg->oobsize = le16_to_cpu(p->spare_bytes_per_page);
+ mtd->oobsize = memorg->oobsize;
+
+ memorg->luns_per_target = p->lun_count;
+ memorg->planes_per_lun = 1 << p->interleaved_bits;
+
+ /* See erasesize comment */
+ memorg->eraseblocks_per_lun =
+ 1 << (fls(le32_to_cpu(p->blocks_per_lun)) - 1);
+ memorg->max_bad_eraseblocks_per_lun = le32_to_cpu(p->blocks_per_lun);
+ memorg->bits_per_cell = p->bits_per_cell;
+
+ if (le16_to_cpu(p->features) & ONFI_FEATURE_16_BIT_BUS)
+ chip->options |= NAND_BUSWIDTH_16;
+
+ if (p->ecc_bits != 0xff) {
+ chip->base.eccreq.strength = p->ecc_bits;
+ chip->base.eccreq.step_size = 512;
+ } else if (onfi_version >= 21 &&
+ (le16_to_cpu(p->features) & ONFI_FEATURE_EXT_PARAM_PAGE)) {
+
+ /*
+ * nand_flash_detect_ext_param_page() uses the Change Read Column
+ * command, which may not be supported by chip->legacy.cmdfunc, so
+ * try to update chip->legacy.cmdfunc now. We do not replace a
+ * user-supplied command function.
+ */
+ nand_legacy_adjust_cmdfunc(chip);
+
+ /* The Extended Parameter Page is supported since ONFI 2.1. */
+ if (nand_flash_detect_ext_param_page(chip, p))
+ pr_warn("Failed to detect ONFI extended param page\n");
+ } else {
+ pr_warn("Could not retrieve ONFI ECC requirements\n");
+ }
+
+ /* Save some parameters from the parameter page for future use */
+ if (le16_to_cpu(p->opt_cmd) & ONFI_OPT_CMD_SET_GET_FEATURES) {
+ chip->parameters.supports_set_get_features = true;
+ bitmap_set(chip->parameters.get_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ bitmap_set(chip->parameters.set_feature_list,
+ ONFI_FEATURE_ADDR_TIMING_MODE, 1);
+ }
+
+ onfi = kzalloc(sizeof(*onfi), GFP_KERNEL);
+ if (!onfi) {
+ ret = -ENOMEM;
+ goto free_model;
+ }
+
+ onfi->version = onfi_version;
+ onfi->tPROG = le16_to_cpu(p->t_prog);
+ onfi->tBERS = le16_to_cpu(p->t_bers);
+ onfi->tR = le16_to_cpu(p->t_r);
+ onfi->tCCS = le16_to_cpu(p->t_ccs);
+ onfi->async_timing_mode = le16_to_cpu(p->async_timing_mode);
+ onfi->vendor_revision = le16_to_cpu(p->vendor_revision);
+ memcpy(onfi->vendor, p->vendor, sizeof(p->vendor));
+ chip->parameters.onfi = onfi;
+
+ /* Identification done, free the full ONFI parameter page and exit */
+ kfree(pbuf);
+
+ return 1;
+
+free_model:
+ kfree(chip->parameters.model);
+free_onfi_param_page:
+ kfree(pbuf);
+
+ return ret;
+}
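Each of the three parameter-page copies read above is validated with onfi_crc16() over its first 254 bytes before trusting it. A condensed sketch of that check, assuming the ONFI_CRC_BASE seed from the ONFI definitions:

	/* Validate one ONFI parameter page copy the same way the detection
	 * loop above does: CRC over bytes 0..253, compared to the stored CRC. */
	static bool onfi_param_page_valid(const struct nand_onfi_params *p)
	{
		return onfi_crc16(ONFI_CRC_BASE, (const u8 *)p, 254) ==
		       le16_to_cpu(p->crc);
	}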
diff --git a/drivers/mtd/nand/nand_orion.c b/drivers/mtd/nand/nand_orion.c
index 785d877b3d..3899a67b56 100644
--- a/drivers/mtd/nand/nand_orion.c
+++ b/drivers/mtd/nand/nand_orion.c
@@ -18,6 +18,7 @@
#include <errno.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <linux/clk.h>
struct orion_nand {
@@ -135,7 +136,7 @@ static int orion_nand_probe(struct device_d *dev)
goto no_dev;
}
- add_mtd_nand_device(chip, "orion_nand");
+ add_mtd_nand_device(mtd, "orion_nand");
return 0;
no_dev:
if (!IS_ERR(clk))
diff --git a/drivers/mtd/nand/nand_s3c24xx.c b/drivers/mtd/nand/nand_s3c24xx.c
index e20517ab4a..59bd6c9fed 100644
--- a/drivers/mtd/nand/nand_s3c24xx.c
+++ b/drivers/mtd/nand/nand_s3c24xx.c
@@ -26,6 +26,7 @@
#include <malloc.h>
#include <init.h>
#include <linux/mtd/mtd.h>
+#include <linux/mtd/rawnand.h>
#include <linux/mtd/nand.h>
#include <mach/s3c-generic.h>
#include <mach/s3c-iomap.h>
@@ -420,7 +421,7 @@ static int s3c24x0_nand_probe(struct device_d *dev)
/* structures must be linked */
chip = &host->nand;
- mtd = &chip->mtd;
+ mtd = nand_to_mtd(chip);
mtd->dev.parent = dev;
/* init the default settings */
@@ -462,7 +463,7 @@ static int s3c24x0_nand_probe(struct device_d *dev)
{
/* small page (512 bytes per page) */
chip->ecc.size = 512;
- chip->ecc.layout = &nand_hw_eccoob;
+ mtd_set_ecclayout(mtd, &nand_hw_eccoob);
}
if (pdata->flash_bbt) {
@@ -481,7 +482,7 @@ static int s3c24x0_nand_probe(struct device_d *dev)
goto on_error;
}
- return add_mtd_nand_device(chip, "nand");
+ return add_mtd_nand_device(mtd, "nand");
on_error:
free(host);
diff --git a/drivers/mtd/nand/nand_samsung.c b/drivers/mtd/nand/nand_samsung.c
new file mode 100644
index 0000000000..3a4a19e808
--- /dev/null
+++ b/drivers/mtd/nand/nand_samsung.c
@@ -0,0 +1,135 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon at free-electrons.com>
+ */
+
+#include "internals.h"
+
+static void samsung_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ /* New Samsung (6 byte ID): Samsung K9GAG08U0F (p.44) */
+ if (chip->id.len == 6 && !nand_is_slc(chip) &&
+ chip->id.data[5] != 0x00) {
+ u8 extid = chip->id.data[3];
+
+ /* Get pagesize */
+ memorg->pagesize = 2048 << (extid & 0x03);
+ mtd->writesize = memorg->pagesize;
+
+ extid >>= 2;
+
+ /* Get oobsize */
+ switch (((extid >> 2) & 0x4) | (extid & 0x3)) {
+ case 1:
+ memorg->oobsize = 128;
+ break;
+ case 2:
+ memorg->oobsize = 218;
+ break;
+ case 3:
+ memorg->oobsize = 400;
+ break;
+ case 4:
+ memorg->oobsize = 436;
+ break;
+ case 5:
+ memorg->oobsize = 512;
+ break;
+ case 6:
+ memorg->oobsize = 640;
+ break;
+ default:
+ /*
+ * We should never reach this case, but if that
+ * happens, this probably means Samsung decided to use
+ * a different extended ID format, and we should find
+ * a way to support it.
+ */
+ WARN(1, "Invalid OOB size value");
+ break;
+ }
+
+ mtd->oobsize = memorg->oobsize;
+
+ /* Get blocksize */
+ extid >>= 2;
+ memorg->pages_per_eraseblock = (128 * 1024) <<
+ (((extid >> 1) & 0x04) |
+ (extid & 0x03)) /
+ memorg->pagesize;
+ mtd->erasesize = (128 * 1024) <<
+ (((extid >> 1) & 0x04) | (extid & 0x03));
+
+ /* Extract ECC requirements from 5th id byte */
+ extid = (chip->id.data[4] >> 4) & 0x07;
+ if (extid < 5) {
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1 << extid;
+ } else {
+ chip->base.eccreq.step_size = 1024;
+ switch (extid) {
+ case 5:
+ chip->base.eccreq.strength = 24;
+ break;
+ case 6:
+ chip->base.eccreq.strength = 40;
+ break;
+ case 7:
+ chip->base.eccreq.strength = 60;
+ break;
+ default:
+ WARN(1, "Could not decode ECC info");
+ chip->base.eccreq.step_size = 0;
+ }
+ }
+ } else {
+ nand_decode_ext_id(chip);
+
+ if (nand_is_slc(chip)) {
+ switch (chip->id.data[1]) {
+ /* K9F4G08U0D-S[I|C]B0(T00) */
+ case 0xDC:
+ chip->base.eccreq.step_size = 512;
+ chip->base.eccreq.strength = 1;
+ break;
+
+ /* K9F1G08U0E 21nm chips do not support subpage write */
+ case 0xF1:
+ if (chip->id.len > 4 &&
+ (chip->id.data[4] & GENMASK(1, 0)) == 0x1)
+ chip->options |= NAND_NO_SUBPAGE_WRITE;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+}
+
+static int samsung_nand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ if (mtd->writesize > 512)
+ chip->options |= NAND_SAMSUNG_LP_OPTIONS;
+
+ if (!nand_is_slc(chip))
+ chip->options |= NAND_BBM_LASTPAGE;
+ else
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops samsung_nand_manuf_ops = {
+ .detect = samsung_nand_decode_id,
+ .init = samsung_nand_init,
+};
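For reference, the 6-byte extended-ID path above decodes the geometry purely from the 4th ID byte. With a hypothetical chip->id.data[3] of 0x25 (illustration only), the arithmetic works out as follows:

	/*
	 * Hypothetical extid = 0x25:
	 *   pagesize  = 2048 << (0x25 & 0x3) = 2048 << 1              = 4096 B
	 *   extid >>= 2                      -> 0x09
	 *   oobsize   selector = ((0x09 >> 2) & 0x4) | (0x09 & 0x3) = 1 -> 128 B
	 *   extid >>= 2                      -> 0x02
	 *   erasesize = (128 KiB) << (((0x02 >> 1) & 0x4) | (0x02 & 0x3))
	 *             = 128 KiB << 2                                   = 512 KiB
	 *   pages_per_eraseblock = 512 KiB / 4 KiB                     = 128
	 */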
diff --git a/drivers/mtd/nand/nand_timings.c b/drivers/mtd/nand/nand_timings.c
index 7a939510b7..1338133e81 100644
--- a/drivers/mtd/nand/nand_timings.c
+++ b/drivers/mtd/nand/nand_timings.c
@@ -1,242 +1,391 @@
+// SPDX-License-Identifier: GPL-2.0-only
/*
* Copyright (C) 2014 Free Electrons
*
* Author: Boris BREZILLON <boris.brezillon at free-electrons.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
*/
#include <linux/kernel.h>
#include <linux/err.h>
-#include <linux/mtd/nand.h>
+#include <linux/export.h>
+
+#include "internals.h"
+
+#define ONFI_DYN_TIMING_MAX U16_MAX
-static const struct nand_sdr_timings onfi_sdr_timings[] = {
+/*
+ * For non-ONFI chips we use the highest possible value for tPROG and tBERS.
+ * tR and tCCS will take the default values specified in the ONFI specification
+ * for timing mode 0: 200us and 500ns, respectively.
+ *
+ * These four values are tweaked to be more accurate in the case of ONFI chips.
+ */
+static const struct nand_interface_config onfi_sdr_timings[] = {
/* Mode 0 */
{
- .tADL_min = 200000,
- .tALH_min = 20000,
- .tALS_min = 50000,
- .tAR_min = 25000,
- .tCEA_max = 100000,
- .tCEH_min = 20000,
- .tCH_min = 20000,
- .tCHZ_max = 100000,
- .tCLH_min = 20000,
- .tCLR_min = 20000,
- .tCLS_min = 50000,
- .tCOH_min = 0,
- .tCS_min = 70000,
- .tDH_min = 20000,
- .tDS_min = 40000,
- .tFEAT_max = 1000000,
- .tIR_min = 10000,
- .tITC_max = 1000000,
- .tRC_min = 100000,
- .tREA_max = 40000,
- .tREH_min = 30000,
- .tRHOH_min = 0,
- .tRHW_min = 200000,
- .tRHZ_max = 200000,
- .tRLOH_min = 0,
- .tRP_min = 50000,
- .tRST_max = 250000000000,
- .tWB_max = 200000,
- .tRR_min = 40000,
- .tWC_min = 100000,
- .tWH_min = 30000,
- .tWHR_min = 120000,
- .tWP_min = 50000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 0,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 20000,
+ .tALS_min = 50000,
+ .tAR_min = 25000,
+ .tCEA_max = 100000,
+ .tCEH_min = 20000,
+ .tCH_min = 20000,
+ .tCHZ_max = 100000,
+ .tCLH_min = 20000,
+ .tCLR_min = 20000,
+ .tCLS_min = 50000,
+ .tCOH_min = 0,
+ .tCS_min = 70000,
+ .tDH_min = 20000,
+ .tDS_min = 40000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 10000,
+ .tITC_max = 1000000,
+ .tRC_min = 100000,
+ .tREA_max = 40000,
+ .tREH_min = 30000,
+ .tRHOH_min = 0,
+ .tRHW_min = 200000,
+ .tRHZ_max = 200000,
+ .tRLOH_min = 0,
+ .tRP_min = 50000,
+ .tRR_min = 40000,
+ .tRST_max = 250000000000ULL,
+ .tWB_max = 200000,
+ .tWC_min = 100000,
+ .tWH_min = 30000,
+ .tWHR_min = 120000,
+ .tWP_min = 50000,
+ .tWW_min = 100000,
+ },
},
/* Mode 1 */
{
- .tADL_min = 100000,
- .tALH_min = 10000,
- .tALS_min = 25000,
- .tAR_min = 10000,
- .tCEA_max = 45000,
- .tCEH_min = 20000,
- .tCH_min = 10000,
- .tCHZ_max = 50000,
- .tCLH_min = 10000,
- .tCLR_min = 10000,
- .tCLS_min = 25000,
- .tCOH_min = 15000,
- .tCS_min = 35000,
- .tDH_min = 10000,
- .tDS_min = 20000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 50000,
- .tREA_max = 30000,
- .tREH_min = 15000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 0,
- .tRP_min = 25000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 45000,
- .tWH_min = 15000,
- .tWHR_min = 80000,
- .tWP_min = 25000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 1,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 25000,
+ .tAR_min = 10000,
+ .tCEA_max = 45000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 25000,
+ .tCOH_min = 15000,
+ .tCS_min = 35000,
+ .tDH_min = 10000,
+ .tDS_min = 20000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 50000,
+ .tREA_max = 30000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 25000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 45000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 25000,
+ .tWW_min = 100000,
+ },
},
/* Mode 2 */
{
- .tADL_min = 100000,
- .tALH_min = 10000,
- .tALS_min = 15000,
- .tAR_min = 10000,
- .tCEA_max = 30000,
- .tCEH_min = 20000,
- .tCH_min = 10000,
- .tCHZ_max = 50000,
- .tCLH_min = 10000,
- .tCLR_min = 10000,
- .tCLS_min = 15000,
- .tCOH_min = 15000,
- .tCS_min = 25000,
- .tDH_min = 5000,
- .tDS_min = 15000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 35000,
- .tREA_max = 25000,
- .tREH_min = 15000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 0,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tRP_min = 17000,
- .tWC_min = 35000,
- .tWH_min = 15000,
- .tWHR_min = 80000,
- .tWP_min = 17000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 2,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 10000,
+ .tALS_min = 15000,
+ .tAR_min = 10000,
+ .tCEA_max = 30000,
+ .tCEH_min = 20000,
+ .tCH_min = 10000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 10000,
+ .tCLR_min = 10000,
+ .tCLS_min = 15000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 15000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 35000,
+ .tREA_max = 25000,
+ .tREH_min = 15000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tRP_min = 17000,
+ .tWC_min = 35000,
+ .tWH_min = 15000,
+ .tWHR_min = 80000,
+ .tWP_min = 17000,
+ .tWW_min = 100000,
+ },
},
/* Mode 3 */
{
- .tADL_min = 100000,
- .tALH_min = 5000,
- .tALS_min = 10000,
- .tAR_min = 10000,
- .tCEA_max = 25000,
- .tCEH_min = 20000,
- .tCH_min = 5000,
- .tCHZ_max = 50000,
- .tCLH_min = 5000,
- .tCLR_min = 10000,
- .tCLS_min = 10000,
- .tCOH_min = 15000,
- .tCS_min = 25000,
- .tDH_min = 5000,
- .tDS_min = 10000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 30000,
- .tREA_max = 20000,
- .tREH_min = 10000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 0,
- .tRP_min = 15000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 30000,
- .tWH_min = 10000,
- .tWHR_min = 80000,
- .tWP_min = 15000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 3,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 50000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 25000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 30000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 0,
+ .tRP_min = 15000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 30000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 15000,
+ .tWW_min = 100000,
+ },
},
/* Mode 4 */
{
- .tADL_min = 70000,
- .tALH_min = 5000,
- .tALS_min = 10000,
- .tAR_min = 10000,
- .tCEA_max = 25000,
- .tCEH_min = 20000,
- .tCH_min = 5000,
- .tCHZ_max = 30000,
- .tCLH_min = 5000,
- .tCLR_min = 10000,
- .tCLS_min = 10000,
- .tCOH_min = 15000,
- .tCS_min = 20000,
- .tDH_min = 5000,
- .tDS_min = 10000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 25000,
- .tREA_max = 20000,
- .tREH_min = 10000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 5000,
- .tRP_min = 12000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 25000,
- .tWH_min = 10000,
- .tWHR_min = 80000,
- .tWP_min = 12000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 4,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 20000,
+ .tDH_min = 5000,
+ .tDS_min = 10000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 25000,
+ .tREA_max = 20000,
+ .tREH_min = 10000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 12000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 25000,
+ .tWH_min = 10000,
+ .tWHR_min = 80000,
+ .tWP_min = 12000,
+ .tWW_min = 100000,
+ },
},
/* Mode 5 */
{
- .tADL_min = 70000,
- .tALH_min = 5000,
- .tALS_min = 10000,
- .tAR_min = 10000,
- .tCEA_max = 25000,
- .tCEH_min = 20000,
- .tCH_min = 5000,
- .tCHZ_max = 30000,
- .tCLH_min = 5000,
- .tCLR_min = 10000,
- .tCLS_min = 10000,
- .tCOH_min = 15000,
- .tCS_min = 15000,
- .tDH_min = 5000,
- .tDS_min = 7000,
- .tFEAT_max = 1000000,
- .tIR_min = 0,
- .tITC_max = 1000000,
- .tRC_min = 20000,
- .tREA_max = 16000,
- .tREH_min = 7000,
- .tRHOH_min = 15000,
- .tRHW_min = 100000,
- .tRHZ_max = 100000,
- .tRLOH_min = 5000,
- .tRP_min = 10000,
- .tRR_min = 20000,
- .tRST_max = 500000000,
- .tWB_max = 100000,
- .tWC_min = 20000,
- .tWH_min = 7000,
- .tWHR_min = 80000,
- .tWP_min = 10000,
- .tWW_min = 100000,
+ .type = NAND_SDR_IFACE,
+ .timings.mode = 5,
+ .timings.sdr = {
+ .tCCS_min = 500000,
+ .tR_max = 200000000,
+ .tPROG_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tBERS_max = 1000000ULL * ONFI_DYN_TIMING_MAX,
+ .tADL_min = 400000,
+ .tALH_min = 5000,
+ .tALS_min = 10000,
+ .tAR_min = 10000,
+ .tCEA_max = 25000,
+ .tCEH_min = 20000,
+ .tCH_min = 5000,
+ .tCHZ_max = 30000,
+ .tCLH_min = 5000,
+ .tCLR_min = 10000,
+ .tCLS_min = 10000,
+ .tCOH_min = 15000,
+ .tCS_min = 15000,
+ .tDH_min = 5000,
+ .tDS_min = 7000,
+ .tFEAT_max = 1000000,
+ .tIR_min = 0,
+ .tITC_max = 1000000,
+ .tRC_min = 20000,
+ .tREA_max = 16000,
+ .tREH_min = 7000,
+ .tRHOH_min = 15000,
+ .tRHW_min = 100000,
+ .tRHZ_max = 100000,
+ .tRLOH_min = 5000,
+ .tRP_min = 10000,
+ .tRR_min = 20000,
+ .tRST_max = 500000000,
+ .tWB_max = 100000,
+ .tWC_min = 20000,
+ .tWH_min = 7000,
+ .tWHR_min = 80000,
+ .tWP_min = 10000,
+ .tWW_min = 100000,
+ },
},
};
+/* All NAND chips share the same reset data interface: SDR mode 0 */
+const struct nand_interface_config *nand_get_reset_interface_config(void)
+{
+ return &onfi_sdr_timings[0];
+}
+
+/**
+ * onfi_find_closest_sdr_mode - Derive the closest ONFI SDR timing mode given a
+ * set of timings
+ * @spec_timings: the timings to challenge
+ */
+unsigned int
+onfi_find_closest_sdr_mode(const struct nand_sdr_timings *spec_timings)
+{
+ const struct nand_sdr_timings *onfi_timings;
+ int mode;
+
+ for (mode = ARRAY_SIZE(onfi_sdr_timings) - 1; mode > 0; mode--) {
+ onfi_timings = &onfi_sdr_timings[mode].timings.sdr;
+
+ if (spec_timings->tCCS_min <= onfi_timings->tCCS_min &&
+ spec_timings->tADL_min <= onfi_timings->tADL_min &&
+ spec_timings->tALH_min <= onfi_timings->tALH_min &&
+ spec_timings->tALS_min <= onfi_timings->tALS_min &&
+ spec_timings->tAR_min <= onfi_timings->tAR_min &&
+ spec_timings->tCEH_min <= onfi_timings->tCEH_min &&
+ spec_timings->tCH_min <= onfi_timings->tCH_min &&
+ spec_timings->tCLH_min <= onfi_timings->tCLH_min &&
+ spec_timings->tCLR_min <= onfi_timings->tCLR_min &&
+ spec_timings->tCLS_min <= onfi_timings->tCLS_min &&
+ spec_timings->tCOH_min <= onfi_timings->tCOH_min &&
+ spec_timings->tCS_min <= onfi_timings->tCS_min &&
+ spec_timings->tDH_min <= onfi_timings->tDH_min &&
+ spec_timings->tDS_min <= onfi_timings->tDS_min &&
+ spec_timings->tIR_min <= onfi_timings->tIR_min &&
+ spec_timings->tRC_min <= onfi_timings->tRC_min &&
+ spec_timings->tREH_min <= onfi_timings->tREH_min &&
+ spec_timings->tRHOH_min <= onfi_timings->tRHOH_min &&
+ spec_timings->tRHW_min <= onfi_timings->tRHW_min &&
+ spec_timings->tRLOH_min <= onfi_timings->tRLOH_min &&
+ spec_timings->tRP_min <= onfi_timings->tRP_min &&
+ spec_timings->tRR_min <= onfi_timings->tRR_min &&
+ spec_timings->tWC_min <= onfi_timings->tWC_min &&
+ spec_timings->tWH_min <= onfi_timings->tWH_min &&
+ spec_timings->tWHR_min <= onfi_timings->tWHR_min &&
+ spec_timings->tWP_min <= onfi_timings->tWP_min &&
+ spec_timings->tWW_min <= onfi_timings->tWW_min)
+ return mode;
+ }
+
+ return 0;
+}
+
+/**
+ * onfi_fill_interface_config - Initialize an interface config from a given
+ * ONFI mode
+ * @chip: The NAND chip
+ * @iface: The interface configuration to fill
+ * @type: The interface type
+ * @timing_mode: The ONFI timing mode
+ */
+void onfi_fill_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface,
+ enum nand_interface_type type,
+ unsigned int timing_mode)
+{
+ struct onfi_params *onfi = chip->parameters.onfi;
+
+ if (WARN_ON(type != NAND_SDR_IFACE))
+ return;
+
+ if (WARN_ON(timing_mode >= ARRAY_SIZE(onfi_sdr_timings)))
+ return;
+
+ *iface = onfi_sdr_timings[timing_mode];
+
+ /*
+ * Initialize timings that cannot be deduced from timing mode:
+ * tPROG, tBERS, tR and tCCS.
+ * This information is part of the ONFI parameter page.
+ */
+ if (onfi) {
+ struct nand_sdr_timings *timings = &iface->timings.sdr;
+
+ /* microseconds -> picoseconds */
+ timings->tPROG_max = 1000000ULL * onfi->tPROG;
+ timings->tBERS_max = 1000000ULL * onfi->tBERS;
+ timings->tR_max = 1000000ULL * onfi->tR;
+
+ /* nanoseconds -> picoseconds */
+ timings->tCCS_min = 1000UL * onfi->tCCS;
+ }
+}
+
/**
* onfi_async_timing_mode_to_sdr_timings - [NAND Interface] Retrieve NAND
* timings according to the given ONFI timing mode
@@ -246,6 +395,5 @@ const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode)
{
if (mode < 0 || mode >= ARRAY_SIZE(onfi_sdr_timings))
return ERR_PTR(-EINVAL);
-
- return &onfi_sdr_timings[mode];
+ return &onfi_sdr_timings[mode].timings.sdr;
}
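A typical consumer of the two new helpers is a ->choose_interface_config() hook that starts from datasheet timings, finds the closest ONFI mode and then lets the core reconcile the result, much like the Toshiba code later in this patch. A sketch under those assumptions (datasheet_sdr_timings is a made-up, driver-provided table):

	static struct nand_sdr_timings datasheet_sdr_timings = {
		/* min/max values taken from the vendor datasheet */
	};

	static int example_choose_interface_config(struct nand_chip *chip,
						   struct nand_interface_config *iface)
	{
		unsigned int mode = onfi_find_closest_sdr_mode(&datasheet_sdr_timings);

		/* Start from the closest ONFI SDR mode ... */
		onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, mode);

		/* ... then let the core pick the best supported timings. */
		return nand_choose_best_sdr_timings(chip, iface,
						    &datasheet_sdr_timings);
	}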
diff --git a/drivers/mtd/nand/nand_toshiba.c b/drivers/mtd/nand/nand_toshiba.c
new file mode 100644
index 0000000000..21a5dbc7e0
--- /dev/null
+++ b/drivers/mtd/nand/nand_toshiba.c
@@ -0,0 +1,274 @@
+// SPDX-License-Identifier: GPL-2.0-or-later
+/*
+ * Copyright (C) 2017 Free Electrons
+ * Copyright (C) 2017 NextThing Co
+ *
+ * Author: Boris Brezillon <boris.brezillon at free-electrons.com>
+ */
+
+#include "internals.h"
+
+/* Bit for detecting BENAND */
+#define TOSHIBA_NAND_ID4_IS_BENAND BIT(7)
+
+/* Recommended to rewrite for BENAND */
+#define TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED BIT(3)
+
+/* ECC Status Read Command for BENAND */
+#define TOSHIBA_NAND_CMD_ECC_STATUS_READ 0x7A
+
+/* ECC Status Mask for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_MASK 0x0F
+
+/* Uncorrectable Error for BENAND */
+#define TOSHIBA_NAND_ECC_STATUS_UNCORR 0x0F
+
+/* Max ECC Steps for BENAND */
+#define TOSHIBA_NAND_MAX_ECC_STEPS 8
+
+static int toshiba_nand_benand_read_eccstatus_op(struct nand_chip *chip,
+ u8 *buf)
+{
+ u8 *ecc_status = buf;
+
+ if (nand_has_exec_op(chip)) {
+ const struct nand_sdr_timings *sdr =
+ nand_get_sdr_timings(nand_get_interface_config(chip));
+ struct nand_op_instr instrs[] = {
+ NAND_OP_CMD(TOSHIBA_NAND_CMD_ECC_STATUS_READ,
+ PSEC_TO_NSEC(sdr->tADL_min)),
+ NAND_OP_8BIT_DATA_IN(chip->ecc.steps, ecc_status, 0),
+ };
+ struct nand_operation op = NAND_OPERATION(chip->cur_cs, instrs);
+
+ return nand_exec_op(chip, &op);
+ }
+
+ return -ENOTSUPP;
+}
+
+static int toshiba_nand_benand_eccstatus(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ int ret;
+ unsigned int max_bitflips = 0;
+ u8 status, ecc_status[TOSHIBA_NAND_MAX_ECC_STEPS];
+
+ /* Check Status */
+ ret = toshiba_nand_benand_read_eccstatus_op(chip, ecc_status);
+ if (!ret) {
+ unsigned int i, bitflips = 0;
+
+ for (i = 0; i < chip->ecc.steps; i++) {
+ bitflips = ecc_status[i] & TOSHIBA_NAND_ECC_STATUS_MASK;
+ if (bitflips == TOSHIBA_NAND_ECC_STATUS_UNCORR) {
+ mtd->ecc_stats.failed++;
+ } else {
+ mtd->ecc_stats.corrected += bitflips;
+ max_bitflips = max(max_bitflips, bitflips);
+ }
+ }
+
+ return max_bitflips;
+ }
+
+ /*
+ * Fall back to the regular status check if
+ * toshiba_nand_benand_read_eccstatus_op() failed.
+ */
+ ret = nand_status_op(chip, &status);
+ if (ret)
+ return ret;
+
+ if (status & NAND_STATUS_FAIL) {
+ /* uncorrected */
+ mtd->ecc_stats.failed++;
+ } else if (status & TOSHIBA_NAND_STATUS_REWRITE_RECOMMENDED) {
+ /* corrected */
+ max_bitflips = mtd->bitflip_threshold;
+ mtd->ecc_stats.corrected += max_bitflips;
+ }
+
+ return max_bitflips;
+}
+
+static int
+toshiba_nand_read_page_benand(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page)
+{
+ int ret;
+
+ ret = nand_read_page_raw(chip, buf, oob_required, page);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static int
+toshiba_nand_read_subpage_benand(struct nand_chip *chip, uint32_t data_offs,
+ uint32_t readlen, uint8_t *bufpoi, int page)
+{
+ int ret;
+
+ ret = nand_read_page_op(chip, page, data_offs,
+ bufpoi + data_offs, readlen);
+ if (ret)
+ return ret;
+
+ return toshiba_nand_benand_eccstatus(chip);
+}
+
+static void toshiba_nand_benand_init(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+
+ /*
+ * On BENAND, the entire OOB region can be used by the MTD user.
+ * The calculated ECC bytes are stored in a separate, isolated
+ * area that is not accessible to users.
+ * This is why chip->ecc.bytes = 0.
+ */
+ chip->ecc.bytes = 0;
+ chip->ecc.size = 512;
+ chip->ecc.strength = 8;
+ chip->ecc.read_page = toshiba_nand_read_page_benand;
+ chip->ecc.read_subpage = toshiba_nand_read_subpage_benand;
+ chip->ecc.write_page = nand_write_page_raw;
+ chip->ecc.read_page_raw = nand_read_page_raw_notsupp;
+ chip->ecc.write_page_raw = nand_write_page_raw_notsupp;
+
+ chip->options |= NAND_SUBPAGE_READ;
+
+ mtd_set_ooblayout(mtd, &nand_ooblayout_lp_ops);
+}
+
+static void toshiba_nand_decode_id(struct nand_chip *chip)
+{
+ struct mtd_info *mtd = nand_to_mtd(chip);
+ struct nand_memory_organization *memorg;
+
+ memorg = nanddev_get_memorg(&chip->base);
+
+ nand_decode_ext_id(chip);
+
+ /*
+ * Toshiba 24nm raw SLC (i.e., not BENAND) have 32B OOB per
+ * 512B page. For Toshiba SLC, we decode the 5th/6th byte as
+ * follows:
+ * - ID byte 6, bits[2:0]: 100b -> 43nm, 101b -> 32nm,
+ * 110b -> 24nm
+ * - ID byte 5, bit[7]: 1 -> BENAND, 0 -> raw SLC
+ */
+ if (chip->id.len >= 6 && nand_is_slc(chip) &&
+ (chip->id.data[5] & 0x7) == 0x6 /* 24nm */ &&
+ !(chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND) /* !BENAND */) {
+ memorg->oobsize = 32 * memorg->pagesize >> 9;
+ mtd->oobsize = memorg->oobsize;
+ }
+
+ /*
+ * Extract ECC requirements from 6th id byte.
+ * For Toshiba SLC, ECC requirements are as follows:
+ * - 43nm: 1 bit ECC for each 512Byte is required.
+ * - 32nm: 4 bit ECC for each 512Byte is required.
+ * - 24nm: 8 bit ECC for each 512Byte is required.
+ */
+ if (chip->id.len >= 6 && nand_is_slc(chip)) {
+ chip->base.eccreq.step_size = 512;
+ switch (chip->id.data[5] & 0x7) {
+ case 0x4:
+ chip->base.eccreq.strength = 1;
+ break;
+ case 0x5:
+ chip->base.eccreq.strength = 4;
+ break;
+ case 0x6:
+ chip->base.eccreq.strength = 8;
+ break;
+ default:
+ WARN(1, "Could not get ECC info");
+ chip->base.eccreq.step_size = 0;
+ break;
+ }
+ }
+}
+
+static int
+tc58nvg0s3e_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 2);
+
+ return nand_choose_best_sdr_timings(chip, iface, NULL);
+}
+
+static int
+th58nvg2s3hbai4_choose_interface_config(struct nand_chip *chip,
+ struct nand_interface_config *iface)
+{
+ struct nand_sdr_timings *sdr = &iface->timings.sdr;
+
+ /* Start with timings from the closest timing mode, mode 4. */
+ onfi_fill_interface_config(chip, iface, NAND_SDR_IFACE, 4);
+
+ /* Patch timings that differ from mode 4. */
+ sdr->tALS_min = 12000;
+ sdr->tCHZ_max = 20000;
+ sdr->tCLS_min = 12000;
+ sdr->tCOH_min = 0;
+ sdr->tDS_min = 12000;
+ sdr->tRHOH_min = 25000;
+ sdr->tRHW_min = 30000;
+ sdr->tRHZ_max = 60000;
+ sdr->tWHR_min = 60000;
+
+ /* Patch timings not part of onfi timing mode. */
+ sdr->tPROG_max = 700000000;
+ sdr->tBERS_max = 5000000000;
+
+ return nand_choose_best_sdr_timings(chip, iface, sdr);
+}
+
+static int tc58nvg0s3e_init(struct nand_chip *chip)
+{
+ chip->ops.choose_interface_config =
+ &tc58nvg0s3e_choose_interface_config;
+
+ return 0;
+}
+
+static int th58nvg2s3hbai4_init(struct nand_chip *chip)
+{
+ chip->ops.choose_interface_config =
+ &th58nvg2s3hbai4_choose_interface_config;
+
+ return 0;
+}
+
+static int toshiba_nand_init(struct nand_chip *chip)
+{
+ if (nand_is_slc(chip))
+ chip->options |= NAND_BBM_FIRSTPAGE | NAND_BBM_SECONDPAGE;
+
+ /* Check that chip is BENAND and ECC mode is on-die */
+ if (nand_is_slc(chip) && chip->ecc.mode == NAND_ECC_ON_DIE &&
+ chip->id.data[4] & TOSHIBA_NAND_ID4_IS_BENAND)
+ toshiba_nand_benand_init(chip);
+
+ if (!strcmp("TC58TEG5DCLTA00", chip->parameters.model))
+ return -EINVAL; /* MLC, not yet supported in barebox */
+ if (!strncmp("TC58NVG0S3E", chip->parameters.model,
+ sizeof("TC58NVG0S3E") - 1))
+ tc58nvg0s3e_init(chip);
+ if (!strncmp("TH58NVG2S3HBAI4", chip->parameters.model,
+ sizeof("TH58NVG2S3HBAI4") - 1))
+ th58nvg2s3hbai4_init(chip);
+
+ return 0;
+}
+
+const struct nand_manufacturer_ops toshiba_nand_manuf_ops = {
+ .detect = toshiba_nand_decode_id,
+ .init = toshiba_nand_init,
+};
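The per-step ECC status read above packs one result byte per ECC step; the low nibble is the number of corrected bitflips, with 0x0F flagging an uncorrectable step. A small worked example of how the loop above would tally a hypothetical status buffer:

	/*
	 * Hypothetical ecc_status[] = { 0x02, 0x00, 0x0F, 0x01 } (4 steps):
	 *   step 0: 2 bitflips -> ecc_stats.corrected += 2
	 *   step 1: 0 bitflips -> nothing to account
	 *   step 2: 0x0F       -> uncorrectable, ecc_stats.failed++
	 *   step 3: 1 bitflip  -> ecc_stats.corrected += 1
	 * and the function returns max_bitflips = 2.
	 */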
diff --git a/drivers/mtd/nand/nomadik_nand.c b/drivers/mtd/nand/nomadik_nand.c
index 2767259f7a..b5ef39223e 100644
--- a/drivers/mtd/nand/nomadik_nand.c
+++ b/drivers/mtd/nand/nomadik_nand.c
@@ -30,6 +30,7 @@
#include <linux/types.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
#include <io.h>
#include <mach/nand.h>
@@ -196,7 +197,7 @@ static int nomadik_nand_probe(struct device_d *dev)
/* Link all private pointers */
nand = &host->nand;
- mtd = &nand->mtd;
+ mtd = nand_to_mtd(nand);
nand->priv = host;
mtd->dev.parent = dev;
@@ -207,7 +208,7 @@ static int nomadik_nand_probe(struct device_d *dev)
nand->legacy.cmd_ctrl = nomadik_cmd_ctrl;
nand->ecc.mode = NAND_ECC_HW;
- nand->ecc.layout = &nomadik_ecc_layout;
+ mtd_set_ecclayout(mtd, &nomadik_ecc_layout);
nand->ecc.calculate = nomadik_ecc512_calc;
nand->ecc.correct = nomadik_ecc512_correct;
nand->ecc.hwctl = nomadik_ecc_control;
@@ -226,7 +227,7 @@ static int nomadik_nand_probe(struct device_d *dev)
}
pr_info("Registering %s as whole device\n", mtd->name);
- add_mtd_nand_device(nand, "nand");
+ add_mtd_nand_device(mtd, "nand");
return 0;
diff --git a/drivers/mtd/partition.c b/drivers/mtd/partition.c
index 1572038392..a426c8bfcf 100644
--- a/drivers/mtd/partition.c
+++ b/drivers/mtd/partition.c
@@ -167,6 +167,7 @@ struct mtd_info *mtd_add_partition(struct mtd_info *mtd, off_t offset,
part->bitflip_threshold = mtd->bitflip_threshold;
part->ecclayout = mtd->ecclayout;
part->ecc_step_size = mtd->ecc_step_size;
+ part->ooblayout = mtd->ooblayout;
part->ecc_strength = mtd->ecc_strength;
part->subpage_sft = mtd->subpage_sft;
part->cdev.flags = flags;
diff --git a/drivers/mtd/peb.c b/drivers/mtd/peb.c
index 27996d7e86..1b5605d1f4 100644
--- a/drivers/mtd/peb.c
+++ b/drivers/mtd/peb.c
@@ -542,7 +542,6 @@ int mtd_peb_erase(struct mtd_info *mtd, int pnum)
if (!mtd_peb_valid(mtd, pnum))
return -EINVAL;
- ei.mtd = mtd;
ei.addr = (loff_t)pnum * mtd->erasesize;
ei.len = mtd->erasesize;
diff --git a/drivers/of/of_mtd.c b/drivers/of/of_mtd.c
index 0956ee15d3..c531518c1d 100644
--- a/drivers/of/of_mtd.c
+++ b/drivers/of/of_mtd.c
@@ -9,6 +9,7 @@
#include <common.h>
#include <of_mtd.h>
#include <linux/mtd/nand.h>
+#include <linux/mtd/rawnand.h>
/**
* It maps 'enum nand_ecc_modes_t' found in include/linux/mtd/nand.h
@@ -16,10 +17,12 @@
* device driver can get nand ecc from device tree.
*/
static const char *nand_ecc_modes[] = {
+ [NAND_ECC_INVALID] = "invalid",
[NAND_ECC_NONE] = "none",
[NAND_ECC_SOFT] = "soft",
[NAND_ECC_HW] = "hw",
[NAND_ECC_HW_SYNDROME] = "hw_syndrome",
+ [NAND_ECC_ON_DIE] = "on-die",
[NAND_ECC_HW_OOB_FIRST] = "hw_oob_first",
[NAND_ECC_SOFT_BCH] = "soft_bch",
};
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
new file mode 100644
index 0000000000..0b6b59f7cf
--- /dev/null
+++ b/include/linux/mtd/jedec.h
@@ -0,0 +1,91 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2 at infradead.org>
+ * Steven J. Hill <sjhill at realitydiluted.com>
+ * Thomas Gleixner <tglx at linutronix.de>
+ *
+ * Contains all JEDEC related definitions
+ */
+
+#ifndef __LINUX_MTD_JEDEC_H
+#define __LINUX_MTD_JEDEC_H
+
+struct jedec_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+/* JEDEC features */
+#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+
+struct nand_jedec_params {
+ /* rev info and features block */
+ /* 'J' 'E' 'S' 'D' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ u8 opt_cmd[3];
+ __le16 sec_cmd;
+ u8 num_of_param_pages;
+ u8 reserved0[18];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id[6];
+ u8 reserved1[10];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ u8 reserved2[6];
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ u8 programs_per_page;
+ u8 multi_plane_addr;
+ u8 multi_plane_op_attr;
+ u8 reserved3[38];
+
+ /* electrical parameter block */
+ __le16 async_sdr_speed_grade;
+ __le16 toggle_ddr_speed_grade;
+ __le16 sync_ddr_speed_grade;
+ u8 async_sdr_features;
+ u8 toggle_ddr_features;
+ u8 sync_ddr_features;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_r_multi_plane;
+ __le16 t_ccs;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ __le16 clk_pin_capacitance_typ;
+ u8 driver_strength_support;
+ __le16 t_adl;
+ u8 reserved4[36];
+
+ /* ECC and endurance block */
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ struct jedec_ecc_info ecc_info[4];
+ u8 reserved5[29];
+
+ /* reserved */
+ u8 reserved6[148];
+
+ /* vendor */
+ __le16 vendor_rev_num;
+ u8 reserved7[88];
+
+ /* CRC for Parameter Page */
+ __le16 crc;
+} __packed;
+
+#endif /* __LINUX_MTD_JEDEC_H */
diff --git a/include/linux/mtd/mtd-abi.h b/include/linux/mtd/mtd-abi.h
index dfcb3554fb..6ad34c8912 100644
--- a/include/linux/mtd/mtd-abi.h
+++ b/include/linux/mtd/mtd-abi.h
@@ -46,6 +46,7 @@ enum {
#define MTD_NANDFLASH 4
#define MTD_DATAFLASH 6
#define MTD_UBIVOLUME 7
+#define MTD_MLCNANDFLASH 8 /* MLC NAND (including TLC) */
#define MTD_WRITEABLE 0x400 /* Device is writeable */
#define MTD_BIT_WRITEABLE 0x800 /* Single bits can be flipped */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index 710cba7a20..00a6a4f9c8 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -11,6 +11,7 @@
#include <driver.h>
#include <errno.h>
+#include <printk.h>
#include <linux/types.h>
#include <linux/list.h>
#include <linux/mtd/mtd-abi.h>
@@ -75,6 +76,39 @@ struct mtd_oob_ops {
uint8_t *oobbuf;
};
+#define MTD_MAX_OOBFREE_ENTRIES_LARGE 32
+#define MTD_MAX_ECCPOS_ENTRIES_LARGE 640
+/**
+ * struct mtd_oob_region - oob region definition
+ * @offset: region offset
+ * @length: region length
+ *
+ * This structure describes a region of the OOB area, and is used
+ * to retrieve ECC or free bytes sections.
+ * Each section is defined by an offset within the OOB area and a
+ * length.
+ */
+struct mtd_oob_region {
+ u32 offset;
+ u32 length;
+};
+
+/*
+ * struct mtd_ooblayout_ops - NAND OOB layout operations
+ * @ecc: function returning an ECC region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * ECC sections.
+ * @free: function returning a free region in the OOB area.
+ * Should return -ERANGE if %section exceeds the total number of
+ * free sections.
+ */
+struct mtd_ooblayout_ops {
+ int (*ecc)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+ int (*free)(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
+};
+
struct mtd_info {
u_char type;
u_int32_t flags;
@@ -123,6 +157,8 @@ struct mtd_info {
/* ecc layout structure pointer - read only ! */
struct nand_ecclayout *ecclayout;
+ /* OOB layout description */
+ const struct mtd_ooblayout_ops *ooblayout;
/* the ecc step size. */
unsigned int ecc_step_size;
@@ -141,15 +177,6 @@ struct mtd_info {
int (*_read) (struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf);
int (*_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
- /* In blackbox flight recorder like scenarios we want to make successful
- writes in interrupt context. panic_write() is only intended to be
- called when its known the kernel is about to panic and we need the
- write to succeed. Since the kernel is not going to be running for much
- longer, this function can break locks and delay to ensure the write
- succeeds (but not sleep). */
-
- int (*_panic_write) (struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen, const u_char *buf);
-
int (*_read_oob) (struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops);
int (*_write_oob) (struct mtd_info *mtd, loff_t to,
@@ -225,6 +252,30 @@ struct mtd_info {
unsigned int of_binding;
};
+int mtd_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobecc);
+int mtd_ooblayout_find_eccregion(struct mtd_info *mtd, int eccbyte,
+ int *section,
+ struct mtd_oob_region *oobregion);
+int mtd_ooblayout_get_eccbytes(struct mtd_info *mtd, u8 *eccbuf,
+ const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_eccbytes(struct mtd_info *mtd, const u8 *eccbuf,
+ u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobfree);
+int mtd_ooblayout_get_databytes(struct mtd_info *mtd, u8 *databuf,
+ const u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_set_databytes(struct mtd_info *mtd, const u8 *databuf,
+ u8 *oobbuf, int start, int nbytes);
+int mtd_ooblayout_count_freebytes(struct mtd_info *mtd);
+int mtd_ooblayout_count_eccbytes(struct mtd_info *mtd);
+
+static inline void mtd_set_ooblayout(struct mtd_info *mtd,
+ const struct mtd_ooblayout_ops *ooblayout)
+{
+ mtd->ooblayout = ooblayout;
+}
+
int mtd_erase(struct mtd_info *mtd, struct erase_info *instr);
int mtd_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen,
u_char *buf);
@@ -233,17 +284,28 @@ int mtd_write(struct mtd_info *mtd, loff_t to, size_t len, size_t *retlen,
int mtd_read_oob(struct mtd_info *mtd, loff_t from, struct mtd_oob_ops *ops);
-static inline int mtd_write_oob(struct mtd_info *mtd, loff_t to,
- struct mtd_oob_ops *ops)
+static inline void mtd_set_of_node(struct mtd_info *mtd,
+ struct device_node *np)
+{
+ mtd->dev.device_node = np;
+}
+
+static inline struct device_node *mtd_get_of_node(struct mtd_info *mtd)
{
- ops->retlen = ops->oobretlen = 0;
- if (!mtd->_write_oob)
- return -EOPNOTSUPP;
- if (!(mtd->flags & MTD_WRITEABLE))
- return -EROFS;
- return mtd->_write_oob(mtd, to, ops);
+ if (mtd->dev.device_node)
+ return mtd->dev.device_node;
+ if (mtd->dev.parent)
+ return mtd->dev.parent->device_node;
+ return NULL;
}
+static inline u32 mtd_oobavail(struct mtd_info *mtd, struct mtd_oob_ops *ops)
+{
+ return ops->mode == MTD_OPS_AUTO_OOB ? mtd->oobavail : mtd->oobsize;
+}
+
+int mtd_write_oob(struct mtd_info *mtd, loff_t to, struct mtd_oob_ops *ops);
+
static inline int mtd_can_have_bb(const struct mtd_info *mtd)
{
return !!mtd->_block_isbad;
@@ -312,4 +374,6 @@ static inline int mtd_is_bitflip_or_eccerr(int err) {
return mtd_is_bitflip(err) || mtd_is_eccerr(err);
}
+void mtd_set_ecclayout(struct mtd_info *mtd, struct nand_ecclayout *ecclayout);
+
#endif /* __MTD_MTD_H__ */
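
The mtd_ooblayout_ops callbacks introduced above replace static
nand_ecclayout tables with two small functions per driver. A minimal sketch
of what a driver-side implementation could look like; the foo_ names, the
2-byte BBM reservation and FOO_ECC_BYTES are made up for the example:

#include <linux/mtd/mtd.h>

#define FOO_ECC_BYTES	24	/* illustrative ECC byte count */

static int foo_ooblayout_ecc(struct mtd_info *mtd, int section,
			     struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 2;			/* skip the BBM bytes */
	oobregion->length = FOO_ECC_BYTES;

	return 0;
}

static int foo_ooblayout_free(struct mtd_info *mtd, int section,
			      struct mtd_oob_region *oobregion)
{
	if (section)
		return -ERANGE;

	oobregion->offset = 2 + FOO_ECC_BYTES;
	oobregion->length = mtd->oobsize - oobregion->offset;

	return 0;
}

static const struct mtd_ooblayout_ops foo_ooblayout_ops = {
	.ecc = foo_ooblayout_ecc,
	.free = foo_ooblayout_free,
};

static void foo_nand_attach_ooblayout(struct mtd_info *mtd)
{
	/* typically called from the driver's probe path */
	mtd_set_ooblayout(mtd, &foo_ooblayout_ops);
}

The mtd_ooblayout_count_eccbytes()/mtd_ooblayout_get_eccbytes() helpers then
derive everything else from these two callbacks.
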
diff --git a/include/linux/mtd/nand.h b/include/linux/mtd/nand.h
index f0d3455fd3..876849e7e8 100644
--- a/include/linux/mtd/nand.h
+++ b/include/linux/mtd/nand.h
@@ -1,812 +1,718 @@
+/* SPDX-License-Identifier: GPL-2.0 */
/*
- * linux/include/linux/mtd/nand.h
+ * Copyright 2017 - Free Electrons
*
- * Copyright © 2000-2010 David Woodhouse <dwmw2 at infradead.org>
- * Steven J. Hill <sjhill at realitydiluted.com>
- * Thomas Gleixner <tglx at linutronix.de>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Info:
- * Contains standard defines and IDs for NAND flash devices
- *
- * Changelog:
- * See git changelog.
+ * Authors:
+ * Boris Brezillon <boris.brezillon at free-electrons.com>
+ * Peter Pan <peterpandong at micron.com>
*/
+
#ifndef __LINUX_MTD_NAND_H
#define __LINUX_MTD_NAND_H
#include <linux/mtd/mtd.h>
-#include <linux/mtd/flashchip.h>
-#include <linux/mtd/bbm.h>
-
-struct mtd_info;
-struct nand_chip;
-struct nand_flash_dev;
-/* Scan and identify a NAND device */
-extern int nand_scan(struct nand_chip *chip, int max_chips);
+
+struct nand_device;
+
/*
- * Separate phases of nand_scan(), allowing board driver to intervene
- * and override command or ECC setup according to flash type.
+ * This constant declares the maximum oobsize / page that is
+ * supported now. If you add a chip with a bigger oobsize/page,
+ * adjust this accordingly.
*/
-extern int nand_scan_ident(struct nand_chip *chip, int max_chips,
- struct nand_flash_dev *table);
-extern int nand_scan_tail(struct nand_chip *chip);
+#define NAND_MAX_OOBSIZE 640
+#define NAND_MAX_PAGESIZE 8192
-/* Free resources held by the NAND device */
-extern void nand_release(struct nand_chip *chip);
+/**
+ * struct nand_memory_organization - Memory organization structure
+ * @bits_per_cell: number of bits per NAND cell
+ * @pagesize: page size
+ * @oobsize: OOB area size
+ * @pages_per_eraseblock: number of pages per eraseblock
+ * @eraseblocks_per_lun: number of eraseblocks per LUN (Logical Unit Number)
+ * @max_bad_eraseblocks_per_lun: maximum number of bad eraseblocks per LUN
+ * @planes_per_lun: number of planes per LUN
+ * @luns_per_target: number of LUNs per target (target is a synonym for die)
+ * @ntargets: total number of targets exposed by the NAND device
+ */
+struct nand_memory_organization {
+ unsigned int bits_per_cell;
+ unsigned int pagesize;
+ unsigned int oobsize;
+ unsigned int pages_per_eraseblock;
+ unsigned int eraseblocks_per_lun;
+ unsigned int max_bad_eraseblocks_per_lun;
+ unsigned int planes_per_lun;
+ unsigned int luns_per_target;
+ unsigned int ntargets;
+};
-/* Internal helper for board drivers which need to override command function */
-extern void nand_wait_ready(struct nand_chip *chip);
+#define NAND_MEMORG(bpc, ps, os, ppe, epl, mbb, ppl, lpt, nt) \
+ { \
+ .bits_per_cell = (bpc), \
+ .pagesize = (ps), \
+ .oobsize = (os), \
+ .pages_per_eraseblock = (ppe), \
+ .eraseblocks_per_lun = (epl), \
+ .max_bad_eraseblocks_per_lun = (mbb), \
+ .planes_per_lun = (ppl), \
+ .luns_per_target = (lpt), \
+ .ntargets = (nt), \
+ }
-/* locks all blocks present in the device */
-extern int nand_lock(struct nand_chip *chip, loff_t ofs, uint64_t len);
+/**
+ * struct nand_row_converter - Information needed to convert an absolute offset
+ * into a row address
+ * @lun_addr_shift: position of the LUN identifier in the row address
+ * @eraseblock_addr_shift: position of the eraseblock identifier in the row
+ * address
+ */
+struct nand_row_converter {
+ unsigned int lun_addr_shift;
+ unsigned int eraseblock_addr_shift;
+};
-/* unlocks specified locked blocks */
-extern int nand_unlock(struct nand_chip *chip, loff_t ofs, uint64_t len);
+/**
+ * struct nand_pos - NAND position object
+ * @target: the NAND target/die
+ * @lun: the LUN identifier
+ * @plane: the plane within the LUN
+ * @eraseblock: the eraseblock within the LUN
+ * @page: the page within the LUN
+ *
+ * This information is usually used by specific sub-layers to select the
+ * appropriate target/die and generate a row address to pass to the device.
+ */
+struct nand_pos {
+ unsigned int target;
+ unsigned int lun;
+ unsigned int plane;
+ unsigned int eraseblock;
+ unsigned int page;
+};
-extern int nand_check_erased_ecc_chunk(void *data, int datalen,
- void *ecc, int ecclen,
- void *extraoob, int extraooblen,
- int bitflips_threshold);
-int nand_check_erased_buf(void *buf, int len, int bitflips_threshold);
+/**
+ * struct nand_page_io_req - NAND I/O request object
+ * @pos: the position this I/O request is targeting
+ * @dataoffs: the offset within the page
+ * @datalen: number of data bytes to read from/write to this page
+ * @databuf: buffer to store data in or get data from
+ * @ooboffs: the OOB offset within the page
+ * @ooblen: the number of OOB bytes to read from/write to this page
+ * @oobbuf: buffer to store OOB data in or get OOB data from
+ * @mode: one of the %MTD_OPS_XXX modes
+ *
+ * This object is used to pass per-page I/O requests to NAND sub-layers. This
+ * way all the useful information is already formatted in a consistent way and
+ * specific NAND layers can focus on translating it into specific
+ * commands/operations.
+ */
+struct nand_page_io_req {
+ struct nand_pos pos;
+ unsigned int dataoffs;
+ unsigned int datalen;
+ union {
+ const void *out;
+ void *in;
+ } databuf;
+ unsigned int ooboffs;
+ unsigned int ooblen;
+ union {
+ const void *out;
+ void *in;
+ } oobbuf;
+ int mode;
+};
-void nand_of_parse_node(struct mtd_info *mtd, struct device_node *np);
+/**
+ * struct nand_ecc_props - NAND ECC properties
+ * @strength: ECC strength
+ * @step_size: Number of bytes per step
+ */
+struct nand_ecc_props {
+ unsigned int strength;
+ unsigned int step_size;
+};
-/* The maximum number of NAND chips in an array */
-#define NAND_MAX_CHIPS 8
+#define NAND_ECCREQ(str, stp) { .strength = (str), .step_size = (stp) }
-/*
- * This constant declares the max. oobsize / page, which
- * is supported now. If you add a chip with bigger oobsize/page
- * adjust this accordingly.
+/**
+ * struct nand_bbt - bad block table object
+ * @cache: in memory BBT cache
*/
-#define NAND_MAX_OOBSIZE 640
-#define NAND_MAX_PAGESIZE 8192
+struct nand_bbt {
+ unsigned long *cache;
+};
-/*
- * Constants for hardware specific CLE/ALE/NCE function
+/**
+ * struct nand_ops - NAND operations
+ * @erase: erase a specific block. No need to check if the block is bad before
+ * erasing, this has been taken care of by the generic NAND layer
+ * @markbad: mark a specific block bad. No need to check if the block is
+ * already marked bad, this has been taken care of by the generic
+ * NAND layer. This method should just write the BBM (Bad Block
+ * Marker) so that future call to struct_nand_ops->isbad() return
+ * true
+ * @isbad: check whether a block is bad or not. This method should just read
+ * the BBM and return whether the block is bad or not based on what it
+ * reads
*
- * These are bits which can be or'ed to set/clear multiple
- * bits in one go.
+ * These are all low level operations that should be implemented by specialized
+ * NAND layers (SPI NAND, raw NAND, ...).
*/
-/* Select the chip by setting nCE to low */
-#define NAND_NCE 0x01
-/* Select the command latch by setting CLE to high */
-#define NAND_CLE 0x02
-/* Select the address latch by setting ALE to high */
-#define NAND_ALE 0x04
+struct nand_ops {
+ int (*erase)(struct nand_device *nand, const struct nand_pos *pos);
+ int (*markbad)(struct nand_device *nand, const struct nand_pos *pos);
+ bool (*isbad)(struct nand_device *nand, const struct nand_pos *pos);
+};
-#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
-#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
-#define NAND_CTRL_CHANGE 0x80
+/**
+ * struct nand_device - NAND device
+ * @mtd: MTD instance attached to the NAND device
+ * @memorg: memory layout
+ * @eccreq: ECC requirements
+ * @rowconv: position to row address converter
+ * @bbt: bad block table info
+ * @ops: NAND operations attached to the NAND device
+ *
+ * Generic NAND object. Specialized NAND layers (raw NAND, SPI NAND, OneNAND)
+ * should declare their own NAND object embedding a nand_device struct (that's
+ * how inheritance is done).
+ * struct_nand_device->memorg and struct_nand_device->eccreq should be filled
+ * at device detection time to reflect the NAND device
+ * capabilities/requirements. Once this is done nanddev_init() can be called.
+ * It will take care of converting NAND information into MTD ones, which means
+ * the specialized NAND layers should never manually tweak
+ * struct_nand_device->mtd except for the ->_read/write() hooks.
+ */
+struct nand_device {
+ struct mtd_info mtd;
+ struct nand_memory_organization memorg;
+ struct nand_ecc_props eccreq;
+ struct nand_row_converter rowconv;
+ struct nand_bbt bbt;
+ const struct nand_ops *ops;
+};
-/*
- * Standard NAND flash commands
- */
-#define NAND_CMD_READ0 0
-#define NAND_CMD_READ1 1
-#define NAND_CMD_RNDOUT 5
-#define NAND_CMD_PAGEPROG 0x10
-#define NAND_CMD_READOOB 0x50
-#define NAND_CMD_ERASE1 0x60
-#define NAND_CMD_STATUS 0x70
-#define NAND_CMD_SEQIN 0x80
-#define NAND_CMD_RNDIN 0x85
-#define NAND_CMD_READID 0x90
-#define NAND_CMD_ERASE2 0xd0
-#define NAND_CMD_PARAM 0xec
-#define NAND_CMD_GET_FEATURES 0xee
-#define NAND_CMD_SET_FEATURES 0xef
-#define NAND_CMD_RESET 0xff
-
-#define NAND_CMD_LOCK 0x2a
-#define NAND_CMD_UNLOCK1 0x23
-#define NAND_CMD_UNLOCK2 0x24
-
-/* Extended commands for large page devices */
-#define NAND_CMD_READSTART 0x30
-#define NAND_CMD_RNDOUTSTART 0xE0
-#define NAND_CMD_CACHEDPROG 0x15
-
-#define NAND_CMD_NONE -1
-
-/* Status bits */
-#define NAND_STATUS_FAIL 0x01
-#define NAND_STATUS_FAIL_N1 0x02
-#define NAND_STATUS_TRUE_READY 0x20
-#define NAND_STATUS_READY 0x40
-#define NAND_STATUS_WP 0x80
+/**
+ * struct nand_io_iter - NAND I/O iterator
+ * @req: current I/O request
+ * @oobbytes_per_page: maximum number of OOB bytes per page
+ * @dataleft: remaining number of data bytes to read/write
+ * @oobleft: remaining number of OOB bytes to read/write
+ *
+ * Can be used by specialized NAND layers to iterate over all pages covered
+ * by an MTD I/O request, which should greatly simplify the boiler-plate
+ * code needed to read/write data from/to a NAND device.
+ */
+struct nand_io_iter {
+ struct nand_page_io_req req;
+ unsigned int oobbytes_per_page;
+ unsigned int dataleft;
+ unsigned int oobleft;
+};
-/*
- * Constants for ECC_MODES
+/**
+ * mtd_to_nanddev() - Get the NAND device attached to the MTD instance
+ * @mtd: MTD instance
+ *
+ * Return: the NAND device embedding @mtd.
*/
-typedef enum {
- NAND_ECC_NONE,
- NAND_ECC_SOFT,
- NAND_ECC_HW,
- NAND_ECC_HW_SYNDROME,
- NAND_ECC_HW_OOB_FIRST,
- NAND_ECC_SOFT_BCH,
-} nand_ecc_modes_t;
+static inline struct nand_device *mtd_to_nanddev(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_device, mtd);
+}
-/*
- * Constants for Hardware ECC
+/**
+ * nanddev_to_mtd() - Get the MTD device attached to a NAND device
+ * @nand: NAND device
+ *
+ * Return: the MTD device embedded in @nand.
*/
-/* Reset Hardware ECC for read */
-#define NAND_ECC_READ 0
-/* Reset Hardware ECC for write */
-#define NAND_ECC_WRITE 1
-/* Enable Hardware ECC before syndrome is read back from flash */
-#define NAND_ECC_READSYN 2
+static inline struct mtd_info *nanddev_to_mtd(struct nand_device *nand)
+{
+ return &nand->mtd;
+}
-/* Bit mask for flags passed to do_nand_read_ecc */
-#define NAND_GET_DEVICE 0x80
+/*
+ * nanddev_bits_per_cell() - Get the number of bits per cell
+ * @nand: NAND device
+ *
+ * Return: the number of bits per cell.
+ */
+static inline unsigned int nanddev_bits_per_cell(const struct nand_device *nand)
+{
+ return nand->memorg.bits_per_cell;
+}
+/**
+ * nanddev_page_size() - Get NAND page size
+ * @nand: NAND device
+ *
+ * Return: the page size.
+ */
+static inline size_t nanddev_page_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize;
+}
-/*
- * Option constants for bizarre disfunctionality and real
- * features.
+/**
+ * nanddev_per_page_oobsize() - Get NAND OOB size
+ * @nand: NAND device
+ *
+ * Return: the OOB size.
*/
-/* Buswidth is 16 bit */
-#define NAND_BUSWIDTH_16 0x00000002
-/* Chip has cache program function */
-#define NAND_CACHEPRG 0x00000008
-/*
- * Chip requires ready check on read (for auto-incremented sequential read).
- * True only for small page devices; large page devices do not support
- * autoincrement.
+static inline unsigned int
+nanddev_per_page_oobsize(const struct nand_device *nand)
+{
+ return nand->memorg.oobsize;
+}
+
+/**
+ * nanddev_pages_per_eraseblock() - Get the number of pages per eraseblock
+ * @nand: NAND device
+ *
+ * Return: the number of pages per eraseblock.
*/
-#define NAND_NEED_READRDY 0x00000100
+static inline unsigned int
+nanddev_pages_per_eraseblock(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock;
+}
-/* Chip does not allow subpage writes */
-#define NAND_NO_SUBPAGE_WRITE 0x00000200
+/**
+ * nanddev_pages_per_target() - Get the number of pages per target
+ * @nand: NAND device
+ *
+ * Return: the number of pages per target.
+ */
+static inline unsigned int
+nanddev_pages_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.pages_per_eraseblock *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.luns_per_target;
+}
-/* Device is one of 'new' xD cards that expose fake nand command set */
-#define NAND_BROKEN_XD 0x00000400
+/**
+ * nanddev_eraseblock_size() - Get NAND erase block size
+ * @nand: NAND device
+ *
+ * Return: the eraseblock size.
+ */
+static inline size_t nanddev_eraseblock_size(const struct nand_device *nand)
+{
+ return nand->memorg.pagesize * nand->memorg.pages_per_eraseblock;
+}
-/* Device behaves just like nand, but is readonly */
-#define NAND_ROM 0x00000800
+/**
+ * nanddev_eraseblocks_per_lun() - Get the number of eraseblocks per LUN
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per LUN.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_lun(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun;
+}
-/* Device supports subpage reads */
-/* Disabled in barebox for smaller binary sizes */
-#define NAND_SUBPAGE_READ (0x00001000)
+/**
+ * nanddev_eraseblocks_per_target() - Get the number of eraseblocks per target
+ * @nand: NAND device
+ *
+ * Return: the number of eraseblocks per target.
+ */
+static inline unsigned int
+nanddev_eraseblocks_per_target(const struct nand_device *nand)
+{
+ return nand->memorg.eraseblocks_per_lun * nand->memorg.luns_per_target;
+}
-/* Options valid for Samsung large page devices */
-#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+/**
+ * nanddev_target_size() - Get the total size provided by a single target/die
+ * @nand: NAND device
+ *
+ * Return: the total size exposed by a single target/die in bytes.
+ */
+static inline u64 nanddev_target_size(const struct nand_device *nand)
+{
+ return (u64)nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun *
+ nand->memorg.pages_per_eraseblock *
+ nand->memorg.pagesize;
+}
-/* Macros to identify the above */
-#define NAND_HAS_CACHEPROG(chip) ((chip->options & NAND_CACHEPRG))
-#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+/**
+ * nanddev_ntargets() - Get the total number of targets
+ * @nand: NAND device
+ *
+ * Return: the number of targets/dies exposed by @nand.
+ */
+static inline unsigned int nanddev_ntargets(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets;
+}
-/* Non chip related options */
-/* This option skips the bbt scan during initialization. */
-#define NAND_SKIP_BBTSCAN 0x00010000
-/*
- * This option is defined if the board driver allocates its own buffers
- * (e.g. because it needs them DMA-coherent).
+/**
+ * nanddev_neraseblocks() - Get the total number of eraseblocks
+ * @nand: NAND device
+ *
+ * Return: the total number of eraseblocks exposed by @nand.
*/
-#define NAND_OWN_BUFFERS 0x00020000
-/* Chip may not exist, so silence any errors in scan */
-#define NAND_SCAN_SILENT_NODEV 0x00040000
-/*
- * Autodetect nand buswidth with readid/onfi.
- * This suppose the driver will configure the hardware in 8 bits mode
- * when calling nand_scan_ident, and update its configuration
- * before calling nand_scan_tail.
- */
-#define NAND_BUSWIDTH_AUTO 0x00080000
-
-/* Options set by nand scan */
-/* Nand scan has allocated controller struct */
-#define NAND_CONTROLLER_ALLOC 0x80000000
-
-/* Cell info constants */
-#define NAND_CI_CHIPNR_MSK 0x03
-#define NAND_CI_CELLTYPE_MSK 0x0C
-#define NAND_CI_CELLTYPE_SHIFT 2
-
-/* Keep gcc happy */
-struct nand_chip;
-
-/* ONFI timing mode, used in both asynchronous and synchronous mode */
-#define ONFI_TIMING_MODE_0 (1 << 0)
-#define ONFI_TIMING_MODE_1 (1 << 1)
-#define ONFI_TIMING_MODE_2 (1 << 2)
-#define ONFI_TIMING_MODE_3 (1 << 3)
-#define ONFI_TIMING_MODE_4 (1 << 4)
-#define ONFI_TIMING_MODE_5 (1 << 5)
-#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
-
-/* ONFI feature address */
-#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
-
-/* ONFI subfeature parameters length */
-#define ONFI_SUBFEATURE_PARAM_LEN 4
-
-/* ONFI optional commands SET/GET FEATURES supported? */
-#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
-
-struct nand_onfi_params {
- /* rev info and features block */
- /* 'O' 'N' 'F' 'I' */
- u8 sig[4];
- __le16 revision;
- __le16 features;
- __le16 opt_cmd;
- u8 reserved[22];
-
- /* manufacturer information block */
- char manufacturer[12];
- char model[20];
- u8 jedec_id;
- __le16 date_code;
- u8 reserved2[13];
-
- /* memory organization block */
- __le32 byte_per_page;
- __le16 spare_bytes_per_page;
- __le32 data_bytes_per_ppage;
- __le16 spare_bytes_per_ppage;
- __le32 pages_per_block;
- __le32 blocks_per_lun;
- u8 lun_count;
- u8 addr_cycles;
- u8 bits_per_cell;
- __le16 bb_per_lun;
- __le16 block_endurance;
- u8 guaranteed_good_blocks;
- __le16 guaranteed_block_endurance;
- u8 programs_per_page;
- u8 ppage_attr;
- u8 ecc_bits;
- u8 interleaved_bits;
- u8 interleaved_ops;
- u8 reserved3[13];
-
- /* electrical parameter block */
- u8 io_pin_capacitance_max;
- __le16 async_timing_mode;
- __le16 program_cache_timing_mode;
- __le16 t_prog;
- __le16 t_bers;
- __le16 t_r;
- __le16 t_ccs;
- __le16 src_sync_timing_mode;
- __le16 src_ssync_features;
- __le16 clk_pin_capacitance_typ;
- __le16 io_pin_capacitance_typ;
- __le16 input_pin_capacitance_typ;
- u8 input_pin_capacitance_max;
- u8 driver_strenght_support;
- __le16 t_int_r;
- __le16 t_ald;
- u8 reserved4[7];
-
- /* vendor */
- u8 reserved5[90];
-
- __le16 crc;
-} __attribute__((packed));
-
-#define ONFI_CRC_BASE 0x4F4E
-
-/**
- * struct nand_hw_control - Control structure for hardware controller (e.g ECC generator) shared among independent devices
- * @lock: protection lock
- * @active: the mtd device which holds the controller currently
- * @wq: wait queue to sleep on if a NAND operation is in
- * progress used instead of the per chip wait queue
- * when a hw controller is available.
- */
-struct nand_hw_control {
- struct nand_chip *active;
-};
+static inline unsigned int nanddev_neraseblocks(const struct nand_device *nand)
+{
+ return nand->memorg.ntargets * nand->memorg.luns_per_target *
+ nand->memorg.eraseblocks_per_lun;
+}
/**
- * struct nand_ecc_ctrl - Control structure for ECC
- * @mode: ECC mode
- * @steps: number of ECC steps per page
- * @size: data bytes per ECC step
- * @bytes: ECC bytes per step
- * @strength: max number of correctible bits per ECC step
- * @total: total number of ECC bytes per page
- * @prepad: padding information for syndrome based ECC generators
- * @postpad: padding information for syndrome based ECC generators
- * @layout: ECC layout control struct pointer
- * @priv: pointer to private ECC control data
- * @hwctl: function to control hardware ECC generator. Must only
- * be provided if an hardware ECC is available
- * @calculate: function for ECC calculation or readback from ECC hardware
- * @correct: function for ECC correction, matching to ECC generator (sw/hw)
- * @read_page_raw: function to read a raw page without ECC
- * @write_page_raw: function to write a raw page without ECC
- * @read_page: function to read a page according to the ECC generator
- * requirements; returns maximum number of bitflips corrected in
- * any single ECC step, 0 if bitflips uncorrectable, -EIO hw error
- * @read_subpage: function to read parts of the page covered by ECC;
- * returns same as read_page()
- * @write_subpage: function to write parts of the page covered by ECC.
- * @write_page: function to write a page according to the ECC generator
- * requirements.
- * @write_oob_raw: function to write chip OOB data without ECC
- * @read_oob_raw: function to read chip OOB data without ECC
- * @read_oob: function to read chip OOB data
- * @write_oob: function to write chip OOB data
- */
-struct nand_ecc_ctrl {
- nand_ecc_modes_t mode;
- int steps;
- int size;
- int bytes;
- int total;
- int strength;
- int prepad;
- int postpad;
- struct nand_ecclayout *layout;
- void *priv;
- void (*hwctl)(struct nand_chip *chip, int mode);
- int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
- uint8_t *ecc_code);
- int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
- uint8_t *calc_ecc);
- int (*read_page_raw)(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*write_page_raw)(struct nand_chip *chip,
- const uint8_t *buf, int oob_required);
- int (*read_page)(struct nand_chip *chip,
- uint8_t *buf, int oob_required, int page);
- int (*read_subpage)(struct nand_chip *chip,
- uint32_t offs, uint32_t len, uint8_t *buf, int page);
- int (*write_subpage)(struct nand_chip *chip,
- uint32_t offset, uint32_t data_len,
- const uint8_t *data_buf, int oob_required);
- int (*write_page)(struct nand_chip *chip,
- const uint8_t *buf, int oob_required);
- int (*write_oob_raw)(struct nand_chip *chip, int page);
- int (*read_oob_raw)(struct nand_chip *chip, int page);
- int (*read_oob)(struct nand_chip *chip, int page);
- int (*write_oob)(struct nand_chip *chip, int page);
-};
+ * nanddev_size() - Get NAND size
+ * @nand: NAND device
+ *
+ * Return: the total size (in bytes) exposed by @nand.
+ */
+static inline u64 nanddev_size(const struct nand_device *nand)
+{
+ return nanddev_target_size(nand) * nanddev_ntargets(nand);
+}
/**
- * struct nand_buffers - buffer structure for read/write
- * @ecccalc: buffer for calculated ECC
- * @ecccode: buffer for ECC read from flash
- * @databuf: buffer for data - dynamically sized
+ * nanddev_get_memorg() - Extract memory organization info from a NAND device
+ * @nand: NAND device
+ *
+ * This can be used by the upper layer to fill the memorg info before calling
+ * nanddev_init().
*
- * Do not change the order of buffers. databuf and oobrbuf must be in
- * consecutive order.
+ * Return: the memorg object embedded in the NAND device.
*/
-struct nand_buffers {
- uint8_t ecccalc[NAND_MAX_OOBSIZE];
- uint8_t ecccode[NAND_MAX_OOBSIZE];
- uint8_t databuf[NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE];
-};
+static inline struct nand_memory_organization *
+nanddev_get_memorg(struct nand_device *nand)
+{
+ return &nand->memorg;
+}
+
+int nanddev_init(struct nand_device *nand, const struct nand_ops *ops,
+ struct module *owner);
+void nanddev_cleanup(struct nand_device *nand);
/**
- * struct nand_legacy - NAND chip legacy fields/hooks
- * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
- * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
- * @select_chip: select/deselect a specific target/die
- * @read_byte: read one byte from the chip
- * @write_byte: write a single byte to the chip on the low 8 I/O lines
- * @write_buf: write data from the buffer to the chip
- * @read_buf: read data from the chip into the buffer
- * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
- * to write command and address
- * @cmdfunc: hardware specific function for writing commands to the chip.
- * @dev_ready: hardware specific function for accessing device ready/busy line.
- * If set to NULL no access to ready/busy is available and the
- * ready/busy information is read from the chip status register.
- * @waitfunc: hardware specific function for wait on ready.
- * @block_bad: check if a block is bad, using OOB markers
- * @block_markbad: mark a block bad
- * @set_features: set the NAND chip features
- * @get_features: get the NAND chip features
- * @chip_delay: chip dependent delay for transferring data from array to read
- * regs (tR).
- * @dummy_controller: dummy controller implementation for drivers that can
- * only control a single chip
- *
- * If you look at this structure you're already wrong. These fields/hooks are
- * all deprecated.
- */
-struct nand_legacy {
- void __iomem *IO_ADDR_R;
- void __iomem *IO_ADDR_W;
- void (*select_chip)(struct nand_chip *chip, int cs);
- u8 (*read_byte)(struct nand_chip *chip);
- u16 (*read_word)(struct nand_chip *chip);
- void (*write_byte)(struct nand_chip *chip, u8 byte);
- void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
- void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
- void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
- void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
- int page_addr);
- int (*dev_ready)(struct nand_chip *chip);
- int (*waitfunc)(struct nand_chip *chip);
- int (*block_bad)(struct nand_chip *chip, loff_t ofs, int getchip);
- int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
- int (*set_features)(struct nand_chip *chip, int feature_addr,
- u8 *subfeature_para);
- int (*get_features)(struct nand_chip *chip, int feature_addr,
- u8 *subfeature_para);
- int chip_delay;
-};
+ * nanddev_offs_to_pos() - Convert an absolute NAND offset into a NAND position
+ * @nand: NAND device
+ * @offs: absolute NAND offset (usually passed by the MTD layer)
+ * @pos: a NAND position object to fill in
+ *
+ * Converts @offs into a nand_pos representation.
+ *
+ * Return: the offset within the NAND page pointed by @pos.
+ */
+static inline unsigned int nanddev_offs_to_pos(struct nand_device *nand,
+ loff_t offs,
+ struct nand_pos *pos)
+{
+ unsigned int pageoffs;
+ u64 tmp = offs;
+
+ pageoffs = do_div(tmp, nand->memorg.pagesize);
+ pos->page = do_div(tmp, nand->memorg.pages_per_eraseblock);
+ pos->eraseblock = do_div(tmp, nand->memorg.eraseblocks_per_lun);
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+ pos->lun = do_div(tmp, nand->memorg.luns_per_target);
+ pos->target = tmp;
+
+ return pageoffs;
+}
/**
- * struct nand_chip - NAND Private Flash Chip Data
- * @init_size: [BOARDSPECIFIC] hardwarespecific function for setting
- * mtd->oobsize, mtd->writesize and so on.
- * @id_data contains the 8 bytes values of NAND_CMD_READID.
- * Return with the bus width.
- * @ecc: [BOARDSPECIFIC] ECC control structure
- * @buffers: buffer structure for read/write
- * @hwcontrol: platform-specific hardware control structure
- * @scan_bbt: [REPLACEABLE] function to scan bad block table
- * @state: [INTERN] the current state of the NAND device
- * @oob_poi: "poison value buffer," used for laying out OOB data
- * before writing
- * @page_shift: [INTERN] number of address bits in a page (column
- * address bits).
- * @phys_erase_shift: [INTERN] number of address bits in a physical eraseblock
- * @bbt_erase_shift: [INTERN] number of address bits in a bbt entry
- * @chip_shift: [INTERN] number of address bits in one chip
- * @options: [BOARDSPECIFIC] various chip options. They can partly
- * be set to inform nand_scan about special functionality.
- * See the defines for further explanation.
- * @bbt_options: [INTERN] bad block specific options. All options used
- * here must come from bbm.h. By default, these options
- * will be copied to the appropriate nand_bbt_descr's.
- * @badblockpos: [INTERN] position of the bad block marker in the oob
- * area.
- * @badblockbits: [INTERN] minimum number of set bits in a good block's
- * bad block marker position; i.e., BBM == 11110111b is
- * not bad when badblockbits == 7
- * @bits_per_cell: [INTERN] number of bits per cell. i.e., 1 means SLC.
- * @numchips: [INTERN] number of physical chips
- * @chipsize: [INTERN] the size of one chip for multichip arrays
- * @pagemask: [INTERN] page number mask = number of (pages / chip) - 1
- * @pagebuf: [INTERN] holds the pagenumber which is currently in
- * data_buf.
- * @pagebuf_bitflips: [INTERN] holds the bitflip count for the page which is
- * currently in data_buf.
- * @subpagesize: [INTERN] holds the subpagesize
- * @onfi_version: [INTERN] holds the chip ONFI version (BCD encoded),
- * non 0 if ONFI supported.
- * @onfi_params: [INTERN] holds the ONFI page parameter when ONFI is
- * supported, 0 otherwise.
- * @onfi_set_features: [REPLACEABLE] set the features for ONFI nand
- * @onfi_get_features: [REPLACEABLE] get the features for ONFI nand
- * @ecclayout: [REPLACEABLE] the default ECC placement scheme
- * @bbt: [INTERN] bad block table pointer
- * @bbt_td: [REPLACEABLE] bad block table descriptor for flash
- * lookup.
- * @bbt_md: [REPLACEABLE] bad block table mirror descriptor
- * @badblock_pattern: [REPLACEABLE] bad block scan pattern used for initial
- * bad block scan.
- * @controller: [REPLACEABLE] a pointer to a hardware controller
- * structure which is shared among multiple independent
- * devices.
- * @priv: [OPTIONAL] pointer to private chip data
- * @write_page: [REPLACEABLE] High-level page write function
- */
-
-struct nand_chip {
- int (*scan_bbt)(struct nand_chip *chip);
- int (*write_page)(struct nand_chip *chip,
- uint32_t offset, int data_len, const uint8_t *buf,
- int oob_required, int page, int cached, int raw);
-
- struct nand_legacy legacy;
-
- unsigned int options;
- unsigned int bbt_options;
-
- int page_shift;
- int phys_erase_shift;
- int bbt_erase_shift;
- int chip_shift;
- int numchips;
- uint64_t chipsize;
- int pagemask;
- int pagebuf;
- unsigned int pagebuf_bitflips;
- int subpagesize;
- uint8_t bits_per_cell;
- int badblockpos;
- int badblockbits;
-
- int onfi_version;
- struct nand_onfi_params onfi_params;
-
- flstate_t state;
-
- uint8_t *oob_poi;
- struct nand_hw_control *controller;
- struct nand_ecclayout *ecclayout;
-
- struct nand_ecc_ctrl ecc;
- struct nand_buffers *buffers;
- struct nand_hw_control hwcontrol;
-
- uint8_t *bbt;
- struct nand_bbt_descr *bbt_td;
- struct nand_bbt_descr *bbt_md;
-
- struct nand_bbt_descr *badblock_pattern;
-
- void *priv;
- unsigned int bbt_type;
+ * nanddev_pos_cmp() - Compare two NAND positions
+ * @a: First NAND position
+ * @b: Second NAND position
+ *
+ * Compares two NAND positions.
+ *
+ * Return: -1 if @a < @b, 0 if @a == @b and 1 if @a > @b.
+ */
+static inline int nanddev_pos_cmp(const struct nand_pos *a,
+ const struct nand_pos *b)
+{
+ if (a->target != b->target)
+ return a->target < b->target ? -1 : 1;
- struct mtd_info mtd;
-};
+ if (a->lun != b->lun)
+ return a->lun < b->lun ? -1 : 1;
-/*
- * NAND Flash Manufacturer ID Codes
- */
-#define NAND_MFR_TOSHIBA 0x98
-#define NAND_MFR_SAMSUNG 0xec
-#define NAND_MFR_FUJITSU 0x04
-#define NAND_MFR_NATIONAL 0x8f
-#define NAND_MFR_RENESAS 0x07
-#define NAND_MFR_STMICRO 0x20
-#define NAND_MFR_HYNIX 0xad
-#define NAND_MFR_MICRON 0x2c
-#define NAND_MFR_AMD 0x01
-#define NAND_MFR_MACRONIX 0xc2
-#define NAND_MFR_EON 0x92
-#define NAND_MFR_WINBOND 0xef
-
-/* The maximum expected count of bytes in the NAND ID sequence */
-#define NAND_MAX_ID_LEN 8
+ if (a->eraseblock != b->eraseblock)
+ return a->eraseblock < b->eraseblock ? -1 : 1;
-/*
- * A helper for defining older NAND chips where the second ID byte fully
- * defined the chip, including the geometry (chip size, eraseblock size, page
- * size). All these chips have 512 bytes NAND page size.
+ if (a->page != b->page)
+ return a->page < b->page ? -1 : 1;
+
+ return 0;
+}
+
+/**
+ * nanddev_pos_to_offs() - Convert a NAND position into an absolute offset
+ * @nand: NAND device
+ * @pos: the NAND position to convert
+ *
+ * Converts @pos NAND position into an absolute offset.
+ *
+ * Return: the absolute offset. Note that @pos points to the beginning of a
+ * page; if one wants to point to a specific offset within this page,
+ * the returned offset has to be adjusted manually.
*/
-#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
- { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
- .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+static inline loff_t nanddev_pos_to_offs(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ unsigned int npages;
-/*
- * A helper for defining newer chips which report their page size and
- * eraseblock size via the extended ID bytes.
- *
- * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
- * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
- * device ID now only represented a particular total chip size (and voltage,
- * buswidth), and the page size, eraseblock size, and OOB size could vary while
- * using the same device ID.
- */
-#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
- { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
- .options = (opts) }
-
-/**
- * struct nand_flash_dev - NAND Flash Device ID Structure
- * @name: a human-readable name of the NAND chip
- * @dev_id: the device ID (the second byte of the full chip ID array)
- * @mfr_id: manufecturer ID part of the full chip ID array (refers the same
- * memory address as @id[0])
- * @dev_id: device ID part of the full chip ID array (refers the same memory
- * address as @id[1])
- * @id: full device ID array
- * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
- * well as the eraseblock size) is determined from the extended NAND
- * chip ID array)
- * @chipsize: total chip size in MiB
- * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
- * @options: stores various chip bit options
- * @id_len: The valid length of the @id.
- * @oobsize: OOB size
- */
-struct nand_flash_dev {
- char *name;
- union {
- struct {
- uint8_t mfr_id;
- uint8_t dev_id;
- };
- uint8_t id[NAND_MAX_ID_LEN];
- };
- unsigned int pagesize;
- unsigned int chipsize;
- unsigned int erasesize;
- unsigned int options;
- uint16_t id_len;
- uint16_t oobsize;
-};
+ npages = pos->page +
+ ((pos->eraseblock +
+ (pos->lun +
+ (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun) *
+ nand->memorg.pages_per_eraseblock);
+
+ return (loff_t)npages * nand->memorg.pagesize;
+}
/**
- * struct nand_manufacturers - NAND Flash Manufacturer ID Structure
- * @name: Manufacturer name
- * @id: manufacturer ID code of device.
-*/
-struct nand_manufacturers {
- int id;
- char *name;
-};
+ * nanddev_pos_to_row() - Extract a row address from a NAND position
+ * @nand: NAND device
+ * @pos: the position to convert
+ *
+ * Converts a NAND position into a row address that can then be passed to the
+ * device.
+ *
+ * Return: the row address extracted from @pos.
+ */
+static inline unsigned int nanddev_pos_to_row(struct nand_device *nand,
+ const struct nand_pos *pos)
+{
+ return (pos->lun << nand->rowconv.lun_addr_shift) |
+ (pos->eraseblock << nand->rowconv.eraseblock_addr_shift) |
+ pos->page;
+}
-extern struct nand_flash_dev nand_flash_ids[];
-extern struct nand_manufacturers nand_manuf_ids[];
-
-extern int nand_update_bbt(struct nand_chip *chip, loff_t offs);
-extern int nand_default_bbt(struct nand_chip *chip);
-extern int nand_markbad_bbt(struct nand_chip *chip, loff_t offs);
-extern int nand_markgood_bbt(struct nand_chip *chip, loff_t offs);
-extern int nand_isbad_bbt(struct nand_chip *chip, loff_t offs, int allowbbt);
-extern int nand_erase_nand(struct nand_chip *chip, struct erase_info *instr,
- int allowbbt);
-extern int nand_do_read(struct nand_chip *chip, loff_t from, size_t len,
- size_t *retlen, uint8_t *buf);
-extern int add_mtd_nand_device(struct nand_chip *chip, char *devname);
-
-/**
- * struct platform_nand_chip - chip level device structure
- * @nr_chips: max. number of chips to scan for
- * @chip_offset: chip number offset
- * @nr_partitions: number of partitions pointed to by partitions (or zero)
- * @partitions: mtd partition list
- * @chip_delay: R/B delay value in us
- * @options: Option flags, e.g. 16bit buswidth
- * @bbt_options: BBT option flags, e.g. NAND_BBT_USE_FLASH
- * @ecclayout: ECC layout info structure
- * @part_probe_types: NULL-terminated array of probe types
- */
-struct platform_nand_chip {
- int nr_chips;
- int chip_offset;
- int nr_partitions;
- struct mtd_partition *partitions;
- struct nand_ecclayout *ecclayout;
- int chip_delay;
- unsigned int options;
- unsigned int bbt_options;
- const char **part_probe_types;
-};
+/**
+ * nanddev_pos_next_target() - Move a position to the next target/die
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next target/die. Useful when you
+ * want to iterate over all targets/dies of a NAND device.
+ */
+static inline void nanddev_pos_next_target(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+ pos->lun = 0;
+ pos->target++;
+}
-/* Keep gcc happy */
-struct platform_device;
-
-/**
- * struct platform_nand_ctrl - controller level device structure
- * @probe: platform specific function to probe/setup hardware
- * @remove: platform specific function to remove/teardown hardware
- * @hwcontrol: platform specific hardware control structure
- * @dev_ready: platform specific function to read ready/busy pin
- * @select_chip: platform specific chip select function
- * @cmd_ctrl: platform specific function for controlling
- * ALE/CLE/nCE. Also used to write command and address
- * @write_buf: platform specific function for write buffer
- * @read_buf: platform specific function for read buffer
- * @read_byte: platform specific function to read one byte from chip
- * @priv: private data to transport driver specific settings
- *
- * All fields are optional and depend on the hardware driver requirements
- */
-struct platform_nand_ctrl {
- int (*probe)(struct platform_device *pdev);
- void (*remove)(struct platform_device *pdev);
- void (*hwcontrol)(struct mtd_info *mtd, int cmd);
- int (*dev_ready)(struct mtd_info *mtd);
- void (*select_chip)(struct mtd_info *mtd, int chip);
- void (*cmd_ctrl)(struct mtd_info *mtd, int dat, unsigned int ctrl);
- void (*write_buf)(struct mtd_info *mtd, const uint8_t *buf, int len);
- void (*read_buf)(struct mtd_info *mtd, uint8_t *buf, int len);
- unsigned char (*read_byte)(struct mtd_info *mtd);
- void *priv;
-};
+/**
+ * nanddev_pos_next_lun() - Move a position to the next LUN
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next LUN. Useful when you want to
+ * iterate over all LUNs of a NAND device.
+ */
+static inline void nanddev_pos_next_lun(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->lun >= nand->memorg.luns_per_target - 1)
+ return nanddev_pos_next_target(nand, pos);
+
+ pos->lun++;
+ pos->page = 0;
+ pos->plane = 0;
+ pos->eraseblock = 0;
+}
/**
- * struct platform_nand_data - container structure for platform-specific data
- * @chip: chip level chip structure
- * @ctrl: controller level device structure
+ * nanddev_pos_next_eraseblock() - Move a position to the next eraseblock
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next eraseblock. Useful when you
+ * want to iterate over all eraseblocks of a NAND device.
*/
-struct platform_nand_data {
- struct platform_nand_chip chip;
- struct platform_nand_ctrl ctrl;
-};
+static inline void nanddev_pos_next_eraseblock(struct nand_device *nand,
+ struct nand_pos *pos)
+{
+ if (pos->eraseblock >= nand->memorg.eraseblocks_per_lun - 1)
+ return nanddev_pos_next_lun(nand, pos);
+
+ pos->eraseblock++;
+ pos->page = 0;
+ pos->plane = pos->eraseblock % nand->memorg.planes_per_lun;
+}
-/* Some helpers to access the data structures */
-static inline
-struct platform_nand_chip *get_platform_nandchip(struct mtd_info *mtd)
+/**
+ * nanddev_pos_next_page() - Move a position to the next page
+ * @nand: NAND device
+ * @pos: the position to update
+ *
+ * Updates @pos to point to the start of the next page. Useful when you want to
+ * iterate over all pages of a NAND device.
+ */
+static inline void nanddev_pos_next_page(struct nand_device *nand,
+ struct nand_pos *pos)
{
- struct nand_chip *chip = mtd->priv;
+ if (pos->page >= nand->memorg.pages_per_eraseblock - 1)
+ return nanddev_pos_next_eraseblock(nand, pos);
- return chip->priv;
+ pos->page++;
}
-/* return the supported asynchronous timing mode. */
-static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
+/**
+ * nanddev_io_iter_init - Initialize a NAND I/O iterator
+ * @nand: NAND device
+ * @offs: absolute offset
+ * @req: MTD request
+ * @iter: NAND I/O iterator
+ *
+ * Initializes a NAND iterator based on the information passed by the MTD
+ * layer.
+ */
+static inline void nanddev_io_iter_init(struct nand_device *nand,
+ loff_t offs, struct mtd_oob_ops *req,
+ struct nand_io_iter *iter)
{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.async_timing_mode);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+
+ iter->req.mode = req->mode;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, offs, &iter->req.pos);
+ iter->req.ooboffs = req->ooboffs;
+ iter->oobbytes_per_page = mtd_oobavail(mtd, req);
+ iter->dataleft = req->len;
+ iter->oobleft = req->ooblen;
+ iter->req.databuf.in = req->datbuf;
+ iter->req.datalen = min_t(unsigned int,
+ nand->memorg.pagesize - iter->req.dataoffs,
+ iter->dataleft);
+ iter->req.oobbuf.in = req->oobbuf;
+ iter->req.ooblen = min_t(unsigned int,
+ iter->oobbytes_per_page - iter->req.ooboffs,
+ iter->oobleft);
}
-/* return the supported synchronous timing mode. */
-static inline int onfi_get_sync_timing_mode(struct nand_chip *chip)
+/**
+ * nanddev_io_iter_next_page - Move to the next page
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Updates the @iter to point to the next page.
+ */
+static inline void nanddev_io_iter_next_page(struct nand_device *nand,
+ struct nand_io_iter *iter)
{
- if (!chip->onfi_version)
- return ONFI_TIMING_MODE_UNKNOWN;
- return le16_to_cpu(chip->onfi_params.src_sync_timing_mode);
+ nanddev_pos_next_page(nand, &iter->req.pos);
+ iter->dataleft -= iter->req.datalen;
+ iter->req.databuf.in += iter->req.datalen;
+ iter->oobleft -= iter->req.ooblen;
+ iter->req.oobbuf.in += iter->req.ooblen;
+ iter->req.dataoffs = 0;
+ iter->req.ooboffs = 0;
+ iter->req.datalen = min_t(unsigned int, nand->memorg.pagesize,
+ iter->dataleft);
+ iter->req.ooblen = min_t(unsigned int, iter->oobbytes_per_page,
+ iter->oobleft);
}
-/*
- * Check if it is a SLC nand.
- * The !nand_is_slc() can be used to check the MLC/TLC nand chips.
- * We do not distinguish the MLC and TLC now.
+/**
+ * nanddev_io_iter_end - Check whether the iteration should end
+ * @nand: NAND device
+ * @iter: NAND I/O iterator
+ *
+ * Check whether @iter has reached the end of the NAND portion it was asked to
+ * iterate on or not.
+ *
+ * Return: true if @iter has reached the end of the iteration request, false
+ * otherwise.
*/
-static inline bool nand_is_slc(struct nand_chip *chip)
+static inline bool nanddev_io_iter_end(struct nand_device *nand,
+ const struct nand_io_iter *iter)
{
- return chip->bits_per_cell == 1;
+ if (iter->dataleft || iter->oobleft)
+ return false;
+
+ return true;
}
/**
- * struct nand_sdr_timings - SDR NAND chip timings
- *
- * This struct defines the timing requirements of a SDR NAND chip.
- * These informations can be found in every NAND datasheets and the timings
- * meaning are described in the ONFI specifications:
- * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
- * Parameters)
- *
- * All these timings are expressed in picoseconds.
- */
-
-struct nand_sdr_timings {
- u32 tALH_min;
- u32 tADL_min;
- u32 tALS_min;
- u32 tAR_min;
- u32 tCEA_max;
- u32 tCEH_min;
- u32 tCH_min;
- u32 tCHZ_max;
- u32 tCLH_min;
- u32 tCLR_min;
- u32 tCLS_min;
- u32 tCOH_min;
- u32 tCS_min;
- u32 tDH_min;
- u32 tDS_min;
- u32 tFEAT_max;
- u32 tIR_min;
- u32 tITC_max;
- u32 tRC_min;
- u32 tREA_max;
- u32 tREH_min;
- u32 tRHOH_min;
- u32 tRHW_min;
- u32 tRHZ_max;
- u32 tRLOH_min;
- u32 tRP_min;
- u32 tRR_min;
- u64 tRST_max;
- u32 tWB_max;
- u32 tWC_min;
- u32 tWH_min;
- u32 tWHR_min;
- u32 tWP_min;
- u32 tWW_min;
+ * nanddev_io_for_each_page - Iterate over all NAND pages contained in an MTD
+ * I/O request
+ * @nand: NAND device
+ * @start: start address to read/write from
+ * @req: MTD I/O request
+ * @iter: NAND I/O iterator
+ *
+ * Should be used to iterate over the pages contained in an MTD request.
+ */
+#define nanddev_io_for_each_page(nand, start, req, iter) \
+ for (nanddev_io_iter_init(nand, start, req, iter); \
+ !nanddev_io_iter_end(nand, iter); \
+ nanddev_io_iter_next_page(nand, iter))
+
+bool nanddev_isbad(struct nand_device *nand, const struct nand_pos *pos);
+bool nanddev_isreserved(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_erase(struct nand_device *nand, const struct nand_pos *pos);
+int nanddev_markbad(struct nand_device *nand, const struct nand_pos *pos);
+
+/* BBT related functions */
+enum nand_bbt_block_status {
+ NAND_BBT_BLOCK_STATUS_UNKNOWN,
+ NAND_BBT_BLOCK_GOOD,
+ NAND_BBT_BLOCK_WORN,
+ NAND_BBT_BLOCK_RESERVED,
+ NAND_BBT_BLOCK_FACTORY_BAD,
+ NAND_BBT_BLOCK_NUM_STATUS,
};
-/* get timing characteristics from ONFI timing mode. */
-const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
+int nanddev_bbt_init(struct nand_device *nand);
+void nanddev_bbt_cleanup(struct nand_device *nand);
+int nanddev_bbt_update(struct nand_device *nand);
+int nanddev_bbt_get_block_status(const struct nand_device *nand,
+ unsigned int entry);
+int nanddev_bbt_set_block_status(struct nand_device *nand, unsigned int entry,
+ enum nand_bbt_block_status status);
+int nanddev_bbt_markbad(struct nand_device *nand, unsigned int block);
-static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
+/**
+ * nanddev_bbt_pos_to_entry() - Convert a NAND position into a BBT entry
+ * @nand: NAND device
+ * @pos: the NAND position we want to get the BBT entry for
+ *
+ * Return the BBT entry used to store information about the eraseblock pointed
+ * by @pos.
+ *
+ * Return: the BBT entry storing information about the eraseblock at @pos.
+ */
+static inline unsigned int nanddev_bbt_pos_to_entry(struct nand_device *nand,
+ const struct nand_pos *pos)
{
- return container_of(mtd, struct nand_chip, mtd);
+ return pos->eraseblock +
+ ((pos->lun + (pos->target * nand->memorg.luns_per_target)) *
+ nand->memorg.eraseblocks_per_lun);
}
-static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
+/**
+ * nanddev_bbt_is_initialized() - Check if the BBT has been initialized
+ * @nand: NAND device
+ *
+ * Return: true if the BBT has been initialized, false otherwise.
+ */
+static inline bool nanddev_bbt_is_initialized(struct nand_device *nand)
{
- return &chip->mtd;
+ return !!nand->bbt.cache;
}
+/* MTD -> NAND helper functions. */
+int nanddev_mtd_erase(struct mtd_info *mtd, struct erase_info *einfo);
+int nanddev_mtd_max_bad_blocks(struct mtd_info *mtd, loff_t offs, size_t len);
+int nand_check_erased_buf(void *buf, int len, int bitflips_threshold);
+
#endif /* __LINUX_MTD_NAND_H */
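
The nand_device/nand_io_iter helpers above are what specialized layers build
their mtd hooks on. A sketch of a read path using the iterator, assuming a
hypothetical foo_read_page() primitive that performs the per-page transfer
described by nand_page_io_req:

#include <linux/mtd/nand.h>

/* hypothetical per-page I/O primitive of the specialized layer */
int foo_read_page(struct nand_device *nand,
		  const struct nand_page_io_req *req);

static int foo_mtd_read_oob(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct nand_device *nand = mtd_to_nanddev(mtd);
	struct nand_io_iter iter;
	int ret;

	ops->retlen = 0;
	ops->oobretlen = 0;

	nanddev_io_for_each_page(nand, from, ops, &iter) {
		ret = foo_read_page(nand, &iter.req);
		if (ret)
			return ret;

		ops->retlen += iter.req.datalen;
		ops->oobretlen += iter.req.ooblen;
	}

	return 0;
}

The same pattern works for the write hook; erase requests go through
nanddev_mtd_erase() instead.
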
diff --git a/include/linux/mtd/nand_bch.h b/include/linux/mtd/nand_bch.h
index eee80a558c..d5956cc48b 100644
--- a/include/linux/mtd/nand_bch.h
+++ b/include/linux/mtd/nand_bch.h
@@ -1,20 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
* Copyright © 2011 Ivan Djelic <ivan.djelic at parrot.com>
*
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
* This file is the header for the NAND BCH ECC implementation.
*/
#ifndef __MTD_NAND_BCH_H__
#define __MTD_NAND_BCH_H__
+struct mtd_info;
struct nand_chip;
struct nand_bch_control;
-#if defined(CONFIG_NAND_ECC_BCH)
+#if IS_ENABLED(CONFIG_MTD_NAND_ECC_SW_BCH)
static inline int mtd_nand_has_bch(void) { return 1; }
@@ -27,18 +25,18 @@ int nand_bch_calculate_ecc(struct nand_chip *chip, const u_char *dat,
/*
* Detect and correct bit errors
*/
-int nand_bch_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
- u_char *calc_ecc);
+int nand_bch_correct_data(struct nand_chip *chip, u_char *dat,
+ u_char *read_ecc, u_char *calc_ecc);
/*
* Initialize BCH encoder/decoder
*/
-struct nand_bch_control *nand_bch_init(struct nand_chip *chip);
+struct nand_bch_control *nand_bch_init(struct mtd_info *mtd);
/*
* Release BCH encoder/decoder resources
*/
void nand_bch_free(struct nand_bch_control *nbc);
-#else /* !CONFIG_MTD_NAND_ECC_BCH */
+#else /* !CONFIG_MTD_NAND_ECC_SW_BCH */
static inline int mtd_nand_has_bch(void) { return 0; }
@@ -53,16 +51,16 @@ static inline int
nand_bch_correct_data(struct nand_chip *chip, unsigned char *buf,
unsigned char *read_ecc, unsigned char *calc_ecc)
{
- return -1;
+ return -ENOTSUPP;
}
-static inline struct nand_bch_control *nand_bch_init(struct nand_chip *chip)
+static inline struct nand_bch_control *nand_bch_init(struct mtd_info *mtd)
{
return NULL;
}
static inline void nand_bch_free(struct nand_bch_control *nbc) {}
-#endif /* CONFIG_MTD_NAND_ECC_BCH */
+#endif /* CONFIG_MTD_NAND_ECC_SW_BCH */
#endif /* __MTD_NAND_BCH_H__ */
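
nand_bch_init() now takes the mtd_info and compiles to a NULL-returning stub
when CONFIG_MTD_NAND_ECC_SW_BCH is disabled. A minimal sketch of the guarded
setup a caller could do (foo_init_soft_bch() is hypothetical; nand_base does
the real equivalent):

#include <errno.h>
#include <linux/mtd/nand_bch.h>

static int foo_init_soft_bch(struct mtd_info *mtd,
			     struct nand_bch_control **nbcp)
{
	if (!mtd_nand_has_bch())
		return -ENOTSUPP;

	*nbcp = nand_bch_init(mtd);
	if (!*nbcp)
		return -EINVAL;

	/* release with nand_bch_free(*nbcp) when done */
	return 0;
}
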
diff --git a/include/linux/mtd/nand_ecc.h b/include/linux/mtd/nand_ecc.h
index 1dc779581f..d423916b94 100644
--- a/include/linux/mtd/nand_ecc.h
+++ b/include/linux/mtd/nand_ecc.h
@@ -1,13 +1,8 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
/*
- * drivers/mtd/nand_ecc.h
- *
- * Copyright (C) 2000 Steven J. Hill (sjhill at realitydiluted.com)
- *
- * $Id: nand_ecc.h,v 1.4 2004/06/17 02:35:02 dbrown Exp $
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
+ * Copyright (C) 2000-2010 Steven J. Hill <sjhill at realitydiluted.com>
+ * David Woodhouse <dwmw2 at infradead.org>
+ * Thomas Gleixner <tglx at linutronix.de>
*
* This file is the header for the ECC algorithm.
*/
@@ -18,13 +13,27 @@
struct nand_chip;
/*
- * Calculate 3 byte ECC code for 256 byte block
+ * Calculate 3 byte ECC code for eccsize byte block
+ */
+void __nand_calculate_ecc(const u_char *dat, unsigned int eccsize,
+ u_char *ecc_code, bool sm_order);
+
+/*
+ * Calculate 3 byte ECC code for 256/512 byte block
+ */
+int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat,
+ u_char *ecc_code);
+
+/*
+ * Detect and correct a 1 bit error for eccsize byte block
*/
-int nand_calculate_ecc(struct nand_chip *chip, const u_char *dat, u_char *ecc_code);
+int __nand_correct_data(u_char *dat, u_char *read_ecc, u_char *calc_ecc,
+ unsigned int eccsize, bool sm_order);
/*
- * Detect and correct a 1 bit error for 256 byte block
+ * Detect and correct a 1 bit error for 256/512 byte block
*/
-int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc, u_char *calc_ecc);
+int nand_correct_data(struct nand_chip *chip, u_char *dat, u_char *read_ecc,
+ u_char *calc_ecc);
#endif /* __MTD_NAND_ECC_H__ */
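
The __nand_calculate_ecc()/__nand_correct_data() pair exported above works on
whole 256- or 512-byte code words. A minimal sketch of repairing one block,
assuming sm_order=false (i.e. not the Smart Media byte order) and a 3-byte
ECC read back from OOB:

#include <linux/mtd/nand_ecc.h>

/*
 * data:     256-byte block read from the flash
 * read_ecc: the 3 ECC bytes stored in OOB for that block
 *
 * Returns the number of corrected bitflips (0 or 1) or a negative error
 * when the block is uncorrectable.
 */
static int foo_fix_block(u_char *data, u_char *read_ecc)
{
	u_char calc_ecc[3];

	__nand_calculate_ecc(data, 256, calc_ecc, false);

	return __nand_correct_data(data, read_ecc, calc_ecc, 256, false);
}
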
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
new file mode 100644
index 0000000000..339ac79856
--- /dev/null
+++ b/include/linux/mtd/onfi.h
@@ -0,0 +1,178 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2 at infradead.org>
+ * Steven J. Hill <sjhill at realitydiluted.com>
+ * Thomas Gleixner <tglx at linutronix.de>
+ *
+ * Contains all ONFI related definitions
+ */
+
+#ifndef __LINUX_MTD_ONFI_H
+#define __LINUX_MTD_ONFI_H
+
+#include <linux/types.h>
+
+/* ONFI version bits */
+#define ONFI_VERSION_1_0 BIT(1)
+#define ONFI_VERSION_2_0 BIT(2)
+#define ONFI_VERSION_2_1 BIT(3)
+#define ONFI_VERSION_2_2 BIT(4)
+#define ONFI_VERSION_2_3 BIT(5)
+#define ONFI_VERSION_3_0 BIT(6)
+#define ONFI_VERSION_3_1 BIT(7)
+#define ONFI_VERSION_3_2 BIT(8)
+#define ONFI_VERSION_4_0 BIT(9)
+
+/* ONFI features */
+#define ONFI_FEATURE_16_BIT_BUS (1 << 0)
+#define ONFI_FEATURE_EXT_PARAM_PAGE (1 << 7)
+
+/* ONFI timing mode, used in both asynchronous and synchronous mode */
+#define ONFI_TIMING_MODE_0 (1 << 0)
+#define ONFI_TIMING_MODE_1 (1 << 1)
+#define ONFI_TIMING_MODE_2 (1 << 2)
+#define ONFI_TIMING_MODE_3 (1 << 3)
+#define ONFI_TIMING_MODE_4 (1 << 4)
+#define ONFI_TIMING_MODE_5 (1 << 5)
+#define ONFI_TIMING_MODE_UNKNOWN (1 << 6)
+
+/* ONFI feature number/address */
+#define ONFI_FEATURE_NUMBER 256
+#define ONFI_FEATURE_ADDR_TIMING_MODE 0x1
+
+/* Vendor-specific feature address (Micron) */
+#define ONFI_FEATURE_ADDR_READ_RETRY 0x89
+#define ONFI_FEATURE_ON_DIE_ECC 0x90
+#define ONFI_FEATURE_ON_DIE_ECC_EN BIT(3)
+
+/* ONFI subfeature parameters length */
+#define ONFI_SUBFEATURE_PARAM_LEN 4
+
+/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_SET_GET_FEATURES (1 << 2)
+
+struct nand_onfi_params {
+ /* rev info and features block */
+ /* 'O' 'N' 'F' 'I' */
+ u8 sig[4];
+ __le16 revision;
+ __le16 features;
+ __le16 opt_cmd;
+ u8 reserved0[2];
+ __le16 ext_param_page_length; /* since ONFI 2.1 */
+ u8 num_of_param_pages; /* since ONFI 2.1 */
+ u8 reserved1[17];
+
+ /* manufacturer information block */
+ char manufacturer[12];
+ char model[20];
+ u8 jedec_id;
+ __le16 date_code;
+ u8 reserved2[13];
+
+ /* memory organization block */
+ __le32 byte_per_page;
+ __le16 spare_bytes_per_page;
+ __le32 data_bytes_per_ppage;
+ __le16 spare_bytes_per_ppage;
+ __le32 pages_per_block;
+ __le32 blocks_per_lun;
+ u8 lun_count;
+ u8 addr_cycles;
+ u8 bits_per_cell;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 guaranteed_good_blocks;
+ __le16 guaranteed_block_endurance;
+ u8 programs_per_page;
+ u8 ppage_attr;
+ u8 ecc_bits;
+ u8 interleaved_bits;
+ u8 interleaved_ops;
+ u8 reserved3[13];
+
+ /* electrical parameter block */
+ u8 io_pin_capacitance_max;
+ __le16 async_timing_mode;
+ __le16 program_cache_timing_mode;
+ __le16 t_prog;
+ __le16 t_bers;
+ __le16 t_r;
+ __le16 t_ccs;
+ __le16 src_sync_timing_mode;
+ u8 src_ssync_features;
+ __le16 clk_pin_capacitance_typ;
+ __le16 io_pin_capacitance_typ;
+ __le16 input_pin_capacitance_typ;
+ u8 input_pin_capacitance_max;
+ u8 driver_strength_support;
+ __le16 t_int_r;
+ __le16 t_adl;
+ u8 reserved4[8];
+
+ /* vendor */
+ __le16 vendor_revision;
+ u8 vendor[88];
+
+ __le16 crc;
+} __packed;
+
+#define ONFI_CRC_BASE 0x4F4E
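
The CRC stored in the last two bytes of the parameter page is conventionally
computed as a CRC-16 (polynomial 0x8005) seeded with ONFI_CRC_BASE over the
preceding 254 bytes. A sketch of that check, not part of the diff (the NAND
core carries the authoritative version):

/* Illustration only: ONFI parameter page CRC-16 (polynomial 0x8005). */
static u16 example_onfi_crc16(u16 crc, const u8 *p, size_t len)
{
	int i;

	while (len--) {
		crc ^= *p++ << 8;
		for (i = 0; i < 8; i++)
			crc = (crc << 1) ^ ((crc & 0x8000) ? 0x8005 : 0);
	}

	return crc;
}

static bool example_onfi_crc_ok(const struct nand_onfi_params *p)
{
	u16 crc = example_onfi_crc16(ONFI_CRC_BASE, (const u8 *)p,
				     sizeof(*p) - sizeof(p->crc));

	return crc == le16_to_cpu(p->crc);
}
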
+
+/* Extended ECC information Block Definition (since ONFI 2.1) */
+struct onfi_ext_ecc_info {
+ u8 ecc_bits;
+ u8 codeword_size;
+ __le16 bb_per_lun;
+ __le16 block_endurance;
+ u8 reserved[2];
+} __packed;
+
+#define ONFI_SECTION_TYPE_0 0 /* Unused section. */
+#define ONFI_SECTION_TYPE_1 1 /* for additional sections. */
+#define ONFI_SECTION_TYPE_2 2 /* for ECC information. */
+struct onfi_ext_section {
+ u8 type;
+ u8 length;
+} __packed;
+
+#define ONFI_EXT_SECTION_MAX 8
+
+/* Extended Parameter Page Definition (since ONFI 2.1) */
+struct onfi_ext_param_page {
+ __le16 crc;
+ u8 sig[4]; /* 'E' 'P' 'P' 'S' */
+ u8 reserved0[10];
+ struct onfi_ext_section sections[ONFI_EXT_SECTION_MAX];
+
+ /*
+ * The actual size of the Extended Parameter Page is in
+ * @ext_param_page_length of nand_onfi_params{}.
+ * The following are the variable length sections.
+ * So we do not add any fields below. Please see the ONFI spec.
+ */
+} __packed;
+
+/**
+ * struct onfi_params - ONFI specific parameters that will be reused
+ * @version: ONFI version (BCD encoded), 0 if ONFI is not supported
+ * @tPROG: Page program time
+ * @tBERS: Block erase time
+ * @tR: Page read time
+ * @tCCS: Change column setup time
+ * @async_timing_mode: Supported asynchronous timing mode
+ * @vendor_revision: Vendor specific revision number
+ * @vendor: Vendor specific data
+ */
+struct onfi_params {
+ int version;
+ u16 tPROG;
+ u16 tBERS;
+ u16 tR;
+ u16 tCCS;
+ u16 async_timing_mode;
+ u16 vendor_revision;
+ u8 vendor[88];
+};
+
+#endif /* __LINUX_MTD_ONFI_H */
diff --git a/include/linux/mtd/partitions.h b/include/linux/mtd/partitions.h
new file mode 100644
index 0000000000..9ea2310bd9
--- /dev/null
+++ b/include/linux/mtd/partitions.h
@@ -0,0 +1,115 @@
+/*
+ * MTD partitioning layer definitions
+ *
+ * (C) 2000 Nicolas Pitre <nico at fluxnic.net>
+ *
+ * This code is GPL
+ */
+
+#ifndef MTD_PARTITIONS_H
+#define MTD_PARTITIONS_H
+
+#include <linux/types.h>
+
+
+/*
+ * Partition definition structure:
+ *
+ * An array of struct partition is passed along with a MTD object to
+ * mtd_device_register() to create them.
+ *
+ * For each partition, these fields are available:
+ * name: string that will be used to label the partition's MTD device.
+ * types: some partitions can be containers using a specific format to describe
+ * embedded subpartitions / volumes. E.g. many home routers use a "firmware"
+ * partition that contains at least a kernel and a rootfs. In such a case an
+ * extra parser is needed that will detect these dynamic partitions and
+ * report them to the MTD subsystem. If set, this property stores an array
+ * of parser names to use when looking for subpartitions.
+ * size: the partition size; if defined as MTDPART_SIZ_FULL, the partition
+ * will extend to the end of the master MTD device.
+ * offset: absolute starting position within the master MTD device; if
+ * defined as MTDPART_OFS_APPEND, the partition will start where the
+ * previous one ended; if MTDPART_OFS_NXTBLK, at the next erase block;
+ * if MTDPART_OFS_RETAIN, consume as much as possible, leaving size
+ * after the end of partition.
+ * mask_flags: contains flags that have to be masked (removed) from the
+ * master MTD flag set for the corresponding MTD partition.
+ * For example, to force a read-only partition, simply adding
+ * MTD_WRITEABLE to the mask_flags will do the trick.
+ * add_flags: contains flags to add to the parent flags
+ *
+ * Note: writeable partitions require their size and offset to be
+ * erasesize aligned (e.g. use MTDPART_OFS_NXTBLK).
+ */
+
+struct mtd_partition {
+ const char *name; /* identifier string */
+ const char *const *types; /* names of parsers to use if any */
+ uint64_t size; /* partition size */
+ uint64_t offset; /* offset within the master MTD space */
+ uint32_t mask_flags; /* master MTD flags to mask out for this partition */
+ uint32_t add_flags; /* flags to add to the partition */
+ struct device_node *of_node;
+};
+
+#define MTDPART_OFS_RETAIN (-3)
+#define MTDPART_OFS_NXTBLK (-2)
+#define MTDPART_OFS_APPEND (-1)
+#define MTDPART_SIZ_FULL (0)
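
For orientation (not part of the diff), a static table built from the
structure and offset helpers above could look like this; names and sizes are
made up:

#include <linux/sizes.h>
#include <linux/mtd/partitions.h>

/* Purely illustrative layout for a hypothetical board. */
static const struct mtd_partition board_nand_parts[] = {
	{
		.name   = "barebox",
		.offset = 0,
		.size   = SZ_1M,
	}, {
		.name   = "environment",
		.offset = MTDPART_OFS_APPEND,	/* start where "barebox" ends */
		.size   = SZ_512K,
	}, {
		.name   = "root",
		.offset = MTDPART_OFS_APPEND,
		.size   = MTDPART_SIZ_FULL,	/* extend to the end of the device */
	},
};
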
+
+
+struct mtd_info;
+struct device_node;
+
+/**
+ * struct mtd_part_parser_data - used to pass data to MTD partition parsers.
+ * @origin: for RedBoot, start address of MTD device
+ */
+struct mtd_part_parser_data {
+ unsigned long origin;
+};
+
+
+/*
+ * Functions dealing with the various ways of partitioning the space
+ */
+
+struct mtd_part_parser {
+ struct list_head list;
+ struct module *owner;
+ const char *name;
+ const struct of_device_id *of_match_table;
+ int (*parse_fn)(struct mtd_info *, const struct mtd_partition **,
+ struct mtd_part_parser_data *);
+ void (*cleanup)(const struct mtd_partition *pparts, int nr_parts);
+};
+
+/* Container for passing around a set of parsed partitions */
+struct mtd_partitions {
+ const struct mtd_partition *parts;
+ int nr_parts;
+ const struct mtd_part_parser *parser;
+};
+
+extern int __register_mtd_parser(struct mtd_part_parser *parser,
+ struct module *owner);
+#define register_mtd_parser(parser) __register_mtd_parser(parser, THIS_MODULE)
+
+extern void deregister_mtd_parser(struct mtd_part_parser *parser);
+
+/*
+ * module_mtd_part_parser() - Helper macro for MTD partition parsers that don't
+ * do anything special in module init/exit. Each driver may only use this macro
+ * once, and calling it replaces module_init() and module_exit().
+ */
+#define module_mtd_part_parser(__mtd_part_parser) \
+ module_driver(__mtd_part_parser, register_mtd_parser, \
+ deregister_mtd_parser)
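
A minimal parser built on this interface could look roughly like the sketch
below (hypothetical names; it follows the Linux convention that parse_fn
returns the number of partitions found or a negative error code, and the init
function would be hooked up with whatever initcall the driver already uses):

#include <xfuncs.h>
#include <linux/sizes.h>
#include <linux/mtd/mtd.h>
#include <linux/mtd/partitions.h>

/* Hypothetical parser that always reports a fixed two-partition layout. */
static int dummyfw_parse(struct mtd_info *mtd,
			 const struct mtd_partition **pparts,
			 struct mtd_part_parser_data *data)
{
	struct mtd_partition *parts = xzalloc(2 * sizeof(*parts));

	parts[0].name = "kernel";
	parts[0].offset = 0;
	parts[0].size = SZ_8M;

	parts[1].name = "rootfs";
	parts[1].offset = MTDPART_OFS_APPEND;
	parts[1].size = MTDPART_SIZ_FULL;

	*pparts = parts;

	return 2;
}

static struct mtd_part_parser dummyfw_parser = {
	.name = "dummyfw",
	.parse_fn = dummyfw_parse,
};

static int dummyfw_parser_init(void)
{
	return register_mtd_parser(&dummyfw_parser);
}
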
+
+struct mtd_info *mtd_add_partition(struct mtd_info *mtd, off_t offset,
+ uint64_t size, unsigned long flags, const char *name);
+int mtd_del_partition(struct mtd_info *part);
+uint64_t mtd_get_device_size(const struct mtd_info *mtd);
+
+#endif
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
new file mode 100644
index 0000000000..1147f235a6
--- /dev/null
+++ b/include/linux/mtd/rawnand.h
@@ -0,0 +1,1464 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright © 2000-2010 David Woodhouse <dwmw2 at infradead.org>
+ * Steven J. Hill <sjhill at realitydiluted.com>
+ * Thomas Gleixner <tglx at linutronix.de>
+ *
+ * Info:
+ * Contains standard defines and IDs for NAND flash devices
+ *
+ * Changelog:
+ * See git changelog.
+ */
+#ifndef __LINUX_MTD_RAWNAND_H
+#define __LINUX_MTD_RAWNAND_H
+
+#include <linux/mtd/mtd.h>
+#include <linux/mtd/flashchip.h>
+#include <linux/mtd/bbm.h>
+#include <linux/mtd/jedec.h>
+#include <linux/mtd/nand.h>
+#include <linux/mtd/onfi.h>
+#include <linux/bitmap.h>
+#include <linux/mutex.h>
+#include <linux/types.h>
+#include <common.h>
+
+struct nand_chip;
+
+/* The maximum number of NAND chips in an array */
+#define NAND_MAX_CHIPS 8
+
+/*
+ * Constants for hardware specific CLE/ALE/NCE function
+ *
+ * These are bits which can be or'ed to set/clear multiple
+ * bits in one go.
+ */
+/* Select the chip by setting nCE to low */
+#define NAND_NCE 0x01
+/* Select the command latch by setting CLE to high */
+#define NAND_CLE 0x02
+/* Select the address latch by setting ALE to high */
+#define NAND_ALE 0x04
+
+#define NAND_CTRL_CLE (NAND_NCE | NAND_CLE)
+#define NAND_CTRL_ALE (NAND_NCE | NAND_ALE)
+#define NAND_CTRL_CHANGE 0x80
+
+/*
+ * Standard NAND flash commands
+ */
+#define NAND_CMD_READ0 0
+#define NAND_CMD_READ1 1
+#define NAND_CMD_RNDOUT 5
+#define NAND_CMD_PAGEPROG 0x10
+#define NAND_CMD_READOOB 0x50
+#define NAND_CMD_ERASE1 0x60
+#define NAND_CMD_STATUS 0x70
+#define NAND_CMD_SEQIN 0x80
+#define NAND_CMD_RNDIN 0x85
+#define NAND_CMD_READID 0x90
+#define NAND_CMD_ERASE2 0xd0
+#define NAND_CMD_PARAM 0xec
+#define NAND_CMD_GET_FEATURES 0xee
+#define NAND_CMD_SET_FEATURES 0xef
+#define NAND_CMD_RESET 0xff
+
+/* Extended commands for large page devices */
+#define NAND_CMD_READSTART 0x30
+#define NAND_CMD_RNDOUTSTART 0xE0
+#define NAND_CMD_CACHEDPROG 0x15
+
+#define NAND_CMD_NONE -1
+
+/* Status bits */
+#define NAND_STATUS_FAIL 0x01
+#define NAND_STATUS_FAIL_N1 0x02
+#define NAND_STATUS_TRUE_READY 0x20
+#define NAND_STATUS_READY 0x40
+#define NAND_STATUS_WP 0x80
+
+#define NAND_DATA_IFACE_CHECK_ONLY -1
+
+/*
+ * Constants for ECC_MODES
+ */
+enum nand_ecc_mode {
+ NAND_ECC_INVALID,
+ NAND_ECC_NONE,
+ NAND_ECC_SOFT,
+ NAND_ECC_HW,
+ NAND_ECC_HW_SYNDROME,
+ NAND_ECC_ON_DIE,
+ NAND_ECC_HW_OOB_FIRST,
+ NAND_ECC_SOFT_BCH
+};
+
+enum nand_ecc_algo {
+ NAND_ECC_UNKNOWN,
+ NAND_ECC_HAMMING,
+ NAND_ECC_BCH,
+ NAND_ECC_RS,
+};
+
+/*
+ * Constants for Hardware ECC
+ */
+/* Reset Hardware ECC for read */
+#define NAND_ECC_READ 0
+/* Reset Hardware ECC for write */
+#define NAND_ECC_WRITE 1
+/* Enable Hardware ECC before syndrome is read back from flash */
+#define NAND_ECC_READSYN 2
+
+/*
+ * Enable generic NAND 'page erased' check. This check is only done when
+ * ecc.correct() returns -EBADMSG.
+ * Set this flag if your implementation does not fix bitflips in erased
+ * pages and you want to rely on the default implementation.
+ */
+#define NAND_ECC_GENERIC_ERASED_CHECK BIT(0)
+#define NAND_ECC_MAXIMIZE BIT(1)
+
+/*
+ * Option constants for bizarre dysfunctionality and real
+ * features.
+ */
+
+/* Buswidth is 16 bit */
+#define NAND_BUSWIDTH_16 BIT(1)
+
+/*
+ * When using software implementation of Hamming, we can specify which byte
+ * ordering should be used.
+ */
+#define NAND_ECC_SOFT_HAMMING_SM_ORDER BIT(2)
+
+/* Chip has cache program function */
+#define NAND_CACHEPRG BIT(3)
+/* Options valid for Samsung large page devices */
+#define NAND_SAMSUNG_LP_OPTIONS NAND_CACHEPRG
+
+/*
+ * Chip requires ready check on read (for auto-incremented sequential read).
+ * True only for small page devices; large page devices do not support
+ * autoincrement.
+ */
+#define NAND_NEED_READRDY BIT(8)
+
+/* Chip does not allow subpage writes */
+#define NAND_NO_SUBPAGE_WRITE BIT(9)
+
+/* Device is one of the 'new' xD cards that expose a fake nand command set */
+#define NAND_BROKEN_XD BIT(10)
+
+/* Device behaves just like nand, but is readonly */
+#define NAND_ROM BIT(11)
+
+/* Device supports subpage reads */
+#define NAND_SUBPAGE_READ BIT(12)
+/* Macros to identify the above */
+#define NAND_HAS_SUBPAGE_READ(chip) ((chip->options & NAND_SUBPAGE_READ))
+
+/*
+ * Some MLC NANDs need data scrambling to limit bitflips caused by repeated
+ * patterns.
+ */
+#define NAND_NEED_SCRAMBLING BIT(13)
+
+/* Device needs 3rd row address cycle */
+#define NAND_ROW_ADDR_3 BIT(14)
+
+/* Non chip related options */
+/* This option skips the bbt scan during initialization. */
+#define NAND_SKIP_BBTSCAN BIT(16)
+/* Chip may not exist, so silence any errors in scan */
+#define NAND_SCAN_SILENT_NODEV BIT(18)
+
+/*
+ * Autodetect nand buswidth with readid/onfi.
+ * This supposes the driver will configure the hardware in 8-bit mode
+ * when calling nand_scan_ident, and update its configuration
+ * before calling nand_scan_tail.
+ */
+#define NAND_BUSWIDTH_AUTO BIT(19)
+
+/*
+ * This option could be defined by controller drivers to protect against
+ * kmap'ed, vmalloc'ed highmem buffers being passed from upper layers
+ */
+#define NAND_USES_DMA BIT(20)
+
+/*
+ * In case your controller is implementing ->legacy.cmd_ctrl() and is relying
+ * on the default ->cmdfunc() implementation, you may want to let the core
+ * handle the tCCS delay which is required when a column change (RNDIN or
+ * RNDOUT) is requested.
+ * If your controller already takes care of this delay, you don't need to set
+ * this flag.
+ */
+#define NAND_WAIT_TCCS BIT(21)
+
+/*
+ * Whether the NAND chip is a boot medium. Drivers might use this information
+ * to select ECC algorithms supported by the boot ROM or similar restrictions.
+ */
+#define NAND_IS_BOOT_MEDIUM BIT(22)
+
+/*
+ * Do not try to tweak the timings at runtime. This is needed when the
+ * controller initializes the timings on itself or when it relies on
+ * configuration done by the bootloader.
+ */
+#define NAND_KEEP_TIMINGS BIT(23)
+
+/*
+ * There are different places where the manufacturer stores the factory bad
+ * block markers.
+ *
+ * Position within the block: Each of these pages needs to be checked for a
+ * bad block marking pattern.
+ */
+#define NAND_BBM_FIRSTPAGE BIT(24)
+#define NAND_BBM_SECONDPAGE BIT(25)
+#define NAND_BBM_LASTPAGE BIT(26)
+
+/*
+ * Some controllers with pipelined ECC engines override the BBM marker with
+ * data or ECC bytes, thus making bad block detection through bad block marker
+ * impossible. Let's flag those chips so the core knows it shouldn't check the
+ * BBM and consider all blocks good.
+ */
+#define NAND_NO_BBM_QUIRK BIT(27)
+
+/* Cell info constants */
+#define NAND_CI_CHIPNR_MSK 0x03
+#define NAND_CI_CELLTYPE_MSK 0x0C
+#define NAND_CI_CELLTYPE_SHIFT 2
+
+/* Position within the OOB data of the page */
+#define NAND_BBM_POS_SMALL 5
+#define NAND_BBM_POS_LARGE 0
+
+/**
+ * struct nand_parameters - NAND generic parameters from the parameter page
+ * @model: Model name
+ * @supports_set_get_features: The NAND chip supports setting/getting features
+ * @set_feature_list: Bitmap of features that can be set
+ * @get_feature_list: Bitmap of features that can be retrieved
+ * @onfi: ONFI specific parameters
+ */
+struct nand_parameters {
+ /* Generic parameters */
+ const char *model;
+ bool supports_set_get_features;
+ DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
+ DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
+
+ /* ONFI parameters */
+ struct onfi_params *onfi;
+};
+
+/* The maximum expected count of bytes in the NAND ID sequence */
+#define NAND_MAX_ID_LEN 8
+
+/**
+ * struct nand_id - NAND id structure
+ * @data: buffer containing the id bytes.
+ * @len: ID length.
+ */
+struct nand_id {
+ u8 data[NAND_MAX_ID_LEN];
+ int len;
+};
+
+/**
+ * struct nand_ecc_step_info - ECC step information of ECC engine
+ * @stepsize: data bytes per ECC step
+ * @strengths: array of supported strengths
+ * @nstrengths: number of supported strengths
+ */
+struct nand_ecc_step_info {
+ int stepsize;
+ const int *strengths;
+ int nstrengths;
+};
+
+/**
+ * struct nand_ecc_caps - capability of ECC engine
+ * @stepinfos: array of ECC step information
+ * @nstepinfos: number of ECC step information
+ * @calc_ecc_bytes: driver's hook to calculate ECC bytes per step
+ */
+struct nand_ecc_caps {
+ const struct nand_ecc_step_info *stepinfos;
+ int nstepinfos;
+ int (*calc_ecc_bytes)(int step_size, int strength);
+};
+
+/* a shorthand to generate struct nand_ecc_caps with only one ECC stepsize */
+#define NAND_ECC_CAPS_SINGLE(__name, __calc, __step, ...) \
+static const int __name##_strengths[] = { __VA_ARGS__ }; \
+static const struct nand_ecc_step_info __name##_stepinfo = { \
+ .stepsize = __step, \
+ .strengths = __name##_strengths, \
+ .nstrengths = ARRAY_SIZE(__name##_strengths), \
+}; \
+static const struct nand_ecc_caps __name = { \
+ .stepinfos = &__name##_stepinfo, \
+ .nstepinfos = 1, \
+ .calc_ecc_bytes = __calc, \
+}
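
To give an idea of how this shorthand is used (made-up controller limits): a
controller with a fixed 512 byte ECC step and BCH over GF(2^13) would declare
its capabilities like this and later feed them to nand_ecc_choose_conf()
(declared further down) to pick a strength that fits the available OOB:

/* Hypothetical: 512 byte steps, strengths 1/4/8, 13 bits per corrected bit. */
static int foo_calc_ecc_bytes(int step_size, int strength)
{
	return DIV_ROUND_UP(13 * strength, 8);
}

NAND_ECC_CAPS_SINGLE(foo_ecc_caps, foo_calc_ecc_bytes, 512, 1, 4, 8);
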
+
+/**
+ * struct nand_ecc_ctrl - Control structure for ECC
+ * @mode: ECC mode
+ * @algo: ECC algorithm
+ * @steps: number of ECC steps per page
+ * @size: data bytes per ECC step
+ * @bytes: ECC bytes per step
+ * @strength: max number of correctable bits per ECC step
+ * @total: total number of ECC bytes per page
+ * @prepad: padding information for syndrome based ECC generators
+ * @postpad: padding information for syndrome based ECC generators
+ * @options: ECC specific options (see NAND_ECC_XXX flags defined above)
+ * @priv: pointer to private ECC control data
+ * @calc_buf: buffer for calculated ECC, size is oobsize.
+ * @code_buf: buffer for ECC read from flash, size is oobsize.
+ * @hwctl: function to control hardware ECC generator. Must only
+ * be provided if a hardware ECC is available
+ * @calculate: function for ECC calculation or readback from ECC hardware
+ * @correct: function for ECC correction, matching to ECC generator (sw/hw).
+ * Should return a positive number representing the number of
+ * corrected bitflips, -EBADMSG if the number of bitflips exceeds the
+ * ECC strength, or any other error code if the error is not
+ * directly related to correction.
+ * If -EBADMSG is returned the input buffers should be left
+ * untouched.
+ * @read_page_raw: function to read a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and always return contiguous in-band and
+ * out-of-band data even if they're not stored
+ * contiguously on the NAND chip (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @write_page_raw: function to write a raw page without ECC. This function
+ * should hide the specific layout used by the ECC
+ * controller and consider the passed data as contiguous
+ * in-band and out-of-band data. ECC controller is
+ * responsible for doing the appropriate transformations
+ * to adapt to its specific layout (e.g.
+ * NAND_ECC_HW_SYNDROME interleaves in-band and
+ * out-of-band data).
+ * @read_page: function to read a page according to the ECC generator
+ * requirements; returns maximum number of bitflips corrected in
+ * any single ECC step, or -EIO on a hardware error
+ * @read_subpage: function to read parts of the page covered by ECC;
+ * returns same as read_page()
+ * @write_subpage: function to write parts of the page covered by ECC.
+ * @write_page: function to write a page according to the ECC generator
+ * requirements.
+ * @write_oob_raw: function to write chip OOB data without ECC
+ * @read_oob_raw: function to read chip OOB data without ECC
+ * @read_oob: function to read chip OOB data
+ * @write_oob: function to write chip OOB data
+ */
+struct nand_ecc_ctrl {
+ enum nand_ecc_mode mode;
+ enum nand_ecc_algo algo;
+ int steps;
+ int size;
+ int bytes;
+ int total;
+ int strength;
+ int prepad;
+ int postpad;
+ unsigned int options;
+ void *priv;
+ u8 *calc_buf;
+ u8 *code_buf;
+ void (*hwctl)(struct nand_chip *chip, int mode);
+ int (*calculate)(struct nand_chip *chip, const uint8_t *dat,
+ uint8_t *ecc_code);
+ int (*correct)(struct nand_chip *chip, uint8_t *dat, uint8_t *read_ecc,
+ uint8_t *calc_ecc);
+ int (*read_page_raw)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*write_page_raw)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*read_page)(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+ int (*read_subpage)(struct nand_chip *chip, uint32_t offs,
+ uint32_t len, uint8_t *buf, int page);
+ int (*write_subpage)(struct nand_chip *chip, uint32_t offset,
+ uint32_t data_len, const uint8_t *data_buf,
+ int oob_required, int page);
+ int (*write_page)(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+ int (*write_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob_raw)(struct nand_chip *chip, int page);
+ int (*read_oob)(struct nand_chip *chip, int page);
+ int (*write_oob)(struct nand_chip *chip, int page);
+};
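
Not part of the diff, but for orientation: a controller driver typically
fills this structure from its ->attach_chip() hook, for example (the foo_ecc_*
callbacks are hypothetical):

static int foo_attach_chip(struct nand_chip *chip)
{
	/* Nothing to do for software ECC, the core wires that up itself. */
	if (chip->ecc.mode != NAND_ECC_HW)
		return 0;

	chip->ecc.size      = 512;		  /* data bytes per step */
	chip->ecc.strength  = 8;		  /* correctable bits per step */
	chip->ecc.bytes     = 13;		  /* ECC bytes per step */
	chip->ecc.hwctl     = foo_ecc_hwctl;	  /* hypothetical */
	chip->ecc.calculate = foo_ecc_calculate;  /* hypothetical */
	chip->ecc.correct   = foo_ecc_correct;	  /* hypothetical */

	return 0;
}
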
+
+/**
+ * struct nand_sdr_timings - SDR NAND chip timings
+ *
+ * This struct defines the timing requirements of an SDR NAND chip.
+ * This information can be found in every NAND datasheet and the meaning of
+ * the timings is described in the ONFI specifications:
+ * www.onfi.org/~/media/ONFI/specs/onfi_3_1_spec.pdf (chapter 4.15 Timing
+ * Parameters)
+ *
+ * All these timings are expressed in picoseconds.
+ *
+ * @tBERS_max: Block erase time
+ * @tCCS_min: Change column setup time
+ * @tPROG_max: Page program time
+ * @tR_max: Page read time
+ * @tALH_min: ALE hold time
+ * @tADL_min: ALE to data loading time
+ * @tALS_min: ALE setup time
+ * @tAR_min: ALE to RE# delay
+ * @tCEA_max: CE# access time
+ * @tCEH_min: CE# high hold time
+ * @tCH_min: CE# hold time
+ * @tCHZ_max: CE# high to output hi-Z
+ * @tCLH_min: CLE hold time
+ * @tCLR_min: CLE to RE# delay
+ * @tCLS_min: CLE setup time
+ * @tCOH_min: CE# high to output hold
+ * @tCS_min: CE# setup time
+ * @tDH_min: Data hold time
+ * @tDS_min: Data setup time
+ * @tFEAT_max: Busy time for Set Features and Get Features
+ * @tIR_min: Output hi-Z to RE# low
+ * @tITC_max: Interface and Timing Mode Change time
+ * @tRC_min: RE# cycle time
+ * @tREA_max: RE# access time
+ * @tREH_min: RE# high hold time
+ * @tRHOH_min: RE# high to output hold
+ * @tRHW_min: RE# high to WE# low
+ * @tRHZ_max: RE# high to output hi-Z
+ * @tRLOH_min: RE# low to output hold
+ * @tRP_min: RE# pulse width
+ * @tRR_min: Ready to RE# low (data only)
+ * @tRST_max: Device reset time, measured from the falling edge of R/B# to the
+ * rising edge of R/B#.
+ * @tWB_max: WE# high to SR[6] low
+ * @tWC_min: WE# cycle time
+ * @tWH_min: WE# high hold time
+ * @tWHR_min: WE# high to RE# low
+ * @tWP_min: WE# pulse width
+ * @tWW_min: WP# transition to WE# low
+ */
+struct nand_sdr_timings {
+ u64 tBERS_max;
+ u32 tCCS_min;
+ u64 tPROG_max;
+ u64 tR_max;
+ u32 tALH_min;
+ u32 tADL_min;
+ u32 tALS_min;
+ u32 tAR_min;
+ u32 tCEA_max;
+ u32 tCEH_min;
+ u32 tCH_min;
+ u32 tCHZ_max;
+ u32 tCLH_min;
+ u32 tCLR_min;
+ u32 tCLS_min;
+ u32 tCOH_min;
+ u32 tCS_min;
+ u32 tDH_min;
+ u32 tDS_min;
+ u32 tFEAT_max;
+ u32 tIR_min;
+ u32 tITC_max;
+ u32 tRC_min;
+ u32 tREA_max;
+ u32 tREH_min;
+ u32 tRHOH_min;
+ u32 tRHW_min;
+ u32 tRHZ_max;
+ u32 tRLOH_min;
+ u32 tRP_min;
+ u32 tRR_min;
+ u64 tRST_max;
+ u32 tWB_max;
+ u32 tWC_min;
+ u32 tWH_min;
+ u32 tWHR_min;
+ u32 tWP_min;
+ u32 tWW_min;
+};
+
+/**
+ * enum nand_interface_type - NAND interface type
+ * @NAND_SDR_IFACE: Single Data Rate interface
+ */
+enum nand_interface_type {
+ NAND_SDR_IFACE,
+};
+
+/**
+ * struct nand_interface_config - NAND interface timing
+ * @type: type of the timing
+ * @timings: The timing information
+ * @timings.mode: Timing mode as defined in the specification
+ * @timings.sdr: Use it when @type is %NAND_SDR_IFACE.
+ */
+struct nand_interface_config {
+ enum nand_interface_type type;
+ struct nand_timings {
+ unsigned int mode;
+ union {
+ struct nand_sdr_timings sdr;
+ };
+ } timings;
+};
+
+/**
+ * nand_get_sdr_timings - get SDR timing from data interface
+ * @conf: The data interface
+ */
+static inline const struct nand_sdr_timings *
+nand_get_sdr_timings(const struct nand_interface_config *conf)
+{
+ if (conf->type != NAND_SDR_IFACE)
+ return ERR_PTR(-EINVAL);
+
+ return &conf->timings.sdr;
+}
+
+/**
+ * struct nand_op_cmd_instr - Definition of a command instruction
+ * @opcode: the command to issue in one cycle
+ */
+struct nand_op_cmd_instr {
+ u8 opcode;
+};
+
+/**
+ * struct nand_op_addr_instr - Definition of an address instruction
+ * @naddrs: length of the @addrs array
+ * @addrs: array containing the address cycles to issue
+ */
+struct nand_op_addr_instr {
+ unsigned int naddrs;
+ const u8 *addrs;
+};
+
+/**
+ * struct nand_op_data_instr - Definition of a data instruction
+ * @len: number of data bytes to move
+ * @buf: buffer to fill
+ * @buf.in: buffer to fill when reading from the NAND chip
+ * @buf.out: buffer to read from when writing to the NAND chip
+ * @force_8bit: force 8-bit access
+ *
+ * Please note that "in" and "out" are inverted from the ONFI specification
+ * and are from the controller perspective, so an "in" is a read from the NAND
+ * chip while an "out" is a write to the NAND chip.
+ */
+struct nand_op_data_instr {
+ unsigned int len;
+ union {
+ void *in;
+ const void *out;
+ } buf;
+ bool force_8bit;
+};
+
+/**
+ * struct nand_op_waitrdy_instr - Definition of a wait ready instruction
+ * @timeout_ms: maximum delay while waiting for the ready/busy pin in ms
+ */
+struct nand_op_waitrdy_instr {
+ unsigned int timeout_ms;
+};
+
+/**
+ * enum nand_op_instr_type - Definition of all instruction types
+ * @NAND_OP_CMD_INSTR: command instruction
+ * @NAND_OP_ADDR_INSTR: address instruction
+ * @NAND_OP_DATA_IN_INSTR: data in instruction
+ * @NAND_OP_DATA_OUT_INSTR: data out instruction
+ * @NAND_OP_WAITRDY_INSTR: wait ready instruction
+ */
+enum nand_op_instr_type {
+ NAND_OP_CMD_INSTR,
+ NAND_OP_ADDR_INSTR,
+ NAND_OP_DATA_IN_INSTR,
+ NAND_OP_DATA_OUT_INSTR,
+ NAND_OP_WAITRDY_INSTR,
+};
+
+/**
+ * struct nand_op_instr - Instruction object
+ * @type: the instruction type
+ * @ctx: extra data associated to the instruction. You'll have to use the
+ * appropriate element depending on @type
+ * @ctx.cmd: use it if @type is %NAND_OP_CMD_INSTR
+ * @ctx.addr: use it if @type is %NAND_OP_ADDR_INSTR
+ * @ctx.data: use it if @type is %NAND_OP_DATA_IN_INSTR
+ * or %NAND_OP_DATA_OUT_INSTR
+ * @ctx.waitrdy: use it if @type is %NAND_OP_WAITRDY_INSTR
+ * @delay_ns: delay the controller should apply after the instruction has been
+ * issued on the bus. Most modern controllers have internal timings
+ * control logic, and in this case, the controller driver can ignore
+ * this field.
+ */
+struct nand_op_instr {
+ enum nand_op_instr_type type;
+ union {
+ struct nand_op_cmd_instr cmd;
+ struct nand_op_addr_instr addr;
+ struct nand_op_data_instr data;
+ struct nand_op_waitrdy_instr waitrdy;
+ } ctx;
+ unsigned int delay_ns;
+};
+
+/*
+ * Special handling must be done for the WAITRDY timeout parameter as it usually
+ * is either tPROG (after a prog), tR (before a read), tRST (during a reset) or
+ * tBERS (during an erase), all of which are u64 values that cannot be
+ * divided by usual kernel macros and must be handled with the special
+ * DIV_ROUND_UP_ULL() macro.
+ *
+ * Cast to type of dividend is needed here to guarantee that the result won't
+ * be an unsigned long long when the dividend is an unsigned long (or smaller),
+ * which is what the compiler does when it sees ternary operator with 2
+ * different return types (picks the largest type to make sure there's no
+ * loss).
+ */
+#define __DIVIDE(dividend, divisor) ({ \
+ (__typeof__(dividend))(sizeof(dividend) <= sizeof(unsigned long) ? \
+ DIV_ROUND_UP(dividend, divisor) : \
+ DIV_ROUND_UP_ULL(dividend, divisor)); \
+ })
+#define PSEC_TO_NSEC(x) __DIVIDE(x, 1000)
+#define PSEC_TO_MSEC(x) __DIVIDE(x, 1000000000)
+
+#define NAND_OP_CMD(id, ns) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .ctx.cmd.opcode = id, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_ADDR(ncycles, cycles, ns) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .ctx.addr = { \
+ .naddrs = ncycles, \
+ .addrs = cycles, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = false, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_IN(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.in = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_8BIT_DATA_OUT(l, b, ns) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .ctx.data = { \
+ .len = l, \
+ .buf.out = b, \
+ .force_8bit = true, \
+ }, \
+ .delay_ns = ns, \
+ }
+
+#define NAND_OP_WAIT_RDY(tout_ms, ns) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .ctx.waitrdy.timeout_ms = tout_ms, \
+ .delay_ns = ns, \
+ }
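
As a small example of the macros above (close to what the core's
nand_reset_op() helper builds; sdr is assumed to come from
nand_get_sdr_timings()):

	const struct nand_op_instr reset_instrs[] = {
		NAND_OP_CMD(NAND_CMD_RESET, PSEC_TO_NSEC(sdr->tWB_max)),
		NAND_OP_WAIT_RDY(PSEC_TO_MSEC(sdr->tRST_max), 0),
	};
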
+
+/**
+ * struct nand_subop - a sub operation
+ * @cs: the CS line to select for this NAND sub-operation
+ * @instrs: array of instructions
+ * @ninstrs: length of the @instrs array
+ * @first_instr_start_off: offset to start from for the first instruction
+ * of the sub-operation
+ * @last_instr_end_off: offset to end at (excluded) for the last instruction
+ * of the sub-operation
+ *
+ * Both @first_instr_start_off and @last_instr_end_off only apply to data or
+ * address instructions.
+ *
+ * When an operation cannot be handled as is by the NAND controller, it will
+ * be split by the parser into sub-operations which will be passed to the
+ * controller driver.
+ */
+struct nand_subop {
+ unsigned int cs;
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+ unsigned int first_instr_start_off;
+ unsigned int last_instr_end_off;
+};
+
+unsigned int nand_subop_get_addr_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_num_addr_cyc(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_start_off(const struct nand_subop *subop,
+ unsigned int op_id);
+unsigned int nand_subop_get_data_len(const struct nand_subop *subop,
+ unsigned int op_id);
+
+/**
+ * struct nand_op_parser_addr_constraints - Constraints for address instructions
+ * @maxcycles: maximum number of address cycles the controller can issue in a
+ * single step
+ */
+struct nand_op_parser_addr_constraints {
+ unsigned int maxcycles;
+};
+
+/**
+ * struct nand_op_parser_data_constraints - Constraints for data instructions
+ * @maxlen: maximum data length that the controller can handle in a single step
+ */
+struct nand_op_parser_data_constraints {
+ unsigned int maxlen;
+};
+
+/**
+ * struct nand_op_parser_pattern_elem - One element of a pattern
+ * @type: the instruction type
+ * @optional: whether this element of the pattern is optional or mandatory
+ * @ctx: address or data constraint
+ * @ctx.addr: address constraint (number of cycles)
+ * @ctx.data: data constraint (data length)
+ */
+struct nand_op_parser_pattern_elem {
+ enum nand_op_instr_type type;
+ bool optional;
+ union {
+ struct nand_op_parser_addr_constraints addr;
+ struct nand_op_parser_data_constraints data;
+ } ctx;
+};
+
+#define NAND_OP_PARSER_PAT_CMD_ELEM(_opt) \
+ { \
+ .type = NAND_OP_CMD_INSTR, \
+ .optional = _opt, \
+ }
+
+#define NAND_OP_PARSER_PAT_ADDR_ELEM(_opt, _maxcycles) \
+ { \
+ .type = NAND_OP_ADDR_INSTR, \
+ .optional = _opt, \
+ .ctx.addr.maxcycles = _maxcycles, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_IN_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_IN_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_DATA_OUT_ELEM(_opt, _maxlen) \
+ { \
+ .type = NAND_OP_DATA_OUT_INSTR, \
+ .optional = _opt, \
+ .ctx.data.maxlen = _maxlen, \
+ }
+
+#define NAND_OP_PARSER_PAT_WAITRDY_ELEM(_opt) \
+ { \
+ .type = NAND_OP_WAITRDY_INSTR, \
+ .optional = _opt, \
+ }
+
+/**
+ * struct nand_op_parser_pattern - NAND sub-operation pattern descriptor
+ * @elems: array of pattern elements
+ * @nelems: number of pattern elements in @elems array
+ * @exec: the function that will issue a sub-operation
+ *
+ * A pattern is a list of elements, each element representing one instruction
+ * with its constraints. The pattern itself is used by the core to match NAND
+ * chip operation with NAND controller operations.
+ * Once a match between a NAND controller operation pattern and a NAND chip
+ * operation (or a sub-set of a NAND operation) is found, the pattern ->exec()
+ * hook is called so that the controller driver can issue the operation on the
+ * bus.
+ *
+ * Controller drivers should declare as many patterns as they support and pass
+ * this list of patterns (created with the help of the following macro) to
+ * the nand_op_parser_exec_op() helper.
+ */
+struct nand_op_parser_pattern {
+ const struct nand_op_parser_pattern_elem *elems;
+ unsigned int nelems;
+ int (*exec)(struct nand_chip *chip, const struct nand_subop *subop);
+};
+
+#define NAND_OP_PARSER_PATTERN(_exec, ...) \
+ { \
+ .exec = _exec, \
+ .elems = (const struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }, \
+ .nelems = sizeof((struct nand_op_parser_pattern_elem[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern_elem), \
+ }
+
+/**
+ * struct nand_op_parser - NAND controller operation parser descriptor
+ * @patterns: array of supported patterns
+ * @npatterns: length of the @patterns array
+ *
+ * The parser descriptor is just an array of supported patterns which will be
+ * iterated by nand_op_parser_exec_op() every time it tries to execute a
+ * NAND operation (or tries to determine if a specific operation is supported).
+ *
+ * It is worth mentioning that patterns will be tested in their declaration
+ * order, and the first match will be taken, so it's important to order patterns
+ * appropriately so that simple/inefficient patterns are placed at the end of
+ * the list. Usually, this is where you put single instruction patterns.
+ */
+struct nand_op_parser {
+ const struct nand_op_parser_pattern *patterns;
+ unsigned int npatterns;
+};
+
+#define NAND_OP_PARSER(...) \
+ { \
+ .patterns = (const struct nand_op_parser_pattern[]) { __VA_ARGS__ }, \
+ .npatterns = sizeof((struct nand_op_parser_pattern[]) { __VA_ARGS__ }) / \
+ sizeof(struct nand_op_parser_pattern), \
+ }
+
+/**
+ * struct nand_operation - NAND operation descriptor
+ * @cs: the CS line to select for this NAND operation
+ * @instrs: array of instructions to execute
+ * @ninstrs: length of the @instrs array
+ *
+ * The actual operation structure that will be passed to chip->exec_op().
+ */
+struct nand_operation {
+ unsigned int cs;
+ const struct nand_op_instr *instrs;
+ unsigned int ninstrs;
+};
+
+#define NAND_OPERATION(_cs, _instrs) \
+ { \
+ .cs = _cs, \
+ .instrs = _instrs, \
+ .ninstrs = ARRAY_SIZE(_instrs), \
+ }
+
+int nand_op_parser_exec_op(struct nand_chip *chip,
+ const struct nand_op_parser *parser,
+ const struct nand_operation *op, bool check_only);
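
A stripped-down controller-side sketch (the foo_* register helpers are
hypothetical): declare the patterns the hardware can handle and let
nand_op_parser_exec_op() split incoming operations accordingly:

/* Issue one matched sub-operation on the (fictional) controller. */
static int foo_exec_pattern(struct nand_chip *chip,
			    const struct nand_subop *subop)
{
	unsigned int i;

	for (i = 0; i < subop->ninstrs; i++) {
		const struct nand_op_instr *instr = &subop->instrs[i];

		switch (instr->type) {
		case NAND_OP_CMD_INSTR:
			foo_write_cmd(chip, instr->ctx.cmd.opcode); /* hypothetical */
			break;
		case NAND_OP_ADDR_INSTR:
			foo_write_addrs(chip, /* hypothetical */
				&instr->ctx.addr.addrs[nand_subop_get_addr_start_off(subop, i)],
				nand_subop_get_num_addr_cyc(subop, i));
			break;
		case NAND_OP_DATA_IN_INSTR: {
			u8 *in = instr->ctx.data.buf.in;

			foo_read_data(chip, /* hypothetical */
				      in + nand_subop_get_data_start_off(subop, i),
				      nand_subop_get_data_len(subop, i));
			break;
		}
		case NAND_OP_WAITRDY_INSTR:
			nand_soft_waitrdy(chip, instr->ctx.waitrdy.timeout_ms);
			break;
		default:
			return -ENOTSUPP;
		}
	}

	return 0;
}

static const struct nand_op_parser foo_op_parser = NAND_OP_PARSER(
	NAND_OP_PARSER_PATTERN(foo_exec_pattern,
			       NAND_OP_PARSER_PAT_CMD_ELEM(false),
			       NAND_OP_PARSER_PAT_ADDR_ELEM(true, 5),
			       NAND_OP_PARSER_PAT_CMD_ELEM(true),
			       NAND_OP_PARSER_PAT_WAITRDY_ELEM(true),
			       NAND_OP_PARSER_PAT_DATA_IN_ELEM(true, 4096)));

static int foo_exec_op(struct nand_chip *chip, const struct nand_operation *op,
		       bool check_only)
{
	return nand_op_parser_exec_op(chip, &foo_op_parser, op, check_only);
}
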
+
+static inline void nand_op_trace(const char *prefix,
+ const struct nand_op_instr *instr)
+{
+#if IS_ENABLED(CONFIG_DYNAMIC_DEBUG) || defined(DEBUG)
+ switch (instr->type) {
+ case NAND_OP_CMD_INSTR:
+ pr_debug("%sCMD [0x%02x]\n", prefix,
+ instr->ctx.cmd.opcode);
+ break;
+ case NAND_OP_ADDR_INSTR:
+ pr_debug("%sADDR [%d cyc: %*ph]\n", prefix,
+ instr->ctx.addr.naddrs,
+ instr->ctx.addr.naddrs < 64 ?
+ instr->ctx.addr.naddrs : 64,
+ instr->ctx.addr.addrs);
+ break;
+ case NAND_OP_DATA_IN_INSTR:
+ pr_debug("%sDATA_IN [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_DATA_OUT_INSTR:
+ pr_debug("%sDATA_OUT [%d B%s]\n", prefix,
+ instr->ctx.data.len,
+ instr->ctx.data.force_8bit ?
+ ", force 8-bit" : "");
+ break;
+ case NAND_OP_WAITRDY_INSTR:
+ pr_debug("%sWAITRDY [max %d ms]\n", prefix,
+ instr->ctx.waitrdy.timeout_ms);
+ break;
+ }
+#endif
+}
+
+/**
+ * struct nand_controller_ops - Controller operations
+ *
+ * @attach_chip: this method is called after the NAND detection phase after
+ * flash ID and MTD fields such as erase size, page size and OOB
+ * size have been set up. ECC requirements are available if
+ * provided by the NAND chip or device tree. Typically used to
+ * choose the appropriate ECC configuration and allocate
+ * associated resources.
+ * This hook is optional.
+ * @detach_chip: free all resources allocated/claimed in
+ * nand_controller_ops->attach_chip().
+ * This hook is optional.
+ * @exec_op: controller specific method to execute NAND operations.
+ * This method replaces chip->legacy.cmdfunc(),
+ * chip->legacy.{read,write}_{buf,byte,word}(),
+ * chip->legacy.dev_ready() and chip->legacy.waitfunc().
+ * @setup_interface: setup the data interface and timing. If chipnr is set to
+ * %NAND_DATA_IFACE_CHECK_ONLY this means the configuration
+ * should not be applied but only checked.
+ * This hook is optional.
+ */
+struct nand_controller_ops {
+ int (*attach_chip)(struct nand_chip *chip);
+ void (*detach_chip)(struct nand_chip *chip);
+ int (*exec_op)(struct nand_chip *chip,
+ const struct nand_operation *op,
+ bool check_only);
+ int (*setup_interface)(struct nand_chip *chip, int chipnr,
+ const struct nand_interface_config *conf);
+};
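
A ->setup_interface() implementation will usually only look at the SDR
timings and honour the check-only case, roughly like this
(foo_check_timings()/foo_apply_timings() are hypothetical):

static int foo_setup_interface(struct nand_chip *chip, int chipnr,
			       const struct nand_interface_config *conf)
{
	const struct nand_sdr_timings *sdr = nand_get_sdr_timings(conf);

	if (IS_ERR(sdr))
		return PTR_ERR(sdr);

	/* Only validate the requested timings, do not touch the hardware. */
	if (chipnr == NAND_DATA_IFACE_CHECK_ONLY)
		return foo_check_timings(chip, sdr);

	return foo_apply_timings(chip, sdr);
}
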
+
+/**
+ * struct nand_controller - Structure used to describe a NAND controller
+ *
+ * @lock: lock used to serialize accesses to the NAND controller
+ * @ops: NAND controller operations.
+ */
+struct nand_controller {
+ struct mutex lock;
+ const struct nand_controller_ops *ops;
+};
+
+static inline void nand_controller_init(struct nand_controller *nfc)
+{
+ mutex_init(&nfc->lock);
+}
+
+/**
+ * struct nand_legacy - NAND chip legacy fields/hooks
+ * @IO_ADDR_R: address to read the 8 I/O lines of the flash device
+ * @IO_ADDR_W: address to write the 8 I/O lines of the flash device
+ * @select_chip: select/deselect a specific target/die
+ * @read_byte: read one byte from the chip
+ * @write_byte: write a single byte to the chip on the low 8 I/O lines
+ * @write_buf: write data from the buffer to the chip
+ * @read_buf: read data from the chip into the buffer
+ * @cmd_ctrl: hardware specific function for controlling ALE/CLE/nCE. Also used
+ * to write command and address
+ * @cmdfunc: hardware specific function for writing commands to the chip.
+ * @dev_ready: hardware specific function for accessing device ready/busy line.
+ * If set to NULL no access to ready/busy is available and the
+ * ready/busy information is read from the chip status register.
+ * @waitfunc: hardware specific function for wait on ready.
+ * @block_bad: check if a block is bad, using OOB markers
+ * @block_markbad: mark a block bad
+ * @set_features: set the NAND chip features
+ * @get_features: get the NAND chip features
+ * @chip_delay: chip dependent delay for transferring data from array to read
+ * regs (tR).
+ * @dummy_controller: dummy controller implementation for drivers that can
+ * only control a single chip
+ *
+ * If you look at this structure you're already wrong. These fields/hooks are
+ * all deprecated.
+ */
+struct nand_legacy {
+ void __iomem *IO_ADDR_R;
+ void __iomem *IO_ADDR_W;
+ void (*select_chip)(struct nand_chip *chip, int cs);
+ u8 (*read_byte)(struct nand_chip *chip);
+ u16 (*read_word)(struct nand_chip *chip);
+ void (*write_byte)(struct nand_chip *chip, u8 byte);
+ void (*write_buf)(struct nand_chip *chip, const u8 *buf, int len);
+ void (*read_buf)(struct nand_chip *chip, u8 *buf, int len);
+ void (*cmd_ctrl)(struct nand_chip *chip, int dat, unsigned int ctrl);
+ void (*cmdfunc)(struct nand_chip *chip, unsigned command, int column,
+ int page_addr);
+ int (*dev_ready)(struct nand_chip *chip);
+ int (*waitfunc)(struct nand_chip *chip);
+ int (*block_bad)(struct nand_chip *chip, loff_t ofs);
+ int (*block_markbad)(struct nand_chip *chip, loff_t ofs);
+ int (*set_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int (*get_features)(struct nand_chip *chip, int feature_addr,
+ u8 *subfeature_para);
+ int chip_delay;
+ struct nand_controller dummy_controller;
+};
+
+/**
+ * struct nand_chip_ops - NAND chip operations
+ * @suspend: Suspend operation
+ * @resume: Resume operation
+ * @lock_area: Lock operation
+ * @unlock_area: Unlock operation
+ * @setup_read_retry: Set the read-retry mode (mostly needed for MLC NANDs)
+ * @choose_interface_config: Choose the best interface configuration
+ */
+struct nand_chip_ops {
+ int (*suspend)(struct nand_chip *chip);
+ void (*resume)(struct nand_chip *chip);
+ int (*lock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*unlock_area)(struct nand_chip *chip, loff_t ofs, uint64_t len);
+ int (*setup_read_retry)(struct nand_chip *chip, int retry_mode);
+ int (*choose_interface_config)(struct nand_chip *chip,
+ struct nand_interface_config *iface);
+};
+
+/**
+ * struct nand_manufacturer - NAND manufacturer structure
+ * @desc: The manufacturer description
+ * @priv: Private information for the manufacturer driver
+ */
+struct nand_manufacturer {
+ const struct nand_manufacturer_desc *desc;
+ void *priv;
+};
+
+/**
+ * struct nand_chip - NAND Private Flash Chip Data
+ * @base: Inherit from the generic NAND device
+ * @id: Holds NAND ID
+ * @parameters: Holds generic parameters under an easily readable form
+ * @manufacturer: Manufacturer information
+ * @ops: NAND chip operations
+ * @legacy: All legacy fields/hooks. If you develop a new driver, don't even try
+ * to use any of these fields/hooks, and if you're modifying an
+ * existing driver that is using those fields/hooks, you should
+ * consider reworking the driver and avoid using them.
+ * @options: Various chip options. They can partly be set to inform nand_scan
+ * about special functionality. See the defines for further
+ * explanation.
+ * @current_interface_config: The currently used NAND interface configuration
+ * @best_interface_config: The best NAND interface configuration which fits both
+ * the NAND chip and NAND controller constraints. If
+ * unset, the default reset interface configuration must
+ * be used.
+ * @bbt_erase_shift: Number of address bits in a bbt entry
+ * @bbt_options: Bad block table specific options. All options used here must
+ * come from bbm.h. By default, these options will be copied to
+ * the appropriate nand_bbt_descr's.
+ * @badblockpos: Bad block marker position in the oob area
+ * @badblockbits: Minimum number of set bits in a good block's bad block marker
+ * position; i.e., BBM = 11110111b is good when badblockbits = 7
+ * @bbt_td: Bad block table descriptor for flash lookup
+ * @bbt_md: Bad block table mirror descriptor
+ * @badblock_pattern: Bad block scan pattern used for initial bad block scan
+ * @bbt: Bad block table pointer
+ * @page_shift: Number of address bits in a page (column address bits)
+ * @phys_erase_shift: Number of address bits in a physical eraseblock
+ * @chip_shift: Number of address bits in one chip
+ * @pagemask: Page number mask = number of (pages / chip) - 1
+ * @subpagesize: Holds the subpagesize
+ * @data_buf: Buffer for data, size is (page size + oobsize)
+ * @oob_poi: pointer on the OOB area covered by data_buf
+ * @pagecache: Structure containing page cache related fields
+ * @pagecache.bitflips: Number of bitflips of the cached page
+ * @pagecache.page: Page number currently in the cache. -1 means no page is
+ * currently cached
+ * @buf_align: Minimum buffer alignment required by a platform
+ * @lock: Lock protecting the suspended field. Also used to serialize accesses
+ * to the NAND device
+ * @suspended: Set to 1 when the device is suspended, 0 when it's not
+ * @cur_cs: Currently selected target. -1 means no target selected, otherwise we
+ * should always have cur_cs >= 0 && cur_cs < nanddev_ntargets().
+ * NAND Controller drivers should not modify this value, but they're
+ * allowed to read it.
+ * @read_retries: The number of read retry modes supported
+ * @controller: The hardware controller structure which is shared among multiple
+ * independent devices
+ * @ecc: The ECC controller structure
+ * @priv: Chip private data
+ */
+struct nand_chip {
+ struct nand_device base;
+ struct nand_id id;
+ struct nand_parameters parameters;
+ struct nand_manufacturer manufacturer;
+ struct nand_chip_ops ops;
+ struct nand_legacy legacy;
+ unsigned int options;
+
+ /* Data interface */
+ const struct nand_interface_config *current_interface_config;
+ struct nand_interface_config *best_interface_config;
+
+ /* Bad block information */
+ unsigned int bbt_erase_shift;
+ unsigned int bbt_options;
+ unsigned int badblockpos;
+ unsigned int badblockbits;
+ struct nand_bbt_descr *bbt_td;
+ struct nand_bbt_descr *bbt_md;
+ struct nand_bbt_descr *badblock_pattern;
+ u8 *bbt;
+
+ /* Device internal layout */
+ unsigned int page_shift;
+ unsigned int phys_erase_shift;
+ unsigned int chip_shift;
+ unsigned int pagemask;
+ unsigned int subpagesize;
+
+ /* Buffers */
+ u8 *data_buf;
+ u8 *oob_poi;
+ struct {
+ unsigned int bitflips;
+ int page;
+ } pagecache;
+ unsigned long buf_align;
+
+ /* Internals */
+ struct mutex lock;
+ unsigned int suspended : 1;
+ int cur_cs;
+ int read_retries;
+
+ /* Externals */
+ struct nand_controller *controller;
+ struct nand_ecc_ctrl ecc;
+ void *priv;
+
+ /* barebox specific */
+ unsigned int bbt_type;
+};
+
+extern const struct mtd_ooblayout_ops nand_ooblayout_sp_ops;
+extern const struct mtd_ooblayout_ops nand_ooblayout_lp_ops;
+
+static inline struct nand_chip *mtd_to_nand(struct mtd_info *mtd)
+{
+ return container_of(mtd, struct nand_chip, base.mtd);
+}
+
+static inline struct mtd_info *nand_to_mtd(struct nand_chip *chip)
+{
+ return &chip->base.mtd;
+}
+
+static inline void *nand_get_controller_data(struct nand_chip *chip)
+{
+ return chip->priv;
+}
+
+static inline void nand_set_controller_data(struct nand_chip *chip, void *priv)
+{
+ chip->priv = priv;
+}
+
+static inline void nand_set_manufacturer_data(struct nand_chip *chip,
+ void *priv)
+{
+ chip->manufacturer.priv = priv;
+}
+
+static inline void *nand_get_manufacturer_data(struct nand_chip *chip)
+{
+ return chip->manufacturer.priv;
+}
+
+static inline void nand_set_flash_node(struct nand_chip *chip,
+ struct device_node *np)
+{
+ mtd_set_of_node(nand_to_mtd(chip), np);
+}
+
+static inline struct device_node *nand_get_flash_node(struct nand_chip *chip)
+{
+ return mtd_get_of_node(nand_to_mtd(chip));
+}
+
+/**
+ * nand_get_interface_config - Retrieve the current interface configuration
+ * of a NAND chip
+ * @chip: The NAND chip
+ */
+static inline const struct nand_interface_config *
+nand_get_interface_config(struct nand_chip *chip)
+{
+ return chip->current_interface_config;
+}
+
+/*
+ * A helper for defining older NAND chips where the second ID byte fully
+ * defined the chip, including the geometry (chip size, eraseblock size, page
+ * size). All these chips have 512 bytes NAND page size.
+ */
+#define LEGACY_ID_NAND(nm, devid, chipsz, erasesz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .pagesize = 512, \
+ .chipsize = (chipsz), .erasesize = (erasesz), .options = (opts) }
+
+/*
+ * A helper for defining newer chips which report their page size and
+ * eraseblock size via the extended ID bytes.
+ *
+ * The real difference between LEGACY_ID_NAND and EXTENDED_ID_NAND is that with
+ * EXTENDED_ID_NAND, manufacturers overloaded the same device ID so that the
+ * device ID now only represented a particular total chip size (and voltage,
+ * buswidth), and the page size, eraseblock size, and OOB size could vary while
+ * using the same device ID.
+ */
+#define EXTENDED_ID_NAND(nm, devid, chipsz, opts) \
+ { .name = (nm), {{ .dev_id = (devid) }}, .chipsize = (chipsz), \
+ .options = (opts) }
+
+#define NAND_ECC_INFO(_strength, _step) \
+ { .strength_ds = (_strength), .step_ds = (_step) }
+#define NAND_ECC_STRENGTH(type) ((type)->ecc.strength_ds)
+#define NAND_ECC_STEP(type) ((type)->ecc.step_ds)
+
+/**
+ * struct nand_flash_dev - NAND Flash Device ID Structure
+ * @name: a human-readable name of the NAND chip
+ * @mfr_id: manufacturer ID part of the full chip ID array (refers to the same
+ * memory address as ``id[0]``)
+ * @dev_id: device ID part of the full chip ID array (refers to the same memory
+ * address as ``id[1]``)
+ * @id: full device ID array
+ * @pagesize: size of the NAND page in bytes; if 0, then the real page size (as
+ * well as the eraseblock size) is determined from the extended NAND
+ * chip ID array)
+ * @chipsize: total chip size in MiB
+ * @erasesize: eraseblock size in bytes (determined from the extended ID if 0)
+ * @options: stores various chip bit options
+ * @id_len: The valid length of the @id.
+ * @oobsize: OOB size
+ * @ecc: ECC correctability and step information from the datasheet.
+ * @ecc.strength_ds: The ECC correctability from the datasheet, same as the
+ * @ecc_strength_ds in nand_chip{}.
+ * @ecc.step_ds: The ECC step required by the @ecc.strength_ds, same as the
+ * @ecc_step_ds in nand_chip{}, also from the datasheet.
+ * For example, the "4bit ECC for each 512Byte" can be set with
+ * NAND_ECC_INFO(4, 512).
+ */
+struct nand_flash_dev {
+ char *name;
+ union {
+ struct {
+ uint8_t mfr_id;
+ uint8_t dev_id;
+ };
+ uint8_t id[NAND_MAX_ID_LEN];
+ };
+ unsigned int pagesize;
+ unsigned int chipsize;
+ unsigned int erasesize;
+ unsigned int options;
+ uint16_t id_len;
+ uint16_t oobsize;
+ struct {
+ uint16_t strength_ds;
+ uint16_t step_ds;
+ } ecc;
+};
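
Drivers that need chips unknown to the core's built-in table can pass their
own entries to nand_scan_with_ids() below; the values here are only examples:

#include <linux/sizes.h>

static struct nand_flash_dev foo_nand_ids[] = {
	/* extended-ID chip: geometry decoded from the ID bytes */
	EXTENDED_ID_NAND("NAND 128MiB 3,3V 8-bit", 0xd1, 128, 0),
	/* legacy chip: 512 byte pages, fixed geometry */
	LEGACY_ID_NAND("NAND 32MiB 3,3V 8-bit", 0x75, 32, SZ_16K, 0),
	{ /* sentinel */ },
};

Passing NULL instead, as the nand_scan() wrapper does, makes the core fall
back to its default ID table.
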
+
+int nand_create_bbt(struct nand_chip *chip);
+
+/*
+ * Check if it is an SLC nand.
+ * !nand_is_slc() can be used to check for MLC/TLC nand chips.
+ * We do not distinguish between MLC and TLC for now.
+ */
+static inline bool nand_is_slc(struct nand_chip *chip)
+{
+ WARN(nanddev_bits_per_cell(&chip->base) == 0,
+ "chip->bits_per_cell is used uninitialized\n");
+ return nanddev_bits_per_cell(&chip->base) == 1;
+}
+
+/**
+ * nand_opcode_8bits - Check if the opcode's address should be sent only on
+ * the lower 8 bits
+ * @command: opcode to check
+ */
+static inline int nand_opcode_8bits(unsigned int command)
+{
+ switch (command) {
+ case NAND_CMD_READID:
+ case NAND_CMD_PARAM:
+ case NAND_CMD_GET_FEATURES:
+ case NAND_CMD_SET_FEATURES:
+ return 1;
+ default:
+ break;
+ }
+ return 0;
+}
+
+int nand_check_erased_ecc_chunk(void *data, int datalen,
+ void *ecc, int ecclen,
+ void *extraoob, int extraooblen,
+ int threshold);
+
+int nand_ecc_choose_conf(struct nand_chip *chip,
+ const struct nand_ecc_caps *caps, int oobavail);
+
+/* Default write_oob implementation */
+int nand_write_oob_std(struct nand_chip *chip, int page);
+
+/* Default read_oob implementation */
+int nand_read_oob_std(struct nand_chip *chip, int page);
+
+/* Stub used by drivers that do not support GET/SET FEATURES operations */
+int nand_get_set_features_notsupp(struct nand_chip *chip, int addr,
+ u8 *subfeature_param);
+
+/* read_page_raw implementations */
+int nand_read_page_raw(struct nand_chip *chip, uint8_t *buf, int oob_required,
+ int page);
+int nand_monolithic_read_page_raw(struct nand_chip *chip, uint8_t *buf,
+ int oob_required, int page);
+
+/* write_page_raw implementations */
+int nand_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+int nand_monolithic_write_page_raw(struct nand_chip *chip, const uint8_t *buf,
+ int oob_required, int page);
+
+/* Reset and initialize a NAND device */
+int nand_reset(struct nand_chip *chip, int chipnr);
+
+/* NAND operation helpers */
+int nand_reset_op(struct nand_chip *chip);
+int nand_readid_op(struct nand_chip *chip, u8 addr, void *buf,
+ unsigned int len);
+int nand_status_op(struct nand_chip *chip, u8 *status);
+int nand_erase_op(struct nand_chip *chip, unsigned int eraseblock);
+int nand_read_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_change_read_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_oob_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, void *buf, unsigned int len);
+int nand_prog_page_begin_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_prog_page_end_op(struct nand_chip *chip);
+int nand_prog_page_op(struct nand_chip *chip, unsigned int page,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len);
+int nand_change_write_column_op(struct nand_chip *chip,
+ unsigned int offset_in_page, const void *buf,
+ unsigned int len, bool force_8bit);
+int nand_read_data_op(struct nand_chip *chip, void *buf, unsigned int len,
+ bool force_8bit, bool check_only);
+int nand_write_data_op(struct nand_chip *chip, const void *buf,
+ unsigned int len, bool force_8bit);
+
+/* Scan and identify a NAND device */
+int nand_scan_with_ids(struct nand_chip *chip, unsigned int max_chips,
+ struct nand_flash_dev *ids);
+
+static inline int nand_scan(struct nand_chip *chip, unsigned int max_chips)
+{
+ return nand_scan_with_ids(chip, max_chips, NULL);
+}
+
+/* Internal helper for board drivers which need to override command function */
+void nand_wait_ready(struct nand_chip *chip);
+
+/*
+ * Free resources held by the NAND device, must be called on error after a
+ * successful nand_scan().
+ */
+void nand_cleanup(struct nand_chip *chip);
+
+/*
+ * External helper for controller drivers that have to implement the WAITRDY
+ * instruction and have no physical pin to check it.
+ */
+int nand_soft_waitrdy(struct nand_chip *chip, unsigned long timeout_ms);
+
+/* Select/deselect a NAND target. */
+void nand_select_target(struct nand_chip *chip, unsigned int cs);
+void nand_deselect_target(struct nand_chip *chip);
+
+/* Bitops */
+void nand_extract_bits(u8 *dst, unsigned int dst_off, const u8 *src,
+ unsigned int src_off, unsigned int nbits);
+
+/**
+ * nand_get_data_buf() - Get the internal page buffer
+ * @chip: NAND chip object
+ *
+ * Returns the pre-allocated page buffer after invalidating the cache. This
+ * function should be used by drivers that do not want to allocate their own
+ * bounce buffer and still need such a buffer for specific operations (most
+ * commonly when reading OOB data only).
+ *
+ * Be careful to never call this function in the write/write_oob path, because
+ * the core may have placed the data to be written out in this buffer.
+ *
+ * Return: pointer to the page cache buffer
+ */
+static inline void *nand_get_data_buf(struct nand_chip *chip)
+{
+ chip->pagecache.page = -1;
+
+ return chip->data_buf;
+}
+
+int nand_scan_ident(struct nand_chip *chip, unsigned int max_chips,
+ struct nand_flash_dev *table);
+int nand_scan_tail(struct nand_chip *chip);
+int nand_update_bbt(struct nand_chip *this, loff_t offs);
+int add_mtd_nand_device(struct mtd_info *mtd, char *devname);
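
Putting it together, a controller driver's probe path in barebox now boils
down to something like the sketch below (hypothetical foo_* driver reusing
the hooks sketched earlier; error handling trimmed):

struct foo_nfc {
	struct nand_controller controller;
	struct nand_chip chip;
	void __iomem *regs;			/* hypothetical */
};

static const struct nand_controller_ops foo_nfc_ops = {
	.attach_chip	 = foo_attach_chip,
	.exec_op	 = foo_exec_op,
	.setup_interface = foo_setup_interface,
};

static int foo_nand_probe(struct device_d *dev)
{
	struct foo_nfc *nfc = xzalloc(sizeof(*nfc));
	struct nand_chip *chip = &nfc->chip;
	int ret;

	nand_controller_init(&nfc->controller);
	nfc->controller.ops = &foo_nfc_ops;
	chip->controller = &nfc->controller;

	nand_set_flash_node(chip, dev->device_node);
	nand_set_controller_data(chip, nfc);

	/* Identify the chip, set up ECC and scan/create the BBT. */
	ret = nand_scan(chip, 1);
	if (ret)
		return ret;

	/* Register the resulting mtd device. */
	return add_mtd_nand_device(nand_to_mtd(chip), "nand");
}
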
+
+/* return the supported asynchronous timing mode. */
+static inline int onfi_get_async_timing_mode(struct nand_chip *chip)
+{
+ if (!chip->parameters.onfi)
+ return ONFI_TIMING_MODE_UNKNOWN;
+ return chip->parameters.onfi->async_timing_mode;
+}
+
+const struct nand_sdr_timings *onfi_async_timing_mode_to_sdr_timings(int mode);
+bool nand_supports_set_features(struct nand_chip *chip, int addr);
+
+#endif /* __LINUX_MTD_RAWNAND_H */
--
2.20.1