[PATCH v2 3/4] mtd: spinand: Add support for continuous read operation
Zhengxun
zhengxunli.mxic at gmail.com
Wed Feb 9 01:10:21 PST 2022
The continuous read operation works as follows: first, the host
issues the page read command, and the first page of data is loaded
into the cache after the read latency tRD. Next, the host issues
one of the Read From Cache commands (03h/0Bh/3Bh/6Bh/BBh/EBh) to
read the data out of the cache continuously. After all the data has
been read out, the host pulls CS# high to terminate the continuous
read operation and waits tRST for the NAND device to reset its read
operation.
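
As an illustration only (not part of this patch), the sequence
described above can be sketched with raw spi-mem operations. The
opcodes (13h page read, 0Bh fast read from cache, FFh reset) and the
6us tRST follow the description in this series; the helper name, the
single dummy byte, and the fixed delays are assumptions for the sketch:

#include <linux/delay.h>
#include <linux/spi/spi-mem.h>

/* Hypothetical helper: stream 'len' bytes starting at page 'row'. */
static int cont_read_sketch(struct spi_mem *mem, u32 row, void *buf, size_t len)
{
	struct spi_mem_op page_read = SPI_MEM_OP(SPI_MEM_OP_CMD(0x13, 1),
						 SPI_MEM_OP_ADDR(3, row, 1),
						 SPI_MEM_OP_NO_DUMMY,
						 SPI_MEM_OP_NO_DATA);
	struct spi_mem_op from_cache = SPI_MEM_OP(SPI_MEM_OP_CMD(0x0b, 1),
						  SPI_MEM_OP_ADDR(2, 0, 1),
						  SPI_MEM_OP_DUMMY(1, 1),
						  SPI_MEM_OP_DATA_IN(len, buf, 1));
	struct spi_mem_op reset = SPI_MEM_OP(SPI_MEM_OP_CMD(0xff, 1),
					     SPI_MEM_OP_NO_ADDR,
					     SPI_MEM_OP_NO_DUMMY,
					     SPI_MEM_OP_NO_DATA);
	int ret;

	/* 1) Load the first page into the cache, then wait tRD. */
	ret = spi_mem_exec_op(mem, &page_read);
	if (ret)
		return ret;
	usleep_range(100, 200);		/* stand-in for polling the status register */

	/* 2) Stream the data out of the cache, crossing page boundaries. */
	ret = spi_mem_exec_op(mem, &from_cache);
	if (ret)
		return ret;

	/* 3) CS# is deasserted when the op completes; reset and wait tRST. */
	ret = spi_mem_exec_op(mem, &reset);
	if (ret)
		return ret;
	usleep_range(6, 10);		/* ~6 us tRST */

	return 0;
}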
Continuous read is used when reading multiple pages (i.e. a read
length larger than one page size). The column address is a don't-care
in this operation, since the data output for each page always starts
from byte 0 and a full page of data is read out for each page.
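
As a usage illustration (assumed caller code, not part of this patch),
any data read that spans more than one page from a page-aligned offset
is enough to take the continuous read path added below:

#include <linux/mtd/mtd.h>

/*
 * Hypothetical caller: reading two full pages from a page-aligned
 * offset satisfies the "larger than one page" condition checked in
 * spinand_mtd_read() in this patch.
 */
static int read_two_pages(struct mtd_info *mtd, loff_t page_aligned_offs,
			  u8 *buf)
{
	size_t retlen;

	return mtd_read(mtd, page_aligned_offs, 2 * mtd->writesize,
			&retlen, buf);
}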
On the other hand, since continuous read mode can only read entire
pages of data and cannot read the OOB data, spinand_mtd_continuous_read()
dynamically enables continuous read mode on entry and disables it
again on exit, so that subsequent write and erase operations are not
affected.
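
The enable/disable helpers themselves are vendor specific. Below is a
minimal sketch of what they could look like, assuming the device
exposes continuous read through a configuration register bit that can
be toggled with the core's spinand_upd_cfg() helper (the bit name is
hypothetical):

#include <linux/bits.h>
#include <linux/mtd/spinand.h>

#define CFG_CONT_READ_EXAMPLE	BIT(2)	/* hypothetical configuration bit */

static int example_cont_read_enable(struct spinand_device *spinand)
{
	return spinand_upd_cfg(spinand, CFG_CONT_READ_EXAMPLE,
			       CFG_CONT_READ_EXAMPLE);
}

static int example_cont_read_disable(struct spinand_device *spinand)
{
	return spinand_upd_cfg(spinand, CFG_CONT_READ_EXAMPLE, 0);
}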
The performance of continuous read mode is shown below. The flash is
set to QUAD mode, the SPI bus runs at 25 MHz in direct mapping mode,
and the MTD test module is used to measure the performance of
continuous reads.
=================================================
mtd_speedtest: MTD device: 0 count: 100
mtd_speedtest: MTD device size 268435456, eraseblock size 131072,
page size 2048, count of eraseblocks 2048, pages per
eraseblock 64, OOB size 64
mtd_test: scanning for bad eraseblocks
mtd_test: scanned 100 eraseblocks, 0 are bad
mtd_speedtest: testing eraseblock write speed
mtd_speedtest: eraseblock write speed is 1298 KiB/s
mtd_speedtest: testing eraseblock read speed
mtd_speedtest: eraseblock read speed is 11053 KiB/s
mtd_speedtest: testing page write speed
mtd_speedtest: page write speed is 1291 KiB/s
mtd_speedtest: testing page read speed
mtd_speedtest: page read speed is 3240 KiB/s
mtd_speedtest: testing 2 page write speed
mtd_speedtest: 2 page write speed is 1289 KiB/s
mtd_speedtest: testing 2 page read speed
mtd_speedtest: 2 page read speed is 2909 KiB/s
mtd_speedtest: Testing erase speed
mtd_speedtest: erase speed is 45229 KiB/s
mtd_speedtest: Testing 2x multi-block erase speed
mtd_speedtest: 2x multi-block erase speed is 62135 KiB/s
mtd_speedtest: Testing 4x multi-block erase speed
mtd_speedtest: 4x multi-block erase speed is 60093 KiB/s
mtd_speedtest: Testing 8x multi-block erase speed
mtd_speedtest: 8x multi-block erase speed is 61244 KiB/s
mtd_speedtest: Testing 16x multi-block erase speed
mtd_speedtest: 16x multi-block erase speed is 61538 KiB/s
mtd_speedtest: Testing 32x multi-block erase speed
mtd_speedtest: 32x multi-block erase speed is 61835 KiB/s
mtd_speedtest: Testing 64x multi-block erase speed
mtd_speedtest: 64x multi-block erase speed is 60663 KiB/s
mtd_speedtest: finished
=================================================
Signed-off-by: Zhengxun <zhengxunli.mxic at gmail.com>
---
drivers/mtd/nand/spi/core.c | 97 ++++++++++++++++++++++++++++++++++++-
1 file changed, 96 insertions(+), 1 deletion(-)
diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index 380411feab6c..990836fff924 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -383,7 +383,10 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
u16 column = 0;
ssize_t ret;
- if (req->datalen) {
+ if (spinand->use_continuous_read) {
+ buf = req->databuf.in;
+ nbytes = req->datalen;
+ } else if (req->datalen) {
buf = spinand->databuf;
nbytes = nanddev_page_size(nand);
column = 0;
@@ -412,6 +415,9 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
buf += ret;
}
+ if (spinand->use_continuous_read)
+ return 0;
+
if (req->datalen)
memcpy(req->databuf.in, spinand->databuf + req->dataoffs,
req->datalen);
@@ -640,6 +646,81 @@ static int spinand_write_page(struct spinand_device *spinand,
return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
}
+static int spinand_mtd_continuous_read(struct mtd_info *mtd, loff_t from,
+ struct mtd_oob_ops *ops,
+ struct nand_io_iter *iter)
+{
+ struct spinand_device *spinand = mtd_to_spinand(mtd);
+ struct nand_device *nand = mtd_to_nanddev(mtd);
+ int ret = 0;
+
+ /*
+ * Since continuous read mode can only read entire pages of data and
+ * cannot read the OOB data, only ECC-free SPI-NAND devices support
+ * continuous read mode for now.
+ */
+ iter->req.type = NAND_PAGE_READ;
+ iter->req.mode = MTD_OPS_RAW;
+ iter->req.dataoffs = nanddev_offs_to_pos(nand, from, &iter->req.pos);
+ iter->req.databuf.in = ops->datbuf;
+ iter->req.datalen = ops->len;
+
+ if (from & (nanddev_page_size(nand) - 1)) {
+ pr_debug("%s: unaligned address\n", __func__);
+ return -EINVAL;
+ }
+
+ ret = spinand_continuous_read_enable(spinand);
+ if (ret)
+ return ret;
+
+ spinand->use_continuous_read = true;
+
+ ret = spinand_select_target(spinand, iter->req.pos.target);
+ if (ret)
+ return ret;
+
+ /*
+ * The continuous read operation works as follows: first, the page
+ * read command is issued and the first page of data is loaded into
+ * the cache after the read latency tRD. Next, the Read From Cache
+ * commands (03h/0Bh/3Bh/6Bh/BBh/EBh) are issued to read the data
+ * out of the cache continuously.
+ *
+ * The cache is divided into two halves: while one half of the cache
+ * is outputting data, the other half is being loaded with new data,
+ * so the host can read out data continuously from page to page.
+ * Multiple Read From Cache commands can be issued in one continuous
+ * read operation, but each Read From Cache command must read an
+ * exact multiple of 4 bytes; otherwise, the data output will be out
+ * of sequence from one Read From Cache command to the next.
+ *
+ * After all the data has been read out, the host pulls CS# high to
+ * terminate the continuous read operation and waits 6us of tRST for
+ * the NAND device to reset its read operation. The data output for
+ * each page always starts from byte 0 and a full page of data is
+ * read out for each page.
+ */
+ ret = spinand_read_page(spinand, &iter->req);
+ if (ret)
+ return ret;
+
+ ret = spinand_reset_op(spinand);
+ if (ret)
+ return ret;
+
+ ret = spinand_continuous_read_disable(spinand);
+ if (ret)
+ return ret;
+
+ spinand->use_continuous_read = false;
+
+ ops->retlen = iter->req.datalen;
+
+ return ret;
+}
+
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
struct mtd_oob_ops *ops)
{
@@ -656,6 +737,19 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
mutex_lock(&spinand->lock);
+ /*
+ * If the device supports continuous read mode and the read length is
+ * larger than one page size, enter continuous read mode. This avoids
+ * re-issuing a page read command and a read from cache command for
+ * every page, and improves read performance for contiguous addresses.
+ */
+ if ((spinand->flags & SPINAND_HAS_CONT_READ_BIT) &&
+ (ops->len > nanddev_page_size(nand))) {
+ ret = spinand_mtd_continuous_read(mtd, from, ops, &iter);
+
+ goto continuous_read_finish;
+ }
+
nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
if (disable_ecc)
iter.req.mode = MTD_OPS_RAW;
@@ -678,6 +772,7 @@ static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
ops->oobretlen += iter.req.ooblen;
}
+continuous_read_finish:
mutex_unlock(&spinand->lock);
if (ecc_failed && !ret)
--
2.17.1