[openwrt/openwrt] boot: airoha: speed up spinand flash operations using dma
LEDE Commits
lede-commits at lists.infradead.org
Thu Oct 9 07:37:37 PDT 2025
robimarko pushed a commit to openwrt/openwrt.git, branch main:
https://git.openwrt.org/e67ba973d2b43507ed900ec8fdf8187cadbf4918
commit e67ba973d2b43507ed900ec8fdf8187cadbf4918
Author: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
AuthorDate: Sat Oct 4 02:48:43 2025 +0300
boot: airoha: speed up spinand flash operations using dma
This patch series greatly improves flash operation speed in u-boot
(roughly a 4x gain for reads). The measurements show:
With DMA:
=> mtd read.benchmark spi-nand0 $loadaddr 0 0x8000000
Reading 134217728 byte(s) (65536 page(s)) at offset 0x00000000
Read speed: 8131kiB/s
Without DMA:
=> mtd read.benchmark spi-nand0 $loadaddr 0 0x8000000
Reading 134217728 byte(s) (65536 page(s)) at offset 0x00000000
Read speed: 2062kiB/s
Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
Link: https://github.com/openwrt/openwrt/pull/20295
Signed-off-by: Robert Marko <robimarko at gmail.com>
---
...02-mtd-spinand-Use-the-spi-mem-dirmap-API.patch | 320 +++++++++++++++++
...remove-unnecessary-operation-adjust_op_si.patch | 51 +++
...add-support-of-dual-quad-wires-spi-modes-.patch | 262 ++++++++++++++
.../patches/205-spi-airoha-add-dma-support.patch | 378 +++++++++++++++++++++
...support-of-dualio-quadio-flash-reading-co.patch | 94 +++++
5 files changed, 1105 insertions(+)
diff --git a/package/boot/uboot-airoha/patches/202-mtd-spinand-Use-the-spi-mem-dirmap-API.patch b/package/boot/uboot-airoha/patches/202-mtd-spinand-Use-the-spi-mem-dirmap-API.patch
new file mode 100644
index 0000000000..6e425792b8
--- /dev/null
+++ b/package/boot/uboot-airoha/patches/202-mtd-spinand-Use-the-spi-mem-dirmap-API.patch
@@ -0,0 +1,320 @@
+From f45ae9019afb838979792e4237e344003151fbf7 Mon Sep 17 00:00:00 2001
+From: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+Date: Sun, 12 Nov 2023 20:57:52 +0300
+Subject: [PATCH 1/5] mtd: spinand: Use the spi-mem dirmap API
+
+Make use of the spi-mem direct mapping API to let advanced controllers
+optimize read/write operations when they support direct mapping.
+
+Based on Linux commit 981d1aa0697c ("mtd: spinand: Use the spi-mem dirmap API")
+by Boris Brezillon <bbrezillon at kernel.org>, with additional
+fixes taken from Linux 6.10.
+
+Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+Reviewed-by: Frieder Schrempf <frieder.schrempf at kontron.de>
+---
+ drivers/mtd/nand/spi/core.c | 185 +++++++++++++++++-------------------
+ include/linux/mtd/spinand.h | 7 ++
+ 2 files changed, 95 insertions(+), 97 deletions(-)
+
+diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
+index f5ddfbf4b83..ea00cd7dcf0 100644
+--- a/drivers/mtd/nand/spi/core.c
++++ b/drivers/mtd/nand/spi/core.c
+@@ -41,21 +41,6 @@ struct spinand_plat {
+ /* SPI NAND index visible in MTD names */
+ static int spi_nand_idx;
+
+-static void spinand_cache_op_adjust_colum(struct spinand_device *spinand,
+- const struct nand_page_io_req *req,
+- u16 *column)
+-{
+- struct nand_device *nand = spinand_to_nand(spinand);
+- unsigned int shift;
+-
+- if (nand->memorg.planes_per_lun < 2)
+- return;
+-
+- /* The plane number is passed in MSB just above the column address */
+- shift = fls(nand->memorg.pagesize);
+- *column |= req->pos.plane << shift;
+-}
+-
+ static int spinand_read_reg_op(struct spinand_device *spinand, u8 reg, u8 *val)
+ {
+ struct spi_mem_op op = SPINAND_GET_FEATURE_OP(reg,
+@@ -249,27 +234,21 @@ static int spinand_load_page_op(struct spinand_device *spinand,
+ static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+ {
+- struct spi_mem_op op = *spinand->op_templates.read_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+- struct nand_page_io_req adjreq = *req;
++ struct spi_mem_dirmap_desc *rdesc;
+ unsigned int nbytes = 0;
+ void *buf = NULL;
+ u16 column = 0;
+- int ret;
++ ssize_t ret;
+
+ if (req->datalen) {
+- adjreq.datalen = nanddev_page_size(nand);
+- adjreq.dataoffs = 0;
+- adjreq.databuf.in = spinand->databuf;
+ buf = spinand->databuf;
+- nbytes = adjreq.datalen;
++ nbytes = nanddev_page_size(nand);
++ column = 0;
+ }
+
+ if (req->ooblen) {
+- adjreq.ooblen = nanddev_per_page_oobsize(nand);
+- adjreq.ooboffs = 0;
+- adjreq.oobbuf.in = spinand->oobbuf;
+ nbytes += nanddev_per_page_oobsize(nand);
+ if (!buf) {
+ buf = spinand->oobbuf;
+@@ -277,28 +256,19 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ }
+ }
+
+- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+- op.addr.val = column;
++ rdesc = spinand->dirmaps[req->pos.plane].rdesc;
+
+- /*
+- * Some controllers are limited in term of max RX data size. In this
+- * case, just repeat the READ_CACHE operation after updating the
+- * column.
+- */
+ while (nbytes) {
+- op.data.buf.in = buf;
+- op.data.nbytes = nbytes;
+- ret = spi_mem_adjust_op_size(spinand->slave, &op);
+- if (ret)
++ ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
++ if (ret < 0)
+ return ret;
+
+- ret = spi_mem_exec_op(spinand->slave, &op);
+- if (ret)
+- return ret;
++ if (!ret || ret > nbytes)
++ return -EIO;
+
+- buf += op.data.nbytes;
+- nbytes -= op.data.nbytes;
+- op.addr.val += op.data.nbytes;
++ nbytes -= ret;
++ column += ret;
++ buf += ret;
+ }
+
+ if (req->datalen)
+@@ -322,14 +292,12 @@ static int spinand_read_from_cache_op(struct spinand_device *spinand,
+ static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ const struct nand_page_io_req *req)
+ {
+- struct spi_mem_op op = *spinand->op_templates.write_cache;
+ struct nand_device *nand = spinand_to_nand(spinand);
+ struct mtd_info *mtd = nanddev_to_mtd(nand);
+- struct nand_page_io_req adjreq = *req;
+- unsigned int nbytes = 0;
+- void *buf = NULL;
+- u16 column = 0;
+- int ret;
++ struct spi_mem_dirmap_desc *wdesc;
++ unsigned int nbytes, column = 0;
++ void *buf = spinand->databuf;
++ ssize_t ret;
+
+ /*
+ * Looks like PROGRAM LOAD (AKA write cache) does not necessarily reset
+@@ -338,19 +306,12 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ * the data portion of the page, otherwise we might corrupt the BBM or
+ * user data previously programmed in OOB area.
+ */
+- memset(spinand->databuf, 0xff,
+- nanddev_page_size(nand) +
+- nanddev_per_page_oobsize(nand));
++ nbytes = nanddev_page_size(nand) + nanddev_per_page_oobsize(nand);
++ memset(spinand->databuf, 0xff, nbytes);
+
+- if (req->datalen) {
++ if (req->datalen)
+ memcpy(spinand->databuf + req->dataoffs, req->databuf.out,
+ req->datalen);
+- adjreq.dataoffs = 0;
+- adjreq.datalen = nanddev_page_size(nand);
+- adjreq.databuf.out = spinand->databuf;
+- nbytes = adjreq.datalen;
+- buf = spinand->databuf;
+- }
+
+ if (req->ooblen) {
+ if (req->mode == MTD_OPS_AUTO_OOB)
+@@ -361,52 +322,21 @@ static int spinand_write_to_cache_op(struct spinand_device *spinand,
+ else
+ memcpy(spinand->oobbuf + req->ooboffs, req->oobbuf.out,
+ req->ooblen);
+-
+- adjreq.ooblen = nanddev_per_page_oobsize(nand);
+- adjreq.ooboffs = 0;
+- nbytes += nanddev_per_page_oobsize(nand);
+- if (!buf) {
+- buf = spinand->oobbuf;
+- column = nanddev_page_size(nand);
+- }
+ }
+
+- spinand_cache_op_adjust_colum(spinand, &adjreq, &column);
+-
+- op = *spinand->op_templates.write_cache;
+- op.addr.val = column;
++ wdesc = spinand->dirmaps[req->pos.plane].wdesc;
+
+- /*
+- * Some controllers are limited in term of max TX data size. In this
+- * case, split the operation into one LOAD CACHE and one or more
+- * LOAD RANDOM CACHE.
+- */
+ while (nbytes) {
+- op.data.buf.out = buf;
+- op.data.nbytes = nbytes;
+-
+- ret = spi_mem_adjust_op_size(spinand->slave, &op);
+- if (ret)
+- return ret;
+-
+- ret = spi_mem_exec_op(spinand->slave, &op);
+- if (ret)
++ ret = spi_mem_dirmap_write(wdesc, column, nbytes, buf);
++ if (ret < 0)
+ return ret;
+
+- buf += op.data.nbytes;
+- nbytes -= op.data.nbytes;
+- op.addr.val += op.data.nbytes;
++ if (!ret || ret > nbytes)
++ return -EIO;
+
+- /*
+- * We need to use the RANDOM LOAD CACHE operation if there's
+- * more than one iteration, because the LOAD operation resets
+- * the cache to 0xff.
+- */
+- if (nbytes) {
+- column = op.addr.val;
+- op = *spinand->op_templates.update_cache;
+- op.addr.val = column;
+- }
++ nbytes -= ret;
++ column += ret;
++ buf += ret;
+ }
+
+ return 0;
+@@ -819,6 +749,59 @@ static int spinand_mtd_block_isreserved(struct mtd_info *mtd, loff_t offs)
+ return ret;
+ }
+
++static int spinand_create_dirmap(struct spinand_device *spinand,
++ unsigned int plane)
++{
++ struct nand_device *nand = spinand_to_nand(spinand);
++ struct spi_mem_dirmap_info info = {
++ .length = nanddev_page_size(nand) +
++ nanddev_per_page_oobsize(nand),
++ };
++ struct spi_mem_dirmap_desc *desc;
++
++ /* The plane number is passed in MSB just above the column address */
++ info.offset = plane << fls(nand->memorg.pagesize);
++
++ info.op_tmpl = *spinand->op_templates.update_cache;
++ desc = spi_mem_dirmap_create(spinand->slave, &info);
++ if (IS_ERR(desc))
++ return PTR_ERR(desc);
++
++ spinand->dirmaps[plane].wdesc = desc;
++
++ info.op_tmpl = *spinand->op_templates.read_cache;
++ desc = spi_mem_dirmap_create(spinand->slave, &info);
++ if (IS_ERR(desc)) {
++ spi_mem_dirmap_destroy(spinand->dirmaps[plane].wdesc);
++ return PTR_ERR(desc);
++ }
++
++ spinand->dirmaps[plane].rdesc = desc;
++
++ return 0;
++}
++
++static int spinand_create_dirmaps(struct spinand_device *spinand)
++{
++ struct nand_device *nand = spinand_to_nand(spinand);
++ int i, ret;
++
++ spinand->dirmaps = devm_kzalloc(spinand->slave->dev,
++ sizeof(*spinand->dirmaps) *
++ nand->memorg.planes_per_lun,
++ GFP_KERNEL);
++ if (!spinand->dirmaps)
++ return -ENOMEM;
++
++ for (i = 0; i < nand->memorg.planes_per_lun; i++) {
++ ret = spinand_create_dirmap(spinand, i);
++ if (ret)
++ return ret;
++ }
++
++ return 0;
++}
++
+ static const struct nand_ops spinand_ops = {
+ .erase = spinand_erase,
+ .markbad = spinand_markbad,
+@@ -1116,6 +1099,14 @@ static int spinand_init(struct spinand_device *spinand)
+ goto err_free_bufs;
+ }
+
++ ret = spinand_create_dirmaps(spinand);
++ if (ret) {
++ dev_err(spinand->slave->dev,
++ "Failed to create direct mappings for read/write operations (err = %d)\n",
++ ret);
++ goto err_manuf_cleanup;
++ }
++
+ /* After power up, all blocks are locked, so unlock them here. */
+ for (i = 0; i < nand->memorg.ntargets; i++) {
+ ret = spinand_select_target(spinand, i);
+diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
+index 6fe6fd520a4..163269313f6 100644
+--- a/include/linux/mtd/spinand.h
++++ b/include/linux/mtd/spinand.h
+@@ -363,6 +363,11 @@ struct spinand_info {
+ __VA_ARGS__ \
+ }
+
++struct spinand_dirmap {
++ struct spi_mem_dirmap_desc *wdesc;
++ struct spi_mem_dirmap_desc *rdesc;
++};
++
+ /**
+ * struct spinand_device - SPI NAND device instance
+ * @base: NAND device instance
+@@ -406,6 +411,8 @@ struct spinand_device {
+ const struct spi_mem_op *update_cache;
+ } op_templates;
+
++ struct spinand_dirmap *dirmaps;
++
+ int (*select_target)(struct spinand_device *spinand,
+ unsigned int target);
+ unsigned int cur_target;
+--
+2.51.0
+
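
A minimal sketch (not part of the patch above) of how a SPI-NAND core reads
one page through a spi-mem direct-mapping descriptor, in the style of
spinand_read_from_cache_op() after this change. The function name
example_dirmap_page_read() is made up; spi_mem_dirmap_read() is the U-Boot
spi-mem API the patch relies on.

#include <spi-mem.h>
#include <linux/errno.h>
#include <linux/types.h>

static int example_dirmap_page_read(struct spi_mem_dirmap_desc *rdesc,
				    void *buf, unsigned int page_size,
				    unsigned int oob_size)
{
	unsigned int nbytes = page_size + oob_size;	/* page + OOB in one go */
	unsigned int column = 0;
	ssize_t ret;

	/* dirmap_read() may return less than requested; loop until done */
	while (nbytes) {
		ret = spi_mem_dirmap_read(rdesc, column, nbytes, buf);
		if (ret < 0)
			return ret;
		if (!ret || ret > nbytes)
			return -EIO;

		nbytes -= ret;
		column += ret;
		buf += ret;
	}

	return 0;
}
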
diff --git a/package/boot/uboot-airoha/patches/203-spi-airoha-remove-unnecessary-operation-adjust_op_si.patch b/package/boot/uboot-airoha/patches/203-spi-airoha-remove-unnecessary-operation-adjust_op_si.patch
new file mode 100644
index 0000000000..639f18fc31
--- /dev/null
+++ b/package/boot/uboot-airoha/patches/203-spi-airoha-remove-unnecessary-operation-adjust_op_si.patch
@@ -0,0 +1,51 @@
+From 1e29cf13c183ee457ed70055f5cbff60ff56a726 Mon Sep 17 00:00:00 2001
+From: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+Date: Sat, 7 Jun 2025 07:18:12 +0300
+Subject: [PATCH 2/5] spi: airoha: remove unnecessary operation adjust_op_size
+
+This operation is not needed because airoha_snand_write_data() and
+airoha_snand_read_data() will properly handle data transfers above
+SPI_MAX_TRANSFER_SIZE.
+
+Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+---
+ drivers/spi/airoha_snfi_spi.c | 16 ----------------
+ 1 file changed, 16 deletions(-)
+
+diff --git a/drivers/spi/airoha_snfi_spi.c b/drivers/spi/airoha_snfi_spi.c
+index 3ea25b293d1..4eb01038404 100644
+--- a/drivers/spi/airoha_snfi_spi.c
++++ b/drivers/spi/airoha_snfi_spi.c
+@@ -525,21 +525,6 @@ static int airoha_snand_nfi_config(struct airoha_snand_priv *priv)
+ SPI_NFI_CUS_SEC_SIZE, val);
+ }
+
+-static int airoha_snand_adjust_op_size(struct spi_slave *slave,
+- struct spi_mem_op *op)
+-{
+- size_t max_len;
+-
+- max_len = 1 + op->addr.nbytes + op->dummy.nbytes;
+- if (max_len >= 160)
+- return -EOPNOTSUPP;
+-
+- if (op->data.nbytes > 160 - max_len)
+- op->data.nbytes = 160 - max_len;
+-
+- return 0;
+-}
+-
+ static bool airoha_snand_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+ {
+@@ -691,7 +676,6 @@ static int airoha_snand_nfi_setup(struct spi_slave *slave,
+ }
+
+ static const struct spi_controller_mem_ops airoha_snand_mem_ops = {
+- .adjust_op_size = airoha_snand_adjust_op_size,
+ .supports_op = airoha_snand_supports_op,
+ .exec_op = airoha_snand_exec_op,
+ };
+--
+2.51.0
+
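
The reasoning above, as a minimal sketch (not part of the patch): the FIFO
helpers already split any transfer into chunks of at most
SPI_MAX_TRANSFER_SIZE bytes, so a separate adjust_op_size() hook adds nothing.
The name example_chunked_xfer() and the xfer callback are hypothetical
stand-ins for airoha_snand_write_data()/airoha_snand_read_data().

#include <linux/kernel.h>
#include <linux/types.h>

#define EXAMPLE_MAX_TRANSFER_SIZE	511

static int example_chunked_xfer(const u8 *data, int len,
				int (*xfer)(const u8 *buf, int chunk))
{
	int i, chunk, err;

	for (i = 0; i < len; i += chunk) {
		/* never push more than the FIFO accepts in one go */
		chunk = min(len - i, EXAMPLE_MAX_TRANSFER_SIZE);
		err = xfer(data + i, chunk);
		if (err)
			return err;
	}

	return 0;
}
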
diff --git a/package/boot/uboot-airoha/patches/204-spi-airoha-add-support-of-dual-quad-wires-spi-modes-.patch b/package/boot/uboot-airoha/patches/204-spi-airoha-add-support-of-dual-quad-wires-spi-modes-.patch
new file mode 100644
index 0000000000..055c8b781a
--- /dev/null
+++ b/package/boot/uboot-airoha/patches/204-spi-airoha-add-support-of-dual-quad-wires-spi-modes-.patch
@@ -0,0 +1,262 @@
+From fe8c32af9d8c8ff8875efece82001680fc300ad5 Mon Sep 17 00:00:00 2001
+From: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+Date: Sat, 7 Jun 2025 09:09:38 +0300
+Subject: [PATCH 3/5] spi: airoha: add support of dual/quad wires spi modes
+ to exec_op() handler
+
+Booting without this patch and with dirmap support disabled results in:
+
+[ 2.980719] spi-nand spi0.0: Micron SPI NAND was found.
+[ 2.986040] spi-nand spi0.0: 256 MiB, block size: 128 KiB, page size: 2048, OOB size: 128
+[ 2.994709] 2 fixed-partitions partitions found on MTD device spi0.0
+[ 3.001075] Creating 2 MTD partitions on "spi0.0":
+[ 3.005862] 0x000000000000-0x000000020000 : "bl2"
+[ 3.011272] 0x000000020000-0x000010000000 : "ubi"
+...
+[ 6.195594] ubi0: attaching mtd1
+[ 13.338398] ubi0: scanning is finished
+[ 13.342188] ubi0 error: ubi_read_volume_table: the layout volume was not found
+[ 13.349784] ubi0 error: ubi_attach_mtd_dev: failed to attach mtd1, error -22
+[ 13.356897] UBI error: cannot attach mtd1
+
+If dirmap is disabled or not supported in the spi driver, dirmap requests
+will be executed via the exec_op() handler. Thus, if the hardware supports
+dual/quad spi modes, the corresponding requests will be sent to the exec_op()
+handler. The current driver does not support such requests, so an error is
+raised and, as a result, the flash can't be read or written.
+
+This patch adds support for dual- and quad-wire spi modes to the exec_op() handler.
+
+Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+---
+ drivers/spi/airoha_snfi_spi.c | 143 +++++++++++++++++++++++++++-------
+ 1 file changed, 117 insertions(+), 26 deletions(-)
+
+diff --git a/drivers/spi/airoha_snfi_spi.c b/drivers/spi/airoha_snfi_spi.c
+index 4eb01038404..7cd409ba44a 100644
+--- a/drivers/spi/airoha_snfi_spi.c
++++ b/drivers/spi/airoha_snfi_spi.c
+@@ -186,6 +186,14 @@
+ #define SPI_NAND_OP_RESET 0xff
+ #define SPI_NAND_OP_DIE_SELECT 0xc2
+
++/* SNAND FIFO commands */
++#define SNAND_FIFO_TX_BUSWIDTH_SINGLE 0x08
++#define SNAND_FIFO_TX_BUSWIDTH_DUAL 0x09
++#define SNAND_FIFO_TX_BUSWIDTH_QUAD 0x0a
++#define SNAND_FIFO_RX_BUSWIDTH_SINGLE 0x0c
++#define SNAND_FIFO_RX_BUSWIDTH_DUAL 0x0e
++#define SNAND_FIFO_RX_BUSWIDTH_QUAD 0x0f
++
+ #define SPI_NAND_CACHE_SIZE (SZ_4K + SZ_256)
+ #define SPI_MAX_TRANSFER_SIZE 511
+
+@@ -380,10 +388,26 @@ static int airoha_snand_set_mode(struct airoha_snand_priv *priv,
+ return regmap_write(priv->regmap_ctrl, REG_SPI_CTRL_DUMMY, 0);
+ }
+
+-static int airoha_snand_write_data(struct airoha_snand_priv *priv, u8 cmd,
+- const u8 *data, int len)
++static int airoha_snand_write_data(struct airoha_snand_priv *priv,
++ const u8 *data, int len, int buswidth)
+ {
+ int i, data_len;
++ u8 cmd;
++
++ switch (buswidth) {
++ case 0:
++ case 1:
++ cmd = SNAND_FIFO_TX_BUSWIDTH_SINGLE;
++ break;
++ case 2:
++ cmd = SNAND_FIFO_TX_BUSWIDTH_DUAL;
++ break;
++ case 4:
++ cmd = SNAND_FIFO_TX_BUSWIDTH_QUAD;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ for (i = 0; i < len; i += data_len) {
+ int err;
+@@ -402,16 +426,32 @@ static int airoha_snand_write_data(struct airoha_snand_priv *priv, u8 cmd,
+ return 0;
+ }
+
+-static int airoha_snand_read_data(struct airoha_snand_priv *priv, u8 *data,
+- int len)
++static int airoha_snand_read_data(struct airoha_snand_priv *priv,
++ u8 *data, int len, int buswidth)
+ {
+ int i, data_len;
++ u8 cmd;
++
++ switch (buswidth) {
++ case 0:
++ case 1:
++ cmd = SNAND_FIFO_RX_BUSWIDTH_SINGLE;
++ break;
++ case 2:
++ cmd = SNAND_FIFO_RX_BUSWIDTH_DUAL;
++ break;
++ case 4:
++ cmd = SNAND_FIFO_RX_BUSWIDTH_QUAD;
++ break;
++ default:
++ return -EINVAL;
++ }
+
+ for (i = 0; i < len; i += data_len) {
+ int err;
+
+ data_len = min(len - i, SPI_MAX_TRANSFER_SIZE);
+- err = airoha_snand_set_fifo_op(priv, 0xc, data_len);
++ err = airoha_snand_set_fifo_op(priv, cmd, data_len);
+ if (err)
+ return err;
+
+@@ -525,6 +565,38 @@ static int airoha_snand_nfi_config(struct airoha_snand_priv *priv)
+ SPI_NFI_CUS_SEC_SIZE, val);
+ }
+
++static bool airoha_snand_is_page_ops(const struct spi_mem_op *op)
++{
++ if (op->addr.nbytes != 2)
++ return false;
++
++ if (op->addr.buswidth != 1 && op->addr.buswidth != 2 &&
++ op->addr.buswidth != 4)
++ return false;
++
++ switch (op->data.dir) {
++ case SPI_MEM_DATA_IN:
++ if (op->dummy.nbytes * BITS_PER_BYTE / op->dummy.buswidth > 0xf)
++ return false;
++
++ /* quad in / quad out */
++ if (op->addr.buswidth == 4)
++ return op->data.buswidth == 4;
++
++ if (op->addr.buswidth == 2)
++ return op->data.buswidth == 2;
++
++ /* standard spi */
++ return op->data.buswidth == 4 || op->data.buswidth == 2 ||
++ op->data.buswidth == 1;
++ case SPI_MEM_DATA_OUT:
++ return !op->dummy.nbytes && op->addr.buswidth == 1 &&
++ (op->data.buswidth == 4 || op->data.buswidth == 1);
++ default:
++ return false;
++ }
++}
++
+ static bool airoha_snand_supports_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+ {
+@@ -534,6 +606,9 @@ static bool airoha_snand_supports_op(struct spi_slave *slave,
+ if (op->cmd.buswidth != 1)
+ return false;
+
++ if (airoha_snand_is_page_ops(op))
++ return true;
++
+ return (!op->addr.nbytes || op->addr.buswidth == 1) &&
+ (!op->dummy.nbytes || op->dummy.buswidth == 1) &&
+ (!op->data.nbytes || op->data.buswidth == 1);
+@@ -542,13 +617,29 @@ static bool airoha_snand_supports_op(struct spi_slave *slave,
+ static int airoha_snand_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+ {
+- u8 data[8], cmd, opcode = op->cmd.opcode;
+ struct udevice *bus = slave->dev->parent;
+ struct airoha_snand_priv *priv;
++ int op_len, addr_len, dummy_len;
++ u8 buf[20], *data;
+ int i, err;
+
+ priv = dev_get_priv(bus);
+
++ op_len = op->cmd.nbytes;
++ addr_len = op->addr.nbytes;
++ dummy_len = op->dummy.nbytes;
++
++ if (op_len + dummy_len + addr_len > sizeof(buf))
++ return -EIO;
++
++ data = buf;
++ for (i = 0; i < op_len; i++)
++ *data++ = op->cmd.opcode >> (8 * (op_len - i - 1));
++ for (i = 0; i < addr_len; i++)
++ *data++ = op->addr.val >> (8 * (addr_len - i - 1));
++ for (i = 0; i < dummy_len; i++)
++ *data++ = 0xff;
++
+ /* switch to manual mode */
+ err = airoha_snand_set_mode(priv, SPI_MODE_MANUAL);
+ if (err < 0)
+@@ -559,40 +650,40 @@ static int airoha_snand_exec_op(struct spi_slave *slave,
+ return err;
+
+ /* opcode */
+- err = airoha_snand_write_data(priv, 0x8, &opcode, sizeof(opcode));
++ data = buf;
++ err = airoha_snand_write_data(priv, data, op_len,
++ op->cmd.buswidth);
+ if (err)
+ return err;
+
+ /* addr part */
+- cmd = opcode == SPI_NAND_OP_GET_FEATURE ? 0x11 : 0x8;
+- put_unaligned_be64(op->addr.val, data);
+-
+- for (i = ARRAY_SIZE(data) - op->addr.nbytes;
+- i < ARRAY_SIZE(data); i++) {
+- err = airoha_snand_write_data(priv, cmd, &data[i],
+- sizeof(data[0]));
++ data += op_len;
++ if (addr_len) {
++ err = airoha_snand_write_data(priv, data, addr_len,
++ op->addr.buswidth);
+ if (err)
+ return err;
+ }
+
+ /* dummy */
+- data[0] = 0xff;
+- for (i = 0; i < op->dummy.nbytes; i++) {
+- err = airoha_snand_write_data(priv, 0x8, &data[0],
+- sizeof(data[0]));
++ data += addr_len;
++ if (dummy_len) {
++ err = airoha_snand_write_data(priv, data, dummy_len,
++ op->dummy.buswidth);
+ if (err)
+ return err;
+ }
+
+ /* data */
+- if (op->data.dir == SPI_MEM_DATA_IN) {
+- err = airoha_snand_read_data(priv, op->data.buf.in,
+- op->data.nbytes);
+- if (err)
+- return err;
+- } else {
+- err = airoha_snand_write_data(priv, 0x8, op->data.buf.out,
+- op->data.nbytes);
++ if (op->data.nbytes) {
++ if (op->data.dir == SPI_MEM_DATA_IN)
++ err = airoha_snand_read_data(priv, op->data.buf.in,
++ op->data.nbytes,
++ op->data.buswidth);
++ else
++ err = airoha_snand_write_data(priv, op->data.buf.out,
++ op->data.nbytes,
++ op->data.buswidth);
+ if (err)
+ return err;
+ }
+--
+2.51.0
+
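
A small worked example (not part of the patch) of the MSB-first serialization
that the new exec_op() code performs before pushing bytes into the FIFO. The
function example_serialize_op() and the sample values are hypothetical.

#include <linux/types.h>

static int example_serialize_op(u8 *buf, size_t buf_len, u8 opcode,
				u32 addr, int addr_len, int dummy_len)
{
	int i, n = 0;

	if (1 + addr_len + dummy_len > buf_len)
		return -1;

	buf[n++] = opcode;			/* 1-byte command */
	for (i = 0; i < addr_len; i++)		/* address bytes, MSB first */
		buf[n++] = addr >> (8 * (addr_len - i - 1));
	for (i = 0; i < dummy_len; i++)		/* dummy cycles sent as 0xff */
		buf[n++] = 0xff;

	return n;
}

For instance, a READ FROM CACHE x4 (opcode 0x6b) with the 2-byte column
address 0x0834 and one dummy byte serializes to { 0x6b, 0x08, 0x34, 0xff }.
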
diff --git a/package/boot/uboot-airoha/patches/205-spi-airoha-add-dma-support.patch b/package/boot/uboot-airoha/patches/205-spi-airoha-add-dma-support.patch
new file mode 100644
index 0000000000..0a4a56d0e9
--- /dev/null
+++ b/package/boot/uboot-airoha/patches/205-spi-airoha-add-dma-support.patch
@@ -0,0 +1,378 @@
+From f1fe2f174f26eb98af35862caea083439e08a344 Mon Sep 17 00:00:00 2001
+From: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+Date: Sun, 8 Jun 2025 05:30:22 +0300
+Subject: [PATCH 4/5] spi: airoha: add dma support
+
+This patch speeds up cache reading/writing/updating operations.
+It was tested on en7523/an7581 and some other Airoha chips.
+
+It will speed up
+ * page reading/writing without oob
+ * page reading/writing with oob
+ * oob reading/writing (significant for UBI scanning)
+
+The only known issue appears under very specific conditions, and only on
+en7523 family chips.
+
+Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+---
+ drivers/spi/airoha_snfi_spi.c | 309 ++++++++++++++++++++++++++++++++++
+ 1 file changed, 309 insertions(+)
+
+diff --git a/drivers/spi/airoha_snfi_spi.c b/drivers/spi/airoha_snfi_spi.c
+index 7cd409ba44a..f72d11f5b19 100644
+--- a/drivers/spi/airoha_snfi_spi.c
++++ b/drivers/spi/airoha_snfi_spi.c
+@@ -141,12 +141,14 @@
+ #define SPI_NFI_CUS_SEC_SIZE_EN BIT(16)
+
+ #define REG_SPI_NFI_RD_CTL2 0x0510
++
+ #define REG_SPI_NFI_RD_CTL3 0x0514
+
+ #define REG_SPI_NFI_PG_CTL1 0x0524
+ #define SPI_NFI_PG_LOAD_CMD GENMASK(15, 8)
+
+ #define REG_SPI_NFI_PG_CTL2 0x0528
++
+ #define REG_SPI_NFI_NOR_PROG_ADDR 0x052c
+ #define REG_SPI_NFI_NOR_RD_ADDR 0x0534
+
+@@ -219,6 +221,8 @@ struct airoha_snand_priv {
+ u8 sec_num;
+ u8 spare_size;
+ } nfi_cfg;
++
++ u8 *txrx_buf;
+ };
+
+ static int airoha_snand_set_fifo_op(struct airoha_snand_priv *priv,
+@@ -614,6 +618,302 @@ static bool airoha_snand_supports_op(struct spi_slave *slave,
+ (!op->data.nbytes || op->data.buswidth == 1);
+ }
+
++static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
++{
++ struct spi_slave *slave = desc->slave;
++ struct udevice *bus = slave->dev->parent;
++ struct airoha_snand_priv *priv = dev_get_priv(bus);
++
++ if (!priv->txrx_buf)
++ return -EINVAL;
++
++ if (desc->info.offset + desc->info.length > U32_MAX)
++ return -EINVAL;
++
++ if (!airoha_snand_supports_op(desc->slave, &desc->info.op_tmpl))
++ return -EOPNOTSUPP;
++
++ return 0;
++}
++
++static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
++ u64 offs, size_t len, void *buf)
++{
++ struct spi_mem_op *op = &desc->info.op_tmpl;
++ struct spi_slave *slave = desc->slave;
++ struct udevice *bus = slave->dev->parent;
++ struct airoha_snand_priv *priv = dev_get_priv(bus);
++ u8 *txrx_buf = priv->txrx_buf;
++ dma_addr_t dma_addr;
++ u32 val, rd_mode;
++ int err;
++
++ switch (op->cmd.opcode) {
++ case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
++ rd_mode = 1;
++ break;
++ case SPI_NAND_OP_READ_FROM_CACHE_QUAD:
++ rd_mode = 2;
++ break;
++ default:
++ rd_mode = 0;
++ break;
++ }
++
++ err = airoha_snand_set_mode(priv, SPI_MODE_DMA);
++ if (err < 0)
++ return err;
++
++ err = airoha_snand_nfi_config(priv);
++ if (err)
++ goto error_dma_mode_off;
++
++ dma_addr = dma_map_single(txrx_buf, SPI_NAND_CACHE_SIZE,
++ DMA_FROM_DEVICE);
++
++ /* set dma addr */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_STRADDR,
++ dma_addr);
++ if (err)
++ goto error_dma_unmap;
++
++ /* set cust sec size */
++ val = priv->nfi_cfg.sec_size * priv->nfi_cfg.sec_num;
++ val = FIELD_PREP(SPI_NFI_READ_DATA_BYTE_NUM, val);
++ err = regmap_update_bits(priv->regmap_nfi,
++ REG_SPI_NFI_SNF_MISC_CTL2,
++ SPI_NFI_READ_DATA_BYTE_NUM, val);
++ if (err)
++ goto error_dma_unmap;
++
++ /* set read command */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_RD_CTL2,
++ op->cmd.opcode);
++ if (err)
++ goto error_dma_unmap;
++
++ /* set read mode */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
++ FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, rd_mode));
++ if (err)
++ goto error_dma_unmap;
++
++ /* set read addr: zero page offset + descriptor read offset */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_RD_CTL3,
++ desc->info.offset);
++ if (err)
++ goto error_dma_unmap;
++
++ /* set nfi read */
++ err = regmap_update_bits(priv->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_OPMODE,
++ FIELD_PREP(SPI_NFI_OPMODE, 6));
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_set_bits(priv->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_READ_MODE | SPI_NFI_DMA_MODE);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_CMD, 0x0);
++ if (err)
++ goto error_dma_unmap;
++
++ /* trigger dma reading */
++ err = regmap_clear_bits(priv->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_RD_TRIG);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_set_bits(priv->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_RD_TRIG);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_read_poll_timeout(priv->regmap_nfi,
++ REG_SPI_NFI_SNF_STA_CTL1, val,
++ (val & SPI_NFI_READ_FROM_CACHE_DONE),
++ 0, 1 * MSEC_PER_SEC);
++ if (err)
++ goto error_dma_unmap;
++
++ /*
++ * SPI_NFI_READ_FROM_CACHE_DONE bit must be written at the end
++ * of dirmap_read operation even if it is already set.
++ */
++ err = regmap_update_bits(priv->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
++ SPI_NFI_READ_FROM_CACHE_DONE,
++ SPI_NFI_READ_FROM_CACHE_DONE);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_read_poll_timeout(priv->regmap_nfi, REG_SPI_NFI_INTR,
++ val, (val & SPI_NFI_AHB_DONE), 0,
++ 1 * MSEC_PER_SEC);
++ if (err)
++ goto error_dma_unmap;
++
++ /* DMA read need delay for data ready from controller to DRAM */
++ udelay(1);
++
++ dma_unmap_single(dma_addr, SPI_NAND_CACHE_SIZE, DMA_FROM_DEVICE);
++
++ err = airoha_snand_set_mode(priv, SPI_MODE_MANUAL);
++ if (err < 0)
++ return err;
++
++ memcpy(buf, txrx_buf + offs, len);
++
++ return len;
++
++error_dma_unmap:
++ dma_unmap_single(dma_addr, SPI_NAND_CACHE_SIZE, DMA_FROM_DEVICE);
++error_dma_mode_off:
++ airoha_snand_set_mode(priv, SPI_MODE_MANUAL);
++ return err;
++}
++
++static ssize_t airoha_snand_dirmap_write(struct spi_mem_dirmap_desc *desc,
++ u64 offs, size_t len, const void *buf)
++{
++ struct spi_slave *slave = desc->slave;
++ struct udevice *bus = slave->dev->parent;
++ struct airoha_snand_priv *priv = dev_get_priv(bus);
++ u8 *txrx_buf = priv->txrx_buf;
++ dma_addr_t dma_addr;
++ u32 wr_mode, val, opcode;
++ int err;
++
++ opcode = desc->info.op_tmpl.cmd.opcode;
++ switch (opcode) {
++ case SPI_NAND_OP_PROGRAM_LOAD_SINGLE:
++ case SPI_NAND_OP_PROGRAM_LOAD_RAMDOM_SINGLE:
++ wr_mode = 0;
++ break;
++ case SPI_NAND_OP_PROGRAM_LOAD_QUAD:
++ case SPI_NAND_OP_PROGRAM_LOAD_RAMDON_QUAD:
++ wr_mode = 2;
++ break;
++ default:
++ /* unknown opcode */
++ return -EOPNOTSUPP;
++ }
++
++ memcpy(txrx_buf + offs, buf, len);
++
++ err = airoha_snand_set_mode(priv, SPI_MODE_DMA);
++ if (err < 0)
++ return err;
++
++ err = airoha_snand_nfi_config(priv);
++ if (err)
++ goto error_dma_mode_off;
++
++ dma_addr = dma_map_single(txrx_buf, SPI_NAND_CACHE_SIZE,
++ DMA_TO_DEVICE);
++
++ /* set dma addr */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_STRADDR,
++ dma_addr);
++ if (err)
++ goto error_dma_unmap;
++
++ val = FIELD_PREP(SPI_NFI_PROG_LOAD_BYTE_NUM,
++ priv->nfi_cfg.sec_size * priv->nfi_cfg.sec_num);
++ err = regmap_update_bits(priv->regmap_nfi,
++ REG_SPI_NFI_SNF_MISC_CTL2,
++ SPI_NFI_PROG_LOAD_BYTE_NUM, val);
++ if (err)
++ goto error_dma_unmap;
++
++ /* set write command */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_PG_CTL1,
++ FIELD_PREP(SPI_NFI_PG_LOAD_CMD, opcode));
++ if (err)
++ goto error_dma_unmap;
++
++ /* set write mode */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_SNF_MISC_CTL,
++ FIELD_PREP(SPI_NFI_DATA_READ_WR_MODE, wr_mode));
++ if (err)
++ goto error_dma_unmap;
++
++ /* set write addr: zero page offset + descriptor write offset */
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_PG_CTL2,
++ desc->info.offset);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_clear_bits(priv->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_READ_MODE);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_update_bits(priv->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_OPMODE,
++ FIELD_PREP(SPI_NFI_OPMODE, 3));
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_set_bits(priv->regmap_nfi, REG_SPI_NFI_CNFG,
++ SPI_NFI_DMA_MODE);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_CMD, 0x80);
++ if (err)
++ goto error_dma_unmap;
++
++ /* trigger dma writing */
++ err = regmap_clear_bits(priv->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_WR_TRIG);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_set_bits(priv->regmap_nfi, REG_SPI_NFI_CON,
++ SPI_NFI_WR_TRIG);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_read_poll_timeout(priv->regmap_nfi, REG_SPI_NFI_INTR,
++ val, (val & SPI_NFI_AHB_DONE), 0,
++ 1 * MSEC_PER_SEC);
++ if (err)
++ goto error_dma_unmap;
++
++ err = regmap_read_poll_timeout(priv->regmap_nfi,
++ REG_SPI_NFI_SNF_STA_CTL1, val,
++ (val & SPI_NFI_LOAD_TO_CACHE_DONE),
++ 0, 1 * MSEC_PER_SEC);
++ if (err)
++ goto error_dma_unmap;
++
++ /*
++ * SPI_NFI_LOAD_TO_CACHE_DONE bit must be written at the end
++ * of dirmap_write operation even if it is already set.
++ */
++ err = regmap_update_bits(priv->regmap_nfi, REG_SPI_NFI_SNF_STA_CTL1,
++ SPI_NFI_LOAD_TO_CACHE_DONE,
++ SPI_NFI_LOAD_TO_CACHE_DONE);
++ if (err)
++ goto error_dma_unmap;
++
++ dma_unmap_single(dma_addr, SPI_NAND_CACHE_SIZE, DMA_TO_DEVICE);
++
++ err = airoha_snand_set_mode(priv, SPI_MODE_MANUAL);
++ if (err < 0)
++ return err;
++
++ return len;
++
++error_dma_unmap:
++ dma_unmap_single(dma_addr, SPI_NAND_CACHE_SIZE, DMA_TO_DEVICE);
++error_dma_mode_off:
++ airoha_snand_set_mode(priv, SPI_MODE_MANUAL);
++ return err;
++}
++
+ static int airoha_snand_exec_op(struct spi_slave *slave,
+ const struct spi_mem_op *op)
+ {
+@@ -696,6 +996,12 @@ static int airoha_snand_probe(struct udevice *dev)
+ struct airoha_snand_priv *priv = dev_get_priv(dev);
+ int ret;
+
++ priv->txrx_buf = memalign(ARCH_DMA_MINALIGN, SPI_NAND_CACHE_SIZE);
++ if (!priv->txrx_buf) {
++ dev_err(dev, "failed to alloacate memory for dirmap\n");
++ return -ENOMEM;
++ }
++
+ ret = regmap_init_mem_index(dev_ofnode(dev), &priv->regmap_ctrl, 0);
+ if (ret) {
+ dev_err(dev, "failed to init spi ctrl regmap\n");
+@@ -769,6 +1075,9 @@ static int airoha_snand_nfi_setup(struct spi_slave *slave,
+ static const struct spi_controller_mem_ops airoha_snand_mem_ops = {
+ .supports_op = airoha_snand_supports_op,
+ .exec_op = airoha_snand_exec_op,
++ .dirmap_create = airoha_snand_dirmap_create,
++ .dirmap_read = airoha_snand_dirmap_read,
++ .dirmap_write = airoha_snand_dirmap_write,
+ };
+
+ static const struct dm_spi_ops airoha_snfi_spi_ops = {
+--
+2.51.0
+
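
The dirmap_read()/dirmap_write() handlers above follow the usual U-Boot DMA
bounce-buffer pattern: map the buffer, program and poll the NFI engine, unmap,
and only then copy to or from the caller's buffer. A condensed sketch (not
part of the patch), with the register programming collapsed into the
hypothetical callback start_nfi_dma_and_wait(); dma_map_single() and
dma_unmap_single() are the U-Boot flavours used by the patch, which take the
virtual address and no device argument.

#include <linux/dma-mapping.h>
#include <linux/string.h>
#include <linux/types.h>

static ssize_t example_dma_read(u8 *bounce_buf, size_t cache_size,
				u64 offs, size_t len, void *buf,
				int (*start_nfi_dma_and_wait)(dma_addr_t))
{
	dma_addr_t dma_addr;
	int err;

	/* hand the cache-aligned bounce buffer over to the DMA engine */
	dma_addr = dma_map_single(bounce_buf, cache_size, DMA_FROM_DEVICE);

	/* program the NFI engine, trigger the read and poll for completion */
	err = start_nfi_dma_and_wait(dma_addr);

	/* give buffer ownership back to the CPU before touching the data */
	dma_unmap_single(dma_addr, cache_size, DMA_FROM_DEVICE);
	if (err)
		return err;

	/* only now copy the requested window out of the bounce buffer */
	memcpy(buf, bounce_buf + offs, len);

	return len;
}
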
diff --git a/package/boot/uboot-airoha/patches/206-spi-airoha-support-of-dualio-quadio-flash-reading-co.patch b/package/boot/uboot-airoha/patches/206-spi-airoha-support-of-dualio-quadio-flash-reading-co.patch
new file mode 100644
index 0000000000..8defba35ae
--- /dev/null
+++ b/package/boot/uboot-airoha/patches/206-spi-airoha-support-of-dualio-quadio-flash-reading-co.patch
@@ -0,0 +1,94 @@
+From 2ebbccfa053993d0fe90bee523020a8f796e8988 Mon Sep 17 00:00:00 2001
+From: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+Date: Sun, 8 Jun 2025 05:30:22 +0300
+Subject: [PATCH 5/5] spi: airoha: support of dualio/quadio flash reading
+ commands
+
+The Airoha snfi spi controller supports acceleration of DUAL/QUAD
+operations, but does not support DUAL_IO/QUAD_IO operations.
+Luckily, DUAL/QUAD operations do the same as DUAL_IO/QUAD_IO ones,
+so we can issue the corresponding DUAL/QUAD operation instead of
+the DUAL_IO/QUAD_IO one.
+
+Signed-off-by: Mikhail Kshevetskiy <mikhail.kshevetskiy at iopsys.eu>
+---
+ drivers/spi/airoha_snfi_spi.c | 27 +++++++++++++++++++++------
+ 1 file changed, 21 insertions(+), 6 deletions(-)
+
+diff --git a/drivers/spi/airoha_snfi_spi.c b/drivers/spi/airoha_snfi_spi.c
+index f72d11f5b19..7cafa900bbc 100644
+--- a/drivers/spi/airoha_snfi_spi.c
++++ b/drivers/spi/airoha_snfi_spi.c
+@@ -141,6 +141,7 @@
+ #define SPI_NFI_CUS_SEC_SIZE_EN BIT(16)
+
+ #define REG_SPI_NFI_RD_CTL2 0x0510
++#define SPI_NFI_DATA_READ_CMD GENMASK(7, 0)
+
+ #define REG_SPI_NFI_RD_CTL3 0x0514
+
+@@ -175,7 +176,9 @@
+ #define SPI_NAND_OP_READ_FROM_CACHE_SINGLE 0x03
+ #define SPI_NAND_OP_READ_FROM_CACHE_SINGLE_FAST 0x0b
+ #define SPI_NAND_OP_READ_FROM_CACHE_DUAL 0x3b
++#define SPI_NAND_OP_READ_FROM_CACHE_DUALIO 0xbb
+ #define SPI_NAND_OP_READ_FROM_CACHE_QUAD 0x6b
++#define SPI_NAND_OP_READ_FROM_CACHE_QUADIO 0xeb
+ #define SPI_NAND_OP_WRITE_ENABLE 0x06
+ #define SPI_NAND_OP_WRITE_DISABLE 0x04
+ #define SPI_NAND_OP_PROGRAM_LOAD_SINGLE 0x02
+@@ -639,25 +642,37 @@ static int airoha_snand_dirmap_create(struct spi_mem_dirmap_desc *desc)
+ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
+ u64 offs, size_t len, void *buf)
+ {
+- struct spi_mem_op *op = &desc->info.op_tmpl;
+ struct spi_slave *slave = desc->slave;
+ struct udevice *bus = slave->dev->parent;
+ struct airoha_snand_priv *priv = dev_get_priv(bus);
+ u8 *txrx_buf = priv->txrx_buf;
+ dma_addr_t dma_addr;
+- u32 val, rd_mode;
++ u32 val, rd_mode, opcode;
+ int err;
+
+- switch (op->cmd.opcode) {
++ /*
++ * DUALIO and QUADIO opcodes are not supported by the spi controller,
++ * replace them with supported opcodes.
++ */
++ opcode = desc->info.op_tmpl.cmd.opcode;
++ switch (opcode) {
++ case SPI_NAND_OP_READ_FROM_CACHE_SINGLE:
++ case SPI_NAND_OP_READ_FROM_CACHE_SINGLE_FAST:
++ rd_mode = 0;
++ break;
+ case SPI_NAND_OP_READ_FROM_CACHE_DUAL:
++ case SPI_NAND_OP_READ_FROM_CACHE_DUALIO:
++ opcode = SPI_NAND_OP_READ_FROM_CACHE_DUAL;
+ rd_mode = 1;
+ break;
+ case SPI_NAND_OP_READ_FROM_CACHE_QUAD:
++ case SPI_NAND_OP_READ_FROM_CACHE_QUADIO:
++ opcode = SPI_NAND_OP_READ_FROM_CACHE_QUAD;
+ rd_mode = 2;
+ break;
+ default:
+- rd_mode = 0;
+- break;
++ /* unknown opcode */
++ return -EOPNOTSUPP;
+ }
+
+ err = airoha_snand_set_mode(priv, SPI_MODE_DMA);
+@@ -688,7 +703,7 @@ static ssize_t airoha_snand_dirmap_read(struct spi_mem_dirmap_desc *desc,
+
+ /* set read command */
+ err = regmap_write(priv->regmap_nfi, REG_SPI_NFI_RD_CTL2,
+- op->cmd.opcode);
++ FIELD_PREP(SPI_NFI_DATA_READ_CMD, opcode));
+ if (err)
+ goto error_dma_unmap;
+
+--
+2.51.0
+