[RFC PATCH 8/8] mtd: spi-nor: Enhance locking to support reads while writes

Miquel Raynal miquel.raynal at bootlin.com
Wed May 25 09:31:08 PDT 2022


On devices featuring several banks, the Read While Write (RWW) feature
improves the overall performance by allowing parallel reads and writes
at different locations (different banks). The following constraints have
to be taken into account:
1#: Only a single operation can be performed in a given bank at a time.
2#: Only a single program or erase operation can happen on the entire chip.
3#: The I/O bus is unique and thus is the most constrained resource: all
    spi-nor operations requiring access to the spi bus (through the spi
    controller) must be serialized until the bus exchanges are over, so
    we must ensure only a single operation can be "sent" at a time.
4#: Any operation that is neither a read, a write nor an erase is
    considered to require access to the full chip and cannot be
    parallelized; we then need to ensure the full chip is in the idle
    state when such an operation occurs.

All these constraints can easily be managed with a proper locking model:
1#: Is protected by a per-bank mutex. Only a single operation can happen
    in a specific bank at any time. If the bank mutex is not available,
    the operation cannot start.
2#: Is handled by the pe_mode mutex which is acquired before any write
    or erase, and is released only at the very end of the
    operation. This way, no other destructive operation on the chip can
    start during this time frame.
3#: A device-wide mutex is introduced in order to capture and serialize
    bus accesses. This lock is released much earlier than the single
    lock used to be, because we only need to protect the chip against
    other SPI accesses during the I/O phase, which for the destructive
    operations is the beginning of the operation (when we send the
    command cycles and possibly the data), while the second part of the
    operation (the erase delay or the program delay) is when we can do
    something else with another bank.
4#: Is handled by the "generic" helpers which existed before, where
    basically all the locks are taken before the operation can start,
    and all locks are released once done (see the sketch below).

As many devices still do not support this feature, the original lock is
also kept in a union: either the feature is available and we initialize
and use the new locks, or it is not and we keep using the previous
logic.
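
The range of banks to lock is derived only from the access offset and
length, with the last bank index being inclusive. As a standalone
illustration (plain userspace C, assuming a 16 MiB bank size) of the
computation done by the new spi_nor_lock_banks()/spi_nor_unlock_banks()
helpers:

    #include <stdio.h>

    int main(void)
    {
            unsigned long long bank_size = 16ULL << 20;        /* assumed 16 MiB banks */
            unsigned long long start = (16ULL << 20) - 0x1000; /* last 4 KiB of bank 0 */
            unsigned long long len = 0x2000;                   /* crosses into bank 1 */
            unsigned long long first = start / bank_size;
            unsigned long long last = (start + len - 1) / bank_size;

            /* Prints "lock banks 0 to 1": both bank mutexes must be held. */
            printf("lock banks %llu to %llu\n", first, last);
            return 0;
    }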

Signed-off-by: Miquel Raynal <miquel.raynal at bootlin.com>
---
 drivers/mtd/spi-nor/core.c  | 134 +++++++++++++++++++++++++++++++++---
 include/linux/mtd/spi-nor.h |  12 +++-
 2 files changed, 134 insertions(+), 12 deletions(-)

diff --git a/drivers/mtd/spi-nor/core.c b/drivers/mtd/spi-nor/core.c
index 3d8e5f9961b9..872a3934f9a6 100644
--- a/drivers/mtd/spi-nor/core.c
+++ b/drivers/mtd/spi-nor/core.c
@@ -1081,6 +1081,61 @@ static void spi_nor_unprep(struct spi_nor *nor)
 		nor->controller_ops->unprepare(nor);
 }
 
+static bool spi_nor_parallel_locking(struct spi_nor *nor)
+{
+	return nor->info->n_banks > 1 && nor->info->no_sfdp_flags & SPI_NOR_RWW;
+}
+
+static void spi_nor_lock_all_banks(struct spi_nor *nor)
+{
+	int i;
+
+	for (i = 0; i < nor->info->n_banks; i++)
+		mutex_lock(&nor->locks.bank[i]);
+}
+
+static void spi_nor_unlock_all_banks(struct spi_nor *nor)
+{
+	int i;
+
+	for (i = 0; i < nor->info->n_banks; i++)
+		mutex_unlock(&nor->locks.bank[i]);
+}
+
+static void spi_nor_lock_banks(struct spi_nor *nor, loff_t start, size_t len)
+{
+	int first, last, i;
+
+	first = DIV_ROUND_DOWN_ULL(start, nor->params->bank_size);
+	last = DIV_ROUND_DOWN_ULL(start + len - 1, nor->params->bank_size);
+
+	for (i = first; i <= last; i++)
+		mutex_lock(&nor->locks.bank[i]);
+}
+
+static void spi_nor_unlock_banks(struct spi_nor *nor, loff_t start, size_t len)
+{
+	int first, last, i;
+
+	first = DIV_ROUND_DOWN_ULL(start, nor->params->bank_size);
+	last = DIV_ROUND_DOWN_ULL(start + len - 1, nor->params->bank_size);
+
+	for (i = first; i <= last; i++)
+		mutex_unlock(&nor->locks.bank[i]);
+}
+
+static void spi_nor_lock_device(struct spi_nor *nor)
+{
+	if (spi_nor_parallel_locking(nor))
+		mutex_lock(&nor->locks.device);
+}
+
+static void spi_nor_unlock_device(struct spi_nor *nor)
+{
+	if (spi_nor_parallel_locking(nor))
+		mutex_unlock(&nor->locks.device);
+}
+
 /* Generic helpers for internal locking and serialization */
 int spi_nor_lock_and_prep(struct spi_nor *nor)
 {
@@ -1090,14 +1145,26 @@ int spi_nor_lock_and_prep(struct spi_nor *nor)
 	if (ret)
 		return ret;
 
-	mutex_lock(&nor->lock);
+	if (!spi_nor_parallel_locking(nor)) {
+		mutex_lock(&nor->lock);
+	} else {
+		spi_nor_lock_all_banks(nor);
+		mutex_lock(&nor->locks.pe_mode);
+		mutex_lock(&nor->locks.device);
+	}
 
 	return 0;
 }
 
 void spi_nor_unlock_and_unprep(struct spi_nor *nor)
 {
-	mutex_unlock(&nor->lock);
+	if (!spi_nor_parallel_locking(nor)) {
+		mutex_unlock(&nor->lock);
+	} else {
+		mutex_unlock(&nor->locks.device);
+		mutex_unlock(&nor->locks.pe_mode);
+		spi_nor_unlock_all_banks(nor);
+	}
 
 	spi_nor_unprep(nor);
 }
@@ -1111,14 +1178,24 @@ static int spi_nor_lock_and_prep_pe(struct spi_nor *nor, loff_t start, size_t len)
 	if (ret)
 		return ret;
 
-	mutex_lock(&nor->lock);
+	if (!spi_nor_parallel_locking(nor)) {
+		mutex_lock(&nor->lock);
+	} else {
+		spi_nor_lock_banks(nor, start, len);
+		mutex_lock(&nor->locks.pe_mode);
+	}
 
 	return 0;
 }
 
 static void spi_nor_unlock_and_unprep_pe(struct spi_nor *nor, loff_t start, size_t len)
 {
-	mutex_unlock(&nor->lock);
+	if (!spi_nor_parallel_locking(nor)) {
+		mutex_unlock(&nor->lock);
+	} else {
+		mutex_unlock(&nor->locks.pe_mode);
+		spi_nor_unlock_banks(nor, start, len);
+	}
 
 	spi_nor_unprep(nor);
 }
@@ -1132,14 +1209,20 @@ static int spi_nor_lock_and_prep_rd(struct spi_nor *nor, loff_t start, size_t len)
 	if (ret)
 		return ret;
 
-	mutex_lock(&nor->lock);
+	if (!spi_nor_parallel_locking(nor))
+		mutex_lock(&nor->lock);
+	else
+		spi_nor_lock_banks(nor, start, len);
 
 	return 0;
 }
 
 static void spi_nor_unlock_and_unprep_rd(struct spi_nor *nor, loff_t start, size_t len)
 {
-	mutex_unlock(&nor->lock);
+	if (!spi_nor_parallel_locking(nor))
+		mutex_unlock(&nor->lock);
+	else
+		spi_nor_unlock_banks(nor, start, len);
 
 	spi_nor_unprep(nor);
 }
@@ -1448,11 +1531,16 @@ static int spi_nor_erase_multi_sectors(struct spi_nor *nor, u64 addr, u32 len)
 			dev_vdbg(nor->dev, "erase_cmd->size = 0x%08x, erase_cmd->opcode = 0x%02x, erase_cmd->count = %u\n",
 				 cmd->size, cmd->opcode, cmd->count);
 
+			spi_nor_lock_device(nor);
+
 			ret = spi_nor_write_enable(nor);
-			if (ret)
+			if (ret) {
+				spi_nor_unlock_device(nor);
 				goto destroy_erase_cmd_list;
+			}
 
 			ret = spi_nor_erase_sector(nor, addr);
+			spi_nor_unlock_device(nor);
 			if (ret)
 				goto destroy_erase_cmd_list;
 
@@ -1505,11 +1593,16 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
 	if (len == mtd->size && !(nor->flags & SNOR_F_NO_OP_CHIP_ERASE)) {
 		unsigned long timeout;
 
+		spi_nor_lock_device(nor);
+
 		ret = spi_nor_write_enable(nor);
-		if (ret)
+		if (ret) {
+			spi_nor_unlock_device(nor);
 			goto erase_err;
+		}
 
 		ret = spi_nor_erase_chip(nor);
+		spi_nor_unlock_device(nor);
 		if (ret)
 			goto erase_err;
 
@@ -1534,11 +1627,16 @@ static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr)
 	/* "sector"-at-a-time erase */
 	} else if (spi_nor_has_uniform_erase(nor)) {
 		while (len) {
+			spi_nor_lock_device(nor);
+
 			ret = spi_nor_write_enable(nor);
-			if (ret)
+			if (ret) {
+				spi_nor_unlock_device(nor);
 				goto erase_err;
+			}
 
 			ret = spi_nor_erase_sector(nor, addr);
+			spi_nor_unlock_device(nor);
 			if (ret)
 				goto erase_err;
 
@@ -1756,7 +1854,9 @@ static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len,
 
 		addr = spi_nor_convert_addr(nor, addr);
 
+		spi_nor_lock_device(nor);
 		ret = spi_nor_read_data(nor, addr, len, buf);
+		spi_nor_unlock_device(nor);
 		if (ret == 0) {
 			/* We shouldn't see 0-length reads */
 			ret = -EIO;
@@ -1819,11 +1919,16 @@ static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len,
 
 		addr = spi_nor_convert_addr(nor, addr);
 
+		spi_nor_lock_device(nor);
+
 		ret = spi_nor_write_enable(nor);
-		if (ret)
+		if (ret) {
+			spi_nor_unlock_device(nor);
 			goto write_err;
+		}
 
 		ret = spi_nor_write_data(nor, addr, page_remain, buf + i);
+		spi_nor_unlock_device(nor);
 		if (ret < 0)
 			goto write_err;
 		written = ret;
@@ -3060,7 +3165,14 @@ int spi_nor_scan(struct spi_nor *nor, const char *name,
 
 	nor->info = info;
 
-	mutex_init(&nor->lock);
+	if (!spi_nor_parallel_locking(nor)) {
+		mutex_init(&nor->lock);
+	} else {
+		mutex_init(&nor->locks.device);
+		mutex_init(&nor->locks.pe_mode);
+		for (i = 0; i < nor->info->n_banks; i++)
+			mutex_init(&nor->locks.bank[i]);
+	}
 
 	/* Init flash parameters based on flash_info struct and SFDP */
 	ret = spi_nor_init_params(nor);
diff --git a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h
index 5e25a7b75ae2..772da9b074d9 100644
--- a/include/linux/mtd/spi-nor.h
+++ b/include/linux/mtd/spi-nor.h
@@ -346,6 +346,9 @@ struct spi_nor_flash_parameter;
  * struct spi_nor - Structure for defining the SPI NOR layer
  * @mtd:		an mtd_info structure
  * @lock:		the lock for the read/write/erase/lock/unlock operations
+ * @locks.device:	the lock for the I/O bus
+ * @locks.pe_mode:	the lock for the program/erase mode for RWW operations
+ * @locks.bank:		one lock per available bank
  * @dev:		pointer to an SPI device or an SPI NOR controller device
  * @spimem:		pointer to the SPI memory device
  * @bouncebuf:		bounce buffer used when the buffer passed by the MTD
@@ -375,7 +378,14 @@ struct spi_nor_flash_parameter;
  */
 struct spi_nor {
 	struct mtd_info		mtd;
-	struct mutex		lock;
+	union {
+		struct mutex	lock;
+		struct {
+			struct mutex device;
+			struct mutex pe_mode;
+			struct mutex *bank;
+		} locks;
+	};
 	struct device		*dev;
 	struct spi_mem		*spimem;
 	u8			*bouncebuf;
-- 
2.34.1



