
[v2,3/9] mtd: spi-nand: Isolate the MTD read logic in a helper

Message ID 20240826101412.20644-4-miquel.raynal@bootlin.com
State Accepted
Series mtd: spi-nand: Continuous read support

Commit Message

Miquel Raynal Aug. 26, 2024, 10:14 a.m. UTC
There is currently only a single path for performing page reads as
requested by the MTD layer. Soon there will be two:
- a "regular" page read
- a continuous page read

Let's extract the page read logic into a dedicated helper, so that
introducing continuous page reads will be as easy as checking whether
continuous reads shall/can be used and calling one helper or the other.

There is no behavioral change intended.

Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>
---
 drivers/mtd/nand/spi/core.c | 40 ++++++++++++++++++++++++-------------
 1 file changed, 26 insertions(+), 14 deletions(-)
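
For context, later in this series spinand_mtd_read() is expected to become a
small dispatcher that picks between the regular helper introduced here and a
continuous-read variant. The sketch below is purely illustrative:
spinand_use_cont_read() and spinand_mtd_continuous_page_read() are hypothetical
names standing in for whatever the follow-up patches actually add; only the
locking and stats handling is taken from this patch.

/*
 * Illustrative sketch only: the continuous-read helper and the
 * selection check are assumed names, not part of this patch.
 */
static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
			    struct mtd_oob_ops *ops)
{
	struct spinand_device *spinand = mtd_to_spinand(mtd);
	struct mtd_ecc_stats old_stats;
	unsigned int max_bitflips = 0;
	int ret;

	mutex_lock(&spinand->lock);

	old_stats = mtd->ecc_stats;

	if (spinand_use_cont_read(mtd, from, ops))
		ret = spinand_mtd_continuous_page_read(mtd, from, ops,
						       &max_bitflips);
	else
		ret = spinand_mtd_regular_page_read(mtd, from, ops,
						    &max_bitflips);

	if (ops->stats) {
		ops->stats->uncorrectable_errors +=
			mtd->ecc_stats.failed - old_stats.failed;
		ops->stats->corrected_bitflips +=
			mtd->ecc_stats.corrected - old_stats.corrected;
	}

	mutex_unlock(&spinand->lock);

	return ret ? ret : max_bitflips;
}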

Comments

Miquel Raynal Sept. 6, 2024, 3:03 p.m. UTC | #1
On Mon, 2024-08-26 at 10:14:06 UTC, Miquel Raynal wrote:
> There is currently only a single path for performing page reads as
> requested by the MTD layer. Soon there will be two:
> - a "regular" page read
> - a continuous page read
> 
> Let's extract the page read logic into a dedicated helper, so that
> introducing continuous page reads will be as easy as checking whether
> continuous reads shall/can be used and calling one helper or the other.
> 
> There is no behavioral change intended.
> 
> Signed-off-by: Miquel Raynal <miquel.raynal@bootlin.com>

Applied to https://git.kernel.org/pub/scm/linux/kernel/git/mtd/linux.git nand/next.

Miquel

Patch

diff --git a/drivers/mtd/nand/spi/core.c b/drivers/mtd/nand/spi/core.c
index e0b6715e5dfe..9042a092687c 100644
--- a/drivers/mtd/nand/spi/core.c
+++ b/drivers/mtd/nand/spi/core.c
@@ -630,25 +630,20 @@  static int spinand_write_page(struct spinand_device *spinand,
 	return nand_ecc_finish_io_req(nand, (struct nand_page_io_req *)req);
 }
 
-static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
-			    struct mtd_oob_ops *ops)
+static int spinand_mtd_regular_page_read(struct mtd_info *mtd, loff_t from,
+					 struct mtd_oob_ops *ops,
+					 unsigned int *max_bitflips)
 {
 	struct spinand_device *spinand = mtd_to_spinand(mtd);
 	struct nand_device *nand = mtd_to_nanddev(mtd);
-	struct mtd_ecc_stats old_stats;
-	unsigned int max_bitflips = 0;
 	struct nand_io_iter iter;
 	bool disable_ecc = false;
 	bool ecc_failed = false;
-	int ret = 0;
+	int ret;
 
-	if (ops->mode == MTD_OPS_RAW || !spinand->eccinfo.ooblayout)
+	if (ops->mode == MTD_OPS_RAW || !mtd->ooblayout)
 		disable_ecc = true;
 
-	mutex_lock(&spinand->lock);
-
-	old_stats = mtd->ecc_stats;
-
 	nanddev_io_for_each_page(nand, NAND_PAGE_READ, from, ops, &iter) {
 		if (disable_ecc)
 			iter.req.mode = MTD_OPS_RAW;
@@ -664,13 +659,33 @@  static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 		if (ret == -EBADMSG)
 			ecc_failed = true;
 		else
-			max_bitflips = max_t(unsigned int, max_bitflips, ret);
+			*max_bitflips = max_t(unsigned int, *max_bitflips, ret);
 
 		ret = 0;
 		ops->retlen += iter.req.datalen;
 		ops->oobretlen += iter.req.ooblen;
 	}
 
+	if (ecc_failed && !ret)
+		ret = -EBADMSG;
+
+	return ret;
+}
+
+static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
+			    struct mtd_oob_ops *ops)
+{
+	struct spinand_device *spinand = mtd_to_spinand(mtd);
+	struct mtd_ecc_stats old_stats;
+	unsigned int max_bitflips = 0;
+	int ret;
+
+	mutex_lock(&spinand->lock);
+
+	old_stats = mtd->ecc_stats;
+
+	ret = spinand_mtd_regular_page_read(mtd, from, ops, &max_bitflips);
+
 	if (ops->stats) {
 		ops->stats->uncorrectable_errors +=
 			mtd->ecc_stats.failed - old_stats.failed;
@@ -680,9 +695,6 @@  static int spinand_mtd_read(struct mtd_info *mtd, loff_t from,
 
 	mutex_unlock(&spinand->lock);
 
-	if (ecc_failed && !ret)
-		ret = -EBADMSG;
-
 	return ret ? ret : max_bitflips;
 }
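
A note on the final return statement: the positive value (the maximum number of
bitflips observed in any single page) is not handed to MTD users as-is. The MTD
core compares it against the device's bitflip threshold and reports either
success or -EUCLEAN. The snippet below is a simplified paraphrase of that logic
from mtd_read_oob() in drivers/mtd/mtdcore.c, wrapped in a made-up helper name
for illustration; it is not a verbatim copy of the core code.

/*
 * Simplified paraphrase of how mtd_read_oob() (drivers/mtd/mtdcore.c)
 * post-processes the positive return value coming from the driver.
 * The helper name is invented for this illustration.
 */
static int mtd_ecc_postprocess(struct mtd_info *mtd, int ret_code)
{
	if (ret_code < 0)
		return ret_code;	/* hard error, e.g. -EBADMSG */
	if (mtd->ecc_strength == 0)
		return 0;		/* device without ECC */
	/* Report "clean but worn" once bitflips reach the threshold. */
	return ret_code >= mtd->bitflip_threshold ? -EUCLEAN : 0;
}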