[MTD] nandsim: Enhance nandsim optionally to report wear information

Linux-MTD Mailing List linux-mtd at lists.infradead.org
Wed Apr 18 04:59:04 EDT 2007


Gitweb:     http://git.infradead.org/?p=mtd-2.6.git;a=commit;h=57aa6b545f6f772dd317ccd29bdada999b16a13d
Commit:     57aa6b545f6f772dd317ccd29bdada999b16a13d
Parent:     514087e74fb401a6621e8c836f4eaab87c269f24
Author:     Adrian Hunter <ext-adrian.hunter at nokia.com>
AuthorDate: Mon Mar 19 12:40:41 2007 +0200
Committer:  David Woodhouse <dwmw2 at infradead.org>
CommitDate: Tue Apr 17 13:55:29 2007 -0400

    [MTD] nandsim: Enhance nandsim optionally to report wear information
    
    A new module parameter 'rptwear' specifies the number of erases between
    wear reports.  Zero, the default, means wear is never reported.
    
    Signed-off-by: Adrian Hunter <ext-adrian.hunter at nokia.com>
    Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy at nokia.com>
    Signed-off-by: David Woodhouse <dwmw2 at infradead.org>
---
 drivers/mtd/nand/nandsim.c |   99 ++++++++++++++++++++++++++++++++++++++++++++
 1 files changed, 99 insertions(+), 0 deletions(-)
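
For illustration only: with this patch applied, wear reporting could be
enabled at module load time with something like "modprobe nandsim
rptwear=1000", which would print a wear report to the kernel log after
every 1000 erase operations; leaving rptwear at its default of 0 keeps
reporting disabled.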

diff --git a/drivers/mtd/nand/nandsim.c b/drivers/mtd/nand/nandsim.c
index 05b4207..1a44ef6 100644
--- a/drivers/mtd/nand/nandsim.c
+++ b/drivers/mtd/nand/nandsim.c
@@ -99,6 +99,7 @@ static char *weakblocks = NULL;
 static char *weakpages = NULL;
 static unsigned int bitflips = 0;
 static char *gravepages = NULL;
+static unsigned int rptwear = 0;
 
 module_param(first_id_byte,  uint, 0400);
 module_param(second_id_byte, uint, 0400);
@@ -119,6 +120,7 @@ module_param(weakblocks,     charp, 0400);
 module_param(weakpages,      charp, 0400);
 module_param(bitflips,       uint, 0400);
 module_param(gravepages,     charp, 0400);
+module_param(rptwear,        uint, 0400);
 
 MODULE_PARM_DESC(first_id_byte,  "The first byte returned by NAND Flash 'read ID' command (manufacturer ID)");
 MODULE_PARM_DESC(second_id_byte, "The second byte returned by NAND Flash 'read ID' command (chip ID)");
@@ -146,6 +148,7 @@ MODULE_PARM_DESC(bitflips,       "Maximum number of random bit flips per page (z
 MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (defaults to 3)]"
 				 " separated by commas e.g. 1401:2 means page 1401"
 				 " can be read only twice before failing");
+MODULE_PARM_DESC(rptwear,        "Number of erases between reporting wear, if not zero");
 
 /* The largest possible page size */
 #define NS_LARGEST_PAGE_SIZE	2048
@@ -162,6 +165,8 @@ MODULE_PARM_DESC(gravepages,     "Pages that lose data [: maximum reads (default
 	do { printk(KERN_WARNING NS_OUTPUT_PREFIX " warning: " args); } while(0)
 #define NS_ERR(args...) \
 	do { printk(KERN_ERR NS_OUTPUT_PREFIX " error: " args); } while(0)
+#define NS_INFO(args...) \
+	do { printk(KERN_INFO NS_OUTPUT_PREFIX " " args); } while(0)
 
 /* Busy-wait delay macros (microseconds, milliseconds) */
 #define NS_UDELAY(us) \
@@ -394,6 +399,11 @@ struct grave_page {
 
 static LIST_HEAD(grave_pages);
 
+static unsigned long *erase_block_wear = NULL;
+static unsigned int wear_eb_count = 0;
+static unsigned long total_wear = 0;
+static unsigned int rptwear_cnt = 0;
+
 /* MTD structure for NAND controller */
 static struct mtd_info *nsmtd;
 
@@ -801,6 +811,89 @@ static void free_lists(void)
 		list_del(pos);
 		kfree(list_entry(pos, struct grave_page, list));
 	}
+	kfree(erase_block_wear);
+}
+
+static int setup_wear_reporting(struct mtd_info *mtd)
+{
+	size_t mem;
+
+	if (!rptwear)
+		return 0;
+	wear_eb_count = mtd->size / mtd->erasesize;
+	mem = wear_eb_count * sizeof(unsigned long);
+	if (mem / sizeof(unsigned long) != wear_eb_count) {
+		NS_ERR("Too many erase blocks for wear reporting\n");
+		return -ENOMEM;
+	}
+	erase_block_wear = kzalloc(mem, GFP_KERNEL);
+	if (!erase_block_wear) {
+		NS_ERR("Too many erase blocks for wear reporting\n");
+		return -ENOMEM;
+	}
+	return 0;
+}
+
+static void update_wear(unsigned int erase_block_no)
+{
+	unsigned long wmin = -1, wmax = 0, avg;
+	unsigned long deciles[10], decile_max[10], tot = 0;
+	unsigned int i;
+
+	if (!erase_block_wear)
+		return;
+	total_wear += 1;
+	if (total_wear == 0)
+		NS_ERR("Erase counter total overflow\n");
+	erase_block_wear[erase_block_no] += 1;
+	if (erase_block_wear[erase_block_no] == 0)
+		NS_ERR("Erase counter overflow for erase block %u\n", erase_block_no);
+	rptwear_cnt += 1;
+	if (rptwear_cnt < rptwear)
+		return;
+	rptwear_cnt = 0;
+	/* Calc wear stats */
+	for (i = 0; i < wear_eb_count; ++i) {
+		unsigned long wear = erase_block_wear[i];
+		if (wear < wmin)
+			wmin = wear;
+		if (wear > wmax)
+			wmax = wear;
+		tot += wear;
+	}
+	for (i = 0; i < 9; ++i) {
+		deciles[i] = 0;
+		decile_max[i] = (wmax * (i + 1) + 5) / 10;
+	}
+	deciles[9] = 0;
+	decile_max[9] = wmax;
+	for (i = 0; i < wear_eb_count; ++i) {
+		int d;
+		unsigned long wear = erase_block_wear[i];
+		for (d = 0; d < 10; ++d)
+			if (wear <= decile_max[d]) {
+				deciles[d] += 1;
+				break;
+			}
+	}
+	avg = tot / wear_eb_count;
+	/* Output wear report */
+	NS_INFO("*** Wear Report ***\n");
+	NS_INFO("Total numbers of erases:  %lu\n", tot);
+	NS_INFO("Number of erase blocks:   %u\n", wear_eb_count);
+	NS_INFO("Average number of erases: %lu\n", avg);
+	NS_INFO("Maximum number of erases: %lu\n", wmax);
+	NS_INFO("Minimum number of erases: %lu\n", wmin);
+	for (i = 0; i < 10; ++i) {
+		unsigned long from = (i ? decile_max[i - 1] + 1 : 0);
+		if (from > decile_max[i])
+			continue;
+		NS_INFO("Number of ebs with erase counts from %lu to %lu : %lu\n",
+			from,
+			decile_max[i],
+			deciles[i]);
+	}
+	NS_INFO("*** End of Wear Report ***\n");
 }
 
 /*
@@ -1268,6 +1361,9 @@ static int do_state_action(struct nandsim *ns, uint32_t action)
 
 		NS_MDELAY(erase_delay);
 
+		if (erase_block_wear)
+			update_wear(erase_block_no);
+
 		if (erase_error(erase_block_no)) {
 			NS_WARN("simulating erase failure in erase block %u\n", erase_block_no);
 			return -1;
@@ -1903,6 +1999,9 @@ static int __init ns_init_module(void)
 		goto error;
 	}
 
+	if ((retval = setup_wear_reporting(nsmtd)) != 0)
+		goto err_exit;
+
 	if ((retval = init_nandsim(nsmtd)) != 0)
 		goto err_exit;
 

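The decile histogram that update_wear() prints follows from a few lines of
integer arithmetic; the stand-alone C sketch below (not part of the patch,
and using made-up erase counts purely for illustration) mirrors the same
boundary and bucket calculations so they can be tried in user space:

	#include <stdio.h>

	int main(void)
	{
		/* Hypothetical per-eraseblock erase counts. */
		unsigned long wear[] = { 3, 7, 7, 8, 10, 12, 12, 15, 40, 41 };
		unsigned int eb_count = sizeof(wear) / sizeof(wear[0]);
		unsigned long wmin = (unsigned long)-1, wmax = 0, tot = 0;
		unsigned long deciles[10] = { 0 }, decile_max[10];
		unsigned int i;

		/* Minimum, maximum and total, as update_wear() computes them. */
		for (i = 0; i < eb_count; ++i) {
			if (wear[i] < wmin)
				wmin = wear[i];
			if (wear[i] > wmax)
				wmax = wear[i];
			tot += wear[i];
		}

		/* Bucket boundaries: tenths of the maximum, rounded to nearest. */
		for (i = 0; i < 9; ++i)
			decile_max[i] = (wmax * (i + 1) + 5) / 10;
		decile_max[9] = wmax;

		/* Count how many erase blocks fall into each bucket. */
		for (i = 0; i < eb_count; ++i) {
			int d;
			for (d = 0; d < 10; ++d)
				if (wear[i] <= decile_max[d]) {
					deciles[d] += 1;
					break;
				}
		}

		printf("Total erases: %lu, blocks: %u, avg: %lu, max: %lu, min: %lu\n",
		       tot, eb_count, tot / eb_count, wmax, wmin);
		for (i = 0; i < 10; ++i) {
			unsigned long from = i ? decile_max[i - 1] + 1 : 0;
			if (from > decile_max[i])
				continue;	/* empty range, skipped as in the driver */
			printf("Erase counts %lu to %lu: %lu blocks\n",
			       from, decile_max[i], deciles[i]);
		}
		return 0;
	}

Each bucket boundary is one tenth of the observed maximum erase count,
rounded to the nearest integer, and a bucket is only reported when its
range is non-empty, which is why consecutive equal boundaries do not
produce duplicate report lines.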

