mtd/fs/jffs2 wear_leveling.c, NONE, 1.1 Makefile.common, 1.11, 1.12 build.c, 1.86, 1.87 erase.c, 1.90, 1.91 gc.c, 1.158, 1.159 malloc.c, 1.32, 1.33 nodelist.h, 1.145, 1.146 nodemgmt.c, 1.130, 1.131 scan.c, 1.132, 1.133 wbuf.c, 1.107, 1.108

Forrest Zhao forrest.zhao at intel.com
Fri Nov 18 02:27:49 EST 2005


Update of /home/cvs/mtd/fs/jffs2
In directory phoenix.infradead.org:/tmp/cvs-serv20956/fs/jffs2

Modified Files:
	Makefile.common build.c erase.c gc.c malloc.c nodelist.h 
	nodemgmt.c scan.c wbuf.c 
Added Files:
	wear_leveling.c 
Log Message:
This patch eliminates the random nature of the wear-leveling (WL) mechanism in JFFS2
by the following approaches:
1. WL is triggered when the erase-count gap between erase blocks exceeds a predefined value (currently 1024).
2. When WL is triggered, garbage collection will pick an erase block with a lower erase count to garbage-collect.


--- NEW FILE wear_leveling.c ---
/*
 * JFFS2 -- Journalling Flash File System, Version 2.
 *
 * Copyright (C) 2005  Zhao Forrest <forrest.zhao at intel.com>
 *
 * For licensing information, see the file 'LICENCE' in this directory.
 *
 */

#include "nodelist.h"

/*
 * Insert @jeb into one of the erase-count hash tables.
 *
 * @flag selects the table: 1 == used blocks, 2 == free blocks; any
 * other value is silently ignored.  Buckets are indexed by
 * erase_count >> BUCKET_RANGE_BIT_LEN; blocks whose erase count maps
 * past the table (index >= HASH_SIZE) are not tracked.  The table's
 * cached minimum bucket index is lowered when this block lands in an
 * earlier bucket than the current minimum.
 */
void jffs2_add_to_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag)
{
	struct jffs2_blocks_bucket *table;
	uint32_t *min_index;
	uint32_t bucket;

	switch (flag) {
	case 1:
		table = c->used_blocks;
		min_index = &c->used_blocks_current_index;
		break;
	case 2:
		table = c->free_blocks;
		min_index = &c->free_blocks_current_index;
		break;
	default:
		return;
	}

	bucket = jeb->erase_count >> BUCKET_RANGE_BIT_LEN;
	if (bucket >= HASH_SIZE)
		return;

	if (bucket < *min_index)
		*min_index = bucket;

	table[bucket].number++;
	list_add_tail(&jeb->hash_list, &table[bucket].chain);
}

/*
 * Remove @jeb from the hash table selected by @flag (1 == used blocks,
 * 2 == free blocks; anything else is ignored).
 *
 * If the block's bucket becomes empty AND it was the cached minimum
 * bucket, the minimum is re-derived by scanning upward for the next
 * non-empty bucket; HASH_SIZE means "table is empty".
 *
 * Bug fix: the original code rescanned whenever *any* bucket was
 * emptied, which could overwrite the cached minimum with a higher
 * index while still-populated lower buckets existed (e.g. minimum at
 * bucket 2, emptying bucket 5 would jump the minimum past buckets
 * 2-4).  The rescan is now done only when the emptied bucket is the
 * current minimum.
 */
void jffs2_remove_from_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag)
{
	struct jffs2_blocks_bucket *hash_table;
	uint32_t index, *current_index_p, i;

	if (flag == 1) {
		hash_table = c->used_blocks;
		current_index_p = &(c->used_blocks_current_index);
	} else if (flag == 2) {
		hash_table = c->free_blocks;
		current_index_p = &(c->free_blocks_current_index);
	} else {
		return;
	}

	index = (jeb->erase_count >> BUCKET_RANGE_BIT_LEN);
	if (index >= HASH_SIZE) {
		/* Block was never tracked by jffs2_add_to_hash_table(). */
		return;
	}
	hash_table[index].number--;
	list_del(&jeb->hash_list);

	/* Only re-derive the minimum if we emptied the minimum bucket. */
	if (hash_table[index].number == 0 && index == *current_index_p) {
		for (i = index + 1; i < HASH_SIZE; i++) {
			if (hash_table[i].number != 0) {
				*current_index_p = i;
				break;
			}
		}
		if (i == HASH_SIZE) {
			/* No non-empty bucket left: table is empty. */
			*current_index_p = HASH_SIZE;
		}
	}
	return;
}

/*
 * Detach and return the free eraseblock from the lowest-erase-count
 * bucket, or NULL if the free-blocks hash table is empty.  The block
 * is unlinked from its c->free_list entry and from the free-blocks
 * hash table, and c->nr_free_blocks is decremented.
 */
struct jffs2_eraseblock *jffs2_get_free_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb;
	uint32_t min_index = c->free_blocks_current_index;

	/* HASH_SIZE as the cached index means nothing is tracked. */
	if (min_index == HASH_SIZE)
		return NULL;

	jeb = list_entry(c->free_blocks[min_index].chain.next,
			 struct jffs2_eraseblock, hash_list);
	list_del(&jeb->list);
	jffs2_remove_from_hash_table(c, jeb, 2);
	c->nr_free_blocks--;

	return jeb;
}

/*
 * Detach and return a used eraseblock from the lowest-erase-count
 * bucket, or NULL if the used-blocks hash table is empty.  The block
 * is unlinked from the c->list it sits on (clean/dirty/etc.) and from
 * the used-blocks hash table.
 */
struct jffs2_eraseblock *jffs2_get_used_block(struct jffs2_sb_info *c)
{
	struct jffs2_eraseblock *jeb;
	uint32_t min_index = c->used_blocks_current_index;

	/* HASH_SIZE as the cached index means nothing is tracked. */
	if (min_index == HASH_SIZE)
		return NULL;

	jeb = list_entry(c->used_blocks[min_index].chain.next,
			 struct jffs2_eraseblock, hash_list);
	list_del(&jeb->list);
	jffs2_remove_from_hash_table(c, jeb, 1);

	return jeb;
}


Index: Makefile.common
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/Makefile.common,v
retrieving revision 1.11
retrieving revision 1.12
diff -u -r1.11 -r1.12
--- Makefile.common	7 Sep 2005 08:34:53 -0000	1.11
+++ Makefile.common	18 Nov 2005 07:27:45 -0000	1.12
@@ -9,7 +9,7 @@
 jffs2-y	:= compr.o dir.o file.o ioctl.o nodelist.o malloc.o
 jffs2-y	+= read.o nodemgmt.o readinode.o write.o scan.o gc.o
 jffs2-y	+= symlink.o build.o erase.o background.o fs.o writev.o
-jffs2-y	+= super.o debug.o
+jffs2-y	+= super.o debug.o wear_leveling.o
 
 jffs2-$(CONFIG_JFFS2_FS_WRITEBUFFER)	+= wbuf.o
 jffs2-$(CONFIG_JFFS2_RUBIN)	+= compr_rubin.o

Index: build.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/build.c,v
retrieving revision 1.86
retrieving revision 1.87
diff -u -r1.86 -r1.87
--- build.c	11 Nov 2005 08:51:38 -0000	1.86
+++ build.c	18 Nov 2005 07:27:45 -0000	1.87
@@ -305,6 +305,19 @@
 		  c->nospc_dirty_size);
 }
 
+static void jffs2_init_hash_tables(struct jffs2_sb_info *c)
+{
+	int i;
+
+	for (i=0; i<HASH_SIZE; i++) {
+		INIT_LIST_HEAD(&(c->used_blocks[i].chain));
+		INIT_LIST_HEAD(&(c->free_blocks[i].chain));
+	}
+	c->used_blocks_current_index = HASH_SIZE;
+	c->free_blocks_current_index = HASH_SIZE;
+	return;
+}
+
 int jffs2_do_mount_fs(struct jffs2_sb_info *c)
 {
 	int ret;
@@ -330,6 +343,8 @@
 	c->highest_ino = 1;
 	c->summary = NULL;
 
+	jffs2_init_hash_tables(c);
+
 	ret = jffs2_sum_init(c);
 	if (ret) {
 		jffs2_free_eraseblocks(c);

Index: erase.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/erase.c,v
retrieving revision 1.90
retrieving revision 1.91
diff -u -r1.90 -r1.91
--- erase.c	18 Nov 2005 07:25:27 -0000	1.90
+++ erase.c	18 Nov 2005 07:27:45 -0000	1.91
@@ -446,6 +446,7 @@
 	jffs2_dbg_acct_paranoia_check_nolock(c, jeb);
 
 	list_add_tail(&jeb->list, &c->free_list);
+	jffs2_add_to_hash_table(c, jeb, 2);
 	c->nr_erasing_blocks--;
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);

Index: gc.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/gc.c,v
retrieving revision 1.158
retrieving revision 1.159
diff -u -r1.158 -r1.159
--- gc.c	13 Nov 2005 18:22:06 -0000	1.158
+++ gc.c	18 Nov 2005 07:27:45 -0000	1.159
@@ -45,6 +45,7 @@
 	struct jffs2_eraseblock *ret;
 	struct list_head *nextlist = NULL;
 	int n = jiffies % 128;
+	int flag = 0;
 
 	/* Pick an eraseblock to garbage collect next. This is where we'll
 	   put the clever wear-levelling algorithms. Eventually.  */
@@ -59,23 +60,28 @@
 		   So don't favour the erasable_list _too_ much. */
 		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next\n"));
 		nextlist = &c->erasable_list;
-	} else if (n < 110 && !list_empty(&c->very_dirty_list)) {
+	} else if (n < 112 && !list_empty(&c->very_dirty_list)) {
 		/* Most of the time, pick one off the very_dirty list */
 		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next\n"));
 		nextlist = &c->very_dirty_list;
-	} else if (n < 126 && !list_empty(&c->dirty_list)) {
+		flag = 1;
+	} else if (n < 128 && !list_empty(&c->dirty_list)) {
 		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next\n"));
 		nextlist = &c->dirty_list;
+		flag = 1;
 	} else if (!list_empty(&c->clean_list)) {
 		D1(printk(KERN_DEBUG "Picking block from clean_list to GC next\n"));
 		nextlist = &c->clean_list;
+		flag = 1;
 	} else if (!list_empty(&c->dirty_list)) {
 		D1(printk(KERN_DEBUG "Picking block from dirty_list to GC next (clean_list was empty)\n"));
 
 		nextlist = &c->dirty_list;
+		flag = 1;
 	} else if (!list_empty(&c->very_dirty_list)) {
 		D1(printk(KERN_DEBUG "Picking block from very_dirty_list to GC next (clean_list and dirty_list were empty)\n"));
 		nextlist = &c->very_dirty_list;
+		flag = 1;
 	} else if (!list_empty(&c->erasable_list)) {
 		D1(printk(KERN_DEBUG "Picking block from erasable_list to GC next (clean_list and {very_,}dirty_list were empty)\n"));
 
@@ -95,6 +101,9 @@
 
 	ret = list_entry(nextlist->next, struct jffs2_eraseblock, list);
 	list_del(&ret->list);
+	if (flag == 1) {
+		jffs2_remove_from_hash_table(c, ret, 1);
+	}
 	c->gcblock = ret;
 	ret->gc_node = ret->first_node;
 	if (!ret->gc_node) {
@@ -114,6 +123,51 @@
 	return ret;
 }
 
+static int jffs2_should_pick_used_block(struct jffs2_sb_info *c)
+{
+	static uint8_t seqno = 99;
+
+	if (((c->max_erase_count >> BUCKET_RANGE_BIT_LEN) - c->used_blocks_current_index)
+		<= WL_DELTA/BUCKET_RANGE) {
+		return 0;
+	}
+	seqno++;
+	if (seqno == 100) {
+		seqno = 0;
+		return 1;
+	}
+	return 0;
+}
+
+static struct jffs2_eraseblock *jffs2_find_gc_block_with_wl(struct jffs2_sb_info *c)
+{
+	struct jffs2_eraseblock *ret;
+
+	if (jffs2_should_pick_used_block(c)) {
+		ret = jffs2_get_used_block(c);
+		if (ret == NULL) {
+			return NULL;
+		}
+		c->gcblock = ret;
+		ret->gc_node = ret->first_node;
+		if (!ret->gc_node) {
+			printk(KERN_WARNING "Eep. ret->gc_node for block at 0x%08x is NULL\n", ret->offset);
+			BUG();
+		}
+		if (ret->wasted_size) {
+			D1(printk(KERN_DEBUG "Converting wasted_size %08x to dirty_size\n", ret->wasted_size));
+			ret->dirty_size += ret->wasted_size;
+			c->wasted_size -= ret->wasted_size;
+			c->dirty_size += ret->wasted_size;
+			ret->wasted_size = 0;
+		}
+	} else {
+		ret = jffs2_find_gc_block(c);
+	}
+
+	return ret;
+}
+
 /* jffs2_garbage_collect_pass
  * Make a single attempt to progress GC. Move one node, and possibly
  * start erasing one eraseblock.
@@ -209,7 +263,7 @@
 	jeb = c->gcblock;
 
 	if (!jeb)
-		jeb = jffs2_find_gc_block(c);
+		jeb = jffs2_find_gc_block_with_wl(c);
 
 	if (!jeb) {
 		D1 (printk(KERN_NOTICE "jffs2: Couldn't find erase block to garbage collect!\n"));

Index: malloc.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/malloc.c,v
retrieving revision 1.32
retrieving revision 1.33
diff -u -r1.32 -r1.33
--- malloc.c	11 Nov 2005 08:51:38 -0000	1.32
+++ malloc.c	18 Nov 2005 07:27:45 -0000	1.33
@@ -247,6 +247,7 @@
 	
 	for (i=0; i<c->nr_blocks; i++) {
 		INIT_LIST_HEAD(&c->blocks[i]->list);
+		INIT_LIST_HEAD(&c->blocks[i]->hash_list);
 		c->blocks[i]->offset = i * c->sector_size;
 		c->blocks[i]->free_size = c->sector_size;
 		c->blocks[i]->first_node = NULL;

Index: nodelist.h
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/nodelist.h,v
retrieving revision 1.145
retrieving revision 1.146
diff -u -r1.145 -r1.146
--- nodelist.h	18 Nov 2005 07:25:27 -0000	1.145
+++ nodelist.h	18 Nov 2005 07:27:45 -0000	1.146
@@ -183,6 +183,7 @@
 struct jffs2_eraseblock
 {
 	struct list_head list;
+	struct list_head hash_list;
 	uint16_t bad_count;
 	uint16_t flags;
 	uint32_t offset;		/* of this block in the MTD */
@@ -418,6 +419,12 @@
 int jffs2_write_nand_ebh(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb);
 #endif
 
+/* wear_leveling.c */
+void jffs2_add_to_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag);
+void jffs2_remove_from_hash_table(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint8_t flag);
+struct jffs2_eraseblock *jffs2_get_free_block(struct jffs2_sb_info *c);
+struct jffs2_eraseblock *jffs2_get_used_block(struct jffs2_sb_info *c);
+
 #include "debug.h"
 
 #endif /* __JFFS2_NODELIST_H__ */

Index: nodemgmt.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/nodemgmt.c,v
retrieving revision 1.130
retrieving revision 1.131
diff -u -r1.130 -r1.131
--- nodemgmt.c	11 Nov 2005 08:51:38 -0000	1.130
+++ nodemgmt.c	18 Nov 2005 07:27:45 -0000	1.131
@@ -188,6 +188,7 @@
 		  jeb->offset, jeb->free_size, jeb->dirty_size, jeb->used_size));
 		list_add_tail(&jeb->list, &c->clean_list);
 	}
+	jffs2_add_to_hash_table(c, jeb, 1);
 	c->nextblock = NULL;
 
 }
@@ -196,7 +197,6 @@
 
 static int jffs2_find_nextblock(struct jffs2_sb_info *c)
 {
-	struct list_head *next;
 
 	/* Take the next block off the 'free' list */
 
@@ -246,10 +246,7 @@
 		return -EAGAIN;
 	}
 
-	next = c->free_list.next;
-	list_del(next);
-	c->nextblock = list_entry(next, struct jffs2_eraseblock, list);
-	c->nr_free_blocks--;
+	c->nextblock = jffs2_get_free_block(c);
 
 	jffs2_sum_reset_collected(c->summary); /* reset collected summary */
 
@@ -455,6 +452,7 @@
 		}
 
 		list_add_tail(&jeb->list, &c->clean_list);
+		jffs2_add_to_hash_table(c, jeb, 1);
 		c->nextblock = NULL;
 	}
 	jffs2_dbg_acct_sanity_check_nolock(c,jeb);
@@ -597,6 +595,7 @@
 		} else {
 			D1(printk(KERN_DEBUG "Eraseblock at 0x%08x completely dirtied. Removing from (dirty?) list...\n", jeb->offset));
 			list_del(&jeb->list);
+			jffs2_remove_from_hash_table(c, jeb, 1);
 		}
 		if (jffs2_wbuf_dirty(c)) {
 			D1(printk(KERN_DEBUG "...and adding to erasable_pending_wbuf_list\n"));

Index: scan.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/scan.c,v
retrieving revision 1.132
retrieving revision 1.133
diff -u -r1.132 -r1.133
--- scan.c	18 Nov 2005 07:25:27 -0000	1.132
+++ scan.c	18 Nov 2005 07:27:45 -0000	1.133
@@ -150,6 +150,7 @@
 			if (!jeb->dirty_size) {
 				/* It's actually free */
 				list_add(&jeb->list, &c->free_list);
+				jffs2_add_to_hash_table(c, jeb, 2);
 				c->nr_free_blocks++;
 			} else {
 				/* Dirt */
@@ -162,6 +163,7 @@
 		case BLK_STATE_CLEAN:
 			/* Full (or almost full) of clean data. Clean list */
 			list_add(&jeb->list, &c->clean_list);
+			jffs2_add_to_hash_table(c, jeb, 1);
 			break;
 
 		case BLK_STATE_PARTDIRTY:
@@ -182,6 +184,7 @@
 					} else {
 						list_add(&c->nextblock->list, &c->dirty_list);
 					}
+					jffs2_add_to_hash_table(c, c->nextblock, 1);
 					/* deleting summary information of the old nextblock */
 					jffs2_sum_reset_collected(c->summary);
 				}
@@ -200,6 +203,7 @@
 				} else {
 					list_add(&jeb->list, &c->dirty_list);
 				}
+				jffs2_add_to_hash_table(c, jeb, 1);
 			}
 			break;
 

Index: wbuf.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/wbuf.c,v
retrieving revision 1.107
retrieving revision 1.108
diff -u -r1.107 -r1.108
--- wbuf.c	18 Nov 2005 07:25:27 -0000	1.107
+++ wbuf.c	18 Nov 2005 07:27:45 -0000	1.108
@@ -142,8 +142,10 @@
 	/* File the existing block on the bad_used_list.... */
 	if (c->nextblock == jeb)
 		c->nextblock = NULL;
-	else /* Not sure this should ever happen... need more coffee */
+	else { /* Not sure this should ever happen... need more coffee */
 		list_del(&jeb->list);
+		jffs2_remove_from_hash_table(c, jeb, 1);
+	}
 	if (jeb->first_node) {
 		D1(printk("Refiling block at %08x to bad_used_list\n", jeb->offset));
 		list_add(&jeb->list, &c->bad_used_list);





More information about the linux-mtd-cvs mailing list