mtd/fs/jffs2 erase.c,1.76,1.77

joern at infradead.org
Thu Jul 14 10:11:21 EDT 2005


Update of /home/cvs/mtd/fs/jffs2
In directory phoenix.infradead.org:/home/joern/mtd/fs/jffs2

Modified Files:
	erase.c 
Log Message:
Cleanup attempt - any new bugs are mine.



Index: erase.c
===================================================================
RCS file: /home/cvs/mtd/fs/jffs2/erase.c,v
retrieving revision 1.76
retrieving revision 1.77
diff -u -r1.76 -r1.77
--- erase.c	3 May 2005 15:11:40 -0000	1.76
+++ erase.c	14 Jul 2005 14:11:18 -0000	1.77
@@ -48,7 +48,6 @@
 #else /* Linux */
 	struct erase_info *instr;
 
-	D1(printk(KERN_DEBUG "jffs2_erase_block(): erase block %#x (range %#x-%#x)\n", jeb->offset, jeb->offset, jeb->offset + c->sector_size));
 	instr = kmalloc(sizeof(struct erase_info) + sizeof(struct erase_priv_struct), GFP_KERNEL);
 	if (!instr) {
 		printk(KERN_WARNING "kmalloc for struct erase_info in jffs2_erase_block failed. Refiling block for later\n");
@@ -234,7 +233,7 @@
 			continue;
 		} 
 
-		if (SECTOR_ADDR((*prev)->flash_offset) == jeb->offset) {
+		if (((*prev)->flash_offset & ~(c->sector_size -1)) == jeb->offset) {
 			/* It's in the block we're erasing */
 			struct jffs2_raw_node_ref *this;
 
@@ -278,8 +277,11 @@
 		printk("\n");
 	});
 
-	if (ic->nodes == (void *)ic && ic->nlink == 0)
+	if (ic->nodes == (void *)ic) {
+		D1(printk(KERN_DEBUG "inocache for ino #%u is all gone now. Freeing\n", ic->ino));
 		jffs2_del_ino_cache(c, ic);
+		jffs2_free_inode_cache(ic);
+	}
 }
 
 static void jffs2_free_all_node_refs(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
@@ -300,15 +302,64 @@
 	jeb->last_node = NULL;
 }
 
+static int jffs2_block_is_erased(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb, uint32_t *bad_offset)
+{
+	void *ebuf;
+	uint32_t ofs;
+	size_t retlen;
+	int ret = 0;
+	
+	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+	if (!ebuf) {
+		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
+		return 1; /* XXX: Not sure who came up with this policy.  If we don't get memory, we just assume the block actually is erased */
+	}
+
+	D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
+	for (ofs = jeb->offset; ofs < jeb->offset + c->sector_size; ) {
+		uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
+		int i;
+
+		*bad_offset = ofs;
+
+		ret = jffs2_flash_read(c, ofs, readlen, &retlen, ebuf);
+		if (ret) {
+			printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
+			goto fail;
+		}
+		if (retlen != readlen) {
+			printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
+			goto fail;
+		}
+		for (i=0; i<readlen; i += sizeof(unsigned long)) {
+			/* It's OK. We know it's properly aligned */
+			unsigned long *datum = ebuf + i;
+			if (*datum + 1) {
+				*bad_offset += i;
+				printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", *datum, *bad_offset);
+				goto fail;
+			}
+		}
+		ofs += readlen;
+		cond_resched();
+	}
+	ret = 1;
+fail:
+	kfree(ebuf);
+	return ret;
+}
+
 static void jffs2_mark_erased_block(struct jffs2_sb_info *c, struct jffs2_eraseblock *jeb)
 {
 	struct jffs2_raw_node_ref *marker_ref = NULL;
-	unsigned char *ebuf;
 	size_t retlen;
 	int ret;
 	uint32_t bad_offset;
 
-	if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0)) {
+	if (! jffs2_block_is_erased(c, jeb, &bad_offset))
+		goto bad2;
+
+	if (!jffs2_cleanmarker_oob(c)) {
 		marker_ref = jffs2_alloc_raw_node_ref();
 		if (!marker_ref) {
 			printk(KERN_WARNING "Failed to allocate raw node ref for clean marker\n");
@@ -320,56 +371,6 @@
 			return;
 		}
 	}
-	ebuf = kmalloc(PAGE_SIZE, GFP_KERNEL);
-	if (!ebuf) {
-		printk(KERN_WARNING "Failed to allocate page buffer for verifying erase at 0x%08x. Assuming it worked\n", jeb->offset);
-	} else {
-		uint32_t ofs = jeb->offset;
-
-		D1(printk(KERN_DEBUG "Verifying erase at 0x%08x\n", jeb->offset));
-		while(ofs < jeb->offset + c->sector_size) {
-			uint32_t readlen = min((uint32_t)PAGE_SIZE, jeb->offset + c->sector_size - ofs);
-			int i;
-
-			bad_offset = ofs;
-
-			ret = c->mtd->read(c->mtd, ofs, readlen, &retlen, ebuf);
-
-			if (ret) {
-				printk(KERN_WARNING "Read of newly-erased block at 0x%08x failed: %d. Putting on bad_list\n", ofs, ret);
-				goto bad;
-			}
-			if (retlen != readlen) {
-				printk(KERN_WARNING "Short read from newly-erased block at 0x%08x. Wanted %d, got %zd\n", ofs, readlen, retlen);
-				goto bad;
-			}
-			for (i=0; i<readlen; i += sizeof(unsigned long)) {
-				/* It's OK. We know it's properly aligned */
-				unsigned long datum = *(unsigned long *)(&ebuf[i]);
-				if (datum + 1) {
-					bad_offset += i;
-					printk(KERN_WARNING "Newly-erased block contained word 0x%lx at offset 0x%08x\n", datum, bad_offset);
-				bad: 
-					if ((!jffs2_cleanmarker_oob(c)) && (c->cleanmarker_size > 0))
-						jffs2_free_raw_node_ref(marker_ref);
-					kfree(ebuf);
-				bad2:
-					spin_lock(&c->erase_completion_lock);
-					/* Stick it on a list (any list) so
-					   erase_failed can take it right off
-					   again.  Silly, but shouldn't happen
-					   often. */
-					list_add(&jeb->list, &c->erasing_list);
-					spin_unlock(&c->erase_completion_lock);
-					jffs2_erase_failed(c, jeb, bad_offset);
-					return;
-				}
-			}
-			ofs += readlen;
-			cond_resched();
-		}
-		kfree(ebuf);
-	}
 
 	bad_offset = jeb->offset;
 
@@ -386,13 +387,6 @@
 		jeb->used_size = 0;
 		jeb->dirty_size = 0;
 		jeb->wasted_size = 0;
-	} else if (c->cleanmarker_size == 0) {
-		jeb->first_node = jeb->last_node = NULL;
-
-		jeb->free_size = c->sector_size;
-		jeb->used_size = 0;
-		jeb->dirty_size = 0;
-		jeb->wasted_size = 0;
 	} else {
 		struct kvec vecs[1];
 		struct jffs2_unknown_node marker = {
@@ -444,5 +438,20 @@
 	c->nr_free_blocks++;
 	spin_unlock(&c->erase_completion_lock);
 	wake_up(&c->erase_wait);
+	return;
+
+bad_marker: 
+	if (!jffs2_cleanmarker_oob(c))
+		jffs2_free_raw_node_ref(marker_ref);
+bad2:
+	spin_lock(&c->erase_completion_lock);
+	/* Stick it on a list (any list) so
+	   erase_failed can take it right off
+	   again.  Silly, but shouldn't happen
+	   often. */
+	list_add(&jeb->list, &c->erasing_list);
+	spin_unlock(&c->erase_completion_lock);
+	jffs2_erase_failed(c, jeb, bad_offset);
+	return;
 }
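As an aside on the `if (*datum + 1)` test that the new jffs2_block_is_erased() inherits from the old inline loop: a flash word is considered clean only when it reads back as all ones, and adding 1 to `~0UL` wraps to zero, so any non-zero result means the erase did not take. A minimal userspace sketch of that check, with a toy buffer standing in for a freshly-erased block (buffer size and contents are made up for the demo):

	#include <stdio.h>
	#include <string.h>

	/* A word is fully erased iff every bit is set; then datum + 1 wraps to 0. */
	static int word_is_erased(unsigned long datum)
	{
		return (datum + 1) == 0;
	}

	int main(void)
	{
		unsigned long buf[4];
		size_t i;

		memset(buf, 0xff, sizeof(buf));	/* erased NOR flash reads back 0xff */
		buf[2] = 0x1234;		/* simulate a word the erase missed */

		for (i = 0; i < 4; i++)
			printf("word %zu: 0x%lx -> %s\n", i, buf[i],
			       word_is_erased(buf[i]) ? "erased" : "NOT erased");
		return 0;
	}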
 
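The other substantive change replaces SECTOR_ADDR() with an explicit `flash_offset & ~(c->sector_size - 1)` when deciding whether a node ref lives in the block being erased. The mask rounds an offset down to the start of its erase block and relies on the sector size being a power of two. A quick illustration with an assumed 64 KiB sector (the constant and the sample offset are invented; real values come from the MTD driver):

	#include <stdio.h>
	#include <stdint.h>

	#define SECTOR_SIZE 0x10000u	/* assumed 64 KiB erase block, power of two */

	/* Round a flash offset down to the start of the erase block containing it. */
	static uint32_t block_start(uint32_t flash_offset)
	{
		return flash_offset & ~(SECTOR_SIZE - 1);
	}

	int main(void)
	{
		uint32_t node = 0x0001c40c;	/* arbitrary node offset for the demo */

		printf("node at 0x%08x lives in block 0x%08x\n",
		       (unsigned)node, (unsigned)block_start(node));
		return 0;
	}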