UBI: fix ubi_wl_flush

Linux-MTD Mailing List linux-mtd at lists.infradead.org
Tue Jan 8 02:59:03 EST 2008


Gitweb:     http://git.infradead.org/?p=mtd-2.6.git;a=commit;h=593dd33c92c6529443d5df1350dc5cc76511232d
Commit:     593dd33c92c6529443d5df1350dc5cc76511232d
Parent:     458dbb3d07574e8fcdcb921ac155ccd81b16b05f
Author:     Artem Bityutskiy <Artem.Bityutskiy at nokia.com>
AuthorDate: Tue Dec 18 15:54:35 2007 +0200
Committer:  Artem Bityutskiy <Artem.Bityutskiy at nokia.com>
CommitDate: Wed Dec 26 19:15:16 2007 +0200

    UBI: fix ubi_wl_flush
    
    The flush function should finish all the pending jobs. But if
    somebody else is running a job at the same time, the flush
    function should wait for that job to finish as well.
    
    This patch uses an rw semaphore for synchronization purposes - it
    just looks quite convenient.
    
    Signed-off-by: Artem Bityutskiy <Artem.Bityutskiy at nokia.com>
---
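The locking pattern is easiest to see in isolation. Below is a minimal
userspace sketch of it, an analog rather than driver code: here
pthread_rwlock_t stands in for the kernel's rw_semaphore, a plain mutex
stands in for ubi->wl_lock, and the works queue is reduced to a bare
counter. The names do_work(), flush() and works_count are reused from
the patch only for readability.

#include <pthread.h>

static pthread_rwlock_t work_sem = PTHREAD_RWLOCK_INITIALIZER;
static pthread_mutex_t wl_lock = PTHREAD_MUTEX_INITIALIZER;
static int works_count = 16;	/* pretend 16 works are queued */

/*
 * Analog of do_work(): hold work_sem in read mode while one work is
 * executed, so any number of workers may run works concurrently.
 */
static int do_work(void)
{
	pthread_rwlock_rdlock(&work_sem);
	pthread_mutex_lock(&wl_lock);
	if (works_count == 0) {
		pthread_mutex_unlock(&wl_lock);
		pthread_rwlock_unlock(&work_sem);
		return 0;
	}
	works_count -= 1;
	pthread_mutex_unlock(&wl_lock);
	/* ... the body of the work would run here, without wl_lock ... */
	pthread_rwlock_unlock(&work_sem);
	return 0;
}

/*
 * Analog of ubi_wl_flush(): drain the queue, then take work_sem in
 * write mode. The write lock is granted only when no worker holds the
 * read lock, so the empty lock/unlock pair waits out in-flight works.
 * The unlocked reads of works_count mirror the kernel code.
 */
static int flush(void)
{
	int err;

	while (works_count)
		if ((err = do_work()))
			return err;

	pthread_rwlock_wrlock(&work_sem);
	pthread_rwlock_unlock(&work_sem);

	/*
	 * Works finished meanwhile may have queued new ones (in UBI, a
	 * cancelled LEB move schedules an erasure), hence flush again.
	 */
	while (works_count)
		if ((err = do_work()))
			return err;
	return 0;
}
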
 drivers/mtd/ubi/ubi.h |    1 +
 drivers/mtd/ubi/wl.c  |   39 ++++++++++++++++++++++++++++++++-------
 2 files changed, 33 insertions(+), 7 deletions(-)

diff --git a/drivers/mtd/ubi/ubi.h b/drivers/mtd/ubi/ubi.h
index ea9a699..994233d 100644
--- a/drivers/mtd/ubi/ubi.h
+++ b/drivers/mtd/ubi/ubi.h
@@ -355,6 +355,7 @@ struct ubi_device {
 	} prot;
 	spinlock_t wl_lock;
 	struct mutex move_mutex;
+	struct rw_semaphore work_sem;
 	int wl_scheduled;
 	struct ubi_wl_entry **lookuptbl;
 	unsigned long long abs_ec;
diff --git a/drivers/mtd/ubi/wl.c b/drivers/mtd/ubi/wl.c
index a60f942..8421c7a 100644
--- a/drivers/mtd/ubi/wl.c
+++ b/drivers/mtd/ubi/wl.c
@@ -251,10 +251,18 @@ static int do_work(struct ubi_device *ubi)
 
 	cond_resched();
 
+	/*
+	 * @ubi->work_sem is used to synchronize with the workers. Workers take
+	 * it in read mode, so many of them may be doing works at a time. But
+	 * the queue flush code has to be sure the whole queue of works is
+	 * done, so it takes the semaphore in write mode.
+	 */
+	down_read(&ubi->work_sem);
 	spin_lock(&ubi->wl_lock);
 
 	if (list_empty(&ubi->works)) {
 		spin_unlock(&ubi->wl_lock);
+		up_read(&ubi->work_sem);
 		return 0;
 	}
 
@@ -275,6 +283,7 @@ static int do_work(struct ubi_device *ubi)
 	ubi->works_count -= 1;
 	ubi_assert(ubi->works_count >= 0);
 	spin_unlock(&ubi->wl_lock);
+	up_read(&ubi->work_sem);
 	return err;
 }
 
@@ -1173,7 +1182,7 @@ retry:
 		 * the WL unit has not put the PEB to the "used" tree yet, but
 		 * it is about to do this. So we just set a flag which will
 		 * tell the WL worker that the PEB is not needed anymore and
-		 * should be sheduled for erasure.
+		 * should be scheduled for erasure.
 		 */
 		dbg_wl("PEB %d is the target of data moving", pnum);
 		ubi_assert(!ubi->move_to_put);
@@ -1280,17 +1289,32 @@ retry:
  */
 int ubi_wl_flush(struct ubi_device *ubi)
 {
-	int err, pending_count;
-
-	pending_count = ubi->works_count;
-
-	dbg_wl("flush (%d pending works)", pending_count);
+	int err;
 
 	/*
 	 * Erase while the pending works queue is not empty, but not more than
 	 * the number of currently pending works.
 	 */
-	while (pending_count-- > 0) {
+	dbg_wl("flush (%d pending works)", ubi->works_count);
+	while (ubi->works_count) {
+		err = do_work(ubi);
+		if (err)
+			return err;
+	}
+
+	/*
+	 * Make sure all the works which are being executed in parallel
+	 * have finished.
+	 */
+	down_write(&ubi->work_sem);
+	up_write(&ubi->work_sem);
+
+	/*
+	 * And in case the last work was the WL worker cancelling an LEB
+	 * movement (which schedules a new erase work), flush again.
+	 */
+	while (ubi->works_count) {
+		dbg_wl("flush more (%d pending works)", ubi->works_count);
 		err = do_work(ubi);
 		if (err)
 			return err;
@@ -1426,6 +1450,7 @@ int ubi_wl_init_scan(struct ubi_device *ubi, struct ubi_scan_info *si)
 	ubi->prot.pnum = ubi->prot.aec = RB_ROOT;
 	spin_lock_init(&ubi->wl_lock);
 	mutex_init(&ubi->move_mutex);
+	init_rwsem(&ubi->work_sem);
 	ubi->max_ec = si->max_ec;
 	INIT_LIST_HEAD(&ubi->works);
 
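The sketch can be exercised with a small harness; worker(), main() and
NWORKERS below are illustrative additions, not anything from the patch.
Built together with the sketch above (gcc -pthread), flush() returns
only once the queue is empty and no worker still holds work_sem in
read mode.

#include <stdio.h>

#define NWORKERS 4

static void *worker(void *arg)
{
	(void)arg;
	while (works_count)	/* unlocked read, as in the sketch */
		do_work();
	return NULL;
}

int main(void)
{
	pthread_t tid[NWORKERS];
	int i;

	for (i = 0; i < NWORKERS; i++)
		pthread_create(&tid[i], NULL, worker, NULL);

	flush();	/* blocks until all in-flight works are done */
	printf("flushed, %d works pending\n", works_count);

	for (i = 0; i < NWORKERS; i++)
		pthread_join(tid[i], NULL);
	return 0;
}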
