[PATCH] [MTD] [UBI] add block device layer on top of UBI

Nancy nancydreaming at gmail.com
Tue May 13 00:16:36 EDT 2008


Hi all,
       I think this is the final version of my UBI block device layer
code, based on UBI commit e442c48f84982d0fa10c6b292018241dafca4d65.
       It can now support any filesystem that runs on the block device
layer, e.g. FAT, ext2, ...
       #modprobe ubiblk
       #mkfs.vfat /dev/ubiblock1
       #mount -t vfat /dev/ubiblock1 /mnt/fat
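       (This assumes the UBI volume already exists. On my board I attach
the MTD device and create the volume first with something like the
following, where the MTD number, volume name and size are only examples:)
       #ubiattach /dev/ubi_ctrl -m 0
       #ubimkvol /dev/ubi0 -N testvol -s 16MiB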

       I have noticed that many people need this feature, especially
users here in China. I hope this is helpful :-)
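       For anyone curious how a 512-byte sector ends up in a LEB, the
sketch below mirrors the arithmetic of do_cached_write()/do_cached_read()
in ubiblk.c (leb_size and min_io_size come from the attached UBI device;
the helper name is mine, it does not appear in the patch):

static void sector_to_leb(unsigned long sector, int leb_size,
			  int min_io_size, int *leb, int *offset)
{
	int sectors_per_page = min_io_size / 512;
	int pages_per_leb = leb_size / min_io_size;
	unsigned long virt_page = sector / sectors_per_page;

	*leb = virt_page / pages_per_leb;
	*offset = (virt_page % pages_per_leb) * min_io_size +
		  (sector % sectors_per_page) * 512;
}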
       Here's my implementation:

diff -uprBN ../ubi/bdev.c ubi/bdev.c
--- ../ubi/bdev.c	1970-01-01 08:00:00.000000000 +0800
+++ ubi/bdev.c	2008-05-13 11:26:28.000000000 +0800
@@ -0,0 +1,432 @@
+/*
+ *
+ * (C) 2003 David Woodhouse <dwmw2 at infradead.org>
+ *
+ * Interface to Linux 2.5 block layer for UBI 'translation layers'.
+ *
+ * 2008 Yurong Tan <nancydreaming at gmail.com>:
+ *      borrowed from mtd_blkdevs.c to build a block device layer on top of UBI
+ *
+ */
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/freezer.h>
+#include <linux/spinlock.h>
+#include <linux/hdreg.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <linux/kthread.h>
+#include <asm/uaccess.h>
+#include "ubi.h"
+#include "ubiblk.h"
+
+static LIST_HEAD(blktrans_majors);
+extern struct mutex vol_table_mutex;
+extern struct ubi_volume *vol_table[];
+
+extern void register_vol_user (struct vol_notifier *new);
+extern int unregister_vol_user (struct vol_notifier *old);
+extern int ubi_major2num(int major);
+
+struct ubi_blkcore_priv {
+	struct task_struct *thread;
+	struct request_queue *rq;
+	spinlock_t queue_lock;
+};
+
+static int do_blktrans_request(struct ubi_blktrans_ops *tr,
+			       struct ubi_blktrans_dev *dev,
+			       struct request *req)
+{
+	unsigned long block, nsect;
+	char *buf;
+
+	block = req->sector << 9 >> tr->blkshift;
+	nsect = req->current_nr_sectors << 9 >> tr->blkshift;
+	buf = req->buffer;
+
+	if (!blk_fs_request(req))
+		return 0;
+
+	if (req->sector + req->current_nr_sectors > get_capacity(req->rq_disk))
+		return 0;
+
+	switch(rq_data_dir(req)) {
+	case READ:
+		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+			if (tr->readsect(dev, block, buf))
+				return 0;
+		return 1;
+
+	case WRITE:
+		if (!tr->writesect)
+			return 0;
+
+		for (; nsect > 0; nsect--, block++, buf += tr->blksize)
+			if (tr->writesect(dev, block, buf))
+				return 0;
+		return 1;
+		
+	default:
+		printk(KERN_NOTICE "Unknown request %u\n", rq_data_dir(req));
+		return 0;		
+	}
+}
+
+static int ubi_blktrans_thread(void *arg)
+{
+	struct ubi_blktrans_ops *tr = arg;
+	struct request_queue *rq = tr->blkcore_priv->rq;
+
+	/* we might get involved when memory gets low, so use PF_MEMALLOC */
+	current->flags |= PF_MEMALLOC;
+
+	spin_lock_irq(rq->queue_lock);
+	while (!kthread_should_stop()) {
+		struct request *req;
+		struct ubi_blktrans_dev *dev;
+		int res = 0;
+		
+		req = elv_next_request(rq);
+		
+		if (!req) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			spin_unlock_irq(rq->queue_lock);
+			schedule();
+			spin_lock_irq(rq->queue_lock);
+			continue;
+		}
+		dev = req->rq_disk->private_data;
+		tr = dev->tr;
+
+		spin_unlock_irq(rq->queue_lock);
+		mutex_lock(&dev->lock);
+		res = do_blktrans_request(tr, dev, req);
+		mutex_unlock(&dev->lock);
+		spin_lock_irq(rq->queue_lock);
+
+		end_request(req, res);
+	}
+	spin_unlock_irq(rq->queue_lock);
+
+	return 0;
+}
+
+static void ubi_blktrans_request(struct request_queue *rq)
+{
+	struct ubi_blktrans_ops *tr = rq->queuedata;
+	wake_up_process(tr->blkcore_priv->thread);
+}
+
+static int blktrans_open(struct inode *i, struct file *f)
+{
+	struct ubi_blktrans_dev *dev;
+	struct ubi_blktrans_ops *tr;
+	int ret;
+
+	dev = i->i_bdev->bd_disk->private_data;
+	tr = dev->tr;
+
+	if (!try_module_get(tr->owner))
+		return -ENODEV;
+
+	if (!tr->open) {
+		module_put(tr->owner);
+		return -ENODEV;
+	}
+
+	ret = tr->open(i, f);
+	if (ret)
+		module_put(tr->owner);
+
+	return ret;
+}
+
+static int blktrans_release(struct inode *i, struct file *f)
+{
+	struct ubi_blktrans_dev *dev;
+	struct ubi_blktrans_ops *tr;
+	int ret = 0;
+
+	dev = i->i_bdev->bd_disk->private_data;
+	tr = dev->tr;
+
+	if (tr->release)
+		ret = tr->release(dev);
+
+	module_put(tr->owner);
+	return ret;
+}
+
+static int blktrans_getgeo(struct block_device *bdev, struct hd_geometry *geo)
+{
+	struct ubi_blktrans_dev *dev = bdev->bd_disk->private_data;
+
+	if (dev->tr->getgeo)
+		return dev->tr->getgeo(dev, geo);
+	return -ENOTTY;
+}
+
+static int blktrans_ioctl(struct inode *inode, struct file *file,
+			      unsigned int cmd, unsigned long arg)
+{
+	struct ubi_blktrans_dev *dev = inode->i_bdev->bd_disk->private_data;
+	struct ubi_blktrans_ops *tr = dev->tr;
+
+	switch (cmd) {
+	case BLKFLSBUF:
+		if (tr->flush)
+			return tr->flush(dev);
+		/* The core code did the work, we had nothing to do. */
+		return 0;
+	default:
+		return -ENOTTY;
+	}
+}
+
+struct block_device_operations ubi_blktrans_ops = {
+	.owner		= THIS_MODULE,
+	.open		= blktrans_open,
+	.release	= blktrans_release,
+	.ioctl		= blktrans_ioctl,
+	.getgeo		= blktrans_getgeo,
+};
+
+int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new)
+{
+	struct ubi_blktrans_ops *tr = new->tr;
+	struct list_head *this;
+	int last_devnum = -1;
+	struct gendisk *gd;
+	
+	if (mutex_trylock(&vol_table_mutex)) {
+		mutex_unlock(&vol_table_mutex);
+		BUG();
+	}
+
+	list_for_each(this, &tr->devs) {
+		struct ubi_blktrans_dev *d = list_entry(this, struct ubi_blktrans_dev, list);
+		if (new->devnum == -1) {
+			/* Use first free number */
+			if (d->devnum != last_devnum+1) {
+				/* Found a free devnum. Plug it in here */
+				new->devnum = last_devnum+1;
+				list_add_tail(&new->list, &d->list);
+				goto added;
+			}
+		} else if (d->devnum == new->devnum) {
+			/* Required number taken */
+			return -EBUSY;
+		} else if (d->devnum > new->devnum) {
+			/* Required number was free */
+			list_add_tail(&new->list, &d->list);
+			goto added;
+		}
+		last_devnum = d->devnum;
+	}
+	if (new->devnum == -1)
+		new->devnum = last_devnum+1;
+
+	if ((new->devnum << tr->part_bits) > 256) {
+		return -EBUSY;
+	}
+
+	mutex_init(&new->lock);
+	list_add_tail(&new->list, &tr->devs);
+ added:
+	if (!tr->writesect)
+		new->readonly = 1;
+
+	gd = alloc_disk(1 << tr->part_bits);
+	if (!gd) {
+		list_del(&new->list);
+		return -ENOMEM;
+	}
+	gd->major = tr->major;
+	gd->first_minor = (new->devnum) << tr->part_bits;
+	gd->fops = &ubi_blktrans_ops;
+
+	if (tr->part_bits)
+		if (new->devnum < 26)
+			snprintf(gd->disk_name, sizeof(gd->disk_name),
+				 "%s%c", tr->name, 'a' + new->devnum);
+		else
+			snprintf(gd->disk_name, sizeof(gd->disk_name),
+				 "%s%c%c", tr->name,
+				 'a' - 1 + new->devnum / 26,
+				 'a' + new->devnum % 26);
+	else
+		snprintf(gd->disk_name, sizeof(gd->disk_name),
+			 "%s%d", tr->name, new->devnum);
+
+	/* 2.5 has capacity in units of 512 bytes while still
+	   having BLOCK_SIZE_BITS set to 10. Just to keep us amused. */
+	set_capacity(gd, (new->size * tr->blksize) >> 9);
+
+	gd->private_data = new;
+	new->blkcore_priv = gd;
+	gd->queue = tr->blkcore_priv->rq;
+
+	if (new->readonly)
+		set_disk_ro(gd, 1);
+
+	add_disk(gd);
+
+	return 0;
+}
+
+int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old)
+{
+	if (mutex_trylock(&vol_table_mutex)) {
+		mutex_unlock(&vol_table_mutex);
+		BUG();
+	}
+
+	list_del(&old->list);
+
+	del_gendisk(old->blkcore_priv);
+	put_disk(old->blkcore_priv);
+
+	return 0;
+}
+
+static void blktrans_notify_remove(struct ubi_volume *vol)
+{
+	struct list_head *this, *this2, *next;
+
+	list_for_each(this, &blktrans_majors) {
+		struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);
+
+		list_for_each_safe(this2, next, &tr->devs) {
+			struct ubi_blktrans_dev *dev = list_entry(this2, struct ubi_blktrans_dev, list);
+
+			if (dev->uv->vol == vol)
+				tr->remove_vol(dev);
+		}
+	}
+}
+
+static void blktrans_notify_add(struct ubi_volume *vol)
+{
+	struct list_head *this;
+
+	list_for_each(this, &blktrans_majors) {
+		struct ubi_blktrans_ops *tr = list_entry(this, struct ubi_blktrans_ops, list);
+
+		tr->add_vol(tr, vol);
+	}
+
+}
+
+static struct vol_notifier blktrans_notifier = {
+	.add = blktrans_notify_add,
+	.remove = blktrans_notify_remove,
+};
+
+
+int register_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+	int i;
+
+	/* Register the notifier if/when the first device type is
+	   registered, to prevent the link/init ordering from fucking
+	   us over. */
+	if (!blktrans_notifier.list.next)
+		register_vol_user(&blktrans_notifier);
+
+	tr->blkcore_priv = kzalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
+	if (!tr->blkcore_priv)
+		return -ENOMEM;
+
+	mutex_lock(&vol_table_mutex);
+	tr->major = register_blkdev(0, tr->name);
+	if (tr->major < 0) {
+		kfree(tr->blkcore_priv);
+		mutex_unlock(&vol_table_mutex);
+		return tr->major;
+	}
+	spin_lock_init(&tr->blkcore_priv->queue_lock);
+
+	tr->blkcore_priv->rq = blk_init_queue(ubi_blktrans_request,
+					      &tr->blkcore_priv->queue_lock);
+	if (!tr->blkcore_priv->rq) {
+		unregister_blkdev(tr->major, tr->name);
+		kfree(tr->blkcore_priv);
+		mutex_unlock(&vol_table_mutex);
+		return -ENOMEM;
+	}
+
+	tr->blkcore_priv->rq->queuedata = tr;
+	blk_queue_hardsect_size(tr->blkcore_priv->rq, tr->blksize);
+	tr->blkshift = ffs(tr->blksize) - 1;
+
+	tr->blkcore_priv->thread = kthread_run(ubi_blktrans_thread, tr,
+			"%sd", tr->name);
+	if (IS_ERR(tr->blkcore_priv->thread)) {
+		blk_cleanup_queue(tr->blkcore_priv->rq);
+		unregister_blkdev(tr->major, tr->name);
+		kfree(tr->blkcore_priv);
+		mutex_unlock(&vol_table_mutex);
+		return PTR_ERR(tr->blkcore_priv->thread);
+	}
+
+	INIT_LIST_HEAD(&tr->devs);
+	list_add(&tr->list, &blktrans_majors);
+
+	for (i = 0; i < UBI_MAX_VOLUMES; i++) {
+		if (vol_table[i])
+			tr->add_vol(tr, vol_table[i]);
+	}
+	
+	mutex_unlock(&vol_table_mutex);
+	return 0;
+}
+
+int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr)
+{
+	struct list_head *this, *next;
+
+	mutex_lock(&vol_table_mutex);
+
+	/* Clean up the kernel thread */
+	kthread_stop(tr->blkcore_priv->thread);
+
+	/* Remove it from the list of active majors */
+	list_del(&tr->list);
+
+	list_for_each_safe(this, next, &tr->devs) {
+		struct ubi_blktrans_dev *dev = list_entry(this, struct ubi_blktrans_dev, list);
+		tr->remove_vol(dev);
+	}
+
+	blk_cleanup_queue(tr->blkcore_priv->rq);
+	unregister_blkdev(tr->major, tr->name);
+
+	mutex_unlock(&vol_table_mutex);
+
+	kfree(tr->blkcore_priv);
+
+	BUG_ON(!list_empty(&tr->devs));
+	return 0;
+}
+
+static void __exit ubi_blktrans_exit(void)
+{
+	/* No race here -- if someone's currently in register_ubi_blktrans
+	   we're screwed anyway. */
+	if (blktrans_notifier.list.next)
+		unregister_vol_user(&blktrans_notifier);
+}
+
+
+module_exit(ubi_blktrans_exit);
+
+EXPORT_SYMBOL_GPL(register_ubi_blktrans);
+EXPORT_SYMBOL_GPL(deregister_ubi_blktrans);
+EXPORT_SYMBOL_GPL(add_ubi_blktrans_dev);
+EXPORT_SYMBOL_GPL(del_ubi_blktrans_dev);
+
+MODULE_AUTHOR("David Woodhouse <dwmw2 at infradead.org>, Yurong Tan
<nancydreaming at gmail.com>");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Common interface to block layer for UBI
'translation layers'");
+
diff -uprBN ../ubi/build.c ubi/build.c
--- ../ubi/build.c	2008-05-09 07:27:46.000000000 +0800
+++ ubi/build.c	2008-05-13 11:29:55.000000000 +0800
@@ -18,6 +18,7 @@
  *
  * Author: Artem Bityutskiy (Битюцкий Артём),
  *         Frank Haverkamp
+ *         Yurong Tan (Nancy)
  */

 /*
@@ -46,6 +47,113 @@
 /* Maximum length of the 'mtd=' parameter */
 #define MTD_PARAM_LEN_MAX 64

+/* added by Nancy: begin */
+DEFINE_MUTEX(vol_table_mutex);
+struct ubi_volume *vol_table[UBI_MAX_VOLUMES];
+
+EXPORT_SYMBOL_GPL(vol_table_mutex);
+EXPORT_SYMBOL_GPL(vol_table);
+
+static LIST_HEAD(vol_notifiers);
+
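+/*
+ * Called when a new UBI volume appears: record it in vol_table and
+ * notify every registered vol_notifier. This mirrors add_mtd_device()
+ * in the MTD core.
+ */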
+int add_vol_device(struct ubi_volume *vol)
+{
+	mutex_lock(&vol_table_mutex);
+	if (!vol_table[vol->vol_id]) {
+		struct list_head *this;
+
+		vol_table[vol->vol_id] = vol;
+		/* No need to get a refcount on the module containing
+		   the notifier, since we hold the vol_table_mutex */
+		list_for_each(this, &vol_notifiers) {
+			struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+			not->add(vol);
+		}
+		mutex_unlock(&vol_table_mutex);
+		/* We _know_ we aren't being removed, because
+		   our caller is still holding us here. So none
+		   of this try_ nonsense, and no bitching about it
+		   either. :) */
+		return 0;
+	}
+	mutex_unlock(&vol_table_mutex);
+	return 1;
+}
+
+int del_vol_device (struct ubi_volume *vol)
+{
+	int ret;
+	struct list_head *this;
+
+	mutex_lock(&vol_table_mutex);
+	if (vol_table[vol->vol_id] != vol) {
+		ret = -ENODEV;
+	} else if (vol->readers || vol->writers || vol->exclusive) {
+		printk(KERN_NOTICE "Removing UBI volume #%d (%s) while it is in use\n",
+		       vol->vol_id, vol->name);
+		ret = -EBUSY;
+	} else {
+		/* No need to get a refcount on the module containing
+		   the notifier, since we hold the vol_table_mutex */
+		list_for_each(this, &vol_notifiers) {
+			struct vol_notifier *not = list_entry(this, struct vol_notifier, list);
+			not->remove(vol);
+		}
+
+		vol_table[vol->vol_id] = NULL;
+		module_put(THIS_MODULE);
+		ret = 0;
+	}
+	mutex_unlock(&vol_table_mutex);
+	return ret;
+}
+
+void register_vol_user(struct vol_notifier *new)
+{
+	int i;
+
+	mutex_lock(&vol_table_mutex);
+	list_add(&new->list, &vol_notifiers);
+	__module_get(THIS_MODULE);
+
+	for (i = 0; i < UBI_MAX_VOLUMES; i++)
+		if (vol_table[i])
+			new->add(vol_table[i]);
+
+	mutex_unlock(&vol_table_mutex);
+}
+
+int unregister_vol_user(struct vol_notifier *old)
+{
+	int i;
+
+	mutex_lock(&vol_table_mutex);
+	module_put(THIS_MODULE);
+
+	for (i = 0; i < UBI_MAX_VOLUMES; i++)
+		if (vol_table[i])
+			old->remove(vol_table[i]);
+
+	list_del(&old->list);
+	mutex_unlock(&vol_table_mutex);
+	return 0;
+}
+
+static int bdev_init(struct ubi_device *ubi)
+{
+	int i;
+
+	for (i = 0; i < ubi->vtbl_slots; i++)
+		if (ubi->volumes[i])
+			add_vol_device(ubi->volumes[i]);
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(add_vol_device);
+EXPORT_SYMBOL_GPL(del_vol_device);
+EXPORT_SYMBOL_GPL(register_vol_user);
+EXPORT_SYMBOL_GPL(unregister_vol_user);
+/* added by Nancy: end */
+
+
 /**
  * struct mtd_dev_param - MTD device parameter description data structure.
  * @name: MTD device name or number string
@@ -84,6 +192,7 @@ DEFINE_MUTEX(ubi_devices_mutex);

 /* Protects @ubi_devices and @ubi->ref_count */
 static DEFINE_SPINLOCK(ubi_devices_lock);
+EXPORT_SYMBOL_GPL(ubi_devices_lock);

 /* "Show" method for files in '/<sysfs>/class/ubi/' */
 static ssize_t ubi_version_show(struct class *class, char *buf)
@@ -204,7 +313,8 @@ int ubi_major2num(int major)
 	for (i = 0; i < UBI_MAX_DEVICES; i++) {
 		struct ubi_device *ubi = ubi_devices[i];

-		if (ubi && MAJOR(ubi->cdev.dev) == major) {
+		if ( (ubi && MAJOR(ubi->cdev.dev) == major) ||
+			(ubi && ubi->bdev_major == major)) {
 			ubi_num = ubi->ubi_num;
 			break;
 		}
@@ -213,6 +323,7 @@ int ubi_major2num(int major)

 	return ubi_num;
 }
+EXPORT_SYMBOL_GPL(ubi_major2num);

 /* "Show" method for files in '/<sysfs>/class/ubi/ubiX/' */
 static ssize_t dev_attribute_show(struct device *dev,
@@ -763,7 +874,8 @@ int ubi_attach_mtd_dev(struct mtd_info *
 	mutex_init(&ubi->volumes_mutex);
 	spin_lock_init(&ubi->volumes_lock);

-	ubi_msg("attaching mtd%d to ubi%d", mtd->index, ubi_num);
+	dbg_msg("attaching mtd%d to ubi%d: VID header offset %d",
+		mtd->index, ubi_num, vid_hdr_offset);

 	err = io_init(ubi);
 	if (err)
@@ -800,6 +912,10 @@ int ubi_attach_mtd_dev(struct mtd_info *
 	if (err)
 		goto out_detach;

+	err = bdev_init(ubi);
+	if(err)
+		goto out_detach;
+
 	ubi->bgt_thread = kthread_create(ubi_thread, ubi, ubi->bgt_name);
 	if (IS_ERR(ubi->bgt_thread)) {
 		err = PTR_ERR(ubi->bgt_thread);
diff -uprBN ../ubi/cdev.c ubi/cdev.c
--- ../ubi/cdev.c	2008-05-09 07:27:46.000000000 +0800
+++ ubi/cdev.c	2008-05-13 11:51:29.000000000 +0800
@@ -411,6 +411,47 @@ static int vol_cdev_ioctl(struct inode *
 	void __user *argp = (void __user *)arg;

 	switch (cmd) {
+	/* LEB dump command */
+	case UBI_IOCLEBDP:
+	{
+		struct ubi_leb_dump dp;
+		int pnum;
+		char *lebbuf;
+
+		if (copy_from_user(&dp, argp, sizeof(struct ubi_leb_dump))){
+			err = -EFAULT;
+			break;
+		}
+		
+		if (dp.lnum < 0 || dp.lnum >= vol->reserved_pebs) {
+			err = -EINVAL;
+			break;
+		}
+
+		pnum = vol->eba_tbl[dp.lnum];
+		if (pnum < 0) {
+			/* The LEB is not mapped, nothing to dump */
+			err = 1;
+			break;
+		}
+
+		lebbuf = kmalloc(vol->ubi->leb_size, GFP_KERNEL);
+		if (!lebbuf) {
+			err = -ENOMEM;
+			break;
+		}
+
+		err = ubi_eba_read_leb(ubi, vol, dp.lnum, lebbuf, 0,
+				       vol->ubi->leb_size, 0);
+		if (err) {
+			kfree(lebbuf);
+			break;
+		}
+
+		if (copy_to_user(dp.lebbuf, lebbuf, vol->ubi->leb_size))
+			err = -EFAULT;
+		kfree(lebbuf);
+		break;
+	}
+
 	/* Volume update command */
 	case UBI_IOCVOLUP:
 	{
diff -uprBN ../ubi/eba.c ubi/eba.c
--- ../ubi/eba.c	2008-05-09 07:27:47.000000000 +0800
+++ ubi/eba.c	2008-05-13 11:18:36.000000000 +0800
@@ -45,7 +45,7 @@
 #include <linux/crc32.h>
 #include <linux/err.h>
 #include "ubi.h"
-
+#include "ubiblk.h"
 /* Number of physical eraseblocks reserved for atomic LEB change operation */
 #define EBA_RESERVED_PEBS 1

@@ -1250,3 +1250,140 @@ void ubi_eba_close(const struct ubi_devi
 		kfree(ubi->volumes[i]->eba_tbl);
 	}
 }
+
+/* added by Nancy: begin */
+
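+/*
+ * Complete the write cache: pages and subpages which were not written
+ * by the upper layer are read back from flash, so the cache holds a
+ * full, consistent LEB image before the atomic LEB change.
+ */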
+static int ubiblk_fill_writecache(struct ubiblk_dev *ubiblk)
+{
+	struct ubi_volume_desc *uv = ubiblk->uv;
+	struct ubi_device *ubi = uv->vol->ubi;
+	int ppb = ubi->leb_size / ubi->min_io_size;
+	unsigned short subpage_shift = 9;
+	unsigned short spp = ubi->min_io_size >> subpage_shift;
+	unsigned short page_shift =  ffs(ubi->min_io_size) - 1;
+	unsigned short sectors_in_page_shift = ffs(ubi->min_io_size / 512) - 1;
+	unsigned short page, sector;
+	char *page_buf;
+
+	page_buf = kmalloc(ubi->min_io_size, GFP_KERNEL);
+	if (!page_buf)
+		return -ENOMEM;
+
+	for (page = 0; page < ppb; page++) {
+		if (!ubiblk->page_sts[page]) {
+			ubi_leb_read(uv, ubiblk->vbw,
+					&ubiblk->write_cache[page<<page_shift],
+					page<<page_shift, ubi->min_io_size, 0);
+		} else {
+			for (sector = 0; sector < spp; sector++)
+				if (!ubiblk->subpage_sts[(page<<sectors_in_page_shift)+sector])
+					break;
+			if (sector != spp) {
+				ubi_leb_read(uv, ubiblk->vbw, page_buf,
+						page<<page_shift, ubi->min_io_size, 0);
+				for (sector = 0; sector < spp; sector++)
+					if (!ubiblk->subpage_sts[(page<<sectors_in_page_shift) + sector])
+						memcpy(&ubiblk->write_cache[(page<<page_shift) +
+								(sector<<subpage_shift)],
+						       &page_buf[sector<<subpage_shift],
+						       512);
+			}
+		}
+	}
+	kfree(page_buf);
+	return 0;
+}
+
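+/*
+ * Like ubi_eba_atomic_leb_change(), but first fills the gaps of the
+ * write cache from flash and then writes the whole cached LEB to a
+ * freshly allocated PEB.
+ */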
+int ubiblk_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+			      int lnum, void *buf, int len, int dtype, struct ubiblk_dev *ubiblk)
+{
+	int err, pnum, tries = 0, vol_id = vol->vol_id;
+	struct ubi_vid_hdr *vid_hdr;
+	uint32_t crc;
+
+	if (ubi->ro_mode)
+		return -EROFS;
+
+	vid_hdr = ubi_zalloc_vid_hdr(ubi, GFP_NOFS);
+	if (!vid_hdr)
+		return -ENOMEM;
+
+	ubiblk_fill_writecache(ubiblk);
+	mutex_lock(&ubi->alc_mutex);
+	err = leb_write_lock(ubi, vol_id, lnum);
+	if (err)
+		goto out_mutex;
+
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	vid_hdr->vol_id = cpu_to_be32(vol_id);
+	vid_hdr->lnum = cpu_to_be32(lnum);
+	vid_hdr->compat = ubi_get_compat(ubi, vol_id);
+	vid_hdr->data_pad = cpu_to_be32(vol->data_pad);
+
+	crc = crc32(UBI_CRC32_INIT, buf, len);
+	vid_hdr->vol_type = UBI_VID_DYNAMIC;
+	vid_hdr->data_size = cpu_to_be32(len);
+	vid_hdr->copy_flag = 1;
+	vid_hdr->data_crc = cpu_to_be32(crc);
+
+retry:
+	pnum = ubi_wl_get_peb(ubi, dtype);
+	if (pnum < 0) {
+		err = pnum;
+		goto out_leb_unlock;
+	}
+
+	dbg_eba("change LEB %d:%d, PEB %d, write VID hdr to PEB %d",
+		vol_id, lnum, vol->eba_tbl[lnum], pnum);
+
+	err = ubi_io_write_vid_hdr(ubi, pnum, vid_hdr);
+	if (err) {
+		ubi_warn("failed to write VID header to LEB %d:%d, PEB %d",
+			 vol_id, lnum, pnum);
+		goto write_error;
+	}
+
+	err = ubi_io_write_data(ubi, buf, pnum, 0, len);
+	if (err) {
+		ubi_warn("failed to write %d bytes of data to PEB %d",
+			 len, pnum);
+		goto write_error;
+	}
+	if (vol->eba_tbl[lnum] >= 0) {
+		err = ubi_wl_put_peb(ubi, vol->eba_tbl[lnum], 0);
+		if (err)
+			goto out_leb_unlock;
+	}
+
+	vol->eba_tbl[lnum] = pnum;
+
+out_leb_unlock:
+	leb_write_unlock(ubi, vol_id, lnum);
+out_mutex:
+	mutex_unlock(&ubi->alc_mutex);
+	ubi_free_vid_hdr(ubi, vid_hdr);
+	return err;
+
+write_error:
+	if (err != -EIO || !ubi->bad_allowed) {
+		/*
+		 * This flash device does not admit of bad eraseblocks or
+		 * something nasty and unexpected happened. Switch to read-only
+		 * mode just in case.
+		 */
+		ubi_ro_mode(ubi);
+		goto out_leb_unlock;
+	}
+
+	err = ubi_wl_put_peb(ubi, pnum, 1);
+	if (err || ++tries > UBI_IO_RETRIES) {
+		ubi_ro_mode(ubi);
+		goto out_leb_unlock;
+	}
+
+	vid_hdr->sqnum = cpu_to_be64(next_sqnum(ubi));
+	ubi_msg("try another PEB");
+	goto retry;
+}
+
+/* added by Nancy: end */
+
diff -uprBN ../ubi/kapi.c ubi/kapi.c
--- ../ubi/kapi.c	2008-05-09 07:27:47.000000000 +0800
+++ ubi/kapi.c	2008-05-13 11:18:49.000000000 +0800
@@ -24,7 +24,10 @@
 #include <linux/err.h>
 #include <asm/div64.h>
 #include "ubi.h"
+#include "ubiblk.h"

+extern int ubiblk_eba_atomic_leb_change(struct ubi_device *ubi, struct ubi_volume *vol,
+					int lnum, void *buf, int len, int dtype, struct ubiblk_dev *ubiblk);
 /**
  * ubi_get_device_info - get information about UBI device.
  * @ubi_num: UBI device number
@@ -632,3 +635,138 @@ int ubi_is_mapped(struct ubi_volume_desc
 	return vol->eba_tbl[lnum] >= 0;
 }
 EXPORT_SYMBOL_GPL(ubi_is_mapped);
+
+/* added by Nancy: begin */
+
+int ubiblk_leb_change(struct ubiblk_dev *ubiblk)
+{
+	struct ubi_volume *vol = ubiblk->uv->vol;
+	struct ubi_device *ubi = vol->ubi;
+	int vol_id = vol->vol_id;
+
+	struct ubi_volume_desc *desc = ubiblk->uv;
+	int lnum = ubiblk->vbw;
+	int len = ubi->leb_size;
+	int dtype = UBI_UNKNOWN;
+	void *buf = ubiblk->write_cache;
+	
+	dbg_msg("atomically write %d bytes to LEB %d:%d", len, vol_id, lnum);
+
+	if (vol_id < 0 || vol_id >= ubi->vtbl_slots)
+		return -EINVAL;
+
+	if (desc->mode == UBI_READONLY || vol->vol_type == UBI_STATIC_VOLUME)
+		return -EROFS;
+
+	if (lnum < 0 || lnum >= vol->reserved_pebs || len < 0 ||
+	    len > vol->usable_leb_size || len % ubi->min_io_size)
+		return -EINVAL;
+
+	if (vol->upd_marker)
+		return -EBADF;
+
+	if (len == 0)
+		return 0;
+
+	return ubiblk_eba_atomic_leb_change(ubi, vol, lnum, buf, len, dtype, ubiblk);
+}
+EXPORT_SYMBOL_GPL(ubiblk_leb_change);
+
+
+void ubi_open_blkdev(int ubi_num, int vol_id, int mode)
+{
+	int err;
+	struct ubi_device *ubi;
+	struct ubi_volume *vol;
+
+	dbg_msg("open device %d volume %d, mode %d", ubi_num, vol_id, mode);
+
+	if (ubi_num < 0 || ubi_num >= UBI_MAX_DEVICES)
+		return;
+
+	if (mode != UBI_READONLY && mode != UBI_READWRITE &&
+	    mode != UBI_EXCLUSIVE)
+		return;
+	/*
+	 * First of all, we have to get the UBI device to prevent its removal.
+	 */
+	ubi = ubi_get_device(ubi_num);
+	if (!ubi)
+		return;
+
+	if (vol_id < 0 || vol_id >= ubi->vtbl_slots) {
+		err = -EINVAL;
+		goto out_put_ubi;
+	}
+
+	err = -ENODEV;
+	if (!try_module_get(THIS_MODULE))
+		goto out_put_ubi;
+
+	spin_lock(&ubi->volumes_lock);
+	vol = ubi->volumes[vol_id];
+	if (!vol)
+		goto out_unlock;
+
+	err = -EBUSY;
+	switch (mode) {
+	case UBI_READONLY:
+		if (vol->exclusive)
+			goto out_unlock;
+		vol->readers += 1;
+		break;
+
+	case UBI_READWRITE:
+		if (vol->exclusive || vol->writers > 0)
+			goto out_unlock;
+		vol->writers += 1;
+		break;
+
+	case UBI_EXCLUSIVE:
+		if (vol->exclusive || vol->writers || vol->readers)
+			goto out_unlock;
+		vol->exclusive = 1;
+		break;
+	}
+	get_device(&vol->dev);
+	vol->ref_count += 1;
+	spin_unlock(&ubi->volumes_lock);
+	return;
+
+out_unlock:
+	spin_unlock(&ubi->volumes_lock);
+	module_put(THIS_MODULE);
+out_put_ubi:
+	ubi_put_device(ubi);
+	return;
+}
+EXPORT_SYMBOL_GPL(ubi_open_blkdev);
+
+
+void ubi_close_blkdev(struct ubi_volume_desc *desc)
+{
+	struct ubi_volume *vol = desc->vol;
+	struct ubi_device *ubi = vol->ubi;
+
+	dbg_msg("close volume %d, mode %d", vol->vol_id, desc->mode);
+
+	spin_lock(&ubi->volumes_lock);
+	switch (desc->mode) {
+	case UBI_READONLY:
+		vol->readers -= 1;
+		break;
+	case UBI_READWRITE:
+		vol->writers -= 1;
+		break;
+	case UBI_EXCLUSIVE:
+		vol->exclusive = 0;
+	}
+	vol->ref_count -= 1;
+	spin_unlock(&ubi->volumes_lock);
+	put_device(&vol->dev);
+	ubi_put_device(ubi);
+	module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(ubi_close_blkdev);
+/* added by Nancy: end */
+
diff -uprBN ../ubi/Kconfig ubi/Kconfig
--- ../ubi/Kconfig	2008-05-09 07:27:46.000000000 +0800
+++ ubi/Kconfig	2008-04-02 15:14:11.000000000 +0800
@@ -24,13 +24,8 @@ config MTD_UBI_WL_THRESHOLD
 	  erase counter value and the lowest erase counter value of eraseblocks
 	  of UBI devices. When this threshold is exceeded, UBI starts performing
 	  wear leveling by means of moving data from eraseblock with low erase
-	  counter to eraseblocks with high erase counter.
-
-	  The default value should be OK for SLC NAND flashes, NOR flashes and
-	  other flashes which have eraseblock life-cycle 100000 or more.
-	  However, in case of MLC NAND flashes which typically have eraseblock
-	  life-cycle less then 10000, the threshold should be lessened (e.g.,
-	  to 128 or 256, although it does not have to be power of 2).
+	  counter to eraseblocks with high erase counter. Leave the default
+	  value if unsure.

 config MTD_UBI_BEB_RESERVE
 	int "Percentage of reserved eraseblocks for bad eraseblocks handling"
@@ -60,4 +55,18 @@ config MTD_UBI_GLUEBI
 	   this if no legacy software will be used.

 source "drivers/mtd/ubi/Kconfig.debug"
+
+config MTD_UBI_BLKDEVS
+	tristate "Common interface to block layer for UBI 'translation layers'"
+	depends on BLOCK
+	default n
+
+config MTD_UBI_BLOCK
+	tristate "Emulate block devices"
+	default n
+	depends on MTD_UBI_BLKDEVS
+	help
+	   This option enables block layer emulation on top of UBI volumes: for
+	   each UBI volume a block device is created. This is handy for making
+	   traditional filesystems (like ext2 or VFAT) work on top of UBI.
 endmenu
diff -uprBN ../ubi/Makefile ubi/Makefile
--- ../ubi/Makefile	2008-05-09 07:27:46.000000000 +0800
+++ ubi/Makefile	2008-05-13 11:19:30.000000000 +0800
@@ -5,3 +5,6 @@ ubi-y += misc.o

 ubi-$(CONFIG_MTD_UBI_DEBUG) += debug.o
 ubi-$(CONFIG_MTD_UBI_GLUEBI) += gluebi.o
+
+obj-$(CONFIG_MTD_UBI_BLKDEVS) += bdev.o
+obj-$(CONFIG_MTD_UBI_BLOCK) += ubiblk.o
diff -uprBN ../ubi/ubiblk.c ubi/ubiblk.c
--- ../ubi/ubiblk.c	1970-01-01 08:00:00.000000000 +0800
+++ ubi/ubiblk.c	2008-05-13 11:34:21.000000000 +0800
@@ -0,0 +1,347 @@
+/*
+ * Direct UBI block device access
+ *
+ * (C) 2000-2003 Nicolas Pitre <nico at cam.org>
+ * (C) 1999-2003 David Woodhouse <dwmw2 at infradead.org>
+ * (C) 2008 Yurong Tan <nancydreaming at gmail.com> :
+ *        borrowed from mtdblock.c to work on top of UBI
+ */
+
+#include <linux/fs.h>
+#include <linux/blkdev.h>
+#include <linux/blkpg.h>
+#include <linux/freezer.h>
+#include <asm/uaccess.h>
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/types.h>
+#include <linux/vmalloc.h>
+#include <linux/hdreg.h>
+#include <linux/mutex.h>
+#include "ubi.h"
+#include "ubiblk.h"
+
+#define UBIBLK_UNMAPPED 0
+#define UBIBLK_SECTOR_SIZE 512
+
+extern void ubi_open_blkdev(int ubi_num, int vol_id, int mode);
+extern void ubi_close_blkdev(struct ubi_volume_desc *desc);
+static void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block);
+static int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk);
+extern int ubiblk_leb_change(struct ubiblk_dev *ubiblk);
+
+struct ubiblk_dev *ubiblks[UBI_MAX_VOLUMES];
+static unsigned short subpage_shift;
+
+static int ubiblk_flush_writecache(struct ubiblk_dev *ubiblk)
+{
+	if (STATE_UNUSED == ubiblk->write_cache_state)
+		return 0;
+	ubiblk_leb_change(ubiblk);
+	ubiblk->write_cache_state = STATE_UNUSED;
+
+	return 0;
+}
+
+static void ubiblk_setup_writecache(struct ubiblk_dev *ubiblk, int virt_block)
+{
+	struct ubi_volume_desc *uv = ubiblk->uv;
+	struct ubi_device *ubi = uv->vol->ubi;
+	int ppb = ubi->leb_size / ubi->min_io_size;
+	unsigned short spp = ubi->min_io_size >> subpage_shift;
+
+	ubiblk->vbw = virt_block;
+	ubiblk->write_cache_state = STATE_USED;
+		
+	memset(ubiblk->page_sts, 0, ppb);
+	memset(ubiblk->subpage_sts, 0, ppb*spp);
+}
+
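+/*
+ * Write one 512-byte sector into the per-volume write cache. The cache
+ * holds exactly one LEB; switching to another LEB flushes the cache by
+ * an atomic LEB change.
+ */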
+static int do_cached_write (struct ubiblk_dev *ubiblk, unsigned long sector,
+			    int len, const char *buf)
+{
+	struct ubi_volume_desc *uv = ubiblk->uv;
+	struct ubi_device *ubi = uv->vol->ubi;
+	int ppb = ubi->leb_size / ubi->min_io_size;
+	unsigned short sectors_per_page =  ubi->min_io_size / len;
+	unsigned short sectors_in_page_shift = ffs(sectors_per_page) - 1;
+	unsigned short page_shift =  ffs(ubi->min_io_size) - 1;
+	unsigned short virt_block, page, subpage;
+	unsigned long virt_page;
+	
+	virt_page = sector / sectors_per_page;
+	subpage = sector % sectors_per_page;
+	virt_block = virt_page / ppb;
+	page = virt_page % ppb;
+	
+	if (ubi_is_mapped(uv, virt_block) == UBIBLK_UNMAPPED) {
+		mutex_lock(&ubiblk->cache_mutex);
+		ubiblk_flush_writecache(ubiblk);
+		mutex_unlock(&ubiblk->cache_mutex);
+
+		ubiblk_setup_writecache(ubiblk, virt_block);
+	} else {
+		if (STATE_USED == ubiblk->write_cache_state) {
+			if (ubiblk->vbw != virt_block) {
+				/* Commit before we start a new cache */
+				mutex_lock(&ubiblk->cache_mutex);
+				ubiblk_flush_writecache(ubiblk);
+				mutex_unlock(&ubiblk->cache_mutex);
+
+				ubiblk_setup_writecache(ubiblk, virt_block);
+			}
+		} else {
+			/* LEB is mapped but the cache is not set up yet */
+			ubiblk_setup_writecache(ubiblk, virt_block);
+		}
+	}
+	ubiblk->page_sts[page] = 1;
+	ubiblk->subpage_sts[(page<<sectors_in_page_shift) + subpage] = 1;
+	memcpy(&ubiblk->write_cache[(page<<page_shift) + (subpage<<subpage_shift)],
+	       buf, len);
+	return 0;
+}
+
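+/*
+ * Read one 512-byte sector through the read cache. If the sector lives
+ * in the LEB currently held by the write cache, the write cache is
+ * flushed first so the data read back is up to date.
+ */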
+static int do_cached_read (struct ubiblk_dev *ubiblk, unsigned long sector,
+			   int len, char *buf)
+{
+	struct ubi_volume_desc *uv = ubiblk->uv;
+	int ppb = uv->vol->ubi->leb_size / uv->vol->ubi->min_io_size;
+	unsigned short sectors_per_page =  uv->vol->ubi->min_io_size >> 9;
+	unsigned short page_shift =  ffs(uv->vol->ubi->min_io_size) - 1;
+	unsigned short virt_block, page, page_offset; 	
+	unsigned long virt_page;
+		
+	virt_page = sector / sectors_per_page;
+	page_offset = sector % sectors_per_page;
+	virt_block = virt_page / ppb;
+	page = virt_page % ppb;
+
+	if (ubiblk->vbw == virt_block) {
+		mutex_lock(&ubiblk->cache_mutex);
+		ubiblk_flush_writecache(ubiblk);
+		mutex_unlock(&ubiblk->cache_mutex);
+	}
+
+	if (ubi_is_mapped(uv, virt_block) == UBIBLK_UNMAPPED) {
+		/*
+		 * The logical block is not allocated to a physical block
+		 * because it has never been written. All data returned for
+		 * such a block should read as 0xFF.
+		 */
+		memset(buf, 0xFF, UBIBLK_SECTOR_SIZE);
+	} else {
+		if (ubiblk->vbr != virt_block ||
+		    ubiblk->read_cache_state == STATE_UNUSED) {
+			ubiblk->vbr = virt_block;
+			ubi_leb_read(uv, virt_block, ubiblk->read_cache, 0,
+				     uv->vol->usable_leb_size, 0);
+			ubiblk->read_cache_state = STATE_USED;
+		}
+		memcpy(buf, &ubiblk->read_cache[(page<<page_shift)+(page_offset<<9)], len);
+	}
+	return 0;
+}
+
+static int ubiblk_readsect(struct ubi_blktrans_dev *dev,
+			      unsigned long block, char *buf)
+{
+	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+	return do_cached_read(ubiblk, block, UBIBLK_SECTOR_SIZE, buf);
+}
+
+static int ubiblk_writesect(struct ubi_blktrans_dev *dev,
+			      unsigned long block, char *buf)
+{
+	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+	return do_cached_write(ubiblk, block, UBIBLK_SECTOR_SIZE, buf);
+}
+
+static int ubiblk_init_vol(int dev, struct ubi_volume_desc *uv)
+{
+	struct ubiblk_dev *ubiblk;
+	struct ubi_device *ubi = uv->vol->ubi;
+	int ppb = ubi->leb_size / ubi->min_io_size;
+	unsigned short spp = ubi->min_io_size >> subpage_shift;
+
+	ubiblk = kzalloc(sizeof(struct ubiblk_dev), GFP_KERNEL);
+	if (!ubiblk)
+		return -ENOMEM;
+
+	ubiblk->count = 1;
+	ubiblk->uv = uv;
+	mutex_init(&ubiblk->cache_mutex);
+
+	ubiblk->write_cache = vmalloc(uv->vol->usable_leb_size);
+	ubiblk->read_cache = vmalloc(uv->vol->usable_leb_size);
+	ubiblk->page_sts = vmalloc(ppb);
+	ubiblk->subpage_sts = vmalloc(ppb * spp);
+
+	if (!ubiblk->write_cache || !ubiblk->read_cache ||
+	    !ubiblk->page_sts || !ubiblk->subpage_sts) {
+		vfree(ubiblk->write_cache);
+		vfree(ubiblk->read_cache);
+		vfree(ubiblk->page_sts);
+		vfree(ubiblk->subpage_sts);
+		kfree(ubiblk);
+		return -ENOMEM;
+	}
+
+	ubiblk->write_cache_state = STATE_UNUSED;
+	ubiblk->read_cache_state = STATE_UNUSED;
+
+	ubiblks[dev] = ubiblk;
+	DEBUG(MTD_DEBUG_LEVEL1, "ok\n");
+	return 0;
+}
+
+static int ubiblk_open(struct inode *i, struct file *f )
+{
+	struct ubi_volume_desc *desc;
+	struct ubi_blktrans_dev *dev;
+	int ubi_num = ubi_major2num(imajor(i));
+	int vol_id = iminor(i);
+	int mode;
+	int ret = 0;
+
+	if (f->f_mode & FMODE_WRITE)
+		mode = UBI_READWRITE;
+	else
+		mode = UBI_READONLY;
+
+	dev = i->i_bdev->bd_disk->private_data;
+	if (ubiblks[dev->devnum]) {
+		ubiblks[dev->devnum]->count++;
+		ubi_open_blkdev(ubi_num, vol_id, mode);
+		printk("%s: increase use count\n",__FUNCTION__);
+		return 0;
+	}
+	
+	desc = ubi_open_volume(ubi_num, vol_id, mode);
+	if (IS_ERR(desc))
+		return PTR_ERR(desc);
+	
+	desc->vol->bdev_mode = mode;
+	dev->uv = desc;
+
+	subpage_shift = ffs(UBIBLK_SECTOR_SIZE) - 1;
+	ret = ubiblk_init_vol(dev->devnum, desc);
+	if (ret)
+		ubi_close_volume(desc);
+	return ret;
+}
+
+static int ubiblk_release(struct ubi_blktrans_dev *ubd)
+{
+	int dev = ubd->devnum;
+	struct ubiblk_dev *ubiblk = ubiblks[dev];
+	struct ubi_device *ubi = ubiblk->uv->vol->ubi;
+
+	mutex_lock(&ubiblk->cache_mutex);
+	ubiblk_flush_writecache(ubiblk);
+	mutex_unlock(&ubiblk->cache_mutex);
+
+	ubiblk->count--;
+	if (!ubiblk->count) {
+		/* It was the last usage. Free the device */
+		ubiblks[dev] = NULL;
+
+		if (ubi->mtd->sync)
+			ubi->mtd->sync(ubi->mtd);
+
+		ubi_close_volume(ubiblk->uv);
+		vfree(ubiblk->write_cache);
+		vfree(ubiblk->read_cache);
+		vfree(ubiblk->page_sts);
+		vfree(ubiblk->subpage_sts);
+		kfree(ubiblk);
+	} else {
+		printk("%s: decrease use count\n", __FUNCTION__);
+		ubi_close_blkdev(ubiblk->uv);
+	}
+	return 0;
+}
+
+static int ubiblk_flush(struct ubi_blktrans_dev *dev)
+{
+	struct ubiblk_dev *ubiblk = ubiblks[dev->devnum];
+	struct ubi_device *ubi = ubiblk->uv->vol->ubi;
+	
+	mutex_lock(&ubiblk->cache_mutex);
+	ubiblk_flush_writecache(ubiblk);
+	mutex_unlock(&ubiblk->cache_mutex);
+
+	if (ubi->mtd->sync)
+		ubi->mtd->sync(ubi->mtd);
+	return 0;
+}
+
+void ubiblk_add_vol_dev(struct ubi_blktrans_ops *tr, struct ubi_volume *vol)
+{
+	struct ubi_blktrans_dev *dev = kzalloc(sizeof(*dev), GFP_KERNEL);
+	if (!dev)
+		return;
+
+	dev->devnum = vol->vol_id;
+	dev->size = vol->used_bytes >> 9;
+	dev->tr = tr;
+
+	if (vol->bdev_mode == UBI_READONLY)
+		dev->readonly = 1;
+
+	vol->ubi->bdev_major = tr->major;
+
+	add_ubi_blktrans_dev(dev);
+}
+
+void ubiblk_remove_vol_dev(struct ubi_blktrans_dev *dev)
+{
+	del_ubi_blktrans_dev(dev);
+	kfree(dev);
+}
+
+static int ubiblk_getgeo(struct ubi_blktrans_dev *dev, struct hd_geometry *geo)
+{
+	memset(geo, 0, sizeof(*geo));
+	geo->heads     = 4;
+	geo->sectors   = 16;
+	geo->cylinders = dev->size/(4*16);
+	return 0;
+}
+
+static struct ubi_blktrans_ops ubiblk_tr = {
+	.name		= "ubiblock",
+	.major		= 0,
+	.part_bits	= 0,
+	.blksize	= UBIBLK_SECTOR_SIZE,
+	.open		= ubiblk_open,
+	.release	= ubiblk_release,
+	.readsect	= ubiblk_readsect,
+	.writesect	= ubiblk_writesect,
+	.getgeo		= ubiblk_getgeo,
+	.flush		= ubiblk_flush,
+	.add_vol	= ubiblk_add_vol_dev,
+	.remove_vol	= ubiblk_remove_vol_dev,
+	.owner		= THIS_MODULE,
+};
+
+static int __init init_ubiblock(void)
+{
+	return register_ubi_blktrans(&ubiblk_tr);
+}
+
+static void __exit cleanup_ubiblock(void)
+{
+	deregister_ubi_blktrans(&ubiblk_tr);
+}
+
+module_init(init_ubiblock);
+module_exit(cleanup_ubiblock);
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Nicolas Pitre <nico at cam.org> , Yurong Tan
<nancydreaming at gmail.com>");
+MODULE_DESCRIPTION("Caching read/erase/writeback block device
emulation access to UBI volumes");
diff -uprBN ../ubi/ubiblk.h ubi/ubiblk.h
--- ../ubi/ubiblk.h	1970-01-01 08:00:00.000000000 +0800
+++ ubi/ubiblk.h	2008-05-13 11:35:44.000000000 +0800
@@ -0,0 +1,85 @@
+/*
+  * (C) 2003 David Woodhouse <dwmw2 at infradead.org>
+  * (C) 2008 Yurong Tan <nancydreaming at gmail.com>: borrowed from MTD blktrans.h for UBI use
+  * Interface to Linux block layer for UBI 'translation layers'.
+  */
+
+#ifndef __UBI_TRANS_H__
+#define __UBI_TRANS_H__
+
+#include <linux/mutex.h>
+#include <linux/fs.h>
+#include "ubi.h"
+
+struct hd_geometry;
+struct ubi_volume_desc;
+struct ubi_blktrans_ops;
+struct file;
+struct inode;
+
+struct ubiblk_dev {
+	struct ubi_volume_desc *uv;
+	int count;
+	struct mutex cache_mutex;
+	unsigned short vbw;	/* virtual block number of the write cache */
+	unsigned short vbr;	/* virtual block number of the read cache */
+
+	unsigned char *write_cache;
+	unsigned char *page_sts;
+	unsigned char *subpage_sts;
+	
+	unsigned char *read_cache;
+	enum { STATE_UNUSED, STATE_USED } read_cache_state, write_cache_state;
+};
+
+struct ubi_blktrans_dev {
+	struct ubi_blktrans_ops *tr;
+	struct list_head list;
+	struct ubi_volume_desc *uv;
+	struct mutex lock;
+	int devnum;
+	unsigned long size;
+	int readonly;
+	void *blkcore_priv; /* gendisk in 2.5, devfs_handle in 2.4 */
+};
+
+struct blkcore_priv; /* Differs for 2.4 and 2.5 kernels; private */
+
+struct ubi_blktrans_ops {
+	char *name;
+	int major;
+	int part_bits;
+	int blksize;
+	int blkshift;
+
+	/* Access functions */
+	int (*readsect)(struct ubi_blktrans_dev *dev,
+		    unsigned long block, char *buffer);
+	int (*writesect)(struct ubi_blktrans_dev *dev,
+		     unsigned long block, char *buffer);
+
+	/* Block layer ioctls */
+	int (*getgeo)(struct ubi_blktrans_dev *dev, struct hd_geometry *geo);
+	int (*flush)(struct ubi_blktrans_dev *dev);
+
+	/* Called with vol_table_mutex held; no race with add/remove */
+	int (*open)(struct inode *i, struct file *f);
+	int (*release)(struct ubi_blktrans_dev *dev);
+
+	/* Called on {de,}registration and on subsequent addition/removal
+	   of devices, with vol_table_mutex held. */
+	void (*add_vol)(struct ubi_blktrans_ops *tr, struct ubi_volume *vol);
+	void (*remove_vol)(struct ubi_blktrans_dev *dev);
+
+	struct list_head devs;
+	struct list_head list;
+	struct module *owner;
+
+	struct ubi_blkcore_priv *blkcore_priv;
+};
+
+extern int add_ubi_blktrans_dev(struct ubi_blktrans_dev *new);
+extern int del_ubi_blktrans_dev(struct ubi_blktrans_dev *old);
+extern int register_ubi_blktrans(struct ubi_blktrans_ops *tr);
+extern int deregister_ubi_blktrans(struct ubi_blktrans_ops *tr);
+#endif /* __UBI_TRANS_H__ */
diff -uprBN ../ubi/ubi.h ubi/ubi.h
--- ../ubi/ubi.h	2008-05-09 07:27:47.000000000 +0800
+++ ubi/ubi.h	2008-05-13 11:17:47.000000000 +0800
@@ -232,8 +232,16 @@ struct ubi_volume {
 	int gluebi_refcount;
 	struct mtd_info gluebi_mtd;
 #endif
+	int bdev_mode;			/* added by Nancy */
 };

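+/*
+ * Users of UBI volumes (such as the block translation layer) register a
+ * vol_notifier to be told when volumes appear or disappear.
+ */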
+struct vol_notifier {
+	void (*add)(struct ubi_volume *vol);
+	void (*remove)(struct ubi_volume *vol);
+	struct list_head list;
+};
+
 /**
  * struct ubi_volume_desc - descriptor of the UBI volume returned when it is
  * opened.
@@ -336,6 +344,7 @@ struct ubi_wl_entry;
  */
 struct ubi_device {
 	struct cdev cdev;
+	int bdev_major;			/* added by Nancy */
 	struct device dev;
 	int ubi_num;
 	char ubi_name[sizeof(UBI_NAME_STR)+5];
@@ -504,7 +513,7 @@ int ubi_io_read_vid_hdr(struct ubi_devic
 			struct ubi_vid_hdr *vid_hdr, int verbose);
 int ubi_io_write_vid_hdr(struct ubi_device *ubi, int pnum,
 			 struct ubi_vid_hdr *vid_hdr);
-
+
 /* build.c */
 int ubi_attach_mtd_dev(struct mtd_info *mtd, int ubi_num, int vid_hdr_offset);
 int ubi_detach_mtd_dev(int ubi_num, int anyway);

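For testing the new UBI_IOCLEBDP ioctl from user space I use something
like the sketch below. Note that the ubi-user.h hunk defining
UBI_IOCLEBDP and struct ubi_leb_dump is not part of this diff, so the
layout shown here is only inferred from the cdev.c hunk above and must
match your real header:

/* Hypothetical user-space caller of the LEB dump ioctl; the struct
   layout and the UBI_IOCLEBDP number must come from the patched
   ubi-user.h. */
#include <fcntl.h>
#include <sys/ioctl.h>
#include <unistd.h>

struct ubi_leb_dump {
	int lnum;	/* logical eraseblock to dump */
	char *lebbuf;	/* user buffer of leb_size bytes */
};

static int dump_leb(const char *vol_node, int lnum, char *buf)
{
	struct ubi_leb_dump dp = { .lnum = lnum, .lebbuf = buf };
	int ret, fd = open(vol_node, O_RDONLY);

	if (fd < 0)
		return -1;
	ret = ioctl(fd, UBI_IOCLEBDP, &dp);
	close(fd);
	return ret;
}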

Signed-off-by: Yurong Tan (Nancy) <nancydreaming at gmail.com>

-- 
Best wishes,
Nancy

