mtd/drivers/mtd mtd_blkdevs.c,1.7,1.8

David Woodhouse dwmw2 at infradead.org
Tue May 20 10:30:50 EDT 2003


Update of /home/cvs/mtd/drivers/mtd
In directory phoenix.infradead.org:/tmp/cvs-serv19341

Modified Files:
	mtd_blkdevs.c 
Log Message:
Wheee. Builds for 2.5. Probably doesn't work but testing is boring.


Index: mtd_blkdevs.c
===================================================================
RCS file: /home/cvs/mtd/drivers/mtd/mtd_blkdevs.c,v
retrieving revision 1.7
retrieving revision 1.8
diff -u -r1.7 -r1.8
--- mtd_blkdevs.c	18 May 2003 19:09:02 -0000	1.7
+++ mtd_blkdevs.c	20 May 2003 14:30:47 -0000	1.8
@@ -3,7 +3,7 @@
  *
  * (C) 2003 David Woodhouse <dwmw2 at infradead.org>
  *
- * Interface to Linux 2.4 block layer for MTD 'translation layers'.
+ * Interface to Linux 2.5 block layer for MTD 'translation layers'.
  *
  */
 
@@ -19,20 +19,15 @@
 #include <linux/blkpg.h>
 #include <linux/spinlock.h>
 #include <asm/semaphore.h>
+#include <linux/devfs_fs_kernel.h>
 
 static LIST_HEAD(blktrans_majors);
 
 extern struct semaphore mtd_table_mutex;
 extern struct mtd_info *mtd_table[];
 
-struct mtd_blkcore_priv {
-	devfs_handle_t devfs_handle;
-	int blksizes[256];
-	int sizes[256];
-	struct hd_struct part_table[256];
-	struct gendisk gd;
-	spinlock_t devs_lock; /* See comment in _request function */
-};
+static struct request_queue mtd_blktrans_queue;
+static spinlock_t mtd_blktrans_queue_lock;
 
 static inline struct mtd_blktrans_dev *tr_get_dev(struct mtd_blktrans_ops *tr,
 					   int devnum)
@@ -69,20 +64,18 @@
 {
 	unsigned long block, nsect;
 	char *buf;
-	int minor;
 
-	minor = MINOR(req->rq_dev);
 	block = req->sector;
 	nsect = req->current_nr_sectors;
 	buf = req->buffer;
 
-	if (block + nsect > tr->blkcore_priv->part_table[minor].nr_sects) {
-		printk(KERN_WARNING "Access beyond end of device.\n");
+	if (!(req->flags & REQ_CMD))
+		return 0;
+
+	if (block + nsect > get_capacity(req->rq_disk))
 		return 0;
-	}
-	block += tr->blkcore_priv->part_table[minor].start_sect;
 
-	switch(req->cmd) {
+	switch(rq_data_dir(req)) {
 	case READ:
 		for (; nsect > 0; nsect--, block++, buf += 512)
 			if (tr->readsect(dev, block, buf))
@@ -99,7 +92,7 @@
 		return 1;
 
 	default:
-		printk(KERN_NOTICE "Unknown request cmd %d\n", req->cmd);
+		printk(KERN_NOTICE "Unknown request %ld\n", rq_data_dir(req));
 		return 0;
 	}
 }
@@ -107,55 +100,24 @@
 static void mtd_blktrans_request(request_queue_t *rq)
 {
 	struct request *req;
-	struct mtd_blktrans_ops *tr = rq->queuedata;
-	struct mtd_blktrans_dev *dev;
-
-	BUG_ON(!tr);
 
-	for (;;) {
-		int devnum;
+	while ((req = elv_next_request(rq))) {
+		struct mtd_blktrans_ops *tr;
+		struct mtd_blktrans_dev *dev;
 		int res = 0;
 
-		if (list_empty(&rq->queue_head))
-			return;
-
-		req = blkdev_entry_next_request(&rq->queue_head);
+		dev = req->rq_disk->private_data;
+		tr = dev->tr;
 
-		devnum = MINOR(req->rq_dev) >> tr->part_bits;
+		spin_unlock_irq(rq->queue_lock);
 
-		/* The ll_rw_blk code knows not to touch the request
-		   at the head of the queue */
-		spin_unlock_irq(&io_request_lock);
-
-		/* FIXME: Where can we store the dev, on which
-		   we already have a refcount anyway? We need to
-		   lock against concurrent addition/removal of devices,
-		   but if we use the mtd_table_mutex we deadlock when
-		   grok_partitions is called from the registration
-		   callbacks. */
-		spin_lock(&tr->blkcore_priv->devs_lock);
-		dev = tr_get_dev(tr, devnum);
-		spin_unlock(&tr->blkcore_priv->devs_lock);
-
-		if (!dev) {
-			/* We'd BUG() but it's rude to do so when we know
-			   we'd leave the io_request_lock locked */
-			printk(KERN_CRIT "mtd_blktrans request for unknown %s device #%d\n", 
-			       tr->name, devnum);
-			goto relock;
-			
-		}
-		/* Ensure serialisation of requests */
 		down(&dev->sem);
 		res = do_blktrans_request(tr, dev, req);
 		up(&dev->sem);
-	relock:
-		spin_lock_irq(&io_request_lock);
 
-		if (!end_that_request_first(req, res, tr->name)) {
-			blkdev_dequeue_request(req);
-			end_that_request_last(req);
-		}
+		spin_lock_irq(rq->queue_lock);
+
+		end_request(req, res);
 	}
 }
 
@@ -163,13 +125,16 @@
 {
 	struct mtd_blktrans_ops *tr = NULL;
 	struct mtd_blktrans_dev *dev = NULL;
-	int major_nr = MAJOR(i->i_rdev);
-	int minor_nr = MINOR(i->i_rdev);
+	int major_nr = major(i->i_rdev);
+	int minor_nr = minor(i->i_rdev);
 	int devnum;
 	int ret = -ENODEV;
 
+#if 0 /* Do we still have to do this in 2.5? Hopefully not since I don't
+	 see how */
 	if (is_read_only(i->i_rdev) && (f->f_mode & FMODE_WRITE))
 		return -EROFS;
+#endif
 
 	down(&mtd_table_mutex);
 
@@ -185,11 +150,12 @@
 	if (!dev)
 		goto out;
 
+#if 0
 	if (!tr->blkcore_priv->part_table[minor_nr].nr_sects) {
 		ret = -ENODEV;
 		goto out;
 	}
-
+#endif
 	if (!try_module_get(dev->mtd->owner))
 		goto out;
 
@@ -223,13 +189,13 @@
 
 	down(&mtd_table_mutex);
 
-	tr = get_tr(MAJOR(i->i_rdev));
+	tr = get_tr(major(i->i_rdev));
 	if (!tr) {
 		up(&mtd_table_mutex);
 		return -ENODEV;
 	}
 
-	devnum = MINOR(i->i_rdev) >> tr->part_bits;
+	devnum = minor(i->i_rdev) >> tr->part_bits;
 	dev = tr_get_dev(tr, devnum);
 
 	if (!dev) {
@@ -253,43 +219,6 @@
 	return ret;
 }
 
-static int mtd_blktrans_rrpart(kdev_t rdev, struct mtd_blktrans_ops *tr,
-			       struct mtd_blktrans_dev *dev)
-{
-	struct gendisk *gd = &(tr->blkcore_priv->gd);
-	int i;
-	int minor = MINOR(rdev);
-
-	if (minor & ((1<<tr->part_bits)-1) || !tr->part_bits) {
-		/* BLKRRPART on a partition. Go away. */
-		return -ENOTTY;
-	}
-
-	if (!capable(CAP_SYS_ADMIN))
-	    return -EACCES;
-
-	/* We are required to prevent simultaneous open() ourselves.
-	   The core doesn't do that for us. Did I ever mention how
-	   much the Linux block layer sucks? Sledgehammer approach... */
-	down(&mtd_table_mutex);
-
-	if (dev->usecount > 1) {
-		up(&mtd_table_mutex);
-		return -EBUSY;
-	}
-
-	for (i=0; i < (1<<tr->part_bits); i++) {
-		invalidate_device(MKDEV(tr->major, minor+i), 1);
-		gd->part[minor + i].start_sect = 0;
-		gd->part[minor + i].nr_sects = 0;
-	}
-
-	grok_partitions(gd, minor, 1 << tr->part_bits, 
-			tr->blkcore_priv->sizes[minor]);
-	up(&mtd_table_mutex);
-
-	return 0;
-}
 
 static int blktrans_ioctl(struct inode *inode, struct file *file, 
 			      unsigned int cmd, unsigned long arg)
@@ -297,31 +226,17 @@
 	struct mtd_blktrans_dev *dev;
 	struct mtd_blktrans_ops *tr;
 	int devnum;
-
-	switch(cmd) {
-	case BLKGETSIZE:
-        case BLKGETSIZE64:
-        case BLKBSZSET:
-        case BLKBSZGET:
-        case BLKROSET:
-        case BLKROGET:
-        case BLKRASET:
-        case BLKRAGET:
-        case BLKPG:
-        case BLKELVGET:
-        case BLKELVSET:
-		return blk_ioctl(inode->i_rdev, cmd, arg);
-	}
+	int ret = -ENOTTY;
 
 	down(&mtd_table_mutex);
 
-	tr = get_tr(MAJOR(inode->i_rdev));
+	tr = get_tr(major(inode->i_rdev));
 	if (!tr) {
 		up(&mtd_table_mutex);
 		return -ENODEV;
 	}
 
-	devnum = MINOR(inode->i_rdev) >> tr->part_bits;
+	devnum = minor(inode->i_rdev) >> tr->part_bits;
 	dev = tr_get_dev(tr, devnum);
 
 	up(&mtd_table_mutex);
@@ -329,20 +244,14 @@
 	if (!dev)
 		return -ENODEV;
 
-	switch(cmd) {
-	case BLKRRPART:
-		return mtd_blktrans_rrpart(inode->i_rdev, tr, dev);
-		
-        case BLKFLSBUF:
-		blk_ioctl(inode->i_rdev, cmd, arg);
-		if (!tr->ioctl)
-			return 0;
+	if (tr->ioctl)
+		ret = tr->ioctl(dev, inode, file, cmd, arg);
 
-	default:
-		if (!tr->ioctl)
-			return -ENOTTY;
-		return tr->ioctl(dev, inode, file, cmd, arg);
+	if (ret == -ENOTTY && (cmd == BLKROSET || cmd == BLKFLSBUF)) {
+		/* The core code did the work, we had nothing to do. */
+		ret = 0;
 	}
+	return ret;
 }
 
 struct block_device_operations mtd_blktrans_ops = {
@@ -357,15 +266,13 @@
 	struct mtd_blktrans_ops *tr = new->tr;
 	struct list_head *this;
 	int last_devnum = -1;
-	int i;
+	struct gendisk *gd;
 
 	if (!down_trylock(&mtd_table_mutex)) {
 		up(&mtd_table_mutex);
 		BUG();
 	}
 
-	spin_lock(&tr->blkcore_priv->devs_lock);
-
 	list_for_each(this, &tr->devs) {
 		struct mtd_blktrans_dev *d = list_entry(this, struct mtd_blktrans_dev, list);
 		if (new->devnum == -1) {
@@ -378,7 +285,6 @@
 			}
 		} else if (d->devnum == new->devnum) {
 			/* Required number taken */
-			spin_unlock(&tr->blkcore_priv->devs_lock);
 			return -EBUSY;
 		} else if (d->devnum > new->devnum) {
 			/* Required number was free */
@@ -391,45 +297,46 @@
 		new->devnum = last_devnum+1;
 
 	if ((new->devnum << tr->part_bits) > 256) {
-		spin_unlock(&tr->blkcore_priv->devs_lock);
 		return -EBUSY;
 	}
 
 	init_MUTEX(&new->sem);
 	list_add_tail(&new->list, &tr->devs);
  added:
-	spin_unlock(&tr->blkcore_priv->devs_lock);
 	new->usecount = 0;
 
 	if (!tr->writesect)
 		new->readonly = 1;
 
-	for (i = new->devnum << tr->part_bits;
-	     i < (new->devnum+1) << tr->part_bits; 
-	     i++) {
-		set_device_ro(MKDEV(tr->major, i), new->readonly);
-		tr->blkcore_priv->blksizes[i] = new->blksize;
-		tr->blkcore_priv->sizes[i] = 0;
-		tr->blkcore_priv->part_table[i].nr_sects = 0;
-		tr->blkcore_priv->part_table[i].start_sect = 0;
-	}
-
-	tr->blkcore_priv->sizes[new->devnum << tr->part_bits] = new->size;
-	tr->blkcore_priv->part_table[new->devnum << tr->part_bits].nr_sects = new->size;
-	tr->blkcore_priv->gd.nr_real++;
-
-	if (tr->part_bits) {
-		grok_partitions(&tr->blkcore_priv->gd, new->devnum,
-				1 << tr->part_bits, new->size);
+	gd = alloc_disk(1 << tr->part_bits);
+	if (!gd) {
+		list_del(&new->list);
+		return -ENOMEM;
 	}
+	gd->major = tr->major;
+	gd->first_minor = (new->devnum) << tr->part_bits;
+	gd->fops = &mtd_blktrans_ops;
+	
+	snprintf(gd->disk_name, sizeof(gd->disk_name),
+		 "%s%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
+	snprintf(gd->devfs_name, sizeof(gd->devfs_name),
+		 "%s/%c", tr->name, (tr->part_bits?'a':'0') + new->devnum);
+
+	set_capacity(gd, new->size);
+	gd->private_data = new;
+	new->blkcore_priv = gd;
+	gd->queue = &mtd_blktrans_queue;
+
+	if (new->readonly)
+		set_disk_ro(gd, 1);
+
+	add_disk(gd);
+	
 	return 0;
 }
 
 int del_mtd_blktrans_dev(struct mtd_blktrans_dev *old)
 {
-	struct mtd_blktrans_ops *tr = old->tr;
-	int i;
-
 	if (!down_trylock(&mtd_table_mutex)) {
 		up(&mtd_table_mutex);
 		BUG();
@@ -438,17 +345,10 @@
 	if (old->usecount)
 		return -EBUSY;
 
-	spin_lock(&tr->blkcore_priv->devs_lock);
 	list_del(&old->list);
-	spin_unlock(&tr->blkcore_priv->devs_lock);
 
-	for (i = (old->devnum << tr->part_bits); 
-	     i < ((old->devnum+1) << tr->part_bits); i++) {
-		tr->blkcore_priv->sizes[i] = 0;
-		tr->blkcore_priv->part_table[i].nr_sects = 0;
-		tr->blkcore_priv->part_table[i].start_sect = 0;
-	}
-	tr->blkcore_priv->gd.nr_real--;
+	del_gendisk(old->blkcore_priv);
+	put_disk(old->blkcore_priv);
 		
 	return 0;
 }
@@ -496,40 +396,19 @@
 	if (!blktrans_notifier.list.next)
 		register_mtd_user(&blktrans_notifier);
 
-	tr->blkcore_priv = kmalloc(sizeof(*tr->blkcore_priv), GFP_KERNEL);
-	if (!tr->blkcore_priv)
-		return -ENOMEM;
-
-	memset(tr->blkcore_priv, 0, sizeof(*tr->blkcore_priv));
-
 	down(&mtd_table_mutex);
 
-	ret = devfs_register_blkdev(tr->major, tr->name, &mtd_blktrans_ops);
+	ret = register_blkdev(tr->major, tr->name);
 	if (ret) {
 		printk(KERN_WARNING "Unable to register %s block device on major %d: %d\n",
 		       tr->name, tr->major, ret);
-		kfree(tr->blkcore_priv);
 		up(&mtd_table_mutex);
 		return ret;
 	}
+	blk_init_queue(&mtd_blktrans_queue, mtd_blktrans_request, 
+		       &mtd_blktrans_queue_lock);
 
-	tr->blkcore_priv->devfs_handle = devfs_mk_dir(NULL, tr->name, NULL);
-	blk_init_queue(BLK_DEFAULT_QUEUE(tr->major), &mtd_blktrans_request);
-	(BLK_DEFAULT_QUEUE(tr->major))->queuedata = tr;
-	
-	blksize_size[tr->major] = tr->blkcore_priv->blksizes;
-	blk_size[tr->major] = tr->blkcore_priv->sizes;
-
-	tr->blkcore_priv->gd.major = tr->major;
-	tr->blkcore_priv->gd.major_name = tr->name;
-	tr->blkcore_priv->gd.minor_shift = tr->part_bits;
-	tr->blkcore_priv->gd.max_p = (1<<tr->part_bits) - 1;
-	tr->blkcore_priv->gd.part = tr->blkcore_priv->part_table;
-	tr->blkcore_priv->gd.sizes = tr->blkcore_priv->sizes;
-
-	spin_lock_init(&tr->blkcore_priv->devs_lock);
-
-	add_gendisk(&tr->blkcore_priv->gd);
+	devfs_mk_dir(tr->name);
 
 	tr->usecount = 0;
 	INIT_LIST_HEAD(&tr->devs);
@@ -539,6 +418,7 @@
 		if (mtd_table[i])
 			tr->add_mtd(tr, mtd_table[i]);
 	}
+
 	up(&mtd_table_mutex);
 	return 0;
 }
@@ -546,6 +426,7 @@
 int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
 {
 	struct list_head *this, *next;
+
 	down(&mtd_table_mutex);
 	if (tr->usecount) {
 		up(&mtd_table_mutex);
@@ -557,16 +438,12 @@
 		struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
 		tr->remove_dev(dev);
 	}
-	devfs_unregister(tr->blkcore_priv->devfs_handle);
-	devfs_unregister_blkdev(tr->major, tr->name);
-	blk_cleanup_queue(BLK_DEFAULT_QUEUE(tr->major));
-	blksize_size[tr->major] = NULL;
-	blk_size[tr->major] = NULL;
-	del_gendisk(&tr->blkcore_priv->gd);
 
-	up(&mtd_table_mutex);
+	devfs_remove(tr->name);
+	blk_cleanup_queue(&mtd_blktrans_queue);
+	unregister_blkdev(tr->major, tr->name);
 
-	kfree(tr->blkcore_priv);
+	up(&mtd_table_mutex);
 
 	if (!list_empty(&tr->devs))
 		BUG();




More information about the linux-mtd-cvs mailing list