mtd/drivers/mtd mtd_blkdevs-24.c,1.1,1.2
David Woodhouse
dwmw2 at infradead.org
Tue May 20 13:17:29 EDT 2003
Update of /home/cvs/mtd/drivers/mtd
In directory phoenix.infradead.org:/tmp/cvs-serv29320
Modified Files:
mtd_blkdevs-24.c
Log Message:
Add kernel thread again.
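The new thread replaces servicing requests directly from the request function: mtd_blktrans_request() now only wakes a per-translation kernel thread, which sleeps on thread_wq while the request queue is empty, serialises work per device through dev->sem, and on deregister is told to stop via the exiting flag and waited for through the thread_dead completion. Below is a standalone userspace C sketch of that wake/serve/shutdown handshake, using a pthread condition variable in place of the wait queue and pthread_join in place of the completion; every name in it is illustrative, not driver API.

/*
 * Userspace analogue of the pattern this commit adds (not the kernel code
 * itself): a dedicated worker thread sleeps while the request list is empty,
 * the submit path only queues work and wakes it, and teardown sets an
 * "exiting" flag, wakes the thread once more and waits for it to finish.
 */
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct request {
    int sector;
    struct request *next;
};

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER; /* io_request_lock stand-in */
static pthread_cond_t  thread_wq  = PTHREAD_COND_INITIALIZER;  /* wait_queue_head_t stand-in */
static struct request *queue_head;                             /* simple LIFO list, order is not the point */
static int exiting;

/* mtd_blktrans_thread analogue: drain the list, sleep when it is empty */
static void *worker(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&queue_lock);
    while (!exiting) {
        if (!queue_head) {
            pthread_cond_wait(&thread_wq, &queue_lock); /* schedule() until woken */
            continue;
        }
        struct request *req = queue_head;
        queue_head = req->next;
        pthread_mutex_unlock(&queue_lock);

        printf("handled request for sector %d\n", req->sector); /* do_blktrans_request stand-in */
        free(req);

        pthread_mutex_lock(&queue_lock);
    }
    pthread_mutex_unlock(&queue_lock);
    return NULL; /* complete_and_exit analogue: pthread_join observes the exit */
}

/* mtd_blktrans_request analogue: queue the work and wake the thread */
static void submit(int sector)
{
    struct request *req = malloc(sizeof(*req));
    if (!req)
        return;
    req->sector = sector;

    pthread_mutex_lock(&queue_lock);
    req->next = queue_head;
    queue_head = req;
    pthread_mutex_unlock(&queue_lock);

    pthread_cond_signal(&thread_wq); /* wake_up(&thread_wq) */
}

int main(void)
{
    pthread_t thread;

    pthread_create(&thread, NULL, worker, NULL); /* kernel_thread() analogue */

    for (int i = 0; i < 4; i++)
        submit(i);

    sleep(1); /* let the worker drain; the driver only tears down once usecount is 0 */

    pthread_mutex_lock(&queue_lock);
    exiting = 1;
    pthread_mutex_unlock(&queue_lock);
    pthread_cond_signal(&thread_wq);
    pthread_join(thread, NULL); /* wait_for_completion(&thread_dead) analogue */

    return 0;
}

As in deregister_mtd_blktrans(), the teardown side only sets the flag and wakes the worker once no further requests can arrive.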
Index: mtd_blkdevs-24.c
===================================================================
RCS file: /home/cvs/mtd/drivers/mtd/mtd_blkdevs-24.c,v
retrieving revision 1.1
retrieving revision 1.2
diff -u -r1.1 -r1.2
--- mtd_blkdevs-24.c 18 May 2003 19:10:12 -0000 1.1
+++ mtd_blkdevs-24.c 20 May 2003 17:17:26 -0000 1.2
@@ -26,12 +26,15 @@
extern struct mtd_info *mtd_table[];
struct mtd_blkcore_priv {
- devfs_handle_t devfs_handle;
+ devfs_handle_t devfs_dir_handle;
int blksizes[256];
int sizes[256];
struct hd_struct part_table[256];
struct gendisk gd;
spinlock_t devs_lock; /* See comment in _request function */
+ struct completion thread_dead;
+ int exiting;
+ wait_queue_head_t thread_wq;
};
static inline struct mtd_blktrans_dev *tr_get_dev(struct mtd_blktrans_ops *tr,
@@ -104,20 +107,50 @@
}
}
-static void mtd_blktrans_request(request_queue_t *rq)
+static int mtd_blktrans_thread(void *arg)
{
- struct request *req;
- struct mtd_blktrans_ops *tr = rq->queuedata;
- struct mtd_blktrans_dev *dev;
+ struct mtd_blktrans_ops *tr = arg;
+ request_queue_t *rq = (BLK_DEFAULT_QUEUE(tr->major));
BUG_ON(!tr);
- for (;;) {
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC;
+
+ snprintf(current->comm, sizeof(current->comm), "%sd", tr->name);
+
+ /* daemonize() doesn't do this for us since some kernel threads
+ actually want to deal with signals. We can't just call
+ exit_sighand() since that'll cause an oops when we finally
+ do exit. */
+ spin_lock_irq(&current->sighand->siglock);
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
+
+ daemonize();
+
+ while (!tr->blkcore_priv->exiting) {
+ struct request *req;
+ struct mtd_blktrans_dev *dev;
int devnum;
int res = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ spin_lock_irq(&io_request_lock);
+
+ if (list_empty(&rq->queue_head)) {
+
+ add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_unlock_irq(&io_request_lock);
- if (list_empty(&rq->queue_head))
- return;
+ schedule();
+ remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+
+ continue;
+ }
req = blkdev_entry_next_request(&rq->queue_head);
@@ -137,26 +170,28 @@
dev = tr_get_dev(tr, devnum);
spin_unlock(&tr->blkcore_priv->devs_lock);
- if (!dev) {
- /* We'd BUG() but it's rude to do so when we know
- we'd leave the io_request_lock locked */
- printk(KERN_CRIT "mtd_blktrans request for unknown %s device #%d\n",
- tr->name, devnum);
- goto relock;
-
- }
+ BUG_ON(!dev);
+
/* Ensure serialisation of requests */
down(&dev->sem);
+
res = do_blktrans_request(tr, dev, req);
up(&dev->sem);
- relock:
- spin_lock_irq(&io_request_lock);
if (!end_that_request_first(req, res, tr->name)) {
+ spin_lock_irq(&io_request_lock);
blkdev_dequeue_request(req);
end_that_request_last(req);
+ spin_unlock_irq(&io_request_lock);
}
}
+ complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
+}
+
+static void mtd_blktrans_request(struct request_queue *rq)
+{
+ struct mtd_blktrans_ops *tr = rq->queuedata;
+ wake_up(&tr->blkcore_priv->thread_wq);
}
int blktrans_open(struct inode *i, struct file *f)
@@ -190,10 +225,10 @@
goto out;
}
- if (!try_module_get(dev->mtd->owner))
+ if (!try_inc_mod_count(dev->mtd->owner))
goto out;
- if (!try_module_get(tr->owner))
+ if (!try_inc_mod_count(tr->owner))
goto out_tr;
dev->mtd->usecount++;
@@ -205,12 +240,13 @@
tr->usecount--;
dev->usecount--;
dev->mtd->usecount--;
- module_put(dev->mtd->owner);
+ __MOD_DEC_USE_COUNT(dev->mtd->owner);
out_tr:
- module_put(tr->owner);
+ __MOD_DEC_USE_COUNT(tr->owner);
}
out:
up(&mtd_table_mutex);
+
return ret;
}
@@ -244,8 +280,8 @@
tr->usecount--;
dev->usecount--;
dev->mtd->usecount--;
- module_put(dev->mtd->owner);
- module_put(tr->owner);
+ __MOD_DEC_USE_COUNT(dev->mtd->owner);
+ __MOD_DEC_USE_COUNT(tr->owner);
}
up(&mtd_table_mutex);
@@ -422,6 +458,20 @@
grok_partitions(&tr->blkcore_priv->gd, new->devnum,
1 << tr->part_bits, new->size);
}
+#ifdef CONFIG_DEVFS_FS
+ {
+ char name[2];
+
+ name[0] = (tr->part_bits?'a':'0')+new->devnum;
+ name[1] = 0;
+
+ new->blkcore_priv =
+ devfs_register(tr->blkcore_priv->devfs_dir_handle,
+ name, DEVFS_FL_DEFAULT, tr->major,
+ new->devnum, S_IFBLK|S_IRUGO|S_IWUGO,
+ &mtd_blktrans_ops, NULL);
+ }
+#endif
return 0;
}
@@ -437,7 +487,9 @@
if (old->usecount)
return -EBUSY;
-
+#ifdef CONFIG_DEVFS_FS
+ devfs_unregister(old->blkcore_priv);
+#endif
spin_lock(&tr->blkcore_priv->devs_lock);
list_del(&old->list);
spin_unlock(&tr->blkcore_priv->devs_lock);
@@ -461,7 +513,7 @@
struct mtd_blktrans_ops *tr = list_entry(this, struct mtd_blktrans_ops, list);
list_for_each_safe(this2, next, &tr->devs) {
- struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
+ struct mtd_blktrans_dev *dev = list_entry(this2, struct mtd_blktrans_dev, list);
if (dev->mtd == mtd)
tr->remove_dev(dev);
@@ -513,10 +565,22 @@
return ret;
}
- tr->blkcore_priv->devfs_handle = devfs_mk_dir(NULL, tr->name, NULL);
blk_init_queue(BLK_DEFAULT_QUEUE(tr->major), &mtd_blktrans_request);
(BLK_DEFAULT_QUEUE(tr->major))->queuedata = tr;
+ init_completion(&tr->blkcore_priv->thread_dead);
+ init_waitqueue_head(&tr->blkcore_priv->thread_wq);
+
+ ret = kernel_thread(mtd_blktrans_thread, tr,
+ CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
+ if (ret < 0) {
+ blk_cleanup_queue(BLK_DEFAULT_QUEUE(tr->major));
+ devfs_unregister_blkdev(tr->major, tr->name);
+ return ret;
+ }
+
+ tr->blkcore_priv->devfs_dir_handle = devfs_mk_dir(NULL, tr->name, NULL);
+
blksize_size[tr->major] = tr->blkcore_priv->blksizes;
blk_size[tr->major] = tr->blkcore_priv->sizes;
@@ -540,29 +604,42 @@
tr->add_mtd(tr, mtd_table[i]);
}
up(&mtd_table_mutex);
+
return 0;
}
int deregister_mtd_blktrans(struct mtd_blktrans_ops *tr)
{
struct list_head *this, *next;
+
down(&mtd_table_mutex);
+
if (tr->usecount) {
up(&mtd_table_mutex);
return -EBUSY;
}
+
+ /* Clean up the kernel thread */
+ tr->blkcore_priv->exiting = 1;
+ wake_up(&tr->blkcore_priv->thread_wq);
+ wait_for_completion(&tr->blkcore_priv->thread_dead);
+
+ /* Remove it from the list of active majors */
list_del(&tr->list);
+ /* Remove each of its devices */
list_for_each_safe(this, next, &tr->devs) {
struct mtd_blktrans_dev *dev = list_entry(this, struct mtd_blktrans_dev, list);
tr->remove_dev(dev);
}
- devfs_unregister(tr->blkcore_priv->devfs_handle);
- devfs_unregister_blkdev(tr->major, tr->name);
blk_cleanup_queue(BLK_DEFAULT_QUEUE(tr->major));
blksize_size[tr->major] = NULL;
blk_size[tr->major] = NULL;
+
del_gendisk(&tr->blkcore_priv->gd);
+
+ devfs_unregister_blkdev(tr->major, tr->name);
+ devfs_unregister(tr->blkcore_priv->devfs_dir_handle);
up(&mtd_table_mutex);
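For reference, the devfs node names registered in add_mtd_blktrans_dev() above come from the (tr->part_bits?'a':'0')+new->devnum expression; the short sketch below just evaluates that expression in userspace, and the directory names and part_bits values in it are illustrative assumptions rather than values taken from the tree.

/* Sketch of the node naming used in the devfs_register() call above:
 * translations that expose partitions (part_bits != 0) get single-letter
 * names under their devfs directory, the rest get digits.
 */
#include <stdio.h>

static void print_node(const char *dir, int part_bits, int devnum)
{
    char name[2];

    name[0] = (part_bits ? 'a' : '0') + devnum; /* same expression as the diff */
    name[1] = 0;
    printf("/dev/%s/%s\n", dir, name);
}

int main(void)
{
    print_node("nftl", 4, 0);     /* -> /dev/nftl/a     */
    print_node("mtdblock", 0, 3); /* -> /dev/mtdblock/3 */
    return 0;
}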