mtd/drivers/mtd mtd_blkdevs.c,1.10,1.11
David Woodhouse
dwmw2 at infradead.org
Tue May 20 18:29:11 EDT 2003
Update of /home/cvs/mtd/drivers/mtd
In directory phoenix.infradead.org:/tmp/cvs-serv12939
Modified Files:
mtd_blkdevs.c
Log Message:
Reinstate kernel thread.
Index: mtd_blkdevs.c
===================================================================
RCS file: /home/cvs/mtd/drivers/mtd/mtd_blkdevs.c,v
retrieving revision 1.10
retrieving revision 1.11
diff -u -r1.10 -r1.11
--- mtd_blkdevs.c 20 May 2003 21:31:53 -0000 1.10
+++ mtd_blkdevs.c 20 May 2003 22:29:08 -0000 1.11
@@ -18,6 +18,7 @@
#include <linux/blk.h>
#include <linux/blkpg.h>
#include <linux/spinlock.h>
+#include <linux/init.h>
#include <asm/semaphore.h>
#include <linux/devfs_fs_kernel.h>
@@ -29,6 +30,12 @@
static struct request_queue mtd_blktrans_queue;
static spinlock_t mtd_blktrans_queue_lock;
+struct mtd_blkcore_priv {
+ struct completion thread_dead;
+ int exiting;
+ wait_queue_head_t thread_wq;
+};
+
static inline struct mtd_blktrans_dev *tr_get_dev(struct mtd_blktrans_ops *tr,
int devnum)
{
@@ -97,14 +104,46 @@
}
}
-static void mtd_blktrans_request(request_queue_t *rq)
+static int mtd_blktrans_thread(void *arg)
{
- struct request *req;
+ struct mtd_blktrans_ops *tr = arg;
+ struct request_queue *rq = &mtd_blktrans_queue;
+
+ /* we might get involved when memory gets low, so use PF_MEMALLOC */
+ current->flags |= PF_MEMALLOC;
+
+ daemonize("%sd", tr->name);
+
+ /* daemonize() doesn't do this for us since some kernel threads
+ actually want to deal with signals. We can't just call
+ exit_sighand() since that'll cause an oops when we finally
+ do exit. */
+ spin_lock_irq(&current->sighand->siglock);
+ sigfillset(&current->blocked);
+ recalc_sigpending();
+ spin_unlock_irq(&current->sighand->siglock);
- while ((req = elv_next_request(rq))) {
- struct mtd_blktrans_ops *tr;
+ while (!tr->blkcore_priv->exiting) {
+ struct request *req;
struct mtd_blktrans_dev *dev;
int res = 0;
+ DECLARE_WAITQUEUE(wait, current);
+
+ spin_lock_irq(rq->queue_lock);
+
+ req = elv_next_request(rq);
+
+ if (!req) {
+ add_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+ set_current_state(TASK_INTERRUPTIBLE);
+
+ spin_unlock_irq(rq->queue_lock);
+
+ schedule();
+ remove_wait_queue(&tr->blkcore_priv->thread_wq, &wait);
+
+ continue;
+ }
dev = req->rq_disk->private_data;
tr = dev->tr;
@@ -119,8 +158,16 @@
end_request(req, res);
}
+ complete_and_exit(&tr->blkcore_priv->thread_dead, 0);
}
+static void mtd_blktrans_request(struct request_queue *rq)
+{
+ struct mtd_blktrans_ops *tr = rq->queuedata;
+ wake_up(&tr->blkcore_priv->thread_wq);
+}
+
+
int blktrans_open(struct inode *i, struct file *f)
{
struct mtd_blktrans_ops *tr = NULL;
@@ -177,6 +224,7 @@
}
out:
up(&mtd_table_mutex);
+
return ret;
}
@@ -411,6 +459,17 @@
blk_init_queue(&mtd_blktrans_queue, mtd_blktrans_request,
&mtd_blktrans_queue_lock);
+ init_completion(&tr->blkcore_priv->thread_dead);
+ init_waitqueue_head(&tr->blkcore_priv->thread_wq);
+
+ ret = kernel_thread(mtd_blktrans_thread, tr,
+ CLONE_FS|CLONE_FILES|CLONE_SIGHAND);
+ if (ret < 0) {
+ blk_cleanup_queue(&mtd_blktrans_queue);
+ unregister_blkdev(tr->major, tr->name);
+ return ret;
+ }
+
devfs_mk_dir(tr->name);
tr->usecount = 0;
@@ -423,6 +482,7 @@
}
up(&mtd_table_mutex);
+
return 0;
}
@@ -431,10 +491,19 @@
struct list_head *this, *next;
down(&mtd_table_mutex);
+
if (tr->usecount) {
up(&mtd_table_mutex);
return -EBUSY;
}
+
+
+ /* Clean up the kernel thread */
+ tr->blkcore_priv->exiting = 1;
+ wake_up(&tr->blkcore_priv->thread_wq);
+ wait_for_completion(&tr->blkcore_priv->thread_dead);
+
+ /* Remove it from the list of active majors */
list_del(&tr->list);
list_for_each_safe(this, next, &tr->devs) {
More information about the linux-mtd-cvs
mailing list