mtd/drivers/mtd/devices blkmtd.c,1.13,1.14
spse at infradead.org
Wed Sep 4 18:59:33 EDT 2002
Update of /home/cvs/mtd/drivers/mtd/devices
In directory phoenix.infradead.org:/tmp/cvs-serv15043
Modified Files:
blkmtd.c
Log Message:
- remove 'size_t totalsize' from struct blkmtd_dev, use mtd_info.size as
it is equivalent.
- remove 'struct address_space as' from blkmtd_dev, use the mapping
in the block device inode (see the sketch after this log message).
- get rid of writepage() and blkmtd_aops as these are not used.
- replace UnlockPage() with unlock_page().
- fix up blkmtd_proc_read() to give more debug info per device.
- get the size of the device from dev->binding->bd_inode->i_size.
- call invalidate_inode_pages() when the device is freed.
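
For readers not familiar with the 2.4 page-cache interfaces, here is a
minimal sketch (not part of the patch) of what the mapping and size
changes amount to. It assumes it lives inside blkmtd.c, where struct
blkmtd_dev and blkmtd_readpage() are defined and dev->binding is the
struct block_device the driver holds open; the helper names
blkmtd_mapping(), blkmtd_device_size() and blkmtd_get_page() are
hypothetical and exist only for illustration:

    #include <linux/fs.h>       /* struct block_device, bd_inode, i_mapping, i_size */
    #include <linux/pagemap.h>  /* read_cache_page(), filler_t */

    /*
     * Illustrative helpers only.  The driver used to keep a private
     * 'struct address_space as' and a cached 'size_t totalsize' in
     * struct blkmtd_dev; after this change both come from the inode
     * backing the opened block device.
     */
    static struct address_space *blkmtd_mapping(struct blkmtd_dev *dev)
    {
            return dev->binding->bd_inode->i_mapping;
    }

    static loff_t blkmtd_device_size(struct blkmtd_dev *dev)
    {
            return dev->binding->bd_inode->i_size;
    }

    /* Page lookups then go through that mapping, as in the read path: */
    static struct page *blkmtd_get_page(struct blkmtd_dev *dev,
                                        unsigned long index)
    {
            return read_cache_page(blkmtd_mapping(dev), index,
                                   (filler_t *)blkmtd_readpage, dev);
    }

The driver itself defines no such helpers; it simply uses the
dev->binding->bd_inode expressions inline at each call site, as the
diff below shows.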
Index: blkmtd.c
===================================================================
RCS file: /home/cvs/mtd/drivers/mtd/devices/blkmtd.c,v
retrieving revision 1.13
retrieving revision 1.14
diff -u -r1.13 -r1.14
--- blkmtd.c 4 Sep 2002 22:20:08 -0000 1.13
+++ blkmtd.c 4 Sep 2002 22:59:31 -0000 1.14
@@ -82,9 +82,7 @@
/* else page no of last page */
int last_page_sectors; /* Number of sectors in last page */
/* if partial_last_page != 0 */
- size_t totalsize;
int readonly;
- struct address_space as;
struct mtd_info mtd_info;
};
@@ -146,14 +144,6 @@
/* Page cache stuff */
-/* writepage() - should never be called - catch it anyway */
-static int blkmtd_writepage(struct page *page)
-{
- info("writepage called!!!");
- return -EIO;
-}
-
-
/* readpage() - reads one page from the block device */
static int blkmtd_readpage(struct blkmtd_dev *dev, struct page *page)
{
@@ -175,7 +165,7 @@
if(Page_Uptodate(page)) {
DEBUG(2, "blkmtd: readpage page %ld is already upto date\n",
page->index);
- UnlockPage(page);
+ unlock_page(page);
return 0;
}
@@ -203,7 +193,7 @@
}
SetPageUptodate(page);
flush_dcache_page(page);
- UnlockPage(page);
+ unlock_page(page);
spin_unlock(&mbd_writeq_lock);
return 0;
}
@@ -284,18 +274,12 @@
err = 0;
}
flush_dcache_page(page);
- UnlockPage(page);
+ unlock_page(page);
DEBUG(2, "blkmtd: readpage: finished, err = %d\n", err);
return 0;
}
-static struct address_space_operations blkmtd_aops = {
- writepage: blkmtd_writepage,
- readpage: NULL,
-};
-
-
/* This is the kernel thread that empties the write queue to disk */
static int write_queue_task(void *data)
{
@@ -431,7 +415,7 @@
write_queue_tail %= write_queue_sz;
if(!item->iserase) {
for(i = 0 ; i < item->pagecnt; i++) {
- UnlockPage(item->pages[i]);
+ unlock_page(item->pages[i]);
__free_pages(item->pages[i], 0);
}
kfree(item->pages);
@@ -486,7 +470,7 @@
outpage = alloc_pages(GFP_KERNEL, 0);
if(!outpage) {
while(i--) {
- UnlockPage(new_pages[i]);
+ unlock_page(new_pages[i]);
__free_pages(new_pages[i], 0);
}
kfree(new_pages);
@@ -608,7 +592,7 @@
while(pagecnt) {
/* get the page via the page cache */
DEBUG(3, "blkmtd: erase: doing grab_cache_page() for page %d\n", pagenr);
- page = grab_cache_page(&dev->as, pagenr);
+ page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenr);
if(!page) {
DEBUG(3, "blkmtd: erase: grab_cache_page() failed for page %d\n",
pagenr);
@@ -630,7 +614,7 @@
if(!err) {
while(pagecnt--) {
SetPageUptodate(pages[pagecnt]);
- UnlockPage(pages[pagecnt]);
+ unlock_page(pages[pagecnt]);
page_cache_release(pages[pagecnt]);
flush_dcache_page(pages[pagecnt]);
}
@@ -682,7 +666,7 @@
struct page *page;
int cpylen;
DEBUG(3, "blkmtd: read: looking for page: %d\n", pagenr);
- page = read_cache_page(&dev->as, pagenr,
+ page = read_cache_page(dev->binding->bd_inode->i_mapping, pagenr,
(filler_t *)blkmtd_readpage, dev);
if(IS_ERR(page)) {
return PTR_ERR(page);
@@ -749,12 +733,12 @@
return -EROFS;
}
- if(to >= dev->totalsize) {
+ if(to >= mtd->size) {
return -ENOSPC;
}
- if(to + len > dev->totalsize) {
- len = (dev->totalsize - to);
+ if(to + len > mtd->size) {
+ len = (mtd->size - to);
}
@@ -801,7 +785,7 @@
DEBUG(3, "blkmtd: write: doing partial start, page = %d len = %d offset = %d\n",
pagenr, len1, offset);
- page = read_cache_page(&dev->as, pagenr,
+ page = read_cache_page(dev->binding->bd_inode->i_mapping, pagenr,
(filler_t *)blkmtd_readpage, dev);
if(IS_ERR(page)) {
@@ -826,7 +810,7 @@
/* see if page is in the page cache */
DEBUG(3, "blkmtd: write: grabbing page %d from page cache\n", pagenr);
- page = grab_cache_page(&dev->as, pagenr);
+ page = grab_cache_page(dev->binding->bd_inode->i_mapping, pagenr);
DEBUG(3, "blkmtd: write: got page %d from page cache\n", pagenr);
if(!page) {
warn("write: cant grab cache page %d", pagenr);
@@ -835,7 +819,7 @@
}
memcpy(page_address(page), buf, PAGE_SIZE);
pages[pagecnt++] = page;
- UnlockPage(page);
+ unlock_page(page);
SetPageUptodate(page);
pagenr++;
pagesc--;
@@ -850,7 +834,7 @@
struct page *page;
DEBUG(3, "blkmtd: write: doing partial end, page = %d len = %d\n",
pagenr, len3);
- page = read_cache_page(&dev->as, pagenr,
+ page = read_cache_page(dev->binding->bd_inode->i_mapping, pagenr,
(filler_t *)blkmtd_readpage, dev);
if(IS_ERR(page)) {
err = PTR_ERR(page);
@@ -919,14 +903,12 @@
static int blkmtd_proc_read(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
- int clean = 0, dirty = 0, locked = 0;
- //struct list_head *temp;
int i, len, pages = 0, cnt;
struct list_head *temp1, *temp2;
MOD_INC_USE_COUNT;
-#if 0
+
spin_lock(&mbd_writeq_lock);
cnt = write_queue_cnt;
i = write_queue_tail;
@@ -937,41 +919,62 @@
i %= write_queue_sz;
cnt--;
}
-
- /* Count the size of the page lists */
-#if 0
- list_for_each(temp, &mtd_dev->as.clean_pages) {
- clean++;
- }
- list_for_each(temp, &mtd_dev->as.dirty_pages) {
- dirty++;
- }
- list_for_each(temp, &mtd_dev->as.locked_pages) {
- locked++;
- }
-#endif
+ spin_unlock(&mbd_writeq_lock);
len = sprintf(page, "Write queue head: %d\nWrite queue tail: %d\n"
- "Write queue count: %d\nPages in queue: %d (%dK)\n"
- "Clean Pages: %d\nDirty Pages: %d\nLocked Pages: %d\n"
- "nrpages: %ld\n",
+ "Write queue count: %d\nPages in queue: %d (%dK)\n",
write_queue_head, write_queue_tail, write_queue_cnt,
- pages, pages << (PAGE_SHIFT-10), clean, dirty, locked,
- 0L ); //mtd_dev->as.nrpages);
- if(len <= count)
- *eof = 1;
- spin_unlock(&mbd_writeq_lock);
-#else
- len = 0;
+ pages, pages << (PAGE_SHIFT-10));
+
+ /* Count the size of the page lists */
+
+ len += sprintf(page+len, "dev\tnrpages\tclean\tdirty\tlocked\tlru\n");
list_for_each_safe(temp1, temp2, &blkmtd_device_list) {
struct blkmtd_dev *dev = list_entry(temp1, struct blkmtd_dev,
list);
- len += sprintf(page+len, "Name: %s\nsector_size: %d\nsector_bits: %d\npartial_last_page: %d\ntotalsize: %d\nreadonly: %d\n\n",
- dev->mtd_info.name, dev->sector_size, dev->sector_bits, dev->partial_last_page, dev->totalsize, dev->readonly);
+ struct list_head *temp;
+ struct page *pagei;
+
+ int clean = 0, dirty = 0, locked = 0, lru = 0;
+ /* Count the size of the page lists */
+ list_for_each(temp, &dev->binding->bd_inode->i_mapping->clean_pages) {
+ pagei = list_entry(temp, struct page, list);
+ clean++;
+ if(PageLocked(pagei))
+ locked++;
+ if(PageDirty(pagei))
+ dirty++;
+ if(PageLRU(pagei))
+ lru++;
+ }
+ list_for_each(temp, &dev->binding->bd_inode->i_mapping->dirty_pages) {
+ pagei = list_entry(temp, struct page, list);
+ if(PageLocked(pagei))
+ locked++;
+ if(PageDirty(pagei))
+ dirty++;
+ if(PageLRU(pagei))
+ lru++;
+ }
+ list_for_each(temp, &dev->binding->bd_inode->i_mapping->locked_pages) {
+ pagei = list_entry(temp, struct page, list);
+ if(PageLocked(pagei))
+ locked++;
+ if(PageDirty(pagei))
+ dirty++;
+ if(PageLRU(pagei))
+ lru++;
+ }
+
+ len += sprintf(page+len, "mtd%d:\t%ld\t%d\t%d\t%d\t%d\n",
+ dev->mtd_info.index,
+ dev->binding->bd_inode->i_mapping->nrpages,
+ clean, dirty, locked, lru);
}
+
if(len <= count)
*eof = 1;
-#endif
+
MOD_DEC_USE_COUNT;
return len;
}
@@ -988,6 +991,9 @@
kfree(dev->mtd_info.eraseregions);
if(dev->mtd_info.name)
kfree(dev->mtd_info.name);
+
+ invalidate_inode_pages(dev->binding->bd_inode);
+
if(dev->binding)
blkdev_put(dev->binding, BDEV_RAW);
kfree(dev);
@@ -1129,14 +1135,6 @@
blocksize = block_size(kdev);
blocksize_bits = blksize_bits(blocksize);
- size = (loff_t) blk_size[maj][min] << blocksize_bits;
-
- DEBUG(1, "blkmtd: size = %ld\n", (long int)size);
-
- if(size == 0) {
- err("cant determine size");
- return NULL;
- }
dev = kmalloc(sizeof(struct blkmtd_dev), GFP_KERNEL);
if(dev == NULL)
@@ -1149,8 +1147,8 @@
if (err) {
goto devinit_err;
}
-
- dev->totalsize = size;
+ size = dev->binding->bd_inode->i_size;
+ DEBUG(1, "blkmtd: size = %ld\n", (long int)size);
dev->sector_size = blocksize;
dev->sector_bits = blocksize_bits;
dev->readonly = readonly;
@@ -1199,19 +1197,6 @@
dev->mtd_info.unpoint = 0;
dev->mtd_info.priv = dev;
- /* setup the page cache info */
-
- dev->as.nrpages = 0;
- INIT_LIST_HEAD(&dev->as.clean_pages);
- INIT_LIST_HEAD(&dev->as.dirty_pages);
- INIT_LIST_HEAD(&dev->as.locked_pages);
- dev->as.host = NULL;
- spin_lock_init(&(dev->as.i_shared_lock));
-
- dev->as.a_ops = &blkmtd_aops;
- dev->as.i_mmap = NULL;
- dev->as.i_mmap_shared = NULL;
- dev->as.gfp_mask = GFP_KERNEL;
dev->mtd_info.module = THIS_MODULE;
list_add(&dev->list, &blkmtd_device_list);
if (add_mtd_device(&dev->mtd_info)) {
@@ -1262,7 +1247,7 @@
}
if(erase_page) {
- UnlockPage(erase_page);
+ unlock_page(erase_page);
__free_pages(erase_page, 0);
}
}