Commit 996395f2 authored by Alexander Viro's avatar Alexander Viro Committed by James Bottomley

[PATCH] mtdblock_ro fixes (based on patch from rmk)

	* compile fixes
	* switched to private queue
	* set ->queue
parent 0ffbe56a
/* /*
* $Id: mtdblock_ro.c,v 1.9 2001/10/02 15:05:11 dwmw2 Exp $ * $Id: mtdblock_ro.c,v 1.13 2002/03/11 16:03:29 sioux Exp $
* *
* Read-only version of the mtdblock device, without the * Read-only flash, read-write RAM version of the mtdblock device,
* read/erase/modify/writeback stuff * without caching.
*/ */
#ifdef MTDBLOCK_DEBUG #ifdef MTDBLOCK_DEBUG
...@@ -21,216 +21,226 @@ ...@@ -21,216 +21,226 @@
#define LOCAL_END_REQUEST #define LOCAL_END_REQUEST
#define MAJOR_NR MTD_BLOCK_MAJOR #define MAJOR_NR MTD_BLOCK_MAJOR
#define DEVICE_NAME "mtdblock" #define DEVICE_NAME "mtdblock"
#define DEVICE_NR(device) (device)
#include <linux/blk.h> #include <linux/blk.h>
#define RQFUNC_ARG request_queue_t *q
#ifdef MTDBLOCK_DEBUG #ifdef MTDBLOCK_DEBUG
static int debug = MTDBLOCK_DEBUG; static int debug = MTDBLOCK_DEBUG;
MODULE_PARM(debug, "i"); MODULE_PARM(debug, "i");
#endif #endif
/*
 * Per-MTD-device state: the block-layer gendisk, the underlying MTD
 * device (held open while 'open' > 0), and an open reference count.
 */
struct mtdro_dev {
	struct gendisk *disk;	/* block device presented to the kernel */
	struct mtd_info *mtd;	/* non-NULL only while the device is open */
	int open;		/* open reference count, guarded by mtd_sem */
};

/* One slot per possible MTD device, indexed by mtd->index / minor. */
static struct mtdro_dev mtd_dev[MAX_MTD_DEVICES];

/* Serializes open/release bookkeeping on mtd_dev[]. */
static DECLARE_MUTEX(mtd_sem);

/* Single shared request queue for all mtdblock minors, with its lock. */
static struct request_queue mtdro_queue;
static spinlock_t mtdro_lock = SPIN_LOCK_UNLOCKED;
static int mtdblock_open(struct inode *inode, struct file *file) static int mtdblock_open(struct inode *inode, struct file *file)
{ {
struct mtd_info *mtd = NULL; struct mtdro_dev *mdev = inode->i_bdev->bd_disk->private_data;
int dev = minor(inode->i_rdev); int ret = 0;
struct gendisk *disk = mtd_disks[dev];
DEBUG(1,"mtdblock_open\n"); DEBUG(1,"mtdblock_open\n");
mtd = get_mtd_device(NULL, dev); down(&mtd_sem);
if (!mtd) if (mdev->mtd == NULL) {
return -EINVAL; mdev->mtd = get_mtd_device(NULL, minor(inode->i_rdev));
if (MTD_ABSENT == mtd->type) { if (!mdev->mtd || mdev->mtd->type == MTD_ABSENT) {
put_mtd_device(mtd); if (mdev->mtd)
return -EINVAL; put_mtd_device(mdev->mtd);
ret = -ENODEV;
}
} }
set_capacity(disk, mtd->size>>9); if (ret == 0) {
add_disk(disk); set_device_ro(inode->i_rdev, !(mdev->mtd->flags & MTD_CAP_RAM));
mdev->open++;
}
up(&mtd_sem);
DEBUG(1, "ok\n"); DEBUG(1, "%s\n", ret ? "ok" : "nodev");
return 0; return ret;
} }
static release_t mtdblock_release(struct inode *inode, struct file *file) static release_t mtdblock_release(struct inode *inode, struct file *file)
{ {
int dev; struct mtdro_dev *mdev = inode->i_bdev->bd_disk->private_data;
struct mtd_info *mtd;
DEBUG(1, "mtdblock_release\n"); DEBUG(1, "mtdblock_release\n");
if (inode == NULL) down(&mtd_sem);
release_return(-ENODEV); if (mdev->open-- == 0) {
struct mtd_info *mtd = mdev->mtd;
dev = minor(inode->i_rdev);
mtd = __get_mtd_device(NULL, dev);
if (!mtd) {
printk(KERN_WARNING "MTD device is absent on mtd_release!\n");
release_return(-ENODEV);
}
del_gendisk(mtd_disks[dev]); mdev->mtd = NULL;
if (mtd->sync)
if (mtd->sync) mtd->sync(mtd);
mtd->sync(mtd);
put_mtd_device(mtd); put_mtd_device(mtd);
}
up(&mtd_sem);
DEBUG(1, "ok\n"); DEBUG(1, "ok\n");
release_return(0); release_return(0);
} }
/*
 * Request-function for the private mtdro_queue.  Entered with
 * q->queue_lock held (block-layer contract).  Drains the queue,
 * translating each request into a synchronous MTD_READ/MTD_WRITE.
 * The queue lock is dropped around the (potentially slow) MTD I/O
 * and re-taken before completing the request.
 */
static void mtdblock_request(request_queue_t *q)
{
	while (!blk_queue_empty(q)) {
		struct request *req = elv_next_request(q);
		struct mtdro_dev *mdev = req->rq_disk->private_data;
		struct mtd_info *mtd = mdev->mtd;
		unsigned int res;	/* 1 = success, 0 = failure */

		/* Only filesystem (REQ_CMD) requests are handled. */
		if (!(req->flags & REQ_CMD)) {
			res = 0;
			goto end_req;
		}

		/* Bounds check against the device size (in 512-byte sectors). */
		if ((req->sector + req->current_nr_sectors) > (mtd->size >> 9)) {
			printk("mtd: Attempt to read past end of device!\n");
			printk("size: %x, sector: %lx, nr_sectors: %x\n",
			       mtd->size, req->sector, req->current_nr_sectors);
			res = 0;
			goto end_req;
		}

		/* Now drop the lock that the ll_rw_blk functions grabbed for
		   us and process the request.  This is necessary due to the
		   extreme time we spend processing it. */
		spin_unlock_irq(q->queue_lock);

		/* Handle the request */
		switch (rq_data_dir(req)) {
			size_t retlen;

		case READ:
			if (MTD_READ(mtd, req->sector << 9,
				     req->current_nr_sectors << 9,
				     &retlen, req->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		case WRITE:
			/* Refuse writes to read-only (non-RAM) devices. */
			if ((mtd->flags & MTD_CAP_RAM) == 0) {
				res = 0;
				break;
			}

			/* Do the write */
			if (MTD_WRITE(mtd, req->sector << 9,
				      req->current_nr_sectors << 9,
				      &retlen, req->buffer) == 0)
				res = 1;
			else
				res = 0;
			break;

		/* Shouldn't happen */
		default:
			printk("mtd: unknown request\n");
			res = 0;
			break;
		}

		/* Grab the lock and re-thread the item onto the linked list.
		   The goto end_req paths above arrive here with the lock
		   still held, which end_that_request_* require. */
		spin_lock_irq(q->queue_lock);
	end_req:
		if (!end_that_request_first(req, res, req->hard_cur_sectors)) {
			blkdev_dequeue_request(req);
			end_that_request_last(req);
		}
	}
}
static int mtdblock_ioctl(struct inode * inode, struct file * file, static int mtdblock_ioctl(struct inode * inode, struct file * file,
unsigned int cmd, unsigned long arg) unsigned int cmd, unsigned long arg)
{ {
struct mtd_info *mtd; struct mtdro_dev *mdev = inode->i_bdev->bd_disk->private_data;
mtd = __get_mtd_device(NULL, minor(inode->i_rdev));
if (!mtd || cmd != BLKFLSBUF) if (cmd != BLKFLSBUF)
return -EINVAL; return -EINVAL;
fsync_bdev(inode->i_bdev); fsync_bdev(inode->i_bdev);
invalidate_bdev(inode->i_bdev, 0); invalidate_bdev(inode->i_bdev, 0);
if (mtd->sync) if (mdev->mtd->sync)
mtd->sync(mtd); mdev->mtd->sync(mdev->mtd);
return 0; return 0;
} }
static struct block_device_operations mtd_fops = static struct block_device_operations mtd_fops = {
{
.owner = THIS_MODULE, .owner = THIS_MODULE,
.open = mtdblock_open, .open = mtdblock_open,
.release = mtdblock_release, .release = mtdblock_release,
.ioctl = mtdblock_ioctl .ioctl = mtdblock_ioctl
}; };
int __init init_mtdblock(void) /* Called with mtd_table_mutex held. */
static void mtd_notify_add(struct mtd_info* mtd)
{ {
int err = -ENOMEM; struct gendisk *disk;
int i;
for (i = 0; i < MAX_MTD_DEVICES; i++) { if (!mtd || mtd->type == MTD_ABSENT || mtd->index >= MAX_MTD_DEVICES)
struct gendisk *disk = alloc_disk(1); return;
if (!disk)
goto out; disk = alloc_disk(1);
if (disk) {
disk->major = MAJOR_NR; disk->major = MAJOR_NR;
disk->first_minor = i; disk->first_minor = mtd->index;
sprintf(disk->disk_name, "mtdblock%d", i);
disk->fops = &mtd_fops; disk->fops = &mtd_fops;
mtd_disks[i] = disk; sprintf(disk->disk_name, "mtdblock%d", mtd->index);
mtd_dev[mtd->index].disk = disk;
set_capacity(disk, mtd->size / 512);
disk->queue = &mtdro_queue;
disk->private_data = &mtd_dev[mtd->index];
add_disk(disk);
} }
}
/*
 * MTD notifier: an MTD device is going away.  Unregister and free its
 * gendisk, if one was created.  Called with mtd_table_mutex held.
 */
static void mtd_notify_remove(struct mtd_info* mtd)
{
	struct mtdro_dev *mdev;
	struct gendisk *disk;

	if (!mtd || mtd->type == MTD_ABSENT || mtd->index >= MAX_MTD_DEVICES)
		return;

	mdev = &mtd_dev[mtd->index];
	/* Detach first so nothing else can reach the dying disk. */
	disk = mdev->disk;
	mdev->disk = NULL;

	if (disk) {
		del_gendisk(disk);
		put_disk(disk);
	}
}
/* Hook add/remove of MTD devices so disks track the MTD table. */
static struct mtd_notifier notifier = {
	.add	= mtd_notify_add,
	.remove	= mtd_notify_remove,
};
int __init init_mtdblock(void)
{
int err;
if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) { if (register_blkdev(MAJOR_NR,DEVICE_NAME,&mtd_fops)) {
printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n", printk(KERN_NOTICE "Can't allocate major number %d for Memory Technology Devices.\n",
...@@ -239,21 +249,18 @@ int __init init_mtdblock(void) ...@@ -239,21 +249,18 @@ int __init init_mtdblock(void)
goto out; goto out;
} }
blk_init_queue(BLK_DEFAULT_QUEUE(MAJOR_NR), &mtdblock_request); blk_init_queue(&mtdro_queue, &mtdblock_request, &mtdro_lock);
return 0; register_mtd_user(&notifier);
out: err = 0;
while (i--) out:
put_disk(mtd_disks[i]);
return err; return err;
} }
/*
 * Module teardown: stop receiving MTD add/remove events (which also
 * removes the remaining disks via the notifier), then release the
 * block major and the shared request queue.
 */
static void __exit cleanup_mtdblock(void)
{
	unregister_mtd_user(&notifier);
	unregister_blkdev(MAJOR_NR,DEVICE_NAME);
	blk_cleanup_queue(&mtdro_queue);
}
module_init(init_mtdblock); module_init(init_mtdblock);
...@@ -262,4 +269,4 @@ module_exit(cleanup_mtdblock); ...@@ -262,4 +269,4 @@ module_exit(cleanup_mtdblock);
MODULE_LICENSE("GPL"); MODULE_LICENSE("GPL");
MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al."); MODULE_AUTHOR("Erwin Authried <eauth@softsys.co.at> et al.");
MODULE_DESCRIPTION("Simple read-only block device emulation access to MTD devices"); MODULE_DESCRIPTION("Simple uncached block device emulation access to MTD devices");
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment