Commit 4533ed18 authored by Jens Axboe

[PATCH] ide-cd/scsi/block fixups for SG_IO

- Kill the bogus ret transformation in block/ioctl.c if we return
  -EINVAL; it doesn't make any sense.

- Don't allow sg_reserved_size to be set bigger than a request we can
  deal with...

- timeout fixes (the unit conversions are sketched right after the
  commit message).

- Cleanup of user access.

- Set SAM_STAT_CHECK_CONDITION, not CHECK_CONDITION, which needs to be
  bit shifted 1 up (illustrated before the ide-cd hunks).

- Set sense_len correctly.

- Account sense_len correctly, don't just increment by 1...

- Use the correct pointer in post transform.

- Fix oops in bio_map_user(): it must take the extra reference before
  calling bio_unmap_user() itself, too (see the reference-counting
  sketch after the diff).
parent b6e27b19
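
A quick sketch of the timeout/duration units involved (illustrative only, not part of the patch): the SG_SET_TIMEOUT/SG_GET_TIMEOUT ioctls exchange values in USER_HZ ticks, the request layer counts kernel jiffies (HZ), and the sg_io_hdr timeout and duration fields are milliseconds. The user-space program below only illustrates the conversions the sg_* hunks introduce; HZ and USER_HZ are assumed example values and the helper names are invented.

#include <stdio.h>

/* Assumed example values; the real constants are per-architecture/config. */
#define HZ      1000	/* kernel timer ticks per second */
#define USER_HZ  100	/* tick unit exposed to user space */

/* USER_HZ ticks from user space -> jiffies, as in sg_set_timeout() */
static long user_ticks_to_jiffies(long ticks)
{
	return ticks * (HZ / USER_HZ);
}

/* jiffies -> USER_HZ ticks, as in sg_get_timeout() */
static long jiffies_to_user_ticks(long jif)
{
	return jif / (HZ / USER_HZ);
}

/* milliseconds -> jiffies, as in rq->timeout = (hdr.timeout * HZ) / 1000 */
static long msecs_to_jiffies_sketch(long msecs)
{
	return (msecs * HZ) / 1000;
}

/* jiffies -> milliseconds, as in hdr.duration = ((jiffies - start) * 1000) / HZ */
static long jiffies_to_msecs_sketch(long jif)
{
	return (jif * 1000) / HZ;
}

int main(void)
{
	printf("60 s as USER_HZ ticks: %ld -> jiffies: %ld\n",
	       60L * USER_HZ, user_ticks_to_jiffies(60L * USER_HZ));
	printf("and back to USER_HZ ticks: %ld\n",
	       jiffies_to_user_ticks(user_ticks_to_jiffies(60L * USER_HZ)));
	printf("60000 ms as jiffies: %ld\n", msecs_to_jiffies_sketch(60000));
	printf("a 450-jiffy command took about %ld ms\n",
	       jiffies_to_msecs_sketch(450));
	return 0;
}

The duration change matters because the old expression multiplied by the integer quotient 1000 / HZ, which is 0 for HZ > 1000 and lossy whenever HZ does not divide 1000 evenly; multiplying first and dividing by HZ afterwards keeps the result in whole milliseconds.
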
@@ -207,11 +207,8 @@ int blkdev_ioctl(struct inode *inode, struct file *file, unsigned cmd,
 		set_device_ro(bdev, n);
 		return 0;
 	default:
-		if (disk->fops->ioctl) {
-			ret = disk->fops->ioctl(inode, file, cmd, arg);
-			if (ret != -EINVAL)
-				return ret;
-		}
+		if (disk->fops->ioctl)
+			return disk->fops->ioctl(inode, file, cmd, arg);
 	}
 	return -ENOTTY;
 }
@@ -68,7 +68,6 @@ static int blk_do_rq(request_queue_t *q, struct block_device *bdev,
 	rq->flags |= REQ_NOMERGE;
 	rq->waiting = &wait;
-	drive_stat_acct(rq, rq->nr_sectors, 1);
 	elv_add_request(q, rq, 1, 1);
 	generic_unplug_device(q);
 	wait_for_completion(&wait);
@@ -99,7 +98,7 @@ static int scsi_get_bus(request_queue_t *q, int *p)
 
 static int sg_get_timeout(request_queue_t *q)
 {
-	return q->sg_timeout;
+	return q->sg_timeout / (HZ / USER_HZ);
 }
 
 static int sg_set_timeout(request_queue_t *q, int *p)
@@ -107,7 +106,7 @@ static int sg_set_timeout(request_queue_t *q, int *p)
 	int timeout, err = get_user(timeout, p);
 
 	if (!err)
-		q->sg_timeout = timeout;
+		q->sg_timeout = timeout * (HZ / USER_HZ);
 
 	return err;
 }
@@ -121,10 +120,14 @@ static int sg_set_reserved_size(request_queue_t *q, int *p)
 {
 	int size, err = get_user(size, p);
 
-	if (!err)
-		q->sg_reserved_size = size;
+	if (err)
+		return err;
 
-	return err;
+	if (size > (q->max_sectors << 9))
+		return -EINVAL;
+
+	q->sg_reserved_size = size;
+	return 0;
 }
 
 /*
@@ -139,16 +142,14 @@ static int sg_emulated_host(request_queue_t *q, int *p)
 
 static int sg_io(request_queue_t *q, struct block_device *bdev,
 		 struct sg_io_hdr *uptr)
 {
-	unsigned long uaddr, start_time;
-	int reading, writing, nr_sectors;
+	unsigned long start_time;
+	int reading, writing;
 	struct sg_io_hdr hdr;
 	struct request *rq;
 	struct bio *bio;
 	char sense[SCSI_SENSE_BUFFERSIZE];
 	void *buffer;
 
-	if (!access_ok(VERIFY_WRITE, uptr, sizeof(*uptr)))
-		return -EFAULT;
 	if (copy_from_user(&hdr, uptr, sizeof(*uptr)))
 		return -EFAULT;
@@ -156,11 +157,6 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 		return -EINVAL;
 	if (hdr.cmd_len > sizeof(rq->cmd))
 		return -EINVAL;
-	if (!access_ok(VERIFY_READ, hdr.cmdp, hdr.cmd_len))
-		return -EFAULT;
-
-	if (hdr.dxfer_len > 65536)
-		return -EINVAL;
 
 	/*
 	 * we'll do that later
@@ -168,7 +164,9 @@
 	if (hdr.iovec_count)
 		return -EOPNOTSUPP;
 
-	nr_sectors = 0;
+	if (hdr.dxfer_len > (q->max_sectors << 9))
+		return -EIO;
+
 	reading = writing = 0;
 	buffer = NULL;
 	bio = NULL;
@@ -189,19 +187,12 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 			break;
 		}
 
-		uaddr = (unsigned long) hdr.dxferp;
-
-		/* writing to device -> reading from vm */
-		if (writing && !access_ok(VERIFY_READ, uaddr, bytes))
-			return -EFAULT;
-		/* reading from device -> writing to vm */
-		else if (reading && !access_ok(VERIFY_WRITE, uaddr, bytes))
-			return -EFAULT;
-
 		/*
 		 * first try to map it into a bio. reading from device will
 		 * be a write to vm.
 		 */
-		bio = bio_map_user(bdev, uaddr, hdr.dxfer_len, reading);
+		bio = bio_map_user(bdev, (unsigned long) hdr.dxferp,
+				   hdr.dxfer_len, reading);
 
 		/*
 		 * if bio setup failed, fall back to slow approach
@@ -211,10 +202,11 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 			if (!buffer)
 				return -ENOMEM;
 
-			nr_sectors = bytes >> 9;
-			if (writing)
-				copy_from_user(buffer,hdr.dxferp,hdr.dxfer_len);
-			else
+			if (writing) {
+				if (copy_from_user(buffer, hdr.dxferp,
+						   hdr.dxfer_len))
+					goto out_buffer;
+			} else
 				memset(buffer, 0, hdr.dxfer_len);
 		}
 	}
@@ -225,7 +217,8 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	 * fill in request structure
 	 */
 	rq->cmd_len = hdr.cmd_len;
-	copy_from_user(rq->cmd, hdr.cmdp, hdr.cmd_len);
+	if (copy_from_user(rq->cmd, hdr.cmdp, hdr.cmd_len))
+		goto out_request;
 	if (sizeof(rq->cmd) != hdr.cmd_len)
 		memset(rq->cmd + hdr.cmd_len, 0, sizeof(rq->cmd) - hdr.cmd_len);
@@ -235,18 +228,15 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	rq->flags |= REQ_BLOCK_PC;
-	rq->hard_nr_sectors = rq->nr_sectors = nr_sectors;
-	rq->hard_cur_sectors = rq->current_nr_sectors = nr_sectors;
-	rq->bio = rq->biotail = bio;
+	rq->bio = rq->biotail = NULL;
+
+	if (bio)
+		blk_rq_bio_prep(q, rq, bio);
 
-	rq->data_len = hdr.dxfer_len;
 	rq->data = buffer;
+	rq->data_len = hdr.dxfer_len;
 
-	rq->timeout = hdr.timeout;
+	rq->timeout = (hdr.timeout * HZ) / 1000;
 	if (!rq->timeout)
 		rq->timeout = q->sg_timeout;
 	if (!rq->timeout)
@@ -273,12 +263,11 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 	if (hdr.masked_status || hdr.host_status || hdr.driver_status)
 		hdr.info |= SG_INFO_CHECK;
 	hdr.resid = rq->data_len;
-	hdr.duration = (jiffies - start_time) * (1000 / HZ);
+	hdr.duration = ((jiffies - start_time) * 1000) / HZ;
 	hdr.sb_len_wr = 0;
 
 	if (rq->sense_len && hdr.sbp) {
-		int len = (hdr.mx_sb_len < rq->sense_len) ?
-			  hdr.mx_sb_len : rq->sense_len;
+		int len = min((unsigned int) hdr.mx_sb_len, rq->sense_len);
 
 		if (!copy_to_user(hdr.sbp, rq->sense, len))
 			hdr.sb_len_wr = len;
@@ -286,17 +275,25 @@ static int sg_io(request_queue_t *q, struct block_device *bdev,
 
 	blk_put_request(rq);
 
-	copy_to_user(uptr, &hdr, sizeof(*uptr));
+	if (copy_to_user(uptr, &hdr, sizeof(*uptr)))
+		goto out_buffer;
+
 	if (buffer) {
 		if (reading)
-			copy_to_user(hdr.dxferp, buffer, hdr.dxfer_len);
+			if (copy_to_user(hdr.dxferp, buffer, hdr.dxfer_len))
+				goto out_buffer;
 
 		kfree(buffer);
 	}
 
 	/* may not have succeeded, but output values written to control
 	 * structure (struct sg_io_hdr). */
 	return 0;
+out_request:
+	blk_put_request(rq);
+out_buffer:
+	kfree(buffer);
+	return -EFAULT;
 }
 
 #define FORMAT_UNIT_TIMEOUT	(2 * 60 * 60 * HZ)
......
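
Before the ide-cd hunks, a note on the status-byte change: the CHECK_CONDITION constant used previously is the already-shifted value meant for status_byte(), while rq->errors is expected to carry the raw SAM status byte, which sits one bit higher. A small illustrative program, not part of the patch; the numeric values are the standard SCSI ones and the macros are simplified stand-ins for the kernel's definitions:

#include <assert.h>

/* Simplified stand-ins for the kernel's constants (values per SAM/SPC). */
#define CHECK_CONDITION			0x01	/* "shifted" form, for status_byte() */
#define SAM_STAT_CHECK_CONDITION	0x02	/* raw SCSI status byte */

/* status_byte() keeps only the status bits, dropping the LSB. */
#define status_byte(result)	(((result) >> 1) & 0x7f)

int main(void)
{
	/* The raw SAM value is the old constant shifted up by one bit ... */
	assert(SAM_STAT_CHECK_CONDITION == (CHECK_CONDITION << 1));

	/* ... so a raw SAM value stored in rq->errors still decodes correctly. */
	assert(status_byte(SAM_STAT_CHECK_CONDITION) == CHECK_CONDITION);
	return 0;
}
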
@@ -666,8 +666,10 @@ static void cdrom_end_request (ide_drive_t *drive, int uptodate)
 		struct cdrom_info *info = drive->driver_data;
 		void *sense = &info->sense_data;
 
-		if (failed && failed->sense)
+		if (failed && failed->sense) {
 			sense = failed->sense;
+			failed->sense_len = rq->sense_len;
+		}
 
 		cdrom_analyze_sense_data(drive, failed, sense);
 	}
@@ -723,7 +725,7 @@ static int cdrom_decode_status(ide_drive_t *drive, int good_stat, int *stat_ret)
 	 * scsi status byte
 	 */
 	if ((rq->flags & REQ_BLOCK_PC) && !rq->errors)
-		rq->errors = CHECK_CONDITION;
+		rq->errors = SAM_STAT_CHECK_CONDITION;
 
 	/* Check for tray open. */
 	if (sense_key == NOT_READY) {
@@ -1471,8 +1473,9 @@ static ide_startstop_t cdrom_pc_intr (ide_drive_t *drive)
 		/* Keep count of how much data we've moved. */
 		rq->data += thislen;
 		rq->data_len -= thislen;
-		if (rq->cmd[0] == GPCMD_REQUEST_SENSE)
-			rq->sense_len++;
+
+		if (rq->flags & REQ_SENSE)
+			rq->sense_len += thislen;
 	} else {
 confused:
 		printk ("%s: cdrom_pc_intr: The drive "
@@ -1609,12 +1612,20 @@ static inline int cdrom_write_check_ireason(ide_drive_t *drive, int len, int ireason)
 
 static void post_transform_command(struct request *req)
 {
-	char *ibuf = req->buffer;
 	u8 *c = req->cmd;
+	char *ibuf;
 
 	if (!blk_pc_request(req))
 		return;
 
+	if (req->bio)
+		ibuf = bio_data(req->bio);
+	else
+		ibuf = req->data;
+
+	if (!ibuf)
+		return;
+
 	/*
 	 * set ansi-revision and response data as atapi
 	 */
......
@@ -538,12 +538,6 @@ struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
 
 	bio = __bio_map_user(bdev, uaddr, len, write_to_vm);
 	if (bio) {
-		if (bio->bi_size < len) {
-			bio_endio(bio, bio->bi_size, 0);
-			bio_unmap_user(bio, 0);
-			return NULL;
-		}
-
 		/*
 		 * subtle -- if __bio_map_user() ended up bouncing a bio,
 		 * it would normally disappear when its bi_end_io is run.
@@ -551,6 +545,12 @@ struct bio *bio_map_user(struct block_device *bdev, unsigned long uaddr,
 		 * reference to it
 		 */
 		bio_get(bio);
+
+		if (bio->bi_size < len) {
+			bio_endio(bio, bio->bi_size, 0);
+			bio_unmap_user(bio, 0);
+			return NULL;
+		}
 	}
 
 	return bio;
......
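
On the bio_map_user() ordering fix above: __bio_map_user() may hand back a bio that was bounced, and a bounced bio is freed by its completion handler once bio_endio() runs, so the extra bio_get() reference has to be taken before the short-mapping error path calls bio_endio() and bio_unmap_user(), not after. The toy user-space model below only illustrates that ordering; the struct and helper names are invented and are not the kernel API.

#include <assert.h>
#include <stdlib.h>

/* Toy stand-in for a bio: one reference is owned by the mapping itself. */
struct toy_bio {
	int refs;
	unsigned int size;
};

static struct toy_bio *toy_map(unsigned int mapped)
{
	struct toy_bio *bio = malloc(sizeof(*bio));

	bio->refs = 1;		/* reference held by the mapping */
	bio->size = mapped;
	return bio;
}

static void toy_put(struct toy_bio *bio)
{
	if (--bio->refs == 0)
		free(bio);
}

/* Models bio_endio() on a bounced bio: completion drops the mapping's ref. */
static void toy_endio(struct toy_bio *bio)
{
	toy_put(bio);
}

/* Models bio_unmap_user(): needs the bio still alive, then drops one ref. */
static void toy_unmap(struct toy_bio *bio)
{
	assert(bio->refs > 0);	/* would be a use-after-free without the get */
	toy_put(bio);
}

/* Patched ordering: take the caller's reference first, then unwind errors. */
static struct toy_bio *toy_map_user(unsigned int want, unsigned int got)
{
	struct toy_bio *bio = toy_map(got);

	bio->refs++;		/* bio_get() before any error handling */

	if (bio->size < want) {
		toy_endio(bio);	/* may drop the mapping's own reference */
		toy_unmap(bio);	/* still safe: our reference keeps it alive */
		return NULL;
	}
	return bio;
}

int main(void)
{
	assert(toy_map_user(4096, 2048) == NULL);	/* short mapping unwinds cleanly */

	struct toy_bio *bio = toy_map_user(4096, 4096);
	assert(bio && bio->refs == 2);
	toy_unmap(bio);		/* normal completion path drops both refs */
	toy_put(bio);
	return 0;
}
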