Commit 90012e6e authored by Alexander Viro, committed by Linus Torvalds

[PATCH] (13/15) big struct block_device * push (first series)

 - *NOW* all places that (re)assign ->bi_dev have relevant struct
   block_device *.  ->bi_bdev (struct block_device * equivalent of
   ->bi_dev) introduced, ->bi_dev removed, users updated.
parent 61e3b7fc
...@@ -2944,6 +2944,10 @@ static inline void DAC960_ProcessCompletedBuffer(BufferHeader_T *BufferHeader, ...@@ -2944,6 +2944,10 @@ static inline void DAC960_ProcessCompletedBuffer(BufferHeader_T *BufferHeader,
BufferHeader->bi_end_io(BufferHeader); BufferHeader->bi_end_io(BufferHeader);
} }
/*
 * Return the partition number of the block device targeted by Command's
 * buffer header.  Introduced so callers use the new ->bi_bdev
 * (struct block_device *) instead of the removed kdev_t ->bi_dev;
 * bd_dev is converted back to a kdev_t for DAC960_PartitionNumber().
 *
 * NOTE(review): assumes Command->BufferHeader and its bi_bdev are
 * non-NULL -- holds for the read/write error paths that call this,
 * where a buffer header is always attached; confirm for new callers.
 */
static inline int DAC960_PartitionByCommand(DAC960_Command_T *Command)
{
return DAC960_PartitionNumber(to_kdev_t(Command->BufferHeader->bi_bdev->bd_dev));
}
/* /*
DAC960_V1_ReadWriteError prints an appropriate error message for Command DAC960_V1_ReadWriteError prints an appropriate error message for Command
...@@ -2995,11 +2999,11 @@ static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command) ...@@ -2995,11 +2999,11 @@ static void DAC960_V1_ReadWriteError(DAC960_Command_T *Command)
Controller, Controller->ControllerNumber, Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber, Command->BlockNumber, Command->LogicalDriveNumber, Command->BlockNumber,
Command->BlockNumber + Command->BlockCount - 1); Command->BlockNumber + Command->BlockCount - 1);
if (DAC960_PartitionNumber(Command->BufferHeader->bi_dev) > 0) if (DAC960_PartitionByCommand(Command) > 0)
DAC960_Error(" /dev/rd/c%dd%dp%d: relative blocks %u..%u\n", DAC960_Error(" /dev/rd/c%dd%dp%d: relative blocks %u..%u\n",
Controller, Controller->ControllerNumber, Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber, Command->LogicalDriveNumber,
DAC960_PartitionNumber(Command->BufferHeader->bi_dev), DAC960_PartitionByCommand(Command),
Command->BufferHeader->bi_sector, Command->BufferHeader->bi_sector,
Command->BufferHeader->bi_sector + Command->BlockCount - 1); Command->BufferHeader->bi_sector + Command->BlockCount - 1);
} }
...@@ -3859,11 +3863,11 @@ static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command) ...@@ -3859,11 +3863,11 @@ static void DAC960_V2_ReadWriteError(DAC960_Command_T *Command)
Controller, Controller->ControllerNumber, Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber, Command->BlockNumber, Command->LogicalDriveNumber, Command->BlockNumber,
Command->BlockNumber + Command->BlockCount - 1); Command->BlockNumber + Command->BlockCount - 1);
if (DAC960_PartitionNumber(Command->BufferHeader->bi_dev) > 0) if (DAC960_PartitionByCommand(Command) > 0)
DAC960_Error(" /dev/rd/c%dd%dp%d: relative blocks %u..%u\n", DAC960_Error(" /dev/rd/c%dd%dp%d: relative blocks %u..%u\n",
Controller, Controller->ControllerNumber, Controller, Controller->ControllerNumber,
Command->LogicalDriveNumber, Command->LogicalDriveNumber,
DAC960_PartitionNumber(Command->BufferHeader->bi_dev), DAC960_PartitionByCommand(Command),
Command->BufferHeader->bi_sector, Command->BufferHeader->bi_sector,
Command->BufferHeader->bi_sector + Command->BlockCount - 1); Command->BufferHeader->bi_sector + Command->BlockCount - 1);
} }
......
...@@ -111,7 +111,8 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio) ...@@ -111,7 +111,8 @@ inline int elv_rq_merge_ok(struct request *rq, struct bio *bio)
/* /*
* same device and no special stuff set, merge is ok * same device and no special stuff set, merge is ok
*/ */
if (kdev_same(rq->rq_dev, bio->bi_dev) && !rq->waiting && !rq->special) if (kdev_same(rq->rq_dev, to_kdev_t(bio->bi_bdev->bd_dev)) &&
!rq->waiting && !rq->special)
return 1; return 1;
return 0; return 0;
......
...@@ -1254,7 +1254,7 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1254,7 +1254,7 @@ static int __make_request(request_queue_t *q, struct bio *bio)
req->buffer = bio_data(bio); /* see ->buffer comment above */ req->buffer = bio_data(bio); /* see ->buffer comment above */
req->waiting = NULL; req->waiting = NULL;
req->bio = req->biotail = bio; req->bio = req->biotail = bio;
req->rq_dev = bio->bi_dev; req->rq_dev = to_kdev_t(bio->bi_bdev->bd_dev);
add_request(q, req, insert_here); add_request(q, req, insert_here);
out: out:
if (freereq) if (freereq)
...@@ -1273,23 +1273,19 @@ static int __make_request(request_queue_t *q, struct bio *bio) ...@@ -1273,23 +1273,19 @@ static int __make_request(request_queue_t *q, struct bio *bio)
*/ */
static inline void blk_partition_remap(struct bio *bio) static inline void blk_partition_remap(struct bio *bio)
{ {
int major, minor, drive, minor0; struct block_device *bdev = bio->bi_bdev;
struct gendisk *g; struct gendisk *g;
kdev_t dev0;
if (bdev == bdev->bd_contains)
major = major(bio->bi_dev); return;
if ((g = get_gendisk(bio->bi_dev))) {
minor = minor(bio->bi_dev); g = get_gendisk(to_kdev_t(bdev->bd_dev));
drive = (minor >> g->minor_shift); if (!g)
minor0 = (drive << g->minor_shift); /* whole disk device */ BUG();
/* that is, minor0 = (minor & ~((1<<g->minor_shift)-1)); */
dev0 = mk_kdev(major, minor0); bio->bi_sector += g->part[minor(to_kdev_t((bdev->bd_dev)))].start_sect;
if (!kdev_same(dev0, bio->bi_dev)) { bio->bi_bdev = bdev->bd_contains;
bio->bi_dev = dev0; /* lots of checks are possible */
bio->bi_sector += g->part[minor].start_sect;
}
/* lots of checks are possible */
}
} }
/** /**
...@@ -1324,7 +1320,7 @@ void generic_make_request(struct bio *bio) ...@@ -1324,7 +1320,7 @@ void generic_make_request(struct bio *bio)
int ret, nr_sectors = bio_sectors(bio); int ret, nr_sectors = bio_sectors(bio);
/* Test device or partition size, when known. */ /* Test device or partition size, when known. */
maxsector = (blkdev_size_in_bytes(bio->bi_dev) >> 9); maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
if (maxsector) { if (maxsector) {
sector_t sector = bio->bi_sector; sector_t sector = bio->bi_sector;
...@@ -1336,7 +1332,8 @@ void generic_make_request(struct bio *bio) ...@@ -1336,7 +1332,8 @@ void generic_make_request(struct bio *bio)
printk(KERN_INFO printk(KERN_INFO
"attempt to access beyond end of device\n"); "attempt to access beyond end of device\n");
printk(KERN_INFO "%s: rw=%ld, want=%ld, limit=%Lu\n", printk(KERN_INFO "%s: rw=%ld, want=%ld, limit=%Lu\n",
kdevname(bio->bi_dev), bio->bi_rw, kdevname(to_kdev_t(bio->bi_bdev->bd_dev)),
bio->bi_rw,
sector + nr_sectors, sector + nr_sectors,
(long long) maxsector); (long long) maxsector);
...@@ -1354,11 +1351,12 @@ void generic_make_request(struct bio *bio) ...@@ -1354,11 +1351,12 @@ void generic_make_request(struct bio *bio)
* Stacking drivers are expected to know what they are doing. * Stacking drivers are expected to know what they are doing.
*/ */
do { do {
q = blk_get_queue(bio->bi_dev); q = blk_get_queue(to_kdev_t(bio->bi_bdev->bd_dev));
if (!q) { if (!q) {
printk(KERN_ERR printk(KERN_ERR
"generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n", "generic_make_request: Trying to access nonexistent block-device %s (%Lu)\n",
kdevname(bio->bi_dev), (long long) bio->bi_sector); kdevname(to_kdev_t(bio->bi_bdev->bd_dev)),
(long long) bio->bi_sector);
end_io: end_io:
bio->bi_end_io(bio); bio->bi_end_io(bio);
break; break;
...@@ -1445,7 +1443,7 @@ int submit_bh(int rw, struct buffer_head * bh) ...@@ -1445,7 +1443,7 @@ int submit_bh(int rw, struct buffer_head * bh)
bio = bio_alloc(GFP_NOIO, 1); bio = bio_alloc(GFP_NOIO, 1);
bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9); bio->bi_sector = bh->b_blocknr * (bh->b_size >> 9);
bio->bi_dev = bh->b_dev; bio->bi_bdev = bh->b_bdev;
bio->bi_io_vec[0].bv_page = bh->b_page; bio->bi_io_vec[0].bv_page = bh->b_page;
bio->bi_io_vec[0].bv_len = bh->b_size; bio->bi_io_vec[0].bv_len = bh->b_size;
bio->bi_io_vec[0].bv_offset = bh_offset(bh); bio->bi_io_vec[0].bv_offset = bh_offset(bh);
......
...@@ -380,7 +380,7 @@ static struct bio *loop_get_bio(struct loop_device *lo) ...@@ -380,7 +380,7 @@ static struct bio *loop_get_bio(struct loop_device *lo)
static void loop_end_io_transfer(struct bio *bio) static void loop_end_io_transfer(struct bio *bio)
{ {
struct bio *rbh = bio->bi_private; struct bio *rbh = bio->bi_private;
struct loop_device *lo = &loop_dev[minor(rbh->bi_dev)]; struct loop_device *lo = &loop_dev[minor(to_kdev_t(rbh->bi_bdev->bd_dev))];
int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags); int uptodate = test_bit(BIO_UPTODATE, &bio->bi_flags);
if (!uptodate || bio_rw(bio) == WRITE) { if (!uptodate || bio_rw(bio) == WRITE) {
...@@ -413,7 +413,7 @@ static struct bio *loop_get_buffer(struct loop_device *lo, struct bio *rbh) ...@@ -413,7 +413,7 @@ static struct bio *loop_get_buffer(struct loop_device *lo, struct bio *rbh)
bio->bi_sector = rbh->bi_sector + (lo->lo_offset >> 9); bio->bi_sector = rbh->bi_sector + (lo->lo_offset >> 9);
bio->bi_rw = rbh->bi_rw; bio->bi_rw = rbh->bi_rw;
spin_lock_irq(&lo->lo_lock); spin_lock_irq(&lo->lo_lock);
bio->bi_dev = to_kdev_t(lo->lo_device->bd_dev); bio->bi_bdev = lo->lo_device;
spin_unlock_irq(&lo->lo_lock); spin_unlock_irq(&lo->lo_lock);
return bio; return bio;
...@@ -425,11 +425,12 @@ static int loop_make_request(request_queue_t *q, struct bio *rbh) ...@@ -425,11 +425,12 @@ static int loop_make_request(request_queue_t *q, struct bio *rbh)
struct loop_device *lo; struct loop_device *lo;
unsigned long IV; unsigned long IV;
int rw = bio_rw(rbh); int rw = bio_rw(rbh);
int unit = minor(to_kdev_t(rbh->bi_bdev->bd_dev));
if (minor(rbh->bi_dev) >= max_loop) if (unit >= max_loop)
goto out; goto out;
lo = &loop_dev[minor(rbh->bi_dev)]; lo = &loop_dev[unit];
spin_lock_irq(&lo->lo_lock); spin_lock_irq(&lo->lo_lock);
if (lo->lo_state != Lo_bound) if (lo->lo_state != Lo_bound)
goto inactive; goto inactive;
......
...@@ -244,7 +244,7 @@ static int rd_make_request(request_queue_t * q, struct bio *sbh) ...@@ -244,7 +244,7 @@ static int rd_make_request(request_queue_t * q, struct bio *sbh)
unsigned long offset, len; unsigned long offset, len;
int rw = sbh->bi_rw; int rw = sbh->bi_rw;
minor = minor(sbh->bi_dev); minor = minor(to_kdev_t(sbh->bi_bdev->bd_dev));
if (minor >= NUM_RAMDISKS) if (minor >= NUM_RAMDISKS)
goto fail; goto fail;
......
...@@ -156,7 +156,7 @@ static int linear_make_request (mddev_t *mddev, int rw, struct bio *bio) ...@@ -156,7 +156,7 @@ static int linear_make_request (mddev_t *mddev, int rw, struct bio *bio)
bio_io_error(bio); bio_io_error(bio);
return 0; return 0;
} }
bio->bi_dev = tmp_dev->dev; bio->bi_bdev = tmp_dev->bdev;
bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1); bio->bi_sector = bio->bi_sector - (tmp_dev->offset << 1);
return 1; return 1;
......
#error Broken until maintainers will sanitize kdev_t handling
/* /*
* kernel/lvm.c * kernel/lvm.c
* *
......
...@@ -172,7 +172,7 @@ void del_mddev_mapping(mddev_t * mddev, kdev_t dev) ...@@ -172,7 +172,7 @@ void del_mddev_mapping(mddev_t * mddev, kdev_t dev)
static int md_make_request (request_queue_t *q, struct bio *bio) static int md_make_request (request_queue_t *q, struct bio *bio)
{ {
mddev_t *mddev = kdev_to_mddev(bio->bi_dev); mddev_t *mddev = kdev_to_mddev(to_kdev_t(bio->bi_bdev->bd_dev));
if (mddev && mddev->pers) if (mddev && mddev->pers)
return mddev->pers->make_request(mddev, bio_rw(bio), bio); return mddev->pers->make_request(mddev, bio_rw(bio), bio);
......
...@@ -137,7 +137,7 @@ static void multipath_shrink_mpbh(multipath_conf_t *conf) ...@@ -137,7 +137,7 @@ static void multipath_shrink_mpbh(multipath_conf_t *conf)
} }
static int multipath_map (mddev_t *mddev, kdev_t *dev) static int multipath_map (mddev_t *mddev, struct block_device **bdev)
{ {
multipath_conf_t *conf = mddev_to_conf(mddev); multipath_conf_t *conf = mddev_to_conf(mddev);
int i, disks = MD_SB_DISKS; int i, disks = MD_SB_DISKS;
...@@ -149,7 +149,7 @@ static int multipath_map (mddev_t *mddev, kdev_t *dev) ...@@ -149,7 +149,7 @@ static int multipath_map (mddev_t *mddev, kdev_t *dev)
for (i = 0; i < disks; i++) { for (i = 0; i < disks; i++) {
if (conf->multipaths[i].operational) { if (conf->multipaths[i].operational) {
*dev = conf->multipaths[i].dev; *bdev = conf->multipaths[i].bdev;
return (0); return (0);
} }
} }
...@@ -198,7 +198,7 @@ void multipath_end_request(struct bio *bio) ...@@ -198,7 +198,7 @@ void multipath_end_request(struct bio *bio)
* this branch is our 'one multipath IO has finished' event handler: * this branch is our 'one multipath IO has finished' event handler:
*/ */
if (!uptodate) if (!uptodate)
md_error (mp_bh->mddev, bio->bi_dev); md_error (mp_bh->mddev, to_kdev_t(bio->bi_bdev->bd_dev));
else else
/* /*
* Set MPBH_Uptodate in our master buffer_head, so that * Set MPBH_Uptodate in our master buffer_head, so that
...@@ -220,7 +220,7 @@ void multipath_end_request(struct bio *bio) ...@@ -220,7 +220,7 @@ void multipath_end_request(struct bio *bio)
* oops, IO error: * oops, IO error:
*/ */
printk(KERN_ERR "multipath: %s: rescheduling sector %lu\n", printk(KERN_ERR "multipath: %s: rescheduling sector %lu\n",
partition_name(bio->bi_dev), bio->bi_sector); bdev_partition_name(bio->bi_bdev), bio->bi_sector);
multipath_reschedule_retry(mp_bh); multipath_reschedule_retry(mp_bh);
return; return;
} }
...@@ -269,7 +269,7 @@ static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio) ...@@ -269,7 +269,7 @@ static int multipath_make_request (mddev_t *mddev, int rw, struct bio * bio)
multipath = conf->multipaths + multipath_read_balance(conf); multipath = conf->multipaths + multipath_read_balance(conf);
real_bio = bio_clone(bio, GFP_NOIO); real_bio = bio_clone(bio, GFP_NOIO);
real_bio->bi_dev = multipath->dev; real_bio->bi_bdev = multipath->bdev;
real_bio->bi_rw = rw; real_bio->bi_rw = rw;
real_bio->bi_end_io = multipath_end_request; real_bio->bi_end_io = multipath_end_request;
real_bio->bi_private = mp_bh; real_bio->bi_private = mp_bh;
...@@ -692,7 +692,7 @@ static void multipathd (void *data) ...@@ -692,7 +692,7 @@ static void multipathd (void *data)
struct bio *bio; struct bio *bio;
unsigned long flags; unsigned long flags;
mddev_t *mddev; mddev_t *mddev;
kdev_t dev; struct block_device *bdev;
for (;;) { for (;;) {
spin_lock_irqsave(&retry_list_lock, flags); spin_lock_irqsave(&retry_list_lock, flags);
...@@ -709,16 +709,16 @@ static void multipathd (void *data) ...@@ -709,16 +709,16 @@ static void multipathd (void *data)
md_update_sb(mddev); md_update_sb(mddev);
} }
bio = mp_bh->bio; bio = mp_bh->bio;
dev = bio->bi_dev; bdev = bio->bi_bdev;
multipath_map (mddev, &bio->bi_dev); multipath_map (mddev, &bio->bi_bdev);
if (kdev_same(bio->bi_dev, dev)) { if (bio->bi_bdev == bdev) {
printk(IO_ERROR, printk(IO_ERROR,
partition_name(bio->bi_dev), bio->bi_sector); bdev_partition_name(bio->bi_bdev), bio->bi_sector);
multipath_end_bh_io(mp_bh, 0); multipath_end_bh_io(mp_bh, 0);
} else { } else {
printk(REDIRECT_SECTOR, printk(REDIRECT_SECTOR,
partition_name(bio->bi_dev), bio->bi_sector); bdev_partition_name(bio->bi_bdev), bio->bi_sector);
generic_make_request(bio); generic_make_request(bio);
} }
} }
......
...@@ -264,7 +264,7 @@ static int raid0_make_request (mddev_t *mddev, int rw, struct bio *bio) ...@@ -264,7 +264,7 @@ static int raid0_make_request (mddev_t *mddev, int rw, struct bio *bio)
* The new BH_Lock semantics in ll_rw_blk.c guarantee that this * The new BH_Lock semantics in ll_rw_blk.c guarantee that this
* is the only IO operation happening on this bh. * is the only IO operation happening on this bh.
*/ */
bio->bi_dev = tmp_dev->dev; bio->bi_bdev = tmp_dev->bdev;
bio->bi_sector = rsect; bio->bi_sector = rsect;
/* /*
......
...@@ -186,7 +186,7 @@ static inline void put_buf(r1bio_t *r1_bio) ...@@ -186,7 +186,7 @@ static inline void put_buf(r1bio_t *r1_bio)
mempool_free(r1_bio, conf->r1buf_pool); mempool_free(r1_bio, conf->r1buf_pool);
} }
static int map(mddev_t *mddev, kdev_t *rdev) static int map(mddev_t *mddev, struct block_device **bdev)
{ {
conf_t *conf = mddev_to_conf(mddev); conf_t *conf = mddev_to_conf(mddev);
int i, disks = MD_SB_DISKS; int i, disks = MD_SB_DISKS;
...@@ -198,7 +198,7 @@ static int map(mddev_t *mddev, kdev_t *rdev) ...@@ -198,7 +198,7 @@ static int map(mddev_t *mddev, kdev_t *rdev)
for (i = 0; i < disks; i++) { for (i = 0; i < disks; i++) {
if (conf->mirrors[i].operational) { if (conf->mirrors[i].operational) {
*rdev = conf->mirrors[i].dev; *bdev = conf->mirrors[i].bdev;
return 0; return 0;
} }
} }
...@@ -255,7 +255,7 @@ static void end_request(struct bio *bio) ...@@ -255,7 +255,7 @@ static void end_request(struct bio *bio)
* this branch is our 'one mirror IO has finished' event handler: * this branch is our 'one mirror IO has finished' event handler:
*/ */
if (!uptodate) if (!uptodate)
md_error(r1_bio->mddev, bio->bi_dev); md_error(r1_bio->mddev, to_kdev_t(bio->bi_bdev->bd_dev));
else else
/* /*
* Set R1BIO_Uptodate in our master bio, so that * Set R1BIO_Uptodate in our master bio, so that
...@@ -283,7 +283,7 @@ static void end_request(struct bio *bio) ...@@ -283,7 +283,7 @@ static void end_request(struct bio *bio)
* oops, read error: * oops, read error:
*/ */
printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n", printk(KERN_ERR "raid1: %s: rescheduling sector %lu\n",
partition_name(bio->bi_dev), r1_bio->sector); bdev_partition_name(bio->bi_bdev), r1_bio->sector);
reschedule_retry(r1_bio); reschedule_retry(r1_bio);
return; return;
} }
...@@ -479,7 +479,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio) ...@@ -479,7 +479,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
r1_bio->read_bio = read_bio; r1_bio->read_bio = read_bio;
read_bio->bi_sector = r1_bio->sector; read_bio->bi_sector = r1_bio->sector;
read_bio->bi_dev = mirror->dev; read_bio->bi_bdev = mirror->bdev;
read_bio->bi_end_io = end_request; read_bio->bi_end_io = end_request;
read_bio->bi_rw = rw; read_bio->bi_rw = rw;
read_bio->bi_private = r1_bio; read_bio->bi_private = r1_bio;
...@@ -503,7 +503,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio) ...@@ -503,7 +503,7 @@ static int make_request(mddev_t *mddev, int rw, struct bio * bio)
r1_bio->write_bios[i] = mbio; r1_bio->write_bios[i] = mbio;
mbio->bi_sector = r1_bio->sector; mbio->bi_sector = r1_bio->sector;
mbio->bi_dev = conf->mirrors[i].dev; mbio->bi_bdev = conf->mirrors[i].bdev;
mbio->bi_end_io = end_request; mbio->bi_end_io = end_request;
mbio->bi_rw = rw; mbio->bi_rw = rw;
mbio->bi_private = r1_bio; mbio->bi_private = r1_bio;
...@@ -947,7 +947,7 @@ static void end_sync_read(struct bio *bio) ...@@ -947,7 +947,7 @@ static void end_sync_read(struct bio *bio)
* We don't do much here, just schedule handling by raid1d * We don't do much here, just schedule handling by raid1d
*/ */
if (!uptodate) if (!uptodate)
md_error (r1_bio->mddev, bio->bi_dev); md_error (r1_bio->mddev, to_kdev_t(bio->bi_bdev->bd_dev));
else else
set_bit(R1BIO_Uptodate, &r1_bio->state); set_bit(R1BIO_Uptodate, &r1_bio->state);
reschedule_retry(r1_bio); reschedule_retry(r1_bio);
...@@ -961,7 +961,7 @@ static void end_sync_write(struct bio *bio) ...@@ -961,7 +961,7 @@ static void end_sync_write(struct bio *bio)
int i; int i;
if (!uptodate) if (!uptodate)
md_error(mddev, bio->bi_dev); md_error(mddev, to_kdev_t(bio->bi_bdev->bd_dev));
for (i = 0; i < MD_SB_DISKS; i++) for (i = 0; i < MD_SB_DISKS; i++)
if (r1_bio->write_bios[i] == bio) { if (r1_bio->write_bios[i] == bio) {
...@@ -995,7 +995,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -995,7 +995,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* There is no point trying a read-for-reconstruct as * There is no point trying a read-for-reconstruct as
* reconstruct is about to be aborted * reconstruct is about to be aborted
*/ */
printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector); printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0); md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
resume_device(conf); resume_device(conf);
put_buf(r1_bio); put_buf(r1_bio);
...@@ -1020,7 +1020,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -1020,7 +1020,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
if (r1_bio->write_bios[i]) if (r1_bio->write_bios[i])
BUG(); BUG();
r1_bio->write_bios[i] = mbio; r1_bio->write_bios[i] = mbio;
mbio->bi_dev = conf->mirrors[i].dev; mbio->bi_bdev = conf->mirrors[i].bdev;
mbio->bi_sector = r1_bio->sector; mbio->bi_sector = r1_bio->sector;
mbio->bi_end_io = end_sync_write; mbio->bi_end_io = end_sync_write;
mbio->bi_rw = WRITE; mbio->bi_rw = WRITE;
...@@ -1038,7 +1038,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -1038,7 +1038,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
* Nowhere to write this to... I guess we * Nowhere to write this to... I guess we
* must be done * must be done
*/ */
printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector); printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0); md_done_sync(mddev, r1_bio->master_bio->bi_size >> 9, 0);
resume_device(conf); resume_device(conf);
put_buf(r1_bio); put_buf(r1_bio);
...@@ -1049,7 +1049,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio) ...@@ -1049,7 +1049,7 @@ static void sync_request_write(mddev_t *mddev, r1bio_t *r1_bio)
if (!mbio) if (!mbio)
continue; continue;
md_sync_acct(mbio->bi_dev, mbio->bi_size >> 9); md_sync_acct(to_kdev_t(mbio->bi_bdev->bd_dev), mbio->bi_size >> 9);
generic_make_request(mbio); generic_make_request(mbio);
atomic_inc(&conf->mirrors[i].nr_pending); atomic_inc(&conf->mirrors[i].nr_pending);
} }
...@@ -1071,7 +1071,7 @@ static void raid1d(void *data) ...@@ -1071,7 +1071,7 @@ static void raid1d(void *data)
unsigned long flags; unsigned long flags;
mddev_t *mddev; mddev_t *mddev;
conf_t *conf; conf_t *conf;
kdev_t dev; struct block_device *bdev;
for (;;) { for (;;) {
...@@ -1096,15 +1096,15 @@ static void raid1d(void *data) ...@@ -1096,15 +1096,15 @@ static void raid1d(void *data)
break; break;
case READ: case READ:
case READA: case READA:
dev = bio->bi_dev; bdev = bio->bi_bdev;
map(mddev, &bio->bi_dev); map(mddev, &bio->bi_bdev);
if (kdev_same(bio->bi_dev, dev)) { if (bio->bi_bdev == bdev) {
printk(IO_ERROR, partition_name(bio->bi_dev), r1_bio->sector); printk(IO_ERROR, bdev_partition_name(bio->bi_bdev), r1_bio->sector);
raid_end_bio_io(r1_bio, 0); raid_end_bio_io(r1_bio, 0);
break; break;
} }
printk(REDIRECT_SECTOR, printk(REDIRECT_SECTOR,
partition_name(bio->bi_dev), r1_bio->sector); bdev_partition_name(bio->bi_bdev), r1_bio->sector);
bio->bi_sector = r1_bio->sector; bio->bi_sector = r1_bio->sector;
bio->bi_rw = r1_bio->cmd; bio->bi_rw = r1_bio->cmd;
...@@ -1235,7 +1235,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster) ...@@ -1235,7 +1235,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
read_bio = bio_clone(r1_bio->master_bio, GFP_NOIO); read_bio = bio_clone(r1_bio->master_bio, GFP_NOIO);
read_bio->bi_sector = sector_nr; read_bio->bi_sector = sector_nr;
read_bio->bi_dev = mirror->dev; read_bio->bi_bdev = mirror->bdev;
read_bio->bi_end_io = end_sync_read; read_bio->bi_end_io = end_sync_read;
read_bio->bi_rw = READ; read_bio->bi_rw = READ;
read_bio->bi_private = r1_bio; read_bio->bi_private = r1_bio;
...@@ -1244,7 +1244,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster) ...@@ -1244,7 +1244,7 @@ static int sync_request(mddev_t *mddev, sector_t sector_nr, int go_faster)
BUG(); BUG();
r1_bio->read_bio = read_bio; r1_bio->read_bio = read_bio;
md_sync_acct(read_bio->bi_dev, nr_sectors); md_sync_acct(to_kdev_t(read_bio->bi_bdev->bd_dev), nr_sectors);
generic_make_request(read_bio); generic_make_request(read_bio);
atomic_inc(&conf->mirrors[conf->last_used].nr_pending); atomic_inc(&conf->mirrors[conf->last_used].nr_pending);
......
...@@ -202,7 +202,7 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src) ...@@ -202,7 +202,7 @@ inline void __bio_clone(struct bio *bio, struct bio *bio_src)
bio->bi_io_vec = bio_src->bi_io_vec; bio->bi_io_vec = bio_src->bi_io_vec;
bio->bi_sector = bio_src->bi_sector; bio->bi_sector = bio_src->bi_sector;
bio->bi_dev = bio_src->bi_dev; bio->bi_bdev = bio_src->bi_bdev;
bio->bi_flags |= 1 << BIO_CLONED; bio->bi_flags |= 1 << BIO_CLONED;
bio->bi_rw = bio_src->bi_rw; bio->bi_rw = bio_src->bi_rw;
...@@ -300,7 +300,7 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy) ...@@ -300,7 +300,7 @@ struct bio *bio_copy(struct bio *bio, int gfp_mask, int copy)
} }
b->bi_sector = bio->bi_sector; b->bi_sector = bio->bi_sector;
b->bi_dev = bio->bi_dev; b->bi_bdev = bio->bi_bdev;
b->bi_rw = bio->bi_rw; b->bi_rw = bio->bi_rw;
b->bi_vcnt = bio->bi_vcnt; b->bi_vcnt = bio->bi_vcnt;
...@@ -383,7 +383,7 @@ void ll_rw_kio(int rw, struct kiobuf *kio, struct block_device *bdev, sector_t s ...@@ -383,7 +383,7 @@ void ll_rw_kio(int rw, struct kiobuf *kio, struct block_device *bdev, sector_t s
} }
bio->bi_sector = sector; bio->bi_sector = sector;
bio->bi_dev = dev; bio->bi_bdev = bdev;
bio->bi_idx = 0; bio->bi_idx = 0;
bio->bi_end_io = bio_end_io_kio; bio->bi_end_io = bio_end_io_kio;
bio->bi_private = kio; bio->bi_private = kio;
......
...@@ -1773,7 +1773,7 @@ static int lbmRead(log_t * log, int pn, lbuf_t ** bpp) ...@@ -1773,7 +1773,7 @@ static int lbmRead(log_t * log, int pn, lbuf_t ** bpp)
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_dev = to_kdev_t(log->bdev->bd_dev); bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata); bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
bio->bi_io_vec[0].bv_len = LOGPSIZE; bio->bi_io_vec[0].bv_len = LOGPSIZE;
bio->bi_io_vec[0].bv_offset = 0; bio->bi_io_vec[0].bv_offset = 0;
...@@ -1915,7 +1915,7 @@ void lbmStartIO(lbuf_t * bp) ...@@ -1915,7 +1915,7 @@ void lbmStartIO(lbuf_t * bp)
bio = bio_alloc(GFP_NOFS, 1); bio = bio_alloc(GFP_NOFS, 1);
bio->bi_sector = bp->l_blkno << (log->l2bsize - 9); bio->bi_sector = bp->l_blkno << (log->l2bsize - 9);
bio->bi_dev = to_kdev_t(log->bdev->bd_dev); bio->bi_bdev = log->bdev;
bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata); bio->bi_io_vec[0].bv_page = virt_to_page(bp->l_ldata);
bio->bi_io_vec[0].bv_len = LOGPSIZE; bio->bi_io_vec[0].bv_len = LOGPSIZE;
bio->bi_io_vec[0].bv_offset = 0; bio->bi_io_vec[0].bv_offset = 0;
......
...@@ -60,7 +60,7 @@ typedef void (bio_destructor_t) (struct bio *); ...@@ -60,7 +60,7 @@ typedef void (bio_destructor_t) (struct bio *);
struct bio { struct bio {
sector_t bi_sector; sector_t bi_sector;
struct bio *bi_next; /* request queue link */ struct bio *bi_next; /* request queue link */
kdev_t bi_dev; /* will be block device */ struct block_device *bi_bdev;
unsigned long bi_flags; /* status, command, etc */ unsigned long bi_flags; /* status, command, etc */
unsigned long bi_rw; /* bottom bits READ/WRITE, unsigned long bi_rw; /* bottom bits READ/WRITE,
* top bits priority * top bits priority
......
...@@ -67,6 +67,10 @@ extern struct hd_struct md_hd_struct[MAX_MD_DEVS]; ...@@ -67,6 +67,10 @@ extern struct hd_struct md_hd_struct[MAX_MD_DEVS];
extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data); extern void add_mddev_mapping (mddev_t *mddev, kdev_t dev, void *data);
extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev); extern void del_mddev_mapping (mddev_t *mddev, kdev_t dev);
extern char * partition_name (kdev_t dev); extern char * partition_name (kdev_t dev);
/*
 * bdev_partition_name - printable name of the partition behind @bdev.
 *
 * Thin struct block_device * wrapper around partition_name(): converts
 * bdev->bd_dev back to a kdev_t and delegates.  Lets callers that only
 * hold a bio (->bi_bdev) print "hda3"-style names without touching
 * kdev_t themselves.
 *
 * Declared static inline rather than extern inline: a header-level
 * "extern inline" depends on gnu89 inline semantics and needs a
 * matching out-of-line definition under C99/modern compilers, whereas
 * static inline gives each translation unit its own copy with no
 * external linkage -- the standard kernel idiom for header helpers.
 * Callers are unaffected.
 *
 * NOTE(review): assumes @bdev is non-NULL, as at every call site in
 * this patch (all dereference bio->bi_bdev already).
 */
static inline char * bdev_partition_name (struct block_device *bdev)
{
	return partition_name(to_kdev_t(bdev->bd_dev));
}
extern int register_md_personality (int p_num, mdk_personality_t *p); extern int register_md_personality (int p_num, mdk_personality_t *p);
extern int unregister_md_personality (int p_num); extern int unregister_md_personality (int p_num);
extern mdk_thread_t * md_register_thread (void (*run) (void *data), extern mdk_thread_t * md_register_thread (void (*run) (void *data),
......
...@@ -423,7 +423,7 @@ void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig) ...@@ -423,7 +423,7 @@ void create_bounce(unsigned long pfn, int gfp, struct bio **bio_orig)
} }
} }
bio->bi_dev = (*bio_orig)->bi_dev; bio->bi_bdev = (*bio_orig)->bi_bdev;
bio->bi_sector = (*bio_orig)->bi_sector; bio->bi_sector = (*bio_orig)->bi_sector;
bio->bi_rw = (*bio_orig)->bi_rw; bio->bi_rw = (*bio_orig)->bi_rw;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment