Commit aa8b57aa authored by Kent Overstreet

block: Use bio_sectors() more consistently

A bunch of places in the code weren't using it where they could be -
this will reduce the size of the patch that puts bi_sector/bi_size/bi_idx
into a struct bvec_iter.
Signed-off-by: Kent Overstreet <koverstreet@google.com>
CC: Jens Axboe <axboe@kernel.dk>
CC: "Ed L. Cashin" <ecashin@coraid.com>
CC: Nick Piggin <npiggin@kernel.dk>
CC: Jiri Kosina <jkosina@suse.cz>
CC: Jim Paris <jim@jtan.com>
CC: Geoff Levand <geoff@infradead.org>
CC: Alasdair Kergon <agk@redhat.com>
CC: dm-devel@redhat.com
CC: Neil Brown <neilb@suse.de>
CC: Steven Rostedt <rostedt@goodmis.org>
Acked-by: Ed Cashin <ecashin@coraid.com>
parent f73a1c7d
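
For context, bio_sectors() expresses a bio's byte count (bi_size) in 512-byte sectors, which is exactly what the open-coded ">> 9" in the hunks below computes. A minimal sketch of the helper, assuming the pre-bvec_iter field layout this series is preparing to change (the authoritative definition lives in include/linux/bio.h):

    /* sketch only: bi_size is a byte count; a sector is 512 bytes (1 << 9) */
    #define bio_sectors(bio)        ((bio)->bi_size >> 9)

With that, every "bio->bi_size >> 9" below becomes bio_sectors(bio), and the two printk sites that report KiB via "bi_size >> 10" become bio_sectors(bio) / 2.
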
@@ -2433,7 +2433,7 @@ static void pkt_make_request(struct request_queue *q, struct bio *bio)
                cloned_bio->bi_bdev = pd->bdev;
                cloned_bio->bi_private = psd;
                cloned_bio->bi_end_io = pkt_end_io_read_cloned;
-               pd->stats.secs_r += bio->bi_size >> 9;
+               pd->stats.secs_r += bio_sectors(bio);
                pkt_queue_bio(pd, cloned_bio);
                return;
        }
@@ -458,7 +458,7 @@ static void map_region(struct dm_io_region *io, struct mirror *m,
{
        io->bdev = m->dev->bdev;
        io->sector = map_sector(m, bio);
-       io->count = bio->bi_size >> 9;
+       io->count = bio_sectors(bio);
}

static void hold_bio(struct mirror_set *ms, struct bio *bio)
@@ -502,11 +502,11 @@ static inline int is_io_in_chunk_boundary(struct mddev *mddev,
{
        if (likely(is_power_of_2(chunk_sects))) {
                return chunk_sects >= ((bio->bi_sector & (chunk_sects-1))
-                                       + (bio->bi_size >> 9));
+                                       + bio_sectors(bio));
        } else{
                sector_t sector = bio->bi_sector;
                return chunk_sects >= (sector_div(sector, chunk_sects)
-                               + (bio->bi_size >> 9));
+                               + bio_sectors(bio));
        }
}

@@ -567,7 +567,7 @@ static void raid0_make_request(struct mddev *mddev, struct bio *bio)
        printk("md/raid0:%s: make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n",
               mdname(mddev), chunk_sects / 2,
-              (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+              (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);

        bio_io_error(bio);
        return;
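
The printk just above changes its size argument from "bio->bi_size >> 10" (bytes to KiB) to "bio_sectors(bio) / 2"; halving the 512-byte sector count yields the same KiB value, so the message output is unchanged. A small stand-alone check of that arithmetic (illustrative user-space C, not kernel code):

    #include <assert.h>

    int main(void)
    {
            /* bytes >> 10 (KiB) equals (bytes >> 9) / 2 (sectors halved)
             * for any unsigned byte count, so the substitution is exact.
             */
            for (unsigned int bytes = 0; bytes <= (1u << 20); bytes += 512) {
                    unsigned int sectors = bytes >> 9;
                    assert((bytes >> 10) == sectors / 2);
            }
            return 0;
    }

The same substitution appears once more in the md/raid10 bad_map printk further down.
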
@@ -267,7 +267,7 @@ static void raid_end_bio_io(struct r1bio *r1_bio)
                         (bio_data_dir(bio) == WRITE) ? "write" : "read",
                         (unsigned long long) bio->bi_sector,
                         (unsigned long long) bio->bi_sector +
-                        (bio->bi_size >> 9) - 1);
+                        bio_sectors(bio) - 1);

                call_bio_endio(r1_bio);
        }

@@ -458,7 +458,7 @@ static void raid1_end_write_request(struct bio *bio, int error)
                                 " %llu-%llu\n",
                                 (unsigned long long) mbio->bi_sector,
                                 (unsigned long long) mbio->bi_sector +
-                                (mbio->bi_size >> 9) - 1);
+                                bio_sectors(mbio) - 1);
                        call_bio_endio(r1_bio);
                }
        }

@@ -1049,7 +1049,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

        r1_bio->master_bio = bio;
-       r1_bio->sectors = bio->bi_size >> 9;
+       r1_bio->sectors = bio_sectors(bio);
        r1_bio->state = 0;
        r1_bio->mddev = mddev;
        r1_bio->sector = bio->bi_sector;
@@ -1127,7 +1127,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

                        r1_bio->master_bio = bio;
-                       r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+                       r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r1_bio->state = 0;
                        r1_bio->mddev = mddev;
                        r1_bio->sector = bio->bi_sector + sectors_handled;

@@ -1329,14 +1329,14 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        /* Mustn't call r1_bio_write_done before this next test,
         * as it could result in the bio being freed.
         */
-       if (sectors_handled < (bio->bi_size >> 9)) {
+       if (sectors_handled < bio_sectors(bio)) {
                r1_bio_write_done(r1_bio);
                /* We need another r1_bio. It has already been counted
                 * in bio->bi_phys_segments
                 */
                r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);
                r1_bio->master_bio = bio;
-               r1_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+               r1_bio->sectors = bio_sectors(bio) - sectors_handled;
                r1_bio->state = 0;
                r1_bio->mddev = mddev;
                r1_bio->sector = bio->bi_sector + sectors_handled;

@@ -1947,7 +1947,7 @@ static void sync_request_write(struct mddev *mddev, struct r1bio *r1_bio)
                wbio->bi_rw = WRITE;
                wbio->bi_end_io = end_sync_write;
                atomic_inc(&r1_bio->remaining);
-               md_sync_acct(conf->mirrors[i].rdev->bdev, wbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[i].rdev->bdev, bio_sectors(wbio));
                generic_make_request(wbio);
        }
@@ -2284,8 +2284,7 @@ static void handle_read_error(struct r1conf *conf, struct r1bio *r1_bio)
                        r1_bio = mempool_alloc(conf->r1bio_pool, GFP_NOIO);

                        r1_bio->master_bio = mbio;
-                       r1_bio->sectors = (mbio->bi_size >> 9)
-                                         - sectors_handled;
+                       r1_bio->sectors = bio_sectors(mbio) - sectors_handled;
                        r1_bio->state = 0;
                        set_bit(R1BIO_ReadError, &r1_bio->state);
                        r1_bio->mddev = mddev;
@@ -1169,7 +1169,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
        /* If this request crosses a chunk boundary, we need to
         * split it. This will only happen for 1 PAGE (or less) requests.
         */
-       if (unlikely((bio->bi_sector & chunk_mask) + (bio->bi_size >> 9)
+       if (unlikely((bio->bi_sector & chunk_mask) + bio_sectors(bio)
                     > chunk_sects
                     && (conf->geo.near_copies < conf->geo.raid_disks
                         || conf->prev.near_copies < conf->prev.raid_disks))) {

@@ -1209,7 +1209,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
bad_map:
        printk("md/raid10:%s: make_request bug: can't convert block across chunks"
               " or bigger than %dk %llu %d\n", mdname(mddev), chunk_sects/2,
-              (unsigned long long)bio->bi_sector, bio->bi_size >> 10);
+              (unsigned long long)bio->bi_sector, bio_sectors(bio) / 2);

        bio_io_error(bio);
        return;

@@ -1224,7 +1224,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
         */
        wait_barrier(conf);

-       sectors = bio->bi_size >> 9;
+       sectors = bio_sectors(bio);
        while (test_bit(MD_RECOVERY_RESHAPE, &mddev->recovery) &&
            bio->bi_sector < conf->reshape_progress &&
            bio->bi_sector + sectors > conf->reshape_progress) {

@@ -1326,8 +1326,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                        r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

                        r10_bio->master_bio = bio;
-                       r10_bio->sectors = ((bio->bi_size >> 9)
-                                           - sectors_handled);
+                       r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                        r10_bio->state = 0;
                        r10_bio->mddev = mddev;
                        r10_bio->sector = bio->bi_sector + sectors_handled;
@@ -1569,7 +1568,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
         * after checking if we need to go around again.
         */
-       if (sectors_handled < (bio->bi_size >> 9)) {
+       if (sectors_handled < bio_sectors(bio)) {
                one_write_done(r10_bio);
                /* We need another r10_bio. It has already been counted
                 * in bio->bi_phys_segments.

@@ -1577,7 +1576,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
                r10_bio = mempool_alloc(conf->r10bio_pool, GFP_NOIO);

                r10_bio->master_bio = bio;
-               r10_bio->sectors = (bio->bi_size >> 9) - sectors_handled;
+               r10_bio->sectors = bio_sectors(bio) - sectors_handled;
                r10_bio->mddev = mddev;
                r10_bio->sector = bio->bi_sector + sectors_handled;

@@ -2103,7 +2102,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                d = r10_bio->devs[i].devnum;
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
                atomic_inc(&r10_bio->remaining);
-               md_sync_acct(conf->mirrors[d].rdev->bdev, tbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(tbio));

                tbio->bi_sector += conf->mirrors[d].rdev->data_offset;
                tbio->bi_bdev = conf->mirrors[d].rdev->bdev;

@@ -2128,7 +2127,7 @@ static void sync_request_write(struct mddev *mddev, struct r10bio *r10_bio)
                d = r10_bio->devs[i].devnum;
                atomic_inc(&r10_bio->remaining);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
-                            tbio->bi_size >> 9);
+                            bio_sectors(tbio));
                generic_make_request(tbio);
        }
@@ -2254,13 +2253,13 @@ static void recovery_request_write(struct mddev *mddev, struct r10bio *r10_bio)
        wbio2 = r10_bio->devs[1].repl_bio;
        if (wbio->bi_end_io) {
                atomic_inc(&conf->mirrors[d].rdev->nr_pending);
-               md_sync_acct(conf->mirrors[d].rdev->bdev, wbio->bi_size >> 9);
+               md_sync_acct(conf->mirrors[d].rdev->bdev, bio_sectors(wbio));
                generic_make_request(wbio);
        }
        if (wbio2 && wbio2->bi_end_io) {
                atomic_inc(&conf->mirrors[d].replacement->nr_pending);
                md_sync_acct(conf->mirrors[d].replacement->bdev,
-                            wbio2->bi_size >> 9);
+                            bio_sectors(wbio2));
                generic_make_request(wbio2);
        }
}

@@ -2690,8 +2689,7 @@ static void handle_read_error(struct mddev *mddev, struct r10bio *r10_bio)
                        r10_bio = mempool_alloc(conf->r10bio_pool,
                                                GFP_NOIO);
                        r10_bio->master_bio = mbio;
-                       r10_bio->sectors = (mbio->bi_size >> 9)
-                                          - sectors_handled;
+                       r10_bio->sectors = bio_sectors(mbio) - sectors_handled;
                        r10_bio->state = 0;
                        set_bit(R10BIO_ReadError,
                                &r10_bio->state);
@@ -90,7 +90,7 @@ static inline struct hlist_head *stripe_hash(struct r5conf *conf, sector_t sect)
 */
static inline struct bio *r5_next_bio(struct bio *bio, sector_t sector)
{
-       int sectors = bio->bi_size >> 9;
+       int sectors = bio_sectors(bio);
        if (bio->bi_sector + sectors < sector + STRIPE_SECTORS)
                return bio->bi_next;
        else

@@ -3804,7 +3804,7 @@ static int in_chunk_boundary(struct mddev *mddev, struct bio *bio)
{
        sector_t sector = bio->bi_sector + get_start_sect(bio->bi_bdev);
        unsigned int chunk_sectors = mddev->chunk_sectors;
-       unsigned int bio_sectors = bio->bi_size >> 9;
+       unsigned int bio_sectors = bio_sectors(bio);

        if (mddev->new_chunk_sectors < mddev->chunk_sectors)
                chunk_sectors = mddev->new_chunk_sectors;
@@ -3894,7 +3894,7 @@ static int bio_fits_rdev(struct bio *bi)
{
        struct request_queue *q = bdev_get_queue(bi->bi_bdev);

-       if ((bi->bi_size>>9) > queue_max_sectors(q))
+       if (bio_sectors(bi) > queue_max_sectors(q))
                return 0;
        blk_recount_segments(q, bi);
        if (bi->bi_phys_segments > queue_max_segments(q))

@@ -3964,7 +3964,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
                align_bi->bi_flags &= ~(1 << BIO_SEG_VALID);

                if (!bio_fits_rdev(align_bi) ||
-                   is_badblock(rdev, align_bi->bi_sector, align_bi->bi_size>>9,
+                   is_badblock(rdev, align_bi->bi_sector, bio_sectors(align_bi),
                            &first_bad, &bad_sectors)) {
                        /* too big in some way, or has a known bad block */
                        bio_put(align_bi);
@@ -5166,7 +5166,7 @@ static int bio_size_ok(struct block_device *bdev, struct bio *bio,
        }

        prev = &bio->bi_io_vec[bio->bi_vcnt - 1];
-       if ((bio->bi_size >> 9) > max_sectors)
+       if (bio_sectors(bio) > max_sectors)
                return 0;

        if (!q->merge_bvec_fn)
@@ -244,7 +244,7 @@ TRACE_EVENT(block_bio_bounce,
                __entry->dev = bio->bi_bdev ?
                                  bio->bi_bdev->bd_dev : 0;
                __entry->sector = bio->bi_sector;
-               __entry->nr_sector = bio->bi_size >> 9;
+               __entry->nr_sector = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

@@ -281,7 +281,7 @@ TRACE_EVENT(block_bio_complete,
                __entry->dev = bio->bi_bdev ?
                                  bio->bi_bdev->bd_dev : 0;
                __entry->sector = bio->bi_sector;
-               __entry->nr_sector = bio->bi_size >> 9;
+               __entry->nr_sector = bio_sectors(bio);
                __entry->error = error;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
        ),

@@ -309,7 +309,7 @@ DECLARE_EVENT_CLASS(block_bio_merge,
        TP_fast_assign(
                __entry->dev = bio->bi_bdev->bd_dev;
                __entry->sector = bio->bi_sector;
-               __entry->nr_sector = bio->bi_size >> 9;
+               __entry->nr_sector = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),
@@ -376,7 +376,7 @@ TRACE_EVENT(block_bio_queue,
        TP_fast_assign(
                __entry->dev = bio->bi_bdev->bd_dev;
                __entry->sector = bio->bi_sector;
-               __entry->nr_sector = bio->bi_size >> 9;
+               __entry->nr_sector = bio_sectors(bio);
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
        ),

@@ -404,7 +404,7 @@ DECLARE_EVENT_CLASS(block_get_rq,
        TP_fast_assign(
                __entry->dev = bio ? bio->bi_bdev->bd_dev : 0;
                __entry->sector = bio ? bio->bi_sector : 0;
-               __entry->nr_sector = bio ? bio->bi_size >> 9 : 0;
+               __entry->nr_sector = bio ? bio_sectors(bio) : 0;
                blk_fill_rwbs(__entry->rwbs,
                              bio ? bio->bi_rw : 0, __entry->nr_sector);
                memcpy(__entry->comm, current->comm, TASK_COMM_LEN);

@@ -580,7 +580,7 @@ TRACE_EVENT(block_bio_remap,
        TP_fast_assign(
                __entry->dev = bio->bi_bdev->bd_dev;
                __entry->sector = bio->bi_sector;
-               __entry->nr_sector = bio->bi_size >> 9;
+               __entry->nr_sector = bio_sectors(bio);
                __entry->old_dev = dev;
                __entry->old_sector = from;
                blk_fill_rwbs(__entry->rwbs, bio->bi_rw, bio->bi_size);