Commit b7c44ed9 authored by Jens Axboe

block: manipulate bio->bi_flags through helpers

Some places use helpers now, others don't. We only have the 'is set'
helper; add helpers for setting and clearing flags too.

It was a bit of a mess of atomic vs non-atomic access. With
BIO_UPTODATE gone, there is no risk of concurrent access to the
flags, so relax the restriction and don't make any of them atomic. The
flags that do have serialization issues (reffed and chained) are
already handled separately.
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 4246a0b6
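At its core the patch introduces three non-atomic inline helpers in include/linux/bio.h (see the hunk near the end of the diff) and converts all open-coded bi_flags manipulation to them. As a self-contained sketch of the idiom, with a stub struct bio standing in for the real definition from include/linux/blk_types.h:

#include <stdbool.h>

/* Self-contained sketch: only the bi_flags field of the real
 * struct bio is modeled here. The helper bodies match the
 * include/linux/bio.h hunk in the diff below. */
struct bio {
        unsigned long bi_flags;
};

/* Non-atomic on purpose: with BIO_UPTODATE gone, nothing touches
 * bi_flags concurrently; serialized flags are handled elsewhere. */
static inline bool bio_flagged(struct bio *bio, unsigned int bit)
{
        return (bio->bi_flags & (1UL << bit)) != 0;
}

static inline void bio_set_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags |= (1UL << bit);
}

static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
{
        bio->bi_flags &= ~(1UL << bit);
}

Callers then write bio_set_flag(bio, BIO_CLONED) instead of bio->bi_flags |= 1 << BIO_CLONED, and likewise for clearing and testing.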
@@ -311,7 +311,7 @@ static void bio_chain_endio(struct bio *bio)
  */
 static inline void bio_inc_remaining(struct bio *bio)
 {
-	bio->bi_flags |= (1 << BIO_CHAIN);
+	bio_set_flag(bio, BIO_CHAIN);
 	smp_mb__before_atomic();
 	atomic_inc(&bio->__bi_remaining);
 }
@@ -495,7 +495,7 @@ struct bio *bio_alloc_bioset(gfp_t gfp_mask, int nr_iovecs, struct bio_set *bs)
 		if (unlikely(!bvl))
 			goto err_free;
 
-		bio->bi_flags |= 1 << BIO_OWNS_VEC;
+		bio_set_flag(bio, BIO_OWNS_VEC);
 	} else if (nr_iovecs) {
 		bvl = bio->bi_inline_vecs;
 	}
@@ -580,7 +580,7 @@ void __bio_clone_fast(struct bio *bio, struct bio *bio_src)
 	 * so we don't set nor calculate new physical/hw segment counts here
 	 */
 	bio->bi_bdev = bio_src->bi_bdev;
-	bio->bi_flags |= 1 << BIO_CLONED;
+	bio_set_flag(bio, BIO_CLONED);
 	bio->bi_rw = bio_src->bi_rw;
 	bio->bi_iter = bio_src->bi_iter;
 	bio->bi_io_vec = bio_src->bi_io_vec;
@@ -829,7 +829,7 @@ static int __bio_add_page(struct request_queue *q, struct bio *bio, struct page
 	/* If we may be able to merge these biovecs, force a recount */
 	if (bio->bi_vcnt > 1 && (BIOVEC_PHYS_MERGEABLE(bvec-1, bvec)))
-		bio->bi_flags &= ~(1 << BIO_SEG_VALID);
+		bio_clear_flag(bio, BIO_SEG_VALID);
 
 done:
 	return len;
@@ -1390,7 +1390,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	if (iter->type & WRITE)
 		bio->bi_rw |= REQ_WRITE;
 
-	bio->bi_flags |= (1 << BIO_USER_MAPPED);
+	bio_set_flag(bio, BIO_USER_MAPPED);
 
 	/*
 	 * subtle -- if __bio_map_user() ended up bouncing a bio,
@@ -1770,7 +1770,7 @@ static inline bool bio_remaining_done(struct bio *bio)
 	BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);
 
 	if (atomic_dec_and_test(&bio->__bi_remaining)) {
-		clear_bit(BIO_CHAIN, &bio->bi_flags);
+		bio_clear_flag(bio, BIO_CHAIN);
 		return true;
 	}
@@ -1866,7 +1866,7 @@ void bio_trim(struct bio *bio, int offset, int size)
 	if (offset == 0 && size == bio->bi_iter.bi_size)
 		return;
 
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	bio_advance(bio, offset << 9);
...
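Note how BIO_CHAIN, one of the flags the commit message calls out, interacts with the new helpers in the hunks above: the flag store itself stays a plain (non-atomic) write, while serialization comes from the atomic __bi_remaining counter, with smp_mb__before_atomic() ordering the flag store ahead of the increment. Condensed from the hunks above (the trailing return false is inferred from the function's bool contract, not shown in the excerpt):

static inline void bio_inc_remaining(struct bio *bio)
{
        bio_set_flag(bio, BIO_CHAIN);   /* plain store ...            */
        smp_mb__before_atomic();        /* ... ordered before the inc */
        atomic_inc(&bio->__bi_remaining);
}

static inline bool bio_remaining_done(struct bio *bio)
{
        BUG_ON(atomic_read(&bio->__bi_remaining) <= 0);

        if (atomic_dec_and_test(&bio->__bi_remaining)) {
                bio_clear_flag(bio, BIO_CHAIN);
                return true;
        }
        return false;   /* inferred: remaining work still pending */
}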
@@ -146,7 +146,7 @@ static void req_bio_endio(struct request *rq, struct bio *bio,
 		bio->bi_error = error;
 
 	if (unlikely(rq->cmd_flags & REQ_QUIET))
-		set_bit(BIO_QUIET, &bio->bi_flags);
+		bio_set_flag(bio, BIO_QUIET);
 
 	bio_advance(bio, nbytes);
...
@@ -94,7 +94,7 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 		return PTR_ERR(bio);
 
 	if (map_data && map_data->null_mapped)
-		bio->bi_flags |= (1 << BIO_NULL_MAPPED);
+		bio_set_flag(bio, BIO_NULL_MAPPED);
 
 	if (bio->bi_iter.bi_size != iter->count) {
 		/*
...
@@ -116,7 +116,7 @@ void blk_recount_segments(struct request_queue *q, struct bio *bio)
 		bio->bi_next = nxt;
 	}
 
-	bio->bi_flags |= (1 << BIO_SEG_VALID);
+	bio_set_flag(bio, BIO_SEG_VALID);
 }
 EXPORT_SYMBOL(blk_recount_segments);
...
@@ -186,7 +186,7 @@ static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
 	if (!bdi_cap_stable_pages_required(&q->backing_dev_info))
 		return 0;
 
-	return test_bit(BIO_SNAP_STABLE, &bio->bi_flags);
+	return bio_flagged(bio, BIO_SNAP_STABLE);
 }
 #else
 static int must_snapshot_stable_pages(struct request_queue *q, struct bio *bio)
...
@@ -1157,7 +1157,7 @@ static void make_request(struct mddev *mddev, struct bio * bio)
 	 * non-zero, then it is the number of not-completed requests.
 	 */
 	bio->bi_phys_segments = 0;
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	if (rw == READ) {
 		/*
@@ -2711,7 +2711,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr, int *skipp
 				/* remove last page from this bio */
 				bio->bi_vcnt--;
 				bio->bi_iter.bi_size -= len;
-				__clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+				bio_clear_flag(bio, BIO_SEG_VALID);
 			}
 			goto bio_full;
 		}
...
@@ -1216,7 +1216,7 @@ static void __make_request(struct mddev *mddev, struct bio *bio)
 	 * non-zero, then it is the number of not-completed requests.
 	 */
 	bio->bi_phys_segments = 0;
-	clear_bit(BIO_SEG_VALID, &bio->bi_flags);
+	bio_clear_flag(bio, BIO_SEG_VALID);
 
 	if (rw == READ) {
 		/*
@@ -3353,7 +3353,7 @@ static sector_t sync_request(struct mddev *mddev, sector_t sector_nr,
 				/* remove last page from this bio */
 				bio2->bi_vcnt--;
 				bio2->bi_iter.bi_size -= len;
-				__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+				bio_clear_flag(bio2, BIO_SEG_VALID);
 			}
 			goto bio_full;
 		}
@@ -4433,7 +4433,7 @@ static sector_t reshape_request(struct mddev *mddev, sector_t sector_nr,
 			/* Remove last page from this bio */
 			bio2->bi_vcnt--;
 			bio2->bi_iter.bi_size -= len;
-			__clear_bit(BIO_SEG_VALID, &bio2->bi_flags);
+			bio_clear_flag(bio2, BIO_SEG_VALID);
 		}
 		goto bio_full;
 	}
...
@@ -4850,7 +4850,7 @@ static int chunk_aligned_read(struct mddev *mddev, struct bio * raid_bio)
 		rcu_read_unlock();
 		raid_bio->bi_next = (void*)rdev;
 		align_bi->bi_bdev = rdev->bdev;
-		__clear_bit(BIO_SEG_VALID, &align_bi->bi_flags);
+		bio_clear_flag(align_bi, BIO_SEG_VALID);
 
 		if (!bio_fits_rdev(align_bi) ||
 		    is_badblock(rdev, align_bi->bi_iter.bi_sector,
...
@@ -2961,7 +2961,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
 {
 	struct buffer_head *bh = bio->bi_private;
 
-	if (unlikely (test_bit(BIO_QUIET,&bio->bi_flags)))
+	if (unlikely(bio_flagged(bio, BIO_QUIET)))
 		set_bit(BH_Quiet, &bh->b_state);
 
 	bh->b_end_io(bh, !bio->bi_error);
...
@@ -304,6 +304,21 @@ static inline void bio_cnt_set(struct bio *bio, unsigned int count)
 	atomic_set(&bio->__bi_cnt, count);
 }
 
+static inline bool bio_flagged(struct bio *bio, unsigned int bit)
+{
+	return (bio->bi_flags & (1UL << bit)) != 0;
+}
+
+static inline void bio_set_flag(struct bio *bio, unsigned int bit)
+{
+	bio->bi_flags |= (1UL << bit);
+}
+
+static inline void bio_clear_flag(struct bio *bio, unsigned int bit)
+{
+	bio->bi_flags &= ~(1UL << bit);
+}
+
 enum bip_flags {
 	BIP_BLOCK_INTEGRITY	= 1 << 0,	/* block layer owns integrity data */
 	BIP_MAPPED_INTEGRITY	= 1 << 1,	/* ref tag has been remapped */
...
@@ -129,8 +129,6 @@ struct bio {
 #define BIO_RESET_BITS	13
 #define BIO_OWNS_VEC	13	/* bio_free() should free bvec */
 
-#define bio_flagged(bio, flag)	((bio)->bi_flags & (1 << (flag)))
-
 /*
  * top 4 bits of bio flags indicate the pool this bio came from
  */
...
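One subtle improvement hiding in the last two hunks: the removed bio_flagged() macro returned the raw masked value, while the new inline normalizes the result to bool, so it can be compared or stored safely. A small userspace illustration (the BIO_CLONED bit value here is hypothetical, chosen only to make the point):

#include <stdbool.h>
#include <stdio.h>

#define BIO_CLONED 4    /* hypothetical bit number, for illustration only */

/* Old macro semantics: yields the mask value (16 here), not 0/1 */
#define old_bio_flagged(flags, flag) ((flags) & (1 << (flag)))

/* New helper semantics: normalized to bool */
static inline bool new_bio_flagged(unsigned long flags, unsigned int bit)
{
        return (flags & (1UL << bit)) != 0;
}

int main(void)
{
        unsigned long flags = 1UL << BIO_CLONED;

        printf("old == 1: %d\n", old_bio_flagged(flags, BIO_CLONED) == 1); /* prints 0 */
        printf("new == 1: %d\n", new_bio_flagged(flags, BIO_CLONED) == 1); /* prints 1 */
        return 0;
}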