Commit 4e4cd21c authored by Linus Torvalds's avatar Linus Torvalds

Merge tag 'block-5.5-2020-01-10' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A few fixes that should go into this round.

  This pull request contains two NVMe fixes via Keith, removal of a dead
  function, and a fix for the bio op for read truncates (Ming)"

* tag 'block-5.5-2020-01-10' of git://git.kernel.dk/linux-block:
  nvmet: fix per feat data len for get_feature
  nvme: Translate more status codes to blk_status_t
  fs: move guard_bio_eod() after bio_set_op_attrs
  block: remove unused mp_bvec_last_segment
parents 30b6487d e17016f6
...@@ -538,6 +538,16 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start) ...@@ -538,6 +538,16 @@ void zero_fill_bio_iter(struct bio *bio, struct bvec_iter start)
} }
EXPORT_SYMBOL(zero_fill_bio_iter); EXPORT_SYMBOL(zero_fill_bio_iter);
/**
 * bio_truncate - truncate the bio to a new, smaller size @new_size
 * @bio: the bio to be truncated
 * @new_size: new size for truncating the bio
 *
 * Description:
 *   Truncate the bio down to @new_size. If bio_op(bio) is
 *   REQ_OP_READ, zero the truncated part. This function should only
 *   be used for handling corner cases, such as bio eod.
 */
void bio_truncate(struct bio *bio, unsigned new_size) void bio_truncate(struct bio *bio, unsigned new_size)
{ {
struct bio_vec bv; struct bio_vec bv;
...@@ -548,7 +558,7 @@ void bio_truncate(struct bio *bio, unsigned new_size) ...@@ -548,7 +558,7 @@ void bio_truncate(struct bio *bio, unsigned new_size)
if (new_size >= bio->bi_iter.bi_size) if (new_size >= bio->bi_iter.bi_size)
return; return;
if (bio_data_dir(bio) != READ) if (bio_op(bio) != REQ_OP_READ)
goto exit; goto exit;
bio_for_each_segment(bv, bio, iter) { bio_for_each_segment(bv, bio, iter) {
......
...@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status) ...@@ -222,6 +222,8 @@ static blk_status_t nvme_error_status(u16 status)
case NVME_SC_CAP_EXCEEDED: case NVME_SC_CAP_EXCEEDED:
return BLK_STS_NOSPC; return BLK_STS_NOSPC;
case NVME_SC_LBA_RANGE: case NVME_SC_LBA_RANGE:
case NVME_SC_CMD_INTERRUPTED:
case NVME_SC_NS_NOT_READY:
return BLK_STS_TARGET; return BLK_STS_TARGET;
case NVME_SC_BAD_ATTRIBUTES: case NVME_SC_BAD_ATTRIBUTES:
case NVME_SC_ONCS_NOT_SUPPORTED: case NVME_SC_ONCS_NOT_SUPPORTED:
......
...@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) ...@@ -24,6 +24,16 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd)
return len; return len;
} }
/*
 * Return the data transfer length expected for a Get Features command.
 * The Host Identifier feature carries a data buffer (the controller's
 * hostid); for every other feature id seen here no data is transferred.
 */
static u32 nvmet_feat_data_len(struct nvmet_req *req, u32 cdw10)
{
	u8 fid = cdw10 & 0xff;

	if (fid == NVME_FEAT_HOST_ID)
		return sizeof(req->sq->ctrl->hostid);

	return 0;
}
u64 nvmet_get_log_page_offset(struct nvme_command *cmd) u64 nvmet_get_log_page_offset(struct nvme_command *cmd)
{ {
return le64_to_cpu(cmd->get_log_page.lpo); return le64_to_cpu(cmd->get_log_page.lpo);
...@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req) ...@@ -778,7 +788,7 @@ static void nvmet_execute_get_features(struct nvmet_req *req)
u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10); u32 cdw10 = le32_to_cpu(req->cmd->common.cdw10);
u16 status = 0; u16 status = 0;
if (!nvmet_check_data_len(req, 0)) if (!nvmet_check_data_len(req, nvmet_feat_data_len(req, cdw10)))
return; return;
switch (cdw10 & 0xff) { switch (cdw10 & 0xff) {
......
...@@ -3031,7 +3031,7 @@ static void end_bio_bh_io_sync(struct bio *bio) ...@@ -3031,7 +3031,7 @@ static void end_bio_bh_io_sync(struct bio *bio)
* errors, this only handles the "we need to be able to * errors, this only handles the "we need to be able to
* do IO at the final sector" case. * do IO at the final sector" case.
*/ */
void guard_bio_eod(int op, struct bio *bio) void guard_bio_eod(struct bio *bio)
{ {
sector_t maxsector; sector_t maxsector;
struct hd_struct *part; struct hd_struct *part;
...@@ -3095,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh, ...@@ -3095,15 +3095,15 @@ static int submit_bh_wbc(int op, int op_flags, struct buffer_head *bh,
bio->bi_end_io = end_bio_bh_io_sync; bio->bi_end_io = end_bio_bh_io_sync;
bio->bi_private = bh; bio->bi_private = bh;
/* Take care of bh's that straddle the end of the device */
guard_bio_eod(op, bio);
if (buffer_meta(bh)) if (buffer_meta(bh))
op_flags |= REQ_META; op_flags |= REQ_META;
if (buffer_prio(bh)) if (buffer_prio(bh))
op_flags |= REQ_PRIO; op_flags |= REQ_PRIO;
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
/* Take care of bh's that straddle the end of the device */
guard_bio_eod(bio);
if (wbc) { if (wbc) {
wbc_init_bio(wbc, bio); wbc_init_bio(wbc, bio);
wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size); wbc_account_cgroup_owner(wbc, bh->b_page, bh->b_size);
......
...@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait) ...@@ -38,7 +38,7 @@ static inline int __sync_blockdev(struct block_device *bdev, int wait)
/* /*
* buffer.c * buffer.c
*/ */
extern void guard_bio_eod(int rw, struct bio *bio); extern void guard_bio_eod(struct bio *bio);
extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len, extern int __block_write_begin_int(struct page *page, loff_t pos, unsigned len,
get_block_t *get_block, struct iomap *iomap); get_block_t *get_block, struct iomap *iomap);
......
...@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio) ...@@ -62,7 +62,7 @@ static struct bio *mpage_bio_submit(int op, int op_flags, struct bio *bio)
{ {
bio->bi_end_io = mpage_end_io; bio->bi_end_io = mpage_end_io;
bio_set_op_attrs(bio, op, op_flags); bio_set_op_attrs(bio, op, op_flags);
guard_bio_eod(op, bio); guard_bio_eod(bio);
submit_bio(bio); submit_bio(bio);
return NULL; return NULL;
} }
......
...@@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec, ...@@ -153,26 +153,4 @@ static inline void bvec_advance(const struct bio_vec *bvec,
} }
} }
/*
 * Extract the final single-page segment of the multi-page bvec @bvec
 * and store it in @seg.
 */
static inline void mp_bvec_last_segment(const struct bio_vec *bvec,
		struct bio_vec *seg)
{
	unsigned end = bvec->bv_offset + bvec->bv_len;
	unsigned last_page = (end - 1) / PAGE_SIZE;
	unsigned last_page_start = last_page * PAGE_SIZE;

	seg->bv_page = bvec->bv_page + last_page;

	if (bvec->bv_offset >= last_page_start) {
		/* the whole bvec already lives inside its last page */
		seg->bv_offset = bvec->bv_offset % PAGE_SIZE;
		seg->bv_len = bvec->bv_len;
	} else {
		/* only the tail that spills into the last page */
		seg->bv_offset = 0;
		seg->bv_len = end - last_page_start;
	}
}
#endif /* __LINUX_BVEC_ITER_H */ #endif /* __LINUX_BVEC_ITER_H */
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment