Commit b35ba01e authored by Christoph Hellwig, committed by Jens Axboe

nvme: support ranged discard requests

NVMe supports up to 256 ranges per DSM command, so wire up support
for ranged discards up to that limit.
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@fb.com>
parent 1e739730
--- a/drivers/nvme/host/core.c
+++ b/drivers/nvme/host/core.c
@@ -238,26 +238,38 @@ static inline void nvme_setup_flush(struct nvme_ns *ns,
 static inline int nvme_setup_discard(struct nvme_ns *ns, struct request *req,
 		struct nvme_command *cmnd)
 {
+	unsigned short segments = blk_rq_nr_discard_segments(req), n = 0;
 	struct nvme_dsm_range *range;
-	unsigned int nr_bytes = blk_rq_bytes(req);
+	struct bio *bio;
 
-	range = kmalloc(sizeof(*range), GFP_ATOMIC);
+	range = kmalloc_array(segments, sizeof(*range), GFP_ATOMIC);
 	if (!range)
 		return BLK_MQ_RQ_QUEUE_BUSY;
 
-	range->cattr = cpu_to_le32(0);
-	range->nlb = cpu_to_le32(nr_bytes >> ns->lba_shift);
-	range->slba = cpu_to_le64(nvme_block_nr(ns, blk_rq_pos(req)));
+	__rq_for_each_bio(bio, req) {
+		u64 slba = nvme_block_nr(ns, bio->bi_iter.bi_sector);
+		u32 nlb = bio->bi_iter.bi_size >> ns->lba_shift;
+
+		range[n].cattr = cpu_to_le32(0);
+		range[n].nlb = cpu_to_le32(nlb);
+		range[n].slba = cpu_to_le64(slba);
+		n++;
+	}
+
+	if (WARN_ON_ONCE(n != segments)) {
+		kfree(range);
+		return BLK_MQ_RQ_QUEUE_ERROR;
+	}
 
 	memset(cmnd, 0, sizeof(*cmnd));
 	cmnd->dsm.opcode = nvme_cmd_dsm;
 	cmnd->dsm.nsid = cpu_to_le32(ns->ns_id);
-	cmnd->dsm.nr = 0;
+	cmnd->dsm.nr = segments - 1;
 	cmnd->dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 
 	req->special_vec.bv_page = virt_to_page(range);
 	req->special_vec.bv_offset = offset_in_page(range);
-	req->special_vec.bv_len = sizeof(*range);
+	req->special_vec.bv_len = sizeof(*range) * segments;
 	req->rq_flags |= RQF_SPECIAL_PAYLOAD;
 
 	return BLK_MQ_RQ_QUEUE_OK;
@@ -871,6 +883,9 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	struct nvme_ctrl *ctrl = ns->ctrl;
 	u32 logical_block_size = queue_logical_block_size(ns->queue);
 
+	BUILD_BUG_ON(PAGE_SIZE / sizeof(struct nvme_dsm_range) <
+			NVME_DSM_MAX_RANGES);
+
 	if (ctrl->quirks & NVME_QUIRK_DISCARD_ZEROES)
 		ns->queue->limits.discard_zeroes_data = 1;
 	else
@@ -879,6 +894,7 @@ static void nvme_config_discard(struct nvme_ns *ns)
 	ns->queue->limits.discard_alignment = logical_block_size;
 	ns->queue->limits.discard_granularity = logical_block_size;
 	blk_queue_max_discard_sectors(ns->queue, UINT_MAX);
+	blk_queue_max_discard_segments(ns->queue, NVME_DSM_MAX_RANGES);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue);
 }
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -553,6 +553,8 @@ enum {
 	NVME_DSMGMT_AD	= 1 << 2,
 };
 
+#define NVME_DSM_MAX_RANGES	256
+
 struct nvme_dsm_range {
 	__le32	cattr;
 	__le32	nlb;
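
For illustration, here is a minimal userspace sketch of the packing the patch performs: each bio in a merged discard request becomes one 16-byte range descriptor, and the whole array is handed to the device as a single DSM payload. The struct extent type, the build_dsm_payload() helper, and the sample geometry (4096-byte blocks, two extents) are hypothetical stand-ins; the descriptor layout, the little-endian fields, and the 256-range cap come from the patch above.

/*
 * Userspace sketch (not kernel code) of how nvme_setup_discard() packs one
 * DSM range descriptor per discard segment.  struct extent and
 * build_dsm_payload() are illustrative assumptions; the cattr/nlb/slba
 * layout mirrors struct nvme_dsm_range from include/linux/nvme.h.
 */
#include <endian.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define NVME_DSM_MAX_RANGES 256

struct nvme_dsm_range {
	uint32_t cattr;		/* context attributes, zero here */
	uint32_t nlb;		/* number of logical blocks */
	uint64_t slba;		/* starting LBA */
};

struct extent {			/* hypothetical stand-in for one bio */
	uint64_t sector;	/* offset in 512-byte sectors */
	uint32_t bytes;		/* length in bytes */
};

/* Sectors to device LBAs, as nvme_block_nr(): shift by (lba_shift - 9). */
static uint64_t to_lba(uint64_t sector, unsigned lba_shift)
{
	return sector >> (lba_shift - 9);
}

static struct nvme_dsm_range *build_dsm_payload(const struct extent *ext,
						unsigned n, unsigned lba_shift)
{
	struct nvme_dsm_range *range;
	unsigned i;

	if (n == 0 || n > NVME_DSM_MAX_RANGES)
		return NULL;	/* the block layer enforces this cap */

	range = calloc(n, sizeof(*range));
	if (!range)
		return NULL;

	for (i = 0; i < n; i++) {
		range[i].cattr = htole32(0);
		range[i].nlb = htole32(ext[i].bytes >> lba_shift);
		range[i].slba = htole64(to_lba(ext[i].sector, lba_shift));
	}
	return range;		/* the command's "nr" field would be n - 1 */
}

int main(void)
{
	/* Two discontiguous extents on a 4096-byte-block namespace. */
	struct extent ext[] = {
		{ .sector = 0,    .bytes = 65536 },
		{ .sector = 4096, .bytes = 131072 },
	};
	struct nvme_dsm_range *range = build_dsm_payload(ext, 2, 12);

	if (!range)
		return 1;
	for (unsigned i = 0; i < 2; i++)
		printf("range %u: slba=%llu nlb=%u\n", i,
		       (unsigned long long)le64toh(range[i].slba),
		       le32toh(range[i].nlb));
	free(range);
	return 0;
}

Since each descriptor is 16 bytes, the full 256-range payload is exactly 4096 bytes; that is what the BUILD_BUG_ON in nvme_config_discard() guards, ensuring NVME_DSM_MAX_RANGES descriptors always fit in a single page.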