Commit b2a205ff authored by Greg Kroah-Hartman

Merge tag 'for-linus-20181019' of git://git.kernel.dk/linux-block

Jens writes:
  "Block fixes for 4.19-final

   Two small fixes that should go into this release."

* tag 'for-linus-20181019' of git://git.kernel.dk/linux-block:
  block: don't deal with discard limit in blkdev_issue_discard()
  nvme: remove ns sibling before clearing path
parents 91b15613 744889b7
@@ -29,9 +29,7 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 {
 	struct request_queue *q = bdev_get_queue(bdev);
 	struct bio *bio = *biop;
-	unsigned int granularity;
 	unsigned int op;
-	int alignment;
 	sector_t bs_mask;
 
 	if (!q)
@@ -54,38 +52,16 @@ int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
 	if ((sector | nr_sects) & bs_mask)
 		return -EINVAL;
 
-	/* Zero-sector (unknown) and one-sector granularities are the same. */
-	granularity = max(q->limits.discard_granularity >> 9, 1U);
-	alignment = (bdev_discard_alignment(bdev) >> 9) % granularity;
-
 	while (nr_sects) {
-		unsigned int req_sects;
-		sector_t end_sect, tmp;
+		unsigned int req_sects = nr_sects;
+		sector_t end_sect;
 
-		/*
-		 * Issue in chunks of the user defined max discard setting,
-		 * ensuring that bi_size doesn't overflow
-		 */
-		req_sects = min_t(sector_t, nr_sects,
-				q->limits.max_discard_sectors);
 		if (!req_sects)
 			goto fail;
 		if (req_sects > UINT_MAX >> 9)
 			req_sects = UINT_MAX >> 9;
 
-		/*
-		 * If splitting a request, and the next starting sector would be
-		 * misaligned, stop the discard at the previous aligned sector.
-		 */
 		end_sect = sector + req_sects;
-		tmp = end_sect;
-		if (req_sects < nr_sects &&
-		    sector_div(tmp, granularity) != alignment) {
-			end_sect = end_sect - alignment;
-			sector_div(end_sect, granularity);
-			end_sect = end_sect * granularity + alignment;
-			req_sects = end_sect - sector;
-		}
 
 		bio = next_bio(bio, 0, gfp_mask);
 		bio->bi_iter.bi_sector = sector;
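
The __blkdev_issue_discard() hunks above drop the granularity/alignment math and leave only a cap on the size of each bio, since a bio's bi_size is a 32-bit byte count; splitting against the queue's discard limits is assumed to happen further down the stack. The following is a minimal user-space sketch of the remaining chunking loop, for illustration only; issue_one() is a hypothetical stand-in for building and chaining one discard bio, not a kernel interface.

/* Minimal user-space sketch of the simplified chunking loop above. */
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t;

/* Hypothetical stand-in for allocating, chaining and submitting one discard bio. */
static void issue_one(sector_t sector, unsigned int nr_sects)
{
	printf("discard bio: sector %llu, %u sectors\n",
	       (unsigned long long)sector, nr_sects);
}

static void issue_discard(sector_t sector, sector_t nr_sects)
{
	while (nr_sects) {
		unsigned int req_sects;

		/* Cap one bio so its byte count fits in a 32-bit bi_size. */
		if (nr_sects > (sector_t)(UINT_MAX >> 9))
			req_sects = UINT_MAX >> 9;
		else
			req_sects = nr_sects;

		issue_one(sector, req_sects);

		sector += req_sects;
		nr_sects -= req_sects;
	}
}

int main(void)
{
	/* A 10 GiB discard in 512-byte sectors is issued as three capped chunks. */
	issue_discard(2048, 10ULL << 21);
	return 0;
}
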
@@ -3143,8 +3143,8 @@ static void nvme_ns_remove(struct nvme_ns *ns)
 	}
 
 	mutex_lock(&ns->ctrl->subsys->lock);
-	nvme_mpath_clear_current_path(ns);
 	list_del_rcu(&ns->siblings);
+	nvme_mpath_clear_current_path(ns);
 	mutex_unlock(&ns->ctrl->subsys->lock);
 
 	down_write(&ns->ctrl->namespaces_rwsem);
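
The nvme_ns_remove() hunk above reorders a single call: the namespace now leaves the subsystem's siblings list before it is cleared as the cached current path, so a concurrent path search cannot walk the list and re-cache it in the window between the two steps (this reading follows the patch title; it is an assumption, not spelled out in the diff). Below is a minimal single-threaded sketch of that ordering concern; struct path_head, struct path_ns and select_path() are hypothetical toy types, not the kernel's nvme multipath structures, and the interleaving is simulated explicitly rather than raced.

/* Toy illustration of remove-before-clear vs. clear-before-remove. */
#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

struct path_ns {
	const char *name;
	bool on_list;            /* stands in for membership of ->siblings */
	struct path_ns *next;
};

struct path_head {
	struct path_ns *list;         /* stands in for the RCU siblings list */
	struct path_ns *current_path; /* cached path */
};

/* Re-pick the first namespace still on the list and cache it. */
static void select_path(struct path_head *head)
{
	struct path_ns *ns;

	head->current_path = NULL;
	for (ns = head->list; ns; ns = ns->next) {
		if (ns->on_list) {
			head->current_path = ns;
			return;
		}
	}
}

int main(void)
{
	struct path_ns ns2 = { "nvme0n2", true, NULL };
	struct path_ns ns1 = { "nvme0n1", true, &ns2 };
	struct path_head head = { &ns1, &ns1 };

	/* Old order: clear the cached path first ... */
	head.current_path = NULL;
	/* ... a path search slips in while ns1 is still listed ... */
	select_path(&head);
	/* ... then the unlink: the about-to-go namespace is cached again. */
	ns1.on_list = false;
	printf("old order leaves current path: %s\n", head.current_path->name);

	/* New order: unlink first, then clear; a later search skips ns1. */
	ns1.on_list = true;           /* reset the toy state */
	head.current_path = &ns1;
	ns1.on_list = false;          /* list_del_rcu() analogue */
	head.current_path = NULL;     /* nvme_mpath_clear_current_path() analogue */
	select_path(&head);
	printf("new order leaves current path: %s\n", head.current_path->name);
	return 0;
}
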