Commit 0b31c3ec authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block fixes from Jens Axboe:
 "A small batch of fixes that should be included for the 4.13 release.
  This contains:

   - Revert of the 4k loop blocksize support. Even with a recent batch
     of 4 fixes, we're still not really happy with it. Rather than be
     stuck with an API issue, let's revert it and get it right for 4.14.

   - Trivial patch from Bart, adding names for a few flags that were
     introduced in this release but were missing from the blk-mq debugfs
     exports.

   - Regression fix for bsg, fixing a potential kernel panic. From
     Benjamin.

   - Tweak for the blk throttling, improving how we account discards.
     From Shaohua"

* 'for-linus' of git://git.kernel.dk/linux-block:
  blk-mq-debugfs: Add names for recently added flags
  bsg-lib: fix kernel panic resulting from missing allocation of reply-buffer
  Revert "loop: support 4k physical blocksize"
  blk-throttle: cap discard request size
parents 1f5de42d 22d53821
--- a/block/blk-mq-debugfs.c
+++ b/block/blk-mq-debugfs.c
@@ -75,6 +75,8 @@ static const char *const blk_queue_flag_name[] = {
 	QUEUE_FLAG_NAME(STATS),
 	QUEUE_FLAG_NAME(POLL_STATS),
 	QUEUE_FLAG_NAME(REGISTERED),
+	QUEUE_FLAG_NAME(SCSI_PASSTHROUGH),
+	QUEUE_FLAG_NAME(QUIESCED),
 };
 #undef QUEUE_FLAG_NAME
@@ -265,6 +267,7 @@ static const char *const cmd_flag_name[] = {
 	CMD_FLAG_NAME(RAHEAD),
 	CMD_FLAG_NAME(BACKGROUND),
 	CMD_FLAG_NAME(NOUNMAP),
+	CMD_FLAG_NAME(NOWAIT),
 };
 #undef CMD_FLAG_NAME
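Note: the two hunks above only add entries to existing name tables, so flags introduced this cycle decode to readable names in debugfs instead of showing up as raw bits. As a rough sketch of how such a table is consumed (a hypothetical helper for illustration, not code from this commit; it assumes each table slot is indexed by the flag's bit number and that kernel printk helpers are available):

	/* Hypothetical decoder: walk a flag word and print the registered
	 * name for every set bit; bits without a table entry are skipped,
	 * which is why missing names made the debugfs output hard to read. */
	static void print_flag_names(unsigned long flags,
				     const char *const *names, int nr_names)
	{
		int bit;

		for (bit = 0; bit < nr_names; bit++)
			if ((flags & (1UL << bit)) && names[bit])
				pr_cont("%s ", names[bit]);
	}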
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -382,6 +382,14 @@ static unsigned int tg_iops_limit(struct throtl_grp *tg, int rw)
 	} \
 } while (0)
 
+static inline unsigned int throtl_bio_data_size(struct bio *bio)
+{
+	/* assume it's one sector */
+	if (unlikely(bio_op(bio) == REQ_OP_DISCARD))
+		return 512;
+	return bio->bi_iter.bi_size;
+}
+
 static void throtl_qnode_init(struct throtl_qnode *qn, struct throtl_grp *tg)
 {
 	INIT_LIST_HEAD(&qn->node);
@@ -934,6 +942,7 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	bool rw = bio_data_dir(bio);
 	u64 bytes_allowed, extra_bytes, tmp;
 	unsigned long jiffy_elapsed, jiffy_wait, jiffy_elapsed_rnd;
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	jiffy_elapsed = jiffy_elapsed_rnd = jiffies - tg->slice_start[rw];
@@ -947,14 +956,14 @@ static bool tg_with_in_bps_limit(struct throtl_grp *tg, struct bio *bio,
 	do_div(tmp, HZ);
 	bytes_allowed = tmp;
 
-	if (tg->bytes_disp[rw] + bio->bi_iter.bi_size <= bytes_allowed) {
+	if (tg->bytes_disp[rw] + bio_size <= bytes_allowed) {
 		if (wait)
 			*wait = 0;
 		return true;
 	}
 
 	/* Calc approx time to dispatch */
-	extra_bytes = tg->bytes_disp[rw] + bio->bi_iter.bi_size - bytes_allowed;
+	extra_bytes = tg->bytes_disp[rw] + bio_size - bytes_allowed;
 	jiffy_wait = div64_u64(extra_bytes * HZ, tg_bps_limit(tg, rw));
 
 	if (!jiffy_wait)
@@ -1034,11 +1043,12 @@ static bool tg_may_dispatch(struct throtl_grp *tg, struct bio *bio,
 static void throtl_charge_bio(struct throtl_grp *tg, struct bio *bio)
 {
 	bool rw = bio_data_dir(bio);
+	unsigned int bio_size = throtl_bio_data_size(bio);
 
 	/* Charge the bio to the group */
-	tg->bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->bytes_disp[rw] += bio_size;
 	tg->io_disp[rw]++;
-	tg->last_bytes_disp[rw] += bio->bi_iter.bi_size;
+	tg->last_bytes_disp[rw] += bio_size;
 	tg->last_io_disp[rw]++;
 
 	/*
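Note on the throttling change above: a discard bio describes a range to be deallocated but transfers no data, so throtl_bio_data_size() now charges it as a single 512-byte sector rather than its full bi_size. As an illustrative calculation (numbers chosen for illustration, not taken from the commit): under a 10 MB/s bps limit, a single 1 GiB discard previously consumed roughly 1 GiB / 10 MB/s ≈ 100 seconds of byte budget and could stall the cgroup for that long; counted as 512 bytes, large discards are instead governed mainly by the iops limit.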
--- a/block/bsg-lib.c
+++ b/block/bsg-lib.c
@@ -29,26 +29,25 @@
 #include <scsi/scsi_cmnd.h>
 
 /**
- * bsg_destroy_job - routine to teardown/delete a bsg job
+ * bsg_teardown_job - routine to teardown a bsg job
  * @job: bsg_job that is to be torn down
  */
-static void bsg_destroy_job(struct kref *kref)
+static void bsg_teardown_job(struct kref *kref)
 {
 	struct bsg_job *job = container_of(kref, struct bsg_job, kref);
 	struct request *rq = job->req;
 
-	blk_end_request_all(rq, BLK_STS_OK);
-
 	put_device(job->dev);	/* release reference for the request */
 
 	kfree(job->request_payload.sg_list);
 	kfree(job->reply_payload.sg_list);
-	kfree(job);
+
+	blk_end_request_all(rq, BLK_STS_OK);
 }
 
 void bsg_job_put(struct bsg_job *job)
 {
-	kref_put(&job->kref, bsg_destroy_job);
+	kref_put(&job->kref, bsg_teardown_job);
 }
 EXPORT_SYMBOL_GPL(bsg_job_put);
@@ -100,7 +99,7 @@ EXPORT_SYMBOL_GPL(bsg_job_done);
  */
 static void bsg_softirq_done(struct request *rq)
 {
-	struct bsg_job *job = rq->special;
+	struct bsg_job *job = blk_mq_rq_to_pdu(rq);
 
 	bsg_job_put(job);
 }
@@ -122,33 +121,20 @@ static int bsg_map_buffer(struct bsg_buffer *buf, struct request *req)
 }
 
 /**
- * bsg_create_job - create the bsg_job structure for the bsg request
+ * bsg_prepare_job - create the bsg_job structure for the bsg request
  * @dev: device that is being sent the bsg request
  * @req: BSG request that needs a job structure
  */
-static int bsg_create_job(struct device *dev, struct request *req)
+static int bsg_prepare_job(struct device *dev, struct request *req)
 {
 	struct request *rsp = req->next_rq;
-	struct request_queue *q = req->q;
 	struct scsi_request *rq = scsi_req(req);
-	struct bsg_job *job;
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
 	int ret;
 
-	BUG_ON(req->special);
-
-	job = kzalloc(sizeof(struct bsg_job) + q->bsg_job_size, GFP_KERNEL);
-	if (!job)
-		return -ENOMEM;
-
-	req->special = job;
-	job->req = req;
-	if (q->bsg_job_size)
-		job->dd_data = (void *)&job[1];
 	job->request = rq->cmd;
 	job->request_len = rq->cmd_len;
-	job->reply = rq->sense;
-	job->reply_len = SCSI_SENSE_BUFFERSIZE; /* Size of sense buffer
-						 * allocated */
+
 	if (req->bio) {
 		ret = bsg_map_buffer(&job->request_payload, req);
 		if (ret)
@@ -187,7 +173,6 @@ static void bsg_request_fn(struct request_queue *q)
 {
 	struct device *dev = q->queuedata;
 	struct request *req;
-	struct bsg_job *job;
 	int ret;
 
 	if (!get_device(dev))
@@ -199,7 +184,7 @@ static void bsg_request_fn(struct request_queue *q)
 			break;
 		spin_unlock_irq(q->queue_lock);
 
-		ret = bsg_create_job(dev, req);
+		ret = bsg_prepare_job(dev, req);
 		if (ret) {
 			scsi_req(req)->result = ret;
 			blk_end_request_all(req, BLK_STS_OK);
@@ -207,8 +192,7 @@ static void bsg_request_fn(struct request_queue *q)
 			continue;
 		}
 
-		job = req->special;
-		ret = q->bsg_job_fn(job);
+		ret = q->bsg_job_fn(blk_mq_rq_to_pdu(req));
 		spin_lock_irq(q->queue_lock);
 		if (ret)
 			break;
@@ -219,6 +203,35 @@ static void bsg_request_fn(struct request_queue *q)
 	spin_lock_irq(q->queue_lock);
 }
 
+static int bsg_init_rq(struct request_queue *q, struct request *req, gfp_t gfp)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	memset(job, 0, sizeof(*job));
+
+	scsi_req_init(sreq);
+	sreq->sense_len = SCSI_SENSE_BUFFERSIZE;
+	sreq->sense = kzalloc(sreq->sense_len, gfp);
+	if (!sreq->sense)
+		return -ENOMEM;
+
+	job->req = req;
+	job->reply = sreq->sense;
+	job->reply_len = sreq->sense_len;
+	job->dd_data = job + 1;
+
+	return 0;
+}
+
+static void bsg_exit_rq(struct request_queue *q, struct request *req)
+{
+	struct bsg_job *job = blk_mq_rq_to_pdu(req);
+	struct scsi_request *sreq = &job->sreq;
+
+	kfree(sreq->sense);
+}
+
 /**
  * bsg_setup_queue - Create and add the bsg hooks so we can receive requests
  * @dev: device to attach bsg device to
@@ -235,7 +248,9 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 	q = blk_alloc_queue(GFP_KERNEL);
 	if (!q)
 		return ERR_PTR(-ENOMEM);
-	q->cmd_size = sizeof(struct scsi_request);
+	q->cmd_size = sizeof(struct bsg_job) + dd_job_size;
+	q->init_rq_fn = bsg_init_rq;
+	q->exit_rq_fn = bsg_exit_rq;
 	q->request_fn = bsg_request_fn;
 
 	ret = blk_init_allocated_queue(q);
@@ -243,7 +258,6 @@ struct request_queue *bsg_setup_queue(struct device *dev, char *name,
 		goto out_cleanup_queue;
 
 	q->queuedata = dev;
-	q->bsg_job_size = dd_job_size;
 	q->bsg_job_fn = job_fn;
 	queue_flag_set_unlocked(QUEUE_FLAG_BIDI, q);
 	queue_flag_set_unlocked(QUEUE_FLAG_SCSI_PASSTHROUGH, q);
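Note on the bsg-lib change above: the panic being fixed came from job->reply pointing at a sense buffer that was never allocated for bsg requests; the fix embeds struct scsi_request and the bsg_job in each request's PDU and allocates the reply/sense buffer up front in bsg_init_rq(). A minimal sketch of the PDU pattern this relies on (hypothetical names, simplified from the code above): with q->cmd_size set to the driver's per-request size, the block layer reserves that many bytes behind every struct request and blk_mq_rq_to_pdu() hands the area back, so nothing has to be allocated in the dispatch path.

	/* Hypothetical per-request data laid out behind struct request.
	 * scsi_request stays first so that scsi_req(rq), which returns the
	 * PDU, keeps working. */
	struct my_job {
		struct scsi_request sreq;
		void *reply;		/* e.g. the sense/reply buffer */
		void *dd_data;		/* LLD-private area, placed after the job */
	};

	static struct my_job *my_job(struct request *rq)
	{
		return blk_mq_rq_to_pdu(rq);	/* the reserved area behind rq */
	}

	/* at queue setup time: q->cmd_size = sizeof(struct my_job) + dd_job_size; */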
--- a/drivers/block/loop.c
+++ b/drivers/block/loop.c
@@ -221,8 +221,7 @@ static void __loop_update_dio(struct loop_device *lo, bool dio)
 }
 
 static int
-figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
-		 loff_t logical_blocksize)
+figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit)
 {
 	loff_t size = get_size(offset, sizelimit, lo->lo_backing_file);
 	sector_t x = (sector_t)size;
@@ -234,12 +233,6 @@ figure_loop_size(struct loop_device *lo, loff_t offset, loff_t sizelimit,
 	lo->lo_offset = offset;
 	if (lo->lo_sizelimit != sizelimit)
 		lo->lo_sizelimit = sizelimit;
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		lo->lo_logical_blocksize = logical_blocksize;
-		blk_queue_physical_block_size(lo->lo_queue, lo->lo_blocksize);
-		blk_queue_logical_block_size(lo->lo_queue,
-					     lo->lo_logical_blocksize);
-	}
 	set_capacity(lo->lo_disk, x);
 	bd_set_size(bdev, (loff_t)get_capacity(bdev->bd_disk) << 9);
 	/* let user-space know about the new size */
@@ -820,7 +813,6 @@ static void loop_config_discard(struct loop_device *lo)
 	struct file *file = lo->lo_backing_file;
 	struct inode *inode = file->f_mapping->host;
 	struct request_queue *q = lo->lo_queue;
-	int lo_bits = 9;
 
 	/*
 	 * We use punch hole to reclaim the free space used by the
@@ -840,11 +832,9 @@ static void loop_config_discard(struct loop_device *lo)
 	q->limits.discard_granularity = inode->i_sb->s_blocksize;
 	q->limits.discard_alignment = 0;
 
-	if (lo->lo_flags & LO_FLAGS_BLOCKSIZE)
-		lo_bits = blksize_bits(lo->lo_logical_blocksize);
-
-	blk_queue_max_discard_sectors(q, UINT_MAX >> lo_bits);
-	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> lo_bits);
+	blk_queue_max_discard_sectors(q, UINT_MAX >> 9);
+	blk_queue_max_write_zeroes_sectors(q, UINT_MAX >> 9);
 	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
 }
@@ -938,7 +928,6 @@ static int loop_set_fd(struct loop_device *lo, fmode_t mode,
 
 	lo->use_dio = false;
 	lo->lo_blocksize = lo_blocksize;
-	lo->lo_logical_blocksize = 512;
 	lo->lo_device = bdev;
 	lo->lo_flags = lo_flags;
 	lo->lo_backing_file = file;
@@ -1104,7 +1093,6 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	int err;
 	struct loop_func_table *xfer;
 	kuid_t uid = current_uid();
-	int lo_flags = lo->lo_flags;
 
 	if (lo->lo_encrypt_key_size &&
 	    !uid_eq(lo->lo_key_owner, uid) &&
@@ -1137,26 +1125,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if (err)
 		goto exit;
 
-	if (info->lo_flags & LO_FLAGS_BLOCKSIZE) {
-		if (!(lo->lo_flags & LO_FLAGS_BLOCKSIZE))
-			lo->lo_logical_blocksize = 512;
-		lo->lo_flags |= LO_FLAGS_BLOCKSIZE;
-		if (LO_INFO_BLOCKSIZE(info) != 512 &&
-		    LO_INFO_BLOCKSIZE(info) != 1024 &&
-		    LO_INFO_BLOCKSIZE(info) != 2048 &&
-		    LO_INFO_BLOCKSIZE(info) != 4096)
-			return -EINVAL;
-		if (LO_INFO_BLOCKSIZE(info) > lo->lo_blocksize)
-			return -EINVAL;
-	}
-
 	if (lo->lo_offset != info->lo_offset ||
-	    lo->lo_sizelimit != info->lo_sizelimit ||
-	    lo->lo_flags != lo_flags ||
-	    ((lo->lo_flags & LO_FLAGS_BLOCKSIZE) &&
-	     lo->lo_logical_blocksize != LO_INFO_BLOCKSIZE(info))) {
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit,
-				     LO_INFO_BLOCKSIZE(info))) {
+	    lo->lo_sizelimit != info->lo_sizelimit) {
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
 			err = -EFBIG;
 			goto exit;
 		}
@@ -1348,8 +1319,7 @@ static int loop_set_capacity(struct loop_device *lo)
 	if (unlikely(lo->lo_state != Lo_bound))
 		return -ENXIO;
 
-	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit,
-				lo->lo_logical_blocksize);
+	return figure_loop_size(lo, lo->lo_offset, lo->lo_sizelimit);
 }
 
 static int loop_set_dio(struct loop_device *lo, unsigned long arg)
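Note on the loop revert above: with the 4k blocksize support backed out, the driver again assumes 512-byte logical sectors throughout. The restored UINT_MAX >> 9 caps follow from a bio's byte count (bi_iter.bi_size) being a 32-bit value: the largest single discard or write-zeroes request that still fits is UINT_MAX bytes, which expressed in 512-byte sectors is UINT_MAX >> 9 sectors, i.e. just under 4 GiB per request.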
--- a/drivers/block/loop.h
+++ b/drivers/block/loop.h
@@ -49,7 +49,6 @@ struct loop_device {
 	struct file *	lo_backing_file;
 	struct block_device *lo_device;
 	unsigned	lo_blocksize;
-	unsigned	lo_logical_blocksize;
 	void		*key_data;
 
 	gfp_t		old_gfp_mask;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -568,7 +568,6 @@ struct request_queue {
 
 #if defined(CONFIG_BLK_DEV_BSG)
 	bsg_job_fn		*bsg_job_fn;
-	int			bsg_job_size;
 	struct bsg_class_device bsg_dev;
 #endif
--- a/include/linux/bsg-lib.h
+++ b/include/linux/bsg-lib.h
@@ -24,6 +24,7 @@
 #define _BLK_BSG_
 
 #include <linux/blkdev.h>
+#include <scsi/scsi_request.h>
 
 struct request;
 struct device;
@@ -37,6 +38,7 @@ struct bsg_buffer {
 };
 
 struct bsg_job {
+	struct scsi_request sreq;
 	struct device *dev;
 	struct request *req;
--- a/include/uapi/linux/loop.h
+++ b/include/uapi/linux/loop.h
@@ -22,7 +22,6 @@ enum {
 	LO_FLAGS_AUTOCLEAR	= 4,
 	LO_FLAGS_PARTSCAN	= 8,
 	LO_FLAGS_DIRECT_IO	= 16,
-	LO_FLAGS_BLOCKSIZE	= 32,
 };
 
 #include <asm/posix_types.h>	/* for __kernel_old_dev_t */
@@ -60,8 +59,6 @@ struct loop_info64 {
 	__u64		   lo_init[2];
 };
 
-#define LO_INFO_BLOCKSIZE(l) (l)->lo_init[0]
-
 /*
  * Loop filter types
  */
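Note on the uapi revert above: these hunks remove the user-visible half of the feature, the LO_FLAGS_BLOCKSIZE flag and the LO_INFO_BLOCKSIZE() accessor that reused lo_init[0] of struct loop_info64 to carry the requested block size; reusing an existing field this way is the kind of interface question the pull message wants settled properly for 4.14. For illustration only (hypothetical usage of the now-removed interface), user space would have requested a 4k loop device roughly like this:

	struct loop_info64 info;

	/* loop_fd is an already-open fd for the bound loop device */
	memset(&info, 0, sizeof(info));
	info.lo_flags = LO_FLAGS_BLOCKSIZE;
	info.lo_init[0] = 4096;		/* what LO_INFO_BLOCKSIZE(&info) read */
	if (ioctl(loop_fd, LOOP_SET_STATUS64, &info) < 0)
		perror("LOOP_SET_STATUS64");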