Commit 6010720d authored by Jens Axboe

Merge branch 'for-4.11/block' into for-4.11/linus-merge

Signed-off-by: Jens Axboe <axboe@fb.com>
parents 2fe1e8a7 8a9ae523
@@ -249,7 +249,6 @@ struct& cdrom_device_ops\ \{ \hidewidth\cr
 	unsigned\ long);\cr
 \noalign{\medskip}
   &const\ int& capability;& capability flags \cr
-  &int& n_minors;& number of active minor devices \cr
 \};\cr
 }
 $$
@@ -258,13 +257,7 @@ it should add a function pointer to this $struct$. When a particular
 function is not implemented, however, this $struct$ should contain a
 NULL instead. The $capability$ flags specify the capabilities of the
 \cdrom\ hardware and/or low-level \cdrom\ driver when a \cdrom\ drive
-is registered with the \UCD. The value $n_minors$ should be a positive
-value indicating the number of minor devices that are supported by
-the low-level device driver, normally~1. Although these two variables
-are `informative' rather than `operational,' they are included in
-$cdrom_device_ops$ because they describe the capability of the {\em
-driver\/} rather than the {\em drive}. Nomenclature has always been
-difficult in computer programming.
+is registered with the \UCD.

 Note that most functions have fewer parameters than their
 $blkdev_fops$ counterparts. This is because very little of the
...
@@ -8612,10 +8612,10 @@ S:	Maintained
 F:	drivers/net/ethernet/netronome/

 NETWORK BLOCK DEVICE (NBD)
-M:	Markus Pargmann <mpa@pengutronix.de>
+M:	Josef Bacik <jbacik@fb.com>
 S:	Maintained
+L:	linux-block@vger.kernel.org
 L:	nbd-general@lists.sourceforge.net
-T:	git git://git.pengutronix.de/git/mpa/linux-nbd.git
 F:	Documentation/blockdev/nbd.txt
 F:	drivers/block/nbd.c
 F:	include/uapi/linux/nbd.h
@@ -11089,6 +11089,17 @@ L:	linux-mmc@vger.kernel.org
 S:	Maintained
 F:	drivers/mmc/host/sdhci-spear.c

+SECURE ENCRYPTING DEVICE (SED) OPAL DRIVER
+M:	Scott Bauer <scott.bauer@intel.com>
+M:	Jonathan Derrick <jonathan.derrick@intel.com>
+M:	Rafael Antognolli <rafael.antognolli@intel.com>
+L:	linux-block@vger.kernel.org
+S:	Supported
+F:	block/sed*
+F:	block/opal_proto.h
+F:	include/linux/sed*
+F:	include/uapi/linux/sed*
+
 SECURITY SUBSYSTEM
 M:	James Morris <james.l.morris@oracle.com>
 M:	"Serge E. Hallyn" <serge@hallyn.com>
...
@@ -147,6 +147,25 @@ config BLK_WBT_MQ
 	Multiqueue currently doesn't have support for IO scheduling,
 	enabling this option is recommended.

+config BLK_DEBUG_FS
+	bool "Block layer debugging information in debugfs"
+	default y
+	depends on DEBUG_FS
+	---help---
+	Include block layer debugging information in debugfs. This information
+	is mostly useful for kernel developers, but it doesn't incur any cost
+	at runtime.
+
+	Unless you are building a kernel for a tiny system, you should
+	say Y here.
+
+config BLK_SED_OPAL
+	bool "Logic for interfacing with Opal enabled SEDs"
+	---help---
+	Builds Logic for interfacing with Opal enabled controllers.
+	Enabling this option enables users to setup/unlock/lock
+	Locking ranges for SED devices using the Opal protocol.
+
 menu "Partition Types"

 source "block/partitions/Kconfig"
...
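A quick aside on consuming the two new symbols (not part of this commit): block-layer code can test them with IS_ENABLED(), which folds to a compile-time constant and works whether the option is y, m, or unset. A minimal sketch:

#include <linux/kconfig.h>
#include <linux/types.h>

/* Illustration only: report whether Opal SED support was configured in. */
static bool example_sed_opal_available(void)
{
	return IS_ENABLED(CONFIG_BLK_SED_OPAL);
}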
@@ -63,6 +63,56 @@ config DEFAULT_IOSCHED
 	default "cfq" if DEFAULT_CFQ
 	default "noop" if DEFAULT_NOOP

+config MQ_IOSCHED_DEADLINE
+	tristate "MQ deadline I/O scheduler"
+	default y
+	---help---
+	  MQ version of the deadline IO scheduler.
+
+config MQ_IOSCHED_NONE
+	bool
+	default y
+
+choice
+	prompt "Default single-queue blk-mq I/O scheduler"
+	default DEFAULT_SQ_NONE
+	help
+	  Select the I/O scheduler which will be used by default for blk-mq
+	  managed block devices with a single queue.
+
+	config DEFAULT_SQ_DEADLINE
+		bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+	config DEFAULT_SQ_NONE
+		bool "None"
+
+endchoice
+
+config DEFAULT_SQ_IOSCHED
+	string
+	default "mq-deadline" if DEFAULT_SQ_DEADLINE
+	default "none" if DEFAULT_SQ_NONE
+
+choice
+	prompt "Default multi-queue blk-mq I/O scheduler"
+	default DEFAULT_MQ_NONE
+	help
+	  Select the I/O scheduler which will be used by default for blk-mq
+	  managed block devices with multiple queues.
+
+	config DEFAULT_MQ_DEADLINE
+		bool "MQ Deadline" if MQ_IOSCHED_DEADLINE=y
+
+	config DEFAULT_MQ_NONE
+		bool "None"
+
+endchoice
+
+config DEFAULT_MQ_IOSCHED
+	string
+	default "mq-deadline" if DEFAULT_MQ_DEADLINE
+	default "none" if DEFAULT_MQ_NONE
+
 endmenu

 endif
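The generated strings CONFIG_DEFAULT_SQ_IOSCHED and CONFIG_DEFAULT_MQ_IOSCHED are compile-time default scheduler names keyed on queue type. A hedged sketch of how a queue-type-aware default could be chosen; the helper is hypothetical and is not necessarily how block/elevator.c spells it:

#include <linux/blkdev.h>

static const char *example_default_elv_name(struct request_queue *q)
{
	if (!q->mq_ops)
		return CONFIG_DEFAULT_IOSCHED;		/* legacy request_fn queues */
	if (q->nr_hw_queues > 1)
		return CONFIG_DEFAULT_MQ_IOSCHED;	/* blk-mq, multiple hw queues */
	return CONFIG_DEFAULT_SQ_IOSCHED;		/* blk-mq, single hw queue */
}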
@@ -6,7 +6,7 @@ obj-$(CONFIG_BLOCK) := bio.o elevator.o blk-core.o blk-tag.o blk-sysfs.o \
 			blk-flush.o blk-settings.o blk-ioc.o blk-map.o \
 			blk-exec.o blk-merge.o blk-softirq.o blk-timeout.o \
 			blk-lib.o blk-mq.o blk-mq-tag.o blk-stat.o \
-			blk-mq-sysfs.o blk-mq-cpumap.o ioctl.o \
+			blk-mq-sysfs.o blk-mq-cpumap.o blk-mq-sched.o ioctl.o \
 			genhd.o scsi_ioctl.o partition-generic.o ioprio.o \
 			badblocks.o partitions/
@@ -18,6 +18,7 @@ obj-$(CONFIG_BLK_DEV_THROTTLING)	+= blk-throttle.o
 obj-$(CONFIG_IOSCHED_NOOP)	+= noop-iosched.o
 obj-$(CONFIG_IOSCHED_DEADLINE)	+= deadline-iosched.o
 obj-$(CONFIG_IOSCHED_CFQ)	+= cfq-iosched.o
+obj-$(CONFIG_MQ_IOSCHED_DEADLINE)	+= mq-deadline.o
 obj-$(CONFIG_BLOCK_COMPAT)	+= compat_ioctl.o
 obj-$(CONFIG_BLK_CMDLINE_PARSER)	+= cmdline-parser.o
@@ -25,3 +26,5 @@ obj-$(CONFIG_BLK_DEV_INTEGRITY) += bio-integrity.o blk-integrity.o t10-pi.o
 obj-$(CONFIG_BLK_MQ_PCI)	+= blk-mq-pci.o
 obj-$(CONFIG_BLK_DEV_ZONED)	+= blk-zoned.o
 obj-$(CONFIG_BLK_WBT)	+= blk-wbt.o
+obj-$(CONFIG_BLK_DEBUG_FS)	+= blk-mq-debugfs.o
+obj-$(CONFIG_BLK_SED_OPAL)	+= sed-opal.o
@@ -1403,7 +1403,7 @@ struct bio *bio_map_user_iov(struct request_queue *q,
 	bio_set_flag(bio, BIO_USER_MAPPED);

 	/*
-	 * subtle -- if __bio_map_user() ended up bouncing a bio,
+	 * subtle -- if bio_map_user_iov() ended up bouncing a bio,
 	 * it would normally disappear when its bi_end_io is run.
 	 * however, we need it for the unmap, so grab an extra
 	 * reference to it
@@ -1445,8 +1445,8 @@ static void __bio_unmap_user(struct bio *bio)
 *	bio_unmap_user	-	unmap a bio
 *	@bio:		the bio being unmapped
 *
- *	Unmap a bio previously mapped by bio_map_user(). Must be called with
- *	a process context.
+ *	Unmap a bio previously mapped by bio_map_user_iov(). Must be called from
+ *	process context.
 *
 *	bio_unmap_user() may sleep.
 */
...
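For context on the two comment fixes above, the pairing they describe looks roughly like this in a caller. The bio_map_user_iov() signature (queue, iov_iter, gfp mask) is assumed from this era of the kernel; the example is a sketch, not code from the commit:

#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/err.h>
#include <linux/uio.h>

static int example_map_then_unmap(struct request_queue *q, struct iov_iter *iter)
{
	struct bio *bio = bio_map_user_iov(q, iter, GFP_KERNEL);	/* pins user pages */

	if (IS_ERR(bio))
		return PTR_ERR(bio);

	/* ... attach the bio to a request and wait for it to complete ... */

	bio_unmap_user(bio);		/* may sleep, hence "process context" above */
	return 0;
}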
@@ -1223,6 +1223,9 @@ int blkcg_activate_policy(struct request_queue *q,
 	if (blkcg_policy_enabled(q, pol))
 		return 0;

+	if (q->mq_ops)
+		blk_mq_freeze_queue(q);
+	else
 		blk_queue_bypass_start(q);
 pd_prealloc:
 	if (!pd_prealloc) {
@@ -1261,6 +1264,9 @@ int blkcg_activate_policy(struct request_queue *q,
 	spin_unlock_irq(q->queue_lock);
 out_bypass_end:
+	if (q->mq_ops)
+		blk_mq_unfreeze_queue(q);
+	else
 		blk_queue_bypass_end(q);
 	if (pd_prealloc)
 		pol->pd_free_fn(pd_prealloc);
@@ -1284,7 +1290,11 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	if (!blkcg_policy_enabled(q, pol))
 		return;

+	if (q->mq_ops)
+		blk_mq_freeze_queue(q);
+	else
 		blk_queue_bypass_start(q);
+
 	spin_lock_irq(q->queue_lock);

 	__clear_bit(pol->plid, q->blkcg_pols);
@@ -1304,6 +1314,10 @@ void blkcg_deactivate_policy(struct request_queue *q,
 	}

 	spin_unlock_irq(q->queue_lock);
+
+	if (q->mq_ops)
+		blk_mq_unfreeze_queue(q);
+	else
 		blk_queue_bypass_end(q);
 }
 EXPORT_SYMBOL_GPL(blkcg_deactivate_policy);
...
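All four hunks above repeat one idea: quiesce the queue in whichever way its type supports before changing policy state. Spelled out as a pair of helpers (the helper names are mine, not part of the commit):

#include <linux/blkdev.h>
#include <linux/blk-mq.h>

static void example_blkcg_quiesce(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_freeze_queue(q);		/* blk-mq: wait out queue usage */
	else
		blk_queue_bypass_start(q);	/* legacy path: enter bypass mode */
}

static void example_blkcg_unquiesce(struct request_queue *q)
{
	if (q->mq_ops)
		blk_mq_unfreeze_queue(q);
	else
		blk_queue_bypass_end(q);
}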
@@ -39,6 +39,7 @@
 #include "blk.h"
 #include "blk-mq.h"
+#include "blk-mq-sched.h"
 #include "blk-wbt.h"

 EXPORT_TRACEPOINT_SYMBOL_GPL(block_bio_remap);
@@ -134,6 +135,7 @@ void blk_rq_init(struct request_queue *q, struct request *rq)
 	rq->cmd = rq->__cmd;
 	rq->cmd_len = BLK_MAX_CDB;
 	rq->tag = -1;
+	rq->internal_tag = -1;
 	rq->start_time = jiffies;
 	set_start_time_ns(rq);
 	rq->part = NULL;
@@ -525,12 +527,14 @@ void blk_set_queue_dying(struct request_queue *q)
 	else {
 		struct request_list *rl;

+		spin_lock_irq(q->queue_lock);
 		blk_queue_for_each_rl(rl, q) {
 			if (rl->rq_pool) {
 				wake_up(&rl->wait[BLK_RW_SYNC]);
 				wake_up(&rl->wait[BLK_RW_ASYNC]);
 			}
 		}
+		spin_unlock_irq(q->queue_lock);
 	}
 }
 EXPORT_SYMBOL_GPL(blk_set_queue_dying);
@@ -1033,28 +1037,12 @@ static bool blk_rq_should_init_elevator(struct bio *bio)
 	 * Flush requests do not use the elevator so skip initialization.
 	 * This allows a request to share the flush and elevator data.
 	 */
-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(bio->bi_opf))
 		return false;

 	return true;
 }

-/**
- * rq_ioc - determine io_context for request allocation
- * @bio: request being allocated is for this bio (can be %NULL)
- *
- * Determine io_context to use for request allocation for @bio. May return
- * %NULL if %current->io_context doesn't exist.
- */
-static struct io_context *rq_ioc(struct bio *bio)
-{
-#ifdef CONFIG_BLK_CGROUP
-	if (bio && bio->bi_ioc)
-		return bio->bi_ioc;
-#endif
-	return current->io_context;
-}
-
 /**
  * __get_request - get a free request
  * @rl: request list to allocate from
@@ -1655,7 +1643,7 @@ static blk_qc_t blk_queue_bio(struct request_queue *q, struct bio *bio)
 		return BLK_QC_T_NONE;
 	}

-	if (bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) {
+	if (op_is_flush(bio->bi_opf)) {
 		spin_lock_irq(q->queue_lock);
 		where = ELEVATOR_INSERT_FLUSH;
 		goto get_rq;
@@ -1894,7 +1882,7 @@ generic_make_request_checks(struct bio *bio)
 	 * drivers without flush support don't have to worry
 	 * about them.
 	 */
-	if ((bio->bi_opf & (REQ_PREFLUSH | REQ_FUA)) &&
+	if (op_is_flush(bio->bi_opf) &&
 	    !test_bit(QUEUE_FLAG_WC, &q->queue_flags)) {
 		bio->bi_opf &= ~(REQ_PREFLUSH | REQ_FUA);
 		if (!nr_sectors) {
@@ -2143,7 +2131,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	if (q->mq_ops) {
 		if (blk_queue_io_stat(q))
 			blk_account_io_start(rq, true);
-		blk_mq_insert_request(rq, false, true, false);
+		blk_mq_sched_insert_request(rq, false, true, false, false);
 		return 0;
 	}
@@ -2159,7 +2147,7 @@ int blk_insert_cloned_request(struct request_queue *q, struct request *rq)
 	 */
 	BUG_ON(blk_queued_rq(rq));

-	if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+	if (op_is_flush(rq->cmd_flags))
 		where = ELEVATOR_INSERT_FLUSH;

 	add_acct_request(q, rq, where);
@@ -3270,7 +3258,7 @@ void blk_flush_plug_list(struct blk_plug *plug, bool from_schedule)
 		/*
 		 * rq is already accounted, so use raw insert
 		 */
-		if (rq->cmd_flags & (REQ_PREFLUSH | REQ_FUA))
+		if (op_is_flush(rq->cmd_flags))
 			__elv_add_request(q, rq, ELEVATOR_INSERT_FLUSH);
 		else
 			__elv_add_request(q, rq, ELEVATOR_INSERT_SORT_MERGE);
...
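The blk-core hunks above replace open-coded REQ_PREFLUSH | REQ_FUA tests with op_is_flush(). What such a helper boils down to, as an illustration rather than a verbatim copy of include/linux/blk_types.h:

#include <linux/blk_types.h>

static inline bool example_op_is_flush(unsigned int op)
{
	return op & (REQ_FUA | REQ_PREFLUSH);
}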
@@ -9,6 +9,7 @@
 #include <linux/sched/sysctl.h>

 #include "blk.h"
+#include "blk-mq-sched.h"

 /*
  * for max sense size
@@ -65,7 +66,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	 * be reused after dying flag is set
 	 */
 	if (q->mq_ops) {
-		blk_mq_insert_request(rq, at_head, true, false);
+		blk_mq_sched_insert_request(rq, at_head, true, false, false);
 		return;
 	}
...
@@ -74,6 +74,7 @@
 #include "blk.h"
 #include "blk-mq.h"
 #include "blk-mq-tag.h"
+#include "blk-mq-sched.h"

 /* FLUSH/FUA sequences */
 enum {
@@ -391,9 +392,10 @@ static void mq_flush_data_end_io(struct request *rq, int error)
 	 * the comment in flush_end_io().
 	 */
 	spin_lock_irqsave(&fq->mq_flush_lock, flags);
-	if (blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error))
-		blk_mq_run_hw_queue(hctx, true);
+	blk_flush_complete_seq(rq, fq, REQ_FSEQ_DATA, error);
 	spin_unlock_irqrestore(&fq->mq_flush_lock, flags);
+
+	blk_mq_run_hw_queue(hctx, true);
 }

 /**
@@ -453,9 +455,9 @@ void blk_insert_flush(struct request *rq)
 	 */
 	if ((policy & REQ_FSEQ_DATA) &&
 	    !(policy & (REQ_FSEQ_PREFLUSH | REQ_FSEQ_POSTFLUSH))) {
-		if (q->mq_ops) {
-			blk_mq_insert_request(rq, false, true, false);
-		} else
+		if (q->mq_ops)
+			blk_mq_sched_insert_request(rq, false, true, false, false);
+		else
 			list_add_tail(&rq->queuelist, &q->queue_head);
 		return;
 	}
...
@@ -43,8 +43,10 @@ static void ioc_exit_icq(struct io_cq *icq)
 	if (icq->flags & ICQ_EXITED)
 		return;

-	if (et->ops.elevator_exit_icq_fn)
-		et->ops.elevator_exit_icq_fn(icq);
+	if (et->uses_mq && et->ops.mq.exit_icq)
+		et->ops.mq.exit_icq(icq);
+	else if (!et->uses_mq && et->ops.sq.elevator_exit_icq_fn)
+		et->ops.sq.elevator_exit_icq_fn(icq);

 	icq->flags |= ICQ_EXITED;
 }
@@ -383,8 +385,10 @@ struct io_cq *ioc_create_icq(struct io_context *ioc, struct request_queue *q,
 	if (likely(!radix_tree_insert(&ioc->icq_tree, q->id, icq))) {
 		hlist_add_head(&icq->ioc_node, &ioc->icq_list);
 		list_add(&icq->q_node, &q->icq_list);
-		if (et->ops.elevator_init_icq_fn)
-			et->ops.elevator_init_icq_fn(icq);
+		if (et->uses_mq && et->ops.mq.init_icq)
+			et->ops.mq.init_icq(icq);
+		else if (!et->uses_mq && et->ops.sq.elevator_init_icq_fn)
+			et->ops.sq.elevator_init_icq_fn(icq);
 	} else {
 		kmem_cache_free(et->icq_cache, icq);
 		icq = ioc_lookup_icq(ioc, q);
...
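The blk-ioc and blk-merge call sites here rely on the split elevator ops introduced by this series: a union selected by uses_mq, so an elevator registers either the legacy single-queue table or the blk-mq table. A paraphrased sketch of that shape (the real definition lives in include/linux/elevator.h and may differ in detail):

#include <linux/elevator.h>

struct example_elevator_type {
	union {
		struct elevator_ops sq;		/* legacy request_fn elevators */
		struct elevator_mq_ops mq;	/* blk-mq schedulers */
	} ops;
	unsigned int uses_mq;			/* selects which table is valid */
};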
@@ -763,8 +763,8 @@ int blk_attempt_req_merge(struct request_queue *q, struct request *rq,
 {
 	struct elevator_queue *e = q->elevator;

-	if (e->type->ops.elevator_allow_rq_merge_fn)
-		if (!e->type->ops.elevator_allow_rq_merge_fn(q, rq, next))
+	if (!e->uses_mq && e->type->ops.sq.elevator_allow_rq_merge_fn)
+		if (!e->type->ops.sq.elevator_allow_rq_merge_fn(q, rq, next))
 			return 0;

 	return attempt_merge(q, rq, next);
...
#ifndef BLK_MQ_SCHED_H
#define BLK_MQ_SCHED_H
#include "blk-mq.h"
#include "blk-mq-tag.h"
int blk_mq_sched_init_hctx_data(struct request_queue *q, size_t size,
int (*init)(struct blk_mq_hw_ctx *),
void (*exit)(struct blk_mq_hw_ctx *));
void blk_mq_sched_free_hctx_data(struct request_queue *q,
void (*exit)(struct blk_mq_hw_ctx *));
struct request *blk_mq_sched_get_request(struct request_queue *q, struct bio *bio, unsigned int op, struct blk_mq_alloc_data *data);
void blk_mq_sched_put_request(struct request *rq);
void blk_mq_sched_request_inserted(struct request *rq);
bool blk_mq_sched_bypass_insert(struct blk_mq_hw_ctx *hctx, struct request *rq);
bool blk_mq_sched_try_merge(struct request_queue *q, struct bio *bio);
bool __blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio);
bool blk_mq_sched_try_insert_merge(struct request_queue *q, struct request *rq);
void blk_mq_sched_restart_queues(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_insert_request(struct request *rq, bool at_head,
bool run_queue, bool async, bool can_block);
void blk_mq_sched_insert_requests(struct request_queue *q,
struct blk_mq_ctx *ctx,
struct list_head *list, bool run_queue_async);
void blk_mq_sched_dispatch_requests(struct blk_mq_hw_ctx *hctx);
void blk_mq_sched_move_to_dispatch(struct blk_mq_hw_ctx *hctx,
struct list_head *rq_list,
struct request *(*get_rq)(struct blk_mq_hw_ctx *));
int blk_mq_sched_setup(struct request_queue *q);
void blk_mq_sched_teardown(struct request_queue *q);
int blk_mq_sched_init(struct request_queue *q);
static inline bool
blk_mq_sched_bio_merge(struct request_queue *q, struct bio *bio)
{
struct elevator_queue *e = q->elevator;
if (!e || blk_queue_nomerges(q) || !bio_mergeable(bio))
return false;
return __blk_mq_sched_bio_merge(q, bio);
}
static inline int blk_mq_sched_get_rq_priv(struct request_queue *q,
struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.mq.get_rq_priv)
return e->type->ops.mq.get_rq_priv(q, rq);
return 0;
}
static inline void blk_mq_sched_put_rq_priv(struct request_queue *q,
struct request *rq)
{
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.mq.put_rq_priv)
e->type->ops.mq.put_rq_priv(q, rq);
}
static inline bool
blk_mq_sched_allow_merge(struct request_queue *q, struct request *rq,
struct bio *bio)
{
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.mq.allow_merge)
return e->type->ops.mq.allow_merge(q, rq, bio);
return true;
}
static inline void
blk_mq_sched_completed_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct elevator_queue *e = hctx->queue->elevator;
if (e && e->type->ops.mq.completed_request)
e->type->ops.mq.completed_request(hctx, rq);
BUG_ON(rq->internal_tag == -1);
blk_mq_put_tag(hctx, hctx->sched_tags, rq->mq_ctx, rq->internal_tag);
}
static inline void blk_mq_sched_started_request(struct request *rq)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.mq.started_request)
e->type->ops.mq.started_request(rq);
}
static inline void blk_mq_sched_requeue_request(struct request *rq)
{
struct request_queue *q = rq->q;
struct elevator_queue *e = q->elevator;
if (e && e->type->ops.mq.requeue_request)
e->type->ops.mq.requeue_request(rq);
}
static inline bool blk_mq_sched_has_work(struct blk_mq_hw_ctx *hctx)
{
struct elevator_queue *e = hctx->queue->elevator;
if (e && e->type->ops.mq.has_work)
return e->type->ops.mq.has_work(hctx);
return false;
}
static inline void blk_mq_sched_mark_restart(struct blk_mq_hw_ctx *hctx)
{
if (!test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state)) {
set_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
if (hctx->flags & BLK_MQ_F_TAG_SHARED) {
struct request_queue *q = hctx->queue;
if (!test_bit(QUEUE_FLAG_RESTART, &q->queue_flags))
set_bit(QUEUE_FLAG_RESTART, &q->queue_flags);
}
}
}
static inline bool blk_mq_sched_needs_restart(struct blk_mq_hw_ctx *hctx)
{
return test_bit(BLK_MQ_S_SCHED_RESTART, &hctx->state);
}
#endif
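These inline wrappers all follow the same pattern: look up q->elevator and call the scheduler's mq hook if it provides one. A hedged sketch of how a submission path strings a few of them together; it mirrors the intent of this header, not the exact code in blk-mq.c:

static void example_submit_bio(struct request_queue *q, struct bio *bio)
{
	struct blk_mq_alloc_data data = { .q = q };
	struct request *rq;

	if (blk_mq_sched_bio_merge(q, bio))	/* merged into an already-queued request */
		return;

	rq = blk_mq_sched_get_request(q, bio, bio->bi_opf, &data);
	if (!rq)
		return;

	blk_mq_sched_insert_request(rq, false, true, false, false);
}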
@@ -122,123 +122,16 @@ static ssize_t blk_mq_hw_sysfs_store(struct kobject *kobj,
	return res;
}
static ssize_t blk_mq_sysfs_dispatched_show(struct blk_mq_ctx *ctx, char *page) static ssize_t blk_mq_hw_sysfs_nr_tags_show(struct blk_mq_hw_ctx *hctx,
{
return sprintf(page, "%lu %lu\n", ctx->rq_dispatched[1],
ctx->rq_dispatched[0]);
}
static ssize_t blk_mq_sysfs_merged_show(struct blk_mq_ctx *ctx, char *page)
{
return sprintf(page, "%lu\n", ctx->rq_merged);
}
static ssize_t blk_mq_sysfs_completed_show(struct blk_mq_ctx *ctx, char *page)
{
return sprintf(page, "%lu %lu\n", ctx->rq_completed[1],
ctx->rq_completed[0]);
}
static ssize_t sysfs_list_show(char *page, struct list_head *list, char *msg)
{
struct request *rq;
int len = snprintf(page, PAGE_SIZE - 1, "%s:\n", msg);
list_for_each_entry(rq, list, queuelist) {
const int rq_len = 2 * sizeof(rq) + 2;
/* if the output will be truncated */
if (PAGE_SIZE - 1 < len + rq_len) {
/* backspacing if it can't hold '\t...\n' */
if (PAGE_SIZE - 1 < len + 5)
len -= rq_len;
len += snprintf(page + len, PAGE_SIZE - 1 - len,
"\t...\n");
break;
}
len += snprintf(page + len, PAGE_SIZE - 1 - len,
"\t%p\n", rq);
}
return len;
}
static ssize_t blk_mq_sysfs_rq_list_show(struct blk_mq_ctx *ctx, char *page)
{
ssize_t ret;
spin_lock(&ctx->lock);
ret = sysfs_list_show(page, &ctx->rq_list, "CTX pending");
spin_unlock(&ctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_poll_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return sprintf(page, "considered=%lu, invoked=%lu, success=%lu\n",
hctx->poll_considered, hctx->poll_invoked,
hctx->poll_success);
}
static ssize_t blk_mq_hw_sysfs_poll_store(struct blk_mq_hw_ctx *hctx,
const char *page, size_t size)
{
hctx->poll_considered = hctx->poll_invoked = hctx->poll_success = 0;
return size;
}
static ssize_t blk_mq_hw_sysfs_queued_show(struct blk_mq_hw_ctx *hctx,
char *page) char *page)
{ {
return sprintf(page, "%lu\n", hctx->queued); return sprintf(page, "%u\n", hctx->tags->nr_tags);
} }
static ssize_t blk_mq_hw_sysfs_run_show(struct blk_mq_hw_ctx *hctx, char *page) static ssize_t blk_mq_hw_sysfs_nr_reserved_tags_show(struct blk_mq_hw_ctx *hctx,
{
return sprintf(page, "%lu\n", hctx->run);
}
static ssize_t blk_mq_hw_sysfs_dispatched_show(struct blk_mq_hw_ctx *hctx,
char *page) char *page)
{ {
char *start_page = page; return sprintf(page, "%u\n", hctx->tags->nr_reserved_tags);
int i;
page += sprintf(page, "%8u\t%lu\n", 0U, hctx->dispatched[0]);
for (i = 1; i < BLK_MQ_MAX_DISPATCH_ORDER - 1; i++) {
unsigned int d = 1U << (i - 1);
page += sprintf(page, "%8u\t%lu\n", d, hctx->dispatched[i]);
}
page += sprintf(page, "%8u+\t%lu\n", 1U << (i - 1),
hctx->dispatched[i]);
return page - start_page;
}
static ssize_t blk_mq_hw_sysfs_rq_list_show(struct blk_mq_hw_ctx *hctx,
char *page)
{
ssize_t ret;
spin_lock(&hctx->lock);
ret = sysfs_list_show(page, &hctx->dispatch, "HCTX pending");
spin_unlock(&hctx->lock);
return ret;
}
static ssize_t blk_mq_hw_sysfs_tags_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return blk_mq_tag_sysfs_show(hctx->tags, page);
}
static ssize_t blk_mq_hw_sysfs_active_show(struct blk_mq_hw_ctx *hctx, char *page)
{
return sprintf(page, "%u\n", atomic_read(&hctx->nr_active));
} }
static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page) static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
@@ -259,121 +152,27 @@ static ssize_t blk_mq_hw_sysfs_cpus_show(struct blk_mq_hw_ctx *hctx, char *page)
	return ret;
}
static void blk_mq_stat_clear(struct blk_mq_hw_ctx *hctx)
{
struct blk_mq_ctx *ctx;
unsigned int i;
hctx_for_each_ctx(hctx, ctx, i) {
blk_stat_init(&ctx->stat[BLK_STAT_READ]);
blk_stat_init(&ctx->stat[BLK_STAT_WRITE]);
}
}
static ssize_t blk_mq_hw_sysfs_stat_store(struct blk_mq_hw_ctx *hctx,
const char *page, size_t count)
{
blk_mq_stat_clear(hctx);
return count;
}
static ssize_t print_stat(char *page, struct blk_rq_stat *stat, const char *pre)
{
return sprintf(page, "%s samples=%llu, mean=%lld, min=%lld, max=%lld\n",
pre, (long long) stat->nr_samples,
(long long) stat->mean, (long long) stat->min,
(long long) stat->max);
}
static ssize_t blk_mq_hw_sysfs_stat_show(struct blk_mq_hw_ctx *hctx, char *page)
{
struct blk_rq_stat stat[2];
ssize_t ret;
blk_stat_init(&stat[BLK_STAT_READ]);
blk_stat_init(&stat[BLK_STAT_WRITE]);
blk_hctx_stat_get(hctx, stat);
ret = print_stat(page, &stat[BLK_STAT_READ], "read :");
ret += print_stat(page + ret, &stat[BLK_STAT_WRITE], "write:");
return ret;
}
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_sysfs_dispatched_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_merged = {
.attr = {.name = "merged", .mode = S_IRUGO },
.show = blk_mq_sysfs_merged_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_completed = {
.attr = {.name = "completed", .mode = S_IRUGO },
.show = blk_mq_sysfs_completed_show,
};
static struct blk_mq_ctx_sysfs_entry blk_mq_sysfs_rq_list = {
.attr = {.name = "rq_list", .mode = S_IRUGO },
.show = blk_mq_sysfs_rq_list_show,
};
static struct attribute *default_ctx_attrs[] = { static struct attribute *default_ctx_attrs[] = {
&blk_mq_sysfs_dispatched.attr,
&blk_mq_sysfs_merged.attr,
&blk_mq_sysfs_completed.attr,
&blk_mq_sysfs_rq_list.attr,
NULL, NULL,
}; };
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_queued = { static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_tags = {
.attr = {.name = "queued", .mode = S_IRUGO }, .attr = {.name = "nr_tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_queued_show, .show = blk_mq_hw_sysfs_nr_tags_show,
}; };
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_run = { static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_nr_reserved_tags = {
.attr = {.name = "run", .mode = S_IRUGO }, .attr = {.name = "nr_reserved_tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_run_show, .show = blk_mq_hw_sysfs_nr_reserved_tags_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_dispatched = {
.attr = {.name = "dispatched", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_dispatched_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_active = {
.attr = {.name = "active", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_active_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_pending = {
.attr = {.name = "pending", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_rq_list_show,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_tags = {
.attr = {.name = "tags", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_tags_show,
}; };
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = { static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_cpus = {
.attr = {.name = "cpu_list", .mode = S_IRUGO }, .attr = {.name = "cpu_list", .mode = S_IRUGO },
.show = blk_mq_hw_sysfs_cpus_show, .show = blk_mq_hw_sysfs_cpus_show,
}; };
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_poll = {
.attr = {.name = "io_poll", .mode = S_IWUSR | S_IRUGO },
.show = blk_mq_hw_sysfs_poll_show,
.store = blk_mq_hw_sysfs_poll_store,
};
static struct blk_mq_hw_ctx_sysfs_entry blk_mq_hw_sysfs_stat = {
.attr = {.name = "stats", .mode = S_IRUGO | S_IWUSR },
.show = blk_mq_hw_sysfs_stat_show,
.store = blk_mq_hw_sysfs_stat_store,
};
static struct attribute *default_hw_ctx_attrs[] = { static struct attribute *default_hw_ctx_attrs[] = {
&blk_mq_hw_sysfs_queued.attr, &blk_mq_hw_sysfs_nr_tags.attr,
&blk_mq_hw_sysfs_run.attr, &blk_mq_hw_sysfs_nr_reserved_tags.attr,
&blk_mq_hw_sysfs_dispatched.attr,
&blk_mq_hw_sysfs_pending.attr,
&blk_mq_hw_sysfs_tags.attr,
&blk_mq_hw_sysfs_cpus.attr, &blk_mq_hw_sysfs_cpus.attr,
&blk_mq_hw_sysfs_active.attr,
&blk_mq_hw_sysfs_poll.attr,
&blk_mq_hw_sysfs_stat.attr,
NULL, NULL,
}; };
@@ -455,6 +254,8 @@ static void __blk_mq_unregister_dev(struct device *dev, struct request_queue *q)
 		kobject_put(&hctx->kobj);
 	}

+	blk_mq_debugfs_unregister(q);
+
 	kobject_uevent(&q->mq_kobj, KOBJ_REMOVE);
 	kobject_del(&q->mq_kobj);
 	kobject_put(&q->mq_kobj);
@@ -504,6 +305,8 @@ int blk_mq_register_dev(struct device *dev, struct request_queue *q)

 	kobject_uevent(&q->mq_kobj, KOBJ_ADD);

+	blk_mq_debugfs_register(q, kobject_name(&dev->kobj));
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
@@ -529,6 +332,8 @@ void blk_mq_sysfs_unregister(struct request_queue *q)
 	if (!q->mq_sysfs_init_done)
 		return;

+	blk_mq_debugfs_unregister_hctxs(q);
+
 	queue_for_each_hw_ctx(q, hctx, i)
 		blk_mq_unregister_hctx(hctx);
 }
@@ -541,6 +346,8 @@ int blk_mq_sysfs_register(struct request_queue *q)
 	if (!q->mq_sysfs_init_done)
 		return ret;

+	blk_mq_debugfs_register_hctxs(q);
+
 	queue_for_each_hw_ctx(q, hctx, i) {
 		ret = blk_mq_register_hctx(hctx);
 		if (ret)
...
@@ -90,113 +90,97 @@ static inline bool hctx_may_queue(struct blk_mq_hw_ctx *hctx,
	return atomic_read(&hctx->nr_active) < depth;
}
static int __bt_get(struct blk_mq_hw_ctx *hctx, struct sbitmap_queue *bt) static int __blk_mq_get_tag(struct blk_mq_alloc_data *data,
struct sbitmap_queue *bt)
{ {
if (!hctx_may_queue(hctx, bt)) if (!(data->flags & BLK_MQ_REQ_INTERNAL) &&
!hctx_may_queue(data->hctx, bt))
return -1; return -1;
return __sbitmap_queue_get(bt); return __sbitmap_queue_get(bt);
} }
static int bt_get(struct blk_mq_alloc_data *data, struct sbitmap_queue *bt, unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data)
struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags)
{ {
struct blk_mq_tags *tags = blk_mq_tags_from_data(data);
struct sbitmap_queue *bt;
struct sbq_wait_state *ws; struct sbq_wait_state *ws;
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
unsigned int tag_offset;
bool drop_ctx;
int tag; int tag;
tag = __bt_get(hctx, bt); if (data->flags & BLK_MQ_REQ_RESERVED) {
if (unlikely(!tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
bt = &tags->breserved_tags;
tag_offset = 0;
} else {
bt = &tags->bitmap_tags;
tag_offset = tags->nr_reserved_tags;
}
tag = __blk_mq_get_tag(data, bt);
if (tag != -1) if (tag != -1)
return tag; goto found_tag;
if (data->flags & BLK_MQ_REQ_NOWAIT) if (data->flags & BLK_MQ_REQ_NOWAIT)
return -1; return BLK_MQ_TAG_FAIL;
ws = bt_wait_ptr(bt, hctx); ws = bt_wait_ptr(bt, data->hctx);
drop_ctx = data->ctx == NULL;
do { do {
prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE); prepare_to_wait(&ws->wait, &wait, TASK_UNINTERRUPTIBLE);
tag = __bt_get(hctx, bt); tag = __blk_mq_get_tag(data, bt);
if (tag != -1) if (tag != -1)
break; break;
/* /*
* We're out of tags on this hardware queue, kick any * We're out of tags on this hardware queue, kick any
* pending IO submits before going to sleep waiting for * pending IO submits before going to sleep waiting for
* some to complete. Note that hctx can be NULL here for * some to complete.
* reserved tag allocation.
*/ */
if (hctx) blk_mq_run_hw_queue(data->hctx, false);
blk_mq_run_hw_queue(hctx, false);
/* /*
* Retry tag allocation after running the hardware queue, * Retry tag allocation after running the hardware queue,
* as running the queue may also have found completions. * as running the queue may also have found completions.
*/ */
tag = __bt_get(hctx, bt); tag = __blk_mq_get_tag(data, bt);
if (tag != -1) if (tag != -1)
break; break;
if (data->ctx)
blk_mq_put_ctx(data->ctx); blk_mq_put_ctx(data->ctx);
io_schedule(); io_schedule();
data->ctx = blk_mq_get_ctx(data->q); data->ctx = blk_mq_get_ctx(data->q);
data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu); data->hctx = blk_mq_map_queue(data->q, data->ctx->cpu);
if (data->flags & BLK_MQ_REQ_RESERVED) { tags = blk_mq_tags_from_data(data);
bt = &data->hctx->tags->breserved_tags; if (data->flags & BLK_MQ_REQ_RESERVED)
} else { bt = &tags->breserved_tags;
hctx = data->hctx; else
bt = &hctx->tags->bitmap_tags; bt = &tags->bitmap_tags;
}
finish_wait(&ws->wait, &wait);
ws = bt_wait_ptr(bt, hctx);
} while (1);
finish_wait(&ws->wait, &wait); finish_wait(&ws->wait, &wait);
return tag; ws = bt_wait_ptr(bt, data->hctx);
} } while (1);
static unsigned int __blk_mq_get_tag(struct blk_mq_alloc_data *data)
{
int tag;
tag = bt_get(data, &data->hctx->tags->bitmap_tags, data->hctx,
data->hctx->tags);
if (tag >= 0)
return tag + data->hctx->tags->nr_reserved_tags;
return BLK_MQ_TAG_FAIL;
}
static unsigned int __blk_mq_get_reserved_tag(struct blk_mq_alloc_data *data)
{
int tag;
if (unlikely(!data->hctx->tags->nr_reserved_tags)) {
WARN_ON_ONCE(1);
return BLK_MQ_TAG_FAIL;
}
tag = bt_get(data, &data->hctx->tags->breserved_tags, NULL, if (drop_ctx && data->ctx)
data->hctx->tags); blk_mq_put_ctx(data->ctx);
if (tag < 0)
return BLK_MQ_TAG_FAIL;
return tag; finish_wait(&ws->wait, &wait);
}
unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data) found_tag:
{ return tag + tag_offset;
if (data->flags & BLK_MQ_REQ_RESERVED)
return __blk_mq_get_reserved_tag(data);
return __blk_mq_get_tag(data);
} }
void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx, void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
unsigned int tag) struct blk_mq_ctx *ctx, unsigned int tag)
{ {
struct blk_mq_tags *tags = hctx->tags;
if (tag >= tags->nr_reserved_tags) { if (tag >= tags->nr_reserved_tags) {
const int real_tag = tag - tags->nr_reserved_tags; const int real_tag = tag - tags->nr_reserved_tags;
@@ -312,11 +296,11 @@ int blk_mq_reinit_tagset(struct blk_mq_tag_set *set)
 		struct blk_mq_tags *tags = set->tags[i];

 		for (j = 0; j < tags->nr_tags; j++) {
-			if (!tags->rqs[j])
+			if (!tags->static_rqs[j])
 				continue;

 			ret = set->ops->reinit_request(set->driver_data,
-						tags->rqs[j]);
+						tags->static_rqs[j]);
 			if (ret)
 				goto out;
 		}
@@ -351,11 +335,6 @@ void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 	}
 }

-static unsigned int bt_unused_tags(const struct sbitmap_queue *bt)
-{
-	return bt->sb.depth - sbitmap_weight(&bt->sb);
-}
-
 static int bt_alloc(struct sbitmap_queue *bt, unsigned int depth,
 		    bool round_robin, int node)
 {
@@ -411,19 +390,56 @@ void blk_mq_free_tags(struct blk_mq_tags *tags)
 	kfree(tags);
 }

-int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int tdepth)
+int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
+			    struct blk_mq_tags **tagsptr, unsigned int tdepth,
+			    bool can_grow)
 {
+	struct blk_mq_tags *tags = *tagsptr;
+
+	if (tdepth <= tags->nr_reserved_tags)
+		return -EINVAL;
+
 	tdepth -= tags->nr_reserved_tags;
-	if (tdepth > tags->nr_tags)
-		return -EINVAL;

 	/*
-	 * Don't need (or can't) update reserved tags here, they remain
-	 * static and should never need resizing.
+	 * If we are allowed to grow beyond the original size, allocate
+	 * a new set of tags before freeing the old one.
 	 */
-	sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+	if (tdepth > tags->nr_tags) {
+		struct blk_mq_tag_set *set = hctx->queue->tag_set;
+		struct blk_mq_tags *new;
+		bool ret;
+
+		if (!can_grow)
+			return -EINVAL;
+
+		/*
+		 * We need some sort of upper limit, set it high enough that
+		 * no valid use cases should require more.
+		 */
+		if (tdepth > 16 * BLKDEV_MAX_RQ)
+			return -EINVAL;
+
+		new = blk_mq_alloc_rq_map(set, hctx->queue_num, tdepth, 0);
+		if (!new)
+			return -ENOMEM;
+		ret = blk_mq_alloc_rqs(set, new, hctx->queue_num, tdepth);
+		if (ret) {
+			blk_mq_free_rq_map(new);
+			return -ENOMEM;
+		}
+
+		blk_mq_free_rqs(set, *tagsptr, hctx->queue_num);
+		blk_mq_free_rq_map(*tagsptr);
+		*tagsptr = new;
+	} else {
+		/*
+		 * Don't need (or can't) update reserved tags here, they
+		 * remain static and should never need resizing.
+		 */
+		sbitmap_queue_resize(&tags->bitmap_tags, tdepth);
+	}

-	blk_mq_tag_wakeup_all(tags, false);
 	return 0;
 }
@@ -454,25 +470,3 @@ u32 blk_mq_unique_tag(struct request *rq)
 			(rq->tag & BLK_MQ_UNIQUE_TAG_MASK);
 }
 EXPORT_SYMBOL(blk_mq_unique_tag);
-
-ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page)
-{
-	char *orig_page = page;
-	unsigned int free, res;
-
-	if (!tags)
-		return 0;
-
-	page += sprintf(page, "nr_tags=%u, reserved_tags=%u, "
-			"bits_per_word=%u\n",
-			tags->nr_tags, tags->nr_reserved_tags,
-			1U << tags->bitmap_tags.sb.shift);
-
-	free = bt_unused_tags(&tags->bitmap_tags);
-	res = bt_unused_tags(&tags->breserved_tags);
-
-	page += sprintf(page, "nr_free=%u, nr_reserved=%u\n", free, res);
-	page += sprintf(page, "active_queues=%u\n", atomic_read(&tags->active_queues));
-
-	return page - orig_page;
-}
@@ -16,6 +16,7 @@ struct blk_mq_tags {
 	struct sbitmap_queue breserved_tags;

 	struct request **rqs;
+	struct request **static_rqs;
 	struct list_head page_list;
 };

@@ -24,11 +25,12 @@ extern struct blk_mq_tags *blk_mq_init_tags(unsigned int nr_tags, unsigned int r
 extern void blk_mq_free_tags(struct blk_mq_tags *tags);

 extern unsigned int blk_mq_get_tag(struct blk_mq_alloc_data *data);
-extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
-			   unsigned int tag);
+extern void blk_mq_put_tag(struct blk_mq_hw_ctx *hctx, struct blk_mq_tags *tags,
+			   struct blk_mq_ctx *ctx, unsigned int tag);
 extern bool blk_mq_has_free_tags(struct blk_mq_tags *tags);
-extern ssize_t blk_mq_tag_sysfs_show(struct blk_mq_tags *tags, char *page);
-extern int blk_mq_tag_update_depth(struct blk_mq_tags *tags, unsigned int depth);
+extern int blk_mq_tag_update_depth(struct blk_mq_hw_ctx *hctx,
+					struct blk_mq_tags **tags,
+					unsigned int depth, bool can_grow);
 extern void blk_mq_tag_wakeup_all(struct blk_mq_tags *tags, bool);
 void blk_mq_queue_tag_busy_iter(struct request_queue *q, busy_iter_fn *fn,
 		void *priv);
...
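A rough mental model for the new static_rqs field (phrasing mine, not from the commit): static_rqs[] holds the preallocated request backing every tag in the map, while rqs[] tracks which request currently owns a given driver tag, and the two can differ once a scheduler hands out requests from its own sched_tags. In code form:

static struct request *example_driver_tag_owner(struct blk_mq_tags *tags,
						unsigned int tag)
{
	return tags->rqs[tag];		/* request currently holding this driver tag */
}

static struct request *example_preallocated_rq(struct blk_mq_tags *tags,
					       unsigned int tag)
{
	return tags->static_rqs[tag];	/* request preallocated for this tag slot */
}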
@@ -32,7 +32,31 @@ void blk_mq_free_queue(struct request_queue *q);
int blk_mq_update_nr_requests(struct request_queue *q, unsigned int nr);
void blk_mq_wake_waiters(struct request_queue *q);
bool blk_mq_dispatch_rq_list(struct blk_mq_hw_ctx *, struct list_head *);
void blk_mq_flush_busy_ctxs(struct blk_mq_hw_ctx *hctx, struct list_head *list);
bool blk_mq_hctx_has_pending(struct blk_mq_hw_ctx *hctx);
bool blk_mq_get_driver_tag(struct request *rq, struct blk_mq_hw_ctx **hctx,
bool wait);
/*
* Internal helpers for allocating/freeing the request map
*/
void blk_mq_free_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx);
void blk_mq_free_rq_map(struct blk_mq_tags *tags);
struct blk_mq_tags *blk_mq_alloc_rq_map(struct blk_mq_tag_set *set,
unsigned int hctx_idx,
unsigned int nr_tags,
unsigned int reserved_tags);
int blk_mq_alloc_rqs(struct blk_mq_tag_set *set, struct blk_mq_tags *tags,
unsigned int hctx_idx, unsigned int depth);
/*
* Internal helpers for request insertion into sw queues
*/
void __blk_mq_insert_request(struct blk_mq_hw_ctx *hctx, struct request *rq,
bool at_head);
void blk_mq_insert_requests(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct list_head *list);
/*
 * CPU hotplug helpers
 */
@@ -57,6 +81,40 @@ extern int blk_mq_sysfs_register(struct request_queue *q);
extern void blk_mq_sysfs_unregister(struct request_queue *q);
extern void blk_mq_hctx_kobj_init(struct blk_mq_hw_ctx *hctx);
/*
* debugfs helpers
*/
#ifdef CONFIG_BLK_DEBUG_FS
void blk_mq_debugfs_init(void);
int blk_mq_debugfs_register(struct request_queue *q, const char *name);
void blk_mq_debugfs_unregister(struct request_queue *q);
int blk_mq_debugfs_register_hctxs(struct request_queue *q);
void blk_mq_debugfs_unregister_hctxs(struct request_queue *q);
#else
static inline void blk_mq_debugfs_init(void)
{
}
static inline int blk_mq_debugfs_register(struct request_queue *q,
const char *name)
{
return 0;
}
static inline void blk_mq_debugfs_unregister(struct request_queue *q)
{
}
static inline int blk_mq_debugfs_register_hctxs(struct request_queue *q)
{
return 0;
}
static inline void blk_mq_debugfs_unregister_hctxs(struct request_queue *q)
{
}
#endif
extern void blk_mq_rq_timed_out(struct request *req, bool reserved);

void blk_mq_release(struct request_queue *q);
@@ -103,6 +161,25 @@ static inline void blk_mq_set_alloc_data(struct blk_mq_alloc_data *data,
	data->hctx = hctx;
}
static inline struct blk_mq_tags *blk_mq_tags_from_data(struct blk_mq_alloc_data *data)
{
if (data->flags & BLK_MQ_REQ_INTERNAL)
return data->hctx->sched_tags;
return data->hctx->tags;
}
/*
* Internal helpers for request allocation/init/free
*/
void blk_mq_rq_ctx_init(struct request_queue *q, struct blk_mq_ctx *ctx,
struct request *rq, unsigned int op);
void __blk_mq_finish_request(struct blk_mq_hw_ctx *hctx, struct blk_mq_ctx *ctx,
struct request *rq);
void blk_mq_finish_request(struct request *rq);
struct request *__blk_mq_alloc_request(struct blk_mq_alloc_data *data,
unsigned int op);
static inline bool blk_mq_hctx_stopped(struct blk_mq_hw_ctx *hctx)
{
	return test_bit(BLK_MQ_S_STOPPED, &hctx->state);
...
@@ -272,6 +272,7 @@ void blk_queue_end_tag(struct request_queue *q, struct request *rq)
 	list_del_init(&rq->queuelist);
 	rq->rq_flags &= ~RQF_QUEUED;
 	rq->tag = -1;
+	rq->internal_tag = -1;

 	if (unlikely(bqt->tag_index[tag] == NULL))
 		printk(KERN_ERR "%s: tag %d is missing\n",
...
@@ -866,10 +866,12 @@ static void tg_update_disptime(struct throtl_grp *tg)
 	unsigned long read_wait = -1, write_wait = -1, min_wait = -1, disptime;
 	struct bio *bio;

-	if ((bio = throtl_peek_queued(&sq->queued[READ])))
+	bio = throtl_peek_queued(&sq->queued[READ]);
+	if (bio)
 		tg_may_dispatch(tg, bio, &read_wait);

-	if ((bio = throtl_peek_queued(&sq->queued[WRITE])))
+	bio = throtl_peek_queued(&sq->queued[WRITE]);
+	if (bio)
 		tg_may_dispatch(tg, bio, &write_wait);

 	min_wait = min(read_wait, write_wait);
...
@@ -167,7 +167,7 @@ static inline struct request *__elv_next_request(struct request_queue *q)
 			return NULL;
 		}
 		if (unlikely(blk_queue_bypass(q)) ||
-		    !q->elevator->type->ops.elevator_dispatch_fn(q, 0))
+		    !q->elevator->type->ops.sq.elevator_dispatch_fn(q, 0))
 			return NULL;
 	}
 }
@@ -176,16 +176,16 @@ static inline void elv_activate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;

-	if (e->type->ops.elevator_activate_req_fn)
-		e->type->ops.elevator_activate_req_fn(q, rq);
+	if (e->type->ops.sq.elevator_activate_req_fn)
+		e->type->ops.sq.elevator_activate_req_fn(q, rq);
 }

 static inline void elv_deactivate_rq(struct request_queue *q, struct request *rq)
 {
 	struct elevator_queue *e = q->elevator;

-	if (e->type->ops.elevator_deactivate_req_fn)
-		e->type->ops.elevator_deactivate_req_fn(q, rq);
+	if (e->type->ops.sq.elevator_deactivate_req_fn)
+		e->type->ops.sq.elevator_deactivate_req_fn(q, rq);
 }

 #ifdef CONFIG_FAIL_IO_TIMEOUT
@@ -263,6 +263,22 @@ void ioc_clear_queue(struct request_queue *q);
 int create_task_io_context(struct task_struct *task, gfp_t gfp_mask, int node);

+/**
+ * rq_ioc - determine io_context for request allocation
+ * @bio: request being allocated is for this bio (can be %NULL)
+ *
+ * Determine io_context to use for request allocation for @bio. May return
+ * %NULL if %current->io_context doesn't exist.
+ */
+static inline struct io_context *rq_ioc(struct bio *bio)
+{
+#ifdef CONFIG_BLK_CGROUP
+	if (bio && bio->bi_ioc)
+		return bio->bi_ioc;
+#endif
+	return current->io_context;
+}
+
 /**
 * create_io_context - try to create task->io_context
 * @gfp_mask: allocation mask
...
@@ -2749,9 +2749,11 @@ static struct cfq_queue *cfq_get_next_queue_forced(struct cfq_data *cfqd)
 	if (!cfqg)
 		return NULL;

-	for_each_cfqg_st(cfqg, i, j, st)
-		if ((cfqq = cfq_rb_first(st)) != NULL)
+	for_each_cfqg_st(cfqg, i, j, st) {
+		cfqq = cfq_rb_first(st);
+		if (cfqq)
 			return cfqq;
+	}
 	return NULL;
 }
@@ -3864,6 +3866,8 @@ cfq_get_queue(struct cfq_data *cfqd, bool is_sync, struct cfq_io_cq *cic,
 		goto out;
 	}

+	/* cfq_init_cfqq() assumes cfqq->ioprio_class is initialized. */
+	cfqq->ioprio_class = IOPRIO_CLASS_NONE;
 	cfq_init_cfqq(cfqd, cfqq, current->pid, is_sync);
 	cfq_init_prio_data(cfqq, cic);
 	cfq_link_cfqq_cfqg(cfqq, cfqg);
@@ -4837,7 +4841,7 @@ static struct elv_fs_entry cfq_attrs[] = {
 };

 static struct elevator_type iosched_cfq = {
-	.ops = {
+	.ops.sq = {
 		.elevator_merge_fn =		cfq_merge,
 		.elevator_merged_fn =		cfq_merged_request,
 		.elevator_merge_req_fn =	cfq_merged_requests,
...
@@ -439,7 +439,7 @@ static struct elv_fs_entry deadline_attrs[] = {
 };

 static struct elevator_type iosched_deadline = {
-	.ops = {
+	.ops.sq = {
 		.elevator_merge_fn =		deadline_merge,
 		.elevator_merged_fn =		deadline_merged_request,
 		.elevator_merge_req_fn =	deadline_merged_requests,
...
@@ -92,7 +92,7 @@ static void noop_exit_queue(struct elevator_queue *e)
 }

 static struct elevator_type elevator_noop = {
-	.ops = {
+	.ops.sq = {
 		.elevator_merge_req_fn = noop_merged_requests,
 		.elevator_dispatch_fn = noop_dispatch,
 		.elevator_add_req_fn = noop_add_request,
...
@@ -293,7 +293,7 @@ static gpt_entry *alloc_read_gpt_entries(struct parsed_partitions *state,
 	if (!gpt)
 		return NULL;

-	count = le32_to_cpu(gpt->num_partition_entries) *
+	count = (size_t)le32_to_cpu(gpt->num_partition_entries) *
 		le32_to_cpu(gpt->sizeof_partition_entry);
 	if (!count)
 		return NULL;
@@ -352,7 +352,7 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 			gpt_header **gpt, gpt_entry **ptes)
 {
 	u32 crc, origcrc;
-	u64 lastlba;
+	u64 lastlba, pt_size;

 	if (!ptes)
 		return 0;
@@ -434,13 +434,20 @@ static int is_gpt_valid(struct parsed_partitions *state, u64 lba,
 		goto fail;
 	}

+	/* Sanity check partition table size */
+	pt_size = (u64)le32_to_cpu((*gpt)->num_partition_entries) *
+		le32_to_cpu((*gpt)->sizeof_partition_entry);
+	if (pt_size > KMALLOC_MAX_SIZE) {
+		pr_debug("GUID Partition Table is too large: %llu > %lu bytes\n",
+			 (unsigned long long)pt_size, KMALLOC_MAX_SIZE);
+		goto fail;
+	}
+
 	if (!(*ptes = alloc_read_gpt_entries(state, *gpt)))
 		goto fail;

 	/* Check the GUID Partition Entry Array CRC */
-	crc = efi_crc32((const unsigned char *) (*ptes),
-		le32_to_cpu((*gpt)->num_partition_entries) *
-		le32_to_cpu((*gpt)->sizeof_partition_entry));
+	crc = efi_crc32((const unsigned char *) (*ptes), pt_size);

 	if (crc != le32_to_cpu((*gpt)->partition_entry_array_crc32)) {
 		pr_debug("GUID Partition Entry Array CRC check failed.\n");
...
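The pattern used in the fix above, in isolation: do the size arithmetic in a wide type, reject implausible values before allocating, and only then call kmalloc(). A generic sketch, not the efi.c code itself:

#include <linux/slab.h>
#include <linux/types.h>

static void *example_alloc_table(u32 nr_entries, u32 entry_size)
{
	u64 size = (u64)nr_entries * entry_size;	/* widen before multiplying */

	if (!size || size > KMALLOC_MAX_SIZE)
		return NULL;				/* zero or absurdly large table */

	return kmalloc(size, GFP_KERNEL);
}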
@@ -4074,41 +4074,27 @@ static void cciss_put_controller_into_performant_mode(ctlr_info_t *h)
 static void cciss_interrupt_mode(ctlr_info_t *h)
 {
-#ifdef CONFIG_PCI_MSI
-	int err;
-	struct msix_entry cciss_msix_entries[4] = { {0, 0}, {0, 1},
-	{0, 2}, {0, 3}
-	};
+	int ret;
 	/* Some boards advertise MSI but don't really support it */
 	if ((h->board_id == 0x40700E11) || (h->board_id == 0x40800E11) ||
 	    (h->board_id == 0x40820E11) || (h->board_id == 0x40830E11))
 		goto default_int_mode;
-	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSIX)) {
-		err = pci_enable_msix_exact(h->pdev, cciss_msix_entries, 4);
-		if (!err) {
-			h->intr[0] = cciss_msix_entries[0].vector;
-			h->intr[1] = cciss_msix_entries[1].vector;
-			h->intr[2] = cciss_msix_entries[2].vector;
-			h->intr[3] = cciss_msix_entries[3].vector;
-			h->msix_vector = 1;
+	ret = pci_alloc_irq_vectors(h->pdev, 4, 4, PCI_IRQ_MSIX);
+	if (ret >= 0) {
+		h->intr[0] = pci_irq_vector(h->pdev, 0);
+		h->intr[1] = pci_irq_vector(h->pdev, 1);
+		h->intr[2] = pci_irq_vector(h->pdev, 2);
+		h->intr[3] = pci_irq_vector(h->pdev, 3);
 		return;
-		} else {
-			dev_warn(&h->pdev->dev,
-				"MSI-X init failed %d\n", err);
-		}
-	}
-	if (pci_find_capability(h->pdev, PCI_CAP_ID_MSI)) {
-		if (!pci_enable_msi(h->pdev))
-			h->msi_vector = 1;
-		else
-			dev_warn(&h->pdev->dev, "MSI init failed\n");
 	}
+	ret = pci_alloc_irq_vectors(h->pdev, 1, 1, PCI_IRQ_MSI);
 default_int_mode:
-#endif /* CONFIG_PCI_MSI */
 	/* if we get here we're going to use the default interrupt mode */
-	h->intr[h->intr_mode] = h->pdev->irq;
+	h->intr[h->intr_mode] = pci_irq_vector(h->pdev, 0);
 	return;
 }
@@ -4888,7 +4874,7 @@ static int cciss_request_irq(ctlr_info_t *h,
 	irqreturn_t (*msixhandler)(int, void *),
 	irqreturn_t (*intxhandler)(int, void *))
 {
-	if (h->msix_vector || h->msi_vector) {
+	if (h->pdev->msi_enabled || h->pdev->msix_enabled) {
 		if (!request_irq(h->intr[h->intr_mode], msixhandler,
 				0, h->devname, h))
 			return 0;
@@ -4934,12 +4920,7 @@ static void cciss_undo_allocations_after_kdump_soft_reset(ctlr_info_t *h)
 	int ctlr = h->ctlr;
 	free_irq(h->intr[h->intr_mode], h);
-#ifdef CONFIG_PCI_MSI
-	if (h->msix_vector)
-		pci_disable_msix(h->pdev);
-	else if (h->msi_vector)
-		pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+	pci_free_irq_vectors(h->pdev);
 	cciss_free_sg_chain_blocks(h->cmd_sg_list, h->nr_cmds);
 	cciss_free_scatterlists(h);
 	cciss_free_cmd_pool(h);
@@ -5295,12 +5276,7 @@ static void cciss_remove_one(struct pci_dev *pdev)
 	cciss_shutdown(pdev);
-#ifdef CONFIG_PCI_MSI
-	if (h->msix_vector)
-		pci_disable_msix(h->pdev);
-	else if (h->msi_vector)
-		pci_disable_msi(h->pdev);
-#endif /* CONFIG_PCI_MSI */
+	pci_free_irq_vectors(h->pdev);
 	iounmap(h->transtable);
 	iounmap(h->cfgtable);
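The block above converts the cciss interrupt setup from the legacy pci_enable_msix_exact()/pci_enable_msi() calls, and the driver-private msix_vector/msi_vector flags, to pci_alloc_irq_vectors(), with the teardown paths switched to pci_free_irq_vectors(). A hedged sketch of the general pattern for a PCI driver follows; struct mydev, mydev_isr() and the "mydev" name are invented for the example, while the PCI and IRQ calls themselves are the real kernel API.

#include <linux/pci.h>
#include <linux/interrupt.h>

/* Hypothetical per-device state for this sketch. */
struct mydev {
	struct pci_dev *pdev;
	int irq;
};

static irqreturn_t mydev_isr(int irq, void *data)
{
	return IRQ_HANDLED;
}

static int mydev_setup_irq(struct mydev *d)
{
	int nvec;

	/* Ask for one vector, preferring MSI-X, then MSI, then legacy INTx. */
	nvec = pci_alloc_irq_vectors(d->pdev, 1, 1,
				     PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
	if (nvec < 0)
		return nvec;

	/* pci_irq_vector() maps a vector index to a Linux IRQ number. */
	d->irq = pci_irq_vector(d->pdev, 0);
	return request_irq(d->irq, mydev_isr, 0, "mydev", d);
}

static void mydev_teardown_irq(struct mydev *d)
{
	free_irq(d->irq, d);
	pci_free_irq_vectors(d->pdev);	/* replaces pci_disable_msi{,x}() */
}

Because the PCI core now records the mode in pdev->msi_enabled and pdev->msix_enabled, drivers no longer need their own bookkeeping flags, which is exactly what the cciss.h hunk below removes.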
@@ -90,8 +90,6 @@ struct ctlr_info
 #	define SIMPLE_MODE_INT	2
 #	define MEMQ_MODE_INT	3
 	unsigned int intr[4];
-	unsigned int msix_vector;
-	unsigned int msi_vector;
 	int	intr_mode;
 	int	cciss_max_sectors;
 	BYTE	cciss_read;
@@ -333,7 +331,7 @@ static unsigned long SA5_performant_completed(ctlr_info_t *h)
 	 */
 	register_value = readl(h->vaddr + SA5_OUTDB_STATUS);
 	/* msi auto clears the interrupt pending bit. */
-	if (!(h->msi_vector || h->msix_vector)) {
+	if (!(h->pdev->msi_enabled || h->pdev->msix_enabled)) {
 		writel(SA5_OUTDB_CLEAR_PERF_BIT, h->vaddr + SA5_OUTDB_CLEAR);
 		/* Do a read in order to flush the write to the controller
 		 * (as per spec.)
@@ -393,7 +391,7 @@ static bool SA5_performant_intr_pending(ctlr_info_t *h)
 	if (!register_value)
 		return false;
-	if (h->msi_vector || h->msix_vector)
+	if (h->pdev->msi_enabled || h->pdev->msix_enabled)
 		return true;
 	/* Read outbound doorbell to flush */
@@ -3119,7 +3119,7 @@ static int raw_cmd_copyin(int cmd, void __user *param,
 	*rcmd = NULL;
 loop:
-	ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_USER);
+	ptr = kmalloc(sizeof(struct floppy_raw_cmd), GFP_KERNEL);
 	if (!ptr)
 		return -ENOMEM;
 	*rcmd = ptr;
@@ -1097,9 +1097,12 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	if ((unsigned int) info->lo_encrypt_key_size > LO_KEY_SIZE)
 		return -EINVAL;
+	/* I/O need to be drained during transfer transition */
+	blk_mq_freeze_queue(lo->lo_queue);
 	err = loop_release_xfer(lo);
 	if (err)
-		return err;
+		goto exit;
 	if (info->lo_encrypt_type) {
 		unsigned int type = info->lo_encrypt_type;
@@ -1114,12 +1117,14 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	err = loop_init_xfer(lo, xfer, info);
 	if (err)
-		return err;
+		goto exit;
 	if (lo->lo_offset != info->lo_offset ||
 	    lo->lo_sizelimit != info->lo_sizelimit)
-		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit))
-			return -EFBIG;
+		if (figure_loop_size(lo, info->lo_offset, info->lo_sizelimit)) {
+			err = -EFBIG;
+			goto exit;
+		}
 	loop_config_discard(lo);
@@ -1156,7 +1161,9 @@ loop_set_status(struct loop_device *lo, const struct loop_info64 *info)
 	/* update dio if lo_offset or transfer is changed */
 	__loop_update_dio(lo, lo->use_dio);
-	return 0;
+ exit:
+	blk_mq_unfreeze_queue(lo->lo_queue);
+	return err;
 }
 static int
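The loop hunk above wraps the transfer and geometry reconfiguration in blk_mq_freeze_queue()/blk_mq_unfreeze_queue() and funnels every error through a single exit label so the queue is always unfrozen. A small sketch of that freeze-around-reconfiguration pattern is below, under the assumption of a hypothetical driver whose struct mydev holds its request_queue; only the two freeze calls are the real block-layer API.

#include <linux/blk-mq.h>

/* Hypothetical driver state for this sketch. */
struct mydev {
	struct request_queue *queue;
	unsigned int sector_shift;
};

/* Change a queue-visible parameter with no requests in flight:
 * freeze drains outstanding requests and holds new ones until the
 * matching unfreeze, so the state swap never races with I/O. */
static int mydev_set_sector_shift(struct mydev *d, unsigned int shift)
{
	int err = 0;

	blk_mq_freeze_queue(d->queue);

	if (shift > 12)
		err = -EINVAL;
	else
		d->sector_shift = shift;

	blk_mq_unfreeze_queue(d->queue);
	return err;
}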
@@ -420,7 +420,8 @@ static void null_lnvm_end_io(struct request *rq, int error)
 {
 	struct nvm_rq *rqd = rq->end_io_data;
-	nvm_end_io(rqd, error);
+	rqd->error = error;
+	nvm_end_io(rqd);
 	blk_put_request(rq);
 }
@@ -460,7 +461,6 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 	id->ver_id = 0x1;
 	id->vmnt = 0;
-	id->cgrps = 1;
 	id->cap = 0x2;
 	id->dom = 0x1;
@@ -479,7 +479,7 @@ static int null_lnvm_id(struct nvm_dev *dev, struct nvm_id *id)
 	sector_div(size, bs); /* convert size to pages */
 	size >>= 8; /* concert size to pgs pr blk */
-	grp = &id->groups[0];
+	grp = &id->grp;
 	grp->mtype = 0;
 	grp->fmtype = 0;
 	grp->num_ch = 1;
@@ -273,7 +273,7 @@ static const struct block_device_operations pcd_bdops = {
 	.check_events	= pcd_block_check_events,
 };
-static struct cdrom_device_ops pcd_dops = {
+static const struct cdrom_device_ops pcd_dops = {
 	.open		= pcd_open,
 	.release	= pcd_release,
 	.drive_status	= pcd_drive_status,
@@ -481,7 +481,7 @@ static int gdrom_audio_ioctl(struct cdrom_device_info *cdi, unsigned int cmd,
 		return -EINVAL;
 }
-static struct cdrom_device_ops gdrom_ops = {
+static const struct cdrom_device_ops gdrom_ops = {
 	.open			= gdrom_open,
 	.release		= gdrom_release,
 	.drive_status		= gdrom_drivestatus,
@@ -489,9 +489,9 @@ static struct cdrom_device_ops gdrom_ops = {
 	.get_last_session	= gdrom_get_last_session,
 	.reset			= gdrom_hardreset,
 	.audio_ioctl		= gdrom_audio_ioctl,
+	.generic_packet		= cdrom_dummy_generic_packet,
 	.capability		= CDC_MULTI_SESSION | CDC_MEDIA_CHANGED |
 				  CDC_RESET | CDC_DRIVE_STATUS | CDC_CD_R,
-	.n_minors		= 1,
 };
 static int gdrom_bdops_open(struct block_device *bdev, fmode_t mode)
@@ -807,16 +807,20 @@ static int probe_gdrom(struct platform_device *devptr)
 	if (err)
 		goto probe_fail_cmdirq_register;
 	gd.gdrom_rq = blk_init_queue(gdrom_request, &gdrom_lock);
-	if (!gd.gdrom_rq)
+	if (!gd.gdrom_rq) {
+		err = -ENOMEM;
 		goto probe_fail_requestq;
+	}
 	err = probe_gdrom_setupqueue();
 	if (err)
 		goto probe_fail_toc;
 	gd.toc = kzalloc(sizeof(struct gdromtoc), GFP_KERNEL);
-	if (!gd.toc)
+	if (!gd.toc) {
+		err = -ENOMEM;
 		goto probe_fail_toc;
+	}
 	add_disk(gd.disk);
 	return 0;
@@ -1166,7 +1166,7 @@ void ide_cdrom_update_speed(ide_drive_t *drive, u8 *buf)
 	 CDC_CD_RW | CDC_DVD | CDC_DVD_R | CDC_DVD_RAM | CDC_GENERIC_PACKET | \
 	 CDC_MO_DRIVE | CDC_MRW | CDC_MRW_W | CDC_RAM)
-static struct cdrom_device_ops ide_cdrom_dops = {
+static const struct cdrom_device_ops ide_cdrom_dops = {
 	.open			= ide_cdrom_open_real,
 	.release		= ide_cdrom_release_real,
 	.drive_status		= ide_cdrom_drive_status,
@@ -26,15 +26,6 @@ config NVM_DEBUG
 	  It is required to create/remove targets without IOCTLs.
-config NVM_GENNVM
-	tristate "General Non-Volatile Memory Manager for Open-Channel SSDs"
-	---help---
-	Non-volatile memory media manager for Open-Channel SSDs that implements
-	physical media metadata management and block provisioning API.
-	This is the standard media manager for using Open-Channel SSDs, and
-	required for targets to be instantiated.
 config NVM_RRPC
 	tristate "Round-robin Hybrid Open-Channel SSD target"
 	---help---
@@ -2,6 +2,5 @@
 # Makefile for Open-Channel SSDs.
 #
-obj-$(CONFIG_NVM)		:= core.o sysblk.o
-obj-$(CONFIG_NVM_GENNVM)	+= gennvm.o
+obj-$(CONFIG_NVM)		:= core.o
 obj-$(CONFIG_NVM_RRPC)		+= rrpc.o
-/*
- * Copyright: Matias Bjorling <mb@bjorling.me>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License version
- * 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful, but
- * WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * General Public License for more details.
- *
- */
-
-#ifndef GENNVM_H_
-#define GENNVM_H_
-
-#include <linux/module.h>
-#include <linux/vmalloc.h>
-#include <linux/lightnvm.h>
-
-struct gen_dev {
-	struct nvm_dev *dev;
-	int nr_luns;
-	struct list_head area_list;
-	struct mutex lock;
-	struct list_head targets;
-};
-
-/* Map between virtual and physical channel and lun */
-struct gen_ch_map {
-	int ch_off;
-	int nr_luns;
-	int *lun_offs;
-};
-
-struct gen_dev_map {
-	struct gen_ch_map *chnls;
-	int nr_chnls;
-};
-
-struct gen_area {
-	struct list_head list;
-	sector_t begin;
-	sector_t end;	/* end is excluded */
-};
-
-static inline void *ch_map_to_lun_offs(struct gen_ch_map *ch_map)
-{
-	return ch_map + 1;
-}
-
-typedef int (gen_trans_fn)(struct nvm_tgt_dev *, struct ppa_addr *);
-
-#define gen_for_each_lun(bm, lun, i) \
-		for ((i) = 0, lun = &(bm)->luns[0]; \
-			(i) < (bm)->nr_luns; (i)++, lun = &(bm)->luns[(i)])
-
-#endif /* GENNVM_H_ */
@@ -779,7 +779,7 @@ static void rrpc_end_io_write(struct rrpc *rrpc, struct rrpc_rq *rrqd,
 static void rrpc_end_io(struct nvm_rq *rqd)
 {
-	struct rrpc *rrpc = container_of(rqd->ins, struct rrpc, instance);
+	struct rrpc *rrpc = rqd->private;
 	struct nvm_tgt_dev *dev = rrpc->dev;
 	struct rrpc_rq *rrqd = nvm_rq_to_pdu(rqd);
 	uint8_t npages = rqd->nr_ppas;
@@ -972,8 +972,9 @@ static int rrpc_submit_io(struct rrpc *rrpc, struct bio *bio,
 	bio_get(bio);
 	rqd->bio = bio;
-	rqd->ins = &rrpc->instance;
+	rqd->private = rrpc;
 	rqd->nr_ppas = nr_pages;
+	rqd->end_io = rrpc_end_io;
 	rrq->flags = flags;
 	err = nvm_submit_io(dev, rqd);
@@ -1532,7 +1533,6 @@ static void *rrpc_init(struct nvm_tgt_dev *dev, struct gendisk *tdisk)
 	if (!rrpc)
 		return ERR_PTR(-ENOMEM);
-	rrpc->instance.tt = &tt_rrpc;
 	rrpc->dev = dev;
 	rrpc->disk = tdisk;
@@ -1611,7 +1611,6 @@ static struct nvm_tgt_type tt_rrpc = {
 	.make_rq	= rrpc_make_rq,
 	.capacity	= rrpc_capacity,
-	.end_io		= rrpc_end_io,
 	.init		= rrpc_init,
 	.exit		= rrpc_exit,
@@ -102,9 +102,6 @@ struct rrpc_lun {
 };
 struct rrpc {
-	/* instance must be kept in top to resolve rrpc in unprep */
-	struct nvm_tgt_instance instance;
 	struct nvm_tgt_dev *dev;
 	struct gendisk *disk;