Commit 3a90a63d authored by Douglas Gilbert; committed by Martin K. Petersen

scsi: scsi_debug: every_nth triggered error injection

This patch simplifies, or at least makes more consistent, the way setting
the every_nth parameter injects errors. Here is a list of 'opts' flags and
in which cases they inject errors when (command_count % abs(every_nth)) == 0
is reached:

  - OPT_RECOVERED_ERR: issued on READ(*)s, WRITE(*)s and
                       WRITE_SCATTEREDs
  - OPT_DIF_ERR:       issued on READ(*)s, WRITE(*)s and
                       WRITE_SCATTEREDs
  - OPT_DIX_ERR:       issued on READ(*)s, WRITE(*)s and
                       WRITE_SCATTEREDs
  - OPT_SHORT_TRANSFER: issued on READ(*)s
  - OPT_TRANSPORT_ERR: issued on all commands
  - OPT_CMD_ABORT:     issued on all commands

The other uses of every_nth were not modified.

Previously if, for example, OPT_SHORT_TRANSFER was armed then if
(command_count % abs(every_nth)) == 0 occurred during a command that was
_not_ a READ, then no error injection occurred. This behaviour puzzled
several testers. Now a global "inject_pending" flag is set and the _next_
READ will get hit and that flag is cleared. OPT_RECOVERED_ERR, OPT_DIF_ERR
and OPT_DIX_ERR have similar behaviour. A downside of this is that there
might be a hang-over pending injection that gets triggered by a following
test.

Also expand the every_nth runtime parameter so that it can take hex value
(i.e. with a leading '0x') as well as a decimal value. Now both the 'opts'
and the 'every_nth' runtime parameters can take hexadecimal values.

Link: https://lore.kernel.org/r/20200712182927.72044-2-dgilbert@interlog.com
Signed-off-by: Douglas Gilbert <dgilbert@interlog.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent c10fa55f
...@@ -187,21 +187,8 @@ static const char *sdebug_version_date = "20200421"; ...@@ -187,21 +187,8 @@ static const char *sdebug_version_date = "20200421";
SDEBUG_OPT_SHORT_TRANSFER | \ SDEBUG_OPT_SHORT_TRANSFER | \
SDEBUG_OPT_HOST_BUSY | \ SDEBUG_OPT_HOST_BUSY | \
SDEBUG_OPT_CMD_ABORT) SDEBUG_OPT_CMD_ABORT)
/* When "every_nth" > 0 then modulo "every_nth" commands: #define SDEBUG_OPT_RECOV_DIF_DIX (SDEBUG_OPT_RECOVERED_ERR | \
* - a missing response is simulated if SDEBUG_OPT_TIMEOUT is set SDEBUG_OPT_DIF_ERR | SDEBUG_OPT_DIX_ERR)
* - a RECOVERED_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_RECOVERED_ERR is set.
* - a TRANSPORT_ERROR is simulated on successful read and write
* commands if SDEBUG_OPT_TRANSPORT_ERR is set.
* - similarly for DIF_ERR, DIX_ERR, SHORT_TRANSFER, HOST_BUSY and
* CMD_ABORT
*
* When "every_nth" < 0 then after "- every_nth" commands the selected
* error will be injected. The error will be injected on every subsequent
* command until some other action occurs; for example, the user writing
* a new value (other than -1 or 1) to every_nth:
* echo 0 > /sys/bus/pseudo/drivers/scsi_debug/every_nth
*/
/* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in /* As indicated in SAM-5 and SPC-4 Unit Attentions (UAs) are returned in
* priority order. In the subset implemented here lower numbers have higher * priority order. In the subset implemented here lower numbers have higher
...@@ -358,13 +345,6 @@ struct sdebug_queued_cmd { ...@@ -358,13 +345,6 @@ struct sdebug_queued_cmd {
*/ */
struct sdebug_defer *sd_dp; struct sdebug_defer *sd_dp;
struct scsi_cmnd *a_cmnd; struct scsi_cmnd *a_cmnd;
unsigned int inj_recovered:1;
unsigned int inj_transport:1;
unsigned int inj_dif:1;
unsigned int inj_dix:1;
unsigned int inj_short:1;
unsigned int inj_host_busy:1;
unsigned int inj_cmd_abort:1;
}; };
struct sdebug_queue { struct sdebug_queue {
...@@ -378,6 +358,7 @@ static atomic_t sdebug_cmnd_count; /* number of incoming commands */ ...@@ -378,6 +358,7 @@ static atomic_t sdebug_cmnd_count; /* number of incoming commands */
static atomic_t sdebug_completions; /* count of deferred completions */ static atomic_t sdebug_completions; /* count of deferred completions */
static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */ static atomic_t sdebug_miss_cpus; /* submission + completion cpus differ */
static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */ static atomic_t sdebug_a_tsf; /* 'almost task set full' counter */
static atomic_t sdeb_inject_pending;
struct opcode_info_t { struct opcode_info_t {
u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */ u8 num_attached; /* 0 if this is it (i.e. a leaf); use 0xff */
...@@ -3111,7 +3092,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ...@@ -3111,7 +3092,6 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
struct sdeb_store_info *sip = devip2sip(devip, true); struct sdeb_store_info *sip = devip2sip(devip, true);
rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck; rwlock_t *macc_lckp = sip ? &sip->macc_lck : &sdeb_fake_rw_lck;
u8 *cmd = scp->cmnd; u8 *cmd = scp->cmnd;
struct sdebug_queued_cmd *sqcp;
switch (cmd[0]) { switch (cmd[0]) {
case READ_16: case READ_16:
...@@ -3164,15 +3144,11 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ...@@ -3164,15 +3144,11 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
sdev_printk(KERN_ERR, scp->device, "Unprotected RD " sdev_printk(KERN_ERR, scp->device, "Unprotected RD "
"to DIF device\n"); "to DIF device\n");
} }
if (unlikely(sdebug_any_injecting_opt)) { if (unlikely((sdebug_opts & SDEBUG_OPT_SHORT_TRANSFER) &&
sqcp = (struct sdebug_queued_cmd *)scp->host_scribble; atomic_read(&sdeb_inject_pending))) {
num /= 2;
if (sqcp) { atomic_set(&sdeb_inject_pending, 0);
if (sqcp->inj_short) }
num /= 2;
}
} else
sqcp = NULL;
ret = check_device_access_params(scp, lba, num, false); ret = check_device_access_params(scp, lba, num, false);
if (ret) if (ret)
...@@ -3213,21 +3189,20 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ...@@ -3213,21 +3189,20 @@ static int resp_read_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
scsi_set_resid(scp, scsi_bufflen(scp) - ret); scsi_set_resid(scp, scsi_bufflen(scp) - ret);
if (unlikely(sqcp)) { if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
if (sqcp->inj_recovered) { atomic_read(&sdeb_inject_pending))) {
mk_sense_buffer(scp, RECOVERED_ERROR, if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
THRESHOLD_EXCEEDED, 0); mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
return check_condition_result; atomic_set(&sdeb_inject_pending, 0);
} else if (sqcp->inj_transport) {
mk_sense_buffer(scp, ABORTED_COMMAND,
TRANSPORT_PROBLEM, ACK_NAK_TO);
return check_condition_result; return check_condition_result;
} else if (sqcp->inj_dif) { } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
/* Logical block guard check failed */ /* Logical block guard check failed */
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
atomic_set(&sdeb_inject_pending, 0);
return illegal_condition_result; return illegal_condition_result;
} else if (sqcp->inj_dix) { } else if (SDEBUG_OPT_DIX_ERR & sdebug_opts) {
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
atomic_set(&sdeb_inject_pending, 0);
return illegal_condition_result; return illegal_condition_result;
} }
} }
...@@ -3506,23 +3481,21 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip) ...@@ -3506,23 +3481,21 @@ static int resp_write_dt0(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
"%s: write: cdb indicated=%u, IO sent=%d bytes\n", "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
my_name, num * sdebug_sector_size, ret); my_name, num * sdebug_sector_size, ret);
if (unlikely(sdebug_any_injecting_opt)) { if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
struct sdebug_queued_cmd *sqcp = atomic_read(&sdeb_inject_pending))) {
(struct sdebug_queued_cmd *)scp->host_scribble; if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
if (sqcp) { atomic_set(&sdeb_inject_pending, 0);
if (sqcp->inj_recovered) { return check_condition_result;
mk_sense_buffer(scp, RECOVERED_ERROR, } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
THRESHOLD_EXCEEDED, 0); /* Logical block guard check failed */
return check_condition_result; mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
} else if (sqcp->inj_dif) { atomic_set(&sdeb_inject_pending, 0);
/* Logical block guard check failed */ return illegal_condition_result;
mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1); } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
return illegal_condition_result; mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
} else if (sqcp->inj_dix) { atomic_set(&sdeb_inject_pending, 0);
mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1); return illegal_condition_result;
return illegal_condition_result;
}
} }
} }
return 0; return 0;
...@@ -3664,28 +3637,24 @@ static int resp_write_scat(struct scsi_cmnd *scp, ...@@ -3664,28 +3637,24 @@ static int resp_write_scat(struct scsi_cmnd *scp,
"%s: write: cdb indicated=%u, IO sent=%d bytes\n", "%s: write: cdb indicated=%u, IO sent=%d bytes\n",
my_name, num_by, ret); my_name, num_by, ret);
if (unlikely(sdebug_any_injecting_opt)) { if (unlikely((sdebug_opts & SDEBUG_OPT_RECOV_DIF_DIX) &&
struct sdebug_queued_cmd *sqcp = atomic_read(&sdeb_inject_pending))) {
(struct sdebug_queued_cmd *)scp->host_scribble; if (sdebug_opts & SDEBUG_OPT_RECOVERED_ERR) {
mk_sense_buffer(scp, RECOVERED_ERROR, THRESHOLD_EXCEEDED, 0);
if (sqcp) { atomic_set(&sdeb_inject_pending, 0);
if (sqcp->inj_recovered) { ret = check_condition_result;
mk_sense_buffer(scp, RECOVERED_ERROR, goto err_out_unlock;
THRESHOLD_EXCEEDED, 0); } else if (sdebug_opts & SDEBUG_OPT_DIF_ERR) {
ret = illegal_condition_result; /* Logical block guard check failed */
goto err_out_unlock; mk_sense_buffer(scp, ABORTED_COMMAND, 0x10, 1);
} else if (sqcp->inj_dif) { atomic_set(&sdeb_inject_pending, 0);
/* Logical block guard check failed */ ret = illegal_condition_result;
mk_sense_buffer(scp, ABORTED_COMMAND, goto err_out_unlock;
0x10, 1); } else if (sdebug_opts & SDEBUG_OPT_DIX_ERR) {
ret = illegal_condition_result; mk_sense_buffer(scp, ILLEGAL_REQUEST, 0x10, 1);
goto err_out_unlock; atomic_set(&sdeb_inject_pending, 0);
} else if (sqcp->inj_dix) { ret = illegal_condition_result;
mk_sense_buffer(scp, ILLEGAL_REQUEST, goto err_out_unlock;
0x10, 1);
ret = illegal_condition_result;
goto err_out_unlock;
}
} }
} }
sg_off += num_by; sg_off += num_by;
...@@ -5348,24 +5317,11 @@ static void clear_queue_stats(void) ...@@ -5348,24 +5317,11 @@ static void clear_queue_stats(void)
atomic_set(&sdebug_a_tsf, 0); atomic_set(&sdebug_a_tsf, 0);
} }
static void setup_inject(struct sdebug_queue *sqp, static bool inject_on_this_cmd(void)
struct sdebug_queued_cmd *sqcp)
{ {
if ((atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) > 0) { if (sdebug_every_nth == 0)
if (sdebug_every_nth > 0) return false;
sqcp->inj_recovered = sqcp->inj_transport return (atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
= sqcp->inj_dif
= sqcp->inj_dix = sqcp->inj_short
= sqcp->inj_host_busy = sqcp->inj_cmd_abort = 0;
return;
}
sqcp->inj_recovered = !!(SDEBUG_OPT_RECOVERED_ERR & sdebug_opts);
sqcp->inj_transport = !!(SDEBUG_OPT_TRANSPORT_ERR & sdebug_opts);
sqcp->inj_dif = !!(SDEBUG_OPT_DIF_ERR & sdebug_opts);
sqcp->inj_dix = !!(SDEBUG_OPT_DIX_ERR & sdebug_opts);
sqcp->inj_short = !!(SDEBUG_OPT_SHORT_TRANSFER & sdebug_opts);
sqcp->inj_host_busy = !!(SDEBUG_OPT_HOST_BUSY & sdebug_opts);
sqcp->inj_cmd_abort = !!(SDEBUG_OPT_CMD_ABORT & sdebug_opts);
} }
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */ #define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
...@@ -5382,7 +5338,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -5382,7 +5338,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
int delta_jiff, int ndelay) int delta_jiff, int ndelay)
{ {
bool new_sd_dp; bool new_sd_dp;
int k, num_in_q, qdepth, inject; bool inject = false;
int k, num_in_q, qdepth;
unsigned long iflags; unsigned long iflags;
u64 ns_from_boot = 0; u64 ns_from_boot = 0;
struct sdebug_queue *sqp; struct sdebug_queue *sqp;
...@@ -5408,7 +5365,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -5408,7 +5365,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
} }
num_in_q = atomic_read(&devip->num_in_q); num_in_q = atomic_read(&devip->num_in_q);
qdepth = cmnd->device->queue_depth; qdepth = cmnd->device->queue_depth;
inject = 0;
if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) { if (unlikely((qdepth > 0) && (num_in_q >= qdepth))) {
if (scsi_result) { if (scsi_result) {
spin_unlock_irqrestore(&sqp->qc_lock, iflags); spin_unlock_irqrestore(&sqp->qc_lock, iflags);
...@@ -5422,7 +5378,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -5422,7 +5378,7 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
(atomic_inc_return(&sdebug_a_tsf) >= (atomic_inc_return(&sdebug_a_tsf) >=
abs(sdebug_every_nth))) { abs(sdebug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0); atomic_set(&sdebug_a_tsf, 0);
inject = 1; inject = true;
scsi_result = device_qfull_result; scsi_result = device_qfull_result;
} }
} }
...@@ -5452,8 +5408,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -5452,8 +5408,6 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
cmnd->host_scribble = (unsigned char *)sqcp; cmnd->host_scribble = (unsigned char *)sqcp;
sd_dp = sqcp->sd_dp; sd_dp = sqcp->sd_dp;
spin_unlock_irqrestore(&sqp->qc_lock, iflags); spin_unlock_irqrestore(&sqp->qc_lock, iflags);
if (unlikely(sdebug_every_nth && sdebug_any_injecting_opt))
setup_inject(sqp, sqcp);
if (!sd_dp) { if (!sd_dp) {
sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC); sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
if (!sd_dp) { if (!sd_dp) {
...@@ -5474,13 +5428,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -5474,13 +5428,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
ns_from_boot = ktime_get_boottime_ns(); ns_from_boot = ktime_get_boottime_ns();
/* one of the resp_*() response functions is called here */ /* one of the resp_*() response functions is called here */
cmnd->result = pfp != NULL ? pfp(cmnd, devip) : 0; cmnd->result = pfp ? pfp(cmnd, devip) : 0;
if (cmnd->result & SDEG_RES_IMMED_MASK) { if (cmnd->result & SDEG_RES_IMMED_MASK) {
cmnd->result &= ~SDEG_RES_IMMED_MASK; cmnd->result &= ~SDEG_RES_IMMED_MASK;
delta_jiff = ndelay = 0; delta_jiff = ndelay = 0;
} }
if (cmnd->result == 0 && scsi_result != 0) if (cmnd->result == 0 && scsi_result != 0)
cmnd->result = scsi_result; cmnd->result = scsi_result;
if (cmnd->result == 0 && unlikely(sdebug_opts & SDEBUG_OPT_TRANSPORT_ERR)) {
if (atomic_read(&sdeb_inject_pending)) {
mk_sense_buffer(cmnd, ABORTED_COMMAND, TRANSPORT_PROBLEM, ACK_NAK_TO);
atomic_set(&sdeb_inject_pending, 0);
cmnd->result = check_condition_result;
}
}
if (unlikely(sdebug_verbose && cmnd->result)) if (unlikely(sdebug_verbose && cmnd->result))
sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n", sdev_printk(KERN_INFO, sdp, "%s: non-zero result=0x%x\n",
...@@ -5546,21 +5507,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip, ...@@ -5546,21 +5507,20 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (sdebug_statistics) if (sdebug_statistics)
sd_dp->issuing_cpu = raw_smp_processor_id(); sd_dp->issuing_cpu = raw_smp_processor_id();
sd_dp->defer_t = SDEB_DEFER_WQ; sd_dp->defer_t = SDEB_DEFER_WQ;
if (unlikely(sqcp->inj_cmd_abort)) if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
atomic_read(&sdeb_inject_pending)))
sd_dp->aborted = true; sd_dp->aborted = true;
schedule_work(&sd_dp->ew.work); schedule_work(&sd_dp->ew.work);
if (unlikely(sqcp->inj_cmd_abort)) { if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", atomic_read(&sdeb_inject_pending))) {
cmnd->request->tag); sdev_printk(KERN_INFO, sdp, "abort request tag %d\n", cmnd->request->tag);
blk_abort_request(cmnd->request); blk_abort_request(cmnd->request);
atomic_set(&sdeb_inject_pending, 0);
} }
} }
if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && if (unlikely((SDEBUG_OPT_Q_NOISE & sdebug_opts) && scsi_result == device_qfull_result))
(scsi_result == device_qfull_result))) sdev_printk(KERN_INFO, sdp, "%s: num_in_q=%d +1, %s%s\n", __func__,
sdev_printk(KERN_INFO, sdp, num_in_q, (inject ? "<inject> " : ""), "status: TASK SET FULL");
"%s: num_in_q=%d +1, %s%s\n", __func__,
num_in_q, (inject ? "<inject> " : ""),
"status: TASK SET FULL");
return 0; return 0;
respond_in_thread: /* call back to mid-layer using invocation thread */ respond_in_thread: /* call back to mid-layer using invocation thread */
...@@ -6097,17 +6057,27 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf, ...@@ -6097,17 +6057,27 @@ static ssize_t every_nth_store(struct device_driver *ddp, const char *buf,
size_t count) size_t count)
{ {
int nth; int nth;
char work[20];
if ((count > 0) && (1 == sscanf(buf, "%d", &nth))) { if (sscanf(buf, "%10s", work) == 1) {
sdebug_every_nth = nth; if (strncasecmp(work, "0x", 2) == 0) {
if (nth && !sdebug_statistics) { if (kstrtoint(work + 2, 16, &nth) == 0)
pr_info("every_nth needs statistics=1, set it\n"); goto every_nth_done;
sdebug_statistics = true; } else {
if (kstrtoint(work, 10, &nth) == 0)
goto every_nth_done;
} }
tweak_cmnd_count();
return count;
} }
return -EINVAL; return -EINVAL;
every_nth_done:
sdebug_every_nth = nth;
if (nth && !sdebug_statistics) {
pr_info("every_nth needs statistics=1, set it\n");
sdebug_statistics = true;
}
tweak_cmnd_count();
return count;
} }
static DRIVER_ATTR_RW(every_nth); static DRIVER_ATTR_RW(every_nth);
...@@ -7102,12 +7072,6 @@ static bool fake_timeout(struct scsi_cmnd *scp) ...@@ -7102,12 +7072,6 @@ static bool fake_timeout(struct scsi_cmnd *scp)
return false; return false;
} }
static bool fake_host_busy(struct scsi_cmnd *scp)
{
return (sdebug_opts & SDEBUG_OPT_HOST_BUSY) &&
(atomic_read(&sdebug_cmnd_count) % abs(sdebug_every_nth)) == 0;
}
static int scsi_debug_queuecommand(struct Scsi_Host *shost, static int scsi_debug_queuecommand(struct Scsi_Host *shost,
struct scsi_cmnd *scp) struct scsi_cmnd *scp)
{ {
...@@ -7116,7 +7080,6 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, ...@@ -7116,7 +7080,6 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
const struct opcode_info_t *oip; const struct opcode_info_t *oip;
const struct opcode_info_t *r_oip; const struct opcode_info_t *r_oip;
struct sdebug_dev_info *devip; struct sdebug_dev_info *devip;
u8 *cmd = scp->cmnd; u8 *cmd = scp->cmnd;
int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *); int (*r_pfp)(struct scsi_cmnd *, struct sdebug_dev_info *);
int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL; int (*pfp)(struct scsi_cmnd *, struct sdebug_dev_info *) = NULL;
...@@ -7126,10 +7089,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, ...@@ -7126,10 +7089,15 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
u16 sa; u16 sa;
u8 opcode = cmd[0]; u8 opcode = cmd[0];
bool has_wlun_rl; bool has_wlun_rl;
bool inject_now;
scsi_set_resid(scp, 0); scsi_set_resid(scp, 0);
if (sdebug_statistics) if (sdebug_statistics) {
atomic_inc(&sdebug_cmnd_count); atomic_inc(&sdebug_cmnd_count);
inject_now = inject_on_this_cmd();
} else {
inject_now = false;
}
if (unlikely(sdebug_verbose && if (unlikely(sdebug_verbose &&
!(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) { !(SDEBUG_OPT_NO_CDB_NOISE & sdebug_opts))) {
char b[120]; char b[120];
...@@ -7147,7 +7115,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, ...@@ -7147,7 +7115,7 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name, sdev_printk(KERN_INFO, sdp, "%s: tag=%#x, cmd %s\n", my_name,
blk_mq_unique_tag(scp->request), b); blk_mq_unique_tag(scp->request), b);
} }
if (fake_host_busy(scp)) if (unlikely(inject_now && (sdebug_opts & SDEBUG_OPT_HOST_BUSY)))
return SCSI_MLQUEUE_HOST_BUSY; return SCSI_MLQUEUE_HOST_BUSY;
has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS); has_wlun_rl = (sdp->lun == SCSI_W_LUN_REPORT_LUNS);
if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl)) if (unlikely((sdp->lun >= sdebug_max_luns) && !has_wlun_rl))
...@@ -7161,6 +7129,9 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost, ...@@ -7161,6 +7129,9 @@ static int scsi_debug_queuecommand(struct Scsi_Host *shost,
if (NULL == devip) if (NULL == devip)
goto err_out; goto err_out;
} }
if (unlikely(inject_now && !atomic_read(&sdeb_inject_pending)))
atomic_set(&sdeb_inject_pending, 1);
na = oip->num_attached; na = oip->num_attached;
r_pfp = oip->pfp; r_pfp = oip->pfp;
if (na) { /* multiple commands with this opcode */ if (na) { /* multiple commands with this opcode */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment