Commit 9a6d5488 authored by Jens Axboe

ide: ensure atapi sense requests aren't preempted

There's an issue with how sense requests are handled in IDE. If ide-cd
encounters an error, it queues a sense request. With how IDE request
handling is done, that sense request must be the next request we handle.
But it's impossible to guarantee this, as another request could come in
between the sense request being queued and ->queue_rq() being run to
handle it. If that request ALSO fails, then we attempt to queue the
single sense request we have a second time.

Since we only support one active request at a time, defer request
processing while a sense request is queued.

Fixes: 60033520 ("ide: convert to blk-mq")
Reported-by: He Zhe <zhe.he@windriver.com>
Tested-by: He Zhe <zhe.he@windriver.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent 85bd6e61
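
The fix has three cooperating pieces, spread across the hunks below. As a
condensed sketch (all identifiers are taken from the diff itself; this is a
summary of the patch, not a literal excerpt):

```c
/* 1) Inserting the sense request at the head of the driver-private
 *    list marks it pending (ide_insert_request_head(), now called
 *    with hwif->lock held): */
drive->sense_rq_active = true;
list_add_tail(&rq->queuelist, &drive->rq_list);
kblockd_schedule_work(&drive->rq_work);

/* 2) ->queue_rq() refuses new work while a sense request is pending,
 *    so no other request can slip in ahead of it (ide_queue_rq()): */
spin_lock_irq(&hwif->lock);
if (drive->sense_rq_active) {
	spin_unlock_irq(&hwif->lock);
	return BLK_STS_DEV_RESOURCE;	/* blk-mq re-runs the queue later */
}
spin_unlock_irq(&hwif->lock);

/* 3) Completing the sense request clears the flag and re-opens the
 *    gate (ide_end_rq()): */
if (rq == drive->sense_rq) {
	drive->sense_rq = NULL;
	drive->sense_rq_active = false;
}
```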
--- a/drivers/ide/ide-atapi.c
+++ b/drivers/ide/ide-atapi.c
@@ -235,21 +235,28 @@ EXPORT_SYMBOL_GPL(ide_prep_sense);
 
 int ide_queue_sense_rq(ide_drive_t *drive, void *special)
 {
-	struct request *sense_rq = drive->sense_rq;
+	ide_hwif_t *hwif = drive->hwif;
+	struct request *sense_rq;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hwif->lock, flags);
 
 	/* deferred failure from ide_prep_sense() */
 	if (!drive->sense_rq_armed) {
 		printk(KERN_WARNING PFX "%s: error queuing a sense request\n",
 		       drive->name);
+		spin_unlock_irqrestore(&hwif->lock, flags);
 		return -ENOMEM;
 	}
 
+	sense_rq = drive->sense_rq;
 	ide_req(sense_rq)->special = special;
 	drive->sense_rq_armed = false;
 
 	drive->hwif->rq = NULL;
 
 	ide_insert_request_head(drive, sense_rq);
+	spin_unlock_irqrestore(&hwif->lock, flags);
 	return 0;
 }
 EXPORT_SYMBOL_GPL(ide_queue_sense_rq);
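
For context, the consumer of this helper is the ATAPI error path. A hedged
sketch, modeled on ide_retry_pc() in ide-atapi.c (simplified; the sense-packet
setup is elided): if the sense request cannot be queued, the failed request
has to be completed with an error instead of being retried.

```c
/* Sketch modeled on ide_retry_pc() in ide-atapi.c, simplified. */
void ide_retry_pc(ide_drive_t *drive)
{
	struct request *failed_rq = drive->hwif->rq;
	struct ide_atapi_pc *pc = &drive->request_sense_pc;

	(void)ide_read_error(drive);

	ide_init_pc(pc);
	pc->c[0] = GPCMD_REQUEST_SENSE;

	/* Queueing the sense request now fails atomically under
	 * hwif->lock; on failure, end the original request with an
	 * error rather than leaving it dangling. */
	if (ide_queue_sense_rq(drive, pc))
		ide_complete_rq(drive, BLK_STS_IOERR, blk_rq_bytes(failed_rq));
}
```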
--- a/drivers/ide/ide-io.c
+++ b/drivers/ide/ide-io.c
@@ -68,8 +68,10 @@ int ide_end_rq(ide_drive_t *drive, struct request *rq, blk_status_t error,
 	}
 
 	if (!blk_update_request(rq, error, nr_bytes)) {
-		if (rq == drive->sense_rq)
+		if (rq == drive->sense_rq) {
 			drive->sense_rq = NULL;
+			drive->sense_rq_active = false;
+		}
 
 		__blk_mq_end_request(rq, error);
 		return 0;
@@ -451,16 +453,11 @@ void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq)
 		blk_mq_delay_run_hw_queue(q->queue_hw_ctx[0], 3);
 }
 
-/*
- * Issue a new request to a device.
- */
-blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
-			  const struct blk_mq_queue_data *bd)
+blk_status_t ide_issue_rq(ide_drive_t *drive, struct request *rq,
+			  bool local_requeue)
 {
-	ide_drive_t *drive = hctx->queue->queuedata;
-	ide_hwif_t *hwif = drive->hwif;
+	ide_hwif_t *hwif = drive->hwif;
 	struct ide_host *host = hwif->host;
-	struct request *rq = bd->rq;
 	ide_startstop_t startstop;
 
 	if (!blk_rq_is_passthrough(rq) && !(rq->rq_flags & RQF_DONTPREP)) {
@@ -474,8 +471,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 	if (ide_lock_host(host, hwif))
 		return BLK_STS_DEV_RESOURCE;
 
-	blk_mq_start_request(rq);
-
 	spin_lock_irq(&hwif->lock);
 
 	if (!ide_lock_port(hwif)) {
@@ -510,18 +505,6 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 		hwif->cur_dev = drive;
 		drive->dev_flags &= ~(IDE_DFLAG_SLEEPING | IDE_DFLAG_PARKED);
 
-		/*
-		 * we know that the queue isn't empty, but this can happen
-		 * if ->prep_rq() decides to kill a request
-		 */
-		if (!rq) {
-			rq = bd->rq;
-			if (!rq) {
-				ide_unlock_port(hwif);
-				goto out;
-			}
-		}
-
 		/*
 		 * Sanity: don't accept a request that isn't a PM request
 		 * if we are currently power managed. This is very important as
@@ -560,9 +543,12 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 		}
 	} else {
 plug_device:
+		if (local_requeue)
+			list_add(&rq->queuelist, &drive->rq_list);
 		spin_unlock_irq(&hwif->lock);
 		ide_unlock_host(host);
-		ide_requeue_and_plug(drive, rq);
+		if (!local_requeue)
+			ide_requeue_and_plug(drive, rq);
 
 		return BLK_STS_OK;
 	}
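
The local_requeue flag distinguishes the two callers of ide_issue_rq(). The
same plug_device path, annotated (same logic as the hunk above, with
explanatory comments added):

```c
plug_device:
	if (local_requeue)
		/* Called from the kblockd work handler with the queue
		 * quiesced: requeueing through blk-mq is not an option,
		 * so park the request on the driver-private list. */
		list_add(&rq->queuelist, &drive->rq_list);
	spin_unlock_irq(&hwif->lock);
	ide_unlock_host(host);
	if (!local_requeue)
		/* Normal ->queue_rq() dispatch: hand the request back to
		 * blk-mq, which re-runs the hardware queue after a short
		 * delay (see ide_requeue_and_plug() above). */
		ide_requeue_and_plug(drive, rq);

	return BLK_STS_OK;
```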
@@ -573,6 +559,26 @@ blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
 	return BLK_STS_OK;
 }
 
+/*
+ * Issue a new request to a device.
+ */
+blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *hctx,
+			  const struct blk_mq_queue_data *bd)
+{
+	ide_drive_t *drive = hctx->queue->queuedata;
+	ide_hwif_t *hwif = drive->hwif;
+
+	spin_lock_irq(&hwif->lock);
+	if (drive->sense_rq_active) {
+		spin_unlock_irq(&hwif->lock);
+		return BLK_STS_DEV_RESOURCE;
+	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_start_request(bd->rq);
+	return ide_issue_rq(drive, bd->rq, false);
+}
+
 static int drive_is_ready(ide_drive_t *drive)
 {
 	ide_hwif_t *hwif = drive->hwif;
@@ -893,13 +899,8 @@ EXPORT_SYMBOL_GPL(ide_pad_transfer);
 
 void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
 {
-	ide_hwif_t *hwif = drive->hwif;
-	unsigned long flags;
-
-	spin_lock_irqsave(&hwif->lock, flags);
+	drive->sense_rq_active = true;
 	list_add_tail(&rq->queuelist, &drive->rq_list);
-	spin_unlock_irqrestore(&hwif->lock, flags);
-
 	kblockd_schedule_work(&drive->rq_work);
 }
 EXPORT_SYMBOL_GPL(ide_insert_request_head);
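
Note that ide_insert_request_head() no longer takes hwif->lock itself; both
call sites (ide_queue_sense_rq() above and issue_park_cmd() below) now hold
the lock across the insertion. A hypothetical guard, not part of this patch,
could make that contract explicit:

```c
/* Hypothetical sketch, not in the patch: document the new locking
 * contract with a lockdep assertion. */
void ide_insert_request_head(ide_drive_t *drive, struct request *rq)
{
	lockdep_assert_held(&drive->hwif->lock);

	drive->sense_rq_active = true;
	list_add_tail(&rq->queuelist, &drive->rq_list);
	kblockd_schedule_work(&drive->rq_work);
}
```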
--- a/drivers/ide/ide-park.c
+++ b/drivers/ide/ide-park.c
@@ -54,7 +54,9 @@ static void issue_park_cmd(ide_drive_t *drive, unsigned long timeout)
 	scsi_req(rq)->cmd[0] = REQ_UNPARK_HEADS;
 	scsi_req(rq)->cmd_len = 1;
 	ide_req(rq)->type = ATA_PRIV_MISC;
+	spin_lock_irq(&hwif->lock);
 	ide_insert_request_head(drive, rq);
+	spin_unlock_irq(&hwif->lock);
 
 out:
 	return;
--- a/drivers/ide/ide-probe.c
+++ b/drivers/ide/ide-probe.c
@@ -1159,18 +1159,27 @@ static void drive_rq_insert_work(struct work_struct *work)
 	ide_drive_t *drive = container_of(work, ide_drive_t, rq_work);
 	ide_hwif_t *hwif = drive->hwif;
 	struct request *rq;
+	blk_status_t ret;
 	LIST_HEAD(list);
 
-	spin_lock_irq(&hwif->lock);
-	if (!list_empty(&drive->rq_list))
-		list_splice_init(&drive->rq_list, &list);
-	spin_unlock_irq(&hwif->lock);
+	blk_mq_quiesce_queue(drive->queue);
 
-	while (!list_empty(&list)) {
-		rq = list_first_entry(&list, struct request, queuelist);
+	ret = BLK_STS_OK;
+	spin_lock_irq(&hwif->lock);
+	while (!list_empty(&drive->rq_list)) {
+		rq = list_first_entry(&drive->rq_list, struct request, queuelist);
 		list_del_init(&rq->queuelist);
-		blk_execute_rq_nowait(drive->queue, rq->rq_disk, rq, true, NULL);
+
+		spin_unlock_irq(&hwif->lock);
+		ret = ide_issue_rq(drive, rq, true);
+		spin_lock_irq(&hwif->lock);
 	}
+	spin_unlock_irq(&hwif->lock);
+
+	blk_mq_unquiesce_queue(drive->queue);
+
+	if (ret != BLK_STS_OK)
+		kblockd_schedule_work(&drive->rq_work);
 }
 
 static const u8 ide_hwif_to_major[] =
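
Two details in this hunk are easy to miss: blk_mq_quiesce_queue() guarantees
that no ->queue_rq() invocation runs while the work handler issues deferred
requests directly, and hwif->lock must be dropped around each call because
ide_issue_rq() takes that same lock internally. The dispatch loop, annotated
(same code as the hunk, comments added):

```c
ret = BLK_STS_OK;
spin_lock_irq(&hwif->lock);
while (!list_empty(&drive->rq_list)) {
	rq = list_first_entry(&drive->rq_list, struct request, queuelist);
	list_del_init(&rq->queuelist);

	/* ide_issue_rq() acquires hwif->lock itself; holding it here
	 * would self-deadlock, hence the unlock/relock bracket. */
	spin_unlock_irq(&hwif->lock);
	ret = ide_issue_rq(drive, rq, true);
	spin_lock_irq(&hwif->lock);
}
spin_unlock_irq(&hwif->lock);

blk_mq_unquiesce_queue(drive->queue);

/* If the last issue attempt reported a failure (e.g. the host lock
 * was contended), kick the work again so dispatch is retried. */
if (ret != BLK_STS_OK)
	kblockd_schedule_work(&drive->rq_work);
```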
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -615,6 +615,7 @@ struct ide_drive_s {
 
 	/* current sense rq and buffer */
 	bool sense_rq_armed;
+	bool sense_rq_active;
 	struct request *sense_rq;
 	struct request_sense sense_data;
 
@@ -1219,6 +1220,7 @@ extern void ide_stall_queue(ide_drive_t *drive, unsigned long timeout);
 extern void ide_timer_expiry(struct timer_list *t);
 extern irqreturn_t ide_intr(int irq, void *dev_id);
 extern blk_status_t ide_queue_rq(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data *);
+extern blk_status_t ide_issue_rq(ide_drive_t *, struct request *, bool);
 extern void ide_requeue_and_plug(ide_drive_t *drive, struct request *rq);
 
 void ide_init_disk(struct gendisk *, ide_drive_t *);