Commit a8946afe authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-block

Pull block layer fixes from Jens Axboe:
 "Distilled down version of bug fixes for 3.7.  The patches have been
  well tested.  If you notice that commit dates are from today, it's
  because I pulled less important bits out and shuffled them into the
  3.8 mix.  Apart from that, no changes, base still the same.

  It contains:

   - Fix for aoe, don't run request_fn while it's plugged.

   - Fix for a regression in floppy since 3.6, which causes problems if
     no floppy is found.

   - Stable fix for blk_exec(), don't touch a request after it has been
     sent to the scheduler (and the device as well).

   - Five fixes for various nasties in mtip32xx."

* 'for-linus' of git://git.kernel.dk/linux-block:
  block: Don't access request after it might be freed
  mtip32xx: Fix padding issue
  aoe: avoid running request handler on plugged queue
  mtip32xx: fix potential NULL pointer dereference in mtip_timeout_function()
  mtip32xx: fix shift larger than type warning
  mtip32xx: Fix incorrect mask used for erase mode
  mtip32xx: Fix to make lba address correct in big-endian systems
  mtip32xx: fix potential crash on SEC_ERASE_UNIT
  dm: fix deadlock with request based dm and queue request_fn recursion
  floppy: destroy floppy workqueue before cleaning up the queue
parents f789dcc7 893d290f
@@ -52,11 +52,17 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 			   rq_end_io_fn *done)
 {
 	int where = at_head ? ELEVATOR_INSERT_FRONT : ELEVATOR_INSERT_BACK;
+	bool is_pm_resume;
 
 	WARN_ON(irqs_disabled());
 
 	rq->rq_disk = bd_disk;
 	rq->end_io = done;
+	/*
+	 * need to check this before __blk_run_queue(), because rq can
+	 * be freed before that returns.
+	 */
+	is_pm_resume = rq->cmd_type == REQ_TYPE_PM_RESUME;
 
 	spin_lock_irq(q->queue_lock);
@@ -71,7 +77,7 @@ void blk_execute_rq_nowait(struct request_queue *q, struct gendisk *bd_disk,
 	__elv_add_request(q, rq, where);
 	__blk_run_queue(q);
 	/* the queue is stopped so it won't be run */
-	if (rq->cmd_type == REQ_TYPE_PM_RESUME)
+	if (is_pm_resume)
 		q->request_fn(q);
 	spin_unlock_irq(q->queue_lock);
 }
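The blk-exec change above is a use-after-free fix: once the request has been handed to the elevator and __blk_run_queue() has been called, it may already have been completed and freed, so rq->cmd_type has to be read into a local (is_pm_resume) beforehand. Below is a minimal user-space sketch of the same pattern, copying what you still need out of an object before passing it to code that may free it; the names (fake_request, submit_and_maybe_free) are invented for illustration and are not block-layer APIs.

/*
 * Minimal user-space sketch (not kernel code) of the pattern the fix uses:
 * copy any field you still need out of an object before handing it to code
 * that may free it. All names here are invented for illustration.
 */
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct fake_request {
	int cmd_type;			/* stands in for rq->cmd_type */
};

/*
 * Hands the request to "the hardware"; it may be completed and freed before
 * this returns, just as __blk_run_queue() may complete and free rq.
 */
static void submit_and_maybe_free(struct fake_request *rq)
{
	free(rq);			/* the request is gone after this */
}

int main(void)
{
	struct fake_request *rq = malloc(sizeof(*rq));

	if (!rq)
		return 1;
	rq->cmd_type = 42;

	/* Snapshot the field first, like the patch does with is_pm_resume. */
	bool is_special = (rq->cmd_type == 42);

	submit_and_maybe_free(rq);

	/* Safe: only the local copy is consulted, never the freed rq. */
	if (is_special)
		printf("ran the extra step without touching freed rq\n");
	return 0;
}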
@@ -935,7 +935,7 @@ aoe_end_request(struct aoedev *d, struct request *rq, int fastfail)
 
 	/* cf. http://lkml.org/lkml/2006/10/31/28 */
 	if (!fastfail)
-		q->request_fn(q);
+		__blk_run_queue(q);
 }
 
 static void
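The aoe change swaps a direct call to q->request_fn() for __blk_run_queue(), which only invokes the handler when the queue is actually allowed to run (not stopped or plugged). The toy sketch below shows the general shape of that guarded-runner idea in plain C; fake_queue, run_queue and handle are made-up names, not the real block-layer interfaces.

/*
 * Guarded-runner sketch (plain C, not the block layer): the wrapper
 * re-checks state that may forbid running right now before it calls the
 * raw handler. All names here are invented.
 */
#include <stdbool.h>
#include <stdio.h>

struct fake_queue {
	bool stopped;				/* e.g. plugged or stopped */
	void (*handler)(struct fake_queue *q);
};

/* Analogue of __blk_run_queue(): only call the handler when allowed. */
static void run_queue(struct fake_queue *q)
{
	if (q->stopped)
		return;				/* it will be rerun later */
	q->handler(q);
}

static void handle(struct fake_queue *q)
{
	(void)q;
	printf("dispatching requests\n");
}

int main(void)
{
	struct fake_queue q = { .stopped = true, .handler = handle };

	q.handler(&q);				/* unguarded: runs even while stopped */
	run_queue(&q);				/* guarded: correctly does nothing */
	return 0;
}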
@@ -4330,6 +4330,7 @@ static int __init do_floppy_init(void)
 out_unreg_blkdev:
 	unregister_blkdev(FLOPPY_MAJOR, "fd");
 out_put_disk:
+	destroy_workqueue(floppy_wq);
 	for (drive = 0; drive < N_DRIVE; drive++) {
 		if (!disks[drive])
 			break;
@@ -4340,7 +4341,6 @@ static int __init do_floppy_init(void)
 		}
 		put_disk(disks[drive]);
 	}
-	destroy_workqueue(floppy_wq);
 
 	return err;
 }
@@ -4555,6 +4555,8 @@ static void __exit floppy_module_exit(void)
 	unregister_blkdev(FLOPPY_MAJOR, "fd");
 	platform_driver_unregister(&floppy_driver);
 
+	destroy_workqueue(floppy_wq);
+
 	for (drive = 0; drive < N_DRIVE; drive++) {
 		del_timer_sync(&motor_off_timer[drive]);
@@ -4578,7 +4580,6 @@ static void __exit floppy_module_exit(void)
 	cancel_delayed_work_sync(&fd_timeout);
 	cancel_delayed_work_sync(&fd_timer);
-	destroy_workqueue(floppy_wq);
 
 	if (atomic_read(&usage_count))
 		floppy_release_irq_and_dma();
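Both floppy hunks move destroy_workqueue(floppy_wq) so the workqueue is flushed and destroyed before the disks and request queue its work items operate on are torn down. The sketch below is a user-space analogy of that teardown ordering, using pthreads rather than the kernel workqueue API; disk_state and worker are invented names.

/*
 * User-space analogy (pthreads, not the kernel workqueue API) for the
 * ordering the floppy fix enforces: stop and join the asynchronous worker
 * before freeing the data it works on. Everything here is illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct disk_state {
	atomic_int stop;
	long pending_work;
};

static void *worker(void *arg)
{
	struct disk_state *d = arg;

	/* Keeps touching *d until told to stop, like queued work items. */
	while (!atomic_load(&d->stop))
		d->pending_work++;
	return NULL;
}

int main(void)
{
	struct disk_state *d = calloc(1, sizeof(*d));
	pthread_t t;

	if (!d)
		return 1;
	atomic_init(&d->stop, 0);
	if (pthread_create(&t, NULL, worker, d))
		return 1;

	/*
	 * Correct teardown order, mirroring the patch:
	 * 1) shut the worker down (destroy_workqueue flushes work items),
	 * 2) only then release what the worker was using.
	 */
	atomic_store(&d->stop, 1);
	pthread_join(t, NULL);
	free(d);	/* freeing first and joining later would be a use-after-free */

	printf("worker stopped before its data was freed\n");
	return 0;
}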
@@ -559,7 +559,7 @@ static void mtip_timeout_function(unsigned long int data)
 	struct mtip_cmd *command;
 	int tag, cmdto_cnt = 0;
 	unsigned int bit, group;
-	unsigned int num_command_slots = port->dd->slot_groups * 32;
+	unsigned int num_command_slots;
 	unsigned long to, tagaccum[SLOTBITS_IN_LONGS];
 
 	if (unlikely(!port))
@@ -572,6 +572,7 @@ static void mtip_timeout_function(unsigned long int data)
 	}
 	/* clear the tag accumulator */
 	memset(tagaccum, 0, SLOTBITS_IN_LONGS * sizeof(long));
+	num_command_slots = port->dd->slot_groups * 32;
 
 	for (tag = 0; tag < num_command_slots; tag++) {
 		/*
@@ -2218,8 +2219,8 @@ static int exec_drive_taskfile(struct driver_data *dd,
 		fis.device);
 
 	/* check for erase mode support during secure erase.*/
-	if ((fis.command == ATA_CMD_SEC_ERASE_UNIT)
-					&& (outbuf[0] & MTIP_SEC_ERASE_MODE)) {
+	if ((fis.command == ATA_CMD_SEC_ERASE_UNIT) && outbuf &&
+			(outbuf[0] & MTIP_SEC_ERASE_MODE)) {
 		erasemode = 1;
 	}
@@ -2439,7 +2440,7 @@ static int mtip_hw_ioctl(struct driver_data *dd, unsigned int cmd,
  * return value
  *	None
  */
-static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
+static void mtip_hw_submit_io(struct driver_data *dd, sector_t sector,
 			      int nsect, int nents, int tag, void *callback,
 			      void *data, int dir)
 {
@@ -2447,6 +2448,7 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
 	struct mtip_port *port = dd->port;
 	struct mtip_cmd *command = &port->commands[tag];
 	int dma_dir = (dir == READ) ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
+	u64 start = sector;
 
 	/* Map the scatter list for DMA access */
 	nents = dma_map_sg(&dd->pdev->dev, command->sg, nents, dma_dir);
@@ -2465,8 +2467,12 @@ static void mtip_hw_submit_io(struct driver_data *dd, sector_t start,
 	fis->opts        = 1 << 7;
 	fis->command     =
 		(dir == READ ? ATA_CMD_FPDMA_READ : ATA_CMD_FPDMA_WRITE);
-	*((unsigned int *) &fis->lba_low) = (start & 0xFFFFFF);
-	*((unsigned int *) &fis->lba_low_ex) = ((start >> 24) & 0xFFFFFF);
+	fis->lba_low     = start & 0xFF;
+	fis->lba_mid     = (start >> 8) & 0xFF;
+	fis->lba_hi      = (start >> 16) & 0xFF;
+	fis->lba_low_ex  = (start >> 24) & 0xFF;
+	fis->lba_mid_ex  = (start >> 32) & 0xFF;
+	fis->lba_hi_ex   = (start >> 40) & 0xFF;
 	fis->device      = 1 << 6;
 	fis->features    = nsect & 0xFF;
 	fis->features_ex = (nsect >> 8) & 0xFF;
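The last mtip32xx.c hunk replaces two 32-bit stores through a pointer cast with explicit per-byte assignments, because the cast lays the LBA bytes out in host byte order and therefore only produces the intended FIS layout on little-endian machines. The stand-alone demo below illustrates the difference; fake_fis only mimics the low LBA bytes of a FIS, and memcpy() stands in for the old pointer cast so the demo stays free of alignment and aliasing problems.

/*
 * Stand-alone demo (not driver code) of why the old unsigned-int store is
 * endian-dependent. The struct below only mimics the layout of the low LBA
 * bytes of a host-to-device FIS; it is not the real structure.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct fake_fis {
	uint8_t lba_low;
	uint8_t lba_mid;
	uint8_t lba_hi;
	uint8_t device;
} __attribute__((packed));

int main(void)
{
	uint64_t start = 0x123456;		/* example 24-bit LBA */
	struct fake_fis a = {0}, b = {0};

	/*
	 * Old approach: a 32-bit store whose byte order follows the CPU.
	 * On little-endian this happens to put 0x56/0x34/0x12 into
	 * low/mid/hi; on big-endian the same store scrambles the fields.
	 */
	uint32_t v = (uint32_t)(start & 0xFFFFFF);
	memcpy(&a.lba_low, &v, sizeof(v));

	/*
	 * New approach: explicit per-byte assignment, identical on every
	 * architecture, which is what the patch switches to.
	 */
	b.lba_low = start & 0xFF;
	b.lba_mid = (start >> 8) & 0xFF;
	b.lba_hi  = (start >> 16) & 0xFF;

	printf("cast/store: low=%02x mid=%02x hi=%02x (host byte order)\n",
	       a.lba_low, a.lba_mid, a.lba_hi);
	printf("per-byte  : low=%02x mid=%02x hi=%02x (always this)\n",
	       b.lba_low, b.lba_mid, b.lba_hi);
	return 0;
}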
@@ -34,7 +34,7 @@
 #define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET	0x48
 
 /* check for erase mode support during secure erase */
-#define MTIP_SEC_ERASE_MODE	0x3
+#define MTIP_SEC_ERASE_MODE	0x2
 
 /* # of times to retry timed out/failed IOs */
 #define MTIP_MAX_RETRIES	2
@@ -155,14 +155,14 @@ enum {
 	MTIP_DDF_REBUILD_FAILED_BIT = 8,
 };
 
-__packed struct smart_attr{
+struct smart_attr {
 	u8 attr_id;
 	u16 flags;
 	u8 cur;
 	u8 worst;
 	u32 data;
 	u8 res[3];
-};
+} __packed;
 
 /* Register Frame Information Structure (FIS), host to device. */
 struct host_to_dev_fis {
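The smart_attr hunk moves __packed from before the struct keyword to after the closing brace. In the old position the attribute is generally not applied to the struct definition (GCC typically ignores it with a -Wattributes warning), so the structure kept its natural padding and no longer matched the 12-byte on-media attribute layout. The small demo below shows the size difference between the padded and the correctly packed form; the struct names are invented, and the 16-vs-12 result assumes a typical ABI with 4-byte u32 alignment.

/*
 * Demo of what moving __packed changes: only the form with the attribute
 * attached to the type drops the padding, so sizeof matches the 12 bytes
 * actually occupied by the SMART attribute fields. Invented names; sizes
 * assume a common ABI with 4-byte alignment for uint32_t.
 */
#include <stdint.h>
#include <stdio.h>

struct smart_attr_padded {		/* layout the old placement effectively produced */
	uint8_t  attr_id;
	uint16_t flags;
	uint8_t  cur;
	uint8_t  worst;
	uint32_t data;
	uint8_t  res[3];
};

struct smart_attr_packed {		/* the fixed form: attribute attached to the type */
	uint8_t  attr_id;
	uint16_t flags;
	uint8_t  cur;
	uint8_t  worst;
	uint32_t data;
	uint8_t  res[3];
} __attribute__((packed));

int main(void)
{
	/* Typically prints 16 vs 12: the padding after attr_id, worst and
	 * res disappears once the struct is packed. */
	printf("padded: %zu bytes\n", sizeof(struct smart_attr_padded));
	printf("packed: %zu bytes\n", sizeof(struct smart_attr_packed));
	return 0;
}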
@@ -740,8 +740,14 @@ static void rq_completed(struct mapped_device *md, int rw, int run_queue)
 	if (!md_in_flight(md))
 		wake_up(&md->wait);
 
+	/*
+	 * Run this off this callpath, as drivers could invoke end_io while
+	 * inside their request_fn (and holding the queue lock). Calling
+	 * back into ->request_fn() could deadlock attempting to grab the
+	 * queue lock again.
+	 */
 	if (run_queue)
-		blk_run_queue(md->queue);
+		blk_run_queue_async(md->queue);
 
 	/*
 	 * dm_put() must be at the end of this function. See the comment above
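The dm fix replaces blk_run_queue() with blk_run_queue_async() in rq_completed(): the completion can be reached from inside a driver's request_fn with the queue lock held, and rerunning the queue synchronously from there would recurse into request_fn and try to retake that non-recursive lock. The sketch below is a toy, single-threaded illustration of deferring the rerun instead of re-entering under the lock; it uses a pthreads mutex rather than the block layer, and every name in it is invented.

/*
 * Toy illustration (pthreads, not the block layer) of the deadlock shape
 * the dm fix avoids: a completion callback invoked while a non-recursive
 * lock is held must not call straight back into code that takes that lock;
 * it records that a rerun is needed and lets it happen after unlock.
 */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t queue_lock = PTHREAD_MUTEX_INITIALIZER;
static bool rerun_needed;

/* Analogue of blk_run_queue_async(): just note that work is pending. */
static void run_queue_async(void)
{
	rerun_needed = true;
}

/*
 * Completion callback, called with queue_lock already held. Calling
 * pthread_mutex_lock(&queue_lock) here (the blk_run_queue() analogue)
 * would self-deadlock, which is the shape of the reported dm hang.
 */
static void completion(void)
{
	run_queue_async();
}

static void request_fn(void)
{
	pthread_mutex_lock(&queue_lock);
	completion();			/* driver finishes a request inline */
	pthread_mutex_unlock(&queue_lock);

	if (rerun_needed) {		/* deferred rerun, lock no longer held */
		rerun_needed = false;
		printf("queue rerun happened outside the lock\n");
	}
}

int main(void)
{
	request_fn();
	return 0;
}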