Commit c104f1fa authored by Linus Torvalds

Merge branch 'for-3.4/drivers' of git://git.kernel.dk/linux-block

Pull block driver bits from Jens Axboe:

 - A series of fixes for mtip32xx.  Most from Asai at Micron, but also
   one from Greg, getting rid of the dependency on PCIE_HOTPLUG.

 - A few bug fixes for xen-blkfront, and blkback.

 - A virtio-blk fix from Vivek, making resize actually work.

 - Two fixes from Stephen, making larger transfers possible on cciss.
   This is needed for tape drive support.

* 'for-3.4/drivers' of git://git.kernel.dk/linux-block:
  block: mtip32xx: remove HOTPLUG_PCI_PCIE dependancy
  mtip32xx: dump tagmap on failure
  mtip32xx: fix handling of commands in various scenarios
  mtip32xx: Shorten macro names
  mtip32xx: misc changes
  mtip32xx: Add new sysfs entry 'status'
  mtip32xx: make setting comp_time as common
  mtip32xx: Add new bitwise flag 'dd_flag'
  mtip32xx: fix error handling in mtip_init()
  virtio-blk: Call revalidate_disk() upon online disk resize
  xen/blkback: Make optional features be really optional.
  xen/blkback: Squash the discard support for 'file' and 'phy' type.
  mtip32xx: fix incorrect value set for drv_cleanup_done, and re-initialize and start port in mtip_restart_port()
  cciss: Fix scsi tape io with more than 255 scatter gather elements
  cciss: Initialize scsi host max_sectors for tape drive support
  xen-blkfront: make blkif_io_lock spinlock per-device
  xen/blkfront: don't put bdev right after getting it
  xen-blkfront: use bitmap_set() and bitmap_clear()
  xen/blkback: Enable blkback on HVM guests
  xen/blkback: use grant-table.c hypercall wrappers
parents d8dd0b6d 63634806
What: /sys/block/rssd*/registers
Date: March 2012
KernelVersion: 3.3
Contact: Asai Thambi S P <asamymuthupa@micron.com>
Description: This is a read-only file. It dumps the driver information and
hardware registers listed below:
- S ACTive
- Command Issue
- Allocated
- Completed
- PORT IRQ STAT
- HOST IRQ STAT
What: /sys/block/rssd*/status
Date: April 2012
KernelVersion: 3.4
Contact: Asai Thambi S P <asamymuthupa@micron.com>
Description: This is a read-only file. Indicates the status of the device.
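
A minimal userspace sketch of how these two read-only entries can be consumed (not part of the commit; "rssd0" is an assumed instance of /sys/block/rssd*):

/* Editor's sketch: dump the mtip32xx sysfs entries documented above. */
#include <stdio.h>

int main(void)
{
	const char *paths[] = {
		"/sys/block/rssd0/status",	/* device status string (KernelVersion 3.4) */
		"/sys/block/rssd0/registers",	/* tag bitmaps and IRQ status dump (KernelVersion 3.3) */
	};
	char line[256];
	int i;

	for (i = 0; i < 2; i++) {
		FILE *f = fopen(paths[i], "r");

		if (!f) {
			perror(paths[i]);
			continue;
		}
		printf("---- %s ----\n", paths[i]);
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
	}
	return 0;
}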
......@@ -866,6 +866,7 @@ cciss_scsi_detect(ctlr_info_t *h)
sh->can_queue = cciss_tape_cmds;
sh->sg_tablesize = h->maxsgentries;
sh->max_cmd_len = MAX_COMMAND_SIZE;
sh->max_sectors = h->cciss_max_sectors;
((struct cciss_scsi_adapter_data_t *)
h->scsi_ctlr)->scsi_host = sh;
......@@ -1410,7 +1411,7 @@ static void cciss_scatter_gather(ctlr_info_t *h, CommandList_struct *c,
/* track how many SG entries we are using */
if (request_nsgs > h->maxSG)
h->maxSG = request_nsgs;
c->Header.SGTotal = (__u8) request_nsgs + chained;
c->Header.SGTotal = (u16) request_nsgs + chained;
if (request_nsgs > h->max_cmd_sgentries)
c->Header.SGList = h->max_cmd_sgentries;
else
......
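
Why the two cciss fixes matter for tape support can be seen from a small standalone demo of the cast changed above (an editor's sketch, not driver code; the values are made up):

/* Editor's sketch: the old (__u8) cast silently wraps the scatter-gather
 * count once a request carries more than 255 SG elements, which large scsi
 * tape transfers can do; widening the cast to u16 preserves the count. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int request_nsgs = 300;	/* hypothetical large tape transfer */
	unsigned int chained = 1;

	uint8_t  old_sgtotal = (uint8_t)request_nsgs + chained;		/* 300 wraps to 44, +1 = 45 */
	uint16_t new_sgtotal = (uint16_t)request_nsgs + chained;	/* stays 301 */

	printf("old SGTotal = %u, new SGTotal = %u\n", old_sgtotal, new_sgtotal);
	return 0;
}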
......@@ -4,6 +4,6 @@
config BLK_DEV_PCIESSD_MTIP32XX
tristate "Block Device Driver for Micron PCIe SSDs"
depends on HOTPLUG_PCI_PCIE
depends on PCI
help
This enables the block driver for Micron PCIe SSDs.
This diff is collapsed.
......@@ -34,8 +34,8 @@
/* offset of Device Control register in PCIe extended capabilities space */
#define PCIE_CONFIG_EXT_DEVICE_CONTROL_OFFSET 0x48
/* # of times to retry timed out IOs */
#define MTIP_MAX_RETRIES 5
/* # of times to retry timed out/failed IOs */
#define MTIP_MAX_RETRIES 2
/* Various timeout values in ms */
#define MTIP_NCQ_COMMAND_TIMEOUT_MS 5000
......@@ -114,12 +114,41 @@
#define __force_bit2int (unsigned int __force)
/* below are bit numbers in 'flags' defined in mtip_port */
#define MTIP_FLAG_IC_ACTIVE_BIT 0
#define MTIP_FLAG_EH_ACTIVE_BIT 1
#define MTIP_FLAG_SVC_THD_ACTIVE_BIT 2
#define MTIP_FLAG_ISSUE_CMDS_BIT 4
#define MTIP_FLAG_REBUILD_BIT 5
#define MTIP_FLAG_SVC_THD_SHOULD_STOP_BIT 8
#define MTIP_PF_IC_ACTIVE_BIT 0 /* pio/ioctl */
#define MTIP_PF_EH_ACTIVE_BIT 1 /* error handling */
#define MTIP_PF_SE_ACTIVE_BIT 2 /* secure erase */
#define MTIP_PF_DM_ACTIVE_BIT 3 /* download microcode */
#define MTIP_PF_PAUSE_IO ((1 << MTIP_PF_IC_ACTIVE_BIT) | \
(1 << MTIP_PF_EH_ACTIVE_BIT) | \
(1 << MTIP_PF_SE_ACTIVE_BIT) | \
(1 << MTIP_PF_DM_ACTIVE_BIT))
#define MTIP_PF_SVC_THD_ACTIVE_BIT 4
#define MTIP_PF_ISSUE_CMDS_BIT 5
#define MTIP_PF_REBUILD_BIT 6
#define MTIP_PF_SVC_THD_STOP_BIT 8
/* below are bit numbers in 'dd_flag' defined in driver_data */
#define MTIP_DDF_REMOVE_PENDING_BIT 1
#define MTIP_DDF_OVER_TEMP_BIT 2
#define MTIP_DDF_WRITE_PROTECT_BIT 3
#define MTIP_DDF_STOP_IO ((1 << MTIP_DDF_REMOVE_PENDING_BIT) | \
(1 << MTIP_DDF_OVER_TEMP_BIT) | \
(1 << MTIP_DDF_WRITE_PROTECT_BIT))
#define MTIP_DDF_CLEANUP_BIT 5
#define MTIP_DDF_RESUME_BIT 6
#define MTIP_DDF_INIT_DONE_BIT 7
#define MTIP_DDF_REBUILD_FAILED_BIT 8
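
For orientation, a small standalone sketch of how bit numbers like these compose into the MTIP_PF_PAUSE_IO / MTIP_DDF_STOP_IO masks (not taken from mtip32xx.c, which manipulates the bits with the kernel's set_bit()/test_bit() helpers; plain shifts are used so the snippet builds in userspace):

/* Editor's sketch: one mask test answers "is any blocking condition set?". */
#include <stdio.h>

#define MTIP_PF_IC_ACTIVE_BIT	0	/* pio/ioctl */
#define MTIP_PF_EH_ACTIVE_BIT	1	/* error handling */
#define MTIP_PF_SE_ACTIVE_BIT	2	/* secure erase */
#define MTIP_PF_DM_ACTIVE_BIT	3	/* download microcode */
#define MTIP_PF_PAUSE_IO	((1 << MTIP_PF_IC_ACTIVE_BIT) | \
				 (1 << MTIP_PF_EH_ACTIVE_BIT) | \
				 (1 << MTIP_PF_SE_ACTIVE_BIT) | \
				 (1 << MTIP_PF_DM_ACTIVE_BIT))

int main(void)
{
	unsigned long flags = 0;		/* stands in for port->flags */

	flags |= 1UL << MTIP_PF_EH_ACTIVE_BIT;	/* error handling became active */

	if (flags & MTIP_PF_PAUSE_IO)
		printf("pause new I/O (flags=0x%lx, mask=0x%x)\n",
		       flags, MTIP_PF_PAUSE_IO);
	return 0;
}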
__packed struct smart_attr{
u8 attr_id;
u16 flags;
u8 cur;
u8 worst;
u32 data;
u8 res[3];
};
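
A quick way to see why the struct is declared __packed: the fields presumably map a fixed 12-byte SMART attribute entry read straight out of a device buffer, so compiler padding would misalign every field after attr_id. A hedged userspace sketch, with GCC's packed attribute standing in for the kernel's __packed:

/* Editor's sketch: 1+2+1+1+4+3 = 12 bytes when packed; 16 with default padding. */
#include <stdio.h>
#include <stdint.h>

struct smart_attr {
	uint8_t  attr_id;
	uint16_t flags;
	uint8_t  cur;
	uint8_t  worst;
	uint32_t data;
	uint8_t  res[3];
} __attribute__((packed));

int main(void)
{
	printf("sizeof(struct smart_attr) = %zu\n", sizeof(struct smart_attr));
	return 0;
}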
/* Register Frame Information Structure (FIS), host to device. */
struct host_to_dev_fis {
......@@ -345,6 +374,12 @@ struct mtip_port {
* when the command slot and all associated data structures
* are no longer needed.
*/
u16 *log_buf;
dma_addr_t log_buf_dma;
u8 *smart_buf;
dma_addr_t smart_buf_dma;
unsigned long allocated[SLOTBITS_IN_LONGS];
/*
* used to queue commands when an internal command is in progress
......@@ -368,6 +403,7 @@ struct mtip_port {
* Timer used to complete commands that have been active for too long.
*/
struct timer_list cmd_timer;
unsigned long ic_pause_timer;
/*
* Semaphore used to block threads if there are no
* command slots available.
......@@ -404,13 +440,9 @@ struct driver_data {
unsigned slot_groups; /* number of slot groups the product supports */
atomic_t drv_cleanup_done; /* Atomic variable for SRSI */
unsigned long index; /* Index to determine the disk name */
unsigned int ftlrebuildflag; /* FTL rebuild flag */
atomic_t resumeflag; /* Atomic variable to track suspend/resume */
unsigned long dd_flag; /* NOTE: use atomic bit operations on this */
struct task_struct *mtip_svc_handler; /* task_struct of svc thd */
};
......
......@@ -351,6 +351,7 @@ static void virtblk_config_changed_work(struct work_struct *work)
cap_str_10, cap_str_2);
set_capacity(vblk->disk, capacity);
revalidate_disk(vblk->disk);
done:
mutex_unlock(&vblk->config_lock);
}
......
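
What the one-line virtio-blk change buys can be checked from the guest with a small sketch (hypothetical device node /dev/vda): before revalidate_disk() was added, set_capacity() updated the gendisk but an already-open block device kept reporting the stale size; with the fix an online resize shows up immediately:

/* Editor's sketch: query the current size of an (assumed) virtio disk. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	unsigned long long bytes = 0;
	int fd = open("/dev/vda", O_RDONLY);	/* hypothetical device node */

	if (fd < 0) {
		perror("open /dev/vda");
		return 1;
	}
	if (ioctl(fd, BLKGETSIZE64, &bytes) == 0)
		printf("/dev/vda: %llu bytes\n", bytes);
	close(fd);
	return 0;
}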
......@@ -321,6 +321,7 @@ struct seg_buf {
static void xen_blkbk_unmap(struct pending_req *req)
{
struct gnttab_unmap_grant_ref unmap[BLKIF_MAX_SEGMENTS_PER_REQUEST];
struct page *pages[BLKIF_MAX_SEGMENTS_PER_REQUEST];
unsigned int i, invcount = 0;
grant_handle_t handle;
int ret;
......@@ -332,25 +333,12 @@ static void xen_blkbk_unmap(struct pending_req *req)
gnttab_set_unmap_op(&unmap[invcount], vaddr(req, i),
GNTMAP_host_map, handle);
pending_handle(req, i) = BLKBACK_INVALID_HANDLE;
pages[invcount] = virt_to_page(vaddr(req, i));
invcount++;
}
ret = HYPERVISOR_grant_table_op(
GNTTABOP_unmap_grant_ref, unmap, invcount);
ret = gnttab_unmap_refs(unmap, pages, invcount, false);
BUG_ON(ret);
/*
* Note, we use invcount, so nr->pages, so we can't index
* using vaddr(req, i).
*/
for (i = 0; i < invcount; i++) {
ret = m2p_remove_override(
virt_to_page(unmap[i].host_addr), false);
if (ret) {
pr_alert(DRV_PFX "Failed to remove M2P override for %lx\n",
(unsigned long)unmap[i].host_addr);
continue;
}
}
}
static int xen_blkbk_map(struct blkif_request *req,
......@@ -378,7 +366,7 @@ static int xen_blkbk_map(struct blkif_request *req,
pending_req->blkif->domid);
}
ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref, map, nseg);
ret = gnttab_map_refs(map, NULL, &blkbk->pending_page(pending_req, 0), nseg);
BUG_ON(ret);
/*
......@@ -398,15 +386,6 @@ static int xen_blkbk_map(struct blkif_request *req,
if (ret)
continue;
ret = m2p_add_override(PFN_DOWN(map[i].dev_bus_addr),
blkbk->pending_page(pending_req, i), NULL);
if (ret) {
pr_alert(DRV_PFX "Failed to install M2P override for %lx (ret: %d)\n",
(unsigned long)map[i].dev_bus_addr, ret);
/* We could switch over to GNTTABOP_copy */
continue;
}
seg[i].buf = map[i].dev_bus_addr |
(req->u.rw.seg[i].first_sect << 9);
}
......@@ -419,21 +398,18 @@ static int dispatch_discard_io(struct xen_blkif *blkif,
int err = 0;
int status = BLKIF_RSP_OKAY;
struct block_device *bdev = blkif->vbd.bdev;
unsigned long secure;
blkif->st_ds_req++;
xen_blkif_get(blkif);
if (blkif->blk_backend_type == BLKIF_BACKEND_PHY ||
blkif->blk_backend_type == BLKIF_BACKEND_FILE) {
unsigned long secure = (blkif->vbd.discard_secure &&
secure = (blkif->vbd.discard_secure &&
(req->u.discard.flag & BLKIF_DISCARD_SECURE)) ?
BLKDEV_DISCARD_SECURE : 0;
err = blkdev_issue_discard(bdev,
req->u.discard.sector_number,
err = blkdev_issue_discard(bdev, req->u.discard.sector_number,
req->u.discard.nr_sectors,
GFP_KERNEL, secure);
} else
err = -EOPNOTSUPP;
if (err == -EOPNOTSUPP) {
pr_debug(DRV_PFX "discard op failed, not supported\n");
......@@ -830,7 +806,7 @@ static int __init xen_blkif_init(void)
int i, mmap_pages;
int rc = 0;
if (!xen_pv_domain())
if (!xen_domain())
return -ENODEV;
blkbk = kzalloc(sizeof(struct xen_blkbk), GFP_KERNEL);
......
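
For context on the discard path touched above: blkdev_issue_discard() in the backend is the in-kernel counterpart of the BLKDISCARD ioctl, and the BLKIF_DISCARD_SECURE handling corresponds to BLKSECDISCARD. A userspace sketch of the equivalent request (destructive; the device node is a placeholder, do not run against a disk holding data):

/* Editor's sketch: discard the first 1 MiB of an (assumed) block device. */
#include <stdio.h>
#include <stdint.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/fs.h>

int main(void)
{
	uint64_t range[2] = { 0, 1 << 20 };	/* byte offset, byte length */
	int fd = open("/dev/sdX", O_WRONLY);	/* placeholder device node */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, BLKDISCARD, range) < 0)	/* BLKSECDISCARD for a secure discard */
		perror("BLKDISCARD");
	close(fd);
	return 0;
}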
......@@ -146,11 +146,6 @@ enum blkif_protocol {
BLKIF_PROTOCOL_X86_64 = 3,
};
enum blkif_backend_type {
BLKIF_BACKEND_PHY = 1,
BLKIF_BACKEND_FILE = 2,
};
struct xen_vbd {
/* What the domain refers to this vbd as. */
blkif_vdev_t handle;
......@@ -177,7 +172,6 @@ struct xen_blkif {
unsigned int irq;
/* Comms information. */
enum blkif_protocol blk_protocol;
enum blkif_backend_type blk_backend_type;
union blkif_back_rings blk_rings;
void *blk_ring;
/* The VBD attached to this interface. */
......
......@@ -381,72 +381,49 @@ int xen_blkbk_flush_diskcache(struct xenbus_transaction xbt,
err = xenbus_printf(xbt, dev->nodename, "feature-flush-cache",
"%d", state);
if (err)
xenbus_dev_fatal(dev, err, "writing feature-flush-cache");
dev_warn(&dev->dev, "writing feature-flush-cache (%d)", err);
return err;
}
int xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
static void xen_blkbk_discard(struct xenbus_transaction xbt, struct backend_info *be)
{
struct xenbus_device *dev = be->dev;
struct xen_blkif *blkif = be->blkif;
char *type;
int err;
int state = 0;
type = xenbus_read(XBT_NIL, dev->nodename, "type", NULL);
if (!IS_ERR(type)) {
if (strncmp(type, "file", 4) == 0) {
state = 1;
blkif->blk_backend_type = BLKIF_BACKEND_FILE;
}
if (strncmp(type, "phy", 3) == 0) {
struct block_device *bdev = be->blkif->vbd.bdev;
struct request_queue *q = bdev_get_queue(bdev);
if (blk_queue_discard(q)) {
err = xenbus_printf(xbt, dev->nodename,
"discard-granularity", "%u",
q->limits.discard_granularity);
if (err) {
xenbus_dev_fatal(dev, err,
"writing discard-granularity");
goto kfree;
dev_warn(&dev->dev, "writing discard-granularity (%d)", err);
return;
}
err = xenbus_printf(xbt, dev->nodename,
"discard-alignment", "%u",
q->limits.discard_alignment);
if (err) {
xenbus_dev_fatal(dev, err,
"writing discard-alignment");
goto kfree;
dev_warn(&dev->dev, "writing discard-alignment (%d)", err);
return;
}
state = 1;
blkif->blk_backend_type = BLKIF_BACKEND_PHY;
}
/* Optional. */
err = xenbus_printf(xbt, dev->nodename,
"discard-secure", "%d",
blkif->vbd.discard_secure);
if (err) {
xenbus_dev_fatal(dev, err,
"writting discard-secure");
goto kfree;
}
dev_warn(&dev->dev, "writing discard-secure (%d)", err);
return;
}
} else {
err = PTR_ERR(type);
xenbus_dev_fatal(dev, err, "reading type");
goto out;
}
err = xenbus_printf(xbt, dev->nodename, "feature-discard",
"%d", state);
if (err)
xenbus_dev_fatal(dev, err, "writing feature-discard");
kfree:
kfree(type);
out:
return err;
dev_warn(&dev->dev, "writing feature-discard (%d)", err);
}
int xen_blkbk_barrier(struct xenbus_transaction xbt,
struct backend_info *be, int state)
......@@ -457,7 +434,7 @@ int xen_blkbk_barrier(struct xenbus_transaction xbt,
err = xenbus_printf(xbt, dev->nodename, "feature-barrier",
"%d", state);
if (err)
xenbus_dev_fatal(dev, err, "writing feature-barrier");
dev_warn(&dev->dev, "writing feature-barrier (%d)", err);
return err;
}
......@@ -689,14 +666,12 @@ static void connect(struct backend_info *be)
return;
}
err = xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
if (err)
goto abort;
/* If we can't advertise it is OK. */
xen_blkbk_flush_diskcache(xbt, be, be->blkif->vbd.flush_support);
err = xen_blkbk_discard(xbt, be);
xen_blkbk_discard(xbt, be);
/* If we can't advertise it is OK. */
err = xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
xen_blkbk_barrier(xbt, be, be->blkif->vbd.flush_support);
err = xenbus_printf(xbt, dev->nodename, "sectors", "%llu",
(unsigned long long)vbd_sz(&be->blkif->vbd));
......
......@@ -43,6 +43,7 @@
#include <linux/slab.h>
#include <linux/mutex.h>
#include <linux/scatterlist.h>
#include <linux/bitmap.h>
#include <xen/xen.h>
#include <xen/xenbus.h>
......@@ -81,6 +82,7 @@ static const struct block_device_operations xlvbd_block_fops;
*/
struct blkfront_info
{
spinlock_t io_lock;
struct mutex mutex;
struct xenbus_device *xbdev;
struct gendisk *gd;
......@@ -105,8 +107,6 @@ struct blkfront_info
int is_ready;
};
static DEFINE_SPINLOCK(blkif_io_lock);
static unsigned int nr_minors;
static unsigned long *minors;
static DEFINE_SPINLOCK(minor_lock);
......@@ -177,8 +177,7 @@ static int xlbd_reserve_minors(unsigned int minor, unsigned int nr)
spin_lock(&minor_lock);
if (find_next_bit(minors, end, minor) >= end) {
for (; minor < end; ++minor)
__set_bit(minor, minors);
bitmap_set(minors, minor, nr);
rc = 0;
} else
rc = -EBUSY;
......@@ -193,8 +192,7 @@ static void xlbd_release_minors(unsigned int minor, unsigned int nr)
BUG_ON(end > nr_minors);
spin_lock(&minor_lock);
for (; minor < end; ++minor)
__clear_bit(minor, minors);
bitmap_clear(minors, minor, nr);
spin_unlock(&minor_lock);
}
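
The two hunks above are behaviour-preserving: bitmap_set(map, start, nr) and bitmap_clear(map, start, nr) do what the removed per-bit loops did, just in one call (and word-at-a-time internally). A standalone sketch of the equivalence, with a tiny local helper in place of the kernel API:

/* Editor's sketch: setting a run of minors bit-by-bit vs. as one region. */
#include <stdio.h>

#define NR_MINORS	64
#define BITS_PER_LONG	(8 * sizeof(unsigned long))

static unsigned long by_loop[NR_MINORS / BITS_PER_LONG];
static unsigned long by_region[NR_MINORS / BITS_PER_LONG];

static void set_one(unsigned long *map, unsigned int bit)
{
	map[bit / BITS_PER_LONG] |= 1UL << (bit % BITS_PER_LONG);
}

static void set_region(unsigned long *map, unsigned int start, unsigned int nr)
{
	while (nr--)		/* what bitmap_set() computes, minus its word-wise fast path */
		set_one(map, start++);
}

int main(void)
{
	unsigned int minor = 16, nr = 8, end = minor + nr, m;

	for (m = minor; m < end; ++m)		/* old style: one __set_bit() per minor */
		set_one(by_loop, m);

	set_region(by_region, minor, nr);	/* new style: one bitmap_set() call */

	printf("loop: 0x%lx  region: 0x%lx  (identical)\n", by_loop[0], by_region[0]);
	return 0;
}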
......@@ -419,7 +417,7 @@ static int xlvbd_init_blk_queue(struct gendisk *gd, u16 sector_size)
struct request_queue *rq;
struct blkfront_info *info = gd->private_data;
rq = blk_init_queue(do_blkif_request, &blkif_io_lock);
rq = blk_init_queue(do_blkif_request, &info->io_lock);
if (rq == NULL)
return -1;
......@@ -636,14 +634,14 @@ static void xlvbd_release_gendisk(struct blkfront_info *info)
if (info->rq == NULL)
return;
spin_lock_irqsave(&blkif_io_lock, flags);
spin_lock_irqsave(&info->io_lock, flags);
/* No more blkif_request(). */
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irqrestore(&blkif_io_lock, flags);
spin_unlock_irqrestore(&info->io_lock, flags);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
......@@ -675,16 +673,16 @@ static void blkif_restart_queue(struct work_struct *work)
{
struct blkfront_info *info = container_of(work, struct blkfront_info, work);
spin_lock_irq(&blkif_io_lock);
spin_lock_irq(&info->io_lock);
if (info->connected == BLKIF_STATE_CONNECTED)
kick_pending_request_queues(info);
spin_unlock_irq(&blkif_io_lock);
spin_unlock_irq(&info->io_lock);
}
static void blkif_free(struct blkfront_info *info, int suspend)
{
/* Prevent new requests being issued until we fix things up. */
spin_lock_irq(&blkif_io_lock);
spin_lock_irq(&info->io_lock);
info->connected = suspend ?
BLKIF_STATE_SUSPENDED : BLKIF_STATE_DISCONNECTED;
/* No more blkif_request(). */
......@@ -692,7 +690,7 @@ static void blkif_free(struct blkfront_info *info, int suspend)
blk_stop_queue(info->rq);
/* No more gnttab callback work. */
gnttab_cancel_free_callback(&info->callback);
spin_unlock_irq(&blkif_io_lock);
spin_unlock_irq(&info->io_lock);
/* Flush gnttab callback work. Must be done with no locks held. */
flush_work_sync(&info->work);
......@@ -728,10 +726,10 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
struct blkfront_info *info = (struct blkfront_info *)dev_id;
int error;
spin_lock_irqsave(&blkif_io_lock, flags);
spin_lock_irqsave(&info->io_lock, flags);
if (unlikely(info->connected != BLKIF_STATE_CONNECTED)) {
spin_unlock_irqrestore(&blkif_io_lock, flags);
spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
......@@ -816,7 +814,7 @@ static irqreturn_t blkif_interrupt(int irq, void *dev_id)
kick_pending_request_queues(info);
spin_unlock_irqrestore(&blkif_io_lock, flags);
spin_unlock_irqrestore(&info->io_lock, flags);
return IRQ_HANDLED;
}
......@@ -991,6 +989,7 @@ static int blkfront_probe(struct xenbus_device *dev,
}
mutex_init(&info->mutex);
spin_lock_init(&info->io_lock);
info->xbdev = dev;
info->vdevice = vdevice;
info->connected = BLKIF_STATE_DISCONNECTED;
......@@ -1068,7 +1067,7 @@ static int blkif_recover(struct blkfront_info *info)
xenbus_switch_state(info->xbdev, XenbusStateConnected);
spin_lock_irq(&blkif_io_lock);
spin_lock_irq(&info->io_lock);
/* Now safe for us to use the shared ring */
info->connected = BLKIF_STATE_CONNECTED;
......@@ -1079,7 +1078,7 @@ static int blkif_recover(struct blkfront_info *info)
/* Kick any other new requests queued since we resumed */
kick_pending_request_queues(info);
spin_unlock_irq(&blkif_io_lock);
spin_unlock_irq(&info->io_lock);
return 0;
}
......@@ -1277,10 +1276,10 @@ static void blkfront_connect(struct blkfront_info *info)
xenbus_switch_state(info->xbdev, XenbusStateConnected);
/* Kick pending requests. */
spin_lock_irq(&blkif_io_lock);
spin_lock_irq(&info->io_lock);
info->connected = BLKIF_STATE_CONNECTED;
kick_pending_request_queues(info);
spin_unlock_irq(&blkif_io_lock);
spin_unlock_irq(&info->io_lock);
add_disk(info->gd);
......@@ -1410,7 +1409,6 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
mutex_lock(&blkfront_mutex);
bdev = bdget_disk(disk, 0);
bdput(bdev);
if (bdev->bd_openers)
goto out;
......@@ -1441,6 +1439,7 @@ static int blkif_release(struct gendisk *disk, fmode_t mode)
}
out:
bdput(bdev);
mutex_unlock(&blkfront_mutex);
return 0;
}
......
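
The blkif_io_lock conversion above follows the standard pattern: move the lock into the per-device structure, initialize it in the probe path, and hand it to blk_init_queue(), so completion handling on one vbd no longer serializes against every other vbd in the guest. A loose userspace illustration of that locking-granularity change (pthread spinlocks stand in for the kernel spinlock; all names here are invented):

/* Editor's illustration: per-device locks remove cross-device contention. */
#include <pthread.h>
#include <stdio.h>

struct fake_blkfront_info {
	pthread_spinlock_t io_lock;	/* was: one global blkif_io_lock */
	unsigned long inflight;
};

static void fake_probe(struct fake_blkfront_info *info)
{
	pthread_spin_init(&info->io_lock, PTHREAD_PROCESS_PRIVATE);
	info->inflight = 1;
}

static void fake_interrupt(struct fake_blkfront_info *info)
{
	pthread_spin_lock(&info->io_lock);	/* holds up only this device */
	info->inflight--;
	pthread_spin_unlock(&info->io_lock);
}

int main(void)
{
	struct fake_blkfront_info vda, vdb;

	fake_probe(&vda);
	fake_probe(&vdb);
	fake_interrupt(&vda);	/* does not contend with vdb's io_lock */
	fake_interrupt(&vdb);
	printf("vda inflight=%lu, vdb inflight=%lu\n", vda.inflight, vdb.inflight);
	return 0;
}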