Commit 8a9f772c authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block: (27 commits)
  block: remove unused copy_io_context()
  Documentation: remove anticipatory scheduler info
  block: remove REQ_HARDBARRIER
  ioprio: rcu_read_lock/unlock protect find_task_by_vpid call (V2)
  ioprio: fix RCU locking around task dereference
  block: ioctl: fix information leak to userland
  block: read i_size with i_size_read()
  cciss: fix proc warning on attempt to remove non-existant directory
  bio: take care not overflow page count when mapping/copying user data
  block: limit vec count in bio_kmalloc() and bio_alloc_map_data()
  block: take care not to overflow when calculating total iov length
  block: check for proper length of iov entries in blk_rq_map_user_iov()
  cciss: remove controllers supported by hpsa
  cciss: use usleep_range not msleep for small sleeps
  cciss: limit commands allocated on reset_devices
  cciss: Use kernel provided PCI state save and restore functions
  cciss: fix board status waiting code
  drbd: Removed checks for REQ_HARDBARRIER on incomming BIOs
  drbd: REQ_HARDBARRIER -> REQ_FUA transition for meta data accesses
  drbd: Removed the BIO_RW_BARRIER support form the receiver/epoch code
  ...
parents 25a34554 cedb4a7d

@@ -16,7 +16,7 @@ you can do so by typing:
 As of the Linux 2.6.10 kernel, it is now possible to change the
 IO scheduler for a given block device on the fly (thus making it possible,
 for instance, to set the CFQ scheduler for the system default, but
-set a specific device to use the anticipatory or noop schedulers - which
+set a specific device to use the deadline or noop schedulers - which
 can improve that device's throughput).
 To set a specific scheduler, simply do this:

@@ -31,7 +31,7 @@ a "cat /sys/block/DEV/queue/scheduler" - the list of valid names
 will be displayed, with the currently selected scheduler in brackets:
 # cat /sys/block/hda/queue/scheduler
-noop anticipatory deadline [cfq]
-# echo anticipatory > /sys/block/hda/queue/scheduler
+noop deadline [cfq]
+# echo deadline > /sys/block/hda/queue/scheduler
 # cat /sys/block/hda/queue/scheduler
-noop [anticipatory] deadline cfq
+noop [deadline] cfq

@@ -706,7 +706,7 @@ and is between 256 and 4096 characters. It is defined in the file
 arch/x86/kernel/cpu/cpufreq/elanfreq.c.
 elevator= [IOSCHED]
-Format: {"anticipatory" | "cfq" | "deadline" | "noop"}
-See Documentation/block/as-iosched.txt and
+Format: {"cfq" | "deadline" | "noop"}
 Documentation/block/deadline-iosched.txt for details.

...

@@ -21,8 +21,8 @@ three rotations, respectively, to balance the tree), with slightly slower
 To quote Linux Weekly News:
 There are a number of red-black trees in use in the kernel.
-The anticipatory, deadline, and CFQ I/O schedulers all employ
-rbtrees to track requests; the packet CD/DVD driver does the same.
+The deadline and CFQ I/O schedulers employ rbtrees to
+track requests; the packet CD/DVD driver does the same.
 The high-resolution timer code uses an rbtree to organize outstanding
 timer requests. The ext3 filesystem tracks directory entries in a
 red-black tree. Virtual memory areas (VMAs) are tracked with red-black

...
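As an aside, the rbtree usage described above follows one standard pattern throughout the kernel: the caller walks the tree to the insertion slot, links the new node, and lets the rbtree core rebalance. A minimal sketch of that pattern — the node type, its sector key, and the function name are hypothetical, not taken from this tree:

    #include <linux/rbtree.h>
    #include <linux/types.h>

    struct sketch_node {                    /* hypothetical node, keyed like an I/O request */
        struct rb_node rb;
        sector_t sector;
    };

    static void sketch_insert(struct rb_root *root, struct sketch_node *new)
    {
        struct rb_node **p = &root->rb_node, *parent = NULL;

        while (*p) {                        /* walk down to the correct leaf slot */
            struct sketch_node *cur = rb_entry(*p, struct sketch_node, rb);

            parent = *p;
            if (new->sector < cur->sector)
                p = &(*p)->rb_left;
            else
                p = &(*p)->rb_right;
        }
        rb_link_node(&new->rb, parent, p);  /* splice the node in */
        rb_insert_color(&new->rb, root);    /* rebalance and recolour */
    }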

@@ -1194,13 +1194,6 @@ static int __make_request(struct request_queue *q, struct bio *bio)
 int where = ELEVATOR_INSERT_SORT;
 int rw_flags;
-/* REQ_HARDBARRIER is no more */
-if (WARN_ONCE(bio->bi_rw & REQ_HARDBARRIER,
-"block: HARDBARRIER is deprecated, use FLUSH/FUA instead\n")) {
-bio_endio(bio, -EOPNOTSUPP);
-return 0;
-}
 /*
 * low level driver can indicate that it wants pages above a
 * certain limit bounced to low memory (ie for highmem, or even

@@ -1351,7 +1344,7 @@ static void handle_bad_sector(struct bio *bio)
 bdevname(bio->bi_bdev, b),
 bio->bi_rw,
 (unsigned long long)bio->bi_sector + bio_sectors(bio),
-(long long)(bio->bi_bdev->bd_inode->i_size >> 9));
+(long long)(i_size_read(bio->bi_bdev->bd_inode) >> 9));
 set_bit(BIO_EOF, &bio->bi_flags);
 }

@@ -1404,7 +1397,7 @@ static inline int bio_check_eod(struct bio *bio, unsigned int nr_sectors)
 return 0;
 /* Test device or partition size, when known. */
-maxsector = bio->bi_bdev->bd_inode->i_size >> 9;
+maxsector = i_size_read(bio->bi_bdev->bd_inode) >> 9;
 if (maxsector) {
 sector_t sector = bio->bi_sector;

...
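Every i_size_read() conversion in this series has the same motivation: on 32-bit kernels a plain load of the 64-bit i_size can tear, while i_size_read() uses a seqcount (or preemption protection) to return a consistent snapshot without holding i_mutex. A sketch of the intended pattern, with a hypothetical helper name:

    #include <linux/fs.h>

    /* Size of a block device in 512-byte sectors, read without tearing. */
    static inline sector_t bdev_nr_sectors_sketch(struct block_device *bdev)
    {
        return (sector_t)(i_size_read(bdev->bd_inode) >> 9);
    }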

@@ -153,20 +153,6 @@ struct io_context *get_io_context(gfp_t gfp_flags, int node)
 }
 EXPORT_SYMBOL(get_io_context);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc)
-{
-struct io_context *src = *psrc;
-struct io_context *dst = *pdst;
-if (src) {
-BUG_ON(atomic_long_read(&src->refcount) == 0);
-atomic_long_inc(&src->refcount);
-put_io_context(dst);
-*pdst = src;
-}
-}
-EXPORT_SYMBOL(copy_io_context);
 static int __init blk_ioc_init(void)
 {
 iocontext_cachep = kmem_cache_create("blkdev_ioc",

...

@@ -205,6 +205,8 @@ int blk_rq_map_user_iov(struct request_queue *q, struct request *rq,
 unaligned = 1;
 break;
 }
+if (!iov[i].iov_len)
+return -EINVAL;
 }
 if (unaligned || (q->dma_pad_mask & len) || map_data)

...

@@ -744,13 +744,13 @@ long compat_blkdev_ioctl(struct file *file, unsigned cmd, unsigned long arg)
 bdi->ra_pages = (arg * 512) / PAGE_CACHE_SIZE;
 return 0;
 case BLKGETSIZE:
-size = bdev->bd_inode->i_size;
+size = i_size_read(bdev->bd_inode);
 if ((size >> 9) > ~0UL)
 return -EFBIG;
 return compat_put_ulong(arg, size >> 9);
 case BLKGETSIZE64_32:
-return compat_put_u64(arg, bdev->bd_inode->i_size);
+return compat_put_u64(arg, i_size_read(bdev->bd_inode));
 case BLKTRACESETUP32:
 case BLKTRACESTART: /* compatible */

...

@@ -429,7 +429,7 @@ void elv_dispatch_sort(struct request_queue *q, struct request *rq)
 q->nr_sorted--;
 boundary = q->end_sector;
-stop_flags = REQ_SOFTBARRIER | REQ_HARDBARRIER | REQ_STARTED;
+stop_flags = REQ_SOFTBARRIER | REQ_STARTED;
 list_for_each_prev(entry, &q->queue_head) {
 struct request *pos = list_entry_rq(entry);

@@ -691,7 +691,7 @@ void elv_insert(struct request_queue *q, struct request *rq, int where)
 void __elv_add_request(struct request_queue *q, struct request *rq, int where,
 int plug)
 {
-if (rq->cmd_flags & (REQ_SOFTBARRIER | REQ_HARDBARRIER)) {
+if (rq->cmd_flags & REQ_SOFTBARRIER) {
 /* barriers are scheduling boundary, update end_sector */
 if (rq->cmd_type == REQ_TYPE_FS ||
 (rq->cmd_flags & REQ_DISCARD)) {

...

@@ -125,7 +125,7 @@ static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
 start >>= 9;
 len >>= 9;
-if (start + len > (bdev->bd_inode->i_size >> 9))
+if (start + len > (i_size_read(bdev->bd_inode) >> 9))
 return -EINVAL;
 if (secure)
 flags |= BLKDEV_DISCARD_SECURE;

@@ -242,6 +242,7 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 * We need to set the startsect first, the driver may
 * want to override it.
 */
+memset(&geo, 0, sizeof(geo));
 geo.start = get_start_sect(bdev);
 ret = disk->fops->getgeo(bdev, &geo);
 if (ret)

@@ -307,12 +308,12 @@ int blkdev_ioctl(struct block_device *bdev, fmode_t mode, unsigned cmd,
 ret = blkdev_reread_part(bdev);
 break;
 case BLKGETSIZE:
-size = bdev->bd_inode->i_size;
+size = i_size_read(bdev->bd_inode);
 if ((size >> 9) > ~0UL)
 return -EFBIG;
 return put_ulong(arg, size >> 9);
 case BLKGETSIZE64:
-return put_u64(arg, bdev->bd_inode->i_size);
+return put_u64(arg, i_size_read(bdev->bd_inode));
 case BLKTRACESTART:
 case BLKTRACESTOP:
 case BLKTRACESETUP:

...

@@ -321,33 +321,47 @@ static int sg_io(struct request_queue *q, struct gendisk *bd_disk,
 if (hdr->iovec_count) {
 const int size = sizeof(struct sg_iovec) * hdr->iovec_count;
 size_t iov_data_len;
-struct sg_iovec *iov;
+struct sg_iovec *sg_iov;
+struct iovec *iov;
+int i;
-iov = kmalloc(size, GFP_KERNEL);
-if (!iov) {
+sg_iov = kmalloc(size, GFP_KERNEL);
+if (!sg_iov) {
 ret = -ENOMEM;
 goto out;
 }
-if (copy_from_user(iov, hdr->dxferp, size)) {
-kfree(iov);
+if (copy_from_user(sg_iov, hdr->dxferp, size)) {
+kfree(sg_iov);
 ret = -EFAULT;
 goto out;
 }
+/*
+ * Sum up the vecs, making sure they don't overflow
+ */
+iov = (struct iovec *) sg_iov;
+iov_data_len = 0;
+for (i = 0; i < hdr->iovec_count; i++) {
+if (iov_data_len + iov[i].iov_len < iov_data_len) {
+kfree(sg_iov);
+ret = -EINVAL;
+goto out;
+}
+iov_data_len += iov[i].iov_len;
+}
 /* SG_IO howto says that the shorter of the two wins */
-iov_data_len = iov_length((struct iovec *)iov,
-hdr->iovec_count);
 if (hdr->dxfer_len < iov_data_len) {
-hdr->iovec_count = iov_shorten((struct iovec *)iov,
+hdr->iovec_count = iov_shorten(iov,
 hdr->iovec_count,
 hdr->dxfer_len);
 iov_data_len = hdr->dxfer_len;
 }
-ret = blk_rq_map_user_iov(q, rq, NULL, iov, hdr->iovec_count,
+ret = blk_rq_map_user_iov(q, rq, NULL, sg_iov, hdr->iovec_count,
 iov_data_len, GFP_KERNEL);
-kfree(iov);
+kfree(sg_iov);
 } else if (hdr->dxfer_len)
 ret = blk_rq_map_user(q, rq, NULL, hdr->dxferp, hdr->dxfer_len,
 GFP_KERNEL);

...
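The length check added above is the classic unsigned wrap-around guard: if adding a term makes the running total smaller, the sum has overflowed. The same idea as a self-contained user-space sketch, with hypothetical names:

    #include <stdbool.h>
    #include <stddef.h>

    /* Sum caller-supplied lengths, refusing totals that wrap around. */
    static bool sum_lengths(const size_t *len, int count, size_t *total)
    {
        size_t sum = 0;
        int i;

        for (i = 0; i < count; i++) {
            if (sum + len[i] < sum)         /* unsigned overflow */
                return false;
            sum += len[i];
        }
        *total = sum;
        return true;
    }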

@@ -180,9 +180,6 @@ aoeblk_make_request(struct request_queue *q, struct bio *bio)
 BUG();
 bio_endio(bio, -ENXIO);
 return 0;
-} else if (bio->bi_rw & REQ_HARDBARRIER) {
-bio_endio(bio, -EOPNOTSUPP);
-return 0;
 } else if (bio->bi_io_vec == NULL) {
 printk(KERN_ERR "aoe: bi_io_vec is NULL\n");
 BUG();

...

@@ -113,6 +113,8 @@ static struct board_type products[] = {
 {0x409D0E11, "Smart Array 6400 EM", &SA5_access},
 {0x40910E11, "Smart Array 6i", &SA5_access},
 {0x3225103C, "Smart Array P600", &SA5_access},
+{0x3223103C, "Smart Array P800", &SA5_access},
+{0x3234103C, "Smart Array P400", &SA5_access},
 {0x3235103C, "Smart Array P400i", &SA5_access},
 {0x3211103C, "Smart Array E200i", &SA5_access},
 {0x3212103C, "Smart Array E200", &SA5_access},

@@ -3753,7 +3755,7 @@ static void __devinit cciss_wait_for_mode_change_ack(ctlr_info_t *h)
 for (i = 0; i < MAX_CONFIG_WAIT; i++) {
 if (!(readl(h->vaddr + SA5_DOORBELL) & CFGTBL_ChangeReq))
 break;
-msleep(10);
+usleep_range(10000, 20000);
 }
 }

@@ -3937,10 +3939,9 @@ static int __devinit cciss_lookup_board_id(struct pci_dev *pdev, u32 *board_id)
 *board_id = ((subsystem_device_id << 16) & 0xffff0000) |
 subsystem_vendor_id;
-for (i = 0; i < ARRAY_SIZE(products); i++) {
+for (i = 0; i < ARRAY_SIZE(products); i++)
 if (*board_id == products[i].board_id)
 return i;
-}
 dev_warn(&pdev->dev, "unrecognized board ID: 0x%08x, ignoring.\n",
 *board_id);
 return -ENODEV;

@@ -3971,18 +3972,31 @@ static int __devinit cciss_pci_find_memory_BAR(struct pci_dev *pdev,
 return -ENODEV;
 }
-static int __devinit cciss_wait_for_board_ready(ctlr_info_t *h)
+static int __devinit cciss_wait_for_board_state(struct pci_dev *pdev,
+void __iomem *vaddr, int wait_for_ready)
+#define BOARD_READY 1
+#define BOARD_NOT_READY 0
 {
-int i;
+int i, iterations;
 u32 scratchpad;
-for (i = 0; i < CCISS_BOARD_READY_ITERATIONS; i++) {
-scratchpad = readl(h->vaddr + SA5_SCRATCHPAD_OFFSET);
-if (scratchpad == CCISS_FIRMWARE_READY)
-return 0;
+if (wait_for_ready)
+iterations = CCISS_BOARD_READY_ITERATIONS;
+else
+iterations = CCISS_BOARD_NOT_READY_ITERATIONS;
+for (i = 0; i < iterations; i++) {
+scratchpad = readl(vaddr + SA5_SCRATCHPAD_OFFSET);
+if (wait_for_ready) {
+if (scratchpad == CCISS_FIRMWARE_READY)
+return 0;
+} else {
+if (scratchpad != CCISS_FIRMWARE_READY)
+return 0;
+}
 msleep(CCISS_BOARD_READY_POLL_INTERVAL_MSECS);
 }
-dev_warn(&h->pdev->dev, "board not ready, timed out.\n");
+dev_warn(&pdev->dev, "board not ready, timed out.\n");
 return -ENODEV;
 }

@@ -4031,6 +4045,11 @@ static int __devinit cciss_find_cfgtables(ctlr_info_t *h)
 static void __devinit cciss_get_max_perf_mode_cmds(struct ctlr_info *h)
 {
 h->max_commands = readl(&(h->cfgtable->MaxPerformantModeCommands));
+/* Limit commands in memory limited kdump scenario. */
+if (reset_devices && h->max_commands > 32)
+h->max_commands = 32;
 if (h->max_commands < 16) {
 dev_warn(&h->pdev->dev, "Controller reports "
 "max supported commands of %d, an obvious lie. "

@@ -4148,7 +4167,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *h)
 err = -ENOMEM;
 goto err_out_free_res;
 }
-err = cciss_wait_for_board_ready(h);
+err = cciss_wait_for_board_state(h->pdev, h->vaddr, BOARD_READY);
 if (err)
 goto err_out_free_res;
 err = cciss_find_cfgtables(h);

@@ -4313,36 +4332,6 @@ static __devinit int cciss_message(struct pci_dev *pdev, unsigned char opcode, u
 #define cciss_soft_reset_controller(p) cciss_message(p, 1, 0)
 #define cciss_noop(p) cciss_message(p, 3, 0)
-static __devinit int cciss_reset_msi(struct pci_dev *pdev)
-{
-/* the #defines are stolen from drivers/pci/msi.h. */
-#define msi_control_reg(base) (base + PCI_MSI_FLAGS)
-#define PCI_MSIX_FLAGS_ENABLE (1 << 15)
-int pos;
-u16 control = 0;
-pos = pci_find_capability(pdev, PCI_CAP_ID_MSI);
-if (pos) {
-pci_read_config_word(pdev, msi_control_reg(pos), &control);
-if (control & PCI_MSI_FLAGS_ENABLE) {
-dev_info(&pdev->dev, "resetting MSI\n");
-pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSI_FLAGS_ENABLE);
-}
-}
-pos = pci_find_capability(pdev, PCI_CAP_ID_MSIX);
-if (pos) {
-pci_read_config_word(pdev, msi_control_reg(pos), &control);
-if (control & PCI_MSIX_FLAGS_ENABLE) {
-dev_info(&pdev->dev, "resetting MSI-X\n");
-pci_write_config_word(pdev, msi_control_reg(pos), control & ~PCI_MSIX_FLAGS_ENABLE);
-}
-}
-return 0;
-}
 static int cciss_controller_hard_reset(struct pci_dev *pdev,
 void * __iomem vaddr, bool use_doorbell)
 {

@@ -4397,17 +4386,17 @@ static int cciss_controller_hard_reset(struct pci_dev *pdev,
 * states or using the doorbell register. */
 static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 {
-u16 saved_config_space[32];
 u64 cfg_offset;
 u32 cfg_base_addr;
 u64 cfg_base_addr_index;
 void __iomem *vaddr;
 unsigned long paddr;
 u32 misc_fw_support, active_transport;
-int rc, i;
+int rc;
 CfgTable_struct __iomem *cfgtable;
 bool use_doorbell;
 u32 board_id;
+u16 command_register;
 /* For controllers as old a the p600, this is very nearly
 * the same thing as

@@ -4417,14 +4406,6 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 * pci_set_power_state(pci_dev, PCI_D0);
 * pci_restore_state(pci_dev);
 *
-* but we can't use these nice canned kernel routines on
-* kexec, because they also check the MSI/MSI-X state in PCI
-* configuration space and do the wrong thing when it is
-* set/cleared. Also, the pci_save/restore_state functions
-* violate the ordering requirements for restoring the
-* configuration space from the CCISS document (see the
-* comment below). So we roll our own ....
-*
 * For controllers newer than the P600, the pci power state
 * method of resetting doesn't work so we have another way
 * using the doorbell register.

@@ -4443,8 +4424,13 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 return -ENODEV;
 }
-for (i = 0; i < 32; i++)
-pci_read_config_word(pdev, 2*i, &saved_config_space[i]);
+/* Save the PCI command register */
+pci_read_config_word(pdev, 4, &command_register);
+/* Turn the board off. This is so that later pci_restore_state()
+* won't turn the board on before the rest of config space is ready.
+*/
+pci_disable_device(pdev);
+pci_save_state(pdev);
 /* find the first memory BAR, so we can find the cfg table */
 rc = cciss_pci_find_memory_BAR(pdev, &paddr);

@@ -4479,26 +4465,32 @@ static __devinit int cciss_kdump_hard_reset_controller(struct pci_dev *pdev)
 rc = cciss_controller_hard_reset(pdev, vaddr, use_doorbell);
 if (rc)
 goto unmap_cfgtable;
-/* Restore the PCI configuration space. The Open CISS
-* Specification says, "Restore the PCI Configuration
-* Registers, offsets 00h through 60h. It is important to
-* restore the command register, 16-bits at offset 04h,
-* last. Do not restore the configuration status register,
-* 16-bits at offset 06h." Note that the offset is 2*i.
-*/
-for (i = 0; i < 32; i++) {
-if (i == 2 || i == 3)
-continue;
-pci_write_config_word(pdev, 2*i, saved_config_space[i]);
+pci_restore_state(pdev);
+rc = pci_enable_device(pdev);
+if (rc) {
+dev_warn(&pdev->dev, "failed to enable device.\n");
+goto unmap_cfgtable;
 }
-wmb();
-pci_write_config_word(pdev, 4, saved_config_space[2]);
+pci_write_config_word(pdev, 4, command_register);
 /* Some devices (notably the HP Smart Array 5i Controller)
 need a little pause here */
 msleep(CCISS_POST_RESET_PAUSE_MSECS);
+/* Wait for board to become not ready, then ready. */
+dev_info(&pdev->dev, "Waiting for board to become ready.\n");
+rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_NOT_READY);
+if (rc) /* Don't bail, might be E500, etc. which can't be reset */
+dev_warn(&pdev->dev,
+"failed waiting for board to become not ready\n");
+rc = cciss_wait_for_board_state(pdev, vaddr, BOARD_READY);
+if (rc) {
+dev_warn(&pdev->dev,
+"failed waiting for board to become ready\n");
+goto unmap_cfgtable;
+}
+dev_info(&pdev->dev, "board ready.\n");
 /* Controller should be in simple mode at this point. If it's not,
 * It means we're on one of those controllers which doesn't support
 * the doorbell reset method and on which the PCI power management reset

@@ -4539,8 +4531,6 @@ static __devinit int cciss_init_reset_devices(struct pci_dev *pdev)
 return 0; /* just try to do the kdump anyhow. */
 if (rc)
 return -ENODEV;
-if (cciss_reset_msi(pdev))
-return -ENODEV;
 /* Now try to get the controller to respond to a no-op */
 for (i = 0; i < CCISS_POST_RESET_NOOP_RETRIES; i++) {

@@ -4936,7 +4926,8 @@ static void __exit cciss_cleanup(void)
 }
 }
 kthread_stop(cciss_scan_thread);
-remove_proc_entry("driver/cciss", NULL);
+if (proc_cciss)
+remove_proc_entry("driver/cciss", NULL);
 bus_unregister(&cciss_bus_type);
 }

...

@@ -200,10 +200,14 @@ struct ctlr_info
 * the above.
 */
 #define CCISS_BOARD_READY_WAIT_SECS (120)
+#define CCISS_BOARD_NOT_READY_WAIT_SECS (10)
 #define CCISS_BOARD_READY_POLL_INTERVAL_MSECS (100)
 #define CCISS_BOARD_READY_ITERATIONS \
 ((CCISS_BOARD_READY_WAIT_SECS * 1000) / \
 CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
+#define CCISS_BOARD_NOT_READY_ITERATIONS \
+((CCISS_BOARD_NOT_READY_WAIT_SECS * 1000) / \
+CCISS_BOARD_READY_POLL_INTERVAL_MSECS)
 #define CCISS_POST_RESET_PAUSE_MSECS (3000)
 #define CCISS_POST_RESET_NOOP_INTERVAL_MSECS (1000)
 #define CCISS_POST_RESET_NOOP_RETRIES (12)

...
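For orientation, with the unchanged 100 ms poll interval the two iteration macros work out to:

    CCISS_BOARD_READY_ITERATIONS     = (120 * 1000) / 100 = 1200 polls (120 s)
    CCISS_BOARD_NOT_READY_ITERATIONS = ( 10 * 1000) / 100 =  100 polls  (10 s)

so the new "wait for board to become not ready" step in the kdump reset path gives up after ten seconds rather than two minutes.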

@@ -78,11 +78,10 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 init_completion(&md_io.event);
 md_io.error = 0;
-if ((rw & WRITE) && !test_bit(MD_NO_BARRIER, &mdev->flags))
-rw |= REQ_HARDBARRIER;
+if ((rw & WRITE) && !test_bit(MD_NO_FUA, &mdev->flags))
+rw |= REQ_FUA;
 rw |= REQ_UNPLUG | REQ_SYNC;
-retry:
 bio = bio_alloc(GFP_NOIO, 1);
 bio->bi_bdev = bdev->md_bdev;
 bio->bi_sector = sector;

@@ -100,17 +99,6 @@ static int _drbd_md_sync_page_io(struct drbd_conf *mdev,
 wait_for_completion(&md_io.event);
 ok = bio_flagged(bio, BIO_UPTODATE) && md_io.error == 0;
-/* check for unsupported barrier op.
-* would rather check on EOPNOTSUPP, but that is not reliable.
-* don't try again for ANY return value != 0 */
-if (unlikely((bio->bi_rw & REQ_HARDBARRIER) && !ok)) {
-/* Try again with no barrier */
-dev_warn(DEV, "Barriers not supported on meta data device - disabling\n");
-set_bit(MD_NO_BARRIER, &mdev->flags);
-rw &= ~REQ_HARDBARRIER;
-bio_put(bio);
-goto retry;
-}
 out:
 bio_put(bio);
 return ok;

@@ -284,18 +272,32 @@ w_al_write_transaction(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 u32 xor_sum = 0;
 if (!get_ldev(mdev)) {
-dev_err(DEV, "get_ldev() failed in w_al_write_transaction\n");
+dev_err(DEV,
+"disk is %s, cannot start al transaction (-%d +%d)\n",
+drbd_disk_str(mdev->state.disk), evicted, new_enr);
 complete(&((struct update_al_work *)w)->event);
 return 1;
 }
 /* do we have to do a bitmap write, first?
 * TODO reduce maximum latency:
 * submit both bios, then wait for both,
-* instead of doing two synchronous sector writes. */
+* instead of doing two synchronous sector writes.
+* For now, we must not write the transaction,
+* if we cannot write out the bitmap of the evicted extent. */
 if (mdev->state.conn < C_CONNECTED && evicted != LC_FREE)
 drbd_bm_write_sect(mdev, evicted/AL_EXT_PER_BM_SECT);
-mutex_lock(&mdev->md_io_mutex); /* protects md_io_page, al_tr_cycle, ... */
+/* The bitmap write may have failed, causing a state change. */
+if (mdev->state.disk < D_INCONSISTENT) {
+dev_err(DEV,
+"disk is %s, cannot write al transaction (-%d +%d)\n",
+drbd_disk_str(mdev->state.disk), evicted, new_enr);
+complete(&((struct update_al_work *)w)->event);
+put_ldev(mdev);
+return 1;
+}
+mutex_lock(&mdev->md_io_mutex); /* protects md_io_buffer, al_tr_cycle, ... */
 buffer = (struct al_transaction *)page_address(mdev->md_io_page);
 buffer->magic = __constant_cpu_to_be32(DRBD_MAGIC);

@@ -739,7 +741,7 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
 unsigned int enr;
 unsigned long add = 0;
 char ppb[10];
-int i;
+int i, tmp;
 wait_event(mdev->al_wait, lc_try_lock(mdev->act_log));

@@ -747,7 +749,9 @@ void drbd_al_apply_to_bm(struct drbd_conf *mdev)
 enr = lc_element_by_index(mdev->act_log, i)->lc_number;
 if (enr == LC_FREE)
 continue;
-add += drbd_bm_ALe_set_all(mdev, enr);
+tmp = drbd_bm_ALe_set_all(mdev, enr);
+dynamic_dev_dbg(DEV, "AL: set %d bits in extent %u\n", tmp, enr);
+add += tmp;
 }
 lc_unlock(mdev->act_log);

...

@@ -114,11 +114,11 @@ struct drbd_conf;
 #define D_ASSERT(exp) if (!(exp)) \
 dev_err(DEV, "ASSERT( " #exp " ) in %s:%d\n", __FILE__, __LINE__)
 #define ERR_IF(exp) if (({ \
 int _b = (exp) != 0; \
-if (_b) dev_err(DEV, "%s: (%s) in %s:%d\n", \
+if (_b) dev_err(DEV, "ASSERT FAILED: %s: (%s) in %s:%d\n", \
 __func__, #exp, __FILE__, __LINE__); \
 _b; \
 }))
 /* Defines to control fault insertion */

@@ -749,17 +749,12 @@ struct drbd_epoch {
 /* drbd_epoch flag bits */
 enum {
-DE_BARRIER_IN_NEXT_EPOCH_ISSUED,
-DE_BARRIER_IN_NEXT_EPOCH_DONE,
-DE_CONTAINS_A_BARRIER,
 DE_HAVE_BARRIER_NUMBER,
-DE_IS_FINISHING,
 };
 enum epoch_event {
 EV_PUT,
 EV_GOT_BARRIER_NR,
-EV_BARRIER_DONE,
 EV_BECAME_LAST,
 EV_CLEANUP = 32, /* used as flag */
 };

@@ -801,11 +796,6 @@ enum {
 __EE_CALL_AL_COMPLETE_IO,
 __EE_MAY_SET_IN_SYNC,
-/* This epoch entry closes an epoch using a barrier.
-* On sucessful completion, the epoch is released,
-* and the P_BARRIER_ACK send. */
-__EE_IS_BARRIER,
 /* In case a barrier failed,
 * we need to resubmit without the barrier flag. */
 __EE_RESUBMITTED,

@@ -820,7 +810,6 @@ enum {
 };
 #define EE_CALL_AL_COMPLETE_IO (1<<__EE_CALL_AL_COMPLETE_IO)
 #define EE_MAY_SET_IN_SYNC (1<<__EE_MAY_SET_IN_SYNC)
-#define EE_IS_BARRIER (1<<__EE_IS_BARRIER)
 #define EE_RESUBMITTED (1<<__EE_RESUBMITTED)
 #define EE_WAS_ERROR (1<<__EE_WAS_ERROR)
 #define EE_HAS_DIGEST (1<<__EE_HAS_DIGEST)

@@ -843,16 +832,15 @@ enum {
 * Gets cleared when the state.conn
 * goes into C_CONNECTED state. */
 WRITE_BM_AFTER_RESYNC, /* A kmalloc() during resync failed */
-NO_BARRIER_SUPP, /* underlying block device doesn't implement barriers */
 CONSIDER_RESYNC,
-MD_NO_BARRIER, /* meta data device does not support barriers,
-so don't even try */
+MD_NO_FUA, /* Users wants us to not use FUA/FLUSH on meta data dev */
 SUSPEND_IO, /* suspend application io */
 BITMAP_IO, /* suspend application io;
 once no more io in flight, start bitmap io */
 BITMAP_IO_QUEUED, /* Started bitmap IO */
-GO_DISKLESS, /* Disk failed, local_cnt reached zero, we are going diskless */
+GO_DISKLESS, /* Disk is being detached, on io-error or admin request. */
+WAS_IO_ERROR, /* Local disk failed returned IO error */
 RESYNC_AFTER_NEG, /* Resync after online grow after the attach&negotiate finished. */
 NET_CONGESTED, /* The data socket is congested */

@@ -947,7 +935,6 @@ enum write_ordering_e {
 WO_none,
 WO_drain_io,
 WO_bdev_flush,
-WO_bio_barrier
 };
 struct fifo_buffer {

@@ -1281,6 +1268,7 @@ extern int drbd_bmio_set_n_write(struct drbd_conf *mdev);
 extern int drbd_bmio_clear_n_write(struct drbd_conf *mdev);
 extern int drbd_bitmap_io(struct drbd_conf *mdev, int (*io_fn)(struct drbd_conf *), char *why);
 extern void drbd_go_diskless(struct drbd_conf *mdev);
+extern void drbd_ldev_destroy(struct drbd_conf *mdev);
 /* Meta data layout

@@ -1798,17 +1786,17 @@ static inline void __drbd_chk_io_error_(struct drbd_conf *mdev, int forcedetach,
 case EP_PASS_ON:
 if (!forcedetach) {
 if (__ratelimit(&drbd_ratelimit_state))
-dev_err(DEV, "Local IO failed in %s."
-"Passing error on...\n", where);
+dev_err(DEV, "Local IO failed in %s.\n", where);
 break;
 }
 /* NOTE fall through to detach case if forcedetach set */
 case EP_DETACH:
 case EP_CALL_HELPER:
+set_bit(WAS_IO_ERROR, &mdev->flags);
 if (mdev->state.disk > D_FAILED) {
 _drbd_set_state(_NS(mdev, disk, D_FAILED), CS_HARD, NULL);
-dev_err(DEV, "Local IO failed in %s."
-"Detaching...\n", where);
+dev_err(DEV,
+"Local IO failed in %s. Detaching...\n", where);
 }
 break;
 }

@@ -1874,7 +1862,7 @@ static inline sector_t drbd_md_last_sector(struct drbd_backing_dev *bdev)
 static inline sector_t drbd_get_capacity(struct block_device *bdev)
 {
 /* return bdev ? get_capacity(bdev->bd_disk) : 0; */
-return bdev ? bdev->bd_inode->i_size >> 9 : 0;
+return bdev ? i_size_read(bdev->bd_inode) >> 9 : 0;
 }
 /**

@@ -2127,7 +2115,11 @@ static inline void put_ldev(struct drbd_conf *mdev)
 __release(local);
 D_ASSERT(i >= 0);
 if (i == 0) {
+if (mdev->state.disk == D_DISKLESS)
+/* even internal references gone, safe to destroy */
+drbd_ldev_destroy(mdev);
 if (mdev->state.disk == D_FAILED)
+/* all application IO references gone. */
 drbd_go_diskless(mdev);
 wake_up(&mdev->misc_wait);
 }

@@ -2138,6 +2130,10 @@ static inline int _get_ldev_if_state(struct drbd_conf *mdev, enum drbd_disk_stat
 {
 int io_allowed;
+/* never get a reference while D_DISKLESS */
+if (mdev->state.disk == D_DISKLESS)
+return 0;
 atomic_inc(&mdev->local_cnt);
 io_allowed = (mdev->state.disk >= mins);
 if (!io_allowed)

@@ -2406,12 +2402,12 @@ static inline void drbd_md_flush(struct drbd_conf *mdev)
 {
 int r;
-if (test_bit(MD_NO_BARRIER, &mdev->flags))
+if (test_bit(MD_NO_FUA, &mdev->flags))
 return;
 r = blkdev_issue_flush(mdev->ldev->md_bdev, GFP_KERNEL, NULL);
 if (r) {
-set_bit(MD_NO_BARRIER, &mdev->flags);
+set_bit(MD_NO_FUA, &mdev->flags);
 dev_err(DEV, "meta data flush failed with status %d, disabling md-flushes\n", r);
 }
 }

...

@@ -835,6 +835,15 @@ static union drbd_state sanitize_state(struct drbd_conf *mdev, union drbd_state
 ns.conn != C_UNCONNECTED && ns.conn != C_DISCONNECTING && ns.conn <= C_TEAR_DOWN)
 ns.conn = os.conn;
+/* we cannot fail (again) if we already detached */
+if (ns.disk == D_FAILED && os.disk == D_DISKLESS)
+ns.disk = D_DISKLESS;
+/* if we are only D_ATTACHING yet,
+* we can (and should) go directly to D_DISKLESS. */
+if (ns.disk == D_FAILED && os.disk == D_ATTACHING)
+ns.disk = D_DISKLESS;
 /* After C_DISCONNECTING only C_STANDALONE may follow */
 if (os.conn == C_DISCONNECTING && ns.conn != C_STANDALONE)
 ns.conn = os.conn;

@@ -1056,7 +1065,15 @@ int __drbd_set_state(struct drbd_conf *mdev,
 !test_and_set_bit(CONFIG_PENDING, &mdev->flags))
 set_bit(DEVICE_DYING, &mdev->flags);
-mdev->state.i = ns.i;
+/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
+* on the ldev here, to be sure the transition -> D_DISKLESS resp.
+* drbd_ldev_destroy() won't happen before our corresponding
+* after_state_ch works run, where we put_ldev again. */
+if ((os.disk != D_FAILED && ns.disk == D_FAILED) ||
+(os.disk != D_DISKLESS && ns.disk == D_DISKLESS))
+atomic_inc(&mdev->local_cnt);
+mdev->state = ns;
 wake_up(&mdev->misc_wait);
 wake_up(&mdev->state_wait);

@@ -1268,7 +1285,6 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 drbd_uuid_new_current(mdev);
 clear_bit(NEW_CUR_UUID, &mdev->flags);
-drbd_md_sync(mdev);
 }
 spin_lock_irq(&mdev->req_lock);
 _drbd_set_state(_NS(mdev, susp_fen, 0), CS_VERBOSE, NULL);

@@ -1365,63 +1381,64 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 os.disk > D_INCONSISTENT && ns.disk == D_INCONSISTENT)
 drbd_queue_bitmap_io(mdev, &drbd_bmio_set_n_write, NULL, "set_n_write from invalidate");
-/* first half of local IO error */
-if (os.disk > D_FAILED && ns.disk == D_FAILED) {
-enum drbd_io_error_p eh = EP_PASS_ON;
+/* first half of local IO error, failure to attach,
+* or administrative detach */
+if (os.disk != D_FAILED && ns.disk == D_FAILED) {
+enum drbd_io_error_p eh;
+int was_io_error;
+/* corresponding get_ldev was in __drbd_set_state, to serialize
+* our cleanup here with the transition to D_DISKLESS,
+* so it is safe to dreference ldev here. */
+eh = mdev->ldev->dc.on_io_error;
+was_io_error = test_and_clear_bit(WAS_IO_ERROR, &mdev->flags);
+/* current state still has to be D_FAILED,
+* there is only one way out: to D_DISKLESS,
+* and that may only happen after our put_ldev below. */
+if (mdev->state.disk != D_FAILED)
+dev_err(DEV,
+"ASSERT FAILED: disk is %s during detach\n",
+drbd_disk_str(mdev->state.disk));
 if (drbd_send_state(mdev))
-dev_warn(DEV, "Notified peer that my disk is broken.\n");
+dev_warn(DEV, "Notified peer that I am detaching my disk\n");
 else
-dev_err(DEV, "Sending state for drbd_io_error() failed\n");
+dev_err(DEV, "Sending state for detaching disk failed\n");
 drbd_rs_cancel_all(mdev);
-if (get_ldev_if_state(mdev, D_FAILED)) {
-eh = mdev->ldev->dc.on_io_error;
-put_ldev(mdev);
-}
-if (eh == EP_CALL_HELPER)
+/* In case we want to get something to stable storage still,
+* this may be the last chance.
+* Following put_ldev may transition to D_DISKLESS. */
+drbd_md_sync(mdev);
+put_ldev(mdev);
+if (was_io_error && eh == EP_CALL_HELPER)
 drbd_khelper(mdev, "local-io-error");
 }
-/* second half of local IO error handling,
-* after local_cnt references have reached zero: */
-if (os.disk == D_FAILED && ns.disk == D_DISKLESS) {
-mdev->rs_total = 0;
-mdev->rs_failed = 0;
-atomic_set(&mdev->rs_pending_cnt, 0);
-}
-if (os.disk > D_DISKLESS && ns.disk == D_DISKLESS) {
-/* We must still be diskless,
-* re-attach has to be serialized with this! */
-if (mdev->state.disk != D_DISKLESS)
-dev_err(DEV,
-"ASSERT FAILED: disk is %s while going diskless\n",
-drbd_disk_str(mdev->state.disk));
-/* we cannot assert local_cnt == 0 here, as get_ldev_if_state
-* will inc/dec it frequently. Since we became D_DISKLESS, no
-* one has touched the protected members anymore, though, so we
-* are safe to free them here. */
+/* second half of local IO error, failure to attach,
+* or administrative detach,
+* after local_cnt references have reached zero again */
+if (os.disk != D_DISKLESS && ns.disk == D_DISKLESS) {
+/* We must still be diskless,
+* re-attach has to be serialized with this! */
+if (mdev->state.disk != D_DISKLESS)
+dev_err(DEV,
+"ASSERT FAILED: disk is %s while going diskless\n",
+drbd_disk_str(mdev->state.disk));
+mdev->rs_total = 0;
+mdev->rs_failed = 0;
+atomic_set(&mdev->rs_pending_cnt, 0);
 if (drbd_send_state(mdev))
-dev_warn(DEV, "Notified peer that I detached my disk.\n");
+dev_warn(DEV, "Notified peer that I'm now diskless.\n");
 else
-dev_err(DEV, "Sending state for detach failed\n");
-lc_destroy(mdev->resync);
-mdev->resync = NULL;
-lc_destroy(mdev->act_log);
-mdev->act_log = NULL;
-__no_warn(local,
-drbd_free_bc(mdev->ldev);
-mdev->ldev = NULL;);
-if (mdev->md_io_tmpp) {
-__free_page(mdev->md_io_tmpp);
-mdev->md_io_tmpp = NULL;
-}
+dev_err(DEV, "Sending state for being diskless failed\n");
+/* corresponding get_ldev in __drbd_set_state
+* this may finaly trigger drbd_ldev_destroy. */
+put_ldev(mdev);
 }
 /* Disks got bigger while they were detached */

@@ -2772,11 +2789,6 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 drbd_set_defaults(mdev);
-/* for now, we do NOT yet support it,
-* even though we start some framework
-* to eventually support barriers */
-set_bit(NO_BARRIER_SUPP, &mdev->flags);
 atomic_set(&mdev->ap_bio_cnt, 0);
 atomic_set(&mdev->ap_pending_cnt, 0);
 atomic_set(&mdev->rs_pending_cnt, 0);

@@ -2842,7 +2854,7 @@ void drbd_init_set_defaults(struct drbd_conf *mdev)
 drbd_thread_init(mdev, &mdev->asender, drbd_asender);
 mdev->agreed_pro_version = PRO_VERSION_MAX;
-mdev->write_ordering = WO_bio_barrier;
+mdev->write_ordering = WO_bdev_flush;
 mdev->resync_wenr = LC_FREE;
 }

@@ -2899,7 +2911,6 @@ void drbd_mdev_cleanup(struct drbd_conf *mdev)
 D_ASSERT(list_empty(&mdev->resync_work.list));
 D_ASSERT(list_empty(&mdev->unplug_work.list));
 D_ASSERT(list_empty(&mdev->go_diskless.list));
 }

@@ -3660,6 +3671,8 @@ void drbd_uuid_new_current(struct drbd_conf *mdev) __must_hold(local)
 get_random_bytes(&val, sizeof(u64));
 _drbd_uuid_set(mdev, UI_CURRENT, val);
+/* get it to stable storage _now_ */
+drbd_md_sync(mdev);
 }
 void drbd_uuid_set_bm(struct drbd_conf *mdev, u64 val) __must_hold(local)

@@ -3756,19 +3769,31 @@ static int w_bitmap_io(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 return 1;
 }
+void drbd_ldev_destroy(struct drbd_conf *mdev)
+{
+lc_destroy(mdev->resync);
+mdev->resync = NULL;
+lc_destroy(mdev->act_log);
+mdev->act_log = NULL;
+__no_warn(local,
+drbd_free_bc(mdev->ldev);
+mdev->ldev = NULL;);
+if (mdev->md_io_tmpp) {
+__free_page(mdev->md_io_tmpp);
+mdev->md_io_tmpp = NULL;
+}
+clear_bit(GO_DISKLESS, &mdev->flags);
+}
 static int w_go_diskless(struct drbd_conf *mdev, struct drbd_work *w, int unused)
 {
 D_ASSERT(mdev->state.disk == D_FAILED);
 /* we cannot assert local_cnt == 0 here, as get_ldev_if_state will
 * inc/dec it frequently. Once we are D_DISKLESS, no one will touch
-* the protected members anymore, though, so in the after_state_ch work
-* it will be safe to free them. */
+* the protected members anymore, though, so once put_ldev reaches zero
+* again, it will be safe to free them. */
 drbd_force_state(mdev, NS(disk, D_DISKLESS));
-/* We need to wait for return of references checked out while we still
-* have been D_FAILED, though (drbd_md_sync, bitmap io). */
-wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
-clear_bit(GO_DISKLESS, &mdev->flags);
 return 1;
 }

@@ -3777,9 +3802,6 @@ void drbd_go_diskless(struct drbd_conf *mdev)
 D_ASSERT(mdev->state.disk == D_FAILED);
 if (!test_and_set_bit(GO_DISKLESS, &mdev->flags))
 drbd_queue_work(&mdev->data.work, &mdev->go_diskless);
-/* don't drbd_queue_work_front,
-* we need to serialize with the after_state_ch work
-* of the -> D_FAILED transition. */
 }
 /**

...

@@ -870,6 +870,11 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 retcode = ERR_DISK_CONFIGURED;
 goto fail;
 }
+/* It may just now have detached because of IO error. Make sure
+* drbd_ldev_destroy is done already, we may end up here very fast,
+* e.g. if someone calls attach from the on-io-error handler,
+* to realize a "hot spare" feature (not that I'd recommend that) */
+wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 /* allocation not in the IO path, cqueue thread context */
 nbc = kzalloc(sizeof(struct drbd_backing_dev), GFP_KERNEL);

@@ -1098,9 +1103,9 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 /* Reset the "barriers don't work" bits here, then force meta data to
 * be written, to ensure we determine if barriers are supported. */
 if (nbc->dc.no_md_flush)
-set_bit(MD_NO_BARRIER, &mdev->flags);
+set_bit(MD_NO_FUA, &mdev->flags);
 else
-clear_bit(MD_NO_BARRIER, &mdev->flags);
+clear_bit(MD_NO_FUA, &mdev->flags);
 /* Point of no return reached.
 * Devices and memory are no longer released by error cleanup below.

@@ -1112,8 +1117,8 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 nbc = NULL;
 resync_lru = NULL;
-mdev->write_ordering = WO_bio_barrier;
-drbd_bump_write_ordering(mdev, WO_bio_barrier);
+mdev->write_ordering = WO_bdev_flush;
+drbd_bump_write_ordering(mdev, WO_bdev_flush);
 if (drbd_md_test_flag(mdev->ldev, MDF_CRASHED_PRIMARY))
 set_bit(CRASHED_PRIMARY, &mdev->flags);

@@ -1262,7 +1267,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 force_diskless_dec:
 put_ldev(mdev);
 force_diskless:
-drbd_force_state(mdev, NS(disk, D_DISKLESS));
+drbd_force_state(mdev, NS(disk, D_FAILED));
 drbd_md_sync(mdev);
 release_bdev2_fail:
 if (nbc)

@@ -1285,10 +1290,19 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 return 0;
 }
+/* Detaching the disk is a process in multiple stages. First we need to lock
+* out application IO, in-flight IO, IO stuck in drbd_al_begin_io.
+* Then we transition to D_DISKLESS, and wait for put_ldev() to return all
+* internal references as well.
+* Only then we have finally detached. */
 static int drbd_nl_detach(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 struct drbd_nl_cfg_reply *reply)
 {
+drbd_suspend_io(mdev); /* so no-one is stuck in drbd_al_begin_io */
 reply->ret_code = drbd_request_state(mdev, NS(disk, D_DISKLESS));
+if (mdev->state.disk == D_DISKLESS)
+wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
+drbd_resume_io(mdev);
 return 0;
 }

@@ -1953,7 +1967,6 @@ static int drbd_nl_resume_io(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp
 if (test_bit(NEW_CUR_UUID, &mdev->flags)) {
 drbd_uuid_new_current(mdev);
 clear_bit(NEW_CUR_UUID, &mdev->flags);
-drbd_md_sync(mdev);
 }
 drbd_suspend_io(mdev);
 reply->ret_code = drbd_request_state(mdev, NS3(susp, 0, susp_nod, 0, susp_fen, 0));

...

@@ -158,7 +158,6 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
 [WO_none] = 'n',
 [WO_drain_io] = 'd',
 [WO_bdev_flush] = 'f',
-[WO_bio_barrier] = 'b',
 };
 seq_printf(seq, "version: " REL_VERSION " (api:%d/proto:%d-%d)\n%s\n",

...
...
@@ -258,7 +258,7 @@ void _req_may_be_done(struct drbd_request *req, struct bio_and_error *m)
     if (!hlist_unhashed(&req->colision))
         hlist_del(&req->colision);
     else
-        D_ASSERT((s & RQ_NET_MASK) == 0);
+        D_ASSERT((s & (RQ_NET_MASK & ~RQ_NET_DONE)) == 0);
     /* for writes we need to do some extra housekeeping */
     if (rw == WRITE)
@@ -813,7 +813,8 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
         mdev->state.conn >= C_CONNECTED));
     if (!(local || remote) && !is_susp(mdev->state)) {
-        dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
+        if (__ratelimit(&drbd_ratelimit_state))
+            dev_err(DEV, "IO ERROR: neither local nor remote disk\n");
         goto fail_free_complete;
     }
@@ -942,12 +943,21 @@ static int drbd_make_request_common(struct drbd_conf *mdev, struct bio *bio)
     if (local) {
         req->private_bio->bi_bdev = mdev->ldev->backing_bdev;
-        if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
-                     : rw == READ ? DRBD_FAULT_DT_RD
-                     :              DRBD_FAULT_DT_RA))
-            bio_endio(req->private_bio, -EIO);
-        else
-            generic_make_request(req->private_bio);
+        /* State may have changed since we grabbed our reference on the
+         * mdev->ldev member. Double check, and short-circuit to endio.
+         * In case the last activity log transaction failed to get on
+         * stable storage, and this is a WRITE, we may not even submit
+         * this bio. */
+        if (get_ldev(mdev)) {
+            if (FAULT_ACTIVE(mdev, rw == WRITE ? DRBD_FAULT_DT_WR
+                         : rw == READ ? DRBD_FAULT_DT_RD
+                         :              DRBD_FAULT_DT_RA))
+                bio_endio(req->private_bio, -EIO);
+            else
+                generic_make_request(req->private_bio);
+            put_ldev(mdev);
+        } else
+            bio_endio(req->private_bio, -EIO);
     }
     /* we need to plug ALWAYS since we possibly need to kick lo_dev.
@@ -1022,20 +1032,6 @@ int drbd_make_request_26(struct request_queue *q, struct bio *bio)
         return 0;
     }
-    /* Reject barrier requests if we know the underlying device does
-     * not support them.
-     * XXX: Need to get this info from peer as well some how so we
-     * XXX: reject if EITHER side/data/metadata area does not support them.
-     *
-     * because of those XXX, this is not yet enabled,
-     * i.e. in drbd_init_set_defaults we set the NO_BARRIER_SUPP bit.
-     */
-    if (unlikely(bio->bi_rw & REQ_HARDBARRIER) && test_bit(NO_BARRIER_SUPP, &mdev->flags)) {
-        /* dev_warn(DEV, "Rejecting barrier request as underlying device does not support\n"); */
-        bio_endio(bio, -EOPNOTSUPP);
-        return 0;
-    }
     /*
     * what we "blindly" assume:
     */
...
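The drbd_make_request_common hunk above wraps the actual submission in get_ldev()/put_ldev(): the local disk can go away between queuing the request and submitting its bio, so the code takes a fresh reference first and completes the bio with -EIO if that fails. As a rough illustration of the same try-get/use/put idea, here is a small standalone sketch built on C11 atomics; the names (backing_dev, dev_tryget) are invented for the example and are not DRBD or kernel APIs.

/* Illustrative only: a try-get/put reference guard similar in spirit
 * to get_ldev()/put_ldev(); not kernel code. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct backing_dev {
    atomic_int refs;    /* 0 means the device is already torn down */
};

static bool dev_tryget(struct backing_dev *d)
{
    int old = atomic_load(&d->refs);

    while (old > 0) {
        if (atomic_compare_exchange_weak(&d->refs, &old, old + 1))
            return true;    /* got a reference, device still present */
    }
    return false;           /* device gone, caller must bail out */
}

static void dev_put(struct backing_dev *d)
{
    atomic_fetch_sub(&d->refs, 1);
}

static void submit(struct backing_dev *d)
{
    if (dev_tryget(d)) {
        puts("submitting I/O to backing device");
        dev_put(d);
    } else {
        puts("backing device gone, completing with -EIO");
    }
}

int main(void)
{
    struct backing_dev d = { .refs = 1 };

    submit(&d);                 /* succeeds */
    atomic_store(&d.refs, 0);   /* simulate detach */
    submit(&d);                 /* short-circuits to the error path */
    return 0;
}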
@@ -102,12 +102,6 @@ void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
     put_ldev(mdev);
 }
-static int is_failed_barrier(int ee_flags)
-{
-    return (ee_flags & (EE_IS_BARRIER|EE_WAS_ERROR|EE_RESUBMITTED))
-            == (EE_IS_BARRIER|EE_WAS_ERROR);
-}
 /* writes on behalf of the partner, or resync writes,
  * "submitted" by the receiver, final stage. */
 static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
@@ -119,21 +113,6 @@ static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
     int is_syncer_req;
     int do_al_complete_io;
-    /* if this is a failed barrier request, disable use of barriers,
-     * and schedule for resubmission */
-    if (is_failed_barrier(e->flags)) {
-        drbd_bump_write_ordering(mdev, WO_bdev_flush);
-        spin_lock_irqsave(&mdev->req_lock, flags);
-        list_del(&e->w.list);
-        e->flags = (e->flags & ~EE_WAS_ERROR) | EE_RESUBMITTED;
-        e->w.cb = w_e_reissue;
-        /* put_ldev actually happens below, once we come here again. */
-        __release(local);
-        spin_unlock_irqrestore(&mdev->req_lock, flags);
-        drbd_queue_work(&mdev->data.work, &e->w);
-        return;
-    }
     D_ASSERT(e->block_id != ID_VACANT);
     /* after we moved e to done_ee,
@@ -925,7 +904,7 @@ int drbd_resync_finished(struct drbd_conf *mdev)
     drbd_md_sync(mdev);
     if (test_and_clear_bit(WRITE_BM_AFTER_RESYNC, &mdev->flags)) {
-        dev_warn(DEV, "Writing the whole bitmap, due to failed kmalloc\n");
+        dev_info(DEV, "Writing the whole bitmap\n");
         drbd_queue_bitmap_io(mdev, &drbd_bm_write, NULL, "write from resync_finished");
     }
...
@@ -481,12 +481,6 @@ static int do_bio_filebacked(struct loop_device *lo, struct bio *bio)
     if (bio_rw(bio) == WRITE) {
         struct file *file = lo->lo_backing_file;
-        /* REQ_HARDBARRIER is deprecated */
-        if (bio->bi_rw & REQ_HARDBARRIER) {
-            ret = -EOPNOTSUPP;
-            goto out;
-        }
         if (bio->bi_rw & REQ_FLUSH) {
             ret = vfs_fsync(file, 0);
             if (unlikely(ret && ret != -EINVAL)) {
...
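With REQ_HARDBARRIER gone, the file-backed loop path above only has to honour REQ_FLUSH, which it maps to vfs_fsync() on the backing file. A rough userspace analogue of that write-then-flush behaviour follows; the file name is made up for the demo and this is a sketch, not the driver's code.

/* Rough analogue of the loop driver's REQ_FLUSH handling: data goes to
 * the backing file and an explicit flush request becomes an fsync(). */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
    const char buf[] = "some block data";
    int fd = open("backing_file.img", O_CREAT | O_WRONLY, 0644);

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (write(fd, buf, strlen(buf)) < 0) {
        perror("write");
        return 1;
    }
    /* a flush request on the emulated device maps to fsync() here */
    if (fsync(fd) < 0)
        perror("fsync");
    close(fd);
    return 0;
}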
@@ -289,8 +289,6 @@ static int blkif_queue_request(struct request *req)
     ring_req->operation = rq_data_dir(req) ?
         BLKIF_OP_WRITE : BLKIF_OP_READ;
-    if (req->cmd_flags & REQ_HARDBARRIER)
-        ring_req->operation = BLKIF_OP_WRITE_BARRIER;
     ring_req->nr_segments = blk_rq_map_sg(req->q, req, info->sg);
     BUG_ON(ring_req->nr_segments > BLKIF_MAX_SEGMENTS_PER_REQUEST);
...
@@ -706,7 +706,7 @@ static struct mdk_personality *find_pers(int level, char *clevel)
 /* return the offset of the super block in 512byte sectors */
 static inline sector_t calc_dev_sboffset(struct block_device *bdev)
 {
-    sector_t num_sectors = bdev->bd_inode->i_size / 512;
+    sector_t num_sectors = i_size_read(bdev->bd_inode) / 512;
     return MD_NEW_SIZE_SECTORS(num_sectors);
 }
@@ -1386,7 +1386,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
     */
     switch(minor_version) {
     case 0:
-        sb_start = rdev->bdev->bd_inode->i_size >> 9;
+        sb_start = i_size_read(rdev->bdev->bd_inode) >> 9;
         sb_start -= 8*2;
         sb_start &= ~(sector_t)(4*2-1);
         break;
@@ -1472,7 +1472,7 @@ static int super_1_load(mdk_rdev_t *rdev, mdk_rdev_t *refdev, int minor_version)
         ret = 0;
     }
     if (minor_version)
-        rdev->sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+        rdev->sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
             le64_to_cpu(sb->data_offset);
     else
         rdev->sectors = rdev->sb_start;
@@ -1680,7 +1680,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
         return 0; /* component must fit device */
     if (rdev->sb_start < rdev->data_offset) {
         /* minor versions 1 and 2; superblock before data */
-        max_sectors = rdev->bdev->bd_inode->i_size >> 9;
+        max_sectors = i_size_read(rdev->bdev->bd_inode) >> 9;
         max_sectors -= rdev->data_offset;
         if (!num_sectors || num_sectors > max_sectors)
             num_sectors = max_sectors;
@@ -1690,7 +1690,7 @@ super_1_rdev_size_change(mdk_rdev_t *rdev, sector_t num_sectors)
     } else {
         /* minor version 0; superblock after data */
         sector_t sb_start;
-        sb_start = (rdev->bdev->bd_inode->i_size >> 9) - 8*2;
+        sb_start = (i_size_read(rdev->bdev->bd_inode) >> 9) - 8*2;
         sb_start &= ~(sector_t)(4*2 - 1);
         max_sectors = rdev->sectors + sb_start - rdev->sb_start;
         if (!num_sectors || num_sectors > max_sectors)
@@ -2584,7 +2584,7 @@ rdev_size_store(mdk_rdev_t *rdev, const char *buf, size_t len)
         if (!sectors)
             return -EBUSY;
     } else if (!sectors)
-        sectors = (rdev->bdev->bd_inode->i_size >> 9) -
+        sectors = (i_size_read(rdev->bdev->bd_inode) >> 9) -
             rdev->data_offset;
     }
     if (sectors < my_mddev->dev_sectors)
@@ -2797,7 +2797,7 @@ static mdk_rdev_t *md_import_device(dev_t newdev, int super_format, int super_minor)
     kobject_init(&rdev->kobj, &rdev_ktype);
-    size = rdev->bdev->bd_inode->i_size >> BLOCK_SIZE_BITS;
+    size = i_size_read(rdev->bdev->bd_inode) >> BLOCK_SIZE_BITS;
     if (!size) {
         printk(KERN_WARNING
             "md: %s has zero or unknown size, marking faulty!\n",
@@ -5235,8 +5235,8 @@ static int add_new_disk(mddev_t * mddev, mdu_disk_info_t *info)
         if (!mddev->persistent) {
             printk(KERN_INFO "md: nonpersistent superblock ...\n");
-            rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+            rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
         } else
             rdev->sb_start = calc_dev_sboffset(rdev->bdev);
         rdev->sectors = rdev->sb_start;
@@ -5306,7 +5306,7 @@ static int hot_add_disk(mddev_t * mddev, dev_t dev)
     if (mddev->persistent)
         rdev->sb_start = calc_dev_sboffset(rdev->bdev);
     else
-        rdev->sb_start = rdev->bdev->bd_inode->i_size / 512;
+        rdev->sb_start = i_size_read(rdev->bdev->bd_inode) / 512;
     rdev->sectors = rdev->sb_start;
...
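Every direct bd_inode->i_size read in md.c is converted to i_size_read(). The point of the helper is that i_size is a 64-bit value that cannot be loaded atomically on 32-bit SMP kernels, so i_size_read() retries under a sequence counter until it observes a consistent value. The standalone sketch below mimics that seqcount read loop with C11 atomics; the types and helper names are invented for the illustration and are not the kernel's implementation.

/* Minimal sketch of a seqcount-protected 64-bit read, the idea behind
 * i_size_read() on 32-bit SMP; illustrative only, single-threaded demo. */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

struct sized_object {
    atomic_uint seq;    /* odd while a writer is mid-update */
    uint64_t size;      /* cannot be loaded atomically on 32-bit */
};

static uint64_t size_read(struct sized_object *o)
{
    unsigned int s;
    uint64_t v;

    for (;;) {
        s = atomic_load(&o->seq);
        if (s & 1)
            continue;       /* writer in progress, spin and retry */
        v = o->size;        /* may be torn if a writer raced with us */
        if (atomic_load(&o->seq) == s)
            return v;       /* sequence unchanged, the value is consistent */
    }
}

static void size_write(struct sized_object *o, uint64_t new_size)
{
    atomic_fetch_add(&o->seq, 1);   /* begin: make the sequence odd */
    o->size = new_size;
    atomic_fetch_add(&o->seq, 1);   /* end: make it even again */
}

int main(void)
{
    struct sized_object o = { .seq = 0, .size = 0 };

    size_write(&o, 1ULL << 40);
    printf("size = %llu\n", (unsigned long long)size_read(&o));
    return 0;
}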
@@ -320,19 +320,11 @@ static int scsi_check_sense(struct scsi_cmnd *scmd)
                 "changed. The Linux SCSI layer does not "
                 "automatically adjust these parameters.\n");
-        if (scmd->request->cmd_flags & REQ_HARDBARRIER)
-            /*
-             * barrier requests should always retry on UA
-             * otherwise block will get a spurious error
-             */
-            return NEEDS_RETRY;
-        else
-            /*
-             * for normal (non barrier) commands, pass the
-             * UA upwards for a determination in the
-             * completion functions
-             */
-            return SUCCESS;
+        /*
+         * Pass the UA upwards for a determination in the completion
+         * functions.
+         */
+        return SUCCESS;
         /* these three are not supported */
     case COPY_ABORTED:
...
@@ -331,10 +331,7 @@ static struct urb *uas_alloc_cmd_urb(struct uas_dev_info *devinfo, gfp_t gfp,
     iu->iu_id = IU_ID_COMMAND;
     iu->tag = cpu_to_be16(stream_id);
-    if (sdev->ordered_tags && (cmnd->request->cmd_flags & REQ_HARDBARRIER))
-        iu->prio_attr = UAS_ORDERED_TAG;
-    else
-        iu->prio_attr = UAS_SIMPLE_TAG;
+    iu->prio_attr = UAS_SIMPLE_TAG;
     iu->len = len;
     int_to_scsilun(sdev->lun, &iu->lun);
     memcpy(iu->cdb, cmnd->cmnd, cmnd->cmd_len);
...
@@ -370,6 +370,9 @@ struct bio *bio_kmalloc(gfp_t gfp_mask, int nr_iovecs)
 {
     struct bio *bio;
+    if (nr_iovecs > UIO_MAXIOV)
+        return NULL;
+
     bio = kmalloc(sizeof(struct bio) + nr_iovecs * sizeof(struct bio_vec),
               gfp_mask);
     if (unlikely(!bio))
@@ -697,8 +700,12 @@ static void bio_free_map_data(struct bio_map_data *bmd)
 static struct bio_map_data *bio_alloc_map_data(int nr_segs, int iov_count,
                            gfp_t gfp_mask)
 {
-    struct bio_map_data *bmd = kmalloc(sizeof(*bmd), gfp_mask);
+    struct bio_map_data *bmd;
+
+    if (iov_count > UIO_MAXIOV)
+        return NULL;
+
+    bmd = kmalloc(sizeof(*bmd), gfp_mask);
     if (!bmd)
         return NULL;
@@ -827,6 +834,12 @@ struct bio *bio_copy_user_iov(struct request_queue *q,
         end = (uaddr + iov[i].iov_len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         start = uaddr >> PAGE_SHIFT;
+        /*
+         * Overflow, abort
+         */
+        if (end < start)
+            return ERR_PTR(-EINVAL);
+
         nr_pages += end - start;
         len += iov[i].iov_len;
     }
@@ -955,6 +968,12 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
         unsigned long end = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
         unsigned long start = uaddr >> PAGE_SHIFT;
+        /*
+         * Overflow, abort
+         */
+        if (end < start)
+            return ERR_PTR(-EINVAL);
+
         nr_pages += end - start;
         /*
         * buffer must be aligned to at least hardsector size for now
@@ -982,7 +1001,7 @@ static struct bio *__bio_map_user_iov(struct request_queue *q,
         unsigned long start = uaddr >> PAGE_SHIFT;
         const int local_nr_pages = end - start;
         const int page_limit = cur_page + local_nr_pages;
         ret = get_user_pages_fast(uaddr, local_nr_pages,
                 write_to_vm, &pages[cur_page]);
         if (ret < local_nr_pages) {
...
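The new fs/bio.c checks guard the page-count arithmetic: end is computed as (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT, and if uaddr + len wraps past the top of the address space the rounded-up end page comes out smaller than the start page, so "end < start" is a cheap overflow test. A tiny standalone demonstration of the same arithmetic follows; PAGE_SHIFT is fixed at 12 purely for the demo.

/* Demonstrates how the rounded-up end page wraps below the start page
 * when uaddr + len overflows; same arithmetic as the bio.c check. */
#include <stdio.h>

#define PAGE_SHIFT 12UL
#define PAGE_SIZE  (1UL << PAGE_SHIFT)

static int page_span(unsigned long uaddr, unsigned long len,
                     unsigned long *nr_pages)
{
    unsigned long end   = (uaddr + len + PAGE_SIZE - 1) >> PAGE_SHIFT;
    unsigned long start = uaddr >> PAGE_SHIFT;

    if (end < start)        /* uaddr + len wrapped around: reject */
        return -1;
    *nr_pages = end - start;
    return 0;
}

int main(void)
{
    unsigned long n;

    if (page_span(0x1000, 3 * PAGE_SIZE, &n) == 0)
        printf("ok: %lu pages\n", n);
    if (page_span(~0UL - PAGE_SIZE, 4 * PAGE_SIZE, &n) < 0)
        printf("overflow detected and rejected\n");
    return 0;
}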
@@ -111,12 +111,14 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
     read_lock(&tasklist_lock);
     switch (which) {
         case IOPRIO_WHO_PROCESS:
+            rcu_read_lock();
             if (!who)
                 p = current;
             else
                 p = find_task_by_vpid(who);
             if (p)
                 ret = set_task_ioprio(p, ioprio);
+            rcu_read_unlock();
             break;
         case IOPRIO_WHO_PGRP:
             if (!who)
@@ -139,7 +141,12 @@ SYSCALL_DEFINE3(ioprio_set, int, which, int, who, int, ioprio)
                 break;
             do_each_thread(g, p) {
-                if (__task_cred(p)->uid != who)
+                int match;
+
+                rcu_read_lock();
+                match = __task_cred(p)->uid == who;
+                rcu_read_unlock();
+                if (!match)
                     continue;
                 ret = set_task_ioprio(p, ioprio);
                 if (ret)
@@ -200,12 +207,14 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
     read_lock(&tasklist_lock);
     switch (which) {
         case IOPRIO_WHO_PROCESS:
+            rcu_read_lock();
             if (!who)
                 p = current;
             else
                 p = find_task_by_vpid(who);
             if (p)
                 ret = get_task_ioprio(p);
+            rcu_read_unlock();
             break;
         case IOPRIO_WHO_PGRP:
             if (!who)
@@ -232,7 +241,12 @@ SYSCALL_DEFINE2(ioprio_get, int, which, int, who)
                 break;
             do_each_thread(g, p) {
-                if (__task_cred(p)->uid != user->uid)
+                int match;
+
+                rcu_read_lock();
+                match = __task_cred(p)->uid == user->uid;
+                rcu_read_unlock();
+                if (!match)
                     continue;
                 tmpio = get_task_ioprio(p);
                 if (tmpio < 0)
...
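The ioprio hunks put rcu_read_lock()/rcu_read_unlock() around find_task_by_vpid() and the __task_cred() dereference, snapshotting the comparison into a local variable before acting on it; tasklist_lock alone does not protect those RCU-managed structures. The userspace sketch below mirrors only the shape of that fix, with a pthread rwlock standing in for the RCU read side; it is illustrative, not kernel code.

/* Userspace sketch of the pattern used in the ioprio fix: hold the
 * read-side lock only around the dereference, snapshot the result,
 * drop the lock, then act on the snapshot. */
#include <pthread.h>
#include <stdio.h>

struct cred { unsigned int uid; };

static pthread_rwlock_t cred_lock = PTHREAD_RWLOCK_INITIALIZER;
static struct cred task_cred = { .uid = 1000 };

static int uid_matches(unsigned int who)
{
    int match;

    pthread_rwlock_rdlock(&cred_lock);  /* rcu_read_lock() in the kernel code */
    match = task_cred.uid == who;
    pthread_rwlock_unlock(&cred_lock);  /* rcu_read_unlock() */
    return match;
}

int main(void)
{
    printf("uid 1000 matches: %d\n", uid_matches(1000));
    printf("uid 0 matches:    %d\n", uid_matches(0));
    return 0;
}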
@@ -66,10 +66,6 @@
 #define bio_offset(bio)     bio_iovec((bio))->bv_offset
 #define bio_segments(bio)   ((bio)->bi_vcnt - (bio)->bi_idx)
 #define bio_sectors(bio)    ((bio)->bi_size >> 9)
-#define bio_empty_barrier(bio) \
-    ((bio->bi_rw & REQ_HARDBARRIER) && \
-     !bio_has_data(bio) && \
-     !(bio->bi_rw & REQ_DISCARD))
 static inline unsigned int bio_cur_bytes(struct bio *bio)
 {
...
@@ -122,7 +122,6 @@ enum rq_flag_bits {
     __REQ_FAILFAST_TRANSPORT, /* no driver retries of transport errors */
     __REQ_FAILFAST_DRIVER,    /* no driver retries of driver errors */
-    __REQ_HARDBARRIER,        /* may not be passed by drive either */
     __REQ_SYNC,               /* request is sync (sync write or read) */
     __REQ_META,               /* metadata io request */
     __REQ_DISCARD,            /* request to discard sectors */
@@ -159,7 +158,6 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_DEV        (1 << __REQ_FAILFAST_DEV)
 #define REQ_FAILFAST_TRANSPORT  (1 << __REQ_FAILFAST_TRANSPORT)
 #define REQ_FAILFAST_DRIVER     (1 << __REQ_FAILFAST_DRIVER)
-#define REQ_HARDBARRIER         (1 << __REQ_HARDBARRIER)
 #define REQ_SYNC                (1 << __REQ_SYNC)
 #define REQ_META                (1 << __REQ_META)
 #define REQ_DISCARD             (1 << __REQ_DISCARD)
@@ -168,8 +166,8 @@ enum rq_flag_bits {
 #define REQ_FAILFAST_MASK \
     (REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
 #define REQ_COMMON_MASK \
-    (REQ_WRITE | REQ_FAILFAST_MASK | REQ_HARDBARRIER | REQ_SYNC | \
-     REQ_META | REQ_DISCARD | REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
+    (REQ_WRITE | REQ_FAILFAST_MASK | REQ_SYNC | REQ_META | REQ_DISCARD | \
+     REQ_NOIDLE | REQ_FLUSH | REQ_FUA)
 #define REQ_CLONE_MASK          REQ_COMMON_MASK
 #define REQ_UNPLUG              (1 << __REQ_UNPLUG)
...
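After dropping __REQ_HARDBARRIER, the remaining request flags still follow the same idiom: an enum of bit positions plus matching (1 << bit) masks that are OR-ed into composite masks such as REQ_COMMON_MASK. A minimal standalone example of that idiom; the DEMO_* names are invented for the demo.

/* Demo of the bit-position enum + (1 << bit) mask idiom used by
 * enum rq_flag_bits; the flag names below are made up. */
#include <stdio.h>

enum demo_flag_bits {
    __DEMO_WRITE,
    __DEMO_SYNC,
    __DEMO_FLUSH,
};

#define DEMO_WRITE       (1U << __DEMO_WRITE)
#define DEMO_SYNC        (1U << __DEMO_SYNC)
#define DEMO_FLUSH       (1U << __DEMO_FLUSH)
#define DEMO_COMMON_MASK (DEMO_WRITE | DEMO_SYNC | DEMO_FLUSH)

int main(void)
{
    unsigned int flags = DEMO_WRITE | DEMO_FLUSH;

    if (flags & DEMO_FLUSH)
        puts("flush requested");
    printf("common bits: 0x%x\n", flags & DEMO_COMMON_MASK);
    return 0;
}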
@@ -552,8 +552,7 @@ static inline void blk_clear_queue_full(struct request_queue *q, int sync)
  * it already be started by driver.
  */
 #define RQ_NOMERGE_FLAGS    \
-    (REQ_NOMERGE | REQ_STARTED | REQ_HARDBARRIER | REQ_SOFTBARRIER | \
-     REQ_FLUSH | REQ_FUA)
+    (REQ_NOMERGE | REQ_STARTED | REQ_SOFTBARRIER | REQ_FLUSH | REQ_FUA)
 #define rq_mergeable(rq)    \
     (!((rq)->cmd_flags & RQ_NOMERGE_FLAGS) && \
      (((rq)->cmd_flags & REQ_DISCARD) || \
...
@@ -53,7 +53,7 @@
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.9rc2"
+#define REL_VERSION "8.3.9"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 95
...
@@ -76,7 +76,6 @@ int put_io_context(struct io_context *ioc);
 void exit_io_context(struct task_struct *task);
 struct io_context *get_io_context(gfp_t gfp_flags, int node);
 struct io_context *alloc_io_context(gfp_t gfp_flags, int node);
-void copy_io_context(struct io_context **pdst, struct io_context **psrc);
 #else
 static inline void exit_io_context(struct task_struct *task)
 {
...
@@ -168,7 +168,6 @@ static int act_log_check(struct blk_trace *bt, u32 what, sector_t sector,
 static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
                                  BLK_TC_ACT(BLK_TC_WRITE) };
-#define BLK_TC_HARDBARRIER  BLK_TC_BARRIER
 #define BLK_TC_RAHEAD       BLK_TC_AHEAD
 /* The ilog2() calls fall out because they're constant */
@@ -196,7 +195,6 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
         return;
     what |= ddir_act[rw & WRITE];
-    what |= MASK_TC_BIT(rw, HARDBARRIER);
     what |= MASK_TC_BIT(rw, SYNC);
     what |= MASK_TC_BIT(rw, RAHEAD);
     what |= MASK_TC_BIT(rw, META);
@@ -1807,8 +1805,6 @@ void blk_fill_rwbs(char *rwbs, u32 rw, int bytes)
     if (rw & REQ_RAHEAD)
         rwbs[i++] = 'A';
-    if (rw & REQ_HARDBARRIER)
-        rwbs[i++] = 'B';
     if (rw & REQ_SYNC)
         rwbs[i++] = 'S';
     if (rw & REQ_META)
...