Commit 456eac94 authored by Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.dk/linux-2.6-block

* 'for-linus' of git://git.kernel.dk/linux-2.6-block:
  fs/bio.c: fix shadows sparse warning
  drbd: The kernel code is now equivalent to out of tree release 8.3.7
  drbd: Allow online resizing of DRBD devices while peer not reachable (needs to be explicitly forced)
  drbd: Don't go into StandAlone mode when authentication fails because of a network error
  drivers/block/drbd/drbd_receiver.c: correct NULL test
  cfq-iosched: Respect ioprio_class when preempting
  genhd: overlapping variable definition
  block: removed unused as_io_context
  DM: Fix device mapper topology stacking
  block: bdev_stack_limits wrapper
  block: Fix discard alignment calculation and printing
  block: Correct handling of bottom device misalignment
  drbd: check on CONFIG_LBDAF, not LBD
  drivers/block/drbd: Correct NULL test
  drbd: Silenced an assert that could be triggered after changing the write ordering method
  drbd: Kconfig fix
  drbd: Fix for a race between IO and a detach operation [Bugz 262]
  drbd: Use drbd_crypto_is_hash() instead of an open coded check
parents dedd0c2a f06f135d
--- a/block/blk-ioc.c
+++ b/block/blk-ioc.c
@@ -39,8 +39,6 @@ int put_io_context(struct io_context *ioc)
 	if (atomic_long_dec_and_test(&ioc->refcount)) {
 		rcu_read_lock();
-		if (ioc->aic && ioc->aic->dtor)
-			ioc->aic->dtor(ioc->aic);
 		cfq_dtor(ioc);
 		rcu_read_unlock();
@@ -76,8 +74,6 @@ void exit_io_context(struct task_struct *task)
 	task_unlock(task);
 	if (atomic_dec_and_test(&ioc->nr_tasks)) {
-		if (ioc->aic && ioc->aic->exit)
-			ioc->aic->exit(ioc->aic);
 		cfq_exit(ioc);
 	}
@@ -97,7 +93,6 @@ struct io_context *alloc_io_context(gfp_t gfp_flags, int node)
 		ret->ioprio = 0;
 		ret->last_waited = jiffies; /* doesn't matter... */
 		ret->nr_batch_requests = 0; /* because this is 0 */
-		ret->aic = NULL;
 		INIT_RADIX_TREE(&ret->radix_root, GFP_ATOMIC | __GFP_HIGH);
 		INIT_HLIST_HEAD(&ret->cic_list);
 		ret->ioc_data = NULL;
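These aic hooks were dead code: with the anticipatory (AS) I/O scheduler gone from the tree, nothing ever installed a dtor or exit callback, so this merge drops the calls here along with the aic field and the struct as_io_context definition itself (see the include/linux/iocontext.h hunks at the end).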
--- a/block/blk-settings.c
+++ b/block/blk-settings.c
@@ -528,7 +528,7 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		     sector_t offset)
 {
 	sector_t alignment;
-	unsigned int top, bottom;
+	unsigned int top, bottom, ret = 0;
 
 	t->max_sectors = min_not_zero(t->max_sectors, b->max_sectors);
 	t->max_hw_sectors = min_not_zero(t->max_hw_sectors, b->max_hw_sectors);
@@ -546,6 +546,8 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	t->max_segment_size = min_not_zero(t->max_segment_size,
 					   b->max_segment_size);
 
+	t->misaligned |= b->misaligned;
+
 	alignment = queue_limit_alignment_offset(b, offset);
 
 	/* Bottom device has different alignment.  Check that it is
@@ -558,8 +560,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		bottom = max(b->physical_block_size, b->io_min) + alignment;
 
 		/* Verify that top and bottom intervals line up */
-		if (max(top, bottom) & (min(top, bottom) - 1))
+		if (max(top, bottom) & (min(top, bottom) - 1)) {
 			t->misaligned = 1;
+			ret = -1;
+		}
 	}
 
 	t->logical_block_size = max(t->logical_block_size,
@@ -578,18 +582,21 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 	if (t->physical_block_size & (t->logical_block_size - 1)) {
 		t->physical_block_size = t->logical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Minimum I/O a multiple of the physical block size? */
 	if (t->io_min & (t->physical_block_size - 1)) {
 		t->io_min = t->physical_block_size;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Optimal I/O a multiple of the physical block size? */
 	if (t->io_opt & (t->physical_block_size - 1)) {
 		t->io_opt = 0;
 		t->misaligned = 1;
+		ret = -1;
 	}
 
 	/* Find lowest common alignment_offset */
@@ -597,8 +604,10 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 		& (max(t->physical_block_size, t->io_min) - 1);
 
 	/* Verify that new alignment_offset is on a logical block boundary */
-	if (t->alignment_offset & (t->logical_block_size - 1))
+	if (t->alignment_offset & (t->logical_block_size - 1)) {
 		t->misaligned = 1;
+		ret = -1;
+	}
 
 	/* Discard alignment and granularity */
 	if (b->discard_granularity) {
@@ -626,10 +635,32 @@ int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			(t->discard_granularity - 1);
 	}
 
-	return t->misaligned ? -1 : 0;
+	return ret;
 }
 EXPORT_SYMBOL(blk_stack_limits);
 
+/**
+ * bdev_stack_limits - adjust queue limits for stacked drivers
+ * @t:	the stacking driver limits (top device)
+ * @bdev:  the component block_device (bottom)
+ * @start:  first data sector within component device
+ *
+ * Description:
+ *    Merges queue limits for a top device and a block_device.  Returns
+ *    0 if alignment didn't change.  Returns -1 if adding the bottom
+ *    device caused misalignment.
+ */
+int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+		      sector_t start)
+{
+	struct request_queue *bq = bdev_get_queue(bdev);
+
+	start += get_start_sect(bdev);
+
+	return blk_stack_limits(t, &bq->limits, start << 9);
+}
+EXPORT_SYMBOL(bdev_stack_limits);
+
 /**
  * disk_stack_limits - adjust queue limits for stacked drivers
  * @disk:  MD/DM gendisk (top)
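The point of the wrapper is that callers now stack against a block_device rather than a raw queue, letting the helper fold the partition offset (get_start_sect()) into the alignment math. A minimal caller sketch, where my_stack_component() is an invented name for some hypothetical stacking driver; only bdev_stack_limits() itself is from this patch:

	/* Hypothetical caller sketch; only bdev_stack_limits() is real. */
	#include <linux/blkdev.h>
	#include <linux/fs.h>
	#include <linux/errno.h>
	#include <linux/kernel.h>

	static int my_stack_component(struct queue_limits *limits,
				      struct block_device *bdev,
				      sector_t start) /* in 512-byte sectors */
	{
		char b[BDEVNAME_SIZE];

		/* bdev_stack_limits() adds the partition start itself, so
		 * pass the offset relative to the component device. */
		if (bdev_stack_limits(limits, bdev, start) < 0) {
			printk(KERN_WARNING "%s: caused an alignment inconsistency\n",
			       bdevname(bdev, b));
			return -EINVAL;
		}
		return 0;
	}

This mirrors what dm_set_device_limits() is converted to do in the drivers/md/dm-table.c hunk further down.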
--- a/block/cfq-iosched.c
+++ b/block/cfq-iosched.c
@@ -3076,6 +3076,12 @@ cfq_should_preempt(struct cfq_data *cfqd, struct cfq_queue *new_cfqq,
 	if (cfq_class_idle(cfqq))
 		return true;
 
+	/*
+	 * Don't allow a non-RT request to preempt an ongoing RT cfqq timeslice.
+	 */
+	if (cfq_class_rt(cfqq) && !cfq_class_rt(new_cfqq))
+		return false;
+
 	/*
 	 * if the new request is sync, but the currently running queue is
 	 * not, let the sync request have priority.
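In ioprio terms: a queue in the realtime class (e.g. a task started with ionice -c1) now keeps its timeslice even when a best-effort queue submits a sync request; before this check, the sync-beats-async rule just below could let the best-effort request preempt it.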
--- a/block/genhd.c
+++ b/block/genhd.c
@@ -867,7 +867,7 @@ static ssize_t disk_discard_alignment_show(struct device *dev,
 {
 	struct gendisk *disk = dev_to_disk(dev);
 
-	return sprintf(buf, "%u\n", queue_discard_alignment(disk->queue));
+	return sprintf(buf, "%d\n", queue_discard_alignment(disk->queue));
 }
 
 static DEVICE_ATTR(range, S_IRUGO, disk_range_show, NULL);
--- a/drivers/block/drbd/Kconfig
+++ b/drivers/block/drbd/Kconfig
@@ -3,7 +3,7 @@
 #
 
 comment "DRBD disabled because PROC_FS, INET or CONNECTOR not selected"
-	depends on !PROC_FS || !INET || !CONNECTOR
+	depends on PROC_FS='n' || INET='n' || CONNECTOR='n'
 
 config BLK_DEV_DRBD
 	tristate "DRBD Distributed Replicated Block Device support"
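The rewrite matters for tristate symbols: in Kconfig, !CONNECTOR evaluates to m (not n) when CONNECTOR=m, so the old expression displayed the "DRBD disabled" comment even on configurations where DRBD was perfectly buildable as a module. Comparing each symbol against 'n' turns the test into the intended pure boolean.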
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -1275,7 +1275,7 @@ struct bm_extent {
 #if DRBD_MAX_SECTORS_BM < DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_BM
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_BM
-#elif !defined(CONFIG_LBD) && BITS_PER_LONG == 32
+#elif !defined(CONFIG_LBDAF) && BITS_PER_LONG == 32
 #define DRBD_MAX_SECTORS      DRBD_MAX_SECTORS_32
 #define DRBD_MAX_SECTORS_FLEX DRBD_MAX_SECTORS_32
 #else
@@ -1371,10 +1371,9 @@ extern int is_valid_ar_handle(struct drbd_request *, sector_t);
 extern void drbd_suspend_io(struct drbd_conf *mdev);
 extern void drbd_resume_io(struct drbd_conf *mdev);
 extern char *ppsize(char *buf, unsigned long long size);
-extern sector_t drbd_new_dev_size(struct drbd_conf *,
-		struct drbd_backing_dev *);
+extern sector_t drbd_new_dev_size(struct drbd_conf *, struct drbd_backing_dev *, int);
 enum determine_dev_size { dev_size_error = -1, unchanged = 0, shrunk = 1, grew = 2 };
-extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *) __must_hold(local);
+extern enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *, int force) __must_hold(local);
 extern void resync_after_online_grow(struct drbd_conf *);
 extern void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int) __must_hold(local);
 extern int drbd_set_role(struct drbd_conf *mdev, enum drbd_role new_role,
--- a/drivers/block/drbd/drbd_main.c
+++ b/drivers/block/drbd/drbd_main.c
@@ -1298,6 +1298,7 @@ static void after_state_ch(struct drbd_conf *mdev, union drbd_state os,
 			dev_err(DEV, "Sending state in drbd_io_error() failed\n");
 		}
 
+		wait_event(mdev->misc_wait, !atomic_read(&mdev->local_cnt));
 		lc_destroy(mdev->resync);
 		mdev->resync = NULL;
 		lc_destroy(mdev->act_log);
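This is the Bugz 262 race fix from the merge summary: before the resync and activity-log LRU caches are torn down, the detach path now waits for local_cnt to drain to zero, so no in-flight local I/O can still be referencing the caches that lc_destroy() is about to free.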
--- a/drivers/block/drbd/drbd_nl.c
+++ b/drivers/block/drbd/drbd_nl.c
@@ -510,7 +510,7 @@ void drbd_resume_io(struct drbd_conf *mdev)
  * Returns 0 on success, negative return values indicate errors.
  * You should call drbd_md_sync() after calling this function.
  */
-enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
+enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev, int force) __must_hold(local)
 {
 	sector_t prev_first_sect, prev_size; /* previous meta location */
 	sector_t la_size;
@@ -541,7 +541,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
 	/* TODO: should only be some assert here, not (re)init... */
 	drbd_md_set_sector_offsets(mdev, mdev->ldev);
 
-	size = drbd_new_dev_size(mdev, mdev->ldev);
+	size = drbd_new_dev_size(mdev, mdev->ldev, force);
 
 	if (drbd_get_capacity(mdev->this_bdev) != size ||
 	    drbd_bm_capacity(mdev) != size) {
@@ -596,7 +596,7 @@ enum determine_dev_size drbd_determin_dev_size(struct drbd_conf *mdev) __must_hold(local)
 }
 
 sector_t
-drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
+drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev, int assume_peer_has_space)
 {
 	sector_t p_size = mdev->p_size;   /* partner's disk size. */
 	sector_t la_size = bdev->md.la_size_sect; /* last agreed size. */
@@ -606,6 +606,11 @@ drbd_new_dev_size(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 
 	m_size = drbd_get_max_capacity(bdev);
 
+	if (mdev->state.conn < C_CONNECTED && assume_peer_has_space) {
+		dev_warn(DEV, "Resize while not connected was forced by the user!\n");
+		p_size = m_size;
+	}
+
 	if (p_size && m_size) {
 		size = min_t(sector_t, p_size, m_size);
 	} else {
@@ -965,7 +970,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 
 	/* Prevent shrinking of consistent devices ! */
 	if (drbd_md_test_flag(nbc, MDF_CONSISTENT) &&
-	    drbd_new_dev_size(mdev, nbc) < nbc->md.la_size_sect) {
+	    drbd_new_dev_size(mdev, nbc, 0) < nbc->md.la_size_sect) {
 		dev_warn(DEV, "refusing to truncate a consistent device\n");
 		retcode = ERR_DISK_TO_SMALL;
 		goto force_diskless_dec;
@@ -1052,7 +1057,7 @@ static int drbd_nl_disk_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	    !drbd_md_test_flag(mdev->ldev, MDF_CONNECTED_IND))
 		set_bit(USE_DEGR_WFC_T, &mdev->flags);
 
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, 0);
 	if (dd == dev_size_error) {
 		retcode = ERR_NOMEM_BITMAP;
 		goto force_diskless_dec;
@@ -1271,7 +1276,7 @@ static int drbd_nl_net_conf(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 			goto fail;
 		}
 
-		if (crypto_tfm_alg_type(crypto_hash_tfm(tfm)) != CRYPTO_ALG_TYPE_SHASH) {
+		if (!drbd_crypto_is_hash(crypto_hash_tfm(tfm))) {
 			retcode = ERR_AUTH_ALG_ND;
 			goto fail;
 		}
@@ -1504,7 +1509,7 @@ static int drbd_nl_resize(struct drbd_conf *mdev, struct drbd_nl_cfg_req *nlp,
 	}
 
 	mdev->ldev->dc.disk_size = (sector_t)rs.resize_size;
-	dd = drbd_determin_dev_size(mdev);
+	dd = drbd_determin_dev_size(mdev, rs.resize_force);
 	drbd_md_sync(mdev);
 	put_ldev(mdev);
 	if (dd == dev_size_error) {
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -878,9 +878,13 @@ static int drbd_connect(struct drbd_conf *mdev)
 
 	if (mdev->cram_hmac_tfm) {
 		/* drbd_request_state(mdev, NS(conn, WFAuth)); */
-		if (!drbd_do_auth(mdev)) {
+		switch (drbd_do_auth(mdev)) {
+		case -1:
 			dev_err(DEV, "Authentication of peer failed\n");
 			return -1;
+		case 0:
+			dev_err(DEV, "Authentication of peer failed, trying again.\n");
+			return 0;
 		}
 	}
@@ -1201,10 +1205,11 @@ static int receive_Barrier(struct drbd_conf *mdev, struct p_header *h)
 	case WO_bdev_flush:
 	case WO_drain_io:
-		D_ASSERT(rv == FE_STILL_LIVE);
-		set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
-		drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
-		rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		if (rv == FE_STILL_LIVE) {
+			set_bit(DE_BARRIER_IN_NEXT_EPOCH_ISSUED, &mdev->current_epoch->flags);
+			drbd_wait_ee_list_empty(mdev, &mdev->active_ee);
+			rv = drbd_flush_after_epoch(mdev, mdev->current_epoch);
+		}
 
 		if (rv == FE_RECYCLED)
 			return TRUE;
@@ -2865,7 +2870,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 		/* Never shrink a device with usable data during connect.
 		   But allow online shrinking if we are connected. */
-		if (drbd_new_dev_size(mdev, mdev->ldev) <
+		if (drbd_new_dev_size(mdev, mdev->ldev, 0) <
 		   drbd_get_capacity(mdev->this_bdev) &&
 		   mdev->state.disk >= D_OUTDATED &&
 		   mdev->state.conn < C_CONNECTED) {
@@ -2880,7 +2885,7 @@ static int receive_sizes(struct drbd_conf *mdev, struct p_header *h)
 #undef min_not_zero
 
 	if (get_ldev(mdev)) {
-		dd = drbd_determin_dev_size(mdev);
+		dd = drbd_determin_dev_size(mdev, 0);
 		put_ldev(mdev);
 		if (dd == dev_size_error)
 			return FALSE;
@@ -3830,10 +3835,17 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	dev_err(DEV, "This kernel was build without CONFIG_CRYPTO_HMAC.\n");
 	dev_err(DEV, "You need to disable 'cram-hmac-alg' in drbd.conf.\n");
-	return 0;
+	return -1;
 }
 #else
 
 #define CHALLENGE_LEN 64
 
+/* Return value:
+	1 - auth succeeded,
+	0 - failed, try again (network error),
+	-1 - auth failed, don't try again.
+*/
+
 static int drbd_do_auth(struct drbd_conf *mdev)
 {
 	char my_challenge[CHALLENGE_LEN];  /* 64 Bytes... */
@@ -3854,7 +3866,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 				(u8 *)mdev->net_conf->shared_secret, key_len);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_setkey() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
@@ -3877,14 +3889,14 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	if (p.length > CHALLENGE_LEN*2) {
 		dev_err(DEV, "expected AuthChallenge payload too big.\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
 
 	peers_ch = kmalloc(p.length, GFP_NOIO);
 	if (peers_ch == NULL) {
 		dev_err(DEV, "kmalloc of peers_ch failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
@@ -3900,7 +3912,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	response = kmalloc(resp_size, GFP_NOIO);
 	if (response == NULL) {
 		dev_err(DEV, "kmalloc of response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
@@ -3910,7 +3922,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	rv = crypto_hash_digest(&desc, &sg, sg.length, response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
@@ -3944,9 +3956,9 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	}
 
 	right_response = kmalloc(resp_size, GFP_NOIO);
-	if (response == NULL) {
+	if (right_response == NULL) {
 		dev_err(DEV, "kmalloc of right_response failed\n");
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
@@ -3955,7 +3967,7 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	rv = crypto_hash_digest(&desc, &sg, sg.length, right_response);
 	if (rv) {
 		dev_err(DEV, "crypto_hash_digest() failed with %d\n", rv);
-		rv = 0;
+		rv = -1;
 		goto fail;
 	}
@@ -3964,6 +3976,8 @@ static int drbd_do_auth(struct drbd_conf *mdev)
 	if (rv)
 		dev_info(DEV, "Peer authenticated using %d bytes of '%s' HMAC\n",
 			 resp_size, mdev->net_conf->cram_hmac_alg);
+	else
+		rv = -1;
 
 fail:
 	kfree(peers_ch);
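Taken together, these hunks give drbd_do_auth() the tri-state contract spelled out in its new comment block: 1 for success, 0 for a transient network error during the handshake, -1 for a genuine authentication failure. drbd_connect() then only drops the link permanently (the StandAlone case from the commit message) on -1; a 0 return propagates upward as "retry the handshake" instead.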
--- a/drivers/md/dm-table.c
+++ b/drivers/md/dm-table.c
@@ -503,16 +503,15 @@ int dm_set_device_limits(struct dm_target *ti, struct dm_dev *dev,
 		return 0;
 	}
 
-	if (blk_stack_limits(limits, &q->limits, start << 9) < 0)
-		DMWARN("%s: target device %s is misaligned: "
+	if (bdev_stack_limits(limits, bdev, start) < 0)
+		DMWARN("%s: adding target device %s caused an alignment inconsistency: "
 		       "physical_block_size=%u, logical_block_size=%u, "
 		       "alignment_offset=%u, start=%llu",
 		       dm_device_name(ti->table->md), bdevname(bdev, b),
 		       q->limits.physical_block_size,
 		       q->limits.logical_block_size,
 		       q->limits.alignment_offset,
-		       (unsigned long long) start << 9);
+		       (unsigned long long) start << SECTOR_SHIFT);
 
 	/*
 	 * Check if merge fn is supported.
@@ -1026,9 +1025,9 @@ int dm_calculate_queue_limits(struct dm_table *table,
 		 * for the table.
 		 */
 		if (blk_stack_limits(limits, &ti_limits, 0) < 0)
-			DMWARN("%s: target device "
+			DMWARN("%s: adding target device "
 			       "(start sect %llu len %llu) "
-			       "is misaligned",
+			       "caused an alignment inconsistency",
 			       dm_device_name(table->md),
 			       (unsigned long long) ti->begin,
 			       (unsigned long long) ti->len);
@@ -1079,15 +1078,6 @@ static void dm_table_set_integrity(struct dm_table *t)
 void dm_table_set_restrictions(struct dm_table *t, struct request_queue *q,
 			       struct queue_limits *limits)
 {
-	/*
-	 * Each target device in the table has a data area that should normally
-	 * be aligned such that the DM device's alignment_offset is 0.
-	 * FIXME: Propagate alignment_offsets up the stack and warn of
-	 * sub-optimal or inconsistent settings.
-	 */
-	limits->alignment_offset = 0;
-	limits->misaligned = 0;
-
 	/*
 	 * Copy table's limits to the DM device's request_queue
 	 */
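dm_table_set_restrictions() can stop zeroing alignment_offset and misaligned because the stacking helpers now do the bookkeeping themselves: blk_stack_limits() propagates the bottom device's flag (the new t->misaligned |= b->misaligned above) and bdev_stack_limits() accounts for partition starts, so the values computed for the table are safe to hand to the request_queue unmodified.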
--- a/fs/bio.c
+++ b/fs/bio.c
@@ -78,7 +78,7 @@ static struct kmem_cache *bio_find_or_create_slab(unsigned int extra_size)
 
 	i = 0;
 	while (i < bio_slab_nr) {
-		struct bio_slab *bslab = &bio_slabs[i];
+		bslab = &bio_slabs[i];
 
 		if (!bslab->slab && entry == -1)
 			entry = i;
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -938,6 +938,8 @@ extern void blk_queue_io_opt(struct request_queue *q, unsigned int opt);
 extern void blk_set_default_limits(struct queue_limits *lim);
 extern int blk_stack_limits(struct queue_limits *t, struct queue_limits *b,
 			    sector_t offset);
+extern int bdev_stack_limits(struct queue_limits *t, struct block_device *bdev,
+			    sector_t offset);
 extern void disk_stack_limits(struct gendisk *disk, struct block_device *bdev,
 			      sector_t offset);
 extern void blk_queue_stack_limits(struct request_queue *t, struct request_queue *b);
@@ -1148,8 +1150,11 @@ static inline int queue_discard_alignment(struct request_queue *q)
 static inline int queue_sector_discard_alignment(struct request_queue *q,
 						 sector_t sector)
 {
-	return ((sector << 9) - q->limits.discard_alignment)
-		& (q->limits.discard_granularity - 1);
+	struct queue_limits *lim = &q->limits;
+	unsigned int alignment = (sector << 9) & (lim->discard_granularity - 1);
+
+	return (lim->discard_granularity + lim->discard_alignment - alignment)
+		& (lim->discard_granularity - 1);
 }
 
 static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
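To make the new queue_sector_discard_alignment() arithmetic concrete, here is the same expression worked through as a standalone userspace snippet; the numbers (64 KiB discard granularity, 4 KiB discard alignment, a query at sector 16) are purely illustrative:

	#include <stdio.h>

	int main(void)
	{
		unsigned int granularity = 65536;      /* bytes, power of two */
		unsigned int discard_alignment = 4096; /* bytes */
		unsigned long long sector = 16;        /* 512-byte sectors */

		/* offset of this sector within a granularity-sized window */
		unsigned int alignment = (sector << 9) & (granularity - 1);

		/* distance in bytes from this sector to the next boundary
		 * of the form discard_alignment + k * granularity */
		unsigned int off = (granularity + discard_alignment - alignment)
				   & (granularity - 1);

		printf("%u bytes to the next discard boundary\n", off);
		return 0;
	}

With these values alignment = 8192, so off = (65536 + 4096 - 8192) & 65535 = 61440; the next discardable boundary sits at byte 69632 (= 4096 + 65536).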
--- a/include/linux/drbd.h
+++ b/include/linux/drbd.h
@@ -53,7 +53,7 @@
 
 extern const char *drbd_buildtag(void);
-#define REL_VERSION "8.3.6"
+#define REL_VERSION "8.3.7"
 #define API_VERSION 88
 #define PRO_VERSION_MIN 86
 #define PRO_VERSION_MAX 91
--- a/include/linux/drbd_nl.h
+++ b/include/linux/drbd_nl.h
@@ -69,6 +69,7 @@ NL_PACKET(disconnect, 6, )
 
 NL_PACKET(resize, 7,
 	NL_INT64(	29,	T_MAY_IGNORE,	resize_size)
+	NL_BIT(		68,	T_MAY_IGNORE,	resize_force)
 )
 
 NL_PACKET(syncer_conf, 8,
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -256,9 +256,9 @@ extern struct hd_struct *disk_map_sector_rcu(struct gendisk *disk,
 #define part_stat_read(part, field)					\
 ({									\
 	typeof((part)->dkstats->field) res = 0;				\
-	int i;								\
-	for_each_possible_cpu(i)					\
-		res += per_cpu_ptr((part)->dkstats, i)->field;		\
+	unsigned int _cpu;						\
+	for_each_possible_cpu(_cpu)					\
+		res += per_cpu_ptr((part)->dkstats, _cpu)->field;	\
 	res;								\
 })
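The rename away from i matters because part_stat_read() is a GCC statement-expression macro: a local named i inside it shadows any i at the call site, which is exactly what sparse (and -Wshadow) complained about. A toy reproduction, not kernel code:

	/* Mimics the old part_stat_read() pattern. */
	#define SUM4(arr)				\
	({						\
		int i, _sum = 0;			\
		for (i = 0; i < 4; i++)			\
			_sum += (arr)[i];		\
		_sum;					\
	})

	int caller(const int *vals)
	{
		int i = 2;	/* outer i ... */

		/* ... is shadowed by the macro's own i inside the
		 * expansion; renaming the macro local to something
		 * unlikely, such as _cpu, avoids the collision. */
		return SUM4(vals) + i;
	}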
--- a/include/linux/iocontext.h
+++ b/include/linux/iocontext.h
@@ -4,32 +4,6 @@
 #include <linux/radix-tree.h>
 #include <linux/rcupdate.h>
 
-/*
- * This is the per-process anticipatory I/O scheduler state.
- */
-struct as_io_context {
-	spinlock_t lock;
-
-	void (*dtor)(struct as_io_context *aic); /* destructor */
-	void (*exit)(struct as_io_context *aic); /* called on task exit */
-
-	unsigned long state;
-	atomic_t nr_queued; /* queued reads & sync writes */
-	atomic_t nr_dispatched; /* number of requests gone to the drivers */
-
-	/* IO History tracking */
-	/* Thinktime */
-	unsigned long last_end_request;
-	unsigned long ttime_total;
-	unsigned long ttime_samples;
-	unsigned long ttime_mean;
-	/* Layout pattern */
-	unsigned int seek_samples;
-	sector_t last_request_pos;
-	u64 seek_total;
-	sector_t seek_mean;
-};
-
 struct cfq_queue;
 struct cfq_io_context {
 	void *key;
@@ -78,7 +52,6 @@ struct io_context {
 	unsigned long last_waited; /* Time last woken after wait for request */
 	int nr_batch_requests;     /* Number of requests left in the batch */
 
-	struct as_io_context *aic;
 	struct radix_tree_root radix_root;
 	struct hlist_head cic_list;
 	void *ioc_data;