Commit db141b2f authored by Lars Ellenberg, committed by Philipp Reisner

drbd: fix max_bio_size to be unsigned

We capped our max_bio_size (and, respectively, max_hw_sectors) with
min_t(int, lower level limit, our limit);
unfortunately, some drivers, e.g. the kvm virtio block driver, initialize their
limits to "-1U", which is of course a smaller "int" value than our limit.

Impact: we started to issue 16 MB resync requests,
which led to a protocol error and a reconnect loop.

Fix all relevant constants and parameters to be unsigned int.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 7ee1fb93
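
(Illustration, not part of the commit.) A minimal user-space sketch of the signedness pitfall described above, using a simplified stand-in for the kernel's min_t(); the names and values are made up for the example. Once both operands are forced to "int", a lower-level limit of -1U compares as -1 and always wins, so the intended cap never takes effect:

#include <stdio.h>

/* Simplified stand-in for the kernel's min_t(): force both values
 * to the given type before comparing. */
#define min_t(type, x, y) ({            \
        type __x = (x);                 \
        type __y = (y);                 \
        __x < __y ? __x : __y; })

int main(void)
{
        unsigned int lower_limit = -1U;      /* backing driver says "no limit" */
        unsigned int our_limit   = 1U << 17; /* DRBD_MAX_BIO_SIZE, 128 KiB */

        /* Old scheme: both operands cast to int, so -1U becomes -1 and wins. */
        int broken = min_t(int, lower_limit, our_limit);

        /* Fixed scheme: keep everything unsigned. */
        unsigned int fixed = lower_limit < our_limit ? lower_limit : our_limit;

        printf("broken cap = %d\n", broken); /* -1: no effective cap */
        printf("fixed  cap = %u\n", fixed);  /* 131072: capped as intended */
        return 0;
}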
@@ -1136,8 +1136,8 @@ struct drbd_conf {
 	int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
 	int rs_planed;    /* resync sectors already planned */
 	atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
-	int peer_max_bio_size;
-	int local_max_bio_size;
+	unsigned int peer_max_bio_size;
+	unsigned int local_max_bio_size;
 };

 static inline struct drbd_conf *minor_to_mdev(unsigned int minor)
@@ -1441,9 +1441,9 @@ struct bm_extent {
  * hash table. */
 #define HT_SHIFT 8
 #define DRBD_MAX_BIO_SIZE (1U<<(9+HT_SHIFT))
-#define DRBD_MAX_BIO_SIZE_SAFE (1 << 12) /* Works always = 4k */
-#define DRBD_MAX_SIZE_H80_PACKET (1 << 15) /* The old header only allows packets up to 32Kib data */
+#define DRBD_MAX_BIO_SIZE_SAFE (1U << 12) /* Works always = 4k */
+#define DRBD_MAX_SIZE_H80_PACKET (1U << 15) /* The old header only allows packets up to 32Kib data */

 /* Number of elements in the app_reads_hash */
 #define APP_R_HSIZE 15
@@ -2209,7 +2209,8 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 {
 	struct p_sizes p;
 	sector_t d_size, u_size;
-	int q_order_type, max_bio_size;
+	int q_order_type;
+	unsigned int max_bio_size;
 	int ok;

 	if (get_ldev_if_state(mdev, D_NEGOTIATING)) {
@@ -2218,7 +2219,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 		u_size = mdev->ldev->dc.disk_size;
 		q_order_type = drbd_queue_order_type(mdev);
 		max_bio_size = queue_max_hw_sectors(mdev->ldev->backing_bdev->bd_disk->queue) << 9;
-		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_BIO_SIZE);
+		max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE);
 		put_ldev(mdev);
 	} else {
 		d_size = 0;
@@ -2229,7 +2230,7 @@ int drbd_send_sizes(struct drbd_conf *mdev, int trigger_reply, enum dds_flags fl
 	/* Never allow old drbd (up to 8.3.7) to see more than 32KiB */
 	if (mdev->agreed_pro_version <= 94)
-		max_bio_size = min_t(int, max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+		max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);

 	p.d_size = cpu_to_be64(d_size);
 	p.u_size = cpu_to_be64(u_size);
@@ -3981,9 +3982,9 @@ int drbd_md_read(struct drbd_conf *mdev, struct drbd_backing_dev *bdev)
 	spin_lock_irq(&mdev->req_lock);
 	if (mdev->state.conn < C_CONNECTED) {
-		int peer;
+		unsigned int peer;

 		peer = be32_to_cpu(buffer->la_peer_max_bio_size);
-		peer = max_t(int, peer, DRBD_MAX_BIO_SIZE_SAFE);
+		peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
 		mdev->peer_max_bio_size = peer;
 	}
 	spin_unlock_irq(&mdev->req_lock);
@@ -801,8 +801,8 @@ static int drbd_check_al_size(struct drbd_conf *mdev)
 static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_size)
 {
 	struct request_queue * const q = mdev->rq_queue;
-	int max_hw_sectors = max_bio_size >> 9;
-	int max_segments = 0;
+	unsigned int max_hw_sectors = max_bio_size >> 9;
+	unsigned int max_segments = 0;

 	if (get_ldev_if_state(mdev, D_ATTACHING)) {
 		struct request_queue * const b = mdev->ldev->backing_bdev->bd_disk->queue;
@@ -835,7 +835,7 @@ static void drbd_setup_queue_param(struct drbd_conf *mdev, unsigned int max_bio_
 void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 {
-	int now, new, local, peer;
+	unsigned int now, new, local, peer;

 	now = queue_max_hw_sectors(mdev->rq_queue) << 9;
 	local = mdev->local_max_bio_size; /* Eventually last known value, from volatile memory */
@@ -846,13 +846,14 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 		mdev->local_max_bio_size = local;
 		put_ldev(mdev);
 	}
+	local = min(local, DRBD_MAX_BIO_SIZE);

 	/* We may ignore peer limits if the peer is modern enough.
 	   Because new from 8.3.8 onwards the peer can use multiple
 	   BIOs for a single peer_request */
 	if (mdev->state.conn >= C_CONNECTED) {
 		if (mdev->agreed_pro_version < 94) {
-			peer = min_t(int, mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
+			peer = min(mdev->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
 			/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
 		} else if (mdev->agreed_pro_version == 94)
 			peer = DRBD_MAX_SIZE_H80_PACKET;
@@ -860,10 +861,10 @@ void drbd_reconsider_max_bio_size(struct drbd_conf *mdev)
 			peer = DRBD_MAX_BIO_SIZE;
 	}

-	new = min_t(int, local, peer);
+	new = min(local, peer);

 	if (mdev->state.role == R_PRIMARY && new < now)
-		dev_err(DEV, "ASSERT FAILED new < now; (%d < %d)\n", new, now);
+		dev_err(DEV, "ASSERT FAILED new < now; (%u < %u)\n", new, now);

 	if (new != now)
 		dev_info(DEV, "max BIO size = %u\n", new);
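
(Illustration, not part of the commit.) The min_t(int, ...) to min(...) conversions above rely on the fact that the kernel's min() does not cast its arguments; instead, a dummy pointer comparison makes the compiler warn whenever the two argument types differ, which is presumably why DRBD_MAX_BIO_SIZE_SAFE and DRBD_MAX_SIZE_H80_PACKET also gained the "1U" suffix in this patch. A rough user-space approximation, with example values chosen for the sketch:

#include <stdio.h>

/* Rough approximation of the kernel's min() macro of that era: the
 * dummy pointer comparison triggers a "comparison of distinct pointer
 * types" warning whenever x and y have different types, so an unsigned
 * variable must be paired with an unsigned constant. */
#define min(x, y) ({                    \
        typeof(x) _min1 = (x);          \
        typeof(y) _min2 = (y);          \
        (void) (&_min1 == &_min2);      \
        _min1 < _min2 ? _min1 : _min2; })

int main(void)
{
        unsigned int max_bio_size = -1U;   /* "unlimited", as reported from below */
        unsigned int h80_limit = 1U << 15; /* DRBD_MAX_SIZE_H80_PACKET, 32 KiB */

        /* Both operands are unsigned int: no warning, and the cap holds.
         * With a plain (1 << 15) the types would differ and the pointer
         * comparison above would warn. */
        printf("capped = %u\n", min(max_bio_size, h80_limit)); /* 32768 */
        return 0;
}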