Commit bb3d000c authored by Lars Ellenberg, committed by Philipp Reisner

drbd: allow resync requests to be larger than max_segment_size

This should allow for better background resync performance.
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 45bb912b
......@@ -462,7 +462,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
unsigned long bit;
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
int max_segment_size = queue_max_segment_size(mdev->rq_queue);
int max_segment_size;
int number, i, size, pe, mx;
int align, queued, sndbuf;
......@@ -488,6 +488,11 @@ int w_make_resync_request(struct drbd_conf *mdev,
return 1;
}
/* starting with drbd 8.3.8, we can handle multi-bio EEs,
* if it should be necessary */
max_segment_size = mdev->agreed_pro_version < 94 ?
queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
mdev->c_sync_rate = calc_resync_rate(mdev);
number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
pe = atomic_read(&mdev->rs_pending_cnt);
......@@ -552,12 +557,6 @@ int w_make_resync_request(struct drbd_conf *mdev,
*
* Additionally always align bigger requests, in order to
* be prepared for all stripe sizes of software RAIDs.
*
* we _do_ care about the agreed-upon q->max_segment_size
* here, as splitting up the requests on the other side is more
* difficult. the consequence is, that on lvm and md and other
* "indirect" devices, this is dead code, since
* q->max_segment_size will be PAGE_SIZE.
*/
align = 1;
for (;;) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment