Commit aaaba345 authored by Lars Ellenberg, committed by Philipp Reisner

drbd: implement csums-after-crash-only

Checksum based resync trades CPU cycles for network bandwidth,
in situations where we expect many of the to-be-resynced blocks
to be identical on both sides already.

In a "network hiccup" scenario, it won't help:
all to-be-resynced blocks will typically be different.
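To illustrate the trade-off: the sync target sends a digest of its copy
of each to-be-resynced block, and the sync source only transmits the
full block if the digest of its own copy differs. A minimal toy sketch
of that decision (standalone C; the XOR "digest" and all names here are
made up for illustration, DRBD uses a real crypto hash selected via
csums-alg):

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>
#include <string.h>

#define BLOCK_SIZE 4096
#define DIGEST_LEN 16

/* Toy digest for illustration only. */
static void toy_digest(const uint8_t *block, uint8_t out[DIGEST_LEN])
{
	memset(out, 0, DIGEST_LEN);
	for (size_t i = 0; i < BLOCK_SIZE; i++)
		out[i % DIGEST_LEN] ^= block[i];
}

/* True if the full block must still be sent over the network. */
static bool must_send_block(const uint8_t *local_block,
			    const uint8_t peer_digest[DIGEST_LEN])
{
	uint8_t local_digest[DIGEST_LEN];

	toy_digest(local_block, local_digest);
	/* Equal digests: CPU spent, bandwidth saved. */
	return memcmp(local_digest, peer_digest, DIGEST_LEN) != 0;
}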

The use case is for the resync of *potentially* different blocks
after crash recovery -- the crash recovery had marked larger areas
(those covered by the activity log) as need-to-be-resynced,
just in case. Most of those blocks will be identical.

This option makes it possible to configure checksum based resync,
but only actually use it for the first resync after a primary crash.
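With drbd-utils, this would typically be set next to csums-alg in the
net section of drbd.conf; a sketch assuming the option keeps the
commit's name and the usual yes/no syntax (resource and hash names are
placeholders):

resource r0 {
	net {
		csums-alg sha1;			# checksum based resync, configured ...
		csums-after-crash-only yes;	# ... but only used after a primary crash
	}
}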
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
parent 6a8d68b1
--- a/drivers/block/drbd/drbd_int.h
+++ b/drivers/block/drbd/drbd_int.h
@@ -738,6 +738,8 @@ struct drbd_device {
 	struct rb_root read_requests;
 	struct rb_root write_requests;
 
+	/* use checksums for *this* resync */
+	bool use_csums;
 	/* blocks to resync in this run [unit BM_BLOCK_SIZE] */
 	unsigned long rs_total;
 	/* number of resync blocks that failed in this run */
--- a/drivers/block/drbd/drbd_receiver.c
+++ b/drivers/block/drbd/drbd_receiver.c
@@ -2555,6 +2555,8 @@ static int receive_DataRequest(struct drbd_connection *connection, struct packet
 			peer_req->w.cb = w_e_end_csum_rs_req;
 			/* used in the sector offset progress display */
 			device->bm_resync_fo = BM_SECT_TO_BIT(sector);
+			/* remember to report stats in drbd_resync_finished */
+			device->use_csums = true;
 		} else if (pi->cmd == P_OV_REPLY) {
 			/* track progress, we may need to throttle */
 			atomic_add(size >> 9, &device->rs_sect_in);
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -698,8 +698,8 @@ static int make_resync_request(struct drbd_device *const device, int cancel)
 		/* adjust very last sectors, in case we are oddly sized */
 		if (sector + (size>>9) > capacity)
 			size = (capacity-sector)<<9;
-		if (connection->agreed_pro_version >= 89 &&
-		    connection->csums_tfm) {
+
+		if (device->use_csums) {
 			switch (read_for_csum(peer_device, sector, size)) {
 			case -EIO: /* Disk failure */
 				put_ldev(device);
@@ -913,7 +913,7 @@ int drbd_resync_finished(struct drbd_device *device)
 		if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
 			khelper_cmd = "after-resync-target";
 
-		if (first_peer_device(device)->connection->csums_tfm && device->rs_total) {
+		if (device->use_csums && device->rs_total) {
			const unsigned long s = device->rs_same_csum;
			const unsigned long t = device->rs_total;
			const int ratio =
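A note on the stats this hunk gates: rs_same_csum over rs_total is the
fraction of blocks that turned out identical. The diff context above
cuts off the kernel's exact expression for "ratio"; a hedged sketch of
an overflow-safe integer percentage (illustrative, not necessarily the
exact drbd_worker.c code):

/* Integer percentage of resynced blocks whose checksums matched.
 * Guards against t == 0, and against s * 100 overflowing
 * unsigned long on 32-bit for very large bitmaps.
 */
static unsigned long same_csum_percent(unsigned long s, unsigned long t)
{
	if (t == 0)
		return 0;
	if (t < 100000)		/* s <= t, so s * 100 cannot overflow */
		return (s * 100) / t;
	return s / (t / 100);
}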
@@ -1622,6 +1622,18 @@ static void do_start_resync(struct drbd_device *device)
 	clear_bit(AHEAD_TO_SYNC_SOURCE, &device->flags);
 }
 
+static bool use_checksum_based_resync(struct drbd_connection *connection, struct drbd_device *device)
+{
+	bool csums_after_crash_only;
+	rcu_read_lock();
+	csums_after_crash_only = rcu_dereference(connection->net_conf)->csums_after_crash_only;
+	rcu_read_unlock();
+	return connection->agreed_pro_version >= 89 &&		/* supported? */
+		connection->csums_tfm &&			/* configured? */
+		(csums_after_crash_only == 0			/* use for each resync? */
+		 || test_bit(CRASHED_PRIMARY, &device->flags)); /* or only after Primary crash? */
+}
+
 /**
  * drbd_start_resync() - Start the resync process
  * @device:	DRBD device.
@@ -1756,8 +1768,12 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
 		     drbd_conn_str(ns.conn),
 		     (unsigned long) device->rs_total << (BM_BLOCK_SHIFT-10),
 		     (unsigned long) device->rs_total);
-	if (side == C_SYNC_TARGET)
+	if (side == C_SYNC_TARGET) {
 		device->bm_resync_fo = 0;
+		device->use_csums = use_checksum_based_resync(connection, device);
+	} else {
+		device->use_csums = 0;
+	}
 
 	/* Since protocol 96, we must serialize drbd_gen_and_send_sync_uuid
 	 * with w_send_oos, or the sync target will get confused as to
--- a/include/linux/drbd_genl.h
+++ b/include/linux/drbd_genl.h
@@ -171,6 +171,9 @@ GENL_struct(DRBD_NLA_NET_CONF, 5, net_conf,
 	__flg_field(28, DRBD_GENLA_F_MANDATORY | DRBD_F_INVARIANT, tentative)
 	__flg_field_def(29, DRBD_GENLA_F_MANDATORY, use_rle, DRBD_USE_RLE_DEF)
 	/* 9: __u32_field_def(30, DRBD_GENLA_F_MANDATORY, fencing_policy, DRBD_FENCING_DEF) */
+	/* 9: __str_field_def(31, DRBD_GENLA_F_MANDATORY, name, SHARED_SECRET_MAX) */
+	/* 9: __u32_field(32, DRBD_F_REQUIRED | DRBD_F_INVARIANT, peer_node_id) */
+	__flg_field_def(33, 0 /* OPTIONAL */, csums_after_crash_only, DRBD_CSUMS_AFTER_CRASH_ONLY_DEF)
 )
 
 GENL_struct(DRBD_NLA_SET_ROLE_PARMS, 6, set_role_parms,
--- a/include/linux/drbd_limits.h
+++ b/include/linux/drbd_limits.h
@@ -214,6 +214,7 @@
 #define DRBD_ALLOW_TWO_PRIMARIES_DEF	0
 #define DRBD_ALWAYS_ASBP_DEF	0
 #define DRBD_USE_RLE_DEF	1
+#define DRBD_CSUMS_AFTER_CRASH_ONLY_DEF	0
 
 #define DRBD_AL_STRIPES_MIN     1
 #define DRBD_AL_STRIPES_MAX     1024