Commit bde89a9e authored by Andreas Gruenbacher, committed by Philipp Reisner

drbd: Rename drbd_tconn -> drbd_connection

sed -i -e 's:all_tconn:connections:g' -e 's:tconn:connection:g'
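A quick sanity check (hypothetical, not part of this commit; it assumes the driver's usual in-tree path) is to grep for the old identifier after running the sed above:

    git grep -n tconn -- drivers/block/drbd || echo "no tconn references remain"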
Signed-off-by: Andreas Gruenbacher <agruen@linbit.com>
Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
parent b30ab791
@@ -315,7 +315,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
 {
 bool locked = false;
-BUG_ON(delegate && current == device->tconn->worker.task);
+BUG_ON(delegate && current == device->connection->worker.task);
 /* Serialize multiple transactions.
 * This uses test_and_set_bit, memory barrier is implicit.
@@ -354,7 +354,7 @@ void drbd_al_begin_io_commit(struct drbd_device *device, bool delegate)
 */
 void drbd_al_begin_io(struct drbd_device *device, struct drbd_interval *i, bool delegate)
 {
-BUG_ON(delegate && current == device->tconn->worker.task);
+BUG_ON(delegate && current == device->connection->worker.task);
 if (drbd_al_begin_io_prepare(device, i))
 drbd_al_begin_io_commit(device, delegate);
@@ -614,7 +614,7 @@ static int al_write_transaction(struct drbd_device *device, bool delegate)
 init_completion(&al_work.event);
 al_work.w.cb = w_al_write_transaction;
 al_work.w.device = device;
-drbd_queue_work_front(&device->tconn->sender_work, &al_work.w);
+drbd_queue_work_front(&device->connection->sender_work, &al_work.w);
 wait_for_completion(&al_work.event);
 return al_work.err;
 } else
@@ -796,7 +796,7 @@ static void drbd_try_clear_on_disk_bm(struct drbd_device *device, sector_t secto
 udw->enr = ext->lce.lc_number;
 udw->w.cb = w_update_odbm;
 udw->w.device = device;
-drbd_queue_work_front(&device->tconn->sender_work, &udw->w);
+drbd_queue_work_front(&device->connection->sender_work, &udw->w);
 } else {
 dev_warn(DEV, "Could not kmalloc an udw\n");
 }
...
@@ -119,9 +119,9 @@ static void __bm_print_lock_info(struct drbd_device *device, const char *func)
 if (!__ratelimit(&drbd_ratelimit_state))
 return;
 dev_err(DEV, "FIXME %s in %s, bitmap locked for '%s' by %s\n",
-drbd_task_to_thread_name(device->tconn, current),
+drbd_task_to_thread_name(device->connection, current),
 func, b->bm_why ?: "?",
-drbd_task_to_thread_name(device->tconn, b->bm_task));
+drbd_task_to_thread_name(device->connection, b->bm_task));
 }
 void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
@@ -138,9 +138,9 @@ void drbd_bm_lock(struct drbd_device *device, char *why, enum bm_flag flags)
 if (trylock_failed) {
 dev_warn(DEV, "%s going to '%s' but bitmap already locked for '%s' by %s\n",
-drbd_task_to_thread_name(device->tconn, current),
+drbd_task_to_thread_name(device->connection, current),
 why, b->bm_why ?: "?",
-drbd_task_to_thread_name(device->tconn, b->bm_task));
+drbd_task_to_thread_name(device->connection, b->bm_task));
 mutex_lock(&b->bm_change);
 }
 if (BM_LOCKED_MASK & b->bm_flags)
...
@@ -98,7 +98,7 @@ extern char usermode_helper[];
 #define UUID_NEW_BM_OFFSET ((u64)0x0001000000000000ULL)
 struct drbd_device;
-struct drbd_tconn;
+struct drbd_connection;
 /* to shorten dev_warn(DEV, "msg"); and relatives statements */
@@ -167,7 +167,7 @@ drbd_insert_fault(struct drbd_device *device, unsigned int type) {
 extern struct ratelimit_state drbd_ratelimit_state;
 extern struct idr minors; /* RCU, updates: genl_lock() */
-extern struct list_head drbd_tconns; /* RCU, updates: genl_lock() */
+extern struct list_head drbd_connections; /* RCU, updates: genl_lock() */
 extern const char *cmdname(enum drbd_packet cmd);
@@ -211,7 +211,7 @@ static inline void bm_xfer_ctx_bit_to_word_offset(struct bm_xfer_ctx *c)
 #endif
 }
-extern unsigned int drbd_header_size(struct drbd_tconn *tconn);
+extern unsigned int drbd_header_size(struct drbd_connection *connection);
 /**********************************************************************/
 enum drbd_thread_state {
@@ -227,7 +227,7 @@ struct drbd_thread {
 struct completion stop;
 enum drbd_thread_state t_state;
 int (*function) (struct drbd_thread *);
-struct drbd_tconn *tconn;
+struct drbd_connection *connection;
 int reset_cpu_mask;
 char name[9];
 };
@@ -247,7 +247,7 @@ struct drbd_work {
 int (*cb)(struct drbd_work *, int cancel);
 union {
 struct drbd_device *device;
-struct drbd_tconn *tconn;
+struct drbd_connection *connection;
 };
 };
@@ -289,7 +289,7 @@ struct drbd_request {
 };
 struct drbd_epoch {
-struct drbd_tconn *tconn;
+struct drbd_connection *connection;
 struct list_head list;
 unsigned int barrier_nr;
 atomic_t epoch_size; /* increased on every request added. */
@@ -483,7 +483,7 @@ struct drbd_backing_dev {
 struct block_device *backing_bdev;
 struct block_device *md_bdev;
 struct drbd_md md;
-struct disk_conf *disk_conf; /* RCU, for updates: device->tconn->conf_update */
+struct disk_conf *disk_conf; /* RCU, for updates: device->connection->conf_update */
 sector_t known_size; /* last known size of that backing device */
 };
@@ -514,7 +514,7 @@ struct fifo_buffer {
 };
 extern struct fifo_buffer *fifo_alloc(int fifo_size);
-/* flag bits per tconn */
+/* flag bits per connection */
 enum {
 NET_CONGESTED, /* The data socket is congested */
 RESOLVE_CONFLICTS, /* Set on one node, cleared on the peer! */
@@ -536,11 +536,11 @@ enum {
 DISCONNECT_SENT,
 };
-struct drbd_tconn { /* is a resource from the config file */
+struct drbd_connection { /* is a resource from the config file */
 char *name; /* Resource name */
-struct list_head all_tconn; /* linked on global drbd_tconns */
+struct list_head connections; /* linked on global drbd_connections */
 struct kref kref;
-struct idr volumes; /* <tconn, vnr> to device mapping */
+struct idr volumes; /* <connection, vnr> to device mapping */
 enum drbd_conns cstate; /* Only C_STANDALONE to C_WF_REPORT_PARAMS */
 unsigned susp:1; /* IO suspended by user */
 unsigned susp_nod:1; /* IO suspended because no data */
@@ -570,7 +570,7 @@ struct drbd_tconn { /* is a resource from the config file */
 struct list_head transfer_log; /* all requests not yet fully processed */
 struct crypto_hash *cram_hmac_tfm;
-struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by tconn->data->mutex */
+struct crypto_hash *integrity_tfm; /* checksums we compute, updates protected by connection->data->mutex */
 struct crypto_hash *peer_integrity_tfm; /* checksums we verify, only accessed from receiver thread */
 struct crypto_hash *csums_tfm;
 struct crypto_hash *verify_tfm;
@@ -618,7 +618,7 @@ struct submit_worker {
 };
 struct drbd_device {
-struct drbd_tconn *tconn;
+struct drbd_connection *connection;
 int vnr; /* volume number within the connection */
 struct kref kref;
@@ -744,7 +744,7 @@ struct drbd_device {
 struct bm_io_work bm_io_work;
 u64 ed_uuid; /* UUID of the exposed data */
 struct mutex own_state_mutex;
-struct mutex *state_mutex; /* either own_state_mutex or device->tconn->cstate_mutex */
+struct mutex *state_mutex; /* either own_state_mutex or device->connection->cstate_mutex */
 char congestion_reason; /* Why we where congested... */
 atomic_t rs_sect_in; /* for incoming resync data rate, SyncTarget */
 atomic_t rs_sect_ev; /* for submitted resync data rate, both */
@@ -752,7 +752,7 @@ struct drbd_device {
 int rs_last_events; /* counter of read or write "events" (unit sectors)
 * on the lower level device when we last looked. */
 int c_sync_rate; /* current resync rate after syncer throttle magic */
-struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, tconn->conn_update) */
+struct fifo_buffer *rs_plan_s; /* correction values of resync planer (RCU, connection->conn_update) */
 int rs_in_flight; /* resync sectors in flight (to proxy, in proxy and from proxy) */
 atomic_t ap_in_flight; /* App sectors in flight (waiting for ack) */
 unsigned int peer_max_bio_size;
@@ -773,9 +773,9 @@ static inline unsigned int device_to_minor(struct drbd_device *device)
 return device->minor;
 }
-static inline struct drbd_device *vnr_to_device(struct drbd_tconn *tconn, int vnr)
+static inline struct drbd_device *vnr_to_device(struct drbd_connection *connection, int vnr)
 {
-return (struct drbd_device *)idr_find(&tconn->volumes, vnr);
+return (struct drbd_device *)idr_find(&connection->volumes, vnr);
 }
 /*
@@ -792,25 +792,25 @@ enum dds_flags {
 extern void drbd_init_set_defaults(struct drbd_device *device);
 extern int drbd_thread_start(struct drbd_thread *thi);
 extern void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait);
-extern char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task);
+extern char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task);
 #ifdef CONFIG_SMP
 extern void drbd_thread_current_set_cpu(struct drbd_thread *thi);
-extern void drbd_calc_cpu_mask(struct drbd_tconn *tconn);
+extern void drbd_calc_cpu_mask(struct drbd_connection *connection);
 #else
 #define drbd_thread_current_set_cpu(A) ({})
 #define drbd_calc_cpu_mask(A) ({})
 #endif
-extern void tl_release(struct drbd_tconn *, unsigned int barrier_nr,
+extern void tl_release(struct drbd_connection *, unsigned int barrier_nr,
 unsigned int set_size);
-extern void tl_clear(struct drbd_tconn *);
+extern void tl_clear(struct drbd_connection *);
-extern void drbd_free_sock(struct drbd_tconn *tconn);
+extern void drbd_free_sock(struct drbd_connection *connection);
-extern int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
+extern int drbd_send(struct drbd_connection *connection, struct socket *sock,
 void *buf, size_t size, unsigned msg_flags);
-extern int drbd_send_all(struct drbd_tconn *, struct socket *, void *, size_t,
+extern int drbd_send_all(struct drbd_connection *, struct socket *, void *, size_t,
 unsigned);
-extern int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd);
+extern int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd);
-extern int drbd_send_protocol(struct drbd_tconn *tconn);
+extern int drbd_send_protocol(struct drbd_connection *connection);
 extern int drbd_send_uuids(struct drbd_device *device);
 extern int drbd_send_uuids_skip_initial_sync(struct drbd_device *device);
 extern void drbd_gen_and_send_sync_uuid(struct drbd_device *device);
@@ -818,7 +818,7 @@ extern int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum d
 extern int drbd_send_state(struct drbd_device *device, union drbd_state s);
 extern int drbd_send_current_state(struct drbd_device *device);
 extern int drbd_send_sync_param(struct drbd_device *device);
-extern void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr,
+extern void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr,
 u32 set_size);
 extern int drbd_send_ack(struct drbd_device *, enum drbd_packet,
 struct drbd_peer_request *);
@@ -841,12 +841,12 @@ extern int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int
 extern int drbd_send_bitmap(struct drbd_device *device);
 extern void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode);
-extern void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode);
+extern void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode);
 extern void drbd_free_bc(struct drbd_backing_dev *ldev);
 extern void drbd_device_cleanup(struct drbd_device *device);
 void drbd_print_uuids(struct drbd_device *device, const char *text);
-extern void conn_md_sync(struct drbd_tconn *tconn);
+extern void conn_md_sync(struct drbd_connection *connection);
 extern void drbd_md_write(struct drbd_device *device, void *buffer);
 extern void drbd_md_sync(struct drbd_device *device);
 extern int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev);
@@ -1153,17 +1153,17 @@ extern struct bio *bio_alloc_drbd(gfp_t gfp_mask);
 extern rwlock_t global_state_lock;
-extern int conn_lowest_minor(struct drbd_tconn *tconn);
+extern int conn_lowest_minor(struct drbd_connection *connection);
-enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr);
+enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr);
 extern void drbd_minor_destroy(struct kref *kref);
-extern int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts);
+extern int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts);
-extern struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts);
+extern struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts);
 extern void conn_destroy(struct kref *kref);
-struct drbd_tconn *conn_get_by_name(const char *name);
+struct drbd_connection *conn_get_by_name(const char *name);
-extern struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len,
+extern struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
 void *peer_addr, int peer_addr_len);
-extern void conn_free_crypto(struct drbd_tconn *tconn);
+extern void conn_free_crypto(struct drbd_connection *connection);
 extern int proc_details;
@@ -1198,8 +1198,8 @@ extern void drbd_reconsider_max_bio_size(struct drbd_device *device);
 extern enum drbd_state_rv drbd_set_role(struct drbd_device *device,
 enum drbd_role new_role,
 int force);
-extern bool conn_try_outdate_peer(struct drbd_tconn *tconn);
+extern bool conn_try_outdate_peer(struct drbd_connection *connection);
-extern void conn_try_outdate_peer_async(struct drbd_tconn *tconn);
+extern void conn_try_outdate_peer_async(struct drbd_connection *connection);
 extern int drbd_khelper(struct drbd_device *device, char *cmd);
 /* drbd_worker.c */
@@ -1271,11 +1271,11 @@ extern void __drbd_free_peer_req(struct drbd_device *, struct drbd_peer_request
 extern struct page *drbd_alloc_pages(struct drbd_device *, unsigned int, bool);
 extern void drbd_set_recv_tcq(struct drbd_device *device, int tcq_enabled);
 extern void _drbd_clear_done_ee(struct drbd_device *device, struct list_head *to_be_freed);
-extern void conn_flush_workqueue(struct drbd_tconn *tconn);
+extern void conn_flush_workqueue(struct drbd_connection *connection);
 extern int drbd_connected(struct drbd_device *device);
 static inline void drbd_flush_workqueue(struct drbd_device *device)
 {
-conn_flush_workqueue(device->tconn);
+conn_flush_workqueue(device->connection);
 }
 /* Yes, there is kernel_setsockopt, but only since 2.6.18.
@@ -1327,7 +1327,7 @@ static inline void drbd_tcp_quickack(struct socket *sock)
 (char*)&val, sizeof(val));
 }
-void drbd_bump_write_ordering(struct drbd_tconn *tconn, enum write_ordering_e wo);
+void drbd_bump_write_ordering(struct drbd_connection *connection, enum write_ordering_e wo);
 /* drbd_proc.c */
 extern struct proc_dir_entry *drbd_proc;
@@ -1421,9 +1421,9 @@ static inline union drbd_state drbd_read_state(struct drbd_device *device)
 union drbd_state rv;
 rv.i = device->state.i;
-rv.susp = device->tconn->susp;
+rv.susp = device->connection->susp;
-rv.susp_nod = device->tconn->susp_nod;
+rv.susp_nod = device->connection->susp_nod;
-rv.susp_fen = device->tconn->susp_fen;
+rv.susp_fen = device->connection->susp_fen;
 return rv;
 }
@@ -1505,9 +1505,9 @@ static inline void drbd_chk_io_error_(struct drbd_device *device,
 {
 if (error) {
 unsigned long flags;
-spin_lock_irqsave(&device->tconn->req_lock, flags);
+spin_lock_irqsave(&device->connection->req_lock, flags);
 __drbd_chk_io_error_(device, forcedetach, where);
-spin_unlock_irqrestore(&device->tconn->req_lock, flags);
+spin_unlock_irqrestore(&device->connection->req_lock, flags);
 }
 }
@@ -1630,31 +1630,31 @@ drbd_queue_work(struct drbd_work_queue *q, struct drbd_work *w)
 wake_up(&q->q_wait);
 }
-static inline void wake_asender(struct drbd_tconn *tconn)
+static inline void wake_asender(struct drbd_connection *connection)
 {
-if (test_bit(SIGNAL_ASENDER, &tconn->flags))
+if (test_bit(SIGNAL_ASENDER, &connection->flags))
-force_sig(DRBD_SIG, tconn->asender.task);
+force_sig(DRBD_SIG, connection->asender.task);
 }
-static inline void request_ping(struct drbd_tconn *tconn)
+static inline void request_ping(struct drbd_connection *connection)
 {
-set_bit(SEND_PING, &tconn->flags);
+set_bit(SEND_PING, &connection->flags);
-wake_asender(tconn);
+wake_asender(connection);
 }
-extern void *conn_prepare_command(struct drbd_tconn *, struct drbd_socket *);
+extern void *conn_prepare_command(struct drbd_connection *, struct drbd_socket *);
 extern void *drbd_prepare_command(struct drbd_device *, struct drbd_socket *);
-extern int conn_send_command(struct drbd_tconn *, struct drbd_socket *,
+extern int conn_send_command(struct drbd_connection *, struct drbd_socket *,
 enum drbd_packet, unsigned int, void *,
 unsigned int);
 extern int drbd_send_command(struct drbd_device *, struct drbd_socket *,
 enum drbd_packet, unsigned int, void *,
 unsigned int);
-extern int drbd_send_ping(struct drbd_tconn *tconn);
+extern int drbd_send_ping(struct drbd_connection *connection);
-extern int drbd_send_ping_ack(struct drbd_tconn *tconn);
+extern int drbd_send_ping_ack(struct drbd_connection *connection);
 extern int drbd_send_state_req(struct drbd_device *, union drbd_state, union drbd_state);
-extern int conn_send_state_req(struct drbd_tconn *, union drbd_state, union drbd_state);
+extern int conn_send_state_req(struct drbd_connection *, union drbd_state, union drbd_state);
 static inline void drbd_thread_stop(struct drbd_thread *thi)
 {
@@ -1783,7 +1783,7 @@ static inline void put_ldev(struct drbd_device *device)
 if (device->state.disk == D_FAILED) {
 /* all application IO references gone. */
 if (!test_and_set_bit(GO_DISKLESS, &device->flags))
-drbd_queue_work(&device->tconn->sender_work, &device->go_diskless);
+drbd_queue_work(&device->connection->sender_work, &device->go_diskless);
 }
 wake_up(&device->misc_wait);
 }
@@ -1865,7 +1865,7 @@ static inline int drbd_get_max_buffers(struct drbd_device *device)
 int mxb;
 rcu_read_lock();
-nc = rcu_dereference(device->tconn->net_conf);
+nc = rcu_dereference(device->connection->net_conf);
 mxb = nc ? nc->max_buffers : 1000000; /* arbitrary limit on open requests */
 rcu_read_unlock();
@@ -1908,7 +1908,7 @@ static inline int drbd_state_is_stable(struct drbd_device *device)
 /* Allow IO in BM exchange states with new protocols */
 case C_WF_BITMAP_S:
-if (device->tconn->agreed_pro_version < 96)
+if (device->connection->agreed_pro_version < 96)
 return 0;
 break;
@@ -1944,9 +1944,9 @@ static inline int drbd_state_is_stable(struct drbd_device *device)
 static inline int drbd_suspended(struct drbd_device *device)
 {
-struct drbd_tconn *tconn = device->tconn;
+struct drbd_connection *connection = device->connection;
-return tconn->susp || tconn->susp_fen || tconn->susp_nod;
+return connection->susp || connection->susp_fen || connection->susp_nod;
 }
 static inline bool may_inc_ap_bio(struct drbd_device *device)
@@ -1979,11 +1979,11 @@ static inline bool inc_ap_bio_cond(struct drbd_device *device)
 {
 bool rv = false;
-spin_lock_irq(&device->tconn->req_lock);
+spin_lock_irq(&device->connection->req_lock);
 rv = may_inc_ap_bio(device);
 if (rv)
 atomic_inc(&device->ap_bio_cnt);
-spin_unlock_irq(&device->tconn->req_lock);
+spin_unlock_irq(&device->connection->req_lock);
 return rv;
 }
@@ -2010,7 +2010,7 @@ static inline void dec_ap_bio(struct drbd_device *device)
 if (ap_bio == 0 && test_bit(BITMAP_IO, &device->flags)) {
 if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
-drbd_queue_work(&device->tconn->sender_work, &device->bm_io_work.w);
+drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
 }
 /* this currently does wake_up for every dec_ap_bio!
@@ -2022,8 +2022,8 @@ static inline void dec_ap_bio(struct drbd_device *device)
 static inline bool verify_can_do_stop_sector(struct drbd_device *device)
 {
-return device->tconn->agreed_pro_version >= 97 &&
+return device->connection->agreed_pro_version >= 97 &&
-device->tconn->agreed_pro_version != 100;
+device->connection->agreed_pro_version != 100;
 }
 static inline int drbd_set_ed_uuid(struct drbd_device *device, u64 val)
...
@@ -118,7 +118,7 @@ module_param_string(usermode_helper, usermode_helper, sizeof(usermode_helper), 0
 * as member "struct gendisk *vdisk;"
 */
 struct idr minors;
-struct list_head drbd_tconns; /* list of struct drbd_tconn */
+struct list_head drbd_connections; /* list of struct drbd_connection */
 struct kmem_cache *drbd_request_cache;
 struct kmem_cache *drbd_ee_cache; /* peer requests */
@@ -182,7 +182,7 @@ int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
 /**
 * tl_release() - mark as BARRIER_ACKED all requests in the corresponding transfer log epoch
-* @tconn: DRBD connection.
+* @connection: DRBD connection.
 * @barrier_nr: Expected identifier of the DRBD write barrier packet.
 * @set_size: Expected number of requests before that barrier.
 *
@@ -190,7 +190,7 @@ int _get_ldev_if_state(struct drbd_device *device, enum drbd_disk_state mins)
 * epoch of not yet barrier-acked requests, this function will cause a
 * termination of the connection.
 */
-void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
+void tl_release(struct drbd_connection *connection, unsigned int barrier_nr,
 unsigned int set_size)
 {
 struct drbd_request *r;
@@ -198,11 +198,11 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 int expect_epoch = 0;
 int expect_size = 0;
-spin_lock_irq(&tconn->req_lock);
+spin_lock_irq(&connection->req_lock);
 /* find oldest not yet barrier-acked write request,
 * count writes in its epoch. */
-list_for_each_entry(r, &tconn->transfer_log, tl_requests) {
+list_for_each_entry(r, &connection->transfer_log, tl_requests) {
 const unsigned s = r->rq_state;
 if (!req) {
 if (!(s & RQ_WRITE))
@@ -227,18 +227,18 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 /* first some paranoia code */
 if (req == NULL) {
-conn_err(tconn, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
+conn_err(connection, "BAD! BarrierAck #%u received, but no epoch in tl!?\n",
 barrier_nr);
 goto bail;
 }
 if (expect_epoch != barrier_nr) {
-conn_err(tconn, "BAD! BarrierAck #%u received, expected #%u!\n",
+conn_err(connection, "BAD! BarrierAck #%u received, expected #%u!\n",
 barrier_nr, expect_epoch);
 goto bail;
 }
 if (expect_size != set_size) {
-conn_err(tconn, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
+conn_err(connection, "BAD! BarrierAck #%u received with n_writes=%u, expected n_writes=%u!\n",
 barrier_nr, set_size, expect_size);
 goto bail;
 }
@@ -247,21 +247,21 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 /* this extra list walk restart is paranoia,
 * to catch requests being barrier-acked "unexpectedly".
 * It usually should find the same req again, or some READ preceding it. */
-list_for_each_entry(req, &tconn->transfer_log, tl_requests)
+list_for_each_entry(req, &connection->transfer_log, tl_requests)
 if (req->epoch == expect_epoch)
 break;
-list_for_each_entry_safe_from(req, r, &tconn->transfer_log, tl_requests) {
+list_for_each_entry_safe_from(req, r, &connection->transfer_log, tl_requests) {
 if (req->epoch != expect_epoch)
 break;
 _req_mod(req, BARRIER_ACKED);
 }
-spin_unlock_irq(&tconn->req_lock);
+spin_unlock_irq(&connection->req_lock);
 return;
 bail:
-spin_unlock_irq(&tconn->req_lock);
+spin_unlock_irq(&connection->req_lock);
-conn_request_state(tconn, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
+conn_request_state(connection, NS(conn, C_PROTOCOL_ERROR), CS_HARD);
 }
@@ -274,19 +274,19 @@ void tl_release(struct drbd_tconn *tconn, unsigned int barrier_nr,
 * RESTART_FROZEN_DISK_IO.
 */
 /* must hold resource->req_lock */
-void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
 struct drbd_request *req, *r;
-list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests)
+list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests)
 _req_mod(req, what);
 }
-void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
+void tl_restart(struct drbd_connection *connection, enum drbd_req_event what)
 {
-spin_lock_irq(&tconn->req_lock);
+spin_lock_irq(&connection->req_lock);
-_tl_restart(tconn, what);
+_tl_restart(connection, what);
-spin_unlock_irq(&tconn->req_lock);
+spin_unlock_irq(&connection->req_lock);
 }
 /**
@@ -297,9 +297,9 @@ void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what)
 * by the requests on the transfer gets marked as our of sync. Called from the
 * receiver thread and the worker thread.
 */
-void tl_clear(struct drbd_tconn *tconn)
+void tl_clear(struct drbd_connection *connection)
 {
-tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING);
+tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
 }
 /**
@@ -308,29 +308,29 @@ void tl_clear(struct drbd_tconn *tconn)
 */
 void tl_abort_disk_io(struct drbd_device *device)
 {
-struct drbd_tconn *tconn = device->tconn;
+struct drbd_connection *connection = device->connection;
 struct drbd_request *req, *r;
-spin_lock_irq(&tconn->req_lock);
+spin_lock_irq(&connection->req_lock);
-list_for_each_entry_safe(req, r, &tconn->transfer_log, tl_requests) {
+list_for_each_entry_safe(req, r, &connection->transfer_log, tl_requests) {
 if (!(req->rq_state & RQ_LOCAL_PENDING))
 continue;
 if (req->w.device != device)
 continue;
 _req_mod(req, ABORT_DISK_IO);
 }
-spin_unlock_irq(&tconn->req_lock);
+spin_unlock_irq(&connection->req_lock);
 }
 static int drbd_thread_setup(void *arg)
 {
 struct drbd_thread *thi = (struct drbd_thread *) arg;
-struct drbd_tconn *tconn = thi->tconn;
+struct drbd_connection *connection = thi->connection;
 unsigned long flags;
 int retval;
 snprintf(current->comm, sizeof(current->comm), "drbd_%c_%s",
-thi->name[0], thi->tconn->name);
+thi->name[0], thi->connection->name);
 restart:
 retval = thi->function(thi);
@@ -348,7 +348,7 @@ static int drbd_thread_setup(void *arg)
 */
 if (thi->t_state == RESTARTING) {
-conn_info(tconn, "Restarting %s thread\n", thi->name);
+conn_info(connection, "Restarting %s thread\n", thi->name);
 thi->t_state = RUNNING;
 spin_unlock_irqrestore(&thi->t_lock, flags);
 goto restart;
@@ -360,29 +360,29 @@ static int drbd_thread_setup(void *arg)
 complete_all(&thi->stop);
 spin_unlock_irqrestore(&thi->t_lock, flags);
-conn_info(tconn, "Terminating %s\n", current->comm);
+conn_info(connection, "Terminating %s\n", current->comm);
 /* Release mod reference taken when thread was started */
-kref_put(&tconn->kref, &conn_destroy);
+kref_put(&connection->kref, &conn_destroy);
 module_put(THIS_MODULE);
 return retval;
 }
-static void drbd_thread_init(struct drbd_tconn *tconn, struct drbd_thread *thi,
+static void drbd_thread_init(struct drbd_connection *connection, struct drbd_thread *thi,
 int (*func) (struct drbd_thread *), char *name)
 {
 spin_lock_init(&thi->t_lock);
 thi->task = NULL;
 thi->t_state = NONE;
 thi->function = func;
-thi->tconn = tconn;
+thi->connection = connection;
 strncpy(thi->name, name, ARRAY_SIZE(thi->name));
 }
 int drbd_thread_start(struct drbd_thread *thi)
 {
-struct drbd_tconn *tconn = thi->tconn;
+struct drbd_connection *connection = thi->connection;
 struct task_struct *nt;
 unsigned long flags;
@@ -392,17 +392,17 @@ int drbd_thread_start(struct drbd_thread *thi)
 switch (thi->t_state) {
 case NONE:
-conn_info(tconn, "Starting %s thread (from %s [%d])\n",
+conn_info(connection, "Starting %s thread (from %s [%d])\n",
 thi->name, current->comm, current->pid);
 /* Get ref on module for thread - this is released when thread exits */
 if (!try_module_get(THIS_MODULE)) {
-conn_err(tconn, "Failed to get module reference in drbd_thread_start\n");
+conn_err(connection, "Failed to get module reference in drbd_thread_start\n");
 spin_unlock_irqrestore(&thi->t_lock, flags);
 return false;
 }
-kref_get(&thi->tconn->kref);
+kref_get(&thi->connection->kref);
 init_completion(&thi->stop);
 thi->reset_cpu_mask = 1;
@@ -411,12 +411,12 @@ int drbd_thread_start(struct drbd_thread *thi)
 flush_signals(current); /* otherw. may get -ERESTARTNOINTR */
 nt = kthread_create(drbd_thread_setup, (void *) thi,
-"drbd_%c_%s", thi->name[0], thi->tconn->name);
+"drbd_%c_%s", thi->name[0], thi->connection->name);
 if (IS_ERR(nt)) {
-conn_err(tconn, "Couldn't start thread\n");
+conn_err(connection, "Couldn't start thread\n");
-kref_put(&tconn->kref, &conn_destroy);
+kref_put(&connection->kref, &conn_destroy);
 module_put(THIS_MODULE);
 return false;
 }
@@ -428,7 +428,7 @@ int drbd_thread_start(struct drbd_thread *thi)
 break;
 case EXITING:
 thi->t_state = RESTARTING;
-conn_info(tconn, "Restarting %s thread (from %s [%d])\n",
+conn_info(connection, "Restarting %s thread (from %s [%d])\n",
 thi->name, current->comm, current->pid);
 /* fall through */
 case RUNNING:
@@ -477,29 +477,29 @@ void _drbd_thread_stop(struct drbd_thread *thi, int restart, int wait)
 wait_for_completion(&thi->stop);
 }
-static struct drbd_thread *drbd_task_to_thread(struct drbd_tconn *tconn, struct task_struct *task)
+static struct drbd_thread *drbd_task_to_thread(struct drbd_connection *connection, struct task_struct *task)
 {
 struct drbd_thread *thi =
-task == tconn->receiver.task ? &tconn->receiver :
+task == connection->receiver.task ? &connection->receiver :
-task == tconn->asender.task ? &tconn->asender :
+task == connection->asender.task ? &connection->asender :
-task == tconn->worker.task ? &tconn->worker : NULL;
+task == connection->worker.task ? &connection->worker : NULL;
 return thi;
 }
-char *drbd_task_to_thread_name(struct drbd_tconn *tconn, struct task_struct *task)
+char *drbd_task_to_thread_name(struct drbd_connection *connection, struct task_struct *task)
 {
-struct drbd_thread *thi = drbd_task_to_thread(tconn, task);
+struct drbd_thread *thi = drbd_task_to_thread(connection, task);
 return thi ? thi->name : task->comm;
 }
-int conn_lowest_minor(struct drbd_tconn *tconn)
+int conn_lowest_minor(struct drbd_connection *connection)
 {
 struct drbd_device *device;
 int vnr = 0, m;
 rcu_read_lock();
-device = idr_get_next(&tconn->volumes, &vnr);
+device = idr_get_next(&connection->volumes, &vnr);
 m = device ? device_to_minor(device) : -1;
 rcu_read_unlock();
@@ -514,23 +514,23 @@ int conn_lowest_minor(struct drbd_tconn *tconn)
 * Forces all threads of a device onto the same CPU. This is beneficial for
 * DRBD's performance. May be overwritten by user's configuration.
 */
-void drbd_calc_cpu_mask(struct drbd_tconn *tconn)
+void drbd_calc_cpu_mask(struct drbd_connection *connection)
 {
 int ord, cpu;
 /* user override. */
-if (cpumask_weight(tconn->cpu_mask))
+if (cpumask_weight(connection->cpu_mask))
 return;
-ord = conn_lowest_minor(tconn) % cpumask_weight(cpu_online_mask);
+ord = conn_lowest_minor(connection) % cpumask_weight(cpu_online_mask);
 for_each_online_cpu(cpu) {
 if (ord-- == 0) {
-cpumask_set_cpu(cpu, tconn->cpu_mask);
+cpumask_set_cpu(cpu, connection->cpu_mask);
 return;
 }
 }
 /* should not be reached */
-cpumask_setall(tconn->cpu_mask);
+cpumask_setall(connection->cpu_mask);
 }
 /**
@@ -548,7 +548,7 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
 if (!thi->reset_cpu_mask)
 return;
 thi->reset_cpu_mask = 0;
-set_cpus_allowed_ptr(p, thi->tconn->cpu_mask);
+set_cpus_allowed_ptr(p, thi->connection->cpu_mask);
 }
 #endif
@@ -559,9 +559,9 @@ void drbd_thread_current_set_cpu(struct drbd_thread *thi)
 * word aligned on 64-bit architectures. (The bitmap send and receive code
 * relies on this.)
 */
-unsigned int drbd_header_size(struct drbd_tconn *tconn)
+unsigned int drbd_header_size(struct drbd_connection *connection)
 {
-if (tconn->agreed_pro_version >= 100) {
+if (connection->agreed_pro_version >= 100) {
 BUILD_BUG_ON(!IS_ALIGNED(sizeof(struct p_header100), 8));
 return sizeof(struct p_header100);
 } else {
@@ -599,32 +599,32 @@ static unsigned int prepare_header100(struct p_header100 *h, enum drbd_packet cm
 return sizeof(struct p_header100);
 }
-static unsigned int prepare_header(struct drbd_tconn *tconn, int vnr,
+static unsigned int prepare_header(struct drbd_connection *connection, int vnr,
 void *buffer, enum drbd_packet cmd, int size)
 {
-if (tconn->agreed_pro_version >= 100)
+if (connection->agreed_pro_version >= 100)
 return prepare_header100(buffer, cmd, size, vnr);
-else if (tconn->agreed_pro_version >= 95 &&
+else if (connection->agreed_pro_version >= 95 &&
 size > DRBD_MAX_SIZE_H80_PACKET)
 return prepare_header95(buffer, cmd, size);
 else
 return prepare_header80(buffer, cmd, size);
 }
-static void *__conn_prepare_command(struct drbd_tconn *tconn,
+static void *__conn_prepare_command(struct drbd_connection *connection,
 struct drbd_socket *sock)
 {
 if (!sock->socket)
 return NULL;
-return sock->sbuf + drbd_header_size(tconn);
+return sock->sbuf + drbd_header_size(connection);
 }
-void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
+void *conn_prepare_command(struct drbd_connection *connection, struct drbd_socket *sock)
 {
 void *p;
 mutex_lock(&sock->mutex);
-p = __conn_prepare_command(tconn, sock);
+p = __conn_prepare_command(connection, sock);
 if (!p)
 mutex_unlock(&sock->mutex);
@@ -633,10 +633,10 @@ void *conn_prepare_command(struct drbd_tconn *tconn, struct drbd_socket *sock)
 void *drbd_prepare_command(struct drbd_device *device, struct drbd_socket *sock)
 {
-return conn_prepare_command(device->tconn, sock);
+return conn_prepare_command(device->connection, sock);
 }
-static int __send_command(struct drbd_tconn *tconn, int vnr,
+static int __send_command(struct drbd_connection *connection, int vnr,
 struct drbd_socket *sock, enum drbd_packet cmd,
 unsigned int header_size, void *data,
 unsigned int size)
@@ -653,29 +653,29 @@ static int __send_command(struct drbd_tconn *tconn, int vnr,
 */
 msg_flags = data ? MSG_MORE : 0;
-header_size += prepare_header(tconn, vnr, sock->sbuf, cmd,
+header_size += prepare_header(connection, vnr, sock->sbuf, cmd,
 header_size + size);
-err = drbd_send_all(tconn, sock->socket, sock->sbuf, header_size,
+err = drbd_send_all(connection, sock->socket, sock->sbuf, header_size,
 msg_flags);
 if (data && !err)
-err = drbd_send_all(tconn, sock->socket, data, size, 0);
+err = drbd_send_all(connection, sock->socket, data, size, 0);
 return err;
 }
-static int __conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+static int __conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
 enum drbd_packet cmd, unsigned int header_size,
 void *data, unsigned int size)
 {
-return __send_command(tconn, 0, sock, cmd, header_size, data, size);
+return __send_command(connection, 0, sock, cmd, header_size, data, size);
 }
-int conn_send_command(struct drbd_tconn *tconn, struct drbd_socket *sock,
+int conn_send_command(struct drbd_connection *connection, struct drbd_socket *sock,
 enum drbd_packet cmd, unsigned int header_size,
 void *data, unsigned int size)
 {
 int err;
-err = __conn_send_command(tconn, sock, cmd, header_size, data, size);
+err = __conn_send_command(connection, sock, cmd, header_size, data, size);
 mutex_unlock(&sock->mutex);
 return err;
 }
@@ -686,30 +686,30 @@ int drbd_send_command(struct drbd_device *device, struct drbd_socket *sock,
 {
 int err;
-err = __send_command(device->tconn, device->vnr, sock, cmd, header_size,
+err = __send_command(device->connection, device->vnr, sock, cmd, header_size,
 data, size);
 mutex_unlock(&sock->mutex);
 return err;
 }
-int drbd_send_ping(struct drbd_tconn *tconn)
+int drbd_send_ping(struct drbd_connection *connection)
 {
 struct drbd_socket *sock;
-sock = &tconn->meta;
+sock = &connection->meta;
-if (!conn_prepare_command(tconn, sock))
+if (!conn_prepare_command(connection, sock))
 return -EIO;
-return conn_send_command(tconn, sock, P_PING, 0, NULL, 0);
+return conn_send_command(connection, sock, P_PING, 0, NULL, 0);
 }
-int drbd_send_ping_ack(struct drbd_tconn *tconn)
+int drbd_send_ping_ack(struct drbd_connection *connection)
 {
 struct drbd_socket *sock;
-sock = &tconn->meta;
+sock = &connection->meta;
-if (!conn_prepare_command(tconn, sock))
+if (!conn_prepare_command(connection, sock))
 return -EIO;
-return conn_send_command(tconn, sock, P_PING_ACK, 0, NULL, 0);
+return conn_send_command(connection, sock, P_PING_ACK, 0, NULL, 0);
 }
 int drbd_send_sync_param(struct drbd_device *device)
@@ -717,18 +717,18 @@ int drbd_send_sync_param(struct drbd_device *device)
 struct drbd_socket *sock;
 struct p_rs_param_95 *p;
 int size;
-const int apv = device->tconn->agreed_pro_version;
+const int apv = device->connection->agreed_pro_version;
 enum drbd_packet cmd;
 struct net_conf *nc;
 struct disk_conf *dc;
-sock = &device->tconn->data;
+sock = &device->connection->data;
 p = drbd_prepare_command(device, sock);
 if (!p)
 return -EIO;
 rcu_read_lock();
-nc = rcu_dereference(device->tconn->net_conf);
+nc = rcu_dereference(device->connection->net_conf);
 size = apv <= 87 ? sizeof(struct p_rs_param)
 : apv == 88 ? sizeof(struct p_rs_param)
@@ -766,30 +766,30 @@ int drbd_send_sync_param(struct drbd_device *device)
 return drbd_send_command(device, sock, cmd, size, NULL, 0);
 }
-int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
+int __drbd_send_protocol(struct drbd_connection *connection, enum drbd_packet cmd)
 {
 struct drbd_socket *sock;
 struct p_protocol *p;
 struct net_conf *nc;
 int size, cf;
-sock = &tconn->data;
+sock = &connection->data;
-p = __conn_prepare_command(tconn, sock);
+p = __conn_prepare_command(connection, sock);
 if (!p)
 return -EIO;
 rcu_read_lock();
-nc = rcu_dereference(tconn->net_conf);
+nc = rcu_dereference(connection->net_conf);
-if (nc->tentative && tconn->agreed_pro_version < 92) {
+if (nc->tentative && connection->agreed_pro_version < 92) {
 rcu_read_unlock();
 mutex_unlock(&sock->mutex);
-conn_err(tconn, "--dry-run is not supported by peer");
+conn_err(connection, "--dry-run is not supported by peer");
 return -EOPNOTSUPP;
 }
 size = sizeof(*p);
-if (tconn->agreed_pro_version >= 87)
+if (connection->agreed_pro_version >= 87)
 size += strlen(nc->integrity_alg) + 1;
 p->protocol = cpu_to_be32(nc->wire_protocol);
@@ -804,20 +804,20 @@ int __drbd_send_protocol(struct drbd_tconn *tconn, enum drbd_packet cmd)
 cf |= CF_DRY_RUN;
 p->conn_flags = cpu_to_be32(cf);
-if (tconn->agreed_pro_version >= 87)
+if (connection->agreed_pro_version >= 87)
 strcpy(p->integrity_alg, nc->integrity_alg);
 rcu_read_unlock();
-return __conn_send_command(tconn, sock, cmd, size, NULL, 0);
+return __conn_send_command(connection, sock, cmd, size, NULL, 0);
 }
-int drbd_send_protocol(struct drbd_tconn *tconn)
+int drbd_send_protocol(struct drbd_connection *connection)
 {
 int err;
-mutex_lock(&tconn->data.mutex);
+mutex_lock(&connection->data.mutex);
-err = __drbd_send_protocol(tconn, P_PROTOCOL);
+err = __drbd_send_protocol(connection, P_PROTOCOL);
-mutex_unlock(&tconn->data.mutex);
+mutex_unlock(&connection->data.mutex);
 return err;
 }
@@ -831,7 +831,7 @@ static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
 if (!get_ldev_if_state(device, D_NEGOTIATING))
 return 0;
-sock = &device->tconn->data;
+sock = &device->connection->data;
 p = drbd_prepare_command(device, sock);
 if (!p) {
 put_ldev(device);
@@ -845,7 +845,7 @@ static int _drbd_send_uuids(struct drbd_device *device, u64 uuid_flags)
 device->comm_bm_set = drbd_bm_total_weight(device);
 p->uuid[UI_SIZE] = cpu_to_be64(device->comm_bm_set);
 rcu_read_lock();
-uuid_flags |= rcu_dereference(device->tconn->net_conf)->discard_my_data ? 1 : 0;
+uuid_flags |= rcu_dereference(device->connection->net_conf)->discard_my_data ? 1 : 0;
 rcu_read_unlock();
 uuid_flags |= test_bit(CRASHED_PRIMARY, &device->flags) ? 2 : 0;
 uuid_flags |= device->new_state_tmp.disk == D_INCONSISTENT ? 4 : 0;
@@ -900,7 +900,7 @@ void drbd_gen_and_send_sync_uuid(struct drbd_device *device)
 drbd_print_uuids(device, "updated sync UUID");
 drbd_md_sync(device);
-sock = &device->tconn->data;
+sock = &device->connection->data;
 p = drbd_prepare_command(device, sock);
 if (p) {
 p->uuid = cpu_to_be64(uuid);
@@ -933,14 +933,14 @@ int drbd_send_sizes(struct drbd_device *device, int trigger_reply, enum dds_flag
 max_bio_size = DRBD_MAX_BIO_SIZE; /* ... multiple BIOs per peer_request */
 }
-sock = &device->tconn->data;
+sock = &device->connection->data;
 p = drbd_prepare_command(device, sock);
 if (!p)
 return -EIO;
-if (device->tconn->agreed_pro_version <= 94)
+if (device->connection->agreed_pro_version <= 94)
 max_bio_size = min(max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
-else if (device->tconn->agreed_pro_version < 100)
+else if (device->connection->agreed_pro_version < 100)
 max_bio_size = min(max_bio_size, DRBD_MAX_BIO_SIZE_P95);
 p->d_size = cpu_to_be64(d_size);
@@ -961,7 +961,7 @@ int drbd_send_current_state(struct drbd_device *device)
 struct drbd_socket *sock;
 struct p_state *p;
-sock = &device->tconn->data;
+sock = &device->connection->data;
 p = drbd_prepare_command(device, sock);
 if (!p)
 return -EIO;
@@ -984,7 +984,7 @@ int drbd_send_state(struct drbd_device *device, union drbd_state state)
 struct drbd_socket *sock;
 struct p_state *p;
-sock = &device->tconn->data;
+sock = &device->connection->data;
 p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -997,7 +997,7 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union ...@@ -997,7 +997,7 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_req_state *p; struct p_req_state *p;
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1006,20 +1006,20 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union ...@@ -1006,20 +1006,20 @@ int drbd_send_state_req(struct drbd_device *device, union drbd_state mask, union
return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0); return drbd_send_command(device, sock, P_STATE_CHG_REQ, sizeof(*p), NULL, 0);
} }
int conn_send_state_req(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val) int conn_send_state_req(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{ {
enum drbd_packet cmd; enum drbd_packet cmd;
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_req_state *p; struct p_req_state *p;
cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ; cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REQ : P_CONN_ST_CHG_REQ;
sock = &tconn->data; sock = &connection->data;
p = conn_prepare_command(tconn, sock); p = conn_prepare_command(connection, sock);
if (!p) if (!p)
return -EIO; return -EIO;
p->mask = cpu_to_be32(mask.i); p->mask = cpu_to_be32(mask.i);
p->val = cpu_to_be32(val.i); p->val = cpu_to_be32(val.i);
return conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0); return conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
} }
void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode) void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
...@@ -1027,7 +1027,7 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode) ...@@ -1027,7 +1027,7 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_req_state_reply *p; struct p_req_state_reply *p;
sock = &device->tconn->meta; sock = &device->connection->meta;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (p) { if (p) {
p->retcode = cpu_to_be32(retcode); p->retcode = cpu_to_be32(retcode);
...@@ -1035,17 +1035,17 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode) ...@@ -1035,17 +1035,17 @@ void drbd_send_sr_reply(struct drbd_device *device, enum drbd_state_rv retcode)
} }
} }
void conn_send_sr_reply(struct drbd_tconn *tconn, enum drbd_state_rv retcode) void conn_send_sr_reply(struct drbd_connection *connection, enum drbd_state_rv retcode)
{ {
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_req_state_reply *p; struct p_req_state_reply *p;
enum drbd_packet cmd = tconn->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY; enum drbd_packet cmd = connection->agreed_pro_version < 100 ? P_STATE_CHG_REPLY : P_CONN_ST_CHG_REPLY;
sock = &tconn->meta; sock = &connection->meta;
p = conn_prepare_command(tconn, sock); p = conn_prepare_command(connection, sock);
if (p) { if (p) {
p->retcode = cpu_to_be32(retcode); p->retcode = cpu_to_be32(retcode);
conn_send_command(tconn, sock, cmd, sizeof(*p), NULL, 0); conn_send_command(connection, sock, cmd, sizeof(*p), NULL, 0);
} }
} }
...@@ -1081,9 +1081,9 @@ static int fill_bitmap_rle_bits(struct drbd_device *device, ...@@ -1081,9 +1081,9 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
/* may we use this feature? */ /* may we use this feature? */
rcu_read_lock(); rcu_read_lock();
use_rle = rcu_dereference(device->tconn->net_conf)->use_rle; use_rle = rcu_dereference(device->connection->net_conf)->use_rle;
rcu_read_unlock(); rcu_read_unlock();
if (!use_rle || device->tconn->agreed_pro_version < 90) if (!use_rle || device->connection->agreed_pro_version < 90)
return 0; return 0;
if (c->bit_offset >= c->bm_bits) if (c->bit_offset >= c->bm_bits)
...@@ -1172,8 +1172,8 @@ static int fill_bitmap_rle_bits(struct drbd_device *device, ...@@ -1172,8 +1172,8 @@ static int fill_bitmap_rle_bits(struct drbd_device *device,
static int static int
send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c) send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
{ {
struct drbd_socket *sock = &device->tconn->data; struct drbd_socket *sock = &device->connection->data;
unsigned int header_size = drbd_header_size(device->tconn); unsigned int header_size = drbd_header_size(device->connection);
struct p_compressed_bm *p = sock->sbuf + header_size; struct p_compressed_bm *p = sock->sbuf + header_size;
int len, err; int len, err;
...@@ -1184,7 +1184,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c) ...@@ -1184,7 +1184,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
if (len) { if (len) {
dcbp_set_code(p, RLE_VLI_Bits); dcbp_set_code(p, RLE_VLI_Bits);
err = __send_command(device->tconn, device->vnr, sock, err = __send_command(device->connection, device->vnr, sock,
P_COMPRESSED_BITMAP, sizeof(*p) + len, P_COMPRESSED_BITMAP, sizeof(*p) + len,
NULL, 0); NULL, 0);
c->packets[0]++; c->packets[0]++;
...@@ -1205,7 +1205,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c) ...@@ -1205,7 +1205,7 @@ send_bitmap_rle_or_plain(struct drbd_device *device, struct bm_xfer_ctx *c)
len = num_words * sizeof(*p); len = num_words * sizeof(*p);
if (len) if (len)
drbd_bm_get_lel(device, c->word_offset, num_words, p); drbd_bm_get_lel(device, c->word_offset, num_words, p);
err = __send_command(device->tconn, device->vnr, sock, P_BITMAP, len, NULL, 0); err = __send_command(device->connection, device->vnr, sock, P_BITMAP, len, NULL, 0);
c->word_offset += num_words; c->word_offset += num_words;
c->bit_offset = c->word_offset * BITS_PER_LONG; c->bit_offset = c->word_offset * BITS_PER_LONG;
...@@ -1265,7 +1265,7 @@ static int _drbd_send_bitmap(struct drbd_device *device) ...@@ -1265,7 +1265,7 @@ static int _drbd_send_bitmap(struct drbd_device *device)
int drbd_send_bitmap(struct drbd_device *device) int drbd_send_bitmap(struct drbd_device *device)
{ {
struct drbd_socket *sock = &device->tconn->data; struct drbd_socket *sock = &device->connection->data;
int err = -1; int err = -1;
mutex_lock(&sock->mutex); mutex_lock(&sock->mutex);
...@@ -1275,21 +1275,21 @@ int drbd_send_bitmap(struct drbd_device *device) ...@@ -1275,21 +1275,21 @@ int drbd_send_bitmap(struct drbd_device *device)
return err; return err;
} }
void drbd_send_b_ack(struct drbd_tconn *tconn, u32 barrier_nr, u32 set_size) void drbd_send_b_ack(struct drbd_connection *connection, u32 barrier_nr, u32 set_size)
{ {
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_barrier_ack *p; struct p_barrier_ack *p;
if (tconn->cstate < C_WF_REPORT_PARAMS) if (connection->cstate < C_WF_REPORT_PARAMS)
return; return;
sock = &tconn->meta; sock = &connection->meta;
p = conn_prepare_command(tconn, sock); p = conn_prepare_command(connection, sock);
if (!p) if (!p)
return; return;
p->barrier = barrier_nr; p->barrier = barrier_nr;
p->set_size = cpu_to_be32(set_size); p->set_size = cpu_to_be32(set_size);
conn_send_command(tconn, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0); conn_send_command(connection, sock, P_BARRIER_ACK, sizeof(*p), NULL, 0);
} }
/** /**
...@@ -1309,7 +1309,7 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd, ...@@ -1309,7 +1309,7 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
if (device->state.conn < C_CONNECTED) if (device->state.conn < C_CONNECTED)
return -EIO; return -EIO;
sock = &device->tconn->meta; sock = &device->connection->meta;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1326,8 +1326,8 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd, ...@@ -1326,8 +1326,8 @@ static int _drbd_send_ack(struct drbd_device *device, enum drbd_packet cmd,
void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd, void drbd_send_ack_dp(struct drbd_device *device, enum drbd_packet cmd,
struct p_data *dp, int data_size) struct p_data *dp, int data_size)
{ {
if (device->tconn->peer_integrity_tfm) if (device->connection->peer_integrity_tfm)
data_size -= crypto_hash_digestsize(device->tconn->peer_integrity_tfm); data_size -= crypto_hash_digestsize(device->connection->peer_integrity_tfm);
_drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size), _drbd_send_ack(device, cmd, dp->sector, cpu_to_be32(data_size),
dp->block_id); dp->block_id);
} }
...@@ -1370,7 +1370,7 @@ int drbd_send_drequest(struct drbd_device *device, int cmd, ...@@ -1370,7 +1370,7 @@ int drbd_send_drequest(struct drbd_device *device, int cmd,
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_block_req *p; struct p_block_req *p;
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1388,7 +1388,7 @@ int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int siz ...@@ -1388,7 +1388,7 @@ int drbd_send_drequest_csum(struct drbd_device *device, sector_t sector, int siz
/* FIXME: Put the digest into the preallocated socket buffer. */ /* FIXME: Put the digest into the preallocated socket buffer. */
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1404,7 +1404,7 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size) ...@@ -1404,7 +1404,7 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_block_req *p; struct p_block_req *p;
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1418,34 +1418,34 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size) ...@@ -1418,34 +1418,34 @@ int drbd_send_ov_request(struct drbd_device *device, sector_t sector, int size)
* returns false if we should retry, * returns false if we should retry,
* true if we think connection is dead * true if we think connection is dead
*/ */
static int we_should_drop_the_connection(struct drbd_tconn *tconn, struct socket *sock) static int we_should_drop_the_connection(struct drbd_connection *connection, struct socket *sock)
{ {
int drop_it; int drop_it;
/* long elapsed = (long)(jiffies - device->last_received); */ /* long elapsed = (long)(jiffies - device->last_received); */
drop_it = tconn->meta.socket == sock drop_it = connection->meta.socket == sock
|| !tconn->asender.task || !connection->asender.task
|| get_t_state(&tconn->asender) != RUNNING || get_t_state(&connection->asender) != RUNNING
|| tconn->cstate < C_WF_REPORT_PARAMS; || connection->cstate < C_WF_REPORT_PARAMS;
if (drop_it) if (drop_it)
return true; return true;
drop_it = !--tconn->ko_count; drop_it = !--connection->ko_count;
if (!drop_it) { if (!drop_it) {
conn_err(tconn, "[%s/%d] sock_sendmsg time expired, ko = %u\n", conn_err(connection, "[%s/%d] sock_sendmsg time expired, ko = %u\n",
current->comm, current->pid, tconn->ko_count); current->comm, current->pid, connection->ko_count);
request_ping(tconn); request_ping(connection);
} }
return drop_it; /* && (device->state == R_PRIMARY) */; return drop_it; /* && (device->state == R_PRIMARY) */;
} }
static void drbd_update_congested(struct drbd_tconn *tconn) static void drbd_update_congested(struct drbd_connection *connection)
{ {
struct sock *sk = tconn->data.socket->sk; struct sock *sk = connection->data.socket->sk;
if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5) if (sk->sk_wmem_queued > sk->sk_sndbuf * 4 / 5)
set_bit(NET_CONGESTED, &tconn->flags); set_bit(NET_CONGESTED, &connection->flags);
} }
/* The idea of sendpage seems to be to put some kind of reference /* The idea of sendpage seems to be to put some kind of reference
...@@ -1476,9 +1476,9 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page, ...@@ -1476,9 +1476,9 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
void *addr; void *addr;
int err; int err;
socket = device->tconn->data.socket; socket = device->connection->data.socket;
addr = kmap(page) + offset; addr = kmap(page) + offset;
err = drbd_send_all(device->tconn, socket, addr, size, msg_flags); err = drbd_send_all(device->connection, socket, addr, size, msg_flags);
kunmap(page); kunmap(page);
if (!err) if (!err)
device->send_cnt += size >> 9; device->send_cnt += size >> 9;
...@@ -1488,7 +1488,7 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page, ...@@ -1488,7 +1488,7 @@ static int _drbd_no_send_page(struct drbd_device *device, struct page *page,
static int _drbd_send_page(struct drbd_device *device, struct page *page, static int _drbd_send_page(struct drbd_device *device, struct page *page,
int offset, size_t size, unsigned msg_flags) int offset, size_t size, unsigned msg_flags)
{ {
struct socket *socket = device->tconn->data.socket; struct socket *socket = device->connection->data.socket;
mm_segment_t oldfs = get_fs(); mm_segment_t oldfs = get_fs();
int len = size; int len = size;
int err = -EIO; int err = -EIO;
...@@ -1503,7 +1503,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page, ...@@ -1503,7 +1503,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
return _drbd_no_send_page(device, page, offset, size, msg_flags); return _drbd_no_send_page(device, page, offset, size, msg_flags);
msg_flags |= MSG_NOSIGNAL; msg_flags |= MSG_NOSIGNAL;
drbd_update_congested(device->tconn); drbd_update_congested(device->connection);
set_fs(KERNEL_DS); set_fs(KERNEL_DS);
do { do {
int sent; int sent;
...@@ -1511,7 +1511,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page, ...@@ -1511,7 +1511,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
sent = socket->ops->sendpage(socket, page, offset, len, msg_flags); sent = socket->ops->sendpage(socket, page, offset, len, msg_flags);
if (sent <= 0) { if (sent <= 0) {
if (sent == -EAGAIN) { if (sent == -EAGAIN) {
if (we_should_drop_the_connection(device->tconn, socket)) if (we_should_drop_the_connection(device->connection, socket))
break; break;
continue; continue;
} }
...@@ -1525,7 +1525,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page, ...@@ -1525,7 +1525,7 @@ static int _drbd_send_page(struct drbd_device *device, struct page *page,
offset += sent; offset += sent;
} while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/); } while (len > 0 /* THINK && device->cstate >= C_CONNECTED*/);
set_fs(oldfs); set_fs(oldfs);
clear_bit(NET_CONGESTED, &device->tconn->flags); clear_bit(NET_CONGESTED, &device->connection->flags);
if (len == 0) { if (len == 0) {
err = 0; err = 0;
...@@ -1593,7 +1593,7 @@ static int _drbd_send_zc_ee(struct drbd_device *device, ...@@ -1593,7 +1593,7 @@ static int _drbd_send_zc_ee(struct drbd_device *device,
static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw) static u32 bio_flags_to_wire(struct drbd_device *device, unsigned long bi_rw)
{ {
if (device->tconn->agreed_pro_version >= 95) if (device->connection->agreed_pro_version >= 95)
return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) | return (bi_rw & REQ_SYNC ? DP_RW_SYNC : 0) |
(bi_rw & REQ_FUA ? DP_FUA : 0) | (bi_rw & REQ_FUA ? DP_FUA : 0) |
(bi_rw & REQ_FLUSH ? DP_FLUSH : 0) | (bi_rw & REQ_FLUSH ? DP_FLUSH : 0) |
...@@ -1613,9 +1613,9 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req) ...@@ -1613,9 +1613,9 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
int dgs; int dgs;
int err; int err;
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
dgs = device->tconn->integrity_tfm ? crypto_hash_digestsize(device->tconn->integrity_tfm) : 0; dgs = device->connection->integrity_tfm ? crypto_hash_digestsize(device->connection->integrity_tfm) : 0;
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1626,7 +1626,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req) ...@@ -1626,7 +1626,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
if (device->state.conn >= C_SYNC_SOURCE && if (device->state.conn >= C_SYNC_SOURCE &&
device->state.conn <= C_PAUSED_SYNC_T) device->state.conn <= C_PAUSED_SYNC_T)
dp_flags |= DP_MAY_SET_IN_SYNC; dp_flags |= DP_MAY_SET_IN_SYNC;
if (device->tconn->agreed_pro_version >= 100) { if (device->connection->agreed_pro_version >= 100) {
if (req->rq_state & RQ_EXP_RECEIVE_ACK) if (req->rq_state & RQ_EXP_RECEIVE_ACK)
dp_flags |= DP_SEND_RECEIVE_ACK; dp_flags |= DP_SEND_RECEIVE_ACK;
if (req->rq_state & RQ_EXP_WRITE_ACK) if (req->rq_state & RQ_EXP_WRITE_ACK)
...@@ -1634,8 +1634,8 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req) ...@@ -1634,8 +1634,8 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
} }
p->dp_flags = cpu_to_be32(dp_flags); p->dp_flags = cpu_to_be32(dp_flags);
if (dgs) if (dgs)
drbd_csum_bio(device, device->tconn->integrity_tfm, req->master_bio, p + 1); drbd_csum_bio(device, device->connection->integrity_tfm, req->master_bio, p + 1);
err = __send_command(device->tconn, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size); err = __send_command(device->connection, device->vnr, sock, P_DATA, sizeof(*p) + dgs, NULL, req->i.size);
if (!err) { if (!err) {
/* For protocol A, we have to memcpy the payload into /* For protocol A, we have to memcpy the payload into
* socket buffers, as we may complete right away * socket buffers, as we may complete right away
...@@ -1658,7 +1658,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req) ...@@ -1658,7 +1658,7 @@ int drbd_send_dblock(struct drbd_device *device, struct drbd_request *req)
/* 64 byte, 512 bit, is the largest digest size /* 64 byte, 512 bit, is the largest digest size
* currently supported in kernel crypto. */ * currently supported in kernel crypto. */
unsigned char digest[64]; unsigned char digest[64];
drbd_csum_bio(device, device->tconn->integrity_tfm, req->master_bio, digest); drbd_csum_bio(device, device->connection->integrity_tfm, req->master_bio, digest);
if (memcmp(p + 1, digest, dgs)) { if (memcmp(p + 1, digest, dgs)) {
dev_warn(DEV, dev_warn(DEV,
"Digest mismatch, buffer modified by upper layers during write: %llus +%u\n", "Digest mismatch, buffer modified by upper layers during write: %llus +%u\n",
...@@ -1685,10 +1685,10 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd, ...@@ -1685,10 +1685,10 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
int err; int err;
int dgs; int dgs;
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
dgs = device->tconn->integrity_tfm ? crypto_hash_digestsize(device->tconn->integrity_tfm) : 0; dgs = device->connection->integrity_tfm ? crypto_hash_digestsize(device->connection->integrity_tfm) : 0;
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1697,8 +1697,8 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd, ...@@ -1697,8 +1697,8 @@ int drbd_send_block(struct drbd_device *device, enum drbd_packet cmd,
p->seq_num = 0; /* unused */ p->seq_num = 0; /* unused */
p->dp_flags = 0; p->dp_flags = 0;
if (dgs) if (dgs)
drbd_csum_ee(device, device->tconn->integrity_tfm, peer_req, p + 1); drbd_csum_ee(device, device->connection->integrity_tfm, peer_req, p + 1);
err = __send_command(device->tconn, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size); err = __send_command(device->connection, device->vnr, sock, cmd, sizeof(*p) + dgs, NULL, peer_req->i.size);
if (!err) if (!err)
err = _drbd_send_zc_ee(device, peer_req); err = _drbd_send_zc_ee(device, peer_req);
mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */ mutex_unlock(&sock->mutex); /* locked by drbd_prepare_command() */
...@@ -1711,7 +1711,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req) ...@@ -1711,7 +1711,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
struct drbd_socket *sock; struct drbd_socket *sock;
struct p_block_desc *p; struct p_block_desc *p;
sock = &device->tconn->data; sock = &device->connection->data;
p = drbd_prepare_command(device, sock); p = drbd_prepare_command(device, sock);
if (!p) if (!p)
return -EIO; return -EIO;
...@@ -1736,7 +1736,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req) ...@@ -1736,7 +1736,7 @@ int drbd_send_out_of_sync(struct drbd_device *device, struct drbd_request *req)
/* /*
* you must have down()ed the appropriate [m]sock_mutex elsewhere! * you must have down()ed the appropriate [m]sock_mutex elsewhere!
*/ */
int drbd_send(struct drbd_tconn *tconn, struct socket *sock, int drbd_send(struct drbd_connection *connection, struct socket *sock,
void *buf, size_t size, unsigned msg_flags) void *buf, size_t size, unsigned msg_flags)
{ {
struct kvec iov; struct kvec iov;
...@@ -1757,11 +1757,11 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock, ...@@ -1757,11 +1757,11 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
msg.msg_controllen = 0; msg.msg_controllen = 0;
msg.msg_flags = msg_flags | MSG_NOSIGNAL; msg.msg_flags = msg_flags | MSG_NOSIGNAL;
if (sock == tconn->data.socket) { if (sock == connection->data.socket) {
rcu_read_lock(); rcu_read_lock();
tconn->ko_count = rcu_dereference(tconn->net_conf)->ko_count; connection->ko_count = rcu_dereference(connection->net_conf)->ko_count;
rcu_read_unlock(); rcu_read_unlock();
drbd_update_congested(tconn); drbd_update_congested(connection);
} }
do { do {
/* STRANGE /* STRANGE
...@@ -1775,7 +1775,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock, ...@@ -1775,7 +1775,7 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
*/ */
rv = kernel_sendmsg(sock, &msg, &iov, 1, size); rv = kernel_sendmsg(sock, &msg, &iov, 1, size);
if (rv == -EAGAIN) { if (rv == -EAGAIN) {
if (we_should_drop_the_connection(tconn, sock)) if (we_should_drop_the_connection(connection, sock))
break; break;
else else
continue; continue;
...@@ -1791,17 +1791,17 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock, ...@@ -1791,17 +1791,17 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
iov.iov_len -= rv; iov.iov_len -= rv;
} while (sent < size); } while (sent < size);
if (sock == tconn->data.socket) if (sock == connection->data.socket)
clear_bit(NET_CONGESTED, &tconn->flags); clear_bit(NET_CONGESTED, &connection->flags);
if (rv <= 0) { if (rv <= 0) {
if (rv != -EAGAIN) { if (rv != -EAGAIN) {
conn_err(tconn, "%s_sendmsg returned %d\n", conn_err(connection, "%s_sendmsg returned %d\n",
sock == tconn->meta.socket ? "msock" : "sock", sock == connection->meta.socket ? "msock" : "sock",
rv); rv);
conn_request_state(tconn, NS(conn, C_BROKEN_PIPE), CS_HARD); conn_request_state(connection, NS(conn, C_BROKEN_PIPE), CS_HARD);
} else } else
conn_request_state(tconn, NS(conn, C_TIMEOUT), CS_HARD); conn_request_state(connection, NS(conn, C_TIMEOUT), CS_HARD);
} }
return sent; return sent;
...@@ -1812,12 +1812,12 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock, ...@@ -1812,12 +1812,12 @@ int drbd_send(struct drbd_tconn *tconn, struct socket *sock,
* *
* Returns 0 upon success and a negative error value otherwise. * Returns 0 upon success and a negative error value otherwise.
*/ */
int drbd_send_all(struct drbd_tconn *tconn, struct socket *sock, void *buffer, int drbd_send_all(struct drbd_connection *connection, struct socket *sock, void *buffer,
size_t size, unsigned msg_flags) size_t size, unsigned msg_flags)
{ {
int err; int err;
err = drbd_send(tconn, sock, buffer, size, msg_flags); err = drbd_send(connection, sock, buffer, size, msg_flags);
if (err < 0) if (err < 0)
return err; return err;
if (err != size) if (err != size)
...@@ -1832,7 +1832,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) ...@@ -1832,7 +1832,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
int rv = 0; int rv = 0;
mutex_lock(&drbd_main_mutex); mutex_lock(&drbd_main_mutex);
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
/* to have a stable device->state.role /* to have a stable device->state.role
* and no race with updating open_cnt */ * and no race with updating open_cnt */
...@@ -1845,7 +1845,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode) ...@@ -1845,7 +1845,7 @@ static int drbd_open(struct block_device *bdev, fmode_t mode)
if (!rv) if (!rv)
device->open_cnt++; device->open_cnt++;
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
mutex_unlock(&drbd_main_mutex); mutex_unlock(&drbd_main_mutex);
return rv; return rv;
...@@ -1950,9 +1950,9 @@ void drbd_init_set_defaults(struct drbd_device *device) ...@@ -1950,9 +1950,9 @@ void drbd_init_set_defaults(struct drbd_device *device)
void drbd_device_cleanup(struct drbd_device *device) void drbd_device_cleanup(struct drbd_device *device)
{ {
int i; int i;
if (device->tconn->receiver.t_state != NONE) if (device->connection->receiver.t_state != NONE)
dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n", dev_err(DEV, "ASSERT FAILED: receiver t_state == %d expected 0.\n",
device->tconn->receiver.t_state); device->connection->receiver.t_state);
device->al_writ_cnt = device->al_writ_cnt =
device->bm_writ_cnt = device->bm_writ_cnt =
...@@ -1970,7 +1970,7 @@ void drbd_device_cleanup(struct drbd_device *device) ...@@ -1970,7 +1970,7 @@ void drbd_device_cleanup(struct drbd_device *device)
device->rs_mark_left[i] = 0; device->rs_mark_left[i] = 0;
device->rs_mark_time[i] = 0; device->rs_mark_time[i] = 0;
} }
D_ASSERT(device->tconn->net_conf == NULL); D_ASSERT(device->connection->net_conf == NULL);
drbd_set_my_capacity(device, 0); drbd_set_my_capacity(device, 0);
if (device->bitmap) { if (device->bitmap) {
...@@ -1990,7 +1990,7 @@ void drbd_device_cleanup(struct drbd_device *device) ...@@ -1990,7 +1990,7 @@ void drbd_device_cleanup(struct drbd_device *device)
D_ASSERT(list_empty(&device->read_ee)); D_ASSERT(list_empty(&device->read_ee));
D_ASSERT(list_empty(&device->net_ee)); D_ASSERT(list_empty(&device->net_ee));
D_ASSERT(list_empty(&device->resync_reads)); D_ASSERT(list_empty(&device->resync_reads));
D_ASSERT(list_empty(&device->tconn->sender_work.q)); D_ASSERT(list_empty(&device->connection->sender_work.q));
D_ASSERT(list_empty(&device->resync_work.list)); D_ASSERT(list_empty(&device->resync_work.list));
D_ASSERT(list_empty(&device->unplug_work.list)); D_ASSERT(list_empty(&device->unplug_work.list));
D_ASSERT(list_empty(&device->go_diskless.list)); D_ASSERT(list_empty(&device->go_diskless.list));
...@@ -2159,7 +2159,7 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device) ...@@ -2159,7 +2159,7 @@ static void drbd_release_all_peer_reqs(struct drbd_device *device)
void drbd_minor_destroy(struct kref *kref) void drbd_minor_destroy(struct kref *kref)
{ {
struct drbd_device *device = container_of(kref, struct drbd_device, kref); struct drbd_device *device = container_of(kref, struct drbd_device, kref);
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
del_timer_sync(&device->request_timer); del_timer_sync(&device->request_timer);
...@@ -2192,7 +2192,7 @@ void drbd_minor_destroy(struct kref *kref) ...@@ -2192,7 +2192,7 @@ void drbd_minor_destroy(struct kref *kref)
kfree(device->rs_plan_s); kfree(device->rs_plan_s);
kfree(device); kfree(device);
kref_put(&tconn->kref, &conn_destroy); kref_put(&connection->kref, &conn_destroy);
} }
/* One global retry thread, if we need to push back some bio and have it /* One global retry thread, if we need to push back some bio and have it
...@@ -2278,7 +2278,7 @@ static void drbd_cleanup(void) ...@@ -2278,7 +2278,7 @@ static void drbd_cleanup(void)
{ {
unsigned int i; unsigned int i;
struct drbd_device *device; struct drbd_device *device;
struct drbd_tconn *tconn, *tmp; struct drbd_connection *connection, *tmp;
unregister_reboot_notifier(&drbd_notifier); unregister_reboot_notifier(&drbd_notifier);
...@@ -2300,7 +2300,7 @@ static void drbd_cleanup(void) ...@@ -2300,7 +2300,7 @@ static void drbd_cleanup(void)
idr_for_each_entry(&minors, device, i) { idr_for_each_entry(&minors, device, i) {
idr_remove(&minors, device_to_minor(device)); idr_remove(&minors, device_to_minor(device));
idr_remove(&device->tconn->volumes, device->vnr); idr_remove(&device->connection->volumes, device->vnr);
destroy_workqueue(device->submit.wq); destroy_workqueue(device->submit.wq);
del_gendisk(device->vdisk); del_gendisk(device->vdisk);
/* synchronize_rcu(); No other threads running at this point */ /* synchronize_rcu(); No other threads running at this point */
...@@ -2308,10 +2308,10 @@ static void drbd_cleanup(void) ...@@ -2308,10 +2308,10 @@ static void drbd_cleanup(void)
} }
/* not _rcu since, no other updater anymore. Genl already unregistered */ /* not _rcu since, no other updater anymore. Genl already unregistered */
list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) { list_for_each_entry_safe(connection, tmp, &drbd_connections, connections) {
list_del(&tconn->all_tconn); /* not _rcu no proc, not other threads */ list_del(&connection->connections); /* not _rcu no proc, not other threads */
/* synchronize_rcu(); */ /* synchronize_rcu(); */
kref_put(&tconn->kref, &conn_destroy); kref_put(&connection->kref, &conn_destroy);
} }
drbd_destroy_mempools(); drbd_destroy_mempools();
...@@ -2343,7 +2343,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) ...@@ -2343,7 +2343,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
goto out; goto out;
} }
if (test_bit(CALLBACK_PENDING, &device->tconn->flags)) { if (test_bit(CALLBACK_PENDING, &device->connection->flags)) {
r |= (1 << BDI_async_congested); r |= (1 << BDI_async_congested);
/* Without good local data, we would need to read from remote, /* Without good local data, we would need to read from remote,
* and that would need the worker thread as well, which is * and that would need the worker thread as well, which is
...@@ -2367,7 +2367,7 @@ static int drbd_congested(void *congested_data, int bdi_bits) ...@@ -2367,7 +2367,7 @@ static int drbd_congested(void *congested_data, int bdi_bits)
reason = 'b'; reason = 'b';
} }
if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &device->tconn->flags)) { if (bdi_bits & (1 << BDI_async_congested) && test_bit(NET_CONGESTED, &device->connection->flags)) {
r |= (1 << BDI_async_congested); r |= (1 << BDI_async_congested);
reason = reason == 'b' ? 'a' : 'n'; reason = reason == 'b' ? 'a' : 'n';
} }
...@@ -2384,45 +2384,45 @@ static void drbd_init_workqueue(struct drbd_work_queue* wq) ...@@ -2384,45 +2384,45 @@ static void drbd_init_workqueue(struct drbd_work_queue* wq)
init_waitqueue_head(&wq->q_wait); init_waitqueue_head(&wq->q_wait);
} }
struct drbd_tconn *conn_get_by_name(const char *name) struct drbd_connection *conn_get_by_name(const char *name)
{ {
struct drbd_tconn *tconn; struct drbd_connection *connection;
if (!name || !name[0]) if (!name || !name[0])
return NULL; return NULL;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) { list_for_each_entry_rcu(connection, &drbd_connections, connections) {
if (!strcmp(tconn->name, name)) { if (!strcmp(connection->name, name)) {
kref_get(&tconn->kref); kref_get(&connection->kref);
goto found; goto found;
} }
} }
tconn = NULL; connection = NULL;
found: found:
rcu_read_unlock(); rcu_read_unlock();
return tconn; return connection;
} }
struct drbd_tconn *conn_get_by_addrs(void *my_addr, int my_addr_len, struct drbd_connection *conn_get_by_addrs(void *my_addr, int my_addr_len,
void *peer_addr, int peer_addr_len) void *peer_addr, int peer_addr_len)
{ {
struct drbd_tconn *tconn; struct drbd_connection *connection;
rcu_read_lock(); rcu_read_lock();
list_for_each_entry_rcu(tconn, &drbd_tconns, all_tconn) { list_for_each_entry_rcu(connection, &drbd_connections, connections) {
if (tconn->my_addr_len == my_addr_len && if (connection->my_addr_len == my_addr_len &&
tconn->peer_addr_len == peer_addr_len && connection->peer_addr_len == peer_addr_len &&
!memcmp(&tconn->my_addr, my_addr, my_addr_len) && !memcmp(&connection->my_addr, my_addr, my_addr_len) &&
!memcmp(&tconn->peer_addr, peer_addr, peer_addr_len)) { !memcmp(&connection->peer_addr, peer_addr, peer_addr_len)) {
kref_get(&tconn->kref); kref_get(&connection->kref);
goto found; goto found;
} }
} }
tconn = NULL; connection = NULL;
found: found:
rcu_read_unlock(); rcu_read_unlock();
return tconn; return connection;
} }
static int drbd_alloc_socket(struct drbd_socket *socket) static int drbd_alloc_socket(struct drbd_socket *socket)
...@@ -2442,28 +2442,28 @@ static void drbd_free_socket(struct drbd_socket *socket) ...@@ -2442,28 +2442,28 @@ static void drbd_free_socket(struct drbd_socket *socket)
free_page((unsigned long) socket->rbuf); free_page((unsigned long) socket->rbuf);
} }
void conn_free_crypto(struct drbd_tconn *tconn) void conn_free_crypto(struct drbd_connection *connection)
{ {
drbd_free_sock(tconn); drbd_free_sock(connection);
crypto_free_hash(tconn->csums_tfm); crypto_free_hash(connection->csums_tfm);
crypto_free_hash(tconn->verify_tfm); crypto_free_hash(connection->verify_tfm);
crypto_free_hash(tconn->cram_hmac_tfm); crypto_free_hash(connection->cram_hmac_tfm);
crypto_free_hash(tconn->integrity_tfm); crypto_free_hash(connection->integrity_tfm);
crypto_free_hash(tconn->peer_integrity_tfm); crypto_free_hash(connection->peer_integrity_tfm);
kfree(tconn->int_dig_in); kfree(connection->int_dig_in);
kfree(tconn->int_dig_vv); kfree(connection->int_dig_vv);
tconn->csums_tfm = NULL; connection->csums_tfm = NULL;
tconn->verify_tfm = NULL; connection->verify_tfm = NULL;
tconn->cram_hmac_tfm = NULL; connection->cram_hmac_tfm = NULL;
tconn->integrity_tfm = NULL; connection->integrity_tfm = NULL;
tconn->peer_integrity_tfm = NULL; connection->peer_integrity_tfm = NULL;
tconn->int_dig_in = NULL; connection->int_dig_in = NULL;
tconn->int_dig_vv = NULL; connection->int_dig_vv = NULL;
} }
int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts) int set_resource_options(struct drbd_connection *connection, struct res_opts *res_opts)
{ {
cpumask_var_t new_cpu_mask; cpumask_var_t new_cpu_mask;
int err; int err;
...@@ -2481,18 +2481,18 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts) ...@@ -2481,18 +2481,18 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
err = bitmap_parse(res_opts->cpu_mask, 32, err = bitmap_parse(res_opts->cpu_mask, 32,
cpumask_bits(new_cpu_mask), nr_cpu_ids); cpumask_bits(new_cpu_mask), nr_cpu_ids);
if (err) { if (err) {
conn_warn(tconn, "bitmap_parse() failed with %d\n", err); conn_warn(connection, "bitmap_parse() failed with %d\n", err);
/* retcode = ERR_CPU_MASK_PARSE; */ /* retcode = ERR_CPU_MASK_PARSE; */
goto fail; goto fail;
} }
} }
tconn->res_opts = *res_opts; connection->res_opts = *res_opts;
if (!cpumask_equal(tconn->cpu_mask, new_cpu_mask)) { if (!cpumask_equal(connection->cpu_mask, new_cpu_mask)) {
cpumask_copy(tconn->cpu_mask, new_cpu_mask); cpumask_copy(connection->cpu_mask, new_cpu_mask);
drbd_calc_cpu_mask(tconn); drbd_calc_cpu_mask(connection);
tconn->receiver.reset_cpu_mask = 1; connection->receiver.reset_cpu_mask = 1;
tconn->asender.reset_cpu_mask = 1; connection->asender.reset_cpu_mask = 1;
tconn->worker.reset_cpu_mask = 1; connection->worker.reset_cpu_mask = 1;
} }
err = 0; err = 0;
...@@ -2503,92 +2503,92 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts) ...@@ -2503,92 +2503,92 @@ int set_resource_options(struct drbd_tconn *tconn, struct res_opts *res_opts)
} }
/* caller must be under genl_lock() */ /* caller must be under genl_lock() */
struct drbd_tconn *conn_create(const char *name, struct res_opts *res_opts) struct drbd_connection *conn_create(const char *name, struct res_opts *res_opts)
{ {
struct drbd_tconn *tconn; struct drbd_connection *connection;
tconn = kzalloc(sizeof(struct drbd_tconn), GFP_KERNEL); connection = kzalloc(sizeof(struct drbd_connection), GFP_KERNEL);
if (!tconn) if (!connection)
return NULL; return NULL;
tconn->name = kstrdup(name, GFP_KERNEL); connection->name = kstrdup(name, GFP_KERNEL);
if (!tconn->name) if (!connection->name)
goto fail; goto fail;
if (drbd_alloc_socket(&tconn->data)) if (drbd_alloc_socket(&connection->data))
goto fail; goto fail;
if (drbd_alloc_socket(&tconn->meta)) if (drbd_alloc_socket(&connection->meta))
goto fail; goto fail;
if (!zalloc_cpumask_var(&tconn->cpu_mask, GFP_KERNEL)) if (!zalloc_cpumask_var(&connection->cpu_mask, GFP_KERNEL))
goto fail; goto fail;
if (set_resource_options(tconn, res_opts)) if (set_resource_options(connection, res_opts))
goto fail; goto fail;
tconn->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL); connection->current_epoch = kzalloc(sizeof(struct drbd_epoch), GFP_KERNEL);
if (!tconn->current_epoch) if (!connection->current_epoch)
goto fail; goto fail;
INIT_LIST_HEAD(&tconn->transfer_log); INIT_LIST_HEAD(&connection->transfer_log);
INIT_LIST_HEAD(&tconn->current_epoch->list); INIT_LIST_HEAD(&connection->current_epoch->list);
tconn->epochs = 1; connection->epochs = 1;
spin_lock_init(&tconn->epoch_lock); spin_lock_init(&connection->epoch_lock);
tconn->write_ordering = WO_bdev_flush; connection->write_ordering = WO_bdev_flush;
tconn->send.seen_any_write_yet = false; connection->send.seen_any_write_yet = false;
tconn->send.current_epoch_nr = 0; connection->send.current_epoch_nr = 0;
tconn->send.current_epoch_writes = 0; connection->send.current_epoch_writes = 0;
tconn->cstate = C_STANDALONE; connection->cstate = C_STANDALONE;
mutex_init(&tconn->cstate_mutex); mutex_init(&connection->cstate_mutex);
spin_lock_init(&tconn->req_lock); spin_lock_init(&connection->req_lock);
mutex_init(&tconn->conf_update); mutex_init(&connection->conf_update);
init_waitqueue_head(&tconn->ping_wait); init_waitqueue_head(&connection->ping_wait);
idr_init(&tconn->volumes); idr_init(&connection->volumes);
drbd_init_workqueue(&tconn->sender_work); drbd_init_workqueue(&connection->sender_work);
mutex_init(&tconn->data.mutex); mutex_init(&connection->data.mutex);
mutex_init(&tconn->meta.mutex); mutex_init(&connection->meta.mutex);
drbd_thread_init(tconn, &tconn->receiver, drbdd_init, "receiver"); drbd_thread_init(connection, &connection->receiver, drbdd_init, "receiver");
drbd_thread_init(tconn, &tconn->worker, drbd_worker, "worker"); drbd_thread_init(connection, &connection->worker, drbd_worker, "worker");
drbd_thread_init(tconn, &tconn->asender, drbd_asender, "asender"); drbd_thread_init(connection, &connection->asender, drbd_asender, "asender");
kref_init(&tconn->kref); kref_init(&connection->kref);
list_add_tail_rcu(&tconn->all_tconn, &drbd_tconns); list_add_tail_rcu(&connection->connections, &drbd_connections);
return tconn; return connection;
fail: fail:
kfree(tconn->current_epoch); kfree(connection->current_epoch);
free_cpumask_var(tconn->cpu_mask); free_cpumask_var(connection->cpu_mask);
drbd_free_socket(&tconn->meta); drbd_free_socket(&connection->meta);
drbd_free_socket(&tconn->data); drbd_free_socket(&connection->data);
kfree(tconn->name); kfree(connection->name);
kfree(tconn); kfree(connection);
return NULL; return NULL;
} }
void conn_destroy(struct kref *kref) void conn_destroy(struct kref *kref)
{ {
struct drbd_tconn *tconn = container_of(kref, struct drbd_tconn, kref); struct drbd_connection *connection = container_of(kref, struct drbd_connection, kref);
if (atomic_read(&tconn->current_epoch->epoch_size) != 0) if (atomic_read(&connection->current_epoch->epoch_size) != 0)
conn_err(tconn, "epoch_size:%d\n", atomic_read(&tconn->current_epoch->epoch_size)); conn_err(connection, "epoch_size:%d\n", atomic_read(&connection->current_epoch->epoch_size));
kfree(tconn->current_epoch); kfree(connection->current_epoch);
idr_destroy(&tconn->volumes); idr_destroy(&connection->volumes);
free_cpumask_var(tconn->cpu_mask); free_cpumask_var(connection->cpu_mask);
drbd_free_socket(&tconn->meta); drbd_free_socket(&connection->meta);
drbd_free_socket(&tconn->data); drbd_free_socket(&connection->data);
kfree(tconn->name); kfree(connection->name);
kfree(tconn->int_dig_in); kfree(connection->int_dig_in);
kfree(tconn->int_dig_vv); kfree(connection->int_dig_vv);
kfree(tconn); kfree(connection);
} }
static int init_submitter(struct drbd_device *device) static int init_submitter(struct drbd_device *device)
...@@ -2606,7 +2606,7 @@ static int init_submitter(struct drbd_device *device) ...@@ -2606,7 +2606,7 @@ static int init_submitter(struct drbd_device *device)
return 0; return 0;
} }
enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, int vnr) enum drbd_ret_code conn_new_minor(struct drbd_connection *connection, unsigned int minor, int vnr)
{ {
struct drbd_device *device; struct drbd_device *device;
struct gendisk *disk; struct gendisk *disk;
...@@ -2624,8 +2624,8 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, ...@@ -2624,8 +2624,8 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
if (!device) if (!device)
return ERR_NOMEM; return ERR_NOMEM;
kref_get(&tconn->kref); kref_get(&connection->kref);
device->tconn = tconn; device->connection = connection;
device->minor = minor; device->minor = minor;
device->vnr = vnr; device->vnr = vnr;
...@@ -2666,7 +2666,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, ...@@ -2666,7 +2666,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8); blk_queue_max_hw_sectors(q, DRBD_MAX_BIO_SIZE_SAFE >> 8);
blk_queue_bounce_limit(q, BLK_BOUNCE_ANY); blk_queue_bounce_limit(q, BLK_BOUNCE_ANY);
blk_queue_merge_bvec(q, drbd_merge_bvec); blk_queue_merge_bvec(q, drbd_merge_bvec);
q->queue_lock = &device->tconn->req_lock; /* needed since we use */ q->queue_lock = &device->connection->req_lock; /* needed since we use */
device->md_io_page = alloc_page(GFP_KERNEL); device->md_io_page = alloc_page(GFP_KERNEL);
if (!device->md_io_page) if (!device->md_io_page)
...@@ -2686,7 +2686,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, ...@@ -2686,7 +2686,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
goto out_no_minor_idr; goto out_no_minor_idr;
} }
vnr_got = idr_alloc(&tconn->volumes, device, vnr, vnr + 1, GFP_KERNEL); vnr_got = idr_alloc(&connection->volumes, device, vnr, vnr + 1, GFP_KERNEL);
if (vnr_got < 0) { if (vnr_got < 0) {
if (vnr_got == -ENOSPC) { if (vnr_got == -ENOSPC) {
err = ERR_INVALID_REQUEST; err = ERR_INVALID_REQUEST;
...@@ -2705,14 +2705,14 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, ...@@ -2705,14 +2705,14 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
kref_init(&device->kref); /* one ref for both idrs and the add_disk */ kref_init(&device->kref); /* one ref for both idrs and the add_disk */
/* inherit the connection state */ /* inherit the connection state */
device->state.conn = tconn->cstate; device->state.conn = connection->cstate;
if (device->state.conn == C_WF_REPORT_PARAMS) if (device->state.conn == C_WF_REPORT_PARAMS)
drbd_connected(device); drbd_connected(device);
return NO_ERROR; return NO_ERROR;
out_idr_remove_vol: out_idr_remove_vol:
idr_remove(&tconn->volumes, vnr_got); idr_remove(&connection->volumes, vnr_got);
out_idr_remove_minor: out_idr_remove_minor:
idr_remove(&minors, minor_got); idr_remove(&minors, minor_got);
synchronize_rcu(); synchronize_rcu();
...@@ -2726,7 +2726,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor, ...@@ -2726,7 +2726,7 @@ enum drbd_ret_code conn_new_minor(struct drbd_tconn *tconn, unsigned int minor,
blk_cleanup_queue(q); blk_cleanup_queue(q);
out_no_q: out_no_q:
kfree(device); kfree(device);
kref_put(&tconn->kref, &conn_destroy); kref_put(&connection->kref, &conn_destroy);
return err; return err;
} }
...@@ -2763,7 +2763,7 @@ int __init drbd_init(void) ...@@ -2763,7 +2763,7 @@ int __init drbd_init(void)
idr_init(&minors); idr_init(&minors);
rwlock_init(&global_state_lock); rwlock_init(&global_state_lock);
INIT_LIST_HEAD(&drbd_tconns); INIT_LIST_HEAD(&drbd_connections);
err = drbd_genl_register(); err = drbd_genl_register();
if (err) { if (err) {
...@@ -2821,33 +2821,33 @@ void drbd_free_bc(struct drbd_backing_dev *ldev) ...@@ -2821,33 +2821,33 @@ void drbd_free_bc(struct drbd_backing_dev *ldev)
kfree(ldev); kfree(ldev);
} }
void drbd_free_sock(struct drbd_tconn *tconn) void drbd_free_sock(struct drbd_connection *connection)
{ {
if (tconn->data.socket) { if (connection->data.socket) {
mutex_lock(&tconn->data.mutex); mutex_lock(&connection->data.mutex);
kernel_sock_shutdown(tconn->data.socket, SHUT_RDWR); kernel_sock_shutdown(connection->data.socket, SHUT_RDWR);
sock_release(tconn->data.socket); sock_release(connection->data.socket);
tconn->data.socket = NULL; connection->data.socket = NULL;
mutex_unlock(&tconn->data.mutex); mutex_unlock(&connection->data.mutex);
} }
if (tconn->meta.socket) { if (connection->meta.socket) {
mutex_lock(&tconn->meta.mutex); mutex_lock(&connection->meta.mutex);
kernel_sock_shutdown(tconn->meta.socket, SHUT_RDWR); kernel_sock_shutdown(connection->meta.socket, SHUT_RDWR);
sock_release(tconn->meta.socket); sock_release(connection->meta.socket);
tconn->meta.socket = NULL; connection->meta.socket = NULL;
mutex_unlock(&tconn->meta.mutex); mutex_unlock(&connection->meta.mutex);
} }
} }
/* meta data management */ /* meta data management */
void conn_md_sync(struct drbd_tconn *tconn) void conn_md_sync(struct drbd_connection *connection)
{ {
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
kref_get(&device->kref); kref_get(&device->kref);
rcu_read_unlock(); rcu_read_unlock();
drbd_md_sync(device); drbd_md_sync(device);
...@@ -3172,14 +3172,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev) ...@@ -3172,14 +3172,14 @@ int drbd_md_read(struct drbd_device *device, struct drbd_backing_dev *bdev)
rv = NO_ERROR; rv = NO_ERROR;
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
if (device->state.conn < C_CONNECTED) { if (device->state.conn < C_CONNECTED) {
unsigned int peer; unsigned int peer;
peer = be32_to_cpu(buffer->la_peer_max_bio_size); peer = be32_to_cpu(buffer->la_peer_max_bio_size);
peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE); peer = max(peer, DRBD_MAX_BIO_SIZE_SAFE);
device->peer_max_bio_size = peer; device->peer_max_bio_size = peer;
} }
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
err: err:
drbd_md_put_buffer(device); drbd_md_put_buffer(device);
...@@ -3454,7 +3454,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device, ...@@ -3454,7 +3454,7 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
void (*done)(struct drbd_device *, int), void (*done)(struct drbd_device *, int),
char *why, enum bm_flag flags) char *why, enum bm_flag flags)
{ {
D_ASSERT(current == device->tconn->worker.task); D_ASSERT(current == device->connection->worker.task);
D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags)); D_ASSERT(!test_bit(BITMAP_IO_QUEUED, &device->flags));
D_ASSERT(!test_bit(BITMAP_IO, &device->flags)); D_ASSERT(!test_bit(BITMAP_IO, &device->flags));
...@@ -3468,13 +3468,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device, ...@@ -3468,13 +3468,13 @@ void drbd_queue_bitmap_io(struct drbd_device *device,
device->bm_io_work.why = why; device->bm_io_work.why = why;
device->bm_io_work.flags = flags; device->bm_io_work.flags = flags;
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
set_bit(BITMAP_IO, &device->flags); set_bit(BITMAP_IO, &device->flags);
if (atomic_read(&device->ap_bio_cnt) == 0) { if (atomic_read(&device->ap_bio_cnt) == 0) {
if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags)) if (!test_and_set_bit(BITMAP_IO_QUEUED, &device->flags))
drbd_queue_work(&device->tconn->sender_work, &device->bm_io_work.w); drbd_queue_work(&device->connection->sender_work, &device->bm_io_work.w);
} }
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
} }
/** /**
...@@ -3491,7 +3491,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device * ...@@ -3491,7 +3491,7 @@ int drbd_bitmap_io(struct drbd_device *device, int (*io_fn)(struct drbd_device *
{ {
int rv; int rv;
D_ASSERT(current != device->tconn->worker.task); D_ASSERT(current != device->connection->worker.task);
if ((flags & BM_LOCKED_SET_ALLOWED) == 0) if ((flags & BM_LOCKED_SET_ALLOWED) == 0)
drbd_suspend_io(device); drbd_suspend_io(device);
...@@ -3532,7 +3532,7 @@ static void md_sync_timer_fn(unsigned long data) ...@@ -3532,7 +3532,7 @@ static void md_sync_timer_fn(unsigned long data)
/* must not double-queue! */ /* must not double-queue! */
if (list_empty(&device->md_sync_work.list)) if (list_empty(&device->md_sync_work.list))
drbd_queue_work_front(&device->tconn->sender_work, &device->md_sync_work); drbd_queue_work_front(&device->connection->sender_work, &device->md_sync_work);
} }
static int w_md_sync(struct drbd_work *w, int unused) static int w_md_sync(struct drbd_work *w, int unused)
...@@ -3631,7 +3631,7 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i) ...@@ -3631,7 +3631,7 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
long timeout; long timeout;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
if (!nc) { if (!nc) {
rcu_read_unlock(); rcu_read_unlock();
return -ETIMEDOUT; return -ETIMEDOUT;
...@@ -3642,10 +3642,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i) ...@@ -3642,10 +3642,10 @@ int drbd_wait_misc(struct drbd_device *device, struct drbd_interval *i)
/* Indicate to wake up device->misc_wait on progress. */ /* Indicate to wake up device->misc_wait on progress. */
i->waiting = true; i->waiting = true;
prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE); prepare_to_wait(&device->misc_wait, &wait, TASK_INTERRUPTIBLE);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
timeout = schedule_timeout(timeout); timeout = schedule_timeout(timeout);
finish_wait(&device->misc_wait, &wait); finish_wait(&device->misc_wait, &wait);
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
if (!timeout || device->state.conn < C_CONNECTED) if (!timeout || device->state.conn < C_CONNECTED)
return -ETIMEDOUT; return -ETIMEDOUT;
if (signal_pending(current)) if (signal_pending(current))
......
...@@ -104,7 +104,7 @@ static struct drbd_config_context { ...@@ -104,7 +104,7 @@ static struct drbd_config_context {
struct drbd_genlmsghdr *reply_dh; struct drbd_genlmsghdr *reply_dh;
/* resolved from attributes, if possible */ /* resolved from attributes, if possible */
struct drbd_device *device; struct drbd_device *device;
struct drbd_tconn *tconn; struct drbd_connection *connection;
} adm_ctx; } adm_ctx;
static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info) static void drbd_adm_send_reply(struct sk_buff *skb, struct genl_info *info)
...@@ -203,9 +203,9 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, ...@@ -203,9 +203,9 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)]; adm_ctx.my_addr = nested_attr_tb[__nla_type(T_ctx_my_addr)];
adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)]; adm_ctx.peer_addr = nested_attr_tb[__nla_type(T_ctx_peer_addr)];
if ((adm_ctx.my_addr && if ((adm_ctx.my_addr &&
nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.tconn->my_addr)) || nla_len(adm_ctx.my_addr) > sizeof(adm_ctx.connection->my_addr)) ||
(adm_ctx.peer_addr && (adm_ctx.peer_addr &&
nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.tconn->peer_addr))) { nla_len(adm_ctx.peer_addr) > sizeof(adm_ctx.connection->peer_addr))) {
err = -EINVAL; err = -EINVAL;
goto fail; goto fail;
} }
...@@ -213,19 +213,19 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, ...@@ -213,19 +213,19 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
adm_ctx.minor = d_in->minor; adm_ctx.minor = d_in->minor;
adm_ctx.device = minor_to_device(d_in->minor); adm_ctx.device = minor_to_device(d_in->minor);
adm_ctx.tconn = conn_get_by_name(adm_ctx.resource_name); adm_ctx.connection = conn_get_by_name(adm_ctx.resource_name);
if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) { if (!adm_ctx.device && (flags & DRBD_ADM_NEED_MINOR)) {
drbd_msg_put_info("unknown minor"); drbd_msg_put_info("unknown minor");
return ERR_MINOR_INVALID; return ERR_MINOR_INVALID;
} }
if (!adm_ctx.tconn && (flags & DRBD_ADM_NEED_RESOURCE)) { if (!adm_ctx.connection && (flags & DRBD_ADM_NEED_RESOURCE)) {
drbd_msg_put_info("unknown resource"); drbd_msg_put_info("unknown resource");
return ERR_INVALID_REQUEST; return ERR_INVALID_REQUEST;
} }
if (flags & DRBD_ADM_NEED_CONNECTION) { if (flags & DRBD_ADM_NEED_CONNECTION) {
if (adm_ctx.tconn && !(flags & DRBD_ADM_NEED_RESOURCE)) { if (adm_ctx.connection && !(flags & DRBD_ADM_NEED_RESOURCE)) {
drbd_msg_put_info("no resource name expected"); drbd_msg_put_info("no resource name expected");
return ERR_INVALID_REQUEST; return ERR_INVALID_REQUEST;
} }
...@@ -234,22 +234,22 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, ...@@ -234,22 +234,22 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
return ERR_INVALID_REQUEST; return ERR_INVALID_REQUEST;
} }
if (adm_ctx.my_addr && adm_ctx.peer_addr) if (adm_ctx.my_addr && adm_ctx.peer_addr)
adm_ctx.tconn = conn_get_by_addrs(nla_data(adm_ctx.my_addr), adm_ctx.connection = conn_get_by_addrs(nla_data(adm_ctx.my_addr),
nla_len(adm_ctx.my_addr), nla_len(adm_ctx.my_addr),
nla_data(adm_ctx.peer_addr), nla_data(adm_ctx.peer_addr),
nla_len(adm_ctx.peer_addr)); nla_len(adm_ctx.peer_addr));
if (!adm_ctx.tconn) { if (!adm_ctx.connection) {
drbd_msg_put_info("unknown connection"); drbd_msg_put_info("unknown connection");
return ERR_INVALID_REQUEST; return ERR_INVALID_REQUEST;
} }
} }
/* some more paranoia, if the request was over-determined */ /* some more paranoia, if the request was over-determined */
if (adm_ctx.device && adm_ctx.tconn && if (adm_ctx.device && adm_ctx.connection &&
adm_ctx.device->tconn != adm_ctx.tconn) { adm_ctx.device->connection != adm_ctx.connection) {
pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n", pr_warning("request: minor=%u, resource=%s; but that minor belongs to connection %s\n",
adm_ctx.minor, adm_ctx.resource_name, adm_ctx.minor, adm_ctx.resource_name,
adm_ctx.device->tconn->name); adm_ctx.device->connection->name);
drbd_msg_put_info("minor exists in different resource"); drbd_msg_put_info("minor exists in different resource");
return ERR_INVALID_REQUEST; return ERR_INVALID_REQUEST;
} }
...@@ -258,7 +258,7 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, ...@@ -258,7 +258,7 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
adm_ctx.volume != adm_ctx.device->vnr) { adm_ctx.volume != adm_ctx.device->vnr) {
pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n", pr_warning("request: minor=%u, volume=%u; but that minor is volume %u in %s\n",
adm_ctx.minor, adm_ctx.volume, adm_ctx.minor, adm_ctx.volume,
adm_ctx.device->vnr, adm_ctx.device->tconn->name); adm_ctx.device->vnr, adm_ctx.device->connection->name);
drbd_msg_put_info("minor exists as different volume"); drbd_msg_put_info("minor exists as different volume");
return ERR_INVALID_REQUEST; return ERR_INVALID_REQUEST;
} }
...@@ -273,9 +273,9 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info, ...@@ -273,9 +273,9 @@ static int drbd_adm_prepare(struct sk_buff *skb, struct genl_info *info,
static int drbd_adm_finish(struct genl_info *info, int retcode) static int drbd_adm_finish(struct genl_info *info, int retcode)
{ {
if (adm_ctx.tconn) { if (adm_ctx.connection) {
kref_put(&adm_ctx.tconn->kref, &conn_destroy); kref_put(&adm_ctx.connection->kref, &conn_destroy);
adm_ctx.tconn = NULL; adm_ctx.connection = NULL;
} }
if (!adm_ctx.reply_skb) if (!adm_ctx.reply_skb)
...@@ -286,29 +286,29 @@ static int drbd_adm_finish(struct genl_info *info, int retcode) ...@@ -286,29 +286,29 @@ static int drbd_adm_finish(struct genl_info *info, int retcode)
return 0; return 0;
} }
static void setup_khelper_env(struct drbd_tconn *tconn, char **envp) static void setup_khelper_env(struct drbd_connection *connection, char **envp)
{ {
char *afs; char *afs;
/* FIXME: A future version will not allow this case. */ /* FIXME: A future version will not allow this case. */
if (tconn->my_addr_len == 0 || tconn->peer_addr_len == 0) if (connection->my_addr_len == 0 || connection->peer_addr_len == 0)
return; return;
switch (((struct sockaddr *)&tconn->peer_addr)->sa_family) { switch (((struct sockaddr *)&connection->peer_addr)->sa_family) {
case AF_INET6: case AF_INET6:
afs = "ipv6"; afs = "ipv6";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6", snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI6",
&((struct sockaddr_in6 *)&tconn->peer_addr)->sin6_addr); &((struct sockaddr_in6 *)&connection->peer_addr)->sin6_addr);
break; break;
case AF_INET: case AF_INET:
afs = "ipv4"; afs = "ipv4";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
&((struct sockaddr_in *)&tconn->peer_addr)->sin_addr); &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
break; break;
default: default:
afs = "ssocks"; afs = "ssocks";
snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4", snprintf(envp[4], 60, "DRBD_PEER_ADDRESS=%pI4",
&((struct sockaddr_in *)&tconn->peer_addr)->sin_addr); &((struct sockaddr_in *)&connection->peer_addr)->sin_addr);
} }
snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs); snprintf(envp[3], 20, "DRBD_PEER_AF=%s", afs);
} }
...@@ -323,15 +323,15 @@ int drbd_khelper(struct drbd_device *device, char *cmd) ...@@ -323,15 +323,15 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
NULL }; NULL };
char mb[12]; char mb[12];
char *argv[] = {usermode_helper, cmd, mb, NULL }; char *argv[] = {usermode_helper, cmd, mb, NULL };
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
struct sib_info sib; struct sib_info sib;
int ret; int ret;
if (current == tconn->worker.task) if (current == connection->worker.task)
set_bit(CALLBACK_PENDING, &tconn->flags); set_bit(CALLBACK_PENDING, &connection->flags);
snprintf(mb, 12, "minor-%d", device_to_minor(device)); snprintf(mb, 12, "minor-%d", device_to_minor(device));
setup_khelper_env(tconn, envp); setup_khelper_env(connection, envp);
/* The helper may take some time. /* The helper may take some time.
* write out any unsynced meta data changes now */ * write out any unsynced meta data changes now */
...@@ -354,8 +354,8 @@ int drbd_khelper(struct drbd_device *device, char *cmd) ...@@ -354,8 +354,8 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
sib.helper_exit_code = ret; sib.helper_exit_code = ret;
drbd_bcast_event(device, &sib); drbd_bcast_event(device, &sib);
if (current == tconn->worker.task) if (current == connection->worker.task)
clear_bit(CALLBACK_PENDING, &tconn->flags); clear_bit(CALLBACK_PENDING, &connection->flags);
if (ret < 0) /* Ignore any ERRNOs we got. */ if (ret < 0) /* Ignore any ERRNOs we got. */
ret = 0; ret = 0;
...@@ -363,7 +363,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd) ...@@ -363,7 +363,7 @@ int drbd_khelper(struct drbd_device *device, char *cmd)
return ret; return ret;
} }
static int conn_khelper(struct drbd_tconn *tconn, char *cmd) static int conn_khelper(struct drbd_connection *connection, char *cmd)
{ {
char *envp[] = { "HOME=/", char *envp[] = { "HOME=/",
"TERM=linux", "TERM=linux",
...@@ -371,23 +371,23 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd) ...@@ -371,23 +371,23 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd)
(char[20]) { }, /* address family */ (char[20]) { }, /* address family */
(char[60]) { }, /* address */ (char[60]) { }, /* address */
NULL }; NULL };
char *argv[] = {usermode_helper, cmd, tconn->name, NULL }; char *argv[] = {usermode_helper, cmd, connection->name, NULL };
int ret; int ret;
setup_khelper_env(tconn, envp); setup_khelper_env(connection, envp);
conn_md_sync(tconn); conn_md_sync(connection);
conn_info(tconn, "helper command: %s %s %s\n", usermode_helper, cmd, tconn->name); conn_info(connection, "helper command: %s %s %s\n", usermode_helper, cmd, connection->name);
/* TODO: conn_bcast_event() ?? */ /* TODO: conn_bcast_event() ?? */
ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC); ret = call_usermodehelper(usermode_helper, argv, envp, UMH_WAIT_PROC);
if (ret) if (ret)
conn_warn(tconn, "helper command: %s %s %s exit code %u (0x%x)\n", conn_warn(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, tconn->name, usermode_helper, cmd, connection->name,
(ret >> 8) & 0xff, ret); (ret >> 8) & 0xff, ret);
else else
conn_info(tconn, "helper command: %s %s %s exit code %u (0x%x)\n", conn_info(connection, "helper command: %s %s %s exit code %u (0x%x)\n",
usermode_helper, cmd, tconn->name, usermode_helper, cmd, connection->name,
(ret >> 8) & 0xff, ret); (ret >> 8) & 0xff, ret);
/* TODO: conn_bcast_event() ?? */ /* TODO: conn_bcast_event() ?? */
...@@ -397,14 +397,14 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd) ...@@ -397,14 +397,14 @@ static int conn_khelper(struct drbd_tconn *tconn, char *cmd)
return ret; return ret;
} }
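[Editor's note] Both drbd_khelper() and conn_khelper() decode the result of call_usermodehelper() as (ret >> 8) & 0xff, which extracts the child's exit code from a wait status, i.e. what WEXITSTATUS() does in userspace. A hedged fork()/execve()/waitpid() sketch of the same argv/envp layout and decoding; run_helper and the example helper path are illustrative only:

        #include <stdio.h>
        #include <sys/types.h>
        #include <sys/wait.h>
        #include <unistd.h>

        /* Roughly what "usermode_helper cmd resource_name" amounts to in userspace.
         * Returns the raw wait status, like call_usermodehelper(..., UMH_WAIT_PROC). */
        static int run_helper(const char *helper, const char *cmd,
                              const char *name, char *const envp[])
        {
                char *const argv[] = {
                        (char *)helper, (char *)cmd, (char *)name, NULL
                };
                pid_t pid = fork();
                int status;

                if (pid < 0)
                        return -1;
                if (pid == 0) {
                        execve(helper, argv, envp);
                        _exit(127);             /* exec failed */
                }
                if (waitpid(pid, &status, 0) < 0)
                        return -1;
                return status;
        }

        int main(void)
        {
                char *const envp[] = { "HOME=/", "TERM=linux",
                                       "PATH=/sbin:/usr/sbin:/bin:/usr/bin", NULL };
                int ret = run_helper("/usr/lib/drbd/notify.sh", "fence-peer",
                                     "r0", envp);

                /* (ret >> 8) & 0xff is the child's exit code, i.e. WEXITSTATUS(ret) */
                printf("helper exit code %d\n", (ret >> 8) & 0xff);
                return 0;
        }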
static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn) static enum drbd_fencing_p highest_fencing_policy(struct drbd_connection *connection)
{ {
enum drbd_fencing_p fp = FP_NOT_AVAIL; enum drbd_fencing_p fp = FP_NOT_AVAIL;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
if (get_ldev_if_state(device, D_CONSISTENT)) { if (get_ldev_if_state(device, D_CONSISTENT)) {
fp = max_t(enum drbd_fencing_p, fp, fp = max_t(enum drbd_fencing_p, fp,
rcu_dereference(device->ldev->disk_conf)->fencing); rcu_dereference(device->ldev->disk_conf)->fencing);
...@@ -416,7 +416,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn) ...@@ -416,7 +416,7 @@ static enum drbd_fencing_p highest_fencing_policy(struct drbd_tconn *tconn)
return fp; return fp;
} }
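[Editor's note] highest_fencing_policy() walks every volume of the connection and keeps the strictest fencing setting it finds, counting only volumes whose local disk is at least Consistent. A tiny self-contained reduction over an array plays the role of the idr walk here; the enum only mirrors the ordering of the kernel's drbd_fencing_p and its values are assumptions:

        #include <stdio.h>

        /* Further down means stricter; values are illustrative, not drbd_int.h. */
        enum fencing_policy {
                FP_NOT_AVAIL = -1,      /* volume has no usable disk */
                FP_DONT_CARE = 0,
                FP_RESOURCE,
                FP_STONITH,
        };

        static enum fencing_policy max_policy(enum fencing_policy a,
                                              enum fencing_policy b)
        {
                return a > b ? a : b;   /* what max_t() does in the kernel code */
        }

        /* Stand-in for the idr_for_each_entry() walk over connection->volumes. */
        static enum fencing_policy highest_policy(const enum fencing_policy *vols, int n)
        {
                enum fencing_policy fp = FP_NOT_AVAIL;
                int i;

                for (i = 0; i < n; i++)
                        fp = max_policy(fp, vols[i]);
                return fp;
        }

        int main(void)
        {
                enum fencing_policy vols[] = { FP_DONT_CARE, FP_RESOURCE, FP_NOT_AVAIL };

                printf("highest policy: %d\n", highest_policy(vols, 3));
                return 0;
        }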
bool conn_try_outdate_peer(struct drbd_tconn *tconn) bool conn_try_outdate_peer(struct drbd_connection *connection)
{ {
unsigned int connect_cnt; unsigned int connect_cnt;
union drbd_state mask = { }; union drbd_state mask = { };
...@@ -425,26 +425,26 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn) ...@@ -425,26 +425,26 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
char *ex_to_string; char *ex_to_string;
int r; int r;
if (tconn->cstate >= C_WF_REPORT_PARAMS) { if (connection->cstate >= C_WF_REPORT_PARAMS) {
conn_err(tconn, "Expected cstate < C_WF_REPORT_PARAMS\n"); conn_err(connection, "Expected cstate < C_WF_REPORT_PARAMS\n");
return false; return false;
} }
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
connect_cnt = tconn->connect_cnt; connect_cnt = connection->connect_cnt;
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
fp = highest_fencing_policy(tconn); fp = highest_fencing_policy(connection);
switch (fp) { switch (fp) {
case FP_NOT_AVAIL: case FP_NOT_AVAIL:
conn_warn(tconn, "Not fencing peer, I'm not even Consistent myself.\n"); conn_warn(connection, "Not fencing peer, I'm not even Consistent myself.\n");
goto out; goto out;
case FP_DONT_CARE: case FP_DONT_CARE:
return true; return true;
default: ; default: ;
} }
r = conn_khelper(tconn, "fence-peer"); r = conn_khelper(connection, "fence-peer");
switch ((r>>8) & 0xff) { switch ((r>>8) & 0xff) {
case 3: /* peer is inconsistent */ case 3: /* peer is inconsistent */
...@@ -458,7 +458,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn) ...@@ -458,7 +458,7 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
val.pdsk = D_OUTDATED; val.pdsk = D_OUTDATED;
break; break;
case 5: /* peer was down */ case 5: /* peer was down */
if (conn_highest_disk(tconn) == D_UP_TO_DATE) { if (conn_highest_disk(connection) == D_UP_TO_DATE) {
/* we will(have) create(d) a new UUID anyways... */ /* we will(have) create(d) a new UUID anyways... */
ex_to_string = "peer is unreachable, assumed to be dead"; ex_to_string = "peer is unreachable, assumed to be dead";
mask.pdsk = D_MASK; mask.pdsk = D_MASK;
...@@ -471,65 +471,65 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn) ...@@ -471,65 +471,65 @@ bool conn_try_outdate_peer(struct drbd_tconn *tconn)
* This is useful when an unconnected R_SECONDARY is asked to * This is useful when an unconnected R_SECONDARY is asked to
* become R_PRIMARY, but finds the other peer being active. */ * become R_PRIMARY, but finds the other peer being active. */
ex_to_string = "peer is active"; ex_to_string = "peer is active";
conn_warn(tconn, "Peer is primary, outdating myself.\n"); conn_warn(connection, "Peer is primary, outdating myself.\n");
mask.disk = D_MASK; mask.disk = D_MASK;
val.disk = D_OUTDATED; val.disk = D_OUTDATED;
break; break;
case 7: case 7:
if (fp != FP_STONITH) if (fp != FP_STONITH)
conn_err(tconn, "fence-peer() = 7 && fencing != Stonith !!!\n"); conn_err(connection, "fence-peer() = 7 && fencing != Stonith !!!\n");
ex_to_string = "peer was stonithed"; ex_to_string = "peer was stonithed";
mask.pdsk = D_MASK; mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED; val.pdsk = D_OUTDATED;
break; break;
default: default:
/* The script is broken ... */ /* The script is broken ... */
conn_err(tconn, "fence-peer helper broken, returned %d\n", (r>>8)&0xff); conn_err(connection, "fence-peer helper broken, returned %d\n", (r>>8)&0xff);
return false; /* Eventually leave IO frozen */ return false; /* Eventually leave IO frozen */
} }
conn_info(tconn, "fence-peer helper returned %d (%s)\n", conn_info(connection, "fence-peer helper returned %d (%s)\n",
(r>>8) & 0xff, ex_to_string); (r>>8) & 0xff, ex_to_string);
out: out:
/* Not using /* Not using
conn_request_state(tconn, mask, val, CS_VERBOSE); conn_request_state(connection, mask, val, CS_VERBOSE);
here, because we might were able to re-establish the connection in the here, because we might were able to re-establish the connection in the
meantime. */ meantime. */
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
if (tconn->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &tconn->flags)) { if (connection->cstate < C_WF_REPORT_PARAMS && !test_bit(STATE_SENT, &connection->flags)) {
if (tconn->connect_cnt != connect_cnt) if (connection->connect_cnt != connect_cnt)
/* In case the connection was established and droped /* In case the connection was established and droped
while the fence-peer handler was running, ignore it */ while the fence-peer handler was running, ignore it */
conn_info(tconn, "Ignoring fence-peer exit code\n"); conn_info(connection, "Ignoring fence-peer exit code\n");
else else
_conn_request_state(tconn, mask, val, CS_VERBOSE); _conn_request_state(connection, mask, val, CS_VERBOSE);
} }
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
return conn_highest_pdsk(tconn) <= D_OUTDATED; return conn_highest_pdsk(connection) <= D_OUTDATED;
} }
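[Editor's note] Note how conn_try_outdate_peer() snapshots connection->connect_cnt under req_lock before running the slow fence-peer helper and compares it again afterwards, so a connection that was re-established and dropped while the helper ran does not have its state clobbered by a stale result. A compact userspace sketch of that generation-counter pattern; struct peer_state and apply_fencing_result are invented names:

        #include <pthread.h>
        #include <stdbool.h>
        #include <stdio.h>

        struct peer_state {
                pthread_mutex_t lock;           /* stands in for connection->req_lock */
                unsigned int connect_cnt;       /* bumped on every (re)connect */
                bool peer_outdated;
        };

        static int run_fence_peer_helper(void)
        {
                /* placeholder for conn_khelper(connection, "fence-peer"),
                 * which may take a long time */
                return 7;
        }

        static void apply_fencing_result(struct peer_state *ps)
        {
                unsigned int snapshot;

                pthread_mutex_lock(&ps->lock);
                snapshot = ps->connect_cnt;     /* generation before the helper runs */
                pthread_mutex_unlock(&ps->lock);

                int r = run_fence_peer_helper();        /* lock dropped: may sleep */

                pthread_mutex_lock(&ps->lock);
                if (ps->connect_cnt != snapshot)
                        /* connection came and went meanwhile: result is stale */
                        printf("ignoring fence-peer exit code %d\n", r);
                else
                        ps->peer_outdated = true;       /* commit the state change */
                pthread_mutex_unlock(&ps->lock);
        }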
static int _try_outdate_peer_async(void *data) static int _try_outdate_peer_async(void *data)
{ {
struct drbd_tconn *tconn = (struct drbd_tconn *)data; struct drbd_connection *connection = (struct drbd_connection *)data;
conn_try_outdate_peer(tconn); conn_try_outdate_peer(connection);
kref_put(&tconn->kref, &conn_destroy); kref_put(&connection->kref, &conn_destroy);
return 0; return 0;
} }
void conn_try_outdate_peer_async(struct drbd_tconn *tconn) void conn_try_outdate_peer_async(struct drbd_connection *connection)
{ {
struct task_struct *opa; struct task_struct *opa;
kref_get(&tconn->kref); kref_get(&connection->kref);
opa = kthread_run(_try_outdate_peer_async, tconn, "drbd_async_h"); opa = kthread_run(_try_outdate_peer_async, connection, "drbd_async_h");
if (IS_ERR(opa)) { if (IS_ERR(opa)) {
conn_err(tconn, "out of mem, failed to invoke fence-peer helper\n"); conn_err(connection, "out of mem, failed to invoke fence-peer helper\n");
kref_put(&tconn->kref, &conn_destroy); kref_put(&connection->kref, &conn_destroy);
} }
} }
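[Editor's note] conn_try_outdate_peer_async() takes a reference on the connection before handing it to kthread_run() and drops that reference itself only if the thread cannot be started; on success the new thread owns the reference and puts it when it finishes. A minimal pthread analogue of that hand-off; struct object, get_ref/put_ref and spawn_worker are illustrative names, not DRBD code:

        #include <pthread.h>
        #include <stdatomic.h>
        #include <stdio.h>
        #include <stdlib.h>

        struct object {
                atomic_int refcount;
        };

        static void get_ref(struct object *obj) { atomic_fetch_add(&obj->refcount, 1); }

        static void put_ref(struct object *obj)
        {
                if (atomic_fetch_sub(&obj->refcount, 1) == 1)
                        free(obj);      /* last reference gone: destroy, like conn_destroy() */
        }

        static void *worker(void *data)
        {
                struct object *obj = data;

                /* ... slow work, e.g. running the fence-peer helper ... */
                put_ref(obj);           /* the thread owns one reference; drop it when done */
                return NULL;
        }

        /* Take a reference for the new thread; give it back ourselves only if
         * the thread cannot be created, mirroring the kref_get()/kref_put() above. */
        static void spawn_worker(struct object *obj)
        {
                pthread_t tid;

                get_ref(obj);
                if (pthread_create(&tid, NULL, worker, obj) != 0) {
                        fprintf(stderr, "failed to start worker\n");
                        put_ref(obj);
                } else {
                        pthread_detach(tid);    /* fire and forget, like kthread_run() here */
                }
        }

        int main(void)
        {
                struct object *obj = malloc(sizeof(*obj));

                atomic_init(&obj->refcount, 1); /* caller's own reference */
                spawn_worker(obj);
                put_ref(obj);                   /* drop the caller's reference */
                return 0;                       /* a real program would wait for the worker */
        }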
...@@ -544,7 +544,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -544,7 +544,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
union drbd_state mask, val; union drbd_state mask, val;
if (new_role == R_PRIMARY) if (new_role == R_PRIMARY)
request_ping(device->tconn); /* Detect a dead peer ASAP */ request_ping(device->connection); /* Detect a dead peer ASAP */
mutex_lock(device->state_mutex); mutex_lock(device->state_mutex);
...@@ -575,7 +575,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -575,7 +575,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
device->state.disk == D_CONSISTENT && mask.pdsk == 0) { device->state.disk == D_CONSISTENT && mask.pdsk == 0) {
D_ASSERT(device->state.pdsk == D_UNKNOWN); D_ASSERT(device->state.pdsk == D_UNKNOWN);
if (conn_try_outdate_peer(device->tconn)) { if (conn_try_outdate_peer(device->connection)) {
val.disk = D_UP_TO_DATE; val.disk = D_UP_TO_DATE;
mask.disk = D_MASK; mask.disk = D_MASK;
} }
...@@ -585,7 +585,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -585,7 +585,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
if (rv == SS_NOTHING_TO_DO) if (rv == SS_NOTHING_TO_DO)
goto out; goto out;
if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) { if (rv == SS_PRIMARY_NOP && mask.pdsk == 0) {
if (!conn_try_outdate_peer(device->tconn) && force) { if (!conn_try_outdate_peer(device->connection) && force) {
dev_warn(DEV, "Forced into split brain situation!\n"); dev_warn(DEV, "Forced into split brain situation!\n");
mask.pdsk = D_MASK; mask.pdsk = D_MASK;
val.pdsk = D_OUTDATED; val.pdsk = D_OUTDATED;
...@@ -598,7 +598,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -598,7 +598,7 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
retry at most once more in this case. */ retry at most once more in this case. */
int timeo; int timeo;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1; timeo = nc ? (nc->ping_timeo + 1) * HZ / 10 : 1;
rcu_read_unlock(); rcu_read_unlock();
schedule_timeout_interruptible(timeo); schedule_timeout_interruptible(timeo);
...@@ -633,11 +633,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force) ...@@ -633,11 +633,11 @@ drbd_set_role(struct drbd_device *device, enum drbd_role new_role, int force)
put_ldev(device); put_ldev(device);
} }
} else { } else {
mutex_lock(&device->tconn->conf_update); mutex_lock(&device->connection->conf_update);
nc = device->tconn->net_conf; nc = device->connection->net_conf;
if (nc) if (nc)
nc->discard_my_data = 0; /* without copy; single bit op is atomic */ nc->discard_my_data = 0; /* without copy; single bit op is atomic */
mutex_unlock(&device->tconn->conf_update); mutex_unlock(&device->connection->conf_update);
set_disk_ro(device->vdisk, false); set_disk_ro(device->vdisk, false);
if (get_ldev(device)) { if (get_ldev(device)) {
...@@ -1134,12 +1134,12 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device) ...@@ -1134,12 +1134,12 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
Because new from 8.3.8 onwards the peer can use multiple Because new from 8.3.8 onwards the peer can use multiple
BIOs for a single peer_request */ BIOs for a single peer_request */
if (device->state.conn >= C_WF_REPORT_PARAMS) { if (device->state.conn >= C_WF_REPORT_PARAMS) {
if (device->tconn->agreed_pro_version < 94) if (device->connection->agreed_pro_version < 94)
peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET); peer = min(device->peer_max_bio_size, DRBD_MAX_SIZE_H80_PACKET);
/* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */ /* Correct old drbd (up to 8.3.7) if it believes it can do more than 32KiB */
else if (device->tconn->agreed_pro_version == 94) else if (device->connection->agreed_pro_version == 94)
peer = DRBD_MAX_SIZE_H80_PACKET; peer = DRBD_MAX_SIZE_H80_PACKET;
else if (device->tconn->agreed_pro_version < 100) else if (device->connection->agreed_pro_version < 100)
peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */ peer = DRBD_MAX_BIO_SIZE_P95; /* drbd 8.3.8 onwards, before 8.4.0 */
else else
peer = DRBD_MAX_BIO_SIZE; peer = DRBD_MAX_BIO_SIZE;
...@@ -1157,25 +1157,25 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device) ...@@ -1157,25 +1157,25 @@ void drbd_reconsider_max_bio_size(struct drbd_device *device)
} }
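[Editor's note] The drbd_reconsider_max_bio_size() hunk caps the request size the peer is assumed to handle based on the agreed protocol version: pre-94 peers are limited to the old 32KiB packet size and to what they advertised, exactly 94 gets the 32KiB cap, versions below 100 get the 8.3.8+ limit, and protocol 100 and later get the full DRBD_MAX_BIO_SIZE. A stand-alone mapping function for that branch; the numeric constants below (32KiB, 128KiB, 1MiB) are assumptions about drbd_int.h of that era, not quoted from it:

        #include <stdio.h>

        #define DRBD_MAX_SIZE_H80_PACKET        (1U << 15)      /* assumed: 32 KiB, up to drbd 8.3.7 */
        #define DRBD_MAX_BIO_SIZE_P95           (1U << 17)      /* assumed: 128 KiB, 8.3.8 .. before 8.4.0 */
        #define DRBD_MAX_BIO_SIZE               (1U << 20)      /* assumed: 1 MiB */

        static unsigned int min_u(unsigned int a, unsigned int b) { return a < b ? a : b; }

        /* What the connected branch of drbd_reconsider_max_bio_size() computes. */
        static unsigned int peer_max_bio_size(int agreed_pro_version,
                                              unsigned int advertised)
        {
                if (agreed_pro_version < 94)
                        return min_u(advertised, DRBD_MAX_SIZE_H80_PACKET);
                /* old drbd up to 8.3.7 may claim more than 32KiB; correct that */
                if (agreed_pro_version == 94)
                        return DRBD_MAX_SIZE_H80_PACKET;
                if (agreed_pro_version < 100)
                        return DRBD_MAX_BIO_SIZE_P95;
                return DRBD_MAX_BIO_SIZE;
        }

        int main(void)
        {
                printf("protocol 96 -> %u bytes\n",
                       peer_max_bio_size(96, DRBD_MAX_BIO_SIZE));
                return 0;
        }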
/* Starts the worker thread */ /* Starts the worker thread */
static void conn_reconfig_start(struct drbd_tconn *tconn) static void conn_reconfig_start(struct drbd_connection *connection)
{ {
drbd_thread_start(&tconn->worker); drbd_thread_start(&connection->worker);
conn_flush_workqueue(tconn); conn_flush_workqueue(connection);
} }
/* if still unconfigured, stops worker again. */ /* if still unconfigured, stops worker again. */
static void conn_reconfig_done(struct drbd_tconn *tconn) static void conn_reconfig_done(struct drbd_connection *connection)
{ {
bool stop_threads; bool stop_threads;
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
stop_threads = conn_all_vols_unconf(tconn) && stop_threads = conn_all_vols_unconf(connection) &&
tconn->cstate == C_STANDALONE; connection->cstate == C_STANDALONE;
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
if (stop_threads) { if (stop_threads) {
/* asender is implicitly stopped by receiver /* asender is implicitly stopped by receiver
* in conn_disconnect() */ * in conn_disconnect() */
drbd_thread_stop(&tconn->receiver); drbd_thread_stop(&connection->receiver);
drbd_thread_stop(&tconn->worker); drbd_thread_stop(&connection->worker);
} }
} }
...@@ -1190,10 +1190,10 @@ static void drbd_suspend_al(struct drbd_device *device) ...@@ -1190,10 +1190,10 @@ static void drbd_suspend_al(struct drbd_device *device)
} }
drbd_al_shrink(device); drbd_al_shrink(device);
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
if (device->state.conn < C_CONNECTED) if (device->state.conn < C_CONNECTED)
s = !test_and_set_bit(AL_SUSPENDED, &device->flags); s = !test_and_set_bit(AL_SUSPENDED, &device->flags);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
lc_unlock(device->act_log); lc_unlock(device->act_log);
if (s) if (s)
...@@ -1264,7 +1264,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -1264,7 +1264,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
goto fail; goto fail;
} }
mutex_lock(&device->tconn->conf_update); mutex_lock(&device->connection->conf_update);
old_disk_conf = device->ldev->disk_conf; old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf; *new_disk_conf = *old_disk_conf;
if (should_set_defaults(info)) if (should_set_defaults(info))
...@@ -1327,7 +1327,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -1327,7 +1327,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
rcu_assign_pointer(device->rs_plan_s, new_plan); rcu_assign_pointer(device->rs_plan_s, new_plan);
} }
mutex_unlock(&device->tconn->conf_update); mutex_unlock(&device->connection->conf_update);
if (new_disk_conf->al_updates) if (new_disk_conf->al_updates)
device->ldev->md.flags &= ~MDF_AL_DISABLED; device->ldev->md.flags &= ~MDF_AL_DISABLED;
...@@ -1339,7 +1339,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -1339,7 +1339,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
else else
set_bit(MD_NO_FUA, &device->flags); set_bit(MD_NO_FUA, &device->flags);
drbd_bump_write_ordering(device->tconn, WO_bdev_flush); drbd_bump_write_ordering(device->connection, WO_bdev_flush);
drbd_md_sync(device); drbd_md_sync(device);
...@@ -1353,7 +1353,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -1353,7 +1353,7 @@ int drbd_adm_disk_opts(struct sk_buff *skb, struct genl_info *info)
goto success; goto success;
fail_unlock: fail_unlock:
mutex_unlock(&device->tconn->conf_update); mutex_unlock(&device->connection->conf_update);
fail: fail:
kfree(new_disk_conf); kfree(new_disk_conf);
kfree(new_plan); kfree(new_plan);
...@@ -1388,7 +1388,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1388,7 +1388,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto finish; goto finish;
device = adm_ctx.device; device = adm_ctx.device;
conn_reconfig_start(device->tconn); conn_reconfig_start(device->connection);
/* if you want to reconfigure, please tear down first */ /* if you want to reconfigure, please tear down first */
if (device->state.disk > D_DISKLESS) { if (device->state.disk > D_DISKLESS) {
...@@ -1455,7 +1455,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1455,7 +1455,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
goto fail; goto fail;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
if (nc) { if (nc) {
if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) { if (new_disk_conf->fencing == FP_STONITH && nc->wire_protocol == DRBD_PROT_A) {
rcu_read_unlock(); rcu_read_unlock();
...@@ -1636,7 +1636,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1636,7 +1636,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
new_disk_conf = NULL; new_disk_conf = NULL;
new_plan = NULL; new_plan = NULL;
drbd_bump_write_ordering(device->tconn, WO_bdev_flush); drbd_bump_write_ordering(device->connection, WO_bdev_flush);
if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY)) if (drbd_md_test_flag(device->ldev, MDF_CRASHED_PRIMARY))
set_bit(CRASHED_PRIMARY, &device->flags); set_bit(CRASHED_PRIMARY, &device->flags);
...@@ -1644,7 +1644,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1644,7 +1644,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
clear_bit(CRASHED_PRIMARY, &device->flags); clear_bit(CRASHED_PRIMARY, &device->flags);
if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) && if (drbd_md_test_flag(device->ldev, MDF_PRIMARY_IND) &&
!(device->state.role == R_PRIMARY && device->tconn->susp_nod)) !(device->state.role == R_PRIMARY && device->connection->susp_nod))
set_bit(CRASHED_PRIMARY, &device->flags); set_bit(CRASHED_PRIMARY, &device->flags);
device->send_cnt = 0; device->send_cnt = 0;
...@@ -1702,7 +1702,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1702,7 +1702,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
if (_drbd_bm_total_weight(device) == drbd_bm_bits(device)) if (_drbd_bm_total_weight(device) == drbd_bm_bits(device))
drbd_suspend_al(device); /* IO is still suspended here... */ drbd_suspend_al(device); /* IO is still suspended here... */
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
os = drbd_read_state(device); os = drbd_read_state(device);
ns = os; ns = os;
/* If MDF_CONSISTENT is not set go into inconsistent state, /* If MDF_CONSISTENT is not set go into inconsistent state,
...@@ -1754,7 +1754,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1754,7 +1754,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
} }
rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL); rv = _drbd_set_state(device, ns, CS_VERBOSE, NULL);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
if (rv < SS_SUCCESS) if (rv < SS_SUCCESS)
goto force_diskless_dec; goto force_diskless_dec;
...@@ -1771,7 +1771,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1771,7 +1771,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE); kobject_uevent(&disk_to_dev(device->vdisk)->kobj, KOBJ_CHANGE);
put_ldev(device); put_ldev(device);
conn_reconfig_done(device->tconn); conn_reconfig_done(device->connection);
drbd_adm_finish(info, retcode); drbd_adm_finish(info, retcode);
return 0; return 0;
...@@ -1781,7 +1781,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info) ...@@ -1781,7 +1781,7 @@ int drbd_adm_attach(struct sk_buff *skb, struct genl_info *info)
drbd_force_state(device, NS(disk, D_DISKLESS)); drbd_force_state(device, NS(disk, D_DISKLESS));
drbd_md_sync(device); drbd_md_sync(device);
fail: fail:
conn_reconfig_done(device->tconn); conn_reconfig_done(device->connection);
if (nbc) { if (nbc) {
if (nbc->backing_bdev) if (nbc->backing_bdev)
blkdev_put(nbc->backing_bdev, blkdev_put(nbc->backing_bdev,
...@@ -1860,14 +1860,14 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info) ...@@ -1860,14 +1860,14 @@ int drbd_adm_detach(struct sk_buff *skb, struct genl_info *info)
return 0; return 0;
} }
static bool conn_resync_running(struct drbd_tconn *tconn) static bool conn_resync_running(struct drbd_connection *connection)
{ {
struct drbd_device *device; struct drbd_device *device;
bool rv = false; bool rv = false;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
if (device->state.conn == C_SYNC_SOURCE || if (device->state.conn == C_SYNC_SOURCE ||
device->state.conn == C_SYNC_TARGET || device->state.conn == C_SYNC_TARGET ||
device->state.conn == C_PAUSED_SYNC_S || device->state.conn == C_PAUSED_SYNC_S ||
...@@ -1881,14 +1881,14 @@ static bool conn_resync_running(struct drbd_tconn *tconn) ...@@ -1881,14 +1881,14 @@ static bool conn_resync_running(struct drbd_tconn *tconn)
return rv; return rv;
} }
static bool conn_ov_running(struct drbd_tconn *tconn) static bool conn_ov_running(struct drbd_connection *connection)
{ {
struct drbd_device *device; struct drbd_device *device;
bool rv = false; bool rv = false;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
if (device->state.conn == C_VERIFY_S || if (device->state.conn == C_VERIFY_S ||
device->state.conn == C_VERIFY_T) { device->state.conn == C_VERIFY_T) {
rv = true; rv = true;
...@@ -1901,12 +1901,12 @@ static bool conn_ov_running(struct drbd_tconn *tconn) ...@@ -1901,12 +1901,12 @@ static bool conn_ov_running(struct drbd_tconn *tconn)
} }
static enum drbd_ret_code static enum drbd_ret_code
_check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct net_conf *new_conf) _check_net_options(struct drbd_connection *connection, struct net_conf *old_conf, struct net_conf *new_conf)
{ {
struct drbd_device *device; struct drbd_device *device;
int i; int i;
if (old_conf && tconn->cstate == C_WF_REPORT_PARAMS && tconn->agreed_pro_version < 100) { if (old_conf && connection->cstate == C_WF_REPORT_PARAMS && connection->agreed_pro_version < 100) {
if (new_conf->wire_protocol != old_conf->wire_protocol) if (new_conf->wire_protocol != old_conf->wire_protocol)
return ERR_NEED_APV_100; return ERR_NEED_APV_100;
...@@ -1918,15 +1918,15 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n ...@@ -1918,15 +1918,15 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
} }
if (!new_conf->two_primaries && if (!new_conf->two_primaries &&
conn_highest_role(tconn) == R_PRIMARY && conn_highest_role(connection) == R_PRIMARY &&
conn_highest_peer(tconn) == R_PRIMARY) conn_highest_peer(connection) == R_PRIMARY)
return ERR_NEED_ALLOW_TWO_PRI; return ERR_NEED_ALLOW_TWO_PRI;
if (new_conf->two_primaries && if (new_conf->two_primaries &&
(new_conf->wire_protocol != DRBD_PROT_C)) (new_conf->wire_protocol != DRBD_PROT_C))
return ERR_NOT_PROTO_C; return ERR_NOT_PROTO_C;
idr_for_each_entry(&tconn->volumes, device, i) { idr_for_each_entry(&connection->volumes, device, i) {
if (get_ldev(device)) { if (get_ldev(device)) {
enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing; enum drbd_fencing_p fp = rcu_dereference(device->ldev->disk_conf)->fencing;
put_ldev(device); put_ldev(device);
...@@ -1944,18 +1944,18 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n ...@@ -1944,18 +1944,18 @@ _check_net_options(struct drbd_tconn *tconn, struct net_conf *old_conf, struct n
} }
static enum drbd_ret_code static enum drbd_ret_code
check_net_options(struct drbd_tconn *tconn, struct net_conf *new_conf) check_net_options(struct drbd_connection *connection, struct net_conf *new_conf)
{ {
static enum drbd_ret_code rv; static enum drbd_ret_code rv;
struct drbd_device *device; struct drbd_device *device;
int i; int i;
rcu_read_lock(); rcu_read_lock();
rv = _check_net_options(tconn, rcu_dereference(tconn->net_conf), new_conf); rv = _check_net_options(connection, rcu_dereference(connection->net_conf), new_conf);
rcu_read_unlock(); rcu_read_unlock();
/* tconn->volumes protected by genl_lock() here */ /* connection->volumes protected by genl_lock() here */
idr_for_each_entry(&tconn->volumes, device, i) { idr_for_each_entry(&connection->volumes, device, i) {
if (!device->bitmap) { if (!device->bitmap) {
if (drbd_bm_init(device)) if (drbd_bm_init(device))
return ERR_NOMEM; return ERR_NOMEM;
...@@ -2027,7 +2027,7 @@ static void free_crypto(struct crypto *crypto) ...@@ -2027,7 +2027,7 @@ static void free_crypto(struct crypto *crypto)
int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
{ {
enum drbd_ret_code retcode; enum drbd_ret_code retcode;
struct drbd_tconn *tconn; struct drbd_connection *connection;
struct net_conf *old_conf, *new_conf = NULL; struct net_conf *old_conf, *new_conf = NULL;
int err; int err;
int ovr; /* online verify running */ int ovr; /* online verify running */
...@@ -2040,7 +2040,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -2040,7 +2040,7 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto out; goto out;
tconn = adm_ctx.tconn; connection = adm_ctx.connection;
new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL); new_conf = kzalloc(sizeof(struct net_conf), GFP_KERNEL);
if (!new_conf) { if (!new_conf) {
...@@ -2048,11 +2048,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -2048,11 +2048,11 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
goto out; goto out;
} }
conn_reconfig_start(tconn); conn_reconfig_start(connection);
mutex_lock(&tconn->data.mutex); mutex_lock(&connection->data.mutex);
mutex_lock(&tconn->conf_update); mutex_lock(&connection->conf_update);
old_conf = tconn->net_conf; old_conf = connection->net_conf;
if (!old_conf) { if (!old_conf) {
drbd_msg_put_info("net conf missing, try connect"); drbd_msg_put_info("net conf missing, try connect");
...@@ -2071,19 +2071,19 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -2071,19 +2071,19 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
goto fail; goto fail;
} }
retcode = check_net_options(tconn, new_conf); retcode = check_net_options(connection, new_conf);
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto fail; goto fail;
/* re-sync running */ /* re-sync running */
rsr = conn_resync_running(tconn); rsr = conn_resync_running(connection);
if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) { if (rsr && strcmp(new_conf->csums_alg, old_conf->csums_alg)) {
retcode = ERR_CSUMS_RESYNC_RUNNING; retcode = ERR_CSUMS_RESYNC_RUNNING;
goto fail; goto fail;
} }
/* online verify running */ /* online verify running */
ovr = conn_ov_running(tconn); ovr = conn_ov_running(connection);
if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) { if (ovr && strcmp(new_conf->verify_alg, old_conf->verify_alg)) {
retcode = ERR_VERIFY_RUNNING; retcode = ERR_VERIFY_RUNNING;
goto fail; goto fail;
...@@ -2093,45 +2093,45 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -2093,45 +2093,45 @@ int drbd_adm_net_opts(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto fail; goto fail;
rcu_assign_pointer(tconn->net_conf, new_conf); rcu_assign_pointer(connection->net_conf, new_conf);
if (!rsr) { if (!rsr) {
crypto_free_hash(tconn->csums_tfm); crypto_free_hash(connection->csums_tfm);
tconn->csums_tfm = crypto.csums_tfm; connection->csums_tfm = crypto.csums_tfm;
crypto.csums_tfm = NULL; crypto.csums_tfm = NULL;
} }
if (!ovr) { if (!ovr) {
crypto_free_hash(tconn->verify_tfm); crypto_free_hash(connection->verify_tfm);
tconn->verify_tfm = crypto.verify_tfm; connection->verify_tfm = crypto.verify_tfm;
crypto.verify_tfm = NULL; crypto.verify_tfm = NULL;
} }
crypto_free_hash(tconn->integrity_tfm); crypto_free_hash(connection->integrity_tfm);
tconn->integrity_tfm = crypto.integrity_tfm; connection->integrity_tfm = crypto.integrity_tfm;
if (tconn->cstate >= C_WF_REPORT_PARAMS && tconn->agreed_pro_version >= 100) if (connection->cstate >= C_WF_REPORT_PARAMS && connection->agreed_pro_version >= 100)
/* Do this without trying to take tconn->data.mutex again. */ /* Do this without trying to take connection->data.mutex again. */
__drbd_send_protocol(tconn, P_PROTOCOL_UPDATE); __drbd_send_protocol(connection, P_PROTOCOL_UPDATE);
crypto_free_hash(tconn->cram_hmac_tfm); crypto_free_hash(connection->cram_hmac_tfm);
tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
mutex_unlock(&tconn->conf_update); mutex_unlock(&connection->conf_update);
mutex_unlock(&tconn->data.mutex); mutex_unlock(&connection->data.mutex);
synchronize_rcu(); synchronize_rcu();
kfree(old_conf); kfree(old_conf);
if (tconn->cstate >= C_WF_REPORT_PARAMS) if (connection->cstate >= C_WF_REPORT_PARAMS)
drbd_send_sync_param(minor_to_device(conn_lowest_minor(tconn))); drbd_send_sync_param(minor_to_device(conn_lowest_minor(connection)));
goto done; goto done;
fail: fail:
mutex_unlock(&tconn->conf_update); mutex_unlock(&connection->conf_update);
mutex_unlock(&tconn->data.mutex); mutex_unlock(&connection->data.mutex);
free_crypto(&crypto); free_crypto(&crypto);
kfree(new_conf); kfree(new_conf);
done: done:
conn_reconfig_done(tconn); conn_reconfig_done(connection);
out: out:
drbd_adm_finish(info, retcode); drbd_adm_finish(info, retcode);
return 0; return 0;
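[Editor's note] The drbd_adm_net_opts() hunk follows the usual RCU update recipe: build the new net_conf, publish it with rcu_assign_pointer() while holding conf_update, drop the locks, then synchronize_rcu() before kfree()ing the old configuration so readers still dereferencing the old pointer under rcu_read_lock() are not pulled out from under them. A userspace sketch of the same recipe, assuming the liburcu library (urcu.h, link with -lurcu); the struct net_conf here only mirrors a couple of fields of the kernel struct:

        #include <pthread.h>
        #include <stdlib.h>
        #include <urcu.h>               /* userspace RCU; each reader thread must
                                         * have called rcu_register_thread() */

        struct net_conf {
                int ping_timeo;
                int two_primaries;
        };

        static struct net_conf *net_conf;       /* RCU-protected pointer */
        static pthread_mutex_t conf_update = PTHREAD_MUTEX_INITIALIZER;

        /* Reader side: only valid inside rcu_read_lock()/rcu_read_unlock(). */
        static int get_ping_timeo(void)
        {
                struct net_conf *nc;
                int timeo = 0;

                rcu_read_lock();
                nc = rcu_dereference(net_conf);
                if (nc)
                        timeo = nc->ping_timeo;
                rcu_read_unlock();
                return timeo;
        }

        /* Writer side, mirroring drbd_adm_net_opts(): copy, modify, publish,
         * wait for readers, then free the old version. */
        static int set_ping_timeo(int new_timeo)
        {
                struct net_conf *old_conf, *new_conf;

                new_conf = calloc(1, sizeof(*new_conf));
                if (!new_conf)
                        return -1;

                pthread_mutex_lock(&conf_update);
                old_conf = net_conf;
                if (old_conf)
                        *new_conf = *old_conf;          /* start from current settings */
                new_conf->ping_timeo = new_timeo;
                rcu_assign_pointer(net_conf, new_conf); /* publish */
                pthread_mutex_unlock(&conf_update);

                synchronize_rcu();      /* no reader can still see old_conf after this */
                free(old_conf);
                return 0;
        }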
...@@ -2142,7 +2142,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) ...@@ -2142,7 +2142,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
struct drbd_device *device; struct drbd_device *device;
struct net_conf *old_conf, *new_conf = NULL; struct net_conf *old_conf, *new_conf = NULL;
struct crypto crypto = { }; struct crypto crypto = { };
struct drbd_tconn *tconn; struct drbd_connection *connection;
enum drbd_ret_code retcode; enum drbd_ret_code retcode;
int i; int i;
int err; int err;
...@@ -2162,24 +2162,24 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) ...@@ -2162,24 +2162,24 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
/* No need for _rcu here. All reconfiguration is /* No need for _rcu here. All reconfiguration is
* strictly serialized on genl_lock(). We are protected against * strictly serialized on genl_lock(). We are protected against
* concurrent reconfiguration/addition/deletion */ * concurrent reconfiguration/addition/deletion */
list_for_each_entry(tconn, &drbd_tconns, all_tconn) { list_for_each_entry(connection, &drbd_connections, connections) {
if (nla_len(adm_ctx.my_addr) == tconn->my_addr_len && if (nla_len(adm_ctx.my_addr) == connection->my_addr_len &&
!memcmp(nla_data(adm_ctx.my_addr), &tconn->my_addr, tconn->my_addr_len)) { !memcmp(nla_data(adm_ctx.my_addr), &connection->my_addr, connection->my_addr_len)) {
retcode = ERR_LOCAL_ADDR; retcode = ERR_LOCAL_ADDR;
goto out; goto out;
} }
if (nla_len(adm_ctx.peer_addr) == tconn->peer_addr_len && if (nla_len(adm_ctx.peer_addr) == connection->peer_addr_len &&
!memcmp(nla_data(adm_ctx.peer_addr), &tconn->peer_addr, tconn->peer_addr_len)) { !memcmp(nla_data(adm_ctx.peer_addr), &connection->peer_addr, connection->peer_addr_len)) {
retcode = ERR_PEER_ADDR; retcode = ERR_PEER_ADDR;
goto out; goto out;
} }
} }
tconn = adm_ctx.tconn; connection = adm_ctx.connection;
conn_reconfig_start(tconn); conn_reconfig_start(connection);
if (tconn->cstate > C_STANDALONE) { if (connection->cstate > C_STANDALONE) {
retcode = ERR_NET_CONFIGURED; retcode = ERR_NET_CONFIGURED;
goto fail; goto fail;
} }
...@@ -2200,7 +2200,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) ...@@ -2200,7 +2200,7 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
goto fail; goto fail;
} }
retcode = check_net_options(tconn, new_conf); retcode = check_net_options(connection, new_conf);
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto fail; goto fail;
...@@ -2210,40 +2210,40 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) ...@@ -2210,40 +2210,40 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0; ((char *)new_conf->shared_secret)[SHARED_SECRET_MAX-1] = 0;
conn_flush_workqueue(tconn); conn_flush_workqueue(connection);
mutex_lock(&tconn->conf_update); mutex_lock(&connection->conf_update);
old_conf = tconn->net_conf; old_conf = connection->net_conf;
if (old_conf) { if (old_conf) {
retcode = ERR_NET_CONFIGURED; retcode = ERR_NET_CONFIGURED;
mutex_unlock(&tconn->conf_update); mutex_unlock(&connection->conf_update);
goto fail; goto fail;
} }
rcu_assign_pointer(tconn->net_conf, new_conf); rcu_assign_pointer(connection->net_conf, new_conf);
conn_free_crypto(tconn); conn_free_crypto(connection);
tconn->cram_hmac_tfm = crypto.cram_hmac_tfm; connection->cram_hmac_tfm = crypto.cram_hmac_tfm;
tconn->integrity_tfm = crypto.integrity_tfm; connection->integrity_tfm = crypto.integrity_tfm;
tconn->csums_tfm = crypto.csums_tfm; connection->csums_tfm = crypto.csums_tfm;
tconn->verify_tfm = crypto.verify_tfm; connection->verify_tfm = crypto.verify_tfm;
tconn->my_addr_len = nla_len(adm_ctx.my_addr); connection->my_addr_len = nla_len(adm_ctx.my_addr);
memcpy(&tconn->my_addr, nla_data(adm_ctx.my_addr), tconn->my_addr_len); memcpy(&connection->my_addr, nla_data(adm_ctx.my_addr), connection->my_addr_len);
tconn->peer_addr_len = nla_len(adm_ctx.peer_addr); connection->peer_addr_len = nla_len(adm_ctx.peer_addr);
memcpy(&tconn->peer_addr, nla_data(adm_ctx.peer_addr), tconn->peer_addr_len); memcpy(&connection->peer_addr, nla_data(adm_ctx.peer_addr), connection->peer_addr_len);
mutex_unlock(&tconn->conf_update); mutex_unlock(&connection->conf_update);
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, i) { idr_for_each_entry(&connection->volumes, device, i) {
device->send_cnt = 0; device->send_cnt = 0;
device->recv_cnt = 0; device->recv_cnt = 0;
} }
rcu_read_unlock(); rcu_read_unlock();
retcode = conn_request_state(tconn, NS(conn, C_UNCONNECTED), CS_VERBOSE); retcode = conn_request_state(connection, NS(conn, C_UNCONNECTED), CS_VERBOSE);
conn_reconfig_done(tconn); conn_reconfig_done(connection);
drbd_adm_finish(info, retcode); drbd_adm_finish(info, retcode);
return 0; return 0;
...@@ -2251,17 +2251,17 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info) ...@@ -2251,17 +2251,17 @@ int drbd_adm_connect(struct sk_buff *skb, struct genl_info *info)
free_crypto(&crypto); free_crypto(&crypto);
kfree(new_conf); kfree(new_conf);
conn_reconfig_done(tconn); conn_reconfig_done(connection);
out: out:
drbd_adm_finish(info, retcode); drbd_adm_finish(info, retcode);
return 0; return 0;
} }
static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool force) static enum drbd_state_rv conn_try_disconnect(struct drbd_connection *connection, bool force)
{ {
enum drbd_state_rv rv; enum drbd_state_rv rv;
rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
force ? CS_HARD : 0); force ? CS_HARD : 0);
switch (rv) { switch (rv) {
...@@ -2271,18 +2271,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for ...@@ -2271,18 +2271,18 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
return SS_SUCCESS; return SS_SUCCESS;
case SS_PRIMARY_NOP: case SS_PRIMARY_NOP:
/* Our state checking code wants to see the peer outdated. */ /* Our state checking code wants to see the peer outdated. */
rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0); rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING, pdsk, D_OUTDATED), 0);
if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */ if (rv == SS_OUTDATE_WO_CONN) /* lost connection before graceful disconnect succeeded */
rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), CS_VERBOSE); rv = conn_request_state(connection, NS(conn, C_DISCONNECTING), CS_VERBOSE);
break; break;
case SS_CW_FAILED_BY_PEER: case SS_CW_FAILED_BY_PEER:
/* The peer probably wants to see us outdated. */ /* The peer probably wants to see us outdated. */
rv = conn_request_state(tconn, NS2(conn, C_DISCONNECTING, rv = conn_request_state(connection, NS2(conn, C_DISCONNECTING,
disk, D_OUTDATED), 0); disk, D_OUTDATED), 0);
if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) { if (rv == SS_IS_DISKLESS || rv == SS_LOWER_THAN_OUTDATED) {
rv = conn_request_state(tconn, NS(conn, C_DISCONNECTING), rv = conn_request_state(connection, NS(conn, C_DISCONNECTING),
CS_HARD); CS_HARD);
} }
break; break;
...@@ -2296,7 +2296,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for ...@@ -2296,7 +2296,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
* The state handling only uses drbd_thread_stop_nowait(), * The state handling only uses drbd_thread_stop_nowait(),
* we want to really wait here until the receiver is no more. * we want to really wait here until the receiver is no more.
*/ */
drbd_thread_stop(&adm_ctx.tconn->receiver); drbd_thread_stop(&adm_ctx.connection->receiver);
/* Race breaker. This additional state change request may be /* Race breaker. This additional state change request may be
* necessary, if this was a forced disconnect during a receiver * necessary, if this was a forced disconnect during a receiver
...@@ -2304,10 +2304,10 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for ...@@ -2304,10 +2304,10 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
* after drbdd_init() returned. Typically, we should be * after drbdd_init() returned. Typically, we should be
* C_STANDALONE already, now, and this becomes a no-op. * C_STANDALONE already, now, and this becomes a no-op.
*/ */
rv2 = conn_request_state(tconn, NS(conn, C_STANDALONE), rv2 = conn_request_state(connection, NS(conn, C_STANDALONE),
CS_VERBOSE | CS_HARD); CS_VERBOSE | CS_HARD);
if (rv2 < SS_SUCCESS) if (rv2 < SS_SUCCESS)
conn_err(tconn, conn_err(connection,
"unexpected rv2=%d in conn_try_disconnect()\n", "unexpected rv2=%d in conn_try_disconnect()\n",
rv2); rv2);
} }
...@@ -2317,7 +2317,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for ...@@ -2317,7 +2317,7 @@ static enum drbd_state_rv conn_try_disconnect(struct drbd_tconn *tconn, bool for
int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
{ {
struct disconnect_parms parms; struct disconnect_parms parms;
struct drbd_tconn *tconn; struct drbd_connection *connection;
enum drbd_state_rv rv; enum drbd_state_rv rv;
enum drbd_ret_code retcode; enum drbd_ret_code retcode;
int err; int err;
...@@ -2328,7 +2328,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) ...@@ -2328,7 +2328,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto fail; goto fail;
tconn = adm_ctx.tconn; connection = adm_ctx.connection;
memset(&parms, 0, sizeof(parms)); memset(&parms, 0, sizeof(parms));
if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) { if (info->attrs[DRBD_NLA_DISCONNECT_PARMS]) {
err = disconnect_parms_from_attrs(&parms, info); err = disconnect_parms_from_attrs(&parms, info);
...@@ -2339,7 +2339,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info) ...@@ -2339,7 +2339,7 @@ int drbd_adm_disconnect(struct sk_buff *skb, struct genl_info *info)
} }
} }
rv = conn_try_disconnect(tconn, parms.force_disconnect); rv = conn_try_disconnect(connection, parms.force_disconnect);
if (rv < SS_SUCCESS) if (rv < SS_SUCCESS)
retcode = rv; /* FIXME: Type mismatch. */ retcode = rv; /* FIXME: Type mismatch. */
else else
...@@ -2357,7 +2357,7 @@ void resync_after_online_grow(struct drbd_device *device) ...@@ -2357,7 +2357,7 @@ void resync_after_online_grow(struct drbd_device *device)
if (device->state.role != device->state.peer) if (device->state.role != device->state.peer)
iass = (device->state.role == R_PRIMARY); iass = (device->state.role == R_PRIMARY);
else else
iass = test_bit(RESOLVE_CONFLICTS, &device->tconn->flags); iass = test_bit(RESOLVE_CONFLICTS, &device->connection->flags);
if (iass) if (iass)
drbd_start_resync(device, C_SYNC_SOURCE); drbd_start_resync(device, C_SYNC_SOURCE);
...@@ -2412,7 +2412,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) ...@@ -2412,7 +2412,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
goto fail_ldev; goto fail_ldev;
} }
if (rs.no_resync && device->tconn->agreed_pro_version < 93) { if (rs.no_resync && device->connection->agreed_pro_version < 93) {
retcode = ERR_NEED_APV_93; retcode = ERR_NEED_APV_93;
goto fail_ldev; goto fail_ldev;
} }
...@@ -2454,12 +2454,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) ...@@ -2454,12 +2454,12 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev); device->ldev->known_size = drbd_get_capacity(device->ldev->backing_bdev);
if (new_disk_conf) { if (new_disk_conf) {
mutex_lock(&device->tconn->conf_update); mutex_lock(&device->connection->conf_update);
old_disk_conf = device->ldev->disk_conf; old_disk_conf = device->ldev->disk_conf;
*new_disk_conf = *old_disk_conf; *new_disk_conf = *old_disk_conf;
new_disk_conf->disk_size = (sector_t)rs.resize_size; new_disk_conf->disk_size = (sector_t)rs.resize_size;
rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf); rcu_assign_pointer(device->ldev->disk_conf, new_disk_conf);
mutex_unlock(&device->tconn->conf_update); mutex_unlock(&device->connection->conf_update);
synchronize_rcu(); synchronize_rcu();
kfree(old_disk_conf); kfree(old_disk_conf);
} }
...@@ -2499,7 +2499,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info) ...@@ -2499,7 +2499,7 @@ int drbd_adm_resize(struct sk_buff *skb, struct genl_info *info)
int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
{ {
enum drbd_ret_code retcode; enum drbd_ret_code retcode;
struct drbd_tconn *tconn; struct drbd_connection *connection;
struct res_opts res_opts; struct res_opts res_opts;
int err; int err;
...@@ -2508,9 +2508,9 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -2508,9 +2508,9 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
return retcode; return retcode;
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto fail; goto fail;
tconn = adm_ctx.tconn; connection = adm_ctx.connection;
res_opts = tconn->res_opts; res_opts = connection->res_opts;
if (should_set_defaults(info)) if (should_set_defaults(info))
set_res_opts_defaults(&res_opts); set_res_opts_defaults(&res_opts);
...@@ -2521,7 +2521,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info) ...@@ -2521,7 +2521,7 @@ int drbd_adm_resource_opts(struct sk_buff *skb, struct genl_info *info)
goto fail; goto fail;
} }
err = set_resource_options(tconn, &res_opts); err = set_resource_options(connection, &res_opts);
if (err) { if (err) {
retcode = ERR_INVALID_REQUEST; retcode = ERR_INVALID_REQUEST;
if (err == -ENOMEM) if (err == -ENOMEM)
...@@ -2710,9 +2710,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info) ...@@ -2710,9 +2710,9 @@ int drbd_adm_resume_io(struct sk_buff *skb, struct genl_info *info)
retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0)); retcode = drbd_request_state(device, NS3(susp, 0, susp_nod, 0, susp_fen, 0));
if (retcode == SS_SUCCESS) { if (retcode == SS_SUCCESS) {
if (device->state.conn < C_CONNECTED) if (device->state.conn < C_CONNECTED)
tl_clear(device->tconn); tl_clear(device->connection);
if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED) if (device->state.disk == D_DISKLESS || device->state.disk == D_FAILED)
tl_restart(device->tconn, FAIL_FROZEN_DISK_IO); tl_restart(device->connection, FAIL_FROZEN_DISK_IO);
} }
drbd_resume_io(device); drbd_resume_io(device);
...@@ -2726,8 +2726,7 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info) ...@@ -2726,8 +2726,7 @@ int drbd_adm_outdate(struct sk_buff *skb, struct genl_info *info)
return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED)); return drbd_adm_simple_request_state(skb, info, NS(disk, D_OUTDATED));
} }
static int nla_put_drbd_cfg_context(struct sk_buff *skb, static int nla_put_drbd_cfg_context(struct sk_buff *skb, struct drbd_connection *connection, unsigned vnr)
struct drbd_tconn *tconn, unsigned vnr)
{ {
struct nlattr *nla; struct nlattr *nla;
nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT); nla = nla_nest_start(skb, DRBD_NLA_CFG_CONTEXT);
...@@ -2736,13 +2735,13 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb, ...@@ -2736,13 +2735,13 @@ static int nla_put_drbd_cfg_context(struct sk_buff *skb,
if (vnr != VOLUME_UNSPECIFIED && if (vnr != VOLUME_UNSPECIFIED &&
nla_put_u32(skb, T_ctx_volume, vnr)) nla_put_u32(skb, T_ctx_volume, vnr))
goto nla_put_failure; goto nla_put_failure;
if (nla_put_string(skb, T_ctx_resource_name, tconn->name)) if (nla_put_string(skb, T_ctx_resource_name, connection->name))
goto nla_put_failure; goto nla_put_failure;
if (tconn->my_addr_len && if (connection->my_addr_len &&
nla_put(skb, T_ctx_my_addr, tconn->my_addr_len, &tconn->my_addr)) nla_put(skb, T_ctx_my_addr, connection->my_addr_len, &connection->my_addr))
goto nla_put_failure; goto nla_put_failure;
if (tconn->peer_addr_len && if (connection->peer_addr_len &&
nla_put(skb, T_ctx_peer_addr, tconn->peer_addr_len, &tconn->peer_addr)) nla_put(skb, T_ctx_peer_addr, connection->peer_addr_len, &connection->peer_addr))
goto nla_put_failure; goto nla_put_failure;
nla_nest_end(skb, nla); nla_nest_end(skb, nla);
return 0; return 0;
...@@ -2779,10 +2778,10 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, ...@@ -2779,10 +2778,10 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
/* We need to add connection name and volume number information still. /* We need to add connection name and volume number information still.
* Minor number is in drbd_genlmsghdr. */ * Minor number is in drbd_genlmsghdr. */
if (nla_put_drbd_cfg_context(skb, device->tconn, device->vnr)) if (nla_put_drbd_cfg_context(skb, device->connection, device->vnr))
goto nla_put_failure; goto nla_put_failure;
if (res_opts_to_skb(skb, &device->tconn->res_opts, exclude_sensitive)) if (res_opts_to_skb(skb, &device->connection->res_opts, exclude_sensitive))
goto nla_put_failure; goto nla_put_failure;
rcu_read_lock(); rcu_read_lock();
...@@ -2795,7 +2794,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device, ...@@ -2795,7 +2794,7 @@ static int nla_put_status_info(struct sk_buff *skb, struct drbd_device *device,
if (!err) { if (!err) {
struct net_conf *nc; struct net_conf *nc;
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
if (nc) if (nc)
err = net_conf_to_skb(skb, nc, exclude_sensitive); err = net_conf_to_skb(skb, nc, exclude_sensitive);
} }
...@@ -2899,18 +2898,18 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -2899,18 +2898,18 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
{ {
struct drbd_device *device; struct drbd_device *device;
struct drbd_genlmsghdr *dh; struct drbd_genlmsghdr *dh;
struct drbd_tconn *pos = (struct drbd_tconn*)cb->args[0]; struct drbd_connection *pos = (struct drbd_connection *)cb->args[0];
struct drbd_tconn *tconn = NULL; struct drbd_connection *connection = NULL;
struct drbd_tconn *tmp; struct drbd_connection *tmp;
unsigned volume = cb->args[1]; unsigned volume = cb->args[1];
/* Open coded, deferred, iteration: /* Open coded, deferred, iteration:
* list_for_each_entry_safe(tconn, tmp, &drbd_tconns, all_tconn) { * list_for_each_entry_safe(connection, tmp, &drbd_connections, connections) {
* idr_for_each_entry(&tconn->volumes, device, i) { * idr_for_each_entry(&connection->volumes, device, i) {
* ... * ...
* } * }
* } * }
* where tconn is cb->args[0]; * where connection is cb->args[0];
* and i is cb->args[1]; * and i is cb->args[1];
* *
* cb->args[2] indicates if we shall loop over all resources, * cb->args[2] indicates if we shall loop over all resources,
...@@ -2927,36 +2926,36 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -2927,36 +2926,36 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
/* synchronize with conn_create()/conn_destroy() */ /* synchronize with conn_create()/conn_destroy() */
rcu_read_lock(); rcu_read_lock();
/* revalidate iterator position */ /* revalidate iterator position */
list_for_each_entry_rcu(tmp, &drbd_tconns, all_tconn) { list_for_each_entry_rcu(tmp, &drbd_connections, connections) {
if (pos == NULL) { if (pos == NULL) {
/* first iteration */ /* first iteration */
pos = tmp; pos = tmp;
tconn = pos; connection = pos;
break; break;
} }
if (tmp == pos) { if (tmp == pos) {
tconn = pos; connection = pos;
break; break;
} }
} }
if (tconn) { if (connection) {
next_tconn: next_connection:
device = idr_get_next(&tconn->volumes, &volume); device = idr_get_next(&connection->volumes, &volume);
if (!device) { if (!device) {
/* No more volumes to dump on this tconn. /* No more volumes to dump on this connection.
* Advance tconn iterator. */ * Advance connection iterator. */
pos = list_entry_rcu(tconn->all_tconn.next, pos = list_entry_rcu(connection->connections.next,
struct drbd_tconn, all_tconn); struct drbd_connection, connections);
/* Did we dump any volume on this tconn yet? */ /* Did we dump any volume on this connection yet? */
if (volume != 0) { if (volume != 0) {
/* If we reached the end of the list, /* If we reached the end of the list,
* or only a single resource dump was requested, * or only a single resource dump was requested,
* we are done. */ * we are done. */
if (&pos->all_tconn == &drbd_tconns || cb->args[2]) if (&pos->connections == &drbd_connections || cb->args[2])
goto out; goto out;
volume = 0; volume = 0;
tconn = pos; connection = pos;
goto next_tconn; goto next_connection;
} }
} }
...@@ -2967,22 +2966,22 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -2967,22 +2966,22 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
goto out; goto out;
if (!device) { if (!device) {
/* This is a tconn without a single volume. /* This is a connection without a single volume.
* Surprisingly enough, it may have a network * Surprisingly enough, it may have a network
* configuration. */ * configuration. */
struct net_conf *nc; struct net_conf *nc;
dh->minor = -1U; dh->minor = -1U;
dh->ret_code = NO_ERROR; dh->ret_code = NO_ERROR;
if (nla_put_drbd_cfg_context(skb, tconn, VOLUME_UNSPECIFIED)) if (nla_put_drbd_cfg_context(skb, connection, VOLUME_UNSPECIFIED))
goto cancel; goto cancel;
nc = rcu_dereference(tconn->net_conf); nc = rcu_dereference(connection->net_conf);
if (nc && net_conf_to_skb(skb, nc, 1) != 0) if (nc && net_conf_to_skb(skb, nc, 1) != 0)
goto cancel; goto cancel;
goto done; goto done;
} }
D_ASSERT(device->vnr == volume); D_ASSERT(device->vnr == volume);
D_ASSERT(device->tconn == tconn); D_ASSERT(device->connection == connection);
dh->minor = device_to_minor(device); dh->minor = device_to_minor(device);
dh->ret_code = NO_ERROR; dh->ret_code = NO_ERROR;
...@@ -2994,15 +2993,15 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -2994,15 +2993,15 @@ static int get_one_status(struct sk_buff *skb, struct netlink_callback *cb)
} }
done: done:
genlmsg_end(skb, dh); genlmsg_end(skb, dh);
} }
out: out:
rcu_read_unlock(); rcu_read_unlock();
/* where to start the next iteration */ /* where to start the next iteration */
cb->args[0] = (long)pos; cb->args[0] = (long)pos;
cb->args[1] = (pos == tconn) ? volume + 1 : 0; cb->args[1] = (pos == connection) ? volume + 1 : 0;
/* No more tconns/volumes/minors found results in an empty skb. /* No more connections/volumes/minors found results in an empty skb.
* Which will terminate the dump. */ * Which will terminate the dump. */
return skb->len; return skb->len;
} }
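The comment near the top of get_one_status() describes the standard netlink dump idiom: the kernel invokes the dump callback repeatedly, once per skb; the callback must remember its position across calls in cb->args[]; and the dump ends as soon as one call leaves the skb empty. It is also why drbd_adm_get_status_all() (further down in this file) may drop its conn_get_by_name() reference immediately after taking it: the callback revalidates the stashed pointer under rcu_read_lock() on every invocation. A minimal sketch of the idiom, independent of DRBD — struct my_item, my_list and put_one_item() are made-up names:

#include <linux/netlink.h>
#include <linux/rculist.h>
#include <linux/skbuff.h>
#include <linux/types.h>

struct my_item {
        struct list_head list;
};
static LIST_HEAD(my_list);              /* writer side keeps this list RCU-safe */
static int put_one_item(struct sk_buff *skb, struct my_item *item);    /* 0 if it fit into the skb */

static int my_dump(struct sk_buff *skb, struct netlink_callback *cb)
{
        struct my_item *pos = (struct my_item *)cb->args[0];
        struct my_item *item;
        bool found = (pos == NULL);

        if (cb->args[1])        /* a previous call already reached the end */
                return 0;       /* an empty skb terminates the dump */

        rcu_read_lock();
        list_for_each_entry_rcu(item, &my_list, list) {
                if (!found) {                   /* revalidate the saved position */
                        found = (item == pos);
                        if (!found)
                                continue;
                }
                if (put_one_item(skb, item)) {  /* skb full: resume here next time */
                        cb->args[0] = (long)item;
                        goto out;
                }
        }
        cb->args[1] = 1;                        /* remember that we are done */
out:
        rcu_read_unlock();
        return skb->len;
}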
...@@ -3022,7 +3021,7 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -3022,7 +3021,7 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ; const unsigned hdrlen = GENL_HDRLEN + GENL_MAGIC_FAMILY_HDRSZ;
struct nlattr *nla; struct nlattr *nla;
const char *resource_name; const char *resource_name;
struct drbd_tconn *tconn; struct drbd_connection *connection;
int maxtype; int maxtype;
/* Is this a followup call? */ /* Is this a followup call? */
...@@ -3051,18 +3050,18 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb) ...@@ -3051,18 +3050,18 @@ int drbd_adm_get_status_all(struct sk_buff *skb, struct netlink_callback *cb)
if (!nla) if (!nla)
return -EINVAL; return -EINVAL;
resource_name = nla_data(nla); resource_name = nla_data(nla);
tconn = conn_get_by_name(resource_name); connection = conn_get_by_name(resource_name);
if (!tconn) if (!connection)
return -ENODEV; return -ENODEV;
kref_put(&tconn->kref, &conn_destroy); /* get_one_status() (re)validates tconn by itself */ kref_put(&connection->kref, &conn_destroy); /* get_one_status() (re)validates connection by itself */
/* prime iterators, and set "filter" mode mark: /* prime iterators, and set "filter" mode mark:
* only dump this tconn. */ * only dump this connection. */
cb->args[0] = (long)tconn; cb->args[0] = (long)connection;
/* cb->args[1] = 0; passed in this way. */ /* cb->args[1] = 0; passed in this way. */
cb->args[2] = (long)tconn; cb->args[2] = (long)connection;
dump: dump:
return get_one_status(skb, cb); return get_one_status(skb, cb);
...@@ -3169,7 +3168,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) ...@@ -3169,7 +3168,7 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
} }
/* this is "skip initial sync", assume to be clean */ /* this is "skip initial sync", assume to be clean */
if (device->state.conn == C_CONNECTED && device->tconn->agreed_pro_version >= 90 && if (device->state.conn == C_CONNECTED && device->connection->agreed_pro_version >= 90 &&
device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) { device->ldev->md.uuid[UI_CURRENT] == UUID_JUST_CREATED && args.clear_bm) {
dev_info(DEV, "Preparing to skip initial sync\n"); dev_info(DEV, "Preparing to skip initial sync\n");
skip_initial_sync = 1; skip_initial_sync = 1;
...@@ -3192,10 +3191,10 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info) ...@@ -3192,10 +3191,10 @@ int drbd_adm_new_c_uuid(struct sk_buff *skb, struct genl_info *info)
drbd_send_uuids_skip_initial_sync(device); drbd_send_uuids_skip_initial_sync(device);
_drbd_uuid_set(device, UI_BITMAP, 0); _drbd_uuid_set(device, UI_BITMAP, 0);
drbd_print_uuids(device, "cleared bitmap UUID"); drbd_print_uuids(device, "cleared bitmap UUID");
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
_drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE), _drbd_set_state(_NS2(device, disk, D_UP_TO_DATE, pdsk, D_UP_TO_DATE),
CS_VERBOSE, NULL); CS_VERBOSE, NULL);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
} }
} }
...@@ -3249,7 +3248,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info) ...@@ -3249,7 +3248,7 @@ int drbd_adm_new_resource(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto out; goto out;
if (adm_ctx.tconn) { if (adm_ctx.connection) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) { if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) {
retcode = ERR_INVALID_REQUEST; retcode = ERR_INVALID_REQUEST;
drbd_msg_put_info("resource exists"); drbd_msg_put_info("resource exists");
...@@ -3288,7 +3287,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info) ...@@ -3288,7 +3287,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
} }
/* drbd_adm_prepare made sure already /* drbd_adm_prepare made sure already
* that device->tconn and device->vnr match the request. */ * that device->connection and device->vnr match the request. */
if (adm_ctx.device) { if (adm_ctx.device) {
if (info->nlhdr->nlmsg_flags & NLM_F_EXCL) if (info->nlhdr->nlmsg_flags & NLM_F_EXCL)
retcode = ERR_MINOR_EXISTS; retcode = ERR_MINOR_EXISTS;
...@@ -3296,7 +3295,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info) ...@@ -3296,7 +3295,7 @@ int drbd_adm_add_minor(struct sk_buff *skb, struct genl_info *info)
goto out; goto out;
} }
retcode = conn_new_minor(adm_ctx.tconn, dh->minor, adm_ctx.volume); retcode = conn_new_minor(adm_ctx.connection, dh->minor, adm_ctx.volume);
out: out:
drbd_adm_finish(info, retcode); drbd_adm_finish(info, retcode);
return 0; return 0;
...@@ -3311,7 +3310,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_device *device) ...@@ -3311,7 +3310,7 @@ static enum drbd_ret_code adm_delete_minor(struct drbd_device *device)
device->state.role == R_SECONDARY) { device->state.role == R_SECONDARY) {
_drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS), _drbd_request_state(device, NS(conn, C_WF_REPORT_PARAMS),
CS_VERBOSE + CS_WAIT_COMPLETE); CS_VERBOSE + CS_WAIT_COMPLETE);
idr_remove(&device->tconn->volumes, device->vnr); idr_remove(&device->connection->volumes, device->vnr);
idr_remove(&minors, device_to_minor(device)); idr_remove(&minors, device_to_minor(device));
destroy_workqueue(device->submit.wq); destroy_workqueue(device->submit.wq);
del_gendisk(device->vdisk); del_gendisk(device->vdisk);
...@@ -3350,13 +3349,13 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) ...@@ -3350,13 +3349,13 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto out; goto out;
if (!adm_ctx.tconn) { if (!adm_ctx.connection) {
retcode = ERR_RES_NOT_KNOWN; retcode = ERR_RES_NOT_KNOWN;
goto out; goto out;
} }
/* demote */ /* demote */
idr_for_each_entry(&adm_ctx.tconn->volumes, device, i) { idr_for_each_entry(&adm_ctx.connection->volumes, device, i) {
retcode = drbd_set_role(device, R_SECONDARY, 0); retcode = drbd_set_role(device, R_SECONDARY, 0);
if (retcode < SS_SUCCESS) { if (retcode < SS_SUCCESS) {
drbd_msg_put_info("failed to demote"); drbd_msg_put_info("failed to demote");
...@@ -3364,14 +3363,14 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) ...@@ -3364,14 +3363,14 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
} }
} }
retcode = conn_try_disconnect(adm_ctx.tconn, 0); retcode = conn_try_disconnect(adm_ctx.connection, 0);
if (retcode < SS_SUCCESS) { if (retcode < SS_SUCCESS) {
drbd_msg_put_info("failed to disconnect"); drbd_msg_put_info("failed to disconnect");
goto out; goto out;
} }
/* detach */ /* detach */
idr_for_each_entry(&adm_ctx.tconn->volumes, device, i) { idr_for_each_entry(&adm_ctx.connection->volumes, device, i) {
retcode = adm_detach(device, 0); retcode = adm_detach(device, 0);
if (retcode < SS_SUCCESS || retcode > NO_ERROR) { if (retcode < SS_SUCCESS || retcode > NO_ERROR) {
drbd_msg_put_info("failed to detach"); drbd_msg_put_info("failed to detach");
...@@ -3379,15 +3378,15 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) ...@@ -3379,15 +3378,15 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
} }
} }
/* If we reach this, all volumes (of this tconn) are Secondary, /* If we reach this, all volumes (of this connection) are Secondary,
* Disconnected, Diskless, aka Unconfigured. Make sure all threads have * Disconnected, Diskless, aka Unconfigured. Make sure all threads have
* actually stopped, state handling only does drbd_thread_stop_nowait(). */ * actually stopped, state handling only does drbd_thread_stop_nowait(). */
drbd_thread_stop(&adm_ctx.tconn->worker); drbd_thread_stop(&adm_ctx.connection->worker);
/* Now, nothing can fail anymore */ /* Now, nothing can fail anymore */
/* delete volumes */ /* delete volumes */
idr_for_each_entry(&adm_ctx.tconn->volumes, device, i) { idr_for_each_entry(&adm_ctx.connection->volumes, device, i) {
retcode = adm_delete_minor(device); retcode = adm_delete_minor(device);
if (retcode != NO_ERROR) { if (retcode != NO_ERROR) {
/* "can not happen" */ /* "can not happen" */
...@@ -3397,10 +3396,10 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info) ...@@ -3397,10 +3396,10 @@ int drbd_adm_down(struct sk_buff *skb, struct genl_info *info)
} }
/* delete connection */ /* delete connection */
if (conn_lowest_minor(adm_ctx.tconn) < 0) { if (conn_lowest_minor(adm_ctx.connection) < 0) {
list_del_rcu(&adm_ctx.tconn->all_tconn); list_del_rcu(&adm_ctx.connection->connections);
synchronize_rcu(); synchronize_rcu();
kref_put(&adm_ctx.tconn->kref, &conn_destroy); kref_put(&adm_ctx.connection->kref, &conn_destroy);
retcode = NO_ERROR; retcode = NO_ERROR;
} else { } else {
...@@ -3424,10 +3423,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) ...@@ -3424,10 +3423,10 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
if (retcode != NO_ERROR) if (retcode != NO_ERROR)
goto out; goto out;
if (conn_lowest_minor(adm_ctx.tconn) < 0) { if (conn_lowest_minor(adm_ctx.connection) < 0) {
list_del_rcu(&adm_ctx.tconn->all_tconn); list_del_rcu(&adm_ctx.connection->connections);
synchronize_rcu(); synchronize_rcu();
kref_put(&adm_ctx.tconn->kref, &conn_destroy); kref_put(&adm_ctx.connection->kref, &conn_destroy);
retcode = NO_ERROR; retcode = NO_ERROR;
} else { } else {
...@@ -3435,7 +3434,7 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info) ...@@ -3435,7 +3434,7 @@ int drbd_adm_del_resource(struct sk_buff *skb, struct genl_info *info)
} }
if (retcode == NO_ERROR) if (retcode == NO_ERROR)
drbd_thread_stop(&adm_ctx.tconn->worker); drbd_thread_stop(&adm_ctx.connection->worker);
out: out:
drbd_adm_finish(info, retcode); drbd_adm_finish(info, retcode);
return 0; return 0;
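Both drbd_adm_down() and drbd_adm_del_resource() above retire a connection with the same three-step idiom: unlink it with list_del_rcu(), wait out readers that may still be inside an rcu_read_lock() section with synchronize_rcu(), and only then drop the list's reference with kref_put(). A generic, self-contained sketch of that teardown sequence — struct my_obj, my_release() and my_remove() are hypothetical, not the driver's code:

#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/list.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

struct my_obj {
        struct list_head list;          /* linked on an RCU-protected global list */
        struct kref kref;
};

static void my_release(struct kref *kref)
{
        kfree(container_of(kref, struct my_obj, kref));
}

static void my_remove(struct my_obj *obj)
{
        list_del_rcu(&obj->list);               /* new readers no longer find it */
        synchronize_rcu();                      /* readers already in flight finish */
        kref_put(&obj->kref, my_release);       /* drop the list's reference */
}

Since get_one_status() only ever walks the connection list under rcu_read_lock(), the grace period guarantees that no dump is still traversing the entry by the time the list's reference is dropped.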
......
...@@ -251,7 +251,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v) ...@@ -251,7 +251,7 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
/* reset device->congestion_reason */ /* reset device->congestion_reason */
bdi_rw_congested(&device->rq_queue->backing_dev_info); bdi_rw_congested(&device->rq_queue->backing_dev_info);
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' '; wp = nc ? nc->wire_protocol - DRBD_PROT_A + 'A' : ' ';
seq_printf(seq, seq_printf(seq,
"%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n" "%2d: cs:%s ro:%s/%s ds:%s/%s %c %c%c%c%c%c%c\n"
...@@ -280,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v) ...@@ -280,8 +280,8 @@ static int drbd_seq_show(struct seq_file *seq, void *v)
atomic_read(&device->rs_pending_cnt), atomic_read(&device->rs_pending_cnt),
atomic_read(&device->unacked_cnt), atomic_read(&device->unacked_cnt),
atomic_read(&device->ap_bio_cnt), atomic_read(&device->ap_bio_cnt),
device->tconn->epochs, device->connection->epochs,
write_ordering_chars[device->tconn->write_ordering] write_ordering_chars[device->connection->write_ordering]
); );
seq_printf(seq, " oos:%llu\n", seq_printf(seq, " oos:%llu\n",
Bit2KB((unsigned long long) Bit2KB((unsigned long long)
......
This source diff could not be displayed because it is too large.
...@@ -163,20 +163,21 @@ void drbd_req_destroy(struct kref *kref) ...@@ -163,20 +163,21 @@ void drbd_req_destroy(struct kref *kref)
mempool_free(req, drbd_request_mempool); mempool_free(req, drbd_request_mempool);
} }
static void wake_all_senders(struct drbd_tconn *tconn) { static void wake_all_senders(struct drbd_connection *connection)
wake_up(&tconn->sender_work.q_wait); {
wake_up(&connection->sender_work.q_wait);
} }
/* must hold resource->req_lock */ /* must hold resource->req_lock */
void start_new_tl_epoch(struct drbd_tconn *tconn) void start_new_tl_epoch(struct drbd_connection *connection)
{ {
/* no point closing an epoch, if it is empty, anyways. */ /* no point closing an epoch, if it is empty, anyways. */
if (tconn->current_tle_writes == 0) if (connection->current_tle_writes == 0)
return; return;
tconn->current_tle_writes = 0; connection->current_tle_writes = 0;
atomic_inc(&tconn->current_tle_nr); atomic_inc(&connection->current_tle_nr);
wake_all_senders(tconn); wake_all_senders(connection);
} }
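start_new_tl_epoch() is the only place the transfer-log epoch advances: current_tle_writes counts the writes queued into the currently open epoch, current_tle_nr names that epoch, and bumping the number while waking the senders is what lets the sender threads put a barrier between the old and the new epoch. Condensed into one hypothetical helper — queue_write_sketch() and its max_epoch_size argument are made up; the real logic is spread over the QUEUE_FOR_NET_WRITE case and drbd_send_and_submit() further down, where the limit comes from net_conf:

/* sketch only; assumes the driver's own headers and req_lock held,
 * as in drbd_send_and_submit() */
static void queue_write_sketch(struct drbd_connection *connection,
                               struct drbd_request *req,
                               unsigned int max_epoch_size)
{
        /* every write remembers which epoch it belongs to ... */
        req->epoch = atomic_read(&connection->current_tle_nr);
        connection->current_tle_writes++;

        /* ... and an epoch that outgrew the configured limit is closed,
         * so the peer can acknowledge it as one unit */
        if (connection->current_tle_writes >= max_epoch_size)
                start_new_tl_epoch(connection);
}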
void complete_master_bio(struct drbd_device *device, void complete_master_bio(struct drbd_device *device,
...@@ -273,8 +274,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m) ...@@ -273,8 +274,8 @@ void drbd_req_complete(struct drbd_request *req, struct bio_and_error *m)
* and reset the transfer log epoch write_cnt. * and reset the transfer log epoch write_cnt.
*/ */
if (rw == WRITE && if (rw == WRITE &&
req->epoch == atomic_read(&device->tconn->current_tle_nr)) req->epoch == atomic_read(&device->connection->current_tle_nr))
start_new_tl_epoch(device->tconn); start_new_tl_epoch(device->connection);
/* Update disk stats */ /* Update disk stats */
_drbd_end_io_acct(device, req); _drbd_end_io_acct(device, req);
...@@ -476,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -476,7 +477,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
* and from w_read_retry_remote */ * and from w_read_retry_remote */
D_ASSERT(!(req->rq_state & RQ_NET_MASK)); D_ASSERT(!(req->rq_state & RQ_NET_MASK));
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
p = nc->wire_protocol; p = nc->wire_protocol;
rcu_read_unlock(); rcu_read_unlock();
req->rq_state |= req->rq_state |=
...@@ -541,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -541,7 +542,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0); D_ASSERT((req->rq_state & RQ_LOCAL_MASK) == 0);
mod_rq_state(req, m, 0, RQ_NET_QUEUED); mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_read_req; req->w.cb = w_send_read_req;
drbd_queue_work(&device->tconn->sender_work, &req->w); drbd_queue_work(&device->connection->sender_work, &req->w);
break; break;
case QUEUE_FOR_NET_WRITE: case QUEUE_FOR_NET_WRITE:
...@@ -576,22 +577,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -576,22 +577,22 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
D_ASSERT(req->rq_state & RQ_NET_PENDING); D_ASSERT(req->rq_state & RQ_NET_PENDING);
mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK); mod_rq_state(req, m, 0, RQ_NET_QUEUED|RQ_EXP_BARR_ACK);
req->w.cb = w_send_dblock; req->w.cb = w_send_dblock;
drbd_queue_work(&device->tconn->sender_work, &req->w); drbd_queue_work(&device->connection->sender_work, &req->w);
/* close the epoch, in case it outgrew the limit */ /* close the epoch, in case it outgrew the limit */
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
p = nc->max_epoch_size; p = nc->max_epoch_size;
rcu_read_unlock(); rcu_read_unlock();
if (device->tconn->current_tle_writes >= p) if (device->connection->current_tle_writes >= p)
start_new_tl_epoch(device->tconn); start_new_tl_epoch(device->connection);
break; break;
case QUEUE_FOR_SEND_OOS: case QUEUE_FOR_SEND_OOS:
mod_rq_state(req, m, 0, RQ_NET_QUEUED); mod_rq_state(req, m, 0, RQ_NET_QUEUED);
req->w.cb = w_send_out_of_sync; req->w.cb = w_send_out_of_sync;
drbd_queue_work(&device->tconn->sender_work, &req->w); drbd_queue_work(&device->connection->sender_work, &req->w);
break; break;
case READ_RETRY_REMOTE_CANCELED: case READ_RETRY_REMOTE_CANCELED:
...@@ -703,7 +704,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -703,7 +704,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
get_ldev(device); /* always succeeds in this call path */ get_ldev(device); /* always succeeds in this call path */
req->w.cb = w_restart_disk_io; req->w.cb = w_restart_disk_io;
drbd_queue_work(&device->tconn->sender_work, &req->w); drbd_queue_work(&device->connection->sender_work, &req->w);
break; break;
case RESEND: case RESEND:
...@@ -724,7 +725,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -724,7 +725,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING); mod_rq_state(req, m, RQ_COMPLETION_SUSP, RQ_NET_QUEUED|RQ_NET_PENDING);
if (req->w.cb) { if (req->w.cb) {
drbd_queue_work(&device->tconn->sender_work, &req->w); drbd_queue_work(&device->connection->sender_work, &req->w);
rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ; rv = req->rq_state & RQ_WRITE ? MR_WRITE : MR_READ;
} /* else: FIXME can this happen? */ } /* else: FIXME can this happen? */
break; break;
...@@ -756,7 +757,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -756,7 +757,7 @@ int __req_mod(struct drbd_request *req, enum drbd_req_event what,
break; break;
case QUEUE_AS_DRBD_BARRIER: case QUEUE_AS_DRBD_BARRIER:
start_new_tl_epoch(device->tconn); start_new_tl_epoch(device->connection);
mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE); mod_rq_state(req, m, 0, RQ_NET_OK|RQ_NET_DONE);
break; break;
}; };
...@@ -850,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req) ...@@ -850,9 +851,9 @@ static void complete_conflicting_writes(struct drbd_request *req)
break; break;
/* Indicate to wake up device->misc_wait on progress. */ /* Indicate to wake up device->misc_wait on progress. */
i->waiting = true; i->waiting = true;
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
schedule(); schedule();
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
} }
finish_wait(&device->misc_wait, &wait); finish_wait(&device->misc_wait, &wait);
} }
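complete_conflicting_writes() (only partially visible in this hunk) follows the classic sleep-under-a-spinlock shape: register on a wait queue, drop the lock, schedule(), re-take the lock, re-check the condition; the lines cut from the hunk presumably hold the prepare_to_wait() matching the finish_wait() shown above. A generic, self-contained sketch of that shape — the names are made up, only the pattern matches, and it assumes the waker updates the flag under the same lock before calling wake_up():

#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/wait.h>

static void wait_for_flag(spinlock_t *lock, wait_queue_head_t *wq, bool *flag)
{
        DEFINE_WAIT(wait);

        spin_lock_irq(lock);
        while (!*flag) {
                prepare_to_wait(wq, &wait, TASK_UNINTERRUPTIBLE);
                spin_unlock_irq(lock);  /* never sleep with a spinlock held */
                schedule();             /* woken by wake_up(wq) elsewhere */
                spin_lock_irq(lock);
        }
        finish_wait(wq, &wait);
        spin_unlock_irq(lock);
}

In the DRBD case the condition is the conflicting request making progress (signalled through device->misc_wait) and the lock is the connection's req_lock.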
...@@ -860,17 +861,17 @@ static void complete_conflicting_writes(struct drbd_request *req) ...@@ -860,17 +861,17 @@ static void complete_conflicting_writes(struct drbd_request *req)
/* called within req_lock and rcu_read_lock() */ /* called within req_lock and rcu_read_lock() */
static void maybe_pull_ahead(struct drbd_device *device) static void maybe_pull_ahead(struct drbd_device *device)
{ {
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
struct net_conf *nc; struct net_conf *nc;
bool congested = false; bool congested = false;
enum drbd_on_congestion on_congestion; enum drbd_on_congestion on_congestion;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(tconn->net_conf); nc = rcu_dereference(connection->net_conf);
on_congestion = nc ? nc->on_congestion : OC_BLOCK; on_congestion = nc ? nc->on_congestion : OC_BLOCK;
rcu_read_unlock(); rcu_read_unlock();
if (on_congestion == OC_BLOCK || if (on_congestion == OC_BLOCK ||
tconn->agreed_pro_version < 96) connection->agreed_pro_version < 96)
return; return;
/* If I don't even have good local storage, we can not reasonably try /* If I don't even have good local storage, we can not reasonably try
...@@ -893,7 +894,7 @@ static void maybe_pull_ahead(struct drbd_device *device) ...@@ -893,7 +894,7 @@ static void maybe_pull_ahead(struct drbd_device *device)
if (congested) { if (congested) {
/* start a new epoch for non-mirrored writes */ /* start a new epoch for non-mirrored writes */
start_new_tl_epoch(device->tconn); start_new_tl_epoch(device->connection);
if (on_congestion == OC_PULL_AHEAD) if (on_congestion == OC_PULL_AHEAD)
_drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL); _drbd_set_state(_NS(device, conn, C_AHEAD), 0, NULL);
...@@ -1077,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request ...@@ -1077,7 +1078,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
struct bio_and_error m = { NULL, }; struct bio_and_error m = { NULL, };
bool no_remote = false; bool no_remote = false;
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
if (rw == WRITE) { if (rw == WRITE) {
/* This may temporarily give up the req_lock, /* This may temporarily give up the req_lock,
* but will re-acquire it before it returns here. * but will re-acquire it before it returns here.
...@@ -1111,15 +1112,15 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request ...@@ -1111,15 +1112,15 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
} }
/* which transfer log epoch does this belong to? */ /* which transfer log epoch does this belong to? */
req->epoch = atomic_read(&device->tconn->current_tle_nr); req->epoch = atomic_read(&device->connection->current_tle_nr);
/* no point in adding empty flushes to the transfer log, /* no point in adding empty flushes to the transfer log,
* they are mapped to drbd barriers already. */ * they are mapped to drbd barriers already. */
if (likely(req->i.size!=0)) { if (likely(req->i.size!=0)) {
if (rw == WRITE) if (rw == WRITE)
device->tconn->current_tle_writes++; device->connection->current_tle_writes++;
list_add_tail(&req->tl_requests, &device->tconn->transfer_log); list_add_tail(&req->tl_requests, &device->connection->transfer_log);
} }
if (rw == WRITE) { if (rw == WRITE) {
...@@ -1139,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request ...@@ -1139,9 +1140,9 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
/* needs to be marked within the same spinlock */ /* needs to be marked within the same spinlock */
_req_mod(req, TO_BE_SUBMITTED); _req_mod(req, TO_BE_SUBMITTED);
/* but we need to give up the spinlock to submit */ /* but we need to give up the spinlock to submit */
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
drbd_submit_req_private_bio(req); drbd_submit_req_private_bio(req);
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
} else if (no_remote) { } else if (no_remote) {
nodata: nodata:
if (__ratelimit(&drbd_ratelimit_state)) if (__ratelimit(&drbd_ratelimit_state))
...@@ -1154,7 +1155,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request ...@@ -1154,7 +1155,7 @@ static void drbd_send_and_submit(struct drbd_device *device, struct drbd_request
out: out:
if (drbd_req_put_completion_ref(req, &m, 1)) if (drbd_req_put_completion_ref(req, &m, 1))
kref_put(&req->kref, drbd_req_destroy); kref_put(&req->kref, drbd_req_destroy);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
if (m.bio) if (m.bio)
complete_master_bio(device, &m); complete_master_bio(device, &m);
...@@ -1320,12 +1321,12 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct ...@@ -1320,12 +1321,12 @@ int drbd_merge_bvec(struct request_queue *q, struct bvec_merge_data *bvm, struct
return limit; return limit;
} }
static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn) static struct drbd_request *find_oldest_request(struct drbd_connection *connection)
{ {
/* Walk the transfer log, /* Walk the transfer log,
* and find the oldest not yet completed request */ * and find the oldest not yet completed request */
struct drbd_request *r; struct drbd_request *r;
list_for_each_entry(r, &tconn->transfer_log, tl_requests) { list_for_each_entry(r, &connection->transfer_log, tl_requests) {
if (atomic_read(&r->completion_ref)) if (atomic_read(&r->completion_ref))
return r; return r;
} }
...@@ -1335,14 +1336,14 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn) ...@@ -1335,14 +1336,14 @@ static struct drbd_request *find_oldest_request(struct drbd_tconn *tconn)
void request_timer_fn(unsigned long data) void request_timer_fn(unsigned long data)
{ {
struct drbd_device *device = (struct drbd_device *) data; struct drbd_device *device = (struct drbd_device *) data;
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
struct drbd_request *req; /* oldest request */ struct drbd_request *req; /* oldest request */
struct net_conf *nc; struct net_conf *nc;
unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */ unsigned long ent = 0, dt = 0, et, nt; /* effective timeout = ko_count * timeout */
unsigned long now; unsigned long now;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(tconn->net_conf); nc = rcu_dereference(connection->net_conf);
if (nc && device->state.conn >= C_WF_REPORT_PARAMS) if (nc && device->state.conn >= C_WF_REPORT_PARAMS)
ent = nc->timeout * HZ/10 * nc->ko_count; ent = nc->timeout * HZ/10 * nc->ko_count;
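The net timeout knob is specified in tenths of a second, which is why the conversion to jiffies goes through HZ/10 before being scaled by ko-count. As a worked example, assuming DRBD's documented defaults of timeout = 60 (i.e. 6.0 s) and ko-count = 7, the effective timeout is ent = 60 * HZ/10 * 7, i.e. 42 seconds' worth of jiffies: the peer is only declared dead (the C_TIMEOUT transition below) once a request has been network-pending for that long, and not while we are still within such a window after the last reconnect.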
...@@ -1359,10 +1360,10 @@ void request_timer_fn(unsigned long data) ...@@ -1359,10 +1360,10 @@ void request_timer_fn(unsigned long data)
now = jiffies; now = jiffies;
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
req = find_oldest_request(tconn); req = find_oldest_request(connection);
if (!req) { if (!req) {
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
mod_timer(&device->request_timer, now + et); mod_timer(&device->request_timer, now + et);
return; return;
} }
...@@ -1385,7 +1386,7 @@ void request_timer_fn(unsigned long data) ...@@ -1385,7 +1386,7 @@ void request_timer_fn(unsigned long data)
*/ */
if (ent && req->rq_state & RQ_NET_PENDING && if (ent && req->rq_state & RQ_NET_PENDING &&
time_after(now, req->start_time + ent) && time_after(now, req->start_time + ent) &&
!time_in_range(now, tconn->last_reconnect_jif, tconn->last_reconnect_jif + ent)) { !time_in_range(now, connection->last_reconnect_jif, connection->last_reconnect_jif + ent)) {
dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n"); dev_warn(DEV, "Remote failed to finish a request within ko-count * timeout\n");
_drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL); _drbd_set_state(_NS(device, conn, C_TIMEOUT), CS_VERBOSE | CS_HARD, NULL);
} }
...@@ -1396,6 +1397,6 @@ void request_timer_fn(unsigned long data) ...@@ -1396,6 +1397,6 @@ void request_timer_fn(unsigned long data)
__drbd_chk_io_error(device, DRBD_FORCE_DETACH); __drbd_chk_io_error(device, DRBD_FORCE_DETACH);
} }
nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et; nt = (time_after(now, req->start_time + et) ? now : req->start_time) + et;
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
mod_timer(&device->request_timer, nt); mod_timer(&device->request_timer, nt);
} }
...@@ -275,7 +275,7 @@ struct bio_and_error { ...@@ -275,7 +275,7 @@ struct bio_and_error {
int error; int error;
}; };
extern void start_new_tl_epoch(struct drbd_tconn *tconn); extern void start_new_tl_epoch(struct drbd_connection *connection);
extern void drbd_req_destroy(struct kref *kref); extern void drbd_req_destroy(struct kref *kref);
extern void _req_may_be_done(struct drbd_request *req, extern void _req_may_be_done(struct drbd_request *req,
struct bio_and_error *m); struct bio_and_error *m);
...@@ -284,8 +284,8 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what, ...@@ -284,8 +284,8 @@ extern int __req_mod(struct drbd_request *req, enum drbd_req_event what,
extern void complete_master_bio(struct drbd_device *device, extern void complete_master_bio(struct drbd_device *device,
struct bio_and_error *m); struct bio_and_error *m);
extern void request_timer_fn(unsigned long data); extern void request_timer_fn(unsigned long data);
extern void tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what); extern void tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
extern void _tl_restart(struct drbd_tconn *tconn, enum drbd_req_event what); extern void _tl_restart(struct drbd_connection *connection, enum drbd_req_event what);
/* this is in drbd_main.c */ /* this is in drbd_main.c */
extern void drbd_restart_request(struct drbd_request *req); extern void drbd_restart_request(struct drbd_request *req);
...@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req, ...@@ -318,9 +318,9 @@ static inline int req_mod(struct drbd_request *req,
struct bio_and_error m; struct bio_and_error m;
int rv; int rv;
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
rv = __req_mod(req, what, &m); rv = __req_mod(req, what, &m);
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
if (m.bio) if (m.bio)
complete_master_bio(device, &m); complete_master_bio(device, &m);
......
...@@ -51,7 +51,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused); ...@@ -51,7 +51,7 @@ static int w_after_state_ch(struct drbd_work *w, int unused);
static void after_state_ch(struct drbd_device *device, union drbd_state os, static void after_state_ch(struct drbd_device *device, union drbd_state os,
union drbd_state ns, enum chg_state_flags flags); union drbd_state ns, enum chg_state_flags flags);
static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state); static enum drbd_state_rv is_valid_state(struct drbd_device *, union drbd_state);
static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_tconn *); static enum drbd_state_rv is_valid_soft_transition(union drbd_state, union drbd_state, struct drbd_connection *);
static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns); static enum drbd_state_rv is_valid_transition(union drbd_state os, union drbd_state ns);
static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns, static union drbd_state sanitize_state(struct drbd_device *device, union drbd_state ns,
enum sanitize_state_warnings *warn); enum sanitize_state_warnings *warn);
...@@ -61,14 +61,14 @@ static inline bool is_susp(union drbd_state s) ...@@ -61,14 +61,14 @@ static inline bool is_susp(union drbd_state s)
return s.susp || s.susp_nod || s.susp_fen; return s.susp || s.susp_nod || s.susp_fen;
} }
bool conn_all_vols_unconf(struct drbd_tconn *tconn) bool conn_all_vols_unconf(struct drbd_connection *connection)
{ {
struct drbd_device *device; struct drbd_device *device;
bool rv = true; bool rv = true;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
if (device->state.disk != D_DISKLESS || if (device->state.disk != D_DISKLESS ||
device->state.conn != C_STANDALONE || device->state.conn != C_STANDALONE ||
device->state.role != R_SECONDARY) { device->state.role != R_SECONDARY) {
...@@ -100,98 +100,98 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2) ...@@ -100,98 +100,98 @@ static enum drbd_role min_role(enum drbd_role role1, enum drbd_role role2)
return R_PRIMARY; return R_PRIMARY;
} }
enum drbd_role conn_highest_role(struct drbd_tconn *tconn) enum drbd_role conn_highest_role(struct drbd_connection *connection)
{ {
enum drbd_role role = R_UNKNOWN; enum drbd_role role = R_UNKNOWN;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
role = max_role(role, device->state.role); role = max_role(role, device->state.role);
rcu_read_unlock(); rcu_read_unlock();
return role; return role;
} }
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn) enum drbd_role conn_highest_peer(struct drbd_connection *connection)
{ {
enum drbd_role peer = R_UNKNOWN; enum drbd_role peer = R_UNKNOWN;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
peer = max_role(peer, device->state.peer); peer = max_role(peer, device->state.peer);
rcu_read_unlock(); rcu_read_unlock();
return peer; return peer;
} }
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn) enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection)
{ {
enum drbd_disk_state ds = D_DISKLESS; enum drbd_disk_state ds = D_DISKLESS;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
ds = max_t(enum drbd_disk_state, ds, device->state.disk); ds = max_t(enum drbd_disk_state, ds, device->state.disk);
rcu_read_unlock(); rcu_read_unlock();
return ds; return ds;
} }
enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn) enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection)
{ {
enum drbd_disk_state ds = D_MASK; enum drbd_disk_state ds = D_MASK;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
ds = min_t(enum drbd_disk_state, ds, device->state.disk); ds = min_t(enum drbd_disk_state, ds, device->state.disk);
rcu_read_unlock(); rcu_read_unlock();
return ds; return ds;
} }
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn) enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection)
{ {
enum drbd_disk_state ds = D_DISKLESS; enum drbd_disk_state ds = D_DISKLESS;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
ds = max_t(enum drbd_disk_state, ds, device->state.pdsk); ds = max_t(enum drbd_disk_state, ds, device->state.pdsk);
rcu_read_unlock(); rcu_read_unlock();
return ds; return ds;
} }
enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn) enum drbd_conns conn_lowest_conn(struct drbd_connection *connection)
{ {
enum drbd_conns conn = C_MASK; enum drbd_conns conn = C_MASK;
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
conn = min_t(enum drbd_conns, conn, device->state.conn); conn = min_t(enum drbd_conns, conn, device->state.conn);
rcu_read_unlock(); rcu_read_unlock();
return conn; return conn;
} }
static bool no_peer_wf_report_params(struct drbd_tconn *tconn) static bool no_peer_wf_report_params(struct drbd_connection *connection)
{ {
struct drbd_device *device; struct drbd_device *device;
int vnr; int vnr;
bool rv = true; bool rv = true;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) idr_for_each_entry(&connection->volumes, device, vnr)
if (device->state.conn == C_WF_REPORT_PARAMS) { if (device->state.conn == C_WF_REPORT_PARAMS) {
rv = false; rv = false;
break; break;
...@@ -237,10 +237,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f, ...@@ -237,10 +237,10 @@ drbd_change_state(struct drbd_device *device, enum chg_state_flags f,
union drbd_state ns; union drbd_state ns;
enum drbd_state_rv rv; enum drbd_state_rv rv;
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
ns = apply_mask_val(drbd_read_state(device), mask, val); ns = apply_mask_val(drbd_read_state(device), mask, val);
rv = _drbd_set_state(device, ns, f, NULL); rv = _drbd_set_state(device, ns, f, NULL);
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
return rv; return rv;
} }
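drbd_change_state() and the NS*() macros used throughout this patch encode a partial state change as a mask/val pair: mask selects which fields of the state union are being changed, val carries their new values, and apply_mask_val() merges the pair into the old state. apply_mask_val() itself is defined outside the hunks shown here, so the following is only an illustration of the idea, not its literal definition:

/* merge a partial state change into an existing state:
 * bits selected by "mask" come from "val", everything else stays as in "os" */
static union drbd_state sketch_apply_mask_val(union drbd_state os,
                                              union drbd_state mask,
                                              union drbd_state val)
{
        union drbd_state ns;

        ns.i = (os.i & ~mask.i) | val.i;
        return ns;
}

NS(disk, D_OUTDATED) in drbd_adm_outdate() above, for example, builds a mask covering only the disk field and a val holding D_OUTDATED, leaving role, connection state and the suspend bits untouched.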
...@@ -271,7 +271,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask, ...@@ -271,7 +271,7 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags)) if (test_and_clear_bit(CL_ST_CHG_FAIL, &device->flags))
return SS_CW_FAILED_BY_PEER; return SS_CW_FAILED_BY_PEER;
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
os = drbd_read_state(device); os = drbd_read_state(device);
ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns); rv = is_valid_transition(os, ns);
...@@ -283,12 +283,12 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask, ...@@ -283,12 +283,12 @@ _req_st_cond(struct drbd_device *device, union drbd_state mask,
if (rv == SS_UNKNOWN_ERROR) { if (rv == SS_UNKNOWN_ERROR) {
rv = is_valid_state(device, ns); rv = is_valid_state(device, ns);
if (rv >= SS_SUCCESS) { if (rv >= SS_SUCCESS) {
rv = is_valid_soft_transition(os, ns, device->tconn); rv = is_valid_soft_transition(os, ns, device->connection);
if (rv >= SS_SUCCESS) if (rv >= SS_SUCCESS)
rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */ rv = SS_UNKNOWN_ERROR; /* cont waiting, otherwise fail. */
} }
} }
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
return rv; return rv;
} }
...@@ -317,20 +317,20 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask, ...@@ -317,20 +317,20 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
if (f & CS_SERIALIZE) if (f & CS_SERIALIZE)
mutex_lock(device->state_mutex); mutex_lock(device->state_mutex);
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
os = drbd_read_state(device); os = drbd_read_state(device);
ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
rv = is_valid_transition(os, ns); rv = is_valid_transition(os, ns);
if (rv < SS_SUCCESS) { if (rv < SS_SUCCESS) {
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
goto abort; goto abort;
} }
if (cl_wide_st_chg(device, os, ns)) { if (cl_wide_st_chg(device, os, ns)) {
rv = is_valid_state(device, ns); rv = is_valid_state(device, ns);
if (rv == SS_SUCCESS) if (rv == SS_SUCCESS)
rv = is_valid_soft_transition(os, ns, device->tconn); rv = is_valid_soft_transition(os, ns, device->connection);
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
if (rv < SS_SUCCESS) { if (rv < SS_SUCCESS) {
if (f & CS_VERBOSE) if (f & CS_VERBOSE)
...@@ -353,17 +353,17 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask, ...@@ -353,17 +353,17 @@ drbd_req_state(struct drbd_device *device, union drbd_state mask,
print_st_err(device, os, ns, rv); print_st_err(device, os, ns, rv);
goto abort; goto abort;
} }
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
ns = apply_mask_val(drbd_read_state(device), mask, val); ns = apply_mask_val(drbd_read_state(device), mask, val);
rv = _drbd_set_state(device, ns, f, &done); rv = _drbd_set_state(device, ns, f, &done);
} else { } else {
rv = _drbd_set_state(device, ns, f, &done); rv = _drbd_set_state(device, ns, f, &done);
} }
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) { if (f & CS_WAIT_COMPLETE && rv == SS_SUCCESS) {
D_ASSERT(current != device->tconn->worker.task); D_ASSERT(current != device->connection->worker.task);
wait_for_completion(&done); wait_for_completion(&done);
} }
...@@ -480,7 +480,7 @@ static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os ...@@ -480,7 +480,7 @@ static void drbd_pr_state_change(struct drbd_device *device, union drbd_state os
dev_info(DEV, "%s\n", pb); dev_info(DEV, "%s\n", pb);
} }
static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, union drbd_state ns, static void conn_pr_state_change(struct drbd_connection *connection, union drbd_state os, union drbd_state ns,
enum chg_state_flags flags) enum chg_state_flags flags)
{ {
char pb[300]; char pb[300];
...@@ -494,7 +494,7 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os, ...@@ -494,7 +494,7 @@ static void conn_pr_state_change(struct drbd_tconn *tconn, union drbd_state os,
is_susp(ns)); is_susp(ns));
if (pbp != pb) if (pbp != pb)
conn_info(tconn, "%s\n", pb); conn_info(connection, "%s\n", pb);
} }
...@@ -519,12 +519,12 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) ...@@ -519,12 +519,12 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
put_ldev(device); put_ldev(device);
} }
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
if (nc) { if (nc) {
if (!nc->two_primaries && ns.role == R_PRIMARY) { if (!nc->two_primaries && ns.role == R_PRIMARY) {
if (ns.peer == R_PRIMARY) if (ns.peer == R_PRIMARY)
rv = SS_TWO_PRIMARIES; rv = SS_TWO_PRIMARIES;
else if (conn_highest_peer(device->tconn) == R_PRIMARY) else if (conn_highest_peer(device->connection) == R_PRIMARY)
rv = SS_O_VOL_PEER_PRI; rv = SS_O_VOL_PEER_PRI;
} }
} }
...@@ -565,7 +565,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) ...@@ -565,7 +565,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
rv = SS_NO_VERIFY_ALG; rv = SS_NO_VERIFY_ALG;
else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) && else if ((ns.conn == C_VERIFY_S || ns.conn == C_VERIFY_T) &&
device->tconn->agreed_pro_version < 88) device->connection->agreed_pro_version < 88)
rv = SS_NOT_SUPPORTED; rv = SS_NOT_SUPPORTED;
else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE) else if (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)
...@@ -592,7 +592,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns) ...@@ -592,7 +592,7 @@ is_valid_state(struct drbd_device *device, union drbd_state ns)
* @os: old state. * @os: old state.
*/ */
static enum drbd_state_rv static enum drbd_state_rv
is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_tconn *tconn) is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_connection *connection)
{ {
enum drbd_state_rv rv = SS_SUCCESS; enum drbd_state_rv rv = SS_SUCCESS;
...@@ -620,7 +620,7 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t ...@@ -620,7 +620,7 @@ is_valid_soft_transition(union drbd_state os, union drbd_state ns, struct drbd_t
/* While establishing a connection only allow cstate to change. /* While establishing a connection only allow cstate to change.
Delay/refuse role changes, detach attach etc... */ Delay/refuse role changes, detach attach etc... */
if (test_bit(STATE_SENT, &tconn->flags) && if (test_bit(STATE_SENT, &connection->flags) &&
!(os.conn == C_WF_REPORT_PARAMS || !(os.conn == C_WF_REPORT_PARAMS ||
(ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION))) (ns.conn == C_WF_REPORT_PARAMS && os.conn == C_WF_CONNECTION)))
rv = SS_IN_TRANSIENT_STATE; rv = SS_IN_TRANSIENT_STATE;
...@@ -871,7 +871,7 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st ...@@ -871,7 +871,7 @@ static union drbd_state sanitize_state(struct drbd_device *device, union drbd_st
(ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED)) (ns.role == R_PRIMARY && ns.conn < C_CONNECTED && ns.pdsk > D_OUTDATED))
ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */ ns.susp_fen = 1; /* Suspend IO while fence-peer handler runs (peer lost) */
if (device->tconn->res_opts.on_no_data == OND_SUSPEND_IO && if (device->connection->res_opts.on_no_data == OND_SUSPEND_IO &&
(ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE)) (ns.role == R_PRIMARY && ns.disk < D_UP_TO_DATE && ns.pdsk < D_UP_TO_DATE))
ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */ ns.susp_nod = 1; /* Suspend IO while no data available (no accessible data available) */
...@@ -899,7 +899,7 @@ void drbd_resume_al(struct drbd_device *device) ...@@ -899,7 +899,7 @@ void drbd_resume_al(struct drbd_device *device)
/* helper for __drbd_set_state */ /* helper for __drbd_set_state */
static void set_ov_position(struct drbd_device *device, enum drbd_conns cs) static void set_ov_position(struct drbd_device *device, enum drbd_conns cs)
{ {
if (device->tconn->agreed_pro_version < 90) if (device->connection->agreed_pro_version < 90)
device->ov_start_sector = 0; device->ov_start_sector = 0;
device->rs_total = drbd_bm_bits(device); device->rs_total = drbd_bm_bits(device);
device->ov_position = 0; device->ov_position = 0;
...@@ -962,9 +962,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -962,9 +962,9 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
this happen...*/ this happen...*/
if (is_valid_state(device, os) == rv) if (is_valid_state(device, os) == rv)
rv = is_valid_soft_transition(os, ns, device->tconn); rv = is_valid_soft_transition(os, ns, device->connection);
} else } else
rv = is_valid_soft_transition(os, ns, device->tconn); rv = is_valid_soft_transition(os, ns, device->connection);
} }
if (rv < SS_SUCCESS) { if (rv < SS_SUCCESS) {
...@@ -981,7 +981,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -981,7 +981,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
sanitize_state(). Only display it here if we were not called from sanitize_state(). Only display it here if we were not called from
_conn_request_state() */ _conn_request_state() */
if (!(flags & CS_DC_SUSP)) if (!(flags & CS_DC_SUSP))
conn_pr_state_change(device->tconn, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP); conn_pr_state_change(device->connection, os, ns, (flags & ~CS_DC_MASK) | CS_DC_SUSP);
/* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference /* if we are going -> D_FAILED or D_DISKLESS, grab one extra reference
* on the ldev here, to be sure the transition -> D_DISKLESS resp. * on the ldev here, to be sure the transition -> D_DISKLESS resp.
...@@ -994,25 +994,25 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -994,25 +994,25 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
did_remote = drbd_should_do_remote(device->state); did_remote = drbd_should_do_remote(device->state);
device->state.i = ns.i; device->state.i = ns.i;
should_do_remote = drbd_should_do_remote(device->state); should_do_remote = drbd_should_do_remote(device->state);
device->tconn->susp = ns.susp; device->connection->susp = ns.susp;
device->tconn->susp_nod = ns.susp_nod; device->connection->susp_nod = ns.susp_nod;
device->tconn->susp_fen = ns.susp_fen; device->connection->susp_fen = ns.susp_fen;
/* put replicated vs not-replicated requests in separate epochs */ /* put replicated vs not-replicated requests in separate epochs */
if (did_remote != should_do_remote) if (did_remote != should_do_remote)
start_new_tl_epoch(device->tconn); start_new_tl_epoch(device->connection);
if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING) if (os.disk == D_ATTACHING && ns.disk >= D_NEGOTIATING)
drbd_print_uuids(device, "attached to UUIDs"); drbd_print_uuids(device, "attached to UUIDs");
/* Wake up role changes that were delayed because of connection establishing */ /* Wake up role changes that were delayed because of connection establishing */
if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS && if (os.conn == C_WF_REPORT_PARAMS && ns.conn != C_WF_REPORT_PARAMS &&
no_peer_wf_report_params(device->tconn)) no_peer_wf_report_params(device->connection))
clear_bit(STATE_SENT, &device->tconn->flags); clear_bit(STATE_SENT, &device->connection->flags);
wake_up(&device->misc_wait); wake_up(&device->misc_wait);
wake_up(&device->state_wait); wake_up(&device->state_wait);
wake_up(&device->tconn->ping_wait); wake_up(&device->connection->ping_wait);
/* Aborted verify run, or we reached the stop sector. /* Aborted verify run, or we reached the stop sector.
* Log the last position, unless end-of-device. */ * Log the last position, unless end-of-device. */
...@@ -1101,21 +1101,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -1101,21 +1101,21 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
/* Receiver should clean up itself */ /* Receiver should clean up itself */
if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING) if (os.conn != C_DISCONNECTING && ns.conn == C_DISCONNECTING)
drbd_thread_stop_nowait(&device->tconn->receiver); drbd_thread_stop_nowait(&device->connection->receiver);
/* Now the receiver finished cleaning up itself, it should die */ /* Now the receiver finished cleaning up itself, it should die */
if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE) if (os.conn != C_STANDALONE && ns.conn == C_STANDALONE)
drbd_thread_stop_nowait(&device->tconn->receiver); drbd_thread_stop_nowait(&device->connection->receiver);
/* Upon network failure, we need to restart the receiver. */ /* Upon network failure, we need to restart the receiver. */
if (os.conn > C_WF_CONNECTION && if (os.conn > C_WF_CONNECTION &&
ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT) ns.conn <= C_TEAR_DOWN && ns.conn >= C_TIMEOUT)
drbd_thread_restart_nowait(&device->tconn->receiver); drbd_thread_restart_nowait(&device->connection->receiver);
/* Resume AL writing if we get a connection */ /* Resume AL writing if we get a connection */
if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) { if (os.conn < C_CONNECTED && ns.conn >= C_CONNECTED) {
drbd_resume_al(device); drbd_resume_al(device);
device->tconn->connect_cnt++; device->connection->connect_cnt++;
} }
/* remember last attach time so request_timer_fn() won't /* remember last attach time so request_timer_fn() won't
...@@ -1133,7 +1133,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns, ...@@ -1133,7 +1133,7 @@ __drbd_set_state(struct drbd_device *device, union drbd_state ns,
ascw->w.cb = w_after_state_ch; ascw->w.cb = w_after_state_ch;
ascw->w.device = device; ascw->w.device = device;
ascw->done = done; ascw->done = done;
drbd_queue_work(&device->tconn->sender_work, &ascw->w); drbd_queue_work(&device->connection->sender_work, &ascw->w);
} else { } else {
dev_err(DEV, "Could not kmalloc an ascw\n"); dev_err(DEV, "Could not kmalloc an ascw\n");
} }
...@@ -1181,7 +1181,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device, ...@@ -1181,7 +1181,7 @@ int drbd_bitmap_io_from_worker(struct drbd_device *device,
{ {
int rv; int rv;
D_ASSERT(current == device->tconn->worker.task); D_ASSERT(current == device->connection->worker.task);
/* open coded non-blocking drbd_suspend_io(device); */ /* open coded non-blocking drbd_suspend_io(device); */
set_bit(SUSPEND_IO, &device->flags); set_bit(SUSPEND_IO, &device->flags);
...@@ -1228,47 +1228,47 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1228,47 +1228,47 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
state change. This function might sleep */ state change. This function might sleep */
if (ns.susp_nod) { if (ns.susp_nod) {
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
enum drbd_req_event what = NOTHING; enum drbd_req_event what = NOTHING;
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
if (os.conn < C_CONNECTED && conn_lowest_conn(tconn) >= C_CONNECTED) if (os.conn < C_CONNECTED && conn_lowest_conn(connection) >= C_CONNECTED)
what = RESEND; what = RESEND;
if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) && if ((os.disk == D_ATTACHING || os.disk == D_NEGOTIATING) &&
conn_lowest_disk(tconn) > D_NEGOTIATING) conn_lowest_disk(connection) > D_NEGOTIATING)
what = RESTART_FROZEN_DISK_IO; what = RESTART_FROZEN_DISK_IO;
if (tconn->susp_nod && what != NOTHING) { if (connection->susp_nod && what != NOTHING) {
_tl_restart(tconn, what); _tl_restart(connection, what);
_conn_request_state(tconn, _conn_request_state(connection,
(union drbd_state) { { .susp_nod = 1 } }, (union drbd_state) { { .susp_nod = 1 } },
(union drbd_state) { { .susp_nod = 0 } }, (union drbd_state) { { .susp_nod = 0 } },
CS_VERBOSE); CS_VERBOSE);
} }
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
} }
if (ns.susp_fen) { if (ns.susp_fen) {
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
if (tconn->susp_fen && conn_lowest_conn(tconn) >= C_CONNECTED) { if (connection->susp_fen && conn_lowest_conn(connection) >= C_CONNECTED) {
/* case2: The connection was established again: */ /* case2: The connection was established again: */
struct drbd_device *odev; struct drbd_device *odev;
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, odev, vnr) idr_for_each_entry(&connection->volumes, odev, vnr)
clear_bit(NEW_CUR_UUID, &odev->flags); clear_bit(NEW_CUR_UUID, &odev->flags);
rcu_read_unlock(); rcu_read_unlock();
_tl_restart(tconn, RESEND); _tl_restart(connection, RESEND);
_conn_request_state(tconn, _conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } }, (union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } }, (union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE); CS_VERBOSE);
} }
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
} }
/* Became sync source. With protocol >= 96, we still need to send out /* Became sync source. With protocol >= 96, we still need to send out
...@@ -1277,7 +1277,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os, ...@@ -1277,7 +1277,7 @@ static void after_state_ch(struct drbd_device *device, union drbd_state os,
* which is unexpected. */ * which is unexpected. */
if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) && if ((os.conn != C_SYNC_SOURCE && os.conn != C_PAUSED_SYNC_S) &&
(ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) && (ns.conn == C_SYNC_SOURCE || ns.conn == C_PAUSED_SYNC_S) &&
device->tconn->agreed_pro_version >= 96 && get_ldev(device)) { device->connection->agreed_pro_version >= 96 && get_ldev(device)) {
drbd_gen_and_send_sync_uuid(device); drbd_gen_and_send_sync_uuid(device);
put_ldev(device); put_ldev(device);
} }
...@@ -1526,7 +1526,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused) ...@@ -1526,7 +1526,7 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
{ {
struct after_conn_state_chg_work *acscw = struct after_conn_state_chg_work *acscw =
container_of(w, struct after_conn_state_chg_work, w); container_of(w, struct after_conn_state_chg_work, w);
struct drbd_tconn *tconn = w->tconn; struct drbd_connection *connection = w->connection;
enum drbd_conns oc = acscw->oc; enum drbd_conns oc = acscw->oc;
union drbd_state ns_max = acscw->ns_max; union drbd_state ns_max = acscw->ns_max;
struct drbd_device *device; struct drbd_device *device;
...@@ -1536,18 +1536,18 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused) ...@@ -1536,18 +1536,18 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
/* Upon network configuration, we need to start the receiver */ /* Upon network configuration, we need to start the receiver */
if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED) if (oc == C_STANDALONE && ns_max.conn == C_UNCONNECTED)
drbd_thread_start(&tconn->receiver); drbd_thread_start(&connection->receiver);
if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) { if (oc == C_DISCONNECTING && ns_max.conn == C_STANDALONE) {
struct net_conf *old_conf; struct net_conf *old_conf;
mutex_lock(&tconn->conf_update); mutex_lock(&connection->conf_update);
old_conf = tconn->net_conf; old_conf = connection->net_conf;
tconn->my_addr_len = 0; connection->my_addr_len = 0;
tconn->peer_addr_len = 0; connection->peer_addr_len = 0;
rcu_assign_pointer(tconn->net_conf, NULL); rcu_assign_pointer(connection->net_conf, NULL);
conn_free_crypto(tconn); conn_free_crypto(connection);
mutex_unlock(&tconn->conf_update); mutex_unlock(&connection->conf_update);
synchronize_rcu(); synchronize_rcu();
kfree(old_conf); kfree(old_conf);
...@@ -1557,30 +1557,30 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused) ...@@ -1557,30 +1557,30 @@ static int w_after_conn_state_ch(struct drbd_work *w, int unused)
/* case1: The outdate peer handler is successful: */ /* case1: The outdate peer handler is successful: */
if (ns_max.pdsk <= D_OUTDATED) { if (ns_max.pdsk <= D_OUTDATED) {
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
if (test_bit(NEW_CUR_UUID, &device->flags)) { if (test_bit(NEW_CUR_UUID, &device->flags)) {
drbd_uuid_new_current(device); drbd_uuid_new_current(device);
clear_bit(NEW_CUR_UUID, &device->flags); clear_bit(NEW_CUR_UUID, &device->flags);
} }
} }
rcu_read_unlock(); rcu_read_unlock();
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
_tl_restart(tconn, CONNECTION_LOST_WHILE_PENDING); _tl_restart(connection, CONNECTION_LOST_WHILE_PENDING);
_conn_request_state(tconn, _conn_request_state(connection,
(union drbd_state) { { .susp_fen = 1 } }, (union drbd_state) { { .susp_fen = 1 } },
(union drbd_state) { { .susp_fen = 0 } }, (union drbd_state) { { .susp_fen = 0 } },
CS_VERBOSE); CS_VERBOSE);
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
} }
} }
kref_put(&tconn->kref, &conn_destroy); kref_put(&connection->kref, &conn_destroy);
conn_md_sync(tconn); conn_md_sync(connection);
return 0; return 0;
} }
void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum chg_state_flags *pf) void conn_old_common_state(struct drbd_connection *connection, union drbd_state *pcs, enum chg_state_flags *pf)
{ {
enum chg_state_flags flags = ~0; enum chg_state_flags flags = ~0;
struct drbd_device *device; struct drbd_device *device;
...@@ -1588,13 +1588,13 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum ...@@ -1588,13 +1588,13 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
union drbd_dev_state os, cs = { union drbd_dev_state os, cs = {
{ .role = R_SECONDARY, { .role = R_SECONDARY,
.peer = R_UNKNOWN, .peer = R_UNKNOWN,
.conn = tconn->cstate, .conn = connection->cstate,
.disk = D_DISKLESS, .disk = D_DISKLESS,
.pdsk = D_UNKNOWN, .pdsk = D_UNKNOWN,
} }; } };
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
os = device->state; os = device->state;
if (first_vol) { if (first_vol) {
...@@ -1626,7 +1626,7 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum ...@@ -1626,7 +1626,7 @@ void conn_old_common_state(struct drbd_tconn *tconn, union drbd_state *pcs, enum
} }
static enum drbd_state_rv static enum drbd_state_rv
conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, conn_is_valid_transition(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags) enum chg_state_flags flags)
{ {
enum drbd_state_rv rv = SS_SUCCESS; enum drbd_state_rv rv = SS_SUCCESS;
...@@ -1635,7 +1635,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union ...@@ -1635,7 +1635,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
int vnr; int vnr;
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
os = drbd_read_state(device); os = drbd_read_state(device);
ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL); ns = sanitize_state(device, apply_mask_val(os, mask, val), NULL);
...@@ -1653,9 +1653,9 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union ...@@ -1653,9 +1653,9 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
rv = is_valid_state(device, ns); rv = is_valid_state(device, ns);
if (rv < SS_SUCCESS) { if (rv < SS_SUCCESS) {
if (is_valid_state(device, os) == rv) if (is_valid_state(device, os) == rv)
rv = is_valid_soft_transition(os, ns, tconn); rv = is_valid_soft_transition(os, ns, connection);
} else } else
rv = is_valid_soft_transition(os, ns, tconn); rv = is_valid_soft_transition(os, ns, connection);
} }
if (rv < SS_SUCCESS) if (rv < SS_SUCCESS)
break; break;
...@@ -1669,7 +1669,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union ...@@ -1669,7 +1669,7 @@ conn_is_valid_transition(struct drbd_tconn *tconn, union drbd_state mask, union
} }
void void
conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, conn_set_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags) union drbd_state *pns_min, union drbd_state *pns_max, enum chg_state_flags flags)
{ {
union drbd_state ns, os, ns_max = { }; union drbd_state ns, os, ns_max = { };
...@@ -1688,14 +1688,14 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state ...@@ -1688,14 +1688,14 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
/* remember last connect time so request_timer_fn() won't /* remember last connect time so request_timer_fn() won't
* kill newly established sessions while we are still trying to thaw * kill newly established sessions while we are still trying to thaw
* previously frozen IO */ * previously frozen IO */
if (tconn->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS) if (connection->cstate != C_WF_REPORT_PARAMS && val.conn == C_WF_REPORT_PARAMS)
tconn->last_reconnect_jif = jiffies; connection->last_reconnect_jif = jiffies;
tconn->cstate = val.conn; connection->cstate = val.conn;
} }
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
number_of_volumes++; number_of_volumes++;
os = drbd_read_state(device); os = drbd_read_state(device);
ns = apply_mask_val(os, mask, val); ns = apply_mask_val(os, mask, val);
...@@ -1733,39 +1733,39 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state ...@@ -1733,39 +1733,39 @@ conn_set_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state
} }; } };
} }
ns_min.susp = ns_max.susp = tconn->susp; ns_min.susp = ns_max.susp = connection->susp;
ns_min.susp_nod = ns_max.susp_nod = tconn->susp_nod; ns_min.susp_nod = ns_max.susp_nod = connection->susp_nod;
ns_min.susp_fen = ns_max.susp_fen = tconn->susp_fen; ns_min.susp_fen = ns_max.susp_fen = connection->susp_fen;
*pns_min = ns_min; *pns_min = ns_min;
*pns_max = ns_max; *pns_max = ns_max;
} }
static enum drbd_state_rv static enum drbd_state_rv
_conn_rq_cond(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val) _conn_rq_cond(struct drbd_connection *connection, union drbd_state mask, union drbd_state val)
{ {
enum drbd_state_rv rv; enum drbd_state_rv rv;
if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &tconn->flags)) if (test_and_clear_bit(CONN_WD_ST_CHG_OKAY, &connection->flags))
return SS_CW_SUCCESS; return SS_CW_SUCCESS;
if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &tconn->flags)) if (test_and_clear_bit(CONN_WD_ST_CHG_FAIL, &connection->flags))
return SS_CW_FAILED_BY_PEER; return SS_CW_FAILED_BY_PEER;
rv = conn_is_valid_transition(tconn, mask, val, 0); rv = conn_is_valid_transition(connection, mask, val, 0);
if (rv == SS_SUCCESS && tconn->cstate == C_WF_REPORT_PARAMS) if (rv == SS_SUCCESS && connection->cstate == C_WF_REPORT_PARAMS)
rv = SS_UNKNOWN_ERROR; /* continue waiting */ rv = SS_UNKNOWN_ERROR; /* continue waiting */
return rv; return rv;
} }
enum drbd_state_rv enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, _conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags) enum chg_state_flags flags)
{ {
enum drbd_state_rv rv = SS_SUCCESS; enum drbd_state_rv rv = SS_SUCCESS;
struct after_conn_state_chg_work *acscw; struct after_conn_state_chg_work *acscw;
enum drbd_conns oc = tconn->cstate; enum drbd_conns oc = connection->cstate;
union drbd_state ns_max, ns_min, os; union drbd_state ns_max, ns_min, os;
bool have_mutex = false; bool have_mutex = false;
...@@ -1775,7 +1775,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_ ...@@ -1775,7 +1775,7 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
goto abort; goto abort;
} }
rv = conn_is_valid_transition(tconn, mask, val, flags); rv = conn_is_valid_transition(connection, mask, val, flags);
if (rv < SS_SUCCESS) if (rv < SS_SUCCESS)
goto abort; goto abort;
...@@ -1785,38 +1785,38 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_ ...@@ -1785,38 +1785,38 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
/* This will be a cluster-wide state change. /* This will be a cluster-wide state change.
* Need to give up the spinlock, grab the mutex, * Need to give up the spinlock, grab the mutex,
* then send the state change request, ... */ * then send the state change request, ... */
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
mutex_lock(&tconn->cstate_mutex); mutex_lock(&connection->cstate_mutex);
have_mutex = true; have_mutex = true;
set_bit(CONN_WD_ST_CHG_REQ, &tconn->flags); set_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
if (conn_send_state_req(tconn, mask, val)) { if (conn_send_state_req(connection, mask, val)) {
/* sending failed. */ /* sending failed. */
clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags); clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
rv = SS_CW_FAILED_BY_PEER; rv = SS_CW_FAILED_BY_PEER;
/* need to re-aquire the spin lock, though */ /* need to re-aquire the spin lock, though */
goto abort_unlocked; goto abort_unlocked;
} }
if (val.conn == C_DISCONNECTING) if (val.conn == C_DISCONNECTING)
set_bit(DISCONNECT_SENT, &tconn->flags); set_bit(DISCONNECT_SENT, &connection->flags);
/* ... and re-aquire the spinlock. /* ... and re-aquire the spinlock.
* If _conn_rq_cond() returned >= SS_SUCCESS, we must call * If _conn_rq_cond() returned >= SS_SUCCESS, we must call
* conn_set_state() within the same spinlock. */ * conn_set_state() within the same spinlock. */
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
wait_event_lock_irq(tconn->ping_wait, wait_event_lock_irq(connection->ping_wait,
(rv = _conn_rq_cond(tconn, mask, val)), (rv = _conn_rq_cond(connection, mask, val)),
tconn->req_lock); connection->req_lock);
clear_bit(CONN_WD_ST_CHG_REQ, &tconn->flags); clear_bit(CONN_WD_ST_CHG_REQ, &connection->flags);
if (rv < SS_SUCCESS) if (rv < SS_SUCCESS)
goto abort; goto abort;
} }
conn_old_common_state(tconn, &os, &flags); conn_old_common_state(connection, &os, &flags);
flags |= CS_DC_SUSP; flags |= CS_DC_SUSP;
conn_set_state(tconn, mask, val, &ns_min, &ns_max, flags); conn_set_state(connection, mask, val, &ns_min, &ns_max, flags);
conn_pr_state_change(tconn, os, ns_max, flags); conn_pr_state_change(connection, os, ns_max, flags);
acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC); acscw = kmalloc(sizeof(*acscw), GFP_ATOMIC);
if (acscw) { if (acscw) {
...@@ -1825,39 +1825,39 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_ ...@@ -1825,39 +1825,39 @@ _conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_
acscw->ns_max = ns_max; acscw->ns_max = ns_max;
acscw->flags = flags; acscw->flags = flags;
acscw->w.cb = w_after_conn_state_ch; acscw->w.cb = w_after_conn_state_ch;
kref_get(&tconn->kref); kref_get(&connection->kref);
acscw->w.tconn = tconn; acscw->w.connection = connection;
drbd_queue_work(&tconn->sender_work, &acscw->w); drbd_queue_work(&connection->sender_work, &acscw->w);
} else { } else {
conn_err(tconn, "Could not kmalloc an acscw\n"); conn_err(connection, "Could not kmalloc an acscw\n");
} }
abort: abort:
if (have_mutex) { if (have_mutex) {
/* mutex_unlock() "... must not be used in interrupt context.", /* mutex_unlock() "... must not be used in interrupt context.",
* so give up the spinlock, then re-aquire it */ * so give up the spinlock, then re-aquire it */
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
abort_unlocked: abort_unlocked:
mutex_unlock(&tconn->cstate_mutex); mutex_unlock(&connection->cstate_mutex);
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
} }
if (rv < SS_SUCCESS && flags & CS_VERBOSE) { if (rv < SS_SUCCESS && flags & CS_VERBOSE) {
conn_err(tconn, "State change failed: %s\n", drbd_set_st_err_str(rv)); conn_err(connection, "State change failed: %s\n", drbd_set_st_err_str(rv));
conn_err(tconn, " mask = 0x%x val = 0x%x\n", mask.i, val.i); conn_err(connection, " mask = 0x%x val = 0x%x\n", mask.i, val.i);
conn_err(tconn, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn)); conn_err(connection, " old_conn:%s wanted_conn:%s\n", drbd_conn_str(oc), drbd_conn_str(val.conn));
} }
return rv; return rv;
} }
enum drbd_state_rv enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags) enum chg_state_flags flags)
{ {
enum drbd_state_rv rv; enum drbd_state_rv rv;
spin_lock_irq(&tconn->req_lock); spin_lock_irq(&connection->req_lock);
rv = _conn_request_state(tconn, mask, val, flags); rv = _conn_request_state(connection, mask, val, flags);
spin_unlock_irq(&tconn->req_lock); spin_unlock_irq(&connection->req_lock);
return rv; return rv;
} }
...@@ -2,7 +2,7 @@ ...@@ -2,7 +2,7 @@
#define DRBD_STATE_H #define DRBD_STATE_H
struct drbd_device; struct drbd_device;
struct drbd_tconn; struct drbd_connection;
/** /**
* DOC: DRBD State macros * DOC: DRBD State macros
...@@ -124,15 +124,15 @@ extern void print_st_err(struct drbd_device *, union drbd_state, ...@@ -124,15 +124,15 @@ extern void print_st_err(struct drbd_device *, union drbd_state,
union drbd_state, int); union drbd_state, int);
enum drbd_state_rv enum drbd_state_rv
_conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, _conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags); enum chg_state_flags flags);
enum drbd_state_rv enum drbd_state_rv
conn_request_state(struct drbd_tconn *tconn, union drbd_state mask, union drbd_state val, conn_request_state(struct drbd_connection *connection, union drbd_state mask, union drbd_state val,
enum chg_state_flags flags); enum chg_state_flags flags);
extern void drbd_resume_al(struct drbd_device *device); extern void drbd_resume_al(struct drbd_device *device);
extern bool conn_all_vols_unconf(struct drbd_tconn *tconn); extern bool conn_all_vols_unconf(struct drbd_connection *connection);
/** /**
* drbd_request_state() - Reqest a state change * drbd_request_state() - Reqest a state change
...@@ -151,11 +151,11 @@ static inline int drbd_request_state(struct drbd_device *device, ...@@ -151,11 +151,11 @@ static inline int drbd_request_state(struct drbd_device *device,
return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED); return _drbd_request_state(device, mask, val, CS_VERBOSE + CS_ORDERED);
} }
enum drbd_role conn_highest_role(struct drbd_tconn *tconn); enum drbd_role conn_highest_role(struct drbd_connection *connection);
enum drbd_role conn_highest_peer(struct drbd_tconn *tconn); enum drbd_role conn_highest_peer(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_disk(struct drbd_tconn *tconn); enum drbd_disk_state conn_highest_disk(struct drbd_connection *connection);
enum drbd_disk_state conn_lowest_disk(struct drbd_tconn *tconn); enum drbd_disk_state conn_lowest_disk(struct drbd_connection *connection);
enum drbd_disk_state conn_highest_pdsk(struct drbd_tconn *tconn); enum drbd_disk_state conn_highest_pdsk(struct drbd_connection *connection);
enum drbd_conns conn_lowest_conn(struct drbd_tconn *tconn); enum drbd_conns conn_lowest_conn(struct drbd_connection *connection);
#endif #endif
...@@ -102,16 +102,16 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele ...@@ -102,16 +102,16 @@ static void drbd_endio_read_sec_final(struct drbd_peer_request *peer_req) __rele
unsigned long flags = 0; unsigned long flags = 0;
struct drbd_device *device = peer_req->w.device; struct drbd_device *device = peer_req->w.device;
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
device->read_cnt += peer_req->i.size >> 9; device->read_cnt += peer_req->i.size >> 9;
list_del(&peer_req->w.list); list_del(&peer_req->w.list);
if (list_empty(&device->read_ee)) if (list_empty(&device->read_ee))
wake_up(&device->ee_wait); wake_up(&device->ee_wait);
if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
__drbd_chk_io_error(device, DRBD_READ_ERROR); __drbd_chk_io_error(device, DRBD_READ_ERROR);
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
drbd_queue_work(&device->tconn->sender_work, &peer_req->w); drbd_queue_work(&device->connection->sender_work, &peer_req->w);
put_ldev(device); put_ldev(device);
} }
...@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel ...@@ -134,7 +134,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO; do_al_complete_io = peer_req->flags & EE_CALL_AL_COMPLETE_IO;
block_id = peer_req->block_id; block_id = peer_req->block_id;
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
device->writ_cnt += peer_req->i.size >> 9; device->writ_cnt += peer_req->i.size >> 9;
list_move_tail(&peer_req->w.list, &device->done_ee); list_move_tail(&peer_req->w.list, &device->done_ee);
...@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel ...@@ -150,7 +150,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
if (test_bit(__EE_WAS_ERROR, &peer_req->flags)) if (test_bit(__EE_WAS_ERROR, &peer_req->flags))
__drbd_chk_io_error(device, DRBD_WRITE_ERROR); __drbd_chk_io_error(device, DRBD_WRITE_ERROR);
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
if (block_id == ID_SYNCER) if (block_id == ID_SYNCER)
drbd_rs_complete_io(device, i.sector); drbd_rs_complete_io(device, i.sector);
...@@ -161,7 +161,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel ...@@ -161,7 +161,7 @@ static void drbd_endio_write_sec_final(struct drbd_peer_request *peer_req) __rel
if (do_al_complete_io) if (do_al_complete_io)
drbd_al_complete_io(device, &i); drbd_al_complete_io(device, &i);
wake_asender(device->tconn); wake_asender(device->connection);
put_ldev(device); put_ldev(device);
} }
...@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error) ...@@ -273,9 +273,9 @@ void drbd_request_endio(struct bio *bio, int error)
req->private_bio = ERR_PTR(error); req->private_bio = ERR_PTR(error);
/* not req_mod(), we need irqsave here! */ /* not req_mod(), we need irqsave here! */
spin_lock_irqsave(&device->tconn->req_lock, flags); spin_lock_irqsave(&device->connection->req_lock, flags);
__req_mod(req, what, &m); __req_mod(req, what, &m);
spin_unlock_irqrestore(&device->tconn->req_lock, flags); spin_unlock_irqrestore(&device->connection->req_lock, flags);
put_ldev(device); put_ldev(device);
if (m.bio) if (m.bio)
...@@ -345,12 +345,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel) ...@@ -345,12 +345,12 @@ static int w_e_send_csum(struct drbd_work *w, int cancel)
if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0)) if (unlikely((peer_req->flags & EE_WAS_ERROR) != 0))
goto out; goto out;
digest_size = crypto_hash_digestsize(device->tconn->csums_tfm); digest_size = crypto_hash_digestsize(device->connection->csums_tfm);
digest = kmalloc(digest_size, GFP_NOIO); digest = kmalloc(digest_size, GFP_NOIO);
if (digest) { if (digest) {
sector_t sector = peer_req->i.sector; sector_t sector = peer_req->i.sector;
unsigned int size = peer_req->i.size; unsigned int size = peer_req->i.size;
drbd_csum_ee(device, device->tconn->csums_tfm, peer_req, digest); drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest);
/* Free peer_req and pages before send. /* Free peer_req and pages before send.
* In case we block on congestion, we could otherwise run into * In case we block on congestion, we could otherwise run into
* some distributed deadlock, if the other side blocks on * some distributed deadlock, if the other side blocks on
...@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size) ...@@ -397,9 +397,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
goto defer; goto defer;
peer_req->w.cb = w_e_send_csum; peer_req->w.cb = w_e_send_csum;
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
list_add(&peer_req->w.list, &device->read_ee); list_add(&peer_req->w.list, &device->read_ee);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
atomic_add(size >> 9, &device->rs_sect_ev); atomic_add(size >> 9, &device->rs_sect_ev);
if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0) if (drbd_submit_peer_request(device, peer_req, READ, DRBD_FAULT_RS_RD) == 0)
...@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size) ...@@ -409,9 +409,9 @@ static int read_for_csum(struct drbd_device *device, sector_t sector, int size)
* because bio_add_page failed (probably broken lower level driver), * because bio_add_page failed (probably broken lower level driver),
* retry may or may not help. * retry may or may not help.
* If it does not, you may need to force disconnect. */ * If it does not, you may need to force disconnect. */
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
list_del(&peer_req->w.list); list_del(&peer_req->w.list);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
drbd_free_peer_req(device, peer_req); drbd_free_peer_req(device, peer_req);
defer: defer:
...@@ -439,7 +439,7 @@ void resync_timer_fn(unsigned long data) ...@@ -439,7 +439,7 @@ void resync_timer_fn(unsigned long data)
struct drbd_device *device = (struct drbd_device *) data; struct drbd_device *device = (struct drbd_device *) data;
if (list_empty(&device->resync_work.list)) if (list_empty(&device->resync_work.list))
drbd_queue_work(&device->tconn->sender_work, &device->resync_work); drbd_queue_work(&device->connection->sender_work, &device->resync_work);
} }
static void fifo_set(struct fifo_buffer *fb, int value) static void fifo_set(struct fifo_buffer *fb, int value)
...@@ -597,15 +597,15 @@ int w_make_resync_request(struct drbd_work *w, int cancel) ...@@ -597,15 +597,15 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
for (i = 0; i < number; i++) { for (i = 0; i < number; i++) {
/* Stop generating RS requests, when half of the send buffer is filled */ /* Stop generating RS requests, when half of the send buffer is filled */
mutex_lock(&device->tconn->data.mutex); mutex_lock(&device->connection->data.mutex);
if (device->tconn->data.socket) { if (device->connection->data.socket) {
queued = device->tconn->data.socket->sk->sk_wmem_queued; queued = device->connection->data.socket->sk->sk_wmem_queued;
sndbuf = device->tconn->data.socket->sk->sk_sndbuf; sndbuf = device->connection->data.socket->sk->sk_sndbuf;
} else { } else {
queued = 1; queued = 1;
sndbuf = 0; sndbuf = 0;
} }
mutex_unlock(&device->tconn->data.mutex); mutex_unlock(&device->connection->data.mutex);
if (queued > sndbuf / 2) if (queued > sndbuf / 2)
goto requeue; goto requeue;
...@@ -675,7 +675,7 @@ int w_make_resync_request(struct drbd_work *w, int cancel) ...@@ -675,7 +675,7 @@ int w_make_resync_request(struct drbd_work *w, int cancel)
/* adjust very last sectors, in case we are oddly sized */ /* adjust very last sectors, in case we are oddly sized */
if (sector + (size>>9) > capacity) if (sector + (size>>9) > capacity)
size = (capacity-sector)<<9; size = (capacity-sector)<<9;
if (device->tconn->agreed_pro_version >= 89 && device->tconn->csums_tfm) { if (device->connection->agreed_pro_version >= 89 && device->connection->csums_tfm) {
switch (read_for_csum(device, sector, size)) { switch (read_for_csum(device, sector, size)) {
case -EIO: /* Disk failure */ case -EIO: /* Disk failure */
put_ldev(device); put_ldev(device);
...@@ -800,12 +800,12 @@ static int w_resync_finished(struct drbd_work *w, int cancel) ...@@ -800,12 +800,12 @@ static int w_resync_finished(struct drbd_work *w, int cancel)
static void ping_peer(struct drbd_device *device) static void ping_peer(struct drbd_device *device)
{ {
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
clear_bit(GOT_PING_ACK, &tconn->flags); clear_bit(GOT_PING_ACK, &connection->flags);
request_ping(tconn); request_ping(connection);
wait_event(tconn->ping_wait, wait_event(connection->ping_wait,
test_bit(GOT_PING_ACK, &tconn->flags) || device->state.conn < C_CONNECTED); test_bit(GOT_PING_ACK, &connection->flags) || device->state.conn < C_CONNECTED);
} }
int drbd_resync_finished(struct drbd_device *device) int drbd_resync_finished(struct drbd_device *device)
...@@ -831,7 +831,7 @@ int drbd_resync_finished(struct drbd_device *device) ...@@ -831,7 +831,7 @@ int drbd_resync_finished(struct drbd_device *device)
if (w) { if (w) {
w->cb = w_resync_finished; w->cb = w_resync_finished;
w->device = device; w->device = device;
drbd_queue_work(&device->tconn->sender_work, w); drbd_queue_work(&device->connection->sender_work, w);
return 1; return 1;
} }
dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n"); dev_err(DEV, "Warn failed to drbd_rs_del_all() and to kmalloc(w).\n");
...@@ -854,7 +854,7 @@ int drbd_resync_finished(struct drbd_device *device) ...@@ -854,7 +854,7 @@ int drbd_resync_finished(struct drbd_device *device)
ping_peer(device); ping_peer(device);
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
os = drbd_read_state(device); os = drbd_read_state(device);
verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T); verify_done = (os.conn == C_VERIFY_S || os.conn == C_VERIFY_T);
...@@ -885,7 +885,7 @@ int drbd_resync_finished(struct drbd_device *device) ...@@ -885,7 +885,7 @@ int drbd_resync_finished(struct drbd_device *device)
if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T) if (os.conn == C_SYNC_TARGET || os.conn == C_PAUSED_SYNC_T)
khelper_cmd = "after-resync-target"; khelper_cmd = "after-resync-target";
if (device->tconn->csums_tfm && device->rs_total) { if (device->connection->csums_tfm && device->rs_total) {
const unsigned long s = device->rs_same_csum; const unsigned long s = device->rs_same_csum;
const unsigned long t = device->rs_total; const unsigned long t = device->rs_total;
const int ratio = const int ratio =
...@@ -943,7 +943,7 @@ int drbd_resync_finished(struct drbd_device *device) ...@@ -943,7 +943,7 @@ int drbd_resync_finished(struct drbd_device *device)
_drbd_set_state(device, ns, CS_VERBOSE, NULL); _drbd_set_state(device, ns, CS_VERBOSE, NULL);
out_unlock: out_unlock:
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
put_ldev(device); put_ldev(device);
out: out:
device->rs_total = 0; device->rs_total = 0;
...@@ -970,9 +970,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_ ...@@ -970,9 +970,9 @@ static void move_to_net_ee_or_free(struct drbd_device *device, struct drbd_peer_
int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT; int i = (peer_req->i.size + PAGE_SIZE -1) >> PAGE_SHIFT;
atomic_add(i, &device->pp_in_use_by_net); atomic_add(i, &device->pp_in_use_by_net);
atomic_sub(i, &device->pp_in_use); atomic_sub(i, &device->pp_in_use);
spin_lock_irq(&device->tconn->req_lock); spin_lock_irq(&device->connection->req_lock);
list_add_tail(&peer_req->w.list, &device->net_ee); list_add_tail(&peer_req->w.list, &device->net_ee);
spin_unlock_irq(&device->tconn->req_lock); spin_unlock_irq(&device->connection->req_lock);
wake_up(&drbd_pp_wait); wake_up(&drbd_pp_wait);
} else } else
drbd_free_peer_req(device, peer_req); drbd_free_peer_req(device, peer_req);
...@@ -1096,13 +1096,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel) ...@@ -1096,13 +1096,13 @@ int w_e_end_csum_rs_req(struct drbd_work *w, int cancel)
/* quick hack to try to avoid a race against reconfiguration. /* quick hack to try to avoid a race against reconfiguration.
* a real fix would be much more involved, * a real fix would be much more involved,
* introducing more locking mechanisms */ * introducing more locking mechanisms */
if (device->tconn->csums_tfm) { if (device->connection->csums_tfm) {
digest_size = crypto_hash_digestsize(device->tconn->csums_tfm); digest_size = crypto_hash_digestsize(device->connection->csums_tfm);
D_ASSERT(digest_size == di->digest_size); D_ASSERT(digest_size == di->digest_size);
digest = kmalloc(digest_size, GFP_NOIO); digest = kmalloc(digest_size, GFP_NOIO);
} }
if (digest) { if (digest) {
drbd_csum_ee(device, device->tconn->csums_tfm, peer_req, digest); drbd_csum_ee(device, device->connection->csums_tfm, peer_req, digest);
eq = !memcmp(digest, di->digest, digest_size); eq = !memcmp(digest, di->digest, digest_size);
kfree(digest); kfree(digest);
} }
...@@ -1146,7 +1146,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel) ...@@ -1146,7 +1146,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
if (unlikely(cancel)) if (unlikely(cancel))
goto out; goto out;
digest_size = crypto_hash_digestsize(device->tconn->verify_tfm); digest_size = crypto_hash_digestsize(device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO); digest = kmalloc(digest_size, GFP_NOIO);
if (!digest) { if (!digest) {
err = 1; /* terminate the connection in case the allocation failed */ err = 1; /* terminate the connection in case the allocation failed */
...@@ -1154,7 +1154,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel) ...@@ -1154,7 +1154,7 @@ int w_e_end_ov_req(struct drbd_work *w, int cancel)
} }
if (likely(!(peer_req->flags & EE_WAS_ERROR))) if (likely(!(peer_req->flags & EE_WAS_ERROR)))
drbd_csum_ee(device, device->tconn->verify_tfm, peer_req, digest); drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest);
else else
memset(digest, 0, digest_size); memset(digest, 0, digest_size);
...@@ -1217,10 +1217,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel) ...@@ -1217,10 +1217,10 @@ int w_e_end_ov_reply(struct drbd_work *w, int cancel)
di = peer_req->digest; di = peer_req->digest;
if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) { if (likely((peer_req->flags & EE_WAS_ERROR) == 0)) {
digest_size = crypto_hash_digestsize(device->tconn->verify_tfm); digest_size = crypto_hash_digestsize(device->connection->verify_tfm);
digest = kmalloc(digest_size, GFP_NOIO); digest = kmalloc(digest_size, GFP_NOIO);
if (digest) { if (digest) {
drbd_csum_ee(device, device->tconn->verify_tfm, peer_req, digest); drbd_csum_ee(device, device->connection->verify_tfm, peer_req, digest);
D_ASSERT(digest_size == di->digest_size); D_ASSERT(digest_size == di->digest_size);
eq = !memcmp(digest, di->digest, digest_size); eq = !memcmp(digest, di->digest, digest_size);
...@@ -1274,20 +1274,20 @@ int w_prev_work_done(struct drbd_work *w, int cancel) ...@@ -1274,20 +1274,20 @@ int w_prev_work_done(struct drbd_work *w, int cancel)
* and to be able to wait for them. * and to be able to wait for them.
* See also comment in drbd_adm_attach before drbd_suspend_io. * See also comment in drbd_adm_attach before drbd_suspend_io.
*/ */
static int drbd_send_barrier(struct drbd_tconn *tconn) static int drbd_send_barrier(struct drbd_connection *connection)
{ {
struct p_barrier *p; struct p_barrier *p;
struct drbd_socket *sock; struct drbd_socket *sock;
sock = &tconn->data; sock = &connection->data;
p = conn_prepare_command(tconn, sock); p = conn_prepare_command(connection, sock);
if (!p) if (!p)
return -EIO; return -EIO;
p->barrier = tconn->send.current_epoch_nr; p->barrier = connection->send.current_epoch_nr;
p->pad = 0; p->pad = 0;
tconn->send.current_epoch_writes = 0; connection->send.current_epoch_writes = 0;
return conn_send_command(tconn, sock, P_BARRIER, sizeof(*p), NULL, 0); return conn_send_command(connection, sock, P_BARRIER, sizeof(*p), NULL, 0);
} }
int w_send_write_hint(struct drbd_work *w, int cancel) int w_send_write_hint(struct drbd_work *w, int cancel)
...@@ -1297,30 +1297,30 @@ int w_send_write_hint(struct drbd_work *w, int cancel) ...@@ -1297,30 +1297,30 @@ int w_send_write_hint(struct drbd_work *w, int cancel)
if (cancel) if (cancel)
return 0; return 0;
sock = &device->tconn->data; sock = &device->connection->data;
if (!drbd_prepare_command(device, sock)) if (!drbd_prepare_command(device, sock))
return -EIO; return -EIO;
return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0); return drbd_send_command(device, sock, P_UNPLUG_REMOTE, 0, NULL, 0);
} }
static void re_init_if_first_write(struct drbd_tconn *tconn, unsigned int epoch) static void re_init_if_first_write(struct drbd_connection *connection, unsigned int epoch)
{ {
if (!tconn->send.seen_any_write_yet) { if (!connection->send.seen_any_write_yet) {
tconn->send.seen_any_write_yet = true; connection->send.seen_any_write_yet = true;
tconn->send.current_epoch_nr = epoch; connection->send.current_epoch_nr = epoch;
tconn->send.current_epoch_writes = 0; connection->send.current_epoch_writes = 0;
} }
} }
static void maybe_send_barrier(struct drbd_tconn *tconn, unsigned int epoch) static void maybe_send_barrier(struct drbd_connection *connection, unsigned int epoch)
{ {
/* re-init if first write on this connection */ /* re-init if first write on this connection */
if (!tconn->send.seen_any_write_yet) if (!connection->send.seen_any_write_yet)
return; return;
if (tconn->send.current_epoch_nr != epoch) { if (connection->send.current_epoch_nr != epoch) {
if (tconn->send.current_epoch_writes) if (connection->send.current_epoch_writes)
drbd_send_barrier(tconn); drbd_send_barrier(connection);
tconn->send.current_epoch_nr = epoch; connection->send.current_epoch_nr = epoch;
} }
} }
...@@ -1328,7 +1328,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel) ...@@ -1328,7 +1328,7 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
{ {
struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = w->device; struct drbd_device *device = w->device;
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
int err; int err;
if (unlikely(cancel)) { if (unlikely(cancel)) {
...@@ -1336,11 +1336,11 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel) ...@@ -1336,11 +1336,11 @@ int w_send_out_of_sync(struct drbd_work *w, int cancel)
return 0; return 0;
} }
/* this time, no tconn->send.current_epoch_writes++; /* this time, no connection->send.current_epoch_writes++;
* If it was sent, it was the closing barrier for the last * If it was sent, it was the closing barrier for the last
* replicated epoch, before we went into AHEAD mode. * replicated epoch, before we went into AHEAD mode.
* No more barriers will be sent, until we leave AHEAD mode again. */ * No more barriers will be sent, until we leave AHEAD mode again. */
maybe_send_barrier(tconn, req->epoch); maybe_send_barrier(connection, req->epoch);
err = drbd_send_out_of_sync(device, req); err = drbd_send_out_of_sync(device, req);
req_mod(req, OOS_HANDED_TO_NETWORK); req_mod(req, OOS_HANDED_TO_NETWORK);
...@@ -1358,7 +1358,7 @@ int w_send_dblock(struct drbd_work *w, int cancel) ...@@ -1358,7 +1358,7 @@ int w_send_dblock(struct drbd_work *w, int cancel)
{ {
struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = w->device; struct drbd_device *device = w->device;
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
int err; int err;
if (unlikely(cancel)) { if (unlikely(cancel)) {
...@@ -1366,9 +1366,9 @@ int w_send_dblock(struct drbd_work *w, int cancel) ...@@ -1366,9 +1366,9 @@ int w_send_dblock(struct drbd_work *w, int cancel)
return 0; return 0;
} }
re_init_if_first_write(tconn, req->epoch); re_init_if_first_write(connection, req->epoch);
maybe_send_barrier(tconn, req->epoch); maybe_send_barrier(connection, req->epoch);
tconn->send.current_epoch_writes++; connection->send.current_epoch_writes++;
err = drbd_send_dblock(device, req); err = drbd_send_dblock(device, req);
req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK); req_mod(req, err ? SEND_FAILED : HANDED_OVER_TO_NETWORK);
...@@ -1386,7 +1386,7 @@ int w_send_read_req(struct drbd_work *w, int cancel) ...@@ -1386,7 +1386,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
{ {
struct drbd_request *req = container_of(w, struct drbd_request, w); struct drbd_request *req = container_of(w, struct drbd_request, w);
struct drbd_device *device = w->device; struct drbd_device *device = w->device;
struct drbd_tconn *tconn = device->tconn; struct drbd_connection *connection = device->connection;
int err; int err;
if (unlikely(cancel)) { if (unlikely(cancel)) {
...@@ -1396,7 +1396,7 @@ int w_send_read_req(struct drbd_work *w, int cancel) ...@@ -1396,7 +1396,7 @@ int w_send_read_req(struct drbd_work *w, int cancel)
/* Even read requests may close a write epoch, /* Even read requests may close a write epoch,
* if there was any yet. */ * if there was any yet. */
maybe_send_barrier(tconn, req->epoch); maybe_send_barrier(connection, req->epoch);
err = drbd_send_drequest(device, P_DATA_REQUEST, req->i.sector, req->i.size, err = drbd_send_drequest(device, P_DATA_REQUEST, req->i.sector, req->i.size,
(unsigned long)req); (unsigned long)req);
...@@ -1581,7 +1581,7 @@ void start_resync_timer_fn(unsigned long data) ...@@ -1581,7 +1581,7 @@ void start_resync_timer_fn(unsigned long data)
{ {
struct drbd_device *device = (struct drbd_device *) data; struct drbd_device *device = (struct drbd_device *) data;
drbd_queue_work(&device->tconn->sender_work, &device->start_resync_work); drbd_queue_work(&device->connection->sender_work, &device->start_resync_work);
} }
int w_start_resync(struct drbd_work *w, int cancel) int w_start_resync(struct drbd_work *w, int cancel)
...@@ -1628,7 +1628,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1628,7 +1628,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
if (r > 0) { if (r > 0) {
dev_info(DEV, "before-resync-target handler returned %d, " dev_info(DEV, "before-resync-target handler returned %d, "
"dropping connection.\n", r); "dropping connection.\n", r);
conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return; return;
} }
} else /* C_SYNC_SOURCE */ { } else /* C_SYNC_SOURCE */ {
...@@ -1641,14 +1641,14 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1641,14 +1641,14 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
} else { } else {
dev_info(DEV, "before-resync-source handler returned %d, " dev_info(DEV, "before-resync-source handler returned %d, "
"dropping connection.\n", r); "dropping connection.\n", r);
conn_request_state(device->tconn, NS(conn, C_DISCONNECTING), CS_HARD); conn_request_state(device->connection, NS(conn, C_DISCONNECTING), CS_HARD);
return; return;
} }
} }
} }
} }
if (current == device->tconn->worker.task) { if (current == device->connection->worker.task) {
/* The worker should not sleep waiting for state_mutex, /* The worker should not sleep waiting for state_mutex,
that can take long */ that can take long */
if (!mutex_trylock(device->state_mutex)) { if (!mutex_trylock(device->state_mutex)) {
...@@ -1727,10 +1727,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1727,10 +1727,10 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
* drbd_resync_finished from here in that case. * drbd_resync_finished from here in that case.
* We drbd_gen_and_send_sync_uuid here for protocol < 96, * We drbd_gen_and_send_sync_uuid here for protocol < 96,
* and from after_state_ch otherwise. */ * and from after_state_ch otherwise. */
if (side == C_SYNC_SOURCE && device->tconn->agreed_pro_version < 96) if (side == C_SYNC_SOURCE && device->connection->agreed_pro_version < 96)
drbd_gen_and_send_sync_uuid(device); drbd_gen_and_send_sync_uuid(device);
if (device->tconn->agreed_pro_version < 95 && device->rs_total == 0) { if (device->connection->agreed_pro_version < 95 && device->rs_total == 0) {
/* This still has a race (about when exactly the peers /* This still has a race (about when exactly the peers
* detect connection loss) that can lead to a full sync * detect connection loss) that can lead to a full sync
* on next handshake. In 8.3.9 we fixed this with explicit * on next handshake. In 8.3.9 we fixed this with explicit
...@@ -1746,7 +1746,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1746,7 +1746,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
int timeo; int timeo;
rcu_read_lock(); rcu_read_lock();
nc = rcu_dereference(device->tconn->net_conf); nc = rcu_dereference(device->connection->net_conf);
timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9; timeo = nc->ping_int * HZ + nc->ping_timeo * HZ / 9;
rcu_read_unlock(); rcu_read_unlock();
schedule_timeout_interruptible(timeo); schedule_timeout_interruptible(timeo);
...@@ -1772,7 +1772,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side) ...@@ -1772,7 +1772,7 @@ void drbd_start_resync(struct drbd_device *device, enum drbd_conns side)
* (because we have not yet seen new requests), we should send the * (because we have not yet seen new requests), we should send the
* corresponding barrier now. Must be checked within the same spinlock * corresponding barrier now. Must be checked within the same spinlock
* that is used to check for new requests. */ * that is used to check for new requests. */
static bool need_to_send_barrier(struct drbd_tconn *connection) static bool need_to_send_barrier(struct drbd_connection *connection)
{ {
if (!connection->send.seen_any_write_yet) if (!connection->send.seen_any_write_yet)
return false; return false;
...@@ -1813,7 +1813,7 @@ static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *w ...@@ -1813,7 +1813,7 @@ static bool dequeue_work_item(struct drbd_work_queue *queue, struct list_head *w
return !list_empty(work_list); return !list_empty(work_list);
} }
static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_list) static void wait_for_work(struct drbd_connection *connection, struct list_head *work_list)
{ {
DEFINE_WAIT(wait); DEFINE_WAIT(wait);
struct net_conf *nc; struct net_conf *nc;
...@@ -1884,7 +1884,7 @@ static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_ ...@@ -1884,7 +1884,7 @@ static void wait_for_work(struct drbd_tconn *connection, struct list_head *work_
int drbd_worker(struct drbd_thread *thi) int drbd_worker(struct drbd_thread *thi)
{ {
struct drbd_tconn *tconn = thi->tconn; struct drbd_connection *connection = thi->connection;
struct drbd_work *w = NULL; struct drbd_work *w = NULL;
struct drbd_device *device; struct drbd_device *device;
LIST_HEAD(work_list); LIST_HEAD(work_list);
...@@ -1896,12 +1896,12 @@ int drbd_worker(struct drbd_thread *thi) ...@@ -1896,12 +1896,12 @@ int drbd_worker(struct drbd_thread *thi)
/* as long as we use drbd_queue_work_front(), /* as long as we use drbd_queue_work_front(),
* we may only dequeue single work items here, not batches. */ * we may only dequeue single work items here, not batches. */
if (list_empty(&work_list)) if (list_empty(&work_list))
wait_for_work(tconn, &work_list); wait_for_work(connection, &work_list);
if (signal_pending(current)) { if (signal_pending(current)) {
flush_signals(current); flush_signals(current);
if (get_t_state(thi) == RUNNING) { if (get_t_state(thi) == RUNNING) {
conn_warn(tconn, "Worker got an unexpected signal\n"); conn_warn(connection, "Worker got an unexpected signal\n");
continue; continue;
} }
break; break;
...@@ -1913,10 +1913,10 @@ int drbd_worker(struct drbd_thread *thi) ...@@ -1913,10 +1913,10 @@ int drbd_worker(struct drbd_thread *thi)
while (!list_empty(&work_list)) { while (!list_empty(&work_list)) {
w = list_first_entry(&work_list, struct drbd_work, list); w = list_first_entry(&work_list, struct drbd_work, list);
list_del_init(&w->list); list_del_init(&w->list);
if (w->cb(w, tconn->cstate < C_WF_REPORT_PARAMS) == 0) if (w->cb(w, connection->cstate < C_WF_REPORT_PARAMS) == 0)
continue; continue;
if (tconn->cstate >= C_WF_REPORT_PARAMS) if (connection->cstate >= C_WF_REPORT_PARAMS)
conn_request_state(tconn, NS(conn, C_NETWORK_FAILURE), CS_HARD); conn_request_state(connection, NS(conn, C_NETWORK_FAILURE), CS_HARD);
} }
} }
...@@ -1926,11 +1926,11 @@ int drbd_worker(struct drbd_thread *thi) ...@@ -1926,11 +1926,11 @@ int drbd_worker(struct drbd_thread *thi)
list_del_init(&w->list); list_del_init(&w->list);
w->cb(w, 1); w->cb(w, 1);
} }
dequeue_work_batch(&tconn->sender_work, &work_list); dequeue_work_batch(&connection->sender_work, &work_list);
} while (!list_empty(&work_list)); } while (!list_empty(&work_list));
rcu_read_lock(); rcu_read_lock();
idr_for_each_entry(&tconn->volumes, device, vnr) { idr_for_each_entry(&connection->volumes, device, vnr) {
D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE); D_ASSERT(device->state.disk == D_DISKLESS && device->state.conn == C_STANDALONE);
kref_get(&device->kref); kref_get(&device->kref);
rcu_read_unlock(); rcu_read_unlock();