Commit 60bcbc41 authored by David S. Miller

Merge branch 'net-smc-add-and-delete-link-processing'

Karsten Graul says:

====================
net/smc: add and delete link processing

These patches add 'add link' and 'delete link' processing for both the
SMC server and the SMC client. This processing allows links of a link
group to be established and removed dynamically.

v2: Fix a mix-up with unused static functions. Merge patch 8 into patch 4.
    Postpone patch 13 to the next series.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d1a22509 4dadd151
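
For orientation, here is a minimal userspace sketch (not kernel code) of the role split the first two hunks below introduce: after the first link is confirmed, the client processes the ADD_LINK request it received from the server, while the server actively starts the add-link flow itself. All demo_* names are hypothetical stand-ins for smcr_clnt_conf_first_link()/smcr_serv_conf_first_link() and the new smc_llc_cli_add_link()/smc_llc_srv_add_link() entry points.

/* Simplified model only; the real logic lives in af_smc.c and smc_llc.c. */
#include <stdio.h>

enum demo_role { DEMO_CLNT, DEMO_SERV };

struct demo_qentry { int msg_type; };        /* stand-in for smc_llc_qentry */
struct demo_link   { enum demo_role role; }; /* stand-in for smc_link */

/* client side: process the ADD_LINK request queued by the server */
static int demo_cli_add_link(struct demo_link *lnk, struct demo_qentry *qe)
{
	printf("client: processing ADD_LINK (msg 0x%02x)\n", qe->msg_type);
	return 0;
}

/* server side: actively start add-link processing for a second link */
static int demo_srv_add_link(struct demo_link *lnk)
{
	printf("server: starting ADD_LINK flow\n");
	return 0;
}

/* first-contact completion: the role decides who drives the flow */
static int demo_conf_first_link(struct demo_link *lnk, struct demo_qentry *qe)
{
	if (lnk->role == DEMO_CLNT)
		return demo_cli_add_link(lnk, qe);
	return demo_srv_add_link(lnk);
}

int main(void)
{
	struct demo_qentry qe = { .msg_type = 0x02 };  /* SMC_LLC_ADD_LINK */
	struct demo_link clnt = { .role = DEMO_CLNT };
	struct demo_link serv = { .role = DEMO_SERV };

	demo_conf_first_link(&clnt, &qe);
	demo_conf_first_link(&serv, NULL);
	return 0;
}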
@@ -427,7 +427,7 @@ static int smcr_clnt_conf_first_link(struct smc_sock *smc)
return rc;
}
smc_llc_flow_qentry_clr(&link->lgr->llc_flow_lcl);
/* tbd: call smc_llc_cli_add_link(link, qentry); */
smc_llc_cli_add_link(link, qentry);
return 0;
}
@@ -1067,7 +1067,7 @@ static int smcr_serv_conf_first_link(struct smc_sock *smc)
smc_llc_link_active(link);
/* initial contact - try to establish second link */
/* tbd: call smc_llc_srv_add_link(link); */
smc_llc_srv_add_link(link);
return 0;
}
......
@@ -193,12 +193,19 @@ static void smc_lgr_unregister_conn(struct smc_connection *conn)
void smc_lgr_cleanup_early(struct smc_connection *conn)
{
struct smc_link_group *lgr = conn->lgr;
struct list_head *lgr_list;
spinlock_t *lgr_lock;
if (!lgr)
return;
smc_conn_free(conn);
smc_lgr_forget(lgr);
lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
if (!list_empty(lgr_list))
list_del_init(lgr_list);
spin_unlock_bh(lgr_lock);
smc_lgr_schedule_free_work_fast(lgr);
}
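
The hunk above folds the former smc_lgr_forget() body into smc_lgr_cleanup_early(): the link group is delisted under its spinlock so that no new connections pick it up, and the actual teardown is left to the scheduled free work. Below is a minimal userspace sketch of that delist-then-defer pattern; all demo_* names are hypothetical, a pthread mutex stands in for the lgr spinlock and a detached thread stands in for the free work item.

#include <pthread.h>
#include <stdlib.h>
#include <unistd.h>

struct demo_lgr {
	struct demo_lgr *next;   /* singly linked list for brevity */
};

static struct demo_lgr *demo_lgr_list;
static pthread_mutex_t demo_lgr_lock = PTHREAD_MUTEX_INITIALIZER;

static void *demo_free_work(void *arg)
{
	free(arg);               /* deferred teardown of the link group */
	return NULL;
}

static void demo_lgr_cleanup_early(struct demo_lgr *lgr)
{
	struct demo_lgr **pp;
	pthread_t worker;

	pthread_mutex_lock(&demo_lgr_lock);
	/* do not use this link group for new connections */
	for (pp = &demo_lgr_list; *pp; pp = &(*pp)->next) {
		if (*pp == lgr) {
			*pp = lgr->next;
			break;
		}
	}
	pthread_mutex_unlock(&demo_lgr_lock);

	/* hand the actual free off to a worker ("free work") */
	pthread_create(&worker, NULL, demo_free_work, lgr);
	pthread_detach(worker);
}

int main(void)
{
	struct demo_lgr *lgr = calloc(1, sizeof(*lgr));

	lgr->next = demo_lgr_list;
	demo_lgr_list = lgr;     /* register the group */
	demo_lgr_cleanup_early(lgr);
	sleep(1);                /* give the detached worker time to run */
	return 0;
}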
@@ -273,8 +280,8 @@ static u8 smcr_next_link_id(struct smc_link_group *lgr)
return link_id;
}
static int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
u8 link_idx, struct smc_init_info *ini)
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
u8 link_idx, struct smc_init_info *ini)
{
u8 rndvec[3];
int rc;
@@ -653,19 +660,6 @@ static void smc_lgr_free(struct smc_link_group *lgr)
kfree(lgr);
}
void smc_lgr_forget(struct smc_link_group *lgr)
{
struct list_head *lgr_list;
spinlock_t *lgr_lock;
lgr_list = smc_lgr_list_head(lgr, &lgr_lock);
spin_lock_bh(lgr_lock);
/* do not use this link group for new connections */
if (!list_empty(lgr_list))
list_del_init(lgr_list);
spin_unlock_bh(lgr_lock);
}
static void smcd_unregister_all_dmbs(struct smc_link_group *lgr)
{
int i;
@@ -889,7 +883,7 @@ static void smcr_link_up(struct smc_link_group *lgr,
link = smc_llc_usable_link(lgr);
if (!link)
return;
/* tbd: call smc_llc_srv_add_link_local(link); */
smc_llc_srv_add_link_local(link);
} else {
/* invite server to start add link processing */
u8 gid[SMC_GID_SIZE];
@@ -960,6 +954,7 @@ static void smcr_link_down(struct smc_link *lnk)
if (lgr->role == SMC_SERV) {
/* trigger local delete link processing */
smc_llc_srv_delete_link_local(to_lnk, del_link_id);
} else {
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* another llc task is ongoing */
......
@@ -254,6 +254,7 @@ struct smc_link_group {
struct mutex llc_conf_mutex;
/* protects lgr reconfig. */
struct work_struct llc_add_link_work;
struct work_struct llc_del_link_work;
struct work_struct llc_event_work;
/* llc event worker */
wait_queue_head_t llc_waiter;
@@ -343,7 +344,6 @@ struct smc_sock;
struct smc_clc_msg_accept_confirm;
struct smc_clc_msg_local;
void smc_lgr_forget(struct smc_link_group *lgr);
void smc_lgr_cleanup_early(struct smc_connection *conn);
void smc_lgr_terminate_sched(struct smc_link_group *lgr);
void smcr_port_add(struct smc_ib_device *smcibdev, u8 ibport);
@@ -374,6 +374,8 @@ void smc_lgr_schedule_free_work_fast(struct smc_link_group *lgr);
int smc_core_init(void);
void smc_core_exit(void);
int smcr_link_init(struct smc_link_group *lgr, struct smc_link *lnk,
u8 link_idx, struct smc_init_info *ini);
void smcr_link_clear(struct smc_link *lnk);
int smcr_buf_map_lgr(struct smc_link *lnk);
int smcr_buf_reg_lgr(struct smc_link *lnk);
......
This diff is collapsed.
@@ -28,6 +28,7 @@ enum smc_llc_reqresp {
enum smc_llc_msg_type {
SMC_LLC_CONFIRM_LINK = 0x01,
SMC_LLC_ADD_LINK = 0x02,
SMC_LLC_ADD_LINK_CONT = 0x03,
SMC_LLC_DELETE_LINK = 0x04,
SMC_LLC_CONFIRM_RKEY = 0x06,
SMC_LLC_TEST_LINK = 0x07,
@@ -68,6 +69,7 @@ int smc_llc_send_add_link(struct smc_link *link, u8 mac[], u8 gid[],
int smc_llc_send_delete_link(struct smc_link *link, u8 link_del_id,
enum smc_llc_reqresp reqresp, bool orderly,
u32 reason);
void smc_llc_srv_delete_link_local(struct smc_link *link, u8 del_link_id);
void smc_llc_lgr_init(struct smc_link_group *lgr, struct smc_sock *smc);
void smc_llc_lgr_clear(struct smc_link_group *lgr);
int smc_llc_link_init(struct smc_link *link);
@@ -87,6 +89,9 @@ struct smc_llc_qentry *smc_llc_wait(struct smc_link_group *lgr,
int time_out, u8 exp_msg);
struct smc_llc_qentry *smc_llc_flow_qentry_clr(struct smc_llc_flow *flow);
void smc_llc_flow_qentry_del(struct smc_llc_flow *flow);
int smc_llc_cli_add_link(struct smc_link *link, struct smc_llc_qentry *qentry);
int smc_llc_srv_add_link(struct smc_link *link);
void smc_llc_srv_add_link_local(struct smc_link *link);
int smc_llc_init(void) __init;
#endif /* SMC_LLC_H */
@@ -61,7 +61,7 @@ static inline bool smc_wr_is_tx_pend(struct smc_link *link)
}
/* wait till all pending tx work requests on the given link are completed */
static inline int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
int smc_wr_tx_wait_no_pending_sends(struct smc_link *link)
{
if (wait_event_timeout(link->wr_tx_wait, !smc_wr_is_tx_pend(link),
SMC_WR_TX_WAIT_PENDING_TIME))
......
@@ -106,6 +106,7 @@ void smc_wr_tx_dismiss_slots(struct smc_link *lnk, u8 wr_rx_hdr_type,
smc_wr_tx_filter filter,
smc_wr_tx_dismisser dismisser,
unsigned long data);
int smc_wr_tx_wait_no_pending_sends(struct smc_link *link);
int smc_wr_rx_register_handler(struct smc_wr_rx_handler *handler);
int smc_wr_rx_post_init(struct smc_link *link);
......
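
The last two hunks drop the static inline qualifier from smc_wr_tx_wait_no_pending_sends() and declare it in smc_wr.h, presumably so the add/delete link code (in the collapsed smc_llc.c diff) can wait for a link's outstanding sends before reconfiguring or clearing it. Below is a minimal userspace sketch of the same wait-with-timeout pattern, using a pthread condition variable in place of the kernel's wait_event_timeout(); all demo_* names are hypothetical.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <time.h>

struct demo_link {
	pthread_mutex_t lock;
	pthread_cond_t  tx_wait;   /* signalled when a send completes */
	int             tx_pending;
};

/* wait till all pending tx work requests on the given link are
 * completed, or give up after timeout_sec seconds
 */
static int demo_wr_tx_wait_no_pending_sends(struct demo_link *lnk,
					     int timeout_sec)
{
	struct timespec deadline;
	int rc = 0;

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += timeout_sec;

	pthread_mutex_lock(&lnk->lock);
	while (lnk->tx_pending && rc == 0)
		rc = pthread_cond_timedwait(&lnk->tx_wait, &lnk->lock,
					    &deadline);
	pthread_mutex_unlock(&lnk->lock);

	return -rc;                /* 0 on success, -ETIMEDOUT on timeout */
}

/* completion side: one pending send finished */
static void demo_wr_tx_done(struct demo_link *lnk)
{
	pthread_mutex_lock(&lnk->lock);
	if (lnk->tx_pending && --lnk->tx_pending == 0)
		pthread_cond_broadcast(&lnk->tx_wait);
	pthread_mutex_unlock(&lnk->lock);
}

int main(void)
{
	struct demo_link lnk = {
		.lock = PTHREAD_MUTEX_INITIALIZER,
		.tx_wait = PTHREAD_COND_INITIALIZER,
		.tx_pending = 1,
	};

	demo_wr_tx_done(&lnk);  /* last send completes before we wait */
	printf("wait rc=%d\n", demo_wr_tx_wait_no_pending_sends(&lnk, 1));
	return 0;
}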