Commit a4ee3539 authored by Sagi Grimberg's avatar Sagi Grimberg Committed by Roland Dreier

IB/iser: Re-introduce ib_conn

Structure that describes the RDMA-related connection objects. Static
member of iser_conn.

This patch does not change any functionality.
Signed-off-by: Sagi Grimberg <sagig@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: Roland Dreier <roland@purestorage.com>
parent 5716af6e
...@@ -148,7 +148,7 @@ int iser_initialize_task_headers(struct iscsi_task *task, ...@@ -148,7 +148,7 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc) struct iser_tx_desc *tx_desc)
{ {
struct iser_conn *iser_conn = task->conn->dd_data; struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->device; struct iser_device *device = iser_conn->ib_conn.device;
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
u64 dma_addr; u64 dma_addr;
...@@ -291,7 +291,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task) ...@@ -291,7 +291,7 @@ static void iscsi_iser_cleanup_task(struct iscsi_task *task)
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = &iser_task->desc; struct iser_tx_desc *tx_desc = &iser_task->desc;
struct iser_conn *iser_conn = task->conn->dd_data; struct iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->device; struct iser_device *device = iser_conn->ib_conn.device;
ib_dma_unmap_single(device->ib_device, ib_dma_unmap_single(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
...@@ -448,6 +448,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -448,6 +448,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
struct iscsi_session *session; struct iscsi_session *session;
struct Scsi_Host *shost; struct Scsi_Host *shost;
struct iser_conn *iser_conn = NULL; struct iser_conn *iser_conn = NULL;
struct ib_conn *ib_conn;
shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0); shost = iscsi_host_alloc(&iscsi_iser_sht, 0, 0);
if (!shost) if (!shost)
...@@ -465,8 +466,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -465,8 +466,9 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
*/ */
if (ep) { if (ep) {
iser_conn = ep->dd_data; iser_conn = ep->dd_data;
if (iser_conn->pi_support) { ib_conn = &iser_conn->ib_conn;
u32 sig_caps = iser_conn->device->dev_attr.sig_prot_cap; if (ib_conn->pi_support) {
u32 sig_caps = ib_conn->device->dev_attr.sig_prot_cap;
scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps)); scsi_host_set_prot(shost, iser_dif_prot_caps(sig_caps));
if (iser_pi_guard) if (iser_pi_guard)
...@@ -477,7 +479,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep, ...@@ -477,7 +479,7 @@ iscsi_iser_session_create(struct iscsi_endpoint *ep,
} }
if (iscsi_host_add(shost, ep ? if (iscsi_host_add(shost, ep ?
iser_conn->device->ib_device->dma_device : NULL)) ib_conn->device->ib_device->dma_device : NULL))
goto free_host; goto free_host;
if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) { if (cmds_max > ISER_DEF_XMIT_CMDS_MAX) {
...@@ -583,11 +585,11 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep, ...@@ -583,11 +585,11 @@ static int iscsi_iser_get_ep_param(struct iscsi_endpoint *ep,
switch (param) { switch (param) {
case ISCSI_PARAM_CONN_PORT: case ISCSI_PARAM_CONN_PORT:
case ISCSI_PARAM_CONN_ADDRESS: case ISCSI_PARAM_CONN_ADDRESS:
if (!iser_conn || !iser_conn->cma_id) if (!iser_conn || !iser_conn->ib_conn.cma_id)
return -ENOTCONN; return -ENOTCONN;
return iscsi_conn_get_addr_param((struct sockaddr_storage *) return iscsi_conn_get_addr_param((struct sockaddr_storage *)
&iser_conn->cma_id->route.addr.dst_addr, &iser_conn->ib_conn.cma_id->route.addr.dst_addr,
param, buf); param, buf);
break; break;
default: default:
......
...@@ -265,6 +265,7 @@ struct iser_rx_desc { ...@@ -265,6 +265,7 @@ struct iser_rx_desc {
#define ISER_MAX_CQ 4 #define ISER_MAX_CQ 4
struct iser_conn; struct iser_conn;
struct ib_conn;
struct iscsi_iser_task; struct iscsi_iser_task;
struct iser_device { struct iser_device {
...@@ -281,9 +282,9 @@ struct iser_device { ...@@ -281,9 +282,9 @@ struct iser_device {
int cq_active_qps[ISER_MAX_CQ]; int cq_active_qps[ISER_MAX_CQ];
int cqs_used; int cqs_used;
struct iser_cq_desc *cq_desc; struct iser_cq_desc *cq_desc;
int (*iser_alloc_rdma_reg_res)(struct iser_conn *iser_conn, int (*iser_alloc_rdma_reg_res)(struct ib_conn *ib_conn,
unsigned cmds_max); unsigned cmds_max);
void (*iser_free_rdma_reg_res)(struct iser_conn *iser_conn); void (*iser_free_rdma_reg_res)(struct ib_conn *ib_conn);
int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task, int (*iser_reg_rdma_mem)(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir); enum iser_data_dir cmd_dir);
void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task, void (*iser_unreg_rdma_mem)(struct iscsi_iser_task *iser_task,
...@@ -317,20 +318,57 @@ struct fast_reg_descriptor { ...@@ -317,20 +318,57 @@ struct fast_reg_descriptor {
u8 reg_indicators; u8 reg_indicators;
}; };
/**
* struct ib_conn - Infiniband related objects
*
 * @cma_id: rdma_cm connection manager handle
* @qp: Connection Queue-pair
* @post_recv_buf_count: post receive counter
* @post_send_buf_count: post send counter
* @rx_wr: receive work request for batch posts
* @device: reference to iser device
* @pi_support: Indicate device T10-PI support
* @lock: protects fmr/fastreg pool
* @union.fmr:
* @pool: FMR pool for fast registrations
* @page_vec: page vector to hold mapped commands pages
* used for registration
* @union.fastreg:
* @pool: Fast registration descriptors pool for fast
* registrations
* @pool_size: Size of pool
*/
struct ib_conn {
struct rdma_cm_id *cma_id;
struct ib_qp *qp;
int post_recv_buf_count;
atomic_t post_send_buf_count;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
struct iser_device *device;
int cq_index;
bool pi_support;
spinlock_t lock;
union {
struct {
struct ib_fmr_pool *pool;
struct iser_page_vec *page_vec;
} fmr;
struct {
struct list_head pool;
int pool_size;
} fastreg;
};
};
struct iser_conn { struct iser_conn {
struct ib_conn ib_conn;
struct iscsi_conn *iscsi_conn; struct iscsi_conn *iscsi_conn;
struct iscsi_endpoint *ep; struct iscsi_endpoint *ep;
enum iser_conn_state state; /* rdma connection state */ enum iser_conn_state state; /* rdma connection state */
atomic_t refcount; atomic_t refcount;
spinlock_t lock; /* used for state changes */
struct iser_device *device; /* device context */
struct rdma_cm_id *cma_id; /* CMA ID */
struct ib_qp *qp; /* QP */
unsigned qp_max_recv_dtos; /* num of rx buffers */ unsigned qp_max_recv_dtos; /* num of rx buffers */
unsigned qp_max_recv_dtos_mask; /* above minus 1 */ unsigned qp_max_recv_dtos_mask; /* above minus 1 */
unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */ unsigned min_posted_rx; /* qp_max_recv_dtos >> 2 */
int post_recv_buf_count; /* posted rx count */
atomic_t post_send_buf_count; /* posted tx count */
char name[ISER_OBJECT_NAME_SIZE]; char name[ISER_OBJECT_NAME_SIZE];
struct work_struct release_work; struct work_struct release_work;
struct completion stop_completion; struct completion stop_completion;
...@@ -344,21 +382,6 @@ struct iser_conn { ...@@ -344,21 +382,6 @@ struct iser_conn {
u64 login_req_dma, login_resp_dma; u64 login_req_dma, login_resp_dma;
unsigned int rx_desc_head; unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs; struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
bool pi_support;
/* Connection memory registration pool */
union {
struct {
struct ib_fmr_pool *pool; /* pool of IB FMRs */
struct iser_page_vec *page_vec; /* represents SG to fmr maps*
* maps serialized as tx is*/
} fmr;
struct {
struct list_head pool;
int pool_size;
} fastreg;
};
}; };
struct iscsi_iser_task { struct iscsi_iser_task {
...@@ -429,10 +452,10 @@ void iser_release_work(struct work_struct *work); ...@@ -429,10 +452,10 @@ void iser_release_work(struct work_struct *work);
void iser_rcv_completion(struct iser_rx_desc *desc, void iser_rcv_completion(struct iser_rx_desc *desc,
unsigned long dto_xfer_len, unsigned long dto_xfer_len,
struct iser_conn *iser_conn); struct ib_conn *ib_conn);
void iser_snd_completion(struct iser_tx_desc *desc, void iser_snd_completion(struct iser_tx_desc *desc,
struct iser_conn *iser_conn); struct ib_conn *ib_conn);
void iser_task_rdma_init(struct iscsi_iser_task *task); void iser_task_rdma_init(struct iscsi_iser_task *task);
...@@ -455,7 +478,7 @@ int iser_connect(struct iser_conn *iser_conn, ...@@ -455,7 +478,7 @@ int iser_connect(struct iser_conn *iser_conn,
struct sockaddr *dst_addr, struct sockaddr *dst_addr,
int non_blocking); int non_blocking);
int iser_reg_page_vec(struct iser_conn *iser_conn, int iser_reg_page_vec(struct ib_conn *ib_conn,
struct iser_page_vec *page_vec, struct iser_page_vec *page_vec,
struct iser_mem_reg *mem_reg); struct iser_mem_reg *mem_reg);
...@@ -466,7 +489,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task, ...@@ -466,7 +489,7 @@ void iser_unreg_mem_fastreg(struct iscsi_iser_task *iser_task,
int iser_post_recvl(struct iser_conn *iser_conn); int iser_post_recvl(struct iser_conn *iser_conn);
int iser_post_recvm(struct iser_conn *iser_conn, int count); int iser_post_recvm(struct iser_conn *iser_conn, int count);
int iser_post_send(struct iser_conn *iser_conn, struct iser_tx_desc *tx_desc); int iser_post_send(struct ib_conn *ib_conn, struct iser_tx_desc *tx_desc);
int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data, struct iser_data_buf *data,
...@@ -479,10 +502,10 @@ int iser_initialize_task_headers(struct iscsi_task *task, ...@@ -479,10 +502,10 @@ int iser_initialize_task_headers(struct iscsi_task *task,
struct iser_tx_desc *tx_desc); struct iser_tx_desc *tx_desc);
int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
struct iscsi_session *session); struct iscsi_session *session);
int iser_create_fmr_pool(struct iser_conn *iser_conn, unsigned cmds_max); int iser_create_fmr_pool(struct ib_conn *ib_conn, unsigned cmds_max);
void iser_free_fmr_pool(struct iser_conn *iser_conn); void iser_free_fmr_pool(struct ib_conn *ib_conn);
int iser_create_fastreg_pool(struct iser_conn *iser_conn, unsigned cmds_max); int iser_create_fastreg_pool(struct ib_conn *ib_conn, unsigned cmds_max);
void iser_free_fastreg_pool(struct iser_conn *iser_conn); void iser_free_fastreg_pool(struct ib_conn *ib_conn);
u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task, u8 iser_check_task_pi_status(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir, sector_t *sector); enum iser_data_dir cmd_dir, sector_t *sector);
#endif #endif
...@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task) ...@@ -49,7 +49,7 @@ static int iser_prepare_read_cmd(struct iscsi_task *task)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->device; struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
int err; int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_hdr *hdr = &iser_task->desc.iser_header;
...@@ -103,7 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task, ...@@ -103,7 +103,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
unsigned int edtl) unsigned int edtl)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_device *device = iser_task->iser_conn->device; struct iser_device *device = iser_task->iser_conn->ib_conn.device;
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
int err; int err;
struct iser_hdr *hdr = &iser_task->desc.iser_header; struct iser_hdr *hdr = &iser_task->desc.iser_header;
...@@ -163,7 +163,7 @@ iser_prepare_write_cmd(struct iscsi_task *task, ...@@ -163,7 +163,7 @@ iser_prepare_write_cmd(struct iscsi_task *task,
static void iser_create_send_desc(struct iser_conn *iser_conn, static void iser_create_send_desc(struct iser_conn *iser_conn,
struct iser_tx_desc *tx_desc) struct iser_tx_desc *tx_desc)
{ {
struct iser_device *device = iser_conn->device; struct iser_device *device = iser_conn->ib_conn.device;
ib_dma_sync_single_for_cpu(device->ib_device, ib_dma_sync_single_for_cpu(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE); tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
...@@ -181,16 +181,18 @@ static void iser_create_send_desc(struct iser_conn *iser_conn, ...@@ -181,16 +181,18 @@ static void iser_create_send_desc(struct iser_conn *iser_conn,
static void iser_free_login_buf(struct iser_conn *iser_conn) static void iser_free_login_buf(struct iser_conn *iser_conn)
{ {
struct iser_device *device = iser_conn->ib_conn.device;
if (!iser_conn->login_buf) if (!iser_conn->login_buf)
return; return;
if (iser_conn->login_req_dma) if (iser_conn->login_req_dma)
ib_dma_unmap_single(iser_conn->device->ib_device, ib_dma_unmap_single(device->ib_device,
iser_conn->login_req_dma, iser_conn->login_req_dma,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE); ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
if (iser_conn->login_resp_dma) if (iser_conn->login_resp_dma)
ib_dma_unmap_single(iser_conn->device->ib_device, ib_dma_unmap_single(device->ib_device,
iser_conn->login_resp_dma, iser_conn->login_resp_dma,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
...@@ -204,12 +206,10 @@ static void iser_free_login_buf(struct iser_conn *iser_conn) ...@@ -204,12 +206,10 @@ static void iser_free_login_buf(struct iser_conn *iser_conn)
static int iser_alloc_login_buf(struct iser_conn *iser_conn) static int iser_alloc_login_buf(struct iser_conn *iser_conn)
{ {
struct iser_device *device; struct iser_device *device = iser_conn->ib_conn.device;
int req_err, resp_err; int req_err, resp_err;
BUG_ON(iser_conn->device == NULL); BUG_ON(device == NULL);
device = iser_conn->device;
iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN + iser_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL); ISER_RX_LOGIN_SIZE, GFP_KERNEL);
...@@ -259,13 +259,14 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, ...@@ -259,13 +259,14 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
u64 dma_addr; u64 dma_addr;
struct iser_rx_desc *rx_desc; struct iser_rx_desc *rx_desc;
struct ib_sge *rx_sg; struct ib_sge *rx_sg;
struct iser_device *device = iser_conn->device; struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
iser_conn->qp_max_recv_dtos = session->cmds_max; iser_conn->qp_max_recv_dtos = session->cmds_max;
iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */ iser_conn->qp_max_recv_dtos_mask = session->cmds_max - 1; /* cmds_max is 2^N */
iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2; iser_conn->min_posted_rx = iser_conn->qp_max_recv_dtos >> 2;
if (device->iser_alloc_rdma_reg_res(iser_conn, session->scsi_cmds_max)) if (device->iser_alloc_rdma_reg_res(ib_conn, session->scsi_cmds_max))
goto create_rdma_reg_res_failed; goto create_rdma_reg_res_failed;
if (iser_alloc_login_buf(iser_conn)) if (iser_alloc_login_buf(iser_conn))
...@@ -305,7 +306,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn, ...@@ -305,7 +306,7 @@ int iser_alloc_rx_descriptors(struct iser_conn *iser_conn,
rx_desc_alloc_fail: rx_desc_alloc_fail:
iser_free_login_buf(iser_conn); iser_free_login_buf(iser_conn);
alloc_login_buf_fail: alloc_login_buf_fail:
device->iser_free_rdma_reg_res(iser_conn); device->iser_free_rdma_reg_res(ib_conn);
create_rdma_reg_res_failed: create_rdma_reg_res_failed:
iser_err("failed allocating rx descriptors / data buffers\n"); iser_err("failed allocating rx descriptors / data buffers\n");
return -ENOMEM; return -ENOMEM;
...@@ -315,13 +316,14 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) ...@@ -315,13 +316,14 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
{ {
int i; int i;
struct iser_rx_desc *rx_desc; struct iser_rx_desc *rx_desc;
struct iser_device *device = iser_conn->device; struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iser_device *device = ib_conn->device;
if (!iser_conn->rx_descs) if (!iser_conn->rx_descs)
goto free_login_buf; goto free_login_buf;
if (device->iser_free_rdma_reg_res) if (device->iser_free_rdma_reg_res)
device->iser_free_rdma_reg_res(iser_conn); device->iser_free_rdma_reg_res(ib_conn);
rx_desc = iser_conn->rx_descs; rx_desc = iser_conn->rx_descs;
for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++) for (i = 0; i < iser_conn->qp_max_recv_dtos; i++, rx_desc++)
...@@ -338,6 +340,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn) ...@@ -338,6 +340,7 @@ void iser_free_rx_descriptors(struct iser_conn *iser_conn)
static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
{ {
struct iser_conn *iser_conn = conn->dd_data; struct iser_conn *iser_conn = conn->dd_data;
struct ib_conn *ib_conn = &iser_conn->ib_conn;
struct iscsi_session *session = conn->session; struct iscsi_session *session = conn->session;
iser_dbg("req op %x flags %x\n", req->opcode, req->flags); iser_dbg("req op %x flags %x\n", req->opcode, req->flags);
...@@ -350,8 +353,8 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req) ...@@ -350,8 +353,8 @@ static int iser_post_rx_bufs(struct iscsi_conn *conn, struct iscsi_hdr *req)
* response) and no posted send buffers left - they must have been * response) and no posted send buffers left - they must have been
* consumed during previous login phases. * consumed during previous login phases.
*/ */
WARN_ON(iser_conn->post_recv_buf_count != 1); WARN_ON(ib_conn->post_recv_buf_count != 1);
WARN_ON(atomic_read(&iser_conn->post_send_buf_count) != 0); WARN_ON(atomic_read(&ib_conn->post_send_buf_count) != 0);
if (session->discovery_sess) { if (session->discovery_sess) {
iser_info("Discovery session, re-using login RX buffer\n"); iser_info("Discovery session, re-using login RX buffer\n");
...@@ -426,7 +429,7 @@ int iser_send_command(struct iscsi_conn *conn, ...@@ -426,7 +429,7 @@ int iser_send_command(struct iscsi_conn *conn,
iser_task->status = ISER_TASK_STATUS_STARTED; iser_task->status = ISER_TASK_STATUS_STARTED;
err = iser_post_send(iser_conn, tx_desc); err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err) if (!err)
return 0; return 0;
...@@ -491,7 +494,7 @@ int iser_send_data_out(struct iscsi_conn *conn, ...@@ -491,7 +494,7 @@ int iser_send_data_out(struct iscsi_conn *conn,
itt, buf_offset, data_seg_len); itt, buf_offset, data_seg_len);
err = iser_post_send(iser_conn, tx_desc); err = iser_post_send(&iser_conn->ib_conn, tx_desc);
if (!err) if (!err)
return 0; return 0;
...@@ -515,7 +518,7 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -515,7 +518,7 @@ int iser_send_control(struct iscsi_conn *conn,
mdesc->type = ISCSI_TX_CONTROL; mdesc->type = ISCSI_TX_CONTROL;
iser_create_send_desc(iser_conn, mdesc); iser_create_send_desc(iser_conn, mdesc);
device = iser_conn->device; device = iser_conn->ib_conn.device;
data_seg_len = ntoh24(task->hdr->dlength); data_seg_len = ntoh24(task->hdr->dlength);
...@@ -553,7 +556,7 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -553,7 +556,7 @@ int iser_send_control(struct iscsi_conn *conn,
goto send_control_error; goto send_control_error;
} }
err = iser_post_send(iser_conn, mdesc); err = iser_post_send(&iser_conn->ib_conn, mdesc);
if (!err) if (!err)
return 0; return 0;
...@@ -567,8 +570,10 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -567,8 +570,10 @@ int iser_send_control(struct iscsi_conn *conn,
*/ */
void iser_rcv_completion(struct iser_rx_desc *rx_desc, void iser_rcv_completion(struct iser_rx_desc *rx_desc,
unsigned long rx_xfer_len, unsigned long rx_xfer_len,
struct iser_conn *iser_conn) struct ib_conn *ib_conn)
{ {
struct iser_conn *iser_conn = container_of(ib_conn, struct iser_conn,
ib_conn);
struct iscsi_hdr *hdr; struct iscsi_hdr *hdr;
u64 rx_dma; u64 rx_dma;
int rx_buflen, outstanding, count, err; int rx_buflen, outstanding, count, err;
...@@ -582,7 +587,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, ...@@ -582,7 +587,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
rx_buflen = ISER_RX_PAYLOAD_SIZE; rx_buflen = ISER_RX_PAYLOAD_SIZE;
} }
ib_dma_sync_single_for_cpu(iser_conn->device->ib_device, rx_dma, ib_dma_sync_single_for_cpu(ib_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE); rx_buflen, DMA_FROM_DEVICE);
hdr = &rx_desc->iscsi_header; hdr = &rx_desc->iscsi_header;
...@@ -593,19 +598,19 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, ...@@ -593,19 +598,19 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data, iscsi_iser_recv(iser_conn->iscsi_conn, hdr, rx_desc->data,
rx_xfer_len - ISER_HEADERS_LEN); rx_xfer_len - ISER_HEADERS_LEN);
ib_dma_sync_single_for_device(iser_conn->device->ib_device, rx_dma, ib_dma_sync_single_for_device(ib_conn->device->ib_device, rx_dma,
rx_buflen, DMA_FROM_DEVICE); rx_buflen, DMA_FROM_DEVICE);
/* decrementing conn->post_recv_buf_count only --after-- freeing the * /* decrementing conn->post_recv_buf_count only --after-- freeing the *
* task eliminates the need to worry on tasks which are completed in * * task eliminates the need to worry on tasks which are completed in *
* parallel to the execution of iser_conn_term. So the code that waits * * parallel to the execution of iser_conn_term. So the code that waits *
* for the posted rx bufs refcount to become zero handles everything */ * for the posted rx bufs refcount to become zero handles everything */
iser_conn->post_recv_buf_count--; ib_conn->post_recv_buf_count--;
if (rx_dma == iser_conn->login_resp_dma) if (rx_dma == iser_conn->login_resp_dma)
return; return;
outstanding = iser_conn->post_recv_buf_count; outstanding = ib_conn->post_recv_buf_count;
if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) { if (outstanding + iser_conn->min_posted_rx <= iser_conn->qp_max_recv_dtos) {
count = min(iser_conn->qp_max_recv_dtos - outstanding, count = min(iser_conn->qp_max_recv_dtos - outstanding,
iser_conn->min_posted_rx); iser_conn->min_posted_rx);
...@@ -616,10 +621,10 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, ...@@ -616,10 +621,10 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
} }
void iser_snd_completion(struct iser_tx_desc *tx_desc, void iser_snd_completion(struct iser_tx_desc *tx_desc,
struct iser_conn *iser_conn) struct ib_conn *ib_conn)
{ {
struct iscsi_task *task; struct iscsi_task *task;
struct iser_device *device = iser_conn->device; struct iser_device *device = ib_conn->device;
if (tx_desc->type == ISCSI_TX_DATAOUT) { if (tx_desc->type == ISCSI_TX_DATAOUT) {
ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr, ib_dma_unmap_single(device->ib_device, tx_desc->dma_addr,
...@@ -628,7 +633,7 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc, ...@@ -628,7 +633,7 @@ void iser_snd_completion(struct iser_tx_desc *tx_desc,
tx_desc = NULL; tx_desc = NULL;
} }
atomic_dec(&iser_conn->post_send_buf_count); atomic_dec(&ib_conn->post_send_buf_count);
if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) { if (tx_desc && tx_desc->type == ISCSI_TX_CONTROL) {
/* this arithmetic is legal by libiscsi dd_data allocation */ /* this arithmetic is legal by libiscsi dd_data allocation */
...@@ -661,7 +666,7 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task) ...@@ -661,7 +666,7 @@ void iser_task_rdma_init(struct iscsi_iser_task *iser_task)
void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task) void iser_task_rdma_finalize(struct iscsi_iser_task *iser_task)
{ {
struct iser_device *device = iser_task->iser_conn->device; struct iser_device *device = iser_task->iser_conn->ib_conn.device;
int is_rdma_data_aligned = 1; int is_rdma_data_aligned = 1;
int is_rdma_prot_aligned = 1; int is_rdma_prot_aligned = 1;
int prot_count = scsi_prot_sg_count(iser_task->sc); int prot_count = scsi_prot_sg_count(iser_task->sc);
......
...@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, ...@@ -49,7 +49,7 @@ static int iser_start_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct iser_data_buf *data_copy, struct iser_data_buf *data_copy,
enum iser_data_dir cmd_dir) enum iser_data_dir cmd_dir)
{ {
struct ib_device *dev = iser_task->iser_conn->device->ib_device; struct ib_device *dev = iser_task->iser_conn->ib_conn.device->ib_device;
struct scatterlist *sgl = (struct scatterlist *)data->buf; struct scatterlist *sgl = (struct scatterlist *)data->buf;
struct scatterlist *sg; struct scatterlist *sg;
char *mem = NULL; char *mem = NULL;
...@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task, ...@@ -116,7 +116,7 @@ void iser_finalize_rdma_unaligned_sg(struct iscsi_iser_task *iser_task,
struct ib_device *dev; struct ib_device *dev;
unsigned long cmd_data_len; unsigned long cmd_data_len;
dev = iser_task->iser_conn->device->ib_device; dev = iser_task->iser_conn->ib_conn.device->ib_device;
ib_dma_unmap_sg(dev, &data_copy->sg_single, 1, ib_dma_unmap_sg(dev, &data_copy->sg_single, 1,
(cmd_dir == ISER_DIR_OUT) ? (cmd_dir == ISER_DIR_OUT) ?
...@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task, ...@@ -322,7 +322,7 @@ int iser_dma_map_task_data(struct iscsi_iser_task *iser_task,
struct ib_device *dev; struct ib_device *dev;
iser_task->dir[iser_dir] = 1; iser_task->dir[iser_dir] = 1;
dev = iser_task->iser_conn->device->ib_device; dev = iser_task->iser_conn->ib_conn.device->ib_device;
data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir); data->dma_nents = ib_dma_map_sg(dev, data->buf, data->size, dma_dir);
if (data->dma_nents == 0) { if (data->dma_nents == 0) {
...@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task, ...@@ -337,7 +337,7 @@ void iser_dma_unmap_task_data(struct iscsi_iser_task *iser_task,
{ {
struct ib_device *dev; struct ib_device *dev;
dev = iser_task->iser_conn->device->ib_device; dev = iser_task->iser_conn->ib_conn.device->ib_device;
ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE); ib_dma_unmap_sg(dev, data->buf, data->size, DMA_FROM_DEVICE);
} }
...@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task, ...@@ -377,8 +377,8 @@ static int fall_to_bounce_buf(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir) enum iser_data_dir cmd_dir)
{ {
struct iser_conn *iser_conn = iser_task->iser_conn; struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = iser_conn->device; struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device; struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir]; struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf; struct iser_regd_buf *regd_buf;
...@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, ...@@ -418,8 +418,8 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
(unsigned long)regd_buf->reg.va, (unsigned long)regd_buf->reg.va,
(unsigned long)regd_buf->reg.len); (unsigned long)regd_buf->reg.len);
} else { /* use FMR for multiple dma entries */ } else { /* use FMR for multiple dma entries */
iser_page_vec_build(mem, iser_conn->fmr.page_vec, ibdev); iser_page_vec_build(mem, ib_conn->fmr.page_vec, ibdev);
err = iser_reg_page_vec(iser_conn, iser_conn->fmr.page_vec, err = iser_reg_page_vec(ib_conn, ib_conn->fmr.page_vec,
&regd_buf->reg); &regd_buf->reg);
if (err && err != -EAGAIN) { if (err && err != -EAGAIN) {
iser_data_buf_dump(mem, ibdev); iser_data_buf_dump(mem, ibdev);
...@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task, ...@@ -427,12 +427,12 @@ int iser_reg_rdma_mem_fmr(struct iscsi_iser_task *iser_task,
mem->dma_nents, mem->dma_nents,
ntoh24(iser_task->desc.iscsi_header.dlength)); ntoh24(iser_task->desc.iscsi_header.dlength));
iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n", iser_err("page_vec: data_size = 0x%x, length = %d, offset = 0x%x\n",
iser_conn->fmr.page_vec->data_size, ib_conn->fmr.page_vec->data_size,
iser_conn->fmr.page_vec->length, ib_conn->fmr.page_vec->length,
iser_conn->fmr.page_vec->offset); ib_conn->fmr.page_vec->offset);
for (i = 0; i < iser_conn->fmr.page_vec->length; i++) for (i = 0; i < ib_conn->fmr.page_vec->length; i++)
iser_err("page_vec[%d] = 0x%llx\n", i, iser_err("page_vec[%d] = 0x%llx\n", i,
(unsigned long long)iser_conn->fmr.page_vec->pages[i]); (unsigned long long)ib_conn->fmr.page_vec->pages[i]);
} }
if (err) if (err)
return err; return err;
...@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -533,7 +533,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
struct fast_reg_descriptor *desc, struct ib_sge *data_sge, struct fast_reg_descriptor *desc, struct ib_sge *data_sge,
struct ib_sge *prot_sge, struct ib_sge *sig_sge) struct ib_sge *prot_sge, struct ib_sge *sig_sge)
{ {
struct iser_conn *iser_conn = iser_task->iser_conn; struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_pi_context *pi_ctx = desc->pi_ctx; struct iser_pi_context *pi_ctx = desc->pi_ctx;
struct ib_send_wr sig_wr, inv_wr; struct ib_send_wr sig_wr, inv_wr;
struct ib_send_wr *bad_wr, *wr = NULL; struct ib_send_wr *bad_wr, *wr = NULL;
...@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task, ...@@ -579,7 +579,7 @@ iser_reg_sig_mr(struct iscsi_iser_task *iser_task,
else else
wr->next = &sig_wr; wr->next = &sig_wr;
ret = ib_post_send(iser_conn->qp, wr, &bad_wr); ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
if (ret) { if (ret) {
iser_err("reg_sig_mr failed, ret:%d\n", ret); iser_err("reg_sig_mr failed, ret:%d\n", ret);
goto err; goto err;
...@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -609,8 +609,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
struct ib_sge *sge) struct ib_sge *sge)
{ {
struct fast_reg_descriptor *desc = regd_buf->reg.mem_h; struct fast_reg_descriptor *desc = regd_buf->reg.mem_h;
struct iser_conn *iser_conn = iser_task->iser_conn; struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = iser_conn->device; struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device; struct ib_device *ibdev = device->ib_device;
struct ib_mr *mr; struct ib_mr *mr;
struct ib_fast_reg_page_list *frpl; struct ib_fast_reg_page_list *frpl;
...@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -677,7 +677,7 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
else else
wr->next = &fastreg_wr; wr->next = &fastreg_wr;
ret = ib_post_send(iser_conn->qp, wr, &bad_wr); ret = ib_post_send(ib_conn->qp, wr, &bad_wr);
if (ret) { if (ret) {
iser_err("fast registration failed, ret:%d\n", ret); iser_err("fast registration failed, ret:%d\n", ret);
return ret; return ret;
...@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task, ...@@ -700,8 +700,8 @@ static int iser_fast_reg_mr(struct iscsi_iser_task *iser_task,
int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
enum iser_data_dir cmd_dir) enum iser_data_dir cmd_dir)
{ {
struct iser_conn *iser_conn = iser_task->iser_conn; struct ib_conn *ib_conn = &iser_task->iser_conn->ib_conn;
struct iser_device *device = iser_conn->device; struct iser_device *device = ib_conn->device;
struct ib_device *ibdev = device->ib_device; struct ib_device *ibdev = device->ib_device;
struct iser_data_buf *mem = &iser_task->data[cmd_dir]; struct iser_data_buf *mem = &iser_task->data[cmd_dir];
struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir]; struct iser_regd_buf *regd_buf = &iser_task->rdma_regd[cmd_dir];
...@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, ...@@ -724,11 +724,11 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
if (mem->dma_nents != 1 || if (mem->dma_nents != 1 ||
scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) { scsi_get_prot_op(iser_task->sc) != SCSI_PROT_NORMAL) {
spin_lock_irqsave(&iser_conn->lock, flags); spin_lock_irqsave(&ib_conn->lock, flags);
desc = list_first_entry(&iser_conn->fastreg.pool, desc = list_first_entry(&ib_conn->fastreg.pool,
struct fast_reg_descriptor, list); struct fast_reg_descriptor, list);
list_del(&desc->list); list_del(&desc->list);
spin_unlock_irqrestore(&iser_conn->lock, flags); spin_unlock_irqrestore(&ib_conn->lock, flags);
regd_buf->reg.mem_h = desc; regd_buf->reg.mem_h = desc;
} }
...@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task, ...@@ -791,9 +791,9 @@ int iser_reg_rdma_mem_fastreg(struct iscsi_iser_task *iser_task,
return 0; return 0;
err_reg: err_reg:
if (desc) { if (desc) {
spin_lock_irqsave(&iser_conn->lock, flags); spin_lock_irqsave(&ib_conn->lock, flags);
list_add_tail(&desc->list, &iser_conn->fastreg.pool); list_add_tail(&desc->list, &ib_conn->fastreg.pool);
spin_unlock_irqrestore(&iser_conn->lock, flags); spin_unlock_irqrestore(&ib_conn->lock, flags);
} }
return err; return err;
......
This diff is collapsed.
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment