Commit bba7ebba authored by David Disseldorp, committed by Roland Dreier

IB/iser: Avoid recv buffer exhaustion caused by unexpected PDUs

iSCSI/iSER targets may send PDUs without a prior request from the
initiator.  RFC 5046 refers to these PDUs as "unexpected".  NOP-In PDUs
with itt=RESERVED and Asynchronous Message PDUs fall into this category.

The number of active "unexpected" PDUs an iSER target may have at any
time is governed by the MaxOutstandingUnexpectedPDUs key, which is not
yet supported.

Currently, when an iSER target sends an "unexpected" PDU, the
initiator's recv buffer consumed by the PDU is not replaced.  If more
than initial_post_recv_bufs_num "unexpected" PDUs are received, the
receive queue runs out of receive work requests entirely.

This patch ensures recv buffers consumed by "unexpected" PDUs are
replaced in the next iser_post_receive_control() call.
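
In short: the receive-completion path counts each recv work request
consumed by an unexpected PDU, and the next control-type send drains
that count, posting one replacement buffer per consumed WR on top of
the usual response buffer.  A condensed sketch of the two halves
(simplified from the hunks below; error paths, DTO setup and memory
registration omitted):

	/* rx completion: note that an unexpected PDU consumed a recv WR */
	if ((opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) ||
	    opcode == ISCSI_OP_ASYNC_EVENT)
		atomic_inc(&conn->ib_conn->unexpected_pdu_count);

	/* control send: replace everything consumed since the last post */
	outstanding_unexp_pdus =
		atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
	for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
		/* allocate, register and iser_post_recv() one rx descriptor */
	}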
Signed-off-by: David Disseldorp <ddiss@sgi.com>
Signed-off-by: Ken Sandars <ksandars@sgi.com>
Acked-by: Or Gerlitz <ogerlitz@voltaire.com>
Signed-off-by: Roland Dreier <rolandd@cisco.com>
parent 061e41fd
@@ -252,6 +252,9 @@ struct iser_conn {
 	wait_queue_head_t	     wait;                /* waitq for conn/disconn  */
 	atomic_t                     post_recv_buf_count; /* posted rx count  */
 	atomic_t                     post_send_buf_count; /* posted tx count   */
+	atomic_t                     unexpected_pdu_count;/* count of received *
+							   * unexpected pdus   *
+							   * not yet retired   */
 	char                         name[ISER_OBJECT_NAME_SIZE];
 	struct iser_page_vec        *page_vec;		  /* represents SG to fmr maps*
							   * maps serialized as tx is*/
@@ -183,14 +183,8 @@ static int iser_post_receive_control(struct iscsi_conn *conn)
 	struct iser_regd_buf *regd_data;
 	struct iser_dto      *recv_dto = NULL;
 	struct iser_device   *device = iser_conn->ib_conn->device;
-	int rx_data_size, err = 0;
-
-	rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
-	if (rx_desc == NULL) {
-		iser_err("Failed to alloc desc for post recv\n");
-		return -ENOMEM;
-	}
-	rx_desc->type = ISCSI_RX;
+	int rx_data_size, err;
+	int posts, outstanding_unexp_pdus;
 
 	/* for the login sequence we must support rx of upto 8K; login is done
 	 * after conn create/bind (connect) and conn stop/bind (reconnect),
@@ -201,46 +195,80 @@ static int iser_post_receive_control(struct iscsi_conn *conn)
 	else /* FIXME till user space sets conn->max_recv_dlength correctly */
 		rx_data_size = 128;
 
-	rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
-	if (rx_desc->data == NULL) {
-		iser_err("Failed to alloc data buf for post recv\n");
-		err = -ENOMEM;
-		goto post_rx_kmalloc_failure;
-	}
-
-	recv_dto = &rx_desc->dto;
-	recv_dto->ib_conn = iser_conn->ib_conn;
-	recv_dto->regd_vector_len = 0;
-
-	regd_hdr = &rx_desc->hdr_regd_buf;
-	memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
-	regd_hdr->device = device;
-	regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
-	regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
-
-	iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
-
-	iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
-
-	regd_data = &rx_desc->data_regd_buf;
-	memset(regd_data, 0, sizeof(struct iser_regd_buf));
-	regd_data->device = device;
-	regd_data->virt_addr = rx_desc->data;
-	regd_data->data_size = rx_data_size;
-
-	iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
-
-	iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
-
-	err = iser_post_recv(rx_desc);
-	if (!err)
-		return 0;
-
-	/* iser_post_recv failed */
+	outstanding_unexp_pdus =
+		atomic_xchg(&iser_conn->ib_conn->unexpected_pdu_count, 0);
+
+	/*
+	 * in addition to the response buffer, replace those consumed by
+	 * unexpected pdus.
+	 */
+	for (posts = 0; posts < 1 + outstanding_unexp_pdus; posts++) {
+		rx_desc = kmem_cache_alloc(ig.desc_cache, GFP_NOIO);
+		if (rx_desc == NULL) {
+			iser_err("Failed to alloc desc for post recv %d\n",
+				 posts);
+			err = -ENOMEM;
+			goto post_rx_cache_alloc_failure;
+		}
+		rx_desc->type = ISCSI_RX;
+		rx_desc->data = kmalloc(rx_data_size, GFP_NOIO);
+		if (rx_desc->data == NULL) {
+			iser_err("Failed to alloc data buf for post recv %d\n",
+				 posts);
+			err = -ENOMEM;
+			goto post_rx_kmalloc_failure;
+		}
+
+		recv_dto = &rx_desc->dto;
+		recv_dto->ib_conn = iser_conn->ib_conn;
+		recv_dto->regd_vector_len = 0;
+
+		regd_hdr = &rx_desc->hdr_regd_buf;
+		memset(regd_hdr, 0, sizeof(struct iser_regd_buf));
+		regd_hdr->device = device;
+		regd_hdr->virt_addr = rx_desc; /* == &rx_desc->iser_header */
+		regd_hdr->data_size = ISER_TOTAL_HEADERS_LEN;
+
+		iser_reg_single(device, regd_hdr, DMA_FROM_DEVICE);
+
+		iser_dto_add_regd_buff(recv_dto, regd_hdr, 0, 0);
+
+		regd_data = &rx_desc->data_regd_buf;
+		memset(regd_data, 0, sizeof(struct iser_regd_buf));
+		regd_data->device = device;
+		regd_data->virt_addr = rx_desc->data;
+		regd_data->data_size = rx_data_size;
+
+		iser_reg_single(device, regd_data, DMA_FROM_DEVICE);
+
+		iser_dto_add_regd_buff(recv_dto, regd_data, 0, 0);
+
+		err = iser_post_recv(rx_desc);
+		if (err) {
+			iser_err("Failed iser_post_recv for post %d\n", posts);
+			goto post_rx_post_recv_failure;
+		}
+	}
+	/* all posts successful */
+	return 0;
+
+post_rx_post_recv_failure:
 	iser_dto_buffs_release(recv_dto);
 	kfree(rx_desc->data);
 post_rx_kmalloc_failure:
 	kmem_cache_free(ig.desc_cache, rx_desc);
+post_rx_cache_alloc_failure:
+	if (posts > 0) {
+		/*
+		 * response buffer posted, but did not replace all unexpected
+		 * pdu recv bufs. Ignore error, retry occurs next send
+		 */
+		outstanding_unexp_pdus -= (posts - 1);
+		err = 0;
+	}
+	atomic_add(outstanding_unexp_pdus,
+		   &iser_conn->ib_conn->unexpected_pdu_count);
+
 	return err;
 }
@@ -274,8 +302,10 @@ int iser_conn_set_full_featured_mode(struct iscsi_conn *conn)
 	struct iscsi_iser_conn *iser_conn = conn->dd_data;
 
 	int i;
-	/* no need to keep it in a var, we are after login so if this should
-	 * be negotiated, by now the result should be available here */
+	/*
+	 * FIXME this value should be declared to the target during login with
+	 * the MaxOutstandingUnexpectedPDUs key when supported
+	 */
 	int initial_post_recv_bufs_num = ISER_MAX_RX_MISC_PDUS;
 
 	iser_dbg("Initially post: %d\n", initial_post_recv_bufs_num);
@@ -478,6 +508,7 @@ int iser_send_control(struct iscsi_conn *conn,
 	int err = 0;
 	struct iser_regd_buf *regd_buf;
 	struct iser_device *device;
+	unsigned char opcode;
 
 	if (!iser_conn_state_comp(iser_conn->ib_conn, ISER_CONN_UP)) {
 		iser_err("Failed to send, conn: 0x%p is not up\n", iser_conn->ib_conn);
@@ -512,10 +543,15 @@ int iser_send_control(struct iscsi_conn *conn,
 			data_seg_len);
 	}
 
-	if (iser_post_receive_control(conn) != 0) {
-		iser_err("post_rcv_buff failed!\n");
-		err = -ENOMEM;
-		goto send_control_error;
+	opcode = task->hdr->opcode & ISCSI_OPCODE_MASK;
+
+	/* post recv buffer for response if one is expected */
+	if (!(opcode == ISCSI_OP_NOOP_OUT && task->hdr->itt == RESERVED_ITT)) {
+		if (iser_post_receive_control(conn) != 0) {
+			iser_err("post_rcv_buff failed!\n");
+			err = -ENOMEM;
+			goto send_control_error;
+		}
 	}
 
 	err = iser_post_send(mdesc);
@@ -586,6 +622,20 @@ void iser_rcv_completion(struct iser_desc *rx_desc,
 	 * parallel to the execution of iser_conn_term. So the code that waits *
 	 * for the posted rx bufs refcount to become zero handles everything   */
 	atomic_dec(&conn->ib_conn->post_recv_buf_count);
+
+	/*
+	 * if an unexpected PDU was received then the recv wr consumed must
+	 * be replaced, this is done in the next send of a control-type PDU
+	 */
+	if (opcode == ISCSI_OP_NOOP_IN && hdr->itt == RESERVED_ITT) {
+		/* nop-in with itt = 0xffffffff */
+		atomic_inc(&conn->ib_conn->unexpected_pdu_count);
+	}
+	else if (opcode == ISCSI_OP_ASYNC_EVENT) {
+		/* asyncronous message */
+		atomic_inc(&conn->ib_conn->unexpected_pdu_count);
+	}
+	/* a reject PDU consumes the recv buf posted for the response */
 }
 
 void iser_snd_completion(struct iser_desc *tx_desc)
@@ -498,6 +498,7 @@ void iser_conn_init(struct iser_conn *ib_conn)
 	init_waitqueue_head(&ib_conn->wait);
 	atomic_set(&ib_conn->post_recv_buf_count, 0);
 	atomic_set(&ib_conn->post_send_buf_count, 0);
+	atomic_set(&ib_conn->unexpected_pdu_count, 0);
 	atomic_set(&ib_conn->refcount, 1);
 	INIT_LIST_HEAD(&ib_conn->conn_list);
 	spin_lock_init(&ib_conn->lock);