Commit 16dfd1fa authored by Linus Torvalds's avatar Linus Torvalds

Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband

* 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/roland/infiniband:
  IB/qib: Fix panic in RC error flushing logic
  IB/iser: DMA unmap TX bufs used for iSCSI/iSER headers
  IB/iser: Use separate buffers for the login request/response
  IB/mthca: Fix buddy->num_free allocation size
parents b3196681 b8108d68
...@@ -146,7 +146,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order) ...@@ -146,7 +146,7 @@ static int mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *), buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
GFP_KERNEL); GFP_KERNEL);
buddy->num_free = kzalloc((buddy->max_order + 1) * sizeof (int *), buddy->num_free = kcalloc((buddy->max_order + 1), sizeof *buddy->num_free,
GFP_KERNEL); GFP_KERNEL);
if (!buddy->bits || !buddy->num_free) if (!buddy->bits || !buddy->num_free)
goto err_out; goto err_out;
......
...@@ -271,13 +271,9 @@ int qib_make_rc_req(struct qib_qp *qp) ...@@ -271,13 +271,9 @@ int qib_make_rc_req(struct qib_qp *qp)
goto bail; goto bail;
} }
wqe = get_swqe_ptr(qp, qp->s_last); wqe = get_swqe_ptr(qp, qp->s_last);
while (qp->s_last != qp->s_acked) { qib_send_complete(qp, wqe, qp->s_last != qp->s_acked ?
qib_send_complete(qp, wqe, IB_WC_SUCCESS); IB_WC_SUCCESS : IB_WC_WR_FLUSH_ERR);
if (++qp->s_last >= qp->s_size) /* will get called again */
qp->s_last = 0;
wqe = get_swqe_ptr(qp, qp->s_last);
}
qib_send_complete(qp, wqe, IB_WC_WR_FLUSH_ERR);
goto done; goto done;
} }
......
...@@ -151,7 +151,6 @@ int iser_initialize_task_headers(struct iscsi_task *task, ...@@ -151,7 +151,6 @@ int iser_initialize_task_headers(struct iscsi_task *task,
tx_desc->tx_sg[0].length = ISER_HEADERS_LEN; tx_desc->tx_sg[0].length = ISER_HEADERS_LEN;
tx_desc->tx_sg[0].lkey = device->mr->lkey; tx_desc->tx_sg[0].lkey = device->mr->lkey;
iser_task->headers_initialized = 1;
iser_task->iser_conn = iser_conn; iser_task->iser_conn = iser_conn;
return 0; return 0;
} }
...@@ -166,7 +165,6 @@ iscsi_iser_task_init(struct iscsi_task *task) ...@@ -166,7 +165,6 @@ iscsi_iser_task_init(struct iscsi_task *task)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
if (!iser_task->headers_initialized)
if (iser_initialize_task_headers(task, &iser_task->desc)) if (iser_initialize_task_headers(task, &iser_task->desc))
return -ENOMEM; return -ENOMEM;
...@@ -278,6 +276,13 @@ iscsi_iser_task_xmit(struct iscsi_task *task) ...@@ -278,6 +276,13 @@ iscsi_iser_task_xmit(struct iscsi_task *task)
static void iscsi_iser_cleanup_task(struct iscsi_task *task) static void iscsi_iser_cleanup_task(struct iscsi_task *task)
{ {
struct iscsi_iser_task *iser_task = task->dd_data; struct iscsi_iser_task *iser_task = task->dd_data;
struct iser_tx_desc *tx_desc = &iser_task->desc;
struct iscsi_iser_conn *iser_conn = task->conn->dd_data;
struct iser_device *device = iser_conn->ib_conn->device;
ib_dma_unmap_single(device->ib_device,
tx_desc->dma_addr, ISER_HEADERS_LEN, DMA_TO_DEVICE);
/* mgmt tasks do not need special cleanup */ /* mgmt tasks do not need special cleanup */
if (!task->sc) if (!task->sc)
......
...@@ -257,7 +257,8 @@ struct iser_conn { ...@@ -257,7 +257,8 @@ struct iser_conn {
struct list_head conn_list; /* entry in ig conn list */ struct list_head conn_list; /* entry in ig conn list */
char *login_buf; char *login_buf;
u64 login_dma; char *login_req_buf, *login_resp_buf;
u64 login_req_dma, login_resp_dma;
unsigned int rx_desc_head; unsigned int rx_desc_head;
struct iser_rx_desc *rx_descs; struct iser_rx_desc *rx_descs;
struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX]; struct ib_recv_wr rx_wr[ISER_MIN_POSTED_RX];
...@@ -277,7 +278,6 @@ struct iscsi_iser_task { ...@@ -277,7 +278,6 @@ struct iscsi_iser_task {
struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */ struct iser_regd_buf rdma_regd[ISER_DIRS_NUM];/* regd rdma buf */
struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/ struct iser_data_buf data[ISER_DIRS_NUM]; /* orig. data des*/
struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */ struct iser_data_buf data_copy[ISER_DIRS_NUM];/* contig. copy */
int headers_initialized;
}; };
struct iser_page_vec { struct iser_page_vec {
......
...@@ -221,7 +221,13 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn) ...@@ -221,7 +221,13 @@ void iser_free_rx_descriptors(struct iser_conn *ib_conn)
struct iser_device *device = ib_conn->device; struct iser_device *device = ib_conn->device;
if (ib_conn->login_buf) { if (ib_conn->login_buf) {
ib_dma_unmap_single(device->ib_device, ib_conn->login_dma, if (ib_conn->login_req_dma)
ib_dma_unmap_single(device->ib_device,
ib_conn->login_req_dma,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
if (ib_conn->login_resp_dma)
ib_dma_unmap_single(device->ib_device,
ib_conn->login_resp_dma,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE); ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
kfree(ib_conn->login_buf); kfree(ib_conn->login_buf);
} }
...@@ -394,6 +400,7 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -394,6 +400,7 @@ int iser_send_control(struct iscsi_conn *conn,
unsigned long data_seg_len; unsigned long data_seg_len;
int err = 0; int err = 0;
struct iser_device *device; struct iser_device *device;
struct iser_conn *ib_conn = iser_conn->ib_conn;
/* build the tx desc regd header and add it to the tx desc dto */ /* build the tx desc regd header and add it to the tx desc dto */
mdesc->type = ISCSI_TX_CONTROL; mdesc->type = ISCSI_TX_CONTROL;
...@@ -409,9 +416,19 @@ int iser_send_control(struct iscsi_conn *conn, ...@@ -409,9 +416,19 @@ int iser_send_control(struct iscsi_conn *conn,
iser_err("data present on non login task!!!\n"); iser_err("data present on non login task!!!\n");
goto send_control_error; goto send_control_error;
} }
memcpy(iser_conn->ib_conn->login_buf, task->data,
ib_dma_sync_single_for_cpu(device->ib_device,
ib_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE);
memcpy(iser_conn->ib_conn->login_req_buf, task->data,
task->data_count); task->data_count);
tx_dsg->addr = iser_conn->ib_conn->login_dma;
ib_dma_sync_single_for_device(device->ib_device,
ib_conn->login_req_dma, task->data_count,
DMA_TO_DEVICE);
tx_dsg->addr = iser_conn->ib_conn->login_req_dma;
tx_dsg->length = task->data_count; tx_dsg->length = task->data_count;
tx_dsg->lkey = device->mr->lkey; tx_dsg->lkey = device->mr->lkey;
mdesc->num_sge = 2; mdesc->num_sge = 2;
...@@ -445,8 +462,8 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, ...@@ -445,8 +462,8 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
int rx_buflen, outstanding, count, err; int rx_buflen, outstanding, count, err;
/* differentiate between login to all other PDUs */ /* differentiate between login to all other PDUs */
if ((char *)rx_desc == ib_conn->login_buf) { if ((char *)rx_desc == ib_conn->login_resp_buf) {
rx_dma = ib_conn->login_dma; rx_dma = ib_conn->login_resp_dma;
rx_buflen = ISER_RX_LOGIN_SIZE; rx_buflen = ISER_RX_LOGIN_SIZE;
} else { } else {
rx_dma = rx_desc->dma_addr; rx_dma = rx_desc->dma_addr;
...@@ -473,7 +490,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc, ...@@ -473,7 +490,7 @@ void iser_rcv_completion(struct iser_rx_desc *rx_desc,
* for the posted rx bufs refcount to become zero handles everything */ * for the posted rx bufs refcount to become zero handles everything */
conn->ib_conn->post_recv_buf_count--; conn->ib_conn->post_recv_buf_count--;
if (rx_dma == ib_conn->login_dma) if (rx_dma == ib_conn->login_resp_dma)
return; return;
outstanding = ib_conn->post_recv_buf_count; outstanding = ib_conn->post_recv_buf_count;
......
...@@ -155,20 +155,39 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn) ...@@ -155,20 +155,39 @@ static int iser_create_ib_conn_res(struct iser_conn *ib_conn)
{ {
struct iser_device *device; struct iser_device *device;
struct ib_qp_init_attr init_attr; struct ib_qp_init_attr init_attr;
int ret = -ENOMEM; int req_err, resp_err, ret = -ENOMEM;
struct ib_fmr_pool_param params; struct ib_fmr_pool_param params;
BUG_ON(ib_conn->device == NULL); BUG_ON(ib_conn->device == NULL);
device = ib_conn->device; device = ib_conn->device;
ib_conn->login_buf = kmalloc(ISER_RX_LOGIN_SIZE, GFP_KERNEL); ib_conn->login_buf = kmalloc(ISCSI_DEF_MAX_RECV_SEG_LEN +
ISER_RX_LOGIN_SIZE, GFP_KERNEL);
if (!ib_conn->login_buf) if (!ib_conn->login_buf)
goto out_err; goto out_err;
ib_conn->login_dma = ib_dma_map_single(ib_conn->device->ib_device, ib_conn->login_req_buf = ib_conn->login_buf;
(void *)ib_conn->login_buf, ISER_RX_LOGIN_SIZE, ib_conn->login_resp_buf = ib_conn->login_buf + ISCSI_DEF_MAX_RECV_SEG_LEN;
DMA_FROM_DEVICE);
ib_conn->login_req_dma = ib_dma_map_single(ib_conn->device->ib_device,
(void *)ib_conn->login_req_buf,
ISCSI_DEF_MAX_RECV_SEG_LEN, DMA_TO_DEVICE);
ib_conn->login_resp_dma = ib_dma_map_single(ib_conn->device->ib_device,
(void *)ib_conn->login_resp_buf,
ISER_RX_LOGIN_SIZE, DMA_FROM_DEVICE);
req_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_req_dma);
resp_err = ib_dma_mapping_error(device->ib_device, ib_conn->login_resp_dma);
if (req_err || resp_err) {
if (req_err)
ib_conn->login_req_dma = 0;
if (resp_err)
ib_conn->login_resp_dma = 0;
goto out_err;
}
ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) + ib_conn->page_vec = kmalloc(sizeof(struct iser_page_vec) +
(sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)), (sizeof(u64) * (ISCSI_ISER_SG_TABLESIZE +1)),
...@@ -658,11 +677,11 @@ int iser_post_recvl(struct iser_conn *ib_conn) ...@@ -658,11 +677,11 @@ int iser_post_recvl(struct iser_conn *ib_conn)
struct ib_sge sge; struct ib_sge sge;
int ib_ret; int ib_ret;
sge.addr = ib_conn->login_dma; sge.addr = ib_conn->login_resp_dma;
sge.length = ISER_RX_LOGIN_SIZE; sge.length = ISER_RX_LOGIN_SIZE;
sge.lkey = ib_conn->device->mr->lkey; sge.lkey = ib_conn->device->mr->lkey;
rx_wr.wr_id = (unsigned long)ib_conn->login_buf; rx_wr.wr_id = (unsigned long)ib_conn->login_resp_buf;
rx_wr.sg_list = &sge; rx_wr.sg_list = &sge;
rx_wr.num_sge = 1; rx_wr.num_sge = 1;
rx_wr.next = NULL; rx_wr.next = NULL;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment