Commit b1df2a07 authored by Linus Torvalds

Merge tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma

Pull rdma fixes from Jason Gunthorpe:
 "A number of driver bug fixes and a few recent regressions:

   - Several bug fixes for bnxt_re. Crashing, incorrect data reported,
     and corruption on new HW

   - Memory leak and crash in rxe

   - Fix sysfs corruption in rxe if the netdev name is too long

   - Fix a crash on error unwind in the new cq_pool code

   - Fix kobject panics in rtrs by handling device lifetime properly

   - Fix a data corruption bug in iser target related to misaligned
     buffers"

* tag 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rdma/rdma:
  IB/isert: Fix unaligned immediate-data handling
  RDMA/rtrs-srv: Set .release function for rtrs srv device during device init
  RDMA/bnxt_re: Remove set but not used variable 'qplib_ctx'
  RDMA/core: Fix reported speed and width
  RDMA/core: Fix unsafe linked list traversal after failing to allocate CQ
  RDMA/bnxt_re: Remove the qp from list only if the qp destroy succeeds
  RDMA/bnxt_re: Fix driver crash on unaligned PSN entry address
  RDMA/bnxt_re: Restrict the max_gids to 256
  RDMA/bnxt_re: Static NQ depth allocation
  RDMA/bnxt_re: Fix the qp table indexing
  RDMA/bnxt_re: Do not report transparent vlan from QP1
  RDMA/mlx4: Read pkey table length instead of hardcoded value
  RDMA/rxe: Fix panic when calling kmem_cache_create()
  RDMA/rxe: Fix memleak in rxe_mem_init_user
  RDMA/rxe: Fix the parent sysfs read when the interface has 15 chars
  RDMA/rtrs-srv: Replace device_register with device_initialize and device_add
parents 40249c69 0b089c1e
@@ -379,7 +379,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
 {
 	LIST_HEAD(tmp_list);
 	unsigned int nr_cqs, i;
-	struct ib_cq *cq;
+	struct ib_cq *cq, *n;
 	int ret;
 
 	if (poll_ctx > IB_POLL_LAST_POOL_TYPE) {
@@ -412,7 +412,7 @@ static int ib_alloc_cqs(struct ib_device *dev, unsigned int nr_cqes,
 	return 0;
 
 out_free_cqs:
-	list_for_each_entry(cq, &tmp_list, pool_entry) {
+	list_for_each_entry_safe(cq, n, &tmp_list, pool_entry) {
 		cq->shared = false;
 		ib_free_cq(cq);
 	}
......
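The cq_pool change above is the classic delete-during-traversal fix: ib_free_cq() frees the node the cursor is standing on, so the successor must be cached before freeing, which is exactly what list_for_each_entry_safe() does. A minimal userspace sketch of the same pattern (not kernel code; plain next-pointer list, names hypothetical):

	#include <stdio.h>
	#include <stdlib.h>

	struct node { struct node *next; int id; };

	int main(void)
	{
		struct node *head = NULL, *cur, *n;

		for (int i = 0; i < 3; i++) {
			cur = malloc(sizeof(*cur));
			cur->id = i;
			cur->next = head;
			head = cur;
		}
		/* Broken form: for (cur = head; cur; cur = cur->next) free(cur);
		 * reads cur->next from freed memory. The safe form caches the
		 * successor first, mirroring list_for_each_entry_safe(): */
		for (cur = head; cur; cur = n) {
			n = cur->next;		/* cache before freeing */
			printf("freeing node %d\n", cur->id);
			free(cur);
		}
		return 0;
	}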
@@ -1801,7 +1801,7 @@ int ib_get_eth_speed(struct ib_device *dev, u8 port_num, u8 *speed, u8 *width)
 	dev_put(netdev);
 
-	if (!rc) {
+	if (!rc && lksettings.base.speed != (u32)SPEED_UNKNOWN) {
 		netdev_speed = lksettings.base.speed;
 	} else {
 		netdev_speed = SPEED_1000;
......
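Context for the speed fix: with the link down, ethtool reports SPEED_UNKNOWN, which is -1, so the old `if (!rc)` path stored (u32)-1 as the netdev speed and derived a garbage width from it. A small userspace sketch of the fallback logic (constants mirror linux/ethtool.h; the surrounding code is hypothetical, not the driver's):

	#include <stdio.h>
	#include <stdint.h>

	#define SPEED_UNKNOWN	(-1)	/* as in linux/ethtool.h */
	#define SPEED_1000	1000

	int main(void)
	{
		int reported = SPEED_UNKNOWN;	/* ethtool result, link down */
		uint32_t netdev_speed;

		/* Old logic stored the raw value: 4294967295 Mb/s. */
		if (reported != SPEED_UNKNOWN)
			netdev_speed = (uint32_t)reported;
		else
			netdev_speed = SPEED_1000;	/* conservative default */

		printf("speed used: %u Mb/s\n", (unsigned)netdev_speed);
		return 0;
	}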
@@ -752,12 +752,6 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 	gsi_sqp = rdev->gsi_ctx.gsi_sqp;
 	gsi_sah = rdev->gsi_ctx.gsi_sah;
 
-	/* remove from active qp list */
-	mutex_lock(&rdev->qp_lock);
-	list_del(&gsi_sqp->list);
-	mutex_unlock(&rdev->qp_lock);
-	atomic_dec(&rdev->qp_count);
-
 	ibdev_dbg(&rdev->ibdev, "Destroy the shadow AH\n");
 	bnxt_qplib_destroy_ah(&rdev->qplib_res,
 			      &gsi_sah->qplib_ah,
@@ -772,6 +766,12 @@ static int bnxt_re_destroy_gsi_sqp(struct bnxt_re_qp *qp)
 	}
 	bnxt_qplib_free_qp_res(&rdev->qplib_res, &gsi_sqp->qplib_qp);
 
+	/* remove from active qp list */
+	mutex_lock(&rdev->qp_lock);
+	list_del(&gsi_sqp->list);
+	mutex_unlock(&rdev->qp_lock);
+	atomic_dec(&rdev->qp_count);
+
 	kfree(rdev->gsi_ctx.sqp_tbl);
 	kfree(gsi_sah);
 	kfree(gsi_sqp);
@@ -792,11 +792,6 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 	unsigned int flags;
 	int rc;
 
-	mutex_lock(&rdev->qp_lock);
-	list_del(&qp->list);
-	mutex_unlock(&rdev->qp_lock);
-	atomic_dec(&rdev->qp_count);
-
 	bnxt_qplib_flush_cqn_wq(&qp->qplib_qp);
 
 	rc = bnxt_qplib_destroy_qp(&rdev->qplib_res, &qp->qplib_qp);
@@ -819,6 +814,11 @@ int bnxt_re_destroy_qp(struct ib_qp *ib_qp, struct ib_udata *udata)
 			goto sh_fail;
 	}
 
+	mutex_lock(&rdev->qp_lock);
+	list_del(&qp->list);
+	mutex_unlock(&rdev->qp_lock);
+	atomic_dec(&rdev->qp_count);
+
 	ib_umem_release(qp->rumem);
 	ib_umem_release(qp->sumem);
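Both destroy paths above move the list removal to after the destroy command succeeds: if the firmware call fails, the QP must stay on the active list so it can still be found for cleanup or retry. A userspace sketch of that ordering rule (destroy_hw() is a hypothetical stand-in for bnxt_qplib_destroy_qp()):

	#include <stdbool.h>
	#include <stdio.h>

	struct qp { int id; bool on_list; };

	/* Hypothetical stand-in for the firmware command; may fail. */
	static int destroy_hw(struct qp *qp)
	{
		return qp->id == 2 ? -1 : 0;	/* simulate one failure */
	}

	static int destroy_qp(struct qp *qp)
	{
		int rc = destroy_hw(qp);

		if (rc)
			return rc;	/* keep qp tracked on failure */
		qp->on_list = false;	/* unlink only after success */
		return 0;
	}

	int main(void)
	{
		struct qp a = { 1, true }, b = { 2, true };

		printf("qp1 rc=%d on_list=%d\n", destroy_qp(&a), a.on_list);
		printf("qp2 rc=%d on_list=%d\n", destroy_qp(&b), b.on_list);
		return 0;
	}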
@@ -3264,6 +3264,19 @@ static void bnxt_re_process_res_rawqp1_wc(struct ib_wc *wc,
 	wc->wc_flags |= IB_WC_GRH;
 }
 
+static bool bnxt_re_check_if_vlan_valid(struct bnxt_re_dev *rdev,
+					u16 vlan_id)
+{
+	/*
+	 * Check if the vlan is configured in the host. If not configured, it
+	 * can be a transparent VLAN. So dont report the vlan id.
+	 */
+	if (!__vlan_find_dev_deep_rcu(rdev->netdev,
+				      htons(ETH_P_8021Q), vlan_id))
+		return false;
+	return true;
+}
+
 static bool bnxt_re_is_vlan_pkt(struct bnxt_qplib_cqe *orig_cqe,
 				u16 *vid, u8 *sl)
 {
@@ -3332,9 +3345,11 @@ static void bnxt_re_process_res_shadow_qp_wc(struct bnxt_re_qp *gsi_sqp,
 	wc->src_qp = orig_cqe->src_qp;
 	memcpy(wc->smac, orig_cqe->smac, ETH_ALEN);
 	if (bnxt_re_is_vlan_pkt(orig_cqe, &vlan_id, &sl)) {
-		wc->vlan_id = vlan_id;
-		wc->sl = sl;
-		wc->wc_flags |= IB_WC_WITH_VLAN;
+		if (bnxt_re_check_if_vlan_valid(rdev, vlan_id)) {
+			wc->vlan_id = vlan_id;
+			wc->sl = sl;
+			wc->wc_flags |= IB_WC_WITH_VLAN;
+		}
 	}
 	wc->port_num = 1;
 	wc->vendor_err = orig_cqe->status;
......
@@ -1009,7 +1009,6 @@ static void bnxt_re_free_res(struct bnxt_re_dev *rdev)
 static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 {
 	struct bnxt_re_ring_attr rattr = {};
-	struct bnxt_qplib_ctx *qplib_ctx;
 	int num_vec_created = 0;
 	int rc = 0, i;
 	u8 type;
@@ -1032,13 +1031,11 @@ static int bnxt_re_alloc_res(struct bnxt_re_dev *rdev)
 	if (rc)
 		goto dealloc_res;
 
-	qplib_ctx = &rdev->qplib_ctx;
 	for (i = 0; i < rdev->num_msix - 1; i++) {
 		struct bnxt_qplib_nq *nq;
 
 		nq = &rdev->nq[i];
-		nq->hwq.max_elements = (qplib_ctx->cq_count +
-					qplib_ctx->srqc_count + 2);
+		nq->hwq.max_elements = BNXT_QPLIB_NQE_MAX_CNT;
 		rc = bnxt_qplib_alloc_nq(&rdev->qplib_res, &rdev->nq[i]);
 		if (rc) {
 			ibdev_err(&rdev->ibdev, "Alloc Failed NQ%d rc:%#x",
......
@@ -818,6 +818,7 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	u16 cmd_flags = 0;
 	u32 qp_flags = 0;
 	u8 pg_sz_lvl;
+	u32 tbl_indx;
 	int rc;
 
 	RCFW_CMD_PREP(req, CREATE_QP1, cmd_flags);
@@ -907,8 +908,9 @@ int bnxt_qplib_create_qp1(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		rq->dbinfo.db = qp->dpi->dbr;
 		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
 	}
-	rcfw->qp_tbl[qp->id].qp_id = qp->id;
-	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
+	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
 
 	return 0;
@@ -935,10 +937,10 @@ static void bnxt_qplib_init_psn_ptr(struct bnxt_qplib_qp *qp, int size)
 	sq = &qp->sq;
 	hwq = &sq->hwq;
+	/* First psn entry */
 	fpsne = (u64)bnxt_qplib_get_qe(hwq, hwq->depth, &psn_pg);
 	if (!IS_ALIGNED(fpsne, PAGE_SIZE))
-		indx_pad = ALIGN(fpsne, PAGE_SIZE) / size;
+		indx_pad = (fpsne & ~PAGE_MASK) / size;
 	hwq->pad_pgofft = indx_pad;
 	hwq->pad_pg = (u64 *)psn_pg;
 	hwq->pad_stride = size;
@@ -959,6 +961,7 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 	u16 cmd_flags = 0;
 	u32 qp_flags = 0;
 	u8 pg_sz_lvl;
+	u32 tbl_indx;
 	u16 nsge;
 
 	RCFW_CMD_PREP(req, CREATE_QP, cmd_flags);
@@ -1111,8 +1114,9 @@ int bnxt_qplib_create_qp(struct bnxt_qplib_res *res, struct bnxt_qplib_qp *qp)
 		rq->dbinfo.db = qp->dpi->dbr;
 		rq->dbinfo.max_slot = bnxt_qplib_set_rq_max_slot(rq->wqe_size);
 	}
-	rcfw->qp_tbl[qp->id].qp_id = qp->id;
-	rcfw->qp_tbl[qp->id].qp_handle = (void *)qp;
+	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+	rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+	rcfw->qp_tbl[tbl_indx].qp_handle = (void *)qp;
 
 	return 0;
 
 fail:
@@ -1457,10 +1461,12 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 	struct cmdq_destroy_qp req;
 	struct creq_destroy_qp_resp resp;
 	u16 cmd_flags = 0;
+	u32 tbl_indx;
 	int rc;
 
-	rcfw->qp_tbl[qp->id].qp_id = BNXT_QPLIB_QP_ID_INVALID;
-	rcfw->qp_tbl[qp->id].qp_handle = NULL;
+	tbl_indx = map_qp_id_to_tbl_indx(qp->id, rcfw);
+	rcfw->qp_tbl[tbl_indx].qp_id = BNXT_QPLIB_QP_ID_INVALID;
+	rcfw->qp_tbl[tbl_indx].qp_handle = NULL;
 
 	RCFW_CMD_PREP(req, DESTROY_QP, cmd_flags);
@@ -1468,8 +1474,8 @@ int bnxt_qplib_destroy_qp(struct bnxt_qplib_res *res,
 	rc = bnxt_qplib_rcfw_send_message(rcfw, (void *)&req,
 					  (void *)&resp, NULL, 0);
 	if (rc) {
-		rcfw->qp_tbl[qp->id].qp_id = qp->id;
-		rcfw->qp_tbl[qp->id].qp_handle = qp;
+		rcfw->qp_tbl[tbl_indx].qp_id = qp->id;
+		rcfw->qp_tbl[tbl_indx].qp_handle = qp;
 		return rc;
 	}
......
@@ -307,14 +307,15 @@ static int bnxt_qplib_process_qp_event(struct bnxt_qplib_rcfw *rcfw,
 	__le16 mcookie;
 	u16 cookie;
 	int rc = 0;
-	u32 qp_id;
+	u32 qp_id, tbl_indx;
 
 	pdev = rcfw->pdev;
 	switch (qp_event->event) {
 	case CREQ_QP_EVENT_EVENT_QP_ERROR_NOTIFICATION:
 		err_event = (struct creq_qp_error_notification *)qp_event;
 		qp_id = le32_to_cpu(err_event->xid);
-		qp = rcfw->qp_tbl[qp_id].qp_handle;
+		tbl_indx = map_qp_id_to_tbl_indx(qp_id, rcfw);
+		qp = rcfw->qp_tbl[tbl_indx].qp_handle;
 		dev_dbg(&pdev->dev, "Received QP error notification\n");
 		dev_dbg(&pdev->dev,
 			"qpid 0x%x, req_err=0x%x, resp_err=0x%x\n",
@@ -615,8 +616,9 @@ int bnxt_qplib_alloc_rcfw_channel(struct bnxt_qplib_res *res,
 	cmdq->bmap_size = bmap_size;
 
-	rcfw->qp_tbl_size = qp_tbl_sz;
-	rcfw->qp_tbl = kcalloc(qp_tbl_sz, sizeof(struct bnxt_qplib_qp_node),
+	/* Allocate one extra to hold the QP1 entries */
+	rcfw->qp_tbl_size = qp_tbl_sz + 1;
+	rcfw->qp_tbl = kcalloc(rcfw->qp_tbl_size, sizeof(struct bnxt_qplib_qp_node),
 			       GFP_KERNEL);
 	if (!rcfw->qp_tbl)
 		goto fail;
......
@@ -216,4 +216,9 @@ int bnxt_qplib_deinit_rcfw(struct bnxt_qplib_rcfw *rcfw);
 int bnxt_qplib_init_rcfw(struct bnxt_qplib_rcfw *rcfw,
 			 struct bnxt_qplib_ctx *ctx, int is_virtfn);
 void bnxt_qplib_mark_qp_error(void *qp_handle);
+static inline u32 map_qp_id_to_tbl_indx(u32 qid, struct bnxt_qplib_rcfw *rcfw)
+{
+	/* Last index of the qp_tbl is for QP1 ie. qp_tbl_size - 1 */
+	return (qid == 1) ? rcfw->qp_tbl_size - 1 : qid % rcfw->qp_tbl_size - 2;
+}
 #endif /* __BNXT_QPLIB_RCFW_H__ */
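The table sizing and the helper above work together: bnxt_qplib_alloc_rcfw_channel() allocates one slot beyond the firmware QP count, and map_qp_id_to_tbl_indx() pins QP1, whose firmware id need not be small, to that reserved last slot while other ids are reduced into the lower slots. A userspace sketch of the layout (sizes hypothetical; the illustrative reduction below is a plain modulo, the driver's exact mapping is the function above):

	#include <stdio.h>
	#include <stdlib.h>
	#include <stdint.h>

	struct qp_node { uint32_t qp_id; void *qp_handle; };

	int main(void)
	{
		uint32_t qp_tbl_sz = 8;			/* hypothetical FW QP count */
		uint32_t qp_tbl_size = qp_tbl_sz + 1;	/* one extra slot for QP1 */
		struct qp_node *tbl = calloc(qp_tbl_size, sizeof(*tbl));

		if (!tbl)
			return 1;
		tbl[qp_tbl_size - 1].qp_id = 1;	/* QP1 pinned to the last index */
		printf("QP1 -> slot %u; qp_id 5 -> slot %u\n",
		       (unsigned)(qp_tbl_size - 1), (unsigned)(5 % qp_tbl_sz));
		free(tbl);
		return 0;
	}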
@@ -149,7 +149,7 @@ int bnxt_qplib_get_dev_attr(struct bnxt_qplib_rcfw *rcfw,
 	attr->max_inline_data = le32_to_cpu(sb->max_inline_data);
 	attr->l2_db_size = (sb->l2_db_space_size + 1) *
 			    (0x01 << RCFW_DBR_BASE_PAGE_SHIFT);
-	attr->max_sgid = le32_to_cpu(sb->max_gid);
+	attr->max_sgid = BNXT_QPLIB_NUM_GIDS_SUPPORTED;
 
 	bnxt_qplib_query_version(rcfw, attr->fw_ver);
......
@@ -47,6 +47,7 @@
 struct bnxt_qplib_dev_attr {
 #define FW_VER_ARR_LEN 4
 	u8 fw_ver[FW_VER_ARR_LEN];
+#define BNXT_QPLIB_NUM_GIDS_SUPPORTED 256
 	u16 max_sgid;
 	u16 max_mrw;
 	u32 max_qp;
......
@@ -784,7 +784,8 @@ static int eth_link_query_port(struct ib_device *ibdev, u8 port,
 	props->ip_gids = true;
 	props->gid_tbl_len = mdev->dev->caps.gid_table_len[port];
 	props->max_msg_sz = mdev->dev->caps.max_msg_sz;
-	props->pkey_tbl_len = 1;
+	if (mdev->dev->caps.pkey_table_len[port])
+		props->pkey_tbl_len = 1;
 	props->max_mtu = IB_MTU_4096;
 	props->max_vl_num = 2;
 	props->state = IB_PORT_DOWN;
......
@@ -40,6 +40,8 @@ MODULE_AUTHOR("Bob Pearson, Frank Zago, John Groves, Kamal Heib");
 MODULE_DESCRIPTION("Soft RDMA transport");
 MODULE_LICENSE("Dual BSD/GPL");
 
+bool rxe_initialized;
+
 /* free resources for a rxe device all objects created for this device must
  * have been destroyed
  */
@@ -315,6 +317,7 @@ static int __init rxe_module_init(void)
 		return err;
 
 	rdma_link_register(&rxe_link_ops);
+	rxe_initialized = true;
 	pr_info("loaded\n");
 	return 0;
 }
@@ -326,6 +329,7 @@ static void __exit rxe_module_exit(void)
 	rxe_net_exit();
 	rxe_cache_exit();
 
+	rxe_initialized = false;
 	pr_info("unloaded\n");
 }
......
@@ -67,6 +67,8 @@
 
 #define RXE_ROCE_V2_SPORT		(0xc000)
 
+extern bool rxe_initialized;
+
 static inline u32 rxe_crc32(struct rxe_dev *rxe,
 			    u32 crc, void *next, size_t len)
 {
@@ -205,6 +205,7 @@ int rxe_mem_init_user(struct rxe_pd *pd, u64 start,
 			vaddr = page_address(sg_page_iter_page(&sg_iter));
 			if (!vaddr) {
 				pr_warn("null vaddr\n");
+				ib_umem_release(umem);
 				err = -ENOMEM;
 				goto err1;
 			}
......
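The rxe leak fix follows the usual C unwind rule: anything acquired before a failure branch must be released on that branch. A userspace sketch of the shape (my_acquire()/my_release() are hypothetical stand-ins for ib_umem_get()/ib_umem_release()):

	#include <stdio.h>
	#include <stdlib.h>

	static void *my_acquire(void) { return malloc(64); }
	static void my_release(void *p) { free(p); }

	static void *init_user(int fail_later)
	{
		void *umem = my_acquire();

		if (!umem)
			return NULL;
		if (fail_later) {		/* e.g. a NULL vaddr mid-loop */
			my_release(umem);	/* the release the fix added */
			return NULL;
		}
		return umem;			/* caller owns umem on success */
	}

	int main(void)
	{
		void *ok = init_user(0);

		printf("ok=%p, error path=%p\n", ok, init_user(1));
		my_release(ok);
		return 0;
	}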
@@ -61,6 +61,11 @@ static int rxe_param_set_add(const char *val, const struct kernel_param *kp)
 	struct net_device *ndev;
 	struct rxe_dev *exists;
 
+	if (!rxe_initialized) {
+		pr_err("Module parameters are not supported, use rdma link add or rxe_cfg\n");
+		return -EAGAIN;
+	}
+
 	len = sanitize_arg(val, intf, sizeof(intf));
 	if (!len) {
 		pr_err("add: invalid interface name\n");
......
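The rxe_initialized flag above closes a window where the module-parameter store routine could run before rxe_module_init() had created its kmem caches, causing the panic named in the commit list. A userspace sketch of the guard (names hypothetical):

	#include <stdbool.h>
	#include <stdio.h>
	#include <errno.h>

	static bool initialized;

	static int add_interface(const char *name)
	{
		if (!initialized)
			return -EAGAIN;	/* caches not ready yet */
		printf("adding %s\n", name);
		return 0;
	}

	int main(void)
	{
		printf("early call: %d\n", add_interface("eth0"));	/* -EAGAIN */
		initialized = true;					/* end of init */
		printf("late call: %d\n", add_interface("eth0"));	/* 0 */
		return 0;
	}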
@@ -1056,7 +1056,7 @@ static ssize_t parent_show(struct device *device,
 	struct rxe_dev *rxe =
 		rdma_device_to_drv_device(device, struct rxe_dev, ib_dev);
 
-	return snprintf(buf, 16, "%s\n", rxe_parent_name(rxe, 1));
+	return scnprintf(buf, PAGE_SIZE, "%s\n", rxe_parent_name(rxe, 1));
 }
 
 static DEVICE_ATTR_RO(parent);
......
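Background for the sysfs fix: snprintf() returns the length the output would have had, not the bytes actually stored, so with a 15-character netdev name plus '\n' against the old hard-coded 16-byte bound it returned 16 while only 15 characters fit, corrupting the reported sysfs read length. The kernel's scnprintf() returns what was really written. A userspace sketch (scnprintf is kernel-only, so its return value is emulated here):

	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char buf[16];
		const char *ifname = "enp0s20f0u8u3c2";	/* 15 chars: IFNAMSIZ - 1 */
		int would_be = snprintf(buf, sizeof(buf), "%s\n", ifname);
		int stored = would_be < (int)sizeof(buf) ?
			     would_be : (int)sizeof(buf) - 1;

		printf("snprintf returned %d, but buf holds %zu chars\n",
		       would_be, strlen(buf));
		printf("scnprintf would have returned %d\n", stored);
		return 0;
	}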
@@ -140,15 +140,15 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 	rx_desc = isert_conn->rx_descs;
 	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
-		dma_addr = ib_dma_map_single(ib_dev, (void *)rx_desc,
-				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+		dma_addr = ib_dma_map_single(ib_dev, rx_desc->buf,
+					     ISER_RX_SIZE, DMA_FROM_DEVICE);
 		if (ib_dma_mapping_error(ib_dev, dma_addr))
 			goto dma_map_fail;
 
 		rx_desc->dma_addr = dma_addr;
 
 		rx_sg = &rx_desc->rx_sg;
-		rx_sg->addr = rx_desc->dma_addr;
+		rx_sg->addr = rx_desc->dma_addr + isert_get_hdr_offset(rx_desc);
 		rx_sg->length = ISER_RX_PAYLOAD_SIZE;
 		rx_sg->lkey = device->pd->local_dma_lkey;
 		rx_desc->rx_cqe.done = isert_recv_done;
@@ -160,7 +160,7 @@ isert_alloc_rx_descriptors(struct isert_conn *isert_conn)
 	rx_desc = isert_conn->rx_descs;
 	for (j = 0; j < i; j++, rx_desc++) {
 		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
-				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+				    ISER_RX_SIZE, DMA_FROM_DEVICE);
 	}
 	kfree(isert_conn->rx_descs);
 	isert_conn->rx_descs = NULL;
@@ -181,7 +181,7 @@ isert_free_rx_descriptors(struct isert_conn *isert_conn)
 	rx_desc = isert_conn->rx_descs;
 	for (i = 0; i < ISERT_QP_MAX_RECV_DTOS; i++, rx_desc++) {
 		ib_dma_unmap_single(ib_dev, rx_desc->dma_addr,
-				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+				    ISER_RX_SIZE, DMA_FROM_DEVICE);
 	}
 	kfree(isert_conn->rx_descs);
@@ -299,10 +299,9 @@ isert_free_login_buf(struct isert_conn *isert_conn)
 			    ISER_RX_PAYLOAD_SIZE, DMA_TO_DEVICE);
 	kfree(isert_conn->login_rsp_buf);
 
-	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
-			    ISER_RX_PAYLOAD_SIZE,
-			    DMA_FROM_DEVICE);
-	kfree(isert_conn->login_req_buf);
+	ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+			    ISER_RX_SIZE, DMA_FROM_DEVICE);
+	kfree(isert_conn->login_desc);
 }
 
 static int
@@ -311,25 +310,25 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 {
 	int ret;
 
-	isert_conn->login_req_buf = kzalloc(sizeof(*isert_conn->login_req_buf),
+	isert_conn->login_desc = kzalloc(sizeof(*isert_conn->login_desc),
 			GFP_KERNEL);
-	if (!isert_conn->login_req_buf)
+	if (!isert_conn->login_desc)
 		return -ENOMEM;
 
-	isert_conn->login_req_dma = ib_dma_map_single(ib_dev,
-				isert_conn->login_req_buf,
-				ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_req_dma);
+	isert_conn->login_desc->dma_addr = ib_dma_map_single(ib_dev,
+				isert_conn->login_desc->buf,
+				ISER_RX_SIZE, DMA_FROM_DEVICE);
+	ret = ib_dma_mapping_error(ib_dev, isert_conn->login_desc->dma_addr);
 	if (ret) {
-		isert_err("login_req_dma mapping error: %d\n", ret);
-		isert_conn->login_req_dma = 0;
-		goto out_free_login_req_buf;
+		isert_err("login_desc dma mapping error: %d\n", ret);
+		isert_conn->login_desc->dma_addr = 0;
+		goto out_free_login_desc;
 	}
 
 	isert_conn->login_rsp_buf = kzalloc(ISER_RX_PAYLOAD_SIZE, GFP_KERNEL);
 	if (!isert_conn->login_rsp_buf) {
 		ret = -ENOMEM;
-		goto out_unmap_login_req_buf;
+		goto out_unmap_login_desc;
 	}
 
 	isert_conn->login_rsp_dma = ib_dma_map_single(ib_dev,
@@ -346,11 +345,11 @@ isert_alloc_login_buf(struct isert_conn *isert_conn,
 
 out_free_login_rsp_buf:
 	kfree(isert_conn->login_rsp_buf);
-out_unmap_login_req_buf:
-	ib_dma_unmap_single(ib_dev, isert_conn->login_req_dma,
-			    ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
-out_free_login_req_buf:
-	kfree(isert_conn->login_req_buf);
+out_unmap_login_desc:
+	ib_dma_unmap_single(ib_dev, isert_conn->login_desc->dma_addr,
+			    ISER_RX_SIZE, DMA_FROM_DEVICE);
+out_free_login_desc:
+	kfree(isert_conn->login_desc);
 	return ret;
 }
@@ -476,7 +475,7 @@ isert_connect_release(struct isert_conn *isert_conn)
 	if (isert_conn->qp)
 		isert_destroy_qp(isert_conn);
 
-	if (isert_conn->login_req_buf)
+	if (isert_conn->login_desc)
 		isert_free_login_buf(isert_conn);
 
 	isert_device_put(device);
@@ -862,17 +861,18 @@ isert_login_post_recv(struct isert_conn *isert_conn)
 	int ret;
 
 	memset(&sge, 0, sizeof(struct ib_sge));
-	sge.addr = isert_conn->login_req_dma;
+	sge.addr = isert_conn->login_desc->dma_addr +
+		   isert_get_hdr_offset(isert_conn->login_desc);
 	sge.length = ISER_RX_PAYLOAD_SIZE;
 	sge.lkey = isert_conn->device->pd->local_dma_lkey;
 
 	isert_dbg("Setup sge: addr: %llx length: %d 0x%08x\n",
 		sge.addr, sge.length, sge.lkey);
 
-	isert_conn->login_req_buf->rx_cqe.done = isert_login_recv_done;
+	isert_conn->login_desc->rx_cqe.done = isert_login_recv_done;
 
 	memset(&rx_wr, 0, sizeof(struct ib_recv_wr));
-	rx_wr.wr_cqe = &isert_conn->login_req_buf->rx_cqe;
+	rx_wr.wr_cqe = &isert_conn->login_desc->rx_cqe;
 	rx_wr.sg_list = &sge;
 	rx_wr.num_sge = 1;
@@ -949,7 +949,7 @@ isert_put_login_tx(struct iscsi_conn *conn, struct iscsi_login *login,
 static void
 isert_rx_login_req(struct isert_conn *isert_conn)
 {
-	struct iser_rx_desc *rx_desc = isert_conn->login_req_buf;
+	struct iser_rx_desc *rx_desc = isert_conn->login_desc;
 	int rx_buflen = isert_conn->login_req_len;
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_login *login = conn->conn_login;
@@ -961,7 +961,7 @@ isert_rx_login_req(struct isert_conn *isert_conn)
 
 	if (login->first_request) {
 		struct iscsi_login_req *login_req =
-			(struct iscsi_login_req *)&rx_desc->iscsi_header;
+			(struct iscsi_login_req *)isert_get_iscsi_hdr(rx_desc);
 		/*
 		 * Setup the initial iscsi_login values from the leading
 		 * login request PDU.
@@ -980,13 +980,13 @@ isert_rx_login_req(struct isert_conn *isert_conn)
 		login->tsih = be16_to_cpu(login_req->tsih);
 	}
 
-	memcpy(&login->req[0], (void *)&rx_desc->iscsi_header, ISCSI_HDR_LEN);
+	memcpy(&login->req[0], isert_get_iscsi_hdr(rx_desc), ISCSI_HDR_LEN);
 
 	size = min(rx_buflen, MAX_KEY_VALUE_PAIRS);
 	isert_dbg("Using login payload size: %d, rx_buflen: %d "
 		  "MAX_KEY_VALUE_PAIRS: %d\n", size, rx_buflen,
 		  MAX_KEY_VALUE_PAIRS);
-	memcpy(login->req_buf, &rx_desc->data[0], size);
+	memcpy(login->req_buf, isert_get_data(rx_desc), size);
 
 	if (login->first_request) {
 		complete(&isert_conn->login_comp);
@@ -1051,14 +1051,15 @@ isert_handle_scsi_cmd(struct isert_conn *isert_conn,
 	if (imm_data_len != data_len) {
 		sg_nents = max(1UL, DIV_ROUND_UP(imm_data_len, PAGE_SIZE));
 		sg_copy_from_buffer(cmd->se_cmd.t_data_sg, sg_nents,
-				    &rx_desc->data[0], imm_data_len);
+				    isert_get_data(rx_desc), imm_data_len);
 		isert_dbg("Copy Immediate sg_nents: %u imm_data_len: %d\n",
 			  sg_nents, imm_data_len);
 	} else {
 		sg_init_table(&isert_cmd->sg, 1);
 		cmd->se_cmd.t_data_sg = &isert_cmd->sg;
 		cmd->se_cmd.t_data_nents = 1;
-		sg_set_buf(&isert_cmd->sg, &rx_desc->data[0], imm_data_len);
+		sg_set_buf(&isert_cmd->sg, isert_get_data(rx_desc),
+			   imm_data_len);
 		isert_dbg("Transfer Immediate imm_data_len: %d\n",
 			  imm_data_len);
 	}
@@ -1127,9 +1128,9 @@ isert_handle_iscsi_dataout(struct isert_conn *isert_conn,
 	}
 	isert_dbg("Copying DataOut: sg_start: %p, sg_off: %u "
 		  "sg_nents: %u from %p %u\n", sg_start, sg_off,
-		  sg_nents, &rx_desc->data[0], unsol_data_len);
+		  sg_nents, isert_get_data(rx_desc), unsol_data_len);
 
-	sg_copy_from_buffer(sg_start, sg_nents, &rx_desc->data[0],
+	sg_copy_from_buffer(sg_start, sg_nents, isert_get_data(rx_desc),
 			    unsol_data_len);
 
 	rc = iscsit_check_dataout_payload(cmd, hdr, false);
@@ -1188,7 +1189,7 @@ isert_handle_text_cmd(struct isert_conn *isert_conn, struct isert_cmd *isert_cmd
 	}
 	cmd->text_in_ptr = text_in;
 
-	memcpy(cmd->text_in_ptr, &rx_desc->data[0], payload_length);
+	memcpy(cmd->text_in_ptr, isert_get_data(rx_desc), payload_length);
 
 	return iscsit_process_text_cmd(conn, cmd, hdr);
 }
@@ -1198,7 +1199,7 @@ isert_rx_opcode(struct isert_conn *isert_conn, struct iser_rx_desc *rx_desc,
 		uint32_t read_stag, uint64_t read_va,
 		uint32_t write_stag, uint64_t write_va)
 {
-	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
+	struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
 	struct iscsi_conn *conn = isert_conn->conn;
 	struct iscsi_cmd *cmd;
 	struct isert_cmd *isert_cmd;
@@ -1296,8 +1297,8 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	struct isert_conn *isert_conn = wc->qp->qp_context;
 	struct ib_device *ib_dev = isert_conn->cm_id->device;
 	struct iser_rx_desc *rx_desc = cqe_to_rx_desc(wc->wr_cqe);
-	struct iscsi_hdr *hdr = &rx_desc->iscsi_header;
-	struct iser_ctrl *iser_ctrl = &rx_desc->iser_header;
+	struct iscsi_hdr *hdr = isert_get_iscsi_hdr(rx_desc);
+	struct iser_ctrl *iser_ctrl = isert_get_iser_hdr(rx_desc);
 	uint64_t read_va = 0, write_va = 0;
 	uint32_t read_stag = 0, write_stag = 0;
@@ -1311,7 +1312,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	rx_desc->in_use = true;
 
 	ib_dma_sync_single_for_cpu(ib_dev, rx_desc->dma_addr,
-			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+			ISER_RX_SIZE, DMA_FROM_DEVICE);
 
 	isert_dbg("DMA: 0x%llx, iSCSI opcode: 0x%02x, ITT: 0x%08x, flags: 0x%02x dlen: %d\n",
 		 rx_desc->dma_addr, hdr->opcode, hdr->itt, hdr->flags,
@@ -1346,7 +1347,7 @@ isert_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 			read_stag, read_va, write_stag, write_va);
 
 	ib_dma_sync_single_for_device(ib_dev, rx_desc->dma_addr,
-			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+			ISER_RX_SIZE, DMA_FROM_DEVICE);
 }
 
 static void
@@ -1360,8 +1361,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 		return;
 	}
 
-	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_req_dma,
-			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_cpu(ib_dev, isert_conn->login_desc->dma_addr,
+			ISER_RX_SIZE, DMA_FROM_DEVICE);
 
 	isert_conn->login_req_len = wc->byte_len - ISER_HEADERS_LEN;
@@ -1376,8 +1377,8 @@ isert_login_recv_done(struct ib_cq *cq, struct ib_wc *wc)
 	complete(&isert_conn->login_req_comp);
 	mutex_unlock(&isert_conn->mutex);
 
-	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_req_dma,
-			ISER_RX_PAYLOAD_SIZE, DMA_FROM_DEVICE);
+	ib_dma_sync_single_for_device(ib_dev, isert_conn->login_desc->dma_addr,
+			ISER_RX_SIZE, DMA_FROM_DEVICE);
 }
 
 static void
......
@@ -59,9 +59,11 @@
 				ISERT_MAX_TX_MISC_PDUS	+ \
 				ISERT_MAX_RX_MISC_PDUS)
 
-#define ISER_RX_PAD_SIZE	(ISCSI_DEF_MAX_RECV_SEG_LEN + 4096 - \
-		(ISER_RX_PAYLOAD_SIZE + sizeof(u64) + sizeof(struct ib_sge) + \
-		 sizeof(struct ib_cqe) + sizeof(bool)))
+/*
+ * RX size is default of 8k plus headers, but data needs to align to
+ * 512 boundary, so use 1024 to have the extra space for alignment.
+ */
+#define ISER_RX_SIZE		(ISCSI_DEF_MAX_RECV_SEG_LEN + 1024)
 
 /* Maximum support is 16MB I/O size */
 #define ISCSI_ISER_MAX_SG_TABLESIZE	4096
@@ -81,21 +83,41 @@ enum iser_conn_state {
 };
 
 struct iser_rx_desc {
-	struct iser_ctrl iser_header;
-	struct iscsi_hdr iscsi_header;
-	char data[ISCSI_DEF_MAX_RECV_SEG_LEN];
+	char		buf[ISER_RX_SIZE];
 	u64		dma_addr;
 	struct ib_sge	rx_sg;
 	struct ib_cqe	rx_cqe;
 	bool		in_use;
-	char		pad[ISER_RX_PAD_SIZE];
-} __packed;
+};
 
 static inline struct iser_rx_desc *cqe_to_rx_desc(struct ib_cqe *cqe)
 {
 	return container_of(cqe, struct iser_rx_desc, rx_cqe);
 }
 
+static void *isert_get_iser_hdr(struct iser_rx_desc *desc)
+{
+	return PTR_ALIGN(desc->buf + ISER_HEADERS_LEN, 512) - ISER_HEADERS_LEN;
+}
+
+static size_t isert_get_hdr_offset(struct iser_rx_desc *desc)
+{
+	return isert_get_iser_hdr(desc) - (void *)desc->buf;
+}
+
+static void *isert_get_iscsi_hdr(struct iser_rx_desc *desc)
+{
+	return isert_get_iser_hdr(desc) + sizeof(struct iser_ctrl);
+}
+
+static void *isert_get_data(struct iser_rx_desc *desc)
+{
+	void *data = isert_get_iser_hdr(desc) + ISER_HEADERS_LEN;
+
+	WARN_ON((uintptr_t)data & 511);
+	return data;
+}
+
 struct iser_tx_desc {
 	struct iser_ctrl iser_header;
 	struct iscsi_hdr iscsi_header;
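The helpers above implement the commit's core trick: inside the flat buf[], the iSER and iSCSI headers are placed so that the payload following them starts on a 512-byte boundary, which is what the misaligned-immediate-data fix requires. A userspace sketch of the arithmetic (ALIGN_UP mirrors the kernel's PTR_ALIGN; the ISER_HEADERS_LEN value here is a hypothetical stand-in for sizeof iser ctrl + iscsi hdr):

	#include <stdint.h>
	#include <stdio.h>

	#define ALIGN_UP(p, a)	(((uintptr_t)(p) + ((a) - 1)) & ~((uintptr_t)(a) - 1))
	#define ISER_HEADERS_LEN 76	/* hypothetical header total */

	int main(void)
	{
		char buf[8192 + 1024];	/* mirrors ISER_RX_SIZE's alignment slack */
		/* Payload = first 512-aligned address leaving room for headers;
		 * headers sit immediately before it. */
		char *payload = (char *)ALIGN_UP(buf + ISER_HEADERS_LEN, 512);
		char *hdr = payload - ISER_HEADERS_LEN;

		printf("hdr at offset %td, payload at offset %td (mod 512 = %ju)\n",
		       hdr - buf, payload - buf,
		       (uintmax_t)((uintptr_t)payload % 512));
		return 0;
	}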
@@ -142,9 +164,8 @@ struct isert_conn {
 	u32			responder_resources;
 	u32			initiator_depth;
 	bool			pi_support;
-	struct iser_rx_desc	*login_req_buf;
+	struct iser_rx_desc	*login_desc;
 	char			*login_rsp_buf;
-	u64			login_req_dma;
 	int			login_req_len;
 	u64			login_rsp_dma;
 	struct iser_rx_desc	*rx_descs;
......
@@ -152,13 +152,6 @@ static struct attribute_group rtrs_srv_stats_attr_group = {
 	.attrs = rtrs_srv_stats_attrs,
 };
 
-static void rtrs_srv_dev_release(struct device *dev)
-{
-	struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
-
-	kfree(srv);
-}
-
 static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
 {
 	struct rtrs_srv *srv = sess->srv;
@@ -172,7 +165,6 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
 		goto unlock;
 	}
 	srv->dev.class = rtrs_dev_class;
-	srv->dev.release = rtrs_srv_dev_release;
 	err = dev_set_name(&srv->dev, "%s", sess->s.sessname);
 	if (err)
 		goto unlock;
@@ -182,16 +174,16 @@ static int rtrs_srv_create_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
 	 * sysfs files are created
 	 */
 	dev_set_uevent_suppress(&srv->dev, true);
-	err = device_register(&srv->dev);
+	err = device_add(&srv->dev);
 	if (err) {
-		pr_err("device_register(): %d\n", err);
+		pr_err("device_add(): %d\n", err);
 		goto put;
 	}
 	srv->kobj_paths = kobject_create_and_add("paths", &srv->dev.kobj);
 	if (!srv->kobj_paths) {
 		err = -ENOMEM;
 		pr_err("kobject_create_and_add(): %d\n", err);
-		device_unregister(&srv->dev);
+		device_del(&srv->dev);
 		goto unlock;
 	}
 	dev_set_uevent_suppress(&srv->dev, false);
@@ -216,7 +208,7 @@ rtrs_srv_destroy_once_sysfs_root_folders(struct rtrs_srv_sess *sess)
 		kobject_del(srv->kobj_paths);
 		kobject_put(srv->kobj_paths);
 		mutex_unlock(&srv->paths_mutex);
-		device_unregister(&srv->dev);
+		device_del(&srv->dev);
 	} else {
 		mutex_unlock(&srv->paths_mutex);
 	}
......
@@ -1319,6 +1319,13 @@ static int rtrs_srv_get_next_cq_vector(struct rtrs_srv_sess *sess)
 	return sess->cur_cq_vector;
 }
 
+static void rtrs_srv_dev_release(struct device *dev)
+{
+	struct rtrs_srv *srv = container_of(dev, struct rtrs_srv, dev);
+
+	kfree(srv);
+}
+
 static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
 				    const uuid_t *paths_uuid)
 {
@@ -1336,6 +1343,8 @@ static struct rtrs_srv *__alloc_srv(struct rtrs_srv_ctx *ctx,
 	uuid_copy(&srv->paths_uuid, paths_uuid);
 	srv->queue_depth = sess_queue_depth;
 	srv->ctx = ctx;
+	device_initialize(&srv->dev);
+	srv->dev.release = rtrs_srv_dev_release;
 
 	srv->chunks = kcalloc(srv->queue_depth, sizeof(*srv->chunks),
 			      GFP_KERNEL);
......
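The rtrs fix above is the standard split-registration lifetime pattern: device_register() is device_initialize() plus device_add(), and once a device has been initialized the only correct way to free its container is the release callback via put_device(), so release must be wired up at initialization time in the allocation path. A minimal sketch of the pattern in kernel-module context (struct and names hypothetical, not the rtrs code itself):

	#include <linux/device.h>
	#include <linux/slab.h>

	struct my_obj {
		struct device dev;
	};

	static void my_obj_release(struct device *dev)
	{
		kfree(container_of(dev, struct my_obj, dev));
	}

	static struct my_obj *my_obj_alloc(void)
	{
		struct my_obj *obj = kzalloc(sizeof(*obj), GFP_KERNEL);

		if (!obj)
			return NULL;
		device_initialize(&obj->dev);		/* refcount starts at 1 */
		obj->dev.release = my_obj_release;	/* freeing is put_device()'s job */
		return obj;
	}

	/* Teardown after a successful device_add(): hide it, then drop the ref. */
	static void my_obj_destroy(struct my_obj *obj)
	{
		device_del(&obj->dev);
		put_device(&obj->dev);
	}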