Commit a7efd777 authored by Ram Amrani, committed by Doug Ledford

qedr: Add support for PD, PKEY and CQ verbs

Add support for protection domain and completion queue verbs.
Signed-off-by: Rajesh Borundia <rajesh.borundia@cavium.com>
Signed-off-by: Ram Amrani <Ram.Amrani@cavium.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent ac1b36e5
@@ -87,7 +87,14 @@ static int qedr_register_device(struct qedr_dev *dev)
	dev->ibdev.uverbs_cmd_mask = QEDR_UVERBS(GET_CONTEXT) |
				     QEDR_UVERBS(QUERY_DEVICE) |
				     QEDR_UVERBS(QUERY_PORT) |
				     QEDR_UVERBS(ALLOC_PD) |
				     QEDR_UVERBS(DEALLOC_PD) |
				     QEDR_UVERBS(CREATE_COMP_CHANNEL) |
				     QEDR_UVERBS(CREATE_CQ) |
				     QEDR_UVERBS(RESIZE_CQ) |
				     QEDR_UVERBS(DESTROY_CQ) |
				     QEDR_UVERBS(REQ_NOTIFY_CQ);

	dev->ibdev.phys_port_cnt = 1;
	dev->ibdev.num_comp_vectors = dev->num_cnq;
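The new entries extend uverbs_cmd_mask, which tells the IB uverbs layer which user-space commands this device accepts. A minimal sketch of the bit-building pattern, assuming QEDR_UVERBS() simply maps a command name onto its IB_USER_VERBS_CMD_* bit (the macro itself is defined elsewhere in main.c and is not part of this hunk):

/* Hedged sketch: assumed expansion of QEDR_UVERBS(); the real definition
 * lives outside this hunk.  Each verb name becomes one bit in the 64-bit
 * uverbs_cmd_mask consumed by the IB uverbs layer.
 */
#define QEDR_UVERBS(CMD_NAME) (1ull << IB_USER_VERBS_CMD_##CMD_NAME)

/* e.g. QEDR_UVERBS(CREATE_CQ) == 1ull << IB_USER_VERBS_CMD_CREATE_CQ */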
@@ -105,6 +112,16 @@ static int qedr_register_device(struct qedr_dev *dev)
	dev->ibdev.dealloc_ucontext = qedr_dealloc_ucontext;
	dev->ibdev.mmap = qedr_mmap;

	dev->ibdev.alloc_pd = qedr_alloc_pd;
	dev->ibdev.dealloc_pd = qedr_dealloc_pd;

	dev->ibdev.create_cq = qedr_create_cq;
	dev->ibdev.destroy_cq = qedr_destroy_cq;
	dev->ibdev.resize_cq = qedr_resize_cq;
	dev->ibdev.req_notify_cq = qedr_arm_cq;

	dev->ibdev.query_pkey = qedr_query_pkey;

	dev->ibdev.dma_device = &dev->pdev->dev;
	dev->ibdev.get_link_layer = qedr_link_layer;
@@ -322,6 +339,8 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
{
	u16 hw_comp_cons, sw_comp_cons;
	struct qedr_cnq *cnq = handle;
	struct regpair *cq_handle;
	struct qedr_cq *cq;

	qed_sb_ack(cnq->sb, IGU_INT_DISABLE, 0);
@@ -334,8 +353,36 @@ static irqreturn_t qedr_irq_handler(int irq, void *handle)
	rmb();
	while (sw_comp_cons != hw_comp_cons) {
		cq_handle = (struct regpair *)qed_chain_consume(&cnq->pbl);
		cq = (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
							   cq_handle->lo);

		if (cq == NULL) {
			DP_ERR(cnq->dev,
			       "Received NULL CQ cq_handle->hi=%d cq_handle->lo=%d sw_comp_cons=%d hw_comp_cons=%d\n",
			       cq_handle->hi, cq_handle->lo, sw_comp_cons,
			       hw_comp_cons);
			break;
		}

		if (cq->sig != QEDR_CQ_MAGIC_NUMBER) {
			DP_ERR(cnq->dev,
			       "Problem with cq signature, cq_handle->hi=%d cq_handle->lo=%d cq=%p\n",
			       cq_handle->hi, cq_handle->lo, cq);
			break;
		}

		cq->arm_flags = 0;

		if (cq->ibcq.comp_handler)
			(*cq->ibcq.comp_handler)(&cq->ibcq,
						 cq->ibcq.cq_context);
		sw_comp_cons = qed_chain_get_cons_idx(&cnq->pbl);
		cnq->n_comp++;
	}

	qed_ops->rdma_cnq_prod_update(cnq->dev->rdma_ctx, cnq->index,
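Each CNQ element is a regpair carrying the two 32-bit halves of the kernel pointer to the qedr_cq that completed; the handler rebuilds the pointer, sanity-checks the QEDR_CQ_MAGIC_NUMBER signature, and then fires the consumer's completion handler. A minimal sketch of the pointer reconstruction, assuming HILO_U64() is the usual hi/lo concatenation helper from the qed headers:

/* Hedged sketch: assumed shape of HILO_U64(); the driver relies on an
 * equivalent helper from the qed headers.  The firmware writes back the
 * CQ pointer it was given at CQ creation, split into two 32-bit halves.
 */
#define HILO_U64(hi, lo) (((u64)(hi) << 32) | (u64)(lo))

static struct qedr_cq *cnq_elem_to_cq(struct regpair *cq_handle)
{
	return (struct qedr_cq *)(uintptr_t)HILO_U64(cq_handle->hi,
						     cq_handle->lo);
}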
@@ -50,6 +50,10 @@
#define QEDR_MSG_INIT "INIT"
#define QEDR_MSG_MISC "MISC"
#define QEDR_MSG_CQ " CQ"
#define QEDR_MSG_MR " MR"
#define QEDR_CQ_MAGIC_NUMBER (0x11223344)
struct qedr_dev;
@@ -181,6 +185,12 @@ struct qedr_dev {
#define QEDR_ROCE_PKEY_TABLE_LEN 1
#define QEDR_ROCE_PKEY_DEFAULT 0xffff
struct qedr_pbl {
	struct list_head list_entry;
	void *va;
	dma_addr_t pa;
};
struct qedr_ucontext {
	struct ib_ucontext ibucontext;
	struct qedr_dev *dev;
@@ -196,6 +206,64 @@ struct qedr_ucontext {
	struct mutex mm_list_lock;
};
union db_prod64 {
	struct rdma_pwm_val32_data data;
	u64 raw;
};
enum qedr_cq_type {
	QEDR_CQ_TYPE_GSI,
	QEDR_CQ_TYPE_KERNEL,
	QEDR_CQ_TYPE_USER,
};
struct qedr_pbl_info {
	u32 num_pbls;
	u32 num_pbes;
	u32 pbl_size;
	u32 pbe_size;
	bool two_layered;
};
struct qedr_userq {
	struct ib_umem *umem;
	struct qedr_pbl_info pbl_info;
	struct qedr_pbl *pbl_tbl;
	u64 buf_addr;
	size_t buf_len;
};
struct qedr_cq {
	struct ib_cq ibcq;
	enum qedr_cq_type cq_type;
	u32 sig;
	u16 icid;
	/* Lock to protect multiple CQs */
	spinlock_t cq_lock;
	u8 arm_flags;
	struct qed_chain pbl;
	void __iomem *db_addr;
	union db_prod64 db;
	u8 pbl_toggle;
	union rdma_cqe *latest_cqe;
	union rdma_cqe *toggle_cqe;
	u32 cq_cons;
	struct qedr_userq q;
};
struct qedr_pd {
	struct ib_pd ibpd;
	u32 pd_id;
	struct qedr_ucontext *uctx;
};
struct qedr_mm {
	struct {
		u64 phy_addr;
@@ -215,4 +283,14 @@ static inline struct qedr_dev *get_qedr_dev(struct ib_device *ibdev)
	return container_of(ibdev, struct qedr_dev, ibdev);
}
static inline struct qedr_pd *get_qedr_pd(struct ib_pd *ibpd)
{
	return container_of(ibpd, struct qedr_pd, ibpd);
}

static inline struct qedr_cq *get_qedr_cq(struct ib_cq *ibcq)
{
	return container_of(ibcq, struct qedr_cq, ibcq);
}
#endif
@@ -47,6 +47,19 @@ struct rdma_cqe_responder {
	__le32 imm_data_hi;
	__le16 rq_cons;
	u8 flags;
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_MASK 0x1
#define RDMA_CQE_RESPONDER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_RESPONDER_TYPE_MASK 0x3
#define RDMA_CQE_RESPONDER_TYPE_SHIFT 1
#define RDMA_CQE_RESPONDER_INV_FLG_MASK 0x1
#define RDMA_CQE_RESPONDER_INV_FLG_SHIFT 3
#define RDMA_CQE_RESPONDER_IMM_FLG_MASK 0x1
#define RDMA_CQE_RESPONDER_IMM_FLG_SHIFT 4
#define RDMA_CQE_RESPONDER_RDMA_FLG_MASK 0x1
#define RDMA_CQE_RESPONDER_RDMA_FLG_SHIFT 5
#define RDMA_CQE_RESPONDER_RESERVED2_MASK 0x3
#define RDMA_CQE_RESPONDER_RESERVED2_SHIFT 6
	u8 status;
};
struct rdma_cqe_requester {
@@ -58,6 +71,12 @@ struct rdma_cqe_requester {
	__le32 reserved3;
	__le16 reserved4;
	u8 flags;
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_MASK 0x1
#define RDMA_CQE_REQUESTER_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_REQUESTER_TYPE_MASK 0x3
#define RDMA_CQE_REQUESTER_TYPE_SHIFT 1
#define RDMA_CQE_REQUESTER_RESERVED5_MASK 0x1F
#define RDMA_CQE_REQUESTER_RESERVED5_SHIFT 3
	u8 status;
};
@@ -66,6 +85,12 @@ struct rdma_cqe_common {
	struct regpair qp_handle;
	__le16 reserved1[7];
	u8 flags;
#define RDMA_CQE_COMMON_TOGGLE_BIT_MASK 0x1
#define RDMA_CQE_COMMON_TOGGLE_BIT_SHIFT 0
#define RDMA_CQE_COMMON_TYPE_MASK 0x3
#define RDMA_CQE_COMMON_TYPE_SHIFT 1
#define RDMA_CQE_COMMON_RESERVED2_MASK 0x1F
#define RDMA_CQE_COMMON_RESERVED2_SHIFT 3
	u8 status;
};
@@ -76,6 +101,45 @@ union rdma_cqe {
	struct rdma_cqe_common cmn;
};
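All three CQE flavors keep the toggle bit and the 2-bit type in the same positions of the flags byte, so the driver can look at the common view first and only then reinterpret the CQE as requester or responder. A small sketch of extracting those fields with a generic mask/shift helper; the helper below is written out as an assumption, not taken from this patch:

/* Hedged sketch: generic mask/shift extraction using the defines added
 * above; the driver uses an equivalent helper of its own.
 */
#define SKETCH_GET_FIELD(value, name) \
	(((value) >> name##_SHIFT) & name##_MASK)

static u8 cqe_toggle(struct rdma_cqe_common *cqe)
{
	return SKETCH_GET_FIELD(cqe->flags, RDMA_CQE_COMMON_TOGGLE_BIT);
}

static u8 cqe_type(struct rdma_cqe_common *cqe)
{
	return SKETCH_GET_FIELD(cqe->flags, RDMA_CQE_COMMON_TYPE);
}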
/* CQE requester status enumeration */
enum rdma_cqe_requester_status_enum {
	RDMA_CQE_REQ_STS_OK,
	RDMA_CQE_REQ_STS_BAD_RESPONSE_ERR,
	RDMA_CQE_REQ_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_REQ_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_REQ_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_REQ_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_REQ_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_REQ_STS_REMOTE_ACCESS_ERR,
	RDMA_CQE_REQ_STS_REMOTE_OPERATION_ERR,
	RDMA_CQE_REQ_STS_RNR_NAK_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_TRANSPORT_RETRY_CNT_ERR,
	RDMA_CQE_REQ_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_REQUESTER_STATUS_ENUM
};
/* CQE responder status enumeration */
enum rdma_cqe_responder_status_enum {
	RDMA_CQE_RESP_STS_OK,
	RDMA_CQE_RESP_STS_LOCAL_ACCESS_ERR,
	RDMA_CQE_RESP_STS_LOCAL_LENGTH_ERR,
	RDMA_CQE_RESP_STS_LOCAL_QP_OPERATION_ERR,
	RDMA_CQE_RESP_STS_LOCAL_PROTECTION_ERR,
	RDMA_CQE_RESP_STS_MEMORY_MGT_OPERATION_ERR,
	RDMA_CQE_RESP_STS_REMOTE_INVALID_REQUEST_ERR,
	RDMA_CQE_RESP_STS_WORK_REQUEST_FLUSHED_ERR,
	MAX_RDMA_CQE_RESPONDER_STATUS_ENUM
};
/* CQE type enumeration */
enum rdma_cqe_type {
	RDMA_CQE_TYPE_REQUESTER,
	RDMA_CQE_TYPE_RESPONDER_RQ,
	RDMA_CQE_TYPE_RESPONDER_SRQ,
	RDMA_CQE_TYPE_INVALID,
	MAX_RDMA_CQE_TYPE
};
struct rdma_sq_sge {
	__le32 length;
	struct regpair addr;
@@ -93,4 +157,19 @@ struct rdma_srq_sge {
	__le32 length;
	__le32 l_key;
};
/* Rdma doorbell data for CQ */
struct rdma_pwm_val32_data {
	__le16 icid;
	u8 agg_flags;
	u8 params;
#define RDMA_PWM_VAL32_DATA_AGG_CMD_MASK 0x3
#define RDMA_PWM_VAL32_DATA_AGG_CMD_SHIFT 0
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_MASK 0x1
#define RDMA_PWM_VAL32_DATA_BYPASS_EN_SHIFT 2
#define RDMA_PWM_VAL32_DATA_RESERVED_MASK 0x1F
#define RDMA_PWM_VAL32_DATA_RESERVED_SHIFT 3
	__le32 value;
};
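rdma_pwm_val32_data is the payload of the 64-bit CQ doorbell: together with the db_prod64 union added in qedr.h, it lets the driver fill icid, aggregation flags and the new consumer value field-by-field, then push the whole image with a single raw 64-bit store. A minimal sketch of that pattern, assuming a writeq of db.raw to the CQ's mapped doorbell address; the flag values the driver actually programs are not shown in this patch:

/* Hedged sketch of ringing the CQ doorbell: fill the structured view of
 * the union, then write the raw 64-bit image.  Values are placeholders,
 * not the ones the driver really uses.
 */
static void sketch_ring_cq_db(struct qedr_cq *cq, u32 cons)
{
	cq->db.data.icid = cpu_to_le16(cq->icid);
	cq->db.data.agg_flags = cq->arm_flags;
	cq->db.data.value = cpu_to_le32(cons);

	/* one 64-bit store of the composed doorbell value */
	writeq(cq->db.raw, cq->db_addr);
}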
#endif /* __QED_HSI_RDMA__ */
@@ -40,6 +40,8 @@ int qedr_modify_port(struct ib_device *, u8 port, int mask,
int qedr_query_gid(struct ib_device *, u8 port, int index, union ib_gid *gid);
int qedr_query_pkey(struct ib_device *, u8 port, u16 index, u16 *pkey);
struct ib_ucontext *qedr_alloc_ucontext(struct ib_device *, struct ib_udata *);
int qedr_dealloc_ucontext(struct ib_ucontext *);
@@ -49,4 +51,16 @@ int qedr_del_gid(struct ib_device *device, u8 port_num,
int qedr_add_gid(struct ib_device *device, u8 port_num,
		 unsigned int index, const union ib_gid *gid,
		 const struct ib_gid_attr *attr, void **context);
struct ib_pd *qedr_alloc_pd(struct ib_device *,
			    struct ib_ucontext *, struct ib_udata *);
int qedr_dealloc_pd(struct ib_pd *pd);

struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
			     const struct ib_cq_init_attr *attr,
			     struct ib_ucontext *ib_ctx,
			     struct ib_udata *udata);
int qedr_resize_cq(struct ib_cq *, int cqe, struct ib_udata *);
int qedr_destroy_cq(struct ib_cq *);
int qedr_arm_cq(struct ib_cq *ibcq, enum ib_cq_notify_flags flags);
#endif
@@ -50,4 +50,23 @@ struct qedr_alloc_ucontext_resp {
	__u32 sges_per_srq_wr;
	__u32 max_cqes;
};
struct qedr_alloc_pd_ureq {
	__u64 rsvd1;
};

struct qedr_alloc_pd_uresp {
	__u32 pd_id;
};

struct qedr_create_cq_ureq {
	__u64 addr;
	__u64 len;
};

struct qedr_create_cq_uresp {
	__u32 db_offset;
	__u16 icid;
};
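These structures are the user ABI for the new verbs: user space passes the CQ buffer's address and length in qedr_create_cq_ureq through ib_udata, and the kernel hands back the doorbell offset and icid in qedr_create_cq_uresp. A minimal sketch of the kernel-side exchange inside a create_cq handler, assuming the standard ib_copy_from_udata()/ib_copy_to_udata() helpers, with error handling trimmed; this is not the driver's actual qedr_create_cq() implementation:

/* Hedged sketch of the ureq/uresp round trip for a user-space CQ. */
static int sketch_cq_udata(struct ib_udata *udata,
			   struct qedr_create_cq_uresp *uresp)
{
	struct qedr_create_cq_ureq ureq;
	int rc;

	/* user space tells us where its CQ buffer lives */
	rc = ib_copy_from_udata(&ureq, udata, sizeof(ureq));
	if (rc)
		return rc;

	/* ... map ureq.addr/ureq.len, allocate the CQ, fill uresp ... */

	/* hand the doorbell offset and icid back to user space */
	return ib_copy_to_udata(udata, uresp, sizeof(*uresp));
}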
#endif /* __QEDR_USER_H__ */