Commit 93dda1e0 authored by David S. Miller

Merge branch 'qed-RDMA-and-infrastructure-for-iWARP'

Yuval Mintz says:

====================
qed*: RDMA and infrastructure for iWARP

This series focuses on RDMA in general with emphasis on required changes
toward adding iWARP support. The vast majority of the changes introduced
are in qed/qede, with a couple of small changes to qedr
[mentioned below].

The infrastructure changes:
 - Patch #1 adds the ability to pass PBL memory externally for a newly
created chain [a usage sketch follows this list].
 - Patches #4, #5 rename qede_roce.[ch] to qede_rdma.[ch] and change
prefixes from _roce_ to _rdma_, as the API between qede and qedr is
agnostic to the variant of the RDMA protocol used. These patches also
touch qedr [basically to align it with the renaming, nothing more].
 - Patch #7 replaces the current SPQ async mechanism with one that serves
registered callbacks [in preparation for iWARP, which adds another client
in need of this sort of functionality; see the callback sketch after the
change lists].
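
For illustration only, a minimal sketch of how a caller could use the
external-PBL option from patch #1; pbl_virt/pbl_phys are hypothetical
buffers the caller has already allocated and DMA-mapped, and passing NULL
as the new last argument keeps the existing internally-allocated behavior:

    struct qed_chain_ext_pbl ext_pbl;
    struct qed_chain chain;
    int rc;

    /* PBL memory allocated and DMA-mapped by the caller beforehand */
    ext_pbl.p_pbl_virt = pbl_virt;
    ext_pbl.p_pbl_phys = pbl_phys;

    rc = qed_chain_alloc(cdev,
                         QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                         QED_CHAIN_MODE_PBL,
                         QED_CHAIN_CNT_TYPE_U16,
                         num_elems, elem_size,
                         &chain, &ext_pbl); /* or NULL for the old flow */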

The non-infrastructure changes:
 - Patches #2, #3 contain DCB-related changes to better align RDMA with
configured DCB.
 - Patch #6 contains a minor [mostly theoretical] fix to the release flow.
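
To make patch #7 concrete, a hedged sketch of how a future protocol client
would hook the reworked SPQ async path; qed_iwarp_async_event and
PROTOCOLID_IWARP are hypothetical names standing in for the iWARP client
this series only prepares for:

    static int qed_iwarp_async_event(struct qed_hwfn *p_hwfn,
                                     u8 fw_event_code, u16 echo,
                                     union event_ring_data *data,
                                     u8 fw_return_code)
    {
        /* handle this protocol's EQEs */
        return 0;
    }

    /* on start: route the protocol's EQEs to the handler */
    qed_spq_register_async_cb(p_hwfn, PROTOCOLID_IWARP,
                              qed_iwarp_async_event);

    /* on teardown: detach it again */
    qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_IWARP);

qed_async_event_completion() then dispatches each EQE through the
per-protocol async_comp_cb array instead of a hard-coded switch.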

Changes from previous versions
------------------------------
 - V4: This is actually a repost of V3 due to some confusion regarding
   the sent cover-letter
 - V3: Add commit log message in #4 indicating change in header inclusion
 - V2: Add several inclusions into qede_rdma.h to have proper declarations
   of all variable types used in it
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 8917a777 6c9e80ea
@@ -37,7 +37,7 @@
 #include <linux/iommu.h>
 #include <linux/pci.h>
 #include <net/addrconf.h>
-#include <linux/qed/qede_roce.h>
+
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_if.h>
 #include "qedr.h"
@@ -276,7 +276,7 @@ static int qedr_alloc_resources(struct qedr_dev *dev)
                              QED_CHAIN_CNT_TYPE_U16,
                              n_entries,
                              sizeof(struct regpair *),
-                             &cnq->pbl);
+                             &cnq->pbl, NULL);
        if (rc)
            goto err4;
@@ -902,7 +902,7 @@ static void qedr_mac_address_change(struct qedr_dev *dev)
  * initialization done before RoCE driver notifies
  * event to stack.
  */
-static void qedr_notify(struct qedr_dev *dev, enum qede_roce_event event)
+static void qedr_notify(struct qedr_dev *dev, enum qede_rdma_event event)
 {
    switch (event) {
    case QEDE_UP:
@@ -931,12 +931,12 @@ static struct qedr_driver qedr_drv = {
 static int __init qedr_init_module(void)
 {
-   return qede_roce_register_driver(&qedr_drv);
+   return qede_rdma_register_driver(&qedr_drv);
 }

 static void __exit qedr_exit_module(void)
 {
-   qede_roce_unregister_driver(&qedr_drv);
+   qede_rdma_unregister_driver(&qedr_drv);
 }

 module_init(qedr_init_module);
...
@@ -37,7 +37,7 @@
 #include <linux/qed/qed_if.h>
 #include <linux/qed/qed_chain.h>
 #include <linux/qed/qed_roce_if.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qede_rdma.h>
 #include <linux/qed/roce_common.h>
 #include "qedr_hsi_rdma.h"
...
@@ -925,7 +925,7 @@ struct ib_cq *qedr_create_cq(struct ib_device *ibdev,
                             QED_CHAIN_CNT_TYPE_U32,
                             chain_entries,
                             sizeof(union rdma_cqe),
-                            &cq->pbl);
+                            &cq->pbl, NULL);
    if (rc)
        goto err1;
@@ -1413,7 +1413,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
                               QED_CHAIN_CNT_TYPE_U32,
                               n_sq_elems,
                               QEDR_SQE_ELEMENT_SIZE,
-                              &qp->sq.pbl);
+                              &qp->sq.pbl, NULL);
    if (rc)
        return rc;
@@ -1427,7 +1427,7 @@ qedr_roce_create_kernel_qp(struct qedr_dev *dev,
                               QED_CHAIN_CNT_TYPE_U32,
                               n_rq_elems,
                               QEDR_RQE_ELEMENT_SIZE,
-                              &qp->rq.pbl);
+                              &qp->rq.pbl, NULL);
    if (rc)
        return rc;
...
@@ -44,6 +44,7 @@
 #include "qed_hsi.h"
 #include "qed_sp.h"
 #include "qed_sriov.h"
+#include "qed_roce.h"
 #ifdef CONFIG_DCB
 #include <linux/qed/qed_eth_if.h>
 #endif
@@ -892,10 +893,33 @@ qed_dcbx_mib_update_event(struct qed_hwfn *p_hwfn,
            /* update storm FW with negotiation results */
            qed_sp_pf_update(p_hwfn);
+
+           /* for roce PFs, we may want to enable/disable DPM
+            * when DCBx change occurs
+            */
+           if (p_hwfn->hw_info.personality ==
+               QED_PCI_ETH_ROCE)
+               qed_roce_dpm_dcbx(p_hwfn, p_ptt);
        }
    }
+
    qed_dcbx_get_params(p_hwfn, &p_hwfn->p_dcbx_info->get, type);
+
+   if (type == QED_DCBX_OPERATIONAL_MIB) {
+       struct qed_dcbx_results *p_data;
+       u16 val;
+
+       /* Configure in NIG which protocols support EDPM and should
+        * honor PFC.
+        */
+       p_data = &p_hwfn->p_dcbx_info->results;
+       val = (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE].tc) |
+             (0x1 << p_data->arr[DCBX_PROTOCOL_ROCE_V2].tc);
+       val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT;
+       val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;
+
+       qed_wr(p_hwfn, p_ptt, NIG_REG_TX_EDPM_CTRL, val);
+   }
+
    qed_dcbx_aen(p_hwfn, type);

    return rc;
...
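A worked example of the NIG programming in the hunk above, with
illustrative TC values not taken from the series: if DCBx maps RoCE to
TC 2 and RoCE v2 to TC 3, then

    val = (0x1 << 2) | (0x1 << 3);                    /* 0x0c: per-TC bits */
    val <<= NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT; /* 0x18: TC_EN field starts at bit 1 */
    val |= NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN;           /* 0x19: global enable at bit 0 */

so qed_wr() writes 0x19 to NIG_REG_TX_EDPM_CTRL, enabling EDPM only for
the two RoCE traffic classes.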
@@ -3075,12 +3075,15 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
    }

    pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-   dma_free_coherent(&cdev->pdev->dev,
-                     pbl_size,
-                     p_chain->pbl_sp.p_virt_table,
-                     p_chain->pbl_sp.p_phys_table);
+
+   if (!p_chain->b_external_pbl)
+       dma_free_coherent(&cdev->pdev->dev,
+                         pbl_size,
+                         p_chain->pbl_sp.p_virt_table,
+                         p_chain->pbl_sp.p_phys_table);
 out:
    vfree(p_chain->pbl.pp_virt_addr_tbl);
+   p_chain->pbl.pp_virt_addr_tbl = NULL;
 }

 void qed_chain_free(struct qed_dev *cdev, struct qed_chain *p_chain)
@@ -3174,7 +3177,10 @@ qed_chain_alloc_single(struct qed_dev *cdev, struct qed_chain *p_chain)
    return 0;
 }

-static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
+static int
+qed_chain_alloc_pbl(struct qed_dev *cdev,
+                    struct qed_chain *p_chain,
+                    struct qed_chain_ext_pbl *ext_pbl)
 {
    u32 page_cnt = p_chain->page_cnt, size, i;
    dma_addr_t p_phys = 0, p_pbl_phys = 0;
@@ -3194,8 +3200,16 @@ static int qed_chain_alloc_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
     * should be saved to allow its freeing during the error flow.
     */
    size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
-   p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
-                                   size, &p_pbl_phys, GFP_KERNEL);
+
+   if (!ext_pbl) {
+       p_pbl_virt = dma_alloc_coherent(&cdev->pdev->dev,
+                                       size, &p_pbl_phys, GFP_KERNEL);
+   } else {
+       p_pbl_virt = ext_pbl->p_pbl_virt;
+       p_pbl_phys = ext_pbl->p_pbl_phys;
+       p_chain->b_external_pbl = true;
+   }
+
    qed_chain_init_pbl_mem(p_chain, p_pbl_virt, p_pbl_phys,
                           pp_virt_addr_tbl);
    if (!p_pbl_virt)
@@ -3228,7 +3242,10 @@ int qed_chain_alloc(struct qed_dev *cdev,
            enum qed_chain_use_mode intended_use,
            enum qed_chain_mode mode,
            enum qed_chain_cnt_type cnt_type,
-           u32 num_elems, size_t elem_size, struct qed_chain *p_chain)
+           u32 num_elems,
+           size_t elem_size,
+           struct qed_chain *p_chain,
+           struct qed_chain_ext_pbl *ext_pbl)
 {
    u32 page_cnt;
    int rc = 0;
@@ -3259,7 +3276,7 @@ int qed_chain_alloc(struct qed_dev *cdev,
        rc = qed_chain_alloc_single(cdev, p_chain);
        break;
    case QED_CHAIN_MODE_PBL:
-       rc = qed_chain_alloc_pbl(cdev, p_chain);
+       rc = qed_chain_alloc_pbl(cdev, p_chain, ext_pbl);
        break;
    }
    if (rc)
...
@@ -307,6 +307,7 @@ int qed_dmae_host2host(struct qed_hwfn *p_hwfn,
  * @param num_elems
  * @param elem_size
  * @param p_chain
+ * @param ext_pbl - a possible external PBL
  *
  * @return int
  */
@@ -315,7 +316,9 @@ qed_chain_alloc(struct qed_dev *cdev,
        enum qed_chain_use_mode intended_use,
        enum qed_chain_mode mode,
        enum qed_chain_cnt_type cnt_type,
-       u32 num_elems, size_t elem_size, struct qed_chain *p_chain);
+       u32 num_elems,
+       size_t elem_size,
+       struct qed_chain *p_chain, struct qed_chain_ext_pbl *ext_pbl);

 /**
  * @brief qed_chain_free - Free chain DMA memory
...
@@ -62,6 +62,22 @@
 #include "qed_sriov.h"
 #include "qed_reg_addr.h"

+static int
+qed_iscsi_async_event(struct qed_hwfn *p_hwfn,
+                      u8 fw_event_code,
+                      u16 echo, union event_ring_data *data, u8 fw_return_code)
+{
+   if (p_hwfn->p_iscsi_info->event_cb) {
+       struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
+
+       return p_iscsi->event_cb(p_iscsi->event_context,
+                                fw_event_code, data);
+   } else {
+       DP_NOTICE(p_hwfn, "iSCSI async completion is not set\n");
+       return -EINVAL;
+   }
+}
+
 struct qed_iscsi_conn {
    struct list_head list_entry;
    bool free_on_delete;
@@ -265,6 +281,9 @@ qed_sp_iscsi_func_start(struct qed_hwfn *p_hwfn,
    p_hwfn->p_iscsi_info->event_context = event_context;
    p_hwfn->p_iscsi_info->event_cb = async_event_cb;

+   qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ISCSI,
+                             qed_iscsi_async_event);
+
    return qed_spq_post(p_hwfn, p_ent, NULL);
 }
@@ -631,7 +650,10 @@ static int qed_sp_iscsi_func_stop(struct qed_hwfn *p_hwfn,
    p_ramrod = &p_ent->ramrod.iscsi_destroy;
    p_ramrod->hdr.op_code = ISCSI_RAMROD_CMD_ID_DESTROY_FUNC;

-   return qed_spq_post(p_hwfn, p_ent, NULL);
+   rc = qed_spq_post(p_hwfn, p_ent, NULL);
+
+   qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ISCSI);
+   return rc;
 }

 static void __iomem *qed_iscsi_get_db_addr(struct qed_hwfn *p_hwfn, u32 cid)
@@ -752,7 +774,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
                         QED_CHAIN_USE_TO_CONSUME_PRODUCE,
                         QED_CHAIN_MODE_PBL,
                         QED_CHAIN_CNT_TYPE_U16,
-                        r2tq_num_elements, 0x80, &p_conn->r2tq);
+                        r2tq_num_elements, 0x80, &p_conn->r2tq, NULL);
    if (rc)
        goto nomem_r2tq;
@@ -763,7 +785,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
                         QED_CHAIN_MODE_PBL,
                         QED_CHAIN_CNT_TYPE_U16,
                         uhq_num_elements,
-                        sizeof(struct iscsi_uhqe), &p_conn->uhq);
+                        sizeof(struct iscsi_uhqe), &p_conn->uhq, NULL);
    if (rc)
        goto nomem_uhq;
@@ -773,7 +795,7 @@ static int qed_iscsi_allocate_connection(struct qed_hwfn *p_hwfn,
                         QED_CHAIN_MODE_PBL,
                         QED_CHAIN_CNT_TYPE_U16,
                         xhq_num_elements,
-                        sizeof(struct iscsi_xhqe), &p_conn->xhq);
+                        sizeof(struct iscsi_xhqe), &p_conn->xhq, NULL);
    if (rc)
        goto nomem;
...
@@ -1056,7 +1056,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
                     QED_CHAIN_CNT_TYPE_U16,
                     p_ll2_info->input.rx_num_desc,
                     sizeof(struct core_rx_bd),
-                    &p_ll2_info->rx_queue.rxq_chain);
+                    &p_ll2_info->rx_queue.rxq_chain, NULL);
    if (rc) {
        DP_NOTICE(p_hwfn, "Failed to allocate ll2 rxq chain\n");
        goto out;
@@ -1078,7 +1078,7 @@ qed_ll2_acquire_connection_rx(struct qed_hwfn *p_hwfn,
                     QED_CHAIN_CNT_TYPE_U16,
                     p_ll2_info->input.rx_num_desc,
                     sizeof(struct core_rx_fast_path_cqe),
-                    &p_ll2_info->rx_queue.rcq_chain);
+                    &p_ll2_info->rx_queue.rcq_chain, NULL);
    if (rc) {
        DP_NOTICE(p_hwfn, "Failed to allocate ll2 rcq chain\n");
        goto out;
@@ -1108,7 +1108,7 @@ static int qed_ll2_acquire_connection_tx(struct qed_hwfn *p_hwfn,
                     QED_CHAIN_CNT_TYPE_U16,
                     p_ll2_info->input.tx_num_desc,
                     sizeof(struct core_tx_bd),
-                    &p_ll2_info->tx_queue.txq_chain);
+                    &p_ll2_info->tx_queue.txq_chain, NULL);
    if (rc)
        goto out;
...
@@ -1564,6 +1564,12 @@
 #define NIG_REG_TSGEN_FREECNT_UPDATE_K2 0x509008UL
 #define CNIG_REG_NIG_PORT0_CONF_K2 0x218200UL
+#define NIG_REG_TX_EDPM_CTRL 0x501f0cUL
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN (0x1 << 0)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_EN_SHIFT 0
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN (0xff << 1)
+#define NIG_REG_TX_EDPM_CTRL_TX_EDPM_TC_EN_SHIFT 1
 #define PRS_REG_SEARCH_GFT 0x1f11bcUL
 #define PRS_REG_CM_HDR_GFT 0x1f11c8UL
 #define PRS_REG_GFT_CAM 0x1f1100UL
...
@@ -68,12 +68,14 @@
 static void qed_roce_free_real_icid(struct qed_hwfn *p_hwfn, u16 icid);

-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
-                          u8 fw_event_code, union rdma_eqe_data *rdma_data)
+static int
+qed_roce_async_event(struct qed_hwfn *p_hwfn,
+                     u8 fw_event_code,
+                     u16 echo, union event_ring_data *data, u8 fw_return_code)
 {
    if (fw_event_code == ROCE_ASYNC_EVENT_DESTROY_QP_DONE) {
        u16 icid =
-           (u16)le32_to_cpu(rdma_data->rdma_destroy_qp_data.cid);
+           (u16)le32_to_cpu(data->rdma_data.rdma_destroy_qp_data.cid);

        /* icid release in this async event can occur only if the icid
         * was offloaded to the FW. In case it wasn't offloaded this is
@@ -85,8 +87,10 @@ void qed_roce_async_event(struct qed_hwfn *p_hwfn,
        events->affiliated_event(p_hwfn->p_rdma_info->events.context,
                                 fw_event_code,
-                                &rdma_data->async_handle);
+                                (void *)&data->rdma_data.async_handle);
    }
+
+   return 0;
 }

 static int qed_rdma_bmap_alloc(struct qed_hwfn *p_hwfn,
@@ -162,6 +166,11 @@ static int qed_bmap_test_id(struct qed_hwfn *p_hwfn,
    return test_bit(id_num, bmap->bitmap);
 }

+static bool qed_bmap_is_empty(struct qed_bmap *bmap)
+{
+   return bmap->max_count == find_first_bit(bmap->bitmap, bmap->max_count);
+}
+
 static u32 qed_rdma_get_sb_id(void *p_hwfn, u32 rel_sb_id)
 {
    /* First sb id for RoCE is after all the l2 sb */
@@ -367,22 +376,7 @@ static void qed_rdma_bmap_free(struct qed_hwfn *p_hwfn,
 static void qed_rdma_resc_free(struct qed_hwfn *p_hwfn)
 {
-   struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
    struct qed_rdma_info *p_rdma_info = p_hwfn->p_rdma_info;
-   int wait_count = 0;
-
-   /* when destroying a RoCE QP the control is returned to the user after
-    * the synchronous part. The asynchronous part may take a little longer.
-    * We delay for a short while if an async destroy QP is still expected.
-    * Beyond the added delay we clear the bitmap anyway.
-    */
-   while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
-       msleep(100);
-       if (wait_count++ > 20) {
-           DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
-           break;
-       }
-   }

    qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->cid_map, 1);
    qed_rdma_bmap_free(p_hwfn, &p_hwfn->p_rdma_info->pd_map, 1);
@@ -696,9 +690,32 @@ static int qed_rdma_setup(struct qed_hwfn *p_hwfn,
    if (rc)
        return rc;

+   qed_spq_register_async_cb(p_hwfn, PROTOCOLID_ROCE,
+                             qed_roce_async_event);
+
    return qed_rdma_start_fw(p_hwfn, params, p_ptt);
 }

+void qed_roce_stop(struct qed_hwfn *p_hwfn)
+{
+   struct qed_bmap *rcid_map = &p_hwfn->p_rdma_info->real_cid_map;
+   int wait_count = 0;
+
+   /* when destroying a RoCE QP the control is returned to the user after
+    * the synchronous part. The asynchronous part may take a little longer.
+    * We delay for a short while if an async destroy QP is still expected.
+    * Beyond the added delay we clear the bitmap anyway.
+    */
+   while (bitmap_weight(rcid_map->bitmap, rcid_map->max_count)) {
+       msleep(100);
+       if (wait_count++ > 20) {
+           DP_NOTICE(p_hwfn, "cid bitmap wait timed out\n");
+           break;
+       }
+   }
+
+   qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_ROCE);
+}
+
 static int qed_rdma_stop(void *rdma_cxt)
 {
    struct qed_hwfn *p_hwfn = (struct qed_hwfn *)rdma_cxt;
@@ -728,6 +745,7 @@ static int qed_rdma_stop(void *rdma_cxt)
    qed_wr(p_hwfn, p_ptt, PRS_REG_LIGHT_L2_ETHERTYPE_EN,
           (ll2_ethertype_en & 0xFFFE));

+   qed_roce_stop(p_hwfn);
    qed_ptt_release(p_hwfn, p_ptt);

    /* Get SPQ entry */
@@ -2638,6 +2656,23 @@ static void *qed_rdma_get_rdma_ctx(struct qed_dev *cdev)
    return QED_LEADING_HWFN(cdev);
 }

+static bool qed_rdma_allocated_qps(struct qed_hwfn *p_hwfn)
+{
+   bool result;
+
+   /* if rdma info has not been allocated, naturally there are no qps */
+   if (!p_hwfn->p_rdma_info)
+       return false;
+
+   spin_lock_bh(&p_hwfn->p_rdma_info->lock);
+   if (!p_hwfn->p_rdma_info->cid_map.bitmap)
+       result = false;
+   else
+       result = !qed_bmap_is_empty(&p_hwfn->p_rdma_info->cid_map);
+   spin_unlock_bh(&p_hwfn->p_rdma_info->lock);
+   return result;
+}
+
 static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
    u32 val;
@@ -2650,6 +2685,20 @@ static void qed_rdma_dpm_conf(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
           val, p_hwfn->dcbx_no_edpm, p_hwfn->db_bar_no_edpm);
 }

+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
+{
+   u8 val;
+
+   /* if any QPs are already active, we want to disable DPM, since their
+    * context information contains information from before the latest DCBx
+    * update. Otherwise enable it.
+    */
+   val = qed_rdma_allocated_qps(p_hwfn) ? true : false;
+   p_hwfn->dcbx_no_edpm = (u8)val;
+
+   qed_rdma_dpm_conf(p_hwfn, p_ptt);
+}
+
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt)
 {
    p_hwfn->db_bar_no_edpm = true;
...
@@ -168,12 +168,11 @@ struct qed_rdma_qp {
 #if IS_ENABLED(CONFIG_QED_RDMA)
 void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
-void qed_roce_async_event(struct qed_hwfn *p_hwfn,
-                          u8 fw_event_code, union rdma_eqe_data *rdma_data);
+void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt);
 #else
 static inline void qed_rdma_dpm_bar(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) {}
-static inline void qed_roce_async_event(struct qed_hwfn *p_hwfn,
-                                        u8 fw_event_code,
-                                        union rdma_eqe_data *rdma_data) {}
+
+static inline void qed_roce_dpm_dcbx(struct qed_hwfn *p_hwfn,
+                                     struct qed_ptt *p_ptt) {}
 #endif

 #endif
...
@@ -174,6 +174,22 @@ struct qed_consq {
    struct qed_chain chain;
 };

+typedef int
+(*qed_spq_async_comp_cb)(struct qed_hwfn *p_hwfn,
+                         u8 opcode,
+                         u16 echo,
+                         union event_ring_data *data,
+                         u8 fw_return_code);
+
+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+                          enum protocol_type protocol_id,
+                          qed_spq_async_comp_cb cb);
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+                            enum protocol_type protocol_id);
+
 struct qed_spq {
    spinlock_t lock; /* SPQ lock */
@@ -203,6 +219,7 @@ struct qed_spq {
    u32 comp_count;

    u32 cid;
+   qed_spq_async_comp_cb async_comp_cb[MAX_PROTOCOL_TYPE];
 };

 /**
...
@@ -302,32 +302,16 @@ static int
 qed_async_event_completion(struct qed_hwfn *p_hwfn,
                            struct event_ring_entry *p_eqe)
 {
-   switch (p_eqe->protocol_id) {
-#if IS_ENABLED(CONFIG_QED_RDMA)
-   case PROTOCOLID_ROCE:
-       qed_roce_async_event(p_hwfn, p_eqe->opcode,
-                            &p_eqe->data.rdma_data);
-       return 0;
-#endif
-   case PROTOCOLID_COMMON:
-       return qed_sriov_eqe_event(p_hwfn,
-                                  p_eqe->opcode,
-                                  p_eqe->echo, &p_eqe->data);
-   case PROTOCOLID_ISCSI:
-       if (!IS_ENABLED(CONFIG_QED_ISCSI))
-           return -EINVAL;
-       if (p_hwfn->p_iscsi_info->event_cb) {
-           struct qed_iscsi_info *p_iscsi = p_hwfn->p_iscsi_info;
-
-           return p_iscsi->event_cb(p_iscsi->event_context,
-                                    p_eqe->opcode, &p_eqe->data);
-       } else {
-           DP_NOTICE(p_hwfn,
-                     "iSCSI async completion is not set\n");
-           return -EINVAL;
-       }
-   default:
+   qed_spq_async_comp_cb cb;
+
+   if (!p_hwfn->p_spq || (p_eqe->protocol_id >= MAX_PROTOCOL_TYPE))
+       return -EINVAL;
+
+   cb = p_hwfn->p_spq->async_comp_cb[p_eqe->protocol_id];
+   if (cb) {
+       return cb(p_hwfn, p_eqe->opcode, p_eqe->echo,
+                 &p_eqe->data, p_eqe->fw_return_code);
+   } else {
        DP_NOTICE(p_hwfn,
                  "Unknown Async completion for protocol: %d\n",
                  p_eqe->protocol_id);
@@ -335,6 +319,28 @@ qed_async_event_completion(struct qed_hwfn *p_hwfn,
    }
 }

+int
+qed_spq_register_async_cb(struct qed_hwfn *p_hwfn,
+                          enum protocol_type protocol_id,
+                          qed_spq_async_comp_cb cb)
+{
+   if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+       return -EINVAL;
+
+   p_hwfn->p_spq->async_comp_cb[protocol_id] = cb;
+   return 0;
+}
+
+void
+qed_spq_unregister_async_cb(struct qed_hwfn *p_hwfn,
+                            enum protocol_type protocol_id)
+{
+   if (!p_hwfn->p_spq || (protocol_id >= MAX_PROTOCOL_TYPE))
+       return;
+
+   p_hwfn->p_spq->async_comp_cb[protocol_id] = NULL;
+}
+
 /***************************************************************************
  * EQ API
  ***************************************************************************/
@@ -419,7 +425,7 @@ int qed_eq_alloc(struct qed_hwfn *p_hwfn, u16 num_elem)
              QED_CHAIN_CNT_TYPE_U16,
              num_elem,
              sizeof(union event_ring_element),
-             &p_eq->chain))
+             &p_eq->chain, NULL))
        goto eq_allocate_fail;

    /* register EQ completion on the SP SB */
@@ -547,7 +553,7 @@ int qed_spq_alloc(struct qed_hwfn *p_hwfn)
              QED_CHAIN_CNT_TYPE_U16,
              0,   /* N/A when the mode is SINGLE */
              sizeof(struct slow_path_element),
-             &p_spq->chain))
+             &p_spq->chain, NULL))
        goto spq_allocate_fail;

    /* allocate and fill the SPQ elements (incl. ramrod data list) */
@@ -953,7 +959,7 @@ int qed_consq_alloc(struct qed_hwfn *p_hwfn)
              QED_CHAIN_MODE_PBL,
              QED_CHAIN_CNT_TYPE_U16,
              QED_CHAIN_PAGE_SIZE / 0x80,
-             0x80, &p_consq->chain))
+             0x80, &p_consq->chain, NULL))
        goto consq_allocate_fail;

    p_hwfn->p_consq = p_consq;
...
@@ -44,6 +44,11 @@
 #include "qed_sp.h"
 #include "qed_sriov.h"
 #include "qed_vf.h"

+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+                               u8 opcode,
+                               __le16 echo,
+                               union event_ring_data *data, u8 fw_return_code);
+
 static u8 qed_vf_calculate_legacy(struct qed_vf_info *p_vf)
 {
@@ -565,6 +570,9 @@ int qed_iov_alloc(struct qed_hwfn *p_hwfn)
    p_hwfn->pf_iov_info = p_sriov;

+   qed_spq_register_async_cb(p_hwfn, PROTOCOLID_COMMON,
+                             qed_sriov_eqe_event);
+
    return qed_iov_allocate_vfdb(p_hwfn);
 }
@@ -578,6 +586,8 @@ void qed_iov_setup(struct qed_hwfn *p_hwfn)
 void qed_iov_free(struct qed_hwfn *p_hwfn)
 {
+   qed_spq_unregister_async_cb(p_hwfn, PROTOCOLID_COMMON);
+
    if (IS_PF_SRIOV_ALLOC(p_hwfn)) {
        qed_iov_free_vfdb(p_hwfn);
        kfree(p_hwfn->pf_iov_info);
@@ -3833,8 +3843,10 @@ static void qed_sriov_vfpf_malicious(struct qed_hwfn *p_hwfn,
    }
 }

-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-                        u8 opcode, __le16 echo, union event_ring_data *data)
+static int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
+                               u8 opcode,
+                               __le16 echo,
+                               union event_ring_data *data, u8 fw_return_code)
 {
    switch (opcode) {
    case COMMON_EVENT_VF_PF_CHANNEL:
...
@@ -343,17 +343,6 @@ void qed_iov_free(struct qed_hwfn *p_hwfn);
  */
 void qed_iov_free_hw_info(struct qed_dev *cdev);

-/**
- * @brief qed_sriov_eqe_event - handle async sriov event arrived on eqe.
- *
- * @param p_hwfn
- * @param opcode
- * @param echo
- * @param data
- */
-int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-                        u8 opcode, __le16 echo, union event_ring_data *data);
-
 /**
  * @brief Mark structs of vfs that have been FLR-ed.
  *
@@ -418,13 +407,6 @@ static inline void qed_iov_free_hw_info(struct qed_dev *cdev)
 {
 }

-static inline int qed_sriov_eqe_event(struct qed_hwfn *p_hwfn,
-                                      u8 opcode,
-                                      __le16 echo, union event_ring_data *data)
-{
-   return -EINVAL;
-}
-
 static inline bool qed_iov_mark_vf_flr(struct qed_hwfn *p_hwfn,
                                        u32 *disabled_vfs)
 {
...
@@ -2,4 +2,4 @@ obj-$(CONFIG_QEDE) := qede.o

 qede-y := qede_main.o qede_fp.o qede_filter.o qede_ethtool.o qede_ptp.o
 qede-$(CONFIG_DCB) += qede_dcbnl.o
-qede-$(CONFIG_QED_RDMA) += qede_roce.o
+qede-$(CONFIG_QED_RDMA) += qede_rdma.o
...
@@ -40,6 +40,7 @@
 #include <linux/kernel.h>
 #include <linux/mutex.h>
 #include <linux/bpf.h>
+#include <linux/qed/qede_rdma.h>
 #include <linux/io.h>
 #ifdef CONFIG_RFS_ACCEL
 #include <linux/cpu_rmap.h>
@@ -153,8 +154,8 @@ struct qede_vlan {
 struct qede_rdma_dev {
    struct qedr_dev *qedr_dev;
    struct list_head entry;
-   struct list_head roce_event_list;
-   struct workqueue_struct *roce_wq;
+   struct list_head rdma_event_list;
+   struct workqueue_struct *rdma_wq;
 };

 struct qede_ptp;
...
@@ -60,7 +60,6 @@
 #include <net/ip6_checksum.h>
 #include <linux/bitops.h>
 #include <linux/vmalloc.h>
-#include <linux/qed/qede_roce.h>
 #include "qede.h"
 #include "qede_ptp.h"
@@ -263,7 +262,7 @@ static int qede_netdev_event(struct notifier_block *this, unsigned long event,
        break;
    case NETDEV_CHANGEADDR:
        edev = netdev_priv(ndev);
-       qede_roce_event_changeaddr(edev);
+       qede_rdma_event_changeaddr(edev);
        break;
    }
@@ -978,7 +977,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
    qede_init_ndev(edev);

-   rc = qede_roce_dev_add(edev);
+   rc = qede_rdma_dev_add(edev);
    if (rc)
        goto err3;
@@ -1014,7 +1013,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
    return 0;

 err4:
-   qede_roce_dev_remove(edev);
+   qede_rdma_dev_remove(edev);
 err3:
    free_netdev(edev->ndev);
 err2:
@@ -1065,7 +1064,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
    qede_ptp_disable(edev);

-   qede_roce_dev_remove(edev);
+   qede_rdma_dev_remove(edev);

    edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -1317,8 +1316,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
                        QED_CHAIN_CNT_TYPE_U16,
                        RX_RING_SIZE,
                        sizeof(struct eth_rx_bd),
-                       &rxq->rx_bd_ring);
-
+                       &rxq->rx_bd_ring, NULL);
    if (rc)
        goto err;
@@ -1329,7 +1327,7 @@ static int qede_alloc_mem_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq)
                        QED_CHAIN_CNT_TYPE_U16,
                        RX_RING_SIZE,
                        sizeof(union eth_rx_cqe),
-                       &rxq->rx_comp_ring);
+                       &rxq->rx_comp_ring, NULL);
    if (rc)
        goto err;
@@ -1387,7 +1385,8 @@ static int qede_alloc_mem_txq(struct qede_dev *edev, struct qede_tx_queue *txq)
                        QED_CHAIN_MODE_PBL,
                        QED_CHAIN_CNT_TYPE_U16,
                        txq->num_tx_buffers,
-                       sizeof(*p_virt), &txq->tx_pbl);
+                       sizeof(*p_virt),
+                       &txq->tx_pbl, NULL);
    if (rc)
        goto err;
@@ -1965,7 +1964,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode,
    edev->state = QEDE_STATE_CLOSED;

-   qede_roce_dev_event_close(edev);
+   qede_rdma_dev_event_close(edev);

    /* Close OS Tx */
    netif_tx_disable(edev->ndev);
@@ -2070,7 +2069,7 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode,
    link_params.link_up = true;
    edev->ops->common->set_link(edev->cdev, &link_params);

-   qede_roce_dev_event_open(edev);
+   qede_rdma_dev_event_open(edev);

    edev->state = QEDE_STATE_OPEN;
...
@@ -33,19 +33,19 @@
 #include <linux/netdevice.h>
 #include <linux/list.h>
 #include <linux/mutex.h>
-#include <linux/qed/qede_roce.h>
+#include <linux/qed/qede_rdma.h>
 #include "qede.h"

 static struct qedr_driver *qedr_drv;
 static LIST_HEAD(qedr_dev_list);
 static DEFINE_MUTEX(qedr_dev_list_lock);

-bool qede_roce_supported(struct qede_dev *dev)
+bool qede_rdma_supported(struct qede_dev *dev)
 {
    return dev->dev_info.common.rdma_supported;
 }

-static void _qede_roce_dev_add(struct qede_dev *edev)
+static void _qede_rdma_dev_add(struct qede_dev *edev)
 {
    if (!qedr_drv)
        return;
@@ -54,11 +54,11 @@ static void _qede_roce_dev_add(struct qede_dev *edev)
         edev->ndev);
 }

-static int qede_roce_create_wq(struct qede_dev *edev)
+static int qede_rdma_create_wq(struct qede_dev *edev)
 {
-   INIT_LIST_HEAD(&edev->rdma_info.roce_event_list);
-   edev->rdma_info.roce_wq = create_singlethread_workqueue("roce_wq");
-   if (!edev->rdma_info.roce_wq) {
+   INIT_LIST_HEAD(&edev->rdma_info.rdma_event_list);
+   edev->rdma_info.rdma_wq = create_singlethread_workqueue("rdma_wq");
+   if (!edev->rdma_info.rdma_wq) {
        DP_NOTICE(edev, "qedr: Could not create workqueue\n");
        return -ENOMEM;
    }
@@ -66,14 +66,14 @@ static int qede_roce_create_wq(struct qede_dev *edev)
    return 0;
 }

-static void qede_roce_cleanup_event(struct qede_dev *edev)
+static void qede_rdma_cleanup_event(struct qede_dev *edev)
 {
-   struct list_head *head = &edev->rdma_info.roce_event_list;
-   struct qede_roce_event_work *event_node;
+   struct list_head *head = &edev->rdma_info.rdma_event_list;
+   struct qede_rdma_event_work *event_node;

-   flush_workqueue(edev->rdma_info.roce_wq);
+   flush_workqueue(edev->rdma_info.rdma_wq);

    while (!list_empty(head)) {
-       event_node = list_entry(head->next, struct qede_roce_event_work,
+       event_node = list_entry(head->next, struct qede_rdma_event_work,
                                list);
        cancel_work_sync(&event_node->work);
        list_del(&event_node->list);
@@ -81,85 +81,85 @@ static void qede_roce_cleanup_event(struct qede_dev *edev)
    }
 }

-static void qede_roce_destroy_wq(struct qede_dev *edev)
+static void qede_rdma_destroy_wq(struct qede_dev *edev)
 {
-   qede_roce_cleanup_event(edev);
-   destroy_workqueue(edev->rdma_info.roce_wq);
+   qede_rdma_cleanup_event(edev);
+   destroy_workqueue(edev->rdma_info.rdma_wq);
 }

-int qede_roce_dev_add(struct qede_dev *edev)
+int qede_rdma_dev_add(struct qede_dev *edev)
 {
    int rc = 0;

-   if (qede_roce_supported(edev)) {
-       rc = qede_roce_create_wq(edev);
+   if (qede_rdma_supported(edev)) {
+       rc = qede_rdma_create_wq(edev);
        if (rc)
            return rc;

        INIT_LIST_HEAD(&edev->rdma_info.entry);
        mutex_lock(&qedr_dev_list_lock);
        list_add_tail(&edev->rdma_info.entry, &qedr_dev_list);
-       _qede_roce_dev_add(edev);
+       _qede_rdma_dev_add(edev);
        mutex_unlock(&qedr_dev_list_lock);
    }

    return rc;
 }

-static void _qede_roce_dev_remove(struct qede_dev *edev)
+static void _qede_rdma_dev_remove(struct qede_dev *edev)
 {
    if (qedr_drv && qedr_drv->remove && edev->rdma_info.qedr_dev)
        qedr_drv->remove(edev->rdma_info.qedr_dev);
    edev->rdma_info.qedr_dev = NULL;
 }

-void qede_roce_dev_remove(struct qede_dev *edev)
+void qede_rdma_dev_remove(struct qede_dev *edev)
 {
-   if (!qede_roce_supported(edev))
+   if (!qede_rdma_supported(edev))
        return;

-   qede_roce_destroy_wq(edev);
+   qede_rdma_destroy_wq(edev);
    mutex_lock(&qedr_dev_list_lock);
-   _qede_roce_dev_remove(edev);
+   _qede_rdma_dev_remove(edev);
    list_del(&edev->rdma_info.entry);
    mutex_unlock(&qedr_dev_list_lock);
 }

-static void _qede_roce_dev_open(struct qede_dev *edev)
+static void _qede_rdma_dev_open(struct qede_dev *edev)
 {
    if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
        qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_UP);
 }

-static void qede_roce_dev_open(struct qede_dev *edev)
+static void qede_rdma_dev_open(struct qede_dev *edev)
 {
-   if (!qede_roce_supported(edev))
+   if (!qede_rdma_supported(edev))
        return;

    mutex_lock(&qedr_dev_list_lock);
-   _qede_roce_dev_open(edev);
+   _qede_rdma_dev_open(edev);
    mutex_unlock(&qedr_dev_list_lock);
 }

-static void _qede_roce_dev_close(struct qede_dev *edev)
+static void _qede_rdma_dev_close(struct qede_dev *edev)
 {
    if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
        qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_DOWN);
 }

-static void qede_roce_dev_close(struct qede_dev *edev)
+static void qede_rdma_dev_close(struct qede_dev *edev)
 {
-   if (!qede_roce_supported(edev))
+   if (!qede_rdma_supported(edev))
        return;

    mutex_lock(&qedr_dev_list_lock);
-   _qede_roce_dev_close(edev);
+   _qede_rdma_dev_close(edev);
    mutex_unlock(&qedr_dev_list_lock);
 }

-static void qede_roce_dev_shutdown(struct qede_dev *edev)
+static void qede_rdma_dev_shutdown(struct qede_dev *edev)
 {
-   if (!qede_roce_supported(edev))
+   if (!qede_rdma_supported(edev))
        return;

    mutex_lock(&qedr_dev_list_lock);
@@ -168,7 +168,7 @@ static void qede_roce_dev_shutdown(struct qede_dev *edev)
    mutex_unlock(&qedr_dev_list_lock);
 }

-int qede_roce_register_driver(struct qedr_driver *drv)
+int qede_rdma_register_driver(struct qedr_driver *drv)
 {
    struct qede_dev *edev;
    u8 qedr_counter = 0;
@@ -184,52 +184,52 @@ int qede_roce_register_driver(struct qedr_driver *drv)
        struct net_device *ndev;

        qedr_counter++;
-       _qede_roce_dev_add(edev);
+       _qede_rdma_dev_add(edev);
        ndev = edev->ndev;
        if (netif_running(ndev) && netif_oper_up(ndev))
-           _qede_roce_dev_open(edev);
+           _qede_rdma_dev_open(edev);
    }
    mutex_unlock(&qedr_dev_list_lock);

-   pr_notice("qedr: discovered and registered %d RoCE funcs\n",
+   pr_notice("qedr: discovered and registered %d RDMA funcs\n",
              qedr_counter);

    return 0;
 }
-EXPORT_SYMBOL(qede_roce_register_driver);
+EXPORT_SYMBOL(qede_rdma_register_driver);

-void qede_roce_unregister_driver(struct qedr_driver *drv)
+void qede_rdma_unregister_driver(struct qedr_driver *drv)
 {
    struct qede_dev *edev;

    mutex_lock(&qedr_dev_list_lock);
    list_for_each_entry(edev, &qedr_dev_list, rdma_info.entry) {
        if (edev->rdma_info.qedr_dev)
-           _qede_roce_dev_remove(edev);
+           _qede_rdma_dev_remove(edev);
    }
    qedr_drv = NULL;
    mutex_unlock(&qedr_dev_list_lock);
 }
-EXPORT_SYMBOL(qede_roce_unregister_driver);
+EXPORT_SYMBOL(qede_rdma_unregister_driver);

-static void qede_roce_changeaddr(struct qede_dev *edev)
+static void qede_rdma_changeaddr(struct qede_dev *edev)
 {
-   if (!qede_roce_supported(edev))
+   if (!qede_rdma_supported(edev))
        return;

    if (qedr_drv && edev->rdma_info.qedr_dev && qedr_drv->notify)
        qedr_drv->notify(edev->rdma_info.qedr_dev, QEDE_CHANGE_ADDR);
 }

-static struct qede_roce_event_work *
-qede_roce_get_free_event_node(struct qede_dev *edev)
+static struct qede_rdma_event_work *
+qede_rdma_get_free_event_node(struct qede_dev *edev)
 {
-   struct qede_roce_event_work *event_node = NULL;
+   struct qede_rdma_event_work *event_node = NULL;
    struct list_head *list_node = NULL;
    bool found = false;

-   list_for_each(list_node, &edev->rdma_info.roce_event_list) {
-       event_node = list_entry(list_node, struct qede_roce_event_work,
+   list_for_each(list_node, &edev->rdma_info.rdma_event_list) {
+       event_node = list_entry(list_node, struct qede_rdma_event_work,
                                list);
        if (!work_pending(&event_node->work)) {
            found = true;
@@ -241,74 +241,74 @@ qede_roce_get_free_event_node(struct qede_dev *edev)
        event_node = kzalloc(sizeof(*event_node), GFP_KERNEL);
        if (!event_node) {
            DP_NOTICE(edev,
-                     "qedr: Could not allocate memory for roce work\n");
+                     "qedr: Could not allocate memory for rdma work\n");
            return NULL;
        }
        list_add_tail(&event_node->list,
-                     &edev->rdma_info.roce_event_list);
+                     &edev->rdma_info.rdma_event_list);
    }

    return event_node;
 }

-static void qede_roce_handle_event(struct work_struct *work)
+static void qede_rdma_handle_event(struct work_struct *work)
 {
-   struct qede_roce_event_work *event_node;
-   enum qede_roce_event event;
+   struct qede_rdma_event_work *event_node;
+   enum qede_rdma_event event;
    struct qede_dev *edev;

-   event_node = container_of(work, struct qede_roce_event_work, work);
+   event_node = container_of(work, struct qede_rdma_event_work, work);
    event = event_node->event;
    edev = event_node->ptr;

    switch (event) {
    case QEDE_UP:
-       qede_roce_dev_open(edev);
+       qede_rdma_dev_open(edev);
        break;
    case QEDE_DOWN:
-       qede_roce_dev_close(edev);
+       qede_rdma_dev_close(edev);
        break;
    case QEDE_CLOSE:
-       qede_roce_dev_shutdown(edev);
+       qede_rdma_dev_shutdown(edev);
        break;
    case QEDE_CHANGE_ADDR:
-       qede_roce_changeaddr(edev);
+       qede_rdma_changeaddr(edev);
        break;
    default:
-       DP_NOTICE(edev, "Invalid roce event %d", event);
+       DP_NOTICE(edev, "Invalid rdma event %d", event);
    }
 }

-static void qede_roce_add_event(struct qede_dev *edev,
-                                enum qede_roce_event event)
+static void qede_rdma_add_event(struct qede_dev *edev,
+                                enum qede_rdma_event event)
 {
-   struct qede_roce_event_work *event_node;
+   struct qede_rdma_event_work *event_node;

    if (!edev->rdma_info.qedr_dev)
        return;

-   event_node = qede_roce_get_free_event_node(edev);
+   event_node = qede_rdma_get_free_event_node(edev);
    if (!event_node)
        return;

    event_node->event = event;
    event_node->ptr = edev;

-   INIT_WORK(&event_node->work, qede_roce_handle_event);
-   queue_work(edev->rdma_info.roce_wq, &event_node->work);
+   INIT_WORK(&event_node->work, qede_rdma_handle_event);
+   queue_work(edev->rdma_info.rdma_wq, &event_node->work);
 }

-void qede_roce_dev_event_open(struct qede_dev *edev)
+void qede_rdma_dev_event_open(struct qede_dev *edev)
 {
-   qede_roce_add_event(edev, QEDE_UP);
+   qede_rdma_add_event(edev, QEDE_UP);
 }

-void qede_roce_dev_event_close(struct qede_dev *edev)
+void qede_rdma_dev_event_close(struct qede_dev *edev)
 {
-   qede_roce_add_event(edev, QEDE_DOWN);
+   qede_rdma_add_event(edev, QEDE_DOWN);
 }

-void qede_roce_event_changeaddr(struct qede_dev *edev)
+void qede_rdma_event_changeaddr(struct qede_dev *edev)
 {
-   qede_roce_add_event(edev, QEDE_CHANGE_ADDR);
+   qede_rdma_add_event(edev, QEDE_CHANGE_ADDR);
 }
...
@@ -80,6 +80,11 @@ struct qed_chain_pbl_u32 {
    u32 cons_page_idx;
 };

+struct qed_chain_ext_pbl {
+   dma_addr_t p_pbl_phys;
+   void *p_pbl_virt;
+};
+
 struct qed_chain_u16 {
    /* Cyclic index of next element to produce/consme */
    u16 prod_idx;
@@ -155,6 +160,8 @@ struct qed_chain {
    u32 size;

    u8 intended_use;
+
+   bool b_external_pbl;
 };

 #define QED_CHAIN_PBL_ENTRY_SIZE (8)
...
@@ -634,7 +634,8 @@ struct qed_common_ops {
                       enum qed_chain_cnt_type cnt_type,
                       u32 num_elems,
                       size_t elem_size,
-                      struct qed_chain *p_chain);
+                      struct qed_chain *p_chain,
+                      struct qed_chain_ext_pbl *ext_pbl);

    void (*chain_free)(struct qed_dev *cdev,
                       struct qed_chain *p_chain);
...
@@ -32,22 +32,27 @@
 #ifndef QEDE_ROCE_H
 #define QEDE_ROCE_H

+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+#include <linux/workqueue.h>
+
 struct qedr_dev;
 struct qed_dev;
 struct qede_dev;

-enum qede_roce_event {
+enum qede_rdma_event {
    QEDE_UP,
    QEDE_DOWN,
    QEDE_CHANGE_ADDR,
    QEDE_CLOSE
 };

-struct qede_roce_event_work {
+struct qede_rdma_event_work {
    struct list_head list;
    struct work_struct work;
    void *ptr;
-   enum qede_roce_event event;
+   enum qede_rdma_event event;
 };

 struct qedr_driver {
@@ -57,32 +62,33 @@ struct qedr_driver {
                   struct net_device *);
    void (*remove)(struct qedr_dev *);
-   void (*notify)(struct qedr_dev *, enum qede_roce_event);
+   void (*notify)(struct qedr_dev *, enum qede_rdma_event);
 };

-/* APIs for RoCE driver to register callback handlers,
+/* APIs for RDMA driver to register callback handlers,
  * which will be invoked when device is added, removed, ifup, ifdown
  */
-int qede_roce_register_driver(struct qedr_driver *drv);
-void qede_roce_unregister_driver(struct qedr_driver *drv);
+int qede_rdma_register_driver(struct qedr_driver *drv);
+void qede_rdma_unregister_driver(struct qedr_driver *drv);

-bool qede_roce_supported(struct qede_dev *dev);
+bool qede_rdma_supported(struct qede_dev *dev);

 #if IS_ENABLED(CONFIG_QED_RDMA)
-int qede_roce_dev_add(struct qede_dev *dev);
-void qede_roce_dev_event_open(struct qede_dev *dev);
-void qede_roce_dev_event_close(struct qede_dev *dev);
-void qede_roce_dev_remove(struct qede_dev *dev);
-void qede_roce_event_changeaddr(struct qede_dev *qedr);
+int qede_rdma_dev_add(struct qede_dev *dev);
+void qede_rdma_dev_event_open(struct qede_dev *dev);
+void qede_rdma_dev_event_close(struct qede_dev *dev);
+void qede_rdma_dev_remove(struct qede_dev *dev);
+void qede_rdma_event_changeaddr(struct qede_dev *edr);
 #else
-static inline int qede_roce_dev_add(struct qede_dev *dev)
+static inline int qede_rdma_dev_add(struct qede_dev *dev)
 {
    return 0;
 }

-static inline void qede_roce_dev_event_open(struct qede_dev *dev) {}
-static inline void qede_roce_dev_event_close(struct qede_dev *dev) {}
-static inline void qede_roce_dev_remove(struct qede_dev *dev) {}
-static inline void qede_roce_event_changeaddr(struct qede_dev *qedr) {}
+static inline void qede_rdma_dev_event_open(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_event_close(struct qede_dev *dev) {}
+static inline void qede_rdma_dev_remove(struct qede_dev *dev) {}
+static inline void qede_rdma_event_changeaddr(struct qede_dev *edr) {}
 #endif

 #endif