Commit 348bfec2 authored by David S. Miller

Merge branch 'qed-XDP-support'

Yuval Mintz says:

====================
qed*: Add XDP support

This patch series adds XDP support to the qede driver, although it
contains quite a bit of cleanup, refactoring and infrastructure
work as well.

The content of this series can be roughly divided into:

 - Datapath improvements - mostly focused on having the datapath utilize
parameters that can be more tightly packed into cachelines.
Patches #1, #2, #8, #9 belong to this group.

 - Refactoring - done mostly in favour of XDP. Patches #3, #4, #5, #9.

 - Infrastructure changes - done in favour of XDP. Patches #6 and #7 belong
to this category [#7 being by far the biggest patch in the series].

 - Actual XDP support - last two patches [#10, #11].
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents f54b8cd6 cb6aeb07
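
For readers unfamiliar with XDP itself: once qede exposes XDP, a BPF program
attached to the netdev runs on every received frame before an skb is
allocated, returning a verdict such as XDP_PASS, XDP_DROP or XDP_TX. The
following is a minimal, generic sketch of such a program - it is not part of
this series and assumes the standard libbpf headers - which passes IPv4
frames and drops everything else:

#include <linux/bpf.h>
#include <linux/if_ether.h>
#include <bpf/bpf_helpers.h>
#include <bpf/bpf_endian.h>

SEC("xdp")
int xdp_ipv4_only(struct xdp_md *ctx)
{
	void *data_end = (void *)(long)ctx->data_end;
	void *data = (void *)(long)ctx->data;
	struct ethhdr *eth = data;

	/* Bounds check required by the verifier */
	if ((void *)(eth + 1) > data_end)
		return XDP_ABORTED;

	/* Let IPv4 continue up the stack, drop anything else */
	if (eth->h_proto == bpf_htons(ETH_P_IP))
		return XDP_PASS;

	return XDP_DROP;
}

char _license[] SEC("license") = "GPL";

With a recent iproute2 it could be attached with something like
"ip link set dev <iface> xdp obj xdp_ipv4_only.o sec xdp"; patches #10 and
#11 are what make the qede netdev accept that attach.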
...@@ -241,15 +241,6 @@ struct qed_hw_info { ...@@ -241,15 +241,6 @@ struct qed_hw_info {
enum qed_wol_support b_wol_support; enum qed_wol_support b_wol_support;
}; };
struct qed_hw_cid_data {
u32 cid;
bool b_cid_allocated;
/* Additional identifiers */
u16 opaque_fid;
u8 vport_id;
};
/* maximun size of read/write commands (HW limit) */ /* maximun size of read/write commands (HW limit) */
#define DMAE_MAX_RW_SIZE 0x2000 #define DMAE_MAX_RW_SIZE 0x2000
...@@ -416,9 +407,6 @@ struct qed_hwfn { ...@@ -416,9 +407,6 @@ struct qed_hwfn {
struct qed_dcbx_info *p_dcbx_info; struct qed_dcbx_info *p_dcbx_info;
struct qed_hw_cid_data *p_tx_cids;
struct qed_hw_cid_data *p_rx_cids;
struct qed_dmae_info dmae_info; struct qed_dmae_info dmae_info;
/* QM init */ /* QM init */
......
...@@ -134,15 +134,6 @@ void qed_resc_free(struct qed_dev *cdev) ...@@ -134,15 +134,6 @@ void qed_resc_free(struct qed_dev *cdev)
kfree(cdev->reset_stats); kfree(cdev->reset_stats);
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
kfree(p_hwfn->p_tx_cids);
p_hwfn->p_tx_cids = NULL;
kfree(p_hwfn->p_rx_cids);
p_hwfn->p_rx_cids = NULL;
}
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
...@@ -425,23 +416,6 @@ int qed_resc_alloc(struct qed_dev *cdev) ...@@ -425,23 +416,6 @@ int qed_resc_alloc(struct qed_dev *cdev)
if (!cdev->fw_data) if (!cdev->fw_data)
return -ENOMEM; return -ENOMEM;
/* Allocate Memory for the Queue->CID mapping */
for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
int tx_size = sizeof(struct qed_hw_cid_data) *
RESC_NUM(p_hwfn, QED_L2_QUEUE);
int rx_size = sizeof(struct qed_hw_cid_data) *
RESC_NUM(p_hwfn, QED_L2_QUEUE);
p_hwfn->p_tx_cids = kzalloc(tx_size, GFP_KERNEL);
if (!p_hwfn->p_tx_cids)
goto alloc_no_mem;
p_hwfn->p_rx_cids = kzalloc(rx_size, GFP_KERNEL);
if (!p_hwfn->p_rx_cids)
goto alloc_no_mem;
}
for_each_hwfn(cdev, i) { for_each_hwfn(cdev, i) {
struct qed_hwfn *p_hwfn = &cdev->hwfns[i]; struct qed_hwfn *p_hwfn = &cdev->hwfns[i];
u32 n_eqes, num_cons; u32 n_eqes, num_cons;
...@@ -2283,12 +2257,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) ...@@ -2283,12 +2257,12 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
{ {
void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl; void **pp_virt_addr_tbl = p_chain->pbl.pp_virt_addr_tbl;
u32 page_cnt = p_chain->page_cnt, i, pbl_size; u32 page_cnt = p_chain->page_cnt, i, pbl_size;
u8 *p_pbl_virt = p_chain->pbl.p_virt_table; u8 *p_pbl_virt = p_chain->pbl_sp.p_virt_table;
if (!pp_virt_addr_tbl) if (!pp_virt_addr_tbl)
return; return;
if (!p_chain->pbl.p_virt_table) if (!p_pbl_virt)
goto out; goto out;
for (i = 0; i < page_cnt; i++) { for (i = 0; i < page_cnt; i++) {
...@@ -2306,7 +2280,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain) ...@@ -2306,7 +2280,8 @@ static void qed_chain_free_pbl(struct qed_dev *cdev, struct qed_chain *p_chain)
pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE; pbl_size = page_cnt * QED_CHAIN_PBL_ENTRY_SIZE;
dma_free_coherent(&cdev->pdev->dev, dma_free_coherent(&cdev->pdev->dev,
pbl_size, pbl_size,
p_chain->pbl.p_virt_table, p_chain->pbl.p_phys_table); p_chain->pbl_sp.p_virt_table,
p_chain->pbl_sp.p_phys_table);
out: out:
vfree(p_chain->pbl.pp_virt_addr_tbl); vfree(p_chain->pbl.pp_virt_addr_tbl);
} }
......
...@@ -78,11 +78,34 @@ struct qed_filter_mcast { ...@@ -78,11 +78,34 @@ struct qed_filter_mcast {
unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN]; unsigned char mac[QED_MAX_MC_ADDRS][ETH_ALEN];
}; };
int qed_sp_eth_rx_queue_stop(struct qed_hwfn *p_hwfn, /**
u16 rx_queue_id, * @brief qed_eth_rx_queue_stop - This ramrod closes an Rx queue
bool eq_completion_only, bool cqe_completion); *
* @param p_hwfn
* @param p_rxq Handler of queue to close
* @param eq_completion_only If True completion will be on
* EQe, if False completion will be
* on EQe if p_hwfn opaque
* different from the RXQ opaque
* otherwise on CQe.
* @param cqe_completion If True completion will be
* receive on CQe.
* @return int
*/
int
qed_eth_rx_queue_stop(struct qed_hwfn *p_hwfn,
void *p_rxq,
bool eq_completion_only, bool cqe_completion);
int qed_sp_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, u16 tx_queue_id); /**
* @brief qed_eth_tx_queue_stop - closes a Tx queue
*
* @param p_hwfn
* @param p_txq - handle to Tx queue needed to be closed
*
* @return int
*/
int qed_eth_tx_queue_stop(struct qed_hwfn *p_hwfn, void *p_txq);
enum qed_tpa_mode { enum qed_tpa_mode {
QED_TPA_MODE_NONE, QED_TPA_MODE_NONE,
...@@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn, ...@@ -196,19 +219,19 @@ int qed_sp_eth_filter_ucast(struct qed_hwfn *p_hwfn,
* @note At the moment - only used by non-linux VFs. * @note At the moment - only used by non-linux VFs.
* *
* @param p_hwfn * @param p_hwfn
* @param rx_queue_id RX Queue ID * @param pp_rxq_handlers An array of queue handlers to be updated.
* @param num_rxqs Allow to update multiple rx * @param num_rxqs number of queues to update.
* queues, from rx_queue_id to
* (rx_queue_id + num_rxqs)
* @param complete_cqe_flg Post completion to the CQE Ring if set * @param complete_cqe_flg Post completion to the CQE Ring if set
* @param complete_event_flg Post completion to the Event Ring if set * @param complete_event_flg Post completion to the Event Ring if set
* @param comp_mode
* @param p_comp_data
* *
* @return int * @return int
*/ */
int int
qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
u16 rx_queue_id, void **pp_rxq_handlers,
u8 num_rxqs, u8 num_rxqs,
u8 complete_cqe_flg, u8 complete_cqe_flg,
u8 complete_event_flg, u8 complete_event_flg,
...@@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn, ...@@ -217,27 +240,79 @@ qed_sp_eth_rx_queues_update(struct qed_hwfn *p_hwfn,
void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats); void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats);
int qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn, void qed_reset_vport_stats(struct qed_dev *cdev);
struct qed_sp_vport_start_params *p_params);
struct qed_queue_cid {
/* 'Relative' is a relative term ;-). Usually the indices [not counting
* SBs] would be PF-relative, but there are some cases where that isn't
* the case - specifically for a PF configuring its VF indices it's
* possible some fields [E.g., stats-id] in 'rel' would already be abs.
*/
struct qed_queue_start_common_params rel;
struct qed_queue_start_common_params abs;
u32 cid;
u16 opaque_fid;
/* VFs queues are mapped differently, so we need to know the
* relative queue associated with them [0-based].
* Notice this is relevant on the *PF* queue-cid of its VF's queues,
* and not on the VF itself.
*/
bool is_vf;
u8 vf_qid;
/* Legacy VFs might have Rx producer located elsewhere */
bool b_legacy_vf;
};
int qed_sp_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn, void qed_eth_queue_cid_release(struct qed_hwfn *p_hwfn,
u16 opaque_fid, struct qed_queue_cid *p_cid);
u32 cid,
struct qed_queue_start_common_params *params, struct qed_queue_cid *_qed_eth_queue_to_cid(struct qed_hwfn *p_hwfn,
u8 stats_id, u16 opaque_fid,
u16 bd_max_bytes, u32 cid,
dma_addr_t bd_chain_phys_addr, u8 vf_qid,
dma_addr_t cqe_pbl_addr, struct qed_queue_start_common_params
u16 cqe_pbl_size, bool b_use_zone_a_prod); *p_params);
int qed_sp_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn, int
u16 opaque_fid, qed_sp_eth_vport_start(struct qed_hwfn *p_hwfn,
u32 cid, struct qed_sp_vport_start_params *p_params);
struct qed_queue_start_common_params *p_params,
u8 stats_id, /**
dma_addr_t pbl_addr, * @brief - Starts an Rx queue, when queue_cid is already prepared
u16 pbl_size, *
union qed_qm_pq_params *p_pq_params); * @param p_hwfn
* @param p_cid
* @param bd_max_bytes
* @param bd_chain_phys_addr
* @param cqe_pbl_addr
* @param cqe_pbl_size
*
* @return int
*/
int
qed_eth_rxq_start_ramrod(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size);
/**
* @brief - Starts a Tx queue, where queue_cid is already prepared
*
* @param p_hwfn
* @param p_cid
* @param pbl_addr
* @param pbl_size
* @param p_pq_params - parameters for choosing the PQ for this Tx queue
*
* @return int
*/
int
qed_eth_txq_start_ramrod(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid,
dma_addr_t pbl_addr, u16 pbl_size, u16 pq_id);
u8 qed_mcast_bin_from_mac(u8 *mac); u8 qed_mcast_bin_from_mac(u8 *mac);
......
...@@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn, ...@@ -347,11 +347,11 @@ int qed_sp_pf_start(struct qed_hwfn *p_hwfn,
/* Place EQ address in RAMROD */ /* Place EQ address in RAMROD */
DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr, DMA_REGPAIR_LE(p_ramrod->event_ring_pbl_addr,
p_hwfn->p_eq->chain.pbl.p_phys_table); p_hwfn->p_eq->chain.pbl_sp.p_phys_table);
page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain); page_cnt = (u8)qed_chain_get_page_cnt(&p_hwfn->p_eq->chain);
p_ramrod->event_ring_num_pages = page_cnt; p_ramrod->event_ring_num_pages = page_cnt;
DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr, DMA_REGPAIR_LE(p_ramrod->consolid_q_pbl_addr,
p_hwfn->p_consq->chain.pbl.p_phys_table); p_hwfn->p_consq->chain.pbl_sp.p_phys_table);
qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config); qed_tunn_set_pf_start_params(p_hwfn, p_tunn, &p_ramrod->tunnel_config);
......
...@@ -58,6 +58,23 @@ struct qed_public_vf_info { ...@@ -58,6 +58,23 @@ struct qed_public_vf_info {
int tx_rate; int tx_rate;
}; };
struct qed_iov_vf_init_params {
u16 rel_vf_id;
/* Number of requested Queues; Currently, don't support different
* number of Rx/Tx queues.
*/
u16 num_queues;
/* Allow the client to choose which qzones to use for Rx/Tx,
* and which queue_base to use for Tx queues on a per-queue basis.
* Notice values should be relative to the PF resources.
*/
u16 req_rx_queue[QED_MAX_VF_CHAINS_PER_PF];
u16 req_tx_queue[QED_MAX_VF_CHAINS_PER_PF];
};
/* This struct is part of qed_dev and contains data relevant to all hwfns; /* This struct is part of qed_dev and contains data relevant to all hwfns;
* Initialized only if SR-IOV cpabability is exposed in PCIe config space. * Initialized only if SR-IOV cpabability is exposed in PCIe config space.
*/ */
...@@ -99,10 +116,10 @@ struct qed_iov_vf_mbx { ...@@ -99,10 +116,10 @@ struct qed_iov_vf_mbx {
struct qed_vf_q_info { struct qed_vf_q_info {
u16 fw_rx_qid; u16 fw_rx_qid;
struct qed_queue_cid *p_rx_cid;
u16 fw_tx_qid; u16 fw_tx_qid;
struct qed_queue_cid *p_tx_cid;
u8 fw_cid; u8 fw_cid;
u8 rxq_active;
u8 txq_active;
}; };
enum vf_state { enum vf_state {
......
...@@ -388,18 +388,18 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) ...@@ -388,18 +388,18 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
#define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \ #define MSTORM_QZONE_START(dev) (TSTORM_QZONE_START + \
(TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev))) (TSTORM_QZONE_SIZE * NUM_OF_L2_QUEUES(dev)))
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, int
u8 rx_qid, qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u16 sb, struct qed_queue_cid *p_cid,
u8 sb_index, u16 bd_max_bytes,
u16 bd_max_bytes, dma_addr_t bd_chain_phys_addr,
dma_addr_t bd_chain_phys_addr, dma_addr_t cqe_pbl_addr,
dma_addr_t cqe_pbl_addr, u16 cqe_pbl_size, void __iomem **pp_prod)
u16 cqe_pbl_size, void __iomem **pp_prod)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp; struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_rxq_tlv *req; struct vfpf_start_rxq_tlv *req;
u8 rx_qid = p_cid->rel.queue_id;
int rc; int rc;
/* clear mailbox and prep first tlv */ /* clear mailbox and prep first tlv */
...@@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, ...@@ -409,21 +409,22 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
req->cqe_pbl_addr = cqe_pbl_addr; req->cqe_pbl_addr = cqe_pbl_addr;
req->cqe_pbl_size = cqe_pbl_size; req->cqe_pbl_size = cqe_pbl_size;
req->rxq_addr = bd_chain_phys_addr; req->rxq_addr = bd_chain_phys_addr;
req->hw_sb = sb; req->hw_sb = p_cid->rel.sb;
req->sb_index = sb_index; req->sb_index = p_cid->rel.sb_idx;
req->bd_max_bytes = bd_max_bytes; req->bd_max_bytes = bd_max_bytes;
req->stat_id = -1; req->stat_id = -1;
/* If PF is legacy, we'll need to calculate producers ourselves /* If PF is legacy, we'll need to calculate producers ourselves
* as well as clean them. * as well as clean them.
*/ */
if (pp_prod && p_iov->b_pre_fp_hsi) { if (p_iov->b_pre_fp_hsi) {
u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid]; u8 hw_qid = p_iov->acquire_resp.resc.hw_qid[rx_qid];
u32 init_prod_val = 0; u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + *pp_prod = (u8 __iomem *)
MSTORM_QZONE_START(p_hwfn->cdev) + p_hwfn->regview +
hw_qid * MSTORM_QZONE_SIZE; MSTORM_QZONE_START(p_hwfn->cdev) +
hw_qid * MSTORM_QZONE_SIZE;
/* Init the rcq, rx bd and rx sge (if valid) producers to 0 */ /* Init the rcq, rx bd and rx sge (if valid) producers to 0 */
__internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32), __internal_ram_wr(p_hwfn, *pp_prod, sizeof(u32),
...@@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, ...@@ -444,7 +445,7 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
} }
/* Learn the address of the producer from the response */ /* Learn the address of the producer from the response */
if (pp_prod && !p_iov->b_pre_fp_hsi) { if (!p_iov->b_pre_fp_hsi) {
u32 init_prod_val = 0; u32 init_prod_val = 0;
*pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset; *pp_prod = (u8 __iomem *)p_hwfn->regview + resp->offset;
...@@ -462,7 +463,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, ...@@ -462,7 +463,8 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
return rc; return rc;
} }
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid, bool cqe_completion)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_rxqs_tlv *req; struct vfpf_stop_rxqs_tlv *req;
...@@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) ...@@ -472,7 +474,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
/* clear mailbox and prep first tlv */ /* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req)); req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_RXQS, sizeof(*req));
req->rx_qid = rx_qid; req->rx_qid = p_cid->rel.queue_id;
req->num_rxqs = 1; req->num_rxqs = 1;
req->cqe_completion = cqe_completion; req->cqe_completion = cqe_completion;
...@@ -496,28 +498,28 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion) ...@@ -496,28 +498,28 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, u16 rx_qid, bool cqe_completion)
return rc; return rc;
} }
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, int
u16 tx_queue_id, qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 sb, struct qed_queue_cid *p_cid,
u8 sb_index, dma_addr_t pbl_addr,
dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell)
u16 pbl_size, void __iomem **pp_doorbell)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct pfvf_start_queue_resp_tlv *resp; struct pfvf_start_queue_resp_tlv *resp;
struct vfpf_start_txq_tlv *req; struct vfpf_start_txq_tlv *req;
u16 qid = p_cid->rel.queue_id;
int rc; int rc;
/* clear mailbox and prep first tlv */ /* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req)); req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_START_TXQ, sizeof(*req));
req->tx_qid = tx_queue_id; req->tx_qid = qid;
/* Tx */ /* Tx */
req->pbl_addr = pbl_addr; req->pbl_addr = pbl_addr;
req->pbl_size = pbl_size; req->pbl_size = pbl_size;
req->hw_sb = sb; req->hw_sb = p_cid->rel.sb;
req->sb_index = sb_index; req->sb_index = p_cid->rel.sb_idx;
/* add list termination tlv */ /* add list termination tlv */
qed_add_tlv(p_hwfn, &p_iov->offset, qed_add_tlv(p_hwfn, &p_iov->offset,
...@@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, ...@@ -533,33 +535,29 @@ int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
goto exit; goto exit;
} }
if (pp_doorbell) { /* Modern PFs provide the actual offsets, while legacy
/* Modern PFs provide the actual offsets, while legacy * provided only the queue id.
* provided only the queue id. */
*/ if (!p_iov->b_pre_fp_hsi) {
if (!p_iov->b_pre_fp_hsi) { *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + resp->offset;
*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells + } else {
resp->offset; u8 cid = p_iov->acquire_resp.resc.cid[qid];
} else {
u8 cid = p_iov->acquire_resp.resc.cid[tx_queue_id];
u32 db_addr;
db_addr = qed_db_addr_vf(cid, DQ_DEMS_LEGACY);
*pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
db_addr;
}
DP_VERBOSE(p_hwfn, QED_MSG_IOV, *pp_doorbell = (u8 __iomem *)p_hwfn->doorbells +
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n", qed_db_addr_vf(cid,
tx_queue_id, *pp_doorbell, resp->offset); DQ_DEMS_LEGACY);
} }
DP_VERBOSE(p_hwfn, QED_MSG_IOV,
"Txq[0x%02x]: doorbell at %p [offset 0x%08x]\n",
qid, *pp_doorbell, resp->offset);
exit: exit:
qed_vf_pf_req_end(p_hwfn, rc); qed_vf_pf_req_end(p_hwfn, rc);
return rc; return rc;
} }
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid)
{ {
struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info; struct qed_vf_iov *p_iov = p_hwfn->vf_iov_info;
struct vfpf_stop_txqs_tlv *req; struct vfpf_stop_txqs_tlv *req;
...@@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) ...@@ -569,7 +567,7 @@ int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid)
/* clear mailbox and prep first tlv */ /* clear mailbox and prep first tlv */
req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req)); req = qed_vf_pf_prep(p_hwfn, CHANNEL_TLV_STOP_TXQS, sizeof(*req));
req->tx_qid = tx_qid; req->tx_qid = p_cid->rel.queue_id;
req->num_txqs = 1; req->num_txqs = 1;
/* add list termination tlv */ /* add list termination tlv */
......
...@@ -666,10 +666,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); ...@@ -666,10 +666,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
/** /**
* @brief VF - start the RX Queue by sending a message to the PF * @brief VF - start the RX Queue by sending a message to the PF
* @param p_hwfn * @param p_hwfn
* @param cid - zero based within the VF * @param p_cid - Only relative fields are relevant
* @param rx_queue_id - zero based within the VF
* @param sb - VF status block for this queue
* @param sb_index - Index within the status block
* @param bd_max_bytes - maximum number of bytes per bd * @param bd_max_bytes - maximum number of bytes per bd
* @param bd_chain_phys_addr - physical address of bd chain * @param bd_chain_phys_addr - physical address of bd chain
* @param cqe_pbl_addr - physical address of pbl * @param cqe_pbl_addr - physical address of pbl
...@@ -680,9 +677,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn); ...@@ -680,9 +677,7 @@ int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn);
* @return int * @return int
*/ */
int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_queue_id, struct qed_queue_cid *p_cid,
u16 sb,
u8 sb_index,
u16 bd_max_bytes, u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr, dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, dma_addr_t cqe_pbl_addr,
...@@ -702,24 +697,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, ...@@ -702,24 +697,23 @@ int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
* *
* @return int * @return int
*/ */
int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, int
u16 tx_queue_id, qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 sb, struct qed_queue_cid *p_cid,
u8 sb_index, dma_addr_t pbl_addr,
dma_addr_t pbl_addr, u16 pbl_size, void __iomem **pp_doorbell);
u16 pbl_size, void __iomem **pp_doorbell);
/** /**
* @brief VF - stop the RX queue by sending a message to the PF * @brief VF - stop the RX queue by sending a message to the PF
* *
* @param p_hwfn * @param p_hwfn
* @param rx_qid * @param p_cid
* @param cqe_completion * @param cqe_completion
* *
* @return int * @return int
*/ */
int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion); struct qed_queue_cid *p_cid, bool cqe_completion);
/** /**
* @brief VF - stop the TX queue by sending a message to the PF * @brief VF - stop the TX queue by sending a message to the PF
...@@ -729,7 +723,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, ...@@ -729,7 +723,7 @@ int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
* *
* @return int * @return int
*/ */
int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid); int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, struct qed_queue_cid *p_cid);
/** /**
* @brief VF - send a vport update command * @brief VF - send a vport update command
...@@ -902,9 +896,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn) ...@@ -902,9 +896,7 @@ static inline int qed_vf_hw_prepare(struct qed_hwfn *p_hwfn)
} }
static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
u8 rx_queue_id, struct qed_queue_cid *p_cid,
u16 sb,
u8 sb_index,
u16 bd_max_bytes, u16 bd_max_bytes,
dma_addr_t bd_chain_phys_adr, dma_addr_t bd_chain_phys_adr,
dma_addr_t cqe_pbl_addr, dma_addr_t cqe_pbl_addr,
...@@ -914,9 +906,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn, ...@@ -914,9 +906,7 @@ static inline int qed_vf_pf_rxq_start(struct qed_hwfn *p_hwfn,
} }
static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
u16 tx_queue_id, struct qed_queue_cid *p_cid,
u16 sb,
u8 sb_index,
dma_addr_t pbl_addr, dma_addr_t pbl_addr,
u16 pbl_size, void __iomem **pp_doorbell) u16 pbl_size, void __iomem **pp_doorbell)
{ {
...@@ -924,12 +914,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn, ...@@ -924,12 +914,14 @@ static inline int qed_vf_pf_txq_start(struct qed_hwfn *p_hwfn,
} }
static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn, static inline int qed_vf_pf_rxq_stop(struct qed_hwfn *p_hwfn,
u16 rx_qid, bool cqe_completion) struct qed_queue_cid *p_cid,
bool cqe_completion)
{ {
return -EINVAL; return -EINVAL;
} }
static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn, u16 tx_qid) static inline int qed_vf_pf_txq_stop(struct qed_hwfn *p_hwfn,
struct qed_queue_cid *p_cid)
{ {
return -EINVAL; return -EINVAL;
} }
......
...@@ -16,6 +16,7 @@ ...@@ -16,6 +16,7 @@
#include <linux/bitmap.h> #include <linux/bitmap.h>
#include <linux/kernel.h> #include <linux/kernel.h>
#include <linux/mutex.h> #include <linux/mutex.h>
#include <linux/bpf.h>
#include <linux/io.h> #include <linux/io.h>
#include <linux/qed/common_hsi.h> #include <linux/qed/common_hsi.h>
#include <linux/qed/eth_common.h> #include <linux/qed/eth_common.h>
...@@ -127,10 +128,9 @@ struct qede_dev { ...@@ -127,10 +128,9 @@ struct qede_dev {
const struct qed_eth_ops *ops; const struct qed_eth_ops *ops;
struct qed_dev_eth_info dev_info; struct qed_dev_eth_info dev_info;
#define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues) #define QEDE_MAX_RSS_CNT(edev) ((edev)->dev_info.num_queues)
#define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues * \ #define QEDE_MAX_TSS_CNT(edev) ((edev)->dev_info.num_queues)
(edev)->dev_info.num_tc)
struct qede_fastpath *fp_array; struct qede_fastpath *fp_array;
u8 req_num_tx; u8 req_num_tx;
...@@ -139,17 +139,9 @@ struct qede_dev { ...@@ -139,17 +139,9 @@ struct qede_dev {
u8 fp_num_rx; u8 fp_num_rx;
u16 req_queues; u16 req_queues;
u16 num_queues; u16 num_queues;
u8 num_tc;
#define QEDE_QUEUE_CNT(edev) ((edev)->num_queues) #define QEDE_QUEUE_CNT(edev) ((edev)->num_queues)
#define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx) #define QEDE_RSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_tx)
#define QEDE_TSS_COUNT(edev) (((edev)->num_queues - (edev)->fp_num_rx) * \ #define QEDE_TSS_COUNT(edev) ((edev)->num_queues - (edev)->fp_num_rx)
(edev)->num_tc)
#define QEDE_TX_IDX(edev, txqidx) ((edev)->fp_num_rx + (txqidx) % \
QEDE_TSS_COUNT(edev))
#define QEDE_TC_IDX(edev, txqidx) ((txqidx) / QEDE_TSS_COUNT(edev))
#define QEDE_TX_QUEUE(edev, txqidx) \
(&(edev)->fp_array[QEDE_TX_IDX((edev), (txqidx))].txqs[QEDE_TC_IDX(\
(edev), (txqidx))])
struct qed_int_info int_info; struct qed_int_info int_info;
unsigned char primary_mac[ETH_ALEN]; unsigned char primary_mac[ETH_ALEN];
...@@ -196,6 +188,8 @@ struct qede_dev { ...@@ -196,6 +188,8 @@ struct qede_dev {
bool wol_enabled; bool wol_enabled;
struct qede_rdma_dev rdma_info; struct qede_rdma_dev rdma_info;
struct bpf_prog *xdp_prog;
}; };
enum QEDE_STATE { enum QEDE_STATE {
...@@ -225,39 +219,67 @@ enum qede_agg_state { ...@@ -225,39 +219,67 @@ enum qede_agg_state {
}; };
struct qede_agg_info { struct qede_agg_info {
struct sw_rx_data replace_buf; /* rx_buf is a data buffer that can be placed / consumed from rx bd
dma_addr_t replace_buf_mapping; * chain. It has two purposes: We will preallocate the data buffer
struct sw_rx_data start_buf; * for each aggregation when we open the interface and will place this
dma_addr_t start_buf_mapping; * buffer on the rx-bd-ring when we receive TPA_START. We don't want
struct eth_fast_path_rx_tpa_start_cqe start_cqe; * to be in a state where allocation fails, as we can't reuse the
enum qede_agg_state agg_state; * consumer buffer in the rx-chain since FW may still be writing to it
* (since header needs to be modified for TPA).
* The second purpose is to keep a pointer to the bd buffer during
* aggregation.
*/
struct sw_rx_data buffer;
dma_addr_t buffer_mapping;
struct sk_buff *skb; struct sk_buff *skb;
int frag_id;
/* We need some structs from the start cookie until termination */
u16 vlan_tag; u16 vlan_tag;
u16 start_cqe_bd_len;
u8 start_cqe_placement_offset;
u8 state;
u8 frag_id;
u8 tunnel_type;
}; };
struct qede_rx_queue { struct qede_rx_queue {
__le16 *hw_cons_ptr; __le16 *hw_cons_ptr;
struct sw_rx_data *sw_rx_ring; void __iomem *hw_rxq_prod_addr;
u16 sw_rx_cons;
u16 sw_rx_prod; /* Required for the allocation of replacement buffers */
struct qed_chain rx_bd_ring; struct device *dev;
struct qed_chain rx_comp_ring;
void __iomem *hw_rxq_prod_addr; struct bpf_prog *xdp_prog;
u16 sw_rx_cons;
u16 sw_rx_prod;
u16 num_rx_buffers; /* Slowpath */
u8 data_direction;
u8 rxq_id;
u32 rx_buf_size;
u32 rx_buf_seg_size;
u64 rcv_pkts;
struct sw_rx_data *sw_rx_ring;
struct qed_chain rx_bd_ring;
struct qed_chain rx_comp_ring ____cacheline_aligned;
/* GRO */ /* GRO */
struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM]; struct qede_agg_info tpa_info[ETH_TPA_MAX_AGGS_NUM];
int rx_buf_size; u64 rx_hw_errors;
unsigned int rx_buf_seg_size; u64 rx_alloc_errors;
u64 rx_ip_frags;
u16 num_rx_buffers; u64 xdp_no_pass;
u16 rxq_id;
u64 rcv_pkts; void *handle;
u64 rx_hw_errors;
u64 rx_alloc_errors;
u64 rx_ip_frags;
}; };
union db_prod { union db_prod {
...@@ -273,20 +295,39 @@ struct sw_tx_bd { ...@@ -273,20 +295,39 @@ struct sw_tx_bd {
}; };
struct qede_tx_queue { struct qede_tx_queue {
int index; /* Queue index */ u8 is_xdp;
__le16 *hw_cons_ptr; bool is_legacy;
struct sw_tx_bd *sw_tx_ring; u16 sw_tx_cons;
u16 sw_tx_cons; u16 sw_tx_prod;
u16 sw_tx_prod; u16 num_tx_buffers; /* Slowpath only */
struct qed_chain tx_pbl;
void __iomem *doorbell_addr; u64 xmit_pkts;
union db_prod tx_db; u64 stopped_cnt;
u16 num_tx_buffers; __le16 *hw_cons_ptr;
u64 xmit_pkts;
u64 stopped_cnt; /* Needed for the mapping of packets */
struct device *dev;
bool is_legacy;
void __iomem *doorbell_addr;
union db_prod tx_db;
int index; /* Slowpath only */
#define QEDE_TXQ_XDP_TO_IDX(edev, txq) ((txq)->index - \
QEDE_MAX_TSS_CNT(edev))
#define QEDE_TXQ_IDX_TO_XDP(edev, idx) ((idx) + QEDE_MAX_TSS_CNT(edev))
/* Regular Tx requires skb + metadata for release purpose,
* while XDP requires only the pages themselves.
*/
union {
struct sw_tx_bd *skbs;
struct page **pages;
} sw_tx_ring;
struct qed_chain tx_pbl;
/* Slowpath; Should be kept in end [unless missing padding] */
void *handle;
}; };
#define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \ #define BD_UNMAP_ADDR(bd) HILO_U64(le32_to_cpu((bd)->addr.hi), \
...@@ -303,13 +344,16 @@ struct qede_fastpath { ...@@ -303,13 +344,16 @@ struct qede_fastpath {
struct qede_dev *edev; struct qede_dev *edev;
#define QEDE_FASTPATH_TX BIT(0) #define QEDE_FASTPATH_TX BIT(0)
#define QEDE_FASTPATH_RX BIT(1) #define QEDE_FASTPATH_RX BIT(1)
#define QEDE_FASTPATH_XDP BIT(2)
#define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX) #define QEDE_FASTPATH_COMBINED (QEDE_FASTPATH_TX | QEDE_FASTPATH_RX)
u8 type; u8 type;
u8 id; u8 id;
u8 xdp_xmit;
struct napi_struct napi; struct napi_struct napi;
struct qed_sb_info *sb_info; struct qed_sb_info *sb_info;
struct qede_rx_queue *rxq; struct qede_rx_queue *rxq;
struct qede_tx_queue *txqs; struct qede_tx_queue *txq;
struct qede_tx_queue *xdp_tx;
#define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8) #define VEC_NAME_SIZE (sizeof(((struct net_device *)0)->name) + 8)
char name[VEC_NAME_SIZE]; char name[VEC_NAME_SIZE];
...@@ -332,8 +376,13 @@ struct qede_fastpath { ...@@ -332,8 +376,13 @@ struct qede_fastpath {
#define QEDE_SP_VXLAN_PORT_CONFIG 2 #define QEDE_SP_VXLAN_PORT_CONFIG 2
#define QEDE_SP_GENEVE_PORT_CONFIG 3 #define QEDE_SP_GENEVE_PORT_CONFIG 3
union qede_reload_args { struct qede_reload_args {
u16 mtu; void (*func)(struct qede_dev *edev, struct qede_reload_args *args);
union {
netdev_features_t features;
struct bpf_prog *new_prog;
u16 mtu;
} u;
}; };
#ifdef CONFIG_DCB #ifdef CONFIG_DCB
...@@ -342,15 +391,14 @@ void qede_set_dcbnl_ops(struct net_device *ndev); ...@@ -342,15 +391,14 @@ void qede_set_dcbnl_ops(struct net_device *ndev);
void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level); void qede_config_debug(uint debug, u32 *p_dp_module, u8 *p_dp_level);
void qede_set_ethtool_ops(struct net_device *netdev); void qede_set_ethtool_ops(struct net_device *netdev);
void qede_reload(struct qede_dev *edev, void qede_reload(struct qede_dev *edev,
void (*func)(struct qede_dev *edev, struct qede_reload_args *args, bool is_locked);
union qede_reload_args *args),
union qede_reload_args *args);
int qede_change_mtu(struct net_device *dev, int new_mtu); int qede_change_mtu(struct net_device *dev, int new_mtu);
void qede_fill_by_demand_stats(struct qede_dev *edev); void qede_fill_by_demand_stats(struct qede_dev *edev);
void __qede_lock(struct qede_dev *edev);
void __qede_unlock(struct qede_dev *edev);
bool qede_has_rx_work(struct qede_rx_queue *rxq); bool qede_has_rx_work(struct qede_rx_queue *rxq);
int qede_txq_has_work(struct qede_tx_queue *txq); int qede_txq_has_work(struct qede_tx_queue *txq);
void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, struct qede_dev *edev, void qede_recycle_rx_bd_ring(struct qede_rx_queue *rxq, u8 count);
u8 count);
void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq); void qede_update_rx_prod(struct qede_dev *edev, struct qede_rx_queue *rxq);
#define RX_RING_SIZE_POW 13 #define RX_RING_SIZE_POW 13
......
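
The qede.h changes above also rework the reload flow: instead of passing a
function pointer and a union separately, qede_reload() now takes a single
struct qede_reload_args carrying both the callback and its argument (MTU,
features, or an XDP program). A hypothetical sketch of how an XDP-program
swap could ride on that API - the helper names here are illustrative, not
quoted from patches #10-#11:

/* Hypothetical helpers; qede_reload() rebuilds the fastpath around
 * the callback, so the new program takes effect together with the
 * queue reconfiguration.
 */
static void example_xdp_reload_func(struct qede_dev *edev,
				    struct qede_reload_args *args)
{
	struct bpf_prog *old;

	/* Swap the program pointer; release the old one if any */
	old = xchg(&edev->xdp_prog, args->u.new_prog);
	if (old)
		bpf_prog_put(old);
}

static int example_xdp_set(struct qede_dev *edev, struct bpf_prog *prog)
{
	struct qede_reload_args args;

	args.func = example_xdp_reload_func;
	args.u.new_prog = prog;
	qede_reload(edev, &args, false);

	return 0;
}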
...@@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 { ...@@ -56,23 +56,6 @@ struct qed_chain_pbl_u32 {
u32 cons_page_idx; u32 cons_page_idx;
}; };
struct qed_chain_pbl {
/* Base address of a pre-allocated buffer for pbl */
dma_addr_t p_phys_table;
void *p_virt_table;
/* Table for keeping the virtual addresses of the chain pages,
* respectively to the physical addresses in the pbl table.
*/
void **pp_virt_addr_tbl;
/* Index to current used page by producer/consumer */
union {
struct qed_chain_pbl_u16 pbl16;
struct qed_chain_pbl_u32 pbl32;
} u;
};
struct qed_chain_u16 { struct qed_chain_u16 {
/* Cyclic index of next element to produce/consme */ /* Cyclic index of next element to produce/consme */
u16 prod_idx; u16 prod_idx;
...@@ -86,46 +69,78 @@ struct qed_chain_u32 { ...@@ -86,46 +69,78 @@ struct qed_chain_u32 {
}; };
struct qed_chain { struct qed_chain {
void *p_virt_addr; /* fastpath portion of the chain - required for commands such
dma_addr_t p_phys_addr; * as produce / consume.
void *p_prod_elem; */
void *p_cons_elem; /* Point to next element to produce/consume */
void *p_prod_elem;
void *p_cons_elem;
/* Fastpath portions of the PBL [if exists] */
struct {
/* Table for keeping the virtual addresses of the chain pages,
* respectively to the physical addresses in the pbl table.
*/
void **pp_virt_addr_tbl;
enum qed_chain_mode mode; union {
enum qed_chain_use_mode intended_use; /* used to produce/consume */ struct qed_chain_pbl_u16 u16;
enum qed_chain_cnt_type cnt_type; struct qed_chain_pbl_u32 u32;
} c;
} pbl;
union { union {
struct qed_chain_u16 chain16; struct qed_chain_u16 chain16;
struct qed_chain_u32 chain32; struct qed_chain_u32 chain32;
} u; } u;
/* Capacity counts only usable elements */
u32 capacity;
u32 page_cnt; u32 page_cnt;
/* Number of elements - capacity is for usable elements only, enum qed_chain_mode mode;
* while size will contain total number of elements [for entire chain].
/* Elements information for fast calculations */
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_size;
u16 next_page_mask;
u16 usable_per_page;
u8 elem_unusable;
u8 cnt_type;
/* Slowpath of the chain - required for initialization and destruction,
* but isn't involved in regular functionality.
*/ */
u32 capacity;
/* Base address of a pre-allocated buffer for pbl */
struct {
dma_addr_t p_phys_table;
void *p_virt_table;
} pbl_sp;
/* Address of first page of the chain - the address is required
* for fastpath operation [consume/produce] but only for the the SINGLE
* flavour which isn't considered fastpath [== SPQ].
*/
void *p_virt_addr;
dma_addr_t p_phys_addr;
/* Total number of elements [for entire chain] */
u32 size; u32 size;
/* Elements information for fast calculations */ u8 intended_use;
u16 elem_per_page;
u16 elem_per_page_mask;
u16 elem_unusable;
u16 usable_per_page;
u16 elem_size;
u16 next_page_mask;
struct qed_chain_pbl pbl;
}; };
#define QED_CHAIN_PBL_ENTRY_SIZE (8) #define QED_CHAIN_PBL_ENTRY_SIZE (8)
#define QED_CHAIN_PAGE_SIZE (0x1000) #define QED_CHAIN_PAGE_SIZE (0x1000)
#define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size)) #define ELEMS_PER_PAGE(elem_size) (QED_CHAIN_PAGE_SIZE / (elem_size))
#define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \ #define UNUSABLE_ELEMS_PER_PAGE(elem_size, mode) \
((mode == QED_CHAIN_MODE_NEXT_PTR) ? \ (((mode) == QED_CHAIN_MODE_NEXT_PTR) ? \
(1 + ((sizeof(struct qed_chain_next) - 1) / \ (u8)(1 + ((sizeof(struct qed_chain_next) - 1) / \
(elem_size))) : 0) (elem_size))) : 0)
#define USABLE_ELEMS_PER_PAGE(elem_size, mode) \ #define USABLE_ELEMS_PER_PAGE(elem_size, mode) \
((u32)(ELEMS_PER_PAGE(elem_size) - \ ((u32)(ELEMS_PER_PAGE(elem_size) - \
...@@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain) ...@@ -186,7 +201,7 @@ static inline u16 qed_chain_get_usable_per_page(struct qed_chain *p_chain)
return p_chain->usable_per_page; return p_chain->usable_per_page;
} }
static inline u16 qed_chain_get_unusable_per_page(struct qed_chain *p_chain) static inline u8 qed_chain_get_unusable_per_page(struct qed_chain *p_chain)
{ {
return p_chain->elem_unusable; return p_chain->elem_unusable;
} }
...@@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain) ...@@ -198,7 +213,7 @@ static inline u32 qed_chain_get_page_cnt(struct qed_chain *p_chain)
static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
{ {
return p_chain->pbl.p_phys_table; return p_chain->pbl_sp.p_phys_table;
} }
/** /**
...@@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain) ...@@ -214,10 +229,10 @@ static inline dma_addr_t qed_chain_get_pbl_phys(struct qed_chain *p_chain)
static inline void static inline void
qed_chain_advance_page(struct qed_chain *p_chain, qed_chain_advance_page(struct qed_chain *p_chain,
void **p_next_elem, void *idx_to_inc, void *page_to_inc) void **p_next_elem, void *idx_to_inc, void *page_to_inc)
{ {
struct qed_chain_next *p_next = NULL; struct qed_chain_next *p_next = NULL;
u32 page_index = 0; u32 page_index = 0;
switch (p_chain->mode) { switch (p_chain->mode) {
case QED_CHAIN_MODE_NEXT_PTR: case QED_CHAIN_MODE_NEXT_PTR:
p_next = *p_next_elem; p_next = *p_next_elem;
...@@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) ...@@ -305,7 +320,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
if ((p_chain->u.chain16.prod_idx & if ((p_chain->u.chain16.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain16.prod_idx; p_prod_idx = &p_chain->u.chain16.prod_idx;
p_prod_page_idx = &p_chain->pbl.u.pbl16.prod_page_idx; p_prod_page_idx = &p_chain->pbl.c.u16.prod_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx); p_prod_idx, p_prod_page_idx);
} }
...@@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain) ...@@ -314,7 +329,7 @@ static inline void *qed_chain_produce(struct qed_chain *p_chain)
if ((p_chain->u.chain32.prod_idx & if ((p_chain->u.chain32.prod_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_prod_idx = &p_chain->u.chain32.prod_idx; p_prod_idx = &p_chain->u.chain32.prod_idx;
p_prod_page_idx = &p_chain->pbl.u.pbl32.prod_page_idx; p_prod_page_idx = &p_chain->pbl.c.u32.prod_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_prod_elem, qed_chain_advance_page(p_chain, &p_chain->p_prod_elem,
p_prod_idx, p_prod_page_idx); p_prod_idx, p_prod_page_idx);
} }
...@@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) ...@@ -378,7 +393,7 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
if ((p_chain->u.chain16.cons_idx & if ((p_chain->u.chain16.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain16.cons_idx; p_cons_idx = &p_chain->u.chain16.cons_idx;
p_cons_page_idx = &p_chain->pbl.u.pbl16.cons_page_idx; p_cons_page_idx = &p_chain->pbl.c.u16.cons_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx); p_cons_idx, p_cons_page_idx);
} }
...@@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain) ...@@ -387,8 +402,8 @@ static inline void *qed_chain_consume(struct qed_chain *p_chain)
if ((p_chain->u.chain32.cons_idx & if ((p_chain->u.chain32.cons_idx &
p_chain->elem_per_page_mask) == p_chain->next_page_mask) { p_chain->elem_per_page_mask) == p_chain->next_page_mask) {
p_cons_idx = &p_chain->u.chain32.cons_idx; p_cons_idx = &p_chain->u.chain32.cons_idx;
p_cons_page_idx = &p_chain->pbl.u.pbl32.cons_page_idx; p_cons_page_idx = &p_chain->pbl.c.u32.cons_page_idx;
qed_chain_advance_page(p_chain, &p_chain->p_cons_elem, qed_chain_advance_page(p_chain, &p_chain->p_cons_elem,
p_cons_idx, p_cons_page_idx); p_cons_idx, p_cons_page_idx);
} }
p_chain->u.chain32.cons_idx++; p_chain->u.chain32.cons_idx++;
...@@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain) ...@@ -429,25 +444,26 @@ static inline void qed_chain_reset(struct qed_chain *p_chain)
u32 reset_val = p_chain->page_cnt - 1; u32 reset_val = p_chain->page_cnt - 1;
if (is_chain_u16(p_chain)) { if (is_chain_u16(p_chain)) {
p_chain->pbl.u.pbl16.prod_page_idx = (u16)reset_val; p_chain->pbl.c.u16.prod_page_idx = (u16)reset_val;
p_chain->pbl.u.pbl16.cons_page_idx = (u16)reset_val; p_chain->pbl.c.u16.cons_page_idx = (u16)reset_val;
} else { } else {
p_chain->pbl.u.pbl32.prod_page_idx = reset_val; p_chain->pbl.c.u32.prod_page_idx = reset_val;
p_chain->pbl.u.pbl32.cons_page_idx = reset_val; p_chain->pbl.c.u32.cons_page_idx = reset_val;
} }
} }
switch (p_chain->intended_use) { switch (p_chain->intended_use) {
case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
case QED_CHAIN_USE_TO_PRODUCE:
/* Do nothing */
break;
case QED_CHAIN_USE_TO_CONSUME: case QED_CHAIN_USE_TO_CONSUME:
/* produce empty elements */ /* produce empty elements */
for (i = 0; i < p_chain->capacity; i++) for (i = 0; i < p_chain->capacity; i++)
qed_chain_recycle_consumed(p_chain); qed_chain_recycle_consumed(p_chain);
break; break;
case QED_CHAIN_USE_TO_CONSUME_PRODUCE:
case QED_CHAIN_USE_TO_PRODUCE:
default:
/* Do nothing */
break;
} }
} }
...@@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, ...@@ -473,13 +489,13 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->p_virt_addr = NULL; p_chain->p_virt_addr = NULL;
p_chain->p_phys_addr = 0; p_chain->p_phys_addr = 0;
p_chain->elem_size = elem_size; p_chain->elem_size = elem_size;
p_chain->intended_use = intended_use; p_chain->intended_use = (u8)intended_use;
p_chain->mode = mode; p_chain->mode = mode;
p_chain->cnt_type = cnt_type; p_chain->cnt_type = (u8)cnt_type;
p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size); p_chain->elem_per_page = ELEMS_PER_PAGE(elem_size);
p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->usable_per_page = USABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->elem_per_page_mask = p_chain->elem_per_page - 1; p_chain->elem_per_page_mask = p_chain->elem_per_page - 1;
p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode); p_chain->elem_unusable = UNUSABLE_ELEMS_PER_PAGE(elem_size, mode);
p_chain->next_page_mask = (p_chain->usable_per_page & p_chain->next_page_mask = (p_chain->usable_per_page &
p_chain->elem_per_page_mask); p_chain->elem_per_page_mask);
...@@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain, ...@@ -488,8 +504,8 @@ static inline void qed_chain_init_params(struct qed_chain *p_chain,
p_chain->capacity = p_chain->usable_per_page * page_cnt; p_chain->capacity = p_chain->usable_per_page * page_cnt;
p_chain->size = p_chain->elem_per_page * page_cnt; p_chain->size = p_chain->elem_per_page * page_cnt;
p_chain->pbl.p_phys_table = 0; p_chain->pbl_sp.p_phys_table = 0;
p_chain->pbl.p_virt_table = NULL; p_chain->pbl_sp.p_virt_table = NULL;
p_chain->pbl.pp_virt_addr_tbl = NULL; p_chain->pbl.pp_virt_addr_tbl = NULL;
} }
...@@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain, ...@@ -530,8 +546,8 @@ static inline void qed_chain_init_pbl_mem(struct qed_chain *p_chain,
dma_addr_t p_phys_pbl, dma_addr_t p_phys_pbl,
void **pp_virt_addr_tbl) void **pp_virt_addr_tbl)
{ {
p_chain->pbl.p_phys_table = p_phys_pbl; p_chain->pbl_sp.p_phys_table = p_phys_pbl;
p_chain->pbl.p_virt_table = p_virt_pbl; p_chain->pbl_sp.p_virt_table = p_virt_pbl;
p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl; p_chain->pbl.pp_virt_addr_tbl = pp_virt_addr_tbl;
} }
......
...@@ -15,6 +15,29 @@ ...@@ -15,6 +15,29 @@
#include <linux/qed/qed_if.h> #include <linux/qed/qed_if.h>
#include <linux/qed/qed_iov_if.h> #include <linux/qed/qed_iov_if.h>
struct qed_queue_start_common_params {
/* Should always be relative to entity sending this. */
u8 vport_id;
u16 queue_id;
/* Relative, but relevant only for PFs */
u8 stats_id;
/* These are always absolute */
u16 sb;
u8 sb_idx;
};
struct qed_rxq_start_ret_params {
void __iomem *p_prod;
void *p_handle;
};
struct qed_txq_start_ret_params {
void __iomem *p_doorbell;
void *p_handle;
};
struct qed_dev_eth_info { struct qed_dev_eth_info {
struct qed_dev_info common; struct qed_dev_info common;
...@@ -56,18 +79,6 @@ struct qed_start_vport_params { ...@@ -56,18 +79,6 @@ struct qed_start_vport_params {
bool clear_stats; bool clear_stats;
}; };
struct qed_stop_rxq_params {
u8 rss_id;
u8 rx_queue_id;
u8 vport_id;
bool eq_completion_only;
};
struct qed_stop_txq_params {
u8 rss_id;
u8 tx_queue_id;
};
enum qed_filter_rx_mode_type { enum qed_filter_rx_mode_type {
QED_FILTER_RX_MODE_TYPE_REGULAR, QED_FILTER_RX_MODE_TYPE_REGULAR,
QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC, QED_FILTER_RX_MODE_TYPE_MULTI_PROMISC,
...@@ -112,15 +123,6 @@ struct qed_filter_params { ...@@ -112,15 +123,6 @@ struct qed_filter_params {
union qed_filter_type_params filter; union qed_filter_type_params filter;
}; };
struct qed_queue_start_common_params {
u8 rss_id;
u8 queue_id;
u8 vport_id;
u16 sb;
u16 sb_idx;
u16 vf_qid;
};
struct qed_tunn_params { struct qed_tunn_params {
u16 vxlan_port; u16 vxlan_port;
u8 update_vxlan_port; u8 update_vxlan_port;
...@@ -220,24 +222,24 @@ struct qed_eth_ops { ...@@ -220,24 +222,24 @@ struct qed_eth_ops {
struct qed_update_vport_params *params); struct qed_update_vport_params *params);
int (*q_rx_start)(struct qed_dev *cdev, int (*q_rx_start)(struct qed_dev *cdev,
u8 rss_num,
struct qed_queue_start_common_params *params, struct qed_queue_start_common_params *params,
u16 bd_max_bytes, u16 bd_max_bytes,
dma_addr_t bd_chain_phys_addr, dma_addr_t bd_chain_phys_addr,
dma_addr_t cqe_pbl_addr, dma_addr_t cqe_pbl_addr,
u16 cqe_pbl_size, u16 cqe_pbl_size,
void __iomem **pp_prod); struct qed_rxq_start_ret_params *ret_params);
int (*q_rx_stop)(struct qed_dev *cdev, int (*q_rx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
struct qed_stop_rxq_params *params);
int (*q_tx_start)(struct qed_dev *cdev, int (*q_tx_start)(struct qed_dev *cdev,
u8 rss_num,
struct qed_queue_start_common_params *params, struct qed_queue_start_common_params *params,
dma_addr_t pbl_addr, dma_addr_t pbl_addr,
u16 pbl_size, u16 pbl_size,
void __iomem **pp_doorbell); struct qed_txq_start_ret_params *ret_params);
int (*q_tx_stop)(struct qed_dev *cdev, int (*q_tx_stop)(struct qed_dev *cdev, u8 rss_id, void *handle);
struct qed_stop_txq_params *params);
int (*filter_config)(struct qed_dev *cdev, int (*filter_config)(struct qed_dev *cdev,
struct qed_filter_params *params); struct qed_filter_params *params);
......
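
The qed_eth_if.h changes above capture the main infrastructure shift of
patch #7: queue start operations now return an opaque handle through
qed_rxq_start_ret_params / qed_txq_start_ret_params, and the matching stop
operations take that handle rather than a queue id. A rough sketch of the
resulting flow from the qede side - the qede field names are assumptions
for illustration, not code from the series:

static int example_start_rxq(struct qede_dev *edev, struct qede_fastpath *fp)
{
	struct qed_queue_start_common_params q_params = {};
	struct qed_rxq_start_ret_params ret_params = {};
	struct qede_rx_queue *rxq = fp->rxq;
	u16 page_cnt = (u16)qed_chain_get_page_cnt(&rxq->rx_comp_ring);
	int rc;

	q_params.queue_id = rxq->rxq_id;
	q_params.vport_id = 0;
	q_params.sb = fp->sb_info->igu_sb_id;
	q_params.sb_idx = RX_PI;

	rc = edev->ops->q_rx_start(edev->cdev, fp->id, &q_params,
				   rxq->rx_buf_size,
				   rxq->rx_bd_ring.p_phys_addr,
				   qed_chain_get_pbl_phys(&rxq->rx_comp_ring),
				   page_cnt, &ret_params);
	if (rc)
		return rc;

	/* Keep what the datapath and a later stop will need */
	rxq->hw_rxq_prod_addr = ret_params.p_prod;
	rxq->handle = ret_params.p_handle;

	return 0;
}

static int example_stop_rxq(struct qede_dev *edev, struct qede_rx_queue *rxq,
			    u8 rss_id)
{
	/* The opaque handle, not the queue id, identifies the queue now */
	return edev->ops->q_rx_stop(edev->cdev, rss_id, rxq->handle);
}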