Commit fea28dd6 authored by Tariq Toukan, committed by Saeed Mahameed

net/mlx5e: XDP, Maintain a FIFO structure for xdp_info instances

This provides infrastructure to have multiple xdp_info instances
for the same consumer index.
Signed-off-by: Tariq Toukan <tariqt@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
parent b8180392
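The FIFO introduced below is a power-of-two ring indexed by free-running producer/consumer counters that are masked on access. A minimal standalone sketch of the same push/pop pattern, with hypothetical names (xdp_info_fifo, fifo_push, fifo_pop) standing in for the driver's structures rather than the actual mlx5e code:

```c
/* Sketch of the pattern this patch introduces, not the driver code itself.
 * The ring size must be a power of two so that "counter & mask" wraps
 * correctly; pc and cc are free-running and only masked when indexing.
 */
#include <stdint.h>
#include <stdio.h>

struct xdp_info {                /* stands in for struct mlx5e_xdp_info */
	void    *frame;
	uint64_t dma_addr;
};

struct xdp_info_fifo {
	struct xdp_info *xi;     /* backing array, power-of-two length   */
	uint32_t *cc;            /* consumer counter (completion path)   */
	uint32_t *pc;            /* producer counter (xmit path)         */
	uint32_t  mask;          /* array length - 1                     */
};

static void fifo_push(struct xdp_info_fifo *fifo, struct xdp_info *xi)
{
	uint32_t i = (*fifo->pc)++ & fifo->mask;

	fifo->xi[i] = *xi;       /* copy by value into the producer slot */
}

static struct xdp_info fifo_pop(struct xdp_info_fifo *fifo)
{
	return fifo->xi[(*fifo->cc)++ & fifo->mask];
}

int main(void)
{
	uint32_t pc = 0, cc = 0;
	struct xdp_info slots[8];                 /* 8 entries, power of two */
	struct xdp_info_fifo fifo = {
		.xi = slots, .pc = &pc, .cc = &cc, .mask = 8 - 1,
	};
	struct xdp_info a = { .frame = NULL, .dma_addr = 0x1000 };

	fifo_push(&fifo, &a);                     /* producer side (xmit)    */
	struct xdp_info b = fifo_pop(&fifo);      /* consumer side (CQ poll) */
	printf("popped dma_addr=0x%llx, in flight=%u\n",
	       (unsigned long long)b.dma_addr, pc - cc);
	return 0;
}
```

Because the counters are shared pointers into the SQ (xdpi_fifo_pc/xdpi_fifo_cc in the patch), the FIFO can hold more than one entry per consumer index: the producer may push several entries for work that a single completion later pops.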
@@ -404,15 +404,24 @@ struct mlx5e_xdp_info {
struct mlx5e_dma_info di;
};
struct mlx5e_xdp_info_fifo {
struct mlx5e_xdp_info *xi;
u32 *cc;
u32 *pc;
u32 mask;
};
struct mlx5e_xdpsq {
/* data path */
/* dirtied @completion */
u32 xdpi_fifo_cc;
u16 cc;
bool redirect_flush;
/* dirtied @xmit */
u16 pc ____cacheline_aligned_in_smp;
u32 xdpi_fifo_pc ____cacheline_aligned_in_smp;
u16 pc;
struct mlx5_wqe_ctrl_seg *doorbell_cseg;
struct mlx5e_cq cq;
@@ -421,7 +430,7 @@ struct mlx5e_xdpsq {
struct mlx5_wq_cyc wq;
struct mlx5e_xdpsq_stats *stats;
struct {
struct mlx5e_xdp_info *xdpi;
struct mlx5e_xdp_info_fifo xdpi_fifo;
} db;
void __iomem *uar_map;
u32 sqn;
@@ -149,20 +149,18 @@ bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xdp_info *xdpi)
cseg->opmod_idx_opcode = cpu_to_be32((sq->pc << 8) | MLX5_OPCODE_SEND);
/* move page to reference to sq responsibility,
* and mark so it's not put back in page-cache.
*/
sq->db.xdpi[pi] = *xdpi;
sq->pc++;
sq->doorbell_cseg = cseg;
mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, xdpi);
stats->xmit++;
return true;
}
bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo;
struct mlx5e_xdpsq *sq;
struct mlx5_cqe64 *cqe;
bool is_redirect;
@@ -179,6 +177,7 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
return false;
is_redirect = !rq;
xdpi_fifo = &sq->db.xdpi_fifo;
/* sq->cc must be updated only after mlx5_cqwq_update_db_record(),
* otherwise a cq overrun may occur
@@ -200,19 +199,19 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
get_cqe_opcode(cqe));
do {
u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sqcc);
struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
struct mlx5e_xdp_info xdpi =
mlx5e_xdpi_fifo_pop(xdpi_fifo);
last_wqe = (sqcc == wqe_counter);
sqcc++;
if (is_redirect) {
xdp_return_frame(xdpi->xdpf);
dma_unmap_single(sq->pdev, xdpi->dma_addr,
xdpi->xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi->di, true);
mlx5e_page_release(rq, &xdpi.di, true);
}
} while (!last_wqe);
} while ((++i < MLX5E_TX_CQ_POLL_BUDGET) && (cqe = mlx5_cqwq_get_cqe(&cq->wq)));
@@ -230,21 +229,22 @@ bool mlx5e_poll_xdpsq_cq(struct mlx5e_cq *cq, struct mlx5e_rq *rq)
void mlx5e_free_xdpsq_descs(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
bool is_redirect = !rq;
while (sq->cc != sq->pc) {
u16 ci = mlx5_wq_cyc_ctr2ix(&sq->wq, sq->cc);
struct mlx5e_xdp_info *xdpi = &sq->db.xdpi[ci];
struct mlx5e_xdp_info xdpi =
mlx5e_xdpi_fifo_pop(xdpi_fifo);
sq->cc++;
if (is_redirect) {
xdp_return_frame(xdpi->xdpf);
dma_unmap_single(sq->pdev, xdpi->dma_addr,
xdpi->xdpf->len, DMA_TO_DEVICE);
xdp_return_frame(xdpi.xdpf);
dma_unmap_single(sq->pdev, xdpi.dma_addr,
xdpi.xdpf->len, DMA_TO_DEVICE);
} else {
/* Recycle RX page */
mlx5e_page_release(rq, &xdpi->di, false);
mlx5e_page_release(rq, &xdpi.di, false);
}
}
}
@@ -57,4 +57,19 @@ static inline void mlx5e_xmit_xdp_doorbell(struct mlx5e_xdpsq *sq)
}
}
static inline void
mlx5e_xdpi_fifo_push(struct mlx5e_xdp_info_fifo *fifo,
struct mlx5e_xdp_info *xi)
{
u32 i = (*fifo->pc)++ & fifo->mask;
fifo->xi[i] = *xi;
}
static inline struct mlx5e_xdp_info
mlx5e_xdpi_fifo_pop(struct mlx5e_xdp_info_fifo *fifo)
{
return fifo->xi[(*fifo->cc)++ & fifo->mask];
}
#endif
@@ -992,18 +992,35 @@ static void mlx5e_close_rq(struct mlx5e_rq *rq)
static void mlx5e_free_xdpsq_db(struct mlx5e_xdpsq *sq)
{
kvfree(sq->db.xdpi);
kvfree(sq->db.xdpi_fifo.xi);
}
static int mlx5e_alloc_xdpsq_fifo(struct mlx5e_xdpsq *sq, int numa)
{
struct mlx5e_xdp_info_fifo *xdpi_fifo = &sq->db.xdpi_fifo;
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int dsegs_per_wq = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
xdpi_fifo->xi = kvzalloc_node(sizeof(*xdpi_fifo->xi) * dsegs_per_wq,
GFP_KERNEL, numa);
if (!xdpi_fifo->xi)
return -ENOMEM;
xdpi_fifo->pc = &sq->xdpi_fifo_pc;
xdpi_fifo->cc = &sq->xdpi_fifo_cc;
xdpi_fifo->mask = dsegs_per_wq - 1;
return 0;
}
static int mlx5e_alloc_xdpsq_db(struct mlx5e_xdpsq *sq, int numa)
{
int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
int err;
sq->db.xdpi = kvzalloc_node(array_size(wq_sz, sizeof(*sq->db.xdpi)),
GFP_KERNEL, numa);
if (!sq->db.xdpi) {
err = mlx5e_alloc_xdpsq_fifo(sq, numa);
if (err) {
mlx5e_free_xdpsq_db(sq);
return -ENOMEM;
return err;
}
return 0;