Commit 79008676 authored by Maxim Mikityanskiy, committed by Jakub Kicinski

net/mlx5e: Rename mlx5e_dma_info to prepare for removal of DMA address

The next commit will remove the DMA address from the struct currently
called mlx5e_dma_info, because the same value can be retrieved with
page_pool_get_dma_addr(page) in almost all cases, with the notable
exception of SHAMPO (HW GRO implementation) that modifies this address
on the fly, after the initial allocation.
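
As a sketch of that replacement pattern (not part of this patch; the
frag_dma_addr() helper below is hypothetical, while page_pool_get_dma_addr()
is the real page pool API):

#include <net/page_pool.h>

/* Hypothetical helper, for illustration only: recover the DMA address
 * from the page itself rather than caching it per fragment. This
 * assumes the page came from a pool created with PP_FLAG_DMA_MAP, so
 * the pool recorded the mapping at allocation time.
 */
static dma_addr_t frag_dma_addr(struct page *page, u32 offset)
{
	return page_pool_get_dma_addr(page) + offset;
}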

To keep the SHAMPO logic intact, struct mlx5e_dma_info remains in the
SHAMPO code, consisting of addr and page (XSK is not compatible with
SHAMPO). The struct used in all other places is renamed to
mlx5e_alloc_unit, allowing the next commit to remove the addr field
without affecting SHAMPO.
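
Concretely, the two layouts after this commit look as follows (consolidated
from the hunks below; the en.h hunk is truncated before the xsk member, whose
use is visible in the en/xsk changes):

/* Renamed: used on all datapaths except SHAMPO; addr is the field the
 * next commit removes.
 */
struct mlx5e_alloc_unit {
	dma_addr_t addr;
	union {
		struct page *page;
		struct xdp_buff *xsk;
	};
};

/* Kept for SHAMPO only, which rewrites addr after allocation. */
struct mlx5e_dma_info {
	dma_addr_t addr;
	struct page *page;
};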

The new name means "allocation unit", and it's more appropriate after
the field with the DMA address gets removed.
Signed-off-by: Maxim Mikityanskiy <maximmi@nvidia.com>
Reviewed-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parent 707f908e
@@ -474,7 +474,7 @@ struct mlx5e_txqsq {
 	cqe_ts_to_ns ptp_cyc2time;
 } ____cacheline_aligned_in_smp;
 
-struct mlx5e_dma_info {
+struct mlx5e_alloc_unit {
 	dma_addr_t addr;
 	union {
 		struct page *page;
@@ -608,7 +608,7 @@ struct mlx5e_icosq {
 } ____cacheline_aligned_in_smp;
 
 struct mlx5e_wqe_frag_info {
-	struct mlx5e_dma_info *di;
+	struct mlx5e_alloc_unit *au;
 	u32 offset;
 	bool last_in_page;
 };
@@ -616,7 +616,7 @@ struct mlx5e_wqe_frag_info {
 struct mlx5e_mpw_info {
 	u16 consumed_strides;
 	DECLARE_BITMAP(xdp_xmit_bitmap, MLX5_MPWRQ_MAX_PAGES_PER_WQE);
-	struct mlx5e_dma_info dma_info[];
+	struct mlx5e_alloc_unit alloc_units[];
 };
 
 #define MLX5E_MAX_RX_FRAGS 4
@@ -665,6 +665,11 @@ struct mlx5e_rq_frags_info {
 	u8 wqe_bulk;
 };
 
+struct mlx5e_dma_info {
+	dma_addr_t addr;
+	struct page *page;
+};
+
 struct mlx5e_shampo_hd {
 	u32 mkey;
 	struct mlx5e_dma_info *info;
@@ -690,7 +695,7 @@ struct mlx5e_rq {
 		struct {
 			struct mlx5_wq_cyc wq;
 			struct mlx5e_wqe_frag_info *frags;
-			struct mlx5e_dma_info *di;
+			struct mlx5e_alloc_unit *alloc_units;
 			struct mlx5e_rq_frags_info info;
 			mlx5e_fp_skb_from_cqe skb_from_cqe;
 		} wqe;
@@ -1142,8 +1147,6 @@ void mlx5e_destroy_q_counters(struct mlx5e_priv *priv);
 int mlx5e_open_drop_rq(struct mlx5e_priv *priv,
 		       struct mlx5e_rq *drop_rq);
 void mlx5e_close_drop_rq(struct mlx5e_rq *drop_rq);
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node);
-void mlx5e_free_di_list(struct mlx5e_rq *rq);
 int mlx5e_create_tis(struct mlx5_core_dev *mdev, void *in, u32 *tisn);
 void mlx5e_destroy_tis(struct mlx5_core_dev *mdev, u32 tisn);
 
......
@@ -30,7 +30,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq,
 					      u32 head_offset,
 					      u32 page_idx)
 {
-	struct xdp_buff *xdp = wi->dma_info[page_idx].xsk;
+	struct xdp_buff *xdp = wi->alloc_units[page_idx].xsk;
 	struct bpf_prog *prog;
 
 	/* Check packet size. Note LRO doesn't use linear SKB */
@@ -83,7 +83,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 					  struct mlx5e_wqe_frag_info *wi,
 					  u32 cqe_bcnt)
 {
-	struct xdp_buff *xdp = wi->di->xsk;
+	struct xdp_buff *xdp = wi->au->xsk;
 	struct bpf_prog *prog;
 
 	/* wi->offset is not used in this function, because xdp->data and the
......
@@ -19,10 +19,10 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq,
 			      u32 cqe_bcnt);
 
 static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
-					    struct mlx5e_dma_info *dma_info)
+					    struct mlx5e_alloc_unit *au)
 {
-	dma_info->xsk = xsk_buff_alloc(rq->xsk_pool);
-	if (!dma_info->xsk)
+	au->xsk = xsk_buff_alloc(rq->xsk_pool);
+	if (!au->xsk)
 		return -ENOMEM;
 
 	/* Store the DMA address without headroom. In striding RQ case, we just
@@ -30,7 +30,7 @@ static inline int mlx5e_xsk_page_alloc_pool(struct mlx5e_rq *rq,
 	 * when creating a WQE. In non-striding RQ case, headroom is accounted
 	 * in mlx5e_alloc_rx_wqe.
 	 */
-	dma_info->addr = xsk_buff_xdp_get_frame_dma(dma_info->xsk);
+	au->addr = xsk_buff_xdp_get_frame_dma(au->xsk);
 
 	return 0;
 }
......
@@ -271,7 +271,7 @@ static int mlx5e_rq_alloc_mpwqe_info(struct mlx5e_rq *rq, int node)
 	int wq_sz = mlx5_wq_ll_get_size(&rq->mpwqe.wq);
 	size_t alloc_size;
 
-	alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, dma_info,
+	alloc_size = array_size(wq_sz, struct_size(rq->mpwqe.info, alloc_units,
 						   rq->mpwqe.pages_per_wqe));
 
 	rq->mpwqe.info = kvzalloc_node(alloc_size, GFP_KERNEL, node);
@@ -433,7 +433,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 	struct mlx5e_wqe_frag_info *prev = NULL;
 	int i;
 
-	next_frag.di = &rq->wqe.di[0];
+	next_frag.au = &rq->wqe.alloc_units[0];
 
 	for (i = 0; i < mlx5_wq_cyc_get_size(&rq->wqe.wq); i++) {
 		struct mlx5e_rq_frag_info *frag_info = &rq->wqe.info.arr[0];
@@ -443,7 +443,7 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 
 		for (f = 0; f < rq->wqe.info.num_frags; f++, frag++) {
 			if (next_frag.offset + frag_info[f].frag_stride > PAGE_SIZE) {
-				next_frag.di++;
+				next_frag.au++;
 				next_frag.offset = 0;
 				if (prev)
 					prev->last_in_page = true;
@@ -460,12 +460,13 @@ static void mlx5e_init_frags_partition(struct mlx5e_rq *rq)
 		prev->last_in_page = true;
 }
 
-int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
+static int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
 {
 	int len = wq_sz << rq->wqe.info.log_num_frags;
 
-	rq->wqe.di = kvzalloc_node(array_size(len, sizeof(*rq->wqe.di)), GFP_KERNEL, node);
-	if (!rq->wqe.di)
+	rq->wqe.alloc_units = kvzalloc_node(array_size(len, sizeof(*rq->wqe.alloc_units)),
+					    GFP_KERNEL, node);
+	if (!rq->wqe.alloc_units)
 		return -ENOMEM;
 
 	mlx5e_init_frags_partition(rq);
@@ -473,9 +474,9 @@ int mlx5e_init_di_list(struct mlx5e_rq *rq, int wq_sz, int node)
 	return 0;
 }
 
-void mlx5e_free_di_list(struct mlx5e_rq *rq)
+static void mlx5e_free_di_list(struct mlx5e_rq *rq)
 {
-	kvfree(rq->wqe.di);
+	kvfree(rq->wqe.alloc_units);
 }
 
 static void mlx5e_rq_err_cqe_work(struct work_struct *recover_work)
......