Commit eb9b9fdc authored by Tariq Toukan, committed by David S. Miller

net/mlx5e: Introduce extended version for mlx5e_xmit_data

Introduce struct mlx5e_xmit_data_frags to be used for non-linear xmit
buffers. Let it include a sinfo pointer.

Take one bit from the len field to indicate whether the descriptor has
fragments and can be cast up into the extended version.

Zero-init the descriptor to make sure that has_frags, and potentially
future fields, are zero when not explicitly assigned.

Another field will be added in a downstream patch to indicate and point
to the DMA addresses of the different frags, for redirect-in requests.

This simplifies the parameters of the mlx5e_xmit_xdp_frame /
mlx5e_xmit_xdp_frame_mpwqe functions. (A minimal standalone sketch of
the cast-up pattern follows the commit metadata below.)
Signed-off-by: Tariq Toukan <tariqt@nvidia.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent e32654f1
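
The heart of the change is a cast-up pattern: the transmit path keeps passing a plain struct mlx5e_xmit_data pointer, a bit borrowed from the len field records whether that descriptor is really the first member of an extended struct, and the callee recovers the extended version with container_of(). The following is a minimal, standalone userspace sketch of that pattern. The names (xmit_data, xmit_data_frags, nr_frags) and the simplified container_of macro are illustrative stand-ins, not the driver's definitions; the real code is in the diff below.

/* sketch.c - cast-up pattern: flag bit in the base struct + container_of() */
#include <stddef.h>
#include <stdio.h>

/* Simplified userspace stand-in for the kernel's container_of() helper. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct xmit_data {
	unsigned int len : 31;		/* one bit taken from len ... */
	unsigned int has_frags : 1;	/* ... to flag the extended variant */
};

struct xmit_data_frags {
	struct xmit_data xd;		/* embedded base descriptor */
	int nr_frags;			/* stand-in for the driver's sinfo pointer */
};

/* The callee only sees the base struct; the flag tells it whether it may
 * cast up to the extended struct with container_of().
 */
static void xmit(struct xmit_data *xd)
{
	if (xd->has_frags) {
		struct xmit_data_frags *xdf =
			container_of(xd, struct xmit_data_frags, xd);

		printf("multi-buffer: len=%u nr_frags=%d\n", xd->len, xdf->nr_frags);
		return;
	}
	printf("linear: len=%u\n", xd->len);
}

int main(void)
{
	/* Designated initializers zero the remaining fields, so has_frags
	 * (and any future bits) defaults to 0 unless set explicitly.
	 */
	struct xmit_data linear = { .len = 64 };
	struct xmit_data_frags frags = {
		.xd = { .len = 256, .has_frags = 1 },
		.nr_frags = 3,
	};

	xmit(&linear);
	xmit(&frags.xd);
	return 0;
}

Compiled with any C compiler, the zero-initialized linear descriptor takes the non-fragmented path because has_frags defaults to 0, which is exactly why the patch zero-inits struct mlx5e_xmit_data instances.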
@@ -487,7 +487,6 @@ struct mlx5e_xmit_data;
 typedef int (*mlx5e_fp_xmit_xdp_frame_check)(struct mlx5e_xdpsq *);
 typedef bool (*mlx5e_fp_xmit_xdp_frame)(struct mlx5e_xdpsq *,
					struct mlx5e_xmit_data *,
-					struct skb_shared_info *,
					int);

 struct mlx5e_xdpsq {
@@ -80,7 +80,13 @@ static inline bool mlx5e_rx_hw_stamp(struct hwtstamp_config *config)
 struct mlx5e_xmit_data {
	dma_addr_t dma_addr;
	void *data;
-	u32 len;
+	u32 len : 31;
+	u32 has_frags : 1;
+};
+
+struct mlx5e_xmit_data_frags {
+	struct mlx5e_xmit_data xd;
+	struct skb_shared_info *sinfo;
 };

 netdev_tx_t mlx5e_xmit(struct sk_buff *skb, struct net_device *dev);
@@ -61,8 +61,8 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
			 struct xdp_buff *xdp)
 {
	struct page *page = virt_to_page(xdp->data);
-	struct skb_shared_info *sinfo = NULL;
-	struct mlx5e_xmit_data xdptxd;
+	struct mlx5e_xmit_data_frags xdptxdf = {};
+	struct mlx5e_xmit_data *xdptxd;
	struct mlx5e_xdp_info xdpi;
	struct xdp_frame *xdpf;
	dma_addr_t dma_addr;
@@ -72,8 +72,10 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
	if (unlikely(!xdpf))
		return false;

-	xdptxd.data = xdpf->data;
-	xdptxd.len = xdpf->len;
+	xdptxd = &xdptxdf.xd;
+	xdptxd->data = xdpf->data;
+	xdptxd->len = xdpf->len;
+	xdptxd->has_frags = xdp_frame_has_frags(xdpf);

	if (xdp->rxq->mem.type == MEM_TYPE_XSK_BUFF_POOL) {
		/* The xdp_buff was in the UMEM and was copied into a newly
@@ -90,19 +92,22 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
		xdpi.mode = MLX5E_XDP_XMIT_MODE_FRAME;

-		dma_addr = dma_map_single(sq->pdev, xdptxd.data, xdptxd.len,
+		if (unlikely(xdptxd->has_frags))
+			return false;
+
+		dma_addr = dma_map_single(sq->pdev, xdptxd->data, xdptxd->len,
					  DMA_TO_DEVICE);
		if (dma_mapping_error(sq->pdev, dma_addr)) {
			xdp_return_frame(xdpf);
			return false;
		}

-		xdptxd.dma_addr = dma_addr;
+		xdptxd->dma_addr = dma_addr;
		xdpi.frame.xdpf = xdpf;
		xdpi.frame.dma_addr = dma_addr;

		if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-					      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0)))
+					      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
			return false;

		mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
@@ -119,13 +124,13 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
	xdpi.page.rq = rq;

	dma_addr = page_pool_get_dma_addr(page) + (xdpf->data - (void *)xdpf);
-	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd.len, DMA_BIDIRECTIONAL);
+	dma_sync_single_for_device(sq->pdev, dma_addr, xdptxd->len, DMA_BIDIRECTIONAL);

-	if (unlikely(xdp_frame_has_frags(xdpf))) {
-		sinfo = xdp_get_shared_info_from_frame(xdpf);
+	if (unlikely(xdptxd->has_frags)) {
+		xdptxdf.sinfo = xdp_get_shared_info_from_frame(xdpf);

-		for (i = 0; i < sinfo->nr_frags; i++) {
-			skb_frag_t *frag = &sinfo->frags[i];
+		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];
			dma_addr_t addr;
			u32 len;
@@ -137,18 +142,18 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq,
		}
	}

-	xdptxd.dma_addr = dma_addr;
+	xdptxd->dma_addr = dma_addr;

	if (unlikely(!INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, sinfo, 0)))
+				      mlx5e_xmit_xdp_frame, sq, xdptxd, 0)))
		return false;

	xdpi.page.page = page;
	mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);

-	if (unlikely(xdp_frame_has_frags(xdpf))) {
-		for (i = 0; i < sinfo->nr_frags; i++) {
-			skb_frag_t *frag = &sinfo->frags[i];
+	if (unlikely(xdptxd->has_frags)) {
+		for (i = 0; i < xdptxdf.sinfo->nr_frags; i++) {
+			skb_frag_t *frag = &xdptxdf.sinfo->frags[i];

			xdpi.page.page = skb_frag_page(frag);
			mlx5e_xdpi_fifo_push(&sq->db.xdpi_fifo, &xdpi);
@@ -381,23 +386,23 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq)
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-		     struct skb_shared_info *sinfo, int check_result);
+		     int check_result);

 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-			   struct skb_shared_info *sinfo, int check_result)
+			   int check_result)
 {
	struct mlx5e_tx_mpwqe *session = &sq->mpwqe;
	struct mlx5e_xdpsq_stats *stats = sq->stats;

-	if (unlikely(sinfo)) {
+	if (unlikely(xdptxd->has_frags)) {
		/* MPWQE is enabled, but a multi-buffer packet is queued for
		 * transmission. MPWQE can't send fragmented packets, so close
		 * the current session and fall back to a regular WQE.
		 */
		if (unlikely(sq->mpwqe.wqe))
			mlx5e_xdp_mpwqe_complete(sq);
-		return mlx5e_xmit_xdp_frame(sq, xdptxd, sinfo, 0);
+		return mlx5e_xmit_xdp_frame(sq, xdptxd, 0);
	}

	if (unlikely(xdptxd->len > sq->hw_mtu)) {
@@ -446,8 +451,10 @@ INDIRECT_CALLABLE_SCOPE int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq)
 INDIRECT_CALLABLE_SCOPE bool
 mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
-		     struct skb_shared_info *sinfo, int check_result)
+		     int check_result)
 {
+	struct mlx5e_xmit_data_frags *xdptxdf =
+		container_of(xdptxd, struct mlx5e_xmit_data_frags, xd);
	struct mlx5_wq_cyc *wq = &sq->wq;
	struct mlx5_wqe_ctrl_seg *cseg;
	struct mlx5_wqe_data_seg *dseg;
@@ -476,9 +483,9 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
	if (!check_result) {
		int stop_room = 1;

-		if (unlikely(sinfo)) {
-			ds_cnt += sinfo->nr_frags;
-			num_frags = sinfo->nr_frags;
+		if (unlikely(xdptxd->has_frags)) {
+			ds_cnt += xdptxdf->sinfo->nr_frags;
+			num_frags = xdptxdf->sinfo->nr_frags;
			num_wqebbs = DIV_ROUND_UP(ds_cnt, MLX5_SEND_WQEBB_NUM_DS);
			/* Assuming MLX5_CAP_GEN(mdev, max_wqe_sz_sq) is big
			 * enough to hold all fragments.
@@ -529,7 +536,7 @@ mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq, struct mlx5e_xmit_data *xdptxd,
	dseg->lkey = sq->mkey_be;

	for (i = 0; i < num_frags; i++) {
-		skb_frag_t *frag = &sinfo->frags[i];
+		skb_frag_t *frag = &xdptxdf->sinfo->frags[i];
		dma_addr_t addr;

		addr = page_pool_get_dma_addr(skb_frag_page(frag)) +
@@ -718,7 +725,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
	for (i = 0; i < n; i++) {
		struct xdp_frame *xdpf = frames[i];
-		struct mlx5e_xmit_data xdptxd;
+		struct mlx5e_xmit_data xdptxd = {};
		struct mlx5e_xdp_info xdpi;
		bool ret;
@@ -735,7 +742,7 @@ int mlx5e_xdp_xmit(struct net_device *dev, int n, struct xdp_frame **frames,
		xdpi.frame.dma_addr = xdptxd.dma_addr;

		ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
-				      mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, 0);
+				      mlx5e_xmit_xdp_frame, sq, &xdptxd, 0);
		if (unlikely(!ret)) {
			dma_unmap_single(sq->pdev, xdptxd.dma_addr,
					 xdptxd.len, DMA_TO_DEVICE);
@@ -101,11 +101,9 @@ extern const struct xdp_metadata_ops mlx5e_xdp_metadata_ops;
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame_mpwqe(struct mlx5e_xdpsq *sq,
							   struct mlx5e_xmit_data *xdptxd,
-							   struct skb_shared_info *sinfo,
							   int check_result));
 INDIRECT_CALLABLE_DECLARE(bool mlx5e_xmit_xdp_frame(struct mlx5e_xdpsq *sq,
						     struct mlx5e_xmit_data *xdptxd,
-						     struct skb_shared_info *sinfo,
						     int check_result));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check_mpwqe(struct mlx5e_xdpsq *sq));
 INDIRECT_CALLABLE_DECLARE(int mlx5e_xmit_xdp_frame_check(struct mlx5e_xdpsq *sq));
...@@ -61,7 +61,6 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq, ...@@ -61,7 +61,6 @@ static void mlx5e_xsk_tx_post_err(struct mlx5e_xdpsq *sq,
bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
{ {
struct xsk_buff_pool *pool = sq->xsk_pool; struct xsk_buff_pool *pool = sq->xsk_pool;
struct mlx5e_xmit_data xdptxd;
struct mlx5e_xdp_info xdpi; struct mlx5e_xdp_info xdpi;
bool work_done = true; bool work_done = true;
bool flush = false; bool flush = false;
...@@ -73,6 +72,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) ...@@ -73,6 +72,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
mlx5e_xmit_xdp_frame_check_mpwqe, mlx5e_xmit_xdp_frame_check_mpwqe,
mlx5e_xmit_xdp_frame_check, mlx5e_xmit_xdp_frame_check,
sq); sq);
struct mlx5e_xmit_data xdptxd = {};
struct xdp_desc desc; struct xdp_desc desc;
bool ret; bool ret;
...@@ -97,7 +97,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget) ...@@ -97,7 +97,7 @@ bool mlx5e_xsk_tx(struct mlx5e_xdpsq *sq, unsigned int budget)
xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len); xsk_buff_raw_dma_sync_for_device(pool, xdptxd.dma_addr, xdptxd.len);
ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe, ret = INDIRECT_CALL_2(sq->xmit_xdp_frame, mlx5e_xmit_xdp_frame_mpwqe,
mlx5e_xmit_xdp_frame, sq, &xdptxd, NULL, mlx5e_xmit_xdp_frame, sq, &xdptxd,
check_result); check_result);
if (unlikely(!ret)) { if (unlikely(!ret)) {
if (sq->mpwqe.wqe) if (sq->mpwqe.wqe)
......