Commit d4e28cbd authored by Achiad Shochat, committed by David S. Miller

net/mlx5e: Use the right DMA free function on TX path

On the xmit path we map skb fragments with skb_frag_dma_map(), which
uses dma_map_page(), yet upon completion we unmap the fragments with
dma_unmap_single() rather than dma_unmap_page().

To fix this, we now record the DMA map type on the xmit path and use it
to call the right DMA unmap method upon TX completion.
Signed-off-by: Achiad Shochat <achiad@mellanox.com>
Signed-off-by: Saeed Mahameed <saeedm@mellanox.com>
Signed-off-by: Or Gerlitz <ogerlitz@mellanox.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 50a9eea6
@@ -334,9 +334,15 @@ struct mlx5e_tx_skb_cb {
 
 #define MLX5E_TX_SKB_CB(__skb) ((struct mlx5e_tx_skb_cb *)__skb->cb)
 
+enum mlx5e_dma_map_type {
+        MLX5E_DMA_MAP_SINGLE,
+        MLX5E_DMA_MAP_PAGE
+};
+
 struct mlx5e_sq_dma {
-        dma_addr_t addr;
-        u32        size;
+        dma_addr_t              addr;
+        u32                     size;
+        enum mlx5e_dma_map_type type;
 };
 
 enum {
...
@@ -61,39 +61,47 @@ void mlx5e_send_nop(struct mlx5e_sq *sq, bool notify_hw)
         }
 }
 
-static void mlx5e_dma_pop_last_pushed(struct mlx5e_sq *sq, dma_addr_t *addr,
-                                      u32 *size)
+static inline void mlx5e_tx_dma_unmap(struct device *pdev,
+                                      struct mlx5e_sq_dma *dma)
 {
-        sq->dma_fifo_pc--;
-        *addr = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr;
-        *size = sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size;
-}
-
-static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
-{
-        dma_addr_t addr;
-        u32 size;
-        int i;
-
-        for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
-                mlx5e_dma_pop_last_pushed(sq, &addr, &size);
-                dma_unmap_single(sq->pdev, addr, size, DMA_TO_DEVICE);
+        switch (dma->type) {
+        case MLX5E_DMA_MAP_SINGLE:
+                dma_unmap_single(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+                break;
+        case MLX5E_DMA_MAP_PAGE:
+                dma_unmap_page(pdev, dma->addr, dma->size, DMA_TO_DEVICE);
+                break;
+        default:
+                WARN_ONCE(true, "mlx5e_tx_dma_unmap unknown DMA type!\n");
         }
 }
 
-static inline void mlx5e_dma_push(struct mlx5e_sq *sq, dma_addr_t addr,
-                                  u32 size)
+static inline void mlx5e_dma_push(struct mlx5e_sq *sq,
+                                  dma_addr_t addr,
+                                  u32 size,
+                                  enum mlx5e_dma_map_type map_type)
 {
         sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].addr = addr;
         sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].size = size;
+        sq->dma_fifo[sq->dma_fifo_pc & sq->dma_fifo_mask].type = map_type;
         sq->dma_fifo_pc++;
 }
 
-static inline void mlx5e_dma_get(struct mlx5e_sq *sq, u32 i, dma_addr_t *addr,
-                                 u32 *size)
+static inline struct mlx5e_sq_dma *mlx5e_dma_get(struct mlx5e_sq *sq, u32 i)
+{
+        return &sq->dma_fifo[i & sq->dma_fifo_mask];
+}
+
+static void mlx5e_dma_unmap_wqe_err(struct mlx5e_sq *sq, struct sk_buff *skb)
 {
-        *addr = sq->dma_fifo[i & sq->dma_fifo_mask].addr;
-        *size = sq->dma_fifo[i & sq->dma_fifo_mask].size;
+        int i;
+
+        for (i = 0; i < MLX5E_TX_SKB_CB(skb)->num_dma; i++) {
+                struct mlx5e_sq_dma *last_pushed_dma =
+                        mlx5e_dma_get(sq, --sq->dma_fifo_pc);
+
+                mlx5e_tx_dma_unmap(sq->pdev, last_pushed_dma);
+        }
 }
 
 u16 mlx5e_select_queue(struct net_device *dev, struct sk_buff *skb,
@@ -225,7 +233,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                 dseg->lkey       = sq->mkey_be;
                 dseg->byte_count = cpu_to_be32(headlen);
 
-                mlx5e_dma_push(sq, dma_addr, headlen);
+                mlx5e_dma_push(sq, dma_addr, headlen, MLX5E_DMA_MAP_SINGLE);
                 MLX5E_TX_SKB_CB(skb)->num_dma++;
 
                 dseg++;
@@ -244,7 +252,7 @@ static netdev_tx_t mlx5e_sq_xmit(struct mlx5e_sq *sq, struct sk_buff *skb)
                 dseg->lkey       = sq->mkey_be;
                 dseg->byte_count = cpu_to_be32(fsz);
 
-                mlx5e_dma_push(sq, dma_addr, fsz);
+                mlx5e_dma_push(sq, dma_addr, fsz, MLX5E_DMA_MAP_PAGE);
                 MLX5E_TX_SKB_CB(skb)->num_dma++;
 
                 dseg++;
@@ -360,13 +368,10 @@ bool mlx5e_poll_tx_cq(struct mlx5e_cq *cq)
                 }
 
                 for (j = 0; j < MLX5E_TX_SKB_CB(skb)->num_dma; j++) {
-                        dma_addr_t addr;
-                        u32 size;
+                        struct mlx5e_sq_dma *dma =
+                                mlx5e_dma_get(sq, dma_fifo_cc++);
 
-                        mlx5e_dma_get(sq, dma_fifo_cc, &addr, &size);
-                        dma_fifo_cc++;
-                        dma_unmap_single(sq->pdev, addr, size,
-                                         DMA_TO_DEVICE);
+                        mlx5e_tx_dma_unmap(sq->pdev, dma);
                 }
 
                 npkts++;
...
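The fix is an instance of a general resource-management pattern: when a handle can be created by more than one mapping routine, record which routine was used next to the handle, and dispatch the matching release routine at teardown. The sketch below is a minimal user-space analogue of that pattern, not mlx5e code; the names (buf_fifo_push, buf_fifo_release_one, BUF_MAP_HEAP, BUF_MAP_MMAP) are invented for illustration, with malloc()/free() and mmap()/munmap() standing in for the single-buffer and page DMA mappings.

#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>

/* Analogue of enum mlx5e_dma_map_type: remember how each buffer was
 * obtained so the matching release call can be made later.
 */
enum buf_map_type {
        BUF_MAP_HEAP,   /* malloc() -> free() */
        BUF_MAP_MMAP,   /* mmap()   -> munmap() */
};

struct buf_entry {
        void *addr;
        size_t size;
        enum buf_map_type type;
};

#define FIFO_SIZE 8     /* power of two, so (counter & mask) wraps like the driver's dma_fifo */

static struct buf_entry fifo[FIFO_SIZE];
static unsigned int fifo_pc;    /* producer counter, like sq->dma_fifo_pc */
static unsigned int fifo_cc;    /* consumer counter, like dma_fifo_cc */

static void buf_fifo_push(void *addr, size_t size, enum buf_map_type type)
{
        struct buf_entry *e = &fifo[fifo_pc++ & (FIFO_SIZE - 1)];

        e->addr = addr;
        e->size = size;
        e->type = type;
}

/* Counterpart of mlx5e_tx_dma_unmap(): dispatch on the recorded type. */
static void buf_fifo_release_one(void)
{
        struct buf_entry *e = &fifo[fifo_cc++ & (FIFO_SIZE - 1)];

        switch (e->type) {
        case BUF_MAP_HEAP:
                free(e->addr);
                break;
        case BUF_MAP_MMAP:
                munmap(e->addr, e->size);
                break;
        }
}

int main(void)
{
        void *heap_buf = malloc(4096);
        void *map_buf = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (!heap_buf || map_buf == MAP_FAILED)
                return 1;       /* error handling kept minimal for the sketch */

        buf_fifo_push(heap_buf, 4096, BUF_MAP_HEAP);
        buf_fifo_push(map_buf, 4096, BUF_MAP_MMAP);

        /* Releasing with the wrong call (e.g. free() on the mmap'ed buffer)
         * would be a bug, just as dma_unmap_single() on a page mapping was
         * on the TX completion path.
         */
        buf_fifo_release_one();
        buf_fifo_release_one();

        printf("released both buffers with their matching calls\n");
        return 0;
}

Storing the type next to the address and size costs a few bytes per FIFO entry, but it removes all guesswork on the release side; that is the same trade the patch makes by adding the type field to struct mlx5e_sq_dma.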