Commit 2e9361ef authored by Yunsheng Lin's avatar Yunsheng Lin Committed by David S. Miller

net: hns: fix for unmapping problem when SMMU is on

If SMMU is on, it is more likely that skb_shinfo(skb)->frags[i]
cannot be sent in a single BD. When this happens, the
hns_nic_net_xmit_hw function maps the whole data of a frag with one
skb_frag_dma_map call, but unmaps each BD's data individually when tx
is done, which causes problems when SMMU is on.

This patch fixes this problem by unmapping the whole data of a
frag at once when tx is done.
Signed-off-by: default avatarYunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: default avatarPeng Li <lipeng321@huawei.com>
Reviewed-by: default avatarYisen Zhuang <yisen.zhuang@huawei.com>
Signed-off-by: default avatarSalil Mehta <salil.mehta@huawei.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 970f1713
...@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb) ...@@ -84,7 +84,7 @@ static void hnae_unmap_buffer(struct hnae_ring *ring, struct hnae_desc_cb *cb)
if (cb->type == DESC_TYPE_SKB) if (cb->type == DESC_TYPE_SKB)
dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length, dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring)); ring_to_dma_dir(ring));
else else if (cb->length)
dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length, dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
ring_to_dma_dir(ring)); ring_to_dma_dir(ring));
} }
......
...@@ -40,8 +40,8 @@ ...@@ -40,8 +40,8 @@
#define SKB_TMP_LEN(SKB) \ #define SKB_TMP_LEN(SKB) \
(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB)) (((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))
static void fill_v2_desc(struct hnae_ring *ring, void *priv, static void fill_v2_desc_hw(struct hnae_ring *ring, void *priv, int size,
int size, dma_addr_t dma, int frag_end, int send_sz, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu) int buf_num, enum hns_desc_type type, int mtu)
{ {
struct hnae_desc *desc = &ring->desc[ring->next_to_use]; struct hnae_desc *desc = &ring->desc[ring->next_to_use];
...@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, ...@@ -64,7 +64,7 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
desc_cb->type = type; desc_cb->type = type;
desc->addr = cpu_to_le64(dma); desc->addr = cpu_to_le64(dma);
desc->tx.send_size = cpu_to_le16((u16)size); desc->tx.send_size = cpu_to_le16((u16)send_sz);
/* config bd buffer end */ /* config bd buffer end */
hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1); hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
...@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv, ...@@ -133,6 +133,14 @@ static void fill_v2_desc(struct hnae_ring *ring, void *priv,
ring_ptr_move_fw(ring, next_to_use); ring_ptr_move_fw(ring, next_to_use);
} }
/* Non-split path: fill a single BD for a buffer that fits in one descriptor.
 * Delegates to fill_v2_desc_hw with send_sz == size, i.e. the hardware send
 * size written to desc->tx.send_size equals the full mapped length.  The
 * split (TSO) path instead calls fill_v2_desc_hw directly so that cb->length
 * carries the whole frag's mapped length on only the first BD (send size and
 * unmap length differ there).
 */
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
int size, dma_addr_t dma, int frag_end,
int buf_num, enum hns_desc_type type, int mtu)
{
fill_v2_desc_hw(ring, priv, size, size, dma, frag_end,
buf_num, type, mtu);
}
static const struct acpi_device_id hns_enet_acpi_match[] = { static const struct acpi_device_id hns_enet_acpi_match[] = {
{ "HISI00C1", 0 }, { "HISI00C1", 0 },
{ "HISI00C2", 0 }, { "HISI00C2", 0 },
...@@ -289,7 +297,7 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv, ...@@ -289,7 +297,7 @@ static void fill_tso_desc(struct hnae_ring *ring, void *priv,
/* when the frag size is bigger than hardware, split this frag */ /* when the frag size is bigger than hardware, split this frag */
for (k = 0; k < frag_buf_num; k++) for (k = 0; k < frag_buf_num; k++)
fill_v2_desc(ring, priv, fill_v2_desc_hw(ring, priv, k == 0 ? size : 0,
(k == frag_buf_num - 1) ? (k == frag_buf_num - 1) ?
sizeoflast : BD_MAX_SEND_SIZE, sizeoflast : BD_MAX_SEND_SIZE,
dma + BD_MAX_SEND_SIZE * k, dma + BD_MAX_SEND_SIZE * k,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment