Commit 3d5f3741 authored by Yunsheng Lin, committed by David S. Miller

net: hns3: unify maybe_stop_tx for TSO and non-TSO case

Currently, the maybe_stop_tx ops for the TSO and non-TSO cases share some BD
calculation code, so this patch unifies them by removing the maybe_stop_tx
ops altogether. skb_is_gso() can be used to tell the TSO and non-TSO cases
apart if the TSO case ever needs special handling.

This patch also adds a tx_copy field to the "ethtool --statistics" output to
help debug performance issues caused by calling skb_copy.
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 09934b03
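
The sketch below is illustrative only and not part of the patch: it shows how, with the ops indirection gone, a TSO special case could be hung off skb_is_gso() inside the single unified path, as the commit message suggests. The function name is hypothetical; hns3_nic_bd_num() and ring_space() are the helpers used in the diff below.

	/* Illustrative sketch, not from the patch: a unified stop-tx check
	 * that could branch on skb_is_gso() if TSO ever needed extra work.
	 */
	static int sketch_maybe_stop_tx(struct hns3_enet_ring *ring,
					struct sk_buff **out_skb)
	{
		struct sk_buff *skb = *out_skb;
		int bd_num = hns3_nic_bd_num(skb);	/* BDs this skb needs */

		if (bd_num < 0)
			return bd_num;	/* propagate -ENOMEM */

		if (skb_is_gso(skb)) {
			/* TSO-only handling would live here; today the
			 * unified path needs none, hence one function.
			 */
		}

		if (unlikely(ring_space(ring) < bd_num))
			return -EBUSY;	/* caller stops the queue */

		return bd_num;
	}
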
drivers/net/ethernet/hisilicon/hns3/hns3_enet.c
@@ -1154,64 +1154,48 @@ static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
 	return 0;
 }
 
-static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
-				   struct hns3_enet_ring *ring)
+static int hns3_nic_bd_num(struct sk_buff *skb)
 {
-	struct sk_buff *skb = *out_skb;
-	struct sk_buff *new_skb = NULL;
-	struct skb_frag_struct *frag;
-	int bdnum_for_frag;
-	int frag_num;
-	int buf_num;
-	int size;
-	int i;
+	int size = skb_headlen(skb);
+	int i, bd_num;
 
-	size = skb_headlen(skb);
-	buf_num = hns3_tx_bd_count(size);
+	/* if the total len is within the max bd limit */
+	if (likely(skb->len <= HNS3_MAX_BD_SIZE))
+		return skb_shinfo(skb)->nr_frags + 1;
 
-	frag_num = skb_shinfo(skb)->nr_frags;
-	for (i = 0; i < frag_num; i++) {
-		frag = &skb_shinfo(skb)->frags[i];
-		size = skb_frag_size(frag);
-		bdnum_for_frag = hns3_tx_bd_count(size);
-		if (unlikely(bdnum_for_frag > HNS3_MAX_BD_PER_FRAG))
-			return -ENOMEM;
+	bd_num = hns3_tx_bd_count(size);
 
-		buf_num += bdnum_for_frag;
-	}
+	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+		struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];
+		int frag_bd_num;
 
-	if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-		buf_num = hns3_tx_bd_count(skb->len);
-		if (ring_space(ring) < buf_num)
-			return -EBUSY;
+		size = skb_frag_size(frag);
+		frag_bd_num = hns3_tx_bd_count(size);
 
-		/* manual split the send packet */
-		new_skb = skb_copy(skb, GFP_ATOMIC);
-		if (!new_skb)
+		if (unlikely(frag_bd_num > HNS3_MAX_BD_PER_FRAG))
 			return -ENOMEM;
-		dev_kfree_skb_any(skb);
-		*out_skb = new_skb;
-	}
 
-	if (unlikely(ring_space(ring) < buf_num))
-		return -EBUSY;
+		bd_num += frag_bd_num;
+	}
 
-	*bnum = buf_num;
-	return 0;
+	return bd_num;
 }
 
-static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
-				  struct hns3_enet_ring *ring)
+static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
				  struct sk_buff **out_skb)
 {
 	struct sk_buff *skb = *out_skb;
-	struct sk_buff *new_skb = NULL;
-	int buf_num;
+	int bd_num;
 
-	/* No. of segments (plus a header) */
-	buf_num = skb_shinfo(skb)->nr_frags + 1;
+	bd_num = hns3_nic_bd_num(skb);
+	if (bd_num < 0)
+		return bd_num;
 
-	if (unlikely(buf_num > HNS3_MAX_BD_PER_FRAG)) {
-		buf_num = hns3_tx_bd_count(skb->len);
-		if (ring_space(ring) < buf_num)
+	if (unlikely(bd_num > HNS3_MAX_BD_PER_FRAG)) {
+		struct sk_buff *new_skb;
+
+		bd_num = hns3_tx_bd_count(skb->len);
+		if (unlikely(ring_space(ring) < bd_num))
 			return -EBUSY;
 
 		/* manual split the send packet */
 		new_skb = skb_copy(skb, GFP_ATOMIC);
@@ -1219,14 +1203,16 @@ static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
 			return -ENOMEM;
 		dev_kfree_skb_any(skb);
 		*out_skb = new_skb;
+
+		u64_stats_update_begin(&ring->syncp);
+		ring->stats.tx_copy++;
+		u64_stats_update_end(&ring->syncp);
 	}
 
-	if (unlikely(ring_space(ring) < buf_num))
+	if (unlikely(ring_space(ring) < bd_num))
 		return -EBUSY;
 
-	*bnum = buf_num;
-
-	return 0;
+	return bd_num;
 }
 
 static void hns3_clear_desc(struct hns3_enet_ring *ring, int next_to_use_orig)
@@ -1277,22 +1263,23 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	/* Prefetch the data used later */
 	prefetch(skb->data);
 
-	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
-	case -EBUSY:
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.tx_busy++;
-		u64_stats_update_end(&ring->syncp);
+	buf_num = hns3_nic_maybe_stop_tx(ring, &skb);
+	if (unlikely(buf_num <= 0)) {
+		if (buf_num == -EBUSY) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.tx_busy++;
+			u64_stats_update_end(&ring->syncp);
+			goto out_net_tx_busy;
+		} else if (buf_num == -ENOMEM) {
+			u64_stats_update_begin(&ring->syncp);
+			ring->stats.sw_err_cnt++;
+			u64_stats_update_end(&ring->syncp);
+		}
 
-		goto out_net_tx_busy;
-	case -ENOMEM:
-		u64_stats_update_begin(&ring->syncp);
-		ring->stats.sw_err_cnt++;
-		u64_stats_update_end(&ring->syncp);
-		netdev_err(netdev, "no memory to xmit!\n");
+		if (net_ratelimit())
+			netdev_err(netdev, "xmit error: %d!\n", buf_num);
+
 		goto out_err_tx_ok;
-	default:
-		break;
 	}
 
 	/* No. of segments (plus a header) */
@@ -1397,13 +1384,6 @@ static int hns3_nic_set_features(struct net_device *netdev,
 	bool enable;
 	int ret;
 
-	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
-		if (features & (NETIF_F_TSO | NETIF_F_TSO6))
-			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-		else
-			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-	}
-
 	if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
 		enable = !!(features & NETIF_F_GRO_HW);
 		ret = h->ae_algo->ops->set_gro_en(h, enable);
@@ -3733,17 +3713,6 @@ static void hns3_del_all_fd_rules(struct net_device *netdev, bool clear_list)
 	h->ae_algo->ops->del_all_fd_entries(h, clear_list);
 }
 
-static void hns3_nic_set_priv_ops(struct net_device *netdev)
-{
-	struct hns3_nic_priv *priv = netdev_priv(netdev);
-
-	if ((netdev->features & NETIF_F_TSO) ||
-	    (netdev->features & NETIF_F_TSO6))
-		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
-	else
-		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
-}
-
 static int hns3_client_start(struct hnae3_handle *handle)
 {
 	if (!handle->ae_algo->ops->client_start)
@@ -3810,7 +3779,6 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	netdev->netdev_ops = &hns3_nic_netdev_ops;
 	SET_NETDEV_DEV(netdev, &pdev->dev);
 	hns3_ethtool_set_ops(netdev);
-	hns3_nic_set_priv_ops(netdev);
 
 	/* Carrier off reporting is important to ethtool even BEFORE open */
 	netif_carrier_off(netdev);
drivers/net/ethernet/hisilicon/hns3/hns3_enet.h
@@ -376,6 +376,7 @@ struct ring_stats {
 			u64 tx_err_cnt;
 			u64 restart_queue;
 			u64 tx_busy;
+			u64 tx_copy;
 		};
 		struct {
 			u64 rx_pkts;
@@ -444,11 +445,6 @@ struct hns3_nic_ring_data {
 	void (*fini_process)(struct hns3_nic_ring_data *);
 };
 
-struct hns3_nic_ops {
-	int (*maybe_stop_tx)(struct sk_buff **out_skb,
-			     int *bnum, struct hns3_enet_ring *ring);
-};
-
 enum hns3_flow_level_range {
 	HNS3_FLOW_LOW = 0,
 	HNS3_FLOW_MID = 1,
@@ -538,7 +534,6 @@ struct hns3_nic_priv {
 	u32 port_id;
 	struct net_device *netdev;
 	struct device *dev;
-	struct hns3_nic_ops ops;
 
 	/**
 	 * the cb for nic to manage the ring buffer, the first half of the
drivers/net/ethernet/hisilicon/hns3/hns3_ethtool.c
@@ -29,6 +29,7 @@ static const struct hns3_stats hns3_txq_stats[] = {
 	HNS3_TQP_STAT("errors", tx_err_cnt),
 	HNS3_TQP_STAT("wake", restart_queue),
 	HNS3_TQP_STAT("busy", tx_busy),
+	HNS3_TQP_STAT("copy", tx_copy),
 };
 
 #define HNS3_TXQ_STATS_COUNT ARRAY_SIZE(hns3_txq_stats)
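
With the stat wired up above, the counter can be read per tx queue via ethtool -S <dev> (device name assumed); a steadily increasing "copy" value means oversized skbs are being linearized through skb_copy(), which is exactly the slow path this counter was added to expose.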