Commit b061d14f authored by David S. Miller

Merge branch 'hns3-cleanups'

Guangbin Huang says:

====================
net: hns3: some cleanups for -next

To improve code readability and simplicity, this series adds 9 cleanup
patches for the HNS3 ethernet driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ce8299b6 1b33341e
@@ -1002,9 +1002,7 @@ static bool hns3_can_use_tx_bounce(struct hns3_enet_ring *ring,
         return false;
 
     if (ALIGN(len, dma_get_cache_alignment()) > space) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.tx_spare_full++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, tx_spare_full);
         return false;
     }
@@ -1021,9 +1019,7 @@ static bool hns3_can_use_tx_sgl(struct hns3_enet_ring *ring,
         return false;
 
     if (space < HNS3_MAX_SGL_SIZE) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.tx_spare_full++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, tx_spare_full);
         return false;
     }
@@ -1548,92 +1544,122 @@ static bool hns3_check_hw_tx_csum(struct sk_buff *skb)
     return true;
 }
 
-static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
-                  struct sk_buff *skb, struct hns3_desc *desc,
-                  struct hns3_desc_cb *desc_cb)
+struct hns3_desc_param {
+    u32 paylen_ol4cs;
+    u32 ol_type_vlan_len_msec;
+    u32 type_cs_vlan_tso;
+    u16 mss_hw_csum;
+    u16 inner_vtag;
+    u16 out_vtag;
+};
+
+static void hns3_init_desc_data(struct sk_buff *skb, struct hns3_desc_param *pa)
+{
+    pa->paylen_ol4cs = skb->len;
+    pa->ol_type_vlan_len_msec = 0;
+    pa->type_cs_vlan_tso = 0;
+    pa->mss_hw_csum = 0;
+    pa->inner_vtag = 0;
+    pa->out_vtag = 0;
+}
+
+static int hns3_handle_vlan_info(struct hns3_enet_ring *ring,
+                 struct sk_buff *skb,
+                 struct hns3_desc_param *param)
 {
-    u32 ol_type_vlan_len_msec = 0;
-    u32 paylen_ol4cs = skb->len;
-    u32 type_cs_vlan_tso = 0;
-    u16 mss_hw_csum = 0;
-    u16 inner_vtag = 0;
-    u16 out_vtag = 0;
     int ret;
 
     ret = hns3_handle_vtags(ring, skb);
     if (unlikely(ret < 0)) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.tx_vlan_err++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, tx_vlan_err);
         return ret;
     } else if (ret == HNS3_INNER_VLAN_TAG) {
-        inner_vtag = skb_vlan_tag_get(skb);
-        inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
-                VLAN_PRIO_MASK;
-        hns3_set_field(type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
+        param->inner_vtag = skb_vlan_tag_get(skb);
+        param->inner_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+                VLAN_PRIO_MASK;
+        hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_VLAN_B, 1);
     } else if (ret == HNS3_OUTER_VLAN_TAG) {
-        out_vtag = skb_vlan_tag_get(skb);
-        out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
-                VLAN_PRIO_MASK;
-        hns3_set_field(ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
-                   1);
+        param->out_vtag = skb_vlan_tag_get(skb);
+        param->out_vtag |= (skb->priority << VLAN_PRIO_SHIFT) &
+                VLAN_PRIO_MASK;
+        hns3_set_field(param->ol_type_vlan_len_msec, HNS3_TXD_OVLAN_B,
+                   1);
     }
+    return 0;
+}
 
-    desc_cb->send_bytes = skb->len;
-
-    if (skb->ip_summed == CHECKSUM_PARTIAL) {
-        u8 ol4_proto, il4_proto;
+static int hns3_handle_csum_partial(struct hns3_enet_ring *ring,
+                    struct sk_buff *skb,
+                    struct hns3_desc_cb *desc_cb,
+                    struct hns3_desc_param *param)
+{
+    u8 ol4_proto, il4_proto;
+    int ret;
 
-        if (hns3_check_hw_tx_csum(skb)) {
-            /* set checksum start and offset, defined in 2 Bytes */
-            hns3_set_field(type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
-                       skb_checksum_start_offset(skb) >> 1);
-            hns3_set_field(ol_type_vlan_len_msec,
-                       HNS3_TXD_CSUM_OFFSET_S,
-                       skb->csum_offset >> 1);
-            mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
-            goto out_hw_tx_csum;
-        }
+    if (hns3_check_hw_tx_csum(skb)) {
+        /* set checksum start and offset, defined in 2 Bytes */
+        hns3_set_field(param->type_cs_vlan_tso, HNS3_TXD_CSUM_START_S,
+                   skb_checksum_start_offset(skb) >> 1);
+        hns3_set_field(param->ol_type_vlan_len_msec,
+                   HNS3_TXD_CSUM_OFFSET_S,
+                   skb->csum_offset >> 1);
+        param->mss_hw_csum |= BIT(HNS3_TXD_HW_CS_B);
+        return 0;
+    }
 
-        skb_reset_mac_len(skb);
+    skb_reset_mac_len(skb);
 
-        ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
-        if (unlikely(ret < 0)) {
-            u64_stats_update_begin(&ring->syncp);
-            ring->stats.tx_l4_proto_err++;
-            u64_stats_update_end(&ring->syncp);
-            return ret;
-        }
+    ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
+    if (unlikely(ret < 0)) {
+        hns3_ring_stats_update(ring, tx_l4_proto_err);
+        return ret;
+    }
 
-        ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
-                      &type_cs_vlan_tso,
-                      &ol_type_vlan_len_msec);
-        if (unlikely(ret < 0)) {
-            u64_stats_update_begin(&ring->syncp);
-            ring->stats.tx_l2l3l4_err++;
-            u64_stats_update_end(&ring->syncp);
-            return ret;
-        }
+    ret = hns3_set_l2l3l4(skb, ol4_proto, il4_proto,
+                  &param->type_cs_vlan_tso,
+                  &param->ol_type_vlan_len_msec);
+    if (unlikely(ret < 0)) {
+        hns3_ring_stats_update(ring, tx_l2l3l4_err);
+        return ret;
+    }
 
-        ret = hns3_set_tso(skb, &paylen_ol4cs, &mss_hw_csum,
-                   &type_cs_vlan_tso, &desc_cb->send_bytes);
-        if (unlikely(ret < 0)) {
-            u64_stats_update_begin(&ring->syncp);
-            ring->stats.tx_tso_err++;
-            u64_stats_update_end(&ring->syncp);
-            return ret;
-        }
+    ret = hns3_set_tso(skb, &param->paylen_ol4cs, &param->mss_hw_csum,
+               &param->type_cs_vlan_tso, &desc_cb->send_bytes);
+    if (unlikely(ret < 0)) {
+        hns3_ring_stats_update(ring, tx_tso_err);
+        return ret;
+    }
+    return 0;
+}
+
+static int hns3_fill_skb_desc(struct hns3_enet_ring *ring,
+                  struct sk_buff *skb, struct hns3_desc *desc,
+                  struct hns3_desc_cb *desc_cb)
+{
+    struct hns3_desc_param param;
+    int ret;
+
+    hns3_init_desc_data(skb, &param);
+    ret = hns3_handle_vlan_info(ring, skb, &param);
+    if (unlikely(ret < 0))
+        return ret;
+
+    desc_cb->send_bytes = skb->len;
+
+    if (skb->ip_summed == CHECKSUM_PARTIAL) {
+        ret = hns3_handle_csum_partial(ring, skb, desc_cb, &param);
+        if (ret)
+            return ret;
     }
 
-out_hw_tx_csum:
     /* Set txbd */
     desc->tx.ol_type_vlan_len_msec =
-        cpu_to_le32(ol_type_vlan_len_msec);
-    desc->tx.type_cs_vlan_tso_len = cpu_to_le32(type_cs_vlan_tso);
-    desc->tx.paylen_ol4cs = cpu_to_le32(paylen_ol4cs);
-    desc->tx.mss_hw_csum = cpu_to_le16(mss_hw_csum);
-    desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
-    desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
+        cpu_to_le32(param.ol_type_vlan_len_msec);
+    desc->tx.type_cs_vlan_tso_len = cpu_to_le32(param.type_cs_vlan_tso);
+    desc->tx.paylen_ol4cs = cpu_to_le32(param.paylen_ol4cs);
+    desc->tx.mss_hw_csum = cpu_to_le16(param.mss_hw_csum);
+    desc->tx.vlan_tag = cpu_to_le16(param.inner_vtag);
+    desc->tx.outer_vlan_tag = cpu_to_le16(param.out_vtag);
 
     return 0;
 }
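Note on the descriptor refactor above: the per-packet fields are now gathered in struct hns3_desc_param, and the VLAN path folds the skb priority into the tag before it is written to the TX BD. The snippet below is only a stand-alone sketch of that tag arithmetic; build_vlan_tag() is a hypothetical helper, and VLAN_PRIO_SHIFT/VLAN_PRIO_MASK are redefined locally with the usual 802.1Q values instead of coming from <linux/if_vlan.h>.

#include <stdint.h>
#include <stdio.h>

/* Redefined locally so the example builds outside the kernel tree. */
#define VLAN_PRIO_SHIFT 13
#define VLAN_PRIO_MASK  0xe000

/* Fold an 802.1Q priority into a VLAN tag the way hns3_handle_vlan_info()
 * does: keep the 12-bit VID and OR in the 3-bit PCP field.
 */
static uint16_t build_vlan_tag(uint16_t vid, uint8_t prio)
{
    uint16_t tag = vid;

    tag |= ((uint16_t)prio << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK;
    return tag;
}

int main(void)
{
    /* VID 100 with priority 5 -> 0xa064 */
    printf("0x%04x\n", build_vlan_tag(100, 5));
    return 0;
}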
@@ -1713,9 +1739,7 @@ static int hns3_map_and_fill_desc(struct hns3_enet_ring *ring, void *priv,
     }
 
     if (unlikely(dma_mapping_error(dev, dma))) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.sw_err_cnt++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, sw_err_cnt);
         return -ENOMEM;
     }
@@ -1861,9 +1885,7 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
      * recursion level of over HNS3_MAX_RECURSION_LEVEL.
      */
     if (bd_num == UINT_MAX) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.over_max_recursion++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, over_max_recursion);
         return -ENOMEM;
     }
@@ -1872,16 +1894,12 @@ static int hns3_skb_linearize(struct hns3_enet_ring *ring,
      */
     if (skb->len > HNS3_MAX_TSO_SIZE ||
         (!skb_is_gso(skb) && skb->len > HNS3_MAX_NON_TSO_SIZE)) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.hw_limitation++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, hw_limitation);
         return -ENOMEM;
     }
 
     if (__skb_linearize(skb)) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.sw_err_cnt++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, sw_err_cnt);
         return -ENOMEM;
     }
@@ -1911,9 +1929,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
 
         bd_num = hns3_tx_bd_count(skb->len);
 
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.tx_copy++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, tx_copy);
     }
 
 out:
@@ -1933,9 +1949,7 @@ static int hns3_nic_maybe_stop_tx(struct hns3_enet_ring *ring,
         return bd_num;
     }
 
-    u64_stats_update_begin(&ring->syncp);
-    ring->stats.tx_busy++;
-    u64_stats_update_end(&ring->syncp);
+    hns3_ring_stats_update(ring, tx_busy);
 
     return -EBUSY;
 }
@@ -2020,9 +2034,7 @@ static void hns3_tx_doorbell(struct hns3_enet_ring *ring, int num,
     ring->pending_buf += num;
 
     if (!doorbell) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.tx_more++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, tx_more);
         return;
     }
@@ -2072,9 +2084,7 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
     ret = skb_copy_bits(skb, 0, buf, size);
     if (unlikely(ret < 0)) {
         hns3_tx_spare_rollback(ring, cb_len);
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.copy_bits_err++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, copy_bits_err);
         return ret;
     }
@@ -2097,9 +2107,8 @@ static int hns3_handle_tx_bounce(struct hns3_enet_ring *ring,
     dma_sync_single_for_device(ring_to_dev(ring), dma, size,
                    DMA_TO_DEVICE);
 
-    u64_stats_update_begin(&ring->syncp);
-    ring->stats.tx_bounce++;
-    u64_stats_update_end(&ring->syncp);
+    hns3_ring_stats_update(ring, tx_bounce);
+
     return bd_num;
 }
@@ -2129,9 +2138,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
     nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len);
     if (unlikely(nents < 0)) {
         hns3_tx_spare_rollback(ring, cb_len);
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.skb2sgl_err++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, skb2sgl_err);
         return -ENOMEM;
     }
@@ -2140,9 +2147,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
                   DMA_TO_DEVICE);
     if (unlikely(!sgt->nents)) {
         hns3_tx_spare_rollback(ring, cb_len);
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.map_sg_err++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, map_sg_err);
         return -ENOMEM;
     }
@@ -2154,10 +2159,7 @@ static int hns3_handle_tx_sgl(struct hns3_enet_ring *ring,
     for (i = 0; i < sgt->nents; i++)
         bd_num += hns3_fill_desc(ring, sg_dma_address(sgt->sgl + i),
                      sg_dma_len(sgt->sgl + i));
-
-    u64_stats_update_begin(&ring->syncp);
-    ring->stats.tx_sgl++;
-    u64_stats_update_end(&ring->syncp);
+    hns3_ring_stats_update(ring, tx_sgl);
 
     return bd_num;
 }
@@ -2182,23 +2184,45 @@ static int hns3_handle_desc_filling(struct hns3_enet_ring *ring,
     return hns3_fill_skb_to_desc(ring, skb, DESC_TYPE_SKB);
 }
 
+static int hns3_handle_skb_desc(struct hns3_enet_ring *ring,
+                struct sk_buff *skb,
+                struct hns3_desc_cb *desc_cb,
+                int next_to_use_head)
+{
+    int ret;
+
+    ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
+                 desc_cb);
+    if (unlikely(ret < 0))
+        goto fill_err;
+
+    /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
+     * zero, which is unlikely, and 'ret > 0' means how many tx desc
+     * need to be notified to the hw.
+     */
+    ret = hns3_handle_desc_filling(ring, skb);
+    if (likely(ret > 0))
+        return ret;
+
+fill_err:
+    hns3_clear_desc(ring, next_to_use_head);
+    return ret;
+}
+
 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
     struct hns3_nic_priv *priv = netdev_priv(netdev);
     struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
     struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
     struct netdev_queue *dev_queue;
-    int pre_ntu, next_to_use_head;
+    int pre_ntu, ret;
     bool doorbell;
-    int ret;
 
     /* Hardware can only handle short frames above 32 bytes */
     if (skb_put_padto(skb, HNS3_MIN_TX_LEN)) {
         hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.sw_err_cnt++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, sw_err_cnt);
         return NETDEV_TX_OK;
     }
@@ -2217,20 +2241,9 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
         goto out_err_tx_ok;
     }
 
-    next_to_use_head = ring->next_to_use;
-
-    ret = hns3_fill_skb_desc(ring, skb, &ring->desc[ring->next_to_use],
-                 desc_cb);
-    if (unlikely(ret < 0))
-        goto fill_err;
-
-    /* 'ret < 0' means filling error, 'ret == 0' means skb->len is
-     * zero, which is unlikely, and 'ret > 0' means how many tx desc
-     * need to be notified to the hw.
-     */
-    ret = hns3_handle_desc_filling(ring, skb);
+    ret = hns3_handle_skb_desc(ring, skb, desc_cb, ring->next_to_use);
     if (unlikely(ret <= 0))
-        goto fill_err;
+        goto out_err_tx_ok;
 
     pre_ntu = ring->next_to_use ? (ring->next_to_use - 1) :
                     (ring->desc_num - 1);
@@ -2252,9 +2265,6 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 
     return NETDEV_TX_OK;
 
-fill_err:
-    hns3_clear_desc(ring, next_to_use_head);
-
 out_err_tx_ok:
     dev_kfree_skb_any(skb);
     hns3_tx_doorbell(ring, 0, !netdev_xmit_more());
@@ -3522,17 +3532,13 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
     for (i = 0; i < cleand_count; i++) {
         desc_cb = &ring->desc_cb[ring->next_to_use];
         if (desc_cb->reuse_flag) {
-            u64_stats_update_begin(&ring->syncp);
-            ring->stats.reuse_pg_cnt++;
-            u64_stats_update_end(&ring->syncp);
+            hns3_ring_stats_update(ring, reuse_pg_cnt);
 
             hns3_reuse_buffer(ring, ring->next_to_use);
         } else {
             ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
             if (ret) {
-                u64_stats_update_begin(&ring->syncp);
-                ring->stats.sw_err_cnt++;
-                u64_stats_update_end(&ring->syncp);
+                hns3_ring_stats_update(ring, sw_err_cnt);
 
                 hns3_rl_err(ring_to_netdev(ring),
                         "alloc rx buffer failed: %d\n",
@@ -3544,9 +3550,7 @@ static bool hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
             }
 
             hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
-            u64_stats_update_begin(&ring->syncp);
-            ring->stats.non_reuse_pg++;
-            u64_stats_update_end(&ring->syncp);
+            hns3_ring_stats_update(ring, non_reuse_pg);
         }
 
         ring_ptr_move_fw(ring, next_to_use);
@@ -3573,9 +3577,7 @@ static int hns3_handle_rx_copybreak(struct sk_buff *skb, int i,
     void *frag = napi_alloc_frag(frag_size);
 
     if (unlikely(!frag)) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.frag_alloc_err++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, frag_alloc_err);
 
         hns3_rl_err(ring_to_netdev(ring),
                 "failed to allocate rx frag\n");
skb_add_rx_frag(skb, i, virt_to_page(frag), skb_add_rx_frag(skb, i, virt_to_page(frag),
offset_in_page(frag), frag_size, frag_size); offset_in_page(frag), frag_size, frag_size);
u64_stats_update_begin(&ring->syncp); hns3_ring_stats_update(ring, frag_alloc);
ring->stats.frag_alloc++;
u64_stats_update_end(&ring->syncp);
return 0; return 0;
} }
@@ -3722,9 +3722,7 @@ static bool hns3_checksum_complete(struct hns3_enet_ring *ring,
         hns3_rx_ptype_tbl[ptype].ip_summed != CHECKSUM_COMPLETE)
         return false;
 
-    u64_stats_update_begin(&ring->syncp);
-    ring->stats.csum_complete++;
-    u64_stats_update_end(&ring->syncp);
+    hns3_ring_stats_update(ring, csum_complete);
     skb->ip_summed = CHECKSUM_COMPLETE;
     skb->csum = csum_unfold((__force __sum16)csum);
@@ -3798,9 +3796,7 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
     if (unlikely(l234info & (BIT(HNS3_RXD_L3E_B) | BIT(HNS3_RXD_L4E_B) |
                  BIT(HNS3_RXD_OL3E_B) |
                  BIT(HNS3_RXD_OL4E_B)))) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.l3l4_csum_err++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, l3l4_csum_err);
         return;
     }
@@ -3891,10 +3887,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
     skb = ring->skb;
     if (unlikely(!skb)) {
         hns3_rl_err(netdev, "alloc rx skb fail\n");
-
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.sw_err_cnt++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, sw_err_cnt);
 
         return -ENOMEM;
     }
@@ -3925,9 +3918,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
     if (ring->page_pool)
         skb_mark_for_recycle(skb);
 
-    u64_stats_update_begin(&ring->syncp);
-    ring->stats.seg_pkt_cnt++;
-    u64_stats_update_end(&ring->syncp);
+    hns3_ring_stats_update(ring, seg_pkt_cnt);
 
     ring->pull_len = eth_get_headlen(netdev, va, HNS3_RX_HEAD_SIZE);
     __skb_put(skb, ring->pull_len);
@@ -4135,9 +4126,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
     ret = hns3_set_gro_and_checksum(ring, skb, l234info,
                     bd_base_info, ol_info, csum);
     if (unlikely(ret)) {
-        u64_stats_update_begin(&ring->syncp);
-        ring->stats.rx_err_cnt++;
-        u64_stats_update_end(&ring->syncp);
+        hns3_ring_stats_update(ring, rx_err_cnt);
         return ret;
     }
@@ -4353,87 +4342,70 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
     return rx_pkt_total;
 }
 
-static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
-                      struct hnae3_ring_chain_node *head)
+static int hns3_create_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
+                  struct hnae3_ring_chain_node **head,
+                  bool is_tx)
 {
+    u32 bit_value = is_tx ? HNAE3_RING_TYPE_TX : HNAE3_RING_TYPE_RX;
+    u32 field_value = is_tx ? HNAE3_RING_GL_TX : HNAE3_RING_GL_RX;
+    struct hnae3_ring_chain_node *cur_chain = *head;
     struct pci_dev *pdev = tqp_vector->handle->pdev;
-    struct hnae3_ring_chain_node *cur_chain = head;
     struct hnae3_ring_chain_node *chain;
-    struct hns3_enet_ring *tx_ring;
-    struct hns3_enet_ring *rx_ring;
-
-    tx_ring = tqp_vector->tx_group.ring;
-    if (tx_ring) {
-        cur_chain->tqp_index = tx_ring->tqp->tqp_index;
-        hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
-                  HNAE3_RING_TYPE_TX);
-        hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);
-
-        cur_chain->next = NULL;
-
-        while (tx_ring->next) {
-            tx_ring = tx_ring->next;
-
-            chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
-                         GFP_KERNEL);
-            if (!chain)
-                goto err_free_chain;
-
-            cur_chain->next = chain;
-            chain->tqp_index = tx_ring->tqp->tqp_index;
-            hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
-                      HNAE3_RING_TYPE_TX);
-            hnae3_set_field(chain->int_gl_idx,
-                    HNAE3_RING_GL_IDX_M,
-                    HNAE3_RING_GL_IDX_S,
-                    HNAE3_RING_GL_TX);
-
-            cur_chain = chain;
-        }
-    }
+    struct hns3_enet_ring *ring;
 
-    rx_ring = tqp_vector->rx_group.ring;
-    if (!tx_ring && rx_ring) {
-        cur_chain->next = NULL;
-        cur_chain->tqp_index = rx_ring->tqp->tqp_index;
-        hnae3_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
-                  HNAE3_RING_TYPE_RX);
-        hnae3_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+    ring = is_tx ? tqp_vector->tx_group.ring : tqp_vector->rx_group.ring;
 
-        rx_ring = rx_ring->next;
+    if (cur_chain) {
+        while (cur_chain->next)
+            cur_chain = cur_chain->next;
     }
 
-    while (rx_ring) {
+    while (ring) {
         chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
         if (!chain)
-            goto err_free_chain;
-
-        cur_chain->next = chain;
-        chain->tqp_index = rx_ring->tqp->tqp_index;
+            return -ENOMEM;
+        if (cur_chain)
+            cur_chain->next = chain;
+        else
+            *head = chain;
+        chain->tqp_index = ring->tqp->tqp_index;
         hnae3_set_bit(chain->flag, HNAE3_RING_TYPE_B,
-                  HNAE3_RING_TYPE_RX);
-        hnae3_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
-                HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);
+                  bit_value);
+        hnae3_set_field(chain->int_gl_idx,
+                HNAE3_RING_GL_IDX_M,
+                HNAE3_RING_GL_IDX_S, field_value);
         cur_chain = chain;
 
-        rx_ring = rx_ring->next;
+        ring = ring->next;
     }
 
     return 0;
+}
+
+static struct hnae3_ring_chain_node *
+hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector)
+{
+    struct pci_dev *pdev = tqp_vector->handle->pdev;
+    struct hnae3_ring_chain_node *cur_chain = NULL;
+    struct hnae3_ring_chain_node *chain;
+
+    if (hns3_create_ring_chain(tqp_vector, &cur_chain, true))
+        goto err_free_chain;
+
+    if (hns3_create_ring_chain(tqp_vector, &cur_chain, false))
+        goto err_free_chain;
+
+    return cur_chain;
 
 err_free_chain:
-    cur_chain = head->next;
     while (cur_chain) {
         chain = cur_chain->next;
         devm_kfree(&pdev->dev, cur_chain);
         cur_chain = chain;
     }
-    head->next = NULL;
 
-    return -ENOMEM;
+    return NULL;
 }
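The rewrite above replaces two nearly identical TX/RX loops with one hns3_create_ring_chain() that appends to a singly linked list through a head out-parameter, while the caller frees a partially built chain on failure. A minimal user-space sketch of that append-then-cleanup shape follows; chain_node, create_chain() and free_chain() are invented mock names, not driver APIs.

#include <stdio.h>
#include <stdlib.h>

/* Mock of hnae3_ring_chain_node: just a queue index and the next pointer. */
struct chain_node {
    int tqp_index;
    struct chain_node *next;
};

/* Append one node per entry of 'idx' to the list at *head, walking to the
 * current tail first -- called once for "TX" indices and once for "RX".
 */
static int create_chain(struct chain_node **head, const int *idx, int n)
{
    struct chain_node *cur = *head, *node;
    int i;

    if (cur)
        while (cur->next)
            cur = cur->next;

    for (i = 0; i < n; i++) {
        node = calloc(1, sizeof(*node));
        if (!node)
            return -1;
        node->tqp_index = idx[i];
        if (cur)
            cur->next = node;
        else
            *head = node;
        cur = node;
    }
    return 0;
}

static void free_chain(struct chain_node *head)
{
    while (head) {
        struct chain_node *next = head->next;

        free(head);
        head = next;
    }
}

int main(void)
{
    const int tx[] = { 0, 1 }, rx[] = { 0, 1 };
    struct chain_node *head = NULL, *cur;

    /* The caller frees whatever was built if either step fails. */
    if (create_chain(&head, tx, 2) || create_chain(&head, rx, 2)) {
        free_chain(head);
        return 1;
    }

    for (cur = head; cur; cur = cur->next)
        printf("%d ", cur->tqp_index);
    printf("\n");
    free_chain(head);
    return 0;
}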
@@ -4442,7 +4414,7 @@ static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
     struct pci_dev *pdev = tqp_vector->handle->pdev;
     struct hnae3_ring_chain_node *chain_tmp, *chain;
 
-    chain = head->next;
+    chain = head;
 
     while (chain) {
         chain_tmp = chain->next;
@@ -4557,7 +4529,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
     }
 
     for (i = 0; i < priv->vector_num; i++) {
-        struct hnae3_ring_chain_node vector_ring_chain;
+        struct hnae3_ring_chain_node *vector_ring_chain;
 
         tqp_vector = &priv->tqp_vector[i];
@@ -4567,15 +4539,16 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
         tqp_vector->tx_group.total_packets = 0;
         tqp_vector->handle = h;
 
-        ret = hns3_get_vector_ring_chain(tqp_vector,
-                         &vector_ring_chain);
-        if (ret)
-            goto map_ring_fail;
+        vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+        if (!vector_ring_chain) {
+            ret = -ENOMEM;
+            goto map_ring_fail;
+        }
 
         ret = h->ae_algo->ops->map_ring_to_vector(h,
-            tqp_vector->vector_irq, &vector_ring_chain);
+            tqp_vector->vector_irq, vector_ring_chain);
 
-        hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+        hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
 
         if (ret)
             goto map_ring_fail;
@@ -4674,7 +4647,7 @@ static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
 
 static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 {
-    struct hnae3_ring_chain_node vector_ring_chain;
+    struct hnae3_ring_chain_node *vector_ring_chain;
     struct hnae3_handle *h = priv->ae_handle;
     struct hns3_enet_tqp_vector *tqp_vector;
     int i;
@@ -4689,13 +4662,14 @@ static void hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
          * chain between vector and ring, we should go on to deal with
          * the remaining options.
          */
-        if (hns3_get_vector_ring_chain(tqp_vector, &vector_ring_chain))
+        vector_ring_chain = hns3_get_vector_ring_chain(tqp_vector);
+        if (!vector_ring_chain)
             dev_warn(priv->dev, "failed to get ring chain\n");
 
         h->ae_algo->ops->unmap_ring_from_vector(h,
-            tqp_vector->vector_irq, &vector_ring_chain);
+            tqp_vector->vector_irq, vector_ring_chain);
 
-        hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
+        hns3_free_vector_ring_chain(tqp_vector, vector_ring_chain);
 
         hns3_clear_ring_group(&tqp_vector->rx_group);
         hns3_clear_ring_group(&tqp_vector->tx_group);
@@ -5347,9 +5321,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
         if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
             ret = hns3_alloc_and_map_buffer(ring, &res_cbs);
             if (ret) {
-                u64_stats_update_begin(&ring->syncp);
-                ring->stats.sw_err_cnt++;
-                u64_stats_update_end(&ring->syncp);
+                hns3_ring_stats_update(ring, sw_err_cnt);
                 /* if alloc new buffer fail, exit directly
                  * and reclear in up flow.
                  */
......
@@ -660,6 +660,13 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 
 #define hns3_buf_size(_ring) ((_ring)->buf_size)
 
+#define hns3_ring_stats_update(ring, cnt) do { \
+    typeof(ring) (tmp) = (ring); \
+    u64_stats_update_begin(&(tmp)->syncp); \
+    ((tmp)->stats.cnt)++; \
+    u64_stats_update_end(&(tmp)->syncp); \
+} while (0) \
+
 static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
 {
 #if (PAGE_SIZE < 8192)
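The hns3_ring_stats_update() macro added above replaces every open-coded u64_stats_update_begin()/counter++/u64_stats_update_end() triplet in the hns3_enet.c hunks earlier in this series. Below is a stand-alone sketch of the same macro shape, assuming mock types (mock_ring, stats_update_begin/end) in place of the kernel's u64_stats API.

#include <stdio.h>

struct mock_sync { int dummy; };          /* stands in for struct u64_stats_sync */
static void stats_update_begin(struct mock_sync *s) { (void)s; }
static void stats_update_end(struct mock_sync *s) { (void)s; }

struct mock_ring_stats { unsigned long long tx_busy, sw_err_cnt; };
struct mock_ring {
    struct mock_sync syncp;
    struct mock_ring_stats stats;
};

/* Same shape as the new helper: one macro hides the begin/count/end triplet. */
#define ring_stats_update(ring, cnt) do {      \
    typeof(ring) (tmp) = (ring);               \
    stats_update_begin(&(tmp)->syncp);         \
    ((tmp)->stats.cnt)++;                      \
    stats_update_end(&(tmp)->syncp);           \
} while (0)

int main(void)
{
    struct mock_ring ring = { 0 };

    ring_stats_update(&ring, tx_busy);     /* replaces a three-line sequence */
    ring_stats_update(&ring, sw_err_cnt);

    printf("tx_busy=%llu sw_err_cnt=%llu\n",
           ring.stats.tx_busy, ring.stats.sw_err_cnt);
    return 0;
}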
......
@@ -1613,12 +1613,39 @@ static void hclge_init_kdump_kernel_config(struct hclge_dev *hdev)
     hdev->num_rx_desc = HCLGE_MIN_RX_DESC;
 }
 
+static void hclge_init_tc_config(struct hclge_dev *hdev)
+{
+    unsigned int i;
+
+    if (hdev->tc_max > HNAE3_MAX_TC ||
+        hdev->tc_max < 1) {
+        dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
+             hdev->tc_max);
+        hdev->tc_max = 1;
+    }
+
+    /* Dev does not support DCB */
+    if (!hnae3_dev_dcb_supported(hdev)) {
+        hdev->tc_max = 1;
+        hdev->pfc_max = 0;
+    } else {
+        hdev->pfc_max = hdev->tc_max;
+    }
+
+    hdev->tm_info.num_tc = 1;
+
+    /* Currently not support uncontiuous tc */
+    for (i = 0; i < hdev->tm_info.num_tc; i++)
+        hnae3_set_bit(hdev->hw_tc_map, i, 1);
+
+    hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+}
+
 static int hclge_configure(struct hclge_dev *hdev)
 {
     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(hdev->pdev);
     const struct cpumask *cpumask = cpu_online_mask;
     struct hclge_cfg cfg;
-    unsigned int i;
     int node, ret;
 
     ret = hclge_get_cfg(hdev, &cfg);
@@ -1662,29 +1689,7 @@ static int hclge_configure(struct hclge_dev *hdev)
 
     hdev->hw.mac.max_speed = hclge_get_max_speed(cfg.speed_ability);
 
-    if ((hdev->tc_max > HNAE3_MAX_TC) ||
-        (hdev->tc_max < 1)) {
-        dev_warn(&hdev->pdev->dev, "TC num = %u.\n",
-             hdev->tc_max);
-        hdev->tc_max = 1;
-    }
-
-    /* Dev does not support DCB */
-    if (!hnae3_dev_dcb_supported(hdev)) {
-        hdev->tc_max = 1;
-        hdev->pfc_max = 0;
-    } else {
-        hdev->pfc_max = hdev->tc_max;
-    }
-
-    hdev->tm_info.num_tc = 1;
-
-    /* Currently not support uncontiuous tc */
-    for (i = 0; i < hdev->tm_info.num_tc; i++)
-        hnae3_set_bit(hdev->hw_tc_map, i, 1);
-
-    hdev->tx_sch_mode = HCLGE_FLAG_TC_BASE_SCH_MODE;
+    hclge_init_tc_config(hdev);
 
     hclge_init_kdump_kernel_config(hdev);
 
     /* Set the affinity based on numa node */
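For context on the hunk above: hclge_init_tc_config() clamps tc_max into a valid range, derives pfc_max from DCB support, and sets one bit per enabled TC in the hw_tc_map bitmap. A minimal sketch of that clamping and bitmap arithmetic follows; mock_dev and the MAX_TC value of 8 are assumptions for illustration only, not driver definitions.

#include <stdio.h>

#define MAX_TC 8    /* stands in for HNAE3_MAX_TC */

struct mock_dev {
    unsigned int tc_max;
    unsigned int pfc_max;
    unsigned int num_tc;
    unsigned char hw_tc_map;
    int dcb_supported;
};

/* Same shape as hclge_init_tc_config(): clamp tc_max, derive pfc_max from
 * DCB support, and set one bit per enabled TC in the hw_tc_map bitmap.
 */
static void init_tc_config(struct mock_dev *hdev)
{
    unsigned int i;

    if (hdev->tc_max > MAX_TC || hdev->tc_max < 1)
        hdev->tc_max = 1;

    if (!hdev->dcb_supported) {
        hdev->tc_max = 1;
        hdev->pfc_max = 0;
    } else {
        hdev->pfc_max = hdev->tc_max;
    }

    hdev->num_tc = 1;
    for (i = 0; i < hdev->num_tc; i++)
        hdev->hw_tc_map |= 1u << i;
}

int main(void)
{
    struct mock_dev dev = { .tc_max = 12, .dcb_supported = 1 };

    init_tc_config(&dev);
    printf("tc_max=%u pfc_max=%u hw_tc_map=0x%x\n",
           dev.tc_max, dev.pfc_max, dev.hw_tc_map);
    return 0;
}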
@@ -7172,6 +7177,37 @@ static void hclge_fd_get_ext_info(struct ethtool_rx_flow_spec *fs,
     }
 }
 
+static struct hclge_fd_rule *hclge_get_fd_rule(struct hclge_dev *hdev,
+                           u16 location)
+{
+    struct hclge_fd_rule *rule = NULL;
+    struct hlist_node *node2;
+
+    hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
+        if (rule->location == location)
+            return rule;
+        else if (rule->location > location)
+            return NULL;
+    }
+
+    return NULL;
+}
+
+static void hclge_fd_get_ring_cookie(struct ethtool_rx_flow_spec *fs,
+                     struct hclge_fd_rule *rule)
+{
+    if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
+        fs->ring_cookie = RX_CLS_FLOW_DISC;
+    } else {
+        u64 vf_id;
+
+        fs->ring_cookie = rule->queue_id;
+        vf_id = rule->vf_id;
+        vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
+        fs->ring_cookie |= vf_id;
+    }
+}
+
 static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
                   struct ethtool_rxnfc *cmd)
 {
@@ -7179,7 +7215,6 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
     struct hclge_fd_rule *rule = NULL;
     struct hclge_dev *hdev = vport->back;
     struct ethtool_rx_flow_spec *fs;
-    struct hlist_node *node2;
 
     if (!hnae3_dev_fd_supported(hdev))
         return -EOPNOTSUPP;
@@ -7188,14 +7223,9 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 
     spin_lock_bh(&hdev->fd_rule_lock);
 
-    hlist_for_each_entry_safe(rule, node2, &hdev->fd_rule_list, rule_node) {
-        if (rule->location >= fs->location)
-            break;
-    }
-
-    if (!rule || fs->location != rule->location) {
+    rule = hclge_get_fd_rule(hdev, fs->location);
+    if (!rule) {
         spin_unlock_bh(&hdev->fd_rule_lock);
         return -ENOENT;
     }
@@ -7233,16 +7263,7 @@ static int hclge_get_fd_rule_info(struct hnae3_handle *handle,
 
     hclge_fd_get_ext_info(fs, rule);
 
-    if (rule->action == HCLGE_FD_ACTION_DROP_PACKET) {
-        fs->ring_cookie = RX_CLS_FLOW_DISC;
-    } else {
-        u64 vf_id;
-
-        fs->ring_cookie = rule->queue_id;
-        vf_id = rule->vf_id;
-        vf_id <<= ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF;
-        fs->ring_cookie |= vf_id;
-    }
+    hclge_fd_get_ring_cookie(fs, rule);
 
     spin_unlock_bh(&hdev->fd_rule_lock);
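The new hclge_fd_get_ring_cookie() above packs the destination queue into the low bits of ring_cookie and the VF id into the upper bits, shifted by ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. The sketch below shows that packing with a locally defined RING_VF_OFF of 32 (the ethtool UAPI value, restated here as an assumption so the example builds stand-alone).

#include <stdint.h>
#include <stdio.h>

/* Local stand-in for ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF. */
#define RING_VF_OFF 32

static uint64_t fd_ring_cookie(uint32_t queue_id, uint32_t vf_id)
{
    return (uint64_t)queue_id | ((uint64_t)vf_id << RING_VF_OFF);
}

int main(void)
{
    uint64_t cookie = fd_ring_cookie(3, 2);

    /* queue 3 steered to VF 2 -> 0x0000000200000003 */
    printf("cookie=0x%016llx queue=%u vf=%u\n",
           (unsigned long long)cookie,
           (unsigned int)(cookie & 0xffffffffu),
           (unsigned int)(cookie >> RING_VF_OFF));
    return 0;
}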
@@ -10194,67 +10215,80 @@ static int hclge_set_vlan_protocol_type(struct hclge_dev *hdev)
     return status;
 }
 
-static int hclge_init_vlan_config(struct hclge_dev *hdev)
+static int hclge_init_vlan_filter(struct hclge_dev *hdev)
 {
-#define HCLGE_DEF_VLAN_TYPE    0x8100
-
-    struct hnae3_handle *handle = &hdev->vport[0].nic;
     struct hclge_vport *vport;
     int ret;
     int i;
 
-    if (hdev->ae_dev->dev_version >= HNAE3_DEVICE_VERSION_V2) {
-        /* for revision 0x21, vf vlan filter is per function */
-        for (i = 0; i < hdev->num_alloc_vport; i++) {
-            vport = &hdev->vport[i];
-            ret = hclge_set_vlan_filter_ctrl(hdev,
-                             HCLGE_FILTER_TYPE_VF,
-                             HCLGE_FILTER_FE_EGRESS,
-                             true,
-                             vport->vport_id);
-            if (ret)
-                return ret;
-            vport->cur_vlan_fltr_en = true;
-        }
+    if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+        return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+                          HCLGE_FILTER_FE_EGRESS_V1_B,
+                          true, 0);
 
-        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
-                         HCLGE_FILTER_FE_INGRESS, true,
-                         0);
-        if (ret)
-            return ret;
-    } else {
-        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
-                         HCLGE_FILTER_FE_EGRESS_V1_B,
-                         true, 0);
+    /* for revision 0x21, vf vlan filter is per function */
+    for (i = 0; i < hdev->num_alloc_vport; i++) {
+        vport = &hdev->vport[i];
+        ret = hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_VF,
+                         HCLGE_FILTER_FE_EGRESS, true,
+                         vport->vport_id);
         if (ret)
             return ret;
+        vport->cur_vlan_fltr_en = true;
     }
 
-    hdev->vlan_type_cfg.rx_in_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
-    hdev->vlan_type_cfg.rx_in_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
-    hdev->vlan_type_cfg.rx_ot_fst_vlan_type = HCLGE_DEF_VLAN_TYPE;
-    hdev->vlan_type_cfg.rx_ot_sec_vlan_type = HCLGE_DEF_VLAN_TYPE;
-    hdev->vlan_type_cfg.tx_ot_vlan_type = HCLGE_DEF_VLAN_TYPE;
-    hdev->vlan_type_cfg.tx_in_vlan_type = HCLGE_DEF_VLAN_TYPE;
+    return hclge_set_vlan_filter_ctrl(hdev, HCLGE_FILTER_TYPE_PORT,
+                      HCLGE_FILTER_FE_INGRESS, true, 0);
+}
 
-    ret = hclge_set_vlan_protocol_type(hdev);
-    if (ret)
-        return ret;
+static int hclge_init_vlan_type(struct hclge_dev *hdev)
+{
+    hdev->vlan_type_cfg.rx_in_fst_vlan_type = ETH_P_8021Q;
+    hdev->vlan_type_cfg.rx_in_sec_vlan_type = ETH_P_8021Q;
+    hdev->vlan_type_cfg.rx_ot_fst_vlan_type = ETH_P_8021Q;
+    hdev->vlan_type_cfg.rx_ot_sec_vlan_type = ETH_P_8021Q;
+    hdev->vlan_type_cfg.tx_ot_vlan_type = ETH_P_8021Q;
+    hdev->vlan_type_cfg.tx_in_vlan_type = ETH_P_8021Q;
+
+    return hclge_set_vlan_protocol_type(hdev);
+}
 
-    for (i = 0; i < hdev->num_alloc_vport; i++) {
-        u16 vlan_tag;
-        u8 qos;
+static int hclge_init_vport_vlan_offload(struct hclge_dev *hdev)
+{
+    struct hclge_port_base_vlan_config *cfg;
+    struct hclge_vport *vport;
+    int ret;
+    int i;
 
-        vport = &hdev->vport[i];
-        vlan_tag = vport->port_base_vlan_cfg.vlan_info.vlan_tag;
-        qos = vport->port_base_vlan_cfg.vlan_info.qos;
+    for (i = 0; i < hdev->num_alloc_vport; i++) {
+        vport = &hdev->vport[i];
+        cfg = &vport->port_base_vlan_cfg;
 
-        ret = hclge_vlan_offload_cfg(vport,
-                         vport->port_base_vlan_cfg.state,
-                         vlan_tag, qos);
+        ret = hclge_vlan_offload_cfg(vport, cfg->state,
+                         cfg->vlan_info.vlan_tag,
+                         cfg->vlan_info.qos);
         if (ret)
             return ret;
     }
 
+    return 0;
+}
+
+static int hclge_init_vlan_config(struct hclge_dev *hdev)
+{
+    struct hnae3_handle *handle = &hdev->vport[0].nic;
+    int ret;
+
+    ret = hclge_init_vlan_filter(hdev);
+    if (ret)
+        return ret;
+
+    ret = hclge_init_vlan_type(hdev);
+    if (ret)
+        return ret;
+
+    ret = hclge_init_vport_vlan_offload(hdev);
+    if (ret)
+        return ret;
+
     return hclge_set_vlan_filter(handle, htons(ETH_P_8021Q), 0, false);
 }
@@ -10511,54 +10545,57 @@ static bool hclge_need_update_vlan_filter(const struct hclge_vlan_info *new_cfg,
     return false;
 }
 
-int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
-                    struct hclge_vlan_info *vlan_info)
+static int hclge_modify_port_base_vlan_tag(struct hclge_vport *vport,
+                       struct hclge_vlan_info *new_info,
+                       struct hclge_vlan_info *old_info)
 {
-    struct hnae3_handle *nic = &vport->nic;
-    struct hclge_vlan_info *old_vlan_info;
     struct hclge_dev *hdev = vport->back;
     int ret;
 
-    old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
-
-    ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
-                     vlan_info->qos);
-    if (ret)
-        return ret;
-
-    if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
-        goto out;
-
-    if (state == HNAE3_PORT_BASE_VLAN_MODIFY) {
-        /* add new VLAN tag */
-        ret = hclge_set_vlan_filter_hw(hdev,
-                           htons(vlan_info->vlan_proto),
-                           vport->vport_id,
-                           vlan_info->vlan_tag,
-                           false);
-        if (ret)
-            return ret;
-
-        /* remove old VLAN tag */
-        if (old_vlan_info->vlan_tag == 0)
-            ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
-                               true, 0);
-        else
-            ret = hclge_set_vlan_filter_hw(hdev,
-                               htons(ETH_P_8021Q),
-                               vport->vport_id,
-                               old_vlan_info->vlan_tag,
-                               true);
-        if (ret) {
-            dev_err(&hdev->pdev->dev,
-                "failed to clear vport%u port base vlan %u, ret = %d.\n",
-                vport->vport_id, old_vlan_info->vlan_tag, ret);
-            return ret;
-        }
+    /* add new VLAN tag */
+    ret = hclge_set_vlan_filter_hw(hdev, htons(new_info->vlan_proto),
+                       vport->vport_id, new_info->vlan_tag,
+                       false);
+    if (ret)
+        return ret;
+
+    /* remove old VLAN tag */
+    if (old_info->vlan_tag == 0)
+        ret = hclge_set_vf_vlan_common(hdev, vport->vport_id,
+                           true, 0);
+    else
+        ret = hclge_set_vlan_filter_hw(hdev, htons(ETH_P_8021Q),
+                           vport->vport_id,
+                           old_info->vlan_tag, true);
+    if (ret)
+        dev_err(&hdev->pdev->dev,
+            "failed to clear vport%u port base vlan %u, ret = %d.\n",
+            vport->vport_id, old_info->vlan_tag, ret);
 
+    return ret;
+}
+
+int hclge_update_port_base_vlan_cfg(struct hclge_vport *vport, u16 state,
+                    struct hclge_vlan_info *vlan_info)
+{
+    struct hnae3_handle *nic = &vport->nic;
+    struct hclge_vlan_info *old_vlan_info;
+    int ret;
+
+    old_vlan_info = &vport->port_base_vlan_cfg.vlan_info;
+
+    ret = hclge_vlan_offload_cfg(vport, state, vlan_info->vlan_tag,
+                     vlan_info->qos);
+    if (ret)
+        return ret;
+
+    if (!hclge_need_update_vlan_filter(vlan_info, old_vlan_info))
         goto out;
-    }
 
-    ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
-                           old_vlan_info);
+    if (state == HNAE3_PORT_BASE_VLAN_MODIFY)
+        ret = hclge_modify_port_base_vlan_tag(vport, vlan_info,
+                              old_vlan_info);
+    else
+        ret = hclge_update_vlan_filter_entries(vport, state, vlan_info,
+                               old_vlan_info);
     if (ret)
@@ -12310,19 +12347,42 @@ static void hclge_get_tqps_and_rss_info(struct hnae3_handle *handle,
     *max_rss_size = hdev->pf_rss_size_max;
 }
 
+static int hclge_set_rss_tc_mode_cfg(struct hnae3_handle *handle)
+{
+    struct hclge_vport *vport = hclge_get_vport(handle);
+    u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
+    struct hclge_dev *hdev = vport->back;
+    u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
+    u16 tc_valid[HCLGE_MAX_TC_NUM];
+    u16 roundup_size;
+    unsigned int i;
+
+    roundup_size = roundup_pow_of_two(vport->nic.kinfo.rss_size);
+    roundup_size = ilog2(roundup_size);
+
+    /* Set the RSS TC mode according to the new RSS size */
+    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
+        tc_valid[i] = 0;
+
+        if (!(hdev->hw_tc_map & BIT(i)))
+            continue;
+
+        tc_valid[i] = 1;
+        tc_size[i] = roundup_size;
+        tc_offset[i] = vport->nic.kinfo.rss_size * i;
+    }
+
+    return hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+}
+
 static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
                   bool rxfh_configured)
 {
     struct hnae3_ae_dev *ae_dev = pci_get_drvdata(handle->pdev);
     struct hclge_vport *vport = hclge_get_vport(handle);
     struct hnae3_knic_private_info *kinfo = &vport->nic.kinfo;
-    u16 tc_offset[HCLGE_MAX_TC_NUM] = {0};
     struct hclge_dev *hdev = vport->back;
-    u16 tc_size[HCLGE_MAX_TC_NUM] = {0};
     u16 cur_rss_size = kinfo->rss_size;
     u16 cur_tqps = kinfo->num_tqps;
-    u16 tc_valid[HCLGE_MAX_TC_NUM];
-    u16 roundup_size;
     u32 *rss_indir;
     unsigned int i;
     int ret;
@@ -12335,20 +12395,7 @@ static int hclge_set_channels(struct hnae3_handle *handle, u32 new_tqps_num,
         return ret;
     }
 
-    roundup_size = roundup_pow_of_two(kinfo->rss_size);
-    roundup_size = ilog2(roundup_size);
-    /* Set the RSS TC mode according to the new RSS size */
-    for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
-        tc_valid[i] = 0;
-
-        if (!(hdev->hw_tc_map & BIT(i)))
-            continue;
-
-        tc_valid[i] = 1;
-        tc_size[i] = roundup_size;
-        tc_offset[i] = kinfo->rss_size * i;
-    }
-
-    ret = hclge_set_rss_tc_mode(hdev, tc_valid, tc_size, tc_offset);
+    ret = hclge_set_rss_tc_mode_cfg(handle);
     if (ret)
         return ret;
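hclge_set_rss_tc_mode_cfg() above stores ilog2(roundup_pow_of_two(rss_size)) as the per-TC size and spaces tc_offset rss_size queues apart for each bit set in hw_tc_map. The stand-alone sketch below reimplements those two helpers in plain C purely to show the arithmetic; it is illustrative only and not the kernel implementation.

#include <stdio.h>

/* Minimal equivalents of the kernel's roundup_pow_of_two()/ilog2(). */
static unsigned int roundup_pow_of_two(unsigned int n)
{
    unsigned int p = 1;

    while (p < n)
        p <<= 1;
    return p;
}

static unsigned int ilog2(unsigned int n)
{
    unsigned int l = 0;

    while (n > 1) {
        n >>= 1;
        l++;
    }
    return l;
}

int main(void)
{
    unsigned int rss_size = 24, hw_tc_map = 0x3;   /* two TCs enabled */
    unsigned int tc_size = ilog2(roundup_pow_of_two(rss_size));
    unsigned int i;

    /* tc_size is the log2 of the rounded-up RSS size (24 -> 32 -> 5) and
     * tc_offset spaces each enabled TC rss_size queues apart.
     */
    for (i = 0; i < 8; i++) {
        if (!(hw_tc_map & (1u << i)))
            continue;
        printf("tc%u: size=%u offset=%u\n", i, tc_size, rss_size * i);
    }
    return 0;
}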
......