Commit bd5196b6 authored by David S. Miller

Merge branch 'hns3-hwgro'

Salil Mehta says:

====================
net: hns3: Add support of hardware GRO to HNS3 Driver

This patch-set adds support for the hardware-assisted GRO feature to the
HNS3 driver on the Rev B (=0x21) platform. The current hardware only
supports TCP/IPv{4|6} flows.

Change Log:
V1->V2:
1. Remove redundant print reported by Leon Romanovsky.
   Link: https://lkml.org/lkml/2018/11/13/715
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents ba2f55b0 a6d53b97
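With this series the driver advertises NETIF_F_GRO_HW on Rev B hardware, so hardware GRO should be controllable at runtime through the normal netdev feature path (for example via ethtool's "rx-gro-hw" feature flag, assuming the standard feature-string mapping); hns3_nic_set_features() then forwards the toggle to the new set_gro_en() hook added below.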
@@ -52,6 +52,7 @@
#define HNAE3_UNIC_CLIENT_INITED_B 0x4
#define HNAE3_ROCE_CLIENT_INITED_B 0x5
#define HNAE3_DEV_SUPPORT_FD_B 0x6
#define HNAE3_DEV_SUPPORT_GRO_B 0x7
#define HNAE3_DEV_SUPPORT_ROCE_DCB_BITS (BIT(HNAE3_DEV_SUPPORT_DCB_B) |\
BIT(HNAE3_DEV_SUPPORT_ROCE_B))
@@ -65,6 +66,9 @@
#define hnae3_dev_fd_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B)
#define hnae3_dev_gro_supported(hdev) \
hnae3_get_bit((hdev)->ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B)
#define ring_ptr_move_fw(ring, p) \
((ring)->p = ((ring)->p + 1) % (ring)->desc_num)
#define ring_ptr_move_bw(ring, p) \
@@ -301,6 +305,8 @@ struct hnae3_ae_dev {
* Set vlan filter config of vf
* enable_hw_strip_rxvtag()
* Enable/disable hardware strip vlan tag of packets received
* set_gro_en()
* Enable/disable HW GRO
*/
struct hnae3_ae_ops {
int (*init_ae_dev)(struct hnae3_ae_dev *ae_dev);
@@ -445,6 +451,7 @@ struct hnae3_ae_ops {
bool (*get_hw_reset_stat)(struct hnae3_handle *handle);
bool (*ae_dev_resetting)(struct hnae3_handle *handle);
unsigned long (*ae_dev_reset_cnt)(struct hnae3_handle *handle);
int (*set_gro_en)(struct hnae3_handle *handle, int enable);
};
struct hnae3_dcb_ops {
@@ -15,6 +15,7 @@
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/tcp.h>
#include <net/vxlan.h>
#include "hnae3.h"
@@ -1345,6 +1346,15 @@ static int hns3_nic_set_features(struct net_device *netdev,
priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
}
if (changed & (NETIF_F_GRO_HW) && h->ae_algo->ops->set_gro_en) {
if (features & NETIF_F_GRO_HW)
ret = h->ae_algo->ops->set_gro_en(h, true);
else
ret = h->ae_algo->ops->set_gro_en(h, false);
if (ret)
return ret;
}
if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
h->ae_algo->ops->enable_vlan_filter) {
if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
@@ -1714,8 +1724,10 @@ static void hns3_disable_sriov(struct pci_dev *pdev)
static void hns3_get_dev_capability(struct pci_dev *pdev,
struct hnae3_ae_dev *ae_dev)
{
if (pdev->revision >= 0x21)
if (pdev->revision >= 0x21) {
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_FD_B, 1);
hnae3_set_bit(ae_dev->flag, HNAE3_DEV_SUPPORT_GRO_B, 1);
}
}
/* hns3_probe - Device initialization routine
@@ -1927,7 +1939,9 @@ static void hns3_set_default_feature(struct net_device *netdev)
NETIF_F_GSO_UDP_TUNNEL_CSUM | NETIF_F_SCTP_CRC;
if (pdev->revision >= 0x21) {
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;
netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER |
NETIF_F_GRO_HW;
netdev->features |= NETIF_F_GRO_HW;
if (!(h->flags & HNAE3_SUPPORT_VF)) {
netdev->hw_features |= NETIF_F_NTUPLE;
@@ -2305,6 +2319,12 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
if (!(netdev->features & NETIF_F_RXCSUM))
return;
/* We MUST enable hardware checksum before enabling hardware GRO */
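/* A non-zero gso_size means this packet was aggregated by hardware GRO,
 * in which case the hardware has already validated the checksums.
 */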
if (skb_shinfo(skb)->gso_size) {
skb->ip_summed = CHECKSUM_UNNECESSARY;
return;
}
/* check if hardware has done checksum */
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
return;
@@ -2348,6 +2368,9 @@ static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
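/* An skb carrying a frag_list is not merged by the software GRO engine
 * and would be delivered straight away; flush whatever GRO is still
 * holding first so that packets go up the stack in order.
 */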
if (skb_has_frag_list(skb))
napi_gro_flush(&ring->tqp_vector->napi, false);
napi_gro_receive(&ring->tqp_vector->napi, skb);
}
@@ -2381,6 +2404,153 @@ static bool hns3_parse_vlan_tag(struct hns3_enet_ring *ring,
}
}
static int hns3_alloc_skb(struct hns3_enet_ring *ring, int length,
unsigned char *va)
{
#define HNS3_NEED_ADD_FRAG 1
struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
struct sk_buff *skb;
ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
skb = ring->skb;
if (unlikely(!skb)) {
netdev_err(netdev, "alloc rx skb fail\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
prefetchw(skb->data);
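/* Per-packet RX state: pending_buf counts the BDs consumed for the skb
 * being assembled, frag_num indexes the next frag slot to fill, and
 * tail_skb tracks the last skb chained on the frag_list.
 */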
ring->pending_buf = 1;
ring->frag_num = 0;
ring->tail_skb = NULL;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
/* We can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
ring_ptr_move_fw(ring, next_to_clean);
return 0;
}
u64_stats_update_begin(&ring->syncp);
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
ring->pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
__skb_put(skb, ring->pull_len);
hns3_nic_reuse_page(skb, ring->frag_num++, ring, ring->pull_len,
desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
return HNS3_NEED_ADD_FRAG;
}
static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
struct sk_buff **out_skb, bool pending)
{
struct sk_buff *skb = *out_skb;
struct sk_buff *head_skb = *out_skb;
struct sk_buff *new_skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *pre_desc;
u32 bd_base_info;
int pre_bd;
/* If the packet has BDs pending from a previous poll, next_to_clean has
 * already advanced past the last consumed BD (the BD it points at now may
 * not be valid yet), so check the frame-end bit of the previous BD.
 */
if (pending) {
pre_bd = (ring->next_to_clean - 1 + ring->desc_num) %
ring->desc_num;
pre_desc = &ring->desc[pre_bd];
bd_base_info = le32_to_cpu(pre_desc->rx.bd_base_info);
} else {
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
}
while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
if (!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B))
return -ENXIO;
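/* A single skb can hold at most MAX_SKB_FRAGS page fragments; once that
 * limit is reached, chain a fresh skb onto the frag_list and keep
 * filling fragments there.
 */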
if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
HNS3_RX_HEAD_SIZE);
if (unlikely(!new_skb)) {
netdev_err(ring->tqp->handle->kinfo.netdev,
"alloc rx skb frag fail\n");
return -ENXIO;
}
ring->frag_num = 0;
if (ring->tail_skb) {
ring->tail_skb->next = new_skb;
ring->tail_skb = new_skb;
} else {
skb_shinfo(skb)->frag_list = new_skb;
ring->tail_skb = new_skb;
}
}
if (ring->tail_skb) {
head_skb->truesize += hnae3_buf_size(ring);
head_skb->data_len += le16_to_cpu(desc->rx.size);
head_skb->len += le16_to_cpu(desc->rx.size);
skb = ring->tail_skb;
}
hns3_nic_reuse_page(skb, ring->frag_num++, ring, 0, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
ring->pending_buf++;
}
return 0;
}
static void hns3_set_gro_param(struct sk_buff *skb, u32 l234info,
u32 bd_base_info)
{
u16 gro_count;
u32 l3_type;
gro_count = hnae3_get_field(l234info, HNS3_RXD_GRO_COUNT_M,
HNS3_RXD_GRO_COUNT_S);
/* if there is no HW GRO, do not set gro params */
if (!gro_count)
return;
/* tcp_gro_complete() will copy NAPI_GRO_CB(skb)->count
* to skb_shinfo(skb)->gso_segs
*/
NAPI_GRO_CB(skb)->count = gro_count;
l3_type = hnae3_get_field(l234info, HNS3_RXD_L3ID_M,
HNS3_RXD_L3ID_S);
if (l3_type == HNS3_L3_TYPE_IPV4)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
else if (l3_type == HNS3_L3_TYPE_IPV6)
skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
else
return;
skb_shinfo(skb)->gso_size = hnae3_get_field(bd_base_info,
HNS3_RXD_GRO_SIZE_M,
HNS3_RXD_GRO_SIZE_S);
if (skb_shinfo(skb)->gso_size)
tcp_gro_complete(skb);
}
static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
struct sk_buff *skb)
{
@@ -2397,18 +2567,16 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
}
static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
struct sk_buff **out_skb, int *out_bnum)
struct sk_buff **out_skb)
{
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
struct sk_buff *skb = ring->skb;
struct hns3_desc_cb *desc_cb;
struct hns3_desc *desc;
struct sk_buff *skb;
unsigned char *va;
u32 bd_base_info;
int pull_len;
u32 l234info;
int length;
int bnum;
int ret;
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
@@ -2420,9 +2588,10 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
/* Check valid BD */
if (unlikely(!hnae3_get_bit(bd_base_info, HNS3_RXD_VLD_B)))
return -EFAULT;
return -ENXIO;
va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
if (!skb)
ring->va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
/* Prefetch first cache line of first page
* Idea is to cache few bytes of the header of the packet. Our L1 Cache
@@ -2431,62 +2600,42 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
* lines. In such a case, single fetch would suffice to cache in the
* relevant part of the header.
*/
prefetch(va);
prefetch(ring->va);
#if L1_CACHE_BYTES < 128
prefetch(va + L1_CACHE_BYTES);
prefetch(ring->va + L1_CACHE_BYTES);
#endif
skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
HNS3_RX_HEAD_SIZE);
if (unlikely(!skb)) {
netdev_err(netdev, "alloc rx skb fail\n");
u64_stats_update_begin(&ring->syncp);
ring->stats.sw_err_cnt++;
u64_stats_update_end(&ring->syncp);
return -ENOMEM;
}
prefetchw(skb->data);
bnum = 1;
if (length <= HNS3_RX_HEAD_SIZE) {
memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
if (!skb) {
ret = hns3_alloc_skb(ring, length, ring->va);
*out_skb = skb = ring->skb;
/* We can reuse buffer as-is, just make sure it is local */
if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
desc_cb->reuse_flag = 1;
else /* This page cannot be reused so discard it */
put_page(desc_cb->priv);
if (ret < 0) /* alloc buffer fail */
return ret;
if (ret > 0) { /* need add frag */
ret = hns3_add_frag(ring, desc, &skb, false);
if (ret)
return ret;
ring_ptr_move_fw(ring, next_to_clean);
/* The head data may still be changed by hardware while GRO is enabled,
 * so copy it into the skb only after the rest of the packet has been
 * received.
 */
memcpy(skb->data, ring->va,
ALIGN(ring->pull_len, sizeof(long)));
}
} else {
u64_stats_update_begin(&ring->syncp);
ring->stats.seg_pkt_cnt++;
u64_stats_update_end(&ring->syncp);
pull_len = eth_get_headlen(va, HNS3_RX_HEAD_SIZE);
memcpy(__skb_put(skb, pull_len), va,
ALIGN(pull_len, sizeof(long)));
hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
ret = hns3_add_frag(ring, desc, &skb, true);
if (ret)
return ret;
while (!hnae3_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
desc = &ring->desc[ring->next_to_clean];
desc_cb = &ring->desc_cb[ring->next_to_clean];
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
ring_ptr_move_fw(ring, next_to_clean);
bnum++;
}
/* The head data may still be changed by hardware while GRO is enabled,
 * so copy it into the skb only after the rest of the packet has been
 * received.
 */
memcpy(skb->data, ring->va,
ALIGN(ring->pull_len, sizeof(long)));
}
*out_bnum = bnum;
l234info = le32_to_cpu(desc->rx.l234_info);
bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
/* Based on hw strategy, the tag offloaded will be stored at
* ot_vlan_tag in two layer tag case, and stored at vlan_tag
@@ -2536,7 +2685,11 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
ring->tqp_vector->rx_group.total_bytes += skb->len;
/* This is needed in order to enable forwarding support */
hns3_set_gro_param(skb, l234info, bd_base_info);
hns3_rx_checksum(ring, skb, desc);
*out_skb = skb;
hns3_set_rx_skb_rss_type(ring, skb);
return 0;
@@ -2549,9 +2702,9 @@ int hns3_clean_rx_ring(
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
int recv_pkts, recv_bds, clean_count, err;
int unused_count = hns3_desc_unused(ring);
struct sk_buff *skb = NULL;
int num, bnum = 0;
int unused_count = hns3_desc_unused(ring) - ring->pending_buf;
struct sk_buff *skb = ring->skb;
int num;
num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
rmb(); /* Make sure num taken effect before the other data is touched */
@@ -2565,24 +2718,32 @@ int hns3_clean_rx_ring(
hns3_nic_alloc_rx_buffers(ring,
clean_count + unused_count);
clean_count = 0;
unused_count = hns3_desc_unused(ring);
unused_count = hns3_desc_unused(ring) -
ring->pending_buf;
}
/* Poll one pkt */
err = hns3_handle_rx_bd(ring, &skb, &bnum);
err = hns3_handle_rx_bd(ring, &skb);
if (unlikely(!skb)) /* This fault cannot be repaired */
goto out;
recv_bds += bnum;
clean_count += bnum;
if (unlikely(err)) { /* Do jump the err */
recv_pkts++;
if (err == -ENXIO) { /* no frame-end BD for the packet yet */
goto out;
} else if (unlikely(err)) { /* skip the erroneous packet */
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
continue;
}
/* Do update ip stack process */
skb->protocol = eth_type_trans(skb, netdev);
rx_fn(ring, skb);
recv_bds += ring->pending_buf;
clean_count += ring->pending_buf;
ring->skb = NULL;
ring->pending_buf = 0;
recv_pkts++;
}
@@ -109,6 +109,10 @@ enum hns3_nic_state {
#define HNS3_RXD_DOI_B 21
#define HNS3_RXD_OL3E_B 22
#define HNS3_RXD_OL4E_B 23
#define HNS3_RXD_GRO_COUNT_S 24
#define HNS3_RXD_GRO_COUNT_M (0x3f << HNS3_RXD_GRO_COUNT_S)
#define HNS3_RXD_GRO_FIXID_B 30
#define HNS3_RXD_GRO_ECN_B 31
#define HNS3_RXD_ODMAC_S 0
#define HNS3_RXD_ODMAC_M (0x3 << HNS3_RXD_ODMAC_S)
@@ -135,9 +139,8 @@ enum hns3_nic_state {
#define HNS3_RXD_TSIND_S 12
#define HNS3_RXD_TSIND_M (0x7 << HNS3_RXD_TSIND_S)
#define HNS3_RXD_LKBK_B 15
#define HNS3_RXD_HDL_S 16
#define HNS3_RXD_HDL_M (0x7ff << HNS3_RXD_HDL_S)
#define HNS3_RXD_HSIND_B 31
#define HNS3_RXD_GRO_SIZE_S 16
#define HNS3_RXD_GRO_SIZE_M (0x3ff << HNS3_RXD_GRO_SIZE_S)
#define HNS3_TXD_L3T_S 0
#define HNS3_TXD_L3T_M (0x3 << HNS3_TXD_L3T_S)
@@ -401,11 +404,19 @@ struct hns3_enet_ring {
*/
int next_to_clean;
int pull_len; /* head length for current packet */
u32 frag_num; /* next frag index for current packet */
unsigned char *va; /* first buffer address for current packet */
u32 flag; /* ring attribute */
int irq_init_flag;
int numa_node;
cpumask_t affinity_mask;
int pending_buf; /* number of BDs consumed for current packet */
struct sk_buff *skb; /* skb currently being assembled */
struct sk_buff *tail_skb; /* last skb chained on skb's frag_list */
};
struct hns_queue;
@@ -152,6 +152,7 @@ enum hclge_opcode_type {
/* TSO command */
HCLGE_OPC_TSO_GENERIC_CONFIG = 0x0C01,
HCLGE_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS commands */
HCLGE_OPC_RSS_GENERIC_CONFIG = 0x0D01,
@@ -758,6 +759,12 @@ struct hclge_cfg_tso_status_cmd {
u8 rsv[20];
};
#define HCLGE_GRO_EN_B 0
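/* The payload fills the 24-byte descriptor data area: a 2-byte enable
 * word plus 22 reserved bytes.
 */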
struct hclge_cfg_gro_status_cmd {
__le16 gro_en;
u8 rsv[22];
};
#define HCLGE_TSO_MSS_MIN 256
#define HCLGE_TSO_MSS_MAX 9668
@@ -921,6 +921,28 @@ static int hclge_config_tso(struct hclge_dev *hdev, int tso_mss_min,
return hclge_cmd_send(&hdev->hw, &desc, 1);
}
static int hclge_config_gro(struct hclge_dev *hdev, bool en)
{
struct hclge_cfg_gro_status_cmd *req;
struct hclge_desc desc;
int ret;
if (!hnae3_dev_gro_supported(hdev))
return 0;
hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_GRO_GENERIC_CONFIG, false);
req = (struct hclge_cfg_gro_status_cmd *)desc.data;
req->gro_en = cpu_to_le16(en ? 1 : 0);
ret = hclge_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"GRO hardware config cmd failed, ret = %d\n", ret);
return ret;
}
static int hclge_alloc_tqps(struct hclge_dev *hdev)
{
struct hclge_tqp *tqp;
@@ -7090,6 +7112,10 @@ static int hclge_init_ae_dev(struct hnae3_ae_dev *ae_dev)
goto err_mdiobus_unreg;
}
ret = hclge_config_gro(hdev, true);
if (ret)
goto err_mdiobus_unreg;
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -7221,6 +7247,10 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
return ret;
}
ret = hclge_config_gro(hdev, true);
if (ret)
return ret;
ret = hclge_init_vlan_config(hdev);
if (ret) {
dev_err(&pdev->dev, "VLAN init fail, ret =%d\n", ret);
@@ -7637,6 +7667,14 @@ static void hclge_get_link_mode(struct hnae3_handle *handle,
}
}
static int hclge_gro_en(struct hnae3_handle *handle, int enable)
{
struct hclge_vport *vport = hclge_get_vport(handle);
struct hclge_dev *hdev = vport->back;
return hclge_config_gro(hdev, enable);
}
static const struct hnae3_ae_ops hclge_ops = {
.init_ae_dev = hclge_init_ae_dev,
.uninit_ae_dev = hclge_uninit_ae_dev,
@@ -7708,6 +7746,7 @@ static const struct hnae3_ae_ops hclge_ops = {
.get_hw_reset_stat = hclge_get_hw_reset_stat,
.ae_dev_resetting = hclge_ae_dev_resetting,
.ae_dev_reset_cnt = hclge_ae_dev_reset_cnt,
.set_gro_en = hclge_gro_en,
};
static struct hnae3_ae_algo ae_algo = {
@@ -87,6 +87,8 @@ enum hclgevf_opcode_type {
HCLGEVF_OPC_QUERY_TX_STATUS = 0x0B03,
HCLGEVF_OPC_QUERY_RX_STATUS = 0x0B13,
HCLGEVF_OPC_CFG_COM_TQP_QUEUE = 0x0B20,
/* GRO command */
HCLGEVF_OPC_GRO_GENERIC_CONFIG = 0x0C10,
/* RSS cmd */
HCLGEVF_OPC_RSS_GENERIC_CONFIG = 0x0D01,
HCLGEVF_OPC_RSS_INPUT_TUPLE = 0x0D02,
@@ -149,6 +151,12 @@ struct hclgevf_query_res_cmd {
__le16 rsv[7];
};
#define HCLGEVF_GRO_EN_B 0
struct hclgevf_cfg_gro_status_cmd {
__le16 gro_en;
u8 rsv[22];
};
#define HCLGEVF_RSS_DEFAULT_OUTPORT_B 4
#define HCLGEVF_RSS_HASH_KEY_OFFSET_B 4
#define HCLGEVF_RSS_HASH_KEY_NUM 16
@@ -1655,6 +1655,29 @@ static int hclgevf_init_roce_base_info(struct hclgevf_dev *hdev)
return 0;
}
static int hclgevf_config_gro(struct hclgevf_dev *hdev, bool en)
{
struct hclgevf_cfg_gro_status_cmd *req;
struct hclgevf_desc desc;
int ret;
if (!hnae3_dev_gro_supported(hdev))
return 0;
hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_GRO_GENERIC_CONFIG,
false);
req = (struct hclgevf_cfg_gro_status_cmd *)desc.data;
req->gro_en = cpu_to_le16(en ? 1 : 0);
ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
if (ret)
dev_err(&hdev->pdev->dev,
"VF GRO hardware config cmd failed, ret = %d.\n", ret);
return ret;
}
static int hclgevf_rss_init_hw(struct hclgevf_dev *hdev)
{
struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
@@ -2122,6 +2145,10 @@ static int hclgevf_reset_hdev(struct hclgevf_dev *hdev)
return ret;
}
ret = hclgevf_config_gro(hdev, true);
if (ret)
return ret;
ret = hclgevf_init_vlan_config(hdev);
if (ret) {
dev_err(&hdev->pdev->dev,
@@ -2199,6 +2226,10 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
goto err_config;
}
ret = hclgevf_config_gro(hdev, true);
if (ret)
goto err_config;
/* Initialize RSS for this VF */
ret = hclgevf_rss_init_hw(hdev);
if (ret) {
@@ -2337,6 +2368,13 @@ void hclgevf_update_speed_duplex(struct hclgevf_dev *hdev, u32 speed,
hdev->hw.mac.duplex = duplex;
}
static int hclgevf_gro_en(struct hnae3_handle *handle, int enable)
{
struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
return hclgevf_config_gro(hdev, enable);
}
static void hclgevf_get_media_type(struct hnae3_handle *handle,
u8 *media_type)
{
@@ -2411,6 +2449,7 @@ static const struct hnae3_ae_ops hclgevf_ops = {
.get_hw_reset_stat = hclgevf_get_hw_reset_stat,
.ae_dev_resetting = hclgevf_ae_dev_resetting,
.ae_dev_reset_cnt = hclgevf_ae_dev_reset_cnt,
.set_gro_en = hclgevf_gro_en,
};
static struct hnae3_ae_algo ae_algovf = {