Commit 13faf771 authored by David S. Miller

Merge branch 'hns3-next'

Huazhong Tan says:

====================
net: hns3: add some cleanups & optimizations

This patchset includes some cleanups and optimizations for the HNS3
ethernet driver.

[patch 1/8] removes unused and unnecessary structures.

[patch 2/8] uses an ETH_ALEN-sized u8 array to replace the two
mac_addr_* fields in struct hclge_mac_mgr_tbl_entry_cmd.

[patch 3/8] optimizes the barrier used in the IO path.

[patch 4/8] introduces the macro ring_to_netdev() to get the netdev
from a struct hns3_enet_ring variable.

[patch 5/8] makes struct hns3_enet_ring cacheline aligned.

[patch 6/8] adds a minor cleanup for hns3_handle_rx_bd().

[patch 7/8] removes linear data allocating for fraglist SKB.

[patch 8/8] clears hardware error when resetting.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 2f184393 4fdd0bca
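
Editorial note: the core of the series is replacing the per-queue struct hns3_nic_ring_data wrappers with a single flat array of struct hns3_enet_ring hung off priv->ring. As the comment kept in hns3_enet.h below says, the first half of that array holds the TX rings and the second half the RX rings, so the TX ring of queue i lives at index i and its RX counterpart at index i + num_tqps. A minimal standalone sketch of that layout (illustrative names only, not driver code):

	#include <stdio.h>
	#include <stdlib.h>

	/* Stand-in for struct hns3_enet_ring; only the field needed here. */
	struct ring { int queue_index; };

	int main(void)
	{
		int num_tqps = 4;	/* number of TX/RX queue pairs */

		/* One flat allocation of 2 * num_tqps rings, mirroring
		 * priv->ring: TX rings in the first half, RX in the second. */
		struct ring *ring = calloc(2 * num_tqps, sizeof(*ring));
		if (!ring)
			return 1;

		for (int i = 0; i < num_tqps; i++) {
			ring[i].queue_index = i;		/* TX ring of queue i */
			ring[i + num_tqps].queue_index = i;	/* RX ring of queue i */
		}

		printf("queue 2: TX at index 2, RX at index %d\n", 2 + num_tqps);
		free(ring);
		return 0;
	}

Dropping the wrapper removes one pointer dereference per ring access and one devm allocation per queue, which is what most hunks below are doing mechanically.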
@@ -16,15 +16,14 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 			       const char *cmd_buf)
 {
 	struct hns3_nic_priv *priv = h->priv;
-	struct hns3_nic_ring_data *ring_data;
 	struct hns3_enet_ring *ring;
 	u32 base_add_l, base_add_h;
 	u32 queue_num, queue_max;
 	u32 value, i = 0;
 	int cnt;
 
-	if (!priv->ring_data) {
-		dev_err(&h->pdev->dev, "ring_data is NULL\n");
+	if (!priv->ring) {
+		dev_err(&h->pdev->dev, "priv->ring is NULL\n");
 		return -EFAULT;
 	}
 
@@ -44,7 +43,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 		return -EINVAL;
 	}
 
-	ring_data = priv->ring_data;
 	for (i = queue_num; i < queue_max; i++) {
 		/* Each cycle needs to determine whether the instance is reset,
 		 * to prevent reference to invalid memory. And need to ensure
@@ -54,7 +52,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 		    test_bit(HNS3_NIC_STATE_RESETTING, &priv->state))
 			return -EPERM;
 
-		ring = ring_data[(u32)(i + h->kinfo.num_tqps)].ring;
+		ring = &priv->ring[(u32)(i + h->kinfo.num_tqps)];
 		base_add_h = readl_relaxed(ring->tqp->io_base +
 					   HNS3_RING_RX_RING_BASEADDR_H_REG);
 		base_add_l = readl_relaxed(ring->tqp->io_base +
@@ -86,7 +84,7 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 					HNS3_RING_RX_RING_PKTNUM_RECORD_REG);
 		dev_info(&h->pdev->dev, "RX(%d) RING PKTNUM: %u\n", i, value);
 
-		ring = ring_data[i].ring;
+		ring = &priv->ring[i];
 		base_add_h = readl_relaxed(ring->tqp->io_base +
 					   HNS3_RING_TX_RING_BASEADDR_H_REG);
 		base_add_l = readl_relaxed(ring->tqp->io_base +
@@ -130,7 +128,6 @@ static int hns3_dbg_queue_info(struct hnae3_handle *h,
 static int hns3_dbg_queue_map(struct hnae3_handle *h)
 {
 	struct hns3_nic_priv *priv = h->priv;
-	struct hns3_nic_ring_data *ring_data;
 	int i;
 
 	if (!h->ae_algo->ops->get_global_queue_id)
@@ -143,15 +140,12 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
 		u16 global_qid;
 
 		global_qid = h->ae_algo->ops->get_global_queue_id(h, i);
-		ring_data = &priv->ring_data[i];
-		if (!ring_data || !ring_data->ring ||
-		    !ring_data->ring->tqp_vector)
+		if (!priv->ring || !priv->ring[i].tqp_vector)
 			continue;
 
 		dev_info(&h->pdev->dev,
			 " %4d %4d %4d\n",
-			 i, global_qid,
-			 ring_data->ring->tqp_vector->vector_irq);
+			 i, global_qid, priv->ring[i].tqp_vector->vector_irq);
 	}
 
 	return 0;
@@ -160,7 +154,6 @@ static int hns3_dbg_queue_map(struct hnae3_handle *h)
 static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 {
 	struct hns3_nic_priv *priv = h->priv;
-	struct hns3_nic_ring_data *ring_data;
 	struct hns3_desc *rx_desc, *tx_desc;
 	struct device *dev = &h->pdev->dev;
 	struct hns3_enet_ring *ring;
@@ -183,8 +176,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 		return -EINVAL;
 	}
 
-	ring_data = priv->ring_data;
-	ring = ring_data[q_num].ring;
+	ring = &priv->ring[q_num];
 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_TAIL_REG);
 	tx_index = (cnt == 1) ? value : tx_index;
 
@@ -214,7 +206,7 @@ static int hns3_dbg_bd_info(struct hnae3_handle *h, const char *cmd_buf)
 	dev_info(dev, "(TX)vld_ra_ri: %u\n", tx_desc->tx.bdtp_fe_sc_vld_ra_ri);
 	dev_info(dev, "(TX)mss: %u\n", tx_desc->tx.mss);
 
-	ring = ring_data[q_num + h->kinfo.num_tqps].ring;
+	ring = &priv->ring[q_num + h->kinfo.num_tqps];
 	value = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_TAIL_REG);
 	rx_index = (cnt == 1) ? value : tx_index;
 	rx_desc = &ring->desc[rx_index];
......
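One side effect of embedding the rings in a single array shows up in the queue-map check above: &priv->ring[i] can no longer be NULL once priv->ring itself is valid, so the old three-part test collapses to checking the array pointer and the per-ring tqp_vector. A compile-only sketch of the simplified predicate (types reduced to the bare minimum, not the driver's definitions):

	#include <stddef.h>

	struct tqp_vector;				/* opaque here */
	struct enet_ring { struct tqp_vector *tqp_vector; };
	struct nic_priv { struct enet_ring *ring; };

	/* The old code also had to test ring_data and ring_data->ring;
	 * with the flat array only two things can still be unset. */
	int ring_is_usable(const struct nic_priv *priv, int i)
	{
		return priv->ring != NULL && priv->ring[i].tqp_vector != NULL;
	}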
@@ -483,7 +483,7 @@ static void hns3_reset_tx_queue(struct hnae3_handle *h)
 
 	for (i = 0; i < h->kinfo.num_tqps; i++) {
 		dev_queue = netdev_get_tx_queue(ndev,
-						priv->ring_data[i].queue_index);
+						priv->ring[i].queue_index);
 		netdev_tx_reset_queue(dev_queue);
 	}
 }
@@ -1390,9 +1390,7 @@ static int hns3_fill_skb_to_desc(struct hns3_enet_ring *ring,
 netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 {
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
-	struct hns3_nic_ring_data *ring_data =
-		&tx_ring_data(priv, skb->queue_mapping);
-	struct hns3_enet_ring *ring = ring_data->ring;
+	struct hns3_enet_ring *ring = &priv->ring[skb->queue_mapping];
 	struct netdev_queue *dev_queue;
 	int pre_ntu, next_to_use_head;
 	struct sk_buff *frag_skb;
@@ -1444,7 +1442,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 		cpu_to_le16(BIT(HNS3_TXD_FE_B));
 
 	/* Complete translate all packets */
-	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
+	dev_queue = netdev_get_tx_queue(netdev, ring->queue_index);
 	netdev_tx_sent_queue(dev_queue, skb->len);
 
 	wmb(); /* Commit all data before submit */
@@ -1461,7 +1459,7 @@ netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
 	return NETDEV_TX_OK;
 
 out_net_tx_busy:
-	netif_stop_subqueue(netdev, ring_data->queue_index);
+	netif_stop_subqueue(netdev, ring->queue_index);
 	smp_mb(); /* Commit all data before submit */
 
 	return NETDEV_TX_BUSY;
@@ -1584,7 +1582,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
 
 	for (idx = 0; idx < queue_num; idx++) {
 		/* fetch the tx stats */
-		ring = priv->ring_data[idx].ring;
+		ring = &priv->ring[idx];
 		do {
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			tx_bytes += ring->stats.tx_bytes;
@@ -1602,7 +1600,7 @@ static void hns3_nic_get_stats64(struct net_device *netdev,
 		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
 
 		/* fetch the rx stats */
-		ring = priv->ring_data[idx + queue_num].ring;
+		ring = &priv->ring[idx + queue_num];
 		do {
 			start = u64_stats_fetch_begin_irq(&ring->syncp);
 			rx_bytes += ring->stats.rx_bytes;
@@ -1807,7 +1805,7 @@ static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
 
 	priv->tx_timeout_count++;
 
-	tx_ring = priv->ring_data[timeout_queue].ring;
+	tx_ring = &priv->ring[timeout_queue];
 	napi = &tx_ring->tqp_vector->napi;
 
 	netdev_info(ndev,
@@ -2480,18 +2478,19 @@ static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
 
 void hns3_clean_tx_ring(struct hns3_enet_ring *ring)
 {
-	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct net_device *netdev = ring_to_netdev(ring);
 	struct hns3_nic_priv *priv = netdev_priv(netdev);
 	struct netdev_queue *dev_queue;
 	int bytes, pkts;
 	int head;
 
 	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
-	rmb(); /* Make sure head is ready before touch any data */
 
 	if (is_ring_empty(ring) || head == ring->next_to_clean)
 		return; /* no data to poll */
 
+	rmb(); /* Make sure head is ready before touch any data */
+
 	if (unlikely(!is_valid_clean_head(ring, head))) {
 		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
 			   ring->next_to_use, ring->next_to_clean);
@@ -2561,7 +2560,7 @@ static void hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring,
 			ring->stats.sw_err_cnt++;
 			u64_stats_update_end(&ring->syncp);
 
-			hns3_rl_err(ring->tqp_vector->napi.dev,
+			hns3_rl_err(ring_to_netdev(ring),
 				    "alloc rx buffer failed: %d\n",
 				    ret);
 			break;
@@ -2670,7 +2669,7 @@ static int hns3_gro_complete(struct sk_buff *skb, u32 l234info)
 static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
 			     u32 l234info, u32 bd_base_info, u32 ol_info)
 {
-	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct net_device *netdev = ring_to_netdev(ring);
 	int l3_type, l4_type;
 	int ol4_type;
 
@@ -2786,7 +2785,7 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 {
 #define HNS3_NEED_ADD_FRAG	1
 	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
-	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct net_device *netdev = ring_to_netdev(ring);
 	struct sk_buff *skb;
 
 	ring->skb = napi_alloc_skb(&ring->tqp_vector->napi, HNS3_RX_HEAD_SIZE);
@@ -2832,10 +2831,10 @@ static int hns3_alloc_skb(struct hns3_enet_ring *ring, unsigned int length,
 }
 
 static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
-			 struct sk_buff **out_skb, bool pending)
+			 bool pending)
 {
-	struct sk_buff *skb = *out_skb;
-	struct sk_buff *head_skb = *out_skb;
+	struct sk_buff *skb = ring->skb;
+	struct sk_buff *head_skb = skb;
 	struct sk_buff *new_skb;
 	struct hns3_desc_cb *desc_cb;
 	struct hns3_desc *pre_desc;
@@ -2864,10 +2863,9 @@ static int hns3_add_frag(struct hns3_enet_ring *ring, struct hns3_desc *desc,
 			return -ENXIO;
 
 		if (unlikely(ring->frag_num >= MAX_SKB_FRAGS)) {
-			new_skb = napi_alloc_skb(&ring->tqp_vector->napi,
-						 HNS3_RX_HEAD_SIZE);
+			new_skb = napi_alloc_skb(&ring->tqp_vector->napi, 0);
 			if (unlikely(!new_skb)) {
-				hns3_rl_err(ring->tqp_vector->napi.dev,
+				hns3_rl_err(ring_to_netdev(ring),
 					    "alloc rx fraglist skb fail\n");
 				return -ENXIO;
 			}
@@ -2943,7 +2941,7 @@ static void hns3_set_rx_skb_rss_type(struct hns3_enet_ring *ring,
 
 static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
 {
-	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
+	struct net_device *netdev = ring_to_netdev(ring);
 	enum hns3_pkt_l2t_type l2_frame_type;
 	u32 bd_base_info, l234info, ol_info;
 	struct hns3_desc *desc;
@@ -3018,8 +3016,7 @@ static int hns3_handle_bdinfo(struct hns3_enet_ring *ring, struct sk_buff *skb)
 	return 0;
 }
 
-static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
-			     struct sk_buff **out_skb)
+static int hns3_handle_rx_bd(struct hns3_enet_ring *ring)
 {
 	struct sk_buff *skb = ring->skb;
 	struct hns3_desc_cb *desc_cb;
@@ -3057,12 +3054,12 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 
 	if (!skb) {
 		ret = hns3_alloc_skb(ring, length, ring->va);
-		*out_skb = skb = ring->skb;
+		skb = ring->skb;
 
 		if (ret < 0) /* alloc buffer fail */
 			return ret;
 		if (ret > 0) { /* need add frag */
-			ret = hns3_add_frag(ring, desc, &skb, false);
+			ret = hns3_add_frag(ring, desc, false);
 			if (ret)
 				return ret;
 
@@ -3073,7 +3070,7 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 			       ALIGN(ring->pull_len, sizeof(long)));
 		}
 	} else {
-		ret = hns3_add_frag(ring, desc, &skb, true);
+		ret = hns3_add_frag(ring, desc, true);
 		if (ret)
 			return ret;
 
@@ -3091,8 +3088,6 @@ static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
 	}
 
 	skb_record_rx_queue(skb, ring->tqp->tqp_index);
-	*out_skb = skb;
-
 	return 0;
 }
@@ -3101,17 +3096,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 {
 #define RCB_NOF_ALLOC_RX_BUFF_ONCE	16
 	int unused_count = hns3_desc_unused(ring);
-	struct sk_buff *skb = ring->skb;
 	int recv_pkts = 0;
 	int recv_bds = 0;
 	int err, num;
 
 	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
-	rmb(); /* Make sure num taken effect before the other data is touched */
-
 	num -= unused_count;
 	unused_count -= ring->pending_buf;
 
+	if (num <= 0)
+		goto out;
+
+	rmb(); /* Make sure num taken effect before the other data is touched */
+
 	while (recv_pkts < budget && recv_bds < num) {
 		/* Reuse or realloc buffers */
 		if (unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
@@ -3121,27 +3118,19 @@ int hns3_clean_rx_ring(struct hns3_enet_ring *ring, int budget,
 		}
 
 		/* Poll one pkt */
-		err = hns3_handle_rx_bd(ring, &skb);
-		if (unlikely(!skb)) /* This fault cannot be repaired */
-			goto out;
-
-		if (err == -ENXIO) { /* Do not get FE for the packet */
+		err = hns3_handle_rx_bd(ring);
+		/* Do not get FE for the packet or failed to alloc skb */
+		if (unlikely(!ring->skb || err == -ENXIO)) {
 			goto out;
-		} else if (unlikely(err)) {  /* Do jump the err */
-			recv_bds += ring->pending_buf;
-			unused_count += ring->pending_buf;
-			ring->skb = NULL;
-			ring->pending_buf = 0;
-			continue;
+		} else if (likely(!err)) {
+			rx_fn(ring, ring->skb);
+			recv_pkts++;
 		}
 
-		rx_fn(ring, skb);
 		recv_bds += ring->pending_buf;
 		unused_count += ring->pending_buf;
 		ring->skb = NULL;
 		ring->pending_buf = 0;
-
-		recv_pkts++;
 	}
 
 out:
@@ -3484,13 +3473,13 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 		tqp_vector = &priv->tqp_vector[vector_i];
 
 		hns3_add_ring_to_group(&tqp_vector->tx_group,
-				       priv->ring_data[i].ring);
+				       &priv->ring[i]);
 
 		hns3_add_ring_to_group(&tqp_vector->rx_group,
-				       priv->ring_data[i + tqp_num].ring);
+				       &priv->ring[i + tqp_num]);
 
-		priv->ring_data[i].ring->tqp_vector = tqp_vector;
-		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
+		priv->ring[i].tqp_vector = tqp_vector;
+		priv->ring[i + tqp_num].tqp_vector = tqp_vector;
 		tqp_vector->num_tqps++;
 	}
 
@@ -3634,28 +3623,22 @@ static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
 	return 0;
 }
 
-static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
-			     unsigned int ring_type)
+static void hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
+			      unsigned int ring_type)
 {
-	struct hns3_nic_ring_data *ring_data = priv->ring_data;
 	int queue_num = priv->ae_handle->kinfo.num_tqps;
-	struct pci_dev *pdev = priv->ae_handle->pdev;
 	struct hns3_enet_ring *ring;
 	int desc_num;
 
-	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
-	if (!ring)
-		return -ENOMEM;
-
 	if (ring_type == HNAE3_RING_TYPE_TX) {
+		ring = &priv->ring[q->tqp_index];
 		desc_num = priv->ae_handle->kinfo.num_tx_desc;
-		ring_data[q->tqp_index].ring = ring;
-		ring_data[q->tqp_index].queue_index = q->tqp_index;
+		ring->queue_index = q->tqp_index;
 		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
 	} else {
+		ring = &priv->ring[q->tqp_index + queue_num];
 		desc_num = priv->ae_handle->kinfo.num_rx_desc;
-		ring_data[q->tqp_index + queue_num].ring = ring;
-		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
+		ring->queue_index = q->tqp_index;
 		ring->io_base = q->io_base;
 	}
 
@@ -3670,76 +3653,41 @@ static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
 	ring->desc_num = desc_num;
 	ring->next_to_use = 0;
 	ring->next_to_clean = 0;
-
-	return 0;
 }
 
-static int hns3_queue_to_ring(struct hnae3_queue *tqp,
-			      struct hns3_nic_priv *priv)
+static void hns3_queue_to_ring(struct hnae3_queue *tqp,
			       struct hns3_nic_priv *priv)
 {
-	int ret;
-
-	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
-	if (ret)
-		return ret;
-
-	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
-	if (ret) {
-		devm_kfree(priv->dev, priv->ring_data[tqp->tqp_index].ring);
-		return ret;
-	}
-
-	return 0;
+	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
+	hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
 }
 
 static int hns3_get_ring_config(struct hns3_nic_priv *priv)
 {
 	struct hnae3_handle *h = priv->ae_handle;
 	struct pci_dev *pdev = h->pdev;
-	int i, ret;
+	int i;
 
-	priv->ring_data = devm_kzalloc(&pdev->dev,
-				       array3_size(h->kinfo.num_tqps,
-						   sizeof(*priv->ring_data),
-						   2),
-				       GFP_KERNEL);
-	if (!priv->ring_data)
+	priv->ring = devm_kzalloc(&pdev->dev,
+				  array3_size(h->kinfo.num_tqps,
+					      sizeof(*priv->ring), 2),
+				  GFP_KERNEL);
+	if (!priv->ring)
 		return -ENOMEM;
 
-	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
-		if (ret)
-			goto err;
-	}
+	for (i = 0; i < h->kinfo.num_tqps; i++)
+		hns3_queue_to_ring(h->kinfo.tqp[i], priv);
 
 	return 0;
-
-err:
-	while (i--) {
-		devm_kfree(priv->dev, priv->ring_data[i].ring);
-		devm_kfree(priv->dev,
-			   priv->ring_data[i + h->kinfo.num_tqps].ring);
-	}
-
-	devm_kfree(&pdev->dev, priv->ring_data);
-	priv->ring_data = NULL;
-
-	return ret;
 }
 
 static void hns3_put_ring_config(struct hns3_nic_priv *priv)
 {
-	struct hnae3_handle *h = priv->ae_handle;
-	int i;
-
-	if (!priv->ring_data)
+	if (!priv->ring)
 		return;
 
-	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		devm_kfree(priv->dev, priv->ring_data[i].ring);
-		devm_kfree(priv->dev,
-			   priv->ring_data[i + h->kinfo.num_tqps].ring);
-	}
-	devm_kfree(priv->dev, priv->ring_data);
-	priv->ring_data = NULL;
+	devm_kfree(priv->dev, priv->ring);
+	priv->ring = NULL;
 }
 
 static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
@@ -3856,7 +3804,7 @@ static void hns3_init_tx_ring_tc(struct hns3_nic_priv *priv)
 		for (j = 0; j < tc_info->tqp_count; j++) {
 			struct hnae3_queue *q;
 
-			q = priv->ring_data[tc_info->tqp_offset + j].ring->tqp;
+			q = priv->ring[tc_info->tqp_offset + j].tqp;
 			hns3_write_dev(q, HNS3_RING_TX_RING_TC_REG,
 				       tc_info->tc);
 		}
@@ -3871,21 +3819,21 @@ int hns3_init_all_ring(struct hns3_nic_priv *priv)
 	int ret;
 
 	for (i = 0; i < ring_num; i++) {
-		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
+		ret = hns3_alloc_ring_memory(&priv->ring[i]);
 		if (ret) {
 			dev_err(priv->dev,
 				"Alloc ring memory fail! ret=%d\n", ret);
 			goto out_when_alloc_ring_memory;
 		}
 
-		u64_stats_init(&priv->ring_data[i].ring->syncp);
+		u64_stats_init(&priv->ring[i].syncp);
 	}
 
 	return 0;
 
out_when_alloc_ring_memory:
 	for (j = i - 1; j >= 0; j--)
-		hns3_fini_ring(priv->ring_data[j].ring);
+		hns3_fini_ring(&priv->ring[j]);
 
 	return -ENOMEM;
 }
@@ -3896,8 +3844,8 @@ int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
 	int i;
 
 	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		hns3_fini_ring(priv->ring_data[i].ring);
-		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
+		hns3_fini_ring(&priv->ring[i]);
+		hns3_fini_ring(&priv->ring[i + h->kinfo.num_tqps]);
 	}
 	return 0;
 }
@@ -4058,7 +4006,7 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	ret = hns3_init_all_ring(priv);
 	if (ret) {
 		ret = -ENOMEM;
-		goto out_init_ring_data;
+		goto out_init_ring;
 	}
 
 	ret = hns3_init_phy(netdev);
@@ -4097,12 +4045,12 @@ static int hns3_client_init(struct hnae3_handle *handle)
 	hns3_uninit_phy(netdev);
out_init_phy:
 	hns3_uninit_all_ring(priv);
-out_init_ring_data:
+out_init_ring:
 	hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
 	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
-	priv->ring_data = NULL;
+	priv->ring = NULL;
out_get_ring_cfg:
 	priv->ae_handle = NULL;
 	free_netdev(netdev);
@@ -4263,7 +4211,7 @@ static int hns3_clear_rx_ring(struct hns3_enet_ring *ring)
 			/* if alloc new buffer fail, exit directly
 			 * and reclear in up flow.
 			 */
-			netdev_warn(ring->tqp->handle->kinfo.netdev,
+			netdev_warn(ring_to_netdev(ring),
 				    "reserve buffer map failed, ret = %d\n",
 				    ret);
 			return ret;
@@ -4309,10 +4257,10 @@ static void hns3_clear_all_ring(struct hnae3_handle *h, bool force)
 	for (i = 0; i < h->kinfo.num_tqps; i++) {
 		struct hns3_enet_ring *ring;
 
-		ring = priv->ring_data[i].ring;
+		ring = &priv->ring[i];
 		hns3_clear_tx_ring(ring);
 
-		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		ring = &priv->ring[i + h->kinfo.num_tqps];
 		/* Continue to clear other rings even if clearing some
 		 * rings failed.
 		 */
@@ -4336,16 +4284,16 @@ int hns3_nic_reset_all_ring(struct hnae3_handle *h)
 		if (ret)
 			return ret;
 
-		hns3_init_ring_hw(priv->ring_data[i].ring);
+		hns3_init_ring_hw(&priv->ring[i]);
 
 		/* We need to clear tx ring here because self test will
 		 * use the ring and will not run down before up
 		 */
-		hns3_clear_tx_ring(priv->ring_data[i].ring);
-		priv->ring_data[i].ring->next_to_clean = 0;
-		priv->ring_data[i].ring->next_to_use = 0;
+		hns3_clear_tx_ring(&priv->ring[i]);
+		priv->ring[i].next_to_clean = 0;
+		priv->ring[i].next_to_use = 0;
 
-		rx_ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
+		rx_ring = &priv->ring[i + h->kinfo.num_tqps];
 		hns3_init_ring_hw(rx_ring);
 
 		ret = hns3_clear_rx_ring(rx_ring);
		if (ret)
......
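Patch 3's barrier change is visible twice above: in both hns3_clean_tx_ring() and hns3_clean_rx_ring() the rmb() moves from directly after the readl_relaxed() to after the early-return check, so the read barrier is only paid when there is actually work to do, while descriptor reads that follow are still ordered after the register read. A rough userspace analogue of the pattern using C11 atomics (names and types are illustrative, not the driver's):

	#include <stdatomic.h>

	/* Read the producer index cheaply, bail out if the ring is empty,
	 * and only then issue the acquire fence (the rmb() analogue) that
	 * orders the index read before the descriptor reads that follow. */
	int clean_ring(atomic_int *head_reg, int next_to_clean)
	{
		int head = atomic_load_explicit(head_reg, memory_order_relaxed);

		if (head == next_to_clean)
			return 0;	/* nothing to poll: no barrier paid */

		atomic_thread_fence(memory_order_acquire);

		/* ... descriptors up to 'head' may be read safely here ... */
		return head - next_to_clean;
	}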
@@ -409,6 +409,7 @@ struct hns3_enet_ring {
 	struct hns3_enet_ring *next;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	struct hnae3_queue *tqp;
+	int queue_index;
 	struct device *dev; /* will be used for DMA mapping of descriptors */
 
 	/* statistic */
@@ -434,18 +435,7 @@ struct hns3_enet_ring {
 	int pending_buf;
 	struct sk_buff *skb;
 	struct sk_buff *tail_skb;
-};
-
-struct hns_queue;
-
-struct hns3_nic_ring_data {
-	struct hns3_enet_ring *ring;
-	struct napi_struct napi;
-	int queue_index;
-	int (*poll_one)(struct hns3_nic_ring_data *, int, void *);
-	void (*ex_process)(struct hns3_nic_ring_data *, struct sk_buff *);
-	void (*fini_process)(struct hns3_nic_ring_data *);
-};
+} ____cacheline_internodealigned_in_smp;
 
 enum hns3_flow_level_range {
 	HNS3_FLOW_LOW = 0,
@@ -522,7 +512,7 @@ struct hns3_nic_priv {
 	 * the cb for nic to manage the ring buffer, the first half of the
 	 * array is for tx_ring and vice versa for the second half
 	 */
-	struct hns3_nic_ring_data *ring_data;
+	struct hns3_enet_ring *ring;
 	struct hns3_enet_tqp_vector *tqp_vector;
 	u16 vector_num;
 
@@ -617,11 +607,11 @@ static inline bool hns3_nic_resetting(struct net_device *netdev)
 
 #define ring_to_dev(ring) ((ring)->dev)
 
+#define ring_to_netdev(ring)	((ring)->tqp_vector->napi.dev)
+
 #define ring_to_dma_dir(ring) (HNAE3_IS_TX_RING(ring) ? \
 			       DMA_TO_DEVICE : DMA_FROM_DEVICE)
 
-#define tx_ring_data(priv, idx)	((priv)->ring_data[idx])
-
 #define hns3_buf_size(_ring) ((_ring)->buf_size)
 
 static inline unsigned int hns3_page_order(struct hns3_enet_ring *ring)
......
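Patch 4's ring_to_netdev() macro, added just above, encodes the one blessed path from a ring to its net_device; the hns3_enet.c hunks earlier replace both hand-written chains (ring->tqp->handle->kinfo.netdev and ring->tqp_vector->napi.dev) with it. A standalone toy showing what the macro traverses (struct layout reduced to the fields involved, not the real kernel definitions):

	#include <stdio.h>

	struct net_device { const char *name; };
	struct napi_struct { struct net_device *dev; };
	struct tqp_vector { struct napi_struct napi; };
	struct enet_ring { struct tqp_vector *tqp_vector; };

	/* Same shape as the macro added in hns3_enet.h. */
	#define ring_to_netdev(ring) ((ring)->tqp_vector->napi.dev)

	int main(void)
	{
		struct net_device ndev = { "eth0" };
		struct tqp_vector vec = { .napi = { .dev = &ndev } };
		struct enet_ring ring = { .tqp_vector = &vec };

		printf("%s\n", ring_to_netdev(&ring)->name);	/* prints eth0 */
		return 0;
	}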
@@ -203,7 +203,7 @@ static u32 hns3_lb_check_rx_ring(struct hns3_nic_priv *priv, u32 budget)
 	kinfo = &h->kinfo;
 
 	for (i = kinfo->num_tqps; i < kinfo->num_tqps * 2; i++) {
-		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+		struct hns3_enet_ring *ring = &priv->ring[i];
 		struct hns3_enet_ring_group *rx_group;
 		u64 pre_rx_pkt;
 
@@ -226,7 +226,7 @@ static void hns3_lb_clear_tx_ring(struct hns3_nic_priv *priv, u32 start_ringid,
 	u32 i;
 
 	for (i = start_ringid; i <= end_ringid; i++) {
-		struct hns3_enet_ring *ring = priv->ring_data[i].ring;
+		struct hns3_enet_ring *ring = &priv->ring[i];
 
 		hns3_clean_tx_ring(ring);
 	}
@@ -491,7 +491,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 
 	/* get stats for Tx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
-		ring = nic_priv->ring_data[i].ring;
+		ring = &nic_priv->ring[i];
 		for (j = 0; j < HNS3_TXQ_STATS_COUNT; j++) {
 			stat = (u8 *)ring + hns3_txq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
@@ -500,7 +500,7 @@ static u64 *hns3_get_stats_tqps(struct hnae3_handle *handle, u64 *data)
 
 	/* get stats for Rx */
 	for (i = 0; i < kinfo->num_tqps; i++) {
-		ring = nic_priv->ring_data[i + kinfo->num_tqps].ring;
+		ring = &nic_priv->ring[i + kinfo->num_tqps];
 		for (j = 0; j < HNS3_RXQ_STATS_COUNT; j++) {
 			stat = (u8 *)ring + hns3_rxq_stats[j].stats_offset;
 			*data++ = *(u64 *)stat;
@@ -603,8 +603,8 @@ static void hns3_get_ringparam(struct net_device *netdev,
 	param->tx_max_pending = HNS3_RING_MAX_PENDING;
 	param->rx_max_pending = HNS3_RING_MAX_PENDING;
 
-	param->tx_pending = priv->ring_data[0].ring->desc_num;
-	param->rx_pending = priv->ring_data[queue_num].ring->desc_num;
+	param->tx_pending = priv->ring[0].desc_num;
+	param->rx_pending = priv->ring[queue_num].desc_num;
 }
 
 static void hns3_get_pauseparam(struct net_device *netdev,
@@ -906,9 +906,8 @@ static void hns3_change_all_ring_bd_num(struct hns3_nic_priv *priv,
 	h->kinfo.num_rx_desc = rx_desc_num;
 
 	for (i = 0; i < h->kinfo.num_tqps; i++) {
-		priv->ring_data[i].ring->desc_num = tx_desc_num;
-		priv->ring_data[i + h->kinfo.num_tqps].ring->desc_num =
-			rx_desc_num;
+		priv->ring[i].desc_num = tx_desc_num;
+		priv->ring[i + h->kinfo.num_tqps].desc_num = rx_desc_num;
 	}
 }
 
@@ -924,7 +923,7 @@ static struct hns3_enet_ring *hns3_backup_ringparam(struct hns3_nic_priv *priv)
 		return NULL;
 
 	for (i = 0; i < handle->kinfo.num_tqps * 2; i++) {
-		memcpy(&tmp_rings[i], priv->ring_data[i].ring,
+		memcpy(&tmp_rings[i], &priv->ring[i],
 		       sizeof(struct hns3_enet_ring));
 		tmp_rings[i].skb = NULL;
 	}
@@ -972,8 +971,8 @@ static int hns3_set_ringparam(struct net_device *ndev,
 	/* Hardware requires that its descriptors must be multiple of eight */
 	new_tx_desc_num = ALIGN(param->tx_pending, HNS3_RING_BD_MULTIPLE);
 	new_rx_desc_num = ALIGN(param->rx_pending, HNS3_RING_BD_MULTIPLE);
-	old_tx_desc_num = priv->ring_data[0].ring->desc_num;
-	old_rx_desc_num = priv->ring_data[queue_num].ring->desc_num;
+	old_tx_desc_num = priv->ring[0].desc_num;
+	old_rx_desc_num = priv->ring[queue_num].desc_num;
 	if (old_tx_desc_num == new_tx_desc_num &&
 	    old_rx_desc_num == new_rx_desc_num)
 		return 0;
@@ -1002,7 +1001,7 @@ static int hns3_set_ringparam(struct net_device *ndev,
 		hns3_change_all_ring_bd_num(priv, old_tx_desc_num,
 					    old_rx_desc_num);
 		for (i = 0; i < h->kinfo.num_tqps * 2; i++)
-			memcpy(priv->ring_data[i].ring, &tmp_rings[i],
+			memcpy(&priv->ring[i], &tmp_rings[i],
 			       sizeof(struct hns3_enet_ring));
 	} else {
 		for (i = 0; i < h->kinfo.num_tqps * 2; i++)
@@ -1103,8 +1102,8 @@ static int hns3_get_coalesce_per_queue(struct net_device *netdev, u32 queue,
 		return -EINVAL;
 	}
 
-	tx_vector = priv->ring_data[queue].ring->tqp_vector;
-	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+	tx_vector = priv->ring[queue].tqp_vector;
+	rx_vector = priv->ring[queue_num + queue].tqp_vector;
 
 	cmd->use_adaptive_tx_coalesce =
 		tx_vector->tx_group.coal.gl_adapt_enable;
@@ -1229,8 +1228,8 @@ static void hns3_set_coalesce_per_queue(struct net_device *netdev,
 	struct hnae3_handle *h = priv->ae_handle;
 	int queue_num = h->kinfo.num_tqps;
 
-	tx_vector = priv->ring_data[queue].ring->tqp_vector;
-	rx_vector = priv->ring_data[queue_num + queue].ring->tqp_vector;
+	tx_vector = priv->ring[queue].tqp_vector;
+	rx_vector = priv->ring[queue_num + queue].tqp_vector;
 
 	tx_vector->tx_group.coal.gl_adapt_enable =
 		cmd->use_adaptive_tx_coalesce;
......
@@ -5,6 +5,7 @@
 #define __HCLGE_CMD_H
 
 #include <linux/types.h>
 #include <linux/io.h>
+#include <linux/etherdevice.h>
 
 #define HCLGE_CMDQ_TX_TIMEOUT		30000
@@ -712,8 +713,7 @@ struct hclge_mac_mgr_tbl_entry_cmd {
 	u8 flags;
 	u8 resp_code;
 	__le16 vlan_tag;
-	__le32 mac_addr_hi32;
-	__le16 mac_addr_lo16;
+	u8 mac_addr[ETH_ALEN];
 	__le16 rsv1;
 	__le16 ethter_type;
 	__le16 egress_port;
......
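Patch 2's layout change above can be sanity-checked: the old pair of fields spelled the LLDP multicast address 01:80:c2:00:00:0e as a byte-swapped 32-bit plus 16-bit value, while the new u8 mac_addr[ETH_ALEN] stores the bytes directly. A quick host-side check of the equivalence (valid on a little-endian host, where the driver's cpu_to_le* wrappers are no-ops; not driver code):

	#include <stdio.h>
	#include <string.h>
	#include <arpa/inet.h>	/* htonl(), htons() */

	int main(void)
	{
		unsigned char old_bytes[6], new_bytes[6] = {
			0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e	/* as in the diff */
		};
		unsigned int hi = htonl(0x0180C200);	/* old mac_addr_hi32 */
		unsigned short lo = htons(0x000E);	/* old mac_addr_lo16 */

		memcpy(old_bytes, &hi, sizeof(hi));
		memcpy(old_bytes + sizeof(hi), &lo, sizeof(lo));

		puts(memcmp(old_bytes, new_bytes, 6) ? "differ" : "same bytes");
		return 0;
	}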
@@ -325,8 +325,7 @@ static const struct hclge_mac_mgr_tbl_entry_cmd hclge_mgr_table[] = {
 	{
 		.flags = HCLGE_MAC_MGR_MASK_VLAN_B,
 		.ethter_type = cpu_to_le16(ETH_P_LLDP),
-		.mac_addr_hi32 = cpu_to_le32(htonl(0x0180C200)),
-		.mac_addr_lo16 = cpu_to_le16(htons(0x000E)),
+		.mac_addr = {0x01, 0x80, 0xc2, 0x00, 0x00, 0x0e},
 		.i_port_bitmap = 0x1,
 	},
 };
@@ -9801,6 +9800,9 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
+	/* Log and clear the hw errors those already occurred */
+	hclge_handle_all_hns_hw_errors(ae_dev);
+
 	/* Re-enable the hw error interrupts because
 	 * the interrupts get disabled on global reset.
 	 */
......