Commit a01aa768 authored by David S. Miller

Merge branch 'hns3-next'

Peng Li says:

====================
net: hns3: code optimizations & bugfixes for HNS3 driver

This patchset includes bugfixes and code optimizations for the HNS3
ethernet controller driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 77c7a7b3 af854724
@@ -240,7 +240,6 @@ static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
 	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
 	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;
 
-	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
 	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
 	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
 }
@@ -2846,10 +2845,10 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
 	bool rx_update, tx_update;
 
-	if (tqp_vector->int_adapt_down > 0) {
-		tqp_vector->int_adapt_down--;
+	/* update param every 1000ms */
+	if (time_before(jiffies,
+			tqp_vector->last_jiffies + msecs_to_jiffies(1000)))
 		return;
-	}
 
 	if (rx_group->coal.gl_adapt_enable) {
 		rx_update = hns3_get_new_int_gl(rx_group);
@@ -2866,7 +2865,6 @@ static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
 	}
 
 	tqp_vector->last_jiffies = jiffies;
-	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
 }
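
The switch from the int_adapt_down countdown to time_before() turns per-poll bookkeeping into a plain 1000 ms rate limit on coalesce tuning. time_before() stays correct when jiffies wraps because it compares via signed subtraction rather than a raw "<". A minimal userspace sketch of the same trick (the wrap_time_before() helper and the sample values are illustrative, not driver code):

#include <stdint.h>
#include <stdio.h>

/* Wrap-safe "a happened before b" for a free-running u32 tick counter,
 * the same idea as the kernel's time_before(): evaluate the difference
 * as a signed quantity so the result survives counter wraparound. */
static int wrap_time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

int main(void)
{
	uint32_t last = 0xFFFFFF00u;   /* shortly before wraparound */
	uint32_t now  = last + 0x200u; /* counter has wrapped past 0 */

	/* naive '<' claims now precedes last; the signed form does not */
	printf("naive: %d, wrap-safe: %d\n",
	       now < last, wrap_time_before(last, now));
	return 0;
}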
static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
......@@ -2909,8 +2907,8 @@ static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
if (!clean_complete)
return budget;
if (likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state)) &&
napi_complete(napi)) {
if (napi_complete(napi) &&
likely(!test_bit(HNS3_NIC_STATE_DOWN, &priv->state))) {
hns3_update_new_int_gl(tqp_vector);
hns3_mask_vector_irq(tqp_vector, 1);
}
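
The reordering above matters because "&&" short-circuits: with the state test first, a device flagged DOWN skipped napi_complete() entirely, leaving NAPI_STATE_SCHED set so a subsequent napi_disable() could spin indefinitely. Calling napi_complete() first always clears the scheduled state; only the IRQ re-enable remains gated on the link state. A hedged sketch of the general poll-completion pattern (clean_rx_ring() and reenable_irq() are hypothetical helpers):

static int example_poll(struct napi_struct *napi, int budget)
{
	int work_done = clean_rx_ring(napi, budget);   /* hypothetical */

	if (work_done >= budget)
		return budget;  /* budget exhausted: stay scheduled */

	/* Always complete first so NAPI_STATE_SCHED is cleared; only
	 * when polling mode was really left is it safe to unmask the
	 * device interrupt. */
	if (napi_complete_done(napi, work_done))
		reenable_irq(napi);                    /* hypothetical */

	return work_done;
}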
@@ -2993,9 +2991,10 @@ static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
 	cur_chain = head->next;
 	while (cur_chain) {
 		chain = cur_chain->next;
-		devm_kfree(&pdev->dev, chain);
+		devm_kfree(&pdev->dev, cur_chain);
 		cur_chain = chain;
 	}
+	head->next = NULL;
 
 	return -ENOMEM;
 }
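
Two bugs are fixed in this unwind path: devm_kfree() was passed chain (the successor) instead of cur_chain, leaking the current node and double-handling the next one, and head->next was left pointing at freed memory. The corrected save-next-then-free pattern, as a runnable userspace sketch (struct node is illustrative):

#include <stdlib.h>

struct node {
	struct node *next;
};

/* Release every element linked after head: stash ->next before freeing
 * the current node, then clear the head so no dangling pointer remains. */
static void free_chain(struct node *head)
{
	struct node *cur = head->next;

	while (cur) {
		struct node *next = cur->next;  /* save successor first */

		free(cur);                      /* free the CURRENT node */
		cur = next;
	}
	head->next = NULL;
}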
@@ -3086,7 +3085,7 @@ static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
 		ret = hns3_get_vector_ring_chain(tqp_vector,
 						 &vector_ring_chain);
 		if (ret)
-			return ret;
+			goto map_ring_fail;
 
 		ret = h->ae_algo->ops->map_ring_to_vector(h,
 			tqp_vector->vector_irq, &vector_ring_chain);
@@ -3180,12 +3179,12 @@ static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
 		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
 
-		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
-			(void)irq_set_affinity_hint(
-				priv->tqp_vector[i].vector_irq,
-				NULL);
-			free_irq(priv->tqp_vector[i].vector_irq,
-				 &priv->tqp_vector[i]);
+		if (tqp_vector->irq_init_flag == HNS3_VECTOR_INITED) {
+			irq_set_affinity_notifier(tqp_vector->vector_irq,
+						  NULL);
+			irq_set_affinity_hint(tqp_vector->vector_irq, NULL);
+			free_irq(tqp_vector->vector_irq, tqp_vector);
+			tqp_vector->irq_init_flag = HNS3_VECTOR_NOT_INITED;
 		}
 
 		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
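
Beyond dropping the repeated priv->tqp_vector[i] dereferences, the rewritten branch detaches the affinity notifier and hint before free_irq() and resets irq_init_flag, making the teardown safe to reach twice. A condensed sketch of that idempotent ordering (the struct my_vector wrapper is illustrative; the irq_* calls are the real kernel APIs):

/* Idempotent per-vector IRQ teardown: detach notifier and affinity
 * hint first, release the IRQ, then mark the vector uninitialised so
 * a second call becomes a no-op. */
static void vector_irq_teardown(struct my_vector *v)
{
	if (v->irq_init_flag != HNS3_VECTOR_INITED)
		return;

	irq_set_affinity_notifier(v->vector_irq, NULL);
	irq_set_affinity_hint(v->vector_irq, NULL);
	free_irq(v->vector_irq, v);
	v->irq_init_flag = HNS3_VECTOR_NOT_INITED;
}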
@@ -476,8 +476,6 @@ enum hns3_link_mode_bits {
 #define HNS3_INT_RL_MAX			0x00EC
 #define HNS3_INT_RL_ENABLE_MASK		0x40
 
-#define HNS3_INT_ADAPT_DOWN_START	100
-
 struct hns3_enet_coalesce {
 	u16 int_gl;
 	u8 gl_adapt_enable;
@@ -512,8 +510,6 @@ struct hns3_enet_tqp_vector {
 	char name[HNAE3_INT_NAME_LEN];
 
-	/* when 0 should adjust interrupt coalesce parameter */
-	u8 int_adapt_down;
 	unsigned long last_jiffies;
 } ____cacheline_internodealigned_in_smp;
@@ -420,7 +420,9 @@ struct hclge_pf_res_cmd {
 #define HCLGE_PF_VEC_NUM_M		GENMASK(7, 0)
 	__le16 pf_intr_vector_number;
 	__le16 pf_own_fun_number;
-	__le32 rsv[3];
+	__le16 tx_buf_size;
+	__le16 dv_buf_size;
+	__le32 rsv[2];
 };
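
The two new __le16 fields consume exactly one of the three dropped reserved words, so the firmware command keeps its size. A compile-time check of that invariant, sketched over an illustrative userspace copy of the struct tail (the kernel itself would express this with BUILD_BUG_ON()):

#include <stdint.h>

struct pf_res_cmd_tail {        /* illustrative excerpt, not the real struct */
	uint16_t pf_intr_vector_number;
	uint16_t pf_own_fun_number;
	uint16_t tx_buf_size;
	uint16_t dv_buf_size;
	uint32_t rsv[2];
};

/* 4 x u16 + 2 x u32 = 16 bytes, matching the former 2 x u16 + 3 x u32 */
_Static_assert(sizeof(struct pf_res_cmd_tail) == 16,
	       "firmware command layout must not change size");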
#define HCLGE_CFG_OFFSET_S 0
@@ -839,6 +841,7 @@ struct hclge_serdes_lb_cmd {
 #define HCLGE_TOTAL_PKT_BUF		0x108000 /* 1.03125M bytes */
 #define HCLGE_DEFAULT_DV		0xA000	 /* 40k byte */
 #define HCLGE_DEFAULT_NON_DCB_DV	0x7800	 /* 30K byte */
+#define HCLGE_NON_DCB_ADDITIONAL_BUF	0x200	 /* 512 byte */
 
 #define HCLGE_TYPE_CRQ			0
 #define HCLGE_TYPE_CSQ			1
@@ -26,6 +26,8 @@
 #define HCLGE_STATS_READ(p, offset) (*((u64 *)((u8 *)(p) + (offset))))
 #define HCLGE_MAC_STATS_FIELD_OFF(f) (offsetof(struct hclge_mac_stats, f))
 
+#define HCLGE_BUF_SIZE_UNIT	256
+
 static int hclge_set_mac_mtu(struct hclge_dev *hdev, int new_mps);
 static int hclge_init_vlan_config(struct hclge_dev *hdev);
 static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev);
@@ -687,6 +689,22 @@ static int hclge_query_pf_resource(struct hclge_dev *hdev)
 	hdev->num_tqps = __le16_to_cpu(req->tqp_num);
 	hdev->pkt_buf_size = __le16_to_cpu(req->buf_size) << HCLGE_BUF_UNIT_S;
 
+	if (req->tx_buf_size)
+		hdev->tx_buf_size =
+			__le16_to_cpu(req->tx_buf_size) << HCLGE_BUF_UNIT_S;
+	else
+		hdev->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+
+	hdev->tx_buf_size = roundup(hdev->tx_buf_size, HCLGE_BUF_SIZE_UNIT);
+
+	if (req->dv_buf_size)
+		hdev->dv_buf_size =
+			__le16_to_cpu(req->dv_buf_size) << HCLGE_BUF_UNIT_S;
+	else
+		hdev->dv_buf_size = HCLGE_DEFAULT_DV;
+
+	hdev->dv_buf_size = roundup(hdev->dv_buf_size, HCLGE_BUF_SIZE_UNIT);
+
 	if (hnae3_dev_roce_supported(hdev)) {
 		hdev->roce_base_msix_offset =
 		hnae3_get_field(__le16_to_cpu(req->msixcap_localid_ba_rocee),
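
The pattern for both buffers is the same: use the firmware-reported size when it is nonzero, fall back to the driver default otherwise, and round the result up to the 256-byte HCLGE_BUF_SIZE_UNIT either way. A runnable sketch of the rounding semantics (round_up_to() mirrors the kernel's roundup() macro; the sample values are illustrative):

#include <stdio.h>

/* Smallest multiple of 'align' that is >= x, like the kernel's roundup() */
static unsigned int round_up_to(unsigned int x, unsigned int align)
{
	return ((x + align - 1) / align) * align;
}

int main(void)
{
	/* a 0x7800 (30 KiB) default is already 256-byte aligned, while
	 * an odd firmware-reported value such as 1500 rounds up to 1536 */
	printf("%u %u\n", round_up_to(0x7800, 256), round_up_to(1500, 256));
	return 0;
}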
@@ -1368,40 +1386,51 @@ static bool hclge_is_rx_buf_ok(struct hclge_dev *hdev,
 {
 	u32 shared_buf_min, shared_buf_tc, shared_std;
 	int tc_num, pfc_enable_num;
-	u32 shared_buf;
+	u32 shared_buf, aligned_mps;
 	u32 rx_priv;
 	int i;
 
 	tc_num = hclge_get_tc_num(hdev);
 	pfc_enable_num = hclge_get_pfc_enalbe_num(hdev);
+	aligned_mps = roundup(hdev->mps, HCLGE_BUF_SIZE_UNIT);
 
 	if (hnae3_dev_dcb_supported(hdev))
-		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_DV;
+		shared_buf_min = 2 * aligned_mps + hdev->dv_buf_size;
 	else
-		shared_buf_min = 2 * hdev->mps + HCLGE_DEFAULT_NON_DCB_DV;
+		shared_buf_min = aligned_mps + HCLGE_NON_DCB_ADDITIONAL_BUF
+				+ hdev->dv_buf_size;
 
-	shared_buf_tc = pfc_enable_num * hdev->mps +
-			(tc_num - pfc_enable_num) * hdev->mps / 2 +
-			hdev->mps;
-	shared_std = max_t(u32, shared_buf_min, shared_buf_tc);
+	shared_buf_tc = pfc_enable_num * aligned_mps +
+			(tc_num - pfc_enable_num) * aligned_mps / 2 +
+			aligned_mps;
+	shared_std = roundup(max_t(u32, shared_buf_min, shared_buf_tc),
+			     HCLGE_BUF_SIZE_UNIT);
 
 	rx_priv = hclge_get_rx_priv_buff_alloced(buf_alloc);
-	if (rx_all <= rx_priv + shared_std)
+	if (rx_all < rx_priv + shared_std)
 		return false;
 
-	shared_buf = rx_all - rx_priv;
+	shared_buf = rounddown(rx_all - rx_priv, HCLGE_BUF_SIZE_UNIT);
 	buf_alloc->s_buf.buf_size = shared_buf;
-	buf_alloc->s_buf.self.high = shared_buf;
-	buf_alloc->s_buf.self.low = 2 * hdev->mps;
+	if (hnae3_dev_dcb_supported(hdev)) {
+		buf_alloc->s_buf.self.high = shared_buf - hdev->dv_buf_size;
+		buf_alloc->s_buf.self.low = buf_alloc->s_buf.self.high
+			- roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+	} else {
+		buf_alloc->s_buf.self.high = aligned_mps +
+				HCLGE_NON_DCB_ADDITIONAL_BUF;
+		buf_alloc->s_buf.self.low =
+			roundup(aligned_mps / 2, HCLGE_BUF_SIZE_UNIT);
+	}
 
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		if ((hdev->hw_tc_map & BIT(i)) &&
 		    (hdev->tm_info.hw_pfc_map & BIT(i))) {
-			buf_alloc->s_buf.tc_thrd[i].low = hdev->mps;
-			buf_alloc->s_buf.tc_thrd[i].high = 2 * hdev->mps;
+			buf_alloc->s_buf.tc_thrd[i].low = aligned_mps;
+			buf_alloc->s_buf.tc_thrd[i].high = 2 * aligned_mps;
 		} else {
 			buf_alloc->s_buf.tc_thrd[i].low = 0;
-			buf_alloc->s_buf.tc_thrd[i].high = hdev->mps;
+			buf_alloc->s_buf.tc_thrd[i].high = aligned_mps;
 		}
 	}
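
With aligned_mps, every term in the shared-buffer math becomes a multiple of the 256-byte unit. A worked example under assumed inputs (mps = 1500, dv_buf_size = 0xA000, four TCs of which two are PFC-enabled; none of these values come from the patch):

#include <stdio.h>

#define ROUNDUP(x, a)	((((x) + (a) - 1) / (a)) * (a))

int main(void)
{
	/* assumed inputs, for illustration only */
	unsigned int mps = 1500, dv = 0xA000, tc_num = 4, pfc = 2;
	unsigned int amps = ROUNDUP(mps, 256);                  /* 1536 */

	unsigned int min = 2 * amps + dv;                       /* 44032 */
	unsigned int per_tc = pfc * amps +
			      (tc_num - pfc) * amps / 2 + amps; /* 6144 */
	unsigned int std = ROUNDUP(min > per_tc ? min : per_tc, 256);

	printf("aligned_mps=%u min=%u tc=%u std=%u\n",
	       amps, min, per_tc, std);                        /* std = 44032 */
	return 0;
}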
@@ -1419,11 +1448,11 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++) {
 		struct hclge_priv_buf *priv = &buf_alloc->priv_buf[i];
 
-		if (total_size < HCLGE_DEFAULT_TX_BUF)
+		if (total_size < hdev->tx_buf_size)
 			return -ENOMEM;
 
 		if (hdev->hw_tc_map & BIT(i))
-			priv->tx_buf_size = HCLGE_DEFAULT_TX_BUF;
+			priv->tx_buf_size = hdev->tx_buf_size;
 		else
 			priv->tx_buf_size = 0;
@@ -1441,7 +1470,6 @@ static int hclge_tx_buffer_calc(struct hclge_dev *hdev,
 static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 				struct hclge_pkt_buf_alloc *buf_alloc)
 {
-#define HCLGE_BUF_SIZE_UNIT	128
 	u32 rx_all = hdev->pkt_buf_size, aligned_mps;
 	int no_pfc_priv_num, pfc_priv_num;
 	struct hclge_priv_buf *priv;
@@ -1467,13 +1495,16 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 			priv->enable = 1;
 
 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
 				priv->wl.low = aligned_mps;
-				priv->wl.high = priv->wl.low + aligned_mps;
+				priv->wl.high =
+					roundup(priv->wl.low + aligned_mps,
+						HCLGE_BUF_SIZE_UNIT);
 				priv->buf_size = priv->wl.high +
-						HCLGE_DEFAULT_DV;
+						 hdev->dv_buf_size;
 			} else {
 				priv->wl.low = 0;
 				priv->wl.high = 2 * aligned_mps;
-				priv->buf_size = priv->wl.high;
+				priv->buf_size = priv->wl.high +
+						 hdev->dv_buf_size;
 			}
 		} else {
 			priv->enable = 0;
@@ -1503,13 +1534,13 @@ static int hclge_rx_buffer_calc(struct hclge_dev *hdev,
 			priv->enable = 1;
 
 			if (hdev->tm_info.hw_pfc_map & BIT(i)) {
-				priv->wl.low = 128;
+				priv->wl.low = 256;
 				priv->wl.high = priv->wl.low + aligned_mps;
-				priv->buf_size = priv->wl.high + HCLGE_DEFAULT_DV;
+				priv->buf_size = priv->wl.high + hdev->dv_buf_size;
 			} else {
 				priv->wl.low = 0;
 				priv->wl.high = aligned_mps;
-				priv->buf_size = priv->wl.high;
+				priv->buf_size = priv->wl.high + hdev->dv_buf_size;
 			}
 		}
@@ -2810,7 +2841,6 @@ static void hclge_reset(struct hclge_dev *hdev)
 	 */
 	ae_dev->reset_type = hdev->reset_type;
 	hdev->reset_count++;
-	hdev->last_reset_time = jiffies;
 	/* perform reset of the stack & ae device for a client */
 	ret = hclge_notify_roce_client(hdev, HNAE3_DOWN_CLIENT);
 	if (ret)
@@ -2873,6 +2903,10 @@ static void hclge_reset(struct hclge_dev *hdev)
 	if (ret)
 		goto err_reset;
 
+	hdev->last_reset_time = jiffies;
+	hdev->reset_fail_cnt = 0;
+	ae_dev->reset_type = HNAE3_NONE_RESET;
+
 	return;
 
 err_reset_lock:
@@ -7377,19 +7411,6 @@ static int hclge_reset_ae_dev(struct hnae3_ae_dev *ae_dev)
 		return ret;
 	}
 
-	ret = hclge_get_cap(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "get hw capability error, ret = %d.\n",
-			ret);
-		return ret;
-	}
-
-	ret = hclge_configure(hdev);
-	if (ret) {
-		dev_err(&pdev->dev, "Configure dev error, ret = %d.\n", ret);
-		return ret;
-	}
-
 	ret = hclge_map_tqp(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "Map tqp error, ret = %d.\n", ret);
@@ -736,6 +736,9 @@ struct hclge_dev {
 	u32 flag;
 
 	u32 pkt_buf_size; /* Total pf buf size for tx/rx */
+	u32 tx_buf_size; /* Tx buffer size for each TC */
+	u32 dv_buf_size; /* Dv buffer size for each TC */
+
 	u32 mps; /* Max packet size */
 
 	/* vport_lock protect resource shared by vports */
 	struct mutex vport_lock;
@@ -12,7 +12,7 @@
 				 SUPPORTED_TP | \
 				 PHY_10BT_FEATURES | \
 				 PHY_100BT_FEATURES | \
-				 PHY_1000BT_FEATURES)
+				 SUPPORTED_1000baseT_Full)
 
 enum hclge_mdio_c22_op_seq {
 	HCLGE_MDIO_C22_WRITE = 1,
@@ -179,6 +179,10 @@ static void hclge_mac_adjust_link(struct net_device *netdev)
 	int duplex, speed;
 	int ret;
 
+	/* When phy link down, do nothing */
+	if (netdev->phydev->link == 0)
+		return;
+
 	speed = netdev->phydev->speed;
 	duplex = netdev->phydev->duplex;
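
The early return guards against programming the MAC with stale values: once the link drops, current phylib reports SPEED_UNKNOWN/DUPLEX_UNKNOWN (both -1), which must not reach the hardware. A hedged sketch of the guard in a generic adjust_link callback (configure_mac() is hypothetical):

/* adjust_link fires on any PHY state change; bail out on link loss so
 * SPEED_UNKNOWN / DUPLEX_UNKNOWN (-1) are never handed to the MAC. */
static void example_adjust_link(struct net_device *ndev)
{
	struct phy_device *phydev = ndev->phydev;

	if (!phydev->link)
		return;

	configure_mac(ndev, phydev->speed, phydev->duplex); /* hypothetical */
}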
@@ -1342,6 +1342,9 @@ static int hclgevf_reset(struct hclgevf_dev *hdev)
 	rtnl_unlock();
 
+	hdev->last_reset_time = jiffies;
+	ae_dev->reset_type = HNAE3_NONE_RESET;
+
 	return ret;
 
 err_reset_lock:
 	rtnl_unlock();
@@ -2401,9 +2404,9 @@ static void hclgevf_uninit_hdev(struct hclgevf_dev *hdev)
 	if (test_bit(HCLGEVF_STATE_IRQ_INITED, &hdev->state)) {
 		hclgevf_misc_irq_uninit(hdev);
 		hclgevf_uninit_msi(hdev);
-		hclgevf_pci_uninit(hdev);
 	}
 
+	hclgevf_pci_uninit(hdev);
 	hclgevf_cmd_uninit(hdev);
 }