Commit c3ff3b02 authored by David S. Miller

Merge branch 'hns3-cleanups'

Huazhong Tan says:

====================
net: hns3: some cleanups for -next

To improve code readability and maintainability, this series
refactors out some bloated functions in the HNS3 ethernet driver.

change log:
V2: remove an unused variable in #5

previous version:
V1: https://patchwork.kernel.org/project/netdevbpf/cover/1612943005-59416-1-git-send-email-tanhuazhong@huawei.com/
====================
Acked-by: Jakub Kicinski <kuba@kernel.org>
parents 4fb37e72 80a9f3f1
@@ -423,6 +423,30 @@ static ssize_t hns3_dbg_cmd_read(struct file *filp, char __user *buffer,
 	return (*ppos = len);
 }
 
+static int hns3_dbg_check_cmd(struct hnae3_handle *handle, char *cmd_buf)
+{
+	int ret = 0;
+
+	if (strncmp(cmd_buf, "help", 4) == 0)
+		hns3_dbg_help(handle);
+	else if (strncmp(cmd_buf, "queue info", 10) == 0)
+		ret = hns3_dbg_queue_info(handle, cmd_buf);
+	else if (strncmp(cmd_buf, "queue map", 9) == 0)
+		ret = hns3_dbg_queue_map(handle);
+	else if (strncmp(cmd_buf, "bd info", 7) == 0)
+		ret = hns3_dbg_bd_info(handle, cmd_buf);
+	else if (strncmp(cmd_buf, "dev capability", 14) == 0)
+		hns3_dbg_dev_caps(handle);
+	else if (strncmp(cmd_buf, "dev spec", 8) == 0)
+		hns3_dbg_dev_specs(handle);
+	else if (handle->ae_algo->ops->dbg_run_cmd)
+		ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
+	else
+		ret = -EOPNOTSUPP;
+
+	return ret;
+}
+
 static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 				  size_t count, loff_t *ppos)
 {
@@ -430,7 +454,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 	struct hns3_nic_priv *priv = handle->priv;
 	char *cmd_buf, *cmd_buf_tmp;
 	int uncopied_bytes;
-	int ret = 0;
+	int ret;
 
 	if (*ppos != 0)
 		return 0;
@@ -461,23 +485,7 @@ static ssize_t hns3_dbg_cmd_write(struct file *filp, const char __user *buffer,
 		count = cmd_buf_tmp - cmd_buf + 1;
 	}
 
-	if (strncmp(cmd_buf, "help", 4) == 0)
-		hns3_dbg_help(handle);
-	else if (strncmp(cmd_buf, "queue info", 10) == 0)
-		ret = hns3_dbg_queue_info(handle, cmd_buf);
-	else if (strncmp(cmd_buf, "queue map", 9) == 0)
-		ret = hns3_dbg_queue_map(handle);
-	else if (strncmp(cmd_buf, "bd info", 7) == 0)
-		ret = hns3_dbg_bd_info(handle, cmd_buf);
-	else if (strncmp(cmd_buf, "dev capability", 14) == 0)
-		hns3_dbg_dev_caps(handle);
-	else if (strncmp(cmd_buf, "dev spec", 8) == 0)
-		hns3_dbg_dev_specs(handle);
-	else if (handle->ae_algo->ops->dbg_run_cmd)
-		ret = handle->ae_algo->ops->dbg_run_cmd(handle, cmd_buf);
-	else
-		ret = -EOPNOTSUPP;
+	ret = hns3_dbg_check_cmd(handle, cmd_buf);
 	if (ret)
 		hns3_dbg_help(handle);
...
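
The new hns3_dbg_check_cmd() is plain prefix-match dispatch, which leaves
hns3_dbg_cmd_write() with only the copy-from-user plumbing and the on-error
help fallback. A minimal userspace sketch of the same shape (the helper
names below are invented stand-ins, not driver code):

#include <errno.h>
#include <stdio.h>
#include <string.h>

/* Stand-ins for the driver's dump helpers. */
static void print_help(void)
{
	puts("available: help | queue info <id>");
}

static int dump_queue_info(const char *arg)
{
	printf("queue info for \"%s\"\n", arg);
	return 0;
}

/* Prefix-match dispatch in the shape of hns3_dbg_check_cmd(): one branch
 * per command, unknown input falls through to -EOPNOTSUPP. */
static int check_cmd(const char *cmd_buf)
{
	int ret = 0;

	if (strncmp(cmd_buf, "help", 4) == 0)
		print_help();
	else if (strncmp(cmd_buf, "queue info", 10) == 0)
		ret = dump_queue_info(cmd_buf);
	else
		ret = -EOPNOTSUPP;

	return ret;
}

int main(void)
{
	if (check_cmd("queue info 3"))
		print_help(); /* unknown or failed command: show usage */
	return 0;
}
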
@@ -189,38 +189,53 @@ static bool hclge_is_special_opcode(u16 opcode)
 	return false;
 }
 
-static int hclge_cmd_convert_err_code(u16 desc_ret)
+struct errcode {
+	u32 imp_errcode;
+	int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+				int num)
 {
-	switch (desc_ret) {
-	case HCLGE_CMD_EXEC_SUCCESS:
-		return 0;
-	case HCLGE_CMD_NO_AUTH:
-		return -EPERM;
-	case HCLGE_CMD_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case HCLGE_CMD_QUEUE_FULL:
-		return -EXFULL;
-	case HCLGE_CMD_NEXT_ERR:
-		return -ENOSR;
-	case HCLGE_CMD_UNEXE_ERR:
-		return -ENOTBLK;
-	case HCLGE_CMD_PARA_ERR:
-		return -EINVAL;
-	case HCLGE_CMD_RESULT_ERR:
-		return -ERANGE;
-	case HCLGE_CMD_TIMEOUT:
-		return -ETIME;
-	case HCLGE_CMD_HILINK_ERR:
-		return -ENOLINK;
-	case HCLGE_CMD_QUEUE_ILLEGAL:
-		return -ENXIO;
-	case HCLGE_CMD_INVALID:
-		return -EBADR;
-	default:
-		return -EIO;
+	struct hclge_desc *desc_to_use;
+	int handle = 0;
+
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
 	}
 }
 
+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+	struct errcode hclge_cmd_errcode[] = {
+		{HCLGE_CMD_EXEC_SUCCESS, 0},
+		{HCLGE_CMD_NO_AUTH, -EPERM},
+		{HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+		{HCLGE_CMD_QUEUE_FULL, -EXFULL},
+		{HCLGE_CMD_NEXT_ERR, -ENOSR},
+		{HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+		{HCLGE_CMD_PARA_ERR, -EINVAL},
+		{HCLGE_CMD_RESULT_ERR, -ERANGE},
+		{HCLGE_CMD_TIMEOUT, -ETIME},
+		{HCLGE_CMD_HILINK_ERR, -ENOLINK},
+		{HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+		{HCLGE_CMD_INVALID, -EBADR},
+	};
+	u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+	u32 i;
+
+	for (i = 0; i < errcode_count; i++)
+		if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+			return hclge_cmd_errcode[i].common_errno;
+
+	return -EIO;
+}
+
 static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 				  int num, int ntc)
 {
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 	return hclge_cmd_convert_err_code(desc_ret);
 }
 
+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+				  int num, int ntc)
+{
+	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+	bool is_completed = false;
+	u32 timeout = 0;
+	int handle, ret;
+
+	/**
+	 * If the command is sync, wait for the firmware to write back,
+	 * if multi descriptors to be sent, use the first one to check
+	 */
+	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclge_cmd_csq_done(hw)) {
+				is_completed = true;
+				break;
+			}
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (!is_completed)
+		ret = -EBADE;
+	else
+		ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+	/* Clean the command send queue */
+	handle = hclge_cmd_csq_clean(hw);
+	if (handle < 0)
+		ret = handle;
+	else if (handle != num)
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+
+	return ret;
+}
+
 /**
  * hclge_cmd_send - send command to command queue
  * @hw: pointer to the hw struct
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
 	struct hclge_cmq_ring *csq = &hw->cmq.csq;
-	struct hclge_desc *desc_to_use;
-	bool complete = false;
-	u32 timeout = 0;
-	int handle = 0;
-	int retval;
+	int ret;
 	int ntc;
 
 	spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	while (handle < num) {
-		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
-		*desc_to_use = desc[handle];
-		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
-			hw->cmq.csq.next_to_use = 0;
-		handle++;
-	}
+
+	hclge_cmd_copy_desc(hw, desc, num);
 
 	/* Write to hardware */
 	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);
 
-	/**
-	 * If the command is sync, wait for the firmware to write back,
-	 * if multi descriptors to be sent, use the first one to check
-	 */
-	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
-		do {
-			if (hclge_cmd_csq_done(hw)) {
-				complete = true;
-				break;
-			}
-			udelay(1);
-			timeout++;
-		} while (timeout < hw->cmq.tx_timeout);
-	}
-
-	if (!complete)
-		retval = -EBADE;
-	else
-		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
-	/* Clean the command send queue */
-	handle = hclge_cmd_csq_clean(hw);
-	if (handle < 0)
-		retval = handle;
-	else if (handle != num)
-		dev_warn(&hdev->pdev->dev,
-			 "cleaned %d, need to clean %d\n", handle, num);
+	ret = hclge_cmd_check_result(hw, desc, num, ntc);
 
 	spin_unlock_bh(&hw->cmq.csq.lock);
 
-	return retval;
+	return ret;
 }
 
 static void hclge_set_default_capability(struct hclge_dev *hdev)
...
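
Replacing the twelve-arm switch with a { firmware code, errno } table keeps
hclge_cmd_convert_err_code() a fixed-size loop as codes are added. A
standalone sketch of the lookup, using hypothetical IMP codes rather than
the real HCLGE_CMD_* values:

#include <errno.h>
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

/* Hypothetical firmware return codes, standing in for HCLGE_CMD_*. */
enum { IMP_EXEC_SUCCESS = 0, IMP_NO_AUTH = 1, IMP_NOT_SUPPORTED = 2 };

struct errcode {
	uint32_t imp_errcode; /* code written back by the firmware */
	int common_errno;     /* matching -E* value */
};

/* Linear scan of a small table; unknown codes fall through to -EIO,
 * just like the default: arm of the switch it replaces. */
static int convert_err_code(uint16_t desc_ret)
{
	static const struct errcode table[] = {
		{IMP_EXEC_SUCCESS, 0},
		{IMP_NO_AUTH, -EPERM},
		{IMP_NOT_SUPPORTED, -EOPNOTSUPP},
	};
	size_t i;

	for (i = 0; i < ARRAY_SIZE(table); i++)
		if (table[i].imp_errcode == desc_ret)
			return table[i].common_errno;

	return -EIO;
}

int main(void)
{
	printf("%d %d\n", convert_err_code(IMP_NO_AUTH),
	       convert_err_code(0xff)); /* -EPERM, then -EIO */
	return 0;
}

One difference worth noting: the sketch marks the table static const so it
is built once, while the patch declares it as a plain local array that is
re-initialised on every call; either way the lookup stays a short linear
scan over a dozen entries.
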
@@ -984,39 +984,39 @@ static void hclge_dbg_dump_qos_pri_map(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "pri_7_to_tc: 0x%x\n", pri_map->pri7_tc);
 }
 
-static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+static int hclge_dbg_dump_tx_buf_cfg(struct hclge_dev *hdev)
 {
 	struct hclge_tx_buff_alloc_cmd *tx_buf_cmd;
-	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
-	struct hclge_rx_priv_wl_buf *rx_priv_wl;
-	struct hclge_rx_com_wl *rx_packet_cnt;
-	struct hclge_rx_com_thrd *rx_com_thrd;
-	struct hclge_rx_com_wl *rx_com_wl;
-	enum hclge_opcode_type cmd;
-	struct hclge_desc desc[2];
+	struct hclge_desc desc;
 	int i, ret;
 
-	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_TX_BUFF_ALLOC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, "dump qos buf cfg\n");
 
-	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc[0].data;
+	tx_buf_cmd = (struct hclge_tx_buff_alloc_cmd *)desc.data;
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "tx_packet_buf_tc_%d: 0x%x\n", i,
 			 le16_to_cpu(tx_buf_cmd->tx_pkt_buff[i]));
 
-	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	return 0;
+}
+
+static int hclge_dbg_dump_rx_priv_buf_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_priv_buff_cmd *rx_buf_cmd;
+	struct hclge_desc desc;
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_PRIV_BUFF_ALLOC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, "\n");
-	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc[0].data;
+	rx_buf_cmd = (struct hclge_rx_priv_buff_cmd *)desc.data;
 	for (i = 0; i < HCLGE_MAX_TC_NUM; i++)
 		dev_info(&hdev->pdev->dev, "rx_packet_buf_tc_%d: 0x%x\n", i,
 			 le16_to_cpu(rx_buf_cmd->buf_num[i]));
@@ -1024,43 +1024,61 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 	dev_info(&hdev->pdev->dev, "rx_share_buf: 0x%x\n",
 		 le16_to_cpu(rx_buf_cmd->shared_buf));
 
-	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	return 0;
+}
+
+static int hclge_dbg_dump_rx_common_wl_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_com_wl *rx_com_wl;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_COM_WL_ALLOC, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
-	rx_com_wl = (struct hclge_rx_com_wl *)desc[0].data;
+	rx_com_wl = (struct hclge_rx_com_wl *)desc.data;
 	dev_info(&hdev->pdev->dev, "\n");
 	dev_info(&hdev->pdev->dev, "rx_com_wl: high: 0x%x, low: 0x%x\n",
 		 le16_to_cpu(rx_com_wl->com_wl.high),
 		 le16_to_cpu(rx_com_wl->com_wl.low));
 
-	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
-	hclge_cmd_setup_basic_desc(desc, cmd, true);
-	ret = hclge_cmd_send(&hdev->hw, desc, 1);
+	return 0;
+}
+
+static int hclge_dbg_dump_rx_global_pkt_cnt(struct hclge_dev *hdev)
+{
+	struct hclge_rx_com_wl *rx_packet_cnt;
+	struct hclge_desc desc;
+	int ret;
+
+	hclge_cmd_setup_basic_desc(&desc, HCLGE_OPC_RX_GBL_PKT_CNT, true);
+	ret = hclge_cmd_send(&hdev->hw, &desc, 1);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
-	rx_packet_cnt = (struct hclge_rx_com_wl *)desc[0].data;
+	rx_packet_cnt = (struct hclge_rx_com_wl *)desc.data;
 	dev_info(&hdev->pdev->dev,
 		 "rx_global_packet_cnt: high: 0x%x, low: 0x%x\n",
 		 le16_to_cpu(rx_packet_cnt->com_wl.high),
 		 le16_to_cpu(rx_packet_cnt->com_wl.low));
-	dev_info(&hdev->pdev->dev, "\n");
 
-	if (!hnae3_dev_dcb_supported(hdev)) {
-		dev_info(&hdev->pdev->dev,
-			 "Only DCB-supported dev supports rx priv wl\n");
-		return;
-	}
+	return 0;
+}
 
-	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
-	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+static int hclge_dbg_dump_rx_priv_wl_buf_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_priv_wl_buf *rx_priv_wl;
+	struct hclge_desc desc[2];
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_PRIV_WL_ALLOC, true);
 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	rx_priv_wl = (struct hclge_rx_priv_wl_buf *)desc[0].data;
 	for (i = 0; i < HCLGE_TC_NUM_ONE_DESC; i++)
@@ -1077,13 +1095,21 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 			 le16_to_cpu(rx_priv_wl->tc_wl[i].high),
 			 le16_to_cpu(rx_priv_wl->tc_wl[i].low));
 
-	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
-	hclge_cmd_setup_basic_desc(&desc[0], cmd, true);
+	return 0;
+}
+
+static int hclge_dbg_dump_rx_common_threshold_cfg(struct hclge_dev *hdev)
+{
+	struct hclge_rx_com_thrd *rx_com_thrd;
+	struct hclge_desc desc[2];
+	int i, ret;
+
+	hclge_cmd_setup_basic_desc(&desc[0], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
 	desc[0].flag |= cpu_to_le16(HCLGE_CMD_FLAG_NEXT);
-	hclge_cmd_setup_basic_desc(&desc[1], cmd, true);
+	hclge_cmd_setup_basic_desc(&desc[1], HCLGE_OPC_RX_COM_THRD_ALLOC, true);
 	ret = hclge_cmd_send(&hdev->hw, desc, 2);
 	if (ret)
-		goto err_qos_cmd_send;
+		return ret;
 
 	dev_info(&hdev->pdev->dev, "\n");
 	rx_com_thrd = (struct hclge_rx_com_thrd *)desc[0].data;
@@ -1100,6 +1126,52 @@ static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
 			 i + HCLGE_TC_NUM_ONE_DESC,
 			 le16_to_cpu(rx_com_thrd->com_thrd[i].high),
 			 le16_to_cpu(rx_com_thrd->com_thrd[i].low));
+
+	return 0;
+}
+
+static void hclge_dbg_dump_qos_buf_cfg(struct hclge_dev *hdev)
+{
+	enum hclge_opcode_type cmd;
+	int ret;
+
+	cmd = HCLGE_OPC_TX_BUFF_ALLOC;
+	ret = hclge_dbg_dump_tx_buf_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_PRIV_BUFF_ALLOC;
+	ret = hclge_dbg_dump_rx_priv_buf_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_COM_WL_ALLOC;
+	ret = hclge_dbg_dump_rx_common_wl_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_GBL_PKT_CNT;
+	ret = hclge_dbg_dump_rx_global_pkt_cnt(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	dev_info(&hdev->pdev->dev, "\n");
+	if (!hnae3_dev_dcb_supported(hdev)) {
+		dev_info(&hdev->pdev->dev,
+			 "Only DCB-supported dev supports rx priv wl\n");
+		return;
+	}
+
+	cmd = HCLGE_OPC_RX_PRIV_WL_ALLOC;
+	ret = hclge_dbg_dump_rx_priv_wl_buf_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
+	cmd = HCLGE_OPC_RX_COM_THRD_ALLOC;
+	ret = hclge_dbg_dump_rx_common_threshold_cfg(hdev);
+	if (ret)
+		goto err_qos_cmd_send;
+
 	return;
 
 err_qos_cmd_send:
...
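
After the split, hclge_dbg_dump_qos_buf_cfg() is a pure sequencer: each
hardware block gets a helper that owns its own descriptor and returns 0 or
a -errno, and the caller chains the helpers through one error label. A
compact sketch of that control flow (helper names hypothetical):

#include <stdio.h>

/* Stand-ins for the per-command dump helpers, each returning 0 or -errno. */
static int dump_tx_buf_cfg(void)
{
	puts("tx buf cfg");
	return 0;
}

static int dump_rx_priv_buf_cfg(void)
{
	puts("rx priv buf cfg");
	return 0;
}

/* Sequencer in the shape of the new hclge_dbg_dump_qos_buf_cfg(): run each
 * helper in order and funnel any failure into a single error path. */
static void dump_qos_buf_cfg(void)
{
	int ret;

	ret = dump_tx_buf_cfg();
	if (ret)
		goto err_cmd_send;

	ret = dump_rx_priv_buf_cfg();
	if (ret)
		goto err_cmd_send;

	return;

err_cmd_send:
	fprintf(stderr, "dump qos buf cfg failed, ret = %d\n", ret);
}

int main(void)
{
	dump_qos_buf_cfg();
	return 0;
}
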
@@ -176,36 +176,111 @@ void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
 	desc->flag &= cpu_to_le16(~HCLGEVF_CMD_FLAG_WR);
 }
 
+struct vf_errcode {
+	u32 imp_errcode;
+	int common_errno;
+};
+
+static void hclgevf_cmd_copy_desc(struct hclgevf_hw *hw,
+				  struct hclgevf_desc *desc, int num)
+{
+	struct hclgevf_desc *desc_to_use;
+	int handle = 0;
+
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
+	}
+}
+
 static int hclgevf_cmd_convert_err_code(u16 desc_ret)
 {
-	switch (desc_ret) {
-	case HCLGEVF_CMD_EXEC_SUCCESS:
-		return 0;
-	case HCLGEVF_CMD_NO_AUTH:
-		return -EPERM;
-	case HCLGEVF_CMD_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case HCLGEVF_CMD_QUEUE_FULL:
-		return -EXFULL;
-	case HCLGEVF_CMD_NEXT_ERR:
-		return -ENOSR;
-	case HCLGEVF_CMD_UNEXE_ERR:
-		return -ENOTBLK;
-	case HCLGEVF_CMD_PARA_ERR:
-		return -EINVAL;
-	case HCLGEVF_CMD_RESULT_ERR:
-		return -ERANGE;
-	case HCLGEVF_CMD_TIMEOUT:
-		return -ETIME;
-	case HCLGEVF_CMD_HILINK_ERR:
-		return -ENOLINK;
-	case HCLGEVF_CMD_QUEUE_ILLEGAL:
-		return -ENXIO;
-	case HCLGEVF_CMD_INVALID:
-		return -EBADR;
-	default:
-		return -EIO;
-	}
+	struct vf_errcode hclgevf_cmd_errcode[] = {
+		{HCLGEVF_CMD_EXEC_SUCCESS, 0},
+		{HCLGEVF_CMD_NO_AUTH, -EPERM},
+		{HCLGEVF_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+		{HCLGEVF_CMD_QUEUE_FULL, -EXFULL},
+		{HCLGEVF_CMD_NEXT_ERR, -ENOSR},
+		{HCLGEVF_CMD_UNEXE_ERR, -ENOTBLK},
+		{HCLGEVF_CMD_PARA_ERR, -EINVAL},
+		{HCLGEVF_CMD_RESULT_ERR, -ERANGE},
+		{HCLGEVF_CMD_TIMEOUT, -ETIME},
+		{HCLGEVF_CMD_HILINK_ERR, -ENOLINK},
+		{HCLGEVF_CMD_QUEUE_ILLEGAL, -ENXIO},
+		{HCLGEVF_CMD_INVALID, -EBADR},
+	};
+	u32 errcode_count = ARRAY_SIZE(hclgevf_cmd_errcode);
+	u32 i;
+
+	for (i = 0; i < errcode_count; i++)
+		if (hclgevf_cmd_errcode[i].imp_errcode == desc_ret)
+			return hclgevf_cmd_errcode[i].common_errno;
+
+	return -EIO;
+}
+
+static int hclgevf_cmd_check_retval(struct hclgevf_hw *hw,
+				    struct hclgevf_desc *desc, int num, int ntc)
+{
+	u16 opcode, desc_ret;
+	int handle;
+
+	opcode = le16_to_cpu(desc[0].opcode);
+	for (handle = 0; handle < num; handle++) {
+		/* Get the result of hardware write back */
+		desc[handle] = hw->cmq.csq.desc[ntc];
+		ntc++;
+		if (ntc == hw->cmq.csq.desc_num)
+			ntc = 0;
+	}
+
+	if (likely(!hclgevf_is_special_opcode(opcode)))
+		desc_ret = le16_to_cpu(desc[num - 1].retval);
+	else
+		desc_ret = le16_to_cpu(desc[0].retval);
+
+	hw->cmq.last_status = desc_ret;
+
+	return hclgevf_cmd_convert_err_code(desc_ret);
+}
+
+static int hclgevf_cmd_check_result(struct hclgevf_hw *hw,
+				    struct hclgevf_desc *desc, int num, int ntc)
+{
+	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
+	bool is_completed = false;
+	u32 timeout = 0;
+	int handle, ret;
+
+	/* If the command is sync, wait for the firmware to write back,
+	 * if multi descriptors to be sent, use the first one to check
+	 */
+	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclgevf_cmd_csq_done(hw)) {
+				is_completed = true;
+				break;
+			}
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (!is_completed)
+		ret = -EBADE;
+	else
+		ret = hclgevf_cmd_check_retval(hw, desc, num, ntc);
+
+	/* Clean the command send queue */
+	handle = hclgevf_cmd_csq_clean(hw);
+	if (handle < 0)
+		ret = handle;
+	else if (handle != num)
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+
+	return ret;
 }
 
 /* hclgevf_cmd_send - send command to command queue
@@ -220,13 +295,7 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 {
 	struct hclgevf_dev *hdev = (struct hclgevf_dev *)hw->hdev;
 	struct hclgevf_cmq_ring *csq = &hw->cmq.csq;
-	struct hclgevf_desc *desc_to_use;
-	bool complete = false;
-	u32 timeout = 0;
-	int handle = 0;
-	int status = 0;
-	u16 retval;
-	u16 opcode;
+	int ret;
 	int ntc;
 
 	spin_lock_bh(&hw->cmq.csq.lock);
@@ -250,67 +319,18 @@ int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num)
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	opcode = le16_to_cpu(desc[0].opcode);
-	while (handle < num) {
-		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
-		*desc_to_use = desc[handle];
-		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use == hw->cmq.csq.desc_num)
-			hw->cmq.csq.next_to_use = 0;
-		handle++;
-	}
+
+	hclgevf_cmd_copy_desc(hw, desc, num);
 
 	/* Write to hardware */
 	hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG,
 			  hw->cmq.csq.next_to_use);
 
-	/* If the command is sync, wait for the firmware to write back,
-	 * if multi descriptors to be sent, use the first one to check
-	 */
-	if (HCLGEVF_SEND_SYNC(le16_to_cpu(desc->flag))) {
-		do {
-			if (hclgevf_cmd_csq_done(hw))
-				break;
-			udelay(1);
-			timeout++;
-		} while (timeout < hw->cmq.tx_timeout);
-	}
-
-	if (hclgevf_cmd_csq_done(hw)) {
-		complete = true;
-		handle = 0;
-
-		while (handle < num) {
-			/* Get the result of hardware write back */
-			desc_to_use = &hw->cmq.csq.desc[ntc];
-			desc[handle] = *desc_to_use;
-
-			if (likely(!hclgevf_is_special_opcode(opcode)))
-				retval = le16_to_cpu(desc[handle].retval);
-			else
-				retval = le16_to_cpu(desc[0].retval);
-
-			status = hclgevf_cmd_convert_err_code(retval);
-			hw->cmq.last_status = (enum hclgevf_cmd_status)retval;
-			ntc++;
-			handle++;
-			if (ntc == hw->cmq.csq.desc_num)
-				ntc = 0;
-		}
-	}
-
-	if (!complete)
-		status = -EBADE;
-
-	/* Clean the command send queue */
-	handle = hclgevf_cmd_csq_clean(hw);
-	if (handle != num)
-		dev_warn(&hdev->pdev->dev,
-			 "cleaned %d, need to clean %d\n", handle, num);
+	ret = hclgevf_cmd_check_result(hw, desc, num, ntc);
 
 	spin_unlock_bh(&hw->cmq.csq.lock);
 
-	return status;
+	return ret;
 }
 
 static void hclgevf_set_default_capability(struct hclgevf_dev *hdev)
...
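
hclgevf_cmd_check_result() isolates the bounded busy-wait that
hclgevf_cmd_send() used to carry inline. A sketch of that wait loop against
simulated hardware (csq_done() below is a stand-in for
hclgevf_cmd_csq_done(), and -EBADE is a Linux-specific errno):

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Simulated completion flag: reports done once the fake device's poll
 * budget is used up. Purely illustrative. */
static bool csq_done(int *polls_until_done)
{
	return --(*polls_until_done) <= 0;
}

/* Bounded busy-wait in the shape of hclgevf_cmd_check_result(): poll up
 * to tx_timeout times, then report -EBADE if completion never arrived. */
static int wait_for_csq(int tx_timeout, int *polls_until_done)
{
	bool is_completed = false;
	int timeout = 0;

	do {
		if (csq_done(polls_until_done)) {
			is_completed = true;
			break;
		}
		/* the driver inserts udelay(1) between polls */
		timeout++;
	} while (timeout < tx_timeout);

	return is_completed ? 0 : -EBADE;
}

int main(void)
{
	int fast = 3, hung = 1000;

	printf("fast device: %d\n", wait_for_csq(10, &fast)); /* 0 */
	printf("hung device: %d\n", wait_for_csq(10, &hung)); /* -EBADE */
	return 0;
}
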
@@ -873,25 +873,13 @@ static u8 hclgevf_get_rss_hash_bits(struct ethtool_rxnfc *nfc)
 	return hash_sets;
 }
 
-static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
-				 struct ethtool_rxnfc *nfc)
+static int hclgevf_init_rss_tuple_cmd(struct hnae3_handle *handle,
+				      struct ethtool_rxnfc *nfc,
+				      struct hclgevf_rss_input_tuple_cmd *req)
 {
 	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
 	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	struct hclgevf_rss_input_tuple_cmd *req;
-	struct hclgevf_desc desc;
 	u8 tuple_sets;
-	int ret;
-
-	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
-		return -EOPNOTSUPP;
-
-	if (nfc->data &
-	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
-		return -EINVAL;
-
-	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
-	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
 
 	req->ipv4_tcp_en = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
 	req->ipv4_udp_en = rss_cfg->rss_tuple_sets.ipv4_udp_en;
@@ -936,6 +924,35 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
 		return -EINVAL;
 	}
 
+	return 0;
+}
+
+static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
+				 struct ethtool_rxnfc *nfc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
+	struct hclgevf_rss_input_tuple_cmd *req;
+	struct hclgevf_desc desc;
+	int ret;
+
+	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+		return -EOPNOTSUPP;
+
+	if (nfc->data &
+	    ~(RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 | RXH_L4_B_2_3))
+		return -EINVAL;
+
+	req = (struct hclgevf_rss_input_tuple_cmd *)desc.data;
+	hclgevf_cmd_setup_basic_desc(&desc, HCLGEVF_OPC_RSS_INPUT_TUPLE, false);
+
+	ret = hclgevf_init_rss_tuple_cmd(handle, nfc, req);
+	if (ret) {
+		dev_err(&hdev->pdev->dev,
+			"failed to init rss tuple cmd, ret = %d\n", ret);
+		return ret;
+	}
+
 	ret = hclgevf_cmd_send(&hdev->hw, &desc, 1);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
@@ -954,56 +971,73 @@ static int hclgevf_set_rss_tuple(struct hnae3_handle *handle,
 	return 0;
 }
 
-static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
-				 struct ethtool_rxnfc *nfc)
+static int hclgevf_get_rss_tuple_by_flow_type(struct hclgevf_dev *hdev,
+					      int flow_type, u8 *tuple_sets)
 {
-	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
-	struct hclgevf_rss_cfg *rss_cfg = &hdev->rss_cfg;
-	u8 tuple_sets;
-
-	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
-		return -EOPNOTSUPP;
-
-	nfc->data = 0;
-
-	switch (nfc->flow_type) {
+	switch (flow_type) {
 	case TCP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_tcp_en;
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_tcp_en;
 		break;
 	case UDP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_udp_en;
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_udp_en;
 		break;
 	case TCP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_tcp_en;
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_tcp_en;
 		break;
 	case UDP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_udp_en;
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_udp_en;
 		break;
 	case SCTP_V4_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv4_sctp_en;
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv4_sctp_en;
 		break;
 	case SCTP_V6_FLOW:
-		tuple_sets = rss_cfg->rss_tuple_sets.ipv6_sctp_en;
+		*tuple_sets = hdev->rss_cfg.rss_tuple_sets.ipv6_sctp_en;
 		break;
 	case IPV4_FLOW:
 	case IPV6_FLOW:
-		tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
+		*tuple_sets = HCLGEVF_S_IP_BIT | HCLGEVF_D_IP_BIT;
 		break;
 	default:
 		return -EINVAL;
 	}
 
-	if (!tuple_sets)
-		return 0;
+	return 0;
+}
+
+static u64 hclgevf_convert_rss_tuple(u8 tuple_sets)
+{
+	u64 tuple_data = 0;
 
 	if (tuple_sets & HCLGEVF_D_PORT_BIT)
-		nfc->data |= RXH_L4_B_2_3;
+		tuple_data |= RXH_L4_B_2_3;
 	if (tuple_sets & HCLGEVF_S_PORT_BIT)
-		nfc->data |= RXH_L4_B_0_1;
+		tuple_data |= RXH_L4_B_0_1;
 	if (tuple_sets & HCLGEVF_D_IP_BIT)
-		nfc->data |= RXH_IP_DST;
+		tuple_data |= RXH_IP_DST;
 	if (tuple_sets & HCLGEVF_S_IP_BIT)
-		nfc->data |= RXH_IP_SRC;
+		tuple_data |= RXH_IP_SRC;
+
+	return tuple_data;
+}
+
+static int hclgevf_get_rss_tuple(struct hnae3_handle *handle,
+				 struct ethtool_rxnfc *nfc)
+{
+	struct hclgevf_dev *hdev = hclgevf_ae_get_hdev(handle);
+	u8 tuple_sets;
+	int ret;
+
+	if (hdev->ae_dev->dev_version < HNAE3_DEVICE_VERSION_V2)
+		return -EOPNOTSUPP;
+
+	nfc->data = 0;
+
+	ret = hclgevf_get_rss_tuple_by_flow_type(hdev, nfc->flow_type,
+						 &tuple_sets);
+	if (ret || !tuple_sets)
+		return ret;
+
+	nfc->data = hclgevf_convert_rss_tuple(tuple_sets);
 
 	return 0;
 }
...
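
hclgevf_convert_rss_tuple() is now a pure bitmask translation with no side
effects on the nfc argument, so it can be exercised on its own. A
self-contained sketch with stand-in bit values (the real RXH_* and
HCLGEVF_*_BIT constants are defined elsewhere and differ):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for the HCLGEVF_*_BIT tuple bits. */
#define S_IP_BIT   (1u << 0)
#define D_IP_BIT   (1u << 1)
#define S_PORT_BIT (1u << 2)
#define D_PORT_BIT (1u << 3)

/* Hypothetical stand-ins for the ethtool RXH_* hash flags. */
#define RXH_IP_SRC   (1ull << 0)
#define RXH_IP_DST   (1ull << 1)
#define RXH_L4_B_0_1 (1ull << 2)
#define RXH_L4_B_2_3 (1ull << 3)

/* Pure translation in the shape of hclgevf_convert_rss_tuple(): map
 * device tuple bits to ethtool hash flags, touching no global state. */
static uint64_t convert_rss_tuple(uint8_t tuple_sets)
{
	uint64_t tuple_data = 0;

	if (tuple_sets & D_PORT_BIT)
		tuple_data |= RXH_L4_B_2_3;
	if (tuple_sets & S_PORT_BIT)
		tuple_data |= RXH_L4_B_0_1;
	if (tuple_sets & D_IP_BIT)
		tuple_data |= RXH_IP_DST;
	if (tuple_sets & S_IP_BIT)
		tuple_data |= RXH_IP_SRC;

	return tuple_data;
}

int main(void)
{
	printf("0x%llx\n",
	       (unsigned long long)convert_rss_tuple(S_IP_BIT | D_IP_BIT));
	return 0;
}
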