Commit 8b0195a3 authored by Huazhong Tan, committed by David S. Miller

net: hns3: fix for cmd queue memory not freed problem during reset

It is not necessary to reallocate the descriptors and remap the
descriptor memory during the reset process; doing so can leave the
previously allocated memory unfreed.

Also, this patch initializes the cmd queue's spinlocks in
hclgevf_cmd_queue_init, and takes the spinlocks when reinitializing
the cmd queue's registers.

Fixes: fedd0c15 ("net: hns3: Add HNS3 VF IMP(Integrated Management Proc) cmd interface")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: Yunsheng Lin <linyunsheng@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 65e41e7e
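
For context, a rough sketch of the split this patch creates (the probe/reset wrappers below are illustrative only, not code from this patch): hclgevf_cmd_queue_init does the one-time work (spinlock init, CSQ/CRQ descriptor DMA allocation), while hclgevf_cmd_init only resets ring pointers and reprograms the command queue registers, so it can be re-run on every reset without reallocating, and therefore without leaking, the descriptor memory.

/* Illustrative only: how the two init stages are meant to be used.
 * The probe/reset wrappers below are a sketch, not code from this patch.
 */
static int example_probe_flow(struct hclgevf_dev *hdev)
{
	int ret;

	/* one-time work: spinlock init, CSQ/CRQ descriptor DMA allocation */
	ret = hclgevf_cmd_queue_init(hdev);
	if (ret)
		return ret;

	/* repeatable work: reset ring pointers, rewrite cmd queue registers */
	return hclgevf_cmd_init(hdev);
}

static int example_reset_flow(struct hclgevf_dev *hdev)
{
	/* on reset only the register/pointer re-init runs; the descriptor
	 * memory allocated at probe time is reused, so nothing is leaked
	 */
	return hclgevf_cmd_init(hdev);
}
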
@@ -72,6 +72,45 @@ static bool hclgevf_is_special_opcode(u16 opcode)
 	return false;
 }
 
+static void hclgevf_cmd_config_regs(struct hclgevf_cmq_ring *ring)
+{
+	struct hclgevf_dev *hdev = ring->dev;
+	struct hclgevf_hw *hw = &hdev->hw;
+	u32 reg_val;
+
+	if (ring->flag == HCLGEVF_TYPE_CSQ) {
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
+	} else {
+		reg_val = (u32)ring->desc_dma_addr;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
+		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
+
+		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
+		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
+		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
+	}
+}
+
+static void hclgevf_cmd_init_regs(struct hclgevf_hw *hw)
+{
+	hclgevf_cmd_config_regs(&hw->cmq.csq);
+	hclgevf_cmd_config_regs(&hw->cmq.crq);
+}
+
 static int hclgevf_alloc_cmd_desc(struct hclgevf_cmq_ring *ring)
 {
 	int size = ring->desc_num * sizeof(struct hclgevf_desc);
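A side note on the register writes added above: the high half of the DMA base address is computed as (u32)((ring->desc_dma_addr >> 31) >> 1) rather than a single shift by 32, presumably so the expression stays well defined even if dma_addr_t is only 32 bits wide (shifting a 32-bit value by 32 is undefined in C). A small stand-alone illustration of the same split, using hypothetical names that are not part of the driver:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical helper, not from the driver: split an address into the
 * low/high 32-bit register values the same way the hunk above does.
 */
static void split_dma_addr(uint64_t addr, uint32_t *lo, uint32_t *hi)
{
	*lo = (uint32_t)addr;
	/* two shifts instead of ">> 32" so a 32-bit address type would not
	 * trigger undefined behaviour
	 */
	*hi = (uint32_t)((addr >> 31) >> 1);
}

int main(void)
{
	uint32_t lo, hi;

	split_dma_addr(0x0000001234567000ULL, &lo, &hi);
	printf("lo=0x%08x hi=0x%08x\n", lo, hi); /* lo=0x34567000 hi=0x00000012 */
	return 0;
}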
@@ -96,61 +135,23 @@ static void hclgevf_free_cmd_desc(struct hclgevf_cmq_ring *ring)
 	}
 }
 
-static int hclgevf_init_cmd_queue(struct hclgevf_dev *hdev,
-				  struct hclgevf_cmq_ring *ring)
+static int hclgevf_alloc_cmd_queue(struct hclgevf_dev *hdev, int ring_type)
 {
 	struct hclgevf_hw *hw = &hdev->hw;
-	int ring_type = ring->flag;
-	u32 reg_val;
+	struct hclgevf_cmq_ring *ring =
+		(ring_type == HCLGEVF_TYPE_CSQ) ? &hw->cmq.csq : &hw->cmq.crq;
 	int ret;
 
-	ring->desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
-	spin_lock_init(&ring->lock);
-	ring->next_to_clean = 0;
-	ring->next_to_use = 0;
 	ring->dev = hdev;
+	ring->flag = ring_type;
 
 	/* allocate CSQ/CRQ descriptor */
 	ret = hclgevf_alloc_cmd_desc(ring);
-	if (ret) {
+	if (ret)
 		dev_err(&hdev->pdev->dev, "failed(%d) to alloc %s desc\n", ret,
 			(ring_type == HCLGEVF_TYPE_CSQ) ? "CSQ" : "CRQ");
-		return ret;
-	}
 
-	/* initialize the hardware registers with csq/crq dma-address,
-	 * descriptor number, head & tail pointers
-	 */
-	switch (ring_type) {
-	case HCLGEVF_TYPE_CSQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_DEPTH_REG, reg_val);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CSQ_TAIL_REG, 0);
-		return 0;
-	case HCLGEVF_TYPE_CRQ:
-		reg_val = (u32)ring->desc_dma_addr;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_L_REG, reg_val);
-		reg_val = (u32)((ring->desc_dma_addr >> 31) >> 1);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_BASEADDR_H_REG, reg_val);
-
-		reg_val = (ring->desc_num >> HCLGEVF_NIC_CMQ_DESC_NUM_S);
-		reg_val |= HCLGEVF_NIC_CMQ_ENABLE;
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_DEPTH_REG, reg_val);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_HEAD_REG, 0);
-		hclgevf_write_dev(hw, HCLGEVF_NIC_CRQ_TAIL_REG, 0);
-		return 0;
-	default:
-		return -EINVAL;
-	}
+	return ret;
 }
 
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
@@ -282,55 +283,73 @@ static int hclgevf_cmd_query_firmware_version(struct hclgevf_hw *hw,
 	return status;
 }
 
-int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev)
 {
-	u32 version;
 	int ret;
 
-	/* setup Tx write back timeout */
+	/* Setup the lock for command queue */
+	spin_lock_init(&hdev->hw.cmq.csq.lock);
+	spin_lock_init(&hdev->hw.cmq.crq.lock);
+
 	hdev->hw.cmq.tx_timeout = HCLGEVF_CMDQ_TX_TIMEOUT;
+	hdev->hw.cmq.csq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
+	hdev->hw.cmq.crq.desc_num = HCLGEVF_NIC_CMQ_DESC_NUM;
 
-	/* setup queue CSQ/CRQ rings */
-	hdev->hw.cmq.csq.flag = HCLGEVF_TYPE_CSQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.csq);
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CSQ);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CSQ ring\n", ret);
+			"CSQ ring setup error %d\n", ret);
 		return ret;
 	}
 
-	hdev->hw.cmq.crq.flag = HCLGEVF_TYPE_CRQ;
-	ret = hclgevf_init_cmd_queue(hdev, &hdev->hw.cmq.crq);
+	ret = hclgevf_alloc_cmd_queue(hdev, HCLGEVF_TYPE_CRQ);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
-			"failed(%d) to initialize CRQ ring\n", ret);
+			"CRQ ring setup error %d\n", ret);
 		goto err_csq;
 	}
 
+	return 0;
+err_csq:
+	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
+	return ret;
+}
+
+int hclgevf_cmd_init(struct hclgevf_dev *hdev)
+{
+	u32 version;
+	int ret;
+
+	spin_lock_bh(&hdev->hw.cmq.csq.lock);
+	spin_lock_bh(&hdev->hw.cmq.crq.lock);
+
 	/* initialize the pointers of async rx queue of mailbox */
 	hdev->arq.hdev = hdev;
 	hdev->arq.head = 0;
 	hdev->arq.tail = 0;
 	hdev->arq.count = 0;
+	hdev->hw.cmq.csq.next_to_clean = 0;
+	hdev->hw.cmq.csq.next_to_use = 0;
+	hdev->hw.cmq.crq.next_to_clean = 0;
+	hdev->hw.cmq.crq.next_to_use = 0;
+
+	hclgevf_cmd_init_regs(&hdev->hw);
+
+	spin_unlock_bh(&hdev->hw.cmq.crq.lock);
+	spin_unlock_bh(&hdev->hw.cmq.csq.lock);
 
 	/* get firmware version */
 	ret = hclgevf_cmd_query_firmware_version(&hdev->hw, &version);
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"failed(%d) to query firmware version\n", ret);
-		goto err_crq;
+		return ret;
 	}
 	hdev->fw_version = version;
 
 	dev_info(&hdev->pdev->dev, "The firmware version is %08x\n", version);
 
 	return 0;
-err_crq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.crq);
-err_csq:
-	hclgevf_free_cmd_desc(&hdev->hw.cmq.csq);
-
-	return ret;
 }
 
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev)
...
@@ -256,6 +256,7 @@ static inline u32 hclgevf_read_reg(u8 __iomem *base, u32 reg)
 
 int hclgevf_cmd_init(struct hclgevf_dev *hdev);
 void hclgevf_cmd_uninit(struct hclgevf_dev *hdev);
+int hclgevf_cmd_queue_init(struct hclgevf_dev *hdev);
 int hclgevf_cmd_send(struct hclgevf_hw *hw, struct hclgevf_desc *desc, int num);
 void hclgevf_cmd_setup_basic_desc(struct hclgevf_desc *desc,
...
@@ -1966,6 +1966,12 @@ static int hclgevf_init_hdev(struct hclgevf_dev *hdev)
 		return ret;
 	}
 
+	ret = hclgevf_cmd_queue_init(hdev);
+	if (ret) {
+		dev_err(&pdev->dev, "Cmd queue init failed: %d\n", ret);
+		goto err_cmd_queue_init;
+	}
+
 	ret = hclgevf_cmd_init(hdev);
 	if (ret)
 		goto err_cmd_init;
@@ -1975,13 +1981,13 @@
 	if (ret) {
 		dev_err(&hdev->pdev->dev,
 			"Query vf status error, ret = %d.\n", ret);
-		goto err_query_vf;
+		goto err_cmd_init;
 	}
 
 	ret = hclgevf_init_msi(hdev);
 	if (ret) {
 		dev_err(&pdev->dev, "failed(%d) to init MSI/MSI-X\n", ret);
-		goto err_query_vf;
+		goto err_cmd_init;
 	}
 
 	hclgevf_state_init(hdev);
@@ -2037,9 +2043,9 @@
 err_misc_irq_init:
 	hclgevf_state_uninit(hdev);
 	hclgevf_uninit_msi(hdev);
-err_query_vf:
-	hclgevf_cmd_uninit(hdev);
 err_cmd_init:
+	hclgevf_cmd_uninit(hdev);
+err_cmd_queue_init:
 	hclgevf_pci_uninit(hdev);
 	return ret;
 }
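
The relabelled error path above follows the usual kernel goto-unwind ladder: each label undoes only what was set up before the step that failed, so a failure in hclgevf_cmd_init now frees the command queue memory via hclgevf_cmd_uninit, while a failure in hclgevf_cmd_queue_init skips straight to the PCI cleanup. A generic sketch of the pattern, with placeholder names rather than the driver's functions:

/* Generic goto-unwind ladder; setup_a/setup_b/undo_a are placeholders,
 * not functions from this driver.
 */
static int setup_a(void) { return 0; }   /* role of hclgevf_cmd_queue_init() */
static int setup_b(void) { return 0; }   /* role of hclgevf_cmd_init() */
static void undo_a(void) { }             /* role of hclgevf_cmd_uninit() */

static int example_init(void)
{
	int ret;

	ret = setup_a();
	if (ret)
		goto err_setup_a;

	ret = setup_b();
	if (ret)
		goto err_setup_b;

	return 0;

err_setup_b:
	undo_a();	/* undo everything setup_a() did */
err_setup_a:
	/* resources acquired before setup_a() (e.g. PCI) get released here */
	return ret;
}
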
...