Commit 30780a8b authored by Huazhong Tan, committed by David S. Miller

net: hns3: use atomic_t to replace u32 for arq's count

Since the irq handler and the mailbox task both update arq's count,
it should be an atomic_t instead of a u32; otherwise the concurrent
non-atomic updates can race and leave its value wrong.

Fixes: 07a0556a ("net: hns3: Changes to support ARQ(Asynchronous Receive Queue)")
Signed-off-by: Huazhong Tan <tanhuazhong@huawei.com>
Signed-off-by: Peng Li <lipeng321@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 1416d333
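
The race described above can be shown in isolation. Below is a minimal
user-space sketch, not part of this patch: it uses C11 <stdatomic.h> in
place of the kernel's atomic_t API, with one thread standing in for the
irq-driven mailbox handler that increments the count and another for the
mailbox task that decrements it. The names arq_count, producer and
consumer are illustrative only. Because ++ and -- on a plain integer are
non-atomic read-modify-write sequences, concurrent updates can be lost;
making the counter atomic, as the diff below does with atomic_t, keeps
every update indivisible.

/*
 * Minimal user-space sketch (not part of the patch) of the race the
 * commit message describes. C11 atomics stand in for the kernel's
 * atomic_t; producer/consumer stand in for the irq handler and the
 * mailbox task.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

#define ITERATIONS 1000000

static atomic_int arq_count;	/* plays the role of hdev->arq.count */

/* "irq handler" side: queues messages, bumping the count */
static void *producer(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERATIONS; i++)
		atomic_fetch_add(&arq_count, 1);	/* was: count++ */
	return NULL;
}

/* "mailbox task" side: drains messages, dropping the count */
static void *consumer(void *arg)
{
	(void)arg;
	for (int i = 0; i < ITERATIONS; i++)
		atomic_fetch_sub(&arq_count, 1);	/* was: count-- */
	return NULL;
}

int main(void)
{
	pthread_t p, c;

	pthread_create(&p, NULL, producer, NULL);
	pthread_create(&c, NULL, consumer, NULL);
	pthread_join(p, NULL);
	pthread_join(c, NULL);

	/* With atomic updates this is always 0; with a plain int and
	 * ++/--, lost updates typically leave a nonzero residue.
	 */
	printf("final count = %d\n", atomic_load(&arq_count));
	return 0;
}

Compiled with cc -pthread, the program always prints 0; swapping the
atomic operations for plain ++/-- on an int usually does not.
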
@@ -111,7 +111,7 @@ struct hclgevf_mbx_arq_ring {
 	struct hclgevf_dev *hdev;
 	u32 head;
 	u32 tail;
-	u32 count;
+	atomic_t count;
 	u16 msg_q[HCLGE_MBX_MAX_ARQ_MSG_NUM][HCLGE_MBX_MAX_ARQ_MSG_SIZE];
 };
@@ -340,7 +340,7 @@ int hclgevf_cmd_init(struct hclgevf_dev *hdev)
 	hdev->arq.hdev = hdev;
 	hdev->arq.head = 0;
 	hdev->arq.tail = 0;
-	hdev->arq.count = 0;
+	atomic_set(&hdev->arq.count, 0);
 	hdev->hw.cmq.csq.next_to_clean = 0;
 	hdev->hw.cmq.csq.next_to_use = 0;
 	hdev->hw.cmq.crq.next_to_clean = 0;
@@ -212,7 +212,8 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		/* we will drop the async msg if we find ARQ as full
 		 * and continue with next message
 		 */
-		if (hdev->arq.count >= HCLGE_MBX_MAX_ARQ_MSG_NUM) {
+		if (atomic_read(&hdev->arq.count) >=
+		    HCLGE_MBX_MAX_ARQ_MSG_NUM) {
 			dev_warn(&hdev->pdev->dev,
 				 "Async Q full, dropping msg(%d)\n",
 				 req->msg[1]);
@@ -224,7 +225,7 @@ void hclgevf_mbx_handler(struct hclgevf_dev *hdev)
 		memcpy(&msg_q[0], req->msg,
 		       HCLGE_MBX_MAX_ARQ_MSG_SIZE * sizeof(u16));
 		hclge_mbx_tail_ptr_move_arq(hdev->arq);
-		hdev->arq.count++;
+		atomic_inc(&hdev->arq.count);
 		hclgevf_mbx_task_schedule(hdev);
@@ -317,7 +318,7 @@ void hclgevf_mbx_async_handler(struct hclgevf_dev *hdev)
 		}
 		hclge_mbx_head_ptr_move_arq(hdev->arq);
-		hdev->arq.count--;
+		atomic_dec(&hdev->arq.count);
 		msg_q = hdev->arq.msg_q[hdev->arq.head];
 	}
 }