Commit f29da408 authored by Yufeng Mo, committed by David S. Miller

net: hns3: change hclge/hclgevf workqueue to WQ_UNBOUND mode

Currently, the workqueue of hclge/hclgevf is executed on
the CPU that initiates scheduling requests by default. In
stress scenarios, the CPU may be busy and workqueue scheduling
is completed after a long period of time. To avoid this
situation and implement proper scheduling, use the WQ_UNBOUND
mode instead. In this way, the workqueue can be performed on
a relatively idle CPU.
Signed-off-by: Yufeng Mo <moyufeng@huawei.com>
Signed-off-by: Guangbin Huang <huangguangbin2@huawei.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3bda2e5d
...@@ -2847,33 +2847,28 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev) ...@@ -2847,33 +2847,28 @@ static void hclge_mbx_task_schedule(struct hclge_dev *hdev)
{ {
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state)) !test_and_set_bit(HCLGE_STATE_MBX_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), mod_delayed_work(hclge_wq, &hdev->service_task, 0);
hclge_wq, &hdev->service_task, 0);
} }
static void hclge_reset_task_schedule(struct hclge_dev *hdev) static void hclge_reset_task_schedule(struct hclge_dev *hdev)
{ {
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state)) !test_and_set_bit(HCLGE_STATE_RST_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), mod_delayed_work(hclge_wq, &hdev->service_task, 0);
hclge_wq, &hdev->service_task, 0);
} }
static void hclge_errhand_task_schedule(struct hclge_dev *hdev) static void hclge_errhand_task_schedule(struct hclge_dev *hdev)
{ {
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state)) !test_and_set_bit(HCLGE_STATE_ERR_SERVICE_SCHED, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), mod_delayed_work(hclge_wq, &hdev->service_task, 0);
hclge_wq, &hdev->service_task, 0);
} }
void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time) void hclge_task_schedule(struct hclge_dev *hdev, unsigned long delay_time)
{ {
if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) && if (!test_bit(HCLGE_STATE_REMOVING, &hdev->state) &&
!test_bit(HCLGE_STATE_RST_FAIL, &hdev->state)) !test_bit(HCLGE_STATE_RST_FAIL, &hdev->state))
mod_delayed_work_on(cpumask_first(&hdev->affinity_mask), mod_delayed_work(hclge_wq, &hdev->service_task, delay_time);
hclge_wq, &hdev->service_task,
delay_time);
} }
static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status) static int hclge_get_mac_link_status(struct hclge_dev *hdev, int *link_status)
...@@ -3491,33 +3486,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev) ...@@ -3491,33 +3486,14 @@ static void hclge_get_misc_vector(struct hclge_dev *hdev)
hdev->num_msi_used += 1; hdev->num_msi_used += 1;
} }
static void hclge_irq_affinity_notify(struct irq_affinity_notify *notify,
const cpumask_t *mask)
{
struct hclge_dev *hdev = container_of(notify, struct hclge_dev,
affinity_notify);
cpumask_copy(&hdev->affinity_mask, mask);
}
static void hclge_irq_affinity_release(struct kref *ref)
{
}
static void hclge_misc_affinity_setup(struct hclge_dev *hdev) static void hclge_misc_affinity_setup(struct hclge_dev *hdev)
{ {
irq_set_affinity_hint(hdev->misc_vector.vector_irq, irq_set_affinity_hint(hdev->misc_vector.vector_irq,
&hdev->affinity_mask); &hdev->affinity_mask);
hdev->affinity_notify.notify = hclge_irq_affinity_notify;
hdev->affinity_notify.release = hclge_irq_affinity_release;
irq_set_affinity_notifier(hdev->misc_vector.vector_irq,
&hdev->affinity_notify);
} }
static void hclge_misc_affinity_teardown(struct hclge_dev *hdev) static void hclge_misc_affinity_teardown(struct hclge_dev *hdev)
{ {
irq_set_affinity_notifier(hdev->misc_vector.vector_irq, NULL);
irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL); irq_set_affinity_hint(hdev->misc_vector.vector_irq, NULL);
} }
...@@ -13082,7 +13058,7 @@ static int hclge_init(void) ...@@ -13082,7 +13058,7 @@ static int hclge_init(void)
{ {
pr_info("%s is initializing\n", HCLGE_NAME); pr_info("%s is initializing\n", HCLGE_NAME);
hclge_wq = alloc_workqueue("%s", 0, 0, HCLGE_NAME); hclge_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGE_NAME);
if (!hclge_wq) { if (!hclge_wq) {
pr_err("%s: failed to create workqueue\n", HCLGE_NAME); pr_err("%s: failed to create workqueue\n", HCLGE_NAME);
return -ENOMEM; return -ENOMEM;
......
...@@ -944,7 +944,6 @@ struct hclge_dev { ...@@ -944,7 +944,6 @@ struct hclge_dev {
/* affinity mask and notify for misc interrupt */ /* affinity mask and notify for misc interrupt */
cpumask_t affinity_mask; cpumask_t affinity_mask;
struct irq_affinity_notify affinity_notify;
struct hclge_ptp *ptp; struct hclge_ptp *ptp;
struct devlink *devlink; struct devlink *devlink;
}; };
......
...@@ -3899,7 +3899,7 @@ static int hclgevf_init(void) ...@@ -3899,7 +3899,7 @@ static int hclgevf_init(void)
{ {
pr_info("%s is initializing\n", HCLGEVF_NAME); pr_info("%s is initializing\n", HCLGEVF_NAME);
hclgevf_wq = alloc_workqueue("%s", 0, 0, HCLGEVF_NAME); hclgevf_wq = alloc_workqueue("%s", WQ_UNBOUND, 0, HCLGEVF_NAME);
if (!hclgevf_wq) { if (!hclgevf_wq) {
pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME); pr_err("%s: failed to create workqueue\n", HCLGEVF_NAME);
return -ENOMEM; return -ENOMEM;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment