Commit a9b5bb5c authored by Allen Pais, committed by Corey Minyard

ipmi: Convert from tasklet to BH workqueue

The only generic interface for executing code asynchronously in the BH
(bottom-half) context is the tasklet; however, tasklets are marked deprecated
and have some design flaws. To replace them, BH workqueue support was
recently added. A BH workqueue behaves like a regular workqueue except that
its queued work items are executed in the BH context.

This patch converts drivers/char/ipmi/* from tasklet to BH workqueue.
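
In outline, the conversion follows the pattern sketched below. This is a
minimal illustration, not the driver's code; the my_dev, my_recv_work,
my_dev_setup, my_dev_kick, and my_dev_teardown names are hypothetical. A
struct tasklet_struct field becomes a struct work_struct, from_tasklet()
becomes from_work(), and tasklet_setup()/tasklet_schedule()/tasklet_kill()
map to INIT_WORK()/queue_work() on system_bh_wq/cancel_work_sync():

#include <linux/workqueue.h>

struct my_dev {					/* hypothetical driver state */
	struct work_struct recv_work;	/* was: struct tasklet_struct recv_tasklet */
};

static void my_recv_work(struct work_struct *t)
{
	/* from_work() wraps container_of(), as from_tasklet() did. */
	struct my_dev *dev = from_work(dev, t, recv_work);

	/* ... pull queued messages for dev and deliver them ... */
}

static void my_dev_setup(struct my_dev *dev)
{
	INIT_WORK(&dev->recv_work, my_recv_work);	/* was: tasklet_setup() */
}

static void my_dev_kick(struct my_dev *dev)
{
	/* system_bh_wq runs its work items in BH (softirq) context. */
	queue_work(system_bh_wq, &dev->recv_work);	/* was: tasklet_schedule() */
}

static void my_dev_teardown(struct my_dev *dev)
{
	cancel_work_sync(&dev->recv_work);	/* was: tasklet_kill() */
}

Because the work item still runs in BH context, the handler keeps its
tasklet-era locking rules (spin_lock_irqsave(), no sleeping), which is why
the body of the receive handler needs no change beyond the container_of
helper.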

Based on the work done by Tejun Heo <tj@kernel.org>
Branch: https://git.kernel.org/pub/scm/linux/kernel/git/tj/wq.git for-6.10
Signed-off-by: Allen Pais <allen.lkml@gmail.com>
Message-Id: <20240327160314.9982-7-apais@linux.microsoft.com>
[Removed a duplicate include of workqueue.h]
Signed-off-by: Corey Minyard <minyard@acm.org>
parent 8d025e20
@@ -41,7 +41,7 @@
 static struct ipmi_recv_msg *ipmi_alloc_recv_msg(void);
 static int ipmi_init_msghandler(void);
-static void smi_recv_tasklet(struct tasklet_struct *t);
+static void smi_recv_work(struct work_struct *t);
 static void handle_new_recv_msgs(struct ipmi_smi *intf);
 static void need_waiter(struct ipmi_smi *intf);
 static int handle_one_recv_msg(struct ipmi_smi *intf,
 			       struct ipmi_smi_msg *msg);
@@ -498,13 +498,13 @@ struct ipmi_smi {
 	/*
 	 * Messages queued for delivery. If delivery fails (out of memory
 	 * for instance), They will stay in here to be processed later in a
-	 * periodic timer interrupt. The tasklet is for handling received
+	 * periodic timer interrupt. The workqueue is for handling received
 	 * messages directly from the handler.
 	 */
 	spinlock_t waiting_rcv_msgs_lock;
 	struct list_head waiting_rcv_msgs;
 	atomic_t watchdog_pretimeouts_to_deliver;
-	struct tasklet_struct recv_tasklet;
+	struct work_struct recv_work;
 
 	spinlock_t xmit_msgs_lock;
 	struct list_head xmit_msgs;
@@ -704,7 +704,7 @@ static void clean_up_interface_data(struct ipmi_smi *intf)
 	struct cmd_rcvr *rcvr, *rcvr2;
 	struct list_head list;
 
-	tasklet_kill(&intf->recv_tasklet);
+	cancel_work_sync(&intf->recv_work);
 
 	free_smi_msg_list(&intf->waiting_rcv_msgs);
 	free_recv_msg_list(&intf->waiting_events);
@@ -1319,7 +1319,7 @@ static void free_user(struct kref *ref)
 {
 	struct ipmi_user *user = container_of(ref, struct ipmi_user, refcount);
 
-	/* SRCU cleanup must happen in task context. */
+	/* SRCU cleanup must happen in workqueue context. */
 	queue_work(remove_work_wq, &user->remove_work);
 }
@@ -3605,8 +3605,7 @@ int ipmi_add_smi(struct module *owner,
 	intf->curr_seq = 0;
 	spin_lock_init(&intf->waiting_rcv_msgs_lock);
 	INIT_LIST_HEAD(&intf->waiting_rcv_msgs);
-	tasklet_setup(&intf->recv_tasklet,
-		      smi_recv_tasklet);
+	INIT_WORK(&intf->recv_work, smi_recv_work);
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 0);
 	spin_lock_init(&intf->xmit_msgs_lock);
 	INIT_LIST_HEAD(&intf->xmit_msgs);
@@ -4779,7 +4778,7 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
 				 * To preserve message order, quit if we
 				 * can't handle a message. Add the message
 				 * back at the head, this is safe because this
-				 * tasklet is the only thing that pulls the
+				 * workqueue is the only thing that pulls the
 				 * messages.
 				 */
 				list_add(&smi_msg->link, &intf->waiting_rcv_msgs);
@@ -4812,10 +4811,10 @@ static void handle_new_recv_msgs(struct ipmi_smi *intf)
 	}
 }
 
-static void smi_recv_tasklet(struct tasklet_struct *t)
+static void smi_recv_work(struct work_struct *t)
 {
 	unsigned long flags = 0; /* keep us warning-free. */
-	struct ipmi_smi *intf = from_tasklet(intf, t, recv_tasklet);
+	struct ipmi_smi *intf = from_work(intf, t, recv_work);
 	int run_to_completion = intf->run_to_completion;
 	struct ipmi_smi_msg *newmsg = NULL;
@@ -4866,7 +4865,7 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 	/*
 	 * To preserve message order, we keep a queue and deliver from
-	 * a tasklet.
+	 * a workqueue.
 	 */
 	if (!run_to_completion)
 		spin_lock_irqsave(&intf->waiting_rcv_msgs_lock, flags);
@@ -4887,9 +4886,9 @@ void ipmi_smi_msg_received(struct ipmi_smi *intf,
 		spin_unlock_irqrestore(&intf->xmit_msgs_lock, flags);
 
 	if (run_to_completion)
-		smi_recv_tasklet(&intf->recv_tasklet);
+		smi_recv_work(&intf->recv_work);
 	else
-		tasklet_schedule(&intf->recv_tasklet);
+		queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_msg_received);
@@ -4899,7 +4898,7 @@ void ipmi_smi_watchdog_pretimeout(struct ipmi_smi *intf)
 		return;
 
 	atomic_set(&intf->watchdog_pretimeouts_to_deliver, 1);
-	tasklet_schedule(&intf->recv_tasklet);
+	queue_work(system_bh_wq, &intf->recv_work);
 }
 EXPORT_SYMBOL(ipmi_smi_watchdog_pretimeout);
@@ -5068,7 +5067,7 @@ static bool ipmi_timeout_handler(struct ipmi_smi *intf,
 						       flags);
 	}
 
-	tasklet_schedule(&intf->recv_tasklet);
+	queue_work(system_bh_wq, &intf->recv_work);
 
 	return need_timer;
 }
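
The patch queues everything onto the shared system_bh_wq. If a driver ever
needed its own BH queue (for flushing or isolation), the same workqueue
support allows creating a dedicated one with the WQ_BH flag; a minimal
sketch, with hypothetical names, not something this patch does:

#include <linux/init.h>
#include <linux/module.h>
#include <linux/workqueue.h>

static struct workqueue_struct *my_bh_wq;	/* hypothetical */

static int __init my_bh_init(void)
{
	/* WQ_BH work items execute in BH context, like system_bh_wq's. */
	my_bh_wq = alloc_workqueue("my_bh_wq", WQ_BH, 0);
	return my_bh_wq ? 0 : -ENOMEM;
}

static void __exit my_bh_exit(void)
{
	destroy_workqueue(my_bh_wq);
}

module_init(my_bh_init);
module_exit(my_bh_exit);
MODULE_LICENSE("GPL");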