Commit 89986496 authored by Corey Minyard, committed by Linus Torvalds

ipmi: Turn off all activity on an idle ipmi interface

The IPMI driver would wake up periodically to look for events and
watchdog pretimeouts.  If nothing is waiting for these events, there is
little point in checking for them.  So modify the driver so the message
handler can pass down whether it needs the lower layer to watch for
these.  Modify the system interface (lower) layer to turn off all timer
and thread activity when the upper layer doesn't need anything and the
interface is not currently handling a message.  And modify the message
handler so it does not restart its timer when the timer is not needed.

The timers and kthread will still be enabled if:
 - the SI interface is handling a message.
 - a user has enabled watching for events.
 - the IPMI watchdog timer is in use (since it uses pretimeouts).
 - the message handler is waiting on a remote response.
 - a user has registered to receive commands.

This mostly affects interfaces without interrupts.  Interfaces with
interrupts already don't use CPU in the system interface when the
interface is idle.
Signed-off-by: Corey Minyard <cminyard@mvista.com>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
parent 0dfe6e7e
(Part of this commit's diff is collapsed and not shown below.)
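The message-handler side of the change (drivers/char/ipmi/ipmi_msghandler.c) is in the collapsed portion, so it does not appear in the hunks below.  As a rough, hypothetical sketch only (the struct and helper names here are illustrative, not the actual ipmi_msghandler.c code), the idea is that the upper layer counts everything that requires the lower layer to keep polling and pushes the combined flag down through the new set_need_watch() handler:

#include <linux/atomic.h>
#include <linux/ipmi_smi.h>

/*
 * Hypothetical sketch (not the real ipmi_msghandler.c code): track each
 * reason the lower layer must keep polling, matching the list in the
 * commit message, and push the combined flag down to the SMI.
 */
struct watch_counts {
	atomic_t event_waiters;    /* users watching for events */
	atomic_t command_waiters;  /* users registered to receive commands */
	atomic_t watchdog_waiters; /* IPMI watchdog pretimeouts in use */
	atomic_t response_waiters; /* outstanding remote responses */
};

static void push_need_watch(struct watch_counts *w,
			    struct ipmi_smi_handlers *handlers,
			    void *send_info)
{
	int need = atomic_read(&w->event_waiters) ||
		   atomic_read(&w->command_waiters) ||
		   atomic_read(&w->watchdog_waiters) ||
		   atomic_read(&w->response_waiters);

	/* set_need_watch may be NULL if the SMI does not implement it. */
	if (handlers->set_need_watch)
		handlers->set_need_watch(send_info, need);
}

In the real driver the counts would be adjusted as users are created and destroyed, commands are registered, the watchdog is armed, and responses complete; the net effect is the same boolean handed to the system interface layer shown below.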
drivers/char/ipmi/ipmi_si_intf.c
@@ -257,6 +257,9 @@ struct smi_info {
 	/* Used to gracefully stop the timer without race conditions. */
 	atomic_t stop_operation;
 
+	/* Are we waiting for the events, pretimeouts, received msgs? */
+	atomic_t need_watch;
+
 	/*
 	 * The driver will disable interrupts when it gets into a
 	 * situation where it cannot handle messages due to lack of
@@ -862,6 +865,19 @@ static enum si_sm_result smi_event_handler(struct smi_info *smi_info,
 	return si_sm_result;
 }
 
+static void check_start_timer_thread(struct smi_info *smi_info)
+{
+	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
+		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
+
+		if (smi_info->thread)
+			wake_up_process(smi_info->thread);
+
+		start_next_msg(smi_info);
+		smi_event_handler(smi_info, 0);
+	}
+}
+
 static void sender(void *send_info,
 		   struct ipmi_smi_msg *msg,
 		   int priority)
@@ -915,15 +931,7 @@ static void sender(void *send_info,
 	else
 		list_add_tail(&msg->link, &smi_info->xmit_msgs);
 
-	if (smi_info->si_state == SI_NORMAL && smi_info->curr_msg == NULL) {
-		smi_mod_timer(smi_info, jiffies + SI_TIMEOUT_JIFFIES);
-
-		if (smi_info->thread)
-			wake_up_process(smi_info->thread);
-
-		start_next_msg(smi_info);
-		smi_event_handler(smi_info, 0);
-	}
+	check_start_timer_thread(smi_info);
 	spin_unlock_irqrestore(&smi_info->si_lock, flags);
 }
@@ -1023,9 +1031,15 @@ static int ipmi_thread(void *data)
 			; /* do nothing */
 		else if (smi_result == SI_SM_CALL_WITH_DELAY && busy_wait)
 			schedule();
-		else if (smi_result == SI_SM_IDLE)
-			schedule_timeout_interruptible(100);
-		else
+		else if (smi_result == SI_SM_IDLE) {
+			if (atomic_read(&smi_info->need_watch)) {
+				schedule_timeout_interruptible(100);
+			} else {
+				/* Wait to be woken up when we are needed. */
+				__set_current_state(TASK_INTERRUPTIBLE);
+				schedule();
+			}
+		} else
 			schedule_timeout_interruptible(1);
 	}
 	return 0;
@@ -1061,6 +1075,17 @@ static void request_events(void *send_info)
 	atomic_set(&smi_info->req_events, 1);
 }
 
+static void set_need_watch(void *send_info, int enable)
+{
+	struct smi_info *smi_info = send_info;
+	unsigned long flags;
+
+	atomic_set(&smi_info->need_watch, enable);
+	spin_lock_irqsave(&smi_info->si_lock, flags);
+	check_start_timer_thread(smi_info);
+	spin_unlock_irqrestore(&smi_info->si_lock, flags);
+}
+
 static int initialized;
 
 static void smi_timeout(unsigned long data)
@@ -1212,6 +1237,7 @@ static struct ipmi_smi_handlers handlers = {
 	.get_smi_info = get_smi_info,
 	.sender = sender,
 	.request_events = request_events,
+	.set_need_watch = set_need_watch,
 	.set_maintenance_mode = set_maintenance_mode,
 	.set_run_to_completion = set_run_to_completion,
 	.poll = poll,
@@ -3352,6 +3378,7 @@ static int try_smi_init(struct smi_info *new_smi)
 	new_smi->interrupt_disabled = 1;
 	atomic_set(&new_smi->stop_operation, 0);
+	atomic_set(&new_smi->need_watch, 0);
 	new_smi->intf_num = smi_num;
 	smi_num++;
include/linux/ipmi.h
@@ -237,7 +237,7 @@ int ipmi_set_maintenance_mode(ipmi_user_t user, int mode);
  * The first user that sets this to TRUE will receive all events that
  * have been queued while no one was waiting for events.
  */
-int ipmi_set_gets_events(ipmi_user_t user, int val);
+int ipmi_set_gets_events(ipmi_user_t user, bool val);
 
 /*
  * Called when a new SMI is registered.  This will also be called on
include/linux/ipmi_smi.h
@@ -109,6 +109,13 @@ struct ipmi_smi_handlers {
 	   events from the BMC we are attached to. */
 	void (*request_events)(void *send_info);
 
+	/* Called by the upper layer when some user requires that the
+	   interface watch for events, received messages, watchdog
+	   pretimeouts, or not.  Used by the SMI to know if it should
+	   watch for these.  This may be NULL if the SMI does not
+	   implement it. */
+	void (*set_need_watch)(void *send_info, int enable);
+
 	/* Called when the interface should go into "run to
 	   completion" mode.  If this call sets the value to true, the
 	   interface should make sure that all messages are flushed
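For context on the ipmi_set_gets_events() prototype change above (int to bool): this is the call an in-kernel user makes to start or stop receiving events, which is one of the conditions that keeps the lower layer watching.  A hedged usage sketch, assuming the usual ipmi_create_user()/ipmi_user_hndl interface from <linux/ipmi.h> (names are illustrative, error handling trimmed):

#include <linux/ipmi.h>

/* Illustrative receive handler; a real user would act on the message. */
static void example_recv(struct ipmi_recv_msg *msg, void *user_msg_data)
{
	ipmi_free_recv_msg(msg);
}

static struct ipmi_user_hndl example_hndl = {
	.ipmi_recv_hndl = example_recv,
};

static int example_start_watching(unsigned int if_num, ipmi_user_t *user)
{
	int rv;

	rv = ipmi_create_user(if_num, &example_hndl, NULL, user);
	if (rv)
		return rv;

	/*
	 * Enabling event reception is one of the conditions that makes
	 * the message handler call set_need_watch(..., 1) on the lower
	 * layer; disabling it again lets an otherwise idle interface go
	 * fully quiet.
	 */
	return ipmi_set_gets_events(*user, true);
}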