Commit 3ba11103 authored by Anna-Maria Behnsen, committed by Thomas Gleixner

timers/migration: Use a single struct for hierarchy walk data

Two different structs are defined for propagating data from one level to
another when walking the hierarchy. Several struct members exist in both
structs, which makes generalization harder.

Merge those two structs into a single one and use it directly in
walk_groups() and the corresponding function pointers instead of
introducing pointer casting all over the place.
Signed-off-by: Anna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Frederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240716-tmigr-fixes-v4-4-757baa7803fe@linutronix.de
parent 92506741
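
The diff below is the actual kernel change. For orientation, here is a minimal, self-contained sketch of the pattern the commit message describes: the walk callback type changes from taking a void * (which each callback then cast to one of the two structs) to taking the merged struct tmigr_walk * directly. The struct layout follows the merged definition in the diff, but tmigr_group is stubbed out, walk_groups() is given a bare group pointer instead of the kernel's struct tmigr_cpu, and example_up is a hypothetical callback, so this is an illustration of the typing change and not the kernel code itself.

#include <stdbool.h>
#include <stddef.h>
#include <stdint.h>

typedef uint64_t u64;	/* stand-ins for the kernel's fixed-width types */
typedef uint8_t  u8;

/* Sketch only: tmigr_group reduced to what the walk needs. */
struct tmigr_group {
	struct tmigr_group *parent;
};

/* Merged walk data; the former tmigr_remote_data members live here too. */
struct tmigr_walk {
	u64 nextexp;
	u64 firstexp;
	u8 childmask;
	bool remote;
	unsigned long basej;
	u64 now;
	bool check;
	bool tmc_active;
};

/* The callback is typed now; previously it took void * plus a cast. */
typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *,
		     struct tmigr_walk *);

static void walk_groups(up_f up, struct tmigr_walk *data,
			struct tmigr_group *group)
{
	struct tmigr_group *child = NULL;

	do {
		if (up(group, child, data))
			break;
		child = group;
		group = group->parent;
	} while (group);
}

/* Hypothetical callback: receives the typed pointer directly, no cast. */
static bool example_up(struct tmigr_group *group, struct tmigr_group *child,
		       struct tmigr_walk *data)
{
	(void)group;
	(void)child;
	data->firstexp = data->nextexp;		/* illustrative only */
	return true;				/* true terminates the walk */
}

int main(void)
{
	struct tmigr_group top = { .parent = NULL };
	struct tmigr_walk data = { .nextexp = 1000, .firstexp = UINT64_MAX };

	walk_groups(example_up, &data, &top);	/* one struct, no casting */
	return 0;
}

With a single struct, callers such as tmigr_handle_remote() and tmigr_requires_handle_remote() can declare one struct tmigr_walk on the stack regardless of which walk they start, which is what the later hunks of the diff switch over to.
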
@@ -475,69 +475,31 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
return bitmap_weight(&active, BIT_CNT) <= 1;
}
typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, void *);
static void __walk_groups(up_f up, void *data,
struct tmigr_cpu *tmc)
{
struct tmigr_group *child = NULL, *group = tmc->tmgroup;
do {
WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
if (up(group, child, data))
break;
child = group;
group = group->parent;
} while (group);
}
static void walk_groups(up_f up, void *data, struct tmigr_cpu *tmc)
{
lockdep_assert_held(&tmc->lock);
__walk_groups(up, data, tmc);
}
/**
* struct tmigr_walk - data required for walking the hierarchy
* @nextexp: Next CPU event expiry information which is handed into
* the timer migration code by the timer code
* (get_next_timer_interrupt())
* @firstexp: Contains the first event expiry information when last
* active CPU of hierarchy is on the way to idle to make
* sure CPU will be back in time. It is updated in top
* level group only. Be aware, there could occur a new top
* level of the hierarchy between the 'top level call' in
* tmigr_update_events() and the check for the parent group
* in walk_groups(). Then @firstexp might contain a value
* != KTIME_MAX even if it was not the final top
* level. This is not a problem, as the worst outcome is a
* CPU which might wake up a little early.
* @firstexp: Contains the first event expiry information when
* hierarchy is completely idle. When CPU itself was the
* last going idle, information makes sure, that CPU will
* be back in time. When using this value in the remote
* expiry case, firstexp is stored in the per CPU tmigr_cpu
* struct of CPU which expires remote timers. It is updated
* in top level group only. Be aware, there could occur a
* new top level of the hierarchy between the 'top level
* call' in tmigr_update_events() and the check for the
* parent group in walk_groups(). Then @firstexp might
* contain a value != KTIME_MAX even if it was not the
* final top level. This is not a problem, as the worst
* outcome is a CPU which might wake up a little early.
* @evt: Pointer to tmigr_event which needs to be queued (of idle
* child group)
* @childmask: childmask of child group
* @remote: Is set, when the new timer path is executed in
* tmigr_handle_remote_cpu()
*/
struct tmigr_walk {
u64 nextexp;
u64 firstexp;
struct tmigr_event *evt;
u8 childmask;
bool remote;
};
/**
* struct tmigr_remote_data - data required for remote expiry hierarchy walk
* @basej: timer base in jiffies
* @now: timer base monotonic
* @firstexp: returns expiry of the first timer in the idle timer
* migration hierarchy to make sure the timer is handled in
* time; it is stored in the per CPU tmigr_cpu struct of
* CPU which expires remote timers
* @childmask: childmask of child group
* @check: is set if there is the need to handle remote timers;
* required in tmigr_requires_handle_remote() only
* @tmc_active: this flag indicates, whether the CPU which triggers
@@ -546,15 +508,43 @@ struct tmigr_walk {
* idle, only the first event of the top level has to be
* considered.
*/
struct tmigr_remote_data {
unsigned long basej;
u64 now;
struct tmigr_walk {
u64 nextexp;
u64 firstexp;
struct tmigr_event *evt;
u8 childmask;
bool remote;
unsigned long basej;
u64 now;
bool check;
bool tmc_active;
};
typedef bool (*up_f)(struct tmigr_group *, struct tmigr_group *, struct tmigr_walk *);
static void __walk_groups(up_f up, struct tmigr_walk *data,
struct tmigr_cpu *tmc)
{
struct tmigr_group *child = NULL, *group = tmc->tmgroup;
do {
WARN_ON_ONCE(group->level >= tmigr_hierarchy_levels);
if (up(group, child, data))
break;
child = group;
group = group->parent;
} while (group);
}
static void walk_groups(up_f up, struct tmigr_walk *data, struct tmigr_cpu *tmc)
{
lockdep_assert_held(&tmc->lock);
__walk_groups(up, data, tmc);
}
/*
* Returns the next event of the timerqueue @group->events
*
@@ -625,10 +615,9 @@ static u64 tmigr_next_groupevt_expires(struct tmigr_group *group)
static bool tmigr_active_up(struct tmigr_group *group,
struct tmigr_group *child,
void *ptr)
struct tmigr_walk *data)
{
union tmigr_state curstate, newstate;
struct tmigr_walk *data = ptr;
bool walk_done;
u8 childmask;
@@ -867,10 +856,8 @@ bool tmigr_update_events(struct tmigr_group *group, struct tmigr_group *child,
static bool tmigr_new_timer_up(struct tmigr_group *group,
struct tmigr_group *child,
void *ptr)
struct tmigr_walk *data)
{
struct tmigr_walk *data = ptr;
return tmigr_update_events(group, child, data);
}
@@ -1002,9 +989,8 @@ static void tmigr_handle_remote_cpu(unsigned int cpu, u64 now,
static bool tmigr_handle_remote_up(struct tmigr_group *group,
struct tmigr_group *child,
void *ptr)
struct tmigr_walk *data)
{
struct tmigr_remote_data *data = ptr;
struct tmigr_event *evt;
unsigned long jif;
u8 childmask;
@@ -1062,7 +1048,7 @@ static bool tmigr_handle_remote_up(struct tmigr_group *group,
void tmigr_handle_remote(void)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
struct tmigr_remote_data data;
struct tmigr_walk data;
if (tmigr_is_not_available(tmc))
return;
@@ -1104,9 +1090,8 @@ void tmigr_handle_remote(void)
static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
struct tmigr_group *child,
void *ptr)
struct tmigr_walk *data)
{
struct tmigr_remote_data *data = ptr;
u8 childmask;
childmask = data->childmask;
@@ -1164,7 +1149,7 @@ static bool tmigr_requires_handle_remote_up(struct tmigr_group *group,
bool tmigr_requires_handle_remote(void)
{
struct tmigr_cpu *tmc = this_cpu_ptr(&tmigr_cpu);
struct tmigr_remote_data data;
struct tmigr_walk data;
unsigned long jif;
bool ret = false;
@@ -1252,10 +1237,9 @@ u64 tmigr_cpu_new_timer(u64 nextexp)
static bool tmigr_inactive_up(struct tmigr_group *group,
struct tmigr_group *child,
void *ptr)
struct tmigr_walk *data)
{
union tmigr_state curstate, newstate, childstate;
struct tmigr_walk *data = ptr;
bool walk_done;
u8 childmask;
......