Commit 835a9a67 authored by Anna-Maria Behnsen's avatar Anna-Maria Behnsen Committed by Thomas Gleixner

timers/migration: Rename childmask to groupmask to make naming more obvious

childmask in the group reflects the mask that is required to 'reference'
this group in the parent. When reading childmask, this might be confusing,
as it suggests that this is the mask of the group's own child.

Clarify this by renaming childmask in tmigr_group and tmigr_cpu to
groupmask.
Signed-off-by: default avatarAnna-Maria Behnsen <anna-maria@linutronix.de>
Signed-off-by: default avatarThomas Gleixner <tglx@linutronix.de>
Reviewed-by: default avatarFrederic Weisbecker <frederic@kernel.org>
Link: https://lore.kernel.org/r/20240716-tmigr-fixes-v4-6-757baa7803fe@linutronix.de
parent d47be589
...@@ -43,7 +43,7 @@ TRACE_EVENT(tmigr_connect_child_parent, ...@@ -43,7 +43,7 @@ TRACE_EVENT(tmigr_connect_child_parent,
__field( unsigned int, lvl ) __field( unsigned int, lvl )
__field( unsigned int, numa_node ) __field( unsigned int, numa_node )
__field( unsigned int, num_children ) __field( unsigned int, num_children )
__field( u32, childmask ) __field( u32, groupmask )
), ),
TP_fast_assign( TP_fast_assign(
...@@ -52,11 +52,11 @@ TRACE_EVENT(tmigr_connect_child_parent, ...@@ -52,11 +52,11 @@ TRACE_EVENT(tmigr_connect_child_parent,
__entry->lvl = child->parent->level; __entry->lvl = child->parent->level;
__entry->numa_node = child->parent->numa_node; __entry->numa_node = child->parent->numa_node;
__entry->num_children = child->parent->num_children; __entry->num_children = child->parent->num_children;
__entry->childmask = child->childmask; __entry->groupmask = child->groupmask;
), ),
TP_printk("group=%p childmask=%0x parent=%p lvl=%d numa=%d num_children=%d", TP_printk("group=%p groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
__entry->child, __entry->childmask, __entry->parent, __entry->child, __entry->groupmask, __entry->parent,
__entry->lvl, __entry->numa_node, __entry->num_children) __entry->lvl, __entry->numa_node, __entry->num_children)
); );
...@@ -72,7 +72,7 @@ TRACE_EVENT(tmigr_connect_cpu_parent, ...@@ -72,7 +72,7 @@ TRACE_EVENT(tmigr_connect_cpu_parent,
__field( unsigned int, lvl ) __field( unsigned int, lvl )
__field( unsigned int, numa_node ) __field( unsigned int, numa_node )
__field( unsigned int, num_children ) __field( unsigned int, num_children )
__field( u32, childmask ) __field( u32, groupmask )
), ),
TP_fast_assign( TP_fast_assign(
...@@ -81,11 +81,11 @@ TRACE_EVENT(tmigr_connect_cpu_parent, ...@@ -81,11 +81,11 @@ TRACE_EVENT(tmigr_connect_cpu_parent,
__entry->lvl = tmc->tmgroup->level; __entry->lvl = tmc->tmgroup->level;
__entry->numa_node = tmc->tmgroup->numa_node; __entry->numa_node = tmc->tmgroup->numa_node;
__entry->num_children = tmc->tmgroup->num_children; __entry->num_children = tmc->tmgroup->num_children;
__entry->childmask = tmc->childmask; __entry->groupmask = tmc->groupmask;
), ),
TP_printk("cpu=%d childmask=%0x parent=%p lvl=%d numa=%d num_children=%d", TP_printk("cpu=%d groupmask=%0x parent=%p lvl=%d numa=%d num_children=%d",
__entry->cpu, __entry->childmask, __entry->parent, __entry->cpu, __entry->groupmask, __entry->parent,
__entry->lvl, __entry->numa_node, __entry->num_children) __entry->lvl, __entry->numa_node, __entry->num_children)
); );
......
...@@ -495,7 +495,7 @@ static bool tmigr_check_lonely(struct tmigr_group *group) ...@@ -495,7 +495,7 @@ static bool tmigr_check_lonely(struct tmigr_group *group)
* outcome is a CPU which might wake up a little early. * outcome is a CPU which might wake up a little early.
* @evt: Pointer to tmigr_event which needs to be queued (of idle * @evt: Pointer to tmigr_event which needs to be queued (of idle
* child group) * child group)
* @childmask: childmask of child group * @childmask: groupmask of child group
* @remote: Is set, when the new timer path is executed in * @remote: Is set, when the new timer path is executed in
* tmigr_handle_remote_cpu() * tmigr_handle_remote_cpu()
* @basej: timer base in jiffies * @basej: timer base in jiffies
...@@ -535,7 +535,7 @@ static void __walk_groups(up_f up, struct tmigr_walk *data, ...@@ -535,7 +535,7 @@ static void __walk_groups(up_f up, struct tmigr_walk *data,
child = group; child = group;
group = group->parent; group = group->parent;
data->childmask = child->childmask; data->childmask = child->groupmask;
} while (group); } while (group);
} }
...@@ -669,7 +669,7 @@ static void __tmigr_cpu_activate(struct tmigr_cpu *tmc) ...@@ -669,7 +669,7 @@ static void __tmigr_cpu_activate(struct tmigr_cpu *tmc)
{ {
struct tmigr_walk data; struct tmigr_walk data;
data.childmask = tmc->childmask; data.childmask = tmc->groupmask;
trace_tmigr_cpu_active(tmc); trace_tmigr_cpu_active(tmc);
...@@ -1049,7 +1049,7 @@ void tmigr_handle_remote(void) ...@@ -1049,7 +1049,7 @@ void tmigr_handle_remote(void)
if (tmigr_is_not_available(tmc)) if (tmigr_is_not_available(tmc))
return; return;
data.childmask = tmc->childmask; data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX; data.firstexp = KTIME_MAX;
/* /*
...@@ -1057,7 +1057,7 @@ void tmigr_handle_remote(void) ...@@ -1057,7 +1057,7 @@ void tmigr_handle_remote(void)
* in tmigr_handle_remote_up() anyway. Keep this check to speed up the * in tmigr_handle_remote_up() anyway. Keep this check to speed up the
* return when nothing has to be done. * return when nothing has to be done.
*/ */
if (!tmigr_check_migrator(tmc->tmgroup, tmc->childmask)) { if (!tmigr_check_migrator(tmc->tmgroup, tmc->groupmask)) {
/* /*
* If this CPU was an idle migrator, make sure to clear its wakeup * If this CPU was an idle migrator, make sure to clear its wakeup
* value so it won't chase timers that have already expired elsewhere. * value so it won't chase timers that have already expired elsewhere.
...@@ -1150,7 +1150,7 @@ bool tmigr_requires_handle_remote(void) ...@@ -1150,7 +1150,7 @@ bool tmigr_requires_handle_remote(void)
return ret; return ret;
data.now = get_jiffies_update(&jif); data.now = get_jiffies_update(&jif);
data.childmask = tmc->childmask; data.childmask = tmc->groupmask;
data.firstexp = KTIME_MAX; data.firstexp = KTIME_MAX;
data.tmc_active = !tmc->idle; data.tmc_active = !tmc->idle;
data.check = false; data.check = false;
...@@ -1310,7 +1310,7 @@ static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp) ...@@ -1310,7 +1310,7 @@ static u64 __tmigr_cpu_deactivate(struct tmigr_cpu *tmc, u64 nextexp)
struct tmigr_walk data = { .nextexp = nextexp, struct tmigr_walk data = { .nextexp = nextexp,
.firstexp = KTIME_MAX, .firstexp = KTIME_MAX,
.evt = &tmc->cpuevt, .evt = &tmc->cpuevt,
.childmask = tmc->childmask }; .childmask = tmc->groupmask };
/* /*
* If nextexp is KTIME_MAX, the CPU event will be ignored because the * If nextexp is KTIME_MAX, the CPU event will be ignored because the
...@@ -1388,7 +1388,7 @@ u64 tmigr_quick_check(u64 nextevt) ...@@ -1388,7 +1388,7 @@ u64 tmigr_quick_check(u64 nextevt)
if (WARN_ON_ONCE(tmc->idle)) if (WARN_ON_ONCE(tmc->idle))
return nextevt; return nextevt;
if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->childmask)) if (!tmigr_check_migrator_and_lonely(tmc->tmgroup, tmc->groupmask))
return KTIME_MAX; return KTIME_MAX;
do { do {
...@@ -1552,7 +1552,7 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, ...@@ -1552,7 +1552,7 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING); raw_spin_lock_nested(&parent->lock, SINGLE_DEPTH_NESTING);
child->parent = parent; child->parent = parent;
child->childmask = BIT(parent->num_children++); child->groupmask = BIT(parent->num_children++);
raw_spin_unlock(&parent->lock); raw_spin_unlock(&parent->lock);
raw_spin_unlock_irq(&child->lock); raw_spin_unlock_irq(&child->lock);
...@@ -1586,7 +1586,7 @@ static void tmigr_connect_child_parent(struct tmigr_group *child, ...@@ -1586,7 +1586,7 @@ static void tmigr_connect_child_parent(struct tmigr_group *child,
* the new childmask and parent to subsequent walkers through this * the new childmask and parent to subsequent walkers through this
* @child. Therefore propagate active state unconditionally. * @child. Therefore propagate active state unconditionally.
*/ */
data.childmask = child->childmask; data.childmask = child->groupmask;
/* /*
* There is only one new level per time (which is protected by * There is only one new level per time (which is protected by
...@@ -1652,7 +1652,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node) ...@@ -1652,7 +1652,7 @@ static int tmigr_setup_groups(unsigned int cpu, unsigned int node)
raw_spin_lock_irq(&group->lock); raw_spin_lock_irq(&group->lock);
tmc->tmgroup = group; tmc->tmgroup = group;
tmc->childmask = BIT(group->num_children++); tmc->groupmask = BIT(group->num_children++);
raw_spin_unlock_irq(&group->lock); raw_spin_unlock_irq(&group->lock);
...@@ -1731,7 +1731,7 @@ static int tmigr_cpu_prepare(unsigned int cpu) ...@@ -1731,7 +1731,7 @@ static int tmigr_cpu_prepare(unsigned int cpu)
if (ret < 0) if (ret < 0)
return ret; return ret;
if (tmc->childmask == 0) if (tmc->groupmask == 0)
return -EINVAL; return -EINVAL;
return ret; return ret;
......
...@@ -51,9 +51,8 @@ struct tmigr_event { ...@@ -51,9 +51,8 @@ struct tmigr_event {
* @num_children: Counter of group children to make sure the group is only * @num_children: Counter of group children to make sure the group is only
* filled with TMIGR_CHILDREN_PER_GROUP; Required for setup * filled with TMIGR_CHILDREN_PER_GROUP; Required for setup
* only * only
* @childmask: childmask of the group in the parent group; is set * @groupmask: mask of the group in the parent group; is set during
* during setup and will never change; can be read * setup and will never change; can be read lockless
* lockless
* @list: List head that is added to the per level * @list: List head that is added to the per level
* tmigr_level_list; is required during setup when a * tmigr_level_list; is required during setup when a
* new group needs to be connected to the existing * new group needs to be connected to the existing
...@@ -69,7 +68,7 @@ struct tmigr_group { ...@@ -69,7 +68,7 @@ struct tmigr_group {
unsigned int level; unsigned int level;
int numa_node; int numa_node;
unsigned int num_children; unsigned int num_children;
u8 childmask; u8 groupmask;
struct list_head list; struct list_head list;
}; };
...@@ -89,7 +88,7 @@ struct tmigr_group { ...@@ -89,7 +88,7 @@ struct tmigr_group {
* hierarchy * hierarchy
* @remote: Is set when timers of the CPU are expired remotely * @remote: Is set when timers of the CPU are expired remotely
* @tmgroup: Pointer to the parent group * @tmgroup: Pointer to the parent group
* @childmask: childmask of tmigr_cpu in the parent group * @groupmask: mask of tmigr_cpu in the parent group
* @wakeup: Stores the first timer when the timer migration * @wakeup: Stores the first timer when the timer migration
* hierarchy is completely idle and remote expiry was done; * hierarchy is completely idle and remote expiry was done;
* is returned to timer code in the idle path and is only * is returned to timer code in the idle path and is only
...@@ -102,7 +101,7 @@ struct tmigr_cpu { ...@@ -102,7 +101,7 @@ struct tmigr_cpu {
bool idle; bool idle;
bool remote; bool remote;
struct tmigr_group *tmgroup; struct tmigr_group *tmgroup;
u8 childmask; u8 groupmask;
u64 wakeup; u64 wakeup;
struct tmigr_event cpuevt; struct tmigr_event cpuevt;
}; };
...@@ -118,8 +117,8 @@ union tmigr_state { ...@@ -118,8 +117,8 @@ union tmigr_state {
u32 state; u32 state;
/** /**
* struct - split state of tmigr_group * struct - split state of tmigr_group
* @active: Contains each childmask bit of the active children * @active: Contains each mask bit of the active children
* @migrator: Contains childmask of the child which is migrator * @migrator: Contains mask of the child which is migrator
* @seq: Sequence counter needs to be increased when an update * @seq: Sequence counter needs to be increased when an update
* to the tmigr_state is done. It prevents a race when * to the tmigr_state is done. It prevents a race when
* updates in the child groups are propagated in changed * updates in the child groups are propagated in changed
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment