Commit 188a5696 authored by Ingo Molnar

genirq/affinity: Only build SMP-only helper functions on SMP kernels

allnoconfig grew these new build warnings in lib/group_cpus.c:

  lib/group_cpus.c:247:12: warning: ‘__group_cpus_evenly’ defined but not used [-Wunused-function]
  lib/group_cpus.c:75:13: warning: ‘build_node_to_cpumask’ defined but not used [-Wunused-function]
  lib/group_cpus.c:66:13: warning: ‘free_node_to_cpumask’ defined but not used [-Wunused-function]
  lib/group_cpus.c:43:23: warning: ‘alloc_node_to_cpumask’ defined but not used [-Wunused-function]

Widen the #ifdef CONFIG_SMP block so that these helpers, which have no
users on !SMP, are not built on non-SMP kernels.

Also annotate the preprocessor branches for better readability.
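
The pattern is the standard one for config-gated code: every helper that
only the gated implementation calls lives inside the same #ifdef block as
that implementation, and the other branch provides a stub with the same
signature. A minimal, self-contained illustration of the pattern (plain
userspace C; HAVE_FAST_PATH, fast_helper() and compute() are made-up names
standing in for CONFIG_SMP and the group_cpus helpers):

#include <stdio.h>

#ifdef HAVE_FAST_PATH

/*
 * Helper used only by the gated implementation.  If it were defined
 * outside this block, builds without HAVE_FAST_PATH would warn:
 * "defined but not used".
 */
static int fast_helper(int x)
{
	return x * 2;
}

int compute(int x)
{
	return fast_helper(x) + 1;
}

#else /* HAVE_FAST_PATH */

/* Fallback with the same signature, so callers need no #ifdefs. */
int compute(int x)
{
	return x + 1;
}

#endif /* HAVE_FAST_PATH */

int main(void)
{
	printf("compute(20) = %d\n", compute(20));
	return 0;
}

Building this with and without -DHAVE_FAST_PATH produces no
-Wunused-function warnings in either configuration, which is the point of
widening the block here.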

Fixes: f7b3ea8c ("genirq/affinity: Move group_cpus_evenly() into lib/")
Cc: Ming Lei <ming.lei@redhat.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Link: https://lore.kernel.org/r/20221227022905.352674-6-ming.lei@redhat.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
parent 6a6dcae8
--- a/lib/group_cpus.c
+++ b/lib/group_cpus.c
@@ -9,6 +9,8 @@
 #include <linux/sort.h>
 #include <linux/group_cpus.h>
 
+#ifdef CONFIG_SMP
+
 static void grp_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
 				unsigned int cpus_per_grp)
 {
@@ -327,7 +329,6 @@ static int __group_cpus_evenly(unsigned int startgrp, unsigned int numgrps,
 	return done;
 }
 
-#ifdef CONFIG_SMP
 /**
  * group_cpus_evenly - Group all CPUs evenly per NUMA/CPU locality
  * @numgrps: number of groups
@@ -412,7 +413,7 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 	}
 	return masks;
 }
-#else
+#else /* CONFIG_SMP */
 struct cpumask *group_cpus_evenly(unsigned int numgrps)
 {
 	struct cpumask *masks = kcalloc(numgrps, sizeof(*masks), GFP_KERNEL);
@@ -424,4 +425,4 @@ struct cpumask *group_cpus_evenly(unsigned int numgrps)
 	cpumask_copy(&masks[0], cpu_possible_mask);
 	return masks;
 }
-#endif
+#endif /* CONFIG_SMP */
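
For context, the sketch below shows how a caller sees the two variants.
The caller name example_spread_queues() is hypothetical; the
group_cpus_evenly() contract (a kcalloc()'d array of numgrps masks, freed
by the caller, NULL on allocation failure) is taken from the code above.
On !SMP kernels only masks[0] is populated, covering all possible CPUs.

#include <linux/cpumask.h>
#include <linux/errno.h>
#include <linux/group_cpus.h>
#include <linux/printk.h>
#include <linux/slab.h>

/* Hypothetical caller: spread @numgrps queues across the CPU groups. */
static int example_spread_queues(unsigned int numgrps)
{
	struct cpumask *masks;
	unsigned int i;

	masks = group_cpus_evenly(numgrps);
	if (!masks)
		return -ENOMEM;

	for (i = 0; i < numgrps; i++)
		pr_info("group %u: %*pbl\n", i, cpumask_pr_args(&masks[i]));

	kfree(masks);		/* the caller owns the array */
	return 0;
}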