Commit 84676c1f authored by Christoph Hellwig's avatar Christoph Hellwig Committed by Jens Axboe

genirq/affinity: assign vectors to all possible CPUs

Currently we assign managed interrupt vectors to all present CPUs.  This
works fine for systems where we only online/offline CPUs.  But in case of
systems that support physical CPU hotplug (or the virtualized version of
it) this means the additional CPUs covered for in the ACPI tables or on
the command line are not catered for.  To fix this we'd either need to
introduce new hotplug CPU states just for this case, or we can start
assigning vectors to possible but not present CPUs.
Reported-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Christian Borntraeger <borntraeger@de.ibm.com>
Tested-by: Stefan Haberland <sth@linux.vnet.ibm.com>
Fixes: 4b855ad3 ("blk-mq: Create hctx for each present CPU")
Cc: linux-kernel@vger.kernel.org
Cc: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
parent c27d53fb
...@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk, ...@@ -39,7 +39,7 @@ static void irq_spread_init_one(struct cpumask *irqmsk, struct cpumask *nmsk,
} }
} }
static cpumask_var_t *alloc_node_to_present_cpumask(void) static cpumask_var_t *alloc_node_to_possible_cpumask(void)
{ {
cpumask_var_t *masks; cpumask_var_t *masks;
int node; int node;
...@@ -62,7 +62,7 @@ static cpumask_var_t *alloc_node_to_present_cpumask(void) ...@@ -62,7 +62,7 @@ static cpumask_var_t *alloc_node_to_present_cpumask(void)
return NULL; return NULL;
} }
static void free_node_to_present_cpumask(cpumask_var_t *masks) static void free_node_to_possible_cpumask(cpumask_var_t *masks)
{ {
int node; int node;
...@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks) ...@@ -71,22 +71,22 @@ static void free_node_to_present_cpumask(cpumask_var_t *masks)
kfree(masks); kfree(masks);
} }
static void build_node_to_present_cpumask(cpumask_var_t *masks) static void build_node_to_possible_cpumask(cpumask_var_t *masks)
{ {
int cpu; int cpu;
for_each_present_cpu(cpu) for_each_possible_cpu(cpu)
cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]); cpumask_set_cpu(cpu, masks[cpu_to_node(cpu)]);
} }
static int get_nodes_in_cpumask(cpumask_var_t *node_to_present_cpumask, static int get_nodes_in_cpumask(cpumask_var_t *node_to_possible_cpumask,
const struct cpumask *mask, nodemask_t *nodemsk) const struct cpumask *mask, nodemask_t *nodemsk)
{ {
int n, nodes = 0; int n, nodes = 0;
/* Calculate the number of nodes in the supplied affinity mask */ /* Calculate the number of nodes in the supplied affinity mask */
for_each_node(n) { for_each_node(n) {
if (cpumask_intersects(mask, node_to_present_cpumask[n])) { if (cpumask_intersects(mask, node_to_possible_cpumask[n])) {
node_set(n, *nodemsk); node_set(n, *nodemsk);
nodes++; nodes++;
} }
...@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -109,7 +109,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
int last_affv = affv + affd->pre_vectors; int last_affv = affv + affd->pre_vectors;
nodemask_t nodemsk = NODE_MASK_NONE; nodemask_t nodemsk = NODE_MASK_NONE;
struct cpumask *masks; struct cpumask *masks;
cpumask_var_t nmsk, *node_to_present_cpumask; cpumask_var_t nmsk, *node_to_possible_cpumask;
/* /*
* If there aren't any vectors left after applying the pre/post * If there aren't any vectors left after applying the pre/post
...@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -125,8 +125,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (!masks) if (!masks)
goto out; goto out;
node_to_present_cpumask = alloc_node_to_present_cpumask(); node_to_possible_cpumask = alloc_node_to_possible_cpumask();
if (!node_to_present_cpumask) if (!node_to_possible_cpumask)
goto out; goto out;
/* Fill out vectors at the beginning that don't need affinity */ /* Fill out vectors at the beginning that don't need affinity */
...@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -135,8 +135,8 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Stabilize the cpumasks */ /* Stabilize the cpumasks */
get_online_cpus(); get_online_cpus();
build_node_to_present_cpumask(node_to_present_cpumask); build_node_to_possible_cpumask(node_to_possible_cpumask);
nodes = get_nodes_in_cpumask(node_to_present_cpumask, cpu_present_mask, nodes = get_nodes_in_cpumask(node_to_possible_cpumask, cpu_possible_mask,
&nodemsk); &nodemsk);
/* /*
...@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -146,7 +146,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
if (affv <= nodes) { if (affv <= nodes) {
for_each_node_mask(n, nodemsk) { for_each_node_mask(n, nodemsk) {
cpumask_copy(masks + curvec, cpumask_copy(masks + curvec,
node_to_present_cpumask[n]); node_to_possible_cpumask[n]);
if (++curvec == last_affv) if (++curvec == last_affv)
break; break;
} }
...@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -160,7 +160,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes; vecs_per_node = (affv - (curvec - affd->pre_vectors)) / nodes;
/* Get the cpus on this node which are in the mask */ /* Get the cpus on this node which are in the mask */
cpumask_and(nmsk, cpu_present_mask, node_to_present_cpumask[n]); cpumask_and(nmsk, cpu_possible_mask, node_to_possible_cpumask[n]);
/* Calculate the number of cpus per vector */ /* Calculate the number of cpus per vector */
ncpus = cpumask_weight(nmsk); ncpus = cpumask_weight(nmsk);
...@@ -192,7 +192,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -192,7 +192,7 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
/* Fill out vectors at the end that don't need affinity */ /* Fill out vectors at the end that don't need affinity */
for (; curvec < nvecs; curvec++) for (; curvec < nvecs; curvec++)
cpumask_copy(masks + curvec, irq_default_affinity); cpumask_copy(masks + curvec, irq_default_affinity);
free_node_to_present_cpumask(node_to_present_cpumask); free_node_to_possible_cpumask(node_to_possible_cpumask);
out: out:
free_cpumask_var(nmsk); free_cpumask_var(nmsk);
return masks; return masks;
...@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity ...@@ -214,7 +214,7 @@ int irq_calc_affinity_vectors(int minvec, int maxvec, const struct irq_affinity
return 0; return 0;
get_online_cpus(); get_online_cpus();
ret = min_t(int, cpumask_weight(cpu_present_mask), vecs) + resv; ret = min_t(int, cpumask_weight(cpu_possible_mask), vecs) + resv;
put_online_cpus(); put_online_cpus();
return ret; return ret;
} }
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment