Commit 060746d9 authored by Ming Lei, committed by Thomas Gleixner

genirq/affinity: Pass first vector to __irq_build_affinity_masks()

No functional change.

Prepares for support of allocating and affinitizing sets of interrupts, in
which each set of interrupts needs a full two-stage spreading of its own.
The first vector argument is necessary so that the spreading starts from
the first vector of each set.

[ tglx: Minor changelog tweaks ]
Signed-off-by: Ming Lei <ming.lei@redhat.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: Jens Axboe <axboe@kernel.dk>
Cc: linux-block@vger.kernel.org
Cc: Hannes Reinecke <hare@suse.com>
Cc: Keith Busch <keith.busch@intel.com>
Cc: Sagi Grimberg <sagi@grimberg.me>
Link: https://lkml.kernel.org/r/20181102145951.31979-4-ming.lei@redhat.com
parent 5c903e10
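
To make the changelog's point concrete: with multiple interrupt sets, only the
first set begins at affd->pre_vectors, so the spreading code has to know each
set's own first vector to wrap back to. A small user-space sketch (not kernel
code; the pre_vectors value and set sizes below are made up for illustration)
of that layout:

#include <stdio.h>

int main(void)
{
	int pre_vectors = 2;		/* hypothetical vectors reserved before the sets */
	int set_size[] = { 2, 4 };	/* hypothetical interrupt set sizes */
	int firstvec = pre_vectors;

	for (int i = 0; i < 2; i++) {
		printf("set %d: firstvec=%d, vectors %d..%d\n",
		       i, firstvec, firstvec, firstvec + set_size[i] - 1);
		firstvec += set_size[i];
	}
	/*
	 * Prints:
	 *   set 0: firstvec=2, vectors 2..3
	 *   set 1: firstvec=4, vectors 4..7
	 * Set 1 does not start at pre_vectors, so a spread over set 1 must
	 * wrap to vector 4, not to vector 2.
	 */
	return 0;
}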
@@ -95,14 +95,14 @@ static int get_nodes_in_cpumask(cpumask_var_t *node_to_cpumask,
 }
 
 static int __irq_build_affinity_masks(const struct irq_affinity *affd,
-                                      int startvec, int numvecs,
+                                      int startvec, int numvecs, int firstvec,
                                       cpumask_var_t *node_to_cpumask,
                                       const struct cpumask *cpu_mask,
                                       struct cpumask *nmsk,
                                       struct cpumask *masks)
 {
 	int n, nodes, cpus_per_vec, extra_vecs, done = 0;
-	int last_affv = affd->pre_vectors + numvecs;
+	int last_affv = firstvec + numvecs;
 	int curvec = startvec;
 	nodemask_t nodemsk = NODE_MASK_NONE;
 
@@ -119,7 +119,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 		for_each_node_mask(n, nodemsk) {
 			cpumask_or(masks + curvec, masks + curvec, node_to_cpumask[n]);
 			if (++curvec == last_affv)
-				curvec = affd->pre_vectors;
+				curvec = firstvec;
 		}
 		done = numvecs;
 		goto out;
@@ -129,7 +129,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 		int ncpus, v, vecs_to_assign, vecs_per_node;
 
 		/* Spread the vectors per node */
-		vecs_per_node = (numvecs - (curvec - affd->pre_vectors)) / nodes;
+		vecs_per_node = (numvecs - (curvec - firstvec)) / nodes;
 
 		/* Get the cpus on this node which are in the mask */
 		cpumask_and(nmsk, cpu_mask, node_to_cpumask[n]);
@@ -157,7 +157,7 @@ static int __irq_build_affinity_masks(const struct irq_affinity *affd,
 		if (done >= numvecs)
 			break;
 		if (curvec >= last_affv)
-			curvec = affd->pre_vectors;
+			curvec = firstvec;
 		--nodes;
 	}
 
@@ -190,8 +190,9 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 
 	/* Spread on present CPUs starting from affd->pre_vectors */
 	usedvecs = __irq_build_affinity_masks(affd, curvec, numvecs,
-					      node_to_cpumask, cpu_present_mask,
-					      nmsk, masks);
+					      affd->pre_vectors,
+					      node_to_cpumask,
+					      cpu_present_mask, nmsk, masks);
 
 	/*
 	 * Spread on non present CPUs starting from the next vector to be
@@ -205,8 +206,9 @@ static int irq_build_affinity_masks(const struct irq_affinity *affd,
 		curvec = affd->pre_vectors + usedvecs;
 	cpumask_andnot(npresmsk, cpu_possible_mask, cpu_present_mask);
 	usedvecs += __irq_build_affinity_masks(affd, curvec, numvecs,
-					       node_to_cpumask, npresmsk,
-					       nmsk, masks);
+					       affd->pre_vectors,
+					       node_to_cpumask, npresmsk,
+					       nmsk, masks);
 
 	put_online_cpus();
 	free_cpumask_var(npresmsk);
...
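
The heart of the change is the wrap target inside __irq_build_affinity_masks():
when curvec reaches last_affv it now wraps back to the caller-supplied firstvec
rather than the hard-coded affd->pre_vectors, so a spread that starts part-way
through the vector space stays inside its own range. A minimal stand-alone
model of that wrap-around (illustrative numbers only, not kernel code):

#include <stdio.h>

/* Model of the updated wrap logic: stay within [firstvec, firstvec + numvecs). */
static int next_vec(int curvec, int firstvec, int numvecs)
{
	int last_affv = firstvec + numvecs;	/* one past the set's last vector */

	if (++curvec == last_affv)
		curvec = firstvec;		/* wrap to the set's own first vector */
	return curvec;
}

int main(void)
{
	int firstvec = 4, numvecs = 4;		/* hypothetical set covering vectors 4..7 */
	int curvec = firstvec;

	for (int i = 0; i < 10; i++) {
		printf("%d ", curvec);
		curvec = next_vec(curvec, firstvec, numvecs);
	}
	printf("\n");	/* prints: 4 5 6 7 4 5 6 7 4 5 */
	return 0;
}

With the previous code the wrap target would have been affd->pre_vectors, which
is only correct when the range being spread starts right after the pre-vectors.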