Commit c410abbb authored by Dou Liyang's avatar Dou Liyang Committed by Thomas Gleixner

genirq/affinity: Add is_managed to struct irq_affinity_desc

Devices which use managed interrupts usually have two classes of
interrupts:

  - Interrupts for multiple device queues
  - Interrupts for general device management

Currently both classes are treated the same way, i.e. as managed
interrupts. The general interrupts get the default affinity mask assigned
while the device queue interrupts are spread out over the possible CPUs.

Treating the general interrupts as managed is both a limitation and under
certain circumstances a bug. Assume the following situation:

 default_irq_affinity = 4..7

So if CPUs 4-7 are offlined, then the core code will shut down the device
management interrupts because the last CPU in their affinity mask went
offline.

It's also a limitation because it's desired to allow manual placement of
the general device interrupts for various reasons. If they are marked
managed then the interrupt affinity setting from both user and kernel space
is disabled. That limitation was reported by Kashyap and Sumit.

Expand struct irq_affinity_desc with a new bit 'is_managed' which is set
for truly managed interrupts (queue interrupts) and cleared for the general
device interrupts.

[ tglx: Simplify code and massage changelog ]
Reported-by: Kashyap Desai <kashyap.desai@broadcom.com>
Reported-by: Sumit Saxena <sumit.saxena@broadcom.com>
Signed-off-by: Dou Liyang <douliyangs@gmail.com>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Cc: linux-pci@vger.kernel.org
Cc: shivasharan.srikanteshwara@broadcom.com
Cc: ming.lei@redhat.com
Cc: hch@lst.de
Cc: bhelgaas@google.com
Cc: douliyang1@huawei.com
Link: https://lkml.kernel.org/r/20181204155122.6327-3-douliyangs@gmail.com
parent bec04037
...@@ -263,6 +263,7 @@ struct irq_affinity { ...@@ -263,6 +263,7 @@ struct irq_affinity {
*/ */
struct irq_affinity_desc { struct irq_affinity_desc {
struct cpumask mask; struct cpumask mask;
unsigned int is_managed : 1;
}; };
#if defined(CONFIG_SMP) #if defined(CONFIG_SMP)
......
...@@ -289,6 +289,10 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd) ...@@ -289,6 +289,10 @@ irq_create_affinity_masks(int nvecs, const struct irq_affinity *affd)
for (; curvec < nvecs; curvec++) for (; curvec < nvecs; curvec++)
cpumask_copy(&masks[curvec].mask, irq_default_affinity); cpumask_copy(&masks[curvec].mask, irq_default_affinity);
/* Mark the managed interrupts */
for (i = affd->pre_vectors; i < nvecs - affd->post_vectors; i++)
masks[i].is_managed = 1;
outnodemsk: outnodemsk:
free_node_to_cpumask(node_to_cpumask); free_node_to_cpumask(node_to_cpumask);
return masks; return masks;
......
...@@ -453,27 +453,30 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node, ...@@ -453,27 +453,30 @@ static int alloc_descs(unsigned int start, unsigned int cnt, int node,
struct module *owner) struct module *owner)
{ {
struct irq_desc *desc; struct irq_desc *desc;
unsigned int flags;
int i; int i;
/* Validate affinity mask(s) */ /* Validate affinity mask(s) */
if (affinity) { if (affinity) {
		for (i = 0; i < cnt; i++) {		for (i = 0; i < cnt; i++) {
if (cpumask_empty(&affinity[i].mask)) if (cpumask_empty(&affinity[i].mask))
return -EINVAL; return -EINVAL;
} }
} }
flags = affinity ? IRQD_AFFINITY_MANAGED | IRQD_MANAGED_SHUTDOWN : 0;
for (i = 0; i < cnt; i++) { for (i = 0; i < cnt; i++) {
const struct cpumask *mask = NULL; const struct cpumask *mask = NULL;
unsigned int flags = 0;
if (affinity) { if (affinity) {
node = cpu_to_node(cpumask_first(affinity)); if (affinity->is_managed) {
flags = IRQD_AFFINITY_MANAGED |
IRQD_MANAGED_SHUTDOWN;
}
mask = &affinity->mask; mask = &affinity->mask;
node = cpu_to_node(cpumask_first(mask));
affinity++; affinity++;
} }
desc = alloc_desc(start + i, node, flags, mask, owner); desc = alloc_desc(start + i, node, flags, mask, owner);
if (!desc) if (!desc)
goto err; goto err;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment