Commit d6373019 authored by Sebastian Sanchez, committed by Doug Ledford

IB/hfi1: Reserve and collapse CPU cores for contexts

Kernel receive queues oversubscribe CPU cores on multi-HFI systems.
To prevent this, the kernel receive queues are separated onto
different cores, and the SDMA engine interrupts are constrained to
a lesser number of cores.

hfi1s_on_numa_node * krcvqs is the number of CPU cores that are
reserved for kernel receive queues across all HFIs on a NUMA node. Each
HFI assigns its kernel receive queues to cores from this reserved set.
If there end up being 0 CPU cores left over for the SDMA engines, the
SDMA engines fall back to the same CPU core used by the general/control
contexts.
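
The reserved-core arithmetic can be illustrated with a minimal, stand-alone
sketch. The values hfis_on_node, krcvqs_per_hfi, and node_cpus are made-up
examples; the driver derives the real counts from hfi1_per_node_cntr[] and
dd->n_krcv_queues as shown in the diff below.

#include <stdio.h>

int main(void)
{
        int hfis_on_node = 2;    /* HFIs counted on this NUMA node (example) */
        int krcvqs_per_hfi = 4;  /* kernel receive queues per HFI (example) */
        int node_cpus = 12;      /* usable CPU cores on the node (example) */

        /* one core is shared by all general/control contexts */
        int general_cores = 1;

        /*
         * Each HFI's control context shares the general core, so only
         * (krcvqs_per_hfi - 1) cores per HFI are reserved for receive.
         */
        int rcv_cores = (krcvqs_per_hfi - 1) * hfis_on_node;
        int sdma_cores = node_cpus - general_cores - rcv_cores;

        printf("cores reserved for receive queues: %d\n", rcv_cores);
        if (sdma_cores > 0)
                printf("cores left for SDMA engines: %d\n", sdma_cores);
        else
                /* nothing left: SDMA shares the general/control core */
                printf("SDMA engines share the general/control core\n");
        return 0;
}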

In addition, general and control contexts are each assigned their own
CPU core; however, both types of contexts tend to have low traffic.
To save CPU cores, collapse the general and control contexts onto one
CPU core for all HFI units. This change also prevents SDMA engine
interrupts from wrapping around onto general contexts.
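
A simplified model of the resulting interrupt placement is sketched below;
pick_cpus(), the SRC_* enum values, and the CPU ranges are illustrative
stand-ins for the hfi1_get_irq_affinity() changes in the diff, not driver
code.

#include <stdio.h>

/* illustrative stand-ins for the driver's interrupt sources */
enum irq_src { SRC_SDMA, SRC_GENERAL, SRC_RCVCTXT };

/* illustrative per-node CPU assignments, one entry per purpose */
struct node_masks {
        const char *def_intr;     /* SDMA engine interrupts */
        const char *rcv_intr;     /* kernel receive contexts */
        const char *general_intr; /* single shared general/control core */
};

/*
 * Pick the CPUs an interrupt is pinned to: general interrupts and the
 * control context both collapse onto the shared general core.
 */
static const char *pick_cpus(const struct node_masks *m, enum irq_src src,
                             int is_ctrl_ctxt)
{
        switch (src) {
        case SRC_SDMA:
                return m->def_intr;
        case SRC_GENERAL:
                return m->general_intr;
        case SRC_RCVCTXT:
                return is_ctrl_ctxt ? m->general_intr : m->rcv_intr;
        }
        return NULL;
}

int main(void)
{
        struct node_masks m = { "2-5", "6-11", "1" };

        printf("SDMA engines    -> CPUs %s\n", pick_cpus(&m, SRC_SDMA, 0));
        printf("general context -> CPU  %s\n", pick_cpus(&m, SRC_GENERAL, 0));
        printf("control context -> CPU  %s\n", pick_cpus(&m, SRC_RCVCTXT, 1));
        printf("kernel rcv ctxt -> CPUs %s\n", pick_cpus(&m, SRC_RCVCTXT, 0));
        return 0;
}
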
Reviewed-by: Dean Luick <dean.luick@intel.com>
Signed-off-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4197344b
@@ -66,6 +66,9 @@ static const char * const irq_type_names[] = {
         "OTHER",
 };
 
+/* Per NUMA node count of HFI devices */
+static unsigned int *hfi1_per_node_cntr;
+
 static inline void init_cpu_mask_set(struct cpu_mask_set *set)
 {
         cpumask_clear(&set->mask);
@@ -107,8 +110,12 @@ void init_real_cpu_mask(void)
         }
 }
 
-void node_affinity_init(void)
+int node_affinity_init(void)
 {
+        int node;
+        struct pci_dev *dev = NULL;
+        const struct pci_device_id *ids = hfi1_pci_tbl;
+
         cpumask_copy(&node_affinity.proc.mask, cpu_online_mask);
         /*
          * The real cpu mask is part of the affinity struct but it has to be
@@ -116,6 +123,25 @@ void node_affinity_init(void)
          * contexts in set_up_context_variables().
          */
         init_real_cpu_mask();
+
+        hfi1_per_node_cntr = kcalloc(num_possible_nodes(),
+                                     sizeof(*hfi1_per_node_cntr), GFP_KERNEL);
+        if (!hfi1_per_node_cntr)
+                return -ENOMEM;
+
+        while (ids->vendor) {
+                dev = NULL;
+                while ((dev = pci_get_device(ids->vendor, ids->device, dev))) {
+                        node = pcibus_to_node(dev->bus);
+                        if (node < 0)
+                                node = numa_node_id();
+                        hfi1_per_node_cntr[node]++;
+                }
+                ids++;
+        }
+
+        return 0;
 }
 
 void node_affinity_destroy(void)
@@ -131,6 +157,7 @@ void node_affinity_destroy(void)
                 kfree(entry);
         }
         spin_unlock(&node_affinity.lock);
+        kfree(hfi1_per_node_cntr);
 }
 
 static struct hfi1_affinity_node *node_affinity_allocate(int node)
@@ -213,6 +240,7 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
         }
         init_cpu_mask_set(&entry->def_intr);
         init_cpu_mask_set(&entry->rcv_intr);
+        cpumask_clear(&entry->general_intr_mask);
         /* Use the "real" cpu mask of this node as the default */
         cpumask_and(&entry->def_intr.mask, &node_affinity.real_cpu_mask,
                     local_mask);
@@ -224,11 +252,15 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
         if (possible == 1) {
                 /* only one CPU, everyone will use it */
                 cpumask_set_cpu(curr_cpu, &entry->rcv_intr.mask);
+                cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
         } else {
                 /*
-                 * Retain the first CPU in the default list for the
-                 * control context.
+                 * The general/control context will be the first CPU in
+                 * the default list, so it is removed from the default
+                 * list and added to the general interrupt list.
                  */
+                cpumask_clear_cpu(curr_cpu, &entry->def_intr.mask);
+                cpumask_set_cpu(curr_cpu, &entry->general_intr_mask);
                 curr_cpu = cpumask_next(curr_cpu,
                                         &entry->def_intr.mask);
@@ -236,7 +268,10 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
                  * Remove the remaining kernel receive queues from
                  * the default list and add them to the receive list.
                  */
-                for (i = 0; i < dd->n_krcv_queues - 1; i++) {
+                for (i = 0;
+                     i < (dd->n_krcv_queues - 1) *
+                          hfi1_per_node_cntr[dd->node];
+                     i++) {
                         cpumask_clear_cpu(curr_cpu,
                                           &entry->def_intr.mask);
                         cpumask_set_cpu(curr_cpu,
@@ -246,6 +281,15 @@ int hfi1_dev_affinity_init(struct hfi1_devdata *dd)
                         if (curr_cpu >= nr_cpu_ids)
                                 break;
                 }
+
+                /*
+                 * If there ends up being 0 CPU cores leftover for SDMA
+                 * engines, use the same CPU cores as general/control
+                 * context.
+                 */
+                if (cpumask_weight(&entry->def_intr.mask) == 0)
+                        cpumask_copy(&entry->def_intr.mask,
+                                     &entry->general_intr_mask);
         }
 
         spin_lock(&node_affinity.lock);
@@ -261,7 +305,7 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
         int ret;
         cpumask_var_t diff;
         struct hfi1_affinity_node *entry;
-        struct cpu_mask_set *set;
+        struct cpu_mask_set *set = NULL;
         struct sdma_engine *sde = NULL;
         struct hfi1_ctxtdata *rcd = NULL;
         char extra[64];
@@ -282,18 +326,17 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
         case IRQ_SDMA:
                 sde = (struct sdma_engine *)msix->arg;
                 scnprintf(extra, 64, "engine %u", sde->this_idx);
-                /* fall through */
-        case IRQ_GENERAL:
                 set = &entry->def_intr;
                 break;
+        case IRQ_GENERAL:
+                cpu = cpumask_first(&entry->general_intr_mask);
+                break;
         case IRQ_RCVCTXT:
                 rcd = (struct hfi1_ctxtdata *)msix->arg;
-                if (rcd->ctxt == HFI1_CTRL_CTXT) {
-                        set = &entry->def_intr;
-                        cpu = cpumask_first(&set->mask);
-                } else {
+                if (rcd->ctxt == HFI1_CTRL_CTXT)
+                        cpu = cpumask_first(&entry->general_intr_mask);
+                else
                         set = &entry->rcv_intr;
-                }
                 scnprintf(extra, 64, "ctxt %u", rcd->ctxt);
                 break;
         default:
@@ -302,9 +345,9 @@ int hfi1_get_irq_affinity(struct hfi1_devdata *dd, struct hfi1_msix_entry *msix)
         }
 
         /*
-         * The control receive context is placed on a particular CPU, which
-         * is set above. Skip accounting for it. Everything else finds its
-         * CPU here.
+         * The general and control contexts are placed on a particular
+         * CPU, which is set above. Skip accounting for it. Everything else
+         * finds its CPU here.
          */
         if (cpu == -1 && set) {
                 spin_lock(&node_affinity.lock);
@@ -355,12 +398,14 @@ void hfi1_put_irq_affinity(struct hfi1_devdata *dd,
 
         switch (msix->type) {
         case IRQ_SDMA:
-        case IRQ_GENERAL:
                 set = &entry->def_intr;
                 break;
+        case IRQ_GENERAL:
+                /* Don't do accounting for general contexts */
+                break;
         case IRQ_RCVCTXT:
                 rcd = (struct hfi1_ctxtdata *)msix->arg;
-                /* only do accounting for non control contexts */
+                /* Don't do accounting for control contexts */
                 if (rcd->ctxt != HFI1_CTRL_CTXT)
                         set = &entry->rcv_intr;
                 break;
@@ -438,14 +483,20 @@ int hfi1_get_proc_affinity(struct hfi1_devdata *dd, int node)
                 cpumask_clear(&set->used);
         }
 
-        entry = node_affinity_lookup(dd->node);
-        /* CPUs used by interrupt handlers */
-        cpumask_copy(intrs, (entry->def_intr.gen ?
-                             &entry->def_intr.mask :
-                             &entry->def_intr.used));
-        cpumask_or(intrs, intrs, (entry->rcv_intr.gen ?
-                                  &entry->rcv_intr.mask :
-                                  &entry->rcv_intr.used));
+        /*
+         * If NUMA node has CPUs used by interrupt handlers, include them in the
+         * interrupt handler mask.
+         */
+        entry = node_affinity_lookup(node);
+        if (entry) {
+                cpumask_copy(intrs, (entry->def_intr.gen ?
+                                     &entry->def_intr.mask :
+                                     &entry->def_intr.used));
+                cpumask_or(intrs, intrs, (entry->rcv_intr.gen ?
+                                          &entry->rcv_intr.mask :
+                                          &entry->rcv_intr.used));
+                cpumask_or(intrs, intrs, &entry->general_intr_mask);
+        }
         hfi1_cdbg(PROC, "CPUs used by interrupts: %*pbl",
                   cpumask_pr_args(intrs));
...
@@ -107,6 +107,7 @@ struct hfi1_affinity_node {
         int node;
         struct cpu_mask_set def_intr;
         struct cpu_mask_set rcv_intr;
+        struct cpumask general_intr_mask;
         struct list_head list;
 };
@@ -118,7 +119,7 @@ struct hfi1_affinity_node_list {
         spinlock_t lock;
 };
 
-void node_affinity_init(void);
+int node_affinity_init(void);
 void node_affinity_destroy(void);
 extern struct hfi1_affinity_node_list node_affinity;
...
@@ -1235,6 +1235,8 @@ int handle_receive_interrupt_nodma_rtail(struct hfi1_ctxtdata *, int);
 int handle_receive_interrupt_dma_rtail(struct hfi1_ctxtdata *, int);
 void set_all_slowpath(struct hfi1_devdata *dd);
 
+extern const struct pci_device_id hfi1_pci_tbl[];
+
 /* receive packet handler dispositions */
 #define RCV_PKT_OK      0x0 /* keep going */
 #define RCV_PKT_LIMIT   0x1 /* stop, hit limit, start thread */
...
@@ -1162,7 +1162,7 @@ static int init_one(struct pci_dev *, const struct pci_device_id *);
 #define DRIVER_LOAD_MSG "Intel " DRIVER_NAME " loaded: "
 #define PFX DRIVER_NAME ": "
 
-static const struct pci_device_id hfi1_pci_tbl[] = {
+const struct pci_device_id hfi1_pci_tbl[] = {
         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL0) },
         { PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL1) },
         { 0, }
@@ -1198,7 +1198,9 @@ static int __init hfi1_mod_init(void)
         if (ret)
                 goto bail;
 
-        node_affinity_init();
+        ret = node_affinity_init();
+        if (ret)
+                goto bail;
 
         /* validate max MTU before any devices start */
         if (!valid_opa_max_mtu(hfi1_max_mtu)) {
...