Commit 9b07d735 authored by David S. Miller

Merge branch 'mlx4'

Amir Vadai says:

====================
cpumask,net: affinity hint helper function

This patchset sets an affinity hint to influence IRQs to be allocated on the
same NUMA node as the one where the card resides, as discussed in
http://www.spinics.net/lists/netdev/msg271497.html

If the number of IRQs allocated is greater than the number of local NUMA cores,
all local cores will be used first and the remaining IRQs will be placed on a
remote NUMA node.
If there is no NUMA support, IRQs and cores will be mapped 1:1.

Since the utility function that calculates the mapping could be useful to other
multiqueue (mq) drivers in the kernel, it was added to cpumask.[ch].

This patchset was tested and applied on top of net-next, since the first
consumer is a network device (mlx4_en), over commit fff1f59b ("mac802154:
llsec: add forgotten list_del_rcu in key removal").
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents d4f38620 9e311e77
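
For illustration of the policy (hypothetical topology, not part of the commit):
on a machine with eight online cores, cores 0-3 on NUMA node 0 (where the NIC
sits) and cores 4-7 on node 1, IRQs 0-3 would be hinted to local cores 0-3,
IRQs 4-7 to remote cores 4-7, and any further IRQs wrap around to the local
cores again, since the index is taken modulo the number of online CPUs. Without
NUMA information the helper simply walks the online CPUs, giving the 1:1
IRQ-to-core mapping described above. A usage sketch for the helper appears
after the diff below.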
@@ -163,6 +163,13 @@ int mlx4_en_activate_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq *cq,
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_tx_cq,
 			       NAPI_POLL_WEIGHT);
 	} else {
+		struct mlx4_en_rx_ring *ring = priv->rx_ring[cq->ring];
+
+		err = irq_set_affinity_hint(cq->mcq.irq,
+					    ring->affinity_mask);
+		if (err)
+			mlx4_warn(mdev, "Failed setting affinity hint\n");
+
 		netif_napi_add(cq->dev, &cq->napi, mlx4_en_poll_rx_cq, 64);
 		napi_hash_add(&cq->napi);
 	}
@@ -179,8 +186,11 @@ void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
 	mlx4_en_unmap_buffer(&cq->wqres.buf);
 	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
-	if (priv->mdev->dev->caps.comp_pool && cq->vector)
+	if (priv->mdev->dev->caps.comp_pool && cq->vector) {
+		if (!cq->is_tx)
+			irq_set_affinity_hint(cq->mcq.irq, NULL);
 		mlx4_release_eq(priv->mdev->dev, cq->vector);
+	}
 	cq->vector = 0;
 	cq->buf_size = 0;
 	cq->buf = NULL;
@@ -1526,6 +1526,27 @@ static void mlx4_en_linkstate(struct work_struct *work)
 	mutex_unlock(&mdev->state_lock);
 }
 
+static int mlx4_en_init_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+	struct mlx4_en_rx_ring *ring = priv->rx_ring[ring_idx];
+	int numa_node = priv->mdev->dev->numa_node;
+	int ret = 0;
+
+	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	ret = cpumask_set_cpu_local_first(ring_idx, numa_node,
+					  ring->affinity_mask);
+	if (ret)
+		free_cpumask_var(ring->affinity_mask);
+
+	return ret;
+}
+
+static void mlx4_en_free_affinity_hint(struct mlx4_en_priv *priv, int ring_idx)
+{
+	free_cpumask_var(priv->rx_ring[ring_idx]->affinity_mask);
+}
+
 int mlx4_en_start_port(struct net_device *dev)
 {
@@ -1567,9 +1588,16 @@ int mlx4_en_start_port(struct net_device *dev)
 		mlx4_en_cq_init_lock(cq);
 
+		err = mlx4_en_init_affinity_hint(priv, i);
+		if (err) {
+			en_err(priv, "Failed preparing IRQ affinity hint\n");
+			goto cq_err;
+		}
+
 		err = mlx4_en_activate_cq(priv, cq, i);
 		if (err) {
 			en_err(priv, "Failed activating Rx CQ\n");
+			mlx4_en_free_affinity_hint(priv, i);
 			goto cq_err;
 		}
 		for (j = 0; j < cq->size; j++)
@@ -1578,6 +1606,7 @@ int mlx4_en_start_port(struct net_device *dev)
 		if (err) {
 			en_err(priv, "Failed setting cq moderation parameters\n");
 			mlx4_en_deactivate_cq(priv, cq);
+			mlx4_en_free_affinity_hint(priv, i);
 			goto cq_err;
 		}
 		mlx4_en_arm_cq(priv, cq);
@@ -1715,8 +1744,10 @@ int mlx4_en_start_port(struct net_device *dev)
 mac_err:
 	mlx4_en_put_qp(priv);
 cq_err:
-	while (rx_index--)
+	while (rx_index--) {
 		mlx4_en_deactivate_cq(priv, priv->rx_cq[rx_index]);
+		mlx4_en_free_affinity_hint(priv, i);
+	}
 	for (i = 0; i < priv->rx_ring_num; i++)
 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
@@ -1847,6 +1878,8 @@ void mlx4_en_stop_port(struct net_device *dev, int detach)
 		msleep(1);
 		mlx4_en_deactivate_rx_ring(priv, priv->rx_ring[i]);
 		mlx4_en_deactivate_cq(priv, cq);
+
+		mlx4_en_free_affinity_hint(priv, i);
 	}
 }
@@ -313,6 +313,7 @@ struct mlx4_en_rx_ring {
 	unsigned long csum_ok;
 	unsigned long csum_none;
 	int hwtstamp_rx_filter;
+	cpumask_var_t affinity_mask;
 };
 
 struct mlx4_en_cq {
@@ -142,6 +142,13 @@ static inline unsigned int cpumask_any_but(const struct cpumask *mask,
 	return 1;
 }
 
+static inline int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+{
+	set_bit(0, cpumask_bits(dstp));
+
+	return 0;
+}
+
 #define for_each_cpu(cpu, mask)			\
 	for ((cpu) = 0; (cpu) < 1; (cpu)++, (void)mask)
 #define for_each_cpu_not(cpu, mask)	\
@@ -192,6 +199,7 @@ static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
 int cpumask_next_and(int n, const struct cpumask *, const struct cpumask *);
 int cpumask_any_but(const struct cpumask *mask, unsigned int cpu);
+int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp);
 
 /**
  * for_each_cpu - iterate over every cpu in a mask
@@ -164,3 +164,66 @@ void __init free_bootmem_cpumask_var(cpumask_var_t mask)
 	memblock_free_early(__pa(mask), cpumask_size());
 }
 #endif
+
+/**
+ * cpumask_set_cpu_local_first - set i'th cpu with local numa cpu's first
+ *
+ * @i: index number
+ * @numa_node: local numa_node
+ * @dstp: cpumask with the relevant cpu bit set according to the policy
+ *
+ * This function sets the cpumask according to a numa aware policy.
+ * cpumask could be used as an affinity hint for the IRQ related to a
+ * queue. When the policy is to spread queues across cores - local cores
+ * first.
+ *
+ * Returns 0 on success, -ENOMEM for no memory, and -EAGAIN when failed to set
+ * the cpu bit and need to re-call the function.
+ */
+int cpumask_set_cpu_local_first(int i, int numa_node, cpumask_t *dstp)
+{
+	cpumask_var_t mask;
+	int cpu;
+	int ret = 0;
+
+	if (!zalloc_cpumask_var(&mask, GFP_KERNEL))
+		return -ENOMEM;
+
+	i %= num_online_cpus();
+
+	if (!cpumask_of_node(numa_node)) {
+		/* Use all online cpu's for non numa aware system */
+		cpumask_copy(mask, cpu_online_mask);
+	} else {
+		int n;
+
+		cpumask_and(mask,
+			    cpumask_of_node(numa_node), cpu_online_mask);
+
+		n = cpumask_weight(mask);
+		if (i >= n) {
+			i -= n;
+
+			/* If index > number of local cpu's, mask out local
+			 * cpu's
+			 */
+			cpumask_andnot(mask, cpu_online_mask, mask);
+		}
+	}
+
+	for_each_cpu(cpu, mask) {
+		if (--i < 0)
+			goto out;
+	}
+
+	ret = -EAGAIN;
+
+out:
+	free_cpumask_var(mask);
+
+	if (!ret)
+		cpumask_set_cpu(cpu, dstp);
+
+	return ret;
+}
+EXPORT_SYMBOL(cpumask_set_cpu_local_first);
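
As a usage sketch (hypothetical, not taken from this patchset): another
multiqueue driver could pair the new helper with the existing
irq_set_affinity_hint() the same way mlx4_en does above. The my_rx_ring
structure, its fields and the function name below are illustrative
assumptions only.

/* Hypothetical sketch only -- not part of this commit. */
#include <linux/cpumask.h>
#include <linux/gfp.h>
#include <linux/interrupt.h>

struct my_rx_ring {
	unsigned int irq;		/* MSI-X vector servicing this ring */
	cpumask_var_t affinity_mask;	/* must stay allocated while hinted */
};

static int my_ring_set_affinity_hint(struct my_rx_ring *ring, int ring_idx,
				     int numa_node)
{
	int err;

	if (!zalloc_cpumask_var(&ring->affinity_mask, GFP_KERNEL))
		return -ENOMEM;

	/* Pick the ring_idx'th online CPU, local NUMA cores first */
	err = cpumask_set_cpu_local_first(ring_idx, numa_node,
					  ring->affinity_mask);
	if (err) {
		free_cpumask_var(ring->affinity_mask);
		return err;
	}

	/* The IRQ core keeps a pointer to the mask, so keep it allocated
	 * for as long as the hint is installed.
	 */
	return irq_set_affinity_hint(ring->irq, ring->affinity_mask);
}

On teardown the hint would be cleared with irq_set_affinity_hint(ring->irq,
NULL) before free_cpumask_var(), mirroring the en_cq.c and en_netdev.c changes
above.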