Commit 5888511e authored by Gregory CLEMENT's avatar Gregory CLEMENT Committed by David S. Miller

net: mvneta: The mvneta_percpu_elect function should be atomic

Electing a CPU must be done in an atomic way: it should be done after or
before the removal/insertion of a CPU and this function is not reentrant.

During the loop in mvneta_percpu_elect we associate the queues with the
CPUs; if there is a topology change during this loop, then the mapping
between the CPUs and the queues could be wrong. During this loop the
interrupt mask is also updated for each CPU; it should not be changed
at the same time by other parts of the driver.

This patch adds a spinlock to create the needed critical sections.
Signed-off-by: default avatarGregory CLEMENT <gregory.clement@free-electrons.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent db488c10
...@@ -370,6 +370,10 @@ struct mvneta_port { ...@@ -370,6 +370,10 @@ struct mvneta_port {
struct net_device *dev; struct net_device *dev;
struct notifier_block cpu_notifier; struct notifier_block cpu_notifier;
int rxq_def; int rxq_def;
/* Protect the access to the percpu interrupt registers,
* ensuring that the configuration remains coherent.
*/
spinlock_t lock;
/* Core clock */ /* Core clock */
struct clk *clk; struct clk *clk;
...@@ -2855,6 +2859,12 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) ...@@ -2855,6 +2859,12 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
{ {
int elected_cpu = 0, max_cpu, cpu, i = 0; int elected_cpu = 0, max_cpu, cpu, i = 0;
/* Electing a CPU must be done in an atomic way: it should be
* done after or before the removal/insertion of a CPU and
* this function is not reentrant.
*/
spin_lock(&pp->lock);
/* Use the cpu associated to the rxq when it is online, in all /* Use the cpu associated to the rxq when it is online, in all
* the other cases, use the cpu 0 which can't be offline. * the other cases, use the cpu 0 which can't be offline.
*/ */
...@@ -2898,6 +2908,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp) ...@@ -2898,6 +2908,7 @@ static void mvneta_percpu_elect(struct mvneta_port *pp)
i++; i++;
} }
spin_unlock(&pp->lock);
}; };
static int mvneta_percpu_notifier(struct notifier_block *nfb, static int mvneta_percpu_notifier(struct notifier_block *nfb,
...@@ -2952,8 +2963,13 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb, ...@@ -2952,8 +2963,13 @@ static int mvneta_percpu_notifier(struct notifier_block *nfb,
case CPU_DOWN_PREPARE: case CPU_DOWN_PREPARE:
case CPU_DOWN_PREPARE_FROZEN: case CPU_DOWN_PREPARE_FROZEN:
netif_tx_stop_all_queues(pp->dev); netif_tx_stop_all_queues(pp->dev);
/* Thanks to this lock we are sure that any pending
* cpu election is done
*/
spin_lock(&pp->lock);
/* Mask all ethernet port interrupts */ /* Mask all ethernet port interrupts */
on_each_cpu(mvneta_percpu_mask_interrupt, pp, true); on_each_cpu(mvneta_percpu_mask_interrupt, pp, true);
spin_unlock(&pp->lock);
napi_synchronize(&port->napi); napi_synchronize(&port->napi);
napi_disable(&port->napi); napi_disable(&port->napi);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment