Diffstat (limited to 'target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch')
-rw-r--r-- | target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch | 128
1 files changed, 128 insertions, 0 deletions
diff --git a/target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch b/target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch
new file mode 100644
index 0000000000..9936ebf320
--- /dev/null
+++ b/target/linux/mvebu/patches-4.4/042-net-mvneta-Fix-race-condition-during-stopping.patch
@@ -0,0 +1,128 @@
+From: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Date: Thu, 4 Feb 2016 22:09:29 +0100
+Subject: [PATCH] net: mvneta: Fix race condition during stopping
+
+When stopping the port, the CPU notifier are still there whereas the
+mvneta_stop_dev function calls mvneta_percpu_disable() on each CPUs.
+It was possible to have a new CPU coming at this point which could be
+racy.
+
+This patch adds a flag preventing executing the code notifier for a new
+CPU when the port is stopping. It also uses the spinlock introduces
+previously. To avoid the deadlock, the lock has been moved outside the
+mvneta_percpu_elect function.
+
+Signed-off-by: Gregory CLEMENT <gregory.clement@free-electrons.com>
+Signed-off-by: David S. Miller <davem@davemloft.net>
+---
+
+--- a/drivers/net/ethernet/marvell/mvneta.c
++++ b/drivers/net/ethernet/marvell/mvneta.c
+@@ -374,6 +374,7 @@ struct mvneta_port {
+ 	 * ensuring that the configuration remains coherent.
+ 	 */
+ 	spinlock_t lock;
++	bool is_stopped;
+ 
+ 	/* Core clock */
+ 	struct clk *clk;
+@@ -2853,16 +2854,14 @@ static void mvneta_percpu_disable(void *
+ 	disable_percpu_irq(pp->dev->irq);
+ }
+ 
++/* Electing a CPU must be done in an atomic way: it should be done
++ * after or before the removal/insertion of a CPU and this function is
++ * not reentrant.
++ */
+ static void mvneta_percpu_elect(struct mvneta_port *pp)
+ {
+ 	int elected_cpu = 0, max_cpu, cpu, i = 0;
+ 
+-	/* Electing a CPU must be done in an atomic way: it should be
+-	 * done after or before the removal/insertion of a CPU and
+-	 * this function is not reentrant.
+-	 */
+-	spin_lock(&pp->lock);
+-
+ 	/* Use the cpu associated to the rxq when it is online, in all
+ 	 * the other cases, use the cpu 0 which can't be offline.
+ 	 */
+@@ -2906,7 +2905,6 @@ static void mvneta_percpu_elect(struct m
+ 		i++;
+ 
+ 	}
+-	spin_unlock(&pp->lock);
+ };
+ 
+ static int mvneta_percpu_notifier(struct notifier_block *nfb,
+@@ -2920,6 +2918,14 @@ static int mvneta_percpu_notifier(struct
+ 	switch (action) {
+ 	case CPU_ONLINE:
+ 	case CPU_ONLINE_FROZEN:
++		spin_lock(&pp->lock);
++		/* Configuring the driver for a new CPU while the
++		 * driver is stopping is racy, so just avoid it.
++		 */
++		if (pp->is_stopped) {
++			spin_unlock(&pp->lock);
++			break;
++		}
+ 		netif_tx_stop_all_queues(pp->dev);
+ 
+ 		/* We have to synchronise on tha napi of each CPU
+@@ -2957,6 +2963,7 @@ static int mvneta_percpu_notifier(struct
+ 			    MVNETA_CAUSE_LINK_CHANGE |
+ 			    MVNETA_CAUSE_PSC_SYNC_CHANGE);
+ 		netif_tx_start_all_queues(pp->dev);
++		spin_unlock(&pp->lock);
+ 		break;
+ 	case CPU_DOWN_PREPARE:
+ 	case CPU_DOWN_PREPARE_FROZEN:
+@@ -2981,7 +2988,9 @@ static int mvneta_percpu_notifier(struct
+ 	case CPU_DEAD:
+ 	case CPU_DEAD_FROZEN:
+ 		/* Check if a new CPU must be elected now this on is down */
++		spin_lock(&pp->lock);
+ 		mvneta_percpu_elect(pp);
++		spin_unlock(&pp->lock);
+ 		/* Unmask all ethernet port interrupts */
+ 		on_each_cpu(mvneta_percpu_unmask_interrupt, pp, true);
+ 		mvreg_write(pp, MVNETA_INTR_MISC_MASK,
+@@ -3033,7 +3042,7 @@ static int mvneta_open(struct net_device
+ 	 */
+ 	on_each_cpu(mvneta_percpu_enable, pp, true);
+ 
+-
++	pp->is_stopped = false;
+ 	/* Register a CPU notifier to handle the case where our CPU
+ 	 * might be taken offline.
+ 	 */
+@@ -3066,9 +3075,18 @@ static int mvneta_stop(struct net_device
+ {
+ 	struct mvneta_port *pp = netdev_priv(dev);
+ 
++	/* Inform that we are stopping so we don't want to setup the
++	 * driver for new CPUs in the notifiers
++	 */
++	spin_lock(&pp->lock);
++	pp->is_stopped = true;
+ 	mvneta_stop_dev(pp);
+ 	mvneta_mdio_remove(pp);
+ 	unregister_cpu_notifier(&pp->cpu_notifier);
++	/* Now that the notifier are unregistered, we can release le
++	 * lock
++	 */
++	spin_unlock(&pp->lock);
+ 	on_each_cpu(mvneta_percpu_disable, pp, true);
+ 	free_percpu_irq(dev->irq, pp->ports);
+ 	mvneta_cleanup_rxqs(pp);
+@@ -3339,7 +3357,9 @@ static int mvneta_config_rss(struct mvn
+ 	mvreg_write(pp, MVNETA_PORT_CONFIG, val);
+ 
+ 	/* Update the elected CPU matching the new rxq_def */
++	spin_lock(&pp->lock);
+ 	mvneta_percpu_elect(pp);
++	spin_unlock(&pp->lock);
+ 
+ 	/* We have to synchronise on the napi of each CPU */
+ 	for_each_online_cpu(cpu) {
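
Editor's note (not part of the patch): the synchronisation the commit message describes is a stop flag plus a lock, checked in the hotplug notifier and raised before teardown begins. The following is a minimal userspace sketch of that pattern only, assuming a pthread mutex as a stand-in for the kernel spinlock; fake_port, fake_notifier and fake_stop are illustrative names and not mvneta code.

/* Sketch of the is_stopped + lock pattern. Build with: cc -pthread sketch.c */
#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct fake_port {
	pthread_mutex_t lock;   /* plays the role of pp->lock */
	bool is_stopped;        /* plays the role of pp->is_stopped */
};

/* Analogue of the CPU_ONLINE notifier path: check the flag under the
 * lock and back off if the port is already being stopped, so per-CPU
 * setup never races with teardown.
 */
static void *fake_notifier(void *arg)
{
	struct fake_port *port = arg;

	pthread_mutex_lock(&port->lock);
	if (port->is_stopped) {
		pthread_mutex_unlock(&port->lock);
		printf("notifier: port is stopping, skipping per-CPU setup\n");
		return NULL;
	}
	printf("notifier: per-CPU setup would run here\n");
	pthread_mutex_unlock(&port->lock);
	return NULL;
}

/* Analogue of the stop path: raise the flag under the lock before the
 * teardown, so a concurrent notifier has either finished already or
 * will see is_stopped and bail out.
 */
static void fake_stop(struct fake_port *port)
{
	pthread_mutex_lock(&port->lock);
	port->is_stopped = true;
	/* teardown (stop device, unregister notifier) would happen here */
	pthread_mutex_unlock(&port->lock);
}

int main(void)
{
	struct fake_port port = { PTHREAD_MUTEX_INITIALIZER, false };
	pthread_t tid;

	pthread_create(&tid, NULL, fake_notifier, &port);
	fake_stop(&port);
	pthread_join(tid, NULL);
	return 0;
}

As in the patch, the election step itself takes the lock at its call sites rather than inside the elected function, which is what avoids the deadlock the commit message mentions.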