Commit 87c831ce authored by David S. Miller's avatar David S. Miller

Merge branch 'net-macb-Wake-on-Lan-magic-packet-GEM-and-MACB-handling'

Nicolas Ferre says:

====================
net: macb: Wake-on-Lan magic packet GEM and MACB handling

Here is the second part of the support for WoL magic-packet in the current macb
driver. This series addresses the bulk of the feature and is based on current
net-next/master.

MACB and GEM code must co-exist, and since they don't share exactly the same
register layout, I had to specialize the suspend/resume paths a bit and plug in
a dedicated IRQ handler in order to avoid overloading the "normal" IRQ hot path.

These changes were tested on both sam9x60 which embeds a MACB+FIFO controller
and sama5d2 which has a GEM+packet buffer type of controller.

Best regards,
  Nicolas

Changes in v7:
- Release the spinlock before exiting macb_suspend/resume in case of error
  changing IRQ handler

Changes in v6:
- rebase on net-next/master now that the "fixes" patches of the series are
  merged in both net and net-next.
- GEM addition and MACB update to finish the support of WoL magic-packet on the
  two revisions of the controller.
====================
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parents a8b7b2d0 9d45c8e8
...@@ -90,6 +90,7 @@ ...@@ -90,6 +90,7 @@
#define GEM_SA3T 0x009C /* Specific3 Top */ #define GEM_SA3T 0x009C /* Specific3 Top */
#define GEM_SA4B 0x00A0 /* Specific4 Bottom */ #define GEM_SA4B 0x00A0 /* Specific4 Bottom */
#define GEM_SA4T 0x00A4 /* Specific4 Top */ #define GEM_SA4T 0x00A4 /* Specific4 Top */
#define GEM_WOL 0x00b8 /* Wake on LAN */
#define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */ #define GEM_EFTSH 0x00e8 /* PTP Event Frame Transmitted Seconds Register 47:32 */
#define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */ #define GEM_EFRSH 0x00ec /* PTP Event Frame Received Seconds Register 47:32 */
#define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */ #define GEM_PEFTSH 0x00f0 /* PTP Peer Event Frame Transmitted Seconds Register 47:32 */
...@@ -396,6 +397,8 @@ ...@@ -396,6 +397,8 @@
#define MACB_PDRSFT_SIZE 1 #define MACB_PDRSFT_SIZE 1
#define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */ #define MACB_SRI_OFFSET 26 /* TSU Seconds Register Increment */
#define MACB_SRI_SIZE 1 #define MACB_SRI_SIZE 1
#define GEM_WOL_OFFSET 28 /* Enable wake-on-lan interrupt */
#define GEM_WOL_SIZE 1
/* Timer increment fields */ /* Timer increment fields */
#define MACB_TI_CNS_OFFSET 0 #define MACB_TI_CNS_OFFSET 0
......
...@@ -1517,6 +1517,64 @@ static void macb_tx_restart(struct macb_queue *queue) ...@@ -1517,6 +1517,64 @@ static void macb_tx_restart(struct macb_queue *queue)
macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART)); macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
} }
/* Minimal IRQ handler installed on queue 0 while the MACB is suspended
 * with Wake-on-LAN armed: acknowledge the WoL event and wake the system.
 */
static irqreturn_t macb_wol_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	u32 isr = queue_readl(queue, ISR);

	/* Shared IRQ line: no status pending means it wasn't for us. */
	if (unlikely(!isr))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	if (isr & MACB_BIT(WOL)) {
		/* One-shot wakeup: mask the WoL interrupt and clear the
		 * WoL event enables so it cannot retrigger.
		 */
		queue_writel(queue, IDR, MACB_BIT(WOL));
		macb_writel(bp, WOL, 0);
		netdev_vdbg(bp->dev, "MACB WoL: queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)isr);
		/* Some controller revisions need an explicit write-to-clear. */
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, MACB_BIT(WOL));
		/* Report the wakeup event to the PM core. */
		pm_wakeup_event(&bp->pdev->dev, 0);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
/* GEM flavor of the suspend-time Wake-on-LAN handler: same flow as
 * macb_wol_interrupt() but using the GEM register/bit layout.
 */
static irqreturn_t gem_wol_interrupt(int irq, void *dev_id)
{
	struct macb_queue *queue = dev_id;
	struct macb *bp = queue->bp;
	u32 isr = queue_readl(queue, ISR);

	/* Shared IRQ line: no status pending means it wasn't for us. */
	if (unlikely(!isr))
		return IRQ_NONE;

	spin_lock(&bp->lock);

	if (isr & GEM_BIT(WOL)) {
		/* One-shot wakeup: mask the WoL interrupt and clear the
		 * WoL event enables so it cannot retrigger.
		 */
		queue_writel(queue, IDR, GEM_BIT(WOL));
		gem_writel(bp, WOL, 0);
		netdev_vdbg(bp->dev, "GEM WoL: queue = %u, isr = 0x%08lx\n",
			    (unsigned int)(queue - bp->queues),
			    (unsigned long)isr);
		/* Some controller revisions need an explicit write-to-clear. */
		if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
			queue_writel(queue, ISR, GEM_BIT(WOL));
		/* Report the wakeup event to the PM core. */
		pm_wakeup_event(&bp->pdev->dev, 0);
	}

	spin_unlock(&bp->lock);

	return IRQ_HANDLED;
}
static irqreturn_t macb_interrupt(int irq, void *dev_id) static irqreturn_t macb_interrupt(int irq, void *dev_id)
{ {
struct macb_queue *queue = dev_id; struct macb_queue *queue = dev_id;
...@@ -3316,6 +3374,8 @@ static const struct ethtool_ops macb_ethtool_ops = { ...@@ -3316,6 +3374,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
static const struct ethtool_ops gem_ethtool_ops = { static const struct ethtool_ops gem_ethtool_ops = {
.get_regs_len = macb_get_regs_len, .get_regs_len = macb_get_regs_len,
.get_regs = macb_get_regs, .get_regs = macb_get_regs,
.get_wol = macb_get_wol,
.set_wol = macb_set_wol,
.get_link = ethtool_op_get_link, .get_link = ethtool_op_get_link,
.get_ts_info = macb_get_ts_info, .get_ts_info = macb_get_ts_info,
.get_ethtool_stats = gem_get_ethtool_stats, .get_ethtool_stats = gem_get_ethtool_stats,
...@@ -4567,33 +4627,77 @@ static int __maybe_unused macb_suspend(struct device *dev) ...@@ -4567,33 +4627,77 @@ static int __maybe_unused macb_suspend(struct device *dev)
struct macb_queue *queue = bp->queues; struct macb_queue *queue = bp->queues;
unsigned long flags; unsigned long flags;
unsigned int q; unsigned int q;
int err;
if (!netif_running(netdev)) if (!netif_running(netdev))
return 0; return 0;
if (bp->wol & MACB_WOL_ENABLED) { if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IER, MACB_BIT(WOL)); spin_lock_irqsave(&bp->lock, flags);
/* Flush all status bits */
macb_writel(bp, TSR, -1);
macb_writel(bp, RSR, -1);
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) {
/* Disable all interrupts */
queue_writel(queue, IDR, -1);
queue_readl(queue, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(queue, ISR, -1);
}
/* Change interrupt handler and
* Enable WoL IRQ on queue 0
*/
devm_free_irq(dev, bp->queues[0].irq, bp->queues);
if (macb_is_gem(bp)) {
err = devm_request_irq(dev, bp->queues[0].irq, gem_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
dev_err(dev,
"Unable to request IRQ %d (error %d)\n",
bp->queues[0].irq, err);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
queue_writel(bp->queues, IER, GEM_BIT(WOL));
gem_writel(bp, WOL, MACB_BIT(MAG));
} else {
err = devm_request_irq(dev, bp->queues[0].irq, macb_wol_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
dev_err(dev,
"Unable to request IRQ %d (error %d)\n",
bp->queues[0].irq, err);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
queue_writel(bp->queues, IER, MACB_BIT(WOL));
macb_writel(bp, WOL, MACB_BIT(MAG)); macb_writel(bp, WOL, MACB_BIT(MAG));
}
spin_unlock_irqrestore(&bp->lock, flags);
enable_irq_wake(bp->queues[0].irq); enable_irq_wake(bp->queues[0].irq);
netif_device_detach(netdev); }
} else {
netif_device_detach(netdev); netif_device_detach(netdev);
for (q = 0, queue = bp->queues; q < bp->num_queues; for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue) ++q, ++queue)
napi_disable(&queue->napi); napi_disable(&queue->napi);
if (!(bp->wol & MACB_WOL_ENABLED)) {
rtnl_lock(); rtnl_lock();
phylink_stop(bp->phylink); phylink_stop(bp->phylink);
rtnl_unlock(); rtnl_unlock();
spin_lock_irqsave(&bp->lock, flags); spin_lock_irqsave(&bp->lock, flags);
macb_reset_hw(bp); macb_reset_hw(bp);
spin_unlock_irqrestore(&bp->lock, flags); spin_unlock_irqrestore(&bp->lock, flags);
}
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO); bp->pm_data.usrio = macb_or_gem_readl(bp, USRIO);
if (netdev->hw_features & NETIF_F_NTUPLE) if (netdev->hw_features & NETIF_F_NTUPLE)
bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT); bp->pm_data.scrt2 = gem_readl_n(bp, ETHT, SCRT2_ETHT);
}
if (bp->ptp_info) if (bp->ptp_info)
bp->ptp_info->ptp_remove(netdev); bp->ptp_info->ptp_remove(netdev);
...@@ -4608,7 +4712,9 @@ static int __maybe_unused macb_resume(struct device *dev) ...@@ -4608,7 +4712,9 @@ static int __maybe_unused macb_resume(struct device *dev)
struct net_device *netdev = dev_get_drvdata(dev); struct net_device *netdev = dev_get_drvdata(dev);
struct macb *bp = netdev_priv(netdev); struct macb *bp = netdev_priv(netdev);
struct macb_queue *queue = bp->queues; struct macb_queue *queue = bp->queues;
unsigned long flags;
unsigned int q; unsigned int q;
int err;
if (!netif_running(netdev)) if (!netif_running(netdev))
return 0; return 0;
...@@ -4617,11 +4723,45 @@ static int __maybe_unused macb_resume(struct device *dev) ...@@ -4617,11 +4723,45 @@ static int __maybe_unused macb_resume(struct device *dev)
pm_runtime_force_resume(dev); pm_runtime_force_resume(dev);
if (bp->wol & MACB_WOL_ENABLED) { if (bp->wol & MACB_WOL_ENABLED) {
macb_writel(bp, IDR, MACB_BIT(WOL)); spin_lock_irqsave(&bp->lock, flags);
/* Disable WoL */
if (macb_is_gem(bp)) {
queue_writel(bp->queues, IDR, GEM_BIT(WOL));
gem_writel(bp, WOL, 0);
} else {
queue_writel(bp->queues, IDR, MACB_BIT(WOL));
macb_writel(bp, WOL, 0); macb_writel(bp, WOL, 0);
}
/* Clear ISR on queue 0 */
queue_readl(bp->queues, ISR);
if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
queue_writel(bp->queues, ISR, -1);
/* Replace interrupt handler on queue 0 */
devm_free_irq(dev, bp->queues[0].irq, bp->queues);
err = devm_request_irq(dev, bp->queues[0].irq, macb_interrupt,
IRQF_SHARED, netdev->name, bp->queues);
if (err) {
dev_err(dev,
"Unable to request IRQ %d (error %d)\n",
bp->queues[0].irq, err);
spin_unlock_irqrestore(&bp->lock, flags);
return err;
}
spin_unlock_irqrestore(&bp->lock, flags);
disable_irq_wake(bp->queues[0].irq); disable_irq_wake(bp->queues[0].irq);
} else {
macb_writel(bp, NCR, MACB_BIT(MPE)); /* Now make sure we disable phy before moving
* to common restore path
*/
rtnl_lock();
phylink_stop(bp->phylink);
rtnl_unlock();
}
for (q = 0, queue = bp->queues; q < bp->num_queues;
++q, ++queue)
napi_enable(&queue->napi);
if (netdev->hw_features & NETIF_F_NTUPLE) if (netdev->hw_features & NETIF_F_NTUPLE)
gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2); gem_writel_n(bp, ETHT, SCRT2_ETHT, bp->pm_data.scrt2);
...@@ -4629,17 +4769,14 @@ static int __maybe_unused macb_resume(struct device *dev) ...@@ -4629,17 +4769,14 @@ static int __maybe_unused macb_resume(struct device *dev)
if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio); macb_or_gem_writel(bp, USRIO, bp->pm_data.usrio);
for (q = 0, queue = bp->queues; q < bp->num_queues; macb_writel(bp, NCR, MACB_BIT(MPE));
++q, ++queue) macb_init_hw(bp);
napi_enable(&queue->napi); macb_set_rx_mode(netdev);
macb_restore_features(bp);
rtnl_lock(); rtnl_lock();
phylink_start(bp->phylink); phylink_start(bp->phylink);
rtnl_unlock(); rtnl_unlock();
}
macb_init_hw(bp);
macb_set_rx_mode(netdev);
macb_restore_features(bp);
netif_device_attach(netdev); netif_device_attach(netdev);
if (bp->ptp_info) if (bp->ptp_info)
bp->ptp_info->ptp_init(netdev); bp->ptp_info->ptp_init(netdev);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment