Commit 5302a84e authored by David S. Miller

Merge tag 'linux-can-fixes-for-4.18-20180723' of ssh://gitolite.kernel.org/pub/scm/linux/kernel/git/mkl/linux-can

Marc Kleine-Budde says:

====================
pull-request: can 2018-07-23

this is a pull request of 12 patches for net/master.

The patch by Stephane Grosjean for the peak_canfd CAN driver fixes a problem
with older firmware. The next patch is by Roman Fietze and fixes the setup of
the CCCR register in the m_can driver. Nicholas Mc Guire's patch for the
mpc5xxx_can driver adds missing error checking. The two patches by Faiz Abbas
fix the runtime resume and clean up the probe function in the m_can driver. The
last 7 patches by Anssi Hannula fix several problems in the xilinx_can driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents c9ce1fa1 8ebd83bd
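
A note on the m_can runtime-PM fix below: pm_runtime_get_sync() increments the device's usage count even when it fails, and on success it may return 1 (device already active) rather than 0. The corrected m_can_clk_start() therefore treats only negative return values as errors, drops the reference with pm_runtime_put_noidle() on that path, and returns 0 on success. A minimal sketch of the pattern (illustrative only, not the driver's exact code):

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		/* the failed get still took a usage-count reference; drop it */
		pm_runtime_put_noidle(dev);
		return ret;
	}
	/* ... device is active, do the work ... */
	return 0;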
@@ -634,10 +634,12 @@ static int m_can_clk_start(struct m_can_priv *priv)
 	int err;
 
 	err = pm_runtime_get_sync(priv->device);
-	if (err)
+	if (err < 0) {
 		pm_runtime_put_noidle(priv->device);
+		return err;
+	}
 
-	return err;
+	return 0;
 }
 
 static void m_can_clk_stop(struct m_can_priv *priv)
@@ -1109,7 +1111,8 @@ static void m_can_chip_config(struct net_device *dev)
 	} else {
 	/* Version 3.1.x or 3.2.x */
-		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE);
+		cccr &= ~(CCCR_TEST | CCCR_MON | CCCR_BRSE | CCCR_FDOE |
+			  CCCR_NISO);
 
 		/* Only 3.2.x has NISO Bit implemented */
 		if (priv->can.ctrlmode & CAN_CTRLMODE_FD_NON_ISO)
@@ -1642,8 +1645,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
 	priv->can.clock.freq = clk_get_rate(cclk);
 	priv->mram_base = mram_addr;
 
-	m_can_of_parse_mram(priv, mram_config_vals);
-
 	platform_set_drvdata(pdev, dev);
 	SET_NETDEV_DEV(dev, &pdev->dev);
@@ -1666,6 +1667,8 @@ static int m_can_plat_probe(struct platform_device *pdev)
 		goto clk_disable;
 	}
 
+	m_can_of_parse_mram(priv, mram_config_vals);
+
 	devm_can_led_init(dev);
 
 	of_can_transceiver(dev);
@@ -1687,8 +1690,6 @@ static int m_can_plat_probe(struct platform_device *pdev)
 	return ret;
 }
 
-/* TODO: runtime PM with power down or sleep mode */
-
 static __maybe_unused int m_can_suspend(struct device *dev)
 {
 	struct net_device *ndev = dev_get_drvdata(dev);
@@ -1715,8 +1716,6 @@ static __maybe_unused int m_can_resume(struct device *dev)
 	pinctrl_pm_select_default_state(dev);
 
-	m_can_init_ram(priv);
-
 	priv->can.state = CAN_STATE_ERROR_ACTIVE;
 
 	if (netif_running(ndev)) {
@@ -1726,6 +1725,7 @@ static __maybe_unused int m_can_resume(struct device *dev)
 		if (ret)
 			return ret;
 
+		m_can_init_ram(priv);
 		m_can_start(ndev);
 		netif_device_attach(ndev);
 		netif_start_queue(ndev);
......
@@ -86,6 +86,11 @@ static u32 mpc52xx_can_get_clock(struct platform_device *ofdev,
 		return 0;
 	}
 	cdm = of_iomap(np_cdm, 0);
+	if (!cdm) {
+		of_node_put(np_cdm);
+		dev_err(&ofdev->dev, "can't map clock node!\n");
+		return 0;
+	}
 
 	if (in_8(&cdm->ipb_clk_sel) & 0x1)
 		freq *= 2;
......
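
The mpc5xxx_can hunk above adds the missing NULL check on of_iomap(); note that the reference held on np_cdm still has to be released on the new error path, which is why of_node_put() is called before returning. The general shape of the pattern (the names here are illustrative, not taken from the driver):

	regs = of_iomap(np, 0);
	if (!regs) {
		of_node_put(np);	/* drop the reference taken when the node was looked up */
		dev_err(dev, "can't map clock node!\n");
		return 0;		/* this helper reports an unknown clock frequency as 0 */
	}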
@@ -58,6 +58,10 @@ MODULE_LICENSE("GPL v2");
 #define PCIEFD_REG_SYS_VER1	0x0040	/* version reg #1 */
 #define PCIEFD_REG_SYS_VER2	0x0044	/* version reg #2 */
 
+#define PCIEFD_FW_VERSION(x, y, z)	(((u32)(x) << 24) | \
+					 ((u32)(y) << 16) | \
+					 ((u32)(z) << 8))
+
 /* System Control Registers Bits */
 #define PCIEFD_SYS_CTL_TS_RST	0x00000001	/* timestamp clock */
 #define PCIEFD_SYS_CTL_CLK_EN	0x00000002	/* system clock */
@@ -782,6 +786,21 @@ static int peak_pciefd_probe(struct pci_dev *pdev,
 		 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
 		 hw_ver_major, hw_ver_minor, hw_ver_sub);
 
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* FW < v3.3.0 DMA logic doesn't handle correctly the mix of 32-bit and
+	 * 64-bit logical addresses: this workaround forces usage of 32-bit
+	 * DMA addresses only when such a fw is detected.
+	 */
+	if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+	    PCIEFD_FW_VERSION(3, 3, 0)) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			dev_warn(&pdev->dev,
+				 "warning: can't set DMA mask %llxh (err %d)\n",
+				 DMA_BIT_MASK(32), err);
+	}
+#endif
+
 	/* stop system clock */
 	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
 			    PCIEFD_REG_SYS_CTL_CLR);
......
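
The PCIEFD_FW_VERSION() macro introduced above packs the major, minor and sub version numbers into one u32 so that firmware versions can be ordered with a single integer comparison: PCIEFD_FW_VERSION(3, 3, 0) evaluates to (3 << 24) | (3 << 16) | (0 << 8) = 0x03030000, so any firmware older than v3.3.0 produces a smaller value and the probe falls back to a 32-bit DMA mask when the kernel is built with CONFIG_ARCH_DMA_ADDR_T_64BIT.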
@@ -2,6 +2,7 @@
  *
  * Copyright (C) 2012 - 2014 Xilinx, Inc.
  * Copyright (C) 2009 PetaLogix. All rights reserved.
+ * Copyright (C) 2017 Sandvik Mining and Construction Oy
  *
  * Description:
  * This driver is developed for Axi CAN IP and for Zynq CANPS Controller.
@@ -25,8 +26,10 @@
 #include <linux/module.h>
 #include <linux/netdevice.h>
 #include <linux/of.h>
+#include <linux/of_device.h>
 #include <linux/platform_device.h>
 #include <linux/skbuff.h>
+#include <linux/spinlock.h>
 #include <linux/string.h>
 #include <linux/types.h>
 #include <linux/can/dev.h>
@@ -101,7 +104,7 @@ enum xcan_reg {
 #define XCAN_INTR_ALL		(XCAN_IXR_TXOK_MASK | XCAN_IXR_BSOFF_MASK |\
 				 XCAN_IXR_WKUP_MASK | XCAN_IXR_SLP_MASK | \
 				 XCAN_IXR_RXNEMP_MASK | XCAN_IXR_ERROR_MASK | \
-				 XCAN_IXR_ARBLST_MASK | XCAN_IXR_RXOK_MASK)
+				 XCAN_IXR_RXOFLW_MASK | XCAN_IXR_ARBLST_MASK)
 
 /* CAN register bit shift - XCAN_<REG>_<BIT>_SHIFT */
 #define XCAN_BTR_SJW_SHIFT		7  /* Synchronous jump width */
@@ -118,6 +121,7 @@ enum xcan_reg {
 /**
  * struct xcan_priv - This definition define CAN driver instance
  * @can:			CAN private data structure.
+ * @tx_lock:			Lock for synchronizing TX interrupt handling
  * @tx_head:			Tx CAN packets ready to send on the queue
  * @tx_tail:			Tx CAN packets successfully sended on the queue
  * @tx_max:			Maximum number packets the driver can send
@@ -132,6 +136,7 @@ enum xcan_reg {
  */
 struct xcan_priv {
 	struct can_priv can;
+	spinlock_t tx_lock;
 	unsigned int tx_head;
 	unsigned int tx_tail;
 	unsigned int tx_max;
@@ -159,6 +164,11 @@ static const struct can_bittiming_const xcan_bittiming_const = {
 	.brp_inc = 1,
 };
 
+#define XCAN_CAP_WATERMARK	0x0001
+struct xcan_devtype_data {
+	unsigned int caps;
+};
+
 /**
  * xcan_write_reg_le - Write a value to the device register little endian
  * @priv:	Driver private data structure
@@ -238,6 +248,10 @@ static int set_reset_mode(struct net_device *ndev)
 		usleep_range(500, 10000);
 	}
 
+	/* reset clears FIFOs */
+	priv->tx_head = 0;
+	priv->tx_tail = 0;
+
 	return 0;
 }
@@ -392,6 +406,7 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	struct net_device_stats *stats = &ndev->stats;
 	struct can_frame *cf = (struct can_frame *)skb->data;
 	u32 id, dlc, data[2] = {0, 0};
+	unsigned long flags;
 
 	if (can_dropped_invalid_skb(ndev, skb))
 		return NETDEV_TX_OK;
@@ -439,6 +454,9 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		data[1] = be32_to_cpup((__be32 *)(cf->data + 4));
 
 	can_put_echo_skb(skb, ndev, priv->tx_head % priv->tx_max);
+
+	spin_lock_irqsave(&priv->tx_lock, flags);
+
 	priv->tx_head++;
 
 	/* Write the Frame to Xilinx CAN TX FIFO */
@@ -454,10 +472,16 @@ static int xcan_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		stats->tx_bytes += cf->can_dlc;
 	}
 
+	/* Clear TX-FIFO-empty interrupt for xcan_tx_interrupt() */
+	if (priv->tx_max > 1)
+		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXFEMP_MASK);
+
 	/* Check if the TX buffer is full */
 	if ((priv->tx_head - priv->tx_tail) == priv->tx_max)
 		netif_stop_queue(ndev);
 
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
 	return NETDEV_TX_OK;
 }
@@ -529,6 +553,123 @@ static int xcan_rx(struct net_device *ndev)
 	return 1;
 }
 
+/**
+ * xcan_current_error_state - Get current error state from HW
+ * @ndev:	Pointer to net_device structure
+ *
+ * Checks the current CAN error state from the HW. Note that this
+ * only checks for ERROR_PASSIVE and ERROR_WARNING.
+ *
+ * Return:
+ * ERROR_PASSIVE or ERROR_WARNING if either is active, ERROR_ACTIVE
+ * otherwise.
+ */
+static enum can_state xcan_current_error_state(struct net_device *ndev)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	u32 status = priv->read_reg(priv, XCAN_SR_OFFSET);
+
+	if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK)
+		return CAN_STATE_ERROR_PASSIVE;
+	else if (status & XCAN_SR_ERRWRN_MASK)
+		return CAN_STATE_ERROR_WARNING;
+	else
+		return CAN_STATE_ERROR_ACTIVE;
+}
+
+/**
+ * xcan_set_error_state - Set new CAN error state
+ * @ndev:	Pointer to net_device structure
+ * @new_state:	The new CAN state to be set
+ * @cf:		Error frame to be populated or NULL
+ *
+ * Set new CAN error state for the device, updating statistics and
+ * populating the error frame if given.
+ */
+static void xcan_set_error_state(struct net_device *ndev,
+				 enum can_state new_state,
+				 struct can_frame *cf)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	u32 ecr = priv->read_reg(priv, XCAN_ECR_OFFSET);
+	u32 txerr = ecr & XCAN_ECR_TEC_MASK;
+	u32 rxerr = (ecr & XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT;
+
+	priv->can.state = new_state;
+
+	if (cf) {
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[6] = txerr;
+		cf->data[7] = rxerr;
+	}
+
+	switch (new_state) {
+	case CAN_STATE_ERROR_PASSIVE:
+		priv->can.can_stats.error_passive++;
+		if (cf)
+			cf->data[1] = (rxerr > 127) ?
+					CAN_ERR_CRTL_RX_PASSIVE :
+					CAN_ERR_CRTL_TX_PASSIVE;
+		break;
+	case CAN_STATE_ERROR_WARNING:
+		priv->can.can_stats.error_warning++;
+		if (cf)
+			cf->data[1] |= (txerr > rxerr) ?
+					CAN_ERR_CRTL_TX_WARNING :
+					CAN_ERR_CRTL_RX_WARNING;
+		break;
+	case CAN_STATE_ERROR_ACTIVE:
+		if (cf)
+			cf->data[1] |= CAN_ERR_CRTL_ACTIVE;
+		break;
+	default:
+		/* non-ERROR states are handled elsewhere */
+		WARN_ON(1);
+		break;
+	}
+}
+
+/**
+ * xcan_update_error_state_after_rxtx - Update CAN error state after RX/TX
+ * @ndev:	Pointer to net_device structure
+ *
+ * If the device is in a ERROR-WARNING or ERROR-PASSIVE state, check if
+ * the performed RX/TX has caused it to drop to a lesser state and set
+ * the interface state accordingly.
+ */
+static void xcan_update_error_state_after_rxtx(struct net_device *ndev)
+{
+	struct xcan_priv *priv = netdev_priv(ndev);
+	enum can_state old_state = priv->can.state;
+	enum can_state new_state;
+
+	/* changing error state due to successful frame RX/TX can only
+	 * occur from these states
+	 */
+	if (old_state != CAN_STATE_ERROR_WARNING &&
+	    old_state != CAN_STATE_ERROR_PASSIVE)
+		return;
+
+	new_state = xcan_current_error_state(ndev);
+
+	if (new_state != old_state) {
+		struct sk_buff *skb;
+		struct can_frame *cf;
+
+		skb = alloc_can_err_skb(ndev, &cf);
+
+		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
+
+		if (skb) {
+			struct net_device_stats *stats = &ndev->stats;
+
+			stats->rx_packets++;
+			stats->rx_bytes += cf->can_dlc;
+			netif_rx(skb);
+		}
+	}
+}
+
 /**
  * xcan_err_interrupt - error frame Isr
  * @ndev:	net_device pointer
@@ -544,16 +685,12 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 	struct net_device_stats *stats = &ndev->stats;
 	struct can_frame *cf;
 	struct sk_buff *skb;
-	u32 err_status, status, txerr = 0, rxerr = 0;
+	u32 err_status;
 
 	skb = alloc_can_err_skb(ndev, &cf);
 
 	err_status = priv->read_reg(priv, XCAN_ESR_OFFSET);
 	priv->write_reg(priv, XCAN_ESR_OFFSET, err_status);
-	txerr = priv->read_reg(priv, XCAN_ECR_OFFSET) & XCAN_ECR_TEC_MASK;
-	rxerr = ((priv->read_reg(priv, XCAN_ECR_OFFSET) &
-			XCAN_ECR_REC_MASK) >> XCAN_ESR_REC_SHIFT);
-	status = priv->read_reg(priv, XCAN_SR_OFFSET);
 
 	if (isr & XCAN_IXR_BSOFF_MASK) {
 		priv->can.state = CAN_STATE_BUS_OFF;
@@ -563,28 +700,10 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 		can_bus_off(ndev);
 		if (skb)
 			cf->can_id |= CAN_ERR_BUSOFF;
-	} else if ((status & XCAN_SR_ESTAT_MASK) == XCAN_SR_ESTAT_MASK) {
-		priv->can.state = CAN_STATE_ERROR_PASSIVE;
-		priv->can.can_stats.error_passive++;
-		if (skb) {
-			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] = (rxerr > 127) ?
-					CAN_ERR_CRTL_RX_PASSIVE :
-					CAN_ERR_CRTL_TX_PASSIVE;
-			cf->data[6] = txerr;
-			cf->data[7] = rxerr;
-		}
-	} else if (status & XCAN_SR_ERRWRN_MASK) {
-		priv->can.state = CAN_STATE_ERROR_WARNING;
-		priv->can.can_stats.error_warning++;
-		if (skb) {
-			cf->can_id |= CAN_ERR_CRTL;
-			cf->data[1] |= (txerr > rxerr) ?
-					CAN_ERR_CRTL_TX_WARNING :
-					CAN_ERR_CRTL_RX_WARNING;
-			cf->data[6] = txerr;
-			cf->data[7] = rxerr;
-		}
+	} else {
+		enum can_state new_state = xcan_current_error_state(ndev);
+
+		xcan_set_error_state(ndev, new_state, skb ? cf : NULL);
 	}
 
 	/* Check for Arbitration lost interrupt */
@@ -600,7 +719,6 @@ static void xcan_err_interrupt(struct net_device *ndev, u32 isr)
 	if (isr & XCAN_IXR_RXOFLW_MASK) {
 		stats->rx_over_errors++;
 		stats->rx_errors++;
-		priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
 		if (skb) {
 			cf->can_id |= CAN_ERR_CRTL;
 			cf->data[1] |= CAN_ERR_CRTL_RX_OVERFLOW;
@@ -709,26 +827,20 @@ static int xcan_rx_poll(struct napi_struct *napi, int quota)
 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
 	while ((isr & XCAN_IXR_RXNEMP_MASK) && (work_done < quota)) {
-		if (isr & XCAN_IXR_RXOK_MASK) {
-			priv->write_reg(priv, XCAN_ICR_OFFSET,
-					XCAN_IXR_RXOK_MASK);
-			work_done += xcan_rx(ndev);
-		} else {
-			priv->write_reg(priv, XCAN_ICR_OFFSET,
-					XCAN_IXR_RXNEMP_MASK);
-			break;
-		}
+		work_done += xcan_rx(ndev);
 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_RXNEMP_MASK);
 		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
 	}
 
-	if (work_done)
+	if (work_done) {
 		can_led_event(ndev, CAN_LED_EVENT_RX);
+		xcan_update_error_state_after_rxtx(ndev);
+	}
 
 	if (work_done < quota) {
 		napi_complete_done(napi, work_done);
 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-		ier |= (XCAN_IXR_RXOK_MASK | XCAN_IXR_RXNEMP_MASK);
+		ier |= XCAN_IXR_RXNEMP_MASK;
 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
 	}
 
 	return work_done;
@@ -743,18 +855,71 @@ static void xcan_tx_interrupt(struct net_device *ndev, u32 isr)
 {
 	struct xcan_priv *priv = netdev_priv(ndev);
 	struct net_device_stats *stats = &ndev->stats;
+	unsigned int frames_in_fifo;
+	int frames_sent = 1; /* TXOK => at least 1 frame was sent */
+	unsigned long flags;
+	int retries = 0;
+
+	/* Synchronize with xmit as we need to know the exact number
+	 * of frames in the FIFO to stay in sync due to the TXFEMP
+	 * handling.
+	 * This also prevents a race between netif_wake_queue() and
+	 * netif_stop_queue().
+	 */
+	spin_lock_irqsave(&priv->tx_lock, flags);
+
+	frames_in_fifo = priv->tx_head - priv->tx_tail;
+
+	if (WARN_ON_ONCE(frames_in_fifo == 0)) {
+		/* clear TXOK anyway to avoid getting back here */
+		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+		spin_unlock_irqrestore(&priv->tx_lock, flags);
+		return;
+	}
+
+	/* Check if 2 frames were sent (TXOK only means that at least 1
+	 * frame was sent).
+	 */
+	if (frames_in_fifo > 1) {
+		WARN_ON(frames_in_fifo > priv->tx_max);
+
+		/* Synchronize TXOK and isr so that after the loop:
+		 * (1) isr variable is up-to-date at least up to TXOK clear
+		 *     time. This avoids us clearing a TXOK of a second frame
+		 *     but not noticing that the FIFO is now empty and thus
+		 *     marking only a single frame as sent.
+		 * (2) No TXOK is left. Having one could mean leaving a
+		 *     stray TXOK as we might process the associated frame
+		 *     via TXFEMP handling as we read TXFEMP *after* TXOK
+		 *     clear to satisfy (1).
+		 */
+		while ((isr & XCAN_IXR_TXOK_MASK) && !WARN_ON(++retries == 100)) {
+			priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+			isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
+		}
 
-	while ((priv->tx_head - priv->tx_tail > 0) &&
-			(isr & XCAN_IXR_TXOK_MASK)) {
+		if (isr & XCAN_IXR_TXFEMP_MASK) {
+			/* nothing in FIFO anymore */
+			frames_sent = frames_in_fifo;
+		}
+	} else {
+		/* single frame in fifo, just clear TXOK */
 		priv->write_reg(priv, XCAN_ICR_OFFSET, XCAN_IXR_TXOK_MASK);
+	}
+
+	while (frames_sent--) {
 		can_get_echo_skb(ndev, priv->tx_tail %
 					priv->tx_max);
 		priv->tx_tail++;
 		stats->tx_packets++;
-		isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
 	}
-	can_led_event(ndev, CAN_LED_EVENT_TX);
+
 	netif_wake_queue(ndev);
+
+	spin_unlock_irqrestore(&priv->tx_lock, flags);
+
+	can_led_event(ndev, CAN_LED_EVENT_TX);
+	xcan_update_error_state_after_rxtx(ndev);
 }
 
 /**
@@ -773,6 +938,7 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 	struct net_device *ndev = (struct net_device *)dev_id;
 	struct xcan_priv *priv = netdev_priv(ndev);
 	u32 isr, ier;
+	u32 isr_errors;
 
 	/* Get the interrupt status from Xilinx CAN */
 	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
@@ -791,18 +957,17 @@ static irqreturn_t xcan_interrupt(int irq, void *dev_id)
 		xcan_tx_interrupt(ndev, isr);
 
 	/* Check for the type of error interrupt and Processing it */
-	if (isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
-			XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK)) {
-		priv->write_reg(priv, XCAN_ICR_OFFSET, (XCAN_IXR_ERROR_MASK |
-				XCAN_IXR_RXOFLW_MASK | XCAN_IXR_BSOFF_MASK |
-				XCAN_IXR_ARBLST_MASK));
+	isr_errors = isr & (XCAN_IXR_ERROR_MASK | XCAN_IXR_RXOFLW_MASK |
+			    XCAN_IXR_BSOFF_MASK | XCAN_IXR_ARBLST_MASK);
+	if (isr_errors) {
+		priv->write_reg(priv, XCAN_ICR_OFFSET, isr_errors);
 		xcan_err_interrupt(ndev, isr);
 	}
 
 	/* Check for the type of receive interrupt and Processing it */
-	if (isr & (XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK)) {
+	if (isr & XCAN_IXR_RXNEMP_MASK) {
 		ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-		ier &= ~(XCAN_IXR_RXNEMP_MASK | XCAN_IXR_RXOK_MASK);
+		ier &= ~XCAN_IXR_RXNEMP_MASK;
 		priv->write_reg(priv, XCAN_IER_OFFSET, ier);
 		napi_schedule(&priv->napi);
 	}
@@ -819,13 +984,9 @@
 static void xcan_chip_stop(struct net_device *ndev)
 {
 	struct xcan_priv *priv = netdev_priv(ndev);
-	u32 ier;
 
 	/* Disable interrupts and leave the can in configuration mode */
-	ier = priv->read_reg(priv, XCAN_IER_OFFSET);
-	ier &= ~XCAN_INTR_ALL;
-	priv->write_reg(priv, XCAN_IER_OFFSET, ier);
-	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
+	set_reset_mode(ndev);
 	priv->can.state = CAN_STATE_STOPPED;
 }
@@ -958,10 +1119,15 @@ static const struct net_device_ops xcan_netdev_ops = {
  */
 static int __maybe_unused xcan_suspend(struct device *dev)
 {
-	if (!device_may_wakeup(dev))
-		return pm_runtime_force_suspend(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
 
-	return 0;
+	if (netif_running(ndev)) {
+		netif_stop_queue(ndev);
+		netif_device_detach(ndev);
+		xcan_chip_stop(ndev);
+	}
+
+	return pm_runtime_force_suspend(dev);
 }
 
 /**
@@ -973,11 +1139,27 @@ static int __maybe_unused xcan_suspend(struct device *dev)
  */
 static int __maybe_unused xcan_resume(struct device *dev)
 {
-	if (!device_may_wakeup(dev))
-		return pm_runtime_force_resume(dev);
+	struct net_device *ndev = dev_get_drvdata(dev);
+	int ret;
 
-	return 0;
+	ret = pm_runtime_force_resume(dev);
+	if (ret) {
+		dev_err(dev, "pm_runtime_force_resume failed on resume\n");
+		return ret;
+	}
+
+	if (netif_running(ndev)) {
+		ret = xcan_chip_start(ndev);
+		if (ret) {
+			dev_err(dev, "xcan_chip_start failed on resume\n");
+			return ret;
+		}
+
+		netif_device_attach(ndev);
+		netif_start_queue(ndev);
+	}
+
+	return 0;
 }
 
 /**
@@ -992,14 +1174,6 @@ static int __maybe_unused xcan_runtime_suspend(struct device *dev)
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct xcan_priv *priv = netdev_priv(ndev);
 
-	if (netif_running(ndev)) {
-		netif_stop_queue(ndev);
-		netif_device_detach(ndev);
-	}
-
-	priv->write_reg(priv, XCAN_MSR_OFFSET, XCAN_MSR_SLEEP_MASK);
-	priv->can.state = CAN_STATE_SLEEPING;
-
 	clk_disable_unprepare(priv->bus_clk);
 	clk_disable_unprepare(priv->can_clk);
@@ -1018,7 +1192,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
 	struct net_device *ndev = dev_get_drvdata(dev);
 	struct xcan_priv *priv = netdev_priv(ndev);
 	int ret;
-	u32 isr, status;
 
 	ret = clk_prepare_enable(priv->bus_clk);
 	if (ret) {
@@ -1032,27 +1205,6 @@ static int __maybe_unused xcan_runtime_resume(struct device *dev)
 		return ret;
 	}
 
-	priv->write_reg(priv, XCAN_SRR_OFFSET, XCAN_SRR_RESET_MASK);
-	isr = priv->read_reg(priv, XCAN_ISR_OFFSET);
-	status = priv->read_reg(priv, XCAN_SR_OFFSET);
-
-	if (netif_running(ndev)) {
-		if (isr & XCAN_IXR_BSOFF_MASK) {
-			priv->can.state = CAN_STATE_BUS_OFF;
-			priv->write_reg(priv, XCAN_SRR_OFFSET,
-					XCAN_SRR_RESET_MASK);
-		} else if ((status & XCAN_SR_ESTAT_MASK) ==
-					XCAN_SR_ESTAT_MASK) {
-			priv->can.state = CAN_STATE_ERROR_PASSIVE;
-		} else if (status & XCAN_SR_ERRWRN_MASK) {
-			priv->can.state = CAN_STATE_ERROR_WARNING;
-		} else {
-			priv->can.state = CAN_STATE_ERROR_ACTIVE;
-		}
-		netif_device_attach(ndev);
-		netif_start_queue(ndev);
-	}
-
 	return 0;
 }
@@ -1061,6 +1213,18 @@ static const struct dev_pm_ops xcan_dev_pm_ops = {
 	SET_RUNTIME_PM_OPS(xcan_runtime_suspend, xcan_runtime_resume, NULL)
 };
 
+static const struct xcan_devtype_data xcan_zynq_data = {
+	.caps = XCAN_CAP_WATERMARK,
+};
+
+/* Match table for OF platform binding */
+static const struct of_device_id xcan_of_match[] = {
+	{ .compatible = "xlnx,zynq-can-1.0", .data = &xcan_zynq_data },
+	{ .compatible = "xlnx,axi-can-1.00.a", },
+	{ /* end of list */ },
+};
+MODULE_DEVICE_TABLE(of, xcan_of_match);
+
 /**
  * xcan_probe - Platform registration call
  * @pdev:	Handle to the platform device structure
@@ -1075,8 +1239,10 @@ static int xcan_probe(struct platform_device *pdev)
 	struct resource *res; /* IO mem resources */
 	struct net_device *ndev;
 	struct xcan_priv *priv;
+	const struct of_device_id *of_id;
+	int caps = 0;
 	void __iomem *addr;
-	int ret, rx_max, tx_max;
+	int ret, rx_max, tx_max, tx_fifo_depth;
 
 	/* Get the virtual base address for the device */
 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
@@ -1086,7 +1252,8 @@ static int xcan_probe(struct platform_device *pdev)
 		goto err;
 	}
 
-	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth", &tx_max);
+	ret = of_property_read_u32(pdev->dev.of_node, "tx-fifo-depth",
+				   &tx_fifo_depth);
 	if (ret < 0)
 		goto err;
@@ -1094,6 +1261,30 @@
 	if (ret < 0)
 		goto err;
 
+	of_id = of_match_device(xcan_of_match, &pdev->dev);
+	if (of_id) {
+		const struct xcan_devtype_data *devtype_data = of_id->data;
+
+		if (devtype_data)
+			caps = devtype_data->caps;
+	}
+
+	/* There is no way to directly figure out how many frames have been
+	 * sent when the TXOK interrupt is processed. If watermark programming
+	 * is supported, we can have 2 frames in the FIFO and use TXFEMP
+	 * to determine if 1 or 2 frames have been sent.
+	 * Theoretically we should be able to use TXFWMEMP to determine up
+	 * to 3 frames, but it seems that after putting a second frame in the
+	 * FIFO, with watermark at 2 frames, it can happen that TXFWMEMP (less
+	 * than 2 frames in FIFO) is set anyway with no TXOK (a frame was
+	 * sent), which is not a sensible state - possibly TXFWMEMP is not
+	 * completely synchronized with the rest of the bits?
+	 */
+	if (caps & XCAN_CAP_WATERMARK)
+		tx_max = min(tx_fifo_depth, 2);
+	else
+		tx_max = 1;
+
 	/* Create a CAN device instance */
 	ndev = alloc_candev(sizeof(struct xcan_priv), tx_max);
 	if (!ndev)
@@ -1108,6 +1299,7 @@
 			CAN_CTRLMODE_BERR_REPORTING;
 	priv->reg_base = addr;
 	priv->tx_max = tx_max;
+	spin_lock_init(&priv->tx_lock);
 
 	/* Get IRQ for the device */
 	ndev->irq = platform_get_irq(pdev, 0);
@@ -1172,9 +1364,9 @@
 	pm_runtime_put(&pdev->dev);
 
-	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth:%d\n",
+	netdev_dbg(ndev, "reg_base=0x%p irq=%d clock=%d, tx fifo depth: actual %d, using %d\n",
 			priv->reg_base, ndev->irq, priv->can.clock.freq,
-			priv->tx_max);
+			tx_fifo_depth, priv->tx_max);
 
 	return 0;
@@ -1208,14 +1400,6 @@ static int xcan_remove(struct platform_device *pdev)
 	return 0;
 }
 
-/* Match table for OF platform binding */
-static const struct of_device_id xcan_of_match[] = {
-	{ .compatible = "xlnx,zynq-can-1.0", },
-	{ .compatible = "xlnx,axi-can-1.00.a", },
-	{ /* end of list */ },
-};
-MODULE_DEVICE_TABLE(of, xcan_of_match);
-
 static struct platform_driver xcan_driver = {
 	.probe = xcan_probe,
 	.remove	= xcan_remove,
......