Commit d6700790 authored by David S. Miller

Merge branch 'cpmac-next'

Varka Bhadram says:

====================
This patch series cleans up the AR7 CPMAC Ethernet controller driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 405fd707 0465be8f
@@ -67,42 +67,42 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
#define CPMAC_RX_CONTROL 0x0014
#define CPMAC_RX_TEARDOWN 0x0018
#define CPMAC_MBP 0x0100
# define MBP_RXPASSCRC 0x40000000
# define MBP_RXQOS 0x20000000
# define MBP_RXNOCHAIN 0x10000000
# define MBP_RXCMF 0x01000000
# define MBP_RXSHORT 0x00800000
# define MBP_RXCEF 0x00400000
# define MBP_RXPROMISC 0x00200000
# define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
# define MBP_RXBCAST 0x00002000
# define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
# define MBP_RXMCAST 0x00000020
# define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define MBP_RXPASSCRC 0x40000000
#define MBP_RXQOS 0x20000000
#define MBP_RXNOCHAIN 0x10000000
#define MBP_RXCMF 0x01000000
#define MBP_RXSHORT 0x00800000
#define MBP_RXCEF 0x00400000
#define MBP_RXPROMISC 0x00200000
#define MBP_PROMISCCHAN(channel) (((channel) & 0x7) << 16)
#define MBP_RXBCAST 0x00002000
#define MBP_BCASTCHAN(channel) (((channel) & 0x7) << 8)
#define MBP_RXMCAST 0x00000020
#define MBP_MCASTCHAN(channel) ((channel) & 0x7)
#define CPMAC_UNICAST_ENABLE 0x0104
#define CPMAC_UNICAST_CLEAR 0x0108
#define CPMAC_MAX_LENGTH 0x010c
#define CPMAC_BUFFER_OFFSET 0x0110
#define CPMAC_MAC_CONTROL 0x0160
# define MAC_TXPTYPE 0x00000200
# define MAC_TXPACE 0x00000040
# define MAC_MII 0x00000020
# define MAC_TXFLOW 0x00000010
# define MAC_RXFLOW 0x00000008
# define MAC_MTEST 0x00000004
# define MAC_LOOPBACK 0x00000002
# define MAC_FDX 0x00000001
#define MAC_TXPTYPE 0x00000200
#define MAC_TXPACE 0x00000040
#define MAC_MII 0x00000020
#define MAC_TXFLOW 0x00000010
#define MAC_RXFLOW 0x00000008
#define MAC_MTEST 0x00000004
#define MAC_LOOPBACK 0x00000002
#define MAC_FDX 0x00000001
#define CPMAC_MAC_STATUS 0x0164
# define MAC_STATUS_QOS 0x00000004
# define MAC_STATUS_RXFLOW 0x00000002
# define MAC_STATUS_TXFLOW 0x00000001
#define MAC_STATUS_QOS 0x00000004
#define MAC_STATUS_RXFLOW 0x00000002
#define MAC_STATUS_TXFLOW 0x00000001
#define CPMAC_TX_INT_ENABLE 0x0178
#define CPMAC_TX_INT_CLEAR 0x017c
#define CPMAC_MAC_INT_VECTOR 0x0180
# define MAC_INT_STATUS 0x00080000
# define MAC_INT_HOST 0x00040000
# define MAC_INT_RX 0x00020000
# define MAC_INT_TX 0x00010000
#define MAC_INT_STATUS 0x00080000
#define MAC_INT_HOST 0x00040000
#define MAC_INT_RX 0x00020000
#define MAC_INT_TX 0x00010000
#define CPMAC_MAC_EOI_VECTOR 0x0184
#define CPMAC_RX_INT_ENABLE 0x0198
#define CPMAC_RX_INT_CLEAR 0x019c
@@ -118,8 +118,8 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
#define CPMAC_TX_ACK(channel) (0x0640 + (channel) * 4)
#define CPMAC_RX_ACK(channel) (0x0660 + (channel) * 4)
#define CPMAC_REG_END 0x0680
/*
* Rx/Tx statistics
/* Rx/Tx statistics
* TODO: use some of them to fill stats in cpmac_stats()
*/
#define CPMAC_STATS_RX_GOOD 0x0200
@@ -157,24 +157,24 @@ MODULE_PARM_DESC(dumb_switch, "Assume switch is not connected to MDIO bus");
/* MDIO bus */
#define CPMAC_MDIO_VERSION 0x0000
#define CPMAC_MDIO_CONTROL 0x0004
# define MDIOC_IDLE 0x80000000
# define MDIOC_ENABLE 0x40000000
# define MDIOC_PREAMBLE 0x00100000
# define MDIOC_FAULT 0x00080000
# define MDIOC_FAULTDETECT 0x00040000
# define MDIOC_INTTEST 0x00020000
# define MDIOC_CLKDIV(div) ((div) & 0xff)
#define MDIOC_IDLE 0x80000000
#define MDIOC_ENABLE 0x40000000
#define MDIOC_PREAMBLE 0x00100000
#define MDIOC_FAULT 0x00080000
#define MDIOC_FAULTDETECT 0x00040000
#define MDIOC_INTTEST 0x00020000
#define MDIOC_CLKDIV(div) ((div) & 0xff)
#define CPMAC_MDIO_ALIVE 0x0008
#define CPMAC_MDIO_LINK 0x000c
#define CPMAC_MDIO_ACCESS(channel) (0x0080 + (channel) * 8)
# define MDIO_BUSY 0x80000000
# define MDIO_WRITE 0x40000000
# define MDIO_REG(reg) (((reg) & 0x1f) << 21)
# define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
# define MDIO_DATA(data) ((data) & 0xffff)
#define MDIO_BUSY 0x80000000
#define MDIO_WRITE 0x40000000
#define MDIO_REG(reg) (((reg) & 0x1f) << 21)
#define MDIO_PHY(phy) (((phy) & 0x1f) << 16)
#define MDIO_DATA(data) ((data) & 0xffff)
#define CPMAC_MDIO_PHYSEL(channel) (0x0084 + (channel) * 8)
# define PHYSEL_LINKSEL 0x00000040
# define PHYSEL_LINKINT 0x00000020
#define PHYSEL_LINKSEL 0x00000040
#define PHYSEL_LINKINT 0x00000020
struct cpmac_desc {
u32 hw_next;
@@ -224,31 +224,33 @@ static void cpmac_dump_regs(struct net_device *dev)
{
int i;
struct cpmac_priv *priv = netdev_priv(dev);
for (i = 0; i < CPMAC_REG_END; i += 4) {
if (i % 16 == 0) {
if (i)
pr_cont("\n");
printk(KERN_DEBUG "%s: reg[%p]:", dev->name,
priv->regs + i);
netdev_dbg(dev, "reg[%p]:", priv->regs + i);
}
printk(" %08x", cpmac_read(priv->regs, i));
pr_debug(" %08x", cpmac_read(priv->regs, i));
}
printk("\n");
pr_debug("\n");
}
static void cpmac_dump_desc(struct net_device *dev, struct cpmac_desc *desc)
{
int i;
printk(KERN_DEBUG "%s: desc[%p]:", dev->name, desc);
netdev_dbg(dev, "desc[%p]:", desc);
for (i = 0; i < sizeof(*desc) / 4; i++)
printk(" %08x", ((u32 *)desc)[i]);
printk("\n");
pr_debug(" %08x", ((u32 *)desc)[i]);
pr_debug("\n");
}
static void cpmac_dump_all_desc(struct net_device *dev)
{
struct cpmac_priv *priv = netdev_priv(dev);
struct cpmac_desc *dump = priv->rx_head;
do {
cpmac_dump_desc(dev, dump);
dump = dump->next;
@@ -258,17 +260,17 @@ static void cpmac_dump_all_desc(struct net_device *dev)
static void cpmac_dump_skb(struct net_device *dev, struct sk_buff *skb)
{
int i;
printk(KERN_DEBUG "%s: skb 0x%p, len=%d\n", dev->name, skb, skb->len);
netdev_dbg(dev, "skb 0x%p, len=%d\n", skb, skb->len);
for (i = 0; i < skb->len; i++) {
if (i % 16 == 0) {
if (i)
pr_cont("\n");
printk(KERN_DEBUG "%s: data[%p]:", dev->name,
skb->data + i);
netdev_dbg(dev, "data[%p]:", skb->data + i);
}
printk(" %02x", ((u8 *)skb->data)[i]);
pr_debug(" %02x", ((u8 *)skb->data)[i]);
}
printk("\n");
pr_debug("\n");
}
static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
@@ -281,6 +283,7 @@ static int cpmac_mdio_read(struct mii_bus *bus, int phy_id, int reg)
MDIO_PHY(phy_id));
while ((val = cpmac_read(bus->priv, CPMAC_MDIO_ACCESS(0))) & MDIO_BUSY)
cpu_relax();
return MDIO_DATA(val);
}
@@ -291,6 +294,7 @@ static int cpmac_mdio_write(struct mii_bus *bus, int phy_id,
cpu_relax();
cpmac_write(bus->priv, CPMAC_MDIO_ACCESS(0), MDIO_BUSY | MDIO_WRITE |
MDIO_REG(reg) | MDIO_PHY(phy_id) | MDIO_DATA(val));
return 0;
}
@@ -300,12 +304,13 @@ static int cpmac_mdio_reset(struct mii_bus *bus)
cpmac_clk = clk_get(&bus->dev, "cpmac");
if (IS_ERR(cpmac_clk)) {
printk(KERN_ERR "unable to get cpmac clock\n");
pr_err("unable to get cpmac clock\n");
return -1;
}
ar7_device_reset(AR7_RESET_BIT_MDIO);
cpmac_write(bus->priv, CPMAC_MDIO_CONTROL, MDIOC_ENABLE |
MDIOC_CLKDIV(clk_get_rate(cpmac_clk) / 2200000 - 1));
return 0;
}
@@ -331,8 +336,7 @@ static void cpmac_set_multicast_list(struct net_device *dev)
cpmac_write(priv->regs, CPMAC_MAC_HASH_LO, 0xffffffff);
cpmac_write(priv->regs, CPMAC_MAC_HASH_HI, 0xffffffff);
} else {
/*
* cpmac uses some strange mac address hashing
/* cpmac uses some strange mac address hashing
* (not crc32)
*/
netdev_for_each_mc_addr(ha, dev) {
@@ -369,8 +373,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
cpmac_write(priv->regs, CPMAC_RX_ACK(0), (u32)desc->mapping);
if (unlikely(!desc->datalen)) {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: rx: spurious interrupt\n",
priv->dev->name);
netdev_warn(priv->dev, "rx: spurious interrupt\n");
return NULL;
}
@@ -390,15 +394,14 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
DMA_FROM_DEVICE);
desc->hw_data = (u32)desc->data_mapping;
if (unlikely(netif_msg_pktdata(priv))) {
printk(KERN_DEBUG "%s: received packet:\n",
priv->dev->name);
netdev_dbg(priv->dev, "received packet:\n");
cpmac_dump_skb(priv->dev, result);
}
} else {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING
"%s: low on skbs, dropping packet\n",
priv->dev->name);
netdev_warn(priv->dev,
"low on skbs, dropping packet\n");
priv->dev->stats.rx_dropped++;
}
@@ -418,8 +421,8 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
spin_lock(&priv->rx_lock);
if (unlikely(!priv->rx_head)) {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: rx: polling, but no queue\n",
priv->dev->name);
netdev_warn(priv->dev, "rx: polling, but no queue\n");
spin_unlock(&priv->rx_lock);
napi_complete(napi);
return 0;
@@ -432,15 +435,15 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
if ((desc->dataflags & CPMAC_EOQ) != 0) {
/* The last update to eoq->hw_next didn't happen
* soon enough, and the receiver stopped here.
*Remember this descriptor so we can restart
* the receiver after freeing some space.
*/
* soon enough, and the receiver stopped here.
* Remember this descriptor so we can restart
* the receiver after freeing some space.
*/
if (unlikely(restart)) {
if (netif_msg_rx_err(priv))
printk(KERN_ERR "%s: poll found a"
" duplicate EOQ: %p and %p\n",
priv->dev->name, restart, desc);
netdev_err(priv->dev, "poll found a"
" duplicate EOQ: %p and %p\n",
restart, desc);
goto fatal_error;
}
@@ -457,25 +460,27 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
if (desc != priv->rx_head) {
/* We freed some buffers, but not the whole ring,
* add what we did free to the rx list */
* add what we did free to the rx list
*/
desc->prev->hw_next = (u32)0;
priv->rx_head->prev->hw_next = priv->rx_head->mapping;
}
/* Optimization: If we did not actually process an EOQ (perhaps because
* of quota limits), check to see if the tail of the queue has EOQ set.
* We should immediately restart in that case so that the receiver can
* restart and run in parallel with more packet processing.
* This lets us handle slightly larger bursts before running
* out of ring space (assuming dev->weight < ring_size) */
* We should immediately restart in that case so that the receiver can
* restart and run in parallel with more packet processing.
* This lets us handle slightly larger bursts before running
* out of ring space (assuming dev->weight < ring_size)
*/
if (!restart &&
(priv->rx_head->prev->dataflags & (CPMAC_OWN|CPMAC_EOQ))
== CPMAC_EOQ &&
(priv->rx_head->dataflags & CPMAC_OWN) != 0) {
/* reset EOQ so the poll loop (above) doesn't try to
* restart this when it eventually gets to this descriptor.
*/
* restart this when it eventually gets to this descriptor.
*/
priv->rx_head->prev->dataflags &= ~CPMAC_EOQ;
restart = priv->rx_head;
}
@@ -484,15 +489,13 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
priv->dev->stats.rx_errors++;
priv->dev->stats.rx_fifo_errors++;
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: rx dma ring overrun\n",
priv->dev->name);
netdev_warn(priv->dev, "rx dma ring overrun\n");
if (unlikely((restart->dataflags & CPMAC_OWN) == 0)) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: cpmac_poll is trying to "
"restart rx from a descriptor that's "
"not free: %p\n",
priv->dev->name, restart);
netdev_err(priv->dev, "cpmac_poll is trying "
"to restart rx from a descriptor "
"that's not free: %p\n", restart);
goto fatal_error;
}
@@ -502,11 +505,12 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
priv->rx_head = desc;
spin_unlock(&priv->rx_lock);
if (unlikely(netif_msg_rx_status(priv)))
printk(KERN_DEBUG "%s: poll processed %d packets\n",
priv->dev->name, received);
netdev_dbg(priv->dev, "poll processed %d packets\n", received);
if (processed == 0) {
/* we ran out of packets to read,
* revert to interrupt-driven mode */
* revert to interrupt-driven mode
*/
napi_complete(napi);
cpmac_write(priv->regs, CPMAC_RX_INT_ENABLE, 1);
return 0;
@@ -516,16 +520,15 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
fatal_error:
/* Something went horribly wrong.
* Reset hardware to try to recover rather than wedging. */
* Reset hardware to try to recover rather than wedging.
*/
if (netif_msg_drv(priv)) {
printk(KERN_ERR "%s: cpmac_poll is confused. "
"Resetting hardware\n", priv->dev->name);
netdev_err(priv->dev, "cpmac_poll is confused. "
"Resetting hardware\n");
cpmac_dump_all_desc(priv->dev);
printk(KERN_DEBUG "%s: RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
priv->dev->name,
cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
netdev_dbg(priv->dev, "RX_PTR(0)=0x%08x RX_ACK(0)=0x%08x\n",
cpmac_read(priv->regs, CPMAC_RX_PTR(0)),
cpmac_read(priv->regs, CPMAC_RX_ACK(0)));
}
spin_unlock(&priv->rx_lock);
@@ -537,6 +540,7 @@ static int cpmac_poll(struct napi_struct *napi, int budget)
cpmac_hw_stop(priv->dev);
if (!schedule_work(&priv->reset_work))
atomic_dec(&priv->reset_pending);
return 0;
}
@@ -560,8 +564,8 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc = &priv->desc_ring[queue];
if (unlikely(desc->dataflags & CPMAC_OWN)) {
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: tx dma ring full\n",
dev->name);
netdev_warn(dev, "tx dma ring full\n");
return NETDEV_TX_BUSY;
}
@@ -575,8 +579,7 @@ static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
desc->datalen = len;
desc->buflen = len;
if (unlikely(netif_msg_tx_queued(priv)))
printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
skb->len);
netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
if (unlikely(netif_msg_hw(priv)))
cpmac_dump_desc(dev, desc);
if (unlikely(netif_msg_pktdata(priv)))
@@ -602,8 +605,8 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
DMA_TO_DEVICE);
if (unlikely(netif_msg_tx_done(priv)))
printk(KERN_DEBUG "%s: sent 0x%p, len=%d\n", dev->name,
desc->skb, desc->skb->len);
netdev_dbg(dev, "sent 0x%p, len=%d\n",
desc->skb, desc->skb->len);
dev_kfree_skb_irq(desc->skb);
desc->skb = NULL;
@@ -611,8 +614,7 @@ static void cpmac_end_xmit(struct net_device *dev, int queue)
netif_wake_subqueue(dev, queue);
} else {
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING
"%s: end_xmit: spurious interrupt\n", dev->name);
netdev_warn(dev, "end_xmit: spurious interrupt\n");
if (__netif_subqueue_stopped(dev, queue))
netif_wake_subqueue(dev, queue);
}
@@ -687,14 +689,14 @@ static void cpmac_clear_rx(struct net_device *dev)
struct cpmac_priv *priv = netdev_priv(dev);
struct cpmac_desc *desc;
int i;
if (unlikely(!priv->rx_head))
return;
desc = priv->rx_head;
for (i = 0; i < priv->ring_size; i++) {
if ((desc->dataflags & CPMAC_OWN) == 0) {
if (netif_msg_rx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: packet dropped\n",
dev->name);
netdev_warn(dev, "packet dropped\n");
if (unlikely(netif_msg_hw(priv)))
cpmac_dump_desc(dev, desc);
desc->dataflags = CPMAC_OWN;
@@ -710,6 +712,7 @@ static void cpmac_clear_tx(struct net_device *dev)
{
struct cpmac_priv *priv = netdev_priv(dev);
int i;
if (unlikely(!priv->desc_ring))
return;
for (i = 0; i < CPMAC_QUEUES; i++) {
@@ -751,16 +754,16 @@ static void cpmac_check_status(struct net_device *dev)
if (rx_code || tx_code) {
if (netif_msg_drv(priv) && net_ratelimit()) {
/* Can't find any documentation on what these
*error codes actually are. So just log them and hope..
* error codes actually are. So just log them and hope..
*/
if (rx_code)
printk(KERN_WARNING "%s: host error %d on rx "
"channel %d (macstatus %08x), resetting\n",
dev->name, rx_code, rx_channel, macstatus);
netdev_warn(dev, "host error %d on rx "
"channel %d (macstatus %08x), resetting\n",
rx_code, rx_channel, macstatus);
if (tx_code)
printk(KERN_WARNING "%s: host error %d on tx "
"channel %d (macstatus %08x), resetting\n",
dev->name, tx_code, tx_channel, macstatus);
netdev_warn(dev, "host error %d on tx "
"channel %d (macstatus %08x), resetting\n",
tx_code, tx_channel, macstatus);
}
netif_tx_stop_all_queues(dev);
@@ -785,8 +788,7 @@ static irqreturn_t cpmac_irq(int irq, void *dev_id)
status = cpmac_read(priv->regs, CPMAC_MAC_INT_VECTOR);
if (unlikely(netif_msg_intr(priv)))
printk(KERN_DEBUG "%s: interrupt status: 0x%08x\n", dev->name,
status);
netdev_dbg(dev, "interrupt status: 0x%08x\n", status);
if (status & MAC_INT_TX)
cpmac_end_xmit(dev, (status & 7));
@@ -815,7 +817,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
dev->stats.tx_errors++;
spin_unlock(&priv->lock);
if (netif_msg_tx_err(priv) && net_ratelimit())
printk(KERN_WARNING "%s: transmit timeout\n", dev->name);
netdev_warn(dev, "transmit timeout\n");
atomic_inc(&priv->reset_pending);
barrier();
@@ -829,6 +831,7 @@ static void cpmac_tx_timeout(struct net_device *dev)
static int cpmac_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
struct cpmac_priv *priv = netdev_priv(dev);
if (!(netif_running(dev)))
return -EINVAL;
if (!priv->phy)
@@ -884,6 +887,7 @@ static int cpmac_set_ringparam(struct net_device *dev,
if (netif_running(dev))
return -EBUSY;
priv->ring_size = ring->rx_pending;
return 0;
}
@@ -951,8 +955,8 @@ static int cpmac_open(struct net_device *dev)
mem = platform_get_resource_byname(priv->pdev, IORESOURCE_MEM, "regs");
if (!request_mem_region(mem->start, resource_size(mem), dev->name)) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to request registers\n",
dev->name);
netdev_err(dev, "failed to request registers\n");
res = -ENXIO;
goto fail_reserve;
}
@@ -960,8 +964,8 @@ static int cpmac_open(struct net_device *dev)
priv->regs = ioremap(mem->start, resource_size(mem));
if (!priv->regs) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to remap registers\n",
dev->name);
netdev_err(dev, "failed to remap registers\n");
res = -ENXIO;
goto fail_remap;
}
@@ -1003,8 +1007,8 @@ static int cpmac_open(struct net_device *dev)
res = request_irq(dev->irq, cpmac_irq, IRQF_SHARED, dev->name, dev);
if (res) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: failed to obtain irq\n",
dev->name);
netdev_err(dev, "failed to obtain irq\n");
goto fail_irq;
}
@@ -1077,6 +1081,7 @@ static int cpmac_stop(struct net_device *dev)
dma_free_coherent(&dev->dev, sizeof(struct cpmac_desc) *
(CPMAC_QUEUES + priv->ring_size),
priv->desc_ring, priv->dma_ring);
return 0;
}
@@ -1121,7 +1126,7 @@ static int cpmac_probe(struct platform_device *pdev)
if (phy_id == PHY_MAX_ADDR) {
dev_err(&pdev->dev, "no PHY present, falling back "
"to switch on MDIO bus 0\n");
"to switch on MDIO bus 0\n");
strncpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE); /* fixed phys bus */
phy_id = pdev->id;
}
@@ -1137,7 +1142,7 @@ static int cpmac_probe(struct platform_device *pdev)
mem = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs");
if (!mem) {
rc = -ENODEV;
goto fail;
goto out;
}
dev->irq = platform_get_irq_byname(pdev, "irq");
@@ -1162,44 +1167,48 @@ static int cpmac_probe(struct platform_device *pdev)
if (IS_ERR(priv->phy)) {
if (netif_msg_drv(priv))
printk(KERN_ERR "%s: Could not attach to PHY\n",
dev->name);
dev_err(&pdev->dev, "Could not attach to PHY\n");
rc = PTR_ERR(priv->phy);
goto fail;
goto out;
}
rc = register_netdev(dev);
if (rc) {
printk(KERN_ERR "cpmac: error %i registering device %s\n", rc,
dev->name);
dev_err(&pdev->dev, "Could not register net device\n");
goto fail;
}
if (netif_msg_probe(priv)) {
printk(KERN_INFO
"cpmac: device %s (regs: %p, irq: %d, phy: %s, "
"mac: %pM)\n", dev->name, (void *)mem->start, dev->irq,
priv->phy_name, dev->dev_addr);
dev_info(&pdev->dev, "regs: %p, irq: %d, phy: %s, "
"mac: %pM\n", (void *)mem->start, dev->irq,
priv->phy_name, dev->dev_addr);
}
return 0;
fail:
free_netdev(dev);
out:
return rc;
}
static int cpmac_remove(struct platform_device *pdev)
{
struct net_device *dev = platform_get_drvdata(pdev);
unregister_netdev(dev);
free_netdev(dev);
return 0;
}
static struct platform_driver cpmac_driver = {
.driver.name = "cpmac",
.driver.owner = THIS_MODULE,
.probe = cpmac_probe,
.driver = {
.name = "cpmac",
.owner = THIS_MODULE,
},
.probe = cpmac_probe,
.remove = cpmac_remove,
};
@@ -1221,7 +1230,7 @@ int cpmac_init(void)
cpmac_mii->priv = ioremap(AR7_REGS_MDIO, 256);
if (!cpmac_mii->priv) {
printk(KERN_ERR "Can't ioremap mdio registers\n");
pr_err("Can't ioremap mdio registers\n");
res = -ENXIO;
goto fail_alloc;
}