Commit 64ec42fe authored by Moritz Fischer, committed by David S. Miller

net: macb: Fix coding style warnings

This commit takes care of the coding style warnings
that are mostly due to a different comment style and
lines over 80 chars, as well as a dangling else.
Acked-by: Nicolas Ferre <nicolas.ferre@atmel.com>
Signed-off-by: Moritz Fischer <moritz.fischer@ettus.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 96ec6310
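For context, the warnings addressed here are the usual checkpatch.pl complaints: the networking subsystem prefers block comments whose text starts on the opening "/*" line, lines should stay under 80 columns, and an "else" directly after a "return" is flagged as useless. A minimal sketch of the target style follows; example_alloc_buffer is a hypothetical helper for illustration only, not code from this driver.

/* Hypothetical helper illustrating the three style points this patch
 * applies: net-subsystem block comments (text on the opening line),
 * argument lists wrapped to stay under 80 columns, and no "else"
 * after a return.
 */
#include <linux/slab.h>
#include <linux/printk.h>

static int example_alloc_buffer(void **bufp, size_t nmemb, size_t size)
{
	void *buf;

	/* Wrap the argument list instead of letting the line run past
	 * 80 characters.
	 */
	buf = kmalloc_array(nmemb, size,
			    GFP_KERNEL | __GFP_ZERO);
	if (!buf)
		return -ENOMEM;

	/* No dangling "else" after the return above; just continue. */
	pr_debug("allocated %zu * %zu bytes\n", nmemb, size);
	*bufp = buf;
	return 0;
}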
@@ -61,8 +61,7 @@
 #define MACB_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
 #define MACB_WOL_ENABLED		(0x1 << 1)
 
-/*
- * Graceful stop timeouts in us. We should allow up to
+/* Graceful stop timeouts in us. We should allow up to
  * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
  */
 #define MACB_HALT_TIMEOUT	1230
@@ -130,8 +129,7 @@ static void hw_writel(struct macb *bp, int offset, u32 value)
 	writel_relaxed(value, bp->regs + offset);
 }
 
-/*
- * Find the CPU endianness by using the loopback bit of NCR register. When the
+/* Find the CPU endianness by using the loopback bit of NCR register. When the
  * CPU is in big endian we need to program swaped mode for management
  * descriptor access.
  */
@@ -386,7 +384,8 @@ static int macb_mii_probe(struct net_device *dev)
 	pdata = dev_get_platdata(&bp->pdev->dev);
 	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
-		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin,
+					"phy int");
 		if (!ret) {
 			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
 			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
@@ -452,7 +451,8 @@ static int macb_mii_init(struct macb *bp)
 		err = of_mdiobus_register(bp->mii_bus, np);
 
 		/* fallback to standard phy registration if no phy were
-		   found during dt phy registration */
+		 * found during dt phy registration
+		 */
 		if (!err && !phy_find_first(bp->mii_bus)) {
 			for (i = 0; i < PHY_MAX_ADDR; i++) {
 				struct phy_device *phydev;
@@ -567,8 +567,7 @@ static void macb_tx_error_task(struct work_struct *work)
 	/* Make sure nobody is trying to queue up new packets */
 	netif_tx_stop_all_queues(bp->dev);
 
-	/*
-	 * Stop transmission now
+	/* Stop transmission now
 	 * (in case we have just queued new packets)
 	 * macb/gem must be halted to write TBQP register
 	 */
@@ -576,8 +575,7 @@ static void macb_tx_error_task(struct work_struct *work)
 		/* Just complain for now, reinitializing TX path can be good */
 		netdev_err(bp->dev, "BUG: halt tx timed out\n");
 
-	/*
-	 * Treat frames in TX queue including the ones that caused the error.
+	/* Treat frames in TX queue including the ones that caused the error.
 	 * Free transmit buffers in upper layer.
 	 */
 	for (tail = queue->tx_tail; tail != queue->tx_head; tail++) {
@@ -607,10 +605,9 @@ static void macb_tx_error_task(struct work_struct *work)
 				bp->stats.tx_bytes += skb->len;
 			}
 		} else {
-			/*
-			 * "Buffers exhausted mid-frame" errors may only happen
-			 * if the driver is buggy, so complain loudly about those.
-			 * Statistics are updated by hardware.
+			/* "Buffers exhausted mid-frame" errors may only happen
+			 * if the driver is buggy, so complain loudly about
+			 * those. Statistics are updated by hardware.
 			 */
 			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
 				netdev_err(bp->dev,
@@ -722,7 +719,8 @@ static void gem_rx_refill(struct macb *bp)
 	struct sk_buff *skb;
 	dma_addr_t paddr;
 
-	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
+			  RX_RING_SIZE) > 0) {
 		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
@@ -741,7 +739,8 @@ static void gem_rx_refill(struct macb *bp)
 			/* now fill corresponding descriptor entry */
 			paddr = dma_map_single(&bp->pdev->dev, skb->data,
-					       bp->rx_buffer_size, DMA_FROM_DEVICE);
+					       bp->rx_buffer_size,
+					       DMA_FROM_DEVICE);
 			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
 				dev_kfree_skb(skb);
 				break;
@@ -777,14 +776,14 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
 	for (frag = begin; frag != end; frag++) {
 		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+
 		desc->addr &= ~MACB_BIT(RX_USED);
 	}
 
 	/* Make descriptor updates visible to hardware */
 	wmb();
 
-	/*
-	 * When this happens, the hardware stats registers for
+	/* When this happens, the hardware stats registers for
 	 * whatever caused this is updated, so we don't have to record
 	 * anything.
 	 */
@@ -883,8 +882,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 		   macb_rx_ring_wrap(first_frag),
 		   macb_rx_ring_wrap(last_frag), len);
 
-	/*
-	 * The ethernet header starts NET_IP_ALIGN bytes into the
+	/* The ethernet header starts NET_IP_ALIGN bytes into the
 	 * first buffer. Since the header is 14 bytes, this makes the
 	 * payload word-aligned.
 	 *
@@ -1099,8 +1097,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 			   (unsigned long)status);
 
 		if (status & MACB_RX_INT_FLAGS) {
-			/*
-			 * There's no point taking any more interrupts
+			/* There's no point taking any more interrupts
 			 * until we have processed the buffers. The
 			 * scheduling call may fail if the poll routine
 			 * is already scheduled, so disable interrupts
@@ -1129,8 +1126,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		if (status & MACB_BIT(TCOMP))
 			macb_tx_interrupt(queue);
 
-		/*
-		 * Link change detection isn't possible with RMII, so we'll
+		/* Link change detection isn't possible with RMII, so we'll
 		 * add that if/when we get our hands on a full-blown MII PHY.
 		 */
@@ -1161,8 +1157,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 		}
 
 		if (status & MACB_BIT(HRESP)) {
-			/*
-			 * TODO: Reset the hardware, and maybe move the
+			/* TODO: Reset the hardware, and maybe move the
 			 * netdev_err to a lower-priority context as well
 			 * (work queue?)
 			 */
@@ -1181,8 +1176,7 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
 }
 
 #ifdef CONFIG_NET_POLL_CONTROLLER
-/*
- * Polling receive - used by netconsole and other diagnostic tools
+/* Polling receive - used by netconsole and other diagnostic tools
  * to allow network i/o with interrupts disabled.
  */
 static void macb_poll_controller(struct net_device *dev)
@@ -1478,10 +1472,10 @@ static int gem_alloc_rx_buffers(struct macb *bp)
 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 	if (!bp->rx_skbuff)
 		return -ENOMEM;
-	else
-		netdev_dbg(bp->dev,
-			   "Allocated %d RX struct sk_buff entries at %p\n",
-			   RX_RING_SIZE, bp->rx_skbuff);
+
+	netdev_dbg(bp->dev,
+		   "Allocated %d RX struct sk_buff entries at %p\n",
+		   RX_RING_SIZE, bp->rx_skbuff);
 	return 0;
 }
@@ -1494,10 +1488,10 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 					 &bp->rx_buffers_dma, GFP_KERNEL);
 	if (!bp->rx_buffers)
 		return -ENOMEM;
-	else
-		netdev_dbg(bp->dev,
-			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+
+	netdev_dbg(bp->dev,
+		   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+		   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
 	return 0;
 }
@@ -1588,8 +1582,7 @@ static void macb_reset_hw(struct macb *bp)
 	struct macb_queue *queue;
 	unsigned int q;
 
-	/*
-	 * Disable RX and TX (XXX: Should we halt the transmission
+	/* Disable RX and TX (XXX: Should we halt the transmission
 	 * more gracefully?)
 	 */
 	macb_writel(bp, NCR, 0);
@@ -1652,8 +1645,7 @@ static u32 macb_mdc_clk_div(struct macb *bp)
 	return config;
 }
 
-/*
- * Get the DMA bus width field of the network configuration register that we
+/* Get the DMA bus width field of the network configuration register that we
  * should program. We find the width from decoding the design configuration
 * register to find the maximum supported data bus width.
 */
@@ -1673,8 +1665,7 @@ static u32 macb_dbw(struct macb *bp)
 	}
 }
 
-/*
- * Configure the receive DMA engine
+/* Configure the receive DMA engine
  * - use the correct receive buffer size
  * - set best burst length for DMA operations
  *   (if not supported by FIFO, it will fallback to default)
@@ -1762,8 +1753,7 @@ static void macb_init_hw(struct macb *bp)
 	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
 }
 
-/*
- * The hash address register is 64 bits long and takes up two
+/* The hash address register is 64 bits long and takes up two
  * locations in the memory map. The least significant bits are stored
 * in EMAC_HSL and the most significant bits in EMAC_HSH.
 *
@@ -1803,9 +1793,7 @@ static inline int hash_bit_value(int bitnr, __u8 *addr)
 	return 0;
 }
 
-/*
- * Return the hash index value for the specified address.
- */
+/* Return the hash index value for the specified address. */
 static int hash_get_index(__u8 *addr)
 {
 	int i, j, bitval;
@@ -1821,9 +1809,7 @@ static int hash_get_index(__u8 *addr)
 	return hash_index;
 }
 
-/*
- * Add multicast addresses to the internal multicast-hash table.
- */
+/* Add multicast addresses to the internal multicast-hash table. */
 static void macb_sethashtable(struct net_device *dev)
 {
 	struct netdev_hw_addr *ha;
@@ -1842,9 +1828,7 @@ static void macb_sethashtable(struct net_device *dev)
 	macb_or_gem_writel(bp, HRT, mc_filter[1]);
 }
 
-/*
- * Enable/Disable promiscuous and multicast modes.
- */
+/* Enable/Disable promiscuous and multicast modes. */
 static void macb_set_rx_mode(struct net_device *dev)
 {
 	unsigned long cfg;
@@ -2161,9 +2145,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	if (!(bp->caps & MACB_CAPS_USRIO_DISABLED))
 		regs_buff[12] = macb_or_gem_readl(bp, USRIO);
 
-	if (macb_is_gem(bp)) {
+	if (macb_is_gem(bp))
 		regs_buff[13] = gem_readl(bp, DMACFG);
-	}
 }
 
 static void macb_get_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
@@ -2286,11 +2269,11 @@ static const struct net_device_ops macb_netdev_ops = {
 	.ndo_set_features	= macb_set_features,
 };
 
-/*
- * Configure peripheral capabilities according to device tree
+/* Configure peripheral capabilities according to device tree
  * and integration options used
  */
-static void macb_configure_caps(struct macb *bp, const struct macb_config *dt_conf)
+static void macb_configure_caps(struct macb *bp,
+				const struct macb_config *dt_conf)
 {
 	u32 dcfg;
@@ -2996,6 +2979,7 @@ static int macb_probe(struct platform_device *pdev)
 		phy_node = of_get_next_available_child(np, NULL);
 		if (phy_node) {
 			int gpio = of_get_named_gpio(phy_node, "reset-gpios", 0);
+
 			if (gpio_is_valid(gpio)) {
 				bp->reset_gpio = gpio_to_desc(gpio);
 				gpiod_direction_output(bp->reset_gpio, 1);