Commit 201c78e0 authored by David S. Miller

Merge branch 'macb-rx-packet-filtering'

Rafal Ozieblo says:

====================
Receive packet filtering for the macb driver

This patch series adds support for receive packet
filtering to the Cadence GEM driver. Packets can be redirected
to different hardware queues based on source IP, destination IP,
source port or destination port. To enable filtering,
support for RX queueing was added as well.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 7475908f ae8223de
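
Before the diff, a hedged sketch of the interface being added: rules reach the driver through ethtool's rxnfc API as a struct ethtool_rx_flow_spec (standard UAPI from <linux/ethtool.h>). The example below, "steer TCP/IPv4 traffic with destination port 80 to RX queue 1", is illustrative and not part of the series; note that this driver treats an all-ones mask in m_u as "compare this field exactly".

    #include <string.h>
    #include <stdio.h>
    #include <arpa/inet.h>
    #include <linux/types.h>
    #include <linux/ethtool.h>

    /* Illustrative only: the rule "ethtool -N eth0 flow-type tcp4
     * dst-port 80 action 1 loc 0" arrives at the driver roughly as
     * this flow spec.
     */
    int main(void)
    {
        struct ethtool_rx_flow_spec fs;

        memset(&fs, 0, sizeof(fs));
        fs.flow_type = TCP_V4_FLOW;
        fs.h_u.tcp_ip4_spec.pdst = htons(80); /* value to match */
        fs.m_u.tcp_ip4_spec.pdst = 0xFFFF;    /* all-ones mask: exact match */
        fs.ring_cookie = 1;                   /* deliver to RX queue 1 */
        fs.location = 0;                      /* rule/screener index */

        printf("flow_type=%u queue=%llu\n", fs.flow_type,
               (unsigned long long)fs.ring_cookie);
        return 0;
    }

Once the series is applied, this is roughly what gem_set_rxnfc() receives for an ETHTOOL_SRXCLSRLINS command.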
@@ -164,14 +164,38 @@
 #define GEM_DCFG5    0x0290 /* Design Config 5 */
 #define GEM_DCFG6    0x0294 /* Design Config 6 */
 #define GEM_DCFG7    0x0298 /* Design Config 7 */
+#define GEM_DCFG8    0x029C /* Design Config 8 */

 #define GEM_TXBDCTRL 0x04cc /* TX Buffer Descriptor control register */
 #define GEM_RXBDCTRL 0x04d0 /* RX Buffer Descriptor control register */

+/* Screener Type 2 match registers */
+#define GEM_SCRT2    0x540
+
+/* EtherType registers */
+#define GEM_ETHT     0x06E0
+
+/* Type 2 compare registers */
+#define GEM_T2CMPW0  0x0700
+#define GEM_T2CMPW1  0x0704
+#define T2CMP_OFST(t2idx) (t2idx * 2)
+
+/* type 2 compare registers
+ * each location requires 3 compare regs
+ */
+#define GEM_IP4SRC_CMP(idx) (idx * 3)
+#define GEM_IP4DST_CMP(idx) (idx * 3 + 1)
+#define GEM_PORT_CMP(idx)   (idx * 3 + 2)
+
+/* Which screening type 2 EtherType register will be used (0 - 7) */
+#define SCRT2_ETHT   0
+
 #define GEM_ISR(hw_q)  (0x0400 + ((hw_q) << 2))
 #define GEM_TBQP(hw_q) (0x0440 + ((hw_q) << 2))
 #define GEM_TBQPH(hw_q) (0x04C8)
 #define GEM_RBQP(hw_q) (0x0480 + ((hw_q) << 2))
+#define GEM_RBQS(hw_q) (0x04A0 + ((hw_q) << 2))
+#define GEM_RBQPH(hw_q) (0x04D4)
 #define GEM_IER(hw_q)  (0x0600 + ((hw_q) << 2))
 #define GEM_IDR(hw_q)  (0x0620 + ((hw_q) << 2))
 #define GEM_IMR(hw_q)  (0x0640 + ((hw_q) << 2))
@@ -455,6 +479,16 @@
 #define GEM_DAW64_OFFSET 23
 #define GEM_DAW64_SIZE   1

+/* Bitfields in DCFG8. */
+#define GEM_T1SCR_OFFSET   24
+#define GEM_T1SCR_SIZE     8
+#define GEM_T2SCR_OFFSET   16
+#define GEM_T2SCR_SIZE     8
+#define GEM_SCR2ETH_OFFSET 8
+#define GEM_SCR2ETH_SIZE   8
+#define GEM_SCR2CMP_OFFSET 0
+#define GEM_SCR2CMP_SIZE   8
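
These OFFSET/SIZE pairs feed the driver's existing GEM_BFEXT/GEM_BFINS bitfield helpers (defined earlier in macb.h). A minimal standalone sketch of the extraction arithmetic, with a hypothetical example_bfext() standing in for the macro:

    #include <stdint.h>
    #include <assert.h>

    /* Extract a SIZE-bit field starting at bit OFFSET, as GEM_BFEXT does. */
    static uint32_t example_bfext(uint32_t reg, unsigned offset, unsigned size)
    {
        return (reg >> offset) & ((1u << size) - 1);
    }

    int main(void)
    {
        /* hypothetical DCFG8 value: T2SCR (bits 23:16) = 16 screeners */
        uint32_t dcfg8 = 16u << 16;

        /* in the driver: GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8)) */
        assert(example_bfext(dcfg8, 16, 8) == 16);
        return 0;
    }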

 /* Bitfields in TISUBN */
 #define GEM_SUBNSINCR_OFFSET 0
 #define GEM_SUBNSINCR_SIZE   16
@@ -483,6 +517,66 @@
 #define GEM_RXTSMODE_OFFSET 4 /* RX Descriptor Timestamp Insertion mode */
 #define GEM_RXTSMODE_SIZE   2

+/* Bitfields in SCRT2 */
+#define GEM_QUEUE_OFFSET    0 /* Queue Number */
+#define GEM_QUEUE_SIZE      4
+#define GEM_VLANPR_OFFSET   4 /* VLAN Priority */
+#define GEM_VLANPR_SIZE     3
+#define GEM_VLANEN_OFFSET   8 /* VLAN Enable */
+#define GEM_VLANEN_SIZE     1
+#define GEM_ETHT2IDX_OFFSET 9 /* Index to screener type 2 EtherType register */
+#define GEM_ETHT2IDX_SIZE   3
+#define GEM_ETHTEN_OFFSET   12 /* EtherType Enable */
+#define GEM_ETHTEN_SIZE     1
+#define GEM_CMPA_OFFSET     13 /* Compare A - Index to screener type 2 Compare register */
+#define GEM_CMPA_SIZE       5
+#define GEM_CMPAEN_OFFSET   18 /* Compare A Enable */
+#define GEM_CMPAEN_SIZE     1
+#define GEM_CMPB_OFFSET     19 /* Compare B - Index to screener type 2 Compare register */
+#define GEM_CMPB_SIZE       5
+#define GEM_CMPBEN_OFFSET   24 /* Compare B Enable */
+#define GEM_CMPBEN_SIZE     1
+#define GEM_CMPC_OFFSET     25 /* Compare C - Index to screener type 2 Compare register */
+#define GEM_CMPC_SIZE       5
+#define GEM_CMPCEN_OFFSET   30 /* Compare C Enable */
+#define GEM_CMPCEN_SIZE     1
+
+/* Bitfields in ETHT */
+#define GEM_ETHTCMP_OFFSET  0 /* EtherType compare value */
+#define GEM_ETHTCMP_SIZE    16
+
+/* Bitfields in T2CMPW0 */
+#define GEM_T2CMP_OFFSET    16 /* 0xFFFF0000 compare value */
+#define GEM_T2CMP_SIZE      16
+#define GEM_T2MASK_OFFSET   0 /* 0x0000FFFF compare value or mask */
+#define GEM_T2MASK_SIZE     16
+
+/* Bitfields in T2CMPW1 */
+#define GEM_T2DISMSK_OFFSET 9 /* disable mask */
+#define GEM_T2DISMSK_SIZE   1
+#define GEM_T2CMPOFST_OFFSET 7 /* compare offset */
+#define GEM_T2CMPOFST_SIZE  2
+#define GEM_T2OFST_OFFSET   0 /* offset value */
+#define GEM_T2OFST_SIZE     7
+
+/* Offset for screener type 2 compare values (T2CMPOFST).
+ * Note the offset is applied after the specified point,
+ * e.g. GEM_T2COMPOFST_ETYPE denotes the EtherType field, so an offset
+ * of 12 bytes from this would be the source IP address in an IP header
+ */
+#define GEM_T2COMPOFST_SOF    0
+#define GEM_T2COMPOFST_ETYPE  1
+#define GEM_T2COMPOFST_IPHDR  2
+#define GEM_T2COMPOFST_TCPUDP 3
+
+/* offset from EtherType to IP address */
+#define ETYPE_SRCIP_OFFSET 12
+#define ETYPE_DSTIP_OFFSET 16
+
+/* offset from IP header to port */
+#define IPHDR_SRCPORT_OFFSET 0
+#define IPHDR_DSTPORT_OFFSET 2
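
A quick worked example of the compare-register addressing implied by these macros: each rule location owns three compare registers, and each compare register is a W0/W1 pair of consecutive 32-bit words, which is why T2CMP_OFST() multiplies by two. A standalone sketch (macro values copied from above; the rule location is hypothetical):

    #include <stdio.h>

    #define GEM_T2CMPW0         0x0700
    #define T2CMP_OFST(t2idx)   (t2idx * 2)
    #define GEM_IP4DST_CMP(idx) (idx * 3 + 1)

    int main(void)
    {
        unsigned int loc = 1;                    /* second flow rule */
        unsigned int t2idx = GEM_IP4DST_CMP(loc); /* compare index 4 */

        /* gem_writel_n(bp, T2CMPW0, T2CMP_OFST(t2idx), w0) writes at: */
        unsigned int w0_addr = GEM_T2CMPW0 + T2CMP_OFST(t2idx) * 4;
        printf("W0 at 0x%04x, W1 at 0x%04x\n", w0_addr, w0_addr + 4);
        return 0; /* prints W0 at 0x0720, W1 at 0x0724 */
    }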

 /* Transmit DMA buffer descriptor Word 1 */
 #define GEM_DMA_TXVALID_OFFSET 23 /* timestamp has been captured in the Buffer Descriptor */
 #define GEM_DMA_TXVALID_SIZE   1

@@ -583,6 +677,8 @@
 #define gem_writel(port, reg, value) (port)->macb_reg_writel((port), GEM_##reg, (value))
 #define queue_readl(queue, reg)      (queue)->bp->macb_reg_readl((queue)->bp, (queue)->reg)
 #define queue_writel(queue, reg, value) (queue)->bp->macb_reg_writel((queue)->bp, (queue)->reg, (value))
+#define gem_readl_n(port, reg, idx)  (port)->macb_reg_readl((port), GEM_##reg + idx * 4)
+#define gem_writel_n(port, reg, idx, value) (port)->macb_reg_writel((port), GEM_##reg + idx * 4, (value))

 #define PTP_TS_BUFFER_SIZE 128 /* must be power of 2 */
@@ -920,13 +1016,42 @@ static const struct gem_statistic gem_statistics[] = {
 #define GEM_STATS_LEN ARRAY_SIZE(gem_statistics)

+#define QUEUE_STAT_TITLE(title) { \
+    .stat_string = title, \
+}
+
+/* per queue statistics, each should be unsigned long type */
+struct queue_stats {
+    union {
+        unsigned long first;
+        unsigned long rx_packets;
+    };
+    unsigned long rx_bytes;
+    unsigned long rx_dropped;
+    unsigned long tx_packets;
+    unsigned long tx_bytes;
+    unsigned long tx_dropped;
+};
+
+static const struct gem_statistic queue_statistics[] = {
+    QUEUE_STAT_TITLE("rx_packets"),
+    QUEUE_STAT_TITLE("rx_bytes"),
+    QUEUE_STAT_TITLE("rx_dropped"),
+    QUEUE_STAT_TITLE("tx_packets"),
+    QUEUE_STAT_TITLE("tx_bytes"),
+    QUEUE_STAT_TITLE("tx_dropped"),
+};
+
+#define QUEUE_STATS_LEN ARRAY_SIZE(queue_statistics)
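
The anonymous union aliasing `first` with rx_packets exists so that later code in this series (gem_update_stats()) can walk all six counters through a single unsigned long pointer. A minimal standalone sketch of that pattern, with the struct copied from above and an illustrative loop:

    #include <stdio.h>

    #define QUEUE_STATS_LEN 6

    struct queue_stats {
        union {
            unsigned long first;
            unsigned long rx_packets;
        };
        unsigned long rx_bytes;
        unsigned long rx_dropped;
        unsigned long tx_packets;
        unsigned long tx_bytes;
        unsigned long tx_dropped;
    };

    int main(void)
    {
        struct queue_stats stats = { .rx_bytes = 1500, .tx_packets = 3 };
        unsigned long *stat = &stats.first;
        int i;

        /* walk the six counters as a flat array, as gem_update_stats() does */
        for (i = 0; i < QUEUE_STATS_LEN; ++i, ++stat)
            printf("counter %d = %lu\n", i, *stat);
        return 0;
    }

This relies on all six members being consecutive unsigned longs, which the layout above guarantees.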

 struct macb;
+struct macb_queue;

 struct macb_or_gem_ops {
     int  (*mog_alloc_rx_buffers)(struct macb *bp);
     void (*mog_free_rx_buffers)(struct macb *bp);
     void (*mog_init_rings)(struct macb *bp);
-    int  (*mog_rx)(struct macb *bp, int budget);
+    int  (*mog_rx)(struct macb_queue *queue, int budget);
 };

 /* MACB-PTP interface: adapt to platform needs. */
@@ -968,6 +1093,9 @@ struct macb_queue {
     unsigned int    IMR;
     unsigned int    TBQP;
     unsigned int    TBQPH;
+    unsigned int    RBQS;
+    unsigned int    RBQP;
+    unsigned int    RBQPH;

     unsigned int    tx_head, tx_tail;
     struct macb_dma_desc *tx_ring;
@@ -975,6 +1103,16 @@ struct macb_queue {
     dma_addr_t      tx_ring_dma;
     struct work_struct tx_error_task;

+    dma_addr_t      rx_ring_dma;
+    dma_addr_t      rx_buffers_dma;
+    unsigned int    rx_tail;
+    unsigned int    rx_prepared_head;
+    struct macb_dma_desc *rx_ring;
+    struct sk_buff  **rx_skbuff;
+    void            *rx_buffers;
+    struct napi_struct napi;
+    struct queue_stats stats;
+
 #ifdef CONFIG_MACB_USE_HWSTAMP
     struct work_struct tx_ts_task;
     unsigned int    tx_ts_head, tx_ts_tail;
@@ -982,6 +1120,16 @@ struct macb_queue {
 #endif
 };

+struct ethtool_rx_fs_item {
+    struct ethtool_rx_flow_spec fs;
+    struct list_head list;
+};
+
+struct ethtool_rx_fs_list {
+    struct list_head list;
+    unsigned int count;
+};
+
 struct macb {
     void __iomem    *regs;
     bool            native_io;
@@ -990,11 +1138,6 @@ struct macb {
     u32  (*macb_reg_readl)(struct macb *bp, int offset);
     void (*macb_reg_writel)(struct macb *bp, int offset, u32 value);

-    unsigned int    rx_tail;
-    unsigned int    rx_prepared_head;
-    struct macb_dma_desc *rx_ring;
-    struct sk_buff  **rx_skbuff;
-    void            *rx_buffers;
     size_t          rx_buffer_size;

     unsigned int    rx_ring_size;
@@ -1011,15 +1154,11 @@ struct macb {
     struct clk      *tx_clk;
     struct clk      *rx_clk;
     struct net_device *dev;
-    struct napi_struct napi;
     union {
         struct macb_stats macb;
         struct gem_stats  gem;
     } hw_stats;

-    dma_addr_t      rx_ring_dma;
-    dma_addr_t      rx_buffers_dma;
     struct macb_or_gem_ops macbgem_ops;

     struct mii_bus  *mii_bus;
@@ -1040,7 +1179,7 @@ struct macb {
     int  skb_length; /* saved skb length for pci_unmap_single */
     unsigned int    max_tx_length;

-    u64 ethtool_stats[GEM_STATS_LEN];
+    u64 ethtool_stats[GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES];

     unsigned int    rx_frm_len_mask;
     unsigned int    jumbo_max_len;
@@ -1057,6 +1196,11 @@ struct macb {
     struct ptp_clock_info ptp_clock_info;
     struct tsu_incr tsu_incr;
     struct hwtstamp_config tstamp_config;
+
+    /* RX queue filer rule set */
+    struct ethtool_rx_fs_list rx_fs_list;
+    spinlock_t      rx_fs_lock;
+    unsigned int    max_tuples;
 };

 #ifdef CONFIG_MACB_USE_HWSTAMP
...
@@ -194,17 +194,17 @@ static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
     return index & (bp->rx_ring_size - 1);
 }
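
macb_rx_ring_wrap() depends on rx_ring_size being a power of two: masking with size - 1 is then an exact, branch-free modulo. A quick standalone check:

    #include <assert.h>

    /* index & (size - 1) == index % size whenever size is a power of two */
    static unsigned int ring_wrap(unsigned int index, unsigned int size)
    {
        return index & (size - 1);
    }

    int main(void)
    {
        assert(ring_wrap(513, 512) == 1);   /* one past the end wraps to 1 */
        assert(ring_wrap(511, 512) == 511); /* in-range indices unchanged */
        return 0;
    }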

-static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+static struct macb_dma_desc *macb_rx_desc(struct macb_queue *queue, unsigned int index)
 {
-    index = macb_rx_ring_wrap(bp, index);
-    index = macb_adj_dma_desc_idx(bp, index);
-    return &bp->rx_ring[index];
+    index = macb_rx_ring_wrap(queue->bp, index);
+    index = macb_adj_dma_desc_idx(queue->bp, index);
+    return &queue->rx_ring[index];
 }

-static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+static void *macb_rx_buffer(struct macb_queue *queue, unsigned int index)
 {
-    return bp->rx_buffers + bp->rx_buffer_size *
-           macb_rx_ring_wrap(bp, index);
+    return queue->rx_buffers + queue->bp->rx_buffer_size *
+           macb_rx_ring_wrap(queue->bp, index);
 }

 /* I/O accessors */
@@ -759,7 +759,9 @@ static void macb_tx_error_task(struct work_struct *work)
                 macb_tx_ring_wrap(bp, tail),
                 skb->data);
             bp->dev->stats.tx_packets++;
+            queue->stats.tx_packets++;
             bp->dev->stats.tx_bytes += skb->len;
+            queue->stats.tx_bytes += skb->len;
         }
     } else {
         /* "Buffers exhausted mid-frame" errors may only happen
@@ -859,7 +861,9 @@ static void macb_tx_interrupt(struct macb_queue *queue)
                 macb_tx_ring_wrap(bp, tail),
                 skb->data);
             bp->dev->stats.tx_packets++;
+            queue->stats.tx_packets++;
             bp->dev->stats.tx_bytes += skb->len;
+            queue->stats.tx_bytes += skb->len;
         }

         /* Now we can safely release resources */
@@ -881,24 +885,25 @@ static void macb_tx_interrupt(struct macb_queue *queue)
         netif_wake_subqueue(bp->dev, queue_index);
 }

-static void gem_rx_refill(struct macb *bp)
+static void gem_rx_refill(struct macb_queue *queue)
 {
     unsigned int entry;
     struct sk_buff *skb;
     dma_addr_t paddr;
+    struct macb *bp = queue->bp;
     struct macb_dma_desc *desc;

-    while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
+    while (CIRC_SPACE(queue->rx_prepared_head, queue->rx_tail,
               bp->rx_ring_size) > 0) {
-        entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
+        entry = macb_rx_ring_wrap(bp, queue->rx_prepared_head);

         /* Make hw descriptor updates visible to CPU */
         rmb();

-        bp->rx_prepared_head++;
-        desc = macb_rx_desc(bp, entry);
+        queue->rx_prepared_head++;
+        desc = macb_rx_desc(queue, entry);

-        if (!bp->rx_skbuff[entry]) {
+        if (!queue->rx_skbuff[entry]) {
             /* allocate sk_buff for this free entry in ring */
             skb = netdev_alloc_skb(bp->dev, bp->rx_buffer_size);
             if (unlikely(!skb)) {
@@ -916,7 +921,7 @@ static void gem_rx_refill(struct macb *bp)
                 break;
             }

-            bp->rx_skbuff[entry] = skb;
+            queue->rx_skbuff[entry] = skb;

             if (entry == bp->rx_ring_size - 1)
                 paddr |= MACB_BIT(RX_WRAP);
@@ -934,18 +939,18 @@ static void gem_rx_refill(struct macb *bp)
     /* Make descriptor updates visible to hardware */
     wmb();

-    netdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
-            bp->rx_prepared_head, bp->rx_tail);
+    netdev_vdbg(bp->dev, "rx ring: queue: %p, prepared head %d, tail %d\n",
+            queue, queue->rx_prepared_head, queue->rx_tail);
 }

 /* Mark DMA descriptors from begin up to and not including end as unused */
-static void discard_partial_frame(struct macb *bp, unsigned int begin,
+static void discard_partial_frame(struct macb_queue *queue, unsigned int begin,
                   unsigned int end)
 {
     unsigned int frag;

     for (frag = begin; frag != end; frag++) {
-        struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+        struct macb_dma_desc *desc = macb_rx_desc(queue, frag);

         desc->addr &= ~MACB_BIT(RX_USED);
     }
@@ -959,8 +964,9 @@ static void discard_partial_frame(struct macb *bp, unsigned int begin,
      */
 }

-static int gem_rx(struct macb *bp, int budget)
+static int gem_rx(struct macb_queue *queue, int budget)
 {
+    struct macb *bp = queue->bp;
     unsigned int len;
     unsigned int entry;
     struct sk_buff *skb;
@@ -972,8 +978,8 @@ static int gem_rx(struct macb *bp, int budget)
         dma_addr_t addr;
         bool rxused;

-        entry = macb_rx_ring_wrap(bp, bp->rx_tail);
-        desc = macb_rx_desc(bp, entry);
+        entry = macb_rx_ring_wrap(bp, queue->rx_tail);
+        desc = macb_rx_desc(queue, entry);

         /* Make hw descriptor updates visible to CPU */
         rmb();
@@ -985,24 +991,26 @@ static int gem_rx(struct macb *bp, int budget)
         if (!rxused)
             break;

-        bp->rx_tail++;
+        queue->rx_tail++;
         count++;

         if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
             netdev_err(bp->dev,
                    "not whole frame pointed by descriptor\n");
             bp->dev->stats.rx_dropped++;
+            queue->stats.rx_dropped++;
             break;
         }
-        skb = bp->rx_skbuff[entry];
+        skb = queue->rx_skbuff[entry];
         if (unlikely(!skb)) {
             netdev_err(bp->dev,
                    "inconsistent Rx descriptor chain\n");
             bp->dev->stats.rx_dropped++;
+            queue->stats.rx_dropped++;
             break;
         }
         /* now everything is ready for receiving packet */
-        bp->rx_skbuff[entry] = NULL;
+        queue->rx_skbuff[entry] = NULL;
         len = ctrl & bp->rx_frm_len_mask;

         netdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
@@ -1019,7 +1027,9 @@ static int gem_rx(struct macb *bp, int budget)
         skb->ip_summed = CHECKSUM_UNNECESSARY;

         bp->dev->stats.rx_packets++;
+        queue->stats.rx_packets++;
         bp->dev->stats.rx_bytes += skb->len;
+        queue->stats.rx_bytes += skb->len;

         gem_ptp_do_rxstamp(bp, skb, desc);
@@ -1035,12 +1045,12 @@ static int gem_rx(struct macb *bp, int budget)
         netif_receive_skb(skb);
     }

-    gem_rx_refill(bp);
+    gem_rx_refill(queue);

     return count;
 }

-static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+static int macb_rx_frame(struct macb_queue *queue, unsigned int first_frag,
              unsigned int last_frag)
 {
     unsigned int len;
@@ -1048,8 +1058,9 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
     unsigned int offset;
     struct sk_buff *skb;
     struct macb_dma_desc *desc;
+    struct macb *bp = queue->bp;

-    desc = macb_rx_desc(bp, last_frag);
+    desc = macb_rx_desc(queue, last_frag);
     len = desc->ctrl & bp->rx_frm_len_mask;

     netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
@@ -1068,7 +1079,7 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
     if (!skb) {
         bp->dev->stats.rx_dropped++;
         for (frag = first_frag; ; frag++) {
-            desc = macb_rx_desc(bp, frag);
+            desc = macb_rx_desc(queue, frag);
             desc->addr &= ~MACB_BIT(RX_USED);
             if (frag == last_frag)
                 break;
@@ -1096,10 +1107,10 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
             frag_len = len - offset;
         }
         skb_copy_to_linear_data_offset(skb, offset,
-                           macb_rx_buffer(bp, frag),
+                           macb_rx_buffer(queue, frag),
                            frag_len);
         offset += bp->rx_buffer_size;
-        desc = macb_rx_desc(bp, frag);
+        desc = macb_rx_desc(queue, frag);
         desc->addr &= ~MACB_BIT(RX_USED);

         if (frag == last_frag)
@@ -1121,32 +1132,34 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
     return 0;
 }

-static inline void macb_init_rx_ring(struct macb *bp)
+static inline void macb_init_rx_ring(struct macb_queue *queue)
 {
+    struct macb *bp = queue->bp;
     dma_addr_t addr;
     struct macb_dma_desc *desc = NULL;
     int i;

-    addr = bp->rx_buffers_dma;
+    addr = queue->rx_buffers_dma;
     for (i = 0; i < bp->rx_ring_size; i++) {
-        desc = macb_rx_desc(bp, i);
+        desc = macb_rx_desc(queue, i);
         macb_set_addr(bp, desc, addr);
         desc->ctrl = 0;
         addr += bp->rx_buffer_size;
     }
     desc->addr |= MACB_BIT(RX_WRAP);
-    bp->rx_tail = 0;
+    queue->rx_tail = 0;
 }

-static int macb_rx(struct macb *bp, int budget)
+static int macb_rx(struct macb_queue *queue, int budget)
 {
+    struct macb *bp = queue->bp;
     bool reset_rx_queue = false;
     int received = 0;
     unsigned int tail;
     int first_frag = -1;

-    for (tail = bp->rx_tail; budget > 0; tail++) {
-        struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+    for (tail = queue->rx_tail; budget > 0; tail++) {
+        struct macb_dma_desc *desc = macb_rx_desc(queue, tail);
         u32 ctrl;

         /* Make hw descriptor updates visible to CPU */
@@ -1159,7 +1172,7 @@ static int macb_rx(struct macb *bp, int budget)

         if (ctrl & MACB_BIT(RX_SOF)) {
             if (first_frag != -1)
-                discard_partial_frame(bp, first_frag, tail);
+                discard_partial_frame(queue, first_frag, tail);
             first_frag = tail;
         }
@@ -1171,7 +1184,7 @@ static int macb_rx(struct macb *bp, int budget)
                 continue;
             }

-            dropped = macb_rx_frame(bp, first_frag, tail);
+            dropped = macb_rx_frame(queue, first_frag, tail);
             first_frag = -1;
             if (unlikely(dropped < 0)) {
                 reset_rx_queue = true;
@@ -1195,8 +1208,8 @@ static int macb_rx(struct macb *bp, int budget)
         ctrl = macb_readl(bp, NCR);
         macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));

-        macb_init_rx_ring(bp);
-        macb_writel(bp, RBQP, bp->rx_ring_dma);
+        macb_init_rx_ring(queue);
+        queue_writel(queue, RBQP, queue->rx_ring_dma);

         macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
@@ -1205,16 +1218,17 @@ static int macb_rx(struct macb *bp, int budget)
     }

     if (first_frag != -1)
-        bp->rx_tail = first_frag;
+        queue->rx_tail = first_frag;
     else
-        bp->rx_tail = tail;
+        queue->rx_tail = tail;

     return received;
 }

 static int macb_poll(struct napi_struct *napi, int budget)
 {
-    struct macb *bp = container_of(napi, struct macb, napi);
+    struct macb_queue *queue = container_of(napi, struct macb_queue, napi);
+    struct macb *bp = queue->bp;
     int work_done;
     u32 status;
@@ -1224,7 +1238,7 @@ static int macb_poll(struct napi_struct *napi, int budget)
     netdev_vdbg(bp->dev, "poll: status = %08lx, budget = %d\n",
             (unsigned long)status, budget);

-    work_done = bp->macbgem_ops.mog_rx(bp, budget);
+    work_done = bp->macbgem_ops.mog_rx(queue, budget);
     if (work_done < budget) {
         napi_complete_done(napi, work_done);
@@ -1232,10 +1246,10 @@ static int macb_poll(struct napi_struct *napi, int budget)
         status = macb_readl(bp, RSR);
         if (status) {
             if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
-                macb_writel(bp, ISR, MACB_BIT(RCOMP));
+                queue_writel(queue, ISR, MACB_BIT(RCOMP));
             napi_reschedule(napi);
         } else {
-            macb_writel(bp, IER, MACB_RX_INT_FLAGS);
+            queue_writel(queue, IER, MACB_RX_INT_FLAGS);
         }
     }
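
macb_poll() now recovers its queue from the embedded napi_struct via container_of(). A minimal standalone sketch of the idiom, with a local container_of and simplified struct standing in for the kernel's:

    #include <stddef.h>
    #include <stdio.h>

    #define container_of(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

    struct napi_struct { int weight; };

    struct macb_queue_like {
        int index;
        struct napi_struct napi; /* embedded, as in struct macb_queue */
    };

    int main(void)
    {
        struct macb_queue_like q = { .index = 2 };
        struct napi_struct *napi = &q.napi;

        /* recover the enclosing queue from the napi pointer */
        struct macb_queue_like *queue =
            container_of(napi, struct macb_queue_like, napi);
        printf("queue index = %d\n", queue->index);
        return 0;
    }

Because each queue owns its own napi_struct, RX polling can proceed independently per hardware queue.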
@@ -1282,9 +1296,9 @@ static irqreturn_t macb_interrupt(int irq, void *dev_id)
             if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
                 queue_writel(queue, ISR, MACB_BIT(RCOMP));

-            if (napi_schedule_prep(&bp->napi)) {
+            if (napi_schedule_prep(&queue->napi)) {
                 netdev_vdbg(bp->dev, "scheduling RX softirq\n");
-                __napi_schedule(&bp->napi);
+                __napi_schedule(&queue->napi);
             }
         }
@@ -1708,38 +1722,44 @@ static void gem_free_rx_buffers(struct macb *bp)
 {
     struct sk_buff *skb;
     struct macb_dma_desc *desc;
+    struct macb_queue *queue;
     dma_addr_t addr;
+    unsigned int q;
     int i;

-    if (!bp->rx_skbuff)
-        return;
+    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+        if (!queue->rx_skbuff)
+            continue;

         for (i = 0; i < bp->rx_ring_size; i++) {
-            skb = bp->rx_skbuff[i];
+            skb = queue->rx_skbuff[i];
             if (!skb)
                 continue;
-            desc = macb_rx_desc(bp, i);
+            desc = macb_rx_desc(queue, i);
             addr = macb_get_addr(bp, desc);
             dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
                      DMA_FROM_DEVICE);
             dev_kfree_skb_any(skb);
             skb = NULL;
         }
-        kfree(bp->rx_skbuff);
-        bp->rx_skbuff = NULL;
+        kfree(queue->rx_skbuff);
+        queue->rx_skbuff = NULL;
+    }
 }

 static void macb_free_rx_buffers(struct macb *bp)
 {
-    if (bp->rx_buffers) {
+    struct macb_queue *queue = &bp->queues[0];
+
+    if (queue->rx_buffers) {
         dma_free_coherent(&bp->pdev->dev,
                   bp->rx_ring_size * bp->rx_buffer_size,
-                  bp->rx_buffers, bp->rx_buffers_dma);
-        bp->rx_buffers = NULL;
+                  queue->rx_buffers, queue->rx_buffers_dma);
+        queue->rx_buffers = NULL;
     }
 }
@@ -1748,11 +1768,12 @@ static void macb_free_consistent(struct macb *bp)
     struct macb_queue *queue;
     unsigned int q;

+    queue = &bp->queues[0];
     bp->macbgem_ops.mog_free_rx_buffers(bp);
-    if (bp->rx_ring) {
+    if (queue->rx_ring) {
         dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
-                  bp->rx_ring, bp->rx_ring_dma);
-        bp->rx_ring = NULL;
+                  queue->rx_ring, queue->rx_ring_dma);
+        queue->rx_ring = NULL;
     }

     for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
@@ -1768,32 +1789,37 @@ static void macb_free_consistent(struct macb *bp)

 static int gem_alloc_rx_buffers(struct macb *bp)
 {
+    struct macb_queue *queue;
+    unsigned int q;
     int size;

-    size = bp->rx_ring_size * sizeof(struct sk_buff *);
-    bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
-    if (!bp->rx_skbuff)
-        return -ENOMEM;
-    else
-        netdev_dbg(bp->dev,
-               "Allocated %d RX struct sk_buff entries at %p\n",
-               bp->rx_ring_size, bp->rx_skbuff);
+    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+        size = bp->rx_ring_size * sizeof(struct sk_buff *);
+        queue->rx_skbuff = kzalloc(size, GFP_KERNEL);
+        if (!queue->rx_skbuff)
+            return -ENOMEM;
+        else
+            netdev_dbg(bp->dev,
+                   "Allocated %d RX struct sk_buff entries at %p\n",
+                   bp->rx_ring_size, queue->rx_skbuff);
+    }
     return 0;
 }

 static int macb_alloc_rx_buffers(struct macb *bp)
 {
+    struct macb_queue *queue = &bp->queues[0];
     int size;

     size = bp->rx_ring_size * bp->rx_buffer_size;
-    bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
-                        &bp->rx_buffers_dma, GFP_KERNEL);
-    if (!bp->rx_buffers)
+    queue->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+                        &queue->rx_buffers_dma, GFP_KERNEL);
+    if (!queue->rx_buffers)
         return -ENOMEM;

     netdev_dbg(bp->dev,
            "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
-           size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+           size, (unsigned long)queue->rx_buffers_dma, queue->rx_buffers);
     return 0;
 }
@@ -1819,17 +1845,16 @@ static int macb_alloc_consistent(struct macb *bp)
         queue->tx_skb = kmalloc(size, GFP_KERNEL);
         if (!queue->tx_skb)
             goto out_err;
-    }

-    size = RX_RING_BYTES(bp);
-    bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
-                     &bp->rx_ring_dma, GFP_KERNEL);
-    if (!bp->rx_ring)
-        goto out_err;
-    netdev_dbg(bp->dev,
-           "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
-           size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+        size = RX_RING_BYTES(bp);
+        queue->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+                         &queue->rx_ring_dma, GFP_KERNEL);
+        if (!queue->rx_ring)
+            goto out_err;
+        netdev_dbg(bp->dev,
+               "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+               size, (unsigned long)queue->rx_ring_dma, queue->rx_ring);
+    }

     if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
         goto out_err;
@@ -1856,12 +1881,13 @@ static void gem_init_rings(struct macb *bp)
         desc->ctrl |= MACB_BIT(TX_WRAP);
         queue->tx_head = 0;
         queue->tx_tail = 0;
-    }

-    bp->rx_tail = 0;
-    bp->rx_prepared_head = 0;
+        queue->rx_tail = 0;
+        queue->rx_prepared_head = 0;

-    gem_rx_refill(bp);
+        gem_rx_refill(queue);
+    }
 }

 static void macb_init_rings(struct macb *bp)
@@ -1869,7 +1895,7 @@ static void macb_init_rings(struct macb *bp)
     int i;
     struct macb_dma_desc *desc = NULL;

-    macb_init_rx_ring(bp);
+    macb_init_rx_ring(&bp->queues[0]);

     for (i = 0; i < bp->tx_ring_size; i++) {
         desc = macb_tx_desc(&bp->queues[0], i);
*/ */
static void macb_configure_dma(struct macb *bp) static void macb_configure_dma(struct macb *bp)
{ {
struct macb_queue *queue;
u32 buffer_size;
unsigned int q;
u32 dmacfg; u32 dmacfg;
buffer_size = bp->rx_buffer_size / RX_BUFFER_MULTIPLE;
if (macb_is_gem(bp)) { if (macb_is_gem(bp)) {
dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L); dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE); for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
if (q)
queue_writel(queue, RBQS, buffer_size);
else
dmacfg |= GEM_BF(RXBS, buffer_size);
}
if (bp->dma_burst_length) if (bp->dma_burst_length)
dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg); dmacfg = GEM_BFINS(FBLDO, bp->dma_burst_length, dmacfg);
dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L); dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
@@ -2051,12 +2086,12 @@ static void macb_init_hw(struct macb *bp)
     macb_configure_dma(bp);

     /* Initialize TX and RX buffers */
-    macb_writel(bp, RBQP, lower_32_bits(bp->rx_ring_dma));
+    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+        queue_writel(queue, RBQP, lower_32_bits(queue->rx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
         if (bp->hw_dma_cap & HW_DMA_CAP_64B)
-            macb_writel(bp, RBQPH, upper_32_bits(bp->rx_ring_dma));
+            queue_writel(queue, RBQPH, upper_32_bits(queue->rx_ring_dma));
 #endif
-    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
         queue_writel(queue, TBQP, lower_32_bits(queue->tx_ring_dma));
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
         if (bp->hw_dma_cap & HW_DMA_CAP_64B)
@@ -2197,6 +2232,8 @@ static int macb_open(struct net_device *dev)
 {
     struct macb *bp = netdev_priv(dev);
     size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+    struct macb_queue *queue;
+    unsigned int q;
     int err;

     netdev_dbg(bp->dev, "open\n");
@@ -2218,11 +2255,12 @@ static int macb_open(struct net_device *dev)
         return err;
     }

-    napi_enable(&bp->napi);
-
     bp->macbgem_ops.mog_init_rings(bp);
     macb_init_hw(bp);

+    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+        napi_enable(&queue->napi);
+
     /* schedule a link state check */
     phy_start(dev->phydev);
@@ -2237,10 +2275,14 @@ static int macb_open(struct net_device *dev)
 static int macb_close(struct net_device *dev)
 {
     struct macb *bp = netdev_priv(dev);
+    struct macb_queue *queue;
     unsigned long flags;
+    unsigned int q;

     netif_tx_stop_all_queues(dev);
-    napi_disable(&bp->napi);
+
+    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+        napi_disable(&queue->napi);

     if (dev->phydev)
         phy_stop(dev->phydev);
@@ -2270,7 +2312,10 @@ static int macb_change_mtu(struct net_device *dev, int new_mtu)
 static void gem_update_stats(struct macb *bp)
 {
-    unsigned int i;
+    struct macb_queue *queue;
+    unsigned int i, q, idx;
+    unsigned long *stat;
+
     u32 *p = &bp->hw_stats.gem.tx_octets_31_0;

     for (i = 0; i < GEM_STATS_LEN; ++i, ++p) {
@@ -2287,6 +2332,11 @@ static void gem_update_stats(struct macb *bp)
             *(++p) += val;
         }
     }
+
+    idx = GEM_STATS_LEN;
+    for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue)
+        for (i = 0, stat = &queue->stats.first; i < QUEUE_STATS_LEN; ++i, ++stat)
+            bp->ethtool_stats[idx++] = *stat;
 }

 static struct net_device_stats *gem_get_stats(struct macb *bp)
@@ -2334,14 +2384,17 @@ static void gem_get_ethtool_stats(struct net_device *dev,
     bp = netdev_priv(dev);
     gem_update_stats(bp);
-    memcpy(data, &bp->ethtool_stats, sizeof(u64) * GEM_STATS_LEN);
+    memcpy(data, &bp->ethtool_stats, sizeof(u64)
+            * (GEM_STATS_LEN + QUEUE_STATS_LEN * MACB_MAX_QUEUES));
 }

 static int gem_get_sset_count(struct net_device *dev, int sset)
 {
+    struct macb *bp = netdev_priv(dev);
+
     switch (sset) {
     case ETH_SS_STATS:
-        return GEM_STATS_LEN;
+        return GEM_STATS_LEN + bp->num_queues * QUEUE_STATS_LEN;
     default:
         return -EOPNOTSUPP;
     }
@@ -2349,13 +2402,25 @@ static int gem_get_sset_count(struct net_device *dev, int sset)

 static void gem_get_ethtool_strings(struct net_device *dev, u32 sset, u8 *p)
 {
+    char stat_string[ETH_GSTRING_LEN];
+    struct macb *bp = netdev_priv(dev);
+    struct macb_queue *queue;
     unsigned int i;
+    unsigned int q;

     switch (sset) {
     case ETH_SS_STATS:
         for (i = 0; i < GEM_STATS_LEN; i++, p += ETH_GSTRING_LEN)
             memcpy(p, gem_statistics[i].stat_string,
                    ETH_GSTRING_LEN);
+
+        for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
+            for (i = 0; i < QUEUE_STATS_LEN; i++, p += ETH_GSTRING_LEN) {
+                snprintf(stat_string, ETH_GSTRING_LEN, "q%d_%s",
+                        q, queue_statistics[i].stat_string);
+                memcpy(p, stat_string, ETH_GSTRING_LEN);
+            }
+        }
         break;
     }
 }
@@ -2603,6 +2668,308 @@ static int macb_get_ts_info(struct net_device *netdev,
     return ethtool_op_get_ts_info(netdev, info);
 }

+static void gem_enable_flow_filters(struct macb *bp, bool enable)
+{
+    struct ethtool_rx_fs_item *item;
+    u32 t2_scr;
+    int num_t2_scr;
+
+    num_t2_scr = GEM_BFEXT(T2SCR, gem_readl(bp, DCFG8));
+
+    list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+        struct ethtool_rx_flow_spec *fs = &item->fs;
+        struct ethtool_tcpip4_spec *tp4sp_m;
+
+        if (fs->location >= num_t2_scr)
+            continue;
+
+        t2_scr = gem_readl_n(bp, SCRT2, fs->location);
+
+        /* enable/disable screener regs for the flow entry */
+        t2_scr = GEM_BFINS(ETHTEN, enable, t2_scr);
+
+        /* only enable fields with no masking */
+        tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+        if (enable && (tp4sp_m->ip4src == 0xFFFFFFFF))
+            t2_scr = GEM_BFINS(CMPAEN, 1, t2_scr);
+        else
+            t2_scr = GEM_BFINS(CMPAEN, 0, t2_scr);
+
+        if (enable && (tp4sp_m->ip4dst == 0xFFFFFFFF))
+            t2_scr = GEM_BFINS(CMPBEN, 1, t2_scr);
+        else
+            t2_scr = GEM_BFINS(CMPBEN, 0, t2_scr);
+
+        if (enable && ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)))
+            t2_scr = GEM_BFINS(CMPCEN, 1, t2_scr);
+        else
+            t2_scr = GEM_BFINS(CMPCEN, 0, t2_scr);
+
+        gem_writel_n(bp, SCRT2, fs->location, t2_scr);
+    }
+}

+static void gem_prog_cmp_regs(struct macb *bp, struct ethtool_rx_flow_spec *fs)
+{
+    struct ethtool_tcpip4_spec *tp4sp_v, *tp4sp_m;
+    uint16_t index = fs->location;
+    u32 w0, w1, t2_scr;
+    bool cmp_a = false;
+    bool cmp_b = false;
+    bool cmp_c = false;
+
+    tp4sp_v = &(fs->h_u.tcp_ip4_spec);
+    tp4sp_m = &(fs->m_u.tcp_ip4_spec);
+
+    /* ignore field if any masking set */
+    if (tp4sp_m->ip4src == 0xFFFFFFFF) {
+        /* 1st compare reg - IP source address */
+        w0 = 0;
+        w1 = 0;
+        w0 = tp4sp_v->ip4src;
+        w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+        w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+        w1 = GEM_BFINS(T2OFST, ETYPE_SRCIP_OFFSET, w1);
+        gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w0);
+        gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4SRC_CMP(index)), w1);
+        cmp_a = true;
+    }
+
+    /* ignore field if any masking set */
+    if (tp4sp_m->ip4dst == 0xFFFFFFFF) {
+        /* 2nd compare reg - IP destination address */
+        w0 = 0;
+        w1 = 0;
+        w0 = tp4sp_v->ip4dst;
+        w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+        w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_ETYPE, w1);
+        w1 = GEM_BFINS(T2OFST, ETYPE_DSTIP_OFFSET, w1);
+        gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_IP4DST_CMP(index)), w0);
+        gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_IP4DST_CMP(index)), w1);
+        cmp_b = true;
+    }
+
+    /* ignore both port fields if masking set in both */
+    if ((tp4sp_m->psrc == 0xFFFF) || (tp4sp_m->pdst == 0xFFFF)) {
+        /* 3rd compare reg - source port, destination port */
+        w0 = 0;
+        w1 = 0;
+        w1 = GEM_BFINS(T2CMPOFST, GEM_T2COMPOFST_IPHDR, w1);
+        if (tp4sp_m->psrc == tp4sp_m->pdst) {
+            w0 = GEM_BFINS(T2MASK, tp4sp_v->psrc, w0);
+            w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+            w1 = GEM_BFINS(T2DISMSK, 1, w1); /* 32-bit compare */
+            w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+        } else {
+            /* only one port definition */
+            w1 = GEM_BFINS(T2DISMSK, 0, w1); /* 16-bit compare */
+            w0 = GEM_BFINS(T2MASK, 0xFFFF, w0);
+            if (tp4sp_m->psrc == 0xFFFF) { /* src port */
+                w0 = GEM_BFINS(T2CMP, tp4sp_v->psrc, w0);
+                w1 = GEM_BFINS(T2OFST, IPHDR_SRCPORT_OFFSET, w1);
+            } else { /* dst port */
+                w0 = GEM_BFINS(T2CMP, tp4sp_v->pdst, w0);
+                w1 = GEM_BFINS(T2OFST, IPHDR_DSTPORT_OFFSET, w1);
+            }
+        }
+        gem_writel_n(bp, T2CMPW0, T2CMP_OFST(GEM_PORT_CMP(index)), w0);
+        gem_writel_n(bp, T2CMPW1, T2CMP_OFST(GEM_PORT_CMP(index)), w1);
+        cmp_c = true;
+    }
+
+    t2_scr = 0;
+    t2_scr = GEM_BFINS(QUEUE, (fs->ring_cookie) & 0xFF, t2_scr);
+    t2_scr = GEM_BFINS(ETHT2IDX, SCRT2_ETHT, t2_scr);
+    if (cmp_a)
+        t2_scr = GEM_BFINS(CMPA, GEM_IP4SRC_CMP(index), t2_scr);
+    if (cmp_b)
+        t2_scr = GEM_BFINS(CMPB, GEM_IP4DST_CMP(index), t2_scr);
+    if (cmp_c)
+        t2_scr = GEM_BFINS(CMPC, GEM_PORT_CMP(index), t2_scr);
+    gem_writel_n(bp, SCRT2, index, t2_scr);
+}
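
To make the last step concrete: the screener word packs the target queue, the EtherType slot, and the compare-register indices into one SCRT2 register. A hedged standalone re-computation using the bit offsets from the header hunk (rule values are hypothetical; note the CMPxEN enable bits are set separately, by gem_enable_flow_filters()):

    #include <stdio.h>
    #include <stdint.h>

    /* offsets copied from the SCRT2 bitfield block in macb.h above */
    #define QUEUE_OFFSET    0
    #define ETHT2IDX_OFFSET 9
    #define CMPA_OFFSET     13
    #define CMPC_OFFSET     25

    int main(void)
    {
        /* hypothetical rule: queue 2, EtherType slot 0,
         * Compare A at index 0, Compare C at index 2
         */
        uint32_t t2_scr = 0;

        t2_scr |= 2u << QUEUE_OFFSET;
        t2_scr |= 0u << ETHT2IDX_OFFSET;
        t2_scr |= 0u << CMPA_OFFSET;
        t2_scr |= 2u << CMPC_OFFSET;
        printf("SCRT2 word = 0x%08x\n", t2_scr); /* 0x04000002 */
        return 0;
    }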

+static int gem_add_flow_filter(struct net_device *netdev,
+        struct ethtool_rxnfc *cmd)
+{
+    struct macb *bp = netdev_priv(netdev);
+    struct ethtool_rx_flow_spec *fs = &cmd->fs;
+    struct ethtool_rx_fs_item *item, *newfs;
+    int ret = -EINVAL;
+    bool added = false;
+
+    newfs = kmalloc(sizeof(*newfs), GFP_KERNEL);
+    if (newfs == NULL)
+        return -ENOMEM;
+    memcpy(&newfs->fs, fs, sizeof(newfs->fs));
+
+    netdev_dbg(netdev,
+            "Adding flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+            fs->flow_type, (int)fs->ring_cookie, fs->location,
+            htonl(fs->h_u.tcp_ip4_spec.ip4src),
+            htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+            htons(fs->h_u.tcp_ip4_spec.psrc), htons(fs->h_u.tcp_ip4_spec.pdst));
+
+    /* find correct place to add in list */
+    if (list_empty(&bp->rx_fs_list.list))
+        list_add(&newfs->list, &bp->rx_fs_list.list);
+    else {
+        list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+            if (item->fs.location > newfs->fs.location) {
+                list_add_tail(&newfs->list, &item->list);
+                added = true;
+                break;
+            } else if (item->fs.location == fs->location) {
+                netdev_err(netdev, "Rule not added: location %d not free!\n",
+                        fs->location);
+                ret = -EBUSY;
+                goto err;
+            }
+        }
+        if (!added)
+            list_add_tail(&newfs->list, &bp->rx_fs_list.list);
+    }
+
+    gem_prog_cmp_regs(bp, fs);
+    bp->rx_fs_list.count++;
+    /* enable filtering if NTUPLE on */
+    if (netdev->features & NETIF_F_NTUPLE)
+        gem_enable_flow_filters(bp, 1);
+    return 0;
+
+err:
+    kfree(newfs);
+    return ret;
+}

+static int gem_del_flow_filter(struct net_device *netdev,
+        struct ethtool_rxnfc *cmd)
+{
+    struct macb *bp = netdev_priv(netdev);
+    struct ethtool_rx_fs_item *item;
+    struct ethtool_rx_flow_spec *fs;
+
+    if (list_empty(&bp->rx_fs_list.list))
+        return -EINVAL;
+
+    list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+        if (item->fs.location == cmd->fs.location) {
+            /* disable screener regs for the flow entry */
+            fs = &(item->fs);
+            netdev_dbg(netdev,
+                    "Deleting flow filter entry,type=%u,queue=%u,loc=%u,src=%08X,dst=%08X,ps=%u,pd=%u\n",
+                    fs->flow_type, (int)fs->ring_cookie, fs->location,
+                    htonl(fs->h_u.tcp_ip4_spec.ip4src),
+                    htonl(fs->h_u.tcp_ip4_spec.ip4dst),
+                    htons(fs->h_u.tcp_ip4_spec.psrc),
+                    htons(fs->h_u.tcp_ip4_spec.pdst));
+
+            gem_writel_n(bp, SCRT2, fs->location, 0);
+
+            list_del(&item->list);
+            kfree(item);
+            bp->rx_fs_list.count--;
+            return 0;
+        }
+    }
+
+    return -EINVAL;
+}

+static int gem_get_flow_entry(struct net_device *netdev,
+        struct ethtool_rxnfc *cmd)
+{
+    struct macb *bp = netdev_priv(netdev);
+    struct ethtool_rx_fs_item *item;
+
+    list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+        if (item->fs.location == cmd->fs.location) {
+            memcpy(&cmd->fs, &item->fs, sizeof(cmd->fs));
+            return 0;
+        }
+    }
+    return -EINVAL;
+}
+
+static int gem_get_all_flow_entries(struct net_device *netdev,
+        struct ethtool_rxnfc *cmd, u32 *rule_locs)
+{
+    struct macb *bp = netdev_priv(netdev);
+    struct ethtool_rx_fs_item *item;
+    uint32_t cnt = 0;
+
+    list_for_each_entry(item, &bp->rx_fs_list.list, list) {
+        if (cnt == cmd->rule_cnt)
+            return -EMSGSIZE;
+        rule_locs[cnt] = item->fs.location;
+        cnt++;
+    }
+    cmd->data = bp->max_tuples;
+    cmd->rule_cnt = cnt;
+
+    return 0;
+}

+static int gem_get_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd,
+        u32 *rule_locs)
+{
+    struct macb *bp = netdev_priv(netdev);
+    int ret = 0;
+
+    switch (cmd->cmd) {
+    case ETHTOOL_GRXRINGS:
+        cmd->data = bp->num_queues;
+        break;
+    case ETHTOOL_GRXCLSRLCNT:
+        cmd->rule_cnt = bp->rx_fs_list.count;
+        break;
+    case ETHTOOL_GRXCLSRULE:
+        ret = gem_get_flow_entry(netdev, cmd);
+        break;
+    case ETHTOOL_GRXCLSRLALL:
+        ret = gem_get_all_flow_entries(netdev, cmd, rule_locs);
+        break;
+    default:
+        netdev_err(netdev,
+              "Command parameter %d is not supported\n", cmd->cmd);
+        ret = -EOPNOTSUPP;
+    }
+
+    return ret;
+}

+static int gem_set_rxnfc(struct net_device *netdev, struct ethtool_rxnfc *cmd)
+{
+    struct macb *bp = netdev_priv(netdev);
+    unsigned long flags;
+    int ret;
+
+    spin_lock_irqsave(&bp->rx_fs_lock, flags);
+
+    switch (cmd->cmd) {
+    case ETHTOOL_SRXCLSRLINS:
+        if ((cmd->fs.location >= bp->max_tuples)
+                || (cmd->fs.ring_cookie >= bp->num_queues)) {
+            ret = -EINVAL;
+            break;
+        }
+        ret = gem_add_flow_filter(netdev, cmd);
+        break;
+    case ETHTOOL_SRXCLSRLDEL:
+        ret = gem_del_flow_filter(netdev, cmd);
+        break;
+    default:
+        netdev_err(netdev,
+              "Command parameter %d is not supported\n", cmd->cmd);
+        ret = -EOPNOTSUPP;
+    }
+
+    spin_unlock_irqrestore(&bp->rx_fs_lock, flags);
+    return ret;
+}

 static const struct ethtool_ops macb_ethtool_ops = {
     .get_regs_len       = macb_get_regs_len,
     .get_regs           = macb_get_regs,
@@ -2628,6 +2995,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
     .set_link_ksettings = phy_ethtool_set_link_ksettings,
     .get_ringparam      = macb_get_ringparam,
     .set_ringparam      = macb_set_ringparam,
+    .get_rxnfc          = gem_get_rxnfc,
+    .set_rxnfc          = gem_set_rxnfc,
 };

 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2685,6 +3054,12 @@ static int macb_set_features(struct net_device *netdev,
         gem_writel(bp, NCFGR, netcfg);
     }

+    /* RX Flow Filters */
+    if ((changed & NETIF_F_NTUPLE) && macb_is_gem(bp)) {
+        bool turn_on = features & NETIF_F_NTUPLE;
+
+        gem_enable_flow_filters(bp, turn_on);
+    }
     return 0;
 }
@@ -2850,7 +3225,7 @@ static int macb_init(struct platform_device *pdev)
     struct macb *bp = netdev_priv(dev);
     struct macb_queue *queue;
     int err;
-    u32 val;
+    u32 val, reg;

     bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
     bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
@@ -2865,15 +3240,20 @@ static int macb_init(struct platform_device *pdev)
         queue = &bp->queues[q];
         queue->bp = bp;
+        netif_napi_add(dev, &queue->napi, macb_poll, 64);
         if (hw_q) {
             queue->ISR  = GEM_ISR(hw_q - 1);
             queue->IER  = GEM_IER(hw_q - 1);
             queue->IDR  = GEM_IDR(hw_q - 1);
             queue->IMR  = GEM_IMR(hw_q - 1);
             queue->TBQP = GEM_TBQP(hw_q - 1);
+            queue->RBQP = GEM_RBQP(hw_q - 1);
+            queue->RBQS = GEM_RBQS(hw_q - 1);
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-            if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+            if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
                 queue->TBQPH = GEM_TBQPH(hw_q - 1);
+                queue->RBQPH = GEM_RBQPH(hw_q - 1);
+            }
 #endif
         } else {
             /* queue0 uses legacy registers */
@@ -2882,9 +3262,12 @@ static int macb_init(struct platform_device *pdev)
             queue->IDR  = MACB_IDR;
             queue->IMR  = MACB_IMR;
             queue->TBQP = MACB_TBQP;
+            queue->RBQP = MACB_RBQP;
 #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
-            if (bp->hw_dma_cap & HW_DMA_CAP_64B)
+            if (bp->hw_dma_cap & HW_DMA_CAP_64B) {
                 queue->TBQPH = MACB_TBQPH;
+                queue->RBQPH = MACB_RBQPH;
+            }
 #endif
         }
@@ -2908,7 +3291,6 @@ static int macb_init(struct platform_device *pdev)
     }

     dev->netdev_ops = &macb_netdev_ops;
-    netif_napi_add(dev, &bp->napi, macb_poll, 64);

     /* setup appropriated routines according to adapter type */
     if (macb_is_gem(bp)) {
@@ -2941,6 +3323,30 @@ static int macb_init(struct platform_device *pdev)
         dev->hw_features &= ~NETIF_F_SG;
     dev->features = dev->hw_features;

+    /* Check RX Flow Filters support.
+     * Max Rx flows set by availability of screeners & compare regs:
+     * each 4-tuple define requires 1 T2 screener reg + 3 compare regs
+     */
+    reg = gem_readl(bp, DCFG8);
+    bp->max_tuples = min((GEM_BFEXT(SCR2CMP, reg) / 3),
+            GEM_BFEXT(T2SCR, reg));
+    if (bp->max_tuples > 0) {
+        /* also needs one ethtype match to check IPv4 */
+        if (GEM_BFEXT(SCR2ETH, reg) > 0) {
+            /* program this reg now */
+            reg = 0;
+            reg = GEM_BFINS(ETHTCMP, (uint16_t)ETH_P_IP, reg);
+            gem_writel_n(bp, ETHT, SCRT2_ETHT, reg);
+            /* Filtering is supported in hw but don't enable it in kernel now */
+            dev->hw_features |= NETIF_F_NTUPLE;
+            /* init Rx flow definitions */
+            INIT_LIST_HEAD(&bp->rx_fs_list.list);
+            bp->rx_fs_list.count = 0;
+            spin_lock_init(&bp->rx_fs_lock);
+        } else
+            bp->max_tuples = 0;
+    }
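
For a sense of the capability arithmetic above: if DCFG8 reported, say, 32 type 2 compare registers and 16 screeners, then max_tuples = min(32 / 3, 16) = 10 four-tuple rules. A standalone check with those hypothetical values:

    #include <stdio.h>

    #define min(a, b) ((a) < (b) ? (a) : (b))

    int main(void)
    {
        /* hypothetical DCFG8 capability read-outs */
        unsigned int scr2cmp = 32; /* type 2 compare registers */
        unsigned int t2scr = 16;   /* type 2 screeners */

        /* each 4-tuple rule needs 1 screener + 3 compare registers */
        unsigned int max_tuples = min(scr2cmp / 3, t2scr);
        printf("max_tuples = %u\n", max_tuples); /* prints 10 */
        return 0;
    }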

     if (!(bp->caps & MACB_CAPS_USRIO_DISABLED)) {
         val = 0;
         if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
@@ -2977,34 +3383,35 @@ static int macb_init(struct platform_device *pdev)

 static int at91ether_start(struct net_device *dev)
 {
     struct macb *lp = netdev_priv(dev);
+    struct macb_queue *q = &lp->queues[0];
     struct macb_dma_desc *desc;
     dma_addr_t addr;
     u32 ctl;
     int i;

-    lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+    q->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
                      (AT91ETHER_MAX_RX_DESCR *
                       macb_dma_desc_get_size(lp)),
-                     &lp->rx_ring_dma, GFP_KERNEL);
-    if (!lp->rx_ring)
+                     &q->rx_ring_dma, GFP_KERNEL);
+    if (!q->rx_ring)
         return -ENOMEM;

-    lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+    q->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
                         AT91ETHER_MAX_RX_DESCR *
                         AT91ETHER_MAX_RBUFF_SZ,
-                        &lp->rx_buffers_dma, GFP_KERNEL);
-    if (!lp->rx_buffers) {
+                        &q->rx_buffers_dma, GFP_KERNEL);
+    if (!q->rx_buffers) {
         dma_free_coherent(&lp->pdev->dev,
                   AT91ETHER_MAX_RX_DESCR *
                   macb_dma_desc_get_size(lp),
-                  lp->rx_ring, lp->rx_ring_dma);
-        lp->rx_ring = NULL;
+                  q->rx_ring, q->rx_ring_dma);
+        q->rx_ring = NULL;
         return -ENOMEM;
     }

-    addr = lp->rx_buffers_dma;
+    addr = q->rx_buffers_dma;
     for (i = 0; i < AT91ETHER_MAX_RX_DESCR; i++) {
-        desc = macb_rx_desc(lp, i);
+        desc = macb_rx_desc(q, i);
         macb_set_addr(lp, desc, addr);
         desc->ctrl = 0;
         addr += AT91ETHER_MAX_RBUFF_SZ;
@@ -3014,10 +3421,10 @@ static int at91ether_start(struct net_device *dev)
     desc->addr |= MACB_BIT(RX_WRAP);

     /* Reset buffer index */
-    lp->rx_tail = 0;
+    q->rx_tail = 0;

     /* Program address of descriptor list in Rx Buffer Queue register */
-    macb_writel(lp, RBQP, lp->rx_ring_dma);
+    macb_writel(lp, RBQP, q->rx_ring_dma);

     /* Enable Receive and Transmit */
     ctl = macb_readl(lp, NCR);
@@ -3064,6 +3471,7 @@ static int at91ether_open(struct net_device *dev)
 static int at91ether_close(struct net_device *dev)
 {
     struct macb *lp = netdev_priv(dev);
+    struct macb_queue *q = &lp->queues[0];
     u32 ctl;

     /* Disable Receiver and Transmitter */
@@ -3084,13 +3492,13 @@ static int at91ether_close(struct net_device *dev)
     dma_free_coherent(&lp->pdev->dev,
               AT91ETHER_MAX_RX_DESCR *
               macb_dma_desc_get_size(lp),
-              lp->rx_ring, lp->rx_ring_dma);
-    lp->rx_ring = NULL;
+              q->rx_ring, q->rx_ring_dma);
+    q->rx_ring = NULL;

     dma_free_coherent(&lp->pdev->dev,
               AT91ETHER_MAX_RX_DESCR * AT91ETHER_MAX_RBUFF_SZ,
-              lp->rx_buffers, lp->rx_buffers_dma);
-    lp->rx_buffers = NULL;
+              q->rx_buffers, q->rx_buffers_dma);
+    q->rx_buffers = NULL;

     return 0;
 }
@@ -3134,14 +3542,15 @@ static int at91ether_start_xmit(struct sk_buff *skb, struct net_device *dev)
 static void at91ether_rx(struct net_device *dev)
 {
     struct macb *lp = netdev_priv(dev);
+    struct macb_queue *q = &lp->queues[0];
     struct macb_dma_desc *desc;
     unsigned char *p_recv;
     struct sk_buff *skb;
     unsigned int pktlen;

-    desc = macb_rx_desc(lp, lp->rx_tail);
+    desc = macb_rx_desc(q, q->rx_tail);
     while (desc->addr & MACB_BIT(RX_USED)) {
-        p_recv = lp->rx_buffers + lp->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
+        p_recv = q->rx_buffers + q->rx_tail * AT91ETHER_MAX_RBUFF_SZ;
         pktlen = MACB_BF(RX_FRMLEN, desc->ctrl);
         skb = netdev_alloc_skb(dev, pktlen + 2);
         if (skb) {
@@ -3163,12 +3572,12 @@ static void at91ether_rx(struct net_device *dev)
         desc->addr &= ~MACB_BIT(RX_USED);

         /* wrap after last buffer */
-        if (lp->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
-            lp->rx_tail = 0;
+        if (q->rx_tail == AT91ETHER_MAX_RX_DESCR - 1)
+            q->rx_tail = 0;
         else
-            lp->rx_tail++;
+            q->rx_tail++;

-        desc = macb_rx_desc(lp, lp->rx_tail);
+        desc = macb_rx_desc(q, q->rx_tail);
     }
 }
...