Commit 7b1536ef authored by David S. Miller

Merge branch 'macb-ethtool-ringparam'

Zach Brown says:

====================
macb: Add ethtool get_ringparam and set_ringparam to cadence

There are use cases like RT that would benefit from being able to tune the
macb rx/tx ring sizes. The ethtool set_ringparam function is the standard way
of doing so.

The first patch changes the hardcoded tx/rx ring sizes to variables that are
set to a hardcoded default.

The second patch implements the get_ringparam and set_ringparam functions.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents 0f6e8761 8441bb33
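
The series wires the driver into ethtool's standard ring-size interface, the path exercised by "ethtool -g <dev>" and "ethtool -G <dev> rx N tx N". Below is a minimal userspace sketch of the same query through the SIOCETHTOOL ioctl; "eth0" is a placeholder interface name and error handling is elided. On this driver, after the series, rx_max_pending reports 8192 and tx_max_pending 4096.

    /* Minimal sketch: query ring sizes the way "ethtool -g" does.
     * Assumes a Linux host; "eth0" is a placeholder interface name.
     */
    #include <stdio.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <sys/socket.h>
    #include <net/if.h>
    #include <linux/ethtool.h>
    #include <linux/sockios.h>

    int main(void)
    {
            struct ethtool_ringparam ring = { .cmd = ETHTOOL_GRINGPARAM };
            struct ifreq ifr;
            int fd = socket(AF_INET, SOCK_DGRAM, 0);

            memset(&ifr, 0, sizeof(ifr));
            strncpy(ifr.ifr_name, "eth0", IFNAMSIZ - 1);
            ifr.ifr_data = (char *)&ring;

            if (fd >= 0 && ioctl(fd, SIOCETHTOOL, &ifr) == 0)
                    printf("rx %u (max %u), tx %u (max %u)\n",
                           ring.rx_pending, ring.rx_max_pending,
                           ring.tx_pending, ring.tx_max_pending);
            return 0;
    }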
--- a/drivers/net/ethernet/cadence/macb.c
+++ b/drivers/net/ethernet/cadence/macb.c
@@ -37,14 +37,21 @@
 #define MACB_RX_BUFFER_SIZE	128
 #define RX_BUFFER_MULTIPLE	64	/* bytes */
 
-#define RX_RING_SIZE		512 /* must be power of 2 */
-#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
-#define TX_RING_SIZE		128 /* must be power of 2 */
-#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+#define DEFAULT_RX_RING_SIZE	512 /* must be power of 2 */
+#define MIN_RX_RING_SIZE	64
+#define MAX_RX_RING_SIZE	8192
+#define RX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc) \
+				 * (bp)->rx_ring_size)
+
+#define DEFAULT_TX_RING_SIZE	512 /* must be power of 2 */
+#define MIN_TX_RING_SIZE	64
+#define MAX_TX_RING_SIZE	4096
+#define TX_RING_BYTES(bp)	(sizeof(struct macb_dma_desc) \
+				 * (bp)->tx_ring_size)
 
 /* level of occupied TX descriptors under which we wake up TX process */
-#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
+#define MACB_TX_WAKEUP_THRESH(bp)	(3 * (bp)->tx_ring_size / 4)
 
 #define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR) \
 				 | MACB_BIT(ISR_ROVR))
@@ -67,45 +74,47 @@
 #define MACB_HALT_TIMEOUT	1230
 
 /* Ring buffer accessors */
-static unsigned int macb_tx_ring_wrap(unsigned int index)
+static unsigned int macb_tx_ring_wrap(struct macb *bp, unsigned int index)
 {
-	return index & (TX_RING_SIZE - 1);
+	return index & (bp->tx_ring_size - 1);
 }
 
 static struct macb_dma_desc *macb_tx_desc(struct macb_queue *queue,
 					  unsigned int index)
 {
-	return &queue->tx_ring[macb_tx_ring_wrap(index)];
+	return &queue->tx_ring[macb_tx_ring_wrap(queue->bp, index)];
 }
 
 static struct macb_tx_skb *macb_tx_skb(struct macb_queue *queue,
 				       unsigned int index)
 {
-	return &queue->tx_skb[macb_tx_ring_wrap(index)];
+	return &queue->tx_skb[macb_tx_ring_wrap(queue->bp, index)];
 }
 
 static dma_addr_t macb_tx_dma(struct macb_queue *queue, unsigned int index)
 {
 	dma_addr_t offset;
 
-	offset = macb_tx_ring_wrap(index) * sizeof(struct macb_dma_desc);
+	offset = macb_tx_ring_wrap(queue->bp, index) *
+		 sizeof(struct macb_dma_desc);
 
 	return queue->tx_ring_dma + offset;
 }
 
-static unsigned int macb_rx_ring_wrap(unsigned int index)
+static unsigned int macb_rx_ring_wrap(struct macb *bp, unsigned int index)
 {
-	return index & (RX_RING_SIZE - 1);
+	return index & (bp->rx_ring_size - 1);
 }
 
 static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
 {
-	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+	return &bp->rx_ring[macb_rx_ring_wrap(bp, index)];
 }
 
 static void *macb_rx_buffer(struct macb *bp, unsigned int index)
 {
-	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+	return bp->rx_buffers + bp->rx_buffer_size *
+	       macb_rx_ring_wrap(bp, index);
 }
 
 /* I/O accessors */
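
The accessors above are why the ring sizes must stay powers of two: index & (size - 1) is only equivalent to index % size when size is a power of two, which is why the new macb_set_ringparam() further down rounds requests up with roundup_pow_of_two(). A standalone sketch (not driver code) of the property:

    #include <assert.h>

    /* Mask-based wrap as in macb_tx_ring_wrap()/macb_rx_ring_wrap();
     * valid only when size is a power of two.
     */
    static unsigned int ring_wrap(unsigned int index, unsigned int size)
    {
            return index & (size - 1);
    }

    int main(void)
    {
            assert(ring_wrap(513, 512) == 1);     /* == 513 % 512 */
            assert(ring_wrap(1023, 512) == 511);  /* == 1023 % 512 */
            assert(ring_wrap(500, 384) == 372);   /* != 500 % 384 == 116 */
            return 0;
    }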
@@ -608,7 +617,8 @@ static void macb_tx_error_task(struct work_struct *work)
 			 */
 			if (!(ctrl & MACB_BIT(TX_BUF_EXHAUSTED))) {
 				netdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
-					    macb_tx_ring_wrap(tail), skb->data);
+					    macb_tx_ring_wrap(bp, tail),
+					    skb->data);
 				bp->stats.tx_packets++;
 				bp->stats.tx_bytes += skb->len;
 			}
@@ -700,7 +710,8 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 		/* First, update TX stats if needed */
 		if (skb) {
 			netdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
-				    macb_tx_ring_wrap(tail), skb->data);
+				    macb_tx_ring_wrap(bp, tail),
+				    skb->data);
 			bp->stats.tx_packets++;
 			bp->stats.tx_bytes += skb->len;
 		}
@@ -720,7 +731,7 @@ static void macb_tx_interrupt(struct macb_queue *queue)
 	queue->tx_tail = tail;
 	if (__netif_subqueue_stopped(bp->dev, queue_index) &&
 	    CIRC_CNT(queue->tx_head, queue->tx_tail,
-		     TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+		     bp->tx_ring_size) <= MACB_TX_WAKEUP_THRESH(bp))
 		netif_wake_subqueue(bp->dev, queue_index);
 }
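
CIRC_CNT() and CIRC_SPACE() come from <linux/circ_buf.h> and likewise depend on a power-of-two size. With the default tx_ring_size of 512, MACB_TX_WAKEUP_THRESH(bp) evaluates to 384, so the subqueue is woken once no more than 384 descriptors remain in flight. A standalone sketch, with the macro definitions copied from the kernel header:

    #include <stdio.h>

    /* Definitions as in <linux/circ_buf.h>; size must be a power of two.
     * CIRC_SPACE() keeps one slot empty to distinguish full from empty.
     */
    #define CIRC_CNT(head, tail, size)   (((head) - (tail)) & ((size) - 1))
    #define CIRC_SPACE(head, tail, size) CIRC_CNT((tail), ((head) + 1), (size))

    int main(void)
    {
            unsigned int head = 700, tail = 650, size = 512;

            /* prints "cnt=50 space=461": 50 in flight, 461 free */
            printf("cnt=%u space=%u\n",
                   CIRC_CNT(head, tail, size), CIRC_SPACE(head, tail, size));
            return 0;
    }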
@@ -731,8 +742,8 @@ static void gem_rx_refill(struct macb *bp)
 	dma_addr_t paddr;
 
 	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail,
-			  RX_RING_SIZE) > 0) {
-		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+			  bp->rx_ring_size) > 0) {
+		entry = macb_rx_ring_wrap(bp, bp->rx_prepared_head);
 
 		/* Make hw descriptor updates visible to CPU */
 		rmb();
@@ -759,7 +770,7 @@ static void gem_rx_refill(struct macb *bp)
 			bp->rx_skbuff[entry] = skb;
 
-			if (entry == RX_RING_SIZE - 1)
+			if (entry == bp->rx_ring_size - 1)
 				paddr |= MACB_BIT(RX_WRAP);
 			macb_set_addr(&(bp->rx_ring[entry]), paddr);
 			bp->rx_ring[entry].ctrl = 0;
@@ -813,7 +824,7 @@ static int gem_rx(struct macb *bp, int budget)
 		dma_addr_t addr;
 		bool rxused;
 
-		entry = macb_rx_ring_wrap(bp->rx_tail);
+		entry = macb_rx_ring_wrap(bp, bp->rx_tail);
 		desc = &bp->rx_ring[entry];
 
 		/* Make hw descriptor updates visible to CPU */
@@ -895,8 +906,8 @@ static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
 	len = desc->ctrl & bp->rx_frm_len_mask;
 
 	netdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
-		    macb_rx_ring_wrap(first_frag),
-		    macb_rx_ring_wrap(last_frag), len);
+		    macb_rx_ring_wrap(bp, first_frag),
+		    macb_rx_ring_wrap(bp, last_frag), len);
 
 	/* The ethernet header starts NET_IP_ALIGN bytes into the
 	 * first buffer. Since the header is 14 bytes, this makes the
@@ -969,12 +980,12 @@ static inline void macb_init_rx_ring(struct macb *bp)
 	int i;
 
 	addr = bp->rx_buffers_dma;
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		bp->rx_ring[i].addr = addr;
 		bp->rx_ring[i].ctrl = 0;
 		addr += bp->rx_buffer_size;
 	}
-	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+	bp->rx_ring[bp->rx_ring_size - 1].addr |= MACB_BIT(RX_WRAP);
 }
 
 static int macb_rx(struct macb *bp, int budget)
@@ -1228,7 +1239,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 	offset = 0;
 	while (len) {
 		size = min(len, bp->max_tx_length);
-		entry = macb_tx_ring_wrap(tx_head);
+		entry = macb_tx_ring_wrap(bp, tx_head);
 		tx_skb = &queue->tx_skb[entry];
 
 		mapping = dma_map_single(&bp->pdev->dev,
@@ -1257,7 +1268,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 		offset = 0;
 		while (len) {
 			size = min(len, bp->max_tx_length);
-			entry = macb_tx_ring_wrap(tx_head);
+			entry = macb_tx_ring_wrap(bp, tx_head);
 			tx_skb = &queue->tx_skb[entry];
 
 			mapping = skb_frag_dma_map(&bp->pdev->dev, frag,
@@ -1295,14 +1306,14 @@ static unsigned int macb_tx_map(struct macb *bp,
 	 * to set the end of TX queue
 	 */
 	i = tx_head;
-	entry = macb_tx_ring_wrap(i);
+	entry = macb_tx_ring_wrap(bp, i);
 	ctrl = MACB_BIT(TX_USED);
 	desc = &queue->tx_ring[entry];
 	desc->ctrl = ctrl;
 
 	do {
 		i--;
-		entry = macb_tx_ring_wrap(i);
+		entry = macb_tx_ring_wrap(bp, i);
 		tx_skb = &queue->tx_skb[entry];
 		desc = &queue->tx_ring[entry];
@@ -1311,7 +1322,7 @@ static unsigned int macb_tx_map(struct macb *bp,
 			ctrl |= MACB_BIT(TX_LAST);
 			eof = 0;
 		}
 
-		if (unlikely(entry == (TX_RING_SIZE - 1)))
+		if (unlikely(entry == (bp->tx_ring_size - 1)))
 			ctrl |= MACB_BIT(TX_WRAP);
 
 		/* Set TX buffer descriptor */
@@ -1388,7 +1399,8 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	spin_lock_irqsave(&bp->lock, flags);
 
 	/* This is a hard error, log it. */
-	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < count) {
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail,
+		       bp->tx_ring_size) < count) {
 		netif_stop_subqueue(dev, queue_index);
 		spin_unlock_irqrestore(&bp->lock, flags);
 		netdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
@@ -1414,7 +1426,7 @@ static int macb_start_xmit(struct sk_buff *skb, struct net_device *dev)
 	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
 
-	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, TX_RING_SIZE) < 1)
+	if (CIRC_SPACE(queue->tx_head, queue->tx_tail, bp->tx_ring_size) < 1)
 		netif_stop_subqueue(dev, queue_index);
 
 unlock:
@@ -1453,7 +1465,7 @@ static void gem_free_rx_buffers(struct macb *bp)
 	if (!bp->rx_skbuff)
 		return;
 
-	for (i = 0; i < RX_RING_SIZE; i++) {
+	for (i = 0; i < bp->rx_ring_size; i++) {
 		skb = bp->rx_skbuff[i];
 
 		if (!skb)
@@ -1478,7 +1490,7 @@ static void macb_free_rx_buffers(struct macb *bp)
 {
 	if (bp->rx_buffers) {
 		dma_free_coherent(&bp->pdev->dev,
-				  RX_RING_SIZE * bp->rx_buffer_size,
+				  bp->rx_ring_size * bp->rx_buffer_size,
 				  bp->rx_buffers, bp->rx_buffers_dma);
 		bp->rx_buffers = NULL;
 	}
@@ -1491,7 +1503,7 @@ static void macb_free_consistent(struct macb *bp)
 	bp->macbgem_ops.mog_free_rx_buffers(bp);
 	if (bp->rx_ring) {
-		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES(bp),
 				  bp->rx_ring, bp->rx_ring_dma);
 		bp->rx_ring = NULL;
 	}
@@ -1500,7 +1512,7 @@ static void macb_free_consistent(struct macb *bp)
 		kfree(queue->tx_skb);
 		queue->tx_skb = NULL;
 		if (queue->tx_ring) {
-			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+			dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES(bp),
 					  queue->tx_ring, queue->tx_ring_dma);
 			queue->tx_ring = NULL;
 		}
@@ -1511,14 +1523,14 @@ static int gem_alloc_rx_buffers(struct macb *bp)
 {
 	int size;
 
-	size = RX_RING_SIZE * sizeof(struct sk_buff *);
+	size = bp->rx_ring_size * sizeof(struct sk_buff *);
 	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
 	if (!bp->rx_skbuff)
 		return -ENOMEM;
 	else
 		netdev_dbg(bp->dev,
 			   "Allocated %d RX struct sk_buff entries at %p\n",
-			   RX_RING_SIZE, bp->rx_skbuff);
+			   bp->rx_ring_size, bp->rx_skbuff);
 	return 0;
 }
@@ -1526,7 +1538,7 @@ static int macb_alloc_rx_buffers(struct macb *bp)
 {
 	int size;
 
-	size = RX_RING_SIZE * bp->rx_buffer_size;
+	size = bp->rx_ring_size * bp->rx_buffer_size;
 	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
 					    &bp->rx_buffers_dma, GFP_KERNEL);
 	if (!bp->rx_buffers)
@@ -1545,7 +1557,7 @@ static int macb_alloc_consistent(struct macb *bp)
 	int size;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		size = TX_RING_BYTES;
+		size = TX_RING_BYTES(bp);
 		queue->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 						    &queue->tx_ring_dma,
 						    GFP_KERNEL);
@@ -1556,13 +1568,13 @@ static int macb_alloc_consistent(struct macb *bp)
 			   q, size, (unsigned long)queue->tx_ring_dma,
 			   queue->tx_ring);
 
-		size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+		size = bp->tx_ring_size * sizeof(struct macb_tx_skb);
 		queue->tx_skb = kmalloc(size, GFP_KERNEL);
 		if (!queue->tx_skb)
 			goto out_err;
 	}
 
-	size = RX_RING_BYTES;
+	size = RX_RING_BYTES(bp);
 	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
 					 &bp->rx_ring_dma, GFP_KERNEL);
 	if (!bp->rx_ring)
@@ -1588,11 +1600,11 @@ static void gem_init_rings(struct macb *bp)
 	int i;
 
 	for (q = 0, queue = bp->queues; q < bp->num_queues; ++q, ++queue) {
-		for (i = 0; i < TX_RING_SIZE; i++) {
-			macb_set_addr(&(queue->tx_ring[i]), 0);
+		for (i = 0; i < bp->tx_ring_size; i++) {
+			queue->tx_ring[i].addr = 0;
 			queue->tx_ring[i].ctrl = MACB_BIT(TX_USED);
 		}
-		queue->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+		queue->tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
 		queue->tx_head = 0;
 		queue->tx_tail = 0;
 	}
@@ -1609,13 +1621,13 @@ static void macb_init_rings(struct macb *bp)
 
 	macb_init_rx_ring(bp);
 
-	for (i = 0; i < TX_RING_SIZE; i++) {
+	for (i = 0; i < bp->tx_ring_size; i++) {
 		bp->queues[0].tx_ring[i].addr = 0;
 		bp->queues[0].tx_ring[i].ctrl = MACB_BIT(TX_USED);
 	}
 	bp->queues[0].tx_head = 0;
 	bp->queues[0].tx_tail = 0;
-	bp->queues[0].tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+	bp->queues[0].tx_ring[bp->tx_ring_size - 1].ctrl |= MACB_BIT(TX_WRAP);
 
 	bp->rx_tail = 0;
 }
@@ -2148,8 +2160,8 @@ static void macb_get_regs(struct net_device *dev, struct ethtool_regs *regs,
 	regs->version = (macb_readl(bp, MID) & ((1 << MACB_REV_SIZE) - 1))
 			| MACB_GREGS_VERSION;
 
-	tail = macb_tx_ring_wrap(bp->queues[0].tx_tail);
-	head = macb_tx_ring_wrap(bp->queues[0].tx_head);
+	tail = macb_tx_ring_wrap(bp, bp->queues[0].tx_tail);
+	head = macb_tx_ring_wrap(bp, bp->queues[0].tx_head);
 
 	regs_buff[0] = macb_readl(bp, NCR);
 	regs_buff[1] = macb_or_gem_readl(bp, NCFGR);
@@ -2204,6 +2216,56 @@ static int macb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
 	return 0;
 }
 
+static void macb_get_ringparam(struct net_device *netdev,
+			       struct ethtool_ringparam *ring)
+{
+	struct macb *bp = netdev_priv(netdev);
+
+	ring->rx_max_pending = MAX_RX_RING_SIZE;
+	ring->tx_max_pending = MAX_TX_RING_SIZE;
+
+	ring->rx_pending = bp->rx_ring_size;
+	ring->tx_pending = bp->tx_ring_size;
+}
+
+static int macb_set_ringparam(struct net_device *netdev,
+			      struct ethtool_ringparam *ring)
+{
+	struct macb *bp = netdev_priv(netdev);
+	u32 new_rx_size, new_tx_size;
+	unsigned int reset = 0;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	new_rx_size = clamp_t(u32, ring->rx_pending,
+			      MIN_RX_RING_SIZE, MAX_RX_RING_SIZE);
+	new_rx_size = roundup_pow_of_two(new_rx_size);
+
+	new_tx_size = clamp_t(u32, ring->tx_pending,
+			      MIN_TX_RING_SIZE, MAX_TX_RING_SIZE);
+	new_tx_size = roundup_pow_of_two(new_tx_size);
+
+	if ((new_tx_size == bp->tx_ring_size) &&
+	    (new_rx_size == bp->rx_ring_size)) {
+		/* nothing to do */
+		return 0;
+	}
+
+	if (netif_running(bp->dev)) {
+		reset = 1;
+		macb_close(bp->dev);
+	}
+
+	bp->rx_ring_size = new_rx_size;
+	bp->tx_ring_size = new_tx_size;
+
+	if (reset)
+		macb_open(bp->dev);
+
+	return 0;
+}
+
 static const struct ethtool_ops macb_ethtool_ops = {
 	.get_regs_len		= macb_get_regs_len,
 	.get_regs		= macb_get_regs,
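
A worked example of the normalization in macb_set_ringparam() above: an rx request of 1000 passes the [64, 8192] clamp unchanged and is then rounded up to 1024, while a request of 10000 is first clamped to 8192 (already a power of two). The userspace sketch below uses hand-rolled stand-ins for the kernel's clamp_t() and roundup_pow_of_two():

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-ins for the kernel's clamp_t() and roundup_pow_of_two() */
    static uint32_t clamp_u32(uint32_t v, uint32_t lo, uint32_t hi)
    {
            return v < lo ? lo : (v > hi ? hi : v);
    }

    static uint32_t roundup_pow2(uint32_t v)
    {
            uint32_t p = 1;

            while (p < v)
                    p <<= 1;
            return p;
    }

    int main(void)
    {
            uint32_t req[] = { 32, 1000, 4096, 10000 };

            /* prints 64, 1024, 4096, 8192 */
            for (unsigned int i = 0; i < 4; i++)
                    printf("rx %5u -> ring size %u\n", req[i],
                           roundup_pow2(clamp_u32(req[i], 64, 8192)));
            return 0;
    }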
@@ -2213,6 +2275,8 @@ static const struct ethtool_ops macb_ethtool_ops = {
 	.set_wol		= macb_set_wol,
 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+	.get_ringparam		= macb_get_ringparam,
+	.set_ringparam		= macb_set_ringparam,
 };
 
 static const struct ethtool_ops gem_ethtool_ops = {
@@ -2225,6 +2289,8 @@ static const struct ethtool_ops gem_ethtool_ops = {
 	.get_sset_count		= gem_get_sset_count,
 	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
 	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+	.get_ringparam		= macb_get_ringparam,
+	.set_ringparam		= macb_set_ringparam,
 };
 
 static int macb_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
@@ -2419,6 +2485,9 @@ static int macb_init(struct platform_device *pdev)
 	int err;
 	u32 val;
 
+	bp->tx_ring_size = DEFAULT_TX_RING_SIZE;
+	bp->rx_ring_size = DEFAULT_RX_RING_SIZE;
+
 	/* set the queue register mapping once for all: queue0 has a special
 	 * register mapping but we don't want to test the queue index then
 	 * compute the corresponding register offset at run time.
--- a/drivers/net/ethernet/cadence/macb.h
+++ b/drivers/net/ethernet/cadence/macb.h
@@ -811,6 +811,9 @@ struct macb {
 	void			*rx_buffers;
 	size_t			rx_buffer_size;
 
+	unsigned int		rx_ring_size;
+	unsigned int		tx_ring_size;
+
 	unsigned int		num_queues;
 	unsigned int		queue_mask;
 	struct macb_queue	queues[MACB_MAX_QUEUES];