Commit b0251fbe authored by David S. Miller

Merge branch 'net-ll_temac-RX-TX-ring-size-and-coalesce-ethtool-parameters'

Esben Haabendal says:

====================
net: ll_temac: RX/TX ring size and coalesce ethtool parameters

This series adds support for RX/TX ring size and irq coalesce ethtool
parameters to ll_temac driver.
====================
Signed-off-by: David S. Miller <davem@davemloft.net>
parents b90feaff 227d4617
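
As a usage sketch of what this series enables (the interface name eth0 is a placeholder; both new handlers require the interface to be down first, see below):

    # Ring sizes, up to the new 4096-descriptor maximum
    ethtool -G eth0 rx 512 tx 256
    # IRQ coalescing: frame counts plus delay timeouts in microseconds
    ethtool -C eth0 rx-frames 16 rx-usecs 50 tx-frames 32 tx-usecs 25

These are the standard ethtool ring/coalesce flags; the values are arbitrary examples, subject to the limits in the diff below.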
@@ -369,18 +369,20 @@ struct temac_local {
     /* Buffer descriptors */
     struct cdmac_bd *tx_bd_v;
     dma_addr_t tx_bd_p;
+    u32 tx_bd_num;
     struct cdmac_bd *rx_bd_v;
     dma_addr_t rx_bd_p;
+    u32 rx_bd_num;
     int tx_bd_ci;
-    int tx_bd_next;
     int tx_bd_tail;
     int rx_bd_ci;
     int rx_bd_tail;
     /* DMA channel control setup */
-    u32 tx_chnl_ctrl;
-    u32 rx_chnl_ctrl;
+    u8 coalesce_count_tx;
+    u8 coalesce_delay_tx;
     u8 coalesce_count_rx;
+    u8 coalesce_delay_rx;

     struct delayed_work restart_work;
 };
...
@@ -58,8 +58,11 @@

 #include "ll_temac.h"

-#define TX_BD_NUM 64
-#define RX_BD_NUM 128
+/* Descriptors defines for Tx and Rx DMA */
+#define TX_BD_NUM_DEFAULT 64
+#define RX_BD_NUM_DEFAULT 1024
+#define TX_BD_NUM_MAX 4096
+#define RX_BD_NUM_MAX 4096

 /* ---------------------------------------------------------------------
  * Low level register access functions
@@ -301,7 +304,7 @@ static void temac_dma_bd_release(struct net_device *ndev)
     /* Reset Local Link (DMA) */
     lp->dma_out(lp, DMA_CONTROL_REG, DMA_CONTROL_RST);

-    for (i = 0; i < RX_BD_NUM; i++) {
+    for (i = 0; i < lp->rx_bd_num; i++) {
         if (!lp->rx_skb[i])
             break;
         else {
@@ -312,11 +315,11 @@ static void temac_dma_bd_release(struct net_device *ndev)
     }
     if (lp->rx_bd_v)
         dma_free_coherent(ndev->dev.parent,
-                          sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+                          sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
                           lp->rx_bd_v, lp->rx_bd_p);
     if (lp->tx_bd_v)
         dma_free_coherent(ndev->dev.parent,
-                          sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+                          sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                           lp->tx_bd_v, lp->tx_bd_p);
 }
@@ -330,33 +333,33 @@ static int temac_dma_bd_init(struct net_device *ndev)
     dma_addr_t skb_dma_addr;
     int i;

-    lp->rx_skb = devm_kcalloc(&ndev->dev, RX_BD_NUM, sizeof(*lp->rx_skb),
-                              GFP_KERNEL);
+    lp->rx_skb = devm_kcalloc(&ndev->dev, lp->rx_bd_num,
+                              sizeof(*lp->rx_skb), GFP_KERNEL);
     if (!lp->rx_skb)
         goto out;

     /* allocate the tx and rx ring buffer descriptors. */
     /* returns a virtual address and a physical address. */
     lp->tx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-                                     sizeof(*lp->tx_bd_v) * TX_BD_NUM,
+                                     sizeof(*lp->tx_bd_v) * lp->tx_bd_num,
                                      &lp->tx_bd_p, GFP_KERNEL);
     if (!lp->tx_bd_v)
         goto out;

     lp->rx_bd_v = dma_alloc_coherent(ndev->dev.parent,
-                                     sizeof(*lp->rx_bd_v) * RX_BD_NUM,
+                                     sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
                                      &lp->rx_bd_p, GFP_KERNEL);
     if (!lp->rx_bd_v)
         goto out;

-    for (i = 0; i < TX_BD_NUM; i++) {
+    for (i = 0; i < lp->tx_bd_num; i++) {
         lp->tx_bd_v[i].next = cpu_to_be32(lp->tx_bd_p
-                + sizeof(*lp->tx_bd_v) * ((i + 1) % TX_BD_NUM));
+            + sizeof(*lp->tx_bd_v) * ((i + 1) % lp->tx_bd_num));
     }

-    for (i = 0; i < RX_BD_NUM; i++) {
+    for (i = 0; i < lp->rx_bd_num; i++) {
         lp->rx_bd_v[i].next = cpu_to_be32(lp->rx_bd_p
-                + sizeof(*lp->rx_bd_v) * ((i + 1) % RX_BD_NUM));
+            + sizeof(*lp->rx_bd_v) * ((i + 1) % lp->rx_bd_num));

         skb = netdev_alloc_skb_ip_align(ndev,
                                         XTE_MAX_JUMBO_FRAME_SIZE);
@@ -376,21 +379,22 @@ static int temac_dma_bd_init(struct net_device *ndev)
     }

     /* Configure DMA channel (irq setup) */
-    lp->dma_out(lp, TX_CHNL_CTRL, lp->tx_chnl_ctrl |
+    lp->dma_out(lp, TX_CHNL_CTRL,
+                lp->coalesce_delay_tx << 24 | lp->coalesce_count_tx << 16 |
                 0x00000400 | // Use 1 Bit Wide Counters. Currently Not Used!
                 CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
                 CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);
-    lp->dma_out(lp, RX_CHNL_CTRL, lp->rx_chnl_ctrl |
+    lp->dma_out(lp, RX_CHNL_CTRL,
+                lp->coalesce_delay_rx << 24 | lp->coalesce_count_rx << 16 |
                 CHNL_CTRL_IRQ_IOE |
                 CHNL_CTRL_IRQ_EN | CHNL_CTRL_IRQ_ERR_EN |
                 CHNL_CTRL_IRQ_DLY_EN | CHNL_CTRL_IRQ_COAL_EN);

     /* Init descriptor indexes */
     lp->tx_bd_ci = 0;
-    lp->tx_bd_next = 0;
     lp->tx_bd_tail = 0;
     lp->rx_bd_ci = 0;
-    lp->rx_bd_tail = RX_BD_NUM - 1;
+    lp->rx_bd_tail = lp->rx_bd_num - 1;

     /* Enable RX DMA transfers */
     wmb();
@@ -785,7 +789,7 @@ static void temac_start_xmit_done(struct net_device *ndev)
         ndev->stats.tx_bytes += be32_to_cpu(cur_p->len);

         lp->tx_bd_ci++;
-        if (lp->tx_bd_ci >= TX_BD_NUM)
+        if (lp->tx_bd_ci >= lp->tx_bd_num)
             lp->tx_bd_ci = 0;

         cur_p = &lp->tx_bd_v[lp->tx_bd_ci];
@@ -811,7 +815,7 @@ static inline int temac_check_tx_bd_space(struct temac_local *lp, int num_frag)
             return NETDEV_TX_BUSY;

         tail++;
-        if (tail >= TX_BD_NUM)
+        if (tail >= lp->tx_bd_num)
             tail = 0;

         cur_p = &lp->tx_bd_v[tail];
@@ -826,14 +830,13 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 {
     struct temac_local *lp = netdev_priv(ndev);
     struct cdmac_bd *cur_p;
-    dma_addr_t start_p, tail_p, skb_dma_addr;
+    dma_addr_t tail_p, skb_dma_addr;
     int ii;
     unsigned long num_frag;
     skb_frag_t *frag;

     num_frag = skb_shinfo(skb)->nr_frags;
     frag = &skb_shinfo(skb)->frags[0];
-    start_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
     cur_p = &lp->tx_bd_v[lp->tx_bd_tail];

     if (temac_check_tx_bd_space(lp, num_frag + 1)) {
@@ -876,7 +879,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     ptr_to_txbd((void *)skb, cur_p);

     for (ii = 0; ii < num_frag; ii++) {
-        if (++lp->tx_bd_tail >= TX_BD_NUM)
+        if (++lp->tx_bd_tail >= lp->tx_bd_num)
             lp->tx_bd_tail = 0;

         cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
@@ -886,7 +889,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                        DMA_TO_DEVICE);
         if (dma_mapping_error(ndev->dev.parent, skb_dma_addr)) {
             if (--lp->tx_bd_tail < 0)
-                lp->tx_bd_tail = TX_BD_NUM - 1;
+                lp->tx_bd_tail = lp->tx_bd_num - 1;
             cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
             while (--ii >= 0) {
                 --frag;
@@ -895,7 +898,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
                                skb_frag_size(frag),
                                DMA_TO_DEVICE);
                 if (--lp->tx_bd_tail < 0)
-                    lp->tx_bd_tail = TX_BD_NUM - 1;
+                    lp->tx_bd_tail = lp->tx_bd_num - 1;
                 cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
             }
             dma_unmap_single(ndev->dev.parent,
@@ -914,7 +917,7 @@ temac_start_xmit(struct sk_buff *skb, struct net_device *ndev)
     tail_p = lp->tx_bd_p + sizeof(*lp->tx_bd_v) * lp->tx_bd_tail;
     lp->tx_bd_tail++;
-    if (lp->tx_bd_tail >= TX_BD_NUM)
+    if (lp->tx_bd_tail >= lp->tx_bd_num)
         lp->tx_bd_tail = 0;

     skb_tx_timestamp(skb);
@@ -934,7 +937,7 @@ static int ll_temac_recv_buffers_available(struct temac_local *lp)
         return 0;
     available = 1 + lp->rx_bd_tail - lp->rx_bd_ci;
     if (available <= 0)
-        available += RX_BD_NUM;
+        available += lp->rx_bd_num;
     return available;
 }
@@ -1003,7 +1006,7 @@ static void ll_temac_recv(struct net_device *ndev)
         ndev->stats.rx_bytes += length;

         rx_bd = lp->rx_bd_ci;
-        if (++lp->rx_bd_ci >= RX_BD_NUM)
+        if (++lp->rx_bd_ci >= lp->rx_bd_num)
             lp->rx_bd_ci = 0;
     } while (rx_bd != lp->rx_bd_tail);
@@ -1034,7 +1037,7 @@ static void ll_temac_recv(struct net_device *ndev)
         dma_addr_t skb_dma_addr;

         rx_bd = lp->rx_bd_tail + 1;
-        if (rx_bd >= RX_BD_NUM)
+        if (rx_bd >= lp->rx_bd_num)
             rx_bd = 0;
         bd = &lp->rx_bd_v[rx_bd];
@@ -1250,13 +1253,113 @@ static const struct attribute_group temac_attr_group = {
     .attrs = temac_device_attrs,
 };

-/* ethtool support */
+/* ---------------------------------------------------------------------
+ * ethtool support
+ */
+
+static void ll_temac_ethtools_get_ringparam(struct net_device *ndev,
+                                            struct ethtool_ringparam *ering)
+{
+    struct temac_local *lp = netdev_priv(ndev);
+
+    ering->rx_max_pending = RX_BD_NUM_MAX;
+    ering->rx_mini_max_pending = 0;
+    ering->rx_jumbo_max_pending = 0;
+    ering->tx_max_pending = TX_BD_NUM_MAX;
+    ering->rx_pending = lp->rx_bd_num;
+    ering->rx_mini_pending = 0;
+    ering->rx_jumbo_pending = 0;
+    ering->tx_pending = lp->tx_bd_num;
+}
+
+static int ll_temac_ethtools_set_ringparam(struct net_device *ndev,
+                                           struct ethtool_ringparam *ering)
+{
+    struct temac_local *lp = netdev_priv(ndev);
+
+    if (ering->rx_pending > RX_BD_NUM_MAX ||
+        ering->rx_mini_pending ||
+        ering->rx_jumbo_pending ||
+        ering->tx_pending > TX_BD_NUM_MAX)
+        return -EINVAL;
+
+    if (netif_running(ndev))
+        return -EBUSY;
+
+    lp->rx_bd_num = ering->rx_pending;
+    lp->tx_bd_num = ering->tx_pending;
+    return 0;
+}
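
Since temac_dma_bd_init() links the descriptor rings with a plain modulo wraparound ((i + 1) % lp->tx_bd_num, see above) rather than address masking, the sizes accepted here do not need to be powers of two; any value up to TX_BD_NUM_MAX/RX_BD_NUM_MAX is usable.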
+
+static int ll_temac_ethtools_get_coalesce(struct net_device *ndev,
+                                          struct ethtool_coalesce *ec)
+{
+    struct temac_local *lp = netdev_priv(ndev);
+
+    ec->rx_max_coalesced_frames = lp->coalesce_count_rx;
+    ec->tx_max_coalesced_frames = lp->coalesce_count_tx;
+    ec->rx_coalesce_usecs = (lp->coalesce_delay_rx * 512) / 100;
+    ec->tx_coalesce_usecs = (lp->coalesce_delay_tx * 512) / 100;
+    return 0;
+}
+
+static int ll_temac_ethtools_set_coalesce(struct net_device *ndev,
+                                          struct ethtool_coalesce *ec)
+{
+    struct temac_local *lp = netdev_priv(ndev);
+
+    if (netif_running(ndev)) {
+        netdev_err(ndev,
+                   "Please stop netif before applying configuration\n");
+        return -EFAULT;
+    }
+
+    if (ec->rx_coalesce_usecs_irq ||
+        ec->rx_max_coalesced_frames_irq ||
+        ec->tx_coalesce_usecs_irq ||
+        ec->tx_max_coalesced_frames_irq ||
+        ec->stats_block_coalesce_usecs ||
+        ec->use_adaptive_rx_coalesce ||
+        ec->use_adaptive_tx_coalesce ||
+        ec->pkt_rate_low ||
+        ec->rx_coalesce_usecs_low ||
+        ec->rx_max_coalesced_frames_low ||
+        ec->tx_coalesce_usecs_low ||
+        ec->tx_max_coalesced_frames_low ||
+        ec->pkt_rate_high ||
+        ec->rx_coalesce_usecs_high ||
+        ec->rx_max_coalesced_frames_high ||
+        ec->tx_coalesce_usecs_high ||
+        ec->tx_max_coalesced_frames_high ||
+        ec->rate_sample_interval)
+        return -EOPNOTSUPP;
+
+    if (ec->rx_max_coalesced_frames)
+        lp->coalesce_count_rx = ec->rx_max_coalesced_frames;
+    if (ec->tx_max_coalesced_frames)
+        lp->coalesce_count_tx = ec->tx_max_coalesced_frames;
+    /* With typical LocalLink clock speed of 200 MHz and
+     * C_PRESCALAR=1023, each delay count corresponds to 5.12 us.
+     */
+    if (ec->rx_coalesce_usecs)
+        lp->coalesce_delay_rx =
+            min(255U, (ec->rx_coalesce_usecs * 100) / 512);
+    if (ec->tx_coalesce_usecs)
+        lp->coalesce_delay_tx =
+            min(255U, (ec->tx_coalesce_usecs * 100) / 512);
+
+    return 0;
+}
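
Worked example for the conversion above: one delay count is 5.12 us, so a requested rx-usecs of 100 is scaled to (100 * 100) / 512 = 19 counts, which the hardware then applies as 19 * 5.12 us = 97.28 us. The min() clamps the 8-bit delay field at 255 counts, roughly 1.3 ms.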
+
 static const struct ethtool_ops temac_ethtool_ops = {
     .nway_reset = phy_ethtool_nway_reset,
     .get_link = ethtool_op_get_link,
     .get_ts_info = ethtool_op_get_ts_info,
     .get_link_ksettings = phy_ethtool_get_link_ksettings,
     .set_link_ksettings = phy_ethtool_set_link_ksettings,
+    .get_ringparam = ll_temac_ethtools_get_ringparam,
+    .set_ringparam = ll_temac_ethtools_set_ringparam,
+    .get_coalesce = ll_temac_ethtools_get_coalesce,
+    .set_coalesce = ll_temac_ethtools_set_coalesce,
 };

 static int temac_probe(struct platform_device *pdev)
@@ -1300,6 +1403,8 @@ static int temac_probe(struct platform_device *pdev)
     lp->ndev = ndev;
     lp->dev = &pdev->dev;
     lp->options = XTE_OPTION_DEFAULTS;
+    lp->rx_bd_num = RX_BD_NUM_DEFAULT;
+    lp->tx_bd_num = TX_BD_NUM_DEFAULT;
     spin_lock_init(&lp->rx_lock);
     INIT_DELAYED_WORK(&lp->restart_work, ll_temac_restart_work_func);
@@ -1364,6 +1469,14 @@ static int temac_probe(struct platform_device *pdev)
     /* Can checksum TCP/UDP over IPv4. */
     ndev->features |= NETIF_F_IP_CSUM;

+    /* Defaults for IRQ delay/coalescing setup. These are
+     * configuration values, so they do not belong in the device tree.
+     */
+    lp->coalesce_delay_tx = 0x10;
+    lp->coalesce_count_tx = 0x22;
+    lp->coalesce_delay_rx = 0xff;
+    lp->coalesce_count_rx = 0x07;
+
     /* Setup LocalLink DMA */
     if (temac_np) {
         /* Find the DMA node, map the DMA registers, and
@@ -1402,14 +1515,6 @@ static int temac_probe(struct platform_device *pdev)
         lp->rx_irq = irq_of_parse_and_map(dma_np, 0);
         lp->tx_irq = irq_of_parse_and_map(dma_np, 1);

-        /* Use defaults for IRQ delay/coalescing setup. These
-         * are configuration values, so does not belong in
-         * device-tree.
-         */
-        lp->tx_chnl_ctrl = 0x10220000;
-        lp->rx_chnl_ctrl = 0xff070000;
-        lp->coalesce_count_rx = 0x07;
-
         /* Finished with the DMA node; drop the reference */
         of_node_put(dma_np);
     } else if (pdata) {
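
No behavior changes here: the removed tx_chnl_ctrl value 0x10220000 packed delay 0x10 into bits 31:24 and count 0x22 into bits 23:16, and rx_chnl_ctrl 0xff070000 likewise packed 0xff and 0x07. Those are exactly the defaults temac_probe() now sets, which temac_dma_bd_init() shifts back into TX_CHNL_CTRL/RX_CHNL_CTRL.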
@@ -1435,18 +1540,13 @@ static int temac_probe(struct platform_device *pdev)
         lp->tx_irq = platform_get_irq(pdev, 1);

         /* IRQ delay/coalescing setup */
-        if (pdata->tx_irq_timeout || pdata->tx_irq_count)
-            lp->tx_chnl_ctrl = (pdata->tx_irq_timeout << 24) |
-                (pdata->tx_irq_count << 16);
-        else
-            lp->tx_chnl_ctrl = 0x10220000;
+        if (pdata->tx_irq_timeout || pdata->tx_irq_count) {
+            lp->coalesce_delay_tx = pdata->tx_irq_timeout;
+            lp->coalesce_count_tx = pdata->tx_irq_count;
+        }

         if (pdata->rx_irq_timeout || pdata->rx_irq_count) {
-            lp->rx_chnl_ctrl = (pdata->rx_irq_timeout << 24) |
-                (pdata->rx_irq_count << 16);
+            lp->coalesce_delay_rx = pdata->rx_irq_timeout;
             lp->coalesce_count_rx = pdata->rx_irq_count;
-        } else {
-            lp->rx_chnl_ctrl = 0xff070000;
-            lp->coalesce_count_rx = 0x07;
         }
     }