Commit 18f9f0ac authored by Lendacky, Thomas's avatar Lendacky, Thomas Committed by David S. Miller

amd-xgbe: Add NUMA affinity support for memory allocations

Add support to perform memory allocations on the node of the device. The
original allocation of the ring structure and Tx/Rx queues allocated all
of the memory at once and then carved it up for each channel and queue.
To best ensure that we get as much memory from the NUMA node as we can,
break the channel and ring allocations into individual allocations.
Signed-off-by: default avatarTom Lendacky <thomas.lendacky@amd.com>
Signed-off-by: default avatarDavid S. Miller <davem@davemloft.net>
parent 85b85c85
...@@ -176,8 +176,8 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata) ...@@ -176,8 +176,8 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_free_ring_resources\n"); DBGPR("-->xgbe_free_ring_resources\n");
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { channel = pdata->channel[i];
xgbe_free_ring(pdata, channel->tx_ring); xgbe_free_ring(pdata, channel->tx_ring);
xgbe_free_ring(pdata, channel->rx_ring); xgbe_free_ring(pdata, channel->rx_ring);
} }
...@@ -185,34 +185,60 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata) ...@@ -185,34 +185,60 @@ static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
DBGPR("<--xgbe_free_ring_resources\n"); DBGPR("<--xgbe_free_ring_resources\n");
} }
/* Allocate zeroed kernel memory, preferring the given NUMA node.
 * Falls back to an allocation from any node if the node-local
 * request cannot be satisfied. Returns NULL only if both fail.
 */
static void *xgbe_alloc_node(size_t size, int node)
{
	void *mem = kzalloc_node(size, GFP_KERNEL, node);

	/* Node-local memory exhausted; retry without a node preference */
	if (!mem)
		mem = kzalloc(size, GFP_KERNEL);

	return mem;
}
/* Allocate DMA-coherent memory, preferring the given NUMA node.
 *
 * dma_alloc_coherent() has no node parameter, so the device's node is
 * temporarily overridden to steer the allocation, then restored.  If
 * the node-preferred attempt fails, retry with the device's original
 * node setting. Returns NULL only if both attempts fail; *dma receives
 * the bus address on success.
 */
static void *xgbe_dma_alloc_node(struct device *dev, size_t size,
				 dma_addr_t *dma, int node)
{
	int orig_node = dev_to_node(dev);
	void *mem;

	set_dev_node(dev, node);
	mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);
	set_dev_node(dev, orig_node);

	/* Fall back to an allocation governed by the device's own node */
	if (!mem)
		mem = dma_alloc_coherent(dev, size, dma, GFP_KERNEL);

	return mem;
}
static int xgbe_init_ring(struct xgbe_prv_data *pdata, static int xgbe_init_ring(struct xgbe_prv_data *pdata,
struct xgbe_ring *ring, unsigned int rdesc_count) struct xgbe_ring *ring, unsigned int rdesc_count)
{ {
DBGPR("-->xgbe_init_ring\n"); size_t size;
if (!ring) if (!ring)
return 0; return 0;
/* Descriptors */ /* Descriptors */
size = rdesc_count * sizeof(struct xgbe_ring_desc);
ring->rdesc_count = rdesc_count; ring->rdesc_count = rdesc_count;
ring->rdesc = dma_alloc_coherent(pdata->dev, ring->rdesc = xgbe_dma_alloc_node(pdata->dev, size, &ring->rdesc_dma,
(sizeof(struct xgbe_ring_desc) * ring->node);
rdesc_count), &ring->rdesc_dma,
GFP_KERNEL);
if (!ring->rdesc) if (!ring->rdesc)
return -ENOMEM; return -ENOMEM;
/* Descriptor information */ /* Descriptor information */
ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data), size = rdesc_count * sizeof(struct xgbe_ring_data);
GFP_KERNEL);
ring->rdata = xgbe_alloc_node(size, ring->node);
if (!ring->rdata) if (!ring->rdata)
return -ENOMEM; return -ENOMEM;
netif_dbg(pdata, drv, pdata->netdev, netif_dbg(pdata, drv, pdata->netdev,
"rdesc=%p, rdesc_dma=%pad, rdata=%p\n", "rdesc=%p, rdesc_dma=%pad, rdata=%p, node=%d\n",
ring->rdesc, &ring->rdesc_dma, ring->rdata); ring->rdesc, &ring->rdesc_dma, ring->rdata, ring->node);
DBGPR("<--xgbe_init_ring\n");
return 0; return 0;
} }
...@@ -223,10 +249,8 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) ...@@ -223,10 +249,8 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
unsigned int i; unsigned int i;
int ret; int ret;
DBGPR("-->xgbe_alloc_ring_resources\n"); for (i = 0; i < pdata->channel_count; i++) {
channel = pdata->channel[i];
channel = pdata->channel;
for (i = 0; i < pdata->channel_count; i++, channel++) {
netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n", netif_dbg(pdata, drv, pdata->netdev, "%s - Tx ring:\n",
channel->name); channel->name);
...@@ -250,8 +274,6 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) ...@@ -250,8 +274,6 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
} }
} }
DBGPR("<--xgbe_alloc_ring_resources\n");
return 0; return 0;
err_ring: err_ring:
...@@ -261,21 +283,33 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata) ...@@ -261,21 +283,33 @@ static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
} }
static int xgbe_alloc_pages(struct xgbe_prv_data *pdata, static int xgbe_alloc_pages(struct xgbe_prv_data *pdata,
struct xgbe_page_alloc *pa, gfp_t gfp, int order) struct xgbe_page_alloc *pa, int alloc_order,
int node)
{ {
struct page *pages = NULL; struct page *pages = NULL;
dma_addr_t pages_dma; dma_addr_t pages_dma;
int ret; gfp_t gfp;
int order, ret;
again:
order = alloc_order;
/* Try to obtain pages, decreasing order if necessary */ /* Try to obtain pages, decreasing order if necessary */
gfp |= __GFP_COLD | __GFP_COMP | __GFP_NOWARN; gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_NOWARN;
while (order >= 0) { while (order >= 0) {
pages = alloc_pages(gfp, order); pages = alloc_pages_node(node, gfp, order);
if (pages) if (pages)
break; break;
order--; order--;
} }
/* If we couldn't get local pages, try getting from anywhere */
if (!pages && (node != NUMA_NO_NODE)) {
node = NUMA_NO_NODE;
goto again;
}
if (!pages) if (!pages)
return -ENOMEM; return -ENOMEM;
...@@ -327,14 +361,14 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata, ...@@ -327,14 +361,14 @@ static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
int ret; int ret;
if (!ring->rx_hdr_pa.pages) { if (!ring->rx_hdr_pa.pages) {
ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, GFP_ATOMIC, 0); ret = xgbe_alloc_pages(pdata, &ring->rx_hdr_pa, 0, ring->node);
if (ret) if (ret)
return ret; return ret;
} }
if (!ring->rx_buf_pa.pages) { if (!ring->rx_buf_pa.pages) {
ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa, GFP_ATOMIC, ret = xgbe_alloc_pages(pdata, &ring->rx_buf_pa,
PAGE_ALLOC_COSTLY_ORDER); PAGE_ALLOC_COSTLY_ORDER, ring->node);
if (ret) if (ret)
return ret; return ret;
} }
...@@ -362,8 +396,8 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata) ...@@ -362,8 +396,8 @@ static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_tx_descriptor_init\n"); DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { channel = pdata->channel[i];
ring = channel->tx_ring; ring = channel->tx_ring;
if (!ring) if (!ring)
break; break;
...@@ -403,8 +437,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata) ...@@ -403,8 +437,8 @@ static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
DBGPR("-->xgbe_wrapper_rx_descriptor_init\n"); DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { channel = pdata->channel[i];
ring = channel->rx_ring; ring = channel->rx_ring;
if (!ring) if (!ring)
break; break;
......
...@@ -176,12 +176,10 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, ...@@ -176,12 +176,10 @@ static unsigned int xgbe_riwt_to_usec(struct xgbe_prv_data *pdata,
static int xgbe_config_pblx8(struct xgbe_prv_data *pdata) static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++)
for (i = 0; i < pdata->channel_count; i++, channel++) XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, PBLX8,
pdata->pblx8); pdata->pblx8);
return 0; return 0;
...@@ -189,20 +187,18 @@ static int xgbe_config_pblx8(struct xgbe_prv_data *pdata) ...@@ -189,20 +187,18 @@ static int xgbe_config_pblx8(struct xgbe_prv_data *pdata)
static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata) static int xgbe_get_tx_pbl_val(struct xgbe_prv_data *pdata)
{ {
return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_TCR, PBL); return XGMAC_DMA_IOREAD_BITS(pdata->channel[0], DMA_CH_TCR, PBL);
} }
static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata) static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, PBL, XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, PBL,
pdata->tx_pbl); pdata->tx_pbl);
} }
...@@ -211,20 +207,18 @@ static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata) ...@@ -211,20 +207,18 @@ static int xgbe_config_tx_pbl_val(struct xgbe_prv_data *pdata)
static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata) static int xgbe_get_rx_pbl_val(struct xgbe_prv_data *pdata)
{ {
return XGMAC_DMA_IOREAD_BITS(pdata->channel, DMA_CH_RCR, PBL); return XGMAC_DMA_IOREAD_BITS(pdata->channel[0], DMA_CH_RCR, PBL);
} }
static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata) static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, PBL, XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, PBL,
pdata->rx_pbl); pdata->rx_pbl);
} }
...@@ -233,15 +227,13 @@ static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata) ...@@ -233,15 +227,13 @@ static int xgbe_config_rx_pbl_val(struct xgbe_prv_data *pdata)
static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata) static int xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, OSP, XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
pdata->tx_osp_mode); pdata->tx_osp_mode);
} }
...@@ -292,15 +284,13 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, ...@@ -292,15 +284,13 @@ static int xgbe_config_tx_threshold(struct xgbe_prv_data *pdata,
static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata) static int xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RIWT, RWT, XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
pdata->rx_riwt); pdata->rx_riwt);
} }
...@@ -314,44 +304,38 @@ static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata) ...@@ -314,44 +304,38 @@ static int xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata) static void xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, RBSZ, XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
pdata->rx_buf_size); pdata->rx_buf_size);
} }
} }
static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata) static void xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, TSE, 1); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE, 1);
} }
} }
static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata) static void xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_CR, SPH, 1); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, SPH, 1);
} }
XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE); XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
...@@ -651,8 +635,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata) ...@@ -651,8 +635,9 @@ static void xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM, XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
pdata->channel_irq_mode); pdata->channel_irq_mode);
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { channel = pdata->channel[i];
/* Clear all the interrupts which are set */ /* Clear all the interrupts which are set */
dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR); dma_ch_isr = XGMAC_DMA_IOREAD(channel, DMA_CH_SR);
XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr); XGMAC_DMA_IOWRITE(channel, DMA_CH_SR, dma_ch_isr);
...@@ -3213,16 +3198,14 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, ...@@ -3213,16 +3198,14 @@ static void xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata,
static void xgbe_enable_tx(struct xgbe_prv_data *pdata) static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Enable each Tx DMA channel */ /* Enable each Tx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
} }
/* Enable each Tx queue */ /* Enable each Tx queue */
...@@ -3236,7 +3219,6 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata) ...@@ -3236,7 +3219,6 @@ static void xgbe_enable_tx(struct xgbe_prv_data *pdata)
static void xgbe_disable_tx(struct xgbe_prv_data *pdata) static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Prepare for Tx DMA channel stop */ /* Prepare for Tx DMA channel stop */
...@@ -3251,12 +3233,11 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata) ...@@ -3251,12 +3233,11 @@ static void xgbe_disable_tx(struct xgbe_prv_data *pdata)
XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0); XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);
/* Disable each Tx DMA channel */ /* Disable each Tx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
} }
} }
...@@ -3288,16 +3269,14 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, ...@@ -3288,16 +3269,14 @@ static void xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata,
static void xgbe_enable_rx(struct xgbe_prv_data *pdata) static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int reg_val, i; unsigned int reg_val, i;
/* Enable each Rx DMA channel */ /* Enable each Rx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
} }
/* Enable each Rx queue */ /* Enable each Rx queue */
...@@ -3315,7 +3294,6 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata) ...@@ -3315,7 +3294,6 @@ static void xgbe_enable_rx(struct xgbe_prv_data *pdata)
static void xgbe_disable_rx(struct xgbe_prv_data *pdata) static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Disable MAC Rx */ /* Disable MAC Rx */
...@@ -3332,27 +3310,24 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata) ...@@ -3332,27 +3310,24 @@ static void xgbe_disable_rx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE(pdata, MAC_RQC0R, 0); XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);
/* Disable each Rx DMA channel */ /* Disable each Rx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
} }
} }
static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Enable each Tx DMA channel */ /* Enable each Tx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 1); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
} }
/* Enable MAC Tx */ /* Enable MAC Tx */
...@@ -3361,7 +3336,6 @@ static void xgbe_powerup_tx(struct xgbe_prv_data *pdata) ...@@ -3361,7 +3336,6 @@ static void xgbe_powerup_tx(struct xgbe_prv_data *pdata)
static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Prepare for Tx DMA channel stop */ /* Prepare for Tx DMA channel stop */
...@@ -3372,42 +3346,37 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata) ...@@ -3372,42 +3346,37 @@ static void xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0); XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);
/* Disable each Tx DMA channel */ /* Disable each Tx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->tx_ring)
if (!channel->tx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_TCR, ST, 0); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
} }
} }
static void xgbe_powerup_rx(struct xgbe_prv_data *pdata) static void xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Enable each Rx DMA channel */ /* Enable each Rx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 1); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
} }
} }
static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata) static void xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{ {
struct xgbe_channel *channel;
unsigned int i; unsigned int i;
/* Disable each Rx DMA channel */ /* Disable each Rx DMA channel */
channel = pdata->channel; for (i = 0; i < pdata->channel_count; i++) {
for (i = 0; i < pdata->channel_count; i++, channel++) { if (!pdata->channel[i]->rx_ring)
if (!channel->rx_ring)
break; break;
XGMAC_DMA_IOWRITE_BITS(channel, DMA_CH_RCR, SR, 0); XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
} }
} }
......
This diff is collapsed.
...@@ -412,6 +412,7 @@ struct xgbe_ring { ...@@ -412,6 +412,7 @@ struct xgbe_ring {
/* Page allocation for RX buffers */ /* Page allocation for RX buffers */
struct xgbe_page_alloc rx_hdr_pa; struct xgbe_page_alloc rx_hdr_pa;
struct xgbe_page_alloc rx_buf_pa; struct xgbe_page_alloc rx_buf_pa;
int node;
/* Ring index values /* Ring index values
* cur - Tx: index of descriptor to be used for current transfer * cur - Tx: index of descriptor to be used for current transfer
...@@ -462,6 +463,8 @@ struct xgbe_channel { ...@@ -462,6 +463,8 @@ struct xgbe_channel {
struct xgbe_ring *tx_ring; struct xgbe_ring *tx_ring;
struct xgbe_ring *rx_ring; struct xgbe_ring *rx_ring;
int node;
} ____cacheline_aligned; } ____cacheline_aligned;
enum xgbe_state { enum xgbe_state {
...@@ -1012,7 +1015,7 @@ struct xgbe_prv_data { ...@@ -1012,7 +1015,7 @@ struct xgbe_prv_data {
struct timer_list service_timer; struct timer_list service_timer;
/* Rings for Tx/Rx on a DMA channel */ /* Rings for Tx/Rx on a DMA channel */
struct xgbe_channel *channel; struct xgbe_channel *channel[XGBE_MAX_DMA_CHANNELS];
unsigned int tx_max_channel_count; unsigned int tx_max_channel_count;
unsigned int rx_max_channel_count; unsigned int rx_max_channel_count;
unsigned int channel_count; unsigned int channel_count;
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment