Commit 4e958f33 authored by Andre Przywara, committed by David S. Miller

net: axienet: Upgrade descriptors to hold 64-bit addresses

Newer revisions of the AXI DMA IP (>= v7.1) support 64-bit addresses,
both for the descriptors themselves and for the buffers they point to.
This is realised by adding "MSB" words for the next and phys pointers
directly behind the existing address words, now referred to as "LSB".
These MSB words live in formerly reserved areas of the descriptor.

If the hardware supports it, write both words when setting an address.
The buffer address is handled by two wrapper functions, while the two
places where we set the next pointers are open coded.

For now this is guarded by a flag which we don't set yet.
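
For illustration only (not part of this patch; the detection helper named
below is a made-up placeholder): once the hardware's address width can be
detected, a follow-up change could enable the feature roughly like this:

    /* Hypothetical sketch -- this patch intentionally never sets the flag.
     * axienet_dma_is_64bit() stands in for whatever detection mechanism a
     * later patch introduces (device tree property, probing, ...).
     */
    if (axienet_dma_is_64bit(lp))
            lp->features |= XAE_FEATURE_DMA_64BIT;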
Signed-off-by: Andre Przywara <andre.przywara@arm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 6a00d0dd
@@ -328,6 +328,7 @@
 #define XAE_FEATURE_PARTIAL_TX_CSUM	(1 << 1)
 #define XAE_FEATURE_FULL_RX_CSUM	(1 << 2)
 #define XAE_FEATURE_FULL_TX_CSUM	(1 << 3)
+#define XAE_FEATURE_DMA_64BIT		(1 << 4)

 #define XAE_NO_CSUM_OFFLOAD		0
@@ -340,9 +341,9 @@
 /**
  * struct axidma_bd - Axi Dma buffer descriptor layout
  * @next:         MM2S/S2MM Next Descriptor Pointer
- * @reserved1:    Reserved and not used
+ * @next_msb:     MM2S/S2MM Next Descriptor Pointer (high 32 bits)
  * @phys:         MM2S/S2MM Buffer Address
- * @reserved2:    Reserved and not used
+ * @phys_msb:     MM2S/S2MM Buffer Address (high 32 bits)
  * @reserved3:    Reserved and not used
  * @reserved4:    Reserved and not used
  * @cntrl:        MM2S/S2MM Control value
@@ -355,9 +356,9 @@
  */
 struct axidma_bd {
 	u32 next;	/* Physical address of next buffer descriptor */
-	u32 reserved1;
+	u32 next_msb;	/* high 32 bits for IP >= v7.1, reserved on older IP */
 	u32 phys;
-	u32 reserved2;
+	u32 phys_msb;	/* for IP >= v7.1, reserved for older IP */
 	u32 reserved3;
 	u32 reserved4;
 	u32 cntrl;
......
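
As a reading aid (word offsets inferred from the struct above, so treat them
as an assumption to be checked against the AXI DMA product guide): the MSB
words sit directly behind their LSB counterparts, which is why the formerly
reserved fields can be reused without changing the descriptor size:

    /* Word offsets within the descriptor (sketch, not from this patch):
     *   0x00  next       (LSB)
     *   0x04  next_msb   (MSB, was reserved1)
     *   0x08  phys       (LSB)
     *   0x0c  phys_msb   (MSB, was reserved2)
     *   0x10  reserved3
     *   0x14  reserved4
     *   0x18  cntrl
     */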
@@ -153,6 +153,25 @@ static void axienet_dma_out_addr(struct axienet_local *lp, off_t reg,
 	axienet_dma_out32(lp, reg, lower_32_bits(addr));
 }

+static void desc_set_phys_addr(struct axienet_local *lp, dma_addr_t addr,
+			       struct axidma_bd *desc)
+{
+	desc->phys = lower_32_bits(addr);
+	if (lp->features & XAE_FEATURE_DMA_64BIT)
+		desc->phys_msb = upper_32_bits(addr);
+}
+
+static dma_addr_t desc_get_phys_addr(struct axienet_local *lp,
+				     struct axidma_bd *desc)
+{
+	dma_addr_t ret = desc->phys;
+
+	if (lp->features & XAE_FEATURE_DMA_64BIT)
+		ret |= ((dma_addr_t)desc->phys_msb << 16) << 16;
+
+	return ret;
+}
+
 /**
  * axienet_dma_bd_release - Release buffer descriptor rings
  * @ndev:	Pointer to the net_device structure
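
One detail worth calling out in desc_get_phys_addr() above: the high word is
merged in with two 16-bit shifts rather than a single shift by 32. When
dma_addr_t is only 32 bits wide, a shift by 32 is undefined behaviour in C
(the shift count equals the type width), while the split shift is well
defined and simply discards the high bits. A standalone sketch of the same
idea (plain C, not driver code; the 32-bit addr_t typedef is an assumption
made for the demonstration):

    #include <stdint.h>
    #include <stdio.h>

    typedef uint32_t addr_t;	/* stand-in for a 32-bit dma_addr_t */

    static addr_t get_addr(uint32_t lsb, uint32_t msb)
    {
    	addr_t ret = lsb;

    	/* Well defined even though addr_t is 32 bits: the two 16-bit
    	 * shifts push the MSB word out of range instead of invoking the
    	 * undefined behaviour that "msb << 32" would.
    	 */
    	ret |= ((addr_t)msb << 16) << 16;
    	return ret;
    }

    int main(void)
    {
    	printf("0x%x\n", get_addr(0xdeadbeef, 0x1));	/* prints 0xdeadbeef */
    	return 0;
    }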
@@ -176,6 +195,8 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 		return;

 	for (i = 0; i < lp->rx_bd_num; i++) {
+		dma_addr_t phys;
+
 		/* A NULL skb means this descriptor has not been initialised
 		 * at all.
 		 */
@@ -188,10 +209,12 @@ static void axienet_dma_bd_release(struct net_device *ndev)
 		 * descriptor size, after it had been successfully allocated.
 		 * So a non-zero value in there means we need to unmap it.
 		 */
-		if (lp->rx_bd_v[i].cntrl)
-			dma_unmap_single(ndev->dev.parent, lp->rx_bd_v[i].phys,
+		if (lp->rx_bd_v[i].cntrl) {
+			phys = desc_get_phys_addr(lp, &lp->rx_bd_v[i]);
+			dma_unmap_single(ndev->dev.parent, phys,
 					 lp->max_frm_size, DMA_FROM_DEVICE);
+		}
 	}

 	dma_free_coherent(ndev->dev.parent,
 			  sizeof(*lp->rx_bd_v) * lp->rx_bd_num,
@@ -235,29 +258,36 @@ static int axienet_dma_bd_init(struct net_device *ndev)
 		goto out;

 	for (i = 0; i < lp->tx_bd_num; i++) {
-		lp->tx_bd_v[i].next = lp->tx_bd_p +
-				      sizeof(*lp->tx_bd_v) *
-				      ((i + 1) % lp->tx_bd_num);
+		dma_addr_t addr = lp->tx_bd_p +
+				  sizeof(*lp->tx_bd_v) *
+				  ((i + 1) % lp->tx_bd_num);
+
+		lp->tx_bd_v[i].next = lower_32_bits(addr);
+		if (lp->features & XAE_FEATURE_DMA_64BIT)
+			lp->tx_bd_v[i].next_msb = upper_32_bits(addr);
 	}

 	for (i = 0; i < lp->rx_bd_num; i++) {
-		lp->rx_bd_v[i].next = lp->rx_bd_p +
-				      sizeof(*lp->rx_bd_v) *
-				      ((i + 1) % lp->rx_bd_num);
+		dma_addr_t addr;
+
+		addr = lp->rx_bd_p + sizeof(*lp->rx_bd_v) *
+			((i + 1) % lp->rx_bd_num);
+		lp->rx_bd_v[i].next = lower_32_bits(addr);
+		if (lp->features & XAE_FEATURE_DMA_64BIT)
+			lp->rx_bd_v[i].next_msb = upper_32_bits(addr);

 		skb = netdev_alloc_skb_ip_align(ndev, lp->max_frm_size);
 		if (!skb)
 			goto out;

 		lp->rx_bd_v[i].skb = skb;
-		lp->rx_bd_v[i].phys = dma_map_single(ndev->dev.parent,
-						     skb->data,
-						     lp->max_frm_size,
-						     DMA_FROM_DEVICE);
-		if (dma_mapping_error(ndev->dev.parent, lp->rx_bd_v[i].phys)) {
+		addr = dma_map_single(ndev->dev.parent, skb->data,
+				      lp->max_frm_size, DMA_FROM_DEVICE);
+		if (dma_mapping_error(ndev->dev.parent, addr)) {
 			netdev_err(ndev, "DMA mapping error\n");
 			goto out;
 		}
+		desc_set_phys_addr(lp, addr, &lp->rx_bd_v[i]);
 		lp->rx_bd_v[i].cntrl = lp->max_frm_size;
 	}
@@ -574,6 +604,7 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 	struct axidma_bd *cur_p;
 	int max_bds = nr_bds;
 	unsigned int status;
+	dma_addr_t phys;
 	int i;

 	if (max_bds == -1)
@@ -589,7 +620,8 @@ static int axienet_free_tx_chain(struct net_device *ndev, u32 first_bd,
 		if (nr_bds == -1 && !(status & XAXIDMA_BD_STS_COMPLETE_MASK))
 			break;

-		dma_unmap_single(ndev->dev.parent, cur_p->phys,
+		phys = desc_get_phys_addr(lp, cur_p);
+		dma_unmap_single(ndev->dev.parent, phys,
 				 (cur_p->cntrl & XAXIDMA_BD_CTRL_LENGTH_MASK),
 				 DMA_TO_DEVICE);
@@ -687,7 +719,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 	u32 csum_start_off;
 	u32 csum_index_off;
 	skb_frag_t *frag;
-	dma_addr_t tail_p;
+	dma_addr_t tail_p, phys;
 	struct axienet_local *lp = netdev_priv(ndev);
 	struct axidma_bd *cur_p;
 	u32 orig_tail_ptr = lp->tx_bd_tail;
@@ -726,14 +758,15 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 		cur_p->app0 |= 2; /* Tx Full Checksum Offload Enabled */
 	}

-	cur_p->phys = dma_map_single(ndev->dev.parent, skb->data,
-				     skb_headlen(skb), DMA_TO_DEVICE);
-	if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+	phys = dma_map_single(ndev->dev.parent, skb->data,
+			      skb_headlen(skb), DMA_TO_DEVICE);
+	if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 		if (net_ratelimit())
 			netdev_err(ndev, "TX DMA mapping error\n");
 		ndev->stats.tx_dropped++;
 		return NETDEV_TX_OK;
 	}
+	desc_set_phys_addr(lp, phys, cur_p);
 	cur_p->cntrl = skb_headlen(skb) | XAXIDMA_BD_CTRL_TXSOF_MASK;

 	for (ii = 0; ii < num_frag; ii++) {
@@ -741,11 +774,11 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			lp->tx_bd_tail = 0;
 		cur_p = &lp->tx_bd_v[lp->tx_bd_tail];
 		frag = &skb_shinfo(skb)->frags[ii];
-		cur_p->phys = dma_map_single(ndev->dev.parent,
-					     skb_frag_address(frag),
-					     skb_frag_size(frag),
-					     DMA_TO_DEVICE);
-		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+		phys = dma_map_single(ndev->dev.parent,
+				      skb_frag_address(frag),
+				      skb_frag_size(frag),
+				      DMA_TO_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 			if (net_ratelimit())
 				netdev_err(ndev, "TX DMA mapping error\n");
 			ndev->stats.tx_dropped++;
@@ -755,6 +788,7 @@ axienet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
 			return NETDEV_TX_OK;
 		}
+		desc_set_phys_addr(lp, phys, cur_p);
 		cur_p->cntrl = skb_frag_size(frag);
 	}
@@ -793,10 +827,12 @@ static void axienet_recv(struct net_device *ndev)
 	cur_p = &lp->rx_bd_v[lp->rx_bd_ci];

 	while ((cur_p->status & XAXIDMA_BD_STS_COMPLETE_MASK)) {
+		dma_addr_t phys;
+
 		tail_p = lp->rx_bd_p + sizeof(*lp->rx_bd_v) * lp->rx_bd_ci;

-		dma_unmap_single(ndev->dev.parent, cur_p->phys,
-				 lp->max_frm_size,
+		phys = desc_get_phys_addr(lp, cur_p);
+		dma_unmap_single(ndev->dev.parent, phys, lp->max_frm_size,
 				 DMA_FROM_DEVICE);

 		skb = cur_p->skb;
@@ -832,15 +868,16 @@ static void axienet_recv(struct net_device *ndev)
 		if (!new_skb)
 			return;

-		cur_p->phys = dma_map_single(ndev->dev.parent, new_skb->data,
-					     lp->max_frm_size,
-					     DMA_FROM_DEVICE);
-		if (unlikely(dma_mapping_error(ndev->dev.parent, cur_p->phys))) {
+		phys = dma_map_single(ndev->dev.parent, new_skb->data,
+				      lp->max_frm_size,
+				      DMA_FROM_DEVICE);
+		if (unlikely(dma_mapping_error(ndev->dev.parent, phys))) {
 			if (net_ratelimit())
 				netdev_err(ndev, "RX DMA mapping error\n");
 			dev_kfree_skb(new_skb);
 			return;
 		}
+		desc_set_phys_addr(lp, phys, cur_p);
 		cur_p->cntrl = lp->max_frm_size;
 		cur_p->status = 0;
@@ -885,7 +922,8 @@ static irqreturn_t axienet_tx_irq(int irq, void *_ndev)
 		return IRQ_NONE;
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Tx error 0x%x\n", status);
-		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+			(lp->tx_bd_v[lp->tx_bd_ci]).phys_msb,
 			(lp->tx_bd_v[lp->tx_bd_ci]).phys);

 		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
@@ -934,7 +972,8 @@ static irqreturn_t axienet_rx_irq(int irq, void *_ndev)
 		return IRQ_NONE;
 	if (status & XAXIDMA_IRQ_ERROR_MASK) {
 		dev_err(&ndev->dev, "DMA Rx error 0x%x\n", status);
-		dev_err(&ndev->dev, "Current BD is at: 0x%x\n",
+		dev_err(&ndev->dev, "Current BD is at: 0x%x%08x\n",
+			(lp->rx_bd_v[lp->rx_bd_ci]).phys_msb,
 			(lp->rx_bd_v[lp->rx_bd_ci]).phys);

 		cr = axienet_dma_in32(lp, XAXIDMA_TX_CR_OFFSET);
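
The changed error messages above print the high word first and the low word
with %08x, so the two halves concatenate into one zero-padded 64-bit hex
value; with the feature flag clear, phys_msb stays zero and the output only
gains a leading 0. A minimal illustration of that format string (ordinary
printf, outside the driver):

    #include <stdio.h>
    #include <stdint.h>

    int main(void)
    {
    	uint32_t phys_msb = 0x1, phys = 0x0badcafe;

    	/* High word, then low word padded to eight hex digits:
    	 * prints "Current BD is at: 0x10badcafe".
    	 */
    	printf("Current BD is at: 0x%x%08x\n", phys_msb, phys);
    	return 0;
    }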
@@ -1616,14 +1655,18 @@ static void axienet_dma_err_handler(struct work_struct *work)
 	for (i = 0; i < lp->tx_bd_num; i++) {
 		cur_p = &lp->tx_bd_v[i];
-		if (cur_p->cntrl)
-			dma_unmap_single(ndev->dev.parent, cur_p->phys,
+		if (cur_p->cntrl) {
+			dma_addr_t addr = desc_get_phys_addr(lp, cur_p);
+
+			dma_unmap_single(ndev->dev.parent, addr,
 					 (cur_p->cntrl &
 					  XAXIDMA_BD_CTRL_LENGTH_MASK),
 					 DMA_TO_DEVICE);
+		}
 		if (cur_p->skb)
 			dev_kfree_skb_irq(cur_p->skb);
 		cur_p->phys = 0;
+		cur_p->phys_msb = 0;
 		cur_p->cntrl = 0;
 		cur_p->status = 0;
 		cur_p->app0 = 0;
......