Commit 9a01c7c2 authored by Mika Westerberg, committed by David S. Miller

thunderbolt: Allocate ring HopID automatically if requested

Thunderbolt services should not care which HopID (ring) they use for
sending and receiving packets over the high-speed DMA path, so make
tb_ring_alloc_rx() and tb_ring_alloc_tx() accept a negative HopID. This
means that the NHI will automatically allocate the next available HopID
for the caller.

These HopIDs are allocated from the range that is not reserved for the
Thunderbolt protocol (8 .. hop_count - 1).

If needed, the allocated HopID can be read from the ring->hop field
after the ring has been allocated successfully.
Signed-off-by: Mika Westerberg <mika.westerberg@linux.intel.com>
Reviewed-by: Michael Jamet <michael.jamet@intel.com>
Reviewed-by: Yehezkel Bernat <yehezkel.bernat@intel.com>
Reviewed-by: Andy Shevchenko <andriy.shevchenko@linux.intel.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
parent 3304559e
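
To illustrate the new calling convention, here is a minimal sketch of a
caller that lets the NHI pick the HopID. This example is not part of the
commit; it assumes the tb_ring_alloc_tx()/tb_ring_free() prototypes
exported to modules earlier in this series, and the example_open_tx_ring()
wrapper is hypothetical:

	/* Hypothetical caller: request automatic HopID allocation */
	static int example_open_tx_ring(struct tb_nhi *nhi)
	{
		struct tb_ring *ring;

		/*
		 * A negative hop asks the NHI to pick the next free HopID
		 * from the non-reserved range 8 .. hop_count - 1.
		 */
		ring = tb_ring_alloc_tx(nhi, -1, 16, RING_FLAG_NO_SUSPEND);
		if (!ring)
			return -ENOMEM;

		/* The HopID chosen by the NHI can be read from ring->hop */
		pr_info("allocated TX ring with HopID %d\n", ring->hop);

		tb_ring_free(ring);
		return 0;
	}
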
@@ -26,6 +26,8 @@
  * use this ring for anything else.
  */
 #define RING_E2E_UNUSED_HOPID	2
+/* HopIDs 0-7 are reserved by the Thunderbolt protocol */
+#define RING_FIRST_USABLE_HOPID	8
 
 /*
  * Minimal number of vectors when we use MSI-X. Two for control channel
@@ -411,6 +413,62 @@ static void ring_release_msix(struct tb_ring *ring)
 	ring->irq = 0;
 }
 
+static int nhi_alloc_hop(struct tb_nhi *nhi, struct tb_ring *ring)
+{
+	int ret = 0;
+
+	spin_lock_irq(&nhi->lock);
+
+	if (ring->hop < 0) {
+		unsigned int i;
+
+		/*
+		 * Automatically allocate HopID from the non-reserved
+		 * range 8 .. hop_count - 1.
+		 */
+		for (i = RING_FIRST_USABLE_HOPID; i < nhi->hop_count; i++) {
+			if (ring->is_tx) {
+				if (!nhi->tx_rings[i]) {
+					ring->hop = i;
+					break;
+				}
+			} else {
+				if (!nhi->rx_rings[i]) {
+					ring->hop = i;
+					break;
+				}
+			}
+		}
+	}
+
+	if (ring->hop < 0 || ring->hop >= nhi->hop_count) {
+		dev_warn(&nhi->pdev->dev, "invalid hop: %d\n", ring->hop);
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+
+	if (ring->is_tx && nhi->tx_rings[ring->hop]) {
+		dev_warn(&nhi->pdev->dev, "TX hop %d already allocated\n",
+			 ring->hop);
+		ret = -EBUSY;
+		goto err_unlock;
+	} else if (!ring->is_tx && nhi->rx_rings[ring->hop]) {
+		dev_warn(&nhi->pdev->dev, "RX hop %d already allocated\n",
+			 ring->hop);
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+
+	if (ring->is_tx)
+		nhi->tx_rings[ring->hop] = ring;
+	else
+		nhi->rx_rings[ring->hop] = ring;
+
+err_unlock:
+	spin_unlock_irq(&nhi->lock);
+
+	return ret;
+}
+
 static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 				     bool transmit, unsigned int flags,
 				     u16 sof_mask, u16 eof_mask,
@@ -456,28 +514,12 @@ static struct tb_ring *tb_ring_alloc(struct tb_nhi *nhi, u32 hop, int size,
 	if (ring_request_msix(ring, flags & RING_FLAG_NO_SUSPEND))
 		goto err_free_descs;
 
-	spin_lock_irq(&nhi->lock);
-	if (hop >= nhi->hop_count) {
-		dev_WARN(&nhi->pdev->dev, "invalid hop: %d\n", hop);
+	if (nhi_alloc_hop(nhi, ring))
 		goto err_release_msix;
-	}
-	if (transmit && nhi->tx_rings[hop]) {
-		dev_WARN(&nhi->pdev->dev, "TX hop %d already allocated\n", hop);
-		goto err_release_msix;
-	} else if (!transmit && nhi->rx_rings[hop]) {
-		dev_WARN(&nhi->pdev->dev, "RX hop %d already allocated\n", hop);
-		goto err_release_msix;
-	}
-	if (transmit)
-		nhi->tx_rings[hop] = ring;
-	else
-		nhi->rx_rings[hop] = ring;
-	spin_unlock_irq(&nhi->lock);
 
 	return ring;
 
 err_release_msix:
-	spin_unlock_irq(&nhi->lock);
 	ring_release_msix(ring);
 err_free_descs:
 	dma_free_coherent(&ring->nhi->pdev->dev,
@@ -506,7 +548,7 @@ EXPORT_SYMBOL_GPL(tb_ring_alloc_tx);
 /**
  * tb_ring_alloc_rx() - Allocate DMA ring for receive
  * @nhi: Pointer to the NHI the ring is to be allocated
- * @hop: HopID (ring) to allocate
+ * @hop: HopID (ring) to allocate. Pass %-1 for automatic allocation.
  * @size: Number of entries in the ring
  * @flags: Flags for the ring
  * @sof_mask: Mask of PDF values that start a frame
...