Commit 63df8e09 authored by Mike Marciniszyn, committed by Doug Ledford

IB/hfi1: Inline sdma_txclean() for verbs pio

Short circuit sdma_txclean() by adding an __sdma_txclean()
that is only called when the tx has sdma mappings.

Convert internal calls to __sdma_txclean().

This removes a call from the critical path.
Reviewed-by: Sebastian Sanchez <sebastian.sanchez@intel.com>
Signed-off-by: Mike Marciniszyn <mike.marciniszyn@intel.com>
Signed-off-by: Dennis Dalessandro <dennis.dalessandro@intel.com>
Signed-off-by: Doug Ledford <dledford@redhat.com>
parent 4e045572
...@@ -375,7 +375,7 @@ static inline void complete_tx(struct sdma_engine *sde, ...@@ -375,7 +375,7 @@ static inline void complete_tx(struct sdma_engine *sde,
sde->head_sn, tx->sn); sde->head_sn, tx->sn);
sde->head_sn++; sde->head_sn++;
#endif #endif
sdma_txclean(sde->dd, tx); __sdma_txclean(sde->dd, tx);
if (complete) if (complete)
(*complete)(tx, res); (*complete)(tx, res);
if (wait && iowait_sdma_dec(wait)) if (wait && iowait_sdma_dec(wait))
...@@ -1643,7 +1643,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx) ...@@ -1643,7 +1643,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
} }
/** /**
* sdma_txclean() - clean tx of mappings, descp *kmalloc's * __sdma_txclean() - clean tx of mappings, descp *kmalloc's
* @dd: hfi1_devdata for unmapping * @dd: hfi1_devdata for unmapping
* @tx: tx request to clean * @tx: tx request to clean
* *
...@@ -1653,7 +1653,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx) ...@@ -1653,7 +1653,7 @@ static inline u8 ahg_mode(struct sdma_txreq *tx)
* The code can be called multiple times without issue. * The code can be called multiple times without issue.
* *
*/ */
void sdma_txclean( void __sdma_txclean(
struct hfi1_devdata *dd, struct hfi1_devdata *dd,
struct sdma_txreq *tx) struct sdma_txreq *tx)
{ {
...@@ -3080,7 +3080,7 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) ...@@ -3080,7 +3080,7 @@ static int _extend_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
tx->descp[i] = tx->descs[i]; tx->descp[i] = tx->descs[i];
return 0; return 0;
enomem: enomem:
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return -ENOMEM; return -ENOMEM;
} }
...@@ -3109,14 +3109,14 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, ...@@ -3109,14 +3109,14 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
rval = _extend_sdma_tx_descs(dd, tx); rval = _extend_sdma_tx_descs(dd, tx);
if (rval) { if (rval) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return rval; return rval;
} }
/* If coalesce buffer is allocated, copy data into it */ /* If coalesce buffer is allocated, copy data into it */
if (tx->coalesce_buf) { if (tx->coalesce_buf) {
if (type == SDMA_MAP_NONE) { if (type == SDMA_MAP_NONE) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return -EINVAL; return -EINVAL;
} }
...@@ -3124,7 +3124,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, ...@@ -3124,7 +3124,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
kvaddr = kmap(page); kvaddr = kmap(page);
kvaddr += offset; kvaddr += offset;
} else if (WARN_ON(!kvaddr)) { } else if (WARN_ON(!kvaddr)) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return -EINVAL; return -EINVAL;
} }
...@@ -3154,7 +3154,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, ...@@ -3154,7 +3154,7 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return -ENOSPC; return -ENOSPC;
} }
...@@ -3196,7 +3196,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx) ...@@ -3196,7 +3196,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
if ((unlikely(tx->num_desc == tx->desc_limit))) { if ((unlikely(tx->num_desc == tx->desc_limit))) {
rval = _extend_sdma_tx_descs(dd, tx); rval = _extend_sdma_tx_descs(dd, tx);
if (rval) { if (rval) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return rval; return rval;
} }
} }
......
...@@ -667,7 +667,13 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx, ...@@ -667,7 +667,13 @@ int ext_coal_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx,
int type, void *kvaddr, struct page *page, int type, void *kvaddr, struct page *page,
unsigned long offset, u16 len); unsigned long offset, u16 len);
int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *); int _pad_sdma_tx_descs(struct hfi1_devdata *, struct sdma_txreq *);
void sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *); void __sdma_txclean(struct hfi1_devdata *, struct sdma_txreq *);
/*
 * sdma_txclean - fast-path wrapper around __sdma_txclean()
 *
 * Skips the clean entirely when the tx request holds no descriptors
 * (and therefore no DMA mappings), avoiding a function call in the
 * critical path. Safe to call multiple times on the same tx.
 */
static inline void sdma_txclean(struct hfi1_devdata *dd, struct sdma_txreq *tx)
{
	if (!tx->num_desc)
		return;
	__sdma_txclean(dd, tx);
}
/* helpers used by public routines */ /* helpers used by public routines */
static inline void _sdma_close_tx(struct hfi1_devdata *dd, static inline void _sdma_close_tx(struct hfi1_devdata *dd,
...@@ -753,7 +759,7 @@ static inline int sdma_txadd_page( ...@@ -753,7 +759,7 @@ static inline int sdma_txadd_page(
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return -ENOSPC; return -ENOSPC;
} }
...@@ -834,7 +840,7 @@ static inline int sdma_txadd_kvaddr( ...@@ -834,7 +840,7 @@ static inline int sdma_txadd_kvaddr(
DMA_TO_DEVICE); DMA_TO_DEVICE);
if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) { if (unlikely(dma_mapping_error(&dd->pcidev->dev, addr))) {
sdma_txclean(dd, tx); __sdma_txclean(dd, tx);
return -ENOSPC; return -ENOSPC;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment