Commit a8f75cb3 authored by Loic Poulain, committed by Manivannan Sadhasivam

mhi: core: Factorize mhi queuing

Instead of duplicating the queuing procedure in mhi_queue_dma(),
mhi_queue_buf() and mhi_queue_skb(), add a new generic mhi_queue()
as a common helper (see the caller-side usage sketch below).

Note that the unified mhi_queue() aligns its pm_lock locking with the
mhi_queue_buf() behavior, taking the lock with the irqsave variant
(vs. the _bh variant used by the former queue_skb and queue_dma
versions); a short sketch contrasting the two locking idioms follows
the diff.
Signed-off-by: Loic Poulain <loic.poulain@linaro.org>
Reviewed-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
Signed-off-by: Manivannan Sadhasivam <manivannan.sadhasivam@linaro.org>
parent cdce2663
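
For context before the diff: the sketch below illustrates how a client driver drives the caller-facing wrappers that this patch turns into thin shims around mhi_queue(). It is a minimal, hypothetical example, not part of the patch; the wrapper signatures match those in the diff, but the function names, the use of MHI_EOT, and the calling context are assumptions.

/* Illustrative only: a hypothetical client driver queuing an uplink skb
 * and posting a downlink receive buffer through the public wrappers.
 * MHI_EOT marks the end of a transfer; error handling is omitted.
 */
#include <linux/mhi.h>
#include <linux/skbuff.h>

static int example_send_skb(struct mhi_device *mhi_dev, struct sk_buff *skb)
{
	/* Queue the skb on the uplink (device-bound) channel */
	return mhi_queue_skb(mhi_dev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
}

static int example_post_rx_buf(struct mhi_device *mhi_dev, void *buf, size_t len)
{
	/* Hand a receive buffer to the downlink channel */
	return mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, len, MHI_EOT);
}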
@@ -967,118 +967,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
 	return (tmp == ring->rp);
 }
 
-int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
+static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
+		     enum dma_data_direction dir, enum mhi_flags mflags)
 {
 	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
 	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
-	struct mhi_buf_info buf_info = { };
+	unsigned long flags;
 	int ret;
 
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
-
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return -EIO;
+	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
+		return -EIO;
+
+	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
+
+	ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
+	if (unlikely(ret)) {
+		ret = -ENOMEM;
+		goto exit_unlock;
 	}
 
-	/* we're in M3 or transitioning to M3 */
+	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
+	if (unlikely(ret))
+		goto exit_unlock;
+
+	/* trigger M3 exit if necessary */
 	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
 		mhi_trigger_resume(mhi_cntrl);
 
-	/* Toggle wake to exit out of M2 */
+	/* Assert dev_wake (to exit/prevent M1/M2)*/
 	mhi_cntrl->wake_toggle(mhi_cntrl);
 
-	buf_info.v_addr = skb->data;
-	buf_info.cb_buf = skb;
-	buf_info.len = len;
-
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
 	if (mhi_chan->dir == DMA_TO_DEVICE)
 		atomic_inc(&mhi_cntrl->pending_pkts);
 
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
+	if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
+		ret = -EIO;
+		goto exit_unlock;
 	}
 
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	mhi_ring_chan_db(mhi_cntrl, mhi_chan);
 
-	return 0;
+exit_unlock:
+	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
+
+	return ret;
 }
-EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
-		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct sk_buff *skb, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
 	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
 							     mhi_dev->dl_chan;
-	struct device *dev = &mhi_cntrl->mhi_dev->dev;
-	struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
 	struct mhi_buf_info buf_info = { };
-	int ret;
-
-	/* If MHI host pre-allocates buffers then client drivers cannot queue */
-	if (mhi_chan->pre_alloc)
-		return -EINVAL;
 
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
-
-	read_lock_bh(&mhi_cntrl->pm_lock);
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
-		dev_err(dev, "MHI is not in activate state, PM state: %s\n",
-			to_mhi_pm_state_str(mhi_cntrl->pm_state));
-		read_unlock_bh(&mhi_cntrl->pm_lock);
+	buf_info.v_addr = skb->data;
+	buf_info.cb_buf = skb;
+	buf_info.len = len;
 
-		return -EIO;
-	}
+	if (unlikely(mhi_chan->pre_alloc))
+		return -EINVAL;
 
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
+}
+EXPORT_SYMBOL_GPL(mhi_queue_skb);
 
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
+int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
+		  struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
+{
+	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
+							     mhi_dev->dl_chan;
+	struct mhi_buf_info buf_info = { };
 
 	buf_info.p_addr = mhi_buf->dma_addr;
 	buf_info.cb_buf = mhi_buf;
 	buf_info.pre_mapped = true;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret)) {
-		read_unlock_bh(&mhi_cntrl->pm_lock);
-		return ret;
-	}
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		read_lock_bh(&mhi_chan->lock);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_bh(&mhi_chan->lock);
-	}
-
-	read_unlock_bh(&mhi_cntrl->pm_lock);
+	if (unlikely(mhi_chan->pre_alloc))
+		return -EINVAL;
 
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_dma);
@@ -1132,57 +1102,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
 int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
 		  void *buf, size_t len, enum mhi_flags mflags)
 {
-	struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
-	struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
-							     mhi_dev->dl_chan;
-	struct mhi_ring *tre_ring;
 	struct mhi_buf_info buf_info = { };
-	unsigned long flags;
-	int ret;
-
-	/*
-	 * this check here only as a guard, it's always
-	 * possible mhi can enter error while executing rest of function,
-	 * which is not fatal so we do not need to hold pm_lock
-	 */
-	if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
-		return -EIO;
-
-	tre_ring = &mhi_chan->tre_ring;
-	if (mhi_is_ring_full(mhi_cntrl, tre_ring))
-		return -ENOMEM;
 
 	buf_info.v_addr = buf;
 	buf_info.cb_buf = buf;
 	buf_info.len = len;
 
-	ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
-	if (unlikely(ret))
-		return ret;
-
-	read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
-
-	/* we're in M3 or transitioning to M3 */
-	if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
-		mhi_trigger_resume(mhi_cntrl);
-
-	/* Toggle wake to exit out of M2 */
-	mhi_cntrl->wake_toggle(mhi_cntrl);
-
-	if (mhi_chan->dir == DMA_TO_DEVICE)
-		atomic_inc(&mhi_cntrl->pending_pkts);
-
-	if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
-		unsigned long flags;
-
-		read_lock_irqsave(&mhi_chan->lock, flags);
-		mhi_ring_chan_db(mhi_cntrl, mhi_chan);
-		read_unlock_irqrestore(&mhi_chan->lock, flags);
-	}
-
-	read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
-
-	return 0;
+	return mhi_queue(mhi_dev, &buf_info, dir, mflags);
 }
 EXPORT_SYMBOL_GPL(mhi_queue_buf);
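
On the locking note in the commit message: read_lock_bh() only disables softirq processing on the local CPU, while read_lock_irqsave() also disables and restores local hard interrupts, so the unified path can safely be entered even from hard-IRQ context. Below is a minimal sketch of the two idioms on a generic rwlock_t; the lock and state variable are hypothetical stand-ins, not the MHI structures.

#include <linux/spinlock.h>

static DEFINE_RWLOCK(example_lock);	/* hypothetical lock, stands in for pm_lock */
static int example_state;

static int example_read_bh(void)
{
	int val;

	read_lock_bh(&example_lock);	/* disables softirqs on this CPU only */
	val = example_state;
	read_unlock_bh(&example_lock);

	return val;
}

static int example_read_irqsave(void)
{
	unsigned long flags;
	int val;

	/* also disables local hard interrupts and restores the saved state,
	 * so the caller may already be running in hard-IRQ context
	 */
	read_lock_irqsave(&example_lock, flags);
	val = example_state;
	read_unlock_irqrestore(&example_lock, flags);

	return val;
}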