Commit 37f1cda4 authored by Greg Kroah-Hartman

Merge tag 'mhi-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi into char-misc-next

Manivannan writes:

MHI changes for v5.12

Loic improved the MHI PCI generic controller driver by adding support for the DIAG
channel, PCI error handling, suspend/recovery/resume, and a health check. Loic also
added support for resetting the MHI device as per the MHI specification: the default
case writes to a dedicated SoC reset register, and a controller-specific reset
callback is used instead when one is provided.
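
A minimal sketch of the new reset hooks, assuming the interfaces added in this
series; my_soc_reset()/my_setup_reset()/my_recover_device() are hypothetical names,
only mhi_cntrl->reset and mhi_soc_reset() come from the MHI core:

#include <linux/mhi.h>

/* Hypothetical controller-specific reset (e.g. via a side-band mechanism). */
static void my_soc_reset(struct mhi_controller *mhi_cntrl)
{
	/* device-specific reset sequence goes here */
}

static void my_setup_reset(struct mhi_controller *mhi_cntrl)
{
	/*
	 * Optional: without a ->reset callback, mhi_soc_reset() falls back to
	 * writing MHI_SOC_RESET_REQ to the MHI_SOC_RESET_REQ_OFFSET register.
	 */
	mhi_cntrl->reset = my_soc_reset;
}

static void my_recover_device(struct mhi_controller *mhi_cntrl)
{
	/* Last-resort recovery: ask the core to reset the device. */
	mhi_soc_reset(mhi_cntrl);
}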

Along with this, Loic also added a new API which returns the number of free TREs
(Transfer Ring Elements) from the MHI core. Client drivers can make use of this
API, and the current consumer is the "mhi-net" driver. To take both the "mhi-net"
driver change and the API change, we created an "mhi-net-immutable" branch for this
patch and merged it into both net-next and mhi-next.
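
For illustration, a simplified transmit path in the spirit of "mhi-net" that uses
the new API to throttle the queue; the my_net_priv/my_ndo_xmit names are
hypothetical, while mhi_queue_skb() and mhi_get_free_desc_count() are the real
client-facing calls:

#include <linux/mhi.h>
#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-direction.h>

struct my_net_priv {
	struct mhi_device *mdev;	/* MHI channel device bound to this netdev */
};

static netdev_tx_t my_ndo_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct my_net_priv *priv = netdev_priv(ndev);
	int err;

	err = mhi_queue_skb(priv->mdev, DMA_TO_DEVICE, skb, skb->len, MHI_EOT);
	if (err) {
		dev_kfree_skb_any(skb);
		ndev->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	/* Throttle TX once the uplink transfer ring has no free TREs left. */
	if (mhi_get_free_desc_count(priv->mdev, DMA_TO_DEVICE) == 0)
		netif_stop_queue(ndev);

	return NETDEV_TX_OK;
}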

Carl added a patch which lets the controller driver pass custom IRQ flags for the
BHI and MHI event interrupts to the MHI core. The current consumer of this feature
is the ath11k MHI controller driver. To take both changes, we created an
"mhi-ath11k-immutable" branch for this patch and merged it into ath11k-next and
mhi-next.
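
Sketch of how a controller driver can override the flags before registration; the
wrapper name and the specific flag choice are illustrative, and leaving irq_flags
at 0 keeps the core's default of IRQF_SHARED | IRQF_NO_SUSPEND:

#include <linux/interrupt.h>
#include <linux/mhi.h>

static int my_register_controller(struct mhi_controller *mhi_cntrl,
				  const struct mhi_controller_config *config)
{
	/* Optional override used by the core for the BHI and event ring IRQs. */
	mhi_cntrl->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;

	return mhi_register_controller(mhi_cntrl, config);
}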

Finally, Loic cleaned up the MHI queue APIs and fixed the shared MSI vector
support.
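
The queue cleanup factors the common path into an internal mhi_queue() helper while
keeping the client-facing prototypes unchanged. A minimal, hypothetical example of
queueing a downlink receive buffer through the unchanged API:

#include <linux/mhi.h>
#include <linux/slab.h>
#include <linux/dma-direction.h>

static int my_queue_rx_buf(struct mhi_device *mhi_dev, size_t len)
{
	void *buf;
	int err;

	buf = kmalloc(len, GFP_KERNEL);
	if (!buf)
		return -ENOMEM;

	/* buf is handed back in the client's dl_xfer_cb() once data arrives. */
	err = mhi_queue_buf(mhi_dev, DMA_FROM_DEVICE, buf, len, MHI_EOT);
	if (err)
		kfree(buf);

	return err;
}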

* tag 'mhi-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/mani/mhi:
  bus: mhi: pci_generic: Increase num of elements in hw event ring
  mhi: pci_generic: Print warning in case of firmware crash
  bus: mhi: core: Add helper API to return number of free TREs
  mhi: core: Factorize mhi queuing
  mhi: use irq_flags if controller driver configures it
  mhi: pci_generic: Fix shared MSI vector support
  mhi: unconstify mhi_event_config
  bus: mhi: Ensure correct ring update ordering with memory barrier
  mhi: pci_generic: Set irq moderation value to 1ms for hw channels
  mhi: pci_generic: Add diag channels
  mhi: pci_generic: Increase controller timeout value
  mhi: pci_generic: Add health-check
  mhi: pci_generic: Add PCI error handlers
  mhi: pci_generic: Add suspend/resume/recovery procedure
  mhi: pci_generic: Add support for reset
  mhi: pci_generic: Enable burst mode for hardware channels
  mhi: pci-generic: Increase number of hardware events
  bus: mhi: core: Add device hardware reset support
parents 1609faa9 026c5b1e
@@ -151,12 +151,17 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
{
struct mhi_event *mhi_event = mhi_cntrl->mhi_event;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
unsigned long irq_flags = IRQF_SHARED | IRQF_NO_SUSPEND;
int i, ret;
/* if controller driver has set irq_flags, use it */
if (mhi_cntrl->irq_flags)
irq_flags = mhi_cntrl->irq_flags;
/* Setup BHI_INTVEC IRQ */
ret = request_threaded_irq(mhi_cntrl->irq[0], mhi_intvec_handler,
mhi_intvec_threaded_handler,
IRQF_SHARED | IRQF_NO_SUSPEND,
irq_flags,
"bhi", mhi_cntrl);
if (ret)
return ret;
@@ -174,7 +179,7 @@ int mhi_init_irq_setup(struct mhi_controller *mhi_cntrl)
ret = request_irq(mhi_cntrl->irq[mhi_event->irq],
mhi_irq_handler,
IRQF_SHARED | IRQF_NO_SUSPEND,
irq_flags,
"mhi", mhi_event);
if (ret) {
dev_err(dev, "Error requesting irq:%d for ev:%d\n",
......
@@ -111,7 +111,14 @@ void mhi_ring_chan_db(struct mhi_controller *mhi_cntrl,
dma_addr_t db;
db = ring->iommu_base + (ring->wp - ring->base);
/*
* Writes to the new ring element must be visible to the hardware
* before letting h/w know there is new element to fetch.
*/
dma_wmb();
*ring->ctxt_wp = db;
mhi_chan->db_cfg.process_db(mhi_cntrl, &mhi_chan->db_cfg,
ring->db_addr, db);
}
@@ -135,6 +142,19 @@ enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl)
}
EXPORT_SYMBOL_GPL(mhi_get_mhi_state);
void mhi_soc_reset(struct mhi_controller *mhi_cntrl)
{
if (mhi_cntrl->reset) {
mhi_cntrl->reset(mhi_cntrl);
return;
}
/* Generic MHI SoC reset */
mhi_write_reg(mhi_cntrl, mhi_cntrl->regs, MHI_SOC_RESET_REQ_OFFSET,
MHI_SOC_RESET_REQ);
}
EXPORT_SYMBOL_GPL(mhi_soc_reset);
int mhi_map_single_no_bb(struct mhi_controller *mhi_cntrl,
struct mhi_buf_info *buf_info)
{
@@ -260,6 +280,18 @@ int mhi_destroy_device(struct device *dev, void *data)
return 0;
}
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
enum dma_data_direction dir)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ?
mhi_dev->ul_chan : mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
return get_nr_avail_ring_elements(mhi_cntrl, tre_ring);
}
EXPORT_SYMBOL_GPL(mhi_get_free_desc_count);
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason)
{
struct mhi_driver *mhi_drv;
@@ -947,118 +979,88 @@ static bool mhi_is_ring_full(struct mhi_controller *mhi_cntrl,
return (tmp == ring->rp);
}
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags)
static int mhi_queue(struct mhi_device *mhi_dev, struct mhi_buf_info *buf_info,
enum dma_data_direction dir, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
return -EINVAL;
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
read_lock_bh(&mhi_cntrl->pm_lock);
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return -EIO;
ret = mhi_is_ring_full(mhi_cntrl, tre_ring);
if (unlikely(ret)) {
ret = -ENOMEM;
goto exit_unlock;
}
/* we're in M3 or transitioning to M3 */
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, buf_info, mflags);
if (unlikely(ret))
goto exit_unlock;
/* trigger M3 exit if necessary */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
/* Toggle wake to exit out of M2 */
/* Assert dev_wake (to exit/prevent M1/M2)*/
mhi_cntrl->wake_toggle(mhi_cntrl);
buf_info.v_addr = skb->data;
buf_info.cb_buf = skb;
buf_info.len = len;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return ret;
}
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
if (unlikely(!MHI_DB_ACCESS_VALID(mhi_cntrl))) {
ret = -EIO;
goto exit_unlock;
}
read_unlock_bh(&mhi_cntrl->pm_lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
return 0;
exit_unlock:
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
return ret;
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
int mhi_queue_skb(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct sk_buff *skb, size_t len, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct device *dev = &mhi_cntrl->mhi_dev->dev;
struct mhi_ring *tre_ring = &mhi_chan->tre_ring;
struct mhi_buf_info buf_info = { };
int ret;
/* If MHI host pre-allocates buffers then client drivers cannot queue */
if (mhi_chan->pre_alloc)
return -EINVAL;
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
read_lock_bh(&mhi_cntrl->pm_lock);
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state))) {
dev_err(dev, "MHI is not in activate state, PM state: %s\n",
to_mhi_pm_state_str(mhi_cntrl->pm_state));
read_unlock_bh(&mhi_cntrl->pm_lock);
buf_info.v_addr = skb->data;
buf_info.cb_buf = skb;
buf_info.len = len;
return -EIO;
}
if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;
/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_skb);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
int mhi_queue_dma(struct mhi_device *mhi_dev, enum dma_data_direction dir,
struct mhi_buf *mhi_buf, size_t len, enum mhi_flags mflags)
{
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_buf_info buf_info = { };
buf_info.p_addr = mhi_buf->dma_addr;
buf_info.cb_buf = mhi_buf;
buf_info.pre_mapped = true;
buf_info.len = len;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret)) {
read_unlock_bh(&mhi_cntrl->pm_lock);
return ret;
}
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
read_lock_bh(&mhi_chan->lock);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_bh(&mhi_chan->lock);
}
read_unlock_bh(&mhi_cntrl->pm_lock);
if (unlikely(mhi_chan->pre_alloc))
return -EINVAL;
return 0;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_dma);
@@ -1112,57 +1114,13 @@ int mhi_gen_tre(struct mhi_controller *mhi_cntrl, struct mhi_chan *mhi_chan,
int mhi_queue_buf(struct mhi_device *mhi_dev, enum dma_data_direction dir,
void *buf, size_t len, enum mhi_flags mflags)
{
struct mhi_controller *mhi_cntrl = mhi_dev->mhi_cntrl;
struct mhi_chan *mhi_chan = (dir == DMA_TO_DEVICE) ? mhi_dev->ul_chan :
mhi_dev->dl_chan;
struct mhi_ring *tre_ring;
struct mhi_buf_info buf_info = { };
unsigned long flags;
int ret;
/*
* this check here only as a guard, it's always
* possible mhi can enter error while executing rest of function,
* which is not fatal so we do not need to hold pm_lock
*/
if (unlikely(MHI_PM_IN_ERROR_STATE(mhi_cntrl->pm_state)))
return -EIO;
tre_ring = &mhi_chan->tre_ring;
if (mhi_is_ring_full(mhi_cntrl, tre_ring))
return -ENOMEM;
buf_info.v_addr = buf;
buf_info.cb_buf = buf;
buf_info.len = len;
ret = mhi_gen_tre(mhi_cntrl, mhi_chan, &buf_info, mflags);
if (unlikely(ret))
return ret;
read_lock_irqsave(&mhi_cntrl->pm_lock, flags);
/* we're in M3 or transitioning to M3 */
if (MHI_PM_IN_SUSPEND_STATE(mhi_cntrl->pm_state))
mhi_trigger_resume(mhi_cntrl);
/* Toggle wake to exit out of M2 */
mhi_cntrl->wake_toggle(mhi_cntrl);
if (mhi_chan->dir == DMA_TO_DEVICE)
atomic_inc(&mhi_cntrl->pending_pkts);
if (likely(MHI_DB_ACCESS_VALID(mhi_cntrl))) {
unsigned long flags;
read_lock_irqsave(&mhi_chan->lock, flags);
mhi_ring_chan_db(mhi_cntrl, mhi_chan);
read_unlock_irqrestore(&mhi_chan->lock, flags);
}
read_unlock_irqrestore(&mhi_cntrl->pm_lock, flags);
return 0;
return mhi_queue(mhi_dev, &buf_info, dir, mflags);
}
EXPORT_SYMBOL_GPL(mhi_queue_buf);
......
This diff is collapsed.
@@ -279,7 +279,7 @@ struct mhi_controller_config {
u32 num_channels;
const struct mhi_channel_config *ch_cfg;
u32 num_events;
const struct mhi_event_config *event_cfg;
struct mhi_event_config *event_cfg;
bool use_bounce_buf;
bool m2_no_db;
};
@@ -347,12 +347,14 @@ struct mhi_controller_config {
* @unmap_single: CB function to destroy TRE buffer
* @read_reg: Read a MHI register via the physical link (required)
* @write_reg: Write a MHI register via the physical link (required)
* @reset: Controller specific reset function (optional)
* @buffer_len: Bounce buffer length
* @index: Index of the MHI controller instance
* @bounce_buf: Use of bounce buffer
* @fbc_download: MHI host needs to do complete image transfer (optional)
* @pre_init: MHI host needs to do pre-initialization before power up
* @wake_set: Device wakeup set flag
* @irq_flags: irq flags passed to request_irq (optional)
*
* Fields marked as (required) need to be populated by the controller driver
* before calling mhi_register_controller(). For the fields marked as (optional)
@@ -437,6 +439,7 @@ struct mhi_controller {
u32 *out);
void (*write_reg)(struct mhi_controller *mhi_cntrl, void __iomem *addr,
u32 val);
void (*reset)(struct mhi_controller *mhi_cntrl);
size_t buffer_len;
int index;
@@ -444,6 +447,7 @@ struct mhi_controller {
bool fbc_download;
bool pre_init;
bool wake_set;
unsigned long irq_flags;
};
/**
@@ -598,6 +602,15 @@ void mhi_set_mhi_state(struct mhi_controller *mhi_cntrl,
*/
void mhi_notify(struct mhi_device *mhi_dev, enum mhi_callback cb_reason);
/**
* mhi_get_free_desc_count - Get transfer ring length
* Get # of TD available to queue buffers
* @mhi_dev: Device associated with the channels
* @dir: Direction of the channel
*/
int mhi_get_free_desc_count(struct mhi_device *mhi_dev,
enum dma_data_direction dir);
/**
* mhi_prepare_for_power_up - Do pre-initialization before power up.
* This is optional, call this before power up if
@@ -672,6 +685,13 @@ enum mhi_ee_type mhi_get_exec_env(struct mhi_controller *mhi_cntrl);
*/
enum mhi_state mhi_get_mhi_state(struct mhi_controller *mhi_cntrl);
/**
* mhi_soc_reset - Trigger a device reset. This can be used as a last resort
* to reset and recover a device.
* @mhi_cntrl: MHI controller
*/
void mhi_soc_reset(struct mhi_controller *mhi_cntrl);
/**
* mhi_device_get - Disable device low power mode
* @mhi_dev: Device associated with the channel
......