Commit 2d152760 authored by Jakub Kicinski

Merge branch 'net-ipa-constrain-gsi-interrupts'

Alex Elder says:

====================
net: ipa: constrain GSI interrupts

The goal of this series is to more tightly control when GSI
interrupts are enabled.  This is a long-ish series, so I'll
describe it in parts.

The first patch is actually unrelated...  I forgot to include
it in my previous series (which exposed the GSI layer to the
IPA version).  It is a trivial comments-only update patch.

The second patch defers registering the GSI interrupt handler
until *after* all of the resources that handler touches have
been initialized.  In practice, we don't see this interrupt
that early, but this precludes an obvious problem.

The next two patches are simple changes.  The first just
trivially renames a field.  The second switches from using
constant mask values to using an enumerated type of bit
positions to represent each GSI interrupt type.

The rest implement the "real work."  First, all interrupts
are disabled at initialization time.  Next, we keep track of
a bitmask of enabled GSI interrupt types, updating it each
time we enable or disable one of them.  From there we have
a set of patches that one-by-one enable each interrupt type
only during the period it is required.  This includes allowing
a channel to generate IEOB interrupts only when it has been
enabled.  And finally, the last patch simplifies some code
now that all GSI interrupt types are handled uniformly.
====================
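Condensed into a stand-alone sketch, the pattern the series adopts looks roughly like this. The names below are simplified stand-ins and the register write is only simulated; the real driver uses gsi_irq_type_update()/gsi_irq_type_enable()/gsi_irq_type_disable() and the GSI_CNTXT_TYPE_IRQ_MSK register, as shown in the diff below.

#include <stdint.h>
#include <stdio.h>

#define BIT(n)	(1u << (n))

/* Interrupt types, mirroring enum gsi_irq_type_id in the patch */
enum irq_type { CH_CTRL, EV_CTRL, GLOB_EE, IEOB, GENERAL };

static uint32_t type_irq_msk;		/* stand-in for the TYPE_IRQ_MSK register */
static uint32_t type_enabled_bitmap;	/* cached copy kept in software */

/* Mirror the cached bitmap to the (simulated) mask register */
static void irq_type_update(uint32_t val)
{
	type_enabled_bitmap = val;
	type_irq_msk = val;		/* iowrite32() in the real driver */
}

static void irq_type_enable(enum irq_type t)
{
	irq_type_update(type_enabled_bitmap | BIT(t));
}

static void irq_type_disable(enum irq_type t)
{
	irq_type_update(type_enabled_bitmap & ~BIT(t));
}

int main(void)
{
	/* An interrupt type is enabled only around the operation that needs it */
	irq_type_enable(EV_CTRL);
	printf("mask while command pending: 0x%02x\n", (unsigned)type_irq_msk);
	irq_type_disable(EV_CTRL);
	printf("mask after completion:      0x%02x\n", (unsigned)type_irq_msk);

	return 0;
}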

Link: https://lore.kernel.org/r/20201105181407.8006-1-elder@linaro.org
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
parents f8ae7bbe 8194be79
@@ -230,21 +230,70 @@ static u32 gsi_channel_id(struct gsi_channel *channel)
 	return channel - &channel->gsi->channel[0];
 }

+/* Update the GSI IRQ type register with the cached value */
+static void gsi_irq_type_update(struct gsi *gsi, u32 val)
+{
+	gsi->type_enabled_bitmap = val;
+	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
+}
+
+static void gsi_irq_type_enable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(type_id));
+}
+
+static void gsi_irq_type_disable(struct gsi *gsi, enum gsi_irq_type_id type_id)
+{
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap & ~BIT(type_id));
+}
+
+/* Turn off all GSI interrupts initially */
+static void gsi_irq_setup(struct gsi *gsi)
+{
+	/* Disable all interrupt types */
+	gsi_irq_type_update(gsi, 0);
+
+	/* Clear all type-specific interrupt masks */
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+	iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_CH_IRQ_OFFSET);
+	iowrite32(0, gsi->virt + GSI_INTER_EE_SRC_EV_CH_IRQ_OFFSET);
+	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+}
+
+/* Turn off all GSI interrupts when we're all done */
+static void gsi_irq_teardown(struct gsi *gsi)
+{
+	/* Nothing to do */
+}
+
 static void gsi_irq_ieob_enable(struct gsi *gsi, u32 evt_ring_id)
 {
+	bool enable_ieob = !gsi->ieob_enabled_bitmap;
 	u32 val;

-	gsi->event_enable_bitmap |= BIT(evt_ring_id);
-	val = gsi->event_enable_bitmap;
+	gsi->ieob_enabled_bitmap |= BIT(evt_ring_id);
+	val = gsi->ieob_enabled_bitmap;
 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
+
+	/* Enable the interrupt type if this is the first channel enabled */
+	if (enable_ieob)
+		gsi_irq_type_enable(gsi, GSI_IEOB);
 }

 static void gsi_irq_ieob_disable(struct gsi *gsi, u32 evt_ring_id)
 {
 	u32 val;

-	gsi->event_enable_bitmap &= ~BIT(evt_ring_id);
-	val = gsi->event_enable_bitmap;
+	gsi->ieob_enabled_bitmap &= ~BIT(evt_ring_id);
+
+	/* Disable the interrupt type if this was the last enabled channel */
+	if (!gsi->ieob_enabled_bitmap)
+		gsi_irq_type_disable(gsi, GSI_IEOB);
+
+	val = gsi->ieob_enabled_bitmap;
 	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
 }
@@ -253,38 +302,32 @@ static void gsi_irq_enable(struct gsi *gsi)
 {
 	u32 val;

-	/* We don't use inter-EE channel or event interrupts */
-	val = GSI_CNTXT_TYPE_IRQ_MSK_ALL;
-	val &= ~INTER_EE_CH_CTRL_FMASK;
-	val &= ~INTER_EE_EV_CTRL_FMASK;
-	iowrite32(val, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
-	val = GENMASK(gsi->channel_count - 1, 0);
-	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-	val = GENMASK(gsi->evt_ring_count - 1, 0);
-	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-	/* Each IEOB interrupt is enabled (later) as needed by channels */
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-	val = GSI_CNTXT_GLOB_IRQ_ALL;
-	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+	/* Global interrupts include hardware error reports. Enable
+	 * that so we can at least report the error should it occur.
+	 */
+	iowrite32(ERROR_INT_FMASK, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GLOB_EE));

-	/* Never enable GSI_BREAK_POINT */
-	val = GSI_CNTXT_GSI_IRQ_ALL & ~BREAK_POINT_FMASK;
+	/* General GSI interrupts are reported to all EEs; if they occur
+	 * they are unrecoverable (without reset). A breakpoint interrupt
+	 * also exists, but we don't support that. We want to be notified
+	 * of errors so we can report them, even if they can't be handled.
+	 */
+	val = BUS_ERROR_FMASK;
+	val |= CMD_FIFO_OVRFLOW_FMASK;
+	val |= MCS_STACK_OVRFLOW_FMASK;
 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
+	gsi_irq_type_update(gsi, gsi->type_enabled_bitmap | BIT(GSI_GENERAL));
 }

-/* Disable all GSI_interrupt types */
+/* Disable all GSI interrupt types */
 static void gsi_irq_disable(struct gsi *gsi)
 {
+	gsi_irq_type_update(gsi, 0);
+
+	/* Clear the type-specific interrupt masks set by gsi_irq_enable() */
 	iowrite32(0, gsi->virt + GSI_CNTXT_GSI_IRQ_EN_OFFSET);
 	iowrite32(0, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_IEOB_IRQ_MSK_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
-	iowrite32(0, gsi->virt + GSI_CNTXT_TYPE_IRQ_MSK_OFFSET);
 }

 /* Return the virtual address associated with a ring index */
@@ -338,13 +381,30 @@ static int evt_ring_command(struct gsi *gsi, u32 evt_ring_id,
 	struct gsi_evt_ring *evt_ring = &gsi->evt_ring[evt_ring_id];
 	struct completion *completion = &evt_ring->completion;
 	struct device *dev = gsi->dev;
+	bool success;
 	u32 val;

+	/* We only perform one event ring command at a time, and event
+	 * control interrupts should only occur when such a command
+	 * is issued here. Only permit *this* event ring to trigger
+	 * an interrupt, and only enable the event control IRQ type
+	 * when we expect it to occur.
+	 */
+	val = BIT(evt_ring_id);
+	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+	gsi_irq_type_enable(gsi, GSI_EV_CTRL);
+
 	val = u32_encode_bits(evt_ring_id, EV_CHID_FMASK);
 	val |= u32_encode_bits(opcode, EV_OPCODE_FMASK);

-	if (gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion))
-		return 0;	/* Success! */
+	success = gsi_command(gsi, GSI_EV_CH_CMD_OFFSET, val, completion);
+
+	/* Disable the interrupt again */
+	gsi_irq_type_disable(gsi, GSI_EV_CTRL);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_EV_CH_IRQ_MSK_OFFSET);
+
+	if (success)
+		return 0;

 	dev_err(dev, "GSI command %u for event ring %u timed out, state %u\n",
 		opcode, evt_ring_id, evt_ring->state);
@@ -434,13 +494,29 @@ gsi_channel_command(struct gsi_channel *channel, enum gsi_ch_cmd_opcode opcode)
 	u32 channel_id = gsi_channel_id(channel);
 	struct gsi *gsi = channel->gsi;
 	struct device *dev = gsi->dev;
+	bool success;
 	u32 val;

+	/* We only perform one channel command at a time, and channel
+	 * control interrupts should only occur when such a command is
+	 * issued here. So we only permit *this* channel to trigger
+	 * an interrupt and only enable the channel control IRQ type
+	 * when we expect it to occur.
+	 */
+	val = BIT(channel_id);
+	iowrite32(val, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+	gsi_irq_type_enable(gsi, GSI_CH_CTRL);
+
 	val = u32_encode_bits(channel_id, CH_CHID_FMASK);
 	val |= u32_encode_bits(opcode, CH_OPCODE_FMASK);
+	success = gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion);

-	if (gsi_command(gsi, GSI_CH_CMD_OFFSET, val, completion))
-		return 0;	/* Success! */
+	/* Disable the interrupt again */
+	gsi_irq_type_disable(gsi, GSI_CH_CTRL);
+	iowrite32(0, gsi->virt + GSI_CNTXT_SRC_CH_IRQ_MSK_OFFSET);
+
+	if (success)
+		return 0;

 	dev_err(dev, "GSI command %u for channel %u timed out, state %u\n",
 		opcode, channel_id, gsi_channel_state(channel));
@@ -1036,8 +1112,8 @@ static void gsi_isr_glob_err(struct gsi *gsi)
 	iowrite32(~0, gsi->virt + GSI_ERROR_LOG_CLR_OFFSET);

 	ee = u32_get_bits(val, ERR_EE_FMASK);
-	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
 	type = u32_get_bits(val, ERR_TYPE_FMASK);
+	which = u32_get_bits(val, ERR_VIRT_IDX_FMASK);
 	code = u32_get_bits(val, ERR_CODE_FMASK);

 	if (type == GSI_ERR_TYPE_CHAN)
@@ -1112,8 +1188,7 @@ static void gsi_isr_general(struct gsi *gsi)
 	val = ioread32(gsi->virt + GSI_CNTXT_GSI_IRQ_STTS_OFFSET);
 	iowrite32(val, gsi->virt + GSI_CNTXT_GSI_IRQ_CLR_OFFSET);

-	if (val)
-		dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
+	dev_err(dev, "unexpected general interrupt 0x%08x\n", val);
 }

 /**
@@ -1130,6 +1205,7 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
 	u32 intr_mask;
 	u32 cnt = 0;

+	/* enum gsi_irq_type_id defines GSI interrupt types */
 	while ((intr_mask = ioread32(gsi->virt + GSI_CNTXT_TYPE_IRQ_OFFSET))) {
 		/* intr_mask contains bitmask of pending GSI interrupts */
 		do {
@@ -1138,19 +1214,19 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
 			intr_mask ^= gsi_intr;

 			switch (gsi_intr) {
-			case CH_CTRL_FMASK:
+			case BIT(GSI_CH_CTRL):
 				gsi_isr_chan_ctrl(gsi);
 				break;
-			case EV_CTRL_FMASK:
+			case BIT(GSI_EV_CTRL):
 				gsi_isr_evt_ctrl(gsi);
 				break;
-			case GLOB_EE_FMASK:
+			case BIT(GSI_GLOB_EE):
 				gsi_isr_glob_ee(gsi);
 				break;
-			case IEOB_FMASK:
+			case BIT(GSI_IEOB):
 				gsi_isr_ieob(gsi);
 				break;
-			case GENERAL_FMASK:
+			case BIT(GSI_GENERAL):
 				gsi_isr_general(gsi);
 				break;
 			default:
@@ -1170,6 +1246,34 @@ static irqreturn_t gsi_isr(int irq, void *dev_id)
 	return IRQ_HANDLED;
 }

+static int gsi_irq_init(struct gsi *gsi, struct platform_device *pdev)
+{
+	struct device *dev = &pdev->dev;
+	unsigned int irq;
+	int ret;
+
+	ret = platform_get_irq_byname(pdev, "gsi");
+	if (ret <= 0) {
+		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
+		return ret ? : -EINVAL;
+	}
+	irq = ret;
+
+	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
+	if (ret) {
+		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
+		return ret;
+	}
+	gsi->irq = irq;
+
+	return 0;
+}
+
+static void gsi_irq_exit(struct gsi *gsi)
+{
+	free_irq(gsi->irq, gsi);
+}
+
 /* Return the transaction associated with a transfer completion event */
 static struct gsi_trans *gsi_event_trans(struct gsi_channel *channel,
					 struct gsi_event *event)
@@ -1512,8 +1616,19 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
			       enum gsi_generic_cmd_opcode opcode)
 {
 	struct completion *completion = &gsi->completion;
+	bool success;
 	u32 val;

+	/* The error global interrupt type is always enabled (until we
+	 * teardown), so we won't change that. A generic EE command
+	 * completes with a GSI global interrupt of type GP_INT1. We
+	 * only perform one generic command at a time (to allocate or
+	 * halt a modem channel) and only from this function. So we
+	 * enable the GP_INT1 IRQ type here while we're expecting it.
+	 */
+	val = ERROR_INT_FMASK | GP_INT1_FMASK;
+	iowrite32(val, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
 	/* First zero the result code field */
 	val = ioread32(gsi->virt + GSI_CNTXT_SCRATCH_0_OFFSET);
 	val &= ~GENERIC_EE_RESULT_FMASK;
@@ -1524,8 +1639,13 @@ static int gsi_generic_command(struct gsi *gsi, u32 channel_id,
 	val |= u32_encode_bits(channel_id, GENERIC_CHID_FMASK);
 	val |= u32_encode_bits(GSI_EE_MODEM, GENERIC_EE_FMASK);

-	if (gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion))
-		return 0;	/* Success! */
+	success = gsi_command(gsi, GSI_GENERIC_CMD_OFFSET, val, completion);
+
+	/* Disable the GP_INT1 IRQ type again */
+	iowrite32(ERROR_INT_FMASK, gsi->virt + GSI_CNTXT_GLOB_IRQ_EN_OFFSET);
+
+	if (success)
+		return 0;

 	dev_err(gsi->dev, "GSI generic command %u to channel %u timed out\n",
 		opcode, channel_id);
@@ -1652,6 +1772,7 @@ int gsi_setup(struct gsi *gsi)
 {
 	struct device *dev = gsi->dev;
 	u32 val;
+	int ret;

 	/* Here is where we first touch the GSI hardware */
 	val = ioread32(gsi->virt + GSI_GSI_STATUS_OFFSET);
@@ -1660,6 +1781,8 @@ int gsi_setup(struct gsi *gsi)
 		return -EIO;
 	}

+	gsi_irq_setup(gsi);
+
 	val = ioread32(gsi->virt + GSI_GSI_HW_PARAM_2_OFFSET);

 	gsi->channel_count = u32_get_bits(val, NUM_CH_PER_EE_FMASK);
@@ -1692,13 +1815,18 @@ int gsi_setup(struct gsi *gsi)
 	/* Writing 1 indicates IRQ interrupts; 0 would be MSI */
 	iowrite32(1, gsi->virt + GSI_CNTXT_INTSET_OFFSET);

-	return gsi_channel_setup(gsi);
+	ret = gsi_channel_setup(gsi);
+	if (ret)
+		gsi_irq_teardown(gsi);
+
+	return ret;
 }

 /* Inverse of gsi_setup() */
 void gsi_teardown(struct gsi *gsi)
 {
 	gsi_channel_teardown(gsi);
+	gsi_irq_teardown(gsi);
 }

 /* Initialize a channel's event ring */
@@ -1746,7 +1874,7 @@ static void gsi_evt_ring_init(struct gsi *gsi)
 	u32 evt_ring_id = 0;

 	gsi->event_bitmap = gsi_event_bitmap_init(GSI_EVT_RING_COUNT_MAX);
-	gsi->event_enable_bitmap = 0;
+	gsi->ieob_enabled_bitmap = 0;
 	do
 		init_completion(&gsi->evt_ring[evt_ring_id].completion);
 	while (++evt_ring_id < GSI_EVT_RING_COUNT_MAX);
@@ -1962,7 +2090,6 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
 	struct device *dev = &pdev->dev;
 	struct resource *res;
 	resource_size_t size;
-	unsigned int irq;
 	int ret;

 	gsi_validate_build();
@@ -1976,55 +2103,43 @@ int gsi_init(struct gsi *gsi, struct platform_device *pdev,
 	 */
 	init_dummy_netdev(&gsi->dummy_dev);

-	ret = platform_get_irq_byname(pdev, "gsi");
-	if (ret <= 0) {
-		dev_err(dev, "DT error %d getting \"gsi\" IRQ property\n", ret);
-		return ret ? : -EINVAL;
-	}
-	irq = ret;
-
-	ret = request_irq(irq, gsi_isr, 0, "gsi", gsi);
-	if (ret) {
-		dev_err(dev, "error %d requesting \"gsi\" IRQ\n", ret);
-		return ret;
-	}
-	gsi->irq = irq;
-
 	/* Get GSI memory range and map it */
 	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "gsi");
 	if (!res) {
 		dev_err(dev, "DT error getting \"gsi\" memory property\n");
-		ret = -ENODEV;
-		goto err_free_irq;
+		return -ENODEV;
 	}

 	size = resource_size(res);
 	if (res->start > U32_MAX || size > U32_MAX - res->start) {
 		dev_err(dev, "DT memory resource \"gsi\" out of range\n");
-		ret = -EINVAL;
-		goto err_free_irq;
+		return -EINVAL;
 	}

 	gsi->virt = ioremap(res->start, size);
 	if (!gsi->virt) {
 		dev_err(dev, "unable to remap \"gsi\" memory\n");
-		ret = -ENOMEM;
-		goto err_free_irq;
+		return -ENOMEM;
 	}

-	ret = gsi_channel_init(gsi, count, data);
+	init_completion(&gsi->completion);
+
+	ret = gsi_irq_init(gsi, pdev);
 	if (ret)
 		goto err_iounmap;

+	ret = gsi_channel_init(gsi, count, data);
+	if (ret)
+		goto err_irq_exit;
+
 	mutex_init(&gsi->mutex);
-	init_completion(&gsi->completion);

 	return 0;

+err_irq_exit:
+	gsi_irq_exit(gsi);
 err_iounmap:
 	iounmap(gsi->virt);
-err_free_irq:
-	free_irq(gsi->irq, gsi);

 	return ret;
 }
@@ -2034,7 +2149,7 @@ void gsi_exit(struct gsi *gsi)
 {
 	mutex_destroy(&gsi->mutex);
 	gsi_channel_exit(gsi);
-	free_irq(gsi->irq, gsi);
+	gsi_irq_exit(gsi);
 	iounmap(gsi->virt);
 }
......
@@ -156,9 +156,10 @@ struct gsi {
 	u32 evt_ring_count;
 	struct gsi_channel channel[GSI_CHANNEL_COUNT_MAX];
 	struct gsi_evt_ring evt_ring[GSI_EVT_RING_COUNT_MAX];
-	u32 event_bitmap;
-	u32 event_enable_bitmap;
-	u32 modem_channel_bitmap;
+	u32 event_bitmap;		/* allocated event rings */
+	u32 modem_channel_bitmap;	/* modem channels to allocate */
+	u32 type_enabled_bitmap;	/* GSI IRQ types enabled */
+	u32 ieob_enabled_bitmap;	/* IEOB IRQ enabled (event rings) */
 	struct completion completion;	/* for global EE commands */
 	struct mutex mutex;		/* protects commands, programming */
 };
......
@@ -66,7 +66,7 @@
 #define CHTYPE_DIR_FMASK GENMASK(3, 3)
 #define EE_FMASK GENMASK(7, 4)
 #define CHID_FMASK GENMASK(12, 8)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is present for IPA v4.5 and above */
 #define CHTYPE_PROTOCOL_MSB_FMASK GENMASK(13, 13)
 #define ERINDEX_FMASK GENMASK(18, 14)
 #define CHSTATE_FMASK GENMASK(23, 20)
@@ -95,7 +95,7 @@
 #define WRR_WEIGHT_FMASK GENMASK(3, 0)
 #define MAX_PREFETCH_FMASK GENMASK(8, 8)
 #define USE_DB_ENG_FMASK GENMASK(9, 9)
-/* The next field is present for GSI v2.0 and above */
+/* The next field is only present for IPA v4.0, v4.1, and v4.2 */
 #define USE_ESCAPE_BUF_ONLY_FMASK GENMASK(10, 10)

 #define GSI_CH_C_SCRATCH_0_OFFSET(ch) \
@@ -238,19 +238,19 @@
 #define IRAM_SIZE_FMASK GENMASK(2, 0)
 #define IRAM_SIZE_ONE_KB_FVAL 0
 #define IRAM_SIZE_TWO_KB_FVAL 1
-/* The next two values are available for GSI v2.0 and above */
+/* The next two values are available for IPA v4.0 and above */
 #define IRAM_SIZE_TWO_N_HALF_KB_FVAL 2
 #define IRAM_SIZE_THREE_KB_FVAL 3
 #define NUM_CH_PER_EE_FMASK GENMASK(7, 3)
 #define NUM_EV_PER_EE_FMASK GENMASK(12, 8)
 #define GSI_CH_PEND_TRANSLATE_FMASK GENMASK(13, 13)
 #define GSI_CH_FULL_LOGIC_FMASK GENMASK(14, 14)
-/* Fields below are present for GSI v2.0 and above */
+/* Fields below are present for IPA v4.0 and above */
 #define GSI_USE_SDMA_FMASK GENMASK(15, 15)
 #define GSI_SDMA_N_INT_FMASK GENMASK(18, 16)
 #define GSI_SDMA_MAX_BURST_FMASK GENMASK(26, 19)
 #define GSI_SDMA_N_IOVEC_FMASK GENMASK(29, 27)
-/* Fields below are present for GSI v2.2 and above */
+/* Fields below are present for IPA v4.2 and above */
 #define GSI_USE_RD_WR_ENG_FMASK GENMASK(30, 30)
 #define GSI_USE_INTER_EE_FMASK GENMASK(31, 31)
@@ -262,15 +262,16 @@
		GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(GSI_EE_AP)
 #define GSI_EE_N_CNTXT_TYPE_IRQ_MSK_OFFSET(ee) \
		(0x0001f088 + 0x4000 * (ee))
-/* The masks below are used for the TYPE_IRQ and TYPE_IRQ_MASK registers */
-#define CH_CTRL_FMASK GENMASK(0, 0)
-#define EV_CTRL_FMASK GENMASK(1, 1)
-#define GLOB_EE_FMASK GENMASK(2, 2)
-#define IEOB_FMASK GENMASK(3, 3)
-#define INTER_EE_CH_CTRL_FMASK GENMASK(4, 4)
-#define INTER_EE_EV_CTRL_FMASK GENMASK(5, 5)
-#define GENERAL_FMASK GENMASK(6, 6)
-#define GSI_CNTXT_TYPE_IRQ_MSK_ALL GENMASK(6, 0)
+/* Values here are bit positions in the TYPE_IRQ and TYPE_IRQ_MSK registers */
+enum gsi_irq_type_id {
+	GSI_CH_CTRL = 0,		/* channel allocation, etc. */
+	GSI_EV_CTRL = 1,		/* event ring allocation, etc. */
+	GSI_GLOB_EE = 2,		/* global/general event */
+	GSI_IEOB = 3,			/* TRE completion */
+	GSI_INTER_EE_CH_CTRL = 4,	/* remote-issued stop/reset (unused) */
+	GSI_INTER_EE_EV_CTRL = 5,	/* remote-issued event reset (unused) */
+	GSI_GENERAL = 6,		/* general-purpose event */
+};

 #define GSI_CNTXT_SRC_CH_IRQ_OFFSET \
		GSI_EE_N_CNTXT_SRC_CH_IRQ_OFFSET(GSI_EE_AP)
@@ -334,7 +335,6 @@
 #define GP_INT1_FMASK GENMASK(1, 1)
 #define GP_INT2_FMASK GENMASK(2, 2)
 #define GP_INT3_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GLOB_IRQ_ALL GENMASK(3, 0)

 #define GSI_CNTXT_GSI_IRQ_STTS_OFFSET \
		GSI_EE_N_CNTXT_GSI_IRQ_STTS_OFFSET(GSI_EE_AP)
@@ -353,7 +353,6 @@
 #define BUS_ERROR_FMASK GENMASK(1, 1)
 #define CMD_FIFO_OVRFLOW_FMASK GENMASK(2, 2)
 #define MCS_STACK_OVRFLOW_FMASK GENMASK(3, 3)
-#define GSI_CNTXT_GSI_IRQ_ALL GENMASK(3, 0)

 #define GSI_CNTXT_INTSET_OFFSET \
		GSI_EE_N_CNTXT_INTSET_OFFSET(GSI_EE_AP)
......