Commit 6263b51e authored by Gary R Hook, committed by Herbert Xu

crypto: ccp - Change ISR handler method for a v5 CCP

The CCP has the ability to perform several operations simultaneously,
but has only one interrupt.  When implemented as a PCI device and using
MSI-X/MSI interrupts, use a tasklet model to service interrupts. By
disabling and enabling interrupts from the CCP, coupled with the
queuing that tasklets provide, we can ensure that all events
(occurring on the device) are recognized and serviced.

This change fixes a problem wherein 2 or more busy queues can cause
notification bits to change state while a (CCP) interrupt is being
serviced, but after the queue state has been evaluated. This results
in the event being 'lost' and the queue hanging, waiting to be
serviced. Since the status bits are never fully de-asserted, the
CCP never generates another interrupt (all bits zero -> one or more
bits one), and no further CCP operations will be executed.

Cc: <stable@vger.kernel.org> # 4.9.x+
Signed-off-by: Gary R Hook <gary.hook@amd.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
parent 7b537b24
...@@ -705,6 +705,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp) ...@@ -705,6 +705,65 @@ static int ccp_assign_lsbs(struct ccp_device *ccp)
return rc; return rc;
} }
/* Mask interrupt generation on every command queue of the CCP. */
static void ccp5_disable_queue_interrupts(struct ccp_device *ccp)
{
	unsigned int q;

	for (q = 0; q < ccp->cmd_q_count; q++)
		iowrite32(0x0, ccp->cmd_q[q].reg_int_enable);
}
/* Re-arm interrupt generation (all supported events) on every command queue. */
static void ccp5_enable_queue_interrupts(struct ccp_device *ccp)
{
	unsigned int q;

	for (q = 0; q < ccp->cmd_q_count; q++)
		iowrite32(SUPPORTED_INTERRUPTS, ccp->cmd_q[q].reg_int_enable);
}
/*
 * Bottom half (tasklet body) servicing CCP queue interrupts.
 *
 * Runs with queue interrupts masked (the top half disables them before
 * scheduling us).  Walks every command queue, snapshots its status
 * registers, acknowledges exactly the status bits that were observed,
 * and wakes any thread sleeping on that queue.  Interrupts are only
 * re-enabled after ALL queues have been examined, so an event that
 * asserts while another queue is being serviced is still noticed on
 * the next interrupt rather than being lost (the hang described in
 * the commit message above).
 *
 * @data: a struct ccp_device * cast to unsigned long (tasklet calling
 *        convention); also invoked directly by the top half when
 *        ccp->use_tasklet is false.
 */
static void ccp5_irq_bh(unsigned long data)
{
	struct ccp_device *ccp = (struct ccp_device *)data;
	u32 status;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		status = ioread32(cmd_q->reg_interrupt_status);
		if (status) {
			/* Snapshot state for the waiter before acking. */
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((status & INT_ERROR) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			/* Ack only the bits actually seen, so a bit that
			 * asserts after this read still raises a new IRQ. */
			iowrite32(status, cmd_q->reg_interrupt_status);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	/* All queues inspected: safe to take interrupts again. */
	ccp5_enable_queue_interrupts(ccp);
}
/*
 * Top-half interrupt handler for a v5 CCP.
 *
 * Masks further queue interrupts, then defers the real servicing to
 * ccp5_irq_bh() — via the tasklet when MSI-X/MSI is in use
 * (ccp->use_tasklet), otherwise synchronously in IRQ context.
 *
 * @irq:  interrupt number (unused)
 * @data: the struct device registered with request_irq()
 *
 * Return: IRQ_HANDLED unconditionally.
 */
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
	struct ccp_device *ccp = dev_get_drvdata((struct device *)data);

	/* Keep the device quiet until the bottom half has run. */
	ccp5_disable_queue_interrupts(ccp);

	if (!ccp->use_tasklet)
		ccp5_irq_bh((unsigned long)ccp);
	else
		tasklet_schedule(&ccp->irq_tasklet);

	return IRQ_HANDLED;
}
static int ccp5_init(struct ccp_device *ccp) static int ccp5_init(struct ccp_device *ccp)
{ {
struct device *dev = ccp->dev; struct device *dev = ccp->dev;
...@@ -789,18 +848,17 @@ static int ccp5_init(struct ccp_device *ccp) ...@@ -789,18 +848,17 @@ static int ccp5_init(struct ccp_device *ccp)
} }
/* Turn off the queues and disable interrupts until ready */ /* Turn off the queues and disable interrupts until ready */
ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) { for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i]; cmd_q = &ccp->cmd_q[i];
cmd_q->qcontrol = 0; /* Start with nothing */ cmd_q->qcontrol = 0; /* Start with nothing */
iowrite32(cmd_q->qcontrol, cmd_q->reg_control); iowrite32(cmd_q->qcontrol, cmd_q->reg_control);
/* Disable the interrupts */
iowrite32(0x00, cmd_q->reg_int_enable);
ioread32(cmd_q->reg_int_status); ioread32(cmd_q->reg_int_status);
ioread32(cmd_q->reg_status); ioread32(cmd_q->reg_status);
/* Clear the interrupts */ /* Clear the interrupt status */
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
} }
...@@ -811,6 +869,10 @@ static int ccp5_init(struct ccp_device *ccp) ...@@ -811,6 +869,10 @@ static int ccp5_init(struct ccp_device *ccp)
dev_err(dev, "unable to allocate an IRQ\n"); dev_err(dev, "unable to allocate an IRQ\n");
goto e_pool; goto e_pool;
} }
/* Initialize the ISR tasklet */
if (ccp->use_tasklet)
tasklet_init(&ccp->irq_tasklet, ccp5_irq_bh,
(unsigned long)ccp);
dev_dbg(dev, "Loading LSB map...\n"); dev_dbg(dev, "Loading LSB map...\n");
/* Copy the private LSB mask to the public registers */ /* Copy the private LSB mask to the public registers */
...@@ -879,11 +941,7 @@ static int ccp5_init(struct ccp_device *ccp) ...@@ -879,11 +941,7 @@ static int ccp5_init(struct ccp_device *ccp)
} }
dev_dbg(dev, "Enabling interrupts...\n"); dev_dbg(dev, "Enabling interrupts...\n");
/* Enable interrupts */ ccp5_enable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i];
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_int_enable);
}
dev_dbg(dev, "Registering device...\n"); dev_dbg(dev, "Registering device...\n");
/* Put this on the unit list to make it available */ /* Put this on the unit list to make it available */
...@@ -935,15 +993,13 @@ static void ccp5_destroy(struct ccp_device *ccp) ...@@ -935,15 +993,13 @@ static void ccp5_destroy(struct ccp_device *ccp)
ccp_del_device(ccp); ccp_del_device(ccp);
/* Disable and clear interrupts */ /* Disable and clear interrupts */
ccp5_disable_queue_interrupts(ccp);
for (i = 0; i < ccp->cmd_q_count; i++) { for (i = 0; i < ccp->cmd_q_count; i++) {
cmd_q = &ccp->cmd_q[i]; cmd_q = &ccp->cmd_q[i];
/* Turn off the run bit */ /* Turn off the run bit */
iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control); iowrite32(cmd_q->qcontrol & ~CMD5_Q_RUN, cmd_q->reg_control);
/* Disable the interrupts */
iowrite32(0x00, cmd_q->reg_int_enable);
/* Clear the interrupt status */ /* Clear the interrupt status */
iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status); iowrite32(SUPPORTED_INTERRUPTS, cmd_q->reg_interrupt_status);
ioread32(cmd_q->reg_int_status); ioread32(cmd_q->reg_int_status);
...@@ -978,39 +1034,6 @@ static void ccp5_destroy(struct ccp_device *ccp) ...@@ -978,39 +1034,6 @@ static void ccp5_destroy(struct ccp_device *ccp)
} }
} }
/*
 * Pre-patch interrupt handler (the version REMOVED by this commit).
 *
 * Services all queues directly in hard-IRQ context with device
 * interrupts still enabled.  Per the commit message above, a second
 * queue's status bit can assert after its reg_interrupt_status was
 * read but while this handler is still running; that event is then
 * never acknowledged and never re-raises the (level-style) interrupt,
 * hanging the queue.  Note it also acks with the blanket
 * SUPPORTED_INTERRUPTS mask rather than the observed status bits.
 *
 * @irq:  interrupt number (unused)
 * @data: the struct device registered with request_irq()
 */
static irqreturn_t ccp5_irq_handler(int irq, void *data)
{
	struct device *dev = data;
	struct ccp_device *ccp = dev_get_drvdata(dev);
	u32 status;
	unsigned int i;

	for (i = 0; i < ccp->cmd_q_count; i++) {
		struct ccp_cmd_queue *cmd_q = &ccp->cmd_q[i];

		status = ioread32(cmd_q->reg_interrupt_status);
		if (status) {
			cmd_q->int_status = status;
			cmd_q->q_status = ioread32(cmd_q->reg_status);
			cmd_q->q_int_status = ioread32(cmd_q->reg_int_status);

			/* On error, only save the first error value */
			if ((status & INT_ERROR) && !cmd_q->cmd_error)
				cmd_q->cmd_error = CMD_Q_ERROR(cmd_q->q_status);

			cmd_q->int_rcvd = 1;

			/* Acknowledge the interrupt and wake the kthread */
			iowrite32(SUPPORTED_INTERRUPTS,
				  cmd_q->reg_interrupt_status);
			wake_up_interruptible(&cmd_q->int_queue);
		}
	}

	return IRQ_HANDLED;
}
static void ccp5_config(struct ccp_device *ccp) static void ccp5_config(struct ccp_device *ccp)
{ {
/* Public side */ /* Public side */
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment