Commit f918b4a8 authored by Matthew R. Ochs's avatar Matthew R. Ochs Committed by Martin K. Petersen

scsi: cxlflash: Serialize RRQ access and support offlevel processing

As further staging to support processing the HRRQ by other means, access to
the HRRQ needs to be serialized by a spin lock acquired with interrupts
disabled. This will allow safe access in other non-hardware interrupt
contexts. In an effort to minimize the period where interrupts are disabled,
support is added to queue up commands harvested from the RRQ such that they
can be processed with hardware interrupts enabled. While this doesn't offer
any improvement when processing on a hardware interrupt, it will help when
IRQ polling is supported and the command completions can execute in softirq
context.
Signed-off-by: default avatarMatthew R. Ochs <mrochs@linux.vnet.ibm.com>
Signed-off-by: default avatarUma Krishnan <ukrishn@linux.vnet.ibm.com>
Signed-off-by: default avatarMartin K. Petersen <martin.petersen@oracle.com>
parent 76a6ebbe
...@@ -134,6 +134,7 @@ struct afu_cmd { ...@@ -134,6 +134,7 @@ struct afu_cmd {
struct afu *parent; struct afu *parent;
struct scsi_cmnd *scp; struct scsi_cmnd *scp;
struct completion cevent; struct completion cevent;
struct list_head queue;
u8 cmd_tmf:1; u8 cmd_tmf:1;
...@@ -181,6 +182,7 @@ struct afu { ...@@ -181,6 +182,7 @@ struct afu {
struct sisl_ioarcb *hsq_start; struct sisl_ioarcb *hsq_start;
struct sisl_ioarcb *hsq_end; struct sisl_ioarcb *hsq_end;
struct sisl_ioarcb *hsq_curr; struct sisl_ioarcb *hsq_curr;
spinlock_t hrrq_slock;
u64 *hrrq_start; u64 *hrrq_start;
u64 *hrrq_end; u64 *hrrq_end;
u64 *hrrq_curr; u64 *hrrq_curr;
......
...@@ -1157,10 +1157,13 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data) ...@@ -1157,10 +1157,13 @@ static irqreturn_t cxlflash_sync_err_irq(int irq, void *data)
/** /**
* process_hrrq() - process the read-response queue * process_hrrq() - process the read-response queue
* @afu: AFU associated with the host. * @afu: AFU associated with the host.
* @doneq: Queue of commands harvested from the RRQ.
*
* This routine must be called holding the disabled RRQ spin lock.
* *
* Return: The number of entries processed. * Return: The number of entries processed.
*/ */
static int process_hrrq(struct afu *afu) static int process_hrrq(struct afu *afu, struct list_head *doneq)
{ {
struct afu_cmd *cmd; struct afu_cmd *cmd;
struct sisl_ioasa *ioasa; struct sisl_ioasa *ioasa;
...@@ -1189,7 +1192,7 @@ static int process_hrrq(struct afu *afu) ...@@ -1189,7 +1192,7 @@ static int process_hrrq(struct afu *afu)
cmd = container_of(ioarcb, struct afu_cmd, rcb); cmd = container_of(ioarcb, struct afu_cmd, rcb);
} }
cmd_complete(cmd); list_add_tail(&cmd->queue, doneq);
/* Advance to next entry or wrap and flip the toggle bit */ /* Advance to next entry or wrap and flip the toggle bit */
if (hrrq_curr < hrrq_end) if (hrrq_curr < hrrq_end)
...@@ -1209,18 +1212,44 @@ static int process_hrrq(struct afu *afu) ...@@ -1209,18 +1212,44 @@ static int process_hrrq(struct afu *afu)
return num_hrrq; return num_hrrq;
} }
/**
 * process_cmd_doneq() - complete every command harvested from the RRQ
 * @doneq: List of commands ready for completion processing.
 *
 * Each entry is handed to cmd_complete(); completion may free or requeue
 * the command, so the list must not be referenced after this returns.
 */
static void process_cmd_doneq(struct list_head *doneq)
{
	struct afu_cmd *pos, *next;

	/* An empty queue here means the caller took the lock for nothing. */
	WARN_ON(list_empty(doneq));

	list_for_each_entry_safe(pos, next, doneq, queue)
		cmd_complete(pos);
}
/** /**
* cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path) * cxlflash_rrq_irq() - interrupt handler for read-response queue (normal path)
* @irq: Interrupt number. * @irq: Interrupt number.
* @data: Private data provided at interrupt registration, the AFU. * @data: Private data provided at interrupt registration, the AFU.
* *
* Return: Always return IRQ_HANDLED. * Return: IRQ_HANDLED or IRQ_NONE when no ready entries found.
*/ */
static irqreturn_t cxlflash_rrq_irq(int irq, void *data) static irqreturn_t cxlflash_rrq_irq(int irq, void *data)
{ {
struct afu *afu = (struct afu *)data; struct afu *afu = (struct afu *)data;
unsigned long hrrq_flags;
LIST_HEAD(doneq);
int num_entries = 0;
process_hrrq(afu); spin_lock_irqsave(&afu->hrrq_slock, hrrq_flags);
num_entries = process_hrrq(afu, &doneq);
spin_unlock_irqrestore(&afu->hrrq_slock, hrrq_flags);
if (num_entries == 0)
return IRQ_NONE;
process_cmd_doneq(&doneq);
return IRQ_HANDLED; return IRQ_HANDLED;
} }
...@@ -1540,14 +1569,13 @@ static int start_afu(struct cxlflash_cfg *cfg) ...@@ -1540,14 +1569,13 @@ static int start_afu(struct cxlflash_cfg *cfg)
init_pcr(cfg); init_pcr(cfg);
/* After an AFU reset, RRQ entries are stale, clear them */ /* Initialize RRQ */
memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry)); memset(&afu->rrq_entry, 0, sizeof(afu->rrq_entry));
/* Initialize RRQ pointers */
afu->hrrq_start = &afu->rrq_entry[0]; afu->hrrq_start = &afu->rrq_entry[0];
afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1]; afu->hrrq_end = &afu->rrq_entry[NUM_RRQ_ENTRY - 1];
afu->hrrq_curr = afu->hrrq_start; afu->hrrq_curr = afu->hrrq_start;
afu->toggle = 1; afu->toggle = 1;
spin_lock_init(&afu->hrrq_slock);
/* Initialize SQ */ /* Initialize SQ */
if (afu_is_sq_cmd_mode(afu)) { if (afu_is_sq_cmd_mode(afu)) {
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment