Commit de01283b authored by Matthew R. Ochs, committed by Martin K. Petersen

scsi: cxlflash: Wait for active AFU commands to timeout upon tear down

With the removal of the static private command pool, the ability to
'complete' outstanding commands was lost. While this is not an issue for
commands originating outside the driver, internal AFU commands are
synchronous and therefore have a timeout associated with them. To
avoid a stale memory access, the tear down sequence needs to ensure
that there are no active commands before proceeding. As these
internal AFU commands are rare events, the simplest way to accomplish
this is to detect the activity and wait for it to time out.
Signed-off-by: Matthew R. Ochs <mrochs@linux.vnet.ibm.com>
Acked-by: Uma Krishnan <ukrishn@linux.vnet.ibm.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
parent 25bced2b
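
The change boils down to a quiesce-before-free pattern: every internal
command holds a count while in flight, and the teardown path polls
(coarsely) until that count drains to zero. Below is a minimal standalone
C sketch of the same idea, assuming a userspace setting; cmds_active
mirrors the driver field, while issue_sync_cmd() and teardown() are
hypothetical stand-ins for cxlflash_afu_sync() and stop_afu(), with C11
atomics and sleep() in place of atomic_t and ssleep().

/* Minimal userspace sketch (not driver code) of the quiesce pattern. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>
#include <unistd.h>

static atomic_int cmds_active;	/* mirrors afu->cmds_active */

/* Hypothetical stand-in for cxlflash_afu_sync(): a synchronous command
 * that holds the active count until it completes or times out. */
static void *issue_sync_cmd(void *arg)
{
	atomic_fetch_add(&cmds_active, 1);
	sleep(2);		/* simulates the command timeout window */
	atomic_fetch_sub(&cmds_active, 1);
	return NULL;
}

/* Hypothetical stand-in for stop_afu(): poll until no commands remain
 * before touching state the commands might still reference. */
static void teardown(void)
{
	while (atomic_load(&cmds_active))
		sleep(1);	/* analogous to the driver's ssleep(1) */
	puts("quiesced; safe to unmap MMIO and free memory");
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, issue_sync_cmd, NULL);
	usleep(100 * 1000);	/* let the command become active */
	teardown();
	pthread_join(&t, NULL);
	return 0;
}

Compile with cc -pthread. The same trade-off as in the driver applies:
because these commands are rare and serialized (the driver takes
sync_active before bumping the count, so at most one is active), a
coarse one-second poll on the slow teardown path is simpler than a
completion or wait queue, and the wait is bounded by the command timeout.
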
drivers/scsi/cxlflash/common.h
@@ -180,6 +180,7 @@ struct afu {
 	u64 *hrrq_end;
 	u64 *hrrq_curr;
 	bool toggle;
+	atomic_t cmds_active;	/* Number of currently active AFU commands */
 	s64 room;
 	spinlock_t rrin_slock; /* Lock to rrin queuing and cmd_room updates */
 	u64 hb;
drivers/scsi/cxlflash/main.c
@@ -531,7 +531,7 @@ static void free_mem(struct cxlflash_cfg *cfg)
  *
  * Safe to call with AFU in a partially allocated/initialized state.
  *
- * Cleans up all state associated with the command queue, and unmaps
+ * Waits for any active internal AFU commands to timeout and then unmaps
  * the MMIO space.
  */
 static void stop_afu(struct cxlflash_cfg *cfg)
@@ -539,6 +539,8 @@ static void stop_afu(struct cxlflash_cfg *cfg)
 	struct afu *afu = cfg->afu;

 	if (likely(afu)) {
+		while (atomic_read(&afu->cmds_active))
+			ssleep(1);
 		if (likely(afu->afu_map)) {
 			cxl_psa_unmap((void __iomem *)afu->afu_map);
 			afu->afu_map = NULL;
@@ -1721,6 +1723,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 	}

 	mutex_lock(&sync_active);
+	atomic_inc(&afu->cmds_active);
 	buf = kzalloc(sizeof(*cmd) + __alignof__(*cmd) - 1, GFP_KERNEL);
 	if (unlikely(!buf)) {
 		dev_err(dev, "%s: no memory for command\n", __func__);
@@ -1762,6 +1765,7 @@ int cxlflash_afu_sync(struct afu *afu, ctx_hndl_t ctx_hndl_u,
 			(cmd->sa.host_use_b[0] & B_ERROR)))
 		rc = -1;
 out:
+	atomic_dec(&afu->cmds_active);
 	mutex_unlock(&sync_active);
 	kfree(buf);
 	pr_debug("%s: returning rc=%d\n", __func__, rc);