Commit e30a1065 authored by Dan Williams

cxl/pci: Cleanup 'sanitize' to always poll

In preparation for fixing the init/teardown of the 'sanitize' workqueue
and sysfs notification mechanism, arrange for cxl_mbox_sanitize_work()
to be the single location where the sysfs attribute is notified. With
that change there is no distinction between polled mode and interrupt
mode. All the interrupt does is accelerate the polling interval.

The change to check for "mds->security.sanitize_node" under the lock is
there to ensure that the interrupt, the work routine and the
setup/teardown code can all have a consistent view of the registered
notifier and the workqueue state. I.e. the expectation is that the
interrupt is live past the point that the sanitize sysfs attribute is
published, and it may race teardown, so it must be consulted under a
lock. Given that new locking requirement, cxl_pci_mbox_irq() is moved
from hard to thread irq context.

Lastly, some opportunistic replacements of
"queue_delayed_work(system_wq, ...)", which is just open coded
schedule_delayed_work(), are included.
Reviewed-by: Dave Jiang <dave.jiang@intel.com>
Reviewed-by: Jonathan Cameron <Jonathan.Cameron@huawei.com>
Reviewed-by: Ira Weiny <ira.weiny@intel.com>
Reviewed-by: Davidlohr Bueso <dave@stgolabs.net>
Signed-off-by: Dan Williams <dan.j.williams@intel.com>
parent 76fe8713
...@@ -561,7 +561,6 @@ static void cxl_memdev_security_shutdown(struct device *dev) ...@@ -561,7 +561,6 @@ static void cxl_memdev_security_shutdown(struct device *dev)
struct cxl_memdev *cxlmd = to_cxl_memdev(dev); struct cxl_memdev *cxlmd = to_cxl_memdev(dev);
struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds); struct cxl_memdev_state *mds = to_cxl_memdev_state(cxlmd->cxlds);
if (mds->security.poll)
cancel_delayed_work_sync(&mds->security.poll_dwork); cancel_delayed_work_sync(&mds->security.poll_dwork);
} }
......
...@@ -360,7 +360,6 @@ struct cxl_fw_state { ...@@ -360,7 +360,6 @@ struct cxl_fw_state {
* *
* @state: state of last security operation * @state: state of last security operation
* @enabled_cmds: All security commands enabled in the CEL * @enabled_cmds: All security commands enabled in the CEL
* @poll: polling for sanitization is enabled, device has no mbox irq support
* @poll_tmo_secs: polling timeout * @poll_tmo_secs: polling timeout
* @poll_dwork: polling work item * @poll_dwork: polling work item
* @sanitize_node: sanitation sysfs file to notify * @sanitize_node: sanitation sysfs file to notify
...@@ -368,7 +367,6 @@ struct cxl_fw_state { ...@@ -368,7 +367,6 @@ struct cxl_fw_state {
struct cxl_security_state { struct cxl_security_state {
unsigned long state; unsigned long state;
DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX); DECLARE_BITMAP(enabled_cmds, CXL_SEC_ENABLED_MAX);
bool poll;
int poll_tmo_secs; int poll_tmo_secs;
struct delayed_work poll_dwork; struct delayed_work poll_dwork;
struct kernfs_node *sanitize_node; struct kernfs_node *sanitize_node;
......
...@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id) ...@@ -128,10 +128,10 @@ static irqreturn_t cxl_pci_mbox_irq(int irq, void *id)
reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET); reg = readq(cxlds->regs.mbox + CXLDEV_MBOX_BG_CMD_STATUS_OFFSET);
opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg); opcode = FIELD_GET(CXLDEV_MBOX_BG_CMD_COMMAND_OPCODE_MASK, reg);
if (opcode == CXL_MBOX_OP_SANITIZE) { if (opcode == CXL_MBOX_OP_SANITIZE) {
mutex_lock(&mds->mbox_mutex);
if (mds->security.sanitize_node) if (mds->security.sanitize_node)
sysfs_notify_dirent(mds->security.sanitize_node); mod_delayed_work(system_wq, &mds->security.poll_dwork, 0);
mutex_unlock(&mds->mbox_mutex);
dev_dbg(cxlds->dev, "Sanitization operation ended\n");
} else { } else {
/* short-circuit the wait in __cxl_pci_mbox_send_cmd() */ /* short-circuit the wait in __cxl_pci_mbox_send_cmd() */
rcuwait_wake_up(&mds->mbox_wait); rcuwait_wake_up(&mds->mbox_wait);
...@@ -160,8 +160,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work) ...@@ -160,8 +160,7 @@ static void cxl_mbox_sanitize_work(struct work_struct *work)
int timeout = mds->security.poll_tmo_secs + 10; int timeout = mds->security.poll_tmo_secs + 10;
mds->security.poll_tmo_secs = min(15 * 60, timeout); mds->security.poll_tmo_secs = min(15 * 60, timeout);
queue_delayed_work(system_wq, &mds->security.poll_dwork, schedule_delayed_work(&mds->security.poll_dwork, timeout * HZ);
timeout * HZ);
} }
mutex_unlock(&mds->mbox_mutex); mutex_unlock(&mds->mbox_mutex);
} }
...@@ -293,15 +292,11 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds, ...@@ -293,15 +292,11 @@ static int __cxl_pci_mbox_send_cmd(struct cxl_memdev_state *mds,
* and allow userspace to poll(2) for completion. * and allow userspace to poll(2) for completion.
*/ */
if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) { if (mbox_cmd->opcode == CXL_MBOX_OP_SANITIZE) {
if (mds->security.poll) {
/* give first timeout a second */ /* give first timeout a second */
timeout = 1; timeout = 1;
mds->security.poll_tmo_secs = timeout; mds->security.poll_tmo_secs = timeout;
queue_delayed_work(system_wq, schedule_delayed_work(&mds->security.poll_dwork,
&mds->security.poll_dwork,
timeout * HZ); timeout * HZ);
}
dev_dbg(dev, "Sanitization operation started\n"); dev_dbg(dev, "Sanitization operation started\n");
goto success; goto success;
} }
...@@ -384,7 +379,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds) ...@@ -384,7 +379,9 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET); const int cap = readl(cxlds->regs.mbox + CXLDEV_MBOX_CAPS_OFFSET);
struct device *dev = cxlds->dev; struct device *dev = cxlds->dev;
unsigned long timeout; unsigned long timeout;
int irq, msgnum;
u64 md_status; u64 md_status;
u32 ctrl;
timeout = jiffies + mbox_ready_timeout * HZ; timeout = jiffies + mbox_ready_timeout * HZ;
do { do {
...@@ -432,33 +429,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds) ...@@ -432,33 +429,26 @@ static int cxl_pci_setup_mailbox(struct cxl_memdev_state *mds)
dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size); dev_dbg(dev, "Mailbox payload sized %zu", mds->payload_size);
rcuwait_init(&mds->mbox_wait); rcuwait_init(&mds->mbox_wait);
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
if (cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ) { /* background command interrupts are optional */
u32 ctrl; if (!(cap & CXLDEV_MBOX_CAP_BG_CMD_IRQ))
int irq, msgnum; return 0;
struct pci_dev *pdev = to_pci_dev(cxlds->dev);
msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap); msgnum = FIELD_GET(CXLDEV_MBOX_CAP_IRQ_MSGNUM_MASK, cap);
irq = pci_irq_vector(pdev, msgnum); irq = pci_irq_vector(to_pci_dev(cxlds->dev), msgnum);
if (irq < 0) if (irq < 0)
goto mbox_poll; return 0;
if (cxl_request_irq(cxlds, irq, cxl_pci_mbox_irq, NULL)) if (cxl_request_irq(cxlds, irq, NULL, cxl_pci_mbox_irq))
goto mbox_poll; return 0;
dev_dbg(cxlds->dev, "Mailbox interrupts enabled\n");
/* enable background command mbox irq support */ /* enable background command mbox irq support */
ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); ctrl = readl(cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ; ctrl |= CXLDEV_MBOX_CTRL_BG_CMD_IRQ;
writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET); writel(ctrl, cxlds->regs.mbox + CXLDEV_MBOX_CTRL_OFFSET);
return 0;
}
mbox_poll:
mds->security.poll = true;
INIT_DELAYED_WORK(&mds->security.poll_dwork, cxl_mbox_sanitize_work);
dev_dbg(cxlds->dev, "Mailbox interrupts are unsupported");
return 0; return 0;
} }
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment