Commit 83f47eea authored by Alexander Usyskin's avatar Alexander Usyskin Committed by Greg Kroah-Hartman

mei: add timeout to send

When the driver wakes up the firmware from the low power state,
it sends a memory-ready message.
The send is done via a synchronous/blocking function to ensure
that the firmware is in a ready state. However, if the firmware is
undergoing reset, the send might block forever.
To address this issue, a timeout is added to the blocking
write command on the internal bus.

Introduce the __mei_cl_send_timeout function to use instead of
__mei_cl_send in cases where timeout is required.
The mei_cl_write has only two callers and there is no need to split
it into two functions.
Signed-off-by: default avatarAlexander Usyskin <alexander.usyskin@intel.com>
Link: https://lore.kernel.org/r/20221116124735.2493847-2-alexander.usyskin@intel.com
Signed-off-by: default avatarGreg Kroah-Hartman <gregkh@linuxfoundation.org>
parent 6865788f
...@@ -188,17 +188,20 @@ static int mei_fwver(struct mei_cl_device *cldev) ...@@ -188,17 +188,20 @@ static int mei_fwver(struct mei_cl_device *cldev)
return ret; return ret;
} }
#define GFX_MEMORY_READY_TIMEOUT 200 /* timeout in milliseconds */
static int mei_gfx_memory_ready(struct mei_cl_device *cldev) static int mei_gfx_memory_ready(struct mei_cl_device *cldev)
{ {
struct mkhi_gfx_mem_ready req = {0}; struct mkhi_gfx_mem_ready req = {0};
unsigned int mode = MEI_CL_IO_TX_INTERNAL; unsigned int mode = MEI_CL_IO_TX_INTERNAL | MEI_CL_IO_TX_BLOCKING;
req.hdr.group_id = MKHI_GROUP_ID_GFX; req.hdr.group_id = MKHI_GROUP_ID_GFX;
req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ; req.hdr.command = MKHI_GFX_MEMORY_READY_CMD_REQ;
req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED; req.flags = MKHI_GFX_MEM_READY_PXP_ALLOWED;
dev_dbg(&cldev->dev, "Sending memory ready command\n"); dev_dbg(&cldev->dev, "Sending memory ready command\n");
return __mei_cl_send(cldev->cl, (u8 *)&req, sizeof(req), 0, mode); return __mei_cl_send_timeout(cldev->cl, (u8 *)&req, sizeof(req), 0,
mode, GFX_MEMORY_READY_TIMEOUT);
} }
static void mei_mkhi_fix(struct mei_cl_device *cldev) static void mei_mkhi_fix(struct mei_cl_device *cldev)
......
...@@ -33,6 +33,26 @@ ...@@ -33,6 +33,26 @@
*/ */
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
unsigned int mode) unsigned int mode)
{
return __mei_cl_send_timeout(cl, buf, length, vtag, mode, MAX_SCHEDULE_TIMEOUT);
}
/**
* __mei_cl_send_timeout - internal client send (write)
*
* @cl: host client
* @buf: buffer to send
* @length: buffer length
* @vtag: virtual tag
* @mode: sending mode
* @timeout: send timeout in milliseconds.
* effective only for blocking writes: the MEI_CL_IO_TX_BLOCKING mode bit is set.
* set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
*
* Return: written size bytes or < 0 on error
*/
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
unsigned int mode, unsigned long timeout)
{ {
struct mei_device *bus; struct mei_device *bus;
struct mei_cl_cb *cb; struct mei_cl_cb *cb;
...@@ -101,7 +121,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, ...@@ -101,7 +121,7 @@ ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING); cb->blocking = !!(mode & MEI_CL_IO_TX_BLOCKING);
memcpy(cb->buf.data, buf, length); memcpy(cb->buf.data, buf, length);
rets = mei_cl_write(cl, cb); rets = mei_cl_write(cl, cb, timeout);
out: out:
mutex_unlock(&bus->device_lock); mutex_unlock(&bus->device_lock);
......
...@@ -1926,10 +1926,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, ...@@ -1926,10 +1926,13 @@ int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
* *
* @cl: host client * @cl: host client
* @cb: write callback with filled data * @cb: write callback with filled data
* @timeout: send timeout in milliseconds.
* effective only for blocking writes: the cb->blocking is set.
* set timeout to MAX_SCHEDULE_TIMEOUT for the maximum allowed wait.
* *
* Return: number of bytes sent on success, <0 on failure. * Return: number of bytes sent on success, <0 on failure.
*/ */
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout)
{ {
struct mei_device *dev; struct mei_device *dev;
struct mei_msg_data *buf; struct mei_msg_data *buf;
...@@ -2056,11 +2059,20 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb) ...@@ -2056,11 +2059,20 @@ ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) { if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
mutex_unlock(&dev->device_lock); mutex_unlock(&dev->device_lock);
rets = wait_event_interruptible(cl->tx_wait, rets = wait_event_interruptible_timeout(cl->tx_wait,
cl->writing_state == MEI_WRITE_COMPLETE || cl->writing_state == MEI_WRITE_COMPLETE ||
(!mei_cl_is_connected(cl))); (!mei_cl_is_connected(cl)),
msecs_to_jiffies(timeout));
mutex_lock(&dev->device_lock); mutex_lock(&dev->device_lock);
/* clean all queues on timeout as something fatal happened */
if (rets == 0) {
rets = -ETIME;
mei_io_tx_list_free_cl(&dev->write_list, cl, NULL);
mei_io_tx_list_free_cl(&dev->write_waiting_list, cl, NULL);
}
/* wait_event_interruptible returns -ERESTARTSYS */ /* wait_event_interruptible returns -ERESTARTSYS */
if (rets > 0)
rets = 0;
if (rets) { if (rets) {
if (signal_pending(current)) if (signal_pending(current))
rets = -EINTR; rets = -EINTR;
......
...@@ -246,7 +246,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl, ...@@ -246,7 +246,7 @@ int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb, int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list); struct list_head *cmpl_list);
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp); int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp);
ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb); ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb, unsigned long timeout);
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb, int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
struct list_head *cmpl_list); struct list_head *cmpl_list);
......
...@@ -383,7 +383,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf, ...@@ -383,7 +383,7 @@ static ssize_t mei_write(struct file *file, const char __user *ubuf,
goto out; goto out;
} }
rets = mei_cl_write(cl, cb); rets = mei_cl_write(cl, cb, MAX_SCHEDULE_TIMEOUT);
out: out:
mutex_unlock(&dev->device_lock); mutex_unlock(&dev->device_lock);
return rets; return rets;
......
...@@ -373,6 +373,8 @@ void mei_cl_bus_rescan_work(struct work_struct *work); ...@@ -373,6 +373,8 @@ void mei_cl_bus_rescan_work(struct work_struct *work);
void mei_cl_bus_dev_fixup(struct mei_cl_device *dev); void mei_cl_bus_dev_fixup(struct mei_cl_device *dev);
ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag, ssize_t __mei_cl_send(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
unsigned int mode); unsigned int mode);
ssize_t __mei_cl_send_timeout(struct mei_cl *cl, const u8 *buf, size_t length, u8 vtag,
unsigned int mode, unsigned long timeout);
ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag, ssize_t __mei_cl_recv(struct mei_cl *cl, u8 *buf, size_t length, u8 *vtag,
unsigned int mode, unsigned long timeout); unsigned int mode, unsigned long timeout);
bool mei_cl_bus_rx_event(struct mei_cl *cl); bool mei_cl_bus_rx_event(struct mei_cl *cl);
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment