Commit da1642bc authored by Justin Chen, committed by Sudeep Holla

firmware: arm_scmi: Queue in scmi layer for mailbox implementation

send_message() does not block in the mailbox implementation, because
the mailbox layer has its own queue. However, this confuses the
per-xfer timeouts, since they all start ticking in parallel.

Consider a case where the xfer timeout is 30ms and a SCMI transaction
takes 25ms:

  | 0ms: Message #0 is queued in mailbox layer and sent out, then sits
  |      at scmi_wait_for_message_response() with a timeout of 30ms
  | 1ms: Message #1 is queued in mailbox layer but not sent out yet.
  |      Since send_message() doesn't block, it also sits at
  |      scmi_wait_for_message_response() with a timeout of 30ms
  |  ...
  | 25ms: Message #0 is completed, txdone is called and message #1 is sent
  | 31ms: Message #1 times out since its clock started at 1ms, even
  |       though it has only been in flight for 6ms.

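The skew can be reproduced outside the kernel. The sketch below is NOT
SCMI or mailbox code; every identifier in it is invented purely for
illustration. It mimics the timeline above under the same assumptions:
a 30ms deadline armed as soon as a message is queued, and a channel
worker that services one message at a time in 25ms. Build on Linux
with cc -pthread.

/*
 * Standalone userspace sketch of the timeout skew described above.
 * A single channel worker services queued messages one at a time
 * (25ms each), while each waiter arms its 30ms deadline the moment
 * its message is queued, so message #1 "times out" at ~31ms despite
 * being in flight for only ~6ms.
 */
#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

#define TIMEOUT_MS	30	/* per-xfer timeout from the example above */
#define SERVICE_MS	25	/* how long one transaction really takes   */
#define NUM_XFERS	2

static sem_t xfer_done[NUM_XFERS];
static struct timespec t0;

static long now_ms(void)
{
	struct timespec ts;

	clock_gettime(CLOCK_MONOTONIC, &ts);
	return (ts.tv_sec - t0.tv_sec) * 1000 + (ts.tv_nsec - t0.tv_nsec) / 1000000;
}

/* Stands in for the mailbox framework: strictly one message in flight. */
static void *channel_worker(void *arg)
{
	for (int i = 0; i < NUM_XFERS; i++) {
		usleep(SERVICE_MS * 1000);
		printf("%3ldms: message #%d completed\n", now_ms(), i);
		sem_post(&xfer_done[i]);
	}
	return NULL;
}

/* Stands in for the wait done by the SCMI core after send_message(). */
static void *waiter(void *arg)
{
	int i = *(int *)arg;
	struct timespec deadline;

	/* The deadline starts ticking at queue time, not at send time. */
	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_nsec += TIMEOUT_MS * 1000000L;
	deadline.tv_sec  += deadline.tv_nsec / 1000000000L;
	deadline.tv_nsec %= 1000000000L;

	if (sem_timedwait(&xfer_done[i], &deadline) == -1 && errno == ETIMEDOUT)
		printf("%3ldms: message #%d TIMED OUT\n", now_ms(), i);
	return NULL;
}

int main(void)
{
	pthread_t worker, waiters[NUM_XFERS];
	int ids[NUM_XFERS];

	for (int i = 0; i < NUM_XFERS; i++)
		sem_init(&xfer_done[i], 0, 0);

	clock_gettime(CLOCK_MONOTONIC, &t0);
	pthread_create(&worker, NULL, channel_worker, NULL);

	for (int i = 0; i < NUM_XFERS; i++) {
		ids[i] = i;
		/* "send_message()" merely queues: the timeout is armed here. */
		pthread_create(&waiters[i], NULL, waiter, &ids[i]);
		usleep(1000);		/* message #1 is queued 1ms after #0 */
	}

	for (int i = 0; i < NUM_XFERS; i++)
		pthread_join(waiters[i], NULL);
	pthread_join(worker, NULL);
	return 0;
}

The diff below closes this gap by taking a per-channel mutex in
mailbox_send_message() and releasing it in mailbox_mark_txdone(), so a
message is handed to the mailbox framework only after the previous one
has completed and each timeout window matches the actual in-flight time.
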
Fixes: 5c8a47a5 ("firmware: arm_scmi: Make scmi core independent of the transport type")
Signed-off-by: Justin Chen <justin.chen@broadcom.com>
Message-Id: <20241014160717.1678953-1-justin.chen@broadcom.com>
Reviewed-by: Cristian Marussi <cristian.marussi@arm.com>
Tested-by: Cristian Marussi <cristian.marussi@arm.com>
Signed-off-by: Sudeep Holla <sudeep.holla@arm.com>
parent db8f0b80
@@ -25,6 +25,7 @@
  * @chan_platform_receiver: Optional Platform Receiver mailbox unidirectional channel
  * @cinfo: SCMI channel info
  * @shmem: Transmit/Receive shared memory area
+ * @chan_lock: Lock that prevents multiple xfers from being queued
  */
 struct scmi_mailbox {
 	struct mbox_client cl;
@@ -33,6 +34,7 @@ struct scmi_mailbox {
 	struct mbox_chan *chan_platform_receiver;
 	struct scmi_chan_info *cinfo;
 	struct scmi_shared_mem __iomem *shmem;
+	struct mutex chan_lock;
 };
 
 #define client_to_scmi_mailbox(c) container_of(c, struct scmi_mailbox, cl)
@@ -238,6 +240,7 @@ static int mailbox_chan_setup(struct scmi_chan_info *cinfo, struct device *dev,
 
 	cinfo->transport_info = smbox;
 	smbox->cinfo = cinfo;
+	mutex_init(&smbox->chan_lock);
 
 	return 0;
 }
@@ -267,13 +270,23 @@ static int mailbox_send_message(struct scmi_chan_info *cinfo,
 	struct scmi_mailbox *smbox = cinfo->transport_info;
 	int ret;
 
-	ret = mbox_send_message(smbox->chan, xfer);
+	/*
+	 * The mailbox layer has its own queue. However the mailbox queue
+	 * confuses the per message SCMI timeouts since the clock starts when
+	 * the message is submitted into the mailbox queue. So when multiple
+	 * messages are queued up the clock starts on all messages instead of
+	 * only the one inflight.
+	 */
+	mutex_lock(&smbox->chan_lock);
+
+	ret = mbox_send_message(smbox->chan, xfer);
+	/* mbox_send_message returns non-negative value on success */
+	if (ret < 0) {
+		mutex_unlock(&smbox->chan_lock);
+		return ret;
+	}
 
-	/* mbox_send_message returns non-negative value on success, so reset */
-	if (ret > 0)
-		ret = 0;
-
-	return ret;
+	return 0;
 }
 
 static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
@@ -281,13 +294,10 @@ static void mailbox_mark_txdone(struct scmi_chan_info *cinfo, int ret,
 {
 	struct scmi_mailbox *smbox = cinfo->transport_info;
 
-	/*
-	 * NOTE: we might prefer not to need the mailbox ticker to manage the
-	 * transfer queueing since the protocol layer queues things by itself.
-	 * Unfortunately, we have to kick the mailbox framework after we have
-	 * received our message.
-	 */
 	mbox_client_txdone(smbox->chan, ret);
+
+	/* Release channel */
+	mutex_unlock(&smbox->chan_lock);
 }
 
 static void mailbox_fetch_response(struct scmi_chan_info *cinfo,