Commit df0dff13 authored by Jordan Crouse, committed by Rob Clark

drm/msm/a6xx: Poll for HFI responses

The only HFI communication with the GMU on sdm845 happens
during initialization and all commands are synchronous. A fancy
interrupt tasklet and associated infrastructure is entirely
not needed and puts us at the mercy of the scheduler.

Instead poll for the message signal and handle the response
immediately and go on our way.
Signed-off-by: Jordan Crouse <jcrouse@codeaurora.org>
Signed-off-by: Rob Clark <robdclark@gmail.com>
parent 32aa27e1
......@@ -2,7 +2,6 @@
/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
#include <linux/clk.h>
#include <linux/iopoll.h>
#include <linux/pm_opp.h>
#include <soc/qcom/cmd-db.h>
......@@ -42,9 +41,6 @@ static irqreturn_t a6xx_hfi_irq(int irq, void *data)
status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
tasklet_schedule(&gmu->hfi_tasklet);
if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
......@@ -135,9 +131,6 @@ static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
u32 val;
int ret;
gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
......@@ -566,8 +559,7 @@ static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
}
#define A6XX_HFI_IRQ_MASK \
(A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
(A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
#define A6XX_GMU_IRQ_MASK \
(A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
......@@ -1199,9 +1191,6 @@ int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
goto err;
/* Set up a tasklet to handle GMU HFI responses */
tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
/* Get the power levels for the GMU and GPU */
a6xx_gmu_pwrlevels_probe(gmu);
......
......@@ -4,6 +4,7 @@
#ifndef _A6XX_GMU_H_
#define _A6XX_GMU_H_
#include <linux/iopoll.h>
#include <linux/interrupt.h>
#include "msm_drv.h"
#include "a6xx_hfi.h"
......@@ -151,6 +152,4 @@ void a6xx_hfi_init(struct a6xx_gmu *gmu);
int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
void a6xx_hfi_stop(struct a6xx_gmu *gmu);
void a6xx_hfi_task(unsigned long data);
#endif
......@@ -79,83 +79,72 @@ static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
return 0;
}
struct a6xx_hfi_response {
u32 id;
u32 seqnum;
struct list_head node;
struct completion complete;
u32 error;
u32 payload[16];
};
/*
* Incoming HFI ack messages can come in out of order so we need to store all
* the pending messages on a list until they are handled.
*/
static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
static LIST_HEAD(hfi_ack_list);
static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
struct a6xx_hfi_msg_response *msg)
static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,
u32 *payload, u32 payload_size)
{
struct a6xx_hfi_response *resp;
u32 id, seqnum;
/* msg->ret_header contains the header of the message being acked */
id = HFI_HEADER_ID(msg->ret_header);
seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
spin_lock(&hfi_ack_lock);
list_for_each_entry(resp, &hfi_ack_list, node) {
if (resp->id == id && resp->seqnum == seqnum) {
resp->error = msg->error;
memcpy(resp->payload, msg->payload,
sizeof(resp->payload));
complete(&resp->complete);
spin_unlock(&hfi_ack_lock);
return;
}
}
spin_unlock(&hfi_ack_lock);
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
u32 val;
int ret;
dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
}
/* Wait for a response */
ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
val & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 100, 5000);
static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
struct a6xx_hfi_msg_response *msg)
{
struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
if (ret) {
dev_err(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
return -ETIMEDOUT;
}
dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
}
/* Clear the interrupt */
gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR,
A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ);
void a6xx_hfi_task(unsigned long data)
{
struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
for (;;) {
struct a6xx_hfi_msg_response resp;
for (;;) {
u32 id;
int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
/* Get the next packet */
ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
sizeof(resp) >> 2);
/* Returns the number of bytes copied or negative on error */
if (ret <= 0) {
if (ret < 0)
/* If the queue is empty our response never made it */
if (!ret) {
dev_err(gmu->dev,
"The HFI response queue is unexpectedly empty\n");
return -ENOENT;
}
if (HFI_HEADER_ID(resp.header) == HFI_F2H_MSG_ERROR) {
struct a6xx_hfi_msg_error *error =
(struct a6xx_hfi_msg_error *) &resp;
dev_err(gmu->dev, "GMU firmware error %d\n",
error->code);
continue;
}
if (seqnum != HFI_HEADER_SEQNUM(resp.ret_header)) {
dev_err(gmu->dev,
"Unable to read the HFI message queue\n");
break;
"Unexpected message id %d on the response queue\n",
HFI_HEADER_SEQNUM(resp.ret_header));
continue;
}
id = HFI_HEADER_ID(resp.header);
if (resp.error) {
dev_err(gmu->dev,
"Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
}
if (id == HFI_F2H_MSG_ACK)
a6xx_hfi_handle_ack(gmu, &resp);
else if (id == HFI_F2H_MSG_ERROR)
a6xx_hfi_handle_error(gmu, &resp);
/* All is well, copy over the buffer */
if (payload && payload_size)
memcpy(payload, resp.payload,
min_t(u32, payload_size, sizeof(resp.payload)));
return 0;
}
}
......@@ -163,7 +152,6 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
void *data, u32 size, u32 *payload, u32 payload_size)
{
struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
struct a6xx_hfi_response resp = { 0 };
int ret, dwords = size >> 2;
u32 seqnum;
......@@ -173,53 +161,14 @@ static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
*((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
(dwords << 8) | id;
init_completion(&resp.complete);
resp.id = id;
resp.seqnum = seqnum;
spin_lock_bh(&hfi_ack_lock);
list_add_tail(&resp.node, &hfi_ack_list);
spin_unlock_bh(&hfi_ack_lock);
ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
if (ret) {
dev_err(gmu->dev, "Unable to send message %s id %d\n",
a6xx_hfi_msg_id[id], seqnum);
goto out;
}
/* Wait up to 5 seconds for the response */
ret = wait_for_completion_timeout(&resp.complete,
msecs_to_jiffies(5000));
if (!ret) {
dev_err(gmu->dev,
"Message %s id %d timed out waiting for response\n",
a6xx_hfi_msg_id[id], seqnum);
ret = -ETIMEDOUT;
} else
ret = 0;
out:
spin_lock_bh(&hfi_ack_lock);
list_del(&resp.node);
spin_unlock_bh(&hfi_ack_lock);
if (ret)
return ret;
if (resp.error) {
dev_err(gmu->dev, "Message %s id %d returned error %d\n",
a6xx_hfi_msg_id[id], seqnum, resp.error);
return -EINVAL;
}
if (payload && payload_size) {
int copy = min_t(u32, payload_size, sizeof(resp.payload));
memcpy(payload, resp.payload, copy);
}
return 0;
return a6xx_hfi_wait_for_ack(gmu, id, seqnum, payload, payload_size);
}
static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment