Commit a08b51a9 authored by Oded Gabbay's avatar Oded Gabbay

habanalabs: change polling functions to macros

This patch changes two polling functions to macros, in order to make their
API the same as the standard readl_poll_timeout so we would be able to
define the "condition for exit" when calling these macros.

This will simplify the code as it will eliminate the need to check both
for timeout and for the (cond) in the calling function.
Signed-off-by: Oded Gabbay <oded.gabbay@gmail.com>
parent 1f2c999b
...@@ -682,14 +682,12 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data) ...@@ -682,14 +682,12 @@ int hl_cs_ioctl(struct hl_fpriv *hpriv, void *data)
u32 tmp; u32 tmp;
rc = hl_poll_timeout_memory(hdev, rc = hl_poll_timeout_memory(hdev,
(u64) (uintptr_t) &ctx->thread_ctx_switch_wait_token, &ctx->thread_ctx_switch_wait_token, tmp, (tmp == 1),
jiffies_to_usecs(hdev->timeout_jiffies), 100, jiffies_to_usecs(hdev->timeout_jiffies));
&tmp);
if (rc || !tmp) { if (rc == -ETIMEDOUT) {
dev_err(hdev->dev, dev_err(hdev->dev,
"context switch phase didn't finish in time\n"); "context switch phase timeout (%d)\n", tmp);
rc = -ETIMEDOUT;
goto out; goto out;
} }
} }
......
...@@ -1173,95 +1173,6 @@ void hl_device_fini(struct hl_device *hdev) ...@@ -1173,95 +1173,6 @@ void hl_device_fini(struct hl_device *hdev)
pr_info("removed device successfully\n"); pr_info("removed device successfully\n");
} }
/*
 * hl_poll_timeout_memory - Periodically poll a host memory address
 * until it is not zero or a timeout occurs
 * @hdev: pointer to habanalabs device structure
 * @addr: host address to poll, passed as u64 and cast back to a pointer
 * @timeout_us: timeout in us (multiplied by 10 when running on simulator)
 * @val: out parameter; always receives the last value read from @addr
 *
 * Returns 0 as soon as a non-zero value is observed and -ETIMEDOUT
 * otherwise. Sleeps roughly 100us between samples, so it must not be
 * called from atomic context.
 */
int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr,
			u32 timeout_us, u32 *val)
{
	/*
	 * The polled location always lives in host (server) memory. It is
	 * updated asynchronously, either by direct access of the device or
	 * by another core.
	 */
	u32 *mem = (u32 *) (uintptr_t) addr;
	ktime_t deadline;
	bool last_read = false;

	/* The simulator is much slower than real hardware */
	if (!hdev->pdev)
		timeout_us *= 10;

	deadline = ktime_add_us(ktime_get(), timeout_us);

	might_sleep();

	while (1) {
		/*
		 * Flush CPU read/write buffers to make sure we observe
		 * updates done by other cores or by the device
		 */
		mb();
		*val = *mem;
		if (*val || last_read)
			break;
		/* on expiry, take one final sample before giving up */
		last_read = ktime_compare(ktime_get(), deadline) > 0;
		if (!last_read)
			usleep_range((100 >> 2) + 1, 100);
	}

	return *val ? 0 : -ETIMEDOUT;
}
/*
 * hl_poll_timeout_device_memory - Periodically poll a device memory
 * address until it is not zero or a timeout occurs
 * @hdev: pointer to habanalabs device structure (kept for API symmetry
 *        with hl_poll_timeout_memory; not referenced here)
 * @addr: I/O-mapped device address to poll via readl()
 * @timeout_us: timeout in us
 * @val: out parameter; always receives the last value read from @addr
 *
 * Returns 0 as soon as a non-zero value is observed and -ETIMEDOUT
 * otherwise. Sleeps roughly 100us between samples, so it must not be
 * called from atomic context.
 */
int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
			u32 timeout_us, u32 *val)
{
	ktime_t deadline = ktime_add_us(ktime_get(), timeout_us);
	bool last_read = false;

	might_sleep();

	do {
		*val = readl(addr);
		if (*val || last_read)
			break;
		/* on expiry, take one final sample before giving up */
		last_read = ktime_compare(ktime_get(), deadline) > 0;
		if (!last_read)
			usleep_range((100 >> 2) + 1, 100);
	} while (1);

	return *val ? 0 : -ETIMEDOUT;
}
/* /*
* MMIO register access helper functions. * MMIO register access helper functions.
*/ */
......
...@@ -117,34 +117,29 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg, ...@@ -117,34 +117,29 @@ int hl_fw_send_cpu_message(struct hl_device *hdev, u32 hw_queue_id, u32 *msg,
goto out; goto out;
} }
rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) &pkt->fence, rc = hl_poll_timeout_memory(hdev, &pkt->fence, tmp,
timeout, &tmp); (tmp == ARMCP_PACKET_FENCE_VAL), 1000, timeout);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if (rc == -ETIMEDOUT) { if (rc == -ETIMEDOUT) {
dev_err(hdev->dev, "Timeout while waiting for device CPU\n"); dev_err(hdev->dev, "Device CPU packet timeout (0x%x)\n", tmp);
hdev->device_cpu_disabled = true; hdev->device_cpu_disabled = true;
goto out; goto out;
} }
if (tmp == ARMCP_PACKET_FENCE_VAL) { tmp = le32_to_cpu(pkt->ctl);
u32 ctl = le32_to_cpu(pkt->ctl);
rc = (ctl & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT; rc = (tmp & ARMCP_PKT_CTL_RC_MASK) >> ARMCP_PKT_CTL_RC_SHIFT;
if (rc) { if (rc) {
dev_err(hdev->dev, dev_err(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
"F/W ERROR %d for CPU packet %d\n", rc,
rc, (ctl & ARMCP_PKT_CTL_OPCODE_MASK) (tmp & ARMCP_PKT_CTL_OPCODE_MASK)
>> ARMCP_PKT_CTL_OPCODE_SHIFT); >> ARMCP_PKT_CTL_OPCODE_SHIFT);
rc = -EINVAL; rc = -EIO;
} else if (result) { } else if (result) {
*result = (long) le64_to_cpu(pkt->result); *result = (long) le64_to_cpu(pkt->result);
} }
} else {
dev_err(hdev->dev, "CPU packet wrong fence value\n");
rc = -EINVAL;
}
out: out:
mutex_unlock(&hdev->send_cpu_message_lock); mutex_unlock(&hdev->send_cpu_message_lock);
......
...@@ -2844,14 +2844,14 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job) ...@@ -2844,14 +2844,14 @@ static int goya_send_job_on_qman0(struct hl_device *hdev, struct hl_cs_job *job)
goto free_fence_ptr; goto free_fence_ptr;
} }
rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, timeout, rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp,
&tmp); (tmp == GOYA_QMAN0_FENCE_VAL), 1000, timeout);
hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0); hl_hw_queue_inc_ci_kernel(hdev, GOYA_QUEUE_ID_DMA_0);
if ((rc) || (tmp != GOYA_QMAN0_FENCE_VAL)) { if (rc == -ETIMEDOUT) {
dev_err(hdev->dev, "QMAN0 Job hasn't finished in time\n"); dev_err(hdev->dev, "QMAN0 Job timeout (0x%x)\n", tmp);
rc = -ETIMEDOUT; goto free_fence_ptr;
} }
free_fence_ptr: free_fence_ptr:
...@@ -2925,20 +2925,19 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id) ...@@ -2925,20 +2925,19 @@ int goya_test_queue(struct hl_device *hdev, u32 hw_queue_id)
goto free_pkt; goto free_pkt;
} }
rc = hl_poll_timeout_memory(hdev, (u64) (uintptr_t) fence_ptr, rc = hl_poll_timeout_memory(hdev, fence_ptr, tmp, (tmp == fence_val),
GOYA_TEST_QUEUE_WAIT_USEC, &tmp); 1000, GOYA_TEST_QUEUE_WAIT_USEC);
hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id); hl_hw_queue_inc_ci_kernel(hdev, hw_queue_id);
if ((!rc) && (tmp == fence_val)) { if (rc == -ETIMEDOUT) {
dev_info(hdev->dev,
"queue test on H/W queue %d succeeded\n",
hw_queue_id);
} else {
dev_err(hdev->dev, dev_err(hdev->dev,
"H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n", "H/W queue %d test failed (scratch(0x%08llX) == 0x%08X)\n",
hw_queue_id, (unsigned long long) fence_dma_addr, tmp); hw_queue_id, (unsigned long long) fence_dma_addr, tmp);
rc = -EINVAL; rc = -EIO;
} else {
dev_info(hdev->dev, "queue test on H/W queue %d succeeded\n",
hw_queue_id);
} }
free_pkt: free_pkt:
......
...@@ -1065,6 +1065,59 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val); ...@@ -1065,6 +1065,59 @@ void hl_wreg(struct hl_device *hdev, u32 reg, u32 val);
(cond) ? 0 : -ETIMEDOUT; \ (cond) ? 0 : -ETIMEDOUT; \
}) })
/*
 * hl_poll_timeout_memory - poll a host memory word until @cond is true or
 * @timeout_us elapses (a zero @timeout_us polls forever). @val receives the
 * last value read; the statement expression evaluates to 0 on success and
 * -ETIMEDOUT on timeout. Sleeps @sleep_us between samples (0 = busy-poll),
 * so with a non-zero @sleep_us it must not be used from atomic context.
 *
 * The address in this macro always points to a memory location in the
 * host's (server's) memory. That location is updated asynchronously,
 * either by direct access of the device or by another core.
 *
 * Every parameter is parenthesized at use so that expression arguments
 * (e.g. "a + b" for timeout_us) expand with the intended precedence.
 */
#define hl_poll_timeout_memory(hdev, addr, val, cond, sleep_us, timeout_us) \
({ \
	ktime_t __timeout; \
	/* timeout should be longer when working with simulator */ \
	if ((hdev)->pdev) \
		__timeout = ktime_add_us(ktime_get(), (timeout_us)); \
	else \
		__timeout = ktime_add_us(ktime_get(), ((timeout_us) * 10)); \
	might_sleep_if(sleep_us); \
	for (;;) { \
		/* Verify we read updates done by other cores or by device */ \
		mb(); \
		(val) = *((u32 *) (uintptr_t) (addr)); \
		if (cond) \
			break; \
		if ((timeout_us) && \
				ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = *((u32 *) (uintptr_t) (addr)); \
			break; \
		} \
		if (sleep_us) \
			usleep_range(((sleep_us) >> 2) + 1, (sleep_us)); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
/*
 * hl_poll_timeout_device_memory - poll a device (I/O mapped) memory word
 * via readl() until @cond is true or @timeout_us elapses (a zero
 * @timeout_us polls forever). @val receives the last value read; the
 * statement expression evaluates to 0 on success and -ETIMEDOUT on
 * timeout. Sleeps @sleep_us between samples (0 = busy-poll), so with a
 * non-zero @sleep_us it must not be used from atomic context.
 *
 * Every parameter is parenthesized at use so that expression arguments
 * (e.g. "a + b" for timeout_us) expand with the intended precedence.
 */
#define hl_poll_timeout_device_memory(hdev, addr, val, cond, sleep_us, \
				timeout_us) \
({ \
	ktime_t __timeout; \
	/* timeout should be longer when working with simulator */ \
	if ((hdev)->pdev) \
		__timeout = ktime_add_us(ktime_get(), (timeout_us)); \
	else \
		__timeout = ktime_add_us(ktime_get(), ((timeout_us) * 10)); \
	might_sleep_if(sleep_us); \
	for (;;) { \
		(val) = readl(addr); \
		if (cond) \
			break; \
		if ((timeout_us) && \
				ktime_compare(ktime_get(), __timeout) > 0) { \
			(val) = readl(addr); \
			break; \
		} \
		if (sleep_us) \
			usleep_range(((sleep_us) >> 2) + 1, (sleep_us)); \
	} \
	(cond) ? 0 : -ETIMEDOUT; \
})
#define HL_ENG_BUSY(buf, size, fmt, ...) ({ \ #define HL_ENG_BUSY(buf, size, fmt, ...) ({ \
if (buf) \ if (buf) \
...@@ -1334,10 +1387,6 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable); ...@@ -1334,10 +1387,6 @@ int hl_device_set_debug_mode(struct hl_device *hdev, bool enable);
int create_hdev(struct hl_device **dev, struct pci_dev *pdev, int create_hdev(struct hl_device **dev, struct pci_dev *pdev,
enum hl_asic_type asic_type, int minor); enum hl_asic_type asic_type, int minor);
void destroy_hdev(struct hl_device *hdev); void destroy_hdev(struct hl_device *hdev);
int hl_poll_timeout_memory(struct hl_device *hdev, u64 addr, u32 timeout_us,
u32 *val);
int hl_poll_timeout_device_memory(struct hl_device *hdev, void __iomem *addr,
u32 timeout_us, u32 *val);
int hl_hw_queues_create(struct hl_device *hdev); int hl_hw_queues_create(struct hl_device *hdev);
void hl_hw_queues_destroy(struct hl_device *hdev); void hl_hw_queues_destroy(struct hl_device *hdev);
int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id, int hl_hw_queue_send_cb_no_cmpl(struct hl_device *hdev, u32 hw_queue_id,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment