Commit a1d2a633 authored by Qiang Yu, committed by Eric Anholt

drm/lima: driver for ARM Mali4xx GPUs

- Mali 4xx GPUs have two kinds of processors: GP and PP. GP is for
  OpenGL vertex shader processing and PP is for fragment shader
  processing. Each processor has its own MMU, so processors work in
  their own virtual address space.
- There's only one GP but multiple PPs (max 4 for Mali 400 and 8
  for Mali 450) in the same Mali 4xx GPU. All PPs are grouped
  together to handle a single fragment shader task divided by
  FB output tiled pixels. The Mali 400 user space driver is
  responsible for assigning target tiled pixels to each PP, but Mali
  450 has a HW module called DLBU to dynamically balance each
  PP's load.
- The user space driver allocates a buffer object and maps it into
  the GPU virtual address space, uploads the command stream and draw
  data through a CPU mmap of the buffer object, then submits a task
  to GP/PP with a register frame indicating where the command stream
  is plus misc settings (a user-space sketch follows this list).
- There's no command stream validation/relocation because each user
  process has its own GPU virtual address space. GP/PP's MMU switches
  virtual address spaces before running two tasks from different
  user processes. Erroneous or malicious user space code just gets an
  MMU fault or a GP/PP error IRQ, after which the HW/SW is recovered.
- Use GEM+shmem for MM. Currently memory is allocated and pinned at
  gem object creation. The GPU VM map of the buffer is also done at
  the alloc stage in kernel space. We may delay the memory
  allocation and the real GPU VM map to the command submission stage
  in the future as an improvement.
- Use drm_sched for GPU task scheduling. Each OpenGL context should
  have a lima context object in the kernel to distinguish tasks
  from different users. drm_sched gets tasks from each lima context
  in a fair way.
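
As an illustration of the flow above, here is a minimal user-space
sketch. It is not part of this patch: it assumes the DRM_IOCTL_LIMA_*
request macros and struct layouts from the uapi header
<drm/lima_drm.h> (the field names match the ioctl handlers below), and
it elides all error handling.

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/lima_drm.h>

int main(void)
{
	int fd = open("/dev/dri/renderD128", O_RDWR); /* lima render node */

	/* query the GPU model (Mali 400 vs 450) */
	struct drm_lima_get_param param = { .param = DRM_LIMA_PARAM_GPU_ID };
	ioctl(fd, DRM_IOCTL_LIMA_GET_PARAM, &param);

	/* create a context so the scheduler can tell our tasks apart */
	struct drm_lima_ctx_create ctx = {0};
	ioctl(fd, DRM_IOCTL_LIMA_CTX_CREATE, &ctx);

	/* allocate a bo; the kernel also maps it into this file's GPU VM */
	struct drm_lima_gem_create create = { .size = 4096 };
	ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &create);

	/* get its GPU va and fake mmap offset, then fill it from the CPU */
	struct drm_lima_gem_info info = { .handle = create.handle };
	ioctl(fd, DRM_IOCTL_LIMA_GEM_INFO, &info);
	uint32_t *p = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			   MAP_SHARED, fd, info.offset);
	/* ... write the command stream/draw data referencing info.va,
	 * then submit a drm_lima_gem_submit with a pipe-specific
	 * register frame pointing at it ...
	 */
	return 0;
}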

The Mesa driver can be found here before it is upstreamed:
https://gitlab.freedesktop.org/lima/mesa

v8:
- add comments for in_sync
- fix missing mutex unlock in ctx free

v7:
- remove lima_fence_ops with default value
- move fence slab create to device probe
- check pad ioctl args to be zero
- add comments for user/kernel interface

v6:
- fix comments by checkpatch.pl

v5:
- export gp/pp version to userspace
- rebase on drm-misc-next

v4:
- use get param interface to get info
- separate context create/free ioctl
- remove unused max sched task param
- update copyright time
- use xarray instead of idr
- stop using drmP.h

v3:
- fix comments from kbuild robot
- restrict supported arch to tested ones

v2:
- fix syscall argument check
- fix job finish fence leak since kernel 5.0
- use drm syncobj to replace native fence
- move buffer object GPU va map into kernel
- reserve syscall argument space for future info
- remove kernel gem modifier
- switch TTM back to GEM+shmem MM
- use time based io poll
- use whole register name
- adopt gem reservation obj integration
- use drm_timeout_abs_to_jiffies

Cc: Eric Anholt <eric@anholt.net>
Cc: Rob Herring <robh@kernel.org>
Cc: Christian König <ckoenig.leichtzumerken@gmail.com>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Alex Deucher <alexdeucher@gmail.com>
Cc: Sam Ravnborg <sam@ravnborg.org>
Cc: Rob Clark <robdclark@gmail.com>
Cc: Dave Airlie <airlied@gmail.com>
Signed-off-by: Andreas Baierl <ichgeh@imkreisrum.de>
Signed-off-by: Erico Nunes <nunes.erico@gmail.com>
Signed-off-by: Heiko Stuebner <heiko@sntech.de>
Signed-off-by: Marek Vasut <marex@denx.de>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Simon Shields <simon@lineageos.org>
Signed-off-by: Vasily Khoruzhick <anarsoul@gmail.com>
Signed-off-by: Qiang Yu <yuq825@gmail.com>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Rob Herring <robh@kernel.org>
Signed-off-by: Eric Anholt <eric@anholt.net>
Link: https://patchwork.freedesktop.org/patch/291200/
parent 6234fc0f
@@ -337,6 +337,8 @@ source "drivers/gpu/drm/xen/Kconfig"
source "drivers/gpu/drm/vboxvideo/Kconfig"
source "drivers/gpu/drm/lima/Kconfig"
# Keep legacy drivers last
menuconfig DRM_LEGACY
@@ -111,3 +111,4 @@ obj-$(CONFIG_DRM_PL111) += pl111/
obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/
# SPDX-License-Identifier: GPL-2.0 OR MIT
# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
config DRM_LIMA
tristate "LIMA (DRM support for ARM Mali 400/450 GPU)"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
select DRM_SCHED
help
DRM driver for ARM Mali 400/450 GPUs.
# SPDX-License-Identifier: GPL-2.0 OR MIT
# Copyright 2017-2019 Qiang Yu <yuq825@gmail.com>
lima-y := \
lima_drv.o \
lima_device.o \
lima_pmu.o \
lima_l2_cache.o \
lima_mmu.o \
lima_gp.o \
lima_pp.o \
lima_gem.o \
lima_vm.o \
lima_sched.o \
lima_ctx.o \
lima_gem_prime.o \
lima_dlbu.o \
lima_bcast.o \
lima_object.o
obj-$(CONFIG_DRM_LIMA) += lima.o
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/io.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_bcast.h"
#include "lima_regs.h"
#define bcast_write(reg, data) writel(data, ip->iomem + reg)
#define bcast_read(reg) readl(ip->iomem + reg)
void lima_bcast_enable(struct lima_device *dev, int num_pp)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
struct lima_ip *ip = dev->ip + lima_ip_bcast;
int i, mask = bcast_read(LIMA_BCAST_BROADCAST_MASK) & 0xffff0000;
for (i = 0; i < num_pp; i++) {
struct lima_ip *pp = pipe->processor[i];
mask |= 1 << (pp->id - lima_ip_pp0);
}
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask);
}
int lima_bcast_init(struct lima_ip *ip)
{
int i, mask = 0;
for (i = lima_ip_pp0; i <= lima_ip_pp7; i++) {
if (ip->dev->ip[i].present)
mask |= 1 << (i - lima_ip_pp0);
}
bcast_write(LIMA_BCAST_BROADCAST_MASK, mask << 16);
bcast_write(LIMA_BCAST_INTERRUPT_MASK, mask);
return 0;
}
void lima_bcast_fini(struct lima_ip *ip)
{
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_BCAST_H__
#define __LIMA_BCAST_H__
struct lima_ip;
int lima_bcast_init(struct lima_ip *ip);
void lima_bcast_fini(struct lima_ip *ip);
void lima_bcast_enable(struct lima_device *dev, int num_pp);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/slab.h>
#include "lima_device.h"
#include "lima_ctx.h"
int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id)
{
struct lima_ctx *ctx;
int i, err;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
ctx->dev = dev;
kref_init(&ctx->refcnt);
for (i = 0; i < lima_pipe_num; i++) {
err = lima_sched_context_init(dev->pipe + i, ctx->context + i, &ctx->guilty);
if (err)
goto err_out0;
}
err = xa_alloc(&mgr->handles, id, UINT_MAX, ctx, GFP_KERNEL);
if (err < 0)
goto err_out0;
return 0;
err_out0:
for (i--; i >= 0; i--)
lima_sched_context_fini(dev->pipe + i, ctx->context + i);
kfree(ctx);
return err;
}
static void lima_ctx_do_release(struct kref *ref)
{
struct lima_ctx *ctx = container_of(ref, struct lima_ctx, refcnt);
int i;
for (i = 0; i < lima_pipe_num; i++)
lima_sched_context_fini(ctx->dev->pipe + i, ctx->context + i);
kfree(ctx);
}
int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id)
{
struct lima_ctx *ctx;
int ret = 0;
mutex_lock(&mgr->lock);
ctx = xa_erase(&mgr->handles, id);
if (ctx)
kref_put(&ctx->refcnt, lima_ctx_do_release);
else
ret = -EINVAL;
mutex_unlock(&mgr->lock);
return ret;
}
struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id)
{
struct lima_ctx *ctx;
mutex_lock(&mgr->lock);
ctx = xa_load(&mgr->handles, id);
if (ctx)
kref_get(&ctx->refcnt);
mutex_unlock(&mgr->lock);
return ctx;
}
void lima_ctx_put(struct lima_ctx *ctx)
{
kref_put(&ctx->refcnt, lima_ctx_do_release);
}
void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr)
{
mutex_init(&mgr->lock);
xa_init_flags(&mgr->handles, XA_FLAGS_ALLOC);
}
void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr)
{
struct lima_ctx *ctx;
unsigned long id;
xa_for_each(&mgr->handles, id, ctx) {
kref_put(&ctx->refcnt, lima_ctx_do_release);
}
xa_destroy(&mgr->handles);
mutex_destroy(&mgr->lock);
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_CTX_H__
#define __LIMA_CTX_H__
#include <linux/xarray.h>
#include "lima_device.h"
struct lima_ctx {
struct kref refcnt;
struct lima_device *dev;
struct lima_sched_context context[lima_pipe_num];
atomic_t guilty;
};
struct lima_ctx_mgr {
struct mutex lock;
struct xarray handles;
};
int lima_ctx_create(struct lima_device *dev, struct lima_ctx_mgr *mgr, u32 *id);
int lima_ctx_free(struct lima_ctx_mgr *mgr, u32 id);
struct lima_ctx *lima_ctx_get(struct lima_ctx_mgr *mgr, u32 id);
void lima_ctx_put(struct lima_ctx *ctx);
void lima_ctx_mgr_init(struct lima_ctx_mgr *mgr);
void lima_ctx_mgr_fini(struct lima_ctx_mgr *mgr);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/platform_device.h>
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_pp.h"
#include "lima_mmu.h"
#include "lima_pmu.h"
#include "lima_l2_cache.h"
#include "lima_dlbu.h"
#include "lima_bcast.h"
#include "lima_vm.h"
struct lima_ip_desc {
char *name;
char *irq_name;
bool must_have[lima_gpu_num];
int offset[lima_gpu_num];
int (*init)(struct lima_ip *ip);
void (*fini)(struct lima_ip *ip);
};
#define LIMA_IP_DESC(ipname, mst0, mst1, off0, off1, func, irq) \
[lima_ip_##ipname] = { \
.name = #ipname, \
.irq_name = irq, \
.must_have = { \
[lima_gpu_mali400] = mst0, \
[lima_gpu_mali450] = mst1, \
}, \
.offset = { \
[lima_gpu_mali400] = off0, \
[lima_gpu_mali450] = off1, \
}, \
.init = lima_##func##_init, \
.fini = lima_##func##_fini, \
}
static struct lima_ip_desc lima_ip_desc[lima_ip_num] = {
LIMA_IP_DESC(pmu, false, false, 0x02000, 0x02000, pmu, "pmu"),
LIMA_IP_DESC(l2_cache0, true, true, 0x01000, 0x10000, l2_cache, NULL),
LIMA_IP_DESC(l2_cache1, false, true, -1, 0x01000, l2_cache, NULL),
LIMA_IP_DESC(l2_cache2, false, false, -1, 0x11000, l2_cache, NULL),
LIMA_IP_DESC(gp, true, true, 0x00000, 0x00000, gp, "gp"),
LIMA_IP_DESC(pp0, true, true, 0x08000, 0x08000, pp, "pp0"),
LIMA_IP_DESC(pp1, false, false, 0x0A000, 0x0A000, pp, "pp1"),
LIMA_IP_DESC(pp2, false, false, 0x0C000, 0x0C000, pp, "pp2"),
LIMA_IP_DESC(pp3, false, false, 0x0E000, 0x0E000, pp, "pp3"),
LIMA_IP_DESC(pp4, false, false, -1, 0x28000, pp, "pp4"),
LIMA_IP_DESC(pp5, false, false, -1, 0x2A000, pp, "pp5"),
LIMA_IP_DESC(pp6, false, false, -1, 0x2C000, pp, "pp6"),
LIMA_IP_DESC(pp7, false, false, -1, 0x2E000, pp, "pp7"),
LIMA_IP_DESC(gpmmu, true, true, 0x03000, 0x03000, mmu, "gpmmu"),
LIMA_IP_DESC(ppmmu0, true, true, 0x04000, 0x04000, mmu, "ppmmu0"),
LIMA_IP_DESC(ppmmu1, false, false, 0x05000, 0x05000, mmu, "ppmmu1"),
LIMA_IP_DESC(ppmmu2, false, false, 0x06000, 0x06000, mmu, "ppmmu2"),
LIMA_IP_DESC(ppmmu3, false, false, 0x07000, 0x07000, mmu, "ppmmu3"),
LIMA_IP_DESC(ppmmu4, false, false, -1, 0x1C000, mmu, "ppmmu4"),
LIMA_IP_DESC(ppmmu5, false, false, -1, 0x1D000, mmu, "ppmmu5"),
LIMA_IP_DESC(ppmmu6, false, false, -1, 0x1E000, mmu, "ppmmu6"),
LIMA_IP_DESC(ppmmu7, false, false, -1, 0x1F000, mmu, "ppmmu7"),
LIMA_IP_DESC(dlbu, false, true, -1, 0x14000, dlbu, NULL),
LIMA_IP_DESC(bcast, false, true, -1, 0x13000, bcast, NULL),
LIMA_IP_DESC(pp_bcast, false, true, -1, 0x16000, pp_bcast, "pp"),
LIMA_IP_DESC(ppmmu_bcast, false, true, -1, 0x15000, mmu, NULL),
};
const char *lima_ip_name(struct lima_ip *ip)
{
return lima_ip_desc[ip->id].name;
}
static int lima_clk_init(struct lima_device *dev)
{
int err;
unsigned long bus_rate, gpu_rate;
dev->clk_bus = devm_clk_get(dev->dev, "bus");
if (IS_ERR(dev->clk_bus)) {
dev_err(dev->dev, "get bus clk failed %ld\n", PTR_ERR(dev->clk_bus));
return PTR_ERR(dev->clk_bus);
}
dev->clk_gpu = devm_clk_get(dev->dev, "core");
if (IS_ERR(dev->clk_gpu)) {
dev_err(dev->dev, "get core clk failed %ld\n", PTR_ERR(dev->clk_gpu));
return PTR_ERR(dev->clk_gpu);
}
bus_rate = clk_get_rate(dev->clk_bus);
dev_info(dev->dev, "bus rate = %lu\n", bus_rate);
gpu_rate = clk_get_rate(dev->clk_gpu);
dev_info(dev->dev, "mod rate = %lu", gpu_rate);
err = clk_prepare_enable(dev->clk_bus);
if (err)
return err;
err = clk_prepare_enable(dev->clk_gpu);
if (err)
goto error_out0;
dev->reset = devm_reset_control_get_optional(dev->dev, NULL);
if (IS_ERR(dev->reset)) {
err = PTR_ERR(dev->reset);
goto error_out1;
} else if (dev->reset != NULL) {
err = reset_control_deassert(dev->reset);
if (err)
goto error_out1;
}
return 0;
error_out1:
clk_disable_unprepare(dev->clk_gpu);
error_out0:
clk_disable_unprepare(dev->clk_bus);
return err;
}
static void lima_clk_fini(struct lima_device *dev)
{
if (dev->reset != NULL)
reset_control_assert(dev->reset);
clk_disable_unprepare(dev->clk_gpu);
clk_disable_unprepare(dev->clk_bus);
}
static int lima_regulator_init(struct lima_device *dev)
{
int ret;
dev->regulator = devm_regulator_get_optional(dev->dev, "mali");
if (IS_ERR(dev->regulator)) {
ret = PTR_ERR(dev->regulator);
dev->regulator = NULL;
if (ret == -ENODEV)
return 0;
dev_err(dev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
ret = regulator_enable(dev->regulator);
if (ret < 0) {
dev_err(dev->dev, "failed to enable regulator: %d\n", ret);
return ret;
}
return 0;
}
static void lima_regulator_fini(struct lima_device *dev)
{
if (dev->regulator)
regulator_disable(dev->regulator);
}
static int lima_init_ip(struct lima_device *dev, int index)
{
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = dev->ip + index;
int offset = desc->offset[dev->id];
bool must = desc->must_have[dev->id];
int err;
if (offset < 0)
return 0;
ip->dev = dev;
ip->id = index;
ip->iomem = dev->iomem + offset;
if (desc->irq_name) {
err = platform_get_irq_byname(dev->pdev, desc->irq_name);
if (err < 0)
goto out;
ip->irq = err;
}
err = desc->init(ip);
if (!err) {
ip->present = true;
return 0;
}
out:
return must ? err : 0;
}
static void lima_fini_ip(struct lima_device *ldev, int index)
{
struct lima_ip_desc *desc = lima_ip_desc + index;
struct lima_ip *ip = ldev->ip + index;
if (ip->present)
desc->fini(ip);
}
static int lima_init_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
int err;
err = lima_sched_pipe_init(pipe, "gp");
if (err)
return err;
pipe->l2_cache[pipe->num_l2_cache++] = dev->ip + lima_ip_l2_cache0;
pipe->mmu[pipe->num_mmu++] = dev->ip + lima_ip_gpmmu;
pipe->processor[pipe->num_processor++] = dev->ip + lima_ip_gp;
err = lima_gp_pipe_init(dev);
if (err) {
lima_sched_pipe_fini(pipe);
return err;
}
return 0;
}
static void lima_fini_gp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
lima_gp_pipe_fini(dev);
lima_sched_pipe_fini(pipe);
}
static int lima_init_pp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
int err, i;
err = lima_sched_pipe_init(pipe, "pp");
if (err)
return err;
for (i = 0; i < LIMA_SCHED_PIPE_MAX_PROCESSOR; i++) {
struct lima_ip *pp = dev->ip + lima_ip_pp0 + i;
struct lima_ip *ppmmu = dev->ip + lima_ip_ppmmu0 + i;
struct lima_ip *l2_cache;
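/* Mali-400 has a single L2 shared by GP and all PPs; on Mali-450,
 * PP0-3 share l2_cache1 and PP4-7 share l2_cache2 (hence i >> 2)
 */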
if (dev->id == lima_gpu_mali400)
l2_cache = dev->ip + lima_ip_l2_cache0;
else
l2_cache = dev->ip + lima_ip_l2_cache1 + (i >> 2);
if (pp->present && ppmmu->present && l2_cache->present) {
pipe->mmu[pipe->num_mmu++] = ppmmu;
pipe->processor[pipe->num_processor++] = pp;
if (!pipe->l2_cache[i >> 2])
pipe->l2_cache[pipe->num_l2_cache++] = l2_cache;
}
}
if (dev->ip[lima_ip_bcast].present) {
pipe->bcast_processor = dev->ip + lima_ip_pp_bcast;
pipe->bcast_mmu = dev->ip + lima_ip_ppmmu_bcast;
}
err = lima_pp_pipe_init(dev);
if (err) {
lima_sched_pipe_fini(pipe);
return err;
}
return 0;
}
static void lima_fini_pp_pipe(struct lima_device *dev)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
lima_pp_pipe_fini(dev);
lima_sched_pipe_fini(pipe);
}
int lima_device_init(struct lima_device *ldev)
{
int err, i;
struct resource *res;
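/* the GPU only generates 32-bit bus addresses */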
dma_set_coherent_mask(ldev->dev, DMA_BIT_MASK(32));
err = lima_clk_init(ldev);
if (err) {
dev_err(ldev->dev, "clk init fail %d\n", err);
return err;
}
err = lima_regulator_init(ldev);
if (err) {
dev_err(ldev->dev, "regulator init fail %d\n", err);
goto err_out0;
}
ldev->empty_vm = lima_vm_create(ldev);
if (!ldev->empty_vm) {
err = -ENOMEM;
goto err_out1;
}
ldev->va_start = 0;
if (ldev->id == lima_gpu_mali450) {
ldev->va_end = LIMA_VA_RESERVE_START;
ldev->dlbu_cpu = dma_alloc_wc(
ldev->dev, LIMA_PAGE_SIZE,
&ldev->dlbu_dma, GFP_KERNEL);
if (!ldev->dlbu_cpu) {
err = -ENOMEM;
goto err_out2;
}
} else
ldev->va_end = LIMA_VA_RESERVE_END;
res = platform_get_resource(ldev->pdev, IORESOURCE_MEM, 0);
ldev->iomem = devm_ioremap_resource(ldev->dev, res);
if (IS_ERR(ldev->iomem)) {
dev_err(ldev->dev, "fail to ioremap iomem\n");
err = PTR_ERR(ldev->iomem);
goto err_out3;
}
for (i = 0; i < lima_ip_num; i++) {
err = lima_init_ip(ldev, i);
if (err)
goto err_out4;
}
err = lima_init_gp_pipe(ldev);
if (err)
goto err_out4;
err = lima_init_pp_pipe(ldev);
if (err)
goto err_out5;
return 0;
err_out5:
lima_fini_gp_pipe(ldev);
err_out4:
while (--i >= 0)
lima_fini_ip(ldev, i);
err_out3:
if (ldev->dlbu_cpu)
dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
ldev->dlbu_cpu, ldev->dlbu_dma);
err_out2:
lima_vm_put(ldev->empty_vm);
err_out1:
lima_regulator_fini(ldev);
err_out0:
lima_clk_fini(ldev);
return err;
}
void lima_device_fini(struct lima_device *ldev)
{
int i;
lima_fini_pp_pipe(ldev);
lima_fini_gp_pipe(ldev);
for (i = lima_ip_num - 1; i >= 0; i--)
lima_fini_ip(ldev, i);
if (ldev->dlbu_cpu)
dma_free_wc(ldev->dev, LIMA_PAGE_SIZE,
ldev->dlbu_cpu, ldev->dlbu_dma);
lima_vm_put(ldev->empty_vm);
lima_regulator_fini(ldev);
lima_clk_fini(ldev);
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DEVICE_H__
#define __LIMA_DEVICE_H__
#include <drm/drm_device.h>
#include <linux/delay.h>
#include "lima_sched.h"
enum lima_gpu_id {
lima_gpu_mali400 = 0,
lima_gpu_mali450,
lima_gpu_num,
};
enum lima_ip_id {
lima_ip_pmu,
lima_ip_gpmmu,
lima_ip_ppmmu0,
lima_ip_ppmmu1,
lima_ip_ppmmu2,
lima_ip_ppmmu3,
lima_ip_ppmmu4,
lima_ip_ppmmu5,
lima_ip_ppmmu6,
lima_ip_ppmmu7,
lima_ip_gp,
lima_ip_pp0,
lima_ip_pp1,
lima_ip_pp2,
lima_ip_pp3,
lima_ip_pp4,
lima_ip_pp5,
lima_ip_pp6,
lima_ip_pp7,
lima_ip_l2_cache0,
lima_ip_l2_cache1,
lima_ip_l2_cache2,
lima_ip_dlbu,
lima_ip_bcast,
lima_ip_pp_bcast,
lima_ip_ppmmu_bcast,
lima_ip_num,
};
struct lima_device;
struct lima_ip {
struct lima_device *dev;
enum lima_ip_id id;
bool present;
void __iomem *iomem;
int irq;
union {
/* gp/pp */
bool async_reset;
/* l2 cache */
spinlock_t lock;
} data;
};
enum lima_pipe_id {
lima_pipe_gp,
lima_pipe_pp,
lima_pipe_num,
};
struct lima_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
enum lima_gpu_id id;
u32 gp_version;
u32 pp_version;
int num_pp;
void __iomem *iomem;
struct clk *clk_bus;
struct clk *clk_gpu;
struct reset_control *reset;
struct regulator *regulator;
struct lima_ip ip[lima_ip_num];
struct lima_sched_pipe pipe[lima_pipe_num];
struct lima_vm *empty_vm;
uint64_t va_start;
uint64_t va_end;
u32 *dlbu_cpu;
dma_addr_t dlbu_dma;
};
static inline struct lima_device *
to_lima_dev(struct drm_device *dev)
{
return dev->dev_private;
}
int lima_device_init(struct lima_device *ldev);
void lima_device_fini(struct lima_device *ldev);
const char *lima_ip_name(struct lima_ip *ip);
typedef int (*lima_poll_func_t)(struct lima_ip *);
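/* poll func(ip) until it returns nonzero, busy-waiting (or sleeping
 * sleep_us between polls if nonzero) up to timeout_us
 */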
static inline int lima_poll_timeout(struct lima_ip *ip, lima_poll_func_t func,
int sleep_us, int timeout_us)
{
ktime_t timeout = ktime_add_us(ktime_get(), timeout_us);
might_sleep_if(sleep_us);
while (1) {
if (func(ip))
return 0;
if (timeout_us && ktime_compare(ktime_get(), timeout) > 0)
return -ETIMEDOUT;
if (sleep_us)
usleep_range((sleep_us >> 2) + 1, sleep_us);
}
return 0;
}
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/io.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_dlbu.h"
#include "lima_vm.h"
#include "lima_regs.h"
#define dlbu_write(reg, data) writel(data, ip->iomem + reg)
#define dlbu_read(reg) readl(ip->iomem + reg)
void lima_dlbu_enable(struct lima_device *dev, int num_pp)
{
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_pp;
struct lima_ip *ip = dev->ip + lima_ip_dlbu;
int i, mask = 0;
for (i = 0; i < num_pp; i++) {
struct lima_ip *pp = pipe->processor[i];
mask |= 1 << (pp->id - lima_ip_pp0);
}
dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, mask);
}
void lima_dlbu_disable(struct lima_device *dev)
{
struct lima_ip *ip = dev->ip + lima_ip_dlbu;
dlbu_write(LIMA_DLBU_PP_ENABLE_MASK, 0);
}
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg)
{
dlbu_write(LIMA_DLBU_TLLIST_VBASEADDR, reg[0]);
dlbu_write(LIMA_DLBU_FB_DIM, reg[1]);
dlbu_write(LIMA_DLBU_TLLIST_CONF, reg[2]);
dlbu_write(LIMA_DLBU_START_TILE_POS, reg[3]);
}
int lima_dlbu_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
dlbu_write(LIMA_DLBU_MASTER_TLLIST_PHYS_ADDR, dev->dlbu_dma | 1);
dlbu_write(LIMA_DLBU_MASTER_TLLIST_VADDR, LIMA_VA_RESERVE_DLBU);
return 0;
}
void lima_dlbu_fini(struct lima_ip *ip)
{
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DLBU_H__
#define __LIMA_DLBU_H__
struct lima_ip;
struct lima_device;
void lima_dlbu_enable(struct lima_device *dev, int num_pp);
void lima_dlbu_disable(struct lima_device *dev);
void lima_dlbu_set_reg(struct lima_ip *ip, u32 *reg);
int lima_dlbu_init(struct lima_ip *ip);
void lima_dlbu_fini(struct lima_ip *ip);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/module.h>
#include <linux/of_platform.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <drm/drm_ioctl.h>
#include <drm/drm_drv.h>
#include <drm/drm_prime.h>
#include <drm/lima_drm.h>
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_gem_prime.h"
#include "lima_vm.h"
int lima_sched_timeout_ms;
MODULE_PARM_DESC(sched_timeout_ms, "task run timeout in ms (0 = no timeout (default))");
module_param_named(sched_timeout_ms, lima_sched_timeout_ms, int, 0444);
static int lima_ioctl_get_param(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_get_param *args = data;
struct lima_device *ldev = to_lima_dev(dev);
if (args->pad)
return -EINVAL;
switch (args->param) {
case DRM_LIMA_PARAM_GPU_ID:
switch (ldev->id) {
case lima_gpu_mali400:
args->value = DRM_LIMA_PARAM_GPU_ID_MALI400;
break;
case lima_gpu_mali450:
args->value = DRM_LIMA_PARAM_GPU_ID_MALI450;
break;
default:
args->value = DRM_LIMA_PARAM_GPU_ID_UNKNOWN;
break;
}
break;
case DRM_LIMA_PARAM_NUM_PP:
args->value = ldev->pipe[lima_pipe_pp].num_processor;
break;
case DRM_LIMA_PARAM_GP_VERSION:
args->value = ldev->gp_version;
break;
case DRM_LIMA_PARAM_PP_VERSION:
args->value = ldev->pp_version;
break;
default:
return -EINVAL;
}
return 0;
}
static int lima_ioctl_gem_create(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_create *args = data;
if (args->pad)
return -EINVAL;
if (args->flags)
return -EINVAL;
if (args->size == 0)
return -EINVAL;
return lima_gem_create_handle(dev, file, args->size, args->flags, &args->handle);
}
static int lima_ioctl_gem_info(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_info *args = data;
return lima_gem_get_info(file, args->handle, &args->va, &args->offset);
}
static int lima_ioctl_gem_submit(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_submit *args = data;
struct lima_device *ldev = to_lima_dev(dev);
struct lima_drm_priv *priv = file->driver_priv;
struct drm_lima_gem_submit_bo *bos;
struct lima_sched_pipe *pipe;
struct lima_sched_task *task;
struct lima_ctx *ctx;
struct lima_submit submit = {0};
size_t size;
int err = 0;
if (args->pipe >= lima_pipe_num || args->nr_bos == 0)
return -EINVAL;
if (args->flags & ~(LIMA_SUBMIT_FLAG_EXPLICIT_FENCE))
return -EINVAL;
pipe = ldev->pipe + args->pipe;
if (args->frame_size != pipe->frame_size)
return -EINVAL;
bos = kvcalloc(args->nr_bos, sizeof(*submit.bos) + sizeof(*submit.lbos), GFP_KERNEL);
if (!bos)
return -ENOMEM;
size = args->nr_bos * sizeof(*submit.bos);
if (copy_from_user(bos, u64_to_user_ptr(args->bos), size)) {
err = -EFAULT;
goto out0;
}
task = kmem_cache_zalloc(pipe->task_slab, GFP_KERNEL);
if (!task) {
err = -ENOMEM;
goto out0;
}
task->frame = task + 1;
if (copy_from_user(task->frame, u64_to_user_ptr(args->frame), args->frame_size)) {
err = -EFAULT;
goto out1;
}
err = pipe->task_validate(pipe, task);
if (err)
goto out1;
ctx = lima_ctx_get(&priv->ctx_mgr, args->ctx);
if (!ctx) {
err = -ENOENT;
goto out1;
}
submit.pipe = args->pipe;
submit.bos = bos;
submit.lbos = (void *)bos + size;
submit.nr_bos = args->nr_bos;
submit.task = task;
submit.ctx = ctx;
submit.flags = args->flags;
submit.in_sync[0] = args->in_sync[0];
submit.in_sync[1] = args->in_sync[1];
submit.out_sync = args->out_sync;
err = lima_gem_submit(file, &submit);
lima_ctx_put(ctx);
out1:
if (err)
kmem_cache_free(pipe->task_slab, task);
out0:
kvfree(bos);
return err;
}
static int lima_ioctl_gem_wait(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_gem_wait *args = data;
if (args->op & ~(LIMA_GEM_WAIT_READ|LIMA_GEM_WAIT_WRITE))
return -EINVAL;
return lima_gem_wait(file, args->handle, args->op, args->timeout_ns);
}
static int lima_ioctl_ctx_create(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_ctx_create *args = data;
struct lima_drm_priv *priv = file->driver_priv;
struct lima_device *ldev = to_lima_dev(dev);
if (args->_pad)
return -EINVAL;
return lima_ctx_create(ldev, &priv->ctx_mgr, &args->id);
}
static int lima_ioctl_ctx_free(struct drm_device *dev, void *data, struct drm_file *file)
{
struct drm_lima_ctx_create *args = data;
struct lima_drm_priv *priv = file->driver_priv;
if (args->_pad)
return -EINVAL;
return lima_ctx_free(&priv->ctx_mgr, args->id);
}
static int lima_drm_driver_open(struct drm_device *dev, struct drm_file *file)
{
int err;
struct lima_drm_priv *priv;
struct lima_device *ldev = to_lima_dev(dev);
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv)
return -ENOMEM;
priv->vm = lima_vm_create(ldev);
if (!priv->vm) {
err = -ENOMEM;
goto err_out0;
}
lima_ctx_mgr_init(&priv->ctx_mgr);
file->driver_priv = priv;
return 0;
err_out0:
kfree(priv);
return err;
}
static void lima_drm_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
struct lima_drm_priv *priv = file->driver_priv;
lima_ctx_mgr_fini(&priv->ctx_mgr);
lima_vm_put(priv->vm);
kfree(priv);
}
static const struct drm_ioctl_desc lima_drm_driver_ioctls[] = {
DRM_IOCTL_DEF_DRV(LIMA_GET_PARAM, lima_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_CREATE, lima_ioctl_gem_create, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_INFO, lima_ioctl_gem_info, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_SUBMIT, lima_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_GEM_WAIT, lima_ioctl_gem_wait, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_CTX_CREATE, lima_ioctl_ctx_create, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(LIMA_CTX_FREE, lima_ioctl_ctx_free, DRM_AUTH|DRM_RENDER_ALLOW),
};
static const struct file_operations lima_drm_driver_fops = {
.owner = THIS_MODULE,
.open = drm_open,
.release = drm_release,
.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
#endif
.mmap = lima_gem_mmap,
};
static struct drm_driver lima_drm_driver = {
.driver_features = DRIVER_RENDER | DRIVER_GEM | DRIVER_PRIME | DRIVER_SYNCOBJ,
.open = lima_drm_driver_open,
.postclose = lima_drm_driver_postclose,
.ioctls = lima_drm_driver_ioctls,
.num_ioctls = ARRAY_SIZE(lima_drm_driver_ioctls),
.fops = &lima_drm_driver_fops,
.gem_free_object_unlocked = lima_gem_free_object,
.gem_open_object = lima_gem_object_open,
.gem_close_object = lima_gem_object_close,
.gem_vm_ops = &lima_gem_vm_ops,
.name = "lima",
.desc = "lima DRM",
.date = "20190217",
.major = 1,
.minor = 0,
.patchlevel = 0,
.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
.gem_prime_import_sg_table = lima_gem_prime_import_sg_table,
.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
.gem_prime_get_sg_table = lima_gem_prime_get_sg_table,
.gem_prime_mmap = lima_gem_prime_mmap,
};
static int lima_pdev_probe(struct platform_device *pdev)
{
struct lima_device *ldev;
struct drm_device *ddev;
int err;
err = lima_sched_slab_init();
if (err)
return err;
ldev = devm_kzalloc(&pdev->dev, sizeof(*ldev), GFP_KERNEL);
if (!ldev) {
err = -ENOMEM;
goto err_out0;
}
ldev->pdev = pdev;
ldev->dev = &pdev->dev;
ldev->id = (enum lima_gpu_id)of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, ldev);
/* Allocate and initialize the DRM device. */
ddev = drm_dev_alloc(&lima_drm_driver, &pdev->dev);
if (IS_ERR(ddev))
return PTR_ERR(ddev);
ddev->dev_private = ldev;
ldev->ddev = ddev;
err = lima_device_init(ldev);
if (err) {
dev_err(&pdev->dev, "Fatal error during GPU init\n");
goto err_out1;
}
/*
* Register the DRM device with the core and the connectors with
* sysfs.
*/
err = drm_dev_register(ddev, 0);
if (err < 0)
goto err_out2;
return 0;
err_out2:
lima_device_fini(ldev);
err_out1:
drm_dev_put(ddev);
err_out0:
lima_sched_slab_fini();
return err;
}
static int lima_pdev_remove(struct platform_device *pdev)
{
struct lima_device *ldev = platform_get_drvdata(pdev);
struct drm_device *ddev = ldev->ddev;
drm_dev_unregister(ddev);
lima_device_fini(ldev);
drm_dev_put(ddev);
lima_sched_slab_fini();
return 0;
}
static const struct of_device_id dt_match[] = {
{ .compatible = "arm,mali-400", .data = (void *)lima_gpu_mali400 },
{ .compatible = "arm,mali-450", .data = (void *)lima_gpu_mali450 },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver lima_platform_driver = {
.probe = lima_pdev_probe,
.remove = lima_pdev_remove,
.driver = {
.name = "lima",
.of_match_table = dt_match,
},
};
static int __init lima_init(void)
{
return platform_driver_register(&lima_platform_driver);
}
module_init(lima_init);
static void __exit lima_exit(void)
{
platform_driver_unregister(&lima_platform_driver);
}
module_exit(lima_exit);
MODULE_AUTHOR("Lima Project Developers");
MODULE_DESCRIPTION("Lima DRM Driver");
MODULE_LICENSE("GPL v2");
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DRV_H__
#define __LIMA_DRV_H__
#include <drm/drm_file.h>
#include "lima_ctx.h"
extern int lima_sched_timeout_ms;
struct lima_vm;
struct lima_bo;
struct lima_sched_task;
struct drm_lima_gem_submit_bo;
struct lima_drm_priv {
struct lima_vm *vm;
struct lima_ctx_mgr ctx_mgr;
};
struct lima_submit {
struct lima_ctx *ctx;
int pipe;
u32 flags;
struct drm_lima_gem_submit_bo *bos;
struct lima_bo **lbos;
u32 nr_bos;
u32 in_sync[2];
u32 out_sync;
struct lima_sched_task *task;
};
static inline struct lima_drm_priv *
to_lima_drm_priv(struct drm_file *file)
{
return file->driver_priv;
}
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/sync_file.h>
#include <linux/pfn_t.h>
#include <drm/drm_file.h>
#include <drm/drm_syncobj.h>
#include <drm/drm_utils.h>
#include <drm/lima_drm.h>
#include "lima_drv.h"
#include "lima_gem.h"
#include "lima_gem_prime.h"
#include "lima_vm.h"
#include "lima_object.h"
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle)
{
int err;
struct lima_bo *bo;
struct lima_device *ldev = to_lima_dev(dev);
bo = lima_bo_create(ldev, size, flags, NULL, NULL);
if (IS_ERR(bo))
return PTR_ERR(bo);
err = drm_gem_handle_create(file, &bo->gem, handle);
/* drop reference from allocate - handle holds it now */
drm_gem_object_put_unlocked(&bo->gem);
return err;
}
void lima_gem_free_object(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
if (!list_empty(&bo->va))
dev_err(obj->dev->dev, "lima gem free bo still has va\n");
lima_bo_destroy(bo);
}
int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
return lima_vm_bo_add(vm, bo, true);
}
void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file)
{
struct lima_bo *bo = to_lima_bo(obj);
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
lima_vm_bo_del(vm, bo);
}
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset)
{
struct drm_gem_object *obj;
struct lima_bo *bo;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
int err;
obj = drm_gem_object_lookup(file, handle);
if (!obj)
return -ENOENT;
bo = to_lima_bo(obj);
*va = lima_vm_get_va(vm, bo);
err = drm_gem_create_mmap_offset(obj);
if (!err)
*offset = drm_vma_node_offset_addr(&obj->vma_node);
drm_gem_object_put_unlocked(obj);
return err;
}
static vm_fault_t lima_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct lima_bo *bo = to_lima_bo(obj);
pfn_t pfn;
pgoff_t pgoff;
/* We don't use vmf->pgoff since that has the fake offset: */
pgoff = (vmf->address - vma->vm_start) >> PAGE_SHIFT;
pfn = __pfn_to_pfn_t(page_to_pfn(bo->pages[pgoff]), PFN_DEV);
return vmf_insert_mixed(vma, vmf->address, pfn);
}
const struct vm_operations_struct lima_gem_vm_ops = {
.fault = lima_gem_fault,
.open = drm_gem_vm_open,
.close = drm_gem_vm_close,
};
void lima_set_vma_flags(struct vm_area_struct *vma)
{
pgprot_t prot = vm_get_page_prot(vma->vm_flags);
vma->vm_flags |= VM_MIXEDMAP;
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_page_prot = pgprot_writecombine(prot);
}
int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap(filp, vma);
if (ret)
return ret;
lima_set_vma_flags(vma);
return 0;
}
static int lima_gem_sync_bo(struct lima_sched_task *task, struct lima_bo *bo,
bool write, bool explicit)
{
int err = 0;
if (!write) {
err = reservation_object_reserve_shared(bo->gem.resv, 1);
if (err)
return err;
}
/* explicit sync uses user-passed dependency fences */
if (explicit)
return 0;
/* implicit sync uses the bo fences in the resv object */
if (write) {
unsigned nr_fences;
struct dma_fence **fences;
int i;
err = reservation_object_get_fences_rcu(
bo->gem.resv, NULL, &nr_fences, &fences);
if (err || !nr_fences)
return err;
for (i = 0; i < nr_fences; i++) {
err = lima_sched_task_add_dep(task, fences[i]);
if (err)
break;
}
/* on error, free the remaining fences */
for ( ; i < nr_fences; i++)
dma_fence_put(fences[i]);
kfree(fences);
} else {
struct dma_fence *fence;
fence = reservation_object_get_excl_rcu(bo->gem.resv);
if (fence) {
err = lima_sched_task_add_dep(task, fence);
if (err)
dma_fence_put(fence);
}
}
return err;
}
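/* lock all bo reservations with the ww_mutex acquire protocol:
 * on -EDEADLK, back off, slow-lock the contended bo and retry
 */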
static int lima_gem_lock_bos(struct lima_bo **bos, u32 nr_bos,
struct ww_acquire_ctx *ctx)
{
int i, ret = 0, contended, slow_locked = -1;
ww_acquire_init(ctx, &reservation_ww_class);
retry:
for (i = 0; i < nr_bos; i++) {
if (i == slow_locked) {
slow_locked = -1;
continue;
}
ret = ww_mutex_lock_interruptible(&bos[i]->gem.resv->lock, ctx);
if (ret < 0) {
contended = i;
goto err;
}
}
ww_acquire_done(ctx);
return 0;
err:
for (i--; i >= 0; i--)
ww_mutex_unlock(&bos[i]->gem.resv->lock);
if (slow_locked >= 0)
ww_mutex_unlock(&bos[slow_locked]->gem.resv->lock);
if (ret == -EDEADLK) {
/* we lost out in a seqno race, lock and retry.. */
ret = ww_mutex_lock_slow_interruptible(
&bos[contended]->gem.resv->lock, ctx);
if (!ret) {
slow_locked = contended;
goto retry;
}
}
ww_acquire_fini(ctx);
return ret;
}
static void lima_gem_unlock_bos(struct lima_bo **bos, u32 nr_bos,
struct ww_acquire_ctx *ctx)
{
int i;
for (i = 0; i < nr_bos; i++)
ww_mutex_unlock(&bos[i]->gem.resv->lock);
ww_acquire_fini(ctx);
}
static int lima_gem_add_deps(struct drm_file *file, struct lima_submit *submit)
{
int i, err;
for (i = 0; i < ARRAY_SIZE(submit->in_sync); i++) {
struct dma_fence *fence = NULL;
if (!submit->in_sync[i])
continue;
err = drm_syncobj_find_fence(file, submit->in_sync[i],
0, 0, &fence);
if (err)
return err;
err = lima_sched_task_add_dep(submit->task, fence);
if (err) {
dma_fence_put(fence);
return err;
}
}
return 0;
}
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit)
{
int i, err = 0;
struct ww_acquire_ctx ctx;
struct lima_drm_priv *priv = to_lima_drm_priv(file);
struct lima_vm *vm = priv->vm;
struct drm_syncobj *out_sync = NULL;
struct dma_fence *fence;
struct lima_bo **bos = submit->lbos;
if (submit->out_sync) {
out_sync = drm_syncobj_find(file, submit->out_sync);
if (!out_sync)
return -ENOENT;
}
for (i = 0; i < submit->nr_bos; i++) {
struct drm_gem_object *obj;
struct lima_bo *bo;
obj = drm_gem_object_lookup(file, submit->bos[i].handle);
if (!obj) {
err = -ENOENT;
goto err_out0;
}
bo = to_lima_bo(obj);
/* increase refcnt of the gpu va map to prevent it being unmapped
 * while executing; decreased when the task is done
 */
err = lima_vm_bo_add(vm, bo, false);
if (err) {
drm_gem_object_put_unlocked(obj);
goto err_out0;
}
bos[i] = bo;
}
err = lima_gem_lock_bos(bos, submit->nr_bos, &ctx);
if (err)
goto err_out0;
err = lima_sched_task_init(
submit->task, submit->ctx->context + submit->pipe,
bos, submit->nr_bos, vm);
if (err)
goto err_out1;
err = lima_gem_add_deps(file, submit);
if (err)
goto err_out2;
for (i = 0; i < submit->nr_bos; i++) {
err = lima_gem_sync_bo(
submit->task, bos[i],
submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE,
submit->flags & LIMA_SUBMIT_FLAG_EXPLICIT_FENCE);
if (err)
goto err_out2;
}
fence = lima_sched_context_queue_task(
submit->ctx->context + submit->pipe, submit->task);
for (i = 0; i < submit->nr_bos; i++) {
if (submit->bos[i].flags & LIMA_SUBMIT_BO_WRITE)
reservation_object_add_excl_fence(bos[i]->gem.resv, fence);
else
reservation_object_add_shared_fence(bos[i]->gem.resv, fence);
}
lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
for (i = 0; i < submit->nr_bos; i++)
drm_gem_object_put_unlocked(&bos[i]->gem);
if (out_sync) {
drm_syncobj_replace_fence(out_sync, fence);
drm_syncobj_put(out_sync);
}
dma_fence_put(fence);
return 0;
err_out2:
lima_sched_task_fini(submit->task);
err_out1:
lima_gem_unlock_bos(bos, submit->nr_bos, &ctx);
err_out0:
for (i = 0; i < submit->nr_bos; i++) {
if (!bos[i])
break;
lima_vm_bo_del(vm, bos[i]);
drm_gem_object_put_unlocked(&bos[i]->gem);
}
if (out_sync)
drm_syncobj_put(out_sync);
return err;
}
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns)
{
bool write = op & LIMA_GEM_WAIT_WRITE;
long ret, timeout;
if (!op)
return 0;
timeout = drm_timeout_abs_to_jiffies(timeout_ns);
ret = drm_gem_reservation_object_wait(file, handle, write, timeout);
if (ret == 0)
ret = timeout ? -ETIMEDOUT : -EBUSY;
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_GEM_H__
#define __LIMA_GEM_H__
struct lima_bo;
struct lima_submit;
extern const struct vm_operations_struct lima_gem_vm_ops;
struct lima_bo *lima_gem_create_bo(struct drm_device *dev, u32 size, u32 flags);
int lima_gem_create_handle(struct drm_device *dev, struct drm_file *file,
u32 size, u32 flags, u32 *handle);
void lima_gem_free_object(struct drm_gem_object *obj);
int lima_gem_object_open(struct drm_gem_object *obj, struct drm_file *file);
void lima_gem_object_close(struct drm_gem_object *obj, struct drm_file *file);
int lima_gem_get_info(struct drm_file *file, u32 handle, u32 *va, u64 *offset);
int lima_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int lima_gem_submit(struct drm_file *file, struct lima_submit *submit);
int lima_gem_wait(struct drm_file *file, u32 handle, u32 op, s64 timeout_ns);
void lima_set_vma_flags(struct vm_area_struct *vma);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/dma-buf.h>
#include <drm/drm_prime.h>
#include <drm/drm_drv.h>
#include <drm/drm_file.h>
#include "lima_device.h"
#include "lima_object.h"
#include "lima_gem.h"
#include "lima_gem_prime.h"
struct drm_gem_object *lima_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct lima_device *ldev = to_lima_dev(dev);
struct lima_bo *bo;
bo = lima_bo_create(ldev, attach->dmabuf->size, 0, sgt,
attach->dmabuf->resv);
if (IS_ERR(bo))
return ERR_CAST(bo);
return &bo->gem;
}
struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
struct lima_bo *bo = to_lima_bo(obj);
int npages = obj->size >> PAGE_SHIFT;
return drm_prime_pages_to_sg(bo->pages, npages);
}
int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
{
int ret;
ret = drm_gem_mmap_obj(obj, obj->size, vma);
if (ret)
return ret;
lima_set_vma_flags(vma);
return 0;
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_GEM_PRIME_H__
#define __LIMA_GEM_PRIME_H__
struct drm_gem_object *lima_gem_prime_import_sg_table(
struct drm_device *dev, struct dma_buf_attachment *attach,
struct sg_table *sgt);
struct sg_table *lima_gem_prime_get_sg_table(struct drm_gem_object *obj);
int lima_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <drm/lima_drm.h>
#include "lima_device.h"
#include "lima_gp.h"
#include "lima_regs.h"
#define gp_write(reg, data) writel(data, ip->iomem + reg)
#define gp_read(reg) readl(ip->iomem + reg)
static irqreturn_t lima_gp_irq_handler(int irq, void *data)
{
struct lima_ip *ip = data;
struct lima_device *dev = ip->dev;
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
u32 state = gp_read(LIMA_GP_INT_STAT);
u32 status = gp_read(LIMA_GP_STATUS);
bool done = false;
/* for shared irq case */
if (!state)
return IRQ_NONE;
if (state & LIMA_GP_IRQ_MASK_ERROR) {
dev_err(dev->dev, "gp error irq state=%x status=%x\n",
state, status);
/* mask all interrupts before hard reset */
gp_write(LIMA_GP_INT_MASK, 0);
pipe->error = true;
done = true;
} else {
bool valid = state & (LIMA_GP_IRQ_VS_END_CMD_LST |
LIMA_GP_IRQ_PLBU_END_CMD_LST);
bool active = status & (LIMA_GP_STATUS_VS_ACTIVE |
LIMA_GP_STATUS_PLBU_ACTIVE);
done = valid && !active;
}
gp_write(LIMA_GP_INT_CLEAR, state);
if (done)
lima_sched_pipe_task_done(pipe);
return IRQ_HANDLED;
}
static void lima_gp_soft_reset_async(struct lima_ip *ip)
{
if (ip->data.async_reset)
return;
gp_write(LIMA_GP_INT_MASK, 0);
gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_RESET_COMPLETED);
gp_write(LIMA_GP_CMD, LIMA_GP_CMD_SOFT_RESET);
ip->data.async_reset = true;
}
static int lima_gp_soft_reset_async_wait(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
if (!ip->data.async_reset)
return 0;
err = readl_poll_timeout(ip->iomem + LIMA_GP_INT_RAWSTAT, v,
v & LIMA_GP_IRQ_RESET_COMPLETED,
0, 100);
if (err) {
dev_err(dev->dev, "gp soft reset time out\n");
return err;
}
gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
ip->data.async_reset = false;
return 0;
}
static int lima_gp_task_validate(struct lima_sched_pipe *pipe,
struct lima_sched_task *task)
{
struct drm_lima_gp_frame *frame = task->frame;
u32 *f = frame->frame;
(void)pipe;
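/* reject frames with inverted vs/plbu/heap address ranges, and
 * frames with neither a vs nor a plbu command list
 */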
if (f[LIMA_GP_VSCL_START_ADDR >> 2] >
f[LIMA_GP_VSCL_END_ADDR >> 2] ||
f[LIMA_GP_PLBUCL_START_ADDR >> 2] >
f[LIMA_GP_PLBUCL_END_ADDR >> 2] ||
f[LIMA_GP_PLBU_ALLOC_START_ADDR >> 2] >
f[LIMA_GP_PLBU_ALLOC_END_ADDR >> 2])
return -EINVAL;
if (f[LIMA_GP_VSCL_START_ADDR >> 2] ==
f[LIMA_GP_VSCL_END_ADDR >> 2] &&
f[LIMA_GP_PLBUCL_START_ADDR >> 2] ==
f[LIMA_GP_PLBUCL_END_ADDR >> 2])
return -EINVAL;
return 0;
}
static void lima_gp_task_run(struct lima_sched_pipe *pipe,
struct lima_sched_task *task)
{
struct lima_ip *ip = pipe->processor[0];
struct drm_lima_gp_frame *frame = task->frame;
u32 *f = frame->frame;
u32 cmd = 0;
int i;
if (f[LIMA_GP_VSCL_START_ADDR >> 2] !=
f[LIMA_GP_VSCL_END_ADDR >> 2])
cmd |= LIMA_GP_CMD_START_VS;
if (f[LIMA_GP_PLBUCL_START_ADDR >> 2] !=
f[LIMA_GP_PLBUCL_END_ADDR >> 2])
cmd |= LIMA_GP_CMD_START_PLBU;
/* before any hw ops, wait for the last successful task's async soft reset to complete */
lima_gp_soft_reset_async_wait(ip);
for (i = 0; i < LIMA_GP_FRAME_REG_NUM; i++)
writel(f[i], ip->iomem + LIMA_GP_VSCL_START_ADDR + i * 4);
gp_write(LIMA_GP_CMD, LIMA_GP_CMD_UPDATE_PLBU_ALLOC);
gp_write(LIMA_GP_CMD, cmd);
}
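/* hard reset completion is detected by writing a probe value to the
 * perf counter limit register until it reads back
 */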
static int lima_gp_hard_reset_poll(struct lima_ip *ip)
{
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC01A0000);
return gp_read(LIMA_GP_PERF_CNT_0_LIMIT) == 0xC01A0000;
}
static int lima_gp_hard_reset(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int ret;
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0xC0FFE000);
gp_write(LIMA_GP_INT_MASK, 0);
gp_write(LIMA_GP_CMD, LIMA_GP_CMD_RESET);
ret = lima_poll_timeout(ip, lima_gp_hard_reset_poll, 10, 100);
if (ret) {
dev_err(dev->dev, "gp hard reset timeout\n");
return ret;
}
gp_write(LIMA_GP_PERF_CNT_0_LIMIT, 0);
gp_write(LIMA_GP_INT_CLEAR, LIMA_GP_IRQ_MASK_ALL);
gp_write(LIMA_GP_INT_MASK, LIMA_GP_IRQ_MASK_USED);
return 0;
}
static void lima_gp_task_fini(struct lima_sched_pipe *pipe)
{
lima_gp_soft_reset_async(pipe->processor[0]);
}
static void lima_gp_task_error(struct lima_sched_pipe *pipe)
{
struct lima_ip *ip = pipe->processor[0];
dev_err(ip->dev->dev, "gp task error int_state=%x status=%x\n",
gp_read(LIMA_GP_INT_STAT), gp_read(LIMA_GP_STATUS));
lima_gp_hard_reset(ip);
}
static void lima_gp_task_mmu_error(struct lima_sched_pipe *pipe)
{
lima_sched_pipe_task_done(pipe);
}
static void lima_gp_print_version(struct lima_ip *ip)
{
u32 version, major, minor;
char *name;
version = gp_read(LIMA_GP_VERSION);
major = (version >> 8) & 0xFF;
minor = version & 0xFF;
switch (version >> 16) {
case 0xA07:
name = "mali200";
break;
case 0xC07:
name = "mali300";
break;
case 0xB07:
name = "mali400";
break;
case 0xD07:
name = "mali450";
break;
default:
name = "unknown";
break;
}
dev_info(ip->dev->dev, "%s - %s version major %d minor %d\n",
lima_ip_name(ip), name, major, minor);
}
static struct kmem_cache *lima_gp_task_slab;
static int lima_gp_task_slab_refcnt;
int lima_gp_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
lima_gp_print_version(ip);
ip->data.async_reset = false;
lima_gp_soft_reset_async(ip);
err = lima_gp_soft_reset_async_wait(ip);
if (err)
return err;
err = devm_request_irq(dev->dev, ip->irq, lima_gp_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
dev_err(dev->dev, "gp %s fail to request irq\n",
lima_ip_name(ip));
return err;
}
dev->gp_version = gp_read(LIMA_GP_VERSION);
return 0;
}
void lima_gp_fini(struct lima_ip *ip)
{
}
int lima_gp_pipe_init(struct lima_device *dev)
{
int frame_size = sizeof(struct drm_lima_gp_frame);
struct lima_sched_pipe *pipe = dev->pipe + lima_pipe_gp;
if (!lima_gp_task_slab) {
lima_gp_task_slab = kmem_cache_create_usercopy(
"lima_gp_task", sizeof(struct lima_sched_task) + frame_size,
0, SLAB_HWCACHE_ALIGN, sizeof(struct lima_sched_task),
frame_size, NULL);
if (!lima_gp_task_slab)
return -ENOMEM;
}
lima_gp_task_slab_refcnt++;
pipe->frame_size = frame_size;
pipe->task_slab = lima_gp_task_slab;
pipe->task_validate = lima_gp_task_validate;
pipe->task_run = lima_gp_task_run;
pipe->task_fini = lima_gp_task_fini;
pipe->task_error = lima_gp_task_error;
pipe->task_mmu_error = lima_gp_task_mmu_error;
return 0;
}
void lima_gp_pipe_fini(struct lima_device *dev)
{
if (!--lima_gp_task_slab_refcnt) {
kmem_cache_destroy(lima_gp_task_slab);
lima_gp_task_slab = NULL;
}
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_GP_H__
#define __LIMA_GP_H__
struct lima_ip;
struct lima_device;
int lima_gp_init(struct lima_ip *ip);
void lima_gp_fini(struct lima_ip *ip);
int lima_gp_pipe_init(struct lima_device *dev);
void lima_gp_pipe_fini(struct lima_device *dev);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/iopoll.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_l2_cache.h"
#include "lima_regs.h"
#define l2_cache_write(reg, data) writel(data, ip->iomem + reg)
#define l2_cache_read(reg) readl(ip->iomem + reg)
static int lima_l2_cache_wait_idle(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
err = readl_poll_timeout(ip->iomem + LIMA_L2_CACHE_STATUS, v,
!(v & LIMA_L2_CACHE_STATUS_COMMAND_BUSY),
0, 1000);
if (err) {
dev_err(dev->dev, "l2 cache wait command timeout\n");
return err;
}
return 0;
}
int lima_l2_cache_flush(struct lima_ip *ip)
{
int ret;
spin_lock(&ip->data.lock);
l2_cache_write(LIMA_L2_CACHE_COMMAND, LIMA_L2_CACHE_COMMAND_CLEAR_ALL);
ret = lima_l2_cache_wait_idle(ip);
spin_unlock(&ip->data.lock);
return ret;
}
int lima_l2_cache_init(struct lima_ip *ip)
{
int i, err;
u32 size;
struct lima_device *dev = ip->dev;
/* l2_cache2 only exists when one of PP4-7 is present */
if (ip->id == lima_ip_l2_cache2) {
for (i = lima_ip_pp4; i <= lima_ip_pp7; i++) {
if (dev->ip[i].present)
break;
}
if (i > lima_ip_pp7)
return -ENODEV;
}
spin_lock_init(&ip->data.lock);
size = l2_cache_read(LIMA_L2_CACHE_SIZE);
dev_info(dev->dev, "l2 cache %uK, %u-way, %ubyte cache line, %ubit external bus\n",
1 << (((size >> 16) & 0xff) - 10),
1 << ((size >> 8) & 0xff),
1 << (size & 0xff),
1 << ((size >> 24) & 0xff));
err = lima_l2_cache_flush(ip);
if (err)
return err;
l2_cache_write(LIMA_L2_CACHE_ENABLE,
LIMA_L2_CACHE_ENABLE_ACCESS|LIMA_L2_CACHE_ENABLE_READ_ALLOCATE);
l2_cache_write(LIMA_L2_CACHE_MAX_READS, 0x1c);
return 0;
}
void lima_l2_cache_fini(struct lima_ip *ip)
{
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_L2_CACHE_H__
#define __LIMA_L2_CACHE_H__
struct lima_ip;
int lima_l2_cache_init(struct lima_ip *ip);
void lima_l2_cache_fini(struct lima_ip *ip);
int lima_l2_cache_flush(struct lima_ip *ip);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/interrupt.h>
#include <linux/iopoll.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_mmu.h"
#include "lima_vm.h"
#include "lima_object.h"
#include "lima_regs.h"
#define mmu_write(reg, data) writel(data, ip->iomem + reg)
#define mmu_read(reg) readl(ip->iomem + reg)
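/* issue an MMU command, then poll @addr until @cond holds, logging a
 * dev_err on timeout
 */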
#define lima_mmu_send_command(cmd, addr, val, cond) \
({ \
int __ret; \
\
mmu_write(LIMA_MMU_COMMAND, cmd); \
__ret = readl_poll_timeout(ip->iomem + (addr), val, \
cond, 0, 100); \
if (__ret) \
dev_err(dev->dev, \
"mmu command %x timeout\n", cmd); \
__ret; \
})
static irqreturn_t lima_mmu_irq_handler(int irq, void *data)
{
struct lima_ip *ip = data;
struct lima_device *dev = ip->dev;
u32 status = mmu_read(LIMA_MMU_INT_STATUS);
struct lima_sched_pipe *pipe;
/* for shared irq case */
if (!status)
return IRQ_NONE;
if (status & LIMA_MMU_INT_PAGE_FAULT) {
u32 fault = mmu_read(LIMA_MMU_PAGE_FAULT_ADDR);
dev_err(dev->dev, "mmu page fault at 0x%x from bus id %d of type %s on %s\n",
fault, LIMA_MMU_STATUS_BUS_ID(status),
status & LIMA_MMU_STATUS_PAGE_FAULT_IS_WRITE ? "write" : "read",
lima_ip_name(ip));
}
if (status & LIMA_MMU_INT_READ_BUS_ERROR)
dev_err(dev->dev, "mmu %s irq bus error\n", lima_ip_name(ip));
/* mask all interrupts before resume */
mmu_write(LIMA_MMU_INT_MASK, 0);
mmu_write(LIMA_MMU_INT_CLEAR, status);
pipe = dev->pipe + (ip->id == lima_ip_gpmmu ? lima_pipe_gp : lima_pipe_pp);
lima_sched_pipe_mmu_error(pipe);
return IRQ_HANDLED;
}
int lima_mmu_init(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
if (ip->id == lima_ip_ppmmu_bcast)
return 0;
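/* register r/w sanity check: the DTE address register keeps only the
 * page-aligned bits, so 0xCAFEBABE must read back as 0xCAFEB000
 */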
mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
if (mmu_read(LIMA_MMU_DTE_ADDR) != 0xCAFEB000) {
dev_err(dev->dev, "mmu %s dte write test fail\n", lima_ip_name(ip));
return -EIO;
}
mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_HARD_RESET);
err = lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
LIMA_MMU_DTE_ADDR, v, v == 0);
if (err)
return err;
err = devm_request_irq(dev->dev, ip->irq, lima_mmu_irq_handler,
IRQF_SHARED, lima_ip_name(ip), ip);
if (err) {
dev_err(dev->dev, "mmu %s fail to request irq\n", lima_ip_name(ip));
return err;
}
mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
return lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
LIMA_MMU_STATUS, v,
v & LIMA_MMU_STATUS_PAGING_ENABLED);
}
void lima_mmu_fini(struct lima_ip *ip)
{
}
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm)
{
struct lima_device *dev = ip->dev;
u32 v;
lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_STALL,
LIMA_MMU_STATUS, v,
v & LIMA_MMU_STATUS_STALL_ACTIVE);
if (vm)
mmu_write(LIMA_MMU_DTE_ADDR, vm->pd.dma);
/* flush the TLB */
mmu_write(LIMA_MMU_COMMAND, LIMA_MMU_COMMAND_ZAP_CACHE);
lima_mmu_send_command(LIMA_MMU_COMMAND_DISABLE_STALL,
LIMA_MMU_STATUS, v,
!(v & LIMA_MMU_STATUS_STALL_ACTIVE));
}
void lima_mmu_page_fault_resume(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
u32 status = mmu_read(LIMA_MMU_STATUS);
u32 v;
if (status & LIMA_MMU_STATUS_PAGE_FAULT_ACTIVE) {
dev_info(dev->dev, "mmu resume\n");
mmu_write(LIMA_MMU_INT_MASK, 0);
mmu_write(LIMA_MMU_DTE_ADDR, 0xCAFEBABE);
lima_mmu_send_command(LIMA_MMU_COMMAND_HARD_RESET,
LIMA_MMU_DTE_ADDR, v, v == 0);
mmu_write(LIMA_MMU_INT_MASK, LIMA_MMU_INT_PAGE_FAULT | LIMA_MMU_INT_READ_BUS_ERROR);
mmu_write(LIMA_MMU_DTE_ADDR, dev->empty_vm->pd.dma);
lima_mmu_send_command(LIMA_MMU_COMMAND_ENABLE_PAGING,
LIMA_MMU_STATUS, v,
v & LIMA_MMU_STATUS_PAGING_ENABLED);
}
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_MMU_H__
#define __LIMA_MMU_H__
struct lima_ip;
struct lima_vm;
int lima_mmu_init(struct lima_ip *ip);
void lima_mmu_fini(struct lima_ip *ip);
void lima_mmu_switch_vm(struct lima_ip *ip, struct lima_vm *vm);
void lima_mmu_page_fault_resume(struct lima_ip *ip);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#include <drm/drm_prime.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include "lima_object.h"
void lima_bo_destroy(struct lima_bo *bo)
{
if (bo->sgt) {
kfree(bo->pages);
drm_prime_gem_destroy(&bo->gem, bo->sgt);
} else {
if (bo->pages_dma_addr) {
int i, npages = bo->gem.size >> PAGE_SHIFT;
for (i = 0; i < npages; i++) {
if (bo->pages_dma_addr[i])
dma_unmap_page(bo->gem.dev->dev,
bo->pages_dma_addr[i],
PAGE_SIZE, DMA_BIDIRECTIONAL);
}
}
if (bo->pages)
drm_gem_put_pages(&bo->gem, bo->pages, true, true);
}
kfree(bo->pages_dma_addr);
drm_gem_object_release(&bo->gem);
kfree(bo);
}
static struct lima_bo *lima_bo_create_struct(struct lima_device *dev, u32 size, u32 flags,
struct reservation_object *resv)
{
struct lima_bo *bo;
int err;
size = PAGE_ALIGN(size);
bo = kzalloc(sizeof(*bo), GFP_KERNEL);
if (!bo)
return ERR_PTR(-ENOMEM);
mutex_init(&bo->lock);
INIT_LIST_HEAD(&bo->va);
bo->gem.resv = resv;
err = drm_gem_object_init(dev->ddev, &bo->gem, size);
if (err) {
kfree(bo);
return ERR_PTR(err);
}
return bo;
}
struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
u32 flags, struct sg_table *sgt,
struct reservation_object *resv)
{
int i, err;
size_t npages;
struct lima_bo *bo, *ret;
bo = lima_bo_create_struct(dev, size, flags, resv);
if (IS_ERR(bo))
return bo;
npages = bo->gem.size >> PAGE_SHIFT;
bo->pages_dma_addr = kcalloc(npages, sizeof(dma_addr_t), GFP_KERNEL);
if (!bo->pages_dma_addr) {
ret = ERR_PTR(-ENOMEM);
goto err_out;
}
if (sgt) {
bo->sgt = sgt;
bo->pages = kcalloc(npages, sizeof(*bo->pages), GFP_KERNEL);
if (!bo->pages) {
ret = ERR_PTR(-ENOMEM);
goto err_out;
}
err = drm_prime_sg_to_page_addr_arrays(
sgt, bo->pages, bo->pages_dma_addr, npages);
if (err) {
ret = ERR_PTR(err);
goto err_out;
}
} else {
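/* allocate shmem pages from the DMA32 zone: the GPU only handles
 * 32-bit physical addresses
 */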
mapping_set_gfp_mask(bo->gem.filp->f_mapping, GFP_DMA32);
bo->pages = drm_gem_get_pages(&bo->gem);
if (IS_ERR(bo->pages)) {
ret = ERR_CAST(bo->pages);
bo->pages = NULL;
goto err_out;
}
for (i = 0; i < npages; i++) {
dma_addr_t addr = dma_map_page(dev->dev, bo->pages[i], 0,
PAGE_SIZE, DMA_BIDIRECTIONAL);
if (dma_mapping_error(dev->dev, addr)) {
ret = ERR_PTR(-EFAULT);
goto err_out;
}
bo->pages_dma_addr[i] = addr;
}
}
return bo;
err_out:
lima_bo_destroy(bo);
return ret;
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2018-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_OBJECT_H__
#define __LIMA_OBJECT_H__
#include <drm/drm_gem.h>
#include "lima_device.h"
struct lima_bo {
struct drm_gem_object gem;
struct page **pages;
dma_addr_t *pages_dma_addr;
struct sg_table *sgt;
void *vaddr;
struct mutex lock;
struct list_head va;
};
static inline struct lima_bo *
to_lima_bo(struct drm_gem_object *obj)
{
return container_of(obj, struct lima_bo, gem);
}
struct lima_bo *lima_bo_create(struct lima_device *dev, u32 size,
u32 flags, struct sg_table *sgt,
struct reservation_object *resv);
void lima_bo_destroy(struct lima_bo *bo);
void *lima_bo_vmap(struct lima_bo *bo);
void lima_bo_vunmap(struct lima_bo *bo);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/iopoll.h>
#include <linux/device.h>
#include "lima_device.h"
#include "lima_pmu.h"
#include "lima_regs.h"
#define pmu_write(reg, data) writel(data, ip->iomem + reg)
#define pmu_read(reg) readl(ip->iomem + reg)
static int lima_pmu_wait_cmd(struct lima_ip *ip)
{
struct lima_device *dev = ip->dev;
int err;
u32 v;
err = readl_poll_timeout(ip->iomem + LIMA_PMU_INT_RAWSTAT,
v, v & LIMA_PMU_INT_CMD_MASK,
100, 100000);
if (err) {
dev_err(dev->dev, "timeout waiting for pmu cmd\n");
return err;
}
pmu_write(LIMA_PMU_INT_CLEAR, LIMA_PMU_INT_CMD_MASK);
return 0;
}
int lima_pmu_init(struct lima_ip *ip)
{
int err;
u32 stat;
pmu_write(LIMA_PMU_INT_MASK, 0);
/* If this value is too low, the GPU will be unstable at high
 * GPU clock frequencies.
 */
pmu_write(LIMA_PMU_SW_DELAY, 0xffff);
/* status reg: 1 = off, 0 = on */
stat = pmu_read(LIMA_PMU_STATUS);
/* power up all ip */
if (stat) {
pmu_write(LIMA_PMU_POWER_UP, stat);
err = lima_pmu_wait_cmd(ip);
if (err)
return err;
}
return 0;
}
void lima_pmu_fini(struct lima_ip *ip)
{
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_PMU_H__
#define __LIMA_PMU_H__
struct lima_ip;
int lima_pmu_init(struct lima_ip *ip);
void lima_pmu_fini(struct lima_ip *ip);
#endif
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_PP_H__
#define __LIMA_PP_H__
struct lima_ip;
struct lima_device;
int lima_pp_init(struct lima_ip *ip);
void lima_pp_fini(struct lima_ip *ip);
int lima_pp_bcast_init(struct lima_ip *ip);
void lima_pp_bcast_fini(struct lima_ip *ip);
int lima_pp_pipe_init(struct lima_device *dev);
void lima_pp_pipe_fini(struct lima_device *dev);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/kthread.h>
#include <linux/slab.h>
#include "lima_drv.h"
#include "lima_sched.h"
#include "lima_vm.h"
#include "lima_mmu.h"
#include "lima_l2_cache.h"
#include "lima_object.h"
struct lima_fence {
struct dma_fence base;
struct lima_sched_pipe *pipe;
};
static struct kmem_cache *lima_fence_slab;
static int lima_fence_slab_refcnt;
int lima_sched_slab_init(void)
{
if (!lima_fence_slab) {
lima_fence_slab = kmem_cache_create(
"lima_fence", sizeof(struct lima_fence), 0,
SLAB_HWCACHE_ALIGN, NULL);
if (!lima_fence_slab)
return -ENOMEM;
}
lima_fence_slab_refcnt++;
return 0;
}
void lima_sched_slab_fini(void)
{
if (!--lima_fence_slab_refcnt) {
kmem_cache_destroy(lima_fence_slab);
lima_fence_slab = NULL;
}
}
static inline struct lima_fence *to_lima_fence(struct dma_fence *fence)
{
return container_of(fence, struct lima_fence, base);
}
static const char *lima_fence_get_driver_name(struct dma_fence *fence)
{
return "lima";
}
static const char *lima_fence_get_timeline_name(struct dma_fence *fence)
{
struct lima_fence *f = to_lima_fence(fence);
return f->pipe->base.name;
}
static void lima_fence_release_rcu(struct rcu_head *rcu)
{
struct dma_fence *f = container_of(rcu, struct dma_fence, rcu);
struct lima_fence *fence = to_lima_fence(f);
kmem_cache_free(lima_fence_slab, fence);
}
static void lima_fence_release(struct dma_fence *fence)
{
struct lima_fence *f = to_lima_fence(fence);
call_rcu(&f->base.rcu, lima_fence_release_rcu);
}
static const struct dma_fence_ops lima_fence_ops = {
.get_driver_name = lima_fence_get_driver_name,
.get_timeline_name = lima_fence_get_timeline_name,
.release = lima_fence_release,
};
static struct lima_fence *lima_fence_create(struct lima_sched_pipe *pipe)
{
struct lima_fence *fence;
fence = kmem_cache_zalloc(lima_fence_slab, GFP_KERNEL);
if (!fence)
return NULL;
fence->pipe = pipe;
dma_fence_init(&fence->base, &lima_fence_ops, &pipe->fence_lock,
pipe->fence_context, ++pipe->fence_seqno);
return fence;
}
static inline struct lima_sched_task *to_lima_task(struct drm_sched_job *job)
{
return container_of(job, struct lima_sched_task, base);
}
static inline struct lima_sched_pipe *to_lima_pipe(struct drm_gpu_scheduler *sched)
{
return container_of(sched, struct lima_sched_pipe, base);
}
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
struct lima_vm *vm)
{
int err, i;
task->bos = kmemdup(bos, sizeof(*bos) * num_bos, GFP_KERNEL);
if (!task->bos)
return -ENOMEM;
for (i = 0; i < num_bos; i++)
drm_gem_object_get(&bos[i]->gem);
err = drm_sched_job_init(&task->base, &context->base, vm);
if (err) {
kfree(task->bos);
return err;
}
task->num_bos = num_bos;
task->vm = lima_vm_get(vm);
return 0;
}
void lima_sched_task_fini(struct lima_sched_task *task)
{
int i;
drm_sched_job_cleanup(&task->base);
for (i = 0; i < task->num_dep; i++)
dma_fence_put(task->dep[i]);
kfree(task->dep);
if (task->bos) {
for (i = 0; i < task->num_bos; i++)
drm_gem_object_put_unlocked(&task->bos[i]->gem);
kfree(task->bos);
}
lima_vm_put(task->vm);
}
int lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence)
{
int i, new_dep = 4;
/* a fence from the same context is definitely earlier than this task */
if (fence->context == task->base.s_fence->finished.context) {
dma_fence_put(fence);
return 0;
}
if (task->dep && task->num_dep == task->max_dep)
new_dep = task->max_dep * 2;
if (task->max_dep < new_dep) {
void *dep = krealloc(task->dep, sizeof(*task->dep) * new_dep, GFP_KERNEL);
if (!dep)
return -ENOMEM;
task->max_dep = new_dep;
task->dep = dep;
}
for (i = 0; i < task->num_dep; i++) {
if (task->dep[i]->context == fence->context &&
dma_fence_is_later(fence, task->dep[i])) {
dma_fence_put(task->dep[i]);
task->dep[i] = fence;
return 0;
}
}
task->dep[task->num_dep++] = fence;
return 0;
}
int lima_sched_context_init(struct lima_sched_pipe *pipe,
struct lima_sched_context *context,
atomic_t *guilty)
{
struct drm_sched_rq *rq = pipe->base.sched_rq + DRM_SCHED_PRIORITY_NORMAL;
return drm_sched_entity_init(&context->base, &rq, 1, guilty);
}
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context)
{
drm_sched_entity_fini(&context->base);
}
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
struct lima_sched_task *task)
{
struct dma_fence *fence = dma_fence_get(&task->base.s_fence->finished);
drm_sched_entity_push_job(&task->base, &context->base);
return fence;
}
static struct dma_fence *lima_sched_dependency(struct drm_sched_job *job,
struct drm_sched_entity *entity)
{
struct lima_sched_task *task = to_lima_task(job);
int i;
for (i = 0; i < task->num_dep; i++) {
struct dma_fence *fence = task->dep[i];
if (!task->dep[i])
continue;
task->dep[i] = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
return NULL;
}
static struct dma_fence *lima_sched_run_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_fence *fence;
struct dma_fence *ret;
struct lima_vm *vm = NULL, *last_vm = NULL;
int i;
/* after GPU reset */
if (job->s_fence->finished.error < 0)
return NULL;
fence = lima_fence_create(pipe);
if (!fence)
return NULL;
task->fence = &fence->base;
/* hold an extra reference for the caller, otherwise the irq
 * handler may release the fence before the caller uses it
 */
ret = dma_fence_get(task->fence);
pipe->current_task = task;
/* this is needed for the MMU to work correctly, otherwise GP/PP
 * will hang or page fault for unknown reasons after running for
 * a while.
 *
 * Need to investigate:
 * 1. is it related to the TLB
 * 2. how much performance is affected by the L2 cache flush
 * 3. can we reduce the number of calls to this function, since
 * all GP/PP share the same L2 cache on mali400
 *
 * TODO:
 * 1. move this to task fini to save some wait time?
 * 2. when GP/PP use different L2 caches, does PP need to wait
 * for the GP L2 cache flush?
 */
for (i = 0; i < pipe->num_l2_cache; i++)
lima_l2_cache_flush(pipe->l2_cache[i]);
if (task->vm != pipe->current_vm) {
vm = lima_vm_get(task->vm);
last_vm = pipe->current_vm;
pipe->current_vm = task->vm;
}
if (pipe->bcast_mmu)
lima_mmu_switch_vm(pipe->bcast_mmu, vm);
else {
for (i = 0; i < pipe->num_mmu; i++)
lima_mmu_switch_vm(pipe->mmu[i], vm);
}
if (last_vm)
lima_vm_put(last_vm);
pipe->error = false;
pipe->task_run(pipe, task);
return task->fence;
}
static void lima_sched_handle_error_task(struct lima_sched_pipe *pipe,
struct lima_sched_task *task)
{
drm_sched_stop(&pipe->base);
if (task)
drm_sched_increase_karma(&task->base);
pipe->task_error(pipe);
if (pipe->bcast_mmu)
lima_mmu_page_fault_resume(pipe->bcast_mmu);
else {
int i;
for (i = 0; i < pipe->num_mmu; i++)
lima_mmu_page_fault_resume(pipe->mmu[i]);
}
if (pipe->current_vm)
lima_vm_put(pipe->current_vm);
pipe->current_vm = NULL;
pipe->current_task = NULL;
drm_sched_resubmit_jobs(&pipe->base);
drm_sched_start(&pipe->base, true);
}
static void lima_sched_timedout_job(struct drm_sched_job *job)
{
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_sched_task *task = to_lima_task(job);
DRM_ERROR("lima job timeout\n");
lima_sched_handle_error_task(pipe, task);
}
static void lima_sched_free_job(struct drm_sched_job *job)
{
struct lima_sched_task *task = to_lima_task(job);
struct lima_sched_pipe *pipe = to_lima_pipe(job->sched);
struct lima_vm *vm = task->vm;
struct lima_bo **bos = task->bos;
int i;
dma_fence_put(task->fence);
for (i = 0; i < task->num_bos; i++)
lima_vm_bo_del(vm, bos[i]);
lima_sched_task_fini(task);
kmem_cache_free(pipe->task_slab, task);
}
const struct drm_sched_backend_ops lima_sched_ops = {
.dependency = lima_sched_dependency,
.run_job = lima_sched_run_job,
.timedout_job = lima_sched_timedout_job,
.free_job = lima_sched_free_job,
};
static void lima_sched_error_work(struct work_struct *work)
{
struct lima_sched_pipe *pipe =
container_of(work, struct lima_sched_pipe, error_work);
struct lima_sched_task *task = pipe->current_task;
lima_sched_handle_error_task(pipe, task);
}
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name)
{
long timeout;
if (lima_sched_timeout_ms <= 0)
timeout = MAX_SCHEDULE_TIMEOUT;
else
timeout = msecs_to_jiffies(lima_sched_timeout_ms);
pipe->fence_context = dma_fence_context_alloc(1);
spin_lock_init(&pipe->fence_lock);
INIT_WORK(&pipe->error_work, lima_sched_error_work);
return drm_sched_init(&pipe->base, &lima_sched_ops, 1, 0, timeout, name);
}
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe)
{
drm_sched_fini(&pipe->base);
}
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe)
{
if (pipe->error)
schedule_work(&pipe->error_work);
else {
struct lima_sched_task *task = pipe->current_task;
pipe->task_fini(pipe);
dma_fence_signal(task->fence);
}
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_SCHED_H__
#define __LIMA_SCHED_H__
#include <drm/gpu_scheduler.h>
struct lima_vm;
struct lima_sched_task {
struct drm_sched_job base;
struct lima_vm *vm;
void *frame;
struct dma_fence **dep;
int num_dep;
int max_dep;
struct lima_bo **bos;
int num_bos;
/* pipe fence */
struct dma_fence *fence;
};
struct lima_sched_context {
struct drm_sched_entity base;
};
#define LIMA_SCHED_PIPE_MAX_MMU 8
#define LIMA_SCHED_PIPE_MAX_L2_CACHE 2
#define LIMA_SCHED_PIPE_MAX_PROCESSOR 8
struct lima_ip;
struct lima_sched_pipe {
struct drm_gpu_scheduler base;
u64 fence_context;
u32 fence_seqno;
spinlock_t fence_lock;
struct lima_sched_task *current_task;
struct lima_vm *current_vm;
struct lima_ip *mmu[LIMA_SCHED_PIPE_MAX_MMU];
int num_mmu;
struct lima_ip *l2_cache[LIMA_SCHED_PIPE_MAX_L2_CACHE];
int num_l2_cache;
struct lima_ip *processor[LIMA_SCHED_PIPE_MAX_PROCESSOR];
int num_processor;
struct lima_ip *bcast_processor;
struct lima_ip *bcast_mmu;
u32 done;
bool error;
atomic_t task;
int frame_size;
struct kmem_cache *task_slab;
int (*task_validate)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
void (*task_run)(struct lima_sched_pipe *pipe, struct lima_sched_task *task);
void (*task_fini)(struct lima_sched_pipe *pipe);
void (*task_error)(struct lima_sched_pipe *pipe);
void (*task_mmu_error)(struct lima_sched_pipe *pipe);
struct work_struct error_work;
};
int lima_sched_task_init(struct lima_sched_task *task,
struct lima_sched_context *context,
struct lima_bo **bos, int num_bos,
struct lima_vm *vm);
void lima_sched_task_fini(struct lima_sched_task *task);
int lima_sched_task_add_dep(struct lima_sched_task *task, struct dma_fence *fence);
int lima_sched_context_init(struct lima_sched_pipe *pipe,
struct lima_sched_context *context,
atomic_t *guilty);
void lima_sched_context_fini(struct lima_sched_pipe *pipe,
struct lima_sched_context *context);
struct dma_fence *lima_sched_context_queue_task(struct lima_sched_context *context,
struct lima_sched_task *task);
int lima_sched_pipe_init(struct lima_sched_pipe *pipe, const char *name);
void lima_sched_pipe_fini(struct lima_sched_pipe *pipe);
void lima_sched_pipe_task_done(struct lima_sched_pipe *pipe);
static inline void lima_sched_pipe_mmu_error(struct lima_sched_pipe *pipe)
{
pipe->error = true;
pipe->task_mmu_error(pipe);
}
int lima_sched_slab_init(void);
void lima_sched_slab_fini(void);
#endif
// SPDX-License-Identifier: GPL-2.0 OR MIT
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include "lima_device.h"
#include "lima_vm.h"
#include "lima_object.h"
#include "lima_regs.h"
struct lima_bo_va {
struct list_head list;
unsigned int ref_count;
struct drm_mm_node node;
struct lima_vm *vm;
};
#define LIMA_VM_PD_SHIFT 22
#define LIMA_VM_PT_SHIFT 12
#define LIMA_VM_PB_SHIFT (LIMA_VM_PD_SHIFT + LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_BT_SHIFT LIMA_VM_PT_SHIFT
#define LIMA_VM_PT_MASK ((1 << LIMA_VM_PD_SHIFT) - 1)
#define LIMA_VM_BT_MASK ((1 << LIMA_VM_PB_SHIFT) - 1)
#define LIMA_PDE(va) (va >> LIMA_VM_PD_SHIFT)
#define LIMA_PTE(va) ((va & LIMA_VM_PT_MASK) >> LIMA_VM_PT_SHIFT)
#define LIMA_PBE(va) (va >> LIMA_VM_PB_SHIFT)
#define LIMA_BTE(va) ((va & LIMA_VM_BT_MASK) >> LIMA_VM_BT_SHIFT)
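/*
 * Worked example of the address split above (editor's illustration,
 * not from the original code): the 4 GiB GPU VA space uses a 1024-entry
 * page directory whose page tables are allocated in blocks of 8 (a "BT").
 * For va = 0x12345000:
 *
 *	LIMA_PDE(va) = 0x12345000 >> 22       = 0x048  (page directory entry)
 *	LIMA_PTE(va) = (va & 0x3fffff) >> 12  = 0x345  (entry in that PT)
 *	LIMA_PBE(va) = 0x12345000 >> 25       = 0x9    (which 8-PT block)
 *	LIMA_BTE(va) = (va & 0x1ffffff) >> 12 = 0x345  (entry within the block)
 *
 * so LIMA_PBE(va) == LIMA_PDE(va) >> 3, and one block covers
 * 8 * 1024 = 8192 PTEs of 4 KiB pages (32 MiB of VA).
 */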
static void lima_vm_unmap_page_table(struct lima_vm *vm, u32 start, u32 end)
{
u32 addr;
for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
u32 pbe = LIMA_PBE(addr);
u32 bte = LIMA_BTE(addr);
vm->bts[pbe].cpu[bte] = 0;
}
}
static int lima_vm_map_page_table(struct lima_vm *vm, dma_addr_t *dma,
u32 start, u32 end)
{
u64 addr;
int i = 0;
for (addr = start; addr <= end; addr += LIMA_PAGE_SIZE) {
u32 pbe = LIMA_PBE(addr);
u32 bte = LIMA_BTE(addr);
if (!vm->bts[pbe].cpu) {
dma_addr_t pts;
u32 *pd;
int j;
vm->bts[pbe].cpu = dma_alloc_wc(
vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
&vm->bts[pbe].dma, GFP_KERNEL | __GFP_ZERO);
if (!vm->bts[pbe].cpu) {
if (addr != start)
lima_vm_unmap_page_table(vm, start, addr - 1);
return -ENOMEM;
}
pts = vm->bts[pbe].dma;
pd = vm->pd.cpu + (pbe << LIMA_VM_NUM_PT_PER_BT_SHIFT);
for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
pd[j] = pts | LIMA_VM_FLAG_PRESENT;
pts += LIMA_PAGE_SIZE;
}
}
vm->bts[pbe].cpu[bte] = dma[i++] | LIMA_VM_FLAGS_CACHE;
}
return 0;
}
static struct lima_bo_va *
lima_vm_bo_find(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va, *ret = NULL;
list_for_each_entry(bo_va, &bo->va, list) {
if (bo_va->vm == vm) {
ret = bo_va;
break;
}
}
return ret;
}
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create)
{
struct lima_bo_va *bo_va;
int err;
mutex_lock(&bo->lock);
bo_va = lima_vm_bo_find(vm, bo);
if (bo_va) {
bo_va->ref_count++;
mutex_unlock(&bo->lock);
return 0;
}
/* don't create a new bo_va unless the caller asked for one */
if (!create) {
mutex_unlock(&bo->lock);
return -ENOENT;
}
bo_va = kzalloc(sizeof(*bo_va), GFP_KERNEL);
if (!bo_va) {
err = -ENOMEM;
goto err_out0;
}
bo_va->vm = vm;
bo_va->ref_count = 1;
mutex_lock(&vm->lock);
err = drm_mm_insert_node(&vm->mm, &bo_va->node, bo->gem.size);
if (err)
goto err_out1;
err = lima_vm_map_page_table(vm, bo->pages_dma_addr, bo_va->node.start,
bo_va->node.start + bo_va->node.size - 1);
if (err)
goto err_out2;
mutex_unlock(&vm->lock);
list_add_tail(&bo_va->list, &bo->va);
mutex_unlock(&bo->lock);
return 0;
err_out2:
drm_mm_remove_node(&bo_va->node);
err_out1:
mutex_unlock(&vm->lock);
kfree(bo_va);
err_out0:
mutex_unlock(&bo->lock);
return err;
}
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va;
mutex_lock(&bo->lock);
bo_va = lima_vm_bo_find(vm, bo);
if (--bo_va->ref_count > 0) {
mutex_unlock(&bo->lock);
return;
}
mutex_lock(&vm->lock);
lima_vm_unmap_page_table(vm, bo_va->node.start,
bo_va->node.start + bo_va->node.size - 1);
drm_mm_remove_node(&bo_va->node);
mutex_unlock(&vm->lock);
list_del(&bo_va->list);
mutex_unlock(&bo->lock);
kfree(bo_va);
}
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo)
{
struct lima_bo_va *bo_va;
u32 ret;
mutex_lock(&bo->lock);
bo_va = lima_vm_bo_find(vm, bo);
ret = bo_va->node.start;
mutex_unlock(&bo->lock);
return ret;
}
struct lima_vm *lima_vm_create(struct lima_device *dev)
{
struct lima_vm *vm;
vm = kzalloc(sizeof(*vm), GFP_KERNEL);
if (!vm)
return NULL;
vm->dev = dev;
mutex_init(&vm->lock);
kref_init(&vm->refcount);
vm->pd.cpu = dma_alloc_wc(dev->dev, LIMA_PAGE_SIZE, &vm->pd.dma,
GFP_KERNEL | __GFP_ZERO);
if (!vm->pd.cpu)
goto err_out0;
if (dev->dlbu_cpu) {
int err = lima_vm_map_page_table(
vm, &dev->dlbu_dma, LIMA_VA_RESERVE_DLBU,
LIMA_VA_RESERVE_DLBU + LIMA_PAGE_SIZE - 1);
if (err)
goto err_out1;
}
drm_mm_init(&vm->mm, dev->va_start, dev->va_end - dev->va_start);
return vm;
err_out1:
dma_free_wc(dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
err_out0:
kfree(vm);
return NULL;
}
void lima_vm_release(struct kref *kref)
{
struct lima_vm *vm = container_of(kref, struct lima_vm, refcount);
int i;
drm_mm_takedown(&vm->mm);
for (i = 0; i < LIMA_VM_NUM_BT; i++) {
if (vm->bts[i].cpu)
dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE << LIMA_VM_NUM_PT_PER_BT_SHIFT,
vm->bts[i].cpu, vm->bts[i].dma);
}
if (vm->pd.cpu)
dma_free_wc(vm->dev->dev, LIMA_PAGE_SIZE, vm->pd.cpu, vm->pd.dma);
kfree(vm);
}
void lima_vm_print(struct lima_vm *vm)
{
int i, j, k;
u32 *pd, *pt;
if (!vm->pd.cpu)
return;
pd = vm->pd.cpu;
for (i = 0; i < LIMA_VM_NUM_BT; i++) {
if (!vm->bts[i].cpu)
continue;
pt = vm->bts[i].cpu;
for (j = 0; j < LIMA_VM_NUM_PT_PER_BT; j++) {
int idx = (i << LIMA_VM_NUM_PT_PER_BT_SHIFT) + j;
printk(KERN_INFO "lima vm pd %03x:%08x\n", idx, pd[idx]);
for (k = 0; k < LIMA_PAGE_ENT_NUM; k++) {
u32 pte = *pt++;
if (pte)
printk(KERN_INFO " pt %03x:%08x\n", k, pte);
}
}
}
}
/* SPDX-License-Identifier: GPL-2.0 OR MIT */
/* Copyright 2017-2019 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_VM_H__
#define __LIMA_VM_H__
#include <drm/drm_mm.h>
#include <linux/kref.h>
#define LIMA_PAGE_SIZE 4096
#define LIMA_PAGE_MASK (LIMA_PAGE_SIZE - 1)
#define LIMA_PAGE_ENT_NUM (LIMA_PAGE_SIZE / sizeof(u32))
#define LIMA_VM_NUM_PT_PER_BT_SHIFT 3
#define LIMA_VM_NUM_PT_PER_BT (1 << LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VM_NUM_BT (LIMA_PAGE_ENT_NUM >> LIMA_VM_NUM_PT_PER_BT_SHIFT)
#define LIMA_VA_RESERVE_START 0xFFF00000
#define LIMA_VA_RESERVE_DLBU LIMA_VA_RESERVE_START
#define LIMA_VA_RESERVE_END 0x100000000
struct lima_device;
struct lima_vm_page {
u32 *cpu;
dma_addr_t dma;
};
struct lima_vm {
struct mutex lock;
struct kref refcount;
struct drm_mm mm;
struct lima_device *dev;
struct lima_vm_page pd;
struct lima_vm_page bts[LIMA_VM_NUM_BT];
};
int lima_vm_bo_add(struct lima_vm *vm, struct lima_bo *bo, bool create);
void lima_vm_bo_del(struct lima_vm *vm, struct lima_bo *bo);
u32 lima_vm_get_va(struct lima_vm *vm, struct lima_bo *bo);
struct lima_vm *lima_vm_create(struct lima_device *dev);
void lima_vm_release(struct kref *kref);
static inline struct lima_vm *lima_vm_get(struct lima_vm *vm)
{
kref_get(&vm->refcount);
return vm;
}
static inline void lima_vm_put(struct lima_vm *vm)
{
kref_put(&vm->refcount, lima_vm_release);
}
void lima_vm_print(struct lima_vm *vm);
#endif
/* SPDX-License-Identifier: (GPL-2.0 WITH Linux-syscall-note) OR MIT */
/* Copyright 2017-2018 Qiang Yu <yuq825@gmail.com> */
#ifndef __LIMA_DRM_H__
#define __LIMA_DRM_H__
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
enum drm_lima_param_gpu_id {
DRM_LIMA_PARAM_GPU_ID_UNKNOWN,
DRM_LIMA_PARAM_GPU_ID_MALI400,
DRM_LIMA_PARAM_GPU_ID_MALI450,
};
enum drm_lima_param {
DRM_LIMA_PARAM_GPU_ID,
DRM_LIMA_PARAM_NUM_PP,
DRM_LIMA_PARAM_GP_VERSION,
DRM_LIMA_PARAM_PP_VERSION,
};
/**
* get various information about the GPU
*/
struct drm_lima_get_param {
__u32 param; /* in, value in enum drm_lima_param */
__u32 pad; /* pad, must be zero */
__u64 value; /* out, parameter value */
};
/**
* create a buffer for use by the GPU
*/
struct drm_lima_gem_create {
__u32 size; /* in, buffer size */
__u32 flags; /* in, currently no flags, must be zero */
__u32 handle; /* out, GEM buffer handle */
__u32 pad; /* pad, must be zero */
};
/**
* get information of a buffer
*/
struct drm_lima_gem_info {
__u32 handle; /* in, GEM buffer handle */
__u32 va; /* out, virtual address mapped into GPU MMU */
__u64 offset; /* out, used to mmap this buffer to CPU */
};
#define LIMA_SUBMIT_BO_READ 0x01
#define LIMA_SUBMIT_BO_WRITE 0x02
/* buffer information used by one task */
struct drm_lima_gem_submit_bo {
__u32 handle; /* in, GEM buffer handle */
__u32 flags; /* in, buffer read/write by GPU */
};
#define LIMA_GP_FRAME_REG_NUM 6
/* frame used to set up the GP for each task */
struct drm_lima_gp_frame {
__u32 frame[LIMA_GP_FRAME_REG_NUM];
};
#define LIMA_PP_FRAME_REG_NUM 23
#define LIMA_PP_WB_REG_NUM 12
/* frame used to set up the mali400 GPU PP for each task */
struct drm_lima_m400_pp_frame {
__u32 frame[LIMA_PP_FRAME_REG_NUM];
__u32 num_pp;
__u32 wb[3 * LIMA_PP_WB_REG_NUM];
__u32 plbu_array_address[4];
__u32 fragment_stack_address[4];
};
/* frame used to set up the mali450 GPU PP for each task */
struct drm_lima_m450_pp_frame {
__u32 frame[LIMA_PP_FRAME_REG_NUM];
__u32 num_pp;
__u32 wb[3 * LIMA_PP_WB_REG_NUM];
__u32 use_dlbu;
__u32 _pad;
union {
__u32 plbu_array_address[8];
__u32 dlbu_regs[4];
};
__u32 fragment_stack_address[8];
};
#define LIMA_PIPE_GP 0x00
#define LIMA_PIPE_PP 0x01
#define LIMA_SUBMIT_FLAG_EXPLICIT_FENCE (1 << 0)
/**
* submit a task to the GPU
*
* Userspace can always merge multiple sync_files and drm_syncobjs
* into one drm_syncobj as in_sync[0], but we reserve in_sync[1]
* for another task's out_sync so that explicit sync needs no
* export/import/merge pass.
*/
struct drm_lima_gem_submit {
__u32 ctx; /* in, context handle task is submitted to */
__u32 pipe; /* in, which pipe to use, GP/PP */
__u32 nr_bos; /* in, array length of bos field */
__u32 frame_size; /* in, size of frame field */
__u64 bos; /* in, array of drm_lima_gem_submit_bo */
__u64 frame; /* in, GP/PP frame */
__u32 flags; /* in, submit flags */
__u32 out_sync; /* in, drm_syncobj handle used to wait for task finish after submission */
__u32 in_sync[2]; /* in, drm_syncobj handles to wait on before starting this task */
};
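/*
 * Illustrative explicit-sync flow (editor's sketch, not part of the
 * uapi; fd, syncobj_a and merged_syncobj are hypothetical variables):
 * chain task B after task A without an export/import/merge pass.
 *
 *	struct drm_lima_gem_submit a = { ... };
 *	a.flags = LIMA_SUBMIT_FLAG_EXPLICIT_FENCE;
 *	a.out_sync = syncobj_a;             (signaled when A finishes)
 *	ioctl(fd, DRM_IOCTL_LIMA_GEM_SUBMIT, &a);
 *
 *	struct drm_lima_gem_submit b = { ... };
 *	b.flags = LIMA_SUBMIT_FLAG_EXPLICIT_FENCE;
 *	b.in_sync[0] = merged_syncobj;      (app-merged external fences)
 *	b.in_sync[1] = syncobj_a;           (A's out_sync, used directly)
 *	ioctl(fd, DRM_IOCTL_LIMA_GEM_SUBMIT, &b);
 */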
#define LIMA_GEM_WAIT_READ 0x01
#define LIMA_GEM_WAIT_WRITE 0x02
/**
* wait for pending GPU tasks on a buffer to finish
*/
struct drm_lima_gem_wait {
__u32 handle; /* in, GEM buffer handle */
__u32 op; /* in, CPU want to read/write this buffer */
__s64 timeout_ns; /* in, wait timeout in absolute time */
};
/**
* create a context
*/
struct drm_lima_ctx_create {
__u32 id; /* out, context handle */
__u32 _pad; /* pad, must be zero */
};
/**
* free a context
*/
struct drm_lima_ctx_free {
__u32 id; /* in, context handle */
__u32 _pad; /* pad, must be zero */
};
#define DRM_LIMA_GET_PARAM 0x00
#define DRM_LIMA_GEM_CREATE 0x01
#define DRM_LIMA_GEM_INFO 0x02
#define DRM_LIMA_GEM_SUBMIT 0x03
#define DRM_LIMA_GEM_WAIT 0x04
#define DRM_LIMA_CTX_CREATE 0x05
#define DRM_LIMA_CTX_FREE 0x06
#define DRM_IOCTL_LIMA_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GET_PARAM, struct drm_lima_get_param)
#define DRM_IOCTL_LIMA_GEM_CREATE DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_CREATE, struct drm_lima_gem_create)
#define DRM_IOCTL_LIMA_GEM_INFO DRM_IOWR(DRM_COMMAND_BASE + DRM_LIMA_GEM_INFO, struct drm_lima_gem_info)
#define DRM_IOCTL_LIMA_GEM_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_SUBMIT, struct drm_lima_gem_submit)
#define DRM_IOCTL_LIMA_GEM_WAIT DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_GEM_WAIT, struct drm_lima_gem_wait)
#define DRM_IOCTL_LIMA_CTX_CREATE DRM_IOR(DRM_COMMAND_BASE + DRM_LIMA_CTX_CREATE, struct drm_lima_ctx_create)
#define DRM_IOCTL_LIMA_CTX_FREE DRM_IOW(DRM_COMMAND_BASE + DRM_LIMA_CTX_FREE, struct drm_lima_ctx_free)
#if defined(__cplusplus)
}
#endif
#endif /* __LIMA_DRM_H__ */
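For reference, a minimal userspace sketch against the ioctls above (an editor's illustration, not part of the driver: the render node path /dev/dri/renderD128, the hard-coded 4096-byte size, and the minimal error handling are all assumptions):

/* hypothetical usage sketch of the lima uapi above; not part of the driver */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <unistd.h>
#include "lima_drm.h"

int main(void)
{
	/* assumption: lima is exposed as the first render node */
	int fd = open("/dev/dri/renderD128", O_RDWR);
	if (fd < 0)
		return 1;

	/* query the GPU model via the get_param ioctl */
	struct drm_lima_get_param gp;
	memset(&gp, 0, sizeof(gp));
	gp.param = DRM_LIMA_PARAM_GPU_ID;
	if (ioctl(fd, DRM_IOCTL_LIMA_GET_PARAM, &gp) == 0)
		printf("gpu id: %llu\n", (unsigned long long)gp.value);

	/* allocate a page-sized BO; the kernel also maps it into the GPU VM */
	struct drm_lima_gem_create create;
	memset(&create, 0, sizeof(create));
	create.size = 4096;
	if (ioctl(fd, DRM_IOCTL_LIMA_GEM_CREATE, &create))
		return 1;

	/* fetch its GPU VA and the fake offset used to mmap it to the CPU */
	struct drm_lima_gem_info info;
	memset(&info, 0, sizeof(info));
	info.handle = create.handle;
	if (ioctl(fd, DRM_IOCTL_LIMA_GEM_INFO, &info))
		return 1;
	printf("gpu va: 0x%x\n", info.va);

	void *map = mmap(NULL, 4096, PROT_READ | PROT_WRITE,
			 MAP_SHARED, fd, info.offset);
	if (map != MAP_FAILED)
		munmap(map, 4096);

	close(fd);
	return 0;
}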