Commit f3924ae7 authored by Dave Airlie

Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

Changes this time mostly come down to:
- hook up the DRM GPU scheduler
- prep work for GC7000L support, to be completed in the next cycle

* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux: (22 commits)
  drm/etnaviv: bump HW job limit to 4
  drm/etnaviv: etnaviv_sched: Staticize functions when possible
  drm/etnaviv: add PTA handling to MMUv2
  drm/etnaviv: add function to load the initial PTA state
  drm/etnaviv: handle security states
  drm/etnaviv: add security handling mode enum
  drm/etnaviv: add hardware database
  drm/etnaviv: add more minor features fields
  drm/etnaviv: update hardware headers from rnndb
  drm/etnaviv: add support for slave interface clock
  drm/etnaviv: split out and optimize MMU fault dumping
  drm/etnaviv: remove the need for a gpu-subsystem DT node
  dt-bindings: etnaviv: add slave interface clock
  drm/etnaviv: use correct format specifier for size_t
  drm/etnaviv: replace hangcheck with scheduler timeout
  drm/etnaviv: lock BOs after all other submit work is done
  drm/etnaviv: move dependency handling to scheduler
  drm/etnaviv: hook up DRM GPU scheduler
  drm/etnaviv: track fences by IDR instead of seqno
  drm/etnaviv: add missing major features field to debugfs
  ...
parents 0c5286a8 4ed75c3e
Etnaviv DRM master device
=========================
The Etnaviv DRM master device is a virtual device needed to list all
Vivante GPU cores that comprise the GPU subsystem.
Required properties:
- compatible: Should be one of
    "fsl,imx-gpu-subsystem"
    "marvell,dove-gpu-subsystem"
- cores: Should contain a list of phandles pointing to Vivante GPU devices
example:

gpu-subsystem {
	compatible = "fsl,imx-gpu-subsystem";
	cores = <&gpu_2d>, <&gpu_3d>;
};
Vivante GPU core devices
========================
@@ -32,7 +12,9 @@ Required properties:
- clocks: should contain one clock for each entry in clock-names
see Documentation/devicetree/bindings/clock/clock-bindings.txt
- clock-names:
- "bus": AXI/register clock
- "bus": AXI/master interface clock
- "reg": AHB/slave interface clock
(only required if GPU can gate slave interface independently)
- "core": GPU core clock
- "shader": Shader clock (only required if GPU has feature PIPE_3D)
......
@@ -11,6 +11,7 @@ config DRM_ETNAVIV
select WANT_DEV_COREDUMP
select CMA if HAVE_DMA_CONTIGUOUS
select DMA_CMA if HAVE_DMA_CONTIGUOUS
select DRM_SCHED
help
DRM driver for Vivante GPUs.
......
@@ -9,9 +9,11 @@ etnaviv-y := \
etnaviv_gem_submit.o \
etnaviv_gem.o \
etnaviv_gpu.o \
etnaviv_hwdb.o \
etnaviv_iommu_v2.o \
etnaviv_iommu.o \
etnaviv_mmu.o \
etnaviv_perfmon.o
etnaviv_perfmon.o \
etnaviv_sched.o
obj-$(CONFIG_DRM_ETNAVIV) += etnaviv.o
@@ -215,6 +215,24 @@ u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe
return buffer->user_size / 8;
}
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
lockdep_assert_held(&gpu->lock);
buffer->user_size = 0;
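/* Queue a LOAD_STATE of VIVS_MMUv2_PTA_CONFIG so the FE triggers a
 * load of PTA entry 0; the value returned below is the prefetch length
 * in 64-bit words for etnaviv_gpu_start_fe(). */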
CMD_LOAD_STATE(buffer, VIVS_MMUv2_PTA_CONFIG,
VIVS_MMUv2_PTA_CONFIG_INDEX(0));
CMD_END(buffer);
buffer->user_size = ALIGN(buffer->user_size, 8);
return buffer->user_size / 8;
}
void etnaviv_buffer_end(struct etnaviv_gpu *gpu)
{
struct etnaviv_cmdbuf *buffer = &gpu->buffer;
......
@@ -101,12 +101,25 @@ static void load_gpu(struct drm_device *dev)
static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
{
struct etnaviv_drm_private *priv = dev->dev_private;
struct etnaviv_file_private *ctx;
int i;
ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
if (!ctx)
return -ENOMEM;
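/* One scheduler entity per GPU pipe; all submits from this file
 * descriptor are pushed through these entities in order. */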
for (i = 0; i < ETNA_MAX_PIPES; i++) {
struct etnaviv_gpu *gpu = priv->gpu[i];
if (gpu) {
drm_sched_entity_init(&gpu->sched,
&ctx->sched_entity[i],
&gpu->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL],
32, NULL);
}
}
file->driver_priv = ctx;
return 0;
@@ -126,6 +139,9 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
if (gpu->lastctx == ctx)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
drm_sched_entity_fini(&gpu->sched,
&ctx->sched_entity[i]);
}
}
@@ -637,25 +653,21 @@ static int compare_str(struct device *dev, void *data)
static int etnaviv_pdev_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct device_node *node = dev->of_node;
struct component_match *match = NULL;
dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));
if (node) {
if (!dev->platform_data) {
struct device_node *core_node;
int i;
for (i = 0; ; i++) {
core_node = of_parse_phandle(node, "cores", i);
if (!core_node)
break;
for_each_compatible_node(core_node, NULL, "vivante,gc") {
if (!of_device_is_available(core_node))
continue;
drm_of_component_match_add(&pdev->dev, &match,
compare_of, core_node);
of_node_put(core_node);
}
} else if (dev->platform_data) {
} else {
char **names = dev->platform_data;
unsigned i;
@@ -673,25 +685,18 @@ static int etnaviv_pdev_remove(struct platform_device *pdev)
return 0;
}
static const struct of_device_id dt_match[] = {
{ .compatible = "fsl,imx-gpu-subsystem" },
{ .compatible = "marvell,dove-gpu-subsystem" },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
static struct platform_driver etnaviv_platform_driver = {
.probe = etnaviv_pdev_probe,
.remove = etnaviv_pdev_remove,
.driver = {
.name = "etnaviv",
.of_match_table = dt_match,
},
};
static int __init etnaviv_init(void)
{
int ret;
struct device_node *np;
etnaviv_validate_init();
@@ -703,6 +708,19 @@ static int __init etnaviv_init(void)
if (ret != 0)
platform_driver_unregister(&etnaviv_gpu_driver);
/*
* If the DT contains at least one available GPU device, instantiate
* the DRM platform device.
*/
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
platform_device_register_simple("etnaviv", -1, NULL, 0);
of_node_put(np);
break;
}
return ret;
}
module_init(etnaviv_init);
......
@@ -34,6 +34,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_gem.h>
#include <drm/etnaviv_drm.h>
#include <drm/gpu_scheduler.h>
struct etnaviv_cmdbuf;
struct etnaviv_gpu;
@@ -42,11 +43,11 @@ struct etnaviv_gem_object;
struct etnaviv_gem_submit;
struct etnaviv_file_private {
/* currently we don't do anything useful with this.. but when
* per-context address spaces are supported we'd keep track of
/*
* When per-context address spaces are supported we'd keep track of
* the context's page-tables here.
*/
int dummy;
struct drm_sched_entity sched_entity[ETNA_MAX_PIPES];
};
struct etnaviv_drm_private {
@@ -85,6 +86,7 @@ int etnaviv_gem_new_userptr(struct drm_device *dev, struct drm_file *file,
uintptr_t ptr, u32 size, u32 flags, u32 *handle);
u16 etnaviv_buffer_init(struct etnaviv_gpu *gpu);
u16 etnaviv_buffer_config_mmuv2(struct etnaviv_gpu *gpu, u32 mtlb_addr, u32 safe_addr);
u16 etnaviv_buffer_config_pta(struct etnaviv_gpu *gpu);
void etnaviv_buffer_end(struct etnaviv_gpu *gpu);
void etnaviv_sync_point_queue(struct etnaviv_gpu *gpu, unsigned int event);
void etnaviv_buffer_queue(struct etnaviv_gpu *gpu, u32 exec_state,
......
@@ -20,9 +20,13 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_mmu.h"
#include "etnaviv_sched.h"
#include "state.xml.h"
#include "state_hi.xml.h"
static bool etnaviv_dump_core = true;
module_param_named(dump_core, etnaviv_dump_core, bool, 0600);
struct core_dump_iterator {
void *start;
struct etnaviv_dump_object_header *hdr;
@@ -121,10 +125,16 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
struct etnaviv_vram_mapping *vram;
struct etnaviv_gem_object *obj;
struct etnaviv_gem_submit *submit;
struct drm_sched_job *s_job;
unsigned int n_obj, n_bomap_pages;
size_t file_size, mmu_size;
__le64 *bomap, *bomap_start;
/* Only catch the first event, or when manually re-armed */
if (!etnaviv_dump_core)
return;
etnaviv_dump_core = false;
mmu_size = etnaviv_iommu_dump_size(gpu->mmu);
/* We always dump registers, mmu, ring and end marker */
@@ -135,10 +145,13 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
mmu_size + gpu->buffer.size;
/* Add in the active command buffers */
list_for_each_entry(submit, &gpu->active_submit_list, node) {
spin_lock(&gpu->sched.job_list_lock);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
file_size += submit->cmdbuf.size;
n_obj++;
}
spin_unlock(&gpu->sched.job_list_lock);
/* Add in the active buffer objects */
list_for_each_entry(vram, &gpu->mmu->mappings, mmu_node) {
@@ -180,10 +193,14 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
gpu->buffer.size,
etnaviv_cmdbuf_get_va(&gpu->buffer));
list_for_each_entry(submit, &gpu->active_submit_list, node)
spin_lock(&gpu->sched.job_list_lock);
list_for_each_entry(s_job, &gpu->sched.ring_mirror_list, node) {
submit = to_etnaviv_submit(s_job);
etnaviv_core_dump_mem(&iter, ETDUMP_BUF_CMD,
submit->cmdbuf.vaddr, submit->cmdbuf.size,
etnaviv_cmdbuf_get_va(&submit->cmdbuf));
}
spin_unlock(&gpu->sched.job_list_lock);
/* Reserve space for the bomap */
if (n_bomap_pages) {
......
@@ -94,6 +94,9 @@ struct etnaviv_gem_submit_bo {
u32 flags;
struct etnaviv_gem_object *obj;
struct etnaviv_vram_mapping *mapping;
struct dma_fence *excl;
unsigned int nr_shared;
struct dma_fence **shared;
};
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
@@ -101,9 +104,11 @@ struct etnaviv_gem_submit_bo {
* make it easier to unwind when things go wrong, etc).
*/
struct etnaviv_gem_submit {
struct drm_sched_job sched_job;
struct kref refcount;
struct etnaviv_gpu *gpu;
struct dma_fence *out_fence, *in_fence;
int out_fence_id;
struct list_head node; /* GPU active submit list */
struct etnaviv_cmdbuf cmdbuf;
bool runtime_resumed;
......
@@ -22,6 +22,7 @@
#include "etnaviv_gpu.h"
#include "etnaviv_gem.h"
#include "etnaviv_perfmon.h"
#include "etnaviv_sched.h"
/*
* Cmdstream submission:
@@ -169,29 +170,33 @@ static int submit_lock_objects(struct etnaviv_gem_submit *submit,
return ret;
}
static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
static int submit_fence_sync(struct etnaviv_gem_submit *submit)
{
unsigned int context = submit->gpu->fence_context;
int i, ret = 0;
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
struct reservation_object *robj = bo->obj->resv;
ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
explicit);
if (ret)
break;
}
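/* Collect the implicit fences here instead of waiting on them; they
 * are handed to the scheduler later as job dependencies. Writers must
 * wait for all previous fences, readers only for the last writer. */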
if (!(bo->flags & ETNA_SUBMIT_BO_WRITE)) {
ret = reservation_object_reserve_shared(robj);
if (ret)
return ret;
}
if (submit->flags & ETNA_SUBMIT_NO_IMPLICIT)
continue;
if (bo->flags & ETNA_SUBMIT_BO_WRITE) {
ret = reservation_object_get_fences_rcu(robj, &bo->excl,
&bo->nr_shared,
&bo->shared);
if (ret)
return ret;
} else {
bo->excl = reservation_object_get_excl_rcu(robj);
}
if (submit->flags & ETNA_SUBMIT_FENCE_FD_IN) {
/*
* Wait if the fence is from a foreign context, or if the fence
* array contains any fence from a foreign context.
*/
if (!dma_fence_match_context(submit->in_fence, context))
ret = dma_fence_wait(submit->in_fence, true);
}
return ret;
@@ -381,8 +386,13 @@ static void submit_cleanup(struct kref *kref)
if (submit->in_fence)
dma_fence_put(submit->in_fence);
if (submit->out_fence)
if (submit->out_fence) {
/* first remove from IDR, so the fence cannot be found anymore */
mutex_lock(&submit->gpu->fence_idr_lock);
idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
mutex_unlock(&submit->gpu->fence_idr_lock);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
kfree(submit);
}
@@ -395,6 +405,7 @@ void etnaviv_submit_put(struct etnaviv_gem_submit *submit)
int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct etnaviv_file_private *ctx = file->driver_priv;
struct etnaviv_drm_private *priv = dev->dev_private;
struct drm_etnaviv_gem_submit *args = data;
struct drm_etnaviv_gem_submit_reloc *relocs;
@@ -503,10 +514,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
if (ret)
goto err_submit_objects;
ret = submit_lock_objects(submit, &ticket);
if (ret)
goto err_submit_objects;
if (!etnaviv_cmd_validate_one(gpu, stream, args->stream_size / 4,
relocs, args->nr_relocs)) {
ret = -EINVAL;
@@ -521,10 +528,6 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
}
ret = submit_fence_sync(submit);
if (ret)
goto err_submit_objects;
ret = submit_pin_objects(submit);
if (ret)
goto err_submit_objects;
@@ -539,9 +542,16 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
goto err_submit_objects;
memcpy(submit->cmdbuf.vaddr, stream, args->stream_size);
submit->cmdbuf.user_size = ALIGN(args->stream_size, 8);
ret = etnaviv_gpu_submit(gpu, submit);
ret = submit_lock_objects(submit, &ticket);
if (ret)
goto err_submit_objects;
ret = submit_fence_sync(submit);
if (ret)
goto err_submit_objects;
ret = etnaviv_sched_push_job(&ctx->sched_entity[args->pipe], submit);
if (ret)
goto err_submit_objects;
@@ -563,7 +573,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
}
args->fence_fd = out_fence_fd;
args->fence = submit->out_fence->seqno;
args->fence = submit->out_fence_id;
err_submit_objects:
etnaviv_submit_put(submit);
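Userspace note: args->fence is now an opaque lookup id rather than a hardware
seqno. A hedged sketch of waiting on it through the existing wait-fence ioctl
(drmIoctl from libdrm, struct and ioctl from the etnaviv uapi header;
submit_args is assumed to be the struct drm_etnaviv_gem_submit used for the
preceding submit; error handling omitted):

struct drm_etnaviv_wait_fence wait = {
	.pipe = submit_args.pipe,	/* pipe the submit was made on */
	.fence = submit_args.fence,	/* id returned by the submit ioctl */
	.timeout = { .tv_sec = 1, .tv_nsec = 0 },
};
drmIoctl(fd, DRM_IOCTL_ETNAVIV_WAIT_FENCE, &wait);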
......
@@ -38,21 +38,17 @@ struct etnaviv_chip_identity {
/* Supported minor feature fields. */
u32 minor_features0;
/* Supported minor feature 1 fields. */
u32 minor_features1;
/* Supported minor feature 2 fields. */
u32 minor_features2;
/* Supported minor feature 3 fields. */
u32 minor_features3;
/* Supported minor feature 4 fields. */
u32 minor_features4;
/* Supported minor feature 5 fields. */
u32 minor_features5;
u32 minor_features6;
u32 minor_features7;
u32 minor_features8;
u32 minor_features9;
u32 minor_features10;
u32 minor_features11;
/* Number of streams supported. */
u32 stream_count;
@@ -88,6 +84,12 @@ struct etnaviv_chip_identity {
u8 varyings_count;
};
enum etnaviv_sec_mode {
ETNA_SEC_NONE = 0,
ETNA_SEC_KERNEL,
ETNA_SEC_TZ
};
struct etnaviv_event {
struct dma_fence *fence;
struct etnaviv_gem_submit *submit;
@@ -106,8 +108,10 @@ struct etnaviv_gpu {
struct device *dev;
struct mutex lock;
struct etnaviv_chip_identity identity;
enum etnaviv_sec_mode sec_mode;
struct etnaviv_file_private *lastctx;
struct workqueue_struct *wq;
struct drm_gpu_scheduler sched;
/* 'ring'-buffer: */
struct etnaviv_cmdbuf buffer;
@@ -122,23 +126,18 @@ struct etnaviv_gpu {
struct completion event_free;
spinlock_t event_spinlock;
/* list of currently in-flight command buffers */
struct list_head active_submit_list;
u32 idle_mask;
/* Fencing support */
struct mutex fence_idr_lock;
struct idr fence_idr;
u32 next_fence;
u32 active_fence;
u32 completed_fence;
u32 retired_fence;
wait_queue_head_t fence_event;
u64 fence_context;
spinlock_t fence_spinlock;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
/* worker for handling 'sync' points: */
struct work_struct sync_point_work;
int sync_point_event;
@@ -151,16 +150,10 @@ struct etnaviv_gpu {
/* Power Control: */
struct clk *clk_bus;
struct clk *clk_reg;
struct clk *clk_core;
struct clk *clk_shader;
/* Hang Detection: */
#define DRM_ETNAVIV_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_ETNAVIV_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_ETNAVIV_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
u32 hangcheck_fence;
u32 hangcheck_dma_addr;
struct work_struct recover_work;
unsigned int freq_scale;
unsigned long base_rate_core;
unsigned long base_rate_shader;
@@ -181,29 +174,22 @@ static inline bool fence_completed(struct etnaviv_gpu *gpu, u32 fence)
return fence_after_eq(gpu->completed_fence, fence);
}
static inline bool fence_retired(struct etnaviv_gpu *gpu, u32 fence)
{
return fence_after_eq(gpu->retired_fence, fence);
}
int etnaviv_gpu_get_param(struct etnaviv_gpu *gpu, u32 param, u64 *value);
int etnaviv_gpu_init(struct etnaviv_gpu *gpu);
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
#endif
int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
unsigned int context, bool exclusive, bool implicit);
void etnaviv_gpu_recover_hang(struct etnaviv_gpu *gpu);
void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
u32 fence, struct timespec *timeout);
int etnaviv_gpu_wait_obj_inactive(struct etnaviv_gpu *gpu,
struct etnaviv_gem_object *etnaviv_obj, struct timespec *timeout);
int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
struct etnaviv_gem_submit *submit);
struct dma_fence *etnaviv_gpu_submit(struct etnaviv_gem_submit *submit);
int etnaviv_gpu_pm_get_sync(struct etnaviv_gpu *gpu);
void etnaviv_gpu_pm_put(struct etnaviv_gpu *gpu);
int etnaviv_gpu_wait_idle(struct etnaviv_gpu *gpu, unsigned int timeout_ms);
......
/*
* Copyright (C) 2018 Etnaviv Project
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include "etnaviv_gpu.h"
static const struct etnaviv_chip_identity etnaviv_chip_identities[] = {
{
.model = 0x7000,
.revision = 0x6214,
.stream_count = 16,
.register_max = 64,
.thread_count = 1024,
.shader_core_count = 4,
.vertex_cache_size = 16,
.vertex_output_buffer_size = 1024,
.pixel_pipes = 2,
.instruction_count = 512,
.num_constants = 320,
.buffer_size = 0,
.varyings_count = 16,
.features = 0xe0287cad,
.minor_features0 = 0xc1799eff,
.minor_features1 = 0xfefbfad9,
.minor_features2 = 0xeb9d4fbf,
.minor_features3 = 0xedfffced,
.minor_features4 = 0xdb0dafc7,
.minor_features5 = 0xbb5ac333,
.minor_features6 = 0xfc8ee200,
.minor_features7 = 0x03fbfa6f,
.minor_features8 = 0x00ef0ef0,
.minor_features9 = 0x0edbf03c,
.minor_features10 = 0x90044250,
.minor_features11 = 0x00000024,
},
};
bool etnaviv_fill_identity_from_hwdb(struct etnaviv_gpu *gpu)
{
struct etnaviv_chip_identity *ident = &gpu->identity;
int i;
for (i = 0; i < ARRAY_SIZE(etnaviv_chip_identities); i++) {
if (etnaviv_chip_identities[i].model == ident->model &&
etnaviv_chip_identities[i].revision == ident->revision) {
memcpy(ident, &etnaviv_chip_identities[i],
sizeof(*ident));
return true;
}
}
return false;
}
@@ -158,7 +158,7 @@ void etnaviv_iommuv1_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MC_MMU_RA_PAGE_TABLE, pgtable);
}
const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv1_ops = {
.free = etnaviv_iommuv1_domain_free,
.map = etnaviv_iommuv1_map,
.unmap = etnaviv_iommuv1_unmap,
......
@@ -40,6 +40,9 @@
struct etnaviv_iommuv2_domain {
struct etnaviv_iommu_domain base;
/* P(age) T(able) A(rray) */
u64 *pta_cpu;
dma_addr_t pta_dma;
/* M(aster) TLB aka first level pagetable */
u32 *mtlb_cpu;
dma_addr_t mtlb_dma;
@@ -114,6 +117,15 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
for (i = 0; i < SZ_4K / 4; i++)
*p++ = 0xdead55aa;
etnaviv_domain->pta_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->pta_dma,
GFP_KERNEL);
if (!etnaviv_domain->pta_cpu) {
ret = -ENOMEM;
goto fail_mem;
}
etnaviv_domain->mtlb_cpu = dma_alloc_coherent(etnaviv_domain->base.dev,
SZ_4K,
&etnaviv_domain->mtlb_dma,
@@ -150,6 +162,11 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
if (etnaviv_domain->pta_cpu)
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->pta_cpu,
etnaviv_domain->pta_dma);
if (etnaviv_domain->mtlb_cpu)
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
@@ -175,6 +192,10 @@ static void etnaviv_iommuv2_domain_free(struct etnaviv_iommu_domain *domain)
etnaviv_domain->base.bad_page_cpu,
etnaviv_domain->base.bad_page_dma);
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->pta_cpu,
etnaviv_domain->pta_dma);
dma_free_coherent(etnaviv_domain->base.dev, SZ_4K,
etnaviv_domain->mtlb_cpu,
etnaviv_domain->mtlb_dma);
@@ -216,7 +237,7 @@ static void etnaviv_iommuv2_dump(struct etnaviv_iommu_domain *domain, void *buf)
memcpy(buf, etnaviv_domain->stlb_cpu[i], SZ_4K);
}
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
static void etnaviv_iommuv2_restore_nonsec(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
@@ -236,7 +257,60 @@ void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
gpu_write(gpu, VIVS_MMUv2_CONTROL, VIVS_MMUv2_CONTROL_ENABLE);
}
const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
static void etnaviv_iommuv2_restore_sec(struct etnaviv_gpu *gpu)
{
struct etnaviv_iommuv2_domain *etnaviv_domain =
to_etnaviv_domain(gpu->mmu->domain);
u16 prefetch;
/* If the MMU is already enabled the state is still there. */
if (gpu_read(gpu, VIVS_MMUv2_SEC_CONTROL) & VIVS_MMUv2_SEC_CONTROL_ENABLE)
return;
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_LOW,
lower_32_bits(etnaviv_domain->pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_ADDRESS_HIGH,
upper_32_bits(etnaviv_domain->pta_dma));
gpu_write(gpu, VIVS_MMUv2_PTA_CONTROL, VIVS_MMUv2_PTA_CONTROL_ENABLE);
gpu_write(gpu, VIVS_MMUv2_NONSEC_SAFE_ADDR_LOW,
lower_32_bits(etnaviv_domain->base.bad_page_dma));
gpu_write(gpu, VIVS_MMUv2_SEC_SAFE_ADDR_LOW,
lower_32_bits(etnaviv_domain->base.bad_page_dma));
gpu_write(gpu, VIVS_MMUv2_SAFE_ADDRESS_CONFIG,
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_NON_SEC_SAFE_ADDR_HIGH(
upper_32_bits(etnaviv_domain->base.bad_page_dma)) |
VIVS_MMUv2_SAFE_ADDRESS_CONFIG_SEC_SAFE_ADDR_HIGH(
upper_32_bits(etnaviv_domain->base.bad_page_dma)));
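/* Point PTA entry 0 at the master TLB, using 4K MTLB entry mode. */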
etnaviv_domain->pta_cpu[0] = etnaviv_domain->mtlb_dma |
VIVS_MMUv2_CONFIGURATION_MODE_MODE4_K;
/* trigger a PTA load through the FE */
prefetch = etnaviv_buffer_config_pta(gpu);
etnaviv_gpu_start_fe(gpu, (u32)etnaviv_cmdbuf_get_pa(&gpu->buffer),
prefetch);
etnaviv_gpu_wait_idle(gpu, 100);
gpu_write(gpu, VIVS_MMUv2_SEC_CONTROL, VIVS_MMUv2_SEC_CONTROL_ENABLE);
}
void etnaviv_iommuv2_restore(struct etnaviv_gpu *gpu)
{
switch (gpu->sec_mode) {
case ETNA_SEC_NONE:
etnaviv_iommuv2_restore_nonsec(gpu);
break;
case ETNA_SEC_KERNEL:
etnaviv_iommuv2_restore_sec(gpu);
break;
default:
WARN(1, "unhandled GPU security mode\n");
break;
}
}
static const struct etnaviv_iommu_domain_ops etnaviv_iommuv2_ops = {
.free = etnaviv_iommuv2_domain_free,
.map = etnaviv_iommuv2_map,
.unmap = etnaviv_iommuv2_unmap,
......
@@ -29,7 +29,7 @@ static void etnaviv_domain_unmap(struct etnaviv_iommu_domain *domain,
size_t pgsize = SZ_4K;
if (!IS_ALIGNED(iova | size, pgsize)) {
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%x\n",
pr_err("unaligned: iova 0x%lx size 0x%zx min_pagesz 0x%zx\n",
iova, size, pgsize);
return;
}
@@ -54,7 +54,7 @@ static int etnaviv_domain_map(struct etnaviv_iommu_domain *domain,
int ret = 0;
if (!IS_ALIGNED(iova | paddr | size, pgsize)) {
pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%zx\n",
iova, &paddr, size, pgsize);
return -EINVAL;
}
......
/*
* Copyright (C) 2017 Etnaviv Project
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#include <linux/kthread.h>
#include "etnaviv_drv.h"
#include "etnaviv_dump.h"
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int, 0444);
static int etnaviv_hw_jobs_limit = 4;
module_param_named(hw_job_limit, etnaviv_hw_jobs_limit, int, 0444);
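/* hw_job_limit bounds the number of jobs the scheduler may have queued
 * on the hardware ring at any one time; bumped to 4 in this series. */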
static struct dma_fence *
etnaviv_sched_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *entity)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct dma_fence *fence;
int i;
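/*
 * The scheduler calls this repeatedly before running the job; each call
 * returns one still-unsignaled fence, or NULL once all dependencies are
 * met. Returned fences are cleared from the submit so they are not
 * handed back twice.
 */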
if (unlikely(submit->in_fence)) {
fence = submit->in_fence;
submit->in_fence = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
for (i = 0; i < submit->nr_bos; i++) {
struct etnaviv_gem_submit_bo *bo = &submit->bos[i];
int j;
if (bo->excl) {
fence = bo->excl;
bo->excl = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
for (j = 0; j < bo->nr_shared; j++) {
if (!bo->shared[j])
continue;
fence = bo->shared[j];
bo->shared[j] = NULL;
if (!dma_fence_is_signaled(fence))
return fence;
dma_fence_put(fence);
}
kfree(bo->shared);
bo->nr_shared = 0;
bo->shared = NULL;
}
return NULL;
}
static struct dma_fence *etnaviv_sched_run_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct dma_fence *fence = NULL;
if (likely(!sched_job->s_fence->finished.error))
fence = etnaviv_gpu_submit(submit);
else
dev_dbg(submit->gpu->dev, "skipping bad job\n");
return fence;
}
static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
/* block scheduler */
kthread_park(gpu->sched.thread);
drm_sched_hw_job_reset(&gpu->sched, sched_job);
/* get the GPU back into the init state */
etnaviv_core_dump(gpu);
etnaviv_gpu_recover_hang(gpu);
/* restart scheduler after GPU is usable again */
drm_sched_job_recovery(&gpu->sched);
kthread_unpark(gpu->sched.thread);
}
static void etnaviv_sched_free_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
etnaviv_submit_put(submit);
}
static const struct drm_sched_backend_ops etnaviv_sched_ops = {
.dependency = etnaviv_sched_dependency,
.run_job = etnaviv_sched_run_job,
.timedout_job = etnaviv_sched_timedout_job,
.free_job = etnaviv_sched_free_job,
};
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit)
{
int ret;
ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
sched_entity, submit->cmdbuf.ctx);
if (ret)
return ret;
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
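/* With one fence context per scheduler entity the raw seqno is no
 * longer a GPU-global handle, so userspace gets an IDR id instead. */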
mutex_lock(&submit->gpu->fence_idr_lock);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
mutex_unlock(&submit->gpu->fence_idr_lock);
if (submit->out_fence_id < 0)
return -ENOMEM;
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
return 0;
}
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
{
int ret;
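/* The scheduler job timeout (500ms) replaces the old driver-private
 * hangcheck timer; etnaviv_sched_timedout_job() above handles recovery. */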
ret = drm_sched_init(&gpu->sched, &etnaviv_sched_ops,
etnaviv_hw_jobs_limit, etnaviv_job_hang_limit,
msecs_to_jiffies(500), dev_name(gpu->dev));
if (ret)
return ret;
return 0;
}
void etnaviv_sched_fini(struct etnaviv_gpu *gpu)
{
drm_sched_fini(&gpu->sched);
}
/*
* Copyright (C) 2017 Etnaviv Project
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
* the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful, but WITHOUT
* ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
#ifndef __ETNAVIV_SCHED_H__
#define __ETNAVIV_SCHED_H__
#include <drm/gpu_scheduler.h>
struct etnaviv_gpu;
static inline
struct etnaviv_gem_submit *to_etnaviv_submit(struct drm_sched_job *sched_job)
{
return container_of(sched_job, struct etnaviv_gem_submit, sched_job);
}
int etnaviv_sched_init(struct etnaviv_gpu *gpu);
void etnaviv_sched_fini(struct etnaviv_gpu *gpu);
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit);
#endif /* __ETNAVIV_SCHED_H__ */
@@ -7,4 +7,9 @@
#define VIVS_TS_FLUSH_CACHE 0x00001650
#define VIVS_TS_FLUSH_CACHE_FLUSH 0x00000001
#define VIVS_NTE_DESCRIPTOR_FLUSH 0x00014c44
#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK 0xf0000000
#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT 28
#define VIVS_NTE_DESCRIPTOR_FLUSH_UNK28(x) (((x) << VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__SHIFT) & VIVS_NTE_DESCRIPTOR_FLUSH_UNK28__MASK)
#endif /* STATE_3D_XML */
#ifndef STATE_BLT_XML
#define STATE_BLT_XML
/* Autogenerated file, DO NOT EDIT manually!
This file was generated by the rules-ng-ng headergen tool in this git repository:
http://0x04.net/cgit/index.cgi/rules-ng-ng
git clone git://0x04.net/rules-ng-ng
The rules-ng-ng source files this header was generated from are:
- state.xml ( 26087 bytes, from 2017-12-18 16:51:59)
- common.xml ( 35468 bytes, from 2018-01-22 13:48:54)
- common_3d.xml ( 14615 bytes, from 2017-12-18 16:51:59)
- state_hi.xml ( 30232 bytes, from 2018-02-15 15:48:01)
- copyright.xml ( 1597 bytes, from 2016-12-08 16:37:56)
- state_2d.xml ( 51552 bytes, from 2016-12-08 16:37:56)
- state_3d.xml ( 79992 bytes, from 2017-12-18 16:51:59)
- state_blt.xml ( 13405 bytes, from 2017-12-18 16:51:59)
- state_vg.xml ( 5975 bytes, from 2016-12-08 16:37:56)
Copyright (C) 2012-2017 by the following authors:
- Wladimir J. van der Laan <laanwj@gmail.com>
- Christian Gmeiner <christian.gmeiner@gmail.com>
- Lucas Stach <l.stach@pengutronix.de>
- Russell King <rmk@arm.linux.org.uk>
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the "Software"),
to deal in the Software without restriction, including without limitation
the rights to use, copy, modify, merge, publish, distribute, sub license,
and/or sell copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice (including the
next paragraph) shall be included in all copies or substantial portions
of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
DEALINGS IN THE SOFTWARE.
*/
/* This is a cut-down version of the state_blt.xml.h file */
#define VIVS_BLT_ENABLE 0x000140b8
#define VIVS_BLT_ENABLE_ENABLE 0x00000001
#endif /* STATE_BLT_XML */
@@ -55,6 +55,12 @@ struct drm_etnaviv_timespec {
#define ETNAVIV_PARAM_GPU_FEATURES_4 0x07
#define ETNAVIV_PARAM_GPU_FEATURES_5 0x08
#define ETNAVIV_PARAM_GPU_FEATURES_6 0x09
#define ETNAVIV_PARAM_GPU_FEATURES_7 0x0a
#define ETNAVIV_PARAM_GPU_FEATURES_8 0x0b
#define ETNAVIV_PARAM_GPU_FEATURES_9 0x0c
#define ETNAVIV_PARAM_GPU_FEATURES_10 0x0d
#define ETNAVIV_PARAM_GPU_FEATURES_11 0x0e
#define ETNAVIV_PARAM_GPU_FEATURES_12 0x0f
#define ETNAVIV_PARAM_GPU_STREAM_COUNT 0x10
#define ETNAVIV_PARAM_GPU_REGISTER_MAX 0x11
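These feature words reach userspace through the existing GET_PARAM ioctl; a
hedged sketch (struct drm_etnaviv_param and DRM_IOCTL_ETNAVIV_GET_PARAM are
the existing uapi; error handling omitted):

struct drm_etnaviv_param req = {
	.pipe = 0,
	.param = ETNAVIV_PARAM_GPU_FEATURES_7,	/* new in this series */
};
drmIoctl(fd, DRM_IOCTL_ETNAVIV_GET_PARAM, &req);
/* req.value now holds the minor_features7 bits */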
......