Commit f3ba9122 authored by Rob Herring

drm/panfrost: Add initial panfrost driver

This adds the initial panfrost driver, supporting the Arm Mali
Midgard and Bifrost families of GPUs. Currently, only the T860 and
T760 Midgard GPUs have been tested.

v2:
- Add GPU reset on job hangs (Tomeu)
- Add RuntimePM and devfreq support (Tomeu)
- Fix T760 support (Tomeu)
- Add a TODO file (Rob, Tomeu)
- Support multiple in fences (Tomeu)
- Drop support for shared fences (Tomeu)
- Fill in MMU de-init (Rob)
- Move register definitions back to single header (Rob)
- Clean-up hardcoded job submit todos (Rob)
- Implement feature setup based on features/issues (Rob)
- Add remaining Midgard DT compatible strings (Rob)

v3:
- Add support for reset lines (Neil)
- Add a MAINTAINERS entry (Rob)
- Call dma_set_mask_and_coherent (Rob)
- Do MMU invalidate on map and unmap. Restructure to do a single
  operation per map/unmap call. (Rob)
- Add a missing explicit padding to struct drm_panfrost_create_bo (Rob)
- Fix 0-day error: "panfrost_devfreq.c:151:9-16: ERROR: PTR_ERR applied after initialization to constant on line 150"
- Drop HW_FEATURE_AARCH64_MMU conditional (Rob)
- s/DRM_PANFROST_PARAM_GPU_ID/DRM_PANFROST_PARAM_GPU_PROD_ID/ (Rob)
- Check drm_gem_shmem_prime_import_sg_table() error code (Rob)
- Re-order power on sequence (Rob)
- Move panfrost_acquire_object_fences() before scheduling job (Rob)
- Add NULL checks on array pointers in job clean-up (Rob)
- Rework devfreq (Tomeu)
- Fix devfreq init with no regulator (Rob)
- Various WS and comments clean-up (Rob)

Cc: Maarten Lankhorst <maarten.lankhorst@linux.intel.com>
Cc: Maxime Ripard <maxime.ripard@bootlin.com>
Cc: Sean Paul <sean@poorly.run>
Cc: David Airlie <airlied@linux.ie>
Cc: Daniel Vetter <daniel@ffwll.ch>
Cc: Lyude Paul <lyude@redhat.com>
Reviewed-by: Alyssa Rosenzweig <alyssa@rosenzweig.io>
Reviewed-by: Eric Anholt <eric@anholt.net>
Reviewed-by: Steven Price <steven.price@arm.com>
Signed-off-by: Marty E. Plummer <hanetzer@startmail.com>
Signed-off-by: Tomeu Vizoso <tomeu.vizoso@collabora.com>
Signed-off-by: Neil Armstrong <narmstrong@baylibre.com>
Signed-off-by: Rob Herring <robh@kernel.org>
Link: https://patchwork.freedesktop.org/patch/msgid/20190409205427.6943-4-robh@kernel.org
parent c117aa4d
@@ -1180,6 +1180,15 @@ F: drivers/gpu/drm/arm/
F: Documentation/devicetree/bindings/display/arm,malidp.txt
F: Documentation/gpu/afbc.rst
ARM MALI PANFROST DRM DRIVER
M: Rob Herring <robh@kernel.org>
M: Tomeu Vizoso <tomeu.vizoso@collabora.com>
L: dri-devel@lists.freedesktop.org
S: Supported
T: git git://anongit.freedesktop.org/drm/drm-misc
F: drivers/gpu/drm/panfrost/
F: include/uapi/drm/panfrost_drm.h
ARM MFM AND FLOPPY DRIVERS
M: Ian Molton <spyro@f2s.com>
S: Maintained
...
@@ -337,6 +337,8 @@ source "drivers/gpu/drm/vboxvideo/Kconfig"
source "drivers/gpu/drm/lima/Kconfig"
source "drivers/gpu/drm/panfrost/Kconfig"
source "drivers/gpu/drm/aspeed/Kconfig" source "drivers/gpu/drm/aspeed/Kconfig"
# Keep legacy drivers last # Keep legacy drivers last
......
...@@ -112,4 +112,5 @@ obj-$(CONFIG_DRM_TVE200) += tve200/ ...@@ -112,4 +112,5 @@ obj-$(CONFIG_DRM_TVE200) += tve200/
obj-$(CONFIG_DRM_XEN) += xen/ obj-$(CONFIG_DRM_XEN) += xen/
obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/ obj-$(CONFIG_DRM_VBOXVIDEO) += vboxvideo/
obj-$(CONFIG_DRM_LIMA) += lima/ obj-$(CONFIG_DRM_LIMA) += lima/
obj-$(CONFIG_DRM_PANFROST) += panfrost/
obj-$(CONFIG_DRM_ASPEED_GFX) += aspeed/
# SPDX-License-Identifier: GPL-2.0
config DRM_PANFROST
tristate "Panfrost (DRM support for ARM Mali Midgard/Bifrost GPUs)"
depends on DRM
depends on ARM || ARM64 || COMPILE_TEST
depends on MMU
select DRM_SCHED
select IOMMU_SUPPORT
select IOMMU_IO_PGTABLE_LPAE
select DRM_GEM_SHMEM_HELPER
help
DRM driver for ARM Mali Midgard (T6xx, T7xx, T8xx) and
Bifrost (G3x, G5x, G7x) GPUs.
# SPDX-License-Identifier: GPL-2.0
panfrost-y := \
panfrost_drv.o \
panfrost_device.o \
panfrost_devfreq.o \
panfrost_gem.o \
panfrost_gpu.o \
panfrost_job.o \
panfrost_mmu.o
obj-$(CONFIG_DRM_PANFROST) += panfrost.o
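# Illustrative .config fragment (not part of the patch): with the Kconfig
# entry above, only the one option has to be enabled by hand; DRM_SCHED,
# IOMMU_SUPPORT, IOMMU_IO_PGTABLE_LPAE and DRM_GEM_SHMEM_HELPER are pulled
# in by the selects.
#
#   CONFIG_DRM=y
#   CONFIG_DRM_PANFROST=m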
- Thermal support.
- Bifrost support:
- DT bindings (Neil, WIP)
- MMU page table format and address space setup
- Bifrost specific feature and issue handling
- Coherent DMA support
- Support for 2MB pages. The io-pgtable code already supports this. Finishing
support involves either copying or adapting the iommu API to handle passing
aligned addresses and sizes to the io-pgtable code.
- Per FD address space support. The h/w supports multiple address spaces.
The hard part is handling when more address spaces are needed than what
the h/w provides.
- Support pinning pages on demand (GPU page faults).
- Support userspace controlled GPU virtual addresses. Needed for Vulkan. (Tomeu)
- Support for madvise and a shrinker.
- Compute job support. So-called 'compute only' jobs need to be plumbed up to
userspace.
- Performance counter support. (Boris)
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Collabora ltd. */
#include <linux/devfreq.h>
#include <linux/platform_device.h>
#include <linux/pm_opp.h>
#include <linux/clk.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
#include "panfrost_features.h"
#include "panfrost_issues.h"
#include "panfrost_gpu.h"
#include "panfrost_regs.h"
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot);
static int panfrost_devfreq_target(struct device *dev, unsigned long *freq,
u32 flags)
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
struct dev_pm_opp *opp;
unsigned long old_clk_rate = pfdev->devfreq.cur_freq;
unsigned long target_volt, target_rate;
int err;
opp = devfreq_recommended_opp(dev, freq, flags);
if (IS_ERR(opp))
return PTR_ERR(opp);
target_rate = dev_pm_opp_get_freq(opp);
target_volt = dev_pm_opp_get_voltage(opp);
dev_pm_opp_put(opp);
if (old_clk_rate == target_rate)
return 0;
/*
* If frequency scaling from low to high, adjust voltage first.
* If frequency scaling from high to low, adjust frequency first.
*/
if (old_clk_rate < target_rate) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err) {
dev_err(dev, "Cannot set voltage %lu uV\n",
target_volt);
return err;
}
}
err = clk_set_rate(pfdev->clock, target_rate);
if (err) {
dev_err(dev, "Cannot set frequency %lu (%d)\n", target_rate,
err);
regulator_set_voltage(pfdev->regulator, pfdev->devfreq.cur_volt,
pfdev->devfreq.cur_volt);
return err;
}
if (old_clk_rate > target_rate) {
err = regulator_set_voltage(pfdev->regulator, target_volt,
target_volt);
if (err)
dev_err(dev, "Cannot set voltage %lu uV\n", target_volt);
}
pfdev->devfreq.cur_freq = target_rate;
pfdev->devfreq.cur_volt = target_volt;
return 0;
}
static void panfrost_devfreq_reset(struct panfrost_device *pfdev)
{
ktime_t now = ktime_get();
int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
pfdev->devfreq.slot[i].busy_time = 0;
pfdev->devfreq.slot[i].idle_time = 0;
pfdev->devfreq.slot[i].time_last_update = now;
}
}
static int panfrost_devfreq_get_dev_status(struct device *dev,
struct devfreq_dev_status *status)
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
int i;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
panfrost_devfreq_update_utilization(pfdev, i);
}
status->current_frequency = clk_get_rate(pfdev->clock);
status->total_time = ktime_to_ns(ktime_add(pfdev->devfreq.slot[0].busy_time,
pfdev->devfreq.slot[0].idle_time));
status->busy_time = 0;
for (i = 0; i < NUM_JOB_SLOTS; i++) {
status->busy_time += ktime_to_ns(pfdev->devfreq.slot[i].busy_time);
}
/* We're scheduling only to one core atm, so don't divide for now */
/* status->busy_time /= NUM_JOB_SLOTS; */
panfrost_devfreq_reset(pfdev);
dev_dbg(pfdev->dev, "busy %lu total %lu %lu %% freq %lu MHz\n", status->busy_time,
status->total_time,
status->busy_time / (status->total_time / 100),
status->current_frequency / 1000 / 1000);
return 0;
}
static int panfrost_devfreq_get_cur_freq(struct device *dev, unsigned long *freq)
{
struct panfrost_device *pfdev = platform_get_drvdata(to_platform_device(dev));
*freq = pfdev->devfreq.cur_freq;
return 0;
}
static struct devfreq_dev_profile panfrost_devfreq_profile = {
.polling_ms = 50, /* ~3 frames */
.target = panfrost_devfreq_target,
.get_dev_status = panfrost_devfreq_get_dev_status,
.get_cur_freq = panfrost_devfreq_get_cur_freq,
};
int panfrost_devfreq_init(struct panfrost_device *pfdev)
{
int ret;
struct dev_pm_opp *opp;
if (!pfdev->regulator)
return 0;
ret = dev_pm_opp_of_add_table(&pfdev->pdev->dev);
if (ret == -ENODEV) /* Optional, continue without devfreq */
return 0;
panfrost_devfreq_reset(pfdev);
pfdev->devfreq.cur_freq = clk_get_rate(pfdev->clock);
opp = devfreq_recommended_opp(&pfdev->pdev->dev, &pfdev->devfreq.cur_freq, 0);
if (IS_ERR(opp))
return PTR_ERR(opp);
panfrost_devfreq_profile.initial_freq = pfdev->devfreq.cur_freq;
dev_pm_opp_put(opp);
pfdev->devfreq.devfreq = devm_devfreq_add_device(&pfdev->pdev->dev,
&panfrost_devfreq_profile, "simple_ondemand", NULL);
if (IS_ERR(pfdev->devfreq.devfreq)) {
DRM_DEV_ERROR(&pfdev->pdev->dev, "Couldn't initialize GPU devfreq\n");
ret = PTR_ERR(pfdev->devfreq.devfreq);
pfdev->devfreq.devfreq = NULL;
return ret;
}
return 0;
}
void panfrost_devfreq_resume(struct panfrost_device *pfdev)
{
int i;
if (!pfdev->devfreq.devfreq)
return;
panfrost_devfreq_reset(pfdev);
for (i = 0; i < NUM_JOB_SLOTS; i++)
pfdev->devfreq.slot[i].busy = false;
devfreq_resume_device(pfdev->devfreq.devfreq);
}
void panfrost_devfreq_suspend(struct panfrost_device *pfdev)
{
if (!pfdev->devfreq.devfreq)
return;
devfreq_suspend_device(pfdev->devfreq.devfreq);
}
static void panfrost_devfreq_update_utilization(struct panfrost_device *pfdev, int slot)
{
struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
ktime_t now;
ktime_t last;
if (!pfdev->devfreq.devfreq)
return;
now = ktime_get();
last = pfdev->devfreq.slot[slot].time_last_update;
	/* If the last recorded transition was to busy, we have been busy since */
if (devfreq_slot->busy)
pfdev->devfreq.slot[slot].busy_time += ktime_sub(now, last);
else
pfdev->devfreq.slot[slot].idle_time += ktime_sub(now, last);
pfdev->devfreq.slot[slot].time_last_update = now;
}
/* The job scheduler is expected to call this at every transition busy <-> idle */
void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot)
{
struct panfrost_devfreq_slot *devfreq_slot = &pfdev->devfreq.slot[slot];
panfrost_devfreq_update_utilization(pfdev, slot);
devfreq_slot->busy = !devfreq_slot->busy;
}
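/*
 * Illustrative only (not part of the patch): the job scheduler side is
 * expected to bracket hardware execution with one call per busy <-> idle
 * transition, roughly:
 */
#if 0	/* sketch; assumes a valid panfrost_job and job slot number js */
	panfrost_devfreq_record_transition(job->pfdev, js);	/* idle -> busy */
	/* ... hardware runs the job, the job IRQ signals completion ... */
	panfrost_devfreq_record_transition(job->pfdev, js);	/* busy -> idle */
#endif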
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Collabora ltd. */
#ifndef __PANFROST_DEVFREQ_H__
#define __PANFROST_DEVFREQ_H__
int panfrost_devfreq_init(struct panfrost_device *pfdev);
void panfrost_devfreq_resume(struct panfrost_device *pfdev);
void panfrost_devfreq_suspend(struct panfrost_device *pfdev);
void panfrost_devfreq_record_transition(struct panfrost_device *pfdev, int slot);
#endif /* __PANFROST_DEVFREQ_H__ */
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/clk.h>
#include <linux/reset.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/regulator/consumer.h>
#include "panfrost_device.h"
#include "panfrost_devfreq.h"
#include "panfrost_features.h"
#include "panfrost_gpu.h"
#include "panfrost_job.h"
#include "panfrost_mmu.h"
static int panfrost_reset_init(struct panfrost_device *pfdev)
{
int err;
pfdev->rstc = devm_reset_control_array_get(pfdev->dev, false, true);
if (IS_ERR(pfdev->rstc)) {
dev_err(pfdev->dev, "get reset failed %ld\n", PTR_ERR(pfdev->rstc));
return PTR_ERR(pfdev->rstc);
}
err = reset_control_deassert(pfdev->rstc);
if (err)
return err;
return 0;
}
static void panfrost_reset_fini(struct panfrost_device *pfdev)
{
reset_control_assert(pfdev->rstc);
}
static int panfrost_clk_init(struct panfrost_device *pfdev)
{
int err;
unsigned long rate;
pfdev->clock = devm_clk_get(pfdev->dev, NULL);
if (IS_ERR(pfdev->clock)) {
dev_err(pfdev->dev, "get clock failed %ld\n", PTR_ERR(pfdev->clock));
return PTR_ERR(pfdev->clock);
}
rate = clk_get_rate(pfdev->clock);
dev_info(pfdev->dev, "clock rate = %lu\n", rate);
err = clk_prepare_enable(pfdev->clock);
if (err)
return err;
return 0;
}
static void panfrost_clk_fini(struct panfrost_device *pfdev)
{
clk_disable_unprepare(pfdev->clock);
}
static int panfrost_regulator_init(struct panfrost_device *pfdev)
{
int ret;
pfdev->regulator = devm_regulator_get_optional(pfdev->dev, "mali");
if (IS_ERR(pfdev->regulator)) {
ret = PTR_ERR(pfdev->regulator);
pfdev->regulator = NULL;
if (ret == -ENODEV)
return 0;
dev_err(pfdev->dev, "failed to get regulator: %d\n", ret);
return ret;
}
ret = regulator_enable(pfdev->regulator);
if (ret < 0) {
dev_err(pfdev->dev, "failed to enable regulator: %d\n", ret);
return ret;
}
return 0;
}
static void panfrost_regulator_fini(struct panfrost_device *pfdev)
{
if (pfdev->regulator)
regulator_disable(pfdev->regulator);
}
int panfrost_device_init(struct panfrost_device *pfdev)
{
int err;
struct resource *res;
mutex_init(&pfdev->sched_lock);
INIT_LIST_HEAD(&pfdev->scheduled_jobs);
spin_lock_init(&pfdev->hwaccess_lock);
err = panfrost_clk_init(pfdev);
if (err) {
dev_err(pfdev->dev, "clk init failed %d\n", err);
return err;
}
err = panfrost_regulator_init(pfdev);
if (err) {
dev_err(pfdev->dev, "regulator init failed %d\n", err);
goto err_out0;
}
err = panfrost_reset_init(pfdev);
if (err) {
dev_err(pfdev->dev, "reset init failed %d\n", err);
goto err_out1;
}
res = platform_get_resource(pfdev->pdev, IORESOURCE_MEM, 0);
pfdev->iomem = devm_ioremap_resource(pfdev->dev, res);
if (IS_ERR(pfdev->iomem)) {
dev_err(pfdev->dev, "failed to ioremap iomem\n");
err = PTR_ERR(pfdev->iomem);
goto err_out2;
}
err = panfrost_gpu_init(pfdev);
if (err)
goto err_out2;
err = panfrost_mmu_init(pfdev);
if (err)
goto err_out3;
err = panfrost_job_init(pfdev);
if (err)
goto err_out4;
/* runtime PM will wake us up later */
panfrost_gpu_power_off(pfdev);
pm_runtime_set_active(pfdev->dev);
pm_runtime_get_sync(pfdev->dev);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
return 0;
err_out4:
panfrost_mmu_fini(pfdev);
err_out3:
panfrost_gpu_fini(pfdev);
err_out2:
panfrost_reset_fini(pfdev);
err_out1:
panfrost_regulator_fini(pfdev);
err_out0:
panfrost_clk_fini(pfdev);
return err;
}
void panfrost_device_fini(struct panfrost_device *pfdev)
{
panfrost_regulator_fini(pfdev);
panfrost_clk_fini(pfdev);
}
const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code)
{
switch (exception_code) {
/* Non-Fault Status code */
case 0x00: return "NOT_STARTED/IDLE/OK";
case 0x01: return "DONE";
case 0x02: return "INTERRUPTED";
case 0x03: return "STOPPED";
case 0x04: return "TERMINATED";
case 0x08: return "ACTIVE";
/* Job exceptions */
case 0x40: return "JOB_CONFIG_FAULT";
case 0x41: return "JOB_POWER_FAULT";
case 0x42: return "JOB_READ_FAULT";
case 0x43: return "JOB_WRITE_FAULT";
case 0x44: return "JOB_AFFINITY_FAULT";
case 0x48: return "JOB_BUS_FAULT";
case 0x50: return "INSTR_INVALID_PC";
case 0x51: return "INSTR_INVALID_ENC";
case 0x52: return "INSTR_TYPE_MISMATCH";
case 0x53: return "INSTR_OPERAND_FAULT";
case 0x54: return "INSTR_TLS_FAULT";
case 0x55: return "INSTR_BARRIER_FAULT";
case 0x56: return "INSTR_ALIGN_FAULT";
case 0x58: return "DATA_INVALID_FAULT";
case 0x59: return "TILE_RANGE_FAULT";
case 0x5A: return "ADDR_RANGE_FAULT";
case 0x60: return "OUT_OF_MEMORY";
/* GPU exceptions */
case 0x80: return "DELAYED_BUS_FAULT";
case 0x88: return "SHAREABILITY_FAULT";
/* MMU exceptions */
case 0xC1: return "TRANSLATION_FAULT_LEVEL1";
case 0xC2: return "TRANSLATION_FAULT_LEVEL2";
case 0xC3: return "TRANSLATION_FAULT_LEVEL3";
case 0xC4: return "TRANSLATION_FAULT_LEVEL4";
case 0xC8: return "PERMISSION_FAULT";
case 0xC9 ... 0xCF: return "PERMISSION_FAULT";
case 0xD1: return "TRANSTAB_BUS_FAULT_LEVEL1";
case 0xD2: return "TRANSTAB_BUS_FAULT_LEVEL2";
case 0xD3: return "TRANSTAB_BUS_FAULT_LEVEL3";
case 0xD4: return "TRANSTAB_BUS_FAULT_LEVEL4";
case 0xD8: return "ACCESS_FLAG";
case 0xD9 ... 0xDF: return "ACCESS_FLAG";
case 0xE0 ... 0xE7: return "ADDRESS_SIZE_FAULT";
case 0xE8 ... 0xEF: return "MEMORY_ATTRIBUTES_FAULT";
}
return "UNKNOWN";
}
#ifdef CONFIG_PM
int panfrost_device_resume(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
panfrost_gpu_soft_reset(pfdev);
/* TODO: Re-enable all other address spaces */
panfrost_gpu_power_on(pfdev);
panfrost_mmu_enable(pfdev, 0);
panfrost_job_enable_interrupts(pfdev);
panfrost_devfreq_resume(pfdev);
return 0;
}
int panfrost_device_suspend(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
struct panfrost_device *pfdev = platform_get_drvdata(pdev);
if (!panfrost_job_is_idle(pfdev))
return -EBUSY;
panfrost_devfreq_suspend(pfdev);
panfrost_gpu_power_off(pfdev);
return 0;
}
#endif
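/*
 * Illustrative only: the platform driver (in the collapsed panfrost_drv.c
 * diff) is expected to wire these callbacks up through runtime PM, along
 * the lines of:
 *
 *	static const struct dev_pm_ops panfrost_pm_ops = {
 *		SET_RUNTIME_PM_OPS(panfrost_device_suspend,
 *				   panfrost_device_resume, NULL)
 *	};
 */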
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#ifndef __PANFROST_DEVICE_H__
#define __PANFROST_DEVICE_H__
#include <linux/spinlock.h>
#include <drm/drm_device.h>
#include <drm/drm_mm.h>
#include <drm/gpu_scheduler.h>
struct panfrost_device;
struct panfrost_mmu;
struct panfrost_job_slot;
struct panfrost_job;
#define NUM_JOB_SLOTS 3
struct panfrost_features {
u16 id;
u16 revision;
u64 shader_present;
u64 tiler_present;
u64 l2_present;
u64 stack_present;
u32 as_present;
u32 js_present;
u32 l2_features;
u32 core_features;
u32 tiler_features;
u32 mem_features;
u32 mmu_features;
u32 thread_features;
u32 max_threads;
u32 thread_max_workgroup_sz;
u32 thread_max_barrier_sz;
u32 coherency_features;
u32 texture_features[4];
u32 js_features[16];
u32 nr_core_groups;
unsigned long hw_features[64 / BITS_PER_LONG];
unsigned long hw_issues[64 / BITS_PER_LONG];
};
struct panfrost_devfreq_slot {
ktime_t busy_time;
ktime_t idle_time;
ktime_t time_last_update;
bool busy;
};
struct panfrost_device {
struct device *dev;
struct drm_device *ddev;
struct platform_device *pdev;
spinlock_t hwaccess_lock;
struct drm_mm mm;
spinlock_t mm_lock;
void __iomem *iomem;
struct clk *clock;
struct regulator *regulator;
struct reset_control *rstc;
struct panfrost_features features;
struct panfrost_mmu *mmu;
struct panfrost_job_slot *js;
struct panfrost_job *jobs[NUM_JOB_SLOTS];
struct list_head scheduled_jobs;
struct mutex sched_lock;
struct {
struct devfreq *devfreq;
struct thermal_cooling_device *cooling;
unsigned long cur_freq;
unsigned long cur_volt;
struct panfrost_devfreq_slot slot[NUM_JOB_SLOTS];
} devfreq;
};
struct panfrost_file_priv {
struct panfrost_device *pfdev;
struct drm_sched_entity sched_entity[NUM_JOB_SLOTS];
};
static inline struct panfrost_device *to_panfrost_device(struct drm_device *ddev)
{
return ddev->dev_private;
}
static inline int panfrost_model_cmp(struct panfrost_device *pfdev, s32 id)
{
s32 match_id = pfdev->features.id;
if (match_id & 0xf000)
match_id &= 0xf00f;
return match_id - id;
}
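/*
 * Example with illustrative id values: a Midgard T860 reports id 0x0860,
 * which has no bits in 0xf000 set and is compared directly. Bifrost parts
 * do set bits in 0xf000, so a raw id of e.g. 0x7212 is masked down to
 * 0x7212 & 0xf00f = 0x7002 before the comparison, discarding the version
 * bits in 0x0ff0.
 */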
static inline bool panfrost_model_eq(struct panfrost_device *pfdev, s32 id)
{
return !panfrost_model_cmp(pfdev, id);
}
int panfrost_device_init(struct panfrost_device *pfdev);
void panfrost_device_fini(struct panfrost_device *pfdev);
int panfrost_device_resume(struct device *dev);
int panfrost_device_suspend(struct device *dev);
const char *panfrost_exception_name(struct panfrost_device *pfdev, u32 exception_code);
#endif
This diff is collapsed.
This diff is collapsed.
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/dma-buf.h>
#include <linux/dma-mapping.h>
#include <drm/panfrost_drm.h>
#include "panfrost_device.h"
#include "panfrost_gem.h"
#include "panfrost_mmu.h"
/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
void panfrost_gem_free_object(struct drm_gem_object *obj)
{
struct panfrost_gem_object *bo = to_panfrost_bo(obj);
struct panfrost_device *pfdev = obj->dev->dev_private;
panfrost_mmu_unmap(bo);
spin_lock(&pfdev->mm_lock);
drm_mm_remove_node(&bo->node);
spin_unlock(&pfdev->mm_lock);
drm_gem_shmem_free_object(obj);
}
static const struct drm_gem_object_funcs panfrost_gem_funcs = {
.free = panfrost_gem_free_object,
.print_info = drm_gem_shmem_print_info,
.pin = drm_gem_shmem_pin,
.unpin = drm_gem_shmem_unpin,
.get_sg_table = drm_gem_shmem_get_sg_table,
.vmap = drm_gem_shmem_vmap,
.vunmap = drm_gem_shmem_vunmap,
.vm_ops = &drm_gem_shmem_vm_ops,
};
/**
* panfrost_gem_create_object - Implementation of driver->gem_create_object.
* @dev: DRM device
* @size: Size in bytes of the memory the object will reference
*
* This lets the GEM helpers allocate object structs for us, and keep
* our BO stats correct.
*/
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size)
{
int ret;
struct panfrost_device *pfdev = dev->dev_private;
struct panfrost_gem_object *obj;
obj = kzalloc(sizeof(*obj), GFP_KERNEL);
if (!obj)
return NULL;
obj->base.base.funcs = &panfrost_gem_funcs;
spin_lock(&pfdev->mm_lock);
ret = drm_mm_insert_node(&pfdev->mm, &obj->node,
roundup(size, PAGE_SIZE) >> PAGE_SHIFT);
spin_unlock(&pfdev->mm_lock);
if (ret)
goto free_obj;
return &obj->base.base;
free_obj:
kfree(obj);
return ERR_PTR(ret);
}
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt)
{
struct drm_gem_object *obj;
struct panfrost_gem_object *pobj;
obj = drm_gem_shmem_prime_import_sg_table(dev, attach, sgt);
if (IS_ERR(obj))
return ERR_CAST(obj);
pobj = to_panfrost_bo(obj);
obj->resv = attach->dmabuf->resv;
panfrost_mmu_map(pobj);
return obj;
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#ifndef __PANFROST_GEM_H__
#define __PANFROST_GEM_H__
#include <drm/drm_gem_shmem_helper.h>
#include <drm/drm_mm.h>
struct panfrost_gem_object {
struct drm_gem_shmem_object base;
struct drm_mm_node node;
};
static inline
struct panfrost_gem_object *to_panfrost_bo(struct drm_gem_object *obj)
{
return container_of(to_drm_gem_shmem_obj(obj), struct panfrost_gem_object, base);
}
struct drm_gem_object *panfrost_gem_create_object(struct drm_device *dev, size_t size);
struct drm_gem_object *
panfrost_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach,
struct sg_table *sgt);
#endif /* __PANFROST_GEM_H__ */
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2018 Marty E. Plummer <hanetzer@startmail.com> */
/* Copyright 2019 Collabora ltd. */
#ifndef __PANFROST_GPU_H__
#define __PANFROST_GPU_H__
struct panfrost_device;
int panfrost_gpu_init(struct panfrost_device *pfdev);
void panfrost_gpu_fini(struct panfrost_device *pfdev);
u32 panfrost_gpu_get_latest_flush_id(struct panfrost_device *pfdev);
int panfrost_gpu_soft_reset(struct panfrost_device *pfdev);
void panfrost_gpu_power_on(struct panfrost_device *pfdev);
void panfrost_gpu_power_off(struct panfrost_device *pfdev);
#endif
/* SPDX-License-Identifier: GPL-2.0 */
/* (C) COPYRIGHT 2014-2018 ARM Limited. All rights reserved. */
/* Copyright 2019 Linaro, Ltd., Rob Herring <robh@kernel.org> */
#ifndef __PANFROST_ISSUES_H__
#define __PANFROST_ISSUES_H__
#include <linux/bitops.h>
#include "panfrost_device.h"
/*
* This is not a complete list of issues, but only the ones the driver needs
* to care about.
*/
enum panfrost_hw_issue {
HW_ISSUE_6367,
HW_ISSUE_6787,
HW_ISSUE_8186,
HW_ISSUE_8245,
HW_ISSUE_8316,
HW_ISSUE_8394,
HW_ISSUE_8401,
HW_ISSUE_8408,
HW_ISSUE_8443,
HW_ISSUE_8987,
HW_ISSUE_9435,
HW_ISSUE_9510,
HW_ISSUE_9630,
HW_ISSUE_10327,
HW_ISSUE_10649,
HW_ISSUE_10676,
HW_ISSUE_10797,
HW_ISSUE_10817,
HW_ISSUE_10883,
HW_ISSUE_10959,
HW_ISSUE_10969,
HW_ISSUE_11020,
HW_ISSUE_11024,
HW_ISSUE_11035,
HW_ISSUE_11056,
HW_ISSUE_T76X_3542,
HW_ISSUE_T76X_3953,
HW_ISSUE_TMIX_8463,
GPUCORE_1619,
HW_ISSUE_TMIX_8438,
HW_ISSUE_TGOX_R1_1234,
HW_ISSUE_END
};
#define hw_issues_all (\
BIT_ULL(HW_ISSUE_9435))
#define hw_issues_t600 (\
BIT_ULL(HW_ISSUE_6367) | \
BIT_ULL(HW_ISSUE_6787) | \
BIT_ULL(HW_ISSUE_8408) | \
BIT_ULL(HW_ISSUE_9510) | \
BIT_ULL(HW_ISSUE_10649) | \
BIT_ULL(HW_ISSUE_10676) | \
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_11020) | \
BIT_ULL(HW_ISSUE_11035) | \
BIT_ULL(HW_ISSUE_11056) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t600_r0p0_15dev0 (\
BIT_ULL(HW_ISSUE_8186) | \
BIT_ULL(HW_ISSUE_8245) | \
BIT_ULL(HW_ISSUE_8316) | \
BIT_ULL(HW_ISSUE_8394) | \
BIT_ULL(HW_ISSUE_8401) | \
BIT_ULL(HW_ISSUE_8443) | \
BIT_ULL(HW_ISSUE_8987) | \
BIT_ULL(HW_ISSUE_9630) | \
BIT_ULL(HW_ISSUE_10969) | \
BIT_ULL(GPUCORE_1619))
#define hw_issues_t620 (\
BIT_ULL(HW_ISSUE_10649) | \
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_10959) | \
BIT_ULL(HW_ISSUE_11056) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t620_r0p1 (\
BIT_ULL(HW_ISSUE_10327) | \
BIT_ULL(HW_ISSUE_10676) | \
BIT_ULL(HW_ISSUE_10817) | \
BIT_ULL(HW_ISSUE_11020) | \
BIT_ULL(HW_ISSUE_11024) | \
BIT_ULL(HW_ISSUE_11035))
#define hw_issues_t620_r1p0 (\
BIT_ULL(HW_ISSUE_11020) | \
BIT_ULL(HW_ISSUE_11024))
#define hw_issues_t720 (\
BIT_ULL(HW_ISSUE_10649) | \
BIT_ULL(HW_ISSUE_10797) | \
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_11056) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t760 (\
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_T76X_3953) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t760_r0p0 (\
BIT_ULL(HW_ISSUE_11020) | \
BIT_ULL(HW_ISSUE_11024) | \
BIT_ULL(HW_ISSUE_T76X_3542))
#define hw_issues_t760_r0p1 (\
BIT_ULL(HW_ISSUE_11020) | \
BIT_ULL(HW_ISSUE_11024) | \
BIT_ULL(HW_ISSUE_T76X_3542))
#define hw_issues_t760_r0p1_50rel0 (\
BIT_ULL(HW_ISSUE_T76X_3542))
#define hw_issues_t760_r0p2 (\
BIT_ULL(HW_ISSUE_11020) | \
BIT_ULL(HW_ISSUE_11024) | \
BIT_ULL(HW_ISSUE_T76X_3542))
#define hw_issues_t760_r0p3 (\
BIT_ULL(HW_ISSUE_T76X_3542))
#define hw_issues_t820 (\
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_T76X_3953) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t830 (\
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_T76X_3953) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t860 (\
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_T76X_3953) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_t880 (\
BIT_ULL(HW_ISSUE_10883) | \
BIT_ULL(HW_ISSUE_T76X_3953) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_g31 0
#define hw_issues_g31_r1p0 (\
BIT_ULL(HW_ISSUE_TGOX_R1_1234))
#define hw_issues_g51 0
#define hw_issues_g52 0
#define hw_issues_g71 (\
BIT_ULL(HW_ISSUE_TMIX_8463) | \
BIT_ULL(HW_ISSUE_TMIX_8438))
#define hw_issues_g71_r0p0_05dev0 (\
BIT_ULL(HW_ISSUE_T76X_3953))
#define hw_issues_g72 0
#define hw_issues_g76 0
static inline bool panfrost_has_hw_issue(struct panfrost_device *pfdev,
enum panfrost_hw_issue issue)
{
return test_bit(issue, pfdev->features.hw_issues);
}
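/*
 * Illustrative only: GPU feature probing (in the collapsed diffs) is
 * expected to OR the common, per-model and per-revision masks into
 * pfdev->features.hw_issues, after which workarounds key off single bits:
 */
#if 0	/* sketch for a hypothetical T860 */
	u64 issues = hw_issues_all | hw_issues_t860;

	bitmap_from_u64(pfdev->features.hw_issues, issues);
	if (panfrost_has_hw_issue(pfdev, HW_ISSUE_TMIX_8438))
		; /* apply the corresponding workaround */
#endif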
#endif /* __PANFROST_ISSUES_H__ */
This diff is collapsed.
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Collabora ltd. */
#ifndef __PANFROST_JOB_H__
#define __PANFROST_JOB_H__
#include <uapi/drm/panfrost_drm.h>
#include <drm/gpu_scheduler.h>
struct panfrost_device;
struct panfrost_gem_object;
struct panfrost_file_priv;
struct panfrost_job {
struct drm_sched_job base;
struct kref refcount;
struct panfrost_device *pfdev;
struct panfrost_file_priv *file_priv;
/* Optional fences userspace can pass in for the job to depend on. */
struct dma_fence **in_fences;
u32 in_fence_count;
/* Fence to be signaled by IRQ handler when the job is complete. */
struct dma_fence *done_fence;
__u64 jc;
__u32 requirements;
__u32 flush_id;
/* Exclusive fences we have taken from the BOs to wait for */
struct dma_fence **implicit_fences;
struct drm_gem_object **bos;
u32 bo_count;
/* Fence to be signaled by drm-sched once its done with the job */
struct dma_fence *render_done_fence;
};
int panfrost_job_init(struct panfrost_device *pfdev);
void panfrost_job_fini(struct panfrost_device *pfdev);
int panfrost_job_open(struct panfrost_file_priv *panfrost_priv);
void panfrost_job_close(struct panfrost_file_priv *panfrost_priv);
int panfrost_job_push(struct panfrost_job *job);
void panfrost_job_put(struct panfrost_job *job);
void panfrost_job_enable_interrupts(struct panfrost_device *pfdev);
int panfrost_job_is_idle(struct panfrost_device *pfdev);
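/*
 * Typical lifecycle (sketch; the implementation is in the collapsed
 * panfrost_job.c diff): panfrost_job_open() sets up the per-FD drm_sched
 * entities, the SUBMIT ioctl allocates a refcounted panfrost_job and
 * queues it with panfrost_job_push(), and the submitter drops its
 * reference with panfrost_job_put() once the job is pushed.
 */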
#endif
// SPDX-License-Identifier: GPL-2.0
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#include <linux/bitfield.h>
#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/io-pgtable.h>
#include <linux/iommu.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#include <linux/sizes.h>
#include "panfrost_device.h"
#include "panfrost_mmu.h"
#include "panfrost_gem.h"
#include "panfrost_features.h"
#include "panfrost_regs.h"
#define mmu_write(dev, reg, data) writel(data, dev->iomem + reg)
#define mmu_read(dev, reg) readl(dev->iomem + reg)
struct panfrost_mmu {
struct io_pgtable_cfg pgtbl_cfg;
struct io_pgtable_ops *pgtbl_ops;
struct mutex lock;
};
static int wait_ready(struct panfrost_device *pfdev, u32 as_nr)
{
int ret;
u32 val;
/* Wait for the MMU status to indicate there is no active command, in
* case one is pending. */
ret = readl_relaxed_poll_timeout_atomic(pfdev->iomem + AS_STATUS(as_nr),
val, !(val & AS_STATUS_AS_ACTIVE), 10, 1000);
if (ret)
dev_err(pfdev->dev, "AS_ACTIVE bit stuck\n");
return ret;
}
static int write_cmd(struct panfrost_device *pfdev, u32 as_nr, u32 cmd)
{
int status;
/* write AS_COMMAND when MMU is ready to accept another command */
status = wait_ready(pfdev, as_nr);
if (!status)
mmu_write(pfdev, AS_COMMAND(as_nr), cmd);
return status;
}
static void lock_region(struct panfrost_device *pfdev, u32 as_nr,
u64 iova, size_t size)
{
u8 region_width;
u64 region = iova & PAGE_MASK;
/*
* fls returns:
* 1 .. 32
*
* 10 + fls(num_pages)
* results in the range (11 .. 42)
*/
size = round_up(size, PAGE_SIZE);
region_width = 10 + fls(size >> PAGE_SHIFT);
if ((size >> PAGE_SHIFT) != (1ul << (region_width - 11))) {
/* not pow2, so must go up to the next pow2 */
region_width += 1;
}
region |= region_width;
/* Lock the region that needs to be updated */
mmu_write(pfdev, AS_LOCKADDR_LO(as_nr), region & 0xFFFFFFFFUL);
mmu_write(pfdev, AS_LOCKADDR_HI(as_nr), (region >> 32) & 0xFFFFFFFFUL);
write_cmd(pfdev, as_nr, AS_COMMAND_LOCK);
}
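/*
 * Worked example: locking 64KB with 4K pages covers 16 pages, so
 * fls(16) = 5 and region_width = 10 + 5 = 15. Because 16 == 1 << (15 - 11),
 * the page count is already a power of two and no rounding up is needed;
 * the width is then encoded into the low bits of the lock address.
 */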
static int mmu_hw_do_operation(struct panfrost_device *pfdev, u32 as_nr,
u64 iova, size_t size, u32 op)
{
unsigned long flags;
int ret;
spin_lock_irqsave(&pfdev->hwaccess_lock, flags);
if (op != AS_COMMAND_UNLOCK)
lock_region(pfdev, as_nr, iova, size);
/* Run the MMU operation */
write_cmd(pfdev, as_nr, op);
/* Wait for the flush to complete */
ret = wait_ready(pfdev, as_nr);
spin_unlock_irqrestore(&pfdev->hwaccess_lock, flags);
return ret;
}
void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr)
{
struct io_pgtable_cfg *cfg = &pfdev->mmu->pgtbl_cfg;
u64 transtab = cfg->arm_mali_lpae_cfg.transtab;
u64 memattr = cfg->arm_mali_lpae_cfg.memattr;
mmu_write(pfdev, MMU_INT_CLEAR, ~0);
mmu_write(pfdev, MMU_INT_MASK, ~0);
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), transtab & 0xffffffffUL);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), transtab >> 32);
/* Need to revisit mem attrs.
* NC is the default, Mali driver is inner WT.
*/
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), memattr & 0xffffffffUL);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), memattr >> 32);
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
static void mmu_disable(struct panfrost_device *pfdev, u32 as_nr)
{
mmu_write(pfdev, AS_TRANSTAB_LO(as_nr), 0);
mmu_write(pfdev, AS_TRANSTAB_HI(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_LO(as_nr), 0);
mmu_write(pfdev, AS_MEMATTR_HI(as_nr), 0);
write_cmd(pfdev, as_nr, AS_COMMAND_UPDATE);
}
int panfrost_mmu_map(struct panfrost_gem_object *bo)
{
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
u64 iova = bo->node.start << PAGE_SHIFT;
unsigned int count;
struct scatterlist *sgl;
struct sg_table *sgt;
int ret;
sgt = drm_gem_shmem_get_pages_sgt(obj);
if (WARN_ON(IS_ERR(sgt)))
return PTR_ERR(sgt);
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
return ret;
mutex_lock(&pfdev->mmu->lock);
for_each_sg(sgt->sgl, sgl, sgt->nents, count) {
unsigned long paddr = sg_dma_address(sgl);
size_t len = sg_dma_len(sgl);
dev_dbg(pfdev->dev, "map: iova=%llx, paddr=%lx, len=%zx", iova, paddr, len);
while (len) {
ops->map(ops, iova, paddr, SZ_4K, IOMMU_WRITE | IOMMU_READ);
iova += SZ_4K;
paddr += SZ_4K;
len -= SZ_4K;
}
}
mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
mutex_unlock(&pfdev->mmu->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
return 0;
}
void panfrost_mmu_unmap(struct panfrost_gem_object *bo)
{
struct drm_gem_object *obj = &bo->base.base;
struct panfrost_device *pfdev = to_panfrost_device(obj->dev);
struct io_pgtable_ops *ops = pfdev->mmu->pgtbl_ops;
u64 iova = bo->node.start << PAGE_SHIFT;
size_t len = bo->node.size << PAGE_SHIFT;
size_t unmapped_len = 0;
int ret;
dev_dbg(pfdev->dev, "unmap: iova=%llx, len=%zx", iova, len);
ret = pm_runtime_get_sync(pfdev->dev);
if (ret < 0)
return;
mutex_lock(&pfdev->mmu->lock);
while (unmapped_len < len) {
ops->unmap(ops, iova, SZ_4K);
iova += SZ_4K;
unmapped_len += SZ_4K;
}
mmu_hw_do_operation(pfdev, 0, bo->node.start << PAGE_SHIFT,
bo->node.size << PAGE_SHIFT, AS_COMMAND_FLUSH_PT);
mutex_unlock(&pfdev->mmu->lock);
pm_runtime_mark_last_busy(pfdev->dev);
pm_runtime_put_autosuspend(pfdev->dev);
}
static void mmu_tlb_inv_context_s1(void *cookie)
{
struct panfrost_device *pfdev = cookie;
mmu_hw_do_operation(pfdev, 0, 0, ~0UL, AS_COMMAND_FLUSH_MEM);
}
static void mmu_tlb_inv_range_nosync(unsigned long iova, size_t size,
size_t granule, bool leaf, void *cookie)
{}
static void mmu_tlb_sync_context(void *cookie)
{
//struct panfrost_device *pfdev = cookie;
// TODO: Wait 1000 GPU cycles for HW_ISSUE_6367/T60X
}
static const struct iommu_gather_ops mmu_tlb_ops = {
.tlb_flush_all = mmu_tlb_inv_context_s1,
.tlb_add_flush = mmu_tlb_inv_range_nosync,
.tlb_sync = mmu_tlb_sync_context,
};
static const char *access_type_name(struct panfrost_device *pfdev,
u32 fault_status)
{
switch (fault_status & AS_FAULTSTATUS_ACCESS_TYPE_MASK) {
case AS_FAULTSTATUS_ACCESS_TYPE_ATOMIC:
if (panfrost_has_hw_feature(pfdev, HW_FEATURE_AARCH64_MMU))
return "ATOMIC";
else
return "UNKNOWN";
case AS_FAULTSTATUS_ACCESS_TYPE_READ:
return "READ";
case AS_FAULTSTATUS_ACCESS_TYPE_WRITE:
return "WRITE";
case AS_FAULTSTATUS_ACCESS_TYPE_EX:
return "EXECUTE";
default:
WARN_ON(1);
return NULL;
}
}
static irqreturn_t panfrost_mmu_irq_handler(int irq, void *data)
{
struct panfrost_device *pfdev = data;
u32 status = mmu_read(pfdev, MMU_INT_STAT);
int i;
if (!status)
return IRQ_NONE;
dev_err(pfdev->dev, "mmu irq status=%x\n", status);
for (i = 0; status; i++) {
u32 mask = BIT(i) | BIT(i + 16);
u64 addr;
u32 fault_status;
u32 exception_type;
u32 access_type;
u32 source_id;
if (!(status & mask))
continue;
fault_status = mmu_read(pfdev, AS_FAULTSTATUS(i));
addr = mmu_read(pfdev, AS_FAULTADDRESS_LO(i));
addr |= (u64)mmu_read(pfdev, AS_FAULTADDRESS_HI(i)) << 32;
/* decode the fault status */
exception_type = fault_status & 0xFF;
access_type = (fault_status >> 8) & 0x3;
source_id = (fault_status >> 16);
/* terminal fault, print info about the fault */
dev_err(pfdev->dev,
"Unhandled Page fault in AS%d at VA 0x%016llX\n"
"Reason: %s\n"
"raw fault status: 0x%X\n"
"decoded fault status: %s\n"
"exception type 0x%X: %s\n"
"access type 0x%X: %s\n"
"source id 0x%X\n",
i, addr,
"TODO",
fault_status,
(fault_status & (1 << 10) ? "DECODER FAULT" : "SLAVE FAULT"),
exception_type, panfrost_exception_name(pfdev, exception_type),
access_type, access_type_name(pfdev, fault_status),
source_id);
mmu_write(pfdev, MMU_INT_CLEAR, mask);
status &= ~mask;
}
return IRQ_HANDLED;
}
int panfrost_mmu_init(struct panfrost_device *pfdev)
{
struct io_pgtable_ops *pgtbl_ops;
int err, irq;
pfdev->mmu = devm_kzalloc(pfdev->dev, sizeof(*pfdev->mmu), GFP_KERNEL);
if (!pfdev->mmu)
return -ENOMEM;
mutex_init(&pfdev->mmu->lock);
irq = platform_get_irq_byname(to_platform_device(pfdev->dev), "mmu");
if (irq <= 0)
return -ENODEV;
err = devm_request_irq(pfdev->dev, irq, panfrost_mmu_irq_handler,
IRQF_SHARED, "mmu", pfdev);
if (err) {
dev_err(pfdev->dev, "failed to request mmu irq");
return err;
}
mmu_write(pfdev, MMU_INT_CLEAR, ~0);
mmu_write(pfdev, MMU_INT_MASK, ~0);
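	/*
	 * MMU_FEATURES packs the VA bit count in bits [7:0] and the PA bit
	 * count in bits [15:8]; a (hypothetical) value of 0x2830 would give
	 * ias = 48 and oas = 40.
	 */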
pfdev->mmu->pgtbl_cfg = (struct io_pgtable_cfg) {
		.pgsize_bitmap = SZ_4K, /* TODO: add SZ_2M | SZ_1G */
.ias = FIELD_GET(0xff, pfdev->features.mmu_features),
.oas = FIELD_GET(0xff00, pfdev->features.mmu_features),
.tlb = &mmu_tlb_ops,
.iommu_dev = pfdev->dev,
};
pgtbl_ops = alloc_io_pgtable_ops(ARM_MALI_LPAE, &pfdev->mmu->pgtbl_cfg,
pfdev);
if (!pgtbl_ops)
return -ENOMEM;
pfdev->mmu->pgtbl_ops = pgtbl_ops;
panfrost_mmu_enable(pfdev, 0);
return 0;
}
void panfrost_mmu_fini(struct panfrost_device *pfdev)
{
mmu_write(pfdev, MMU_INT_MASK, 0);
mmu_disable(pfdev, 0);
free_io_pgtable_ops(pfdev->mmu->pgtbl_ops);
}
/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright 2019 Linaro, Ltd, Rob Herring <robh@kernel.org> */
#ifndef __PANFROST_MMU_H__
#define __PANFROST_MMU_H__
struct panfrost_gem_object;
int panfrost_mmu_map(struct panfrost_gem_object *bo);
void panfrost_mmu_unmap(struct panfrost_gem_object *bo);
int panfrost_mmu_init(struct panfrost_device *pfdev);
void panfrost_mmu_fini(struct panfrost_device *pfdev);
void panfrost_mmu_enable(struct panfrost_device *pfdev, u32 as_nr);
#endif
This diff is collapsed.
/* SPDX-License-Identifier: MIT */
/*
* Copyright © 2014-2018 Broadcom
* Copyright © 2019 Collabora ltd.
*/
#ifndef _PANFROST_DRM_H_
#define _PANFROST_DRM_H_
#include "drm.h"
#if defined(__cplusplus)
extern "C" {
#endif
#define DRM_PANFROST_SUBMIT 0x00
#define DRM_PANFROST_WAIT_BO 0x01
#define DRM_PANFROST_CREATE_BO 0x02
#define DRM_PANFROST_MMAP_BO 0x03
#define DRM_PANFROST_GET_PARAM 0x04
#define DRM_PANFROST_GET_BO_OFFSET 0x05
#define DRM_IOCTL_PANFROST_SUBMIT DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_SUBMIT, struct drm_panfrost_submit)
#define DRM_IOCTL_PANFROST_WAIT_BO DRM_IOW(DRM_COMMAND_BASE + DRM_PANFROST_WAIT_BO, struct drm_panfrost_wait_bo)
#define DRM_IOCTL_PANFROST_CREATE_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_CREATE_BO, struct drm_panfrost_create_bo)
#define DRM_IOCTL_PANFROST_MMAP_BO DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_MMAP_BO, struct drm_panfrost_mmap_bo)
#define DRM_IOCTL_PANFROST_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_PARAM, struct drm_panfrost_get_param)
#define DRM_IOCTL_PANFROST_GET_BO_OFFSET DRM_IOWR(DRM_COMMAND_BASE + DRM_PANFROST_GET_BO_OFFSET, struct drm_panfrost_get_bo_offset)
#define PANFROST_JD_REQ_FS (1 << 0)
/**
* struct drm_panfrost_submit - ioctl argument for submitting commands to the 3D
* engine.
*
* This asks the kernel to have the GPU execute a render command list.
*/
struct drm_panfrost_submit {
/** Address to GPU mapping of job descriptor */
__u64 jc;
/** An optional array of sync objects to wait on before starting this job. */
__u64 in_syncs;
/** Number of sync objects to wait on before starting this job. */
__u32 in_sync_count;
/** An optional sync object to place the completion fence in. */
__u32 out_sync;
/** Pointer to a u32 array of the BOs that are referenced by the job. */
__u64 bo_handles;
/** Number of BO handles passed in (size is that times 4). */
__u32 bo_handle_count;
/** A combination of PANFROST_JD_REQ_* */
__u32 requirements;
};
/**
* struct drm_panfrost_wait_bo - ioctl argument for waiting for
* completion of the last DRM_PANFROST_SUBMIT on a BO.
*
* This is useful for cases where multiple processes might be
* rendering to a BO and you want to wait for all rendering to be
* completed.
*/
struct drm_panfrost_wait_bo {
__u32 handle;
__u32 pad;
__s64 timeout_ns; /* absolute */
};
/**
* struct drm_panfrost_create_bo - ioctl argument for creating Panfrost BOs.
*
* There are currently no values for the flags argument, but it may be
* used in a future extension.
*/
struct drm_panfrost_create_bo {
__u32 size;
__u32 flags;
/** Returned GEM handle for the BO. */
__u32 handle;
/* Pad, must be zero-filled. */
__u32 pad;
/**
* Returned offset for the BO in the GPU address space. This offset
* is private to the DRM fd and is valid for the lifetime of the GEM
* handle.
*
* This offset value will always be nonzero, since various HW
* units treat 0 specially.
*/
__u64 offset;
};
/**
* struct drm_panfrost_mmap_bo - ioctl argument for mapping Panfrost BOs.
*
* This doesn't actually perform an mmap. Instead, it returns the
* offset you need to use in an mmap on the DRM device node. This
* means that tools like valgrind end up knowing about the mapped
* memory.
*
* There are currently no values for the flags argument, but it may be
* used in a future extension.
*/
struct drm_panfrost_mmap_bo {
/** Handle for the object being mapped. */
__u32 handle;
__u32 flags;
/** offset into the drm node to use for subsequent mmap call. */
__u64 offset;
};
enum drm_panfrost_param {
DRM_PANFROST_PARAM_GPU_PROD_ID,
};
struct drm_panfrost_get_param {
__u32 param;
__u32 pad;
__u64 value;
};
/**
* Returns the offset for the BO in the GPU address space for this DRM fd.
* This is the same value returned by drm_panfrost_create_bo, if that was called
* from this DRM fd.
*/
struct drm_panfrost_get_bo_offset {
__u32 handle;
__u32 pad;
__u64 offset;
};
#if defined(__cplusplus)
}
#endif
#endif /* _PANFROST_DRM_H_ */
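To illustrate the buffer-object half of this UAPI, here is a minimal
userspace sketch (not part of the patch). It assumes installed kernel UAPI
headers and a Panfrost render node at /dev/dri/renderD128, and it skips
most error handling:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <drm/panfrost_drm.h>

int main(void)
{
	struct drm_panfrost_get_param gp = { .param = DRM_PANFROST_PARAM_GPU_PROD_ID };
	struct drm_panfrost_create_bo cb = { .size = 4096 };
	struct drm_panfrost_mmap_bo mb = { 0 };
	void *map;
	int fd = open("/dev/dri/renderD128", O_RDWR);

	if (fd < 0)
		return 1;

	/* The product id comes straight from the GPU's ID register. */
	if (ioctl(fd, DRM_IOCTL_PANFROST_GET_PARAM, &gp) == 0)
		printf("GPU product id: 0x%llx\n", (unsigned long long)gp.value);

	/* Create a page-sized BO; cb.offset reports its GPU virtual address. */
	if (ioctl(fd, DRM_IOCTL_PANFROST_CREATE_BO, &cb))
		return 1;

	/* MMAP_BO only computes the fake offset for the real mmap() below. */
	mb.handle = cb.handle;
	if (ioctl(fd, DRM_IOCTL_PANFROST_MMAP_BO, &mb))
		return 1;

	map = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, mb.offset);
	if (map != MAP_FAILED)
		memset(map, 0, 4096);	/* CPU writes land in the shmem-backed BO */

	return 0;
}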