Commit aed93ee7 authored by Dave Airlie

Merge branch 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux into drm-next

Highlights:
- Cooling device support from Russell, to allow GPU throttling on system
thermal overload.
- Explicit fencing support from Philipp, implemented in a similar way to
drm/msm.

* 'etnaviv/next' of https://git.pengutronix.de/git/lst/linux:
  drm/etnaviv: submit support for out-fences
  drm/etnaviv: return GPU fence through the submit structure
  drm/etnaviv: submit support for in-fences
  drm/etnaviv: add etnaviv cooling device
  drm/etnaviv: switch to postclose
  drm/etnaviv: add lockdep assert to fence allocation
parents 0e961332 78ec187f
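For context, explicit fencing lets userspace order jobs through sync file fds instead of relying solely on reservation-object (implicit) ordering. A minimal userspace sketch of chaining two jobs through the new interface — not part of this merge; it assumes an open etnaviv render node, two fully populated submit structs, and the illustrative helper name submit_chained:

/*
 * Illustrative userspace sketch (not from this merge): chain two jobs
 * through the new explicit fencing flags defined in etnaviv_drm.h.
 */
#include <sys/ioctl.h>
#include <unistd.h>
#include <xf86drm.h>
#include "etnaviv_drm.h"

static int submit_chained(int fd, struct drm_etnaviv_gem_submit *first,
			  struct drm_etnaviv_gem_submit *second)
{
	int ret;

	/* Ask for a sync file fd that signals when the first job retires. */
	first->flags = ETNA_SUBMIT_FENCE_FD_OUT;
	ret = drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, first);
	if (ret)
		return ret;

	/*
	 * Gate the second job on that fence explicitly, and opt out of
	 * implicit reservation-object syncing on its buffers.
	 */
	second->flags = ETNA_SUBMIT_FENCE_FD_IN | ETNA_SUBMIT_NO_IMPLICIT;
	second->fence_fd = first->fence_fd;
	ret = drmIoctl(fd, DRM_IOCTL_ETNAVIV_GEM_SUBMIT, second);

	close(first->fence_fd);	/* the returned fd is ours to release */
	return ret;
}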
--- a/drivers/gpu/drm/etnaviv/Kconfig
+++ b/drivers/gpu/drm/etnaviv/Kconfig
@@ -5,6 +5,7 @@ config DRM_ETNAVIV
 	depends on ARCH_MXC || ARCH_DOVE || (ARM && COMPILE_TEST)
 	depends on MMU
 	select SHMEM
+	select SYNC_FILE
 	select TMPFS
 	select IOMMU_API
 	select IOMMU_SUPPORT
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -111,7 +111,7 @@ static int etnaviv_open(struct drm_device *dev, struct drm_file *file)
 	return 0;
 }
 
-static void etnaviv_preclose(struct drm_device *dev, struct drm_file *file)
+static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
 {
 	struct etnaviv_drm_private *priv = dev->dev_private;
 	struct etnaviv_file_private *ctx = file->driver_priv;
@@ -488,7 +488,7 @@ static struct drm_driver etnaviv_drm_driver = {
 				DRIVER_PRIME |
 				DRIVER_RENDER,
 	.open               = etnaviv_open,
-	.preclose           = etnaviv_preclose,
+	.postclose          = etnaviv_postclose,
 	.gem_free_object_unlocked = etnaviv_gem_free_object,
 	.gem_vm_ops         = &vm_ops,
 	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
@@ -512,7 +512,7 @@ static struct drm_driver etnaviv_drm_driver = {
 	.desc               = "etnaviv DRM",
 	.date               = "20151214",
 	.major              = 1,
-	.minor              = 0,
+	.minor              = 1,
 };
 
 /*
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.h
@@ -20,6 +20,7 @@
 #include <linux/reservation.h>
 #include "etnaviv_drv.h"
 
+struct dma_fence;
 struct etnaviv_gem_ops;
 struct etnaviv_gem_object;
 
@@ -104,9 +105,10 @@ struct etnaviv_gem_submit {
 	struct drm_device *dev;
 	struct etnaviv_gpu *gpu;
 	struct ww_acquire_ctx ticket;
-	u32 fence;
+	struct dma_fence *fence;
 	unsigned int nr_bos;
 	struct etnaviv_gem_submit_bo bos[0];
+	u32 flags;
 };
 
 int etnaviv_gem_wait_bo(struct etnaviv_gpu *gpu, struct drm_gem_object *obj,
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -14,7 +14,9 @@
  * this program.  If not, see <http://www.gnu.org/licenses/>.
  */
 
+#include <linux/dma-fence-array.h>
 #include <linux/reservation.h>
+#include <linux/sync_file.h>
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_drv.h"
 #include "etnaviv_gpu.h"
@@ -169,8 +171,10 @@ static int submit_fence_sync(const struct etnaviv_gem_submit *submit)
 	for (i = 0; i < submit->nr_bos; i++) {
 		struct etnaviv_gem_object *etnaviv_obj = submit->bos[i].obj;
 		bool write = submit->bos[i].flags & ETNA_SUBMIT_BO_WRITE;
+		bool explicit = !!(submit->flags & ETNA_SUBMIT_NO_IMPLICIT);
 
-		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write);
+		ret = etnaviv_gpu_fence_sync_obj(etnaviv_obj, context, write,
+						 explicit);
 		if (ret)
 			break;
 	}
@@ -290,6 +294,7 @@ static void submit_cleanup(struct etnaviv_gem_submit *submit)
 	}
 
 	ww_acquire_fini(&submit->ticket);
+	dma_fence_put(submit->fence);
 	kfree(submit);
 }
 
@@ -303,6 +308,9 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	struct etnaviv_gem_submit *submit;
 	struct etnaviv_cmdbuf *cmdbuf;
 	struct etnaviv_gpu *gpu;
+	struct dma_fence *in_fence = NULL;
+	struct sync_file *sync_file = NULL;
+	int out_fence_fd = -1;
 	void *stream;
 	int ret;
 
@@ -326,6 +334,11 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		return -EINVAL;
 	}
 
+	if (args->flags & ~ETNA_SUBMIT_FLAGS) {
+		DRM_ERROR("invalid flags: 0x%x\n", args->flags);
+		return -EINVAL;
+	}
+
 	/*
 	 * Copy the command submission and bo array to kernel space in
 	 * one go, and do this outside of any locks.
@@ -365,12 +378,22 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_cmds;
 	}
 
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+		out_fence_fd = get_unused_fd_flags(O_CLOEXEC);
+		if (out_fence_fd < 0) {
+			ret = out_fence_fd;
+			goto err_submit_cmds;
+		}
+	}
+
 	submit = submit_create(dev, gpu, args->nr_bos);
 	if (!submit) {
 		ret = -ENOMEM;
 		goto err_submit_cmds;
 	}
 
+	submit->flags = args->flags;
+
 	ret = submit_lookup_objects(submit, file, bos, args->nr_bos);
 	if (ret)
 		goto err_submit_objects;
@@ -385,6 +408,24 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 		goto err_submit_objects;
 	}
 
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_IN) {
+		in_fence = sync_file_get_fence(args->fence_fd);
+		if (!in_fence) {
+			ret = -EINVAL;
+			goto err_submit_objects;
+		}
+
+		/*
+		 * Wait if the fence is from a foreign context, or if the fence
+		 * array contains any fence from a foreign context.
+		 */
+		if (!dma_fence_match_context(in_fence, gpu->fence_context)) {
+			ret = dma_fence_wait(in_fence, true);
+			if (ret)
+				goto err_submit_objects;
+		}
+	}
+
 	ret = submit_fence_sync(submit);
 	if (ret)
 		goto err_submit_objects;
@@ -405,7 +446,23 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret == 0)
 		cmdbuf = NULL;
 
-	args->fence = submit->fence;
+	if (args->flags & ETNA_SUBMIT_FENCE_FD_OUT) {
+		/*
+		 * This can be improved: ideally we want to allocate the sync
+		 * file before kicking off the GPU job and just attach the
+		 * fence to the sync file here, eliminating the ENOMEM
+		 * possibility at this stage.
+		 */
+		sync_file = sync_file_create(submit->fence);
+		if (!sync_file) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		fd_install(out_fence_fd, sync_file->file);
+	}
+
+	args->fence_fd = out_fence_fd;
+	args->fence = submit->fence->seqno;
 
 out:
 	submit_unpin_objects(submit);
@@ -419,9 +476,13 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
 	flush_workqueue(priv->wq);
 
 err_submit_objects:
+	if (in_fence)
+		dma_fence_put(in_fence);
 	submit_cleanup(submit);
 
 err_submit_cmds:
+	if (ret && (out_fence_fd >= 0))
+		put_unused_fd(out_fence_fd);
 	/* if we still own the cmdbuf */
 	if (cmdbuf)
 		etnaviv_cmdbuf_free(cmdbuf);
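The out-fence path above follows the kernel's usual three-step fd dance: reserve a number with get_unused_fd_flags() early, publish the file with fd_install() only once the job can no longer fail, and release the bare number with put_unused_fd() on error. A condensed sketch of that pattern — the helper name is hypothetical, not driver code:

/*
 * Condensed sketch of the fd-reservation pattern used in the submit
 * path above; publish_file is an illustrative name.
 */
#include <linux/fcntl.h>
#include <linux/file.h>

static int publish_file(struct file *file, int *out_fd)
{
	int fd = get_unused_fd_flags(O_CLOEXEC);	/* 1: reserve a number only */

	if (fd < 0)
		return fd;

	/*
	 * 2: any fallible work happens here; on error we must call
	 * put_unused_fd(fd), since no struct file is installed yet.
	 */

	fd_install(fd, file);	/* 3: publish; the fd table now owns the file */
	*out_fd = fd;
	return 0;
}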
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -18,6 +18,7 @@
 #include <linux/dma-fence.h>
 #include <linux/moduleparam.h>
 #include <linux/of_device.h>
+#include <linux/thermal.h>
 
 #include "etnaviv_cmdbuf.h"
 #include "etnaviv_dump.h"
@@ -409,6 +410,17 @@ static void etnaviv_gpu_load_clock(struct etnaviv_gpu *gpu, u32 clock)
 	gpu_write(gpu, VIVS_HI_CLOCK_CONTROL, clock);
 }
 
+static void etnaviv_gpu_update_clock(struct etnaviv_gpu *gpu)
+{
+	unsigned int fscale = 1 << (6 - gpu->freq_scale);
+	u32 clock;
+
+	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
+		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(fscale);
+
+	etnaviv_gpu_load_clock(gpu, clock);
+}
+
 static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 {
 	u32 control, idle;
@@ -426,11 +438,10 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	timeout = jiffies + msecs_to_jiffies(1000);
 
 	while (time_is_after_jiffies(timeout)) {
-		control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-			  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
 		/* enable clock */
-		etnaviv_gpu_load_clock(gpu, control);
+		etnaviv_gpu_update_clock(gpu);
+
+		control = gpu_read(gpu, VIVS_HI_CLOCK_CONTROL);
 
 		/* Wait for stable clock.  Vivante's code waited for 1ms */
 		usleep_range(1000, 10000);
@@ -490,11 +501,7 @@ static int etnaviv_hw_reset(struct etnaviv_gpu *gpu)
 	}
 
 	/* We rely on the GPU running, so program the clock */
-	control = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		  VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	/* enable clock */
-	etnaviv_gpu_load_clock(gpu, control);
+	etnaviv_gpu_update_clock(gpu);
 
 	return 0;
 }
@@ -1051,6 +1058,12 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 {
 	struct etnaviv_fence *f;
 
+	/*
+	 * GPU lock must already be held, otherwise fence completion order might
+	 * not match the seqno order assigned here.
+	 */
+	lockdep_assert_held(&gpu->lock);
+
 	f = kzalloc(sizeof(*f), GFP_KERNEL);
 	if (!f)
 		return NULL;
@@ -1064,7 +1077,7 @@ static struct dma_fence *etnaviv_gpu_fence_alloc(struct etnaviv_gpu *gpu)
 }
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive)
+	unsigned int context, bool exclusive, bool explicit)
 {
 	struct reservation_object *robj = etnaviv_obj->resv;
 	struct reservation_object_list *fobj;
@@ -1077,6 +1090,9 @@ int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
 			return ret;
 	}
 
+	if (explicit)
+		return 0;
+
 	/*
 	 * If we have any shared fences, then the exclusive fence
 	 * should be ignored as it will already have been signalled.
@@ -1321,8 +1337,8 @@ int etnaviv_gpu_submit(struct etnaviv_gpu *gpu,
 	mutex_lock(&gpu->lock);
 
 	gpu->event[event].fence = fence;
-	submit->fence = fence->seqno;
-	gpu->active_fence = submit->fence;
+	submit->fence = dma_fence_get(fence);
+	gpu->active_fence = submit->fence->seqno;
 
 	if (gpu->lastctx != cmdbuf->ctx) {
 		gpu->mmu->need_flush = true;
@@ -1526,17 +1542,13 @@ static int etnaviv_gpu_hw_suspend(struct etnaviv_gpu *gpu)
 #ifdef CONFIG_PM
 static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 {
-	u32 clock;
 	int ret;
 
 	ret = mutex_lock_killable(&gpu->lock);
 	if (ret)
 		return ret;
 
-	clock = VIVS_HI_CLOCK_CONTROL_DISABLE_DEBUG_REGISTERS |
-		VIVS_HI_CLOCK_CONTROL_FSCALE_VAL(0x40);
-
-	etnaviv_gpu_load_clock(gpu, clock);
+	etnaviv_gpu_update_clock(gpu);
 	etnaviv_gpu_hw_init(gpu);
 
 	gpu->switch_context = true;
@@ -1548,6 +1560,47 @@ static int etnaviv_gpu_hw_resume(struct etnaviv_gpu *gpu)
 }
 #endif
 
+static int
+etnaviv_gpu_cooling_get_max_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	*state = 6;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_get_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long *state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	*state = gpu->freq_scale;
+
+	return 0;
+}
+
+static int
+etnaviv_gpu_cooling_set_cur_state(struct thermal_cooling_device *cdev,
+				  unsigned long state)
+{
+	struct etnaviv_gpu *gpu = cdev->devdata;
+
+	mutex_lock(&gpu->lock);
+	gpu->freq_scale = state;
+	if (!pm_runtime_suspended(gpu->dev))
+		etnaviv_gpu_update_clock(gpu);
+	mutex_unlock(&gpu->lock);
+
+	return 0;
+}
+
+static struct thermal_cooling_device_ops cooling_ops = {
+	.get_max_state = etnaviv_gpu_cooling_get_max_state,
+	.get_cur_state = etnaviv_gpu_cooling_get_cur_state,
+	.set_cur_state = etnaviv_gpu_cooling_set_cur_state,
+};
+
 static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	void *data)
 {
@@ -1556,13 +1609,20 @@ static int etnaviv_gpu_bind(struct device *dev, struct device *master,
 	struct etnaviv_gpu *gpu = dev_get_drvdata(dev);
 	int ret;
 
+	gpu->cooling = thermal_of_cooling_device_register(dev->of_node,
+				(char *)dev_name(dev), gpu, &cooling_ops);
+	if (IS_ERR(gpu->cooling))
+		return PTR_ERR(gpu->cooling);
+
 #ifdef CONFIG_PM
 	ret = pm_runtime_get_sync(gpu->dev);
 #else
 	ret = etnaviv_gpu_clk_enable(gpu);
 #endif
-	if (ret < 0)
+	if (ret < 0) {
+		thermal_cooling_device_unregister(gpu->cooling);
 		return ret;
+	}
 
 	gpu->drm = drm;
 	gpu->fence_context = dma_fence_context_alloc(1);
@@ -1616,6 +1676,9 @@ static void etnaviv_gpu_unbind(struct device *dev, struct device *master,
 	}
 
 	gpu->drm = NULL;
+
+	thermal_cooling_device_unregister(gpu->cooling);
+	gpu->cooling = NULL;
 }
 
 static const struct component_ops gpu_ops = {
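The cooling device exposes seven throttle states, 0 through 6, and etnaviv_gpu_update_clock() turns a state into the core clock divider as fscale = 1 << (6 - freq_scale): state 0 yields 0x40, the full-speed value the driver previously hard-coded, while state 6 yields 0x01, roughly 1/64 of the full core clock. A standalone illustration of that mapping (not driver code):

/* Standalone illustration (not driver code) of the state -> FSCALE map. */
#include <stdio.h>

int main(void)
{
	unsigned int state;

	for (state = 0; state <= 6; state++) {
		unsigned int fscale = 1u << (6 - state);

		/* state 0 -> 0x40 (full speed) ... state 6 -> 0x01 (1/64) */
		printf("cooling state %u -> fscale 0x%02x (%2u/64 of full clock)\n",
		       state, fscale, fscale);
	}

	return 0;
}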
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -97,6 +97,7 @@ struct etnaviv_cmdbuf;
 struct etnaviv_gpu {
 	struct drm_device *drm;
+	struct thermal_cooling_device *cooling;
 	struct device *dev;
 	struct mutex lock;
 	struct etnaviv_chip_identity identity;
@@ -150,6 +151,7 @@ struct etnaviv_gpu {
 	u32 hangcheck_fence;
 	u32 hangcheck_dma_addr;
 	struct work_struct recover_work;
+	unsigned int freq_scale;
 };
 
 static inline void gpu_write(struct etnaviv_gpu *gpu, u32 reg, u32 data)
@@ -181,7 +183,7 @@ int etnaviv_gpu_debugfs(struct etnaviv_gpu *gpu, struct seq_file *m);
 #endif
 
 int etnaviv_gpu_fence_sync_obj(struct etnaviv_gem_object *etnaviv_obj,
-	unsigned int context, bool exclusive);
+	unsigned int context, bool exclusive, bool implicit);
 
 void etnaviv_gpu_retire(struct etnaviv_gpu *gpu);
 int etnaviv_gpu_wait_fence_interruptible(struct etnaviv_gpu *gpu,
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo {
  * one or more cmdstream buffers.  This allows for conditional execution
  * (context-restore), and IB buffers needed for per tile/bin draw cmds.
  */
+#define ETNA_SUBMIT_NO_IMPLICIT   0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN   0x0002
+#define ETNA_SUBMIT_FENCE_FD_OUT  0x0004
+#define ETNA_SUBMIT_FLAGS         (ETNA_SUBMIT_NO_IMPLICIT | \
+                                   ETNA_SUBMIT_FENCE_FD_IN | \
+                                   ETNA_SUBMIT_FENCE_FD_OUT)
 #define ETNA_PIPE_3D      0x00
 #define ETNA_PIPE_2D      0x01
 #define ETNA_PIPE_VG      0x02
@@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit {
 	__u64 bos;            /* in, ptr to array of submit_bo's */
 	__u64 relocs;         /* in, ptr to array of submit_reloc's */
 	__u64 stream;         /* in, ptr to cmdstream */
+	__u32 flags;          /* in, mask of ETNA_SUBMIT_x */
+	__s32 fence_fd;       /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
 };
 
 /* The normal way to synchronize with the GPU is just to CPU_PREP on
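Because the out-fence comes back as a sync file, any ordinary fd-based wait works on it; the fd becomes readable once the fence signals. A minimal userspace sketch, assuming fence_fd was returned by the submit ioctl above (the function name is illustrative):

/*
 * Minimal userspace sketch: wait for the sync file fd returned via
 * ETNA_SUBMIT_FENCE_FD_OUT to signal.
 */
#include <errno.h>
#include <poll.h>

static int wait_fence_fd(int fence_fd, int timeout_ms)
{
	struct pollfd pfd = {
		.fd = fence_fd,
		.events = POLLIN,	/* sync_file fds become readable on signal */
	};
	int ret;

	do {
		ret = poll(&pfd, 1, timeout_ms);
	} while (ret < 0 && errno == EINTR);

	return ret == 1 ? 0 : -1;	/* 0 once signalled, -1 on timeout/error */
}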