Commit fd694aaa authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.10-rc6-part-two' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "This is the main request for rc6, since really the one earlier was the
  rc5 one :-)

  The main thing are the nouveau specific race fixes for the connector
  locking bug we fixed in -next and reverted here as it has quite large
  prereqs. These two fixes should solve the problem at that level and we
  can fix it properly in 4.11

  Otherwise i915 has a bunch of changes, one ABI change for GVT related
  stuff, some VC4 leak fixes, one core fence fix and some AMD changes,
  oh and one ast hang avoidance fix.

  Hoping it calms down around now"

* tag 'drm-fixes-for-v4.10-rc6-part-two' of git://people.freedesktop.org/~airlied/linux: (25 commits)
  drm/nouveau: Handle fbcon suspend/resume in separate worker
  drm/nouveau: Don't enable polling twice on runtime resume
  drm/ast: Fix system hang when P2A is disabled
  Revert "drm/radeon: always apply pci shutdown callbacks"
  drm/i915: reinstate call to trace_i915_vma_bind
  drm/i915: Move atomic state free from out of fence release
  drm/i915: Check for NULL atomic state in intel_crtc_disable_noatomic()
  drm/i915: Fix calculation of rotated x and y offsets for planar formats
  drm/i915: Don't init hpd polling for vlv and chv from runtime_suspend()
  drm/i915: Don't leak edid in intel_crt_detect_ddc()
  drm/i915: Release temporary load-detect state upon switching
  drm/i915: prevent crash with .disable_display parameter
  drm/i915: Avoid drm_atomic_state_put(NULL) in intel_display_resume
  MAINTAINERS: update new mail list for intel gvt driver
  drm/i915/gvt: Fix kmem_cache_create() name
  drm/i915/gvt/kvmgt: mdev ABI is available_instances, not available_instance
  drm/amdgpu: fix unload driver issue for virtual display
  drm/amdgpu: check ring being ready before using
  drm/vc4: Return -EINVAL on the overflow checks failing.
  drm/vc4: Fix an integer overflow in temporary allocation layout.
  ...
parents 2287a240 736a1494
@@ -4153,7 +4153,7 @@ F: Documentation/gpu/i915.rst
INTEL GVT-g DRIVERS (Intel GPU Virtualization)
M: Zhenyu Wang <zhenyuw@linux.intel.com>
M: Zhi Wang <zhi.a.wang@intel.com>
-L: igvt-g-dev@lists.01.org
+L: intel-gvt-dev@lists.freedesktop.org
L: intel-gfx@lists.freedesktop.org
W: https://01.org/igvt-g
T: git https://github.com/01org/gvt-linux.git
......
@@ -83,6 +83,13 @@ int amdgpu_cs_get_ring(struct amdgpu_device *adev, u32 ip_type,
        }
        break;
    }

+   if (!(*out_ring && (*out_ring)->adev)) {
+       DRM_ERROR("Ring %d is not initialized on IP %d\n",
+                 ring, ip_type);
+       return -EINVAL;
+   }
+
    return 0;
}
......
@@ -627,11 +627,8 @@ static const struct drm_encoder_helper_funcs dce_virtual_encoder_helper_funcs =
static void dce_virtual_encoder_destroy(struct drm_encoder *encoder)
{
-   struct amdgpu_encoder *amdgpu_encoder = to_amdgpu_encoder(encoder);
-
-   kfree(amdgpu_encoder->enc_priv);
    drm_encoder_cleanup(encoder);
-   kfree(amdgpu_encoder);
+   kfree(encoder);
}

static const struct drm_encoder_funcs dce_virtual_encoder_funcs = {
......
@@ -113,6 +113,7 @@ struct ast_private {
    struct ttm_bo_kmap_obj cache_kmap;
    int next_cursor;
    bool support_wide_screen;
+   bool DisableP2A;

    enum ast_tx_chip tx_chip_type;
    u8 dp501_maxclk;
......
@@ -124,6 +124,12 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
    } else
        *need_post = false;

+   /* Check P2A Access */
+   ast->DisableP2A = true;
+   data = ast_read32(ast, 0xf004);
+   if (data != 0xFFFFFFFF)
+       ast->DisableP2A = false;
+
    /* Check if we support wide screen */
    switch (ast->chip) {
    case AST1180:
@@ -140,6 +146,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
            ast->support_wide_screen = true;
        else {
            ast->support_wide_screen = false;
+           if (ast->DisableP2A == false) {
            /* Read SCU7c (silicon revision register) */
            ast_write32(ast, 0xf004, 0x1e6e0000);
            ast_write32(ast, 0xf000, 0x1);
@@ -150,6 +157,7 @@ static int ast_detect_chip(struct drm_device *dev, bool *need_post)
            if (ast->chip == AST2400 && data == 0x100) /* ast1400 */
                ast->support_wide_screen = true;
            }
+           }
        break;
    }
@@ -216,16 +224,16 @@ static int ast_get_dram_info(struct drm_device *dev)
    uint32_t data, data2;
    uint32_t denum, num, div, ref_pll;

+   if (ast->DisableP2A)
+   {
+       ast->dram_bus_width = 16;
+       ast->dram_type = AST_DRAM_1Gx16;
+       ast->mclk = 396;
+   }
+   else
+   {
        ast_write32(ast, 0xf004, 0x1e6e0000);
        ast_write32(ast, 0xf000, 0x1);
        ast_write32(ast, 0x10000, 0xfc600309);

        do {
            if (pci_channel_offline(dev->pdev))
                return -EIO;
        } while (ast_read32(ast, 0x10000) != 0x01);
        data = ast_read32(ast, 0x10004);

        if (data & 0x40)
@@ -290,6 +298,7 @@ static int ast_get_dram_info(struct drm_device *dev)
            break;
        }
        ast->mclk = ref_pll * (num + 2) / (denum + 2) * (div * 1000);
+   }

    return 0;
}
......
@@ -379,12 +379,20 @@ void ast_post_gpu(struct drm_device *dev)
    ast_open_key(ast);
    ast_set_def_ext_reg(dev);

+   if (ast->DisableP2A == false)
+   {
        if (ast->chip == AST2300 || ast->chip == AST2400)
            ast_init_dram_2300(dev);
        else
            ast_init_dram_reg(dev);

        ast_init_3rdtx(dev);
+   }
+   else
+   {
+       if (ast->tx_chip_type != AST_TX_NONE)
+           ast_set_index_reg_mask(ast, AST_IO_CRTC_PORT, 0xa3, 0xcf, 0x80); /* Enable DVO */
+   }
}
/* AST 2300 DRAM settings */
......
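The three ast hunks above share one idea: on boards where the firmware disables the
P2A (PCI-to-AHB) bridge, reads through the BAR1 window at 0xf004 return all ones, so
every SCU "register" value obtained that way is garbage, and the POST and DRAM-probe
paths could hang or misprogram the chip. A minimal sketch of the probe-and-fallback
pattern, using an illustrative struct rather than the real ast_private:

    #include <stdbool.h>
    #include <stdint.h>

    struct dram_info {
        int bus_width;   /* bits */
        int mclk_mhz;
    };

    /* A readback of all ones through the window means the bridge is dead. */
    static bool p2a_enabled(uint32_t probe_readback)
    {
        return probe_readback != 0xFFFFFFFFu;
    }

    /* When the SCU cannot be read, fall back to the conservative defaults
     * the patch hardcodes: 16-bit bus, 1G x16 parts, 396 MHz MCLK. */
    static struct dram_info dram_info_fallback(void)
    {
        return (struct dram_info){ .bus_width = 16, .mclk_mhz = 396 };
    }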
@@ -291,15 +291,15 @@ drm_atomic_get_crtc_state(struct drm_atomic_state *state,
EXPORT_SYMBOL(drm_atomic_get_crtc_state);

static void set_out_fence_for_crtc(struct drm_atomic_state *state,
-                                  struct drm_crtc *crtc, s64 __user *fence_ptr)
+                                  struct drm_crtc *crtc, s32 __user *fence_ptr)
{
    state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = fence_ptr;
}

-static s64 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
+static s32 __user *get_out_fence_for_crtc(struct drm_atomic_state *state,
                                           struct drm_crtc *crtc)
{
-   s64 __user *fence_ptr;
+   s32 __user *fence_ptr;

    fence_ptr = state->crtcs[drm_crtc_index(crtc)].out_fence_ptr;
    state->crtcs[drm_crtc_index(crtc)].out_fence_ptr = NULL;
@@ -512,7 +512,7 @@ int drm_atomic_crtc_set_property(struct drm_crtc *crtc,
        state->color_mgmt_changed |= replaced;
        return ret;
    } else if (property == config->prop_out_fence_ptr) {
-       s64 __user *fence_ptr = u64_to_user_ptr(val);
+       s32 __user *fence_ptr = u64_to_user_ptr(val);

        if (!fence_ptr)
            return 0;
@@ -1915,7 +1915,7 @@ EXPORT_SYMBOL(drm_atomic_clean_old_fb);
 */
struct drm_out_fence_state {
-   s64 __user *out_fence_ptr;
+   s32 __user *out_fence_ptr;
    struct sync_file *sync_file;
    int fd;
};
@@ -1952,7 +1952,7 @@ static int prepare_crtc_signaling(struct drm_device *dev,
        return 0;

    for_each_crtc_in_state(state, crtc, crtc_state, i) {
-       u64 __user *fence_ptr;
+       s32 __user *fence_ptr;

        fence_ptr = get_out_fence_for_crtc(crtc_state->state, crtc);
......
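The s64 -> s32 change above is an ABI-critical detail: OUT_FENCE_PTR carries a
userspace pointer to a 32-bit signed slot (a file descriptor), and storing an s64
through it made the kernel scribble four extra bytes past the fd. A hedged
userspace sketch of how the property is meant to be used; crtc_id and
out_fence_prop_id are assumed to have been looked up beforehand via
drmModeObjectGetProperties():

    #include <errno.h>
    #include <stdint.h>
    #include <xf86drm.h>
    #include <xf86drmMode.h>

    static int commit_with_out_fence(int fd, uint32_t crtc_id,
                                     uint32_t out_fence_prop_id)
    {
        int32_t out_fence_fd = -1;   /* kernel writes a sync_file fd here */
        drmModeAtomicReq *req = drmModeAtomicAlloc();
        int ret;

        if (!req)
            return -ENOMEM;
        /* The property value is the user pointer itself, cast to u64. */
        drmModeAtomicAddProperty(req, crtc_id, out_fence_prop_id,
                                 (uint64_t)(uintptr_t)&out_fence_fd);
        ret = drmModeAtomicCommit(fd, req, 0, NULL);
        drmModeAtomicFree(req);
        return ret ? ret : out_fence_fd;
    }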
@@ -481,7 +481,6 @@ struct parser_exec_state {
    (s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)

static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;

/* ring ALL, type = 0 */
static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
    struct intel_gvt *gvt = s->vgpu->gvt;

-   if (bypass_batch_buffer_scan)
-       return 0;
-
    if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
        /* BDW decides privilege based on address space */
        if (cmd_val(s, 0) & (1 << 8))
......
@@ -364,43 +364,16 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
    ((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))

-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-                             unsigned long add, int gmadr_bytes)
-{
-   if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-       return -1;
-
-   *((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-       BATCH_BUFFER_ADDR_MASK;
-   if (gmadr_bytes == 8) {
-       *((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-           add & BATCH_BUFFER_ADDR_HIGH_MASK;
-   }
-   return 0;
-}
-
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
-   int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+   const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
    struct intel_shadow_bb_entry *entry_obj;

    /* pin the gem object to ggtt */
-   if (!list_empty(&workload->shadow_bb)) {
-       struct intel_shadow_bb_entry *entry_obj =
-           list_first_entry(&workload->shadow_bb,
-                            struct intel_shadow_bb_entry,
-                            list);
-       struct intel_shadow_bb_entry *temp;
-
-       list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-                                list) {
+   list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
        struct i915_vma *vma;

-       vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-                                      4, 0);
+       vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
        if (IS_ERR(vma)) {
            gvt_err("Cannot pin\n");
            return;
@@ -412,10 +385,9 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
         */

        /* update the relocate gma with shadow batch buffer*/
-       set_gma_to_bb_cmd(entry_obj,
-                         i915_ggtt_offset(vma),
-                         gmadr_bytes);
-       }
+       entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+       if (gmadr_bytes == 8)
+           entry_obj->bb_start_cmd_va[2] = 0;
    }
}
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
        INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
    }

-   vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+   vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
                                        sizeof(struct intel_vgpu_workload), 0,
                                        SLAB_HWCACHE_ALIGN,
                                        NULL);
......
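The kmem_cache_create() rename above is not cosmetic: under SLUB the cache name
becomes a directory name in /sys/kernel/slab/ (and a /proc/slabinfo entry), so a
name containing a space produces a malformed sysfs node. A one-line sketch of the
safe pattern, assuming the usual kernel context:

    /* Cache names surface in sysfs and /proc/slabinfo: no spaces allowed. */
    struct kmem_cache *cache = kmem_cache_create("gvt-g_vgpu_workload",
                                    sizeof(struct intel_vgpu_workload), 0,
                                    SLAB_HWCACHE_ALIGN, NULL);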
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
    return NULL;
}

-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-                                       char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+                                        struct device *dev, char *buf)
{
    struct intel_vgpu_type *type;
    unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
                 type->fence);
}

-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
static MDEV_TYPE_ATTR_RO(device_api);
static MDEV_TYPE_ATTR_RO(description);

static struct attribute *type_attrs[] = {
-   &mdev_type_attr_available_instance.attr,
+   &mdev_type_attr_available_instances.attr,
    &mdev_type_attr_device_api.attr,
    &mdev_type_attr_description.attr,
    NULL,
......
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
    struct drm_i915_gem_object *obj;
    void *va;
    unsigned long len;
-   void *bb_start_cmd_va;
+   u32 *bb_start_cmd_va;
};
#define workload_q_head(vgpu, ring_id) \
......
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
    assert_forcewakes_inactive(dev_priv);

-   if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+   if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
        intel_hpd_poll_init(dev_priv);

    DRM_DEBUG_KMS("Device suspended\n");
......
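The one-character operator change above fixes a classic De Morgan slip: no device
is both Valley View and Cherry View, so "!vlv || !chv" is always true and HPD
polling was initialized from runtime suspend on every platform. A self-contained
illustration of the two predicates:

    #include <assert.h>
    #include <stdbool.h>

    int main(void)
    {
        bool is_vlv = true, is_chv = false;  /* mutually exclusive checks */

        assert(!is_vlv || !is_chv);          /* buggy form: a tautology    */
        assert(!(!is_vlv && !is_chv));       /* fixed form skips vlv/chv   */
        return 0;
    }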
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
    struct i915_frontbuffer_tracking fb_tracking;

+   struct intel_atomic_helper {
+       struct llist_head free_list;
+       struct work_struct free_work;
+   } atomic_helper;
+
    u16 orig_clock;

    bool mchbar_need_disable;
......
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
            return ret;
    }

+   trace_i915_vma_bind(vma, bind_flags);
    ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
    if (ret)
        return ret;
......
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
    struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
    struct edid *edid;
    struct i2c_adapter *i2c;
+   bool ret = false;

    BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
         */
        if (!is_digital) {
            DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-           return true;
+           ret = true;
+       } else {
+           DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
        }
-
-       DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
    } else {
        DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
    }

    kfree(edid);

-   return false;
+   return ret;
}
static enum drm_connector_status
......
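The intel_crt_detect_ddc() change above is the standard cure for an early return
leaking a heap object: route every outcome through one exit that frees the buffer.
A generic, hedged sketch of the shape (detect() and the buffer are illustrative,
not the driver's code):

    #include <stdbool.h>
    #include <stdlib.h>

    static bool detect(void)
    {
        bool ret = false;
        unsigned char *edid = calloc(1, 128);  /* stands in for drm_get_edid() */

        if (edid && edid[0] == 0x00)           /* placeholder detection test */
            ret = true;

        free(edid);  /* single exit: the buffer is released on every path */
        return ret;
    }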
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
     * We only keep the x/y offsets, so push all of the
     * gtt offset into the x/y offsets.
     */
-   _intel_adjust_tile_offset(&x, &y, tile_size,
-                             tile_width, tile_height, pitch_tiles,
+   _intel_adjust_tile_offset(&x, &y,
+                             tile_width, tile_height,
+                             tile_size, pitch_tiles,
                              gtt_offset_rotated * tile_size, 0);

    gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -6849,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
    }

    state = drm_atomic_state_alloc(crtc->dev);
+   if (!state) {
+       DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+                     crtc->base.id, crtc->name);
+       return;
+   }

    state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;

    /* Everything's already locked, -EDEADLK can't happen. */
@@ -11246,6 +11253,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
    }

    old->restore_state = restore_state;
+   drm_atomic_state_put(state);

    /* let the connector get through one full cycle before testing */
    intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14515,9 +14523,15 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
        break;

    case FENCE_FREE:
-       drm_atomic_state_put(&state->base);
-       break;
+       {
+           struct intel_atomic_helper *helper =
+               &to_i915(state->base.dev)->atomic_helper;
+
+           if (llist_add(&state->freed, &helper->free_list))
+               schedule_work(&helper->free_work);
+           break;
+       }
    }

    return NOTIFY_DONE;
}
@@ -16395,6 +16409,18 @@ static void sanitize_watermarks(struct drm_device *dev)
    drm_modeset_acquire_fini(&ctx);
}

+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+   struct drm_i915_private *dev_priv =
+       container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+   struct intel_atomic_state *state, *next;
+   struct llist_node *freed;
+
+   freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+   llist_for_each_entry_safe(state, next, freed, freed)
+       drm_atomic_state_put(&state->base);
+}
+
int intel_modeset_init(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16414,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
    dev->mode_config.funcs = &intel_mode_funcs;

+   INIT_WORK(&dev_priv->atomic_helper.free_work,
+             intel_atomic_helper_free_state);
+
    intel_init_quirks(dev);

    intel_init_pm(dev_priv);
@@ -17027,6 +17056,7 @@ void intel_display_resume(struct drm_device *dev)
    if (ret)
        DRM_ERROR("Restoring old state failed with %i\n", ret);
+   if (state)
        drm_atomic_state_put(state);
}
@@ -17097,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
{
    struct drm_i915_private *dev_priv = to_i915(dev);

+   flush_work(&dev_priv->atomic_helper.free_work);
+   WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
    intel_disable_gt_powersave(dev_priv);

    /*
......
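The intel_display.c changes above all serve one pattern: the commit-ready fence can
be released from a context where freeing the atomic state (which may sleep) is
unsafe, so FENCE_FREE now pushes the state onto a lock-free llist and defers the
actual drm_atomic_state_put() to a worker. A compact sketch of that pattern with
illustrative types (struct item and struct deferred are not i915 names):

    #include <linux/llist.h>
    #include <linux/slab.h>
    #include <linux/workqueue.h>

    struct item {
        struct llist_node node;
    };

    struct deferred {
        struct llist_head free_list;
        struct work_struct free_work;
    };

    /* Process context: drain everything queued so far, then free it. */
    static void deferred_free_worker(struct work_struct *work)
    {
        struct deferred *d = container_of(work, struct deferred, free_work);
        struct item *it, *next;
        struct llist_node *freed = llist_del_all(&d->free_list);

        llist_for_each_entry_safe(it, next, freed, node)
            kfree(it);
    }

    /* Producer side, callable from atomic context: llist_add() returns
     * true only when the list was previously empty, so the work is kicked
     * once per batch rather than once per object. */
    static void deferred_free(struct deferred *d, struct item *it)
    {
        if (llist_add(&it->node, &d->free_list))
            schedule_work(&d->free_work);
    }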
@@ -370,6 +370,8 @@ struct intel_atomic_state {
    struct skl_wm_values wm_results;

    struct i915_sw_fence commit_ready;
+
+   struct llist_node freed;
};
struct intel_plane_state {
......
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
{
    struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;

+   if (!ifbdev)
+       return;
+
    ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
}
......
@@ -411,6 +411,7 @@ nouveau_display_init(struct drm_device *dev)
        return ret;

    /* enable polling for external displays */
+   if (!dev->mode_config.poll_enabled)
        drm_kms_helper_poll_enable(dev);

    /* enable hotplug interrupts */
......
@@ -773,7 +773,10 @@ nouveau_pmops_runtime_resume(struct device *dev)
    pci_set_master(pdev);

    ret = nouveau_do_resume(drm_dev, true);
+
+   if (!drm_dev->mode_config.poll_enabled)
        drm_kms_helper_poll_enable(drm_dev);
+
    /* do magic */
    nvif_mask(&device->object, 0x088488, (1 << 25), (1 << 25));
    vga_switcheroo_set_dynamic_switch(pdev, VGA_SWITCHEROO_ON);
......
@@ -165,6 +165,8 @@ struct nouveau_drm {
    struct backlight_device *backlight;
    struct list_head bl_connectors;
    struct work_struct hpd_work;
+   struct work_struct fbcon_work;
+   int fbcon_new_state;
#ifdef CONFIG_ACPI
    struct notifier_block acpi_nb;
#endif
......
@@ -470,21 +470,45 @@ static const struct drm_fb_helper_funcs nouveau_fbcon_helper_funcs = {
    .fb_probe = nouveau_fbcon_create,
};

-void
-nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+static void
+nouveau_fbcon_set_suspend_work(struct work_struct *work)
{
-   struct nouveau_drm *drm = nouveau_drm(dev);
-   if (drm->fbcon) {
+   struct nouveau_drm *drm = container_of(work, typeof(*drm), fbcon_work);
+   int state = READ_ONCE(drm->fbcon_new_state);
+
+   if (state == FBINFO_STATE_RUNNING)
+       pm_runtime_get_sync(drm->dev->dev);
+
    console_lock();
    if (state == FBINFO_STATE_RUNNING)
-       nouveau_fbcon_accel_restore(dev);
+       nouveau_fbcon_accel_restore(drm->dev);
    drm_fb_helper_set_suspend(&drm->fbcon->helper, state);
    if (state != FBINFO_STATE_RUNNING)
-       nouveau_fbcon_accel_save_disable(dev);
+       nouveau_fbcon_accel_save_disable(drm->dev);
    console_unlock();
+
+   if (state == FBINFO_STATE_RUNNING) {
+       pm_runtime_mark_last_busy(drm->dev->dev);
+       pm_runtime_put_sync(drm->dev->dev);
+   }
+}
+
+void
+nouveau_fbcon_set_suspend(struct drm_device *dev, int state)
+{
+   struct nouveau_drm *drm = nouveau_drm(dev);
+
+   if (!drm->fbcon)
+       return;
+
+   drm->fbcon_new_state = state;
+   /* Since runtime resume can happen as a result of a sysfs operation,
+    * it's possible we already have the console locked. So handle fbcon
+    * init/deinit from a separate work thread
+    */
+   schedule_work(&drm->fbcon_work);
}

int
nouveau_fbcon_init(struct drm_device *dev)
{
@@ -502,6 +526,7 @@ nouveau_fbcon_init(struct drm_device *dev)
        return -ENOMEM;

    drm->fbcon = fbcon;
+   INIT_WORK(&drm->fbcon_work, nouveau_fbcon_set_suspend_work);

    drm_fb_helper_prepare(dev, &fbcon->helper, &nouveau_fbcon_helper_funcs);
......
@@ -366,10 +366,9 @@ static void
radeon_pci_shutdown(struct pci_dev *pdev)
{
    /* if we are running in a VM, make sure the device
-    * torn down properly on reboot/shutdown.
-    * unfortunately we can't detect certain
-    * hypervisors so just do this all the time.
+    * torn down properly on reboot/shutdown
     */
-   radeon_pci_remove(pdev);
+   if (radeon_device_is_virtual())
+       radeon_pci_remove(pdev);
}
......
@@ -839,7 +839,7 @@ static void vc4_crtc_destroy_state(struct drm_crtc *crtc,
    }

-   __drm_atomic_helper_crtc_destroy_state(state);
+   drm_atomic_helper_crtc_destroy_state(crtc, state);
}

static const struct drm_crtc_funcs vc4_crtc_funcs = {
......
@@ -594,12 +594,14 @@ vc4_get_bcl(struct drm_device *dev, struct vc4_exec_info *exec)
                                             args->shader_rec_count);
    struct vc4_bo *bo;

-   if (uniforms_offset < shader_rec_offset ||
+   if (shader_rec_offset < args->bin_cl_size ||
+       uniforms_offset < shader_rec_offset ||
        exec_size < uniforms_offset ||
        args->shader_rec_count >= (UINT_MAX /
                                   sizeof(struct vc4_shader_state)) ||
        temp_size < exec_size) {
        DRM_ERROR("overflow in exec arguments\n");
+       ret = -EINVAL;
        goto fail;
    }
......
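The vc4_get_bcl() hunk above hardens a chained size computation against wraparound:
each offset is derived by adding a userspace-supplied size to the previous offset,
so validity means the chain is monotonically non-decreasing, and every link (not
just the last) has to be checked. A standalone version of the check:

    #include <stdbool.h>
    #include <stdint.h>

    static bool layout_is_sane(uint32_t bin_cl_size, uint32_t shader_rec_offset,
                               uint32_t uniforms_offset, uint32_t exec_size)
    {
        /* Any 32-bit overflow while laying out the regions makes a later
         * offset smaller than an earlier one, which these compares catch. */
        return shader_rec_offset >= bin_cl_size &&
               uniforms_offset >= shader_rec_offset &&
               exec_size >= uniforms_offset;
    }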
@@ -461,7 +461,7 @@ static int vc4_rcl_surface_setup(struct vc4_exec_info *exec,
    }

    ret = vc4_full_res_bounds_check(exec, *obj, surf);
-   if (!ret)
+   if (ret)
        return ret;

    return 0;
......
@@ -144,7 +144,7 @@ struct __drm_crtcs_state {
    struct drm_crtc *ptr;
    struct drm_crtc_state *state;
    struct drm_crtc_commit *commit;
-   s64 __user *out_fence_ptr;
+   s32 __user *out_fence_ptr;
};
struct __drm_connnectors_state {
......
@@ -488,7 +488,7 @@ struct drm_mode_config {
    /**
     * @prop_out_fence_ptr: Sync File fd pointer representing the
     * outgoing fences for a CRTC. Userspace should provide a pointer to a
-    * value of type s64, and then cast that pointer to u64.
+    * value of type s32, and then cast that pointer to u64.
     */
    struct drm_property *prop_out_fence_ptr;
    /**
/**
......