Commit 736a1494 authored by Dave Airlie

Merge tag 'drm-intel-fixes-2017-01-26' of git://anongit.freedesktop.org/git/drm-intel into drm-fixes

More fixes than I'd like at this stage, but I think the holidays and
conferences have delayed finding and fixing the stuff a bit. Almost all
of them have Fixes: tags, so these aren't random fixes; we can point
fingers at the commits that broke stuff.

There's an ABI fix to GVT from Alex, before we go on and release a kernel
with the wrong attribute name.
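
Why the attribute name counts as ABI (an editorial aside, not part of the pull):
userspace tools discover how many mediated devices of a given type can still be
created by reading the available_instances file that mdev exposes under each
parent device's mdev_supported_types/<type-id>/ directory, so releasing it as
available_instance would break those readers. A minimal sketch of such a
reader; the sysfs path below is a placeholder, not a guaranteed location.

/* Minimal userspace sketch: read one mdev type's available_instances count.
 * The parent device and type id below are placeholders; adjust for the
 * system at hand. */
#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:00:02.0/mdev_supported_types/"
		"i915-GVTg_V5_4/available_instances";
	FILE *f = fopen(path, "r");
	unsigned int n;

	if (!f) {
		perror(path);
		return 1;
	}
	if (fscanf(f, "%u", &n) != 1) {
		fprintf(stderr, "unexpected contents in %s\n", path);
		fclose(f);
		return 1;
	}
	fclose(f);
	printf("instances still available: %u\n", n);
	return 0;
}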

* tag 'drm-intel-fixes-2017-01-26' of git://anongit.freedesktop.org/git/drm-intel:
  drm/i915: reinstate call to trace_i915_vma_bind
  drm/i915: Move atomic state free from out of fence release
  drm/i915: Check for NULL atomic state in intel_crtc_disable_noatomic()
  drm/i915: Fix calculation of rotated x and y offsets for planar formats
  drm/i915: Don't init hpd polling for vlv and chv from runtime_suspend()
  drm/i915: Don't leak edid in intel_crt_detect_ddc()
  drm/i915: Release temporary load-detect state upon switching
  drm/i915: prevent crash with .disable_display parameter
  drm/i915: Avoid drm_atomic_state_put(NULL) in intel_display_resume
  MAINTAINERS: update new mail list for intel gvt driver
  drm/i915/gvt: Fix kmem_cache_create() name
  drm/i915/gvt/kvmgt: mdev ABI is available_instances, not available_instance
  drm/i915/gvt: Fix relocation of shadow bb
  drm/i915/gvt: Enable the shadow batch buffer
parents 15266ae3 45d9f439
@@ -4153,7 +4153,7 @@ F:	Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M:	Zhenyu Wang <zhenyuw@linux.intel.com>
 M:	Zhi Wang <zhi.a.wang@intel.com>
-L:	igvt-g-dev@lists.01.org
+L:	intel-gvt-dev@lists.freedesktop.org
 L:	intel-gfx@lists.freedesktop.org
 W:	https://01.org/igvt-g
 T:	git https://github.com/01org/gvt-linux.git
......
@@ -481,7 +481,6 @@ struct parser_exec_state {
 	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
 	struct intel_gvt *gvt = s->vgpu->gvt;
 
-	if (bypass_batch_buffer_scan)
-		return 0;
-
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
 		/* BDW decides privilege based on address space */
 		if (cmd_val(s, 0) & (1 << 8))
......
@@ -364,43 +364,16 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-			     unsigned long add, int gmadr_bytes)
-{
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-		return -1;
-
-	*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-		BATCH_BUFFER_ADDR_MASK;
-	if (gmadr_bytes == 8) {
-		*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-			add & BATCH_BUFFER_ADDR_HIGH_MASK;
-	}
-
-	return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-	int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_shadow_bb_entry *entry_obj;
 
 	/* pin the gem object to ggtt */
-	if (!list_empty(&workload->shadow_bb)) {
-		struct intel_shadow_bb_entry *entry_obj =
-			list_first_entry(&workload->shadow_bb,
-					 struct intel_shadow_bb_entry,
-					 list);
-		struct intel_shadow_bb_entry *temp;
+	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+		struct i915_vma *vma;
 
-		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-				list) {
-			struct i915_vma *vma;
-
-			vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-					4, 0);
-			if (IS_ERR(vma)) {
-				gvt_err("Cannot pin\n");
-				return;
+		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+		if (IS_ERR(vma)) {
+			gvt_err("Cannot pin\n");
+			return;
@@ -412,10 +385,9 @@ static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 			 */
 
-			/* update the relocate gma with shadow batch buffer*/
-			set_gma_to_bb_cmd(entry_obj,
-					  i915_ggtt_offset(vma),
-					  gmadr_bytes);
-		}
+		/* update the relocate gma with shadow batch buffer*/
+		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+		if (gmadr_bytes == 8)
+			entry_obj->bb_start_cmd_va[2] = 0;
 	}
 }
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
 
-	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
 			sizeof(struct intel_vgpu_workload), 0,
 			SLAB_HWCACHE_ALIGN,
 			NULL);
......
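
An editorial aside on the relocation change in prepare_shadow_batch_buffer()
above: with bb_start_cmd_va now a u32 pointer, the shadowed
MI_BATCH_BUFFER_START command is patched as an array of 32-bit dwords, writing
the shadow buffer's GGTT offset into dword 1 and clearing dword 2 when the
command carries an 8-byte graphics address, since GGTT offsets fit in 32 bits.
A standalone sketch of that patching step, with illustrative values:

/* Standalone sketch of the relocation step: patch the address dwords of a
 * copied MI_BATCH_BUFFER_START so it points at the shadow buffer's GGTT
 * offset. The header value and offsets are illustrative placeholders. */
#include <stdint.h>
#include <stdio.h>

static void relocate_bb_start(uint32_t *bb_start_cmd, uint32_t ggtt_offset,
			      int gmadr_bytes)
{
	bb_start_cmd[1] = ggtt_offset;		/* low 32 address bits */
	if (gmadr_bytes == 8)
		bb_start_cmd[2] = 0;		/* GGTT offsets fit in 32 bits */
}

int main(void)
{
	/* dword 0: command header (placeholder), dwords 1..2: address */
	uint32_t cmd[3] = { 0x18800001, 0xdeadbeef, 0xdeadbeef };

	relocate_bb_start(cmd, 0x00400000, 8);
	printf("0x%08x 0x%08x 0x%08x\n", cmd[0], cmd[1], cmd[2]);
	return 0;
}
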
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
 	return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+		struct device *dev, char *buf)
 {
 	struct intel_vgpu_type *type;
 	unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 			type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-	&mdev_type_attr_available_instance.attr,
+	&mdev_type_attr_available_instances.attr,
 	&mdev_type_attr_device_api.attr,
 	&mdev_type_attr_description.attr,
 	NULL,
......
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
 	struct drm_i915_gem_object *obj;
 	void *va;
 	unsigned long len;
-	void *bb_start_cmd_va;
+	u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
......
@@ -2378,7 +2378,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	assert_forcewakes_inactive(dev_priv);
 
-	if (!IS_VALLEYVIEW(dev_priv) || !IS_CHERRYVIEW(dev_priv))
+	if (!IS_VALLEYVIEW(dev_priv) && !IS_CHERRYVIEW(dev_priv))
 		intel_hpd_poll_init(dev_priv);
 
 	DRM_DEBUG_KMS("Device suspended\n");
......
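
A side note on the one-character change above: no device is both Valleyview
and Cherryview, so the old condition !IS_VALLEYVIEW(dev_priv) ||
!IS_CHERRYVIEW(dev_priv) is true on every platform and hpd polling was
initialized unconditionally; the && form is what actually skips it on VLV and
CHV. A throwaway truth-table check with stand-in flags, not kernel code:

/* Quick truth-table check (editorial illustration only): "vlv" and "chv"
 * stand in for IS_VALLEYVIEW()/IS_CHERRYVIEW(); a device is at most one
 * of the two. */
#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	const struct { bool vlv, chv; } dev[] = {
		{ false, false },	/* e.g. a Skylake */
		{ true,  false },	/* Valleyview */
		{ false, true  },	/* Cherryview */
	};

	for (unsigned int i = 0; i < sizeof(dev) / sizeof(dev[0]); i++) {
		bool old_cond = !dev[i].vlv || !dev[i].chv; /* always true */
		bool fixed    = !dev[i].vlv && !dev[i].chv; /* skips vlv/chv */

		printf("vlv=%d chv=%d old=%d fixed=%d\n",
		       dev[i].vlv, dev[i].chv, old_cond, fixed);
	}
	return 0;
}
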
@@ -1977,6 +1977,11 @@ struct drm_i915_private {
 
 	struct i915_frontbuffer_tracking fb_tracking;
 
+	struct intel_atomic_helper {
+		struct llist_head free_list;
+		struct work_struct free_work;
+	} atomic_helper;
+
 	u16 orig_clock;
 
 	bool mchbar_need_disable;
......
@@ -185,6 +185,7 @@ int i915_vma_bind(struct i915_vma *vma, enum i915_cache_level cache_level,
 			return ret;
 	}
 
+	trace_i915_vma_bind(vma, bind_flags);
 	ret = vma->vm->bind_vma(vma, cache_level, bind_flags);
 	if (ret)
 		return ret;
......
@@ -499,6 +499,7 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 	struct drm_i915_private *dev_priv = to_i915(crt->base.base.dev);
 	struct edid *edid;
 	struct i2c_adapter *i2c;
+	bool ret = false;
 
 	BUG_ON(crt->base.type != INTEL_OUTPUT_ANALOG);
 
@@ -515,17 +516,17 @@ static bool intel_crt_detect_ddc(struct drm_connector *connector)
 		 */
 		if (!is_digital) {
 			DRM_DEBUG_KMS("CRT detected via DDC:0x50 [EDID]\n");
-			return true;
+			ret = true;
+		} else {
+			DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 		}
-
-		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [EDID reports a digital panel]\n");
 	} else {
 		DRM_DEBUG_KMS("CRT not detected via DDC:0x50 [no valid EDID found]\n");
 	}
 
 	kfree(edid);
 
-	return false;
+	return ret;
 }
 
 static enum drm_connector_status
......
@@ -2585,8 +2585,9 @@ intel_fill_fb_info(struct drm_i915_private *dev_priv,
 			 * We only keep the x/y offsets, so push all of the
 			 * gtt offset into the x/y offsets.
 			 */
-			_intel_adjust_tile_offset(&x, &y, tile_size,
-						  tile_width, tile_height, pitch_tiles,
+			_intel_adjust_tile_offset(&x, &y,
+						  tile_width, tile_height,
+						  tile_size, pitch_tiles,
 						  gtt_offset_rotated * tile_size, 0);
 
 			gtt_offset_rotated += rot_info->plane[i].width * rot_info->plane[i].height;
@@ -6849,6 +6850,12 @@ static void intel_crtc_disable_noatomic(struct drm_crtc *crtc)
 	}
 
 	state = drm_atomic_state_alloc(crtc->dev);
+	if (!state) {
+		DRM_DEBUG_KMS("failed to disable [CRTC:%d:%s], out of memory",
+			      crtc->base.id, crtc->name);
+		return;
+	}
+
 	state->acquire_ctx = crtc->dev->mode_config.acquire_ctx;
 
 	/* Everything's already locked, -EDEADLK can't happen. */
@@ -11246,6 +11253,7 @@ bool intel_get_load_detect_pipe(struct drm_connector *connector,
 	}
 
 	old->restore_state = restore_state;
+	drm_atomic_state_put(state);
 
 	/* let the connector get through one full cycle before testing */
 	intel_wait_for_vblank(dev_priv, intel_crtc->pipe);
@@ -14515,9 +14523,15 @@ intel_atomic_commit_ready(struct i915_sw_fence *fence,
 		break;
 
 	case FENCE_FREE:
-		drm_atomic_state_put(&state->base);
-		break;
+		{
+			struct intel_atomic_helper *helper =
+				&to_i915(state->base.dev)->atomic_helper;
+
+			if (llist_add(&state->freed, &helper->free_list))
+				schedule_work(&helper->free_work);
+			break;
+		}
 	}
 
 	return NOTIFY_DONE;
 }
@@ -16395,6 +16409,18 @@ static void sanitize_watermarks(struct drm_device *dev)
 	drm_modeset_acquire_fini(&ctx);
 }
 
+static void intel_atomic_helper_free_state(struct work_struct *work)
+{
+	struct drm_i915_private *dev_priv =
+		container_of(work, typeof(*dev_priv), atomic_helper.free_work);
+	struct intel_atomic_state *state, *next;
+	struct llist_node *freed;
+
+	freed = llist_del_all(&dev_priv->atomic_helper.free_list);
+	llist_for_each_entry_safe(state, next, freed, freed)
+		drm_atomic_state_put(&state->base);
+}
+
 int intel_modeset_init(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
@@ -16414,6 +16440,9 @@ int intel_modeset_init(struct drm_device *dev)
 
 	dev->mode_config.funcs = &intel_mode_funcs;
 
+	INIT_WORK(&dev_priv->atomic_helper.free_work,
+		  intel_atomic_helper_free_state);
+
 	intel_init_quirks(dev);
 
 	intel_init_pm(dev_priv);
@@ -17027,6 +17056,7 @@ void intel_display_resume(struct drm_device *dev)
 	if (ret)
 		DRM_ERROR("Restoring old state failed with %i\n", ret);
 
-	drm_atomic_state_put(state);
+	if (state)
+		drm_atomic_state_put(state);
 }
 
@@ -17097,6 +17127,9 @@ void intel_modeset_cleanup(struct drm_device *dev)
 {
 	struct drm_i915_private *dev_priv = to_i915(dev);
 
+	flush_work(&dev_priv->atomic_helper.free_work);
+	WARN_ON(!llist_empty(&dev_priv->atomic_helper.free_list));
+
 	intel_disable_gt_powersave(dev_priv);
 
 	/*
......
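
The atomic-state changes above (the intel_atomic_helper fields, the FENCE_FREE
hook, and intel_atomic_helper_free_state()) defer the final
drm_atomic_state_put() to a worker: the fence callback can run in a context
where immediately freeing the state is not safe, so it only pushes the state
onto a lock-free llist and schedules the work; llist_add() returning true for
the first entry means the work is scheduled once per batch, and the worker
drains everything with llist_del_all(). A userspace sketch of the same
pattern, using C11 atomics in place of the kernel's llist and workqueue
(names and types are illustrative, not the kernel API):

/* Userspace sketch of the deferred-free pattern. Producers push nodes onto a
 * lock-free list from any context; only the push that finds the list empty
 * "schedules" the drain, which then frees everything at once. */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct deferred_node {
	struct deferred_node *next;
	int payload;			/* stands in for the atomic state */
};

static _Atomic(struct deferred_node *) free_list;

/* Like llist_add(): returns true if the list was empty beforehand, so the
 * caller knows to kick the worker exactly once per batch. */
static bool deferred_free_add(struct deferred_node *n)
{
	struct deferred_node *first = atomic_load(&free_list);

	do {
		n->next = first;
	} while (!atomic_compare_exchange_weak(&free_list, &first, n));

	return first == NULL;
}

/* The "work function": detach the whole list in one shot (llist_del_all())
 * and free every node in a context where freeing is safe. */
static void deferred_free_drain(void)
{
	struct deferred_node *n = atomic_exchange(&free_list, NULL);

	while (n) {
		struct deferred_node *next = n->next;

		printf("freeing payload %d\n", n->payload);
		free(n);
		n = next;
	}
}

int main(void)
{
	for (int i = 0; i < 3; i++) {
		struct deferred_node *n = calloc(1, sizeof(*n));

		if (!n)
			return 1;
		n->payload = i;
		if (deferred_free_add(n))
			puts("list was empty: schedule_work() would run here");
	}

	deferred_free_drain();	/* the flush_work() equivalent at teardown */
	return 0;
}
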
@@ -370,6 +370,8 @@ struct intel_atomic_state {
 	struct skl_wm_values wm_results;
 
 	struct i915_sw_fence commit_ready;
+
+	struct llist_node freed;
 };
 
 struct intel_plane_state {
......
@@ -742,6 +742,9 @@ void intel_fbdev_initial_config_async(struct drm_device *dev)
 {
 	struct intel_fbdev *ifbdev = to_i915(dev)->fbdev;
 
+	if (!ifbdev)
+		return;
+
 	ifbdev->cookie = async_schedule(intel_fbdev_initial_config, ifbdev);
 }
......