Commit f5835372 authored by Linus Torvalds

Merge branch 'drm-fixes' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Xmas fixes pull, all small nothing major, intel, radeon, one ttm
  regression, and one build fix"

* 'drm-fixes' of git://people.freedesktop.org/~airlied/linux:
  drm/ttm: Fix swapin regression
  gpu: fix qxl missing crc32_le
  drm/radeon: fix asic gfx values for scrapper asics
  drm/i915: Use the correct GMCH_CTRL register for Sandybridge+
  drm/radeon: check for 0 count in speaker allocation and SAD code
  drm/radeon/dpm: disable ss on Cayman
  drm/radeon/dce6: set correct number of audio pins
  drm/i915: get a PC8 reference when enabling the power well
  drm/i915: change CRTC assertion on LCPLL disable
  drm/i915: Fix erroneous dereference of batch_obj inside reset_status
  drm/i915: Prevent double unref following alloc failure during execbuffer
parents a3981a52 418cb50b
@@ -2343,15 +2343,24 @@ static void i915_gem_free_request(struct drm_i915_gem_request *request)
 	kfree(request);
 }
 
-static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
-				      struct intel_ring_buffer *ring)
+static void i915_gem_reset_ring_status(struct drm_i915_private *dev_priv,
+				       struct intel_ring_buffer *ring)
 {
-	u32 completed_seqno;
-	u32 acthd;
+	u32 completed_seqno = ring->get_seqno(ring, false);
+	u32 acthd = intel_ring_get_active_head(ring);
+	struct drm_i915_gem_request *request;
 
-	acthd = intel_ring_get_active_head(ring);
-	completed_seqno = ring->get_seqno(ring, false);
+	list_for_each_entry(request, &ring->request_list, list) {
+		if (i915_seqno_passed(completed_seqno, request->seqno))
+			continue;
+
+		i915_set_reset_status(ring, request, acthd);
+	}
+}
+
+static void i915_gem_reset_ring_cleanup(struct drm_i915_private *dev_priv,
+					struct intel_ring_buffer *ring)
+{
 	while (!list_empty(&ring->request_list)) {
 		struct drm_i915_gem_request *request;
 
@@ -2359,9 +2368,6 @@ static void i915_gem_reset_ring_lists(struct drm_i915_private *dev_priv,
 					   struct drm_i915_gem_request,
 					   list);
 
-		if (request->seqno > completed_seqno)
-			i915_set_reset_status(ring, request, acthd);
-
 		i915_gem_free_request(request);
 	}
 
@@ -2403,8 +2409,16 @@ void i915_gem_reset(struct drm_device *dev)
 	struct intel_ring_buffer *ring;
 	int i;
 
+	/*
+	 * Before we free the objects from the requests, we need to inspect
+	 * them for finding the guilty party. As the requests only borrow
+	 * their reference to the objects, the inspection must be done first.
+	 */
+	for_each_ring(ring, dev_priv, i)
+		i915_gem_reset_ring_status(dev_priv, ring);
+
 	for_each_ring(ring, dev_priv, i)
-		i915_gem_reset_ring_lists(dev_priv, ring);
+		i915_gem_reset_ring_cleanup(dev_priv, ring);
 
 	i915_gem_cleanup_ringbuffer(dev);
...
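Reviewer note: the status pass above decides "already completed" with i915_seqno_passed() instead of the raw `request->seqno > completed_seqno` test that the removed hunk used. A minimal sketch of that helper's shape, as defined in i915_drv.h in this era (reproduced here for illustration only):

static inline bool i915_seqno_passed(uint32_t seq1, uint32_t seq2)
{
	/* The signed difference survives u32 wraparound, unlike a plain '>'. */
	return (int32_t)(seq1 - seq2) >= 0;
}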
@@ -93,7 +93,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 {
 	struct drm_i915_gem_object *obj;
 	struct list_head objects;
-	int i, ret = 0;
+	int i, ret;
 
 	INIT_LIST_HEAD(&objects);
 
 	spin_lock(&file->table_lock);
@@ -106,7 +106,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Invalid object handle %d at index %d\n",
 				   exec[i].handle, i);
 			ret = -ENOENT;
-			goto out;
+			goto err;
 		}
 
 		if (!list_empty(&obj->obj_exec_link)) {
@@ -114,7 +114,7 @@ eb_lookup_vmas(struct eb_vmas *eb,
 			DRM_DEBUG("Object %p [handle %d, index %d] appears more than once in object list\n",
 				   obj, exec[i].handle, i);
 			ret = -EINVAL;
-			goto out;
+			goto err;
 		}
 
 		drm_gem_object_reference(&obj->base);
@@ -123,9 +123,13 @@ eb_lookup_vmas(struct eb_vmas *eb,
 	spin_unlock(&file->table_lock);
 
 	i = 0;
-	list_for_each_entry(obj, &objects, obj_exec_link) {
+	while (!list_empty(&objects)) {
 		struct i915_vma *vma;
 
+		obj = list_first_entry(&objects,
+				       struct drm_i915_gem_object,
+				       obj_exec_link);
+
 		/*
 		 * NOTE: We can leak any vmas created here when something fails
 		 * later on. But that's no issue since vma_unbind can deal with
@@ -138,10 +142,12 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		if (IS_ERR(vma)) {
 			DRM_DEBUG("Failed to lookup VMA\n");
 			ret = PTR_ERR(vma);
-			goto out;
+			goto err;
 		}
 
+		/* Transfer ownership from the objects list to the vmas list. */
 		list_add_tail(&vma->exec_list, &eb->vmas);
+		list_del_init(&obj->obj_exec_link);
 
 		vma->exec_entry = &exec[i];
 		if (eb->and < 0) {
@@ -155,16 +161,22 @@ eb_lookup_vmas(struct eb_vmas *eb,
 		++i;
 	}
 
-out:
+	return 0;
+
+err:
 	while (!list_empty(&objects)) {
 		obj = list_first_entry(&objects,
 				       struct drm_i915_gem_object,
 				       obj_exec_link);
 		list_del_init(&obj->obj_exec_link);
-		if (ret)
-			drm_gem_object_unreference(&obj->base);
+		drm_gem_object_unreference(&obj->base);
 	}
+	/*
+	 * Objects already transferred to the vmas list will be unreferenced by
+	 * eb_destroy.
+	 */
 
 	return ret;
 }
...
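Reviewer note: the double-unref fix works by transferring ownership one object at a time, so the error path can blindly unreference whatever is still on the objects list and the old `if (ret)` guard becomes unnecessary. A self-contained sketch of the idiom; struct item, the payload test, and transfer_all() are hypothetical stand-ins, not driver code:

#include <linux/list.h>
#include <linux/slab.h>

struct item {
	struct list_head link;
	int payload;
};

/* Pop-and-transfer: every node is owned by exactly one list at any
 * moment, so each cleanup path releases each node exactly once. */
static int transfer_all(struct list_head *src, struct list_head *dst)
{
	while (!list_empty(src)) {
		struct item *it = list_first_entry(src, struct item, link);

		if (it->payload < 0)		/* stand-in for a vma lookup failure */
			goto err;		/* 'it' is still owned by src */

		list_move_tail(&it->link, dst);	/* ownership moves to dst */
	}
	return 0;

err:
	while (!list_empty(src)) {
		struct item *it = list_first_entry(src, struct item, link);

		list_del_init(&it->link);
		kfree(it);			/* only nodes dst never took */
	}
	return -EINVAL;
}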
@@ -6303,7 +6303,7 @@ static void assert_can_disable_lcpll(struct drm_i915_private *dev_priv)
 	uint32_t val;
 
 	list_for_each_entry(crtc, &dev->mode_config.crtc_list, base.head)
-		WARN(crtc->base.enabled, "CRTC for pipe %c enabled\n",
+		WARN(crtc->active, "CRTC for pipe %c enabled\n",
 		     pipe_name(crtc->pipe));
 
 	WARN(I915_READ(HSW_PWR_WELL_DRIVER), "Power well on\n");
@@ -11126,14 +11126,15 @@ void intel_connector_attach_encoder(struct intel_connector *connector,
 int intel_modeset_vga_set_state(struct drm_device *dev, bool state)
 {
 	struct drm_i915_private *dev_priv = dev->dev_private;
+	unsigned reg = INTEL_INFO(dev)->gen >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL;
 	u16 gmch_ctrl;
 
-	pci_read_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, &gmch_ctrl);
+	pci_read_config_word(dev_priv->bridge_dev, reg, &gmch_ctrl);
 	if (state)
 		gmch_ctrl &= ~INTEL_GMCH_VGA_DISABLE;
 	else
 		gmch_ctrl |= INTEL_GMCH_VGA_DISABLE;
-	pci_write_config_word(dev_priv->bridge_dev, INTEL_GMCH_CTRL, gmch_ctrl);
+	pci_write_config_word(dev_priv->bridge_dev, reg, gmch_ctrl);
 	return 0;
 }
...
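Reviewer note: the GMCH fix exists because on Sandybridge and later the VGA-disable control sits at a different offset in the host bridge's config space, so the old code toggled the wrong word. The underlying idiom is a plain read-modify-write through the standard PCI config accessors; a generic sketch with a hypothetical offset and bit name:

#include <linux/pci.h>

#define MY_CTRL_REG	0x52		/* hypothetical config-space offset */
#define MY_VGA_DISABLE	(1 << 1)	/* hypothetical control bit */

static void set_vga_decode(struct pci_dev *bridge, bool enable)
{
	u16 ctrl;

	/* Read-modify-write one 16-bit config word on the bridge device. */
	pci_read_config_word(bridge, MY_CTRL_REG, &ctrl);
	if (enable)
		ctrl &= ~MY_VGA_DISABLE;
	else
		ctrl |= MY_VGA_DISABLE;
	pci_write_config_word(bridge, MY_CTRL_REG, ctrl);
}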
@@ -5688,6 +5688,8 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 	unsigned long irqflags;
 	uint32_t tmp;
 
+	WARN_ON(dev_priv->pc8.enabled);
+
 	tmp = I915_READ(HSW_PWR_WELL_DRIVER);
 	is_enabled = tmp & HSW_PWR_WELL_STATE_ENABLED;
 	enable_requested = tmp & HSW_PWR_WELL_ENABLE_REQUEST;
@@ -5747,16 +5749,24 @@ static void __intel_set_power_well(struct drm_device *dev, bool enable)
 static void __intel_power_well_get(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 {
-	if (!power_well->count++)
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
+	if (!power_well->count++) {
+		hsw_disable_package_c8(dev_priv);
 		__intel_set_power_well(dev, true);
+	}
 }
 
 static void __intel_power_well_put(struct drm_device *dev,
 				   struct i915_power_well *power_well)
 {
+	struct drm_i915_private *dev_priv = dev->dev_private;
+
 	WARN_ON(!power_well->count);
 
-	if (!--power_well->count && i915_disable_power_well)
+	if (!--power_well->count && i915_disable_power_well) {
 		__intel_set_power_well(dev, false);
+		hsw_enable_package_c8(dev_priv);
+	}
 }
 
 void intel_display_power_get(struct drm_device *dev,
...
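Reviewer note: both power-well changes hang work off the 0->1 and 1->0 transitions of a refcount, and the ordering matters: the PC8 reference is taken before the well powers up and dropped only after it powers down, so package C8 is never active while the well is on. A self-contained sketch of that transition-gated refcount shape; all names are hypothetical, and the mutex is included only to make the sketch stand alone (assume the real code's callers provide their own serialization):

#include <linux/mutex.h>
#include <linux/bug.h>

struct gated_resource {
	struct mutex lock;
	int count;
	void (*on)(struct gated_resource *r);	/* 0->1 transition work */
	void (*off)(struct gated_resource *r);	/* 1->0 transition work */
};

static void resource_get(struct gated_resource *r)
{
	mutex_lock(&r->lock);
	if (!r->count++)	/* only the first user pays the enable cost */
		r->on(r);
	mutex_unlock(&r->lock);
}

static void resource_put(struct gated_resource *r)
{
	mutex_lock(&r->lock);
	WARN_ON(!r->count);
	if (!--r->count)	/* only the last user tears it down */
		r->off(r);
	mutex_unlock(&r->lock);
}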
@@ -8,5 +8,6 @@ config DRM_QXL
 	select DRM_KMS_HELPER
 	select DRM_KMS_FB_HELPER
 	select DRM_TTM
+	select CRC32
 	help
 	  QXL virtual GPU for Spice virtualization desktop integration. Do not enable this driver unless your distro ships a corresponding X.org QXL driver that can handle kernel modesetting.
...
@@ -24,7 +24,7 @@
  */
 
-#include "linux/crc32.h"
+#include <linux/crc32.h>
 
 #include "qxl_drv.h"
 #include "qxl_object.h"
...
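Reviewer note: the build fix has two halves: `select CRC32` guarantees lib/crc32 is built whenever qxl is, and the angle-bracket form takes the header from the kernel include path rather than relative to the source file. For reference, a hedged sketch of the library call this keeps linkable (the seed and final inversion are illustrative choices, not qxl's):

#include <linux/crc32.h>

/* Little-endian CRC32 over a buffer, provided by lib/crc32. */
static u32 checksum_buffer(const u8 *buf, size_t len)
{
	return crc32_le(~0, buf, len) ^ ~0;
}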
@@ -174,7 +174,7 @@ void dce6_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
 	}
@@ -235,7 +235,7 @@ void dce6_afmt_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;
 	}
@@ -308,7 +308,9 @@ int dce6_audio_init(struct radeon_device *rdev)
 	rdev->audio.enabled = true;
 
 	if (ASIC_IS_DCE8(rdev))
-		rdev->audio.num_pins = 7;
+		rdev->audio.num_pins = 6;
+	else if (ASIC_IS_DCE61(rdev))
+		rdev->audio.num_pins = 4;
 	else
 		rdev->audio.num_pins = 6;
...
@@ -118,7 +118,7 @@ static void dce4_afmt_write_speaker_allocation(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_speaker_allocation(radeon_connector->edid, &sadb);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read Speaker Allocation Data Block: %d\n", sad_count);
 		return;
 	}
@@ -173,7 +173,7 @@ static void evergreen_hdmi_write_sad_regs(struct drm_encoder *encoder)
 	}
 
 	sad_count = drm_edid_to_sad(radeon_connector->edid, &sads);
-	if (sad_count < 0) {
+	if (sad_count <= 0) {
 		DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
 		return;
 	}
...
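Reviewer note: drm_edid_to_sad() and drm_edid_to_speaker_allocation() return a negative errno on failure but 0 when the EDID simply carries no matching blocks, and in the 0 case no buffer is allocated; the old `< 0` tests let an empty result through to code expecting at least one entry. A sketch of the corrected caller pattern; write_sads() and program_sad() are hypothetical, not radeon functions:

#include <drm/drmP.h>
#include <drm/drm_edid.h>
#include <linux/slab.h>

static void program_sad(const struct cea_sad *sad);	/* hypothetical hw hook */

static void write_sads(struct edid *edid)
{
	struct cea_sad *sads;
	int i, sad_count;

	sad_count = drm_edid_to_sad(edid, &sads);
	if (sad_count <= 0) {
		if (sad_count < 0)	/* real error vs. merely absent */
			DRM_ERROR("Couldn't read SADs: %d\n", sad_count);
		return;			/* 0: nothing to program, no buffer */
	}

	for (i = 0; i < sad_count; i++)
		program_sad(&sads[i]);

	kfree(sads);
}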
@@ -895,6 +895,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		    (rdev->pdev->device == 0x999C)) {
 			rdev->config.cayman.max_simds_per_se = 6;
 			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
 		} else if ((rdev->pdev->device == 0x9903) ||
 			   (rdev->pdev->device == 0x9904) ||
 			   (rdev->pdev->device == 0x990A) ||
@@ -905,6 +909,10 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 			   (rdev->pdev->device == 0x999D)) {
 			rdev->config.cayman.max_simds_per_se = 4;
 			rdev->config.cayman.max_backends_per_se = 2;
+			rdev->config.cayman.max_hw_contexts = 8;
+			rdev->config.cayman.sx_max_export_size = 256;
+			rdev->config.cayman.sx_max_export_pos_size = 64;
+			rdev->config.cayman.sx_max_export_smx_size = 192;
 		} else if ((rdev->pdev->device == 0x9919) ||
 			   (rdev->pdev->device == 0x9990) ||
 			   (rdev->pdev->device == 0x9991) ||
@@ -915,9 +923,17 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 			   (rdev->pdev->device == 0x99A0)) {
 			rdev->config.cayman.max_simds_per_se = 3;
 			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
 		} else {
 			rdev->config.cayman.max_simds_per_se = 2;
 			rdev->config.cayman.max_backends_per_se = 1;
+			rdev->config.cayman.max_hw_contexts = 4;
+			rdev->config.cayman.sx_max_export_size = 128;
+			rdev->config.cayman.sx_max_export_pos_size = 32;
+			rdev->config.cayman.sx_max_export_smx_size = 96;
 		}
 		rdev->config.cayman.max_texture_channel_caches = 2;
 		rdev->config.cayman.max_gprs = 256;
@@ -925,10 +941,6 @@ static void cayman_gpu_init(struct radeon_device *rdev)
 		rdev->config.cayman.max_gs_threads = 32;
 		rdev->config.cayman.max_stack_entries = 512;
 		rdev->config.cayman.sx_num_of_sets = 8;
-		rdev->config.cayman.sx_max_export_size = 256;
-		rdev->config.cayman.sx_max_export_pos_size = 64;
-		rdev->config.cayman.sx_max_export_smx_size = 192;
-		rdev->config.cayman.max_hw_contexts = 8;
 		rdev->config.cayman.sq_num_cf_insts = 2;
 		rdev->config.cayman.sc_prim_fifo_size = 0x40;
...
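Reviewer note: the fix moves the four limits out of the shared tail and duplicates them into each device-ID branch so every scrapper variant gets values matching its SIMD/backend counts. Purely as a design observation, the same mapping could be kept table-driven; an illustrative layout using the values from the hunks above (hypothetical, not part of this commit):

#include <linux/types.h>

/* Hypothetical, illustrative only. */
struct cayman_limits {
	u8  max_simds_per_se;
	u8  max_backends_per_se;
	u8  max_hw_contexts;
	u16 sx_max_export_size;
	u8  sx_max_export_pos_size;
	u8  sx_max_export_smx_size;
};

static const struct cayman_limits scrapper_limits[] = {
	{ 6, 2, 8, 256, 64, 192 },	/* 6-SIMD device IDs */
	{ 4, 2, 8, 256, 64, 192 },	/* 4-SIMD device IDs */
	{ 3, 1, 4, 128, 32,  96 },	/* 3-SIMD device IDs */
	{ 2, 1, 4, 128, 32,  96 },	/* fallback */
};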
@@ -2328,6 +2328,12 @@ void rv770_get_engine_memory_ss(struct radeon_device *rdev)
 	pi->mclk_ss = radeon_atombios_get_asic_ss_info(rdev, &ss,
 						       ASIC_INTERNAL_MEMORY_SS, 0);
 
+	/* disable ss, causes hangs on some cayman boards */
+	if (rdev->family == CHIP_CAYMAN) {
+		pi->sclk_ss = false;
+		pi->mclk_ss = false;
+	}
+
 	if (pi->sclk_ss || pi->mclk_ss)
 		pi->dynamic_ss = true;
 	else
...
@@ -353,7 +353,8 @@ int ttm_bo_move_memcpy(struct ttm_buffer_object *bo,
 	 * Don't move nonexistent data. Clear destination instead.
 	 */
 	if (old_iomap == NULL &&
-	    (ttm == NULL || ttm->state == tt_unpopulated)) {
+	    (ttm == NULL || (ttm->state == tt_unpopulated &&
+	     !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED)))) {
 		memset_io(new_iomap, 0, new_mem->num_pages*PAGE_SIZE);
 		goto out2;
 	}
...
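Reviewer note: the swapin regression came from treating tt_unpopulated as "no data": a TTM whose pages were swapped out is unpopulated, yet its contents still live in swap, so clearing the destination loses them. A hypothetical helper stating the corrected predicate (not part of the patch):

#include <drm/ttm/ttm_bo_driver.h>

/* True only when there is genuinely no source data to move, so the
 * destination may be cleared instead of copied into. */
static bool ttm_move_has_no_data(struct ttm_tt *ttm, void *old_iomap)
{
	if (old_iomap != NULL)
		return false;	/* data is in the old aperture mapping */
	if (ttm == NULL)
		return true;	/* no backing object at all */
	if (ttm->state != tt_unpopulated)
		return false;	/* pages are populated and hold data */
	/* Unpopulated but swapped out: contents still exist in swap. */
	return !(ttm->page_flags & TTM_PAGE_FLAG_SWAPPED);
}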