Commit ca191804 authored by Linus Torvalds

Merge tag 'drm-fixes-2019-05-31' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Nothing too crazy, pretty quiet, maybe too quiet.

  amdgpu:
   - a fixed version of the raven firmware fix we previously reverted
   - stolen memory fix

  imx:
   - regression fix

  qxl:
   - remove a bad warning

  etnaviv:
   - VM locking fix"

* tag 'drm-fixes-2019-05-31' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: reserve stollen vram for raven series
  drm/etnaviv: lock MMU while dumping core
  drm/imx: ipuv3-plane: fix atomic update status query for non-plus i.MX6Q
  drm/qxl: drop WARN_ONCE()
  drm/amd/display: Don't load DMCU for Raven 1 (v2)
parents 036e3431 2a3e0b71
@@ -624,9 +624,8 @@ static bool gmc_v9_0_keep_stolen_memory(struct amdgpu_device *adev)
 	 */
 	switch (adev->asic_type) {
 	case CHIP_VEGA10:
-		return true;
 	case CHIP_RAVEN:
-		return (adev->pdev->device == 0x15d8);
+		return true;
 	case CHIP_VEGA12:
 	case CHIP_VEGA20:
 	default:
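The net effect of this hunk: CHIP_VEGA10 now falls through to CHIP_RAVEN, so every Raven variant keeps its stolen VRAM reservation rather than only PCI device 0x15d8. A minimal standalone sketch of the resulting control flow, using an illustrative stand-in enum instead of the real amdgpu types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for the amdgpu ASIC type enum. */
enum asic_type { CHIP_VEGA10, CHIP_RAVEN, CHIP_VEGA12, CHIP_VEGA20 };

/* After the fix, CHIP_VEGA10 falls through to CHIP_RAVEN: every Raven
 * variant keeps its stolen VRAM, not just PCI device 0x15d8. */
static bool keep_stolen_memory(enum asic_type type)
{
	switch (type) {
	case CHIP_VEGA10:
	case CHIP_RAVEN:
		return true;
	default:
		return false;
	}
}

int main(void)
{
	printf("raven keeps stolen memory: %d\n", keep_stolen_memory(CHIP_RAVEN));
	return 0;
}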
@@ -29,6 +29,7 @@
 #include "dm_services_types.h"
 #include "dc.h"
 #include "dc/inc/core_types.h"
+#include "dal_asic_id.h"

 #include "vid.h"
 #include "amdgpu.h"
@@ -640,7 +641,7 @@ static void amdgpu_dm_fini(struct amdgpu_device *adev)

 static int load_dmcu_fw(struct amdgpu_device *adev)
 {
-	const char *fw_name_dmcu;
+	const char *fw_name_dmcu = NULL;
 	int r;
 	const struct dmcu_firmware_header_v1_0 *hdr;

@@ -663,7 +664,14 @@ static int load_dmcu_fw(struct amdgpu_device *adev)
 	case CHIP_VEGA20:
 		return 0;
 	case CHIP_RAVEN:
-		fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+#if defined(CONFIG_DRM_AMD_DC_DCN1_01)
+		if (ASICREV_IS_PICASSO(adev->external_rev_id))
+			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+		else if (ASICREV_IS_RAVEN2(adev->external_rev_id))
+			fw_name_dmcu = FIRMWARE_RAVEN_DMCU;
+		else
+#endif
+			return 0;
 		break;
 	default:
 		DRM_ERROR("Unsupported ASIC type: 0x%X\n", adev->asic_type);
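The net effect: DMCU microcode is now requested only for the Picasso and Raven 2 revisions, while Raven 1 returns early with success and no firmware, which is what the "Don't load DMCU for Raven 1" subject refers to. Since both surviving arms assign the same blob, the revision gate collapses to a single OR; a compacted sketch under that observation, with the is_picasso/is_raven2 flags standing in for the ASICREV_IS_PICASSO()/ASICREV_IS_RAVEN2() revision-window macros:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FIRMWARE_RAVEN_DMCU "amdgpu/raven_dmcu.bin"

/* Compacted form of the new Raven branch: both revisions that need DMCU
 * load the same blob, so the two arms are equivalent to one condition. */
static const char *raven_dmcu_fw(bool is_picasso, bool is_raven2)
{
	if (is_picasso || is_raven2)
		return FIRMWARE_RAVEN_DMCU;
	return NULL; /* Raven 1: succeed without loading any DMCU microcode */
}

int main(void)
{
	const char *fw = raven_dmcu_fw(false, false);
	printf("raven1 dmcu fw: %s\n", fw ? fw : "(none)");
	return 0;
}

The kernel change itself keeps the two arms separate under the CONFIG_DRM_AMD_DC_DCN1_01 guard; the sketch only shows that they reduce to one test.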
@@ -125,6 +125,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 		return;
 	etnaviv_dump_core = false;

+	mutex_lock(&gpu->mmu->lock);
+
 	mmu_size = etnaviv_iommu_dump_size(gpu->mmu);

 	/* We always dump registers, mmu, ring and end marker */
@@ -167,6 +169,7 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 	iter.start = __vmalloc(file_size, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY,
 			       PAGE_KERNEL);
 	if (!iter.start) {
+		mutex_unlock(&gpu->mmu->lock);
 		dev_warn(gpu->dev, "failed to allocate devcoredump file\n");
 		return;
 	}
@@ -234,6 +237,8 @@ void etnaviv_core_dump(struct etnaviv_gpu *gpu)
 						 obj->base.size);
 	}

+	mutex_unlock(&gpu->mmu->lock);
+
 	etnaviv_core_dump_header(&iter, ETDUMP_BUF_END, iter.data);

 	dev_coredumpv(gpu->dev, iter.start, iter.data - iter.start, GFP_KERNEL);
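The pattern worth noting here: the dump size is computed under the MMU lock and must stay valid until the dump finishes, so every exit path has to release the lock, including the allocation-failure path added in the middle hunk. A small userspace analogue of the same discipline, with a pthread mutex standing in for the MMU lock and malloc for __vmalloc:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

static pthread_mutex_t state_lock = PTHREAD_MUTEX_INITIALIZER;
static size_t protected_size = 4096; /* stands in for the MMU dump size */

/* Size the buffer and copy the protected state out under one lock hold,
 * releasing the lock on the error path as well as on success. */
static int dump_state(char **out, size_t *out_len)
{
	pthread_mutex_lock(&state_lock);

	size_t len = protected_size; /* must not change until we unlock */
	char *buf = malloc(len);
	if (!buf) {
		pthread_mutex_unlock(&state_lock); /* mirror of the fix */
		fprintf(stderr, "failed to allocate dump buffer\n");
		return -1;
	}

	/* ... copy state into buf while still holding the lock ... */

	pthread_mutex_unlock(&state_lock);
	*out = buf;
	*out_len = len;
	return 0;
}

int main(void)
{
	char *buf;
	size_t len;
	if (dump_state(&buf, &len) == 0) {
		printf("dumped %zu bytes\n", len);
		free(buf);
	}
	return 0;
}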
@@ -605,7 +605,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 		active = ipu_idmac_get_current_buffer(ipu_plane->ipu_ch);
 		ipu_cpmem_set_buffer(ipu_plane->ipu_ch, !active, eba);
 		ipu_idmac_select_buffer(ipu_plane->ipu_ch, !active);
-		ipu_plane->next_buf = !active;
 		if (ipu_plane_separate_alpha(ipu_plane)) {
 			active = ipu_idmac_get_current_buffer(ipu_plane->alpha_ch);
 			ipu_cpmem_set_buffer(ipu_plane->alpha_ch, !active,
@@ -710,7 +709,6 @@ static void ipu_plane_atomic_update(struct drm_plane *plane,
 	ipu_cpmem_set_buffer(ipu_plane->ipu_ch, 1, eba);
 	ipu_idmac_lock_enable(ipu_plane->ipu_ch, num_bursts);
 	ipu_plane_enable(ipu_plane);
-	ipu_plane->next_buf = -1;
 }

 static const struct drm_plane_helper_funcs ipu_plane_helper_funcs = {
@@ -732,10 +730,15 @@ bool ipu_plane_atomic_update_pending(struct drm_plane *plane)

 	if (ipu_state->use_pre)
 		return ipu_prg_channel_configure_pending(ipu_plane->ipu_ch);
-	else if (ipu_plane->next_buf >= 0)
-		return ipu_idmac_get_current_buffer(ipu_plane->ipu_ch) !=
-		       ipu_plane->next_buf;

+	/*
+	 * Pretend no update is pending in the non-PRE/PRG case. For this to
+	 * happen, an atomic update would have to be deferred until after the
+	 * start of the next frame and simultaneously interrupt latency would
+	 * have to be high enough to let the atomic update finish and issue an
+	 * event before the previous end of frame interrupt handler can be
+	 * executed.
+	 */
 	return false;
 }

 int ipu_planes_assign_pre(struct drm_device *dev,
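With the next_buf bookkeeping gone, the status query collapses to a two-way decision: ask the PRG hardware when PRE/PRG drives the plane, otherwise report "not pending" per the rationale in the new comment. A standalone sketch with illustrative stand-ins for the driver state and the PRG helper:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-ins for the driver state and the PRG query helper. */
struct plane_state {
	bool use_pre; /* PRE/PRG scanout engines drive this plane */
};

static bool prg_configure_pending(const struct plane_state *s)
{
	(void)s;
	return false; /* placeholder for ipu_prg_channel_configure_pending() */
}

/* After the fix: with PRE/PRG the hardware is queried directly; without
 * it there is no reliable pending signal, so report "not pending". */
static bool atomic_update_pending(const struct plane_state *s)
{
	if (s->use_pre)
		return prg_configure_pending(s);
	return false;
}

int main(void)
{
	struct plane_state s = { .use_pre = false };
	printf("pending: %d\n", atomic_update_pending(&s));
	return 0;
}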
@@ -27,7 +27,6 @@ struct ipu_plane {
 	int			dp_flow;

 	bool			disabling;
-	int			next_buf;
 };

 struct ipu_plane *ipu_plane_init(struct drm_device *dev, struct ipu_soc *ipu,
@@ -77,6 +77,5 @@ void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
 int qxl_gem_prime_mmap(struct drm_gem_object *obj,
 		       struct vm_area_struct *area)
 {
-	WARN_ONCE(1, "not implemented");
 	return -ENOSYS;
 }