Commit b2dbdf2c authored by Linus Torvalds

Merge tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux

Pull drm fixes from Dave Airlie:
 "Nothing too earth shattering here, it just seems like lots of little
  things all over the place.

  msm has probably the largest amount of changes, but they all seem fine,
  otherwise, some rockchip, i915, etnaviv and exynos fixes, along with
  one nouveau regression fix for some older GPUs"

* tag 'drm-fixes-for-v4.13-rc5' of git://people.freedesktop.org/~airlied/linux: (35 commits)
  drm/nouveau/disp/nv04: avoid creation of output paths
  drm: make DRM_STM default n
  drm/exynos: forbid creating framebuffers from too small GEM buffers
  drm/etnaviv: Fix off-by-one error in reloc checking
  drm/i915: fix backlight invert for non-zero minimum brightness
  drm/i915/shrinker: Wrap need_resched() inside preempt-disable
  drm/i915/perf: fix flex eu registers programming
  drm/i915: Fix out-of-bounds array access in bdw_load_gamma_lut
  drm/i915/gvt: Change the max length of mmio_reg_rw from 4 to 8
  drm/i915/gvt: Initialize MMIO Block with HW state
  drm/rockchip: vop: report error when check resource error
  drm/rockchip: vop: round_up pitches to word align
  drm/rockchip: vop: fix NV12 video display error
  drm/rockchip: vop: fix iommu page fault when resume
  drm/i915/gvt: clean workload queue if error happened
  drm/i915/gvt: change resetting to resetting_eng
  drm/msm: gpu: don't abuse dma_alloc for non-DMA allocations
  drm/msm: gpu: call qcom_mdt interfaces only for ARCH_QCOM
  drm/msm/adreno: Prevent unclocked access when retrieving timestamps
  drm/msm: Remove __user from __u64 data types
  ...
parents 27df704d 46828dc7
@@ -304,7 +304,7 @@ static int sync_file_release(struct inode *inode, struct file *file)
 {
 	struct sync_file *sync_file = file->private_data;
 
-	if (test_bit(POLL_ENABLED, &sync_file->fence->flags))
+	if (test_bit(POLL_ENABLED, &sync_file->flags))
 		dma_fence_remove_callback(sync_file->fence, &sync_file->cb);
 	dma_fence_put(sync_file->fence);
 	kfree(sync_file);
@@ -318,7 +318,8 @@ static unsigned int sync_file_poll(struct file *file, poll_table *wait)
 	poll_wait(file, &sync_file->wq, wait);
 
-	if (!test_and_set_bit(POLL_ENABLED, &sync_file->fence->flags)) {
+	if (list_empty(&sync_file->cb.node) &&
+	    !test_and_set_bit(POLL_ENABLED, &sync_file->flags)) {
 		if (dma_fence_add_callback(sync_file->fence, &sync_file->cb,
 					   fence_check_cb_func) < 0)
 			wake_up_all(&sync_file->wq);
...
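Note on the sync_file change above: POLL_ENABLED used to be a bit in the shared dma_fence's flags, so when two sync_files wrapped the same fence, only the first poller's test_and_set_bit() succeeded and the second file never armed its wake-up callback. Giving struct sync_file its own flags word makes the bit per-file. A minimal userspace sketch of the idea, with hypothetical stand-in types and C11 atomics in place of the kernel bitops:

    #include <stdatomic.h>
    #include <stdio.h>

    /* Hypothetical stand-ins: two "files" wrapping one shared "fence". */
    struct fence { atomic_ulong flags; };
    struct sfile { struct fence *fence; atomic_ulong flags; };

    /* Arm poll exactly once per file; returns 1 for the caller that
     * armed it. With the bit kept on the shared fence instead, the
     * second file would see it already set and never arm its callback. */
    static int arm_poll(struct sfile *f)
    {
            unsigned long old = atomic_fetch_or(&f->flags, 1UL);
            return !(old & 1UL);
    }

    int main(void)
    {
            struct fence shared = { 0 };
            struct sfile a = { &shared, 0 }, b = { &shared, 0 };

            /* both files arm their own callback: prints "a: 1, b: 1" */
            printf("a: %d, b: %d\n", arm_poll(&a), arm_poll(&b));
            return 0;
    }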
@@ -1255,7 +1255,7 @@ static int tc_probe(struct i2c_client *client, const struct i2c_device_id *id)
 
 	/* port@2 is the output port */
 	ret = drm_of_find_panel_or_bridge(dev->of_node, 2, 0, &tc->panel, NULL);
-	if (ret)
+	if (ret && ret != -ENODEV)
 		return ret;
 
 	/* Shut down GPIO is optional */
...
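The tc358767 fix restores the usual "optional resource" idiom: drm_of_find_panel_or_bridge() returns -ENODEV when the DT simply describes no panel on that port, which is legal for a bridge-only setup, so only other errors should abort the probe. A generic sketch of the pattern (the lookup function here is hypothetical):

    #include <errno.h>
    #include <stdio.h>

    /* Hypothetical lookup: 0 on success, -ENODEV if the optional
     * resource is absent, another negative errno on a real failure. */
    static int find_optional_panel(int described, int **panel)
    {
            static int dummy;

            if (!described)
                    return -ENODEV;
            *panel = &dummy;
            return 0;
    }

    static int probe(int described)
    {
            int *panel = NULL;
            int ret = find_optional_panel(described, &panel);

            if (ret && ret != -ENODEV)      /* only hard errors abort */
                    return ret;
            printf("probed, panel %s\n", panel ? "present" : "absent");
            return 0;
    }

    int main(void)
    {
            probe(1);
            probe(0);
            return 0;
    }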
@@ -270,8 +270,8 @@ static int submit_reloc(struct etnaviv_gem_submit *submit, void *stream,
 	if (ret)
 		return ret;
 
-	if (r->reloc_offset >= bo->obj->base.size - sizeof(*ptr)) {
-		DRM_ERROR("relocation %u outside object", i);
+	if (r->reloc_offset > bo->obj->base.size - sizeof(*ptr)) {
+		DRM_ERROR("relocation %u outside object\n", i);
 		return -EINVAL;
 	}
...
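The etnaviv check was off by one: for an object of size S, the last reloc entry that still fits entirely inside the object starts at exactly S - sizeof(*ptr), and '>=' rejected that offset. A tiny standalone demonstration of the boundary, with made-up sizes:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t size = 4096;   /* hypothetical object size */
            uint32_t entry = 8;     /* stand-in for sizeof(*ptr) */
            uint32_t last = size - entry;   /* last fully-contained offset */

            /* old check wrongly rejects the last valid entry */
            printf("old: offset %u rejected? %d\n", last, last >= size - entry);
            /* new check accepts it but still rejects one byte further */
            printf("new: offset %u rejected? %d, offset %u rejected? %d\n",
                   last, last > size - entry,
                   last + 1, last + 1 > size - entry);
            return 0;
    }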
@@ -145,13 +145,19 @@ static struct drm_framebuffer *
 exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		      const struct drm_mode_fb_cmd2 *mode_cmd)
 {
+	const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
 	struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
 	struct drm_gem_object *obj;
 	struct drm_framebuffer *fb;
 	int i;
 	int ret;
 
-	for (i = 0; i < drm_format_num_planes(mode_cmd->pixel_format); i++) {
+	for (i = 0; i < info->num_planes; i++) {
+		unsigned int height = (i == 0) ? mode_cmd->height :
+				     DIV_ROUND_UP(mode_cmd->height, info->vsub);
+		unsigned long size = height * mode_cmd->pitches[i] +
+				     mode_cmd->offsets[i];
+
 		obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
 		if (!obj) {
 			DRM_ERROR("failed to lookup gem object\n");
@@ -160,6 +166,12 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
 		}
 
 		exynos_gem[i] = to_exynos_gem(obj);
+
+		if (size > exynos_gem[i]->size) {
+			i++;
+			ret = -EINVAL;
+			goto err;
+		}
 	}
 
 	fb = exynos_drm_framebuffer_init(dev, mode_cmd, exynos_gem, i);
...
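The exynos check computes, per plane, the smallest GEM buffer a framebuffer can legally scan from: chroma planes of a vertically subsampled format are only height/vsub lines tall, and each plane needs height * pitch bytes plus its offset. The same arithmetic in a standalone sketch, using assumed NV12-like values:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int height = 1080;
            unsigned int pitches[2] = { 1920, 1920 };  /* bytes per line, assumed */
            unsigned int offsets[2] = { 0, 0 };
            unsigned int vsub = 2;  /* NV12: chroma has half the lines */

            for (int i = 0; i < 2; i++) {
                    unsigned int h = i == 0 ? height : DIV_ROUND_UP(height, vsub);
                    unsigned long size = (unsigned long)h * pitches[i] + offsets[i];

                    /* plane 0: 2073600 bytes, plane 1: 1036800 bytes */
                    printf("plane %d needs at least %lu bytes\n", i, size);
            }
            return 0;
    }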
@@ -46,6 +46,8 @@
 #define same_context(a, b) (((a)->context_id == (b)->context_id) && \
 		((a)->lrca == (b)->lrca))
 
+static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask);
+
 static int context_switch_events[] = {
 	[RCS] = RCS_AS_CONTEXT_SWITCH,
 	[BCS] = BCS_AS_CONTEXT_SWITCH,
@@ -499,10 +501,10 @@ static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 {
 	struct intel_vgpu *vgpu = workload->vgpu;
-	struct intel_vgpu_execlist *execlist =
-		&vgpu->execlist[workload->ring_id];
+	int ring_id = workload->ring_id;
+	struct intel_vgpu_execlist *execlist = &vgpu->execlist[ring_id];
 	struct intel_vgpu_workload *next_workload;
-	struct list_head *next = workload_q_head(vgpu, workload->ring_id)->next;
+	struct list_head *next = workload_q_head(vgpu, ring_id)->next;
 	bool lite_restore = false;
 	int ret;
@@ -512,10 +514,25 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
 	release_shadow_batch_buffer(workload);
 	release_shadow_wa_ctx(&workload->wa_ctx);
 
-	if (workload->status || vgpu->resetting)
+	if (workload->status || (vgpu->resetting_eng & ENGINE_MASK(ring_id))) {
+		/* If workload->status is not successful, the HW GPU hung or
+		 * something went wrong between i915 and GVT, and GVT won't
+		 * inject a context switch interrupt into the guest, so from
+		 * the guest's point of view this is a vGPU hang and we should
+		 * emulate one. If there are pending workloads already
+		 * submitted by the guest, clean them up the way the HW GPU
+		 * would.
+		 *
+		 * If we are in the middle of an engine reset, the pending
+		 * workloads won't be submitted to the HW GPU and will be
+		 * cleaned up later during the reset, so cleaning them up here
+		 * has no impact either way.
+		 */
+		clean_workloads(vgpu, ENGINE_MASK(ring_id));
 		goto out;
+	}
 
-	if (!list_empty(workload_q_head(vgpu, workload->ring_id))) {
+	if (!list_empty(workload_q_head(vgpu, ring_id))) {
 		struct execlist_ctx_descriptor_format *this_desc, *next_desc;
 
 		next_workload = container_of(next,
...
@@ -72,11 +72,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	struct intel_gvt_device_info *info = &gvt->device_info;
 	struct pci_dev *pdev = gvt->dev_priv->drm.pdev;
 	struct intel_gvt_mmio_info *e;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	struct gvt_firmware_header *h;
 	void *firmware;
 	void *p;
 	unsigned long size, crc32_start;
-	int i;
+	int i, j;
 	int ret;
 
 	size = sizeof(*h) + info->mmio_size + info->cfg_space_size;
@@ -105,6 +107,13 @@ static int expose_firmware_sysfs(struct intel_gvt *gvt)
 	hash_for_each(gvt->mmio.mmio_info_table, i, e, node)
 		*(u32 *)(p + e->offset) = I915_READ_NOTRACE(_MMIO(e->offset));
 
+	for (i = 0; i < num; i++, block++) {
+		for (j = 0; j < block->size; j += 4)
+			*(u32 *)(p + INTEL_GVT_MMIO_OFFSET(block->offset) + j) =
+				I915_READ_NOTRACE(_MMIO(INTEL_GVT_MMIO_OFFSET(
+					block->offset) + j));
+	}
+
 	memcpy(gvt->firmware.mmio, p, info->mmio_size);
 
 	crc32_start = offsetof(struct gvt_firmware_header, crc32) + 4;
...
@@ -149,7 +149,7 @@ struct intel_vgpu {
 	bool active;
 	bool pv_notified;
 	bool failsafe;
-	bool resetting;
+	unsigned int resetting_eng;
 	void *sched_data;
 	struct vgpu_sched_ctl sched_ctl;
@@ -195,6 +195,15 @@ struct intel_gvt_fence {
 	unsigned long vgpu_allocated_fence_num;
 };
 
+/* Special MMIO blocks. */
+struct gvt_mmio_block {
+	unsigned int device;
+	i915_reg_t offset;
+	unsigned int size;
+	gvt_mmio_func read;
+	gvt_mmio_func write;
+};
+
 #define INTEL_GVT_MMIO_HASH_BITS 11
 
 struct intel_gvt_mmio {
@@ -214,6 +223,9 @@ struct intel_gvt_mmio {
 /* This reg could be accessed by unaligned address */
 #define F_UNALIGN (1 << 6)
 
+	struct gvt_mmio_block *mmio_block;
+	unsigned int num_mmio_block;
+
 	DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
 	unsigned int num_tracked_mmio;
 };
...
@@ -2857,31 +2857,15 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	return 0;
 }
 
-/* Special MMIO blocks. */
-static struct gvt_mmio_block {
-	unsigned int device;
-	i915_reg_t offset;
-	unsigned int size;
-	gvt_mmio_func read;
-	gvt_mmio_func write;
-} gvt_mmio_blocks[] = {
-	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
-	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
-	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
-		pvinfo_mmio_read, pvinfo_mmio_write},
-	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
-	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
-};
-
 static struct gvt_mmio_block *find_mmio_block(struct intel_gvt *gvt,
 					      unsigned int offset)
 {
 	unsigned long device = intel_gvt_get_device_type(gvt);
-	struct gvt_mmio_block *block = gvt_mmio_blocks;
+	struct gvt_mmio_block *block = gvt->mmio.mmio_block;
+	int num = gvt->mmio.num_mmio_block;
 	int i;
 
-	for (i = 0; i < ARRAY_SIZE(gvt_mmio_blocks); i++, block++) {
+	for (i = 0; i < num; i++, block++) {
 		if (!(device & block->device))
 			continue;
 		if (offset >= INTEL_GVT_MMIO_OFFSET(block->offset) &&
@@ -2912,6 +2896,17 @@ void intel_gvt_clean_mmio_info(struct intel_gvt *gvt)
 	gvt->mmio.mmio_attribute = NULL;
 }
 
+/* Special MMIO blocks. */
+static struct gvt_mmio_block mmio_blocks[] = {
+	{D_SKL_PLUS, _MMIO(CSR_MMIO_START_RANGE), 0x3000, NULL, NULL},
+	{D_ALL, _MMIO(MCHBAR_MIRROR_BASE_SNB), 0x40000, NULL, NULL},
+	{D_ALL, _MMIO(VGT_PVINFO_PAGE), VGT_PVINFO_SIZE,
+		pvinfo_mmio_read, pvinfo_mmio_write},
+	{D_ALL, LGC_PALETTE(PIPE_A, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_B, 0), 1024, NULL, NULL},
+	{D_ALL, LGC_PALETTE(PIPE_C, 0), 1024, NULL, NULL},
+};
+
 /**
  * intel_gvt_setup_mmio_info - setup MMIO information table for GVT device
  * @gvt: GVT device
@@ -2951,6 +2946,9 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		goto err;
 	}
 
+	gvt->mmio.mmio_block = mmio_blocks;
+	gvt->mmio.num_mmio_block = ARRAY_SIZE(mmio_blocks);
+
 	gvt_dbg_mmio("traced %u virtual mmio registers\n",
 		     gvt->mmio.num_tracked_mmio);
 	return 0;
@@ -3030,7 +3028,7 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
 	gvt_mmio_func func;
 	int ret;
 
-	if (WARN_ON(bytes > 4))
+	if (WARN_ON(bytes > 8))
 		return -EINVAL;
 
 	/*
...
@@ -432,7 +432,8 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
 
 	i915_gem_request_put(fetch_and_zero(&workload->req));
 
-	if (!workload->status && !vgpu->resetting) {
+	if (!workload->status && !(vgpu->resetting_eng &
+				   ENGINE_MASK(ring_id))) {
 		update_guest_context(workload);
 
 		for_each_set_bit(event, workload->pending_events,
...
@@ -480,11 +480,13 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 {
 	struct intel_gvt *gvt = vgpu->gvt;
 	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
+	unsigned int resetting_eng = dmlr ? ALL_ENGINES : engine_mask;
 
 	gvt_dbg_core("------------------------------------------\n");
 	gvt_dbg_core("resetting vgpu%d, dmlr %d, engine_mask %08x\n",
 		     vgpu->id, dmlr, engine_mask);
-	vgpu->resetting = true;
+
+	vgpu->resetting_eng = resetting_eng;
 
 	intel_vgpu_stop_schedule(vgpu);
 	/*
@@ -497,7 +499,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		mutex_lock(&gvt->lock);
 	}
 
-	intel_vgpu_reset_execlist(vgpu, dmlr ? ALL_ENGINES : engine_mask);
+	intel_vgpu_reset_execlist(vgpu, resetting_eng);
 
 	/* full GPU reset or device model level reset */
 	if (engine_mask == ALL_ENGINES || dmlr) {
@@ -520,7 +522,7 @@ void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
 		}
 	}
 
-	vgpu->resetting = false;
+	vgpu->resetting_eng = 0;
 
 	gvt_dbg_core("reset vgpu%d done\n", vgpu->id);
 	gvt_dbg_core("------------------------------------------\n");
 }
...
@@ -43,16 +43,21 @@ static bool shrinker_lock(struct drm_i915_private *dev_priv, bool *unlock)
 		return true;
 
 	case MUTEX_TRYLOCK_FAILED:
+		*unlock = false;
+		preempt_disable();
 		do {
 			cpu_relax();
 			if (mutex_trylock(&dev_priv->drm.struct_mutex)) {
-			case MUTEX_TRYLOCK_SUCCESS:
 				*unlock = true;
-				return true;
+				break;
 			}
 		} while (!need_resched());
+		preempt_enable();
+		return *unlock;
 
-		return false;
+	case MUTEX_TRYLOCK_SUCCESS:
+		*unlock = true;
+		return true;
 	}
 
 	BUG();
...
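Two things are going on in the shrinker change above. First, need_resched() is only a reliable exit condition while preemption is disabled: with preemption on, the flag can be consumed by the preemption itself and the spinner may never observe it, so the loop is now bracketed by preempt_disable()/preempt_enable(). Second, the old code had the case MUTEX_TRYLOCK_SUCCESS label buried inside the do/while, a legal but easy-to-misread construct that the rewrite hoists out. A purely illustrative, compilable toy of that construct:

    #include <stdio.h>

    /* A case label may legally sit inside a block nested within the
     * switch; entering via "kind == 1" jumps straight into the loop
     * body, which is what the old shrinker_lock() relied on. */
    static int demo(int kind)
    {
            int spins = 0;

            switch (kind) {
            case 0:
                    do {
                            spins++;
                            if (spins == 3) {
            case 1:
                                    return spins;  /* kind 1 lands here with spins == 0 */
                            }
                    } while (spins < 5);
                    return -1;
            }
            return -2;
    }

    int main(void)
    {
            printf("kind 0 -> %d, kind 1 -> %d\n", demo(0), demo(1));  /* 3, 0 */
            return 0;
    }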
@@ -1601,11 +1601,11 @@ static int gen8_emit_oa_config(struct drm_i915_gem_request *req)
 	u32 *cs;
 	int i;
 
-	cs = intel_ring_begin(req, n_flex_regs * 2 + 4);
+	cs = intel_ring_begin(req, ARRAY_SIZE(flex_mmio) * 2 + 4);
 	if (IS_ERR(cs))
 		return PTR_ERR(cs);
 
-	*cs++ = MI_LOAD_REGISTER_IMM(n_flex_regs + 1);
+	*cs++ = MI_LOAD_REGISTER_IMM(ARRAY_SIZE(flex_mmio) + 1);
 
 	*cs++ = i915_mmio_reg_offset(GEN8_OACTXCONTROL);
 	*cs++ = (dev_priv->perf.oa.period_exponent << GEN8_OA_TIMER_PERIOD_SHIFT) |
...
@@ -398,6 +398,7 @@ static void bdw_load_gamma_lut(struct drm_crtc_state *state, u32 offset)
 	}
 
 	/* Program the max register to clamp values > 1.0. */
+	i = lut_size - 1;
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 0),
 		   drm_color_lut_extract(lut[i].red, 16));
 	I915_WRITE(PREC_PAL_GC_MAX(pipe, 1),
...
@@ -469,7 +469,7 @@ static u32 intel_panel_compute_brightness(struct intel_connector *connector,
 
 	if (i915.invert_brightness > 0 ||
 	    dev_priv->quirks & QUIRK_INVERT_BRIGHTNESS) {
-		return panel->backlight.max - val;
+		return panel->backlight.max - val + panel->backlight.min;
 	}
 
 	return val;
...
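The backlight fix keeps inverted values inside the panel's valid range [min, max] rather than [0, max - min]: with the old formula, val == max inverted to 0, below the hardware minimum. Worked out with assumed limits min = 10, max = 255:

    #include <stdio.h>

    int main(void)
    {
            unsigned int min = 10, max = 255;  /* assumed panel limits */
            unsigned int vals[2] = { 10, 255 };

            for (int i = 0; i < 2; i++) {
                    unsigned int val = vals[i];

                    /* old: 245 and 0 (out of range); new: 255 and 10 */
                    printf("val=%3u  old=%3u  new=%3u\n",
                           val, max - val, max - val + min);
            }
            return 0;
    }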
@@ -5,7 +5,7 @@ config DRM_MSM
 	depends on ARCH_QCOM || (ARM && COMPILE_TEST)
 	depends on OF && COMMON_CLK
 	depends on MMU
-	select QCOM_MDT_LOADER
+	select QCOM_MDT_LOADER if ARCH_QCOM
 	select REGULATOR
 	select DRM_KMS_HELPER
 	select DRM_PANEL
...
@@ -15,7 +15,7 @@
 #include <linux/cpumask.h>
 #include <linux/qcom_scm.h>
 #include <linux/dma-mapping.h>
-#include <linux/of_reserved_mem.h>
+#include <linux/of_address.h>
 #include <linux/soc/qcom/mdt_loader.h>
 
 #include "msm_gem.h"
 #include "msm_mmu.h"
@@ -26,16 +26,34 @@ static void a5xx_dump(struct msm_gpu *gpu);
 
 #define GPU_PAS_ID 13
 
-#if IS_ENABLED(CONFIG_QCOM_MDT_LOADER)
-
 static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 {
 	const struct firmware *fw;
+	struct device_node *np;
+	struct resource r;
 	phys_addr_t mem_phys;
 	ssize_t mem_size;
 	void *mem_region = NULL;
 	int ret;
 
+	if (!IS_ENABLED(CONFIG_ARCH_QCOM))
+		return -EINVAL;
+
+	np = of_get_child_by_name(dev->of_node, "zap-shader");
+	if (!np)
+		return -ENODEV;
+
+	np = of_parse_phandle(np, "memory-region", 0);
+	if (!np)
+		return -EINVAL;
+
+	ret = of_address_to_resource(np, 0, &r);
+	if (ret)
+		return ret;
+
+	mem_phys = r.start;
+	mem_size = resource_size(&r);
+
 	/* Request the MDT file for the firmware */
 	ret = request_firmware(&fw, fwname, dev);
 	if (ret) {
@@ -51,7 +69,7 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 	}
 
 	/* Allocate memory for the firmware image */
-	mem_region = dmam_alloc_coherent(dev, mem_size, &mem_phys, GFP_KERNEL);
+	mem_region = memremap(mem_phys, mem_size, MEMREMAP_WC);
 	if (!mem_region) {
 		ret = -ENOMEM;
 		goto out;
@@ -69,16 +87,13 @@ static int zap_shader_load_mdt(struct device *dev, const char *fwname)
 		DRM_DEV_ERROR(dev, "Unable to authorize the image\n");
 
 out:
+	if (mem_region)
+		memunmap(mem_region);
+
 	release_firmware(fw);
 
 	return ret;
 }
-#else
-static int zap_shader_load_mdt(struct device *dev, const char *fwname)
-{
-	return -ENODEV;
-}
-#endif
 
 static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	struct msm_file_private *ctx)
@@ -117,12 +132,10 @@ static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
 	gpu->funcs->flush(gpu);
 }
 
-struct a5xx_hwcg {
+static const struct {
 	u32 offset;
 	u32 value;
-};
-
-static const struct a5xx_hwcg a530_hwcg[] = {
+} a5xx_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
 	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
@@ -217,38 +230,16 @@ static const struct a5xx_hwcg a530_hwcg[] = {
 	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
 };
 
-static const struct {
-	int (*test)(struct adreno_gpu *gpu);
-	const struct a5xx_hwcg *regs;
-	unsigned int count;
-} a5xx_hwcg_regs[] = {
-	{ adreno_is_a530, a530_hwcg, ARRAY_SIZE(a530_hwcg), },
-};
-
-static void _a5xx_enable_hwcg(struct msm_gpu *gpu,
-		const struct a5xx_hwcg *regs, unsigned int count)
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
 {
 	unsigned int i;
 
-	for (i = 0; i < count; i++)
-		gpu_write(gpu, regs[i].offset, regs[i].value);
+	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+		gpu_write(gpu, a5xx_hwcg[i].offset,
+			state ? a5xx_hwcg[i].value : 0);
 
-	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, 0xAAA8AA00);
-	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, 0x182);
-}
-
-static void a5xx_enable_hwcg(struct msm_gpu *gpu)
-{
-	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
-	unsigned int i;
-
-	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg_regs); i++) {
-		if (a5xx_hwcg_regs[i].test(adreno_gpu)) {
-			_a5xx_enable_hwcg(gpu, a5xx_hwcg_regs[i].regs,
-				a5xx_hwcg_regs[i].count);
-			return;
-		}
-	}
+	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
 }
 
 static int a5xx_me_init(struct msm_gpu *gpu)
@@ -377,45 +368,6 @@ static int a5xx_zap_shader_resume(struct msm_gpu *gpu)
 	return ret;
 }
 
-/* Set up a child device to "own" the zap shader */
-static int a5xx_zap_shader_dev_init(struct device *parent, struct device *dev)
-{
-	struct device_node *node;
-	int ret;
-
-	if (dev->parent)
-		return 0;
-
-	/* Find the sub-node for the zap shader */
-	node = of_get_child_by_name(parent->of_node, "zap-shader");
-	if (!node) {
-		DRM_DEV_ERROR(parent, "zap-shader not found in device tree\n");
-		return -ENODEV;
-	}
-
-	dev->parent = parent;
-	dev->of_node = node;
-	dev_set_name(dev, "adreno_zap_shader");
-
-	ret = device_register(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Couldn't register zap shader device\n");
-		goto out;
-	}
-
-	ret = of_reserved_mem_device_init(dev);
-	if (ret) {
-		DRM_DEV_ERROR(parent, "Unable to set up the reserved memory\n");
-		device_unregister(dev);
-	}
-
-out:
-	if (ret)
-		dev->parent = NULL;
-
-	return ret;
-}
-
 static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 {
 	static bool loaded;
@@ -444,11 +396,7 @@ static int a5xx_zap_shader_init(struct msm_gpu *gpu)
 		return -ENODEV;
 	}
 
-	ret = a5xx_zap_shader_dev_init(&pdev->dev, &a5xx_gpu->zap_dev);
-
-	if (!ret)
-		ret = zap_shader_load_mdt(&a5xx_gpu->zap_dev,
-			adreno_gpu->info->zapfw);
+	ret = zap_shader_load_mdt(&pdev->dev, adreno_gpu->info->zapfw);
 
 	loaded = !ret;
@@ -545,7 +493,7 @@ static int a5xx_hw_init(struct msm_gpu *gpu)
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
 
 	/* Enable HWCG */
-	a5xx_enable_hwcg(gpu);
+	a5xx_set_hwcg(gpu, true);
 
 	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
@@ -691,9 +639,6 @@ static void a5xx_destroy(struct msm_gpu *gpu)
 
 	DBG("%s", gpu->name);
 
-	if (a5xx_gpu->zap_dev.parent)
-		device_unregister(&a5xx_gpu->zap_dev);
-
 	if (a5xx_gpu->pm4_bo) {
 		if (a5xx_gpu->pm4_iova)
 			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
@@ -920,31 +865,30 @@ static const u32 a5xx_registers[] = {
 	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002B,
 	0x002E, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
 	0x0097, 0x00BB, 0x03A0, 0x0464, 0x0469, 0x046F, 0x04D2, 0x04D3,
-	0x04E0, 0x0533, 0x0540, 0x0555, 0xF400, 0xF400, 0xF800, 0xF807,
-	0x0800, 0x081A, 0x081F, 0x0841, 0x0860, 0x0860, 0x0880, 0x08A0,
-	0x0B00, 0x0B12, 0x0B15, 0x0B28, 0x0B78, 0x0B7F, 0x0BB0, 0x0BBD,
-	0x0BC0, 0x0BC6, 0x0BD0, 0x0C53, 0x0C60, 0x0C61, 0x0C80, 0x0C82,
-	0x0C84, 0x0C85, 0x0C90, 0x0C98, 0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2,
-	0x2180, 0x2185, 0x2580, 0x2585, 0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7,
-	0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8, 0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8,
-	0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E, 0x2100, 0x211E, 0x2140, 0x2145,
-	0x2500, 0x251E, 0x2540, 0x2545, 0x0D10, 0x0D17, 0x0D20, 0x0D23,
-	0x0D30, 0x0D30, 0x20C0, 0x20C0, 0x24C0, 0x24C0, 0x0E40, 0x0E43,
-	0x0E4A, 0x0E4A, 0x0E50, 0x0E57, 0x0E60, 0x0E7C, 0x0E80, 0x0E8E,
-	0x0E90, 0x0E96, 0x0EA0, 0x0EA8, 0x0EB0, 0x0EB2, 0xE140, 0xE147,
-	0xE150, 0xE187, 0xE1A0, 0xE1A9, 0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7,
-	0xE1D0, 0xE1D1, 0xE200, 0xE201, 0xE210, 0xE21C, 0xE240, 0xE268,
-	0xE000, 0xE006, 0xE010, 0xE09A, 0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB,
-	0xE100, 0xE105, 0xE380, 0xE38F, 0xE3B0, 0xE3B0, 0xE400, 0xE405,
-	0xE408, 0xE4E9, 0xE4F0, 0xE4F0, 0xE280, 0xE280, 0xE282, 0xE2A3,
-	0xE2A5, 0xE2C2, 0xE940, 0xE947, 0xE950, 0xE987, 0xE9A0, 0xE9A9,
-	0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7, 0xE9D0, 0xE9D1, 0xEA00, 0xEA01,
-	0xEA10, 0xEA1C, 0xEA40, 0xEA68, 0xE800, 0xE806, 0xE810, 0xE89A,
-	0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB, 0xE900, 0xE905, 0xEB80, 0xEB8F,
-	0xEBB0, 0xEBB0, 0xEC00, 0xEC05, 0xEC08, 0xECE9, 0xECF0, 0xECF0,
-	0xEA80, 0xEA80, 0xEA82, 0xEAA3, 0xEAA5, 0xEAC2, 0xA800, 0xA8FF,
-	0xAC60, 0xAC60, 0xB000, 0xB97F, 0xB9A0, 0xB9BF,
-	~0
+	0x04E0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081A, 0x081F, 0x0841,
+	0x0860, 0x0860, 0x0880, 0x08A0, 0x0B00, 0x0B12, 0x0B15, 0x0B28,
+	0x0B78, 0x0B7F, 0x0BB0, 0x0BBD, 0x0BC0, 0x0BC6, 0x0BD0, 0x0C53,
+	0x0C60, 0x0C61, 0x0C80, 0x0C82, 0x0C84, 0x0C85, 0x0C90, 0x0C98,
+	0x0CA0, 0x0CA0, 0x0CB0, 0x0CB2, 0x2180, 0x2185, 0x2580, 0x2585,
+	0x0CC1, 0x0CC1, 0x0CC4, 0x0CC7, 0x0CCC, 0x0CCC, 0x0CD0, 0x0CD8,
+	0x0CE0, 0x0CE5, 0x0CE8, 0x0CE8, 0x0CEC, 0x0CF1, 0x0CFB, 0x0D0E,
+	0x2100, 0x211E, 0x2140, 0x2145, 0x2500, 0x251E, 0x2540, 0x2545,
+	0x0D10, 0x0D17, 0x0D20, 0x0D23, 0x0D30, 0x0D30, 0x20C0, 0x20C0,
+	0x24C0, 0x24C0, 0x0E40, 0x0E43, 0x0E4A, 0x0E4A, 0x0E50, 0x0E57,
+	0x0E60, 0x0E7C, 0x0E80, 0x0E8E, 0x0E90, 0x0E96, 0x0EA0, 0x0EA8,
+	0x0EB0, 0x0EB2, 0xE140, 0xE147, 0xE150, 0xE187, 0xE1A0, 0xE1A9,
+	0xE1B0, 0xE1B6, 0xE1C0, 0xE1C7, 0xE1D0, 0xE1D1, 0xE200, 0xE201,
+	0xE210, 0xE21C, 0xE240, 0xE268, 0xE000, 0xE006, 0xE010, 0xE09A,
+	0xE0A0, 0xE0A4, 0xE0AA, 0xE0EB, 0xE100, 0xE105, 0xE380, 0xE38F,
+	0xE3B0, 0xE3B0, 0xE400, 0xE405, 0xE408, 0xE4E9, 0xE4F0, 0xE4F0,
+	0xE280, 0xE280, 0xE282, 0xE2A3, 0xE2A5, 0xE2C2, 0xE940, 0xE947,
+	0xE950, 0xE987, 0xE9A0, 0xE9A9, 0xE9B0, 0xE9B6, 0xE9C0, 0xE9C7,
+	0xE9D0, 0xE9D1, 0xEA00, 0xEA01, 0xEA10, 0xEA1C, 0xEA40, 0xEA68,
+	0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
+	0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
+	0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
+	0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
+	0xB9A0, 0xB9BF, ~0
 };
 
 static void a5xx_dump(struct msm_gpu *gpu)
@@ -1020,7 +964,14 @@ static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
 {
 	seq_printf(m, "status: %08x\n",
 			gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+
+	/*
+	 * Temporarily disable hardware clock gating before going into
+	 * adreno_show to avoid issues while reading the registers
+	 */
+	a5xx_set_hwcg(gpu, false);
 	adreno_show(gpu, m);
+	a5xx_set_hwcg(gpu, true);
 }
 #endif
...
@@ -36,8 +36,6 @@ struct a5xx_gpu {
 	uint32_t gpmu_dwords;
 
 	uint32_t lm_leakage;
-
-	struct device zap_dev;
 };
 
 #define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
@@ -59,5 +57,6 @@ static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
 }
 
 bool a5xx_idle(struct msm_gpu *gpu);
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
 
 #endif /* __A5XX_GPU_H__ */
@@ -48,8 +48,15 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
 		*value = adreno_gpu->base.fast_rate;
 		return 0;
 	case MSM_PARAM_TIMESTAMP:
-		if (adreno_gpu->funcs->get_timestamp)
-			return adreno_gpu->funcs->get_timestamp(gpu, value);
+		if (adreno_gpu->funcs->get_timestamp) {
+			int ret;
+
+			pm_runtime_get_sync(&gpu->pdev->dev);
+			ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+			pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+			return ret;
+		}
 		return -EINVAL;
 	default:
 		DBG("%s: invalid param: %u", gpu->name, param);
...
@@ -2137,6 +2137,13 @@ void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
 	struct msm_dsi_phy_clk_request *clk_req)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+	int ret;
+
+	ret = dsi_calc_clk_rate(msm_host);
+	if (ret) {
+		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
+		return;
+	}
 
 	clk_req->bitclk_rate = msm_host->byte_clk_rate * 8;
 	clk_req->escclk_rate = msm_host->esc_clk_rate;
@@ -2280,7 +2287,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 	struct drm_display_mode *mode)
 {
 	struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
-	int ret;
 
 	if (msm_host->mode) {
 		drm_mode_destroy(msm_host->dev, msm_host->mode);
@@ -2293,12 +2299,6 @@ int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
 		return -ENOMEM;
 	}
 
-	ret = dsi_calc_clk_rate(msm_host);
-	if (ret) {
-		pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
-		return ret;
-	}
-
 	return 0;
 }
...
@@ -221,8 +221,8 @@ static void blend_setup(struct drm_crtc *crtc)
 	struct mdp5_ctl *ctl = mdp5_cstate->ctl;
 	uint32_t blend_op, fg_alpha, bg_alpha, ctl_blend_flags = 0;
 	unsigned long flags;
-	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
-	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { SSPP_NONE };
+	enum mdp5_pipe stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
+	enum mdp5_pipe r_stage[STAGE_MAX + 1][MAX_PIPE_STAGE] = { { SSPP_NONE } };
 	int i, plane_cnt = 0;
 	bool bg_alpha_enabled = false;
 	u32 mixer_op_mode = 0;
@@ -753,6 +753,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	if (!handle) {
 		DBG("Cursor off");
 		cursor_enable = false;
+		mdp5_enable(mdp5_kms);
 		goto set_cursor;
 	}
@@ -776,6 +777,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
 			MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -804,6 +807,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
 	crtc_flush(crtc, flush_mask);
 
 end:
+	mdp5_disable(mdp5_kms);
 	if (old_bo) {
 		drm_flip_work_queue(&mdp5_crtc->unref_cursor_work, old_bo);
 		/* enable vblank to complete cursor work: */
@@ -836,6 +840,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 	get_roi(crtc, &roi_w, &roi_h);
 
+	mdp5_enable(mdp5_kms);
+
 	spin_lock_irqsave(&mdp5_crtc->cursor.lock, flags);
 	mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_SIZE(lm),
 			MDP5_LM_CURSOR_SIZE_ROI_H(roi_h) |
@@ -847,6 +853,8 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
 
 	crtc_flush(crtc, flush_mask);
 
+	mdp5_disable(mdp5_kms);
+
 	return 0;
 }
...
@@ -299,7 +299,7 @@ static void mdp5_encoder_enable(struct drm_encoder *encoder)
 	struct mdp5_interface *intf = mdp5_encoder->intf;
 
 	if (intf->mode == MDP5_INTF_DSI_MODE_COMMAND)
-		mdp5_cmd_encoder_disable(encoder);
+		mdp5_cmd_encoder_enable(encoder);
 	else
 		mdp5_vid_encoder_enable(encoder);
 }
...
@@ -502,7 +502,7 @@ static int get_clk(struct platform_device *pdev, struct clk **clkp,
 		const char *name, bool mandatory)
 {
 	struct device *dev = &pdev->dev;
-	struct clk *clk = devm_clk_get(dev, name);
+	struct clk *clk = msm_clk_get(pdev, name);
 	if (IS_ERR(clk) && mandatory) {
 		dev_err(dev, "failed to get %s (%ld)\n", name, PTR_ERR(clk));
 		return PTR_ERR(clk);
@@ -887,21 +887,21 @@ static int mdp5_init(struct platform_device *pdev, struct drm_device *dev)
 	}
 
 	/* mandatory clocks: */
-	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->axi_clk, "bus", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->ahb_clk, "iface", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->core_clk, "core_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->core_clk, "core", true);
 	if (ret)
 		goto fail;
-	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync_clk", true);
+	ret = get_clk(pdev, &mdp5_kms->vsync_clk, "vsync", true);
 	if (ret)
 		goto fail;
 
 	/* optional clocks: */
-	get_clk(pdev, &mdp5_kms->lut_clk, "lut_clk", false);
+	get_clk(pdev, &mdp5_kms->lut_clk, "lut", false);
 
 	/* we need to set a default rate before enabling. Set a safe
 	 * rate first, then figure out hw revision, and then set a
...
@@ -890,8 +890,8 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
 	struct mdp5_hw_pipe *right_hwpipe;
 	const struct mdp_format *format;
 	uint32_t nplanes, config = 0;
-	struct phase_step step = { 0 };
-	struct pixel_ext pe = { 0 };
+	struct phase_step step = { { 0 } };
+	struct pixel_ext pe = { { 0 } };
 	uint32_t hdecm = 0, vdecm = 0;
 	uint32_t pix_format;
 	unsigned int rotation;
...
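The { SSPP_NONE } -> { { SSPP_NONE } } and { 0 } -> { { 0 } } changes in the two mdp5 hunks above are about initializer shape, not values: the variables are a 2-D array and structs whose first member is itself an array, so the first initializer element is an aggregate and wants its own braces (GCC's -Wmissing-braces warns otherwise); everything not mentioned is zero-initialized either way. A compilable illustration with stand-in types:

    #include <stdio.h>

    enum pipe { PIPE_NONE, PIPE_A, PIPE_B };  /* stand-in for mdp5_pipe */

    int main(void)
    {
            /* 2-D array: the first row is an aggregate, so it gets braces;
             * all remaining elements are zero-initialized. */
            enum pipe stage[4][2] = { { PIPE_NONE } };

            /* struct whose first member is an array: same rule. */
            struct step { int values[3]; int flags; } s = { { 0 } };

            printf("%d %d %d\n", stage[3][1], s.values[2], s.flags);  /* 0 0 0 */
            return 0;
    }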
@@ -383,8 +383,10 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 		struct page **pages;
 
 		vma = add_vma(obj, aspace);
-		if (IS_ERR(vma))
-			return PTR_ERR(vma);
+		if (IS_ERR(vma)) {
+			ret = PTR_ERR(vma);
+			goto unlock;
+		}
 
 		pages = get_pages(obj);
 		if (IS_ERR(pages)) {
@@ -405,7 +407,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj,
 
 fail:
 	del_vma(vma);
-
+unlock:
 	mutex_unlock(&msm_obj->lock);
 	return ret;
 }
@@ -928,8 +930,12 @@ static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
 	if (use_vram) {
 		struct msm_gem_vma *vma;
 		struct page **pages;
+		struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+		mutex_lock(&msm_obj->lock);
 
 		vma = add_vma(obj, NULL);
+		mutex_unlock(&msm_obj->lock);
 		if (IS_ERR(vma)) {
 			ret = PTR_ERR(vma);
 			goto fail;
...
@@ -34,8 +34,8 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
 		struct msm_gpu *gpu, uint32_t nr_bos, uint32_t nr_cmds)
 {
 	struct msm_gem_submit *submit;
-	uint64_t sz = sizeof(*submit) + (nr_bos * sizeof(submit->bos[0])) +
-		(nr_cmds * sizeof(submit->cmd[0]));
+	uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+		((u64)nr_cmds * sizeof(submit->cmd[0]));
 
 	if (sz > SIZE_MAX)
 		return NULL;
@@ -451,7 +451,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
 	if (ret)
 		goto out;
 
-	if (!(args->fence & MSM_SUBMIT_NO_IMPLICIT)) {
+	if (!(args->flags & MSM_SUBMIT_NO_IMPLICIT)) {
 		ret = submit_fence_sync(submit);
 		if (ret)
 			goto out;
...
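The submit_create() fix widens the multiplication before it can wrap: nr_bos and nr_cmds are untrusted 32-bit values from userspace, and on a 32-bit platform (where size_t is 32 bits, as on the ARM systems msm targets) count * sizeof(...) wraps before being assigned to the 64-bit sz, which defeats the SIZE_MAX check; the (u64) casts force 64-bit arithmetic. A standalone demo that emulates the 32-bit multiply:

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
            uint32_t nr_bos = 0x20000000;  /* attacker-chosen count */
            uint32_t elem = 32;            /* assumed per-entry size */

            /* 32-bit product: 2^29 * 2^5 = 2^34 wraps to 0 */
            uint64_t wrapped = (uint32_t)(nr_bos * elem);
            /* the fix: widen one operand first */
            uint64_t widened = (uint64_t)nr_bos * elem;

            printf("wrapped sz: %llu, widened sz: %llu\n",
                   (unsigned long long)wrapped, (unsigned long long)widened);
            return 0;
    }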
@@ -42,7 +42,7 @@ void
 msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
 		struct msm_gem_vma *vma, struct sg_table *sgt)
 {
-	if (!vma->iova)
+	if (!aspace || !vma->iova)
 		return;
 
 	if (aspace->mmu) {
...
@@ -267,6 +267,8 @@ nvkm_disp_oneinit(struct nvkm_engine *engine)
 	/* Create output path objects for each VBIOS display path. */
 	i = -1;
 	while ((data = dcb_outp_parse(bios, ++i, &ver, &hdr, &dcbE))) {
+		if (ver < 0x40) /* No support for chipsets prior to NV50. */
+			break;
 		if (dcbE.type == DCB_OUTPUT_UNUSED)
 			continue;
 		if (dcbE.type == DCB_OUTPUT_EOL)
...
@@ -500,7 +500,7 @@ static void vop_line_flag_irq_disable(struct vop *vop)
 static int vop_enable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int ret;
+	int ret, i;
 
 	ret = pm_runtime_get_sync(vop->dev);
 	if (ret < 0) {
@@ -533,6 +533,20 @@ static int vop_enable(struct drm_crtc *crtc)
 	}
 
 	memcpy(vop->regs, vop->regsbak, vop->len);
+	/*
+	 * We need to make sure that all windows are disabled before we
+	 * enable the crtc. Otherwise we might try to scan from a destroyed
+	 * buffer later.
+	 */
+	for (i = 0; i < vop->data->win_size; i++) {
+		struct vop_win *vop_win = &vop->win[i];
+		const struct vop_win_data *win = vop_win->data;
+
+		spin_lock(&vop->reg_lock);
+		VOP_WIN_SET(vop, win, enable, 0);
+		spin_unlock(&vop->reg_lock);
+	}
+
 	vop_cfg_done(vop);
 
 	/*
@@ -566,28 +580,11 @@ static int vop_enable(struct drm_crtc *crtc)
 static void vop_crtc_disable(struct drm_crtc *crtc)
 {
 	struct vop *vop = to_vop(crtc);
-	int i;
 
 	WARN_ON(vop->event);
 
 	rockchip_drm_psr_deactivate(&vop->crtc);
 
-	/*
-	 * We need to make sure that all windows are disabled before we
-	 * disable that crtc. Otherwise we might try to scan from a destroyed
-	 * buffer later.
-	 */
-	for (i = 0; i < vop->data->win_size; i++) {
-		struct vop_win *vop_win = &vop->win[i];
-		const struct vop_win_data *win = vop_win->data;
-
-		spin_lock(&vop->reg_lock);
-		VOP_WIN_SET(vop, win, enable, 0);
-		spin_unlock(&vop->reg_lock);
-	}
-
-	vop_cfg_done(vop);
-
 	drm_crtc_vblank_off(crtc);
 
 	/*
@@ -682,8 +679,10 @@ static int vop_plane_atomic_check(struct drm_plane *plane,
 	 * Src.x1 can be odd when do clip, but yuv plane start point
 	 * need align with 2 pixel.
 	 */
-	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2))
+	if (is_yuv_support(fb->format->format) && ((state->src.x1 >> 16) % 2)) {
+		DRM_ERROR("Invalid Source: Yuv format not support odd xpos\n");
 		return -EINVAL;
+	}
 
 	return 0;
 }
@@ -764,7 +763,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 	spin_lock(&vop->reg_lock);
 
 	VOP_WIN_SET(vop, win, format, format);
-	VOP_WIN_SET(vop, win, yrgb_vir, fb->pitches[0] >> 2);
+	VOP_WIN_SET(vop, win, yrgb_vir, DIV_ROUND_UP(fb->pitches[0], 4));
 	VOP_WIN_SET(vop, win, yrgb_mst, dma_addr);
 	if (is_yuv_support(fb->format->format)) {
 		int hsub = drm_format_horz_chroma_subsampling(fb->format->format);
@@ -778,7 +777,7 @@ static void vop_plane_atomic_update(struct drm_plane *plane,
 		offset += (src->y1 >> 16) * fb->pitches[1] / vsub;
 
 		dma_addr = rk_uv_obj->dma_addr + offset + fb->offsets[1];
-		VOP_WIN_SET(vop, win, uv_vir, fb->pitches[1] >> 2);
+		VOP_WIN_SET(vop, win, uv_vir, DIV_ROUND_UP(fb->pitches[1], 4));
 		VOP_WIN_SET(vop, win, uv_mst, dma_addr);
 	}
...
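On the yrgb_vir/uv_vir change: the VOP registers take the stride in 32-bit words (hence the division by 4), but fb->pitches[] is in bytes and need not be a multiple of 4, and '>> 2' rounds down, silently dropping the last partial word of every scanline; rounding up fixes the reported NV12 display error. The difference, standalone:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
            unsigned int pitches[] = { 640, 642, 1920, 1922 };  /* bytes, assumed */

            for (int i = 0; i < 4; i++)
                    printf("pitch %4u -> >>2: %u words, DIV_ROUND_UP: %u words\n",
                           pitches[i], pitches[i] >> 2, DIV_ROUND_UP(pitches[i], 4));
            return 0;
    }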
@@ -282,6 +282,9 @@ static inline uint16_t scl_get_bili_dn_vskip(int src_h, int dst_h,
 
 	act_height = (src_h + vskiplines - 1) / vskiplines;
 
+	if (act_height == dst_h)
+		return GET_SCL_FT_BILI_DN(src_h, dst_h) / vskiplines;
+
 	return GET_SCL_FT_BILI_DN(act_height, dst_h);
 }
...
@@ -7,7 +7,6 @@ config DRM_STM
 	select DRM_PANEL
 	select VIDEOMODE_HELPERS
 	select FB_PROVIDE_GET_FB_UNMAPPED_AREA
-	default y
 
 	help
 	  Enable support for the on-chip display controller on
...
@@ -43,12 +43,13 @@ struct sync_file {
 #endif
 
 	wait_queue_head_t	wq;
+	unsigned long		flags;
 
 	struct dma_fence	*fence;
 	struct dma_fence_cb	cb;
 };
 
-#define POLL_ENABLED DMA_FENCE_FLAG_USER_BITS
+#define POLL_ENABLED 0
 
 struct sync_file *sync_file_create(struct dma_fence *fence);
 struct dma_fence *sync_file_get_fence(int fd);
...
@@ -171,7 +171,7 @@ struct drm_msm_gem_submit_cmd {
 	__u32 size;           /* in, cmdstream size */
 	__u32 pad;
 	__u32 nr_relocs;      /* in, number of submit_reloc's */
-	__u64 __user relocs;  /* in, ptr to array of submit_reloc's */
+	__u64 relocs;         /* in, ptr to array of submit_reloc's */
 };
 
 /* Each buffer referenced elsewhere in the cmdstream submit (ie. the
@@ -215,8 +215,8 @@ struct drm_msm_gem_submit {
 	__u32 fence;          /* out */
 	__u32 nr_bos;         /* in, number of submit_bo's */
 	__u32 nr_cmds;        /* in, number of submit_cmd's */
-	__u64 __user bos;     /* in, ptr to array of submit_bo's */
-	__u64 __user cmds;    /* in, ptr to array of submit_cmd's */
+	__u64 bos;            /* in, ptr to array of submit_bo's */
+	__u64 cmds;           /* in, ptr to array of submit_cmd's */
 	__s32 fence_fd;       /* in/out fence fd (see MSM_SUBMIT_FENCE_FD_IN/OUT) */
 };
...
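Dropping __user from the msm uapi header is right on two counts: __user is a kernel-internal sparse annotation that means nothing (and shouldn't leak) in headers userspace compiles, and it only applies to pointer types anyway; these fields are plain __u64 values through which userspace passes a pointer, converted back on the kernel side (the kernel provides u64_to_user_ptr() for that). A sketch of the userspace convention, with stand-in types:

    #include <stdint.h>
    #include <stdio.h>

    /* Stand-in for the uapi struct: the pointer travels as a plain
     * 64-bit integer, so no pointer annotation applies. */
    struct fake_submit {
            uint32_t nr_bos;
            uint64_t bos;  /* in: userspace pointer as integer */
    };

    int main(void)
    {
            uint32_t bo_handles[4] = { 1, 2, 3, 4 };
            struct fake_submit req = {
                    .nr_bos = 4,
                    .bos = (uintptr_t)bo_handles,
            };

            printf("passing %u bos at 0x%llx\n", req.nr_bos,
                   (unsigned long long)req.bos);
            return 0;
    }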