Commit 38d741cb authored by Linus Torvalds

Merge tag 'drm-fixes-2022-04-29' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "Another relatively quiet week, amdgpu leads the way, some i915 display
  fixes, and a single sunxi fix.

  amdgpu:
   - Runtime pm fix
   - DCN memory leak fix in error path
   - SI DPM deadlock fix
   - S0ix fix

  amdkfd:
   - GWS fix
   - GWS support for CRIU

  i915:
   - Fix #5284: Backlight control regression on XMG Core 15 e21
   - Fix black display plane on Acer One AO532h
   - Two smaller display fixes

  sunxi:
   - Single fix to stop applying PHYS_OFFSET twice"

* tag 'drm-fixes-2022-04-29' of git://anongit.freedesktop.org/drm/drm:
  drm/amdgpu: keep mmhub clock gating being enabled during s2idle suspend
  drm/amd/pm: fix the deadlock issue observed on SI
  drm/amd/display: Fix memory leak in dcn21_clock_source_create
  drm/amdgpu: don't runtime suspend if there are displays attached (v3)
  drm/amdkfd: CRIU add support for GWS queues
  drm/amdkfd: Fix GWS queue count
  drm/sun4i: Remove obsolete references to PHYS_OFFSET
  drm/i915/fbc: Consult hw.crtc instead of uapi.crtc
  drm/i915: Fix SEL_FETCH_PLANE_*(PIPE_B+) register addresses
  drm/i915: Check EDID for HDR static metadata when choosing blc
  drm/i915: Fix DISP_POS_Y and DISP_HEIGHT defines
parents 249aca0d 9d9f7207
@@ -2395,6 +2395,71 @@ static int amdgpu_pmops_restore(struct device *dev)
     return amdgpu_device_resume(drm_dev, true);
 }
 
+static int amdgpu_runtime_idle_check_display(struct device *dev)
+{
+    struct pci_dev *pdev = to_pci_dev(dev);
+    struct drm_device *drm_dev = pci_get_drvdata(pdev);
+    struct amdgpu_device *adev = drm_to_adev(drm_dev);
+
+    if (adev->mode_info.num_crtc) {
+        struct drm_connector *list_connector;
+        struct drm_connector_list_iter iter;
+        int ret = 0;
+
+        /* XXX: Return busy if any displays are connected to avoid
+         * possible display wakeups after runtime resume due to
+         * hotplug events in case any displays were connected while
+         * the GPU was in suspend.  Remove this once that is fixed.
+         */
+        mutex_lock(&drm_dev->mode_config.mutex);
+        drm_connector_list_iter_begin(drm_dev, &iter);
+        drm_for_each_connector_iter(list_connector, &iter) {
+            if (list_connector->status == connector_status_connected) {
+                ret = -EBUSY;
+                break;
+            }
+        }
+        drm_connector_list_iter_end(&iter);
+        mutex_unlock(&drm_dev->mode_config.mutex);
+
+        if (ret)
+            return ret;
+
+        if (amdgpu_device_has_dc_support(adev)) {
+            struct drm_crtc *crtc;
+
+            drm_for_each_crtc(crtc, drm_dev) {
+                drm_modeset_lock(&crtc->mutex, NULL);
+                if (crtc->state->active)
+                    ret = -EBUSY;
+                drm_modeset_unlock(&crtc->mutex);
+                if (ret < 0)
+                    break;
+            }
+        } else {
+            mutex_lock(&drm_dev->mode_config.mutex);
+            drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
+
+            drm_connector_list_iter_begin(drm_dev, &iter);
+            drm_for_each_connector_iter(list_connector, &iter) {
+                if (list_connector->dpms == DRM_MODE_DPMS_ON) {
+                    ret = -EBUSY;
+                    break;
+                }
+            }
+            drm_connector_list_iter_end(&iter);
+
+            drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
+            mutex_unlock(&drm_dev->mode_config.mutex);
+        }
+        if (ret)
+            return ret;
+    }
+
+    return 0;
+}
+
 static int amdgpu_pmops_runtime_suspend(struct device *dev)
 {
     struct pci_dev *pdev = to_pci_dev(dev);
@@ -2407,6 +2472,10 @@ static int amdgpu_pmops_runtime_suspend(struct device *dev)
         return -EBUSY;
     }
 
+    ret = amdgpu_runtime_idle_check_display(dev);
+    if (ret)
+        return ret;
+
     /* wait for all rings to drain before suspending */
     for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
         struct amdgpu_ring *ring = adev->rings[i];
@@ -2516,41 +2585,7 @@ static int amdgpu_pmops_runtime_idle(struct device *dev)
         return -EBUSY;
     }
 
-    if (amdgpu_device_has_dc_support(adev)) {
-        struct drm_crtc *crtc;
-
-        drm_for_each_crtc(crtc, drm_dev) {
-            drm_modeset_lock(&crtc->mutex, NULL);
-            if (crtc->state->active)
-                ret = -EBUSY;
-            drm_modeset_unlock(&crtc->mutex);
-            if (ret < 0)
-                break;
-        }
-    } else {
-        struct drm_connector *list_connector;
-        struct drm_connector_list_iter iter;
-
-        mutex_lock(&drm_dev->mode_config.mutex);
-        drm_modeset_lock(&drm_dev->mode_config.connection_mutex, NULL);
-
-        drm_connector_list_iter_begin(drm_dev, &iter);
-        drm_for_each_connector_iter(list_connector, &iter) {
-            if (list_connector->dpms == DRM_MODE_DPMS_ON) {
-                ret = -EBUSY;
-                break;
-            }
-        }
-        drm_connector_list_iter_end(&iter);
-
-        drm_modeset_unlock(&drm_dev->mode_config.connection_mutex);
-        mutex_unlock(&drm_dev->mode_config.mutex);
-    }
-
-    if (ret == -EBUSY)
-        DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
+    ret = amdgpu_runtime_idle_check_display(dev);
 
     pm_runtime_mark_last_busy(dev);
     pm_runtime_autosuspend(dev);
...
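The change above factors the open-coded display checks out of the runtime-idle handler into amdgpu_runtime_idle_check_display() and calls it from runtime suspend as well, so a GPU with a connected or active display vetoes autosuspend from both paths. A minimal userspace sketch of that veto pattern follows; all names here are illustrative stand-ins, not driver code:

    #include <stdio.h>
    #include <errno.h>

    /* Illustrative stand-in for the driver's display state. */
    struct gpu {
        int connected_displays;
        int active_crtcs;
    };

    /* Shared helper: report busy while any display could wake us up. */
    static int runtime_idle_check_display(const struct gpu *g)
    {
        if (g->connected_displays || g->active_crtcs)
            return -EBUSY;
        return 0;
    }

    /* Both runtime callbacks consult the same helper, as in the patch. */
    static int runtime_idle(const struct gpu *g)
    {
        return runtime_idle_check_display(g);
    }

    static int runtime_suspend(const struct gpu *g)
    {
        int ret = runtime_idle_check_display(g);

        if (ret)
            return ret;        /* refuse to suspend: display in use */
        /* ... drain rings, save state ... */
        return 0;
    }

    int main(void)
    {
        struct gpu g = { .connected_displays = 1, .active_crtcs = 0 };

        printf("idle    -> %d\n", runtime_idle(&g));     /* -16 (-EBUSY) */
        printf("suspend -> %d\n", runtime_suspend(&g));  /* -16 (-EBUSY) */

        g.connected_displays = 0;
        printf("suspend -> %d\n", runtime_suspend(&g));  /* 0 */
        return 0;
    }

Centralizing the check means the suspend and idle paths can never disagree about what counts as "display in use".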
@@ -1151,6 +1151,16 @@ static int gmc_v10_0_set_clockgating_state(void *handle,
     int r;
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
 
+    /*
+     * MMHUB failing to disconnect from DF when MMHUB clock gating is
+     * disabled is a new problem observed on DF 3.0.3; the same suspend
+     * sequence shows no issue on DF 3.0.2 series platforms.
+     */
+    if (adev->in_s0ix && adev->ip_versions[DF_HWIP][0] > IP_VERSION(3, 0, 2)) {
+        dev_dbg(adev->dev, "keep mmhub clock gating being enabled for s0ix\n");
+        return 0;
+    }
+
     r = adev->mmhub.funcs->set_clockgating(adev, state);
     if (r)
         return r;
...
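The `> IP_VERSION(3, 0, 2)` gate relies on amdgpu packing IP versions so that a plain integer compare orders them correctly. A standalone sketch of that encoding; the macro body below mirrors the usual major/minor/revision packing and should be treated as an assumption here:

    #include <stdio.h>
    #include <stdint.h>

    /* Assumed packing: major<<16 | minor<<8 | revision. */
    #define IP_VERSION(mj, mn, rv) (((mj) << 16) | ((mn) << 8) | (rv))

    int main(void)
    {
        uint32_t df_302 = IP_VERSION(3, 0, 2);
        uint32_t df_303 = IP_VERSION(3, 0, 3);

        /* DF 3.0.3 and newer keep MMHUB clock gating enabled for s0ix. */
        printf("3.0.3 > 3.0.2 ? %d\n", df_303 > df_302);  /* prints 1 */
        return 0;
    }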
@@ -130,19 +130,33 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
 }
 
 static void increment_queue_count(struct device_queue_manager *dqm,
-                  enum kfd_queue_type type)
+                  struct qcm_process_device *qpd,
+                  struct queue *q)
 {
     dqm->active_queue_count++;
-    if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+    if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+        q->properties.type == KFD_QUEUE_TYPE_DIQ)
         dqm->active_cp_queue_count++;
+
+    if (q->properties.is_gws) {
+        dqm->gws_queue_count++;
+        qpd->mapped_gws_queue = true;
+    }
 }
 
 static void decrement_queue_count(struct device_queue_manager *dqm,
-                  enum kfd_queue_type type)
+                  struct qcm_process_device *qpd,
+                  struct queue *q)
 {
     dqm->active_queue_count--;
-    if (type == KFD_QUEUE_TYPE_COMPUTE || type == KFD_QUEUE_TYPE_DIQ)
+    if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
+        q->properties.type == KFD_QUEUE_TYPE_DIQ)
         dqm->active_cp_queue_count--;
+
+    if (q->properties.is_gws) {
+        dqm->gws_queue_count--;
+        qpd->mapped_gws_queue = false;
+    }
 }
 
 /*
@@ -412,7 +426,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
     list_add(&q->list, &qpd->queues_list);
     qpd->queue_count++;
     if (q->properties.is_active)
-        increment_queue_count(dqm, q->properties.type);
+        increment_queue_count(dqm, qpd, q);
 
     /*
      * Unconditionally increment this counter, regardless of the queue's
@@ -601,13 +615,8 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
         deallocate_vmid(dqm, qpd, q);
     }
     qpd->queue_count--;
-    if (q->properties.is_active) {
-        decrement_queue_count(dqm, q->properties.type);
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count--;
-            qpd->mapped_gws_queue = false;
-        }
-    }
+    if (q->properties.is_active)
+        decrement_queue_count(dqm, qpd, q);
 
     return retval;
 }
@@ -700,12 +709,11 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q,
      * dqm->active_queue_count to determine whether a new runlist must be
      * uploaded.
      */
-    if (q->properties.is_active && !prev_active)
-        increment_queue_count(dqm, q->properties.type);
-    else if (!q->properties.is_active && prev_active)
-        decrement_queue_count(dqm, q->properties.type);
-
-    if (q->gws && !q->properties.is_gws) {
+    if (q->properties.is_active && !prev_active) {
+        increment_queue_count(dqm, &pdd->qpd, q);
+    } else if (!q->properties.is_active && prev_active) {
+        decrement_queue_count(dqm, &pdd->qpd, q);
+    } else if (q->gws && !q->properties.is_gws) {
         if (q->properties.is_active) {
             dqm->gws_queue_count++;
             pdd->qpd.mapped_gws_queue = true;
@@ -767,11 +775,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
         mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                 q->properties.type)];
         q->properties.is_active = false;
-        decrement_queue_count(dqm, q->properties.type);
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count--;
-            qpd->mapped_gws_queue = false;
-        }
+        decrement_queue_count(dqm, qpd, q);
 
         if (WARN_ONCE(!dqm->sched_running, "Evict when stopped\n"))
             continue;
@@ -817,7 +821,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
             continue;
 
         q->properties.is_active = false;
-        decrement_queue_count(dqm, q->properties.type);
+        decrement_queue_count(dqm, qpd, q);
     }
     pdd->last_evict_timestamp = get_jiffies_64();
     retval = execute_queues_cpsch(dqm,
@@ -888,11 +892,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
         mqd_mgr = dqm->mqd_mgrs[get_mqd_type_from_queue_type(
                 q->properties.type)];
         q->properties.is_active = true;
-        increment_queue_count(dqm, q->properties.type);
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count++;
-            qpd->mapped_gws_queue = true;
-        }
+        increment_queue_count(dqm, qpd, q);
 
         if (WARN_ONCE(!dqm->sched_running, "Restore when stopped\n"))
             continue;
@@ -950,7 +950,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
             continue;
 
         q->properties.is_active = true;
-        increment_queue_count(dqm, q->properties.type);
+        increment_queue_count(dqm, &pdd->qpd, q);
     }
     retval = execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1378,7 +1378,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
             dqm->total_queue_count);
 
     list_add(&kq->list, &qpd->priv_queue_list);
-    increment_queue_count(dqm, kq->queue->properties.type);
+    increment_queue_count(dqm, qpd, kq->queue);
     qpd->is_debug = true;
     execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
     dqm_unlock(dqm);
@@ -1392,7 +1392,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
 {
     dqm_lock(dqm);
     list_del(&kq->list);
-    decrement_queue_count(dqm, kq->queue->properties.type);
+    decrement_queue_count(dqm, qpd, kq->queue);
     qpd->is_debug = false;
     execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
     /*
@@ -1467,7 +1467,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
     qpd->queue_count++;
 
     if (q->properties.is_active) {
-        increment_queue_count(dqm, q->properties.type);
+        increment_queue_count(dqm, qpd, q);
 
         execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
@@ -1683,15 +1683,11 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
     list_del(&q->list);
     qpd->queue_count--;
     if (q->properties.is_active) {
-        decrement_queue_count(dqm, q->properties.type);
+        decrement_queue_count(dqm, qpd, q);
         retval = execute_queues_cpsch(dqm,
                 KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
         if (retval == -ETIME)
             qpd->reset_wavefronts = true;
-        if (q->properties.is_gws) {
-            dqm->gws_queue_count--;
-            qpd->mapped_gws_queue = false;
-        }
     }
 
     /*
@@ -1932,7 +1928,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
     /* Clean all kernel queues */
     list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
         list_del(&kq->list);
-        decrement_queue_count(dqm, kq->queue->properties.type);
+        decrement_queue_count(dqm, qpd, kq->queue);
         qpd->is_debug = false;
         dqm->total_queue_count--;
         filter = KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES;
@@ -1945,13 +1941,8 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
         else if (q->properties.type == KFD_QUEUE_TYPE_SDMA_XGMI)
             deallocate_sdma_queue(dqm, q);
 
-        if (q->properties.is_active) {
-            decrement_queue_count(dqm, q->properties.type);
-            if (q->properties.is_gws) {
-                dqm->gws_queue_count--;
-                qpd->mapped_gws_queue = false;
-            }
-        }
+        if (q->properties.is_active)
+            decrement_queue_count(dqm, qpd, q);
 
         dqm->total_queue_count--;
     }
...
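The GWS queue-count fix is an accounting consolidation: gws_queue_count and qpd->mapped_gws_queue were previously adjusted by hand at some, but not all, of the places a queue becomes active or inactive, letting the counters drift. Folding the GWS bookkeeping into increment_queue_count()/decrement_queue_count() makes every activation path pay the same costs. A schematic sketch of the pattern, with illustrative names:

    #include <stdbool.h>
    #include <stdio.h>

    struct dqm   { int active_queue_count; int gws_queue_count; };
    struct queue { bool is_gws; };

    /* Single entry points keep the derived counter in lock step. */
    static void inc_queue(struct dqm *d, const struct queue *q)
    {
        d->active_queue_count++;
        if (q->is_gws)
            d->gws_queue_count++;
    }

    static void dec_queue(struct dqm *d, const struct queue *q)
    {
        d->active_queue_count--;
        if (q->is_gws)
            d->gws_queue_count--;
    }

    int main(void)
    {
        struct dqm d = {0};
        struct queue q = { .is_gws = true };

        inc_queue(&d, &q);   /* activate: both counters move together */
        dec_queue(&d, &q);   /* deactivate: and move back together    */
        printf("active=%d gws=%d\n", d.active_queue_count, d.gws_queue_count);
        return 0;
    }

With the helpers owning the invariant, a call site can no longer update one counter and forget the other.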
@@ -1103,7 +1103,7 @@ struct kfd_criu_queue_priv_data {
     uint32_t priority;
     uint32_t q_percent;
     uint32_t doorbell_id;
-    uint32_t is_gws;
+    uint32_t gws;
     uint32_t sdma_id;
     uint32_t eop_ring_buffer_size;
     uint32_t ctx_save_restore_area_size;
...
@@ -636,6 +636,8 @@ static int criu_checkpoint_queue(struct kfd_process_device *pdd,
     q_data->ctx_save_restore_area_size =
         q->properties.ctx_save_restore_area_size;
 
+    q_data->gws = !!q->gws;
+
     ret = pqm_checkpoint_mqd(&pdd->process->pqm, q->properties.queue_id, mqd, ctl_stack);
     if (ret) {
         pr_err("Failed checkpoint queue_mqd (%d)\n", ret);
@@ -743,7 +745,6 @@ static void set_queue_properties_from_criu(struct queue_properties *qp,
                 struct kfd_criu_queue_priv_data *q_data)
 {
     qp->is_interop = false;
-    qp->is_gws = q_data->is_gws;
     qp->queue_percent = q_data->q_percent;
     qp->priority = q_data->priority;
     qp->queue_address = q_data->q_address;
@@ -826,12 +827,15 @@ int kfd_criu_restore_queue(struct kfd_process *p,
                 NULL);
     if (ret) {
         pr_err("Failed to create new queue err:%d\n", ret);
-        ret = -EINVAL;
+        goto exit;
     }
+
+    if (q_data->gws)
+        ret = pqm_set_gws(&p->pqm, q_data->q_id, pdd->dev->gws);
+
 exit:
     if (ret)
-        pr_err("Failed to create queue (%d)\n", ret);
+        pr_err("Failed to restore queue (%d)\n", ret);
     else
         pr_debug("Queue id %d was restored successfully\n", queue_id);
...
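For checkpoint/restore, a queue's GWS binding is a live device resource that cannot be serialized as a pointer, so the checkpoint records only whether GWS was attached (q_data->gws = !!q->gws) and the restore path re-establishes the binding through pqm_set_gws(). A small sketch of saving a capability flag instead of a handle; the names are illustrative:

    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct queue    { void *gws; };     /* live resource, not serializable */
    struct snapshot { bool had_gws; };  /* what actually goes to disk      */

    static void checkpoint(const struct queue *q, struct snapshot *s)
    {
        s->had_gws = (q->gws != NULL);  /* record the fact, not the pointer */
    }

    static void restore(struct queue *q, const struct snapshot *s, void *dev_gws)
    {
        q->gws = s->had_gws ? dev_gws : NULL;  /* re-bind on the new device */
    }

    int main(void)
    {
        int gws_block;                   /* stands in for the hw resource */
        struct queue q = { .gws = &gws_block };
        struct snapshot s;

        checkpoint(&q, &s);
        q.gws = NULL;                    /* original process torn down */
        restore(&q, &s, &gws_block);     /* new process, same capability */
        printf("restored gws? %d\n", q.gws != NULL);
        return 0;
    }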
@@ -997,6 +997,7 @@ static struct clock_source *dcn21_clock_source_create(
         return &clk_src->base;
     }
 
+    kfree(clk_src);
     BREAK_TO_DEBUGGER();
     return NULL;
 }
...
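The one-line DCN fix closes a classic constructor error path: allocation succeeds, construction fails, and the early return used to leak the object. A minimal userspace sketch of the pattern, assuming a construct step that can fail:

    #include <stdlib.h>
    #include <stdbool.h>

    struct clock_source { int dummy; };

    /* Assumed stand-in for the construct step; fails here for the demo. */
    static bool construct(struct clock_source *cs) { (void)cs; return false; }

    static struct clock_source *clock_source_create(void)
    {
        struct clock_source *cs = calloc(1, sizeof(*cs));

        if (!cs)
            return NULL;

        if (construct(cs))
            return cs;       /* success: ownership passes to the caller */

        free(cs);            /* the missing kfree(): drop the partial object */
        return NULL;         /* caller sees failure, nothing is leaked  */
    }

    int main(void) { return clock_source_create() ? 1 : 0; }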
@@ -427,6 +427,7 @@ int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors senso
 void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
 {
     const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+    int i;
 
     if (!adev->pm.dpm_enabled)
         return;
@@ -434,6 +435,15 @@ void amdgpu_dpm_compute_clocks(struct amdgpu_device *adev)
     if (!pp_funcs->pm_compute_clocks)
         return;
 
+    if (adev->mode_info.num_crtc)
+        amdgpu_display_bandwidth_update(adev);
+
+    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
+        struct amdgpu_ring *ring = adev->rings[i];
+        if (ring && ring->sched.ready)
+            amdgpu_fence_wait_empty(ring);
+    }
+
     mutex_lock(&adev->pm.mutex);
     pp_funcs->pm_compute_clocks(adev->powerplay.pp_handle);
     mutex_unlock(&adev->pm.mutex);
@@ -443,6 +453,20 @@ void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable)
 {
     int ret = 0;
 
+    if (adev->family == AMDGPU_FAMILY_SI) {
+        mutex_lock(&adev->pm.mutex);
+        if (enable) {
+            adev->pm.dpm.uvd_active = true;
+            adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
+        } else {
+            adev->pm.dpm.uvd_active = false;
+        }
+        mutex_unlock(&adev->pm.mutex);
+
+        amdgpu_dpm_compute_clocks(adev);
+        return;
+    }
+
     ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable);
     if (ret)
         DRM_ERROR("Dpm %s uvd failed, ret = %d. \n",
@@ -453,6 +477,21 @@ void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable)
 {
     int ret = 0;
 
+    if (adev->family == AMDGPU_FAMILY_SI) {
+        mutex_lock(&adev->pm.mutex);
+        if (enable) {
+            adev->pm.dpm.vce_active = true;
+            /* XXX select vce level based on ring/task */
+            adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
+        } else {
+            adev->pm.dpm.vce_active = false;
+        }
+        mutex_unlock(&adev->pm.mutex);
+
+        amdgpu_dpm_compute_clocks(adev);
+        return;
+    }
+
     ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable);
     if (ret)
         DRM_ERROR("Dpm %s vce failed, ret = %d. \n",
...
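The SI deadlock fix is a lock-ordering change: the display bandwidth update and the blocking amdgpu_fence_wait_empty() calls now run in amdgpu_dpm_compute_clocks() before pm.mutex is taken, and the SI UVD/VCE handling moves into amdgpu_dpm_enable_uvd()/amdgpu_dpm_enable_vce(), which release pm.mutex before calling amdgpu_dpm_compute_clocks(). The hunks that follow remove the now-duplicated bandwidth/fence code from the legacy and powerplay paths that ran it under the lock. The underlying rule is to finish blocking waits before acquiring a mutex the completion path may also need; a minimal pthread sketch of that ordering, illustrative rather than driver code:

    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t pm_mutex = PTHREAD_MUTEX_INITIALIZER;
    static int uvd_active;

    /* Stands in for amdgpu_fence_wait_empty(): may block on work whose
     * signaling path itself needs pm_mutex. */
    static void wait_for_fences(void) { }

    static void compute_clocks(void)
    {
        wait_for_fences();               /* blocking waits run lock-free */

        pthread_mutex_lock(&pm_mutex);   /* lock only the state update  */
        /* ... reprogram clocks ... */
        pthread_mutex_unlock(&pm_mutex);
    }

    static void enable_uvd(int enable)
    {
        pthread_mutex_lock(&pm_mutex);
        uvd_active = enable;             /* update state under the lock  */
        pthread_mutex_unlock(&pm_mutex); /* drop the lock first ...      */

        compute_clocks();                /* ... then do work that blocks */
    }

    int main(void)
    {
        enable_uvd(1);
        printf("uvd_active=%d, no recursive pm_mutex acquisition\n",
               uvd_active);
        return 0;
    }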
@@ -1028,16 +1028,6 @@ static int amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev)
 void amdgpu_legacy_dpm_compute_clocks(void *handle)
 {
     struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-    int i = 0;
-
-    if (adev->mode_info.num_crtc)
-        amdgpu_display_bandwidth_update(adev);
-
-    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-        struct amdgpu_ring *ring = adev->rings[i];
-        if (ring && ring->sched.ready)
-            amdgpu_fence_wait_empty(ring);
-    }
 
     amdgpu_dpm_get_active_displays(adev);
...
@@ -3892,40 +3892,6 @@ static int si_set_boot_state(struct amdgpu_device *adev)
 }
 #endif
 
-static int si_set_powergating_by_smu(void *handle,
-                     uint32_t block_type,
-                     bool gate)
-{
-    struct amdgpu_device *adev = (struct amdgpu_device *)handle;
-
-    switch (block_type) {
-    case AMD_IP_BLOCK_TYPE_UVD:
-        if (!gate) {
-            adev->pm.dpm.uvd_active = true;
-            adev->pm.dpm.state = POWER_STATE_TYPE_INTERNAL_UVD;
-        } else {
-            adev->pm.dpm.uvd_active = false;
-        }
-
-        amdgpu_legacy_dpm_compute_clocks(handle);
-        break;
-    case AMD_IP_BLOCK_TYPE_VCE:
-        if (!gate) {
-            adev->pm.dpm.vce_active = true;
-            /* XXX select vce level based on ring/task */
-            adev->pm.dpm.vce_level = AMD_VCE_LEVEL_AC_ALL;
-        } else {
-            adev->pm.dpm.vce_active = false;
-        }
-
-        amdgpu_legacy_dpm_compute_clocks(handle);
-        break;
-    default:
-        break;
-    }
-
-    return 0;
-}
-
 static int si_set_sw_state(struct amdgpu_device *adev)
 {
     return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
@@ -8125,7 +8091,6 @@ static const struct amd_pm_funcs si_dpm_funcs = {
     .print_power_state = &si_dpm_print_power_state,
     .debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
     .force_performance_level = &si_dpm_force_performance_level,
-    .set_powergating_by_smu = &si_set_powergating_by_smu,
     .vblank_too_short = &si_dpm_vblank_too_short,
     .set_fan_control_mode = &si_dpm_set_fan_control_mode,
     .get_fan_control_mode = &si_dpm_get_fan_control_mode,
...
@@ -1487,16 +1487,6 @@ static void pp_pm_compute_clocks(void *handle)
 {
     struct pp_hwmgr *hwmgr = handle;
     struct amdgpu_device *adev = hwmgr->adev;
-    int i = 0;
-
-    if (adev->mode_info.num_crtc)
-        amdgpu_display_bandwidth_update(adev);
-
-    for (i = 0; i < AMDGPU_MAX_RINGS; i++) {
-        struct amdgpu_ring *ring = adev->rings[i];
-        if (ring && ring->sched.ready)
-            amdgpu_fence_wait_empty(ring);
-    }
 
     if (!amdgpu_device_has_dc_support(adev)) {
         amdgpu_dpm_get_active_displays(adev);
...
@@ -97,6 +97,14 @@
 
 #define INTEL_EDP_BRIGHTNESS_OPTIMIZATION_1    0x359
 
+enum intel_dp_aux_backlight_modparam {
+    INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
+    INTEL_DP_AUX_BACKLIGHT_OFF = 0,
+    INTEL_DP_AUX_BACKLIGHT_ON = 1,
+    INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
+    INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
+};
+
 /* Intel EDP backlight callbacks */
 static bool
 intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
@@ -126,6 +134,24 @@ intel_dp_aux_supports_hdr_backlight(struct intel_connector *connector)
         return false;
     }
 
+    /*
+     * If we don't have HDR static metadata there is no way to
+     * runtime detect used range for nits based control. For now
+     * do not use Intel proprietary eDP backlight control if we
+     * don't have this data in panel EDID. In case we find panel
+     * which supports only nits based control, but doesn't provide
+     * HDR static metadata we need to start maintaining table of
+     * ranges for such panels.
+     */
+    if (i915->params.enable_dpcd_backlight != INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL &&
+        !(connector->base.hdr_sink_metadata.hdmi_type1.metadata_type &
+          BIT(HDMI_STATIC_METADATA_TYPE1))) {
+        drm_info(&i915->drm,
+             "Panel is missing HDR static metadata. Possible support for Intel HDR backlight interface is not used. If your backlight controls don't work try booting with i915.enable_dpcd_backlight=%d. needs this, please file a _new_ bug report on drm/i915, see " FDO_BUG_URL " for details.\n",
+             INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL);
+        return false;
+    }
+
     panel->backlight.edp.intel.sdr_uses_aux =
         tcon_cap[2] & INTEL_EDP_SDR_TCON_BRIGHTNESS_AUX_CAP;
@@ -413,14 +439,6 @@ static const struct intel_panel_bl_funcs intel_dp_vesa_bl_funcs = {
     .get = intel_dp_aux_vesa_get_backlight,
 };
 
-enum intel_dp_aux_backlight_modparam {
-    INTEL_DP_AUX_BACKLIGHT_AUTO = -1,
-    INTEL_DP_AUX_BACKLIGHT_OFF = 0,
-    INTEL_DP_AUX_BACKLIGHT_ON = 1,
-    INTEL_DP_AUX_BACKLIGHT_FORCE_VESA = 2,
-    INTEL_DP_AUX_BACKLIGHT_FORCE_INTEL = 3,
-};
-
 int intel_dp_aux_init_backlight_funcs(struct intel_connector *connector)
 {
     struct drm_device *dev = connector->base.dev;
...
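The backlight regression fix keys off the panel's EDID-provided HDR static metadata: unless the user forces the Intel interface via the module parameter, the proprietary nits-based backlight is only chosen when the Type 1 static metadata block was advertised. A standalone sketch of the bit test; the BIT() helper and the type index value are written out here as assumptions:

    #include <stdio.h>
    #include <stdint.h>

    #define BIT(n) (1u << (n))
    #define HDMI_STATIC_METADATA_TYPE1 0  /* assumed index of the Type 1 block */

    int main(void)
    {
        /* Would be parsed from the panel's EDID in the driver. */
        uint32_t metadata_type = BIT(HDMI_STATIC_METADATA_TYPE1);

        if (metadata_type & BIT(HDMI_STATIC_METADATA_TYPE1))
            puts("Type 1 HDR static metadata present: Intel backlight usable");
        else
            puts("no static metadata: fall back to the VESA interface");
        return 0;
    }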
@@ -1037,7 +1037,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state,
     struct intel_plane_state *plane_state =
         intel_atomic_get_new_plane_state(state, plane);
     const struct drm_framebuffer *fb = plane_state->hw.fb;
-    struct intel_crtc *crtc = to_intel_crtc(plane_state->uapi.crtc);
+    struct intel_crtc *crtc = to_intel_crtc(plane_state->hw.crtc);
     const struct intel_crtc_state *crtc_state;
     struct intel_fbc *fbc = plane->fbc;
...
@@ -4345,12 +4345,12 @@
 #define _DSPAADDR        0x70184
 #define _DSPASTRIDE      0x70188
 #define _DSPAPOS         0x7018C /* reserved */
-#define   DISP_POS_Y_MASK    REG_GENMASK(31, 0)
+#define   DISP_POS_Y_MASK    REG_GENMASK(31, 16)
 #define   DISP_POS_Y(y)      REG_FIELD_PREP(DISP_POS_Y_MASK, (y))
 #define   DISP_POS_X_MASK    REG_GENMASK(15, 0)
 #define   DISP_POS_X(x)      REG_FIELD_PREP(DISP_POS_X_MASK, (x))
 #define _DSPASIZE        0x70190
-#define   DISP_HEIGHT_MASK   REG_GENMASK(31, 0)
+#define   DISP_HEIGHT_MASK   REG_GENMASK(31, 16)
 #define   DISP_HEIGHT(h)     REG_FIELD_PREP(DISP_HEIGHT_MASK, (h))
 #define   DISP_WIDTH_MASK    REG_GENMASK(15, 0)
 #define   DISP_WIDTH(w)      REG_FIELD_PREP(DISP_WIDTH_MASK, (w))
@@ -5152,7 +5152,7 @@
 #define _SEL_FETCH_PLANE_BASE_6_A    0x70940
 #define _SEL_FETCH_PLANE_BASE_7_A    0x70960
 #define _SEL_FETCH_PLANE_BASE_CUR_A  0x70880
-#define _SEL_FETCH_PLANE_BASE_1_B    0x70990
+#define _SEL_FETCH_PLANE_BASE_1_B    0x71890
 #define _SEL_FETCH_PLANE_BASE_A(plane) _PICK(plane, \
                          _SEL_FETCH_PLANE_BASE_1_A, \
...
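Both register fixes correct field masks: the Y position and height live in bits 31:16 of their registers, and with the old 31:0 masks REG_FIELD_PREP would have let the Y/height value land on top of the X/width field in the low half. A userspace sketch with GENMASK/FIELD_PREP-style helpers, reimplemented here; the kernel's REG_* macros are assumed to behave the same way:

    #include <stdio.h>
    #include <stdint.h>

    /* Minimal stand-ins for REG_GENMASK()/REG_FIELD_PREP(). */
    #define GENMASK(h, l)    (((~0u) << (l)) & (~0u >> (31 - (h))))
    #define FIELD_PREP(m, v) (((v) << __builtin_ctz(m)) & (m))

    #define DISP_POS_Y_MASK  GENMASK(31, 16)  /* fixed: high half only */
    #define DISP_POS_X_MASK  GENMASK(15, 0)

    int main(void)
    {
        uint32_t reg = FIELD_PREP(DISP_POS_Y_MASK, 300) |
                       FIELD_PREP(DISP_POS_X_MASK, 640);

        /* With the broken GENMASK(31, 0) Y mask, the Y value would have
         * been OR'd over the X bits instead of landing in bits 31:16. */
        printf("reg = 0x%08x\n", reg);  /* 0x012c0280: y=300, x=640 */
        return 0;
    }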
@@ -222,13 +222,11 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
 
     /* Set the physical address of the buffer in memory */
     paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
-    paddr -= PHYS_OFFSET;
     DRM_DEBUG_DRIVER("Setting buffer #0 address to %pad\n", &paddr);
     regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR0_REG, paddr);
 
     if (fb->format->num_planes > 1) {
         paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 2 : 1);
-        paddr -= PHYS_OFFSET;
         DRM_DEBUG_DRIVER("Setting buffer #1 address to %pad\n", &paddr);
         regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR1_REG,
                  paddr);
@@ -236,7 +234,6 @@ void sun4i_frontend_update_buffer(struct sun4i_frontend *frontend,
     if (fb->format->num_planes > 2) {
         paddr = drm_fb_cma_get_gem_addr(fb, state, swap ? 1 : 2);
-        paddr -= PHYS_OFFSET;
         DRM_DEBUG_DRIVER("Setting buffer #2 address to %pad\n", &paddr);
         regmap_write(frontend->regs, SUN4I_FRONTEND_BUF_ADDR2_REG,
                  paddr);
...