Commit 0d81a3f2 authored by Linus Torvalds

Merge tag 'drm-fixes-2020-03-13' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "It's a bit quieter, probably not as much as it could be.

  There is one large regression fix in here from Lyude for displayport
  bandwidth calculations; there've been reports of multi-monitor in
  docks not working since -rc1 and this has been tested to fix those.

  Otherwise it's a bunch of i915 (with some GVT fixes), a set of amdgpu
  watermark + bios fixes, and an exynos iommu cleanup fix.

  core:
   - DP MST bandwidth regression fix.

  i915:
   - hard lockup fix
   - GVT fixes
   - 32-bit alignment issue fix
   - timeline wait fixes
   - cacheline_retire and free

  amdgpu:
   - Update the display watermark bounding box for navi14
   - Fix fetching vbios directly from rom on vega20/arcturus
   - Navi and renoir watermark fixes

  exynos:
   - iommu object cleanup fix"


* tag 'drm-fixes-2020-03-13' of git://anongit.freedesktop.org/drm/drm:
  drm/dp_mst: Rewrite and fix bandwidth limit checks
  drm/dp_mst: Reprobe path resources in CSN handler
  drm/dp_mst: Use full_pbn instead of available_pbn for bandwidth checks
  drm/dp_mst: Rename drm_dp_mst_is_dp_mst_end_device() to be less redundant
  drm/i915: Defer semaphore priority bumping to a workqueue
  drm/i915/gt: Close race between cacheline_retire and free
  drm/i915/execlists: Enable timeslice on partial virtual engine dequeue
  drm/i915: be more solid in checking the alignment
  drm/i915/gvt: Fix dma-buf display blur issue on CFL
  drm/i915: Return early for await_start on same timeline
  drm/i915: Actually emit the await_start
  drm/amdgpu/powerplay: nv1x, renior copy dcn clock settings of watermark to smu during boot up
  drm/exynos: Fix cleanup of IOMMU related objects
  drm/amdgpu: correct ROM_INDEX/DATA offset for VEGA20
  drm/amd/display: update soc bb for nv14
  drm/i915/gvt: Fix emulated vbt size issue
  drm/i915/gvt: Fix unnecessary schedule timer when no vGPU exits
parents 1b51f694 16b78f05
@@ -89,6 +89,13 @@
 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_CTRL_EN_MASK	0x00010000L
 #define HDP_MEM_POWER_CTRL__RC_MEM_POWER_LS_EN_MASK	0x00020000L
 #define mmHDP_MEM_POWER_CTRL_BASE_IDX	0
+/* for Vega20/arcturus regiter offset change */
+#define	mmROM_INDEX_VG20			0x00e4
+#define	mmROM_INDEX_VG20_BASE_IDX		0
+#define	mmROM_DATA_VG20				0x00e5
+#define	mmROM_DATA_VG20_BASE_IDX		0
 /*
  * Indirect registers accessor
  */
@@ -309,6 +316,8 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 {
 	u32 *dw_ptr;
 	u32 i, length_dw;
+	uint32_t rom_index_offset;
+	uint32_t rom_data_offset;
 	if (bios == NULL)
 		return false;
@@ -321,11 +330,23 @@ static bool soc15_read_bios_from_rom(struct amdgpu_device *adev,
 	dw_ptr = (u32 *)bios;
 	length_dw = ALIGN(length_bytes, 4) / 4;
+	switch (adev->asic_type) {
+	case CHIP_VEGA20:
+	case CHIP_ARCTURUS:
+		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX_VG20);
+		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA_VG20);
+		break;
+	default:
+		rom_index_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX);
+		rom_data_offset = SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA);
+		break;
+	}
 	/* set rom index to 0 */
-	WREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_INDEX), 0);
+	WREG32(rom_index_offset, 0);
 	/* read out the rom data */
 	for (i = 0; i < length_dw; i++)
-		dw_ptr[i] = RREG32(SOC15_REG_OFFSET(SMUIO, 0, mmROM_DATA));
+		dw_ptr[i] = RREG32(rom_data_offset);
 	return true;
 }
...
@@ -335,6 +335,117 @@ struct _vcs_dpi_soc_bounding_box_st dcn2_0_soc = {
 	.use_urgent_burst_bw = 0
 };
+struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv14_soc = {
+	.clock_limits = {
+			{
+				.state = 0,
+				.dcfclk_mhz = 560.0,
+				.fabricclk_mhz = 560.0,
+				.dispclk_mhz = 513.0,
+				.dppclk_mhz = 513.0,
+				.phyclk_mhz = 540.0,
+				.socclk_mhz = 560.0,
+				.dscclk_mhz = 171.0,
+				.dram_speed_mts = 8960.0,
+			},
+			{
+				.state = 1,
+				.dcfclk_mhz = 694.0,
+				.fabricclk_mhz = 694.0,
+				.dispclk_mhz = 642.0,
+				.dppclk_mhz = 642.0,
+				.phyclk_mhz = 600.0,
+				.socclk_mhz = 694.0,
+				.dscclk_mhz = 214.0,
+				.dram_speed_mts = 11104.0,
+			},
+			{
+				.state = 2,
+				.dcfclk_mhz = 875.0,
+				.fabricclk_mhz = 875.0,
+				.dispclk_mhz = 734.0,
+				.dppclk_mhz = 734.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 875.0,
+				.dscclk_mhz = 245.0,
+				.dram_speed_mts = 14000.0,
+			},
+			{
+				.state = 3,
+				.dcfclk_mhz = 1000.0,
+				.fabricclk_mhz = 1000.0,
+				.dispclk_mhz = 1100.0,
+				.dppclk_mhz = 1100.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1000.0,
+				.dscclk_mhz = 367.0,
+				.dram_speed_mts = 16000.0,
+			},
+			{
+				.state = 4,
+				.dcfclk_mhz = 1200.0,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 1284.0,
+				.dppclk_mhz = 1284.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1200.0,
+				.dscclk_mhz = 428.0,
+				.dram_speed_mts = 16000.0,
+			},
+			/*Extra state, no dispclk ramping*/
+			{
+				.state = 5,
+				.dcfclk_mhz = 1200.0,
+				.fabricclk_mhz = 1200.0,
+				.dispclk_mhz = 1284.0,
+				.dppclk_mhz = 1284.0,
+				.phyclk_mhz = 810.0,
+				.socclk_mhz = 1200.0,
+				.dscclk_mhz = 428.0,
+				.dram_speed_mts = 16000.0,
+			},
+		},
+	.num_states = 5,
+	.sr_exit_time_us = 8.6,
+	.sr_enter_plus_exit_time_us = 10.9,
+	.urgent_latency_us = 4.0,
+	.urgent_latency_pixel_data_only_us = 4.0,
+	.urgent_latency_pixel_mixed_with_vm_data_us = 4.0,
+	.urgent_latency_vm_data_only_us = 4.0,
+	.urgent_out_of_order_return_per_channel_pixel_only_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_pixel_and_vm_bytes = 4096,
+	.urgent_out_of_order_return_per_channel_vm_only_bytes = 4096,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_only = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_pixel_and_vm = 40.0,
+	.pct_ideal_dram_sdp_bw_after_urgent_vm_only = 40.0,
+	.max_avg_sdp_bw_use_normal_percent = 40.0,
+	.max_avg_dram_bw_use_normal_percent = 40.0,
+	.writeback_latency_us = 12.0,
+	.ideal_dram_bw_after_urgent_percent = 40.0,
+	.max_request_size_bytes = 256,
+	.dram_channel_width_bytes = 2,
+	.fabric_datapath_to_dcn_data_return_bytes = 64,
+	.dcn_downspread_percent = 0.5,
+	.downspread_percent = 0.38,
+	.dram_page_open_time_ns = 50.0,
+	.dram_rw_turnaround_time_ns = 17.5,
+	.dram_return_buffer_per_channel_bytes = 8192,
+	.round_trip_ping_latency_dcfclk_cycles = 131,
+	.urgent_out_of_order_return_per_channel_bytes = 256,
+	.channel_interleave_bytes = 256,
+	.num_banks = 8,
+	.num_chans = 8,
+	.vmm_page_size_bytes = 4096,
+	.dram_clock_change_latency_us = 404.0,
+	.dummy_pstate_latency_us = 5.0,
+	.writeback_dram_clock_change_latency_us = 23.0,
+	.return_bus_width_bytes = 64,
+	.dispclk_dppclk_vco_speed_mhz = 3850,
+	.xfc_bus_transport_time_us = 20,
+	.xfc_xbuf_latency_tolerance_us = 4,
+	.use_urgent_burst_bw = 0
+};
 struct _vcs_dpi_soc_bounding_box_st dcn2_0_nv12_soc = { 0 };
 #ifndef mmDP0_DP_DPHY_INTERNAL_CTRL
@@ -3291,6 +3402,9 @@ void dcn20_patch_bounding_box(struct dc *dc, struct _vcs_dpi_soc_bounding_box_st
 static struct _vcs_dpi_soc_bounding_box_st *get_asic_rev_soc_bb(
 	uint32_t hw_internal_rev)
 {
+	if (ASICREV_IS_NAVI14_M(hw_internal_rev))
+		return &dcn2_0_nv14_soc;
 	if (ASICREV_IS_NAVI12_P(hw_internal_rev))
 		return &dcn2_0_nv12_soc;
...
@@ -2006,8 +2006,11 @@ int smu_set_watermarks_for_clock_ranges(struct smu_context *smu,
 	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
 	    smu_feature_is_enabled(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
 		smu_set_watermarks_table(smu, table, clock_ranges);
-		smu->watermarks_bitmap |= WATERMARKS_EXIST;
-		smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+		if (!(smu->watermarks_bitmap & WATERMARKS_EXIST)) {
+			smu->watermarks_bitmap |= WATERMARKS_EXIST;
+			smu->watermarks_bitmap &= ~WATERMARKS_LOADED;
+		}
 	}
 	mutex_unlock(&smu->mutex);
...
@@ -1062,15 +1062,6 @@ static int navi10_display_config_changed(struct smu_context *smu)
 {
 	int ret = 0;
-	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
-		ret = smu_write_watermarks_table(smu);
-		if (ret)
-			return ret;
-		smu->watermarks_bitmap |= WATERMARKS_LOADED;
-	}
 	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_DCEFCLK_BIT) &&
 	    smu_feature_is_supported(smu, SMU_FEATURE_DPM_SOCCLK_BIT)) {
@@ -1493,6 +1484,7 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
 				       *clock_ranges)
 {
 	int i;
+	int ret = 0;
 	Watermarks_t *table = watermarks;
 	if (!table || !clock_ranges)
@@ -1544,6 +1536,18 @@ static int navi10_set_watermarks_table(struct smu_context *smu,
 				clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
 	}
+	smu->watermarks_bitmap |= WATERMARKS_EXIST;
+	/* pass data to smu controller */
+	if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+		ret = smu_write_watermarks_table(smu);
+		if (ret) {
+			pr_err("Failed to update WMTABLE!");
+			return ret;
+		}
+		smu->watermarks_bitmap |= WATERMARKS_LOADED;
+	}
 	return 0;
 }
...
@@ -806,9 +806,10 @@ static int renoir_set_watermarks_table(
 				clock_ranges->wm_mcif_clocks_ranges[i].wm_set_id;
 	}
+	smu->watermarks_bitmap |= WATERMARKS_EXIST;
 	/* pass data to smu controller */
-	if ((smu->watermarks_bitmap & WATERMARKS_EXIST) &&
-	    !(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
+	if (!(smu->watermarks_bitmap & WATERMARKS_LOADED)) {
 		ret = smu_write_watermarks_table(smu);
 		if (ret) {
 			pr_err("Failed to update WMTABLE!");
...
@@ -1935,7 +1935,7 @@ static u8 drm_dp_calculate_rad(struct drm_dp_mst_port *port,
 	return parent_lct + 1;
 }
-static bool drm_dp_mst_is_dp_mst_end_device(u8 pdt, bool mcs)
+static bool drm_dp_mst_is_end_device(u8 pdt, bool mcs)
 {
 	switch (pdt) {
 	case DP_PEER_DEVICE_DP_LEGACY_CONV:
@@ -1965,13 +1965,13 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
 	/* Teardown the old pdt, if there is one */
 	if (port->pdt != DP_PEER_DEVICE_NONE) {
-		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
 			/*
 			 * If the new PDT would also have an i2c bus,
 			 * don't bother with reregistering it
 			 */
 			if (new_pdt != DP_PEER_DEVICE_NONE &&
-			    drm_dp_mst_is_dp_mst_end_device(new_pdt, new_mcs)) {
+			    drm_dp_mst_is_end_device(new_pdt, new_mcs)) {
 				port->pdt = new_pdt;
 				port->mcs = new_mcs;
 				return 0;
@@ -1991,7 +1991,7 @@ drm_dp_port_set_pdt(struct drm_dp_mst_port *port, u8 new_pdt,
 	port->mcs = new_mcs;
 	if (port->pdt != DP_PEER_DEVICE_NONE) {
-		if (drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+		if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
 			/* add i2c over sideband */
 			ret = drm_dp_mst_register_i2c_bus(&port->aux);
 		} else {
@@ -2172,7 +2172,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
 	}
 	if (port->pdt != DP_PEER_DEVICE_NONE &&
-	    drm_dp_mst_is_dp_mst_end_device(port->pdt, port->mcs)) {
+	    drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
 		port->cached_edid = drm_get_edid(port->connector,
 						 &port->aux.ddc);
 		drm_connector_set_tile_property(port->connector);
@@ -2302,14 +2302,18 @@ drm_dp_mst_handle_link_address_port(struct drm_dp_mst_branch *mstb,
 		mutex_unlock(&mgr->lock);
 	}
-	if (old_ddps != port->ddps) {
-		if (port->ddps) {
-			if (!port->input) {
-				drm_dp_send_enum_path_resources(mgr, mstb,
-								port);
-			}
+	/*
+	 * Reprobe PBN caps on both hotplug, and when re-probing the link
+	 * for our parent mstb
+	 */
+	if (old_ddps != port->ddps || !created) {
+		if (port->ddps && !port->input) {
+			ret = drm_dp_send_enum_path_resources(mgr, mstb,
+							      port);
+			if (ret == 1)
+				changed = true;
 		} else {
-			port->available_pbn = 0;
+			port->full_pbn = 0;
 		}
 	}
@@ -2401,11 +2405,10 @@ drm_dp_mst_handle_conn_stat(struct drm_dp_mst_branch *mstb,
 	port->ddps = conn_stat->displayport_device_plug_status;
 	if (old_ddps != port->ddps) {
-		if (port->ddps) {
-			dowork = true;
-		} else {
-			port->available_pbn = 0;
-		}
+		if (port->ddps && !port->input)
+			drm_dp_send_enum_path_resources(mgr, mstb, port);
+		else
+			port->full_pbn = 0;
 	}
 	new_pdt = port->input ? DP_PEER_DEVICE_NONE : conn_stat->peer_device_type;
@@ -2556,13 +2559,6 @@ static int drm_dp_check_and_send_link_address(struct drm_dp_mst_topology_mgr *mg
 		if (port->input || !port->ddps)
 			continue;
-		if (!port->available_pbn) {
-			drm_modeset_lock(&mgr->base.lock, NULL);
-			drm_dp_send_enum_path_resources(mgr, mstb, port);
-			drm_modeset_unlock(&mgr->base.lock);
-			changed = true;
-		}
 		if (port->mstb)
 			mstb_child = drm_dp_mst_topology_get_mstb_validated(
 			    mgr, port->mstb);
@@ -2990,6 +2986,7 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 	ret = drm_dp_mst_wait_tx_reply(mstb, txmsg);
 	if (ret > 0) {
+		ret = 0;
 		path_res = &txmsg->reply.u.path_resources;
 		if (txmsg->reply.reply_type == DP_SIDEBAND_REPLY_NAK) {
@@ -3002,14 +2999,22 @@ drm_dp_send_enum_path_resources(struct drm_dp_mst_topology_mgr *mgr,
 				      path_res->port_number,
 				      path_res->full_payload_bw_number,
 				      path_res->avail_payload_bw_number);
-			port->available_pbn =
-				path_res->avail_payload_bw_number;
+			/*
+			 * If something changed, make sure we send a
+			 * hotplug
+			 */
+			if (port->full_pbn != path_res->full_payload_bw_number ||
+			    port->fec_capable != path_res->fec_capable)
+				ret = 1;
+			port->full_pbn = path_res->full_payload_bw_number;
 			port->fec_capable = path_res->fec_capable;
 		}
 	}
 	kfree(txmsg);
-	return 0;
+	return ret;
 }
 static struct drm_dp_mst_port *drm_dp_get_last_connected_port_to_mstb(struct drm_dp_mst_branch *mstb)
@@ -3596,13 +3601,9 @@ drm_dp_mst_topology_mgr_invalidate_mstb(struct drm_dp_mst_branch *mstb)
 	/* The link address will need to be re-sent on resume */
 	mstb->link_address_sent = false;
-	list_for_each_entry(port, &mstb->ports, next) {
-		/* The PBN for each port will also need to be re-probed */
-		port->available_pbn = 0;
+	list_for_each_entry(port, &mstb->ports, next)
 		if (port->mstb)
 			drm_dp_mst_topology_mgr_invalidate_mstb(port->mstb);
-	}
 }
 /**
@@ -4829,41 +4830,102 @@ static bool drm_dp_mst_port_downstream_of_branch(struct drm_dp_mst_port *port,
 	return false;
 }
-static inline
-int drm_dp_mst_atomic_check_bw_limit(struct drm_dp_mst_branch *branch,
-				     struct drm_dp_mst_topology_state *mst_state)
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+				      struct drm_dp_mst_topology_state *state);
+static int
+drm_dp_mst_atomic_check_mstb_bw_limit(struct drm_dp_mst_branch *mstb,
+				      struct drm_dp_mst_topology_state *state)
 {
-	struct drm_dp_mst_port *port;
 	struct drm_dp_vcpi_allocation *vcpi;
-	int pbn_limit = 0, pbn_used = 0;
-	list_for_each_entry(port, &branch->ports, next) {
-		if (port->mstb)
-			if (drm_dp_mst_atomic_check_bw_limit(port->mstb, mst_state))
-				return -ENOSPC;
-		if (port->available_pbn > 0)
-			pbn_limit = port->available_pbn;
+	struct drm_dp_mst_port *port;
+	int pbn_used = 0, ret;
+	bool found = false;
+	/* Check that we have at least one port in our state that's downstream
+	 * of this branch, otherwise we can skip this branch
+	 */
+	list_for_each_entry(vcpi, &state->vcpis, next) {
+		if (!vcpi->pbn ||
+		    !drm_dp_mst_port_downstream_of_branch(vcpi->port, mstb))
+			continue;
+		found = true;
+		break;
 	}
-	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch has %d PBN available\n",
-			 branch, pbn_limit);
-	list_for_each_entry(vcpi, &mst_state->vcpis, next) {
-		if (!vcpi->pbn)
-			continue;
-		if (drm_dp_mst_port_downstream_of_branch(vcpi->port, branch))
-			pbn_used += vcpi->pbn;
+	if (!found)
+		return 0;
+	if (mstb->port_parent)
+		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] Checking bandwidth limits on [MSTB:%p]\n",
+				 mstb->port_parent->parent, mstb->port_parent,
+				 mstb);
+	else
+		DRM_DEBUG_ATOMIC("[MSTB:%p] Checking bandwidth limits\n",
+				 mstb);
+	list_for_each_entry(port, &mstb->ports, next) {
+		ret = drm_dp_mst_atomic_check_port_bw_limit(port, state);
+		if (ret < 0)
+			return ret;
+		pbn_used += ret;
 	}
-	DRM_DEBUG_ATOMIC("[MST BRANCH:%p] branch used %d PBN\n",
-			 branch, pbn_used);
-	if (pbn_used > pbn_limit) {
-		DRM_DEBUG_ATOMIC("[MST BRANCH:%p] No available bandwidth\n",
-				 branch);
+	return pbn_used;
+}
+static int
+drm_dp_mst_atomic_check_port_bw_limit(struct drm_dp_mst_port *port,
+				      struct drm_dp_mst_topology_state *state)
+{
+	struct drm_dp_vcpi_allocation *vcpi;
+	int pbn_used = 0;
+	if (port->pdt == DP_PEER_DEVICE_NONE)
+		return 0;
+	if (drm_dp_mst_is_end_device(port->pdt, port->mcs)) {
+		bool found = false;
+		list_for_each_entry(vcpi, &state->vcpis, next) {
+			if (vcpi->port != port)
+				continue;
+			if (!vcpi->pbn)
+				return 0;
+			found = true;
+			break;
+		}
+		if (!found)
+			return 0;
+		/* This should never happen, as it means we tried to
+		 * set a mode before querying the full_pbn
+		 */
+		if (WARN_ON(!port->full_pbn))
+			return -EINVAL;
+		pbn_used = vcpi->pbn;
+	} else {
+		pbn_used = drm_dp_mst_atomic_check_mstb_bw_limit(port->mstb,
+								 state);
+		if (pbn_used <= 0)
+			return pbn_used;
+	}
+	if (pbn_used > port->full_pbn) {
+		DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] required PBN of %d exceeds port limit of %d\n",
+				 port->parent, port, pbn_used,
+				 port->full_pbn);
 		return -ENOSPC;
 	}
-	return 0;
+	DRM_DEBUG_ATOMIC("[MSTB:%p] [MST PORT:%p] uses %d out of %d PBN\n",
+			 port->parent, port, pbn_used, port->full_pbn);
+	return pbn_used;
 }
 static inline int
@@ -5061,9 +5123,15 @@ int drm_dp_mst_atomic_check(struct drm_atomic_state *state)
 		ret = drm_dp_mst_atomic_check_vcpi_alloc_limit(mgr, mst_state);
 		if (ret)
 			break;
-		ret = drm_dp_mst_atomic_check_bw_limit(mgr->mst_primary, mst_state);
-		if (ret)
+		mutex_lock(&mgr->lock);
+		ret = drm_dp_mst_atomic_check_mstb_bw_limit(mgr->mst_primary,
+							    mst_state);
+		mutex_unlock(&mgr->lock);
+		if (ret < 0)
 			break;
+		else
+			ret = 0;
 	}
 	return ret;
...
@@ -55,6 +55,7 @@ static const char * const decon_clks_name[] = {
 struct decon_context {
 	struct device *dev;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct exynos_drm_crtc *crtc;
 	struct exynos_drm_plane planes[WINDOWS_NR];
 	struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -644,7 +645,7 @@ static int decon_bind(struct device *dev, struct device *master, void *data)
 	decon_clear_channels(ctx->crtc);
-	return exynos_drm_register_dma(drm_dev, dev);
+	return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 }
 static void decon_unbind(struct device *dev, struct device *master, void *data)
@@ -654,7 +655,7 @@ static void decon_unbind(struct device *dev, struct device *master, void *data)
 	decon_atomic_disable(ctx->crtc);
 	/* detach this sub driver from iommu mapping if supported. */
-	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 }
 static const struct component_ops decon_component_ops = {
...
@@ -40,6 +40,7 @@
 struct decon_context {
 	struct device *dev;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct exynos_drm_crtc *crtc;
 	struct exynos_drm_plane planes[WINDOWS_NR];
 	struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -127,13 +128,13 @@ static int decon_ctx_initialize(struct decon_context *ctx,
 	decon_clear_channels(ctx->crtc);
-	return exynos_drm_register_dma(drm_dev, ctx->dev);
+	return exynos_drm_register_dma(drm_dev, ctx->dev, &ctx->dma_priv);
 }
 static void decon_ctx_remove(struct decon_context *ctx)
 {
 	/* detach this sub driver from iommu mapping if supported. */
-	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 }
 static u32 decon_calc_clkdiv(struct decon_context *ctx,
...
@@ -58,7 +58,7 @@ static inline void clear_dma_max_seg_size(struct device *dev)
  * mapping.
  */
 static int drm_iommu_attach_device(struct drm_device *drm_dev,
-				   struct device *subdrv_dev)
+				   struct device *subdrv_dev, void **dma_priv)
 {
 	struct exynos_drm_private *priv = drm_dev->dev_private;
 	int ret;
@@ -74,7 +74,14 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
 		return ret;
 	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
-		if (to_dma_iommu_mapping(subdrv_dev))
+		/*
+		 * Keep the original DMA mapping of the sub-device and
+		 * restore it on Exynos DRM detach, otherwise the DMA
+		 * framework considers it as IOMMU-less during the next
+		 * probe (in case of deferred probe or modular build)
+		 */
+		*dma_priv = to_dma_iommu_mapping(subdrv_dev);
+		if (*dma_priv)
 			arm_iommu_detach_device(subdrv_dev);
 		ret = arm_iommu_attach_device(subdrv_dev, priv->mapping);
@@ -98,19 +105,21 @@ static int drm_iommu_attach_device(struct drm_device *drm_dev,
  * mapping
  */
 static void drm_iommu_detach_device(struct drm_device *drm_dev,
-				    struct device *subdrv_dev)
+				    struct device *subdrv_dev, void **dma_priv)
 {
 	struct exynos_drm_private *priv = drm_dev->dev_private;
-	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU))
+	if (IS_ENABLED(CONFIG_ARM_DMA_USE_IOMMU)) {
 		arm_iommu_detach_device(subdrv_dev);
-	else if (IS_ENABLED(CONFIG_IOMMU_DMA))
+		arm_iommu_attach_device(subdrv_dev, *dma_priv);
+	} else if (IS_ENABLED(CONFIG_IOMMU_DMA))
 		iommu_detach_device(priv->mapping, subdrv_dev);
 	clear_dma_max_seg_size(subdrv_dev);
 }
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+			    void **dma_priv)
 {
 	struct exynos_drm_private *priv = drm->dev_private;
@@ -137,13 +146,14 @@ int exynos_drm_register_dma(struct drm_device *drm, struct device *dev)
 		priv->mapping = mapping;
 	}
-	return drm_iommu_attach_device(drm, dev);
+	return drm_iommu_attach_device(drm, dev, dma_priv);
 }
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev)
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+			       void **dma_priv)
 {
 	if (IS_ENABLED(CONFIG_EXYNOS_IOMMU))
-		drm_iommu_detach_device(drm, dev);
+		drm_iommu_detach_device(drm, dev, dma_priv);
 }
 void exynos_drm_cleanup_dma(struct drm_device *drm)
...
@@ -223,8 +223,10 @@ static inline bool is_drm_iommu_supported(struct drm_device *drm_dev)
 	return priv->mapping ? true : false;
 }
-int exynos_drm_register_dma(struct drm_device *drm, struct device *dev);
-void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev);
+int exynos_drm_register_dma(struct drm_device *drm, struct device *dev,
+			    void **dma_priv);
+void exynos_drm_unregister_dma(struct drm_device *drm, struct device *dev,
+			       void **dma_priv);
 void exynos_drm_cleanup_dma(struct drm_device *drm);
 #ifdef CONFIG_DRM_EXYNOS_DPI
...
@@ -97,6 +97,7 @@ struct fimc_scaler {
 struct fimc_context {
 	struct exynos_drm_ipp ipp;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct device *dev;
 	struct exynos_drm_ipp_task *task;
 	struct exynos_drm_ipp_formats *formats;
@@ -1133,7 +1134,7 @@ static int fimc_bind(struct device *dev, struct device *master, void *data)
 	ctx->drm_dev = drm_dev;
 	ipp->drm_dev = drm_dev;
-	exynos_drm_register_dma(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 	exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
 		DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1153,7 +1154,7 @@ static void fimc_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 	exynos_drm_ipp_unregister(dev, ipp);
-	exynos_drm_unregister_dma(drm_dev, dev);
+	exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
 }
 static const struct component_ops fimc_component_ops = {
...
@@ -167,6 +167,7 @@ static struct fimd_driver_data exynos5420_fimd_driver_data = {
 struct fimd_context {
 	struct device *dev;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct exynos_drm_crtc *crtc;
 	struct exynos_drm_plane planes[WINDOWS_NR];
 	struct exynos_drm_plane_config configs[WINDOWS_NR];
@@ -1090,7 +1091,7 @@ static int fimd_bind(struct device *dev, struct device *master, void *data)
 	if (is_drm_iommu_supported(drm_dev))
 		fimd_clear_channels(ctx->crtc);
-	return exynos_drm_register_dma(drm_dev, dev);
+	return exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 }
 static void fimd_unbind(struct device *dev, struct device *master,
@@ -1100,7 +1101,7 @@ static void fimd_unbind(struct device *dev, struct device *master,
 	fimd_atomic_disable(ctx->crtc);
-	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev);
+	exynos_drm_unregister_dma(ctx->drm_dev, ctx->dev, &ctx->dma_priv);
 	if (ctx->encoder)
 		exynos_dpi_remove(ctx->encoder);
...
@@ -232,6 +232,7 @@ struct g2d_runqueue_node {
 struct g2d_data {
 	struct device *dev;
+	void *dma_priv;
 	struct clk *gate_clk;
 	void __iomem *regs;
 	int irq;
@@ -1409,7 +1410,7 @@ static int g2d_bind(struct device *dev, struct device *master, void *data)
 		return ret;
 	}
-	ret = exynos_drm_register_dma(drm_dev, dev);
+	ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
 	if (ret < 0) {
 		dev_err(dev, "failed to enable iommu.\n");
 		g2d_fini_cmdlist(g2d);
@@ -1434,7 +1435,7 @@ static void g2d_unbind(struct device *dev, struct device *master, void *data)
 	priv->g2d_dev = NULL;
 	cancel_work_sync(&g2d->runqueue_work);
-	exynos_drm_unregister_dma(g2d->drm_dev, dev);
+	exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
 }
 static const struct component_ops g2d_component_ops = {
...
@@ -97,6 +97,7 @@ struct gsc_scaler {
 struct gsc_context {
 	struct exynos_drm_ipp ipp;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct device *dev;
 	struct exynos_drm_ipp_task *task;
 	struct exynos_drm_ipp_formats *formats;
@@ -1169,7 +1170,7 @@ static int gsc_bind(struct device *dev, struct device *master, void *data)
 	ctx->drm_dev = drm_dev;
 	ctx->drm_dev = drm_dev;
-	exynos_drm_register_dma(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev, &ctx->dma_priv);
 	exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
 		DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -1189,7 +1190,7 @@ static void gsc_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &ctx->ipp;
 	exynos_drm_ipp_unregister(dev, ipp);
-	exynos_drm_unregister_dma(drm_dev, dev);
+	exynos_drm_unregister_dma(drm_dev, dev, &ctx->dma_priv);
 }
 static const struct component_ops gsc_component_ops = {
...
@@ -56,6 +56,7 @@ struct rot_variant {
 struct rot_context {
 	struct exynos_drm_ipp ipp;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct device *dev;
 	void __iomem *regs;
 	struct clk *clock;
@@ -243,7 +244,7 @@ static int rotator_bind(struct device *dev, struct device *master, void *data)
 	rot->drm_dev = drm_dev;
 	ipp->drm_dev = drm_dev;
-	exynos_drm_register_dma(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev, &rot->dma_priv);
 	exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
 		   DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE,
@@ -261,7 +262,7 @@ static void rotator_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &rot->ipp;
 	exynos_drm_ipp_unregister(dev, ipp);
-	exynos_drm_unregister_dma(rot->drm_dev, rot->dev);
+	exynos_drm_unregister_dma(rot->drm_dev, rot->dev, &rot->dma_priv);
 }
 static const struct component_ops rotator_component_ops = {
...
@@ -39,6 +39,7 @@ struct scaler_data {
 struct scaler_context {
 	struct exynos_drm_ipp ipp;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct device *dev;
 	void __iomem *regs;
 	struct clk *clock[SCALER_MAX_CLK];
@@ -450,7 +451,7 @@ static int scaler_bind(struct device *dev, struct device *master, void *data)
 	scaler->drm_dev = drm_dev;
 	ipp->drm_dev = drm_dev;
-	exynos_drm_register_dma(drm_dev, dev);
+	exynos_drm_register_dma(drm_dev, dev, &scaler->dma_priv);
 	exynos_drm_ipp_register(dev, ipp, &ipp_funcs,
 		DRM_EXYNOS_IPP_CAP_CROP | DRM_EXYNOS_IPP_CAP_ROTATE |
@@ -470,7 +471,8 @@ static void scaler_unbind(struct device *dev, struct device *master,
 	struct exynos_drm_ipp *ipp = &scaler->ipp;
 	exynos_drm_ipp_unregister(dev, ipp);
-	exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev);
+	exynos_drm_unregister_dma(scaler->drm_dev, scaler->dev,
+				  &scaler->dma_priv);
 }
 static const struct component_ops scaler_component_ops = {
...
@@ -94,6 +94,7 @@ struct mixer_context {
 	struct platform_device *pdev;
 	struct device *dev;
 	struct drm_device *drm_dev;
+	void *dma_priv;
 	struct exynos_drm_crtc *crtc;
 	struct exynos_drm_plane planes[MIXER_WIN_NR];
 	unsigned long flags;
@@ -894,12 +895,14 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
 		}
 	}
-	return exynos_drm_register_dma(drm_dev, mixer_ctx->dev);
+	return exynos_drm_register_dma(drm_dev, mixer_ctx->dev,
+				       &mixer_ctx->dma_priv);
 }
 static void mixer_ctx_remove(struct mixer_context *mixer_ctx)
 {
-	exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev);
+	exynos_drm_unregister_dma(mixer_ctx->drm_dev, mixer_ctx->dev,
+				  &mixer_ctx->dma_priv);
 }
 static int mixer_enable_vblank(struct exynos_drm_crtc *crtc)
...
@@ -423,7 +423,8 @@ eb_validate_vma(struct i915_execbuffer *eb,
 	if (unlikely(entry->flags & eb->invalid_flags))
 		return -EINVAL;
-	if (unlikely(entry->alignment && !is_power_of_2(entry->alignment)))
+	if (unlikely(entry->alignment &&
+		     !is_power_of_2_u64(entry->alignment)))
 		return -EINVAL;
 	/*
...
@@ -1679,11 +1679,9 @@ need_timeslice(struct intel_engine_cs *engine, const struct i915_request *rq)
 	if (!intel_engine_has_timeslices(engine))
 		return false;
-	if (list_is_last(&rq->sched.link, &engine->active.requests))
-		return false;
-	hint = max(rq_prio(list_next_entry(rq, sched.link)),
-		   engine->execlists.queue_priority_hint);
+	hint = engine->execlists.queue_priority_hint;
+	if (!list_is_last(&rq->sched.link, &engine->active.requests))
+		hint = max(hint, rq_prio(list_next_entry(rq, sched.link)));
 	return hint >= effective_prio(rq);
 }
@@ -1725,6 +1723,18 @@ static void set_timeslice(struct intel_engine_cs *engine)
 	set_timer_ms(&engine->execlists.timer, active_timeslice(engine));
 }
+static void start_timeslice(struct intel_engine_cs *engine)
+{
+	struct intel_engine_execlists *execlists = &engine->execlists;
+	execlists->switch_priority_hint = execlists->queue_priority_hint;
+	if (timer_pending(&execlists->timer))
+		return;
+	set_timer_ms(&execlists->timer, timeslice(engine));
+}
 static void record_preemption(struct intel_engine_execlists *execlists)
 {
 	(void)I915_SELFTEST_ONLY(execlists->preempt_hang.count++);
@@ -1888,11 +1898,7 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 				 * Even if ELSP[1] is occupied and not worthy
 				 * of timeslices, our queue might be.
 				 */
-				if (!execlists->timer.expires &&
-				    need_timeslice(engine, last))
-					set_timer_ms(&execlists->timer,
-						     timeslice(engine));
+				start_timeslice(engine);
 				return;
 			}
 		}
@@ -1927,7 +1933,8 @@ static void execlists_dequeue(struct intel_engine_cs *engine)
 			if (last && !can_merge_rq(last, rq)) {
 				spin_unlock(&ve->base.active.lock);
-				return; /* leave this for another */
+				start_timeslice(engine);
+				return; /* leave this for another sibling */
 			}
 			ENGINE_TRACE(engine,
...
@@ -192,11 +192,15 @@ static void cacheline_release(struct intel_timeline_cacheline *cl)
 static void cacheline_free(struct intel_timeline_cacheline *cl)
 {
+	if (!i915_active_acquire_if_busy(&cl->active)) {
+		__idle_cacheline_free(cl);
+		return;
+	}
 	GEM_BUG_ON(ptr_test_bit(cl->vaddr, CACHELINE_FREE));
 	cl->vaddr = ptr_set_bit(cl->vaddr, CACHELINE_FREE);
-	if (i915_active_is_idle(&cl->active))
-		__idle_cacheline_free(cl);
+	i915_active_release(&cl->active);
 }
 int intel_timeline_init(struct intel_timeline *timeline,
...
@@ -457,7 +457,8 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	/* TODO: add more platforms support */
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv)) {
 		if (connected) {
 			vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
 				SFUSE_STRAP_DDID_DETECTED;
...
@@ -147,15 +147,14 @@ static void virt_vbt_generation(struct vbt *v)
 	/* there's features depending on version! */
 	v->header.version = 155;
 	v->header.header_size = sizeof(v->header);
-	v->header.vbt_size = sizeof(struct vbt) - sizeof(v->header);
+	v->header.vbt_size = sizeof(struct vbt);
 	v->header.bdb_offset = offsetof(struct vbt, bdb_header);
 	strcpy(&v->bdb_header.signature[0], "BIOS_DATA_BLOCK");
 	v->bdb_header.version = 186; /* child_dev_size = 33 */
 	v->bdb_header.header_size = sizeof(v->bdb_header);
-	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header)
-		- sizeof(struct bdb_header);
+	v->bdb_header.bdb_size = sizeof(struct vbt) - sizeof(struct vbt_header);
 	/* general features */
 	v->general_features_header.id = BDB_GENERAL_FEATURES;
...
@@ -272,10 +272,17 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
-	mutex_lock(&vgpu->vgpu_lock);
 	WARN(vgpu->active, "vGPU is still active!\n");
+	/*
+	 * remove idr first so later clean can judge if need to stop
+	 * service if no active vgpu.
+	 */
+	mutex_lock(&gvt->lock);
+	idr_remove(&gvt->vgpu_idr, vgpu->id);
+	mutex_unlock(&gvt->lock);
+	mutex_lock(&vgpu->vgpu_lock);
 	intel_gvt_debugfs_remove_vgpu(vgpu);
 	intel_vgpu_clean_sched_policy(vgpu);
 	intel_vgpu_clean_submission(vgpu);
@@ -290,7 +297,6 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
 	mutex_unlock(&vgpu->vgpu_lock);
 	mutex_lock(&gvt->lock);
-	idr_remove(&gvt->vgpu_idr, vgpu->id);
 	if (idr_is_empty(&gvt->vgpu_idr))
 		intel_gvt_clean_irq(gvt);
 	intel_gvt_update_vgpu_types(gvt);
...
@@ -527,19 +527,31 @@ submit_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 	return NOTIFY_DONE;
 }
+static void irq_semaphore_cb(struct irq_work *wrk)
+{
+	struct i915_request *rq =
+		container_of(wrk, typeof(*rq), semaphore_work);
+	i915_schedule_bump_priority(rq, I915_PRIORITY_NOSEMAPHORE);
+	i915_request_put(rq);
+}
 static int __i915_sw_fence_call
 semaphore_notify(struct i915_sw_fence *fence, enum i915_sw_fence_notify state)
 {
-	struct i915_request *request =
-		container_of(fence, typeof(*request), semaphore);
+	struct i915_request *rq = container_of(fence, typeof(*rq), semaphore);
 	switch (state) {
 	case FENCE_COMPLETE:
-		i915_schedule_bump_priority(request, I915_PRIORITY_NOSEMAPHORE);
+		if (!(READ_ONCE(rq->sched.attr.priority) & I915_PRIORITY_NOSEMAPHORE)) {
+			i915_request_get(rq);
+			init_irq_work(&rq->semaphore_work, irq_semaphore_cb);
+			irq_work_queue(&rq->semaphore_work);
+		}
 		break;
 	case FENCE_FREE:
-		i915_request_put(request);
+		i915_request_put(rq);
 		break;
 	}
@@ -776,8 +788,8 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 	struct dma_fence *fence;
 	int err;
-	GEM_BUG_ON(i915_request_timeline(rq) ==
-		   rcu_access_pointer(signal->timeline));
+	if (i915_request_timeline(rq) == rcu_access_pointer(signal->timeline))
+		return 0;
 	if (i915_request_started(signal))
 		return 0;
@@ -821,7 +833,7 @@ i915_request_await_start(struct i915_request *rq, struct i915_request *signal)
 		return 0;
 	err = 0;
-	if (intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
+	if (!intel_timeline_sync_is_later(i915_request_timeline(rq), fence))
 		err = i915_sw_fence_await_dma_fence(&rq->submit,
 						    fence, 0,
 						    I915_FENCE_GFP);
@@ -1318,9 +1330,9 @@ void __i915_request_queue(struct i915_request *rq,
 	 * decide whether to preempt the entire chain so that it is ready to
 	 * run at the earliest possible convenience.
 	 */
-	i915_sw_fence_commit(&rq->semaphore);
 	if (attr && rq->engine->schedule)
 		rq->engine->schedule(rq, attr);
+	i915_sw_fence_commit(&rq->semaphore);
 	i915_sw_fence_commit(&rq->submit);
 }
...
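The i915_request.c change above moves the semaphore priority bump out of the fence-signaling callback: instead of bumping priority (and taking scheduler locks) directly in that context, the request is packaged into an irq_work that the kernel runs shortly afterwards, outside the locks held at the signal site. A minimal, hypothetical module-style sketch of that irq_work pattern (illustrative names only, not the i915 code):

```c
#include <linux/irq_work.h>
#include <linux/module.h>

/* Work that must not run in the caller's locked context is wrapped in an
 * irq_work; the callback runs later from the irq_work path instead. */
struct deferred_bump {
	struct irq_work work;
	int new_priority;
};

static void deferred_bump_cb(struct irq_work *work)
{
	struct deferred_bump *db = container_of(work, struct deferred_bump, work);

	/* Safe place to take the locks the original caller could not. */
	pr_info("applying deferred priority bump to %d\n", db->new_priority);
}

static struct deferred_bump db;

static int __init demo_init(void)
{
	db.new_priority = 1;
	init_irq_work(&db.work, deferred_bump_cb);
	irq_work_queue(&db.work);	/* returns false if already pending */
	return 0;
}

static void __exit demo_exit(void)
{
	irq_work_sync(&db.work);	/* wait for any pending callback */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
```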
@@ -26,6 +26,7 @@
 #define I915_REQUEST_H
 #include <linux/dma-fence.h>
+#include <linux/irq_work.h>
 #include <linux/lockdep.h>
 #include "gem/i915_gem_context_types.h"
@@ -208,6 +209,7 @@ struct i915_request {
 	};
 	struct list_head execute_cb;
 	struct i915_sw_fence semaphore;
+	struct irq_work semaphore_work;
 	/*
 	 * A list of everyone we wait upon, and everyone who waits upon us.
...
@@ -234,6 +234,11 @@ static inline u64 ptr_to_u64(const void *ptr)
 	__idx; \
 })
+static inline bool is_power_of_2_u64(u64 n)
+{
+	return (n != 0 && ((n & (n - 1)) == 0));
+}
 static inline void __list_del_many(struct list_head *head,
 				   struct list_head *first)
 {
...
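The is_power_of_2_u64() helper added above exists because the generic kernel is_power_of_2() takes an unsigned long, which is only 32 bits wide on 32-bit builds, so a 64-bit alignment value can be silently truncated before the check. A minimal userspace sketch of that failure mode (illustrative code, not the i915 source):

```c
#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stand-in for the kernel's is_power_of_2(), which takes an unsigned long
 * (only 32 bits wide on a 32-bit kernel). */
static bool is_power_of_2_ulong(unsigned long n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}

static bool is_power_of_2_u64(uint64_t n)
{
	return (n != 0 && ((n & (n - 1)) == 0));
}

int main(void)
{
	uint64_t alignment = 1ULL << 32;	/* a valid power-of-two alignment */

	/* On an ILP32 target the cast truncates the value to 0, so the
	 * unsigned-long helper gives the wrong answer; the u64 variant
	 * sees the full value. On 64-bit hosts both agree. */
	printf("ulong: %d, u64: %d\n",
	       is_power_of_2_ulong((unsigned long)alignment),
	       is_power_of_2_u64(alignment));
	return 0;
}
```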
@@ -81,7 +81,7 @@ struct drm_dp_vcpi {
  * &drm_dp_mst_topology_mgr.base.lock.
  * @num_sdp_stream_sinks: Number of stream sinks. Protected by
  * &drm_dp_mst_topology_mgr.base.lock.
- * @available_pbn: Available bandwidth for this port. Protected by
+ * @full_pbn: Max possible bandwidth for this port. Protected by
  * &drm_dp_mst_topology_mgr.base.lock.
  * @next: link to next port on this branch device
  * @aux: i2c aux transport to talk to device connected to this port, protected
@@ -126,7 +126,7 @@ struct drm_dp_mst_port {
 	u8 dpcd_rev;
 	u8 num_sdp_streams;
 	u8 num_sdp_stream_sinks;
-	uint16_t available_pbn;
+	uint16_t full_pbn;
 	struct list_head next;
 	/**
 	 * @mstb: the branch device connected to this port, if there is one.
...