Commit 1092a94f authored by Linus Torvalds

Merge tag 'drm-fixes-2019-01-18' of git://anongit.freedesktop.org/drm/drm

Pull drm fixes from Dave Airlie:
 "The rc3 fixes are a bit scattered:

   - meson, sun4i and rockchip all had missing of_node_put.

   - qxl and virtio both were advertising dma-buf to userspace when they
     really shouldn't have.

  Otherwise:

  meson:
   - modesetting regression fix

  i915 GVT:
   - one cmd parser failure fix
   - region cleanup fix in vGPU destroy

  amdgpu:
   - KFD fixes for arm64 mixed APU/DGPU
   - vega12 powerplay fix
   - raven DC fixes
   - freesync fix"

* tag 'drm-fixes-2019-01-18' of git://anongit.freedesktop.org/drm/drm:
  drm/amd/display: Detach backlight from stream
  drm/sun4i: backend: add missing of_node_puts
  Revert "drm/amdgpu: validate user pitch alignment"
  Revert "drm/amdgpu: validate user GEM object size"
  drm/meson: Fix atomic mode switching regression
  drm/i915/gvt: Fix mmap range check
  drm/i915/gvt: free VFIO region space in vgpu detach
  drm/amd/display: Fix disabled cursor on top screen edge
  drm/amd/display: fix warning on raven hotplug
  drm/amd/display: fix PME notification not working in RV desktop
  drm/amd/display: Only get the connector state for VRR when toggled
  drm/amd/display: Pack DMCU iRAM alignment
  drm/amd/powerplay: run acg btc for Vega12
  drm/amdkfd: Don't assign dGPUs to APU topology devices
  drm/amdkfd: Allow building KFD on ARM64 (v2)
  drm/meson: add missing of_node_put
  drm/virtio: drop prime import/export callbacks
  drm/qxl: drop prime import/export callbacks
  drm/i915/gvt: Allow F_CMD_ACCESS on mmio 0x21f0
  drm/rockchip: add missing of_node_put
parents 2451f371 df0219b4
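Aside: the meson, sun4i and rockchip patches in this pull all fix the same leak class — of_graph_get_remote_port_parent() returns a device node with an elevated refcount, so every early exit from the iteration must drop it with of_node_put(). Below is a minimal, illustrative kernel-style sketch of that pattern (the function name example_probe_endpoints and its loop body are hypothetical, not code from any of these drivers):

#include <linux/of.h>
#include <linux/of_graph.h>

static int example_probe_endpoints(struct device_node *np)
{
	struct device_node *ep, *remote;
	int count = 0;

	for_each_endpoint_of_node(np, ep) {
		remote = of_graph_get_remote_port_parent(ep);
		if (!remote || !of_device_is_available(remote)) {
			of_node_put(remote);	/* of_node_put(NULL) is a no-op */
			continue;		/* early exit: drop the reference */
		}

		count++;			/* stand-in for real per-node work */
		of_node_put(remote);		/* done with the node on this path too */
	}

	return count;
}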
@@ -531,17 +531,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 	struct drm_gem_object *obj;
 	struct amdgpu_framebuffer *amdgpu_fb;
 	int ret;
-	int height;
-	struct amdgpu_device *adev = dev->dev_private;
-	int cpp = drm_format_plane_cpp(mode_cmd->pixel_format, 0);
-	int pitch = mode_cmd->pitches[0] / cpp;
-
-	pitch = amdgpu_align_pitch(adev, pitch, cpp, false);
-	if (mode_cmd->pitches[0] != pitch) {
-		DRM_DEBUG_KMS("Invalid pitch: expecting %d but got %d\n",
-			      pitch, mode_cmd->pitches[0]);
-		return ERR_PTR(-EINVAL);
-	}

 	obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[0]);
 	if (obj == NULL) {
@@ -556,13 +545,6 @@ amdgpu_display_user_framebuffer_create(struct drm_device *dev,
 		return ERR_PTR(-EINVAL);
 	}

-	height = ALIGN(mode_cmd->height, 8);
-	if (obj->size < pitch * height) {
-		DRM_DEBUG_KMS("Invalid GEM size: expecting >= %d but got %zu\n",
-			      pitch * height, obj->size);
-		return ERR_PTR(-EINVAL);
-	}
-
 	amdgpu_fb = kzalloc(sizeof(*amdgpu_fb), GFP_KERNEL);
 	if (amdgpu_fb == NULL) {
 		drm_gem_object_put_unlocked(obj);
...
@@ -4,8 +4,8 @@
 config HSA_AMD
 	bool "HSA kernel driver for AMD GPU devices"
-	depends on DRM_AMDGPU && X86_64
-	imply AMD_IOMMU_V2
+	depends on DRM_AMDGPU && (X86_64 || ARM64)
+	imply AMD_IOMMU_V2 if X86_64
 	select MMU_NOTIFIER
 	help
 	  Enable this if you want to use HSA features on AMD GPU devices.
@@ -863,6 +863,7 @@ static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
 	return 0;
 }

+#ifdef CONFIG_X86_64
 static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 				uint32_t *num_entries,
 				struct crat_subtype_iolink *sub_type_hdr)
@@ -905,6 +906,7 @@ static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
 	return 0;
 }
+#endif

 /* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
  *
@@ -920,7 +922,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 	struct crat_subtype_generic *sub_type_hdr;
 	int avail_size = *size;
 	int numa_node_id;
+#ifdef CONFIG_X86_64
 	uint32_t entries = 0;
+#endif
 	int ret = 0;

 	if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_CPU)
@@ -982,6 +986,7 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 				sub_type_hdr->length);

 		/* Fill in Subtype: IO Link */
+#ifdef CONFIG_X86_64
 		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
 				&entries,
 				(struct crat_subtype_iolink *)sub_type_hdr);
@@ -992,6 +997,9 @@ static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
 		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
 				sub_type_hdr->length * entries);
+#else
+		pr_info("IO link not available for non x86 platforms\n");
+#endif

 		crat_table->num_domains++;
 	}
...
@@ -1093,8 +1093,6 @@ static uint32_t kfd_generate_gpu_id(struct kfd_dev *gpu)
  *	the GPU device is not already present in the topology device
  *	list then return NULL. This means a new topology device has to
  *	be created for this GPU.
- * TODO: Rather than assiging @gpu to first topology device withtout
- *	gpu attached, it will better to have more stringent check.
  */
 static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 {
@@ -1102,12 +1100,20 @@ static struct kfd_topology_device *kfd_assign_gpu(struct kfd_dev *gpu)
 	struct kfd_topology_device *out_dev = NULL;

 	down_write(&topology_lock);
-	list_for_each_entry(dev, &topology_device_list, list)
+	list_for_each_entry(dev, &topology_device_list, list) {
+		/* Discrete GPUs need their own topology device list
+		 * entries. Don't assign them to CPU/APU nodes.
+		 */
+		if (!gpu->device_info->needs_iommu_device &&
+				dev->node_props.cpu_cores_count)
+			continue;
+
 		if (!dev->gpu && (dev->node_props.simd_count > 0)) {
 			dev->gpu = gpu;
 			out_dev = dev;
 			break;
 		}
+	}
 	up_write(&topology_lock);
 	return out_dev;
 }
@@ -1392,7 +1398,6 @@ int kfd_topology_enum_kfd_devices(uint8_t idx, struct kfd_dev **kdev)

 static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
 {
-	const struct cpuinfo_x86 *cpuinfo;
 	int first_cpu_of_numa_node;

 	if (!cpumask || cpumask == cpu_none_mask)
@@ -1400,9 +1405,11 @@ static int kfd_cpumask_to_apic_id(const struct cpumask *cpumask)
 	first_cpu_of_numa_node = cpumask_first(cpumask);
 	if (first_cpu_of_numa_node >= nr_cpu_ids)
 		return -1;
-	cpuinfo = &cpu_data(first_cpu_of_numa_node);
-
-	return cpuinfo->apicid;
+#ifdef CONFIG_X86_64
+	return cpu_data(first_cpu_of_numa_node).apicid;
+#else
+	return first_cpu_of_numa_node;
+#endif
 }

 /* kfd_numa_node_to_apic_id - Returns the APIC ID of the first logical processor
...
@@ -1772,7 +1772,7 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
 			+ caps.min_input_signal * 0x101;

 	if (dc_link_set_backlight_level(dm->backlight_link,
-			brightness, 0, 0))
+			brightness, 0))
 		return 0;
 	else
 		return 1;
@@ -5933,7 +5933,7 @@ static int amdgpu_dm_atomic_check(struct drm_device *dev,
 	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state, new_crtc_state, i) {
 		if (!drm_atomic_crtc_needs_modeset(new_crtc_state) &&
 		    !new_crtc_state->color_mgmt_changed &&
-		    !new_crtc_state->vrr_enabled)
+		    old_crtc_state->vrr_enabled == new_crtc_state->vrr_enabled)
 			continue;

 		if (!new_crtc_state->enable)
...
@@ -2190,8 +2190,7 @@ int dc_link_get_backlight_level(const struct dc_link *link)

 bool dc_link_set_backlight_level(const struct dc_link *link,
 		uint32_t backlight_pwm_u16_16,
-		uint32_t frame_ramp,
-		const struct dc_stream_state *stream)
+		uint32_t frame_ramp)
 {
 	struct dc *core_dc = link->ctx->dc;
 	struct abm *abm = core_dc->res_pool->abm;
@@ -2206,10 +2205,6 @@ bool dc_link_set_backlight_level(const struct dc_link *link,
 		(abm->funcs->set_backlight_level_pwm == NULL))
 		return false;

-	if (stream)
-		((struct dc_stream_state *)stream)->bl_pwm_level =
-				backlight_pwm_u16_16;
-
 	use_smooth_brightness = dmcu->funcs->is_dmcu_initialized(dmcu);

 	DC_LOG_BACKLIGHT("New Backlight level: %d (0x%X)\n",
@@ -2637,11 +2632,6 @@ void core_link_enable_stream(

 		if (dc_is_dp_signal(pipe_ctx->stream->signal))
 			enable_stream_features(pipe_ctx);
-
-		dc_link_set_backlight_level(pipe_ctx->stream->sink->link,
-				pipe_ctx->stream->bl_pwm_level,
-				0,
-				pipe_ctx->stream);
 	}
 }
...
@@ -146,8 +146,7 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
  */
 bool dc_link_set_backlight_level(const struct dc_link *dc_link,
 		uint32_t backlight_pwm_u16_16,
-		uint32_t frame_ramp,
-		const struct dc_stream_state *stream);
+		uint32_t frame_ramp);

 int dc_link_get_backlight_level(const struct dc_link *dc_link);
...
@@ -91,7 +91,6 @@ struct dc_stream_state {

 	/* DMCU info */
 	unsigned int abm_level;
-	unsigned int bl_pwm_level;

 	/* from core_stream struct */
 	struct dc_context *ctx;
...
@@ -1000,7 +1000,7 @@ void dce110_enable_audio_stream(struct pipe_ctx *pipe_ctx)
 		pipe_ctx->stream_res.audio->funcs->az_enable(pipe_ctx->stream_res.audio);

-		if (num_audio == 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+		if (num_audio >= 1 && pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
 			/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
 			pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);
 		/* un-mute audio */
@@ -1017,6 +1017,8 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
 			pipe_ctx->stream_res.stream_enc->funcs->audio_mute_control(
 					pipe_ctx->stream_res.stream_enc, true);
 	if (pipe_ctx->stream_res.audio) {
+		struct pp_smu_funcs_rv *pp_smu = dc->res_pool->pp_smu;
+
 		if (option != KEEP_ACQUIRED_RESOURCE ||
 				!dc->debug.az_endpoint_mute_only) {
 			/*only disalbe az_endpoint if power down or free*/
@@ -1036,6 +1038,9 @@ void dce110_disable_audio_stream(struct pipe_ctx *pipe_ctx, int option)
 			update_audio_usage(&dc->current_state->res_ctx, dc->res_pool, pipe_ctx->stream_res.audio, false);
 			pipe_ctx->stream_res.audio = NULL;
 		}
+		if (pp_smu != NULL && pp_smu->set_pme_wa_enable != NULL)
+			/*this is the first audio. apply the PME w/a in order to wake AZ from D3*/
+			pp_smu->set_pme_wa_enable(&pp_smu->pp_smu);

 		/* TODO: notify audio driver for if audio modes list changed
 		 * add audio mode list change flag */
...
@@ -463,7 +463,7 @@ void dpp1_set_cursor_position(
 	if (src_y_offset >= (int)param->viewport.height)
 		cur_en = 0;  /* not visible beyond bottom edge*/

-	if (src_y_offset < 0)
+	if (src_y_offset + (int)height <= 0)
 		cur_en = 0;  /* not visible beyond top edge*/

 	REG_UPDATE(CURSOR0_CONTROL,
...
@@ -1140,7 +1140,7 @@ void hubp1_cursor_set_position(
 	if (src_y_offset >= (int)param->viewport.height)
 		cur_en = 0;  /* not visible beyond bottom edge*/

-	if (src_y_offset < 0) //+ (int)hubp->curs_attr.height
+	if (src_y_offset + (int)hubp->curs_attr.height <= 0)
 		cur_en = 0;  /* not visible beyond top edge*/

 	if (cur_en && REG_READ(CURSOR_SURFACE_ADDRESS) == 0)
...
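The two cursor hunks above change the top-edge test from "hide as soon as the origin goes negative" to "hide only once the whole cursor is above the edge". A small sketch of that predicate, with simplified names that are not the driver's actual helpers:

#include <stdbool.h>

static bool cursor_visible_at_top(int src_y_offset, int height)
{
	/* Old check: src_y_offset < 0 hid the cursor as soon as its origin
	 * crossed the top edge, even though part of it was still on screen.
	 * New check: hide it only when the entire cursor is above the edge.
	 */
	return src_y_offset + height > 0;
}

/* e.g. a 64-line cursor at y = -10 is still partially visible:
 *   cursor_visible_at_top(-10, 64) == true
 *   cursor_visible_at_top(-64, 64) == false
 */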
@@ -2355,29 +2355,22 @@ static void dcn10_apply_ctx_for_surface(
 			top_pipe_to_program->plane_state->update_flags.bits.full_update)
 		for (i = 0; i < dc->res_pool->pipe_count; i++) {
 			struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+			tg = pipe_ctx->stream_res.tg;

 			/* Skip inactive pipes and ones already updated */
 			if (!pipe_ctx->stream || pipe_ctx->stream == stream
-					|| !pipe_ctx->plane_state)
+					|| !pipe_ctx->plane_state
+					|| !tg->funcs->is_tg_enabled(tg))
 				continue;

-			pipe_ctx->stream_res.tg->funcs->lock(pipe_ctx->stream_res.tg);
+			tg->funcs->lock(tg);

 			pipe_ctx->plane_res.hubp->funcs->hubp_setup_interdependent(
 				pipe_ctx->plane_res.hubp,
 				&pipe_ctx->dlg_regs,
 				&pipe_ctx->ttu_regs);
-		}
-
-	for (i = 0; i < dc->res_pool->pipe_count; i++) {
-		struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
-
-		if (!pipe_ctx->stream || pipe_ctx->stream == stream
-				|| !pipe_ctx->plane_state)
-			continue;
-
-		dcn10_pipe_control_lock(dc, pipe_ctx, false);
-	}
+
+			tg->funcs->unlock(tg);
+		}

 	if (num_planes == 0)
 		false_optc_underflow_wa(dc, stream, tg);
...
@@ -57,6 +57,7 @@ static const unsigned char abm_config[abm_defines_max_config][abm_defines_max_le
 #define NUM_POWER_FN_SEGS 8
 #define NUM_BL_CURVE_SEGS 16

+#pragma pack(push, 1)
 /* NOTE: iRAM is 256B in size */
 struct iram_table_v_2 {
 	/* flags */
@@ -100,6 +101,7 @@ struct iram_table_v_2 {
 	uint8_t dummy8;		/* 0xfe */
 	uint8_t dummy9;		/* 0xff */
 };
+#pragma pack(pop)

 static uint16_t backlight_8_to_16(unsigned int backlight_8bit)
 {
...
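The "Pack DMCU iRAM alignment" hunk above wraps the 256-byte iRAM table in #pragma pack so the struct layout matches the firmware's byte layout exactly, with no compiler-inserted padding. A toy example of the effect (these fields are invented for illustration, not the real iram_table_v_2 layout):

#include <stdint.h>
#include <stdio.h>

struct padded {
	uint8_t  flags;
	uint16_t level;		/* usually placed on a 2-byte boundary */
};

#pragma pack(push, 1)
struct packed {
	uint8_t  flags;
	uint16_t level;		/* no padding: offset is exactly 1 */
};
#pragma pack(pop)

int main(void)
{
	printf("padded: %zu bytes, packed: %zu bytes\n",
	       sizeof(struct padded), sizeof(struct packed));
	/* commonly prints "padded: 4 bytes, packed: 3 bytes" */
	return 0;
}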
@@ -753,6 +753,22 @@ static int vega12_init_smc_table(struct pp_hwmgr *hwmgr)
 	return 0;
 }

+static int vega12_run_acg_btc(struct pp_hwmgr *hwmgr)
+{
+	uint32_t result;
+
+	PP_ASSERT_WITH_CODE(
+		smum_send_msg_to_smc(hwmgr, PPSMC_MSG_RunAcgBtc) == 0,
+		"[Run_ACG_BTC] Attempt to run ACG BTC failed!",
+		return -EINVAL);
+
+	result = smum_get_argument(hwmgr);
+	PP_ASSERT_WITH_CODE(result == 1,
+			"Failed to run ACG BTC!", return -EINVAL);
+
+	return 0;
+}
+
 static int vega12_set_allowed_featuresmask(struct pp_hwmgr *hwmgr)
 {
 	struct vega12_hwmgr *data =
@@ -931,6 +947,11 @@ static int vega12_enable_dpm_tasks(struct pp_hwmgr *hwmgr)
 			"Failed to initialize SMC table!",
 			result = tmp_result);

+	tmp_result = vega12_run_acg_btc(hwmgr);
+	PP_ASSERT_WITH_CODE(!tmp_result,
+			"Failed to run ACG BTC!",
+			result = tmp_result);
+
 	result = vega12_enable_all_smu_features(hwmgr);
 	PP_ASSERT_WITH_CODE(!result,
 			"Failed to enable all smu features!",
...
@@ -2799,6 +2799,7 @@ static int init_broadwell_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(_MMIO(0xe2a0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2b0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	MMIO_DFH(_MMIO(0xe2c0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
+	MMIO_DFH(_MMIO(0x21f0), D_BDW_PLUS, F_CMD_ACCESS, NULL, NULL);
 	return 0;
 }
...
@@ -41,7 +41,7 @@ struct intel_gvt_mpt {
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
 	void (*host_exit)(struct device *dev, void *gvt);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
-	void (*detach_vgpu)(unsigned long handle);
+	void (*detach_vgpu)(void *vgpu);
 	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
 	unsigned long (*from_virt_to_mfn)(void *p);
 	int (*enable_page_track)(unsigned long handle, u64 gfn);
...
@@ -996,7 +996,7 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 {
 	unsigned int index;
 	u64 virtaddr;
-	unsigned long req_size, pgoff = 0;
+	unsigned long req_size, pgoff, req_start;
 	pgprot_t pg_prot;
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
@@ -1014,7 +1014,17 @@ static int intel_vgpu_mmap(struct mdev_device *mdev, struct vm_area_struct *vma)
 	pg_prot = vma->vm_page_prot;
 	virtaddr = vma->vm_start;
 	req_size = vma->vm_end - vma->vm_start;
-	pgoff = vgpu_aperture_pa_base(vgpu) >> PAGE_SHIFT;
+	pgoff = vma->vm_pgoff &
+		((1U << (VFIO_PCI_OFFSET_SHIFT - PAGE_SHIFT)) - 1);
+	req_start = pgoff << PAGE_SHIFT;
+
+	if (!intel_vgpu_in_aperture(vgpu, req_start))
+		return -EINVAL;
+	if (req_start + req_size >
+	    vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu))
+		return -EINVAL;
+
+	pgoff = (gvt_aperture_pa_base(vgpu->gvt) >> PAGE_SHIFT) + pgoff;

 	return remap_pfn_range(vma, virtaddr, pgoff, req_size, pg_prot);
 }
@@ -1662,9 +1672,21 @@ static int kvmgt_attach_vgpu(void *vgpu, unsigned long *handle)
 	return 0;
 }

-static void kvmgt_detach_vgpu(unsigned long handle)
+static void kvmgt_detach_vgpu(void *p_vgpu)
 {
-	/* nothing to do here */
+	int i;
+	struct intel_vgpu *vgpu = (struct intel_vgpu *)p_vgpu;
+
+	if (!vgpu->vdev.region)
+		return;
+
+	for (i = 0; i < vgpu->vdev.num_regions; i++)
+		if (vgpu->vdev.region[i].ops->release)
+			vgpu->vdev.region[i].ops->release(vgpu,
+					&vgpu->vdev.region[i]);
+	vgpu->vdev.num_regions = 0;
+	kfree(vgpu->vdev.region);
+	vgpu->vdev.region = NULL;
 }

 static int kvmgt_inject_msi(unsigned long handle, u32 addr, u16 data)
...
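The mmap fix above no longer ignores vma->vm_pgoff: it derives the requested offset from it and rejects any mapping that falls outside the vGPU's slice of the aperture before calling remap_pfn_range(). A simplified sketch of the invariant being enforced (plain helper names, not the GVT-g API):

#include <stdbool.h>
#include <stdint.h>

/* A request of req_size bytes starting at req_start must lie entirely inside
 * [aperture_offset, aperture_offset + aperture_sz). Real code would also guard
 * against integer overflow of req_start + req_size.
 */
static bool range_inside_aperture(uint64_t req_start, uint64_t req_size,
				  uint64_t aperture_offset, uint64_t aperture_sz)
{
	if (req_start < aperture_offset)
		return false;
	if (req_start + req_size > aperture_offset + aperture_sz)
		return false;
	return true;
}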
@@ -101,7 +101,7 @@ static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
 	if (!intel_gvt_host.mpt->detach_vgpu)
 		return;

-	intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
+	intel_gvt_host.mpt->detach_vgpu(vgpu);
 }

 #define MSI_CAP_CONTROL(offset) (offset + 2)
...
@@ -46,7 +46,6 @@ struct meson_crtc {
 	struct drm_crtc base;
 	struct drm_pending_vblank_event *event;
 	struct meson_drm *priv;
-	bool enabled;
 };
 #define to_meson_crtc(x) container_of(x, struct meson_crtc, base)
@@ -82,7 +81,8 @@ static const struct drm_crtc_funcs meson_crtc_funcs = {
 };

-static void meson_crtc_enable(struct drm_crtc *crtc)
+static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
+				     struct drm_crtc_state *old_state)
 {
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	struct drm_crtc_state *crtc_state = crtc->state;
@@ -108,20 +108,6 @@ static void meson_crtc_enable(struct drm_crtc *crtc)

 	drm_crtc_vblank_on(crtc);

-	meson_crtc->enabled = true;
-}
-
-static void meson_crtc_atomic_enable(struct drm_crtc *crtc,
-				     struct drm_crtc_state *old_state)
-{
-	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
-	struct meson_drm *priv = meson_crtc->priv;
-
-	DRM_DEBUG_DRIVER("\n");
-
-	if (!meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	priv->viu.osd1_enabled = true;
 }
@@ -153,8 +139,6 @@ static void meson_crtc_atomic_disable(struct drm_crtc *crtc,
 		crtc->state->event = NULL;
 	}
-
-	meson_crtc->enabled = false;
 }

 static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
@@ -163,9 +147,6 @@ static void meson_crtc_atomic_begin(struct drm_crtc *crtc,
 	struct meson_crtc *meson_crtc = to_meson_crtc(crtc);
 	unsigned long flags;

-	if (crtc->state->enable && !meson_crtc->enabled)
-		meson_crtc_enable(crtc);
-
 	if (crtc->state->event) {
 		WARN_ON(drm_crtc_vblank_get(crtc) != 0);
...
@@ -75,6 +75,10 @@ static const struct drm_mode_config_funcs meson_mode_config_funcs = {
 	.fb_create           = drm_gem_fb_create,
 };

+static const struct drm_mode_config_helper_funcs meson_mode_config_helpers = {
+	.atomic_commit_tail = drm_atomic_helper_commit_tail_rpm,
+};
+
 static irqreturn_t meson_irq(int irq, void *arg)
 {
 	struct drm_device *dev = arg;
@@ -266,6 +270,7 @@ static int meson_drv_bind_master(struct device *dev, bool has_components)
 	drm->mode_config.max_width = 3840;
 	drm->mode_config.max_height = 2160;
 	drm->mode_config.funcs = &meson_mode_config_funcs;
+	drm->mode_config.helper_private = &meson_mode_config_helpers;

 	/* Hardware Initialization */
@@ -388,8 +393,10 @@ static int meson_probe_remote(struct platform_device *pdev,
 		remote_node = of_graph_get_remote_port_parent(ep);
 		if (!remote_node ||
 		    remote_node == parent || /* Ignore parent endpoint */
-		    !of_device_is_available(remote_node))
+		    !of_device_is_available(remote_node)) {
+			of_node_put(remote_node);
 			continue;
+		}

 		count += meson_probe_remote(pdev, match, remote, remote_node);
@@ -408,10 +415,13 @@ static int meson_drv_probe(struct platform_device *pdev)
 	for_each_endpoint_of_node(np, ep) {
 		remote = of_graph_get_remote_port_parent(ep);
-		if (!remote || !of_device_is_available(remote))
+		if (!remote || !of_device_is_available(remote)) {
+			of_node_put(remote);
 			continue;
+		}

 		count += meson_probe_remote(pdev, &match, np, remote);
+		of_node_put(remote);
 	}

 	if (count && !match)
...
@@ -250,14 +250,10 @@ static struct drm_driver qxl_driver = {
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = qxl_debugfs_init,
 #endif
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = qxl_gem_prime_pin,
 	.gem_prime_unpin = qxl_gem_prime_unpin,
-	.gem_prime_get_sg_table = qxl_gem_prime_get_sg_table,
-	.gem_prime_import_sg_table = qxl_gem_prime_import_sg_table,
 	.gem_prime_vmap = qxl_gem_prime_vmap,
 	.gem_prime_vunmap = qxl_gem_prime_vunmap,
 	.gem_prime_mmap = qxl_gem_prime_mmap,
...
@@ -38,20 +38,6 @@ void qxl_gem_prime_unpin(struct drm_gem_object *obj)
 	WARN_ONCE(1, "not implemented");
 }

-struct sg_table *qxl_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENOSYS);
-}
-
-struct drm_gem_object *qxl_gem_prime_import_sg_table(
-	struct drm_device *dev, struct dma_buf_attachment *attach,
-	struct sg_table *table)
-{
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENOSYS);
-}
-
 void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
 {
 	WARN_ONCE(1, "not implemented");
...
@@ -113,8 +113,10 @@ struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
 		child_count++;
 		ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
 						  &panel, &bridge);
-		if (!ret)
+		if (!ret) {
+			of_node_put(endpoint);
 			break;
+		}
 	}

 	of_node_put(port);
...
@@ -786,17 +786,18 @@ static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
 		remote = of_graph_get_remote_port_parent(ep);
 		if (!remote)
 			continue;
+		of_node_put(remote);

 		/* does this node match any registered engines? */
 		list_for_each_entry(frontend, &drv->frontend_list, list) {
 			if (remote == frontend->node) {
-				of_node_put(remote);
 				of_node_put(port);
+				of_node_put(ep);
 				return frontend;
 			}
 		}
 	}
+	of_node_put(port);

 	return ERR_PTR(-EINVAL);
 }
...
@@ -127,14 +127,10 @@ static struct drm_driver driver = {
 #if defined(CONFIG_DEBUG_FS)
 	.debugfs_init = virtio_gpu_debugfs_init,
 #endif
-	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
-	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
 	.gem_prime_export = drm_gem_prime_export,
 	.gem_prime_import = drm_gem_prime_import,
 	.gem_prime_pin = virtgpu_gem_prime_pin,
 	.gem_prime_unpin = virtgpu_gem_prime_unpin,
-	.gem_prime_get_sg_table = virtgpu_gem_prime_get_sg_table,
-	.gem_prime_import_sg_table = virtgpu_gem_prime_import_sg_table,
 	.gem_prime_vmap = virtgpu_gem_prime_vmap,
 	.gem_prime_vunmap = virtgpu_gem_prime_vunmap,
 	.gem_prime_mmap = virtgpu_gem_prime_mmap,
...
@@ -372,10 +372,6 @@ int virtio_gpu_object_wait(struct virtio_gpu_object *bo, bool no_wait);
 /* virtgpu_prime.c */
 int virtgpu_gem_prime_pin(struct drm_gem_object *obj);
 void virtgpu_gem_prime_unpin(struct drm_gem_object *obj);
-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj);
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
-	struct drm_device *dev, struct dma_buf_attachment *attach,
-	struct sg_table *sgt);
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj);
 void virtgpu_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr);
 int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
...
@@ -39,20 +39,6 @@ void virtgpu_gem_prime_unpin(struct drm_gem_object *obj)
 	WARN_ONCE(1, "not implemented");
 }

-struct sg_table *virtgpu_gem_prime_get_sg_table(struct drm_gem_object *obj)
-{
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENODEV);
-}
-
-struct drm_gem_object *virtgpu_gem_prime_import_sg_table(
-	struct drm_device *dev, struct dma_buf_attachment *attach,
-	struct sg_table *table)
-{
-	WARN_ONCE(1, "not implemented");
-	return ERR_PTR(-ENODEV);
-}
-
 void *virtgpu_gem_prime_vmap(struct drm_gem_object *obj)
 {
 	struct virtio_gpu_object *bo = gem_to_virtio_gpu_obj(obj);
...