Commit 15833fea authored by Linus Torvalds

Merge tag 'drm-fixes-2024-08-10' of https://gitlab.freedesktop.org/drm/kernel

Pull drm fixes from Dave Airlie:
 "Weekly regular fixes, mostly amdgpu with i915/xe having a few each,
  and then some misc bits across the board, seems about right for rc3
  time.

  client:
   - fix null ptr deref

  bridge:
   - connector: fix double free

  atomic:
   - fix async flip update

  panel:
   - document panel

  omap:
   - add config dependency

  tests:
   - fix gem shmem test

  drm buddy:
   - Add start address to trim function

  amdgpu:
   - DMCUB fix
   - Fix DET programming on some DCNs
   - DCC fixes
   - DCN 4.0.1 fixes
   - SMU 14.0.x update
   - MMHUB fix
   - DCN 3.1.4 fix
   - GC 12.0 fixes
   - Fix soft recovery error propagation
   - SDMA 7.0 fixes
   - DSC fix

  xe:
   - Fix off-by-one when processing RTP rules
   - Use dma_fence_chain_free in chain fence unused as a sync
   - Fix PL1 disable flow in xe_hwmon_power_max_write
   - Take ref to VM in delayed dump snapshot

  i915:
   - correct dual pps handling for MTL_PCH+ [display]
   - Adjust vma offset for framebuffer mmap offset [gem]
   - Fix Virtual Memory mapping boundaries calculation [gem]
   - Allow evicting to use the requested placement
   - Attempt to get pages without eviction first"

* tag 'drm-fixes-2024-08-10' of https://gitlab.freedesktop.org/drm/kernel: (31 commits)
  drm/xe: Take ref to VM in delayed snapshot
  drm/xe/hwmon: Fix PL1 disable flow in xe_hwmon_power_max_write
  drm/xe: Use dma_fence_chain_free in chain fence unused as a sync
  drm/xe/rtp: Fix off-by-one when processing rules
  drm/amdgpu: Add DCC GFX12 flag to enable address alignment
  drm/amdgpu: correct sdma7 max dw
  drm/amdgpu: Add address alignment support to DCC buffers
  drm/amd/display: Skip Recompute DSC Params if no Stream on Link
  drm/amdgpu: change non-dcc buffer copy configuration
  drm/amdgpu: Forward soft recovery errors to userspace
  drm/amdgpu: add golden setting for gc v12
  drm/buddy: Add start address support to trim function
  drm/amd/display: Add missing program DET segment call to pipe init
  drm/amd/display: Add missing DCN314 to the DML Makefile
  drm/amdgpu: force to use legacy inv in mmhub
  drm/amd/pm: update powerplay structure on smu v14.0.2/3
  drm/amd/display: Add missing mcache registers
  drm/amd/display: Add dcc propagation value
  drm/amd/display: Add missing DET segments programming
  drm/amd/display: Replace dm_execute_dmub_cmd with dc_wake_and_execute_dmub_cmd
  ...
parents afdab700 06f5b920
...@@ -17,9 +17,12 @@ properties:
   oneOf:
     # Samsung 13.3" FHD (1920x1080 pixels) eDP AMOLED panel
     - const: samsung,atna33xc20
-    # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
     - items:
-        - const: samsung,atna45af01
+        - enum:
+            # Samsung 14.5" WQXGA+ (2880x1800 pixels) eDP AMOLED panel
+            - samsung,atna45af01
+            # Samsung 14.5" 3K (2944x1840 pixels) eDP AMOLED panel
+            - samsung,atna45dc02
         - const: samsung,atna33xc20
 
   enable-gpios: true
......
...@@ -156,6 +156,8 @@ struct amdgpu_gmc_funcs {
 				uint64_t addr, uint64_t *flags);
 	/* get the amount of memory used by the vbios for pre-OS console */
 	unsigned int (*get_vbios_fb_size)(struct amdgpu_device *adev);
+	/* get the DCC buffer alignment */
+	unsigned int (*get_dcc_alignment)(struct amdgpu_device *adev);
 
 	enum amdgpu_memory_partition (*query_mem_partition_mode)(
 			struct amdgpu_device *adev);
...@@ -363,6 +365,10 @@ struct amdgpu_gmc {
 	(adev)->gmc.gmc_funcs->override_vm_pte_flags			\
 		((adev), (vm), (addr), (pte_flags))
 #define amdgpu_gmc_get_vbios_fb_size(adev) (adev)->gmc.gmc_funcs->get_vbios_fb_size((adev))
+#define amdgpu_gmc_get_dcc_alignment(adev) ({				\
+	typeof(adev) _adev = (adev);					\
+	_adev->gmc.gmc_funcs->get_dcc_alignment(_adev);			\
+})
 
 /**
  * amdgpu_gmc_vram_full_visible - Check if full VRAM is visible through the BAR
......
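The amdgpu_gmc_get_dcc_alignment() macro added above uses a GNU C statement expression so that its argument is evaluated exactly once even though it appears twice in the body. A minimal standalone sketch of that pattern (ordinary user-space C relying on the GCC/Clang typeof and statement-expression extensions, not kernel code):

	#include <stdio.h>

	static int calls;
	static int next_dev(void) { return ++calls; } /* argument with a side effect */

	/* Naive textual expansion: the argument is evaluated twice. */
	#define TWICE(x) ((x) + (x))

	/* Statement-expression form, as in the macro above: bind once, then reuse. */
	#define ONCE(x) ({ typeof(x) _x = (x); _x + _x; })

	int main(void)
	{
		int r;

		calls = 0;
		r = TWICE(next_dev());
		printf("TWICE -> %d, next_dev() ran %d times\n", r, calls); /* ran twice */

		calls = 0;
		r = ONCE(next_dev());
		printf("ONCE  -> %d, next_dev() ran %d times\n", r, calls); /* ran once */
		return 0;
	}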
...@@ -264,9 +264,8 @@ amdgpu_job_prepare_job(struct drm_sched_job *sched_job,
 	struct dma_fence *fence = NULL;
 	int r;
 
-	/* Ignore soft recovered fences here */
 	r = drm_sched_entity_error(s_entity);
-	if (r && r != -ENODATA)
+	if (r)
 		goto error;
 
 	if (!fence && job->gang_submit)
......
...@@ -456,6 +456,7 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	u64 vis_usage = 0, max_bytes, min_block_size;
 	struct amdgpu_vram_mgr_resource *vres;
 	u64 size, remaining_size, lpfn, fpfn;
+	unsigned int adjust_dcc_size = 0;
 	struct drm_buddy *mm = &mgr->mm;
 	struct drm_buddy_block *block;
 	unsigned long pages_per_block;
...@@ -511,7 +512,19 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	/* Allocate blocks in desired range */
 	vres->flags |= DRM_BUDDY_RANGE_ALLOCATION;
 
+	if (bo->flags & AMDGPU_GEM_CREATE_GFX12_DCC &&
+	    adev->gmc.gmc_funcs->get_dcc_alignment)
+		adjust_dcc_size = amdgpu_gmc_get_dcc_alignment(adev);
+
 	remaining_size = (u64)vres->base.size;
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+		unsigned int dcc_size;
+
+		dcc_size = roundup_pow_of_two(vres->base.size + adjust_dcc_size);
+		remaining_size = (u64)dcc_size;
+
+		vres->flags |= DRM_BUDDY_TRIM_DISABLE;
+	}
 
 	mutex_lock(&mgr->lock);
 	while (remaining_size) {
...@@ -521,7 +534,10 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 			min_block_size = mgr->default_page_size;
 
 		size = remaining_size;
-		if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
+
+		if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size)
+			min_block_size = size;
+		else if ((size >= (u64)pages_per_block << PAGE_SHIFT) &&
 		    !(size & (((u64)pages_per_block << PAGE_SHIFT) - 1)))
 			min_block_size = (u64)pages_per_block << PAGE_SHIFT;
...@@ -553,6 +569,22 @@ static int amdgpu_vram_mgr_new(struct ttm_resource_manager *man,
 	}
 	mutex_unlock(&mgr->lock);
 
+	if (bo->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS && adjust_dcc_size) {
+		struct drm_buddy_block *dcc_block;
+		unsigned long dcc_start;
+		u64 trim_start;
+
+		dcc_block = amdgpu_vram_mgr_first_block(&vres->blocks);
+		/* Adjust the start address for DCC buffers only */
+		dcc_start =
+			roundup((unsigned long)amdgpu_vram_mgr_block_start(dcc_block),
+				adjust_dcc_size);
+		trim_start = (u64)dcc_start;
+		drm_buddy_block_trim(mm, &trim_start,
+				     (u64)vres->base.size,
+				     &vres->blocks);
+	}
+
 	vres->base.start = 0;
 	size = max_t(u64, amdgpu_vram_mgr_blocks_size(&vres->blocks),
 		     vres->base.size);
......
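For the contiguous DCC path above, the over-allocation and the later trim fit together as a worked example (numbers hypothetical): a 1 MiB contiguous request with a 64 KiB DCC alignment first allocates roundup_pow_of_two(1 MiB + 64 KiB) = 2 MiB with DRM_BUDDY_TRIM_DISABLE set, which guarantees the block contains a 64 KiB-aligned 1 MiB span; the final hunk then rounds the block's start up to the next 64 KiB boundary and trims the block back to the requested 1 MiB at that aligned offset.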
...@@ -202,6 +202,12 @@ static const struct amdgpu_hwip_reg_entry gc_gfx_queue_reg_list_12[] = {
 	SOC15_REG_ENTRY_STR(GC, 0, regCP_IB1_BUFSZ)
 };
 
+static const struct soc15_reg_golden golden_settings_gc_12_0[] = {
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regDB_MEM_CONFIG, 0x0000000f, 0x0000000f),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regCB_HW_CONTROL_1, 0x03000000, 0x03000000),
+	SOC15_REG_GOLDEN_VALUE(GC, 0, regGL2C_CTRL5, 0x00000070, 0x00000020)
+};
+
 #define DEFAULT_SH_MEM_CONFIG \
 	((SH_MEM_ADDRESS_MODE_64 << SH_MEM_CONFIG__ADDRESS_MODE__SHIFT) | \
 	 (SH_MEM_ALIGNMENT_MODE_UNALIGNED << SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT) | \
...@@ -3432,6 +3438,24 @@ static void gfx_v12_0_disable_gpa_mode(struct amdgpu_device *adev)
 	WREG32_SOC15(GC, 0, regCPG_PSP_DEBUG, data);
 }
 
+static void gfx_v12_0_init_golden_registers(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev))
+		return;
+
+	switch (amdgpu_ip_version(adev, GC_HWIP, 0)) {
+	case IP_VERSION(12, 0, 0):
+	case IP_VERSION(12, 0, 1):
+		if (adev->rev_id == 0)
+			soc15_program_register_sequence(adev,
+					golden_settings_gc_12_0,
+					(const u32)ARRAY_SIZE(golden_settings_gc_12_0));
+		break;
+	default:
+		break;
+	}
+}
+
 static int gfx_v12_0_hw_init(void *handle)
 {
 	int r;
...@@ -3472,6 +3496,9 @@ static int gfx_v12_0_hw_init(void *handle)
 		}
 	}
 
+	if (!amdgpu_emu_mode)
+		gfx_v12_0_init_golden_registers(adev);
+
 	adev->gfx.is_poweron = true;
 
 	if (get_gb_addr_config(adev))
......
...@@ -542,6 +542,23 @@ static unsigned gmc_v12_0_get_vbios_fb_size(struct amdgpu_device *adev)
 	return 0;
 }
 
+static unsigned int gmc_v12_0_get_dcc_alignment(struct amdgpu_device *adev)
+{
+	unsigned int max_tex_channel_caches, alignment;
+
+	if (amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 0) &&
+	    amdgpu_ip_version(adev, GC_HWIP, 0) != IP_VERSION(12, 0, 1))
+		return 0;
+
+	max_tex_channel_caches = adev->gfx.config.max_texture_channel_caches;
+	if (is_power_of_2(max_tex_channel_caches))
+		alignment = (unsigned int)(max_tex_channel_caches / SZ_4);
+	else
+		alignment = roundup_pow_of_two(max_tex_channel_caches);
+
+	return (unsigned int)(alignment * max_tex_channel_caches * SZ_1K);
+}
+
 static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
 	.flush_gpu_tlb = gmc_v12_0_flush_gpu_tlb,
 	.flush_gpu_tlb_pasid = gmc_v12_0_flush_gpu_tlb_pasid,
...@@ -551,6 +568,7 @@ static const struct amdgpu_gmc_funcs gmc_v12_0_gmc_funcs = {
 	.get_vm_pde = gmc_v12_0_get_vm_pde,
 	.get_vm_pte = gmc_v12_0_get_vm_pte,
 	.get_vbios_fb_size = gmc_v12_0_get_vbios_fb_size,
+	.get_dcc_alignment = gmc_v12_0_get_dcc_alignment,
 };
 
 static void gmc_v12_0_set_gmc_funcs(struct amdgpu_device *adev)
......
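The alignment formula above can be checked in isolation. A standalone sketch (the cache counts below are illustrative examples, not values from any particular GC 12.0 part; SZ_4 and SZ_1K are simply 4 and 1024):

	#include <stdio.h>

	static unsigned int roundup_pow_of_two(unsigned int v)
	{
		unsigned int p = 1;

		while (p < v)
			p <<= 1;
		return p;
	}

	/* Mirrors gmc_v12_0_get_dcc_alignment() for a given cache count. */
	static unsigned int dcc_alignment(unsigned int max_tex_channel_caches)
	{
		unsigned int alignment;

		if ((max_tex_channel_caches & (max_tex_channel_caches - 1)) == 0)
			alignment = max_tex_channel_caches / 4; /* power of two */
		else
			alignment = roundup_pow_of_two(max_tex_channel_caches);

		return alignment * max_tex_channel_caches * 1024;
	}

	int main(void)
	{
		printf("%u\n", dcc_alignment(16)); /* (16/4) * 16 * 1K = 64 KiB  */
		printf("%u\n", dcc_alignment(24)); /* 32 * 24 * 1K    = 768 KiB */
		return 0;
	}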
...@@ -80,7 +80,8 @@ static uint32_t mmhub_v4_1_0_get_invalidate_req(unsigned int vmid,
 	/* invalidate using legacy mode on vmid*/
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ,
 			    PER_VMID_INVALIDATE_REQ, 1 << vmid);
-	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, flush_type);
+	/* Only use legacy inv on mmhub side */
+	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, FLUSH_TYPE, 0);
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PTES, 1);
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE0, 1);
 	req = REG_SET_FIELD(req, MMVM_INVALIDATE_ENG0_REQ, INVALIDATE_L2_PDE1, 1);
......
...@@ -1575,8 +1575,7 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
 	ib->ptr[ib->length_dw++] = SDMA_PKT_COPY_LINEAR_HEADER_OP(SDMA_OP_COPY) |
 		SDMA_PKT_COPY_LINEAR_HEADER_SUB_OP(SDMA_SUBOP_COPY_LINEAR) |
 		SDMA_PKT_COPY_LINEAR_HEADER_TMZ((copy_flags & AMDGPU_COPY_FLAGS_TMZ) ? 1 : 0) |
-		SDMA_PKT_COPY_LINEAR_HEADER_CPV((copy_flags &
-			(AMDGPU_COPY_FLAGS_READ_DECOMPRESSED | AMDGPU_COPY_FLAGS_WRITE_COMPRESSED)) ? 1 : 0);
+		SDMA_PKT_COPY_LINEAR_HEADER_CPV(1);
 
 	ib->ptr[ib->length_dw++] = byte_count - 1;
 	ib->ptr[ib->length_dw++] = 0; /* src/dst endian swap */
...@@ -1590,6 +1589,8 @@ static void sdma_v7_0_emit_copy_buffer(struct amdgpu_ib *ib,
 			((copy_flags & AMDGPU_COPY_FLAGS_READ_DECOMPRESSED) ? SDMA_DCC_READ_CM(2) : 0) |
 			((copy_flags & AMDGPU_COPY_FLAGS_WRITE_COMPRESSED) ? SDMA_DCC_WRITE_CM(1) : 0) |
 			SDMA_DCC_MAX_COM(max_com) | SDMA_DCC_MAX_UCOM(1);
+	else
+		ib->ptr[ib->length_dw++] = 0;
 }
 
 /**
...@@ -1616,7 +1617,7 @@ static void sdma_v7_0_emit_fill_buffer(struct amdgpu_ib *ib,
 static const struct amdgpu_buffer_funcs sdma_v7_0_buffer_funcs = {
 	.copy_max_bytes = 0x400000,
-	.copy_num_dw = 7,
+	.copy_num_dw = 8,
 	.emit_copy_buffer = sdma_v7_0_emit_copy_buffer,
 	.fill_max_bytes = 0x400000,
 	.fill_num_dw = 5,
......
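The .copy_num_dw bump follows directly from the packet change above: with SDMA_PKT_COPY_LINEAR_HEADER_CPV(1) now set unconditionally, every linear copy packet carries the trailing metadata dword (written as 0 when neither DCC copy flag is set), so the worst-case packet size is 8 dwords instead of 7.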
...@@ -1270,6 +1270,9 @@ static bool is_dsc_need_re_compute(
 		}
 	}
 
+	if (new_stream_on_link_num == 0)
+		return false;
+
 	/* check current_state if there stream on link but it is not in
 	 * new request state
 	 */
......
...@@ -185,8 +185,7 @@ static bool dmub_replay_copy_settings(struct dmub_replay *dmub,
 	else
 		copy_settings_data->flags.bitfields.force_wakeup_by_tps3 = 0;
 
-
-	dm_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
+	dc_wake_and_execute_dmub_cmd(dc, &cmd, DM_DMUB_WAIT_TYPE_WAIT);
 
 	return true;
 }
......
...@@ -83,6 +83,8 @@ CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn31/display_rq_dlg_calc_31.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_32.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_rq_dlg_calc_32.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn32/display_mode_vba_util_32.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_mode_vba_314.o := $(dml_rcflags)
+CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn314/display_rq_dlg_calc_314.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dcn301/dcn301_fpu.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/display_mode_lib.o := $(dml_rcflags)
 CFLAGS_REMOVE_$(AMDDALPATH)/dc/dml/dsc/rc_calc_fpu.o := $(dml_rcflags)
......
...@@ -1402,6 +1402,8 @@ void dcn10_init_pipes(struct dc *dc, struct dc_state *context)
 			if (hubbub && hubp) {
 				if (hubbub->funcs->program_det_size)
 					hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
+				if (hubbub->funcs->program_det_segments)
+					hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
 			}
 		}
......
...@@ -771,6 +771,8 @@ void dcn35_init_pipes(struct dc *dc, struct dc_state *context)
 			if (hubbub && hubp) {
 				if (hubbub->funcs->program_det_size)
 					hubbub->funcs->program_det_size(hubbub, hubp->inst, 0);
+				if (hubbub->funcs->program_det_segments)
+					hubbub->funcs->program_det_segments(hubbub, hubp->inst, 0);
 			}
 		}
......
...@@ -723,6 +723,7 @@ static const struct dc_debug_options debug_defaults_drv = {
 	.min_prefetch_in_strobe_ns = 60000, // 60us
 	.disable_unbounded_requesting = false,
 	.enable_legacy_fast_update = false,
+	.dcc_meta_propagation_delay_us = 10,
 	.fams2_config = {
 		.bits = {
 			.enable = true,
......
...@@ -138,7 +138,9 @@ void dcn401_prepare_mcache_programming(struct dc *dc, struct dc_state *context);
 	SRI_ARR(DCHUBP_MALL_CONFIG, HUBP, id), \
 	SRI_ARR(DCHUBP_VMPG_CONFIG, HUBP, id), \
 	SRI_ARR(UCLK_PSTATE_FORCE, HUBPREQ, id), \
-	HUBP_3DLUT_FL_REG_LIST_DCN401(id)
+	HUBP_3DLUT_FL_REG_LIST_DCN401(id), \
+	SRI_ARR(DCSURF_VIEWPORT_MCACHE_SPLIT_COORDINATE, HUBP, id), \
+	SRI_ARR(DCHUBP_MCACHEID_CONFIG, HUBP, id)
 
 /* ABM */
 #define ABM_DCN401_REG_LIST_RI(id) \
......
...@@ -27,7 +27,8 @@
 #pragma pack(push, 1)
 
-#define SMU_14_0_2_TABLE_FORMAT_REVISION 3
+#define SMU_14_0_2_TABLE_FORMAT_REVISION 23
+#define SMU_14_0_2_CUSTOM_TABLE_FORMAT_REVISION 1
 
 // POWERPLAYTABLE::ulPlatformCaps
 #define SMU_14_0_2_PP_PLATFORM_CAP_POWERPLAY 0x1 // This cap indicates whether CCC need to show Powerplay page.
...@@ -43,6 +44,7 @@
 #define SMU_14_0_2_PP_THERMALCONTROLLER_NONE 0
 
 #define SMU_14_0_2_PP_OVERDRIVE_VERSION 0x1 // TODO: FIX OverDrive Version TBD
+#define SMU_14_0_2_PP_CUSTOM_OVERDRIVE_VERSION 0x1
 #define SMU_14_0_2_PP_POWERSAVINGCLOCK_VERSION 0x01 // Power Saving Clock Table Version 1.00
 
 enum SMU_14_0_2_OD_SW_FEATURE_CAP
...@@ -107,6 +109,7 @@ enum SMU_14_0_2_PWRMODE_SETTING
 	SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_BALANCE,
 	SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_TURBO,
 	SMU_14_0_2_PMSETTING_ACOUSTIC_LIMIT_RPM_RAGE,
+	SMU_14_0_2_PMSETTING_COUNT
 };
 
 #define SMU_14_0_2_MAX_PMSETTING 32 // Maximum Number of PowerMode Settings
...@@ -127,16 +130,23 @@ struct smu_14_0_2_overdrive_table
 	int16_t pm_setting[SMU_14_0_2_MAX_PMSETTING]; // Optimized power mode feature settings
 };
 
+enum smu_14_0_3_pptable_source {
+	PPTABLE_SOURCE_IFWI = 0,
+	PPTABLE_SOURCE_DRIVER_HARDCODED = 1,
+	PPTABLE_SOURCE_PPGEN_REGISTRY = 2,
+	PPTABLE_SOURCE_MAX = PPTABLE_SOURCE_PPGEN_REGISTRY,
+};
+
 struct smu_14_0_2_powerplay_table
 {
 	struct atom_common_table_header header; // header.format_revision = 3 (HAS TO MATCH SMU_14_0_2_TABLE_FORMAT_REVISION), header.content_revision = ? structuresize is calculated by PPGen.
 	uint8_t table_revision; // PPGen use only: table_revision = 3
-	uint8_t padding; // Padding 1 byte to align table_size offset to 6 bytes (pmfw_start_offset, for PMFW to know the starting offset of PPTable_t).
+	uint8_t pptable_source; // PPGen UI dropdown box
 	uint16_t pmfw_pptable_start_offset; // The start offset of the pmfw portion. i.e. start of PPTable_t (start of SkuTable_t)
 	uint16_t pmfw_pptable_size; // The total size of pmfw_pptable, i.e PPTable_t.
-	uint16_t pmfw_pfe_table_start_offset; // The start offset of the PFE_Settings_t within pmfw_pptable.
-	uint16_t pmfw_pfe_table_size; // The size of PFE_Settings_t.
-	uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t within pmfw_pptable.
+	uint16_t pmfw_sku_table_start_offset; // DO NOT CHANGE ORDER; The absolute start offset of the SkuTable_t (within smu_14_0_3_powerplay_table).
+	uint16_t pmfw_sku_table_size; // DO NOT CHANGE ORDER; The size of SkuTable_t.
+	uint16_t pmfw_board_table_start_offset; // The start offset of the BoardTable_t
 	uint16_t pmfw_board_table_size; // The size of BoardTable_t.
 	uint16_t pmfw_custom_sku_table_start_offset; // The start offset of the CustomSkuTable_t within pmfw_pptable.
 	uint16_t pmfw_custom_sku_table_size; // The size of the CustomSkuTable_t.
...@@ -159,6 +169,36 @@ struct smu_14_0_2_powerplay_table
 	PPTable_t smc_pptable; // PPTable_t in driver_if.h -- as requested by PMFW, this offset should start at a 32-byte boundary, and the table_size above should remain at offset=6 bytes
 };
 
+enum SMU_14_0_2_CUSTOM_OD_SW_FEATURE_CAP {
+	SMU_14_0_2_CUSTOM_ODCAP_POWER_MODE = 0,
+	SMU_14_0_2_CUSTOM_ODCAP_COUNT
+};
+
+enum SMU_14_0_2_CUSTOM_OD_FEATURE_SETTING_ID {
+	SMU_14_0_2_CUSTOM_ODSETTING_POWER_MODE = 0,
+	SMU_14_0_2_CUSTOM_ODSETTING_COUNT,
+};
+
+struct smu_14_0_2_custom_overdrive_table {
+	uint8_t revision;
+	uint8_t reserve[3];
+	uint8_t cap[SMU_14_0_2_CUSTOM_ODCAP_COUNT];
+	int32_t max[SMU_14_0_2_CUSTOM_ODSETTING_COUNT];
+	int32_t min[SMU_14_0_2_CUSTOM_ODSETTING_COUNT];
+	int16_t pm_setting[SMU_14_0_2_PMSETTING_COUNT];
+};
+
+struct smu_14_0_3_custom_powerplay_table {
+	uint8_t custom_table_revision;
+	uint16_t custom_table_size;
+	uint16_t custom_sku_table_offset;
+	uint32_t custom_platform_caps;
+	uint16_t software_shutdown_temp;
+	struct smu_14_0_2_custom_overdrive_table custom_overdrive_table;
+	uint32_t reserve[8];
+	CustomSkuTable_t custom_sku_table_pmfw;
+};
+
 #pragma pack(pop)
 
 #endif
...@@ -1071,23 +1071,16 @@ int drm_atomic_set_property(struct drm_atomic_state *state,
 		}
 
 		if (async_flip &&
-		    prop != config->prop_fb_id &&
+		    (plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY ||
+		     (prop != config->prop_fb_id &&
 		      prop != config->prop_in_fence_fd &&
-		      prop != config->prop_fb_damage_clips) {
+		      prop != config->prop_fb_damage_clips))) {
 			ret = drm_atomic_plane_get_property(plane, plane_state,
 							    prop, &old_val);
 			ret = drm_atomic_check_prop_changes(ret, old_val, prop_value, prop);
 			break;
 		}
 
-		if (async_flip && plane_state->plane->type != DRM_PLANE_TYPE_PRIMARY) {
-			drm_dbg_atomic(prop->dev,
-				       "[OBJECT:%d] Only primary planes can be changed during async flip\n",
-				       obj->id);
-			ret = -EINVAL;
-			break;
-		}
-
 		ret = drm_atomic_plane_set_property(plane,
 						    plane_state, file_priv,
 						    prop, prop_value);
......
...@@ -443,10 +443,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
 			panel_bridge = bridge;
 	}
 
-	if (connector_type == DRM_MODE_CONNECTOR_Unknown) {
-		kfree(bridge_connector);
+	if (connector_type == DRM_MODE_CONNECTOR_Unknown)
 		return ERR_PTR(-EINVAL);
-	}
 
 	if (bridge_connector->bridge_hdmi)
 		ret = drmm_connector_hdmi_init(drm, connector,
...@@ -461,10 +459,8 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm,
 		ret = drmm_connector_init(drm, connector,
 					  &drm_bridge_connector_funcs,
 					  connector_type, ddc);
-	if (ret) {
-		kfree(bridge_connector);
+	if (ret)
 		return ERR_PTR(ret);
-	}
 
 	drm_connector_helper_add(connector, &drm_bridge_connector_helper_funcs);
......
...@@ -851,6 +851,7 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
  * drm_buddy_block_trim - free unused pages
  *
  * @mm: DRM buddy manager
+ * @start: start address to begin the trimming.
  * @new_size: original size requested
  * @blocks: Input and output list of allocated blocks.
  *          MUST contain single block as input to be trimmed.
...@@ -866,11 +867,13 @@ static int __alloc_contig_try_harder(struct drm_buddy *mm,
  *   0 on success, error code on failure.
  */
 int drm_buddy_block_trim(struct drm_buddy *mm,
+			 u64 *start,
 			 u64 new_size,
 			 struct list_head *blocks)
 {
 	struct drm_buddy_block *parent;
 	struct drm_buddy_block *block;
+	u64 block_start, block_end;
 	LIST_HEAD(dfs);
 	u64 new_start;
 	int err;
...@@ -882,6 +885,9 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 			   struct drm_buddy_block,
 			   link);
 
+	block_start = drm_buddy_block_offset(block);
+	block_end = block_start + drm_buddy_block_size(mm, block);
+
 	if (WARN_ON(!drm_buddy_block_is_allocated(block)))
 		return -EINVAL;
...@@ -894,6 +900,20 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 	if (new_size == drm_buddy_block_size(mm, block))
 		return 0;
 
+	new_start = block_start;
+	if (start) {
+		new_start = *start;
+
+		if (new_start < block_start)
+			return -EINVAL;
+
+		if (!IS_ALIGNED(new_start, mm->chunk_size))
+			return -EINVAL;
+
+		if (range_overflows(new_start, new_size, block_end))
+			return -EINVAL;
+	}
+
 	list_del(&block->link);
 	mark_free(mm, block);
 	mm->avail += drm_buddy_block_size(mm, block);
...@@ -904,7 +924,6 @@ int drm_buddy_block_trim(struct drm_buddy *mm,
 	parent = block->parent;
 	block->parent = NULL;
 
-	new_start = drm_buddy_block_offset(block);
 	list_add(&block->tmp_link, &dfs);
 	err = __alloc_range(mm, &dfs, new_start, new_size, blocks, NULL);
 	if (err) {
...@@ -1066,7 +1085,8 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 	} while (1);
 
 	/* Trim the allocated block to the required size */
-	if (original_size != size) {
+	if (!(flags & DRM_BUDDY_TRIM_DISABLE) &&
+	    original_size != size) {
 		struct list_head *trim_list;
 		LIST_HEAD(temp);
 		u64 trim_size;
...@@ -1083,6 +1103,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 		}
 
 		drm_buddy_block_trim(mm,
+				     NULL,
 				     trim_size,
 				     trim_list);
......
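The extended trim API now takes an optional start address. A hedged usage sketch (variable names illustrative, mirroring the amdgpu caller earlier in this series):

	/* After an over-sized contiguous allocation, trim back to the size the
	 * caller actually asked for, starting at an aligned offset inside the
	 * allocated block. 'alignment' and 'requested_size' are placeholders.
	 */
	u64 new_start = roundup(drm_buddy_block_offset(block), alignment);
	int err = drm_buddy_block_trim(mm, &new_start, requested_size, &blocks);

	/* Passing NULL for @start keeps the historical behaviour and trims
	 * from the block's own offset, as drm_buddy_alloc_blocks() does above.
	 */
	err = drm_buddy_block_trim(mm, NULL, requested_size, &blocks);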
...@@ -880,6 +880,11 @@ int drm_client_modeset_probe(struct drm_client_dev *client, unsigned int width,
 		kfree(modeset->mode);
 		modeset->mode = drm_mode_duplicate(dev, mode);
+		if (!modeset->mode) {
+			ret = -ENOMEM;
+			break;
+		}
+
 		drm_connector_get(connector);
 		modeset->connectors[modeset->num_connectors++] = connector;
 		modeset->x = offset->x;
......
...@@ -1449,6 +1449,9 @@ bxt_setup_backlight(struct intel_connector *connector, enum pipe unused)
 static int cnp_num_backlight_controllers(struct drm_i915_private *i915)
 {
+	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+		return 2;
+
 	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 		return 1;
......
...@@ -351,6 +351,9 @@ static int intel_num_pps(struct drm_i915_private *i915)
 	if (IS_GEMINILAKE(i915) || IS_BROXTON(i915))
 		return 2;
 
+	if (INTEL_PCH_TYPE(i915) >= PCH_MTL)
+		return 2;
+
 	if (INTEL_PCH_TYPE(i915) >= PCH_DG1)
 		return 1;
......
...@@ -290,6 +290,41 @@ static vm_fault_t vm_fault_cpu(struct vm_fault *vmf)
 	return i915_error_to_vmf_fault(err);
 }
 
+static void set_address_limits(struct vm_area_struct *area,
+			       struct i915_vma *vma,
+			       unsigned long obj_offset,
+			       unsigned long *start_vaddr,
+			       unsigned long *end_vaddr)
+{
+	unsigned long vm_start, vm_end, vma_size; /* user's memory parameters */
+	long start, end; /* memory boundaries */
+
+	/*
+	 * Let's move into the ">> PAGE_SHIFT"
+	 * domain to be sure not to lose bits
+	 */
+	vm_start = area->vm_start >> PAGE_SHIFT;
+	vm_end = area->vm_end >> PAGE_SHIFT;
+	vma_size = vma->size >> PAGE_SHIFT;
+
+	/*
+	 * Calculate the memory boundaries by considering the offset
+	 * provided by the user during memory mapping and the offset
+	 * provided for the partial mapping.
+	 */
+	start = vm_start;
+	start -= obj_offset;
+	start += vma->gtt_view.partial.offset;
+	end = start + vma_size;
+
+	start = max_t(long, start, vm_start);
+	end = min_t(long, end, vm_end);
+
+	/* Let's move back into the "<< PAGE_SHIFT" domain */
+	*start_vaddr = (unsigned long)start << PAGE_SHIFT;
+	*end_vaddr = (unsigned long)end << PAGE_SHIFT;
+}
+
 static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 {
 #define MIN_CHUNK_PAGES (SZ_1M >> PAGE_SHIFT)
...@@ -302,14 +337,18 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	struct i915_ggtt *ggtt = to_gt(i915)->ggtt;
 	bool write = area->vm_flags & VM_WRITE;
 	struct i915_gem_ww_ctx ww;
+	unsigned long obj_offset;
+	unsigned long start, end; /* memory boundaries */
 	intel_wakeref_t wakeref;
 	struct i915_vma *vma;
 	pgoff_t page_offset;
+	unsigned long pfn;
 	int srcu;
 	int ret;
 
-	/* We don't use vmf->pgoff since that has the fake offset */
+	obj_offset = area->vm_pgoff - drm_vma_node_start(&mmo->vma_node);
 	page_offset = (vmf->address - area->vm_start) >> PAGE_SHIFT;
+	page_offset += obj_offset;
 
 	trace_i915_gem_object_fault(obj, page_offset, true, write);
...@@ -402,12 +441,14 @@ static vm_fault_t vm_fault_gtt(struct vm_fault *vmf)
 	if (ret)
 		goto err_unpin;
 
+	set_address_limits(area, vma, obj_offset, &start, &end);
+
+	pfn = (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT;
+	pfn += (start - area->vm_start) >> PAGE_SHIFT;
+	pfn += obj_offset - vma->gtt_view.partial.offset;
+
 	/* Finally, remap it using the new GTT offset */
-	ret = remap_io_mapping(area,
-			       area->vm_start + (vma->gtt_view.partial.offset << PAGE_SHIFT),
-			       (ggtt->gmadr.start + i915_ggtt_offset(vma)) >> PAGE_SHIFT,
-			       min_t(u64, vma->size, area->vm_end - area->vm_start),
-			       &ggtt->iomap);
+	ret = remap_io_mapping(area, start, pfn, end - start, &ggtt->iomap);
 	if (ret)
 		goto err_fence;
...@@ -1084,6 +1125,8 @@ int i915_gem_fb_mmap(struct drm_i915_gem_object *obj, struct vm_area_struct *vma)
 		mmo = mmap_offset_attach(obj, mmap_type, NULL);
 		if (IS_ERR(mmo))
 			return PTR_ERR(mmo);
+
+		vma->vm_pgoff += drm_vma_node_start(&mmo->vma_node);
 	}
 
 	/*
......
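set_address_limits() is easiest to see with hypothetical numbers: for a VMA covering user pages 100..110 (vm_start = 100, vm_end = 110 after the >> PAGE_SHIFT shift), an mmap offset 4 pages into the object (obj_offset = 4) and a partial view starting at object page 6 and spanning 4 pages, start = 100 - 4 + 6 = 102 and end = 102 + 4 = 106, both already inside the VMA, so exactly pages 102..105 are remapped. The pfn computation then cancels the same terms (it adds obj_offset and subtracts partial.offset), so the first remapped user page maps the first page of the bound partial view.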
...@@ -165,7 +165,6 @@ i915_ttm_placement_from_obj(const struct drm_i915_gem_object *obj,
 	i915_ttm_place_from_region(num_allowed ? obj->mm.placements[0] :
 				   obj->mm.region, &places[0], obj->bo_offset,
 				   obj->base.size, flags);
-	places[0].flags |= TTM_PL_FLAG_DESIRED;
 
 	/* Cache this on object? */
 	for (i = 0; i < num_allowed; ++i) {
...@@ -779,13 +778,16 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		.interruptible = true,
 		.no_wait_gpu = false,
 	};
-	int real_num_busy;
+	struct ttm_placement initial_placement;
+	struct ttm_place initial_place;
 	int ret;
 
 	/* First try only the requested placement. No eviction. */
-	real_num_busy = placement->num_placement;
-	placement->num_placement = 1;
-	ret = ttm_bo_validate(bo, placement, &ctx);
+	initial_placement.num_placement = 1;
+	memcpy(&initial_place, placement->placement, sizeof(struct ttm_place));
+	initial_place.flags |= TTM_PL_FLAG_DESIRED;
+	initial_placement.placement = &initial_place;
+	ret = ttm_bo_validate(bo, &initial_placement, &ctx);
 	if (ret) {
 		ret = i915_ttm_err_to_gem(ret);
 		/*
...@@ -800,7 +802,6 @@ static int __i915_ttm_get_pages(struct drm_i915_gem_object *obj,
 		 * If the initial attempt fails, allow all accepted placements,
 		 * evicting if necessary.
 		 */
-		placement->num_placement = real_num_busy;
 		ret = ttm_bo_validate(bo, placement, &ctx);
 		if (ret)
 			return i915_ttm_err_to_gem(ret);
......
 # SPDX-License-Identifier: GPL-2.0-only
 config DRM_OMAP
 	tristate "OMAP DRM"
+	depends on MMU
 	depends on DRM && OF
 	depends on ARCH_OMAP2PLUS || (COMPILE_TEST && PAGE_SIZE_LESS_THAN_64KB)
 	select DRM_KMS_HELPER
......
...@@ -102,6 +102,17 @@ static void drm_gem_shmem_test_obj_create_private(struct kunit *test)
 	sg_init_one(sgt->sgl, buf, TEST_SIZE);
 
+	/*
+	 * Set the DMA mask to 64-bits and map the sgtables
+	 * otherwise drm_gem_shmem_free will cause a warning
+	 * on debug kernels.
+	 */
+	ret = dma_set_mask(drm_dev->dev, DMA_BIT_MASK(64));
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
+	ret = dma_map_sgtable(drm_dev->dev, sgt, DMA_BIDIRECTIONAL, 0);
+	KUNIT_ASSERT_EQ(test, ret, 0);
+
 	/* Init a mock DMA-BUF */
 	buf_mock.size = TEST_SIZE;
 	attach_mock.dmabuf = &buf_mock;
......
...@@ -203,9 +203,10 @@ static int xe_hwmon_power_max_write(struct xe_hwmon *hwmon, int channel, long value)
 		reg_val = xe_mmio_rmw32(hwmon->gt, rapl_limit, PKG_PWR_LIM_1_EN, 0);
 		reg_val = xe_mmio_read32(hwmon->gt, rapl_limit);
 		if (reg_val & PKG_PWR_LIM_1_EN) {
+			drm_warn(&gt_to_xe(hwmon->gt)->drm, "PL1 disable is not supported!\n");
 			ret = -EOPNOTSUPP;
-			goto unlock;
 		}
+		goto unlock;
 	}
 
 	/* Computation in 64-bits to avoid overflow. Round to nearest. */
......
...@@ -1634,6 +1634,9 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
 	if (!snapshot)
 		return NULL;
 
+	if (lrc->bo && lrc->bo->vm)
+		xe_vm_get(lrc->bo->vm);
+
 	snapshot->context_desc = xe_lrc_ggtt_addr(lrc);
 	snapshot->indirect_context_desc = xe_lrc_indirect_ring_ggtt_addr(lrc);
 	snapshot->head = xe_lrc_ring_head(lrc);
...@@ -1653,12 +1656,14 @@ struct xe_lrc_snapshot *xe_lrc_snapshot_capture(struct xe_lrc *lrc)
 void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
 {
 	struct xe_bo *bo;
+	struct xe_vm *vm;
 	struct iosys_map src;
 
 	if (!snapshot)
 		return;
 
 	bo = snapshot->lrc_bo;
+	vm = bo->vm;
 	snapshot->lrc_bo = NULL;
 
 	snapshot->lrc_snapshot = kvmalloc(snapshot->lrc_size, GFP_KERNEL);
...@@ -1678,6 +1683,8 @@ void xe_lrc_snapshot_capture_delayed(struct xe_lrc_snapshot *snapshot)
 	xe_bo_unlock(bo);
 put_bo:
 	xe_bo_put(bo);
+	if (vm)
+		xe_vm_put(vm);
 }
 
 void xe_lrc_snapshot_print(struct xe_lrc_snapshot *snapshot, struct drm_printer *p)
...@@ -1727,8 +1734,14 @@ void xe_lrc_snapshot_free(struct xe_lrc_snapshot *snapshot)
 		return;
 
 	kvfree(snapshot->lrc_snapshot);
-	if (snapshot->lrc_bo)
+	if (snapshot->lrc_bo) {
+		struct xe_vm *vm;
+
+		vm = snapshot->lrc_bo->vm;
 		xe_bo_put(snapshot->lrc_bo);
+		if (vm)
+			xe_vm_put(vm);
+	}
 	kfree(snapshot);
 }
......
...@@ -231,7 +231,7 @@ static void rtp_mark_active(struct xe_device *xe,
 	if (first == last)
 		bitmap_set(ctx->active_entries, first, 1);
 	else
-		bitmap_set(ctx->active_entries, first, last - first + 2);
+		bitmap_set(ctx->active_entries, first, last - first + 1);
 }
 
 /**
......
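A worked example of the off-by-one: for first == 8 and last == 10 the inclusive range spans 10 - 8 + 1 = 3 entries, so three bits must be set; the previous "last - first + 2" set a fourth bit past "last", marking an unrelated RTP entry active.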
...@@ -263,7 +263,7 @@ void xe_sync_entry_cleanup(struct xe_sync_entry *sync)
 	if (sync->fence)
 		dma_fence_put(sync->fence);
 	if (sync->chain_fence)
-		dma_fence_put(&sync->chain_fence->base);
+		dma_fence_chain_free(sync->chain_fence);
 	if (sync->ufence)
 		user_fence_put(sync->ufence);
 }
......
...@@ -150,7 +150,7 @@ static int xe_ttm_vram_mgr_new(struct ttm_resource_manager *man,
 	} while (remaining_size);
 
 	if (place->flags & TTM_PL_FLAG_CONTIGUOUS) {
-		if (!drm_buddy_block_trim(mm, vres->base.size, &vres->blocks))
+		if (!drm_buddy_block_trim(mm, NULL, vres->base.size, &vres->blocks))
 			size = vres->base.size;
 	}
......
...@@ -27,6 +27,7 @@
 #define DRM_BUDDY_CONTIGUOUS_ALLOCATION		BIT(2)
 #define DRM_BUDDY_CLEAR_ALLOCATION		BIT(3)
 #define DRM_BUDDY_CLEARED			BIT(4)
+#define DRM_BUDDY_TRIM_DISABLE			BIT(5)
 
 struct drm_buddy_block {
 #define DRM_BUDDY_HEADER_OFFSET GENMASK_ULL(63, 12)
...@@ -155,6 +156,7 @@ int drm_buddy_alloc_blocks(struct drm_buddy *mm,
 			   unsigned long flags);
 
 int drm_buddy_block_trim(struct drm_buddy *mm,
+			 u64 *start,
 			 u64 new_size,
 			 struct list_head *blocks);
......