Commit 1bafeaf2 authored by Dave Airlie

Merge tag 'drm-xe-fixes-2024-04-11' of https://gitlab.freedesktop.org/drm/xe/kernel into drm-fixes

- Fix double display mutex initializations
- Fix u32 -> u64 implicit conversions
- Fix RING_CONTEXT_CONTROL not marked as masked
Signed-off-by: Dave Airlie <airlied@redhat.com>

From: Lucas De Marchi <lucas.demarchi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/ewvvtgcb2gonxvccws6nt6fqswoyfp4g43t5ex24vpqwtrxdzm@hgjoz5uirmxx
parents 1b24b3cd f76646c8
@@ -108,11 +108,6 @@ int xe_display_create(struct xe_device *xe)
 	xe->display.hotplug.dp_wq = alloc_ordered_workqueue("xe-dp", 0);
 
 	drmm_mutex_init(&xe->drm, &xe->sb_lock);
-	drmm_mutex_init(&xe->drm, &xe->display.backlight.lock);
-	drmm_mutex_init(&xe->drm, &xe->display.audio.mutex);
-	drmm_mutex_init(&xe->drm, &xe->display.wm.wm_mutex);
-	drmm_mutex_init(&xe->drm, &xe->display.pps.mutex);
-	drmm_mutex_init(&xe->drm, &xe->display.hdcp.hdcp_mutex);
 	xe->enabled_irq_mask = ~0;
 
 	err = drmm_add_action_or_reset(&xe->drm, display_destroy, NULL);
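
These display mutexes were being initialized a second time here; the common
display init path presumably already sets them up, so the duplicate
drmm_mutex_init() calls are dropped. As a toy illustration of why a managed
double init is harmful (plain C, not the drm code; every name below is made
up), each init queues a cleanup action, so a repeated init both resets live
state and queues a double destroy:

#include <stdio.h>

#define MAX_ACTIONS 8

struct toy_mutex { int initialized; };

static struct toy_mutex *cleanup_queue[MAX_ACTIONS];
static int num_actions;

static void toy_managed_mutex_init(struct toy_mutex *m)
{
	if (m->initialized)
		fprintf(stderr, "double init of %p!\n", (void *)m);
	m->initialized = 1;
	cleanup_queue[num_actions++] = m;	/* queue a destroy action */
}

int main(void)
{
	struct toy_mutex a = {0};

	toy_managed_mutex_init(&a);
	toy_managed_mutex_init(&a);	/* second init: warns, queues twice */
	printf("%d destroy actions queued for one mutex\n", num_actions);
	return 0;
}
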
@@ -125,7 +125,7 @@
 #define RING_EXECLIST_STATUS_LO(base)		XE_REG((base) + 0x234)
 #define RING_EXECLIST_STATUS_HI(base)		XE_REG((base) + 0x234 + 4)
-#define RING_CONTEXT_CONTROL(base)		XE_REG((base) + 0x244)
+#define RING_CONTEXT_CONTROL(base)		XE_REG((base) + 0x244, XE_REG_OPTION_MASKED)
 #define	  CTX_CTRL_INHIBIT_SYN_CTX_SWITCH	REG_BIT(3)
 #define	  CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT	REG_BIT(0)

@@ -290,7 +290,7 @@ xe_hwmon_power1_max_interval_show(struct device *dev, struct device_attribute *a
 	 * As y can be < 2, we compute tau4 = (4 | x) << y
 	 * and then add 2 when doing the final right shift to account for units
 	 */
-	tau4 = ((1 << x_w) | x) << y;
+	tau4 = (u64)((1 << x_w) | x) << y;
 	/* val in hwmon interface units (millisec) */
 	out = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);

@@ -330,7 +330,7 @@ xe_hwmon_power1_max_interval_store(struct device *dev, struct device_attribute *
 	r = FIELD_PREP(PKG_MAX_WIN, PKG_MAX_WIN_DEFAULT);
 	x = REG_FIELD_GET(PKG_MAX_WIN_X, r);
 	y = REG_FIELD_GET(PKG_MAX_WIN_Y, r);
-	tau4 = ((1 << x_w) | x) << y;
+	tau4 = (u64)((1 << x_w) | x) << y;
 	max_win = mul_u64_u32_shr(tau4, SF_TIME, hwmon->scl_shift_time + x_w);
 	if (val > max_win)
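
The (u64) cast matters because tau4 is u64 but the right-hand side was
evaluated entirely in 32-bit arithmetic, so the result was only widened
after the shift had already dropped the high bits. A standalone sketch of
the failure mode (the field values below are made up for demonstration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t x = 1, x_w = 2, y = 30;	/* made-up register fields */
	uint32_t mant = (1u << x_w) | x;	/* 5, like tau4's mantissa */

	uint64_t narrow = mant << y;		/* 32-bit shift wraps: 0x40000000 */
	uint64_t wide = (uint64_t)mant << y;	/* widened first: 0x140000000 */

	printf("narrow=0x%llx wide=0x%llx\n",
	       (unsigned long long)narrow, (unsigned long long)wide);
	return 0;
}
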
@@ -525,9 +525,8 @@ static const u8 *reg_offsets(struct xe_device *xe, enum xe_engine_class class)
 static void set_context_control(u32 *regs, struct xe_hw_engine *hwe)
 {
-	regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH) |
-				    _MASKED_BIT_DISABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT) |
-				    CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT;
+	regs[CTX_CONTEXT_CONTROL] = _MASKED_BIT_ENABLE(CTX_CTRL_INHIBIT_SYN_CTX_SWITCH |
+						       CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 
 	/* TODO: Timestamp */
 }
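
A masked register carries a write-enable mask in bits 31:16 that selects
which of bits 15:0 a write actually changes, which is why
RING_CONTEXT_CONTROL is now declared with XE_REG_OPTION_MASKED above. The
rewrite here is a simplification: expanding the helpers shows both forms
encode the same word. A userspace sketch with stand-in macros modeled on
the kernel's _MASKED_FIELD helpers (the definitions here are illustrative):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)			(1u << (n))
#define MASKED_FIELD(m, v)	(((m) << 16) | (v))
#define MASKED_BIT_ENABLE(b)	MASKED_FIELD((b), (b))
#define MASKED_BIT_DISABLE(b)	MASKED_FIELD((b), 0)

#define INHIBIT_SYN_CTX_SWITCH		BIT(3)
#define ENGINE_CTX_RESTORE_INHIBIT	BIT(0)

int main(void)
{
	uint32_t old_val = MASKED_BIT_ENABLE(INHIBIT_SYN_CTX_SWITCH) |
			   MASKED_BIT_DISABLE(ENGINE_CTX_RESTORE_INHIBIT) |
			   ENGINE_CTX_RESTORE_INHIBIT;
	uint32_t new_val = MASKED_BIT_ENABLE(INHIBIT_SYN_CTX_SWITCH |
					     ENGINE_CTX_RESTORE_INHIBIT);

	/* both print 0x00090009: mask and value bits 3 and 0 set */
	printf("old=0x%08x new=0x%08x\n", old_val, new_val);
	return 0;
}
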
@@ -227,7 +227,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 		if (vm->flags & XE_VM_FLAG_64K && level == 1)
 			flags = XE_PDE_64K;
 
-		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (level - 1) *
+		entry = vm->pt_ops->pde_encode_bo(bo, map_ofs + (u64)(level - 1) *
						  XE_PAGE_SIZE, pat_index);
 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE * level, u64,
 			  entry | flags);

@@ -235,7 +235,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 	/* Write PDE's that point to our BO. */
 	for (i = 0; i < num_entries - num_level; i++) {
-		entry = vm->pt_ops->pde_encode_bo(bo, i * XE_PAGE_SIZE,
+		entry = vm->pt_ops->pde_encode_bo(bo, (u64)i * XE_PAGE_SIZE,
						  pat_index);
 
 		xe_map_wr(xe, &bo->vmap, map_ofs + XE_PAGE_SIZE +

@@ -291,7 +291,7 @@ static int xe_migrate_prepare_vm(struct xe_tile *tile, struct xe_migrate *m,
 #define VM_SA_UPDATE_UNIT_SIZE		(XE_PAGE_SIZE / NUM_VMUSA_UNIT_PER_PAGE)
 #define NUM_VMUSA_WRITES_PER_UNIT	(VM_SA_UPDATE_UNIT_SIZE / sizeof(u64))
 	drm_suballoc_manager_init(&m->vm_update_sa,
-				  (map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
+				  (size_t)(map_ofs / XE_PAGE_SIZE - NUM_KERNEL_PDE) *
				  NUM_VMUSA_UNIT_PER_PAGE, 0);
 
 	m->pt_bo = bo;

@@ -490,7 +490,7 @@ static void emit_pte(struct xe_migrate *m,
 	struct xe_vm *vm = m->q->vm;
 	u16 pat_index;
 	u32 ptes;
-	u64 ofs = at_pt * XE_PAGE_SIZE;
+	u64 ofs = (u64)at_pt * XE_PAGE_SIZE;
 	u64 cur_ofs;
 
 	/* Indirect access needs compression enabled uncached PAT index */
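
Same class of u32 -> u64 bug as in the hwmon change, but via multiplication:
with both operands 32-bit, the product wraps before being stored in (or
passed as) a u64, so one operand is widened first. A standalone sketch (the
index is exaggerated here to force the wrap; the constant merely mirrors
XE_PAGE_SIZE):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SZ 4096u	/* stand-in for XE_PAGE_SIZE */

int main(void)
{
	uint32_t at_pt = 0x200000;	/* exaggerated page-table index */

	uint64_t narrow = at_pt * PAGE_SZ;		/* 32-bit product wraps to 0 */
	uint64_t wide = (uint64_t)at_pt * PAGE_SZ;	/* 0x200000000 as intended */

	printf("narrow=0x%llx wide=0x%llx\n",
	       (unsigned long long)narrow, (unsigned long long)wide);
	return 0;
}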