Commit e4eabf27 authored by Joonas Lahtinen

Merge tag 'gvt-next-2019-04-16' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2019-04-16

- Refine range of MCHBAR snapshot (Yakui)
- Refine out-of-sync page struct (Yakui)
- Remove unused vGPU sreg (Yan)
- Refine MMIO reg names (Xiaolin)
- Proper handling of sync/async flip (Colin)
- Proper handling of PIPE_CONTROL/MI_FLUSH_DW index mode (Xiaolin)
- EXCC reg mask fix (Colin)
Signed-off-by: Joonas Lahtinen <joonas.lahtinen@linux.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190416084814.GH17995@zhen-hp.sh.intel.com
parents adc674ce 2bfc4975
@@ -1077,6 +1077,7 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 	bool index_mode = false;
 	unsigned int post_sync;
 	int ret = 0;
+	u32 hws_pga, val;

 	post_sync = (cmd_val(s, 1) & PIPE_CONTROL_POST_SYNC_OP_MASK) >> 14;
@@ -1100,6 +1101,15 @@ static int cmd_handler_pipe_control(struct parser_exec_state *s)
 				index_mode = true;
 			ret |= cmd_address_audit(s, gma, sizeof(u64),
 					index_mode);
+			if (ret)
+				return ret;
+			if (index_mode) {
+				hws_pga = s->vgpu->hws_pga[s->ring_id];
+				gma = hws_pga + gma;
+				patch_value(s, cmd_ptr(s, 2), gma);
+				val = cmd_val(s, 1) & (~(1 << 21));
+				patch_value(s, cmd_ptr(s, 1), val);
+			}
 		}
 	}
 }
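For reference, a guest command that takes this index-mode path would look roughly like the sketch below: a PIPE_CONTROL whose post-sync write targets an offset into the hardware status page (bit 21 of dword 1) instead of a GGTT address. The encodings follow i915's GFX_OP_PIPE_CONTROL, PIPE_CONTROL_QW_WRITE and PIPE_CONTROL_STORE_DATA_INDEX definitions; the emit helper itself is hypothetical.

#define GFX_OP_PIPE_CONTROL(len) \
	((0x3 << 29) | (0x3 << 27) | (0x2 << 24) | ((len) - 2))
#define PIPE_CONTROL_QW_WRITE		(1 << 14)	/* post-sync op field */
#define PIPE_CONTROL_STORE_DATA_INDEX	(1 << 21)	/* addr = HWSP offset */

/* Hypothetical guest-side emit helper for a 6-dword PIPE_CONTROL. */
static void emit_pipe_control_hwsp_write(u32 *cs, u32 hwsp_offset, u64 val)
{
	cs[0] = GFX_OP_PIPE_CONTROL(6);
	cs[1] = PIPE_CONTROL_QW_WRITE | PIPE_CONTROL_STORE_DATA_INDEX;
	cs[2] = hwsp_offset;		/* dword 2: what the parser rebases */
	cs[3] = 0;			/* upper address bits */
	cs[4] = lower_32_bits(val);
	cs[5] = upper_32_bits(val);
}

The handler above turns this into a plain GGTT write: it adds the vGPU's hws_pga to dword 2 and clears bit 21 in dword 1, so the post-sync write lands in that vGPU's own status page.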
@@ -1317,8 +1327,14 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 			info->tile_val << 10);
 	}

-	vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
-	intel_vgpu_trigger_virtual_event(vgpu, info->event);
+	if (info->plane == PLANE_PRIMARY)
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(info->pipe))++;
+	if (info->async_flip)
+		intel_vgpu_trigger_virtual_event(vgpu, info->event);
+	else
+		set_bit(info->event, vgpu->irq.flip_done_event[info->pipe]);

 	return 0;
 }
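Synchronous flips are now completed at emulated vblank time rather than immediately. A sketch of the consuming side, reconstructed from the context lines of the emulate_vblank_on_pipe() hunk below (the surrounding function body is an assumption):

static void flush_deferred_flip_done(struct intel_vgpu *vgpu, int pipe)
{
	struct intel_vgpu_irq *irq = &vgpu->irq;
	int event;

	/* drain events parked by gen8_update_plane_mmio_from_mi_display_flip() */
	for_each_set_bit(event, irq->flip_done_event[pipe], INTEL_GVT_EVENT_MAX) {
		clear_bit(event, irq->flip_done_event[pipe]);
		if (!pipe_is_enabled(vgpu, pipe))
			continue;
		intel_vgpu_trigger_virtual_event(vgpu, event);
	}
}

This way an async flip still signals completion immediately, while a sync flip's flip-done event is injected only when the virtual vblank fires, matching real hardware ordering.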
@@ -1563,6 +1579,7 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 	unsigned long gma;
 	bool index_mode = false;
 	int ret = 0;
+	u32 hws_pga, val;

 	/* Check post-sync and ppgtt bit */
 	if (((cmd_val(s, 0) >> 14) & 0x3) && (cmd_val(s, 1) & (1 << 2))) {
@@ -1573,6 +1590,15 @@ static int cmd_handler_mi_flush_dw(struct parser_exec_state *s)
 		if (cmd_val(s, 0) & (1 << 21))
 			index_mode = true;
 		ret = cmd_address_audit(s, gma, sizeof(u64), index_mode);
+		if (ret)
+			return ret;
+		if (index_mode) {
+			hws_pga = s->vgpu->hws_pga[s->ring_id];
+			gma = hws_pga + gma;
+			patch_value(s, cmd_ptr(s, 1), gma);
+			val = cmd_val(s, 0) & (~(1 << 21));
+			patch_value(s, cmd_ptr(s, 0), val);
+		}
 	}
 	/* Check notify bit */
 	if ((cmd_val(s, 0) & (1 << 8)))
......
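MI_FLUSH_DW is the same idea shifted by one dword: the index-mode flag sits in dword 0 (hence patch_value() on cmd_ptr(s, 0)) and the address in dword 1. A hedged guest-side sketch, using i915's MI_FLUSH_DW_OP_STOREDW and MI_FLUSH_DW_STORE_INDEX encodings with the length bits simplified:

#define MI_FLUSH_DW		((0x26 << 23) | 2)	/* 4-dword form */
#define MI_FLUSH_DW_OP_STOREDW	(1 << 14)		/* post-sync: store dword */
#define MI_FLUSH_DW_STORE_INDEX	(1 << 21)		/* addr = HWSP offset */

/* Hypothetical emit helper; the parser rebases cs[1] and clears bit 21. */
static void emit_flush_dw_hwsp_write(u32 *cs, u32 hwsp_offset, u32 value)
{
	cs[0] = MI_FLUSH_DW | MI_FLUSH_DW_OP_STOREDW | MI_FLUSH_DW_STORE_INDEX;
	cs[1] = hwsp_offset;	/* lower address dword */
	cs[2] = 0;		/* upper address dword */
	cs[3] = value;
}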
@@ -407,7 +407,6 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 		if (!pipe_is_enabled(vgpu, pipe))
 			continue;

-		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
......
@@ -2489,6 +2489,7 @@ static void clean_spt_oos(struct intel_gvt *gvt)
 	list_for_each_safe(pos, n, &gtt->oos_page_free_list_head) {
 		oos_page = container_of(pos, struct intel_vgpu_oos_page, list);
 		list_del(&oos_page->list);
+		free_page((unsigned long)oos_page->mem);
 		kfree(oos_page);
 	}
 }
@@ -2509,6 +2510,12 @@ static int setup_spt_oos(struct intel_gvt *gvt)
 			ret = -ENOMEM;
 			goto fail;
 		}
+		oos_page->mem = (void *)__get_free_pages(GFP_KERNEL, 0);
+		if (!oos_page->mem) {
+			ret = -ENOMEM;
+			kfree(oos_page);
+			goto fail;
+		}
 		INIT_LIST_HEAD(&oos_page->list);
 		INIT_LIST_HEAD(&oos_page->vm_list);
......
@@ -221,7 +221,7 @@ struct intel_vgpu_oos_page {
 	struct list_head list;
 	struct list_head vm_list;
 	int id;
-	unsigned char mem[I915_GTT_PAGE_SIZE];
+	void *mem;
 };

 #define GTT_ENTRY_NUM_IN_ONE_PAGE 512
......
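Taken together, the gtt.c and gtt.h hunks above replace a page-sized array embedded in intel_vgpu_oos_page with a pointer to a separately allocated page, so the tracking struct stays small and the backing store is page-aligned. A minimal sketch of the same alloc/free pairing, with hypothetical names:

#include <linux/gfp.h>
#include <linux/list.h>
#include <linux/slab.h>

struct tracked_page {
	struct list_head list;
	void *mem;		/* exactly one page, page-aligned */
};

static struct tracked_page *tracked_page_alloc(void)
{
	struct tracked_page *p = kzalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	p->mem = (void *)__get_free_pages(GFP_KERNEL, 0); /* order 0 = 1 page */
	if (!p->mem) {
		kfree(p);	/* unwind the partial allocation */
		return NULL;
	}
	INIT_LIST_HEAD(&p->list);
	return p;
}

static void tracked_page_free(struct tracked_page *p)
{
	free_page((unsigned long)p->mem);
	kfree(p);
}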
@@ -94,7 +94,6 @@ struct intel_vgpu_fence {
 struct intel_vgpu_mmio {
 	void *vreg;
-	void *sreg;
 };

 #define INTEL_GVT_MAX_BAR_NUM 4
@@ -111,11 +110,9 @@ struct intel_vgpu_cfg_space {
 #define vgpu_cfg_space(vgpu) ((vgpu)->cfg_space.virtual_cfg_space)

-#define INTEL_GVT_MAX_PIPE 4
-
 struct intel_vgpu_irq {
 	bool irq_warn_once[INTEL_GVT_EVENT_MAX];
-	DECLARE_BITMAP(flip_done_event[INTEL_GVT_MAX_PIPE],
+	DECLARE_BITMAP(flip_done_event[I915_MAX_PIPES],
 		       INTEL_GVT_EVENT_MAX);
 };
@@ -449,10 +446,6 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
 #define vgpu_vreg64(vgpu, offset) \
 	(*(u64 *)(vgpu->mmio.vreg + (offset)))
-#define vgpu_sreg_t(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
-#define vgpu_sreg(vgpu, offset) \
-	(*(u32 *)(vgpu->mmio.sreg + (offset)))

 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
......
@@ -239,7 +239,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 	if (dmlr) {
 		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
-		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);

 		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
@@ -280,7 +279,6 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		 * touched
 		 */
 		memcpy(vgpu->mmio.vreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
-		memcpy(vgpu->mmio.sreg, mmio, GVT_GEN8_MMIO_RESET_OFFSET);
 	}
 }
@@ -296,12 +294,10 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
 {
 	const struct intel_gvt_device_info *info = &vgpu->gvt->device_info;

-	vgpu->mmio.vreg = vzalloc(array_size(info->mmio_size, 2));
+	vgpu->mmio.vreg = vzalloc(info->mmio_size);
 	if (!vgpu->mmio.vreg)
 		return -ENOMEM;

-	vgpu->mmio.sreg = vgpu->mmio.vreg + info->mmio_size;
-
 	intel_vgpu_reset_mmio(vgpu, true);

 	return 0;
@@ -315,5 +311,5 @@ int intel_vgpu_init_mmio(struct intel_vgpu *vgpu)
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu)
 {
 	vfree(vgpu->mmio.vreg);
-	vgpu->mmio.vreg = vgpu->mmio.sreg = NULL;
+	vgpu->mmio.vreg = NULL;
 }
@@ -68,7 +68,7 @@ static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
 	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
-	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */

 	{RCS0, INVALID_MMIO_REG, 0, false } /* Terminated */
 };
@@ -119,7 +119,7 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{BCS0, RING_MI_MODE(BLT_RING_BASE), 0xffff, false}, /* 0x2209c */
 	{BCS0, RING_INSTPM(BLT_RING_BASE), 0xffff, false}, /* 0x220c0 */
 	{BCS0, RING_HWSTAM(BLT_RING_BASE), 0x0, false}, /* 0x22098 */
-	{BCS0, RING_EXCC(BLT_RING_BASE), 0x0, false}, /* 0x22028 */
+	{BCS0, RING_EXCC(BLT_RING_BASE), 0xffff, false}, /* 0x22028 */
 	{VCS1, RING_EXCC(GEN8_BSD2_RING_BASE), 0xffff, false}, /* 0x1c028 */
......
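The EXCC mask change matters because EXCC is a masked register: the upper 16 bits of a write select which of the lower 16 bits take effect. With a mask of 0x0, the save/restore write performed on vGPU context switch enables no bits and is silently dropped; 0xffff makes the whole lower half stick. A sketch of the masked-write convention (helper name hypothetical; the GVT switch code applies the same mask << 16 pattern):

static u32 masked_mmio_write_value(u32 val, u32 mask)
{
	/* upper half: write-enable bits; lower half: data bits */
	return (mask << 16) | (val & mask);
}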
@@ -60,6 +60,37 @@
 #define _REG_701C0(pipe, plane) (0x701c0 + pipe * 0x1000 + (plane - 1) * 0x100)
 #define _REG_701C4(pipe, plane) (0x701c4 + pipe * 0x1000 + (plane - 1) * 0x100)
+
+#define SKL_FLIP_EVENT(pipe, plane) (PRIMARY_A_FLIP_DONE + (plane) * 3 + (pipe))
+
+#define PLANE_CTL_ASYNC_FLIP		(1 << 9)
+#define REG50080_FLIP_TYPE_MASK	0x3
+#define REG50080_FLIP_TYPE_ASYNC	0x1
+
+#define REG_50080(_pipe, _plane) ({ \
+	typeof(_pipe) (p) = (_pipe); \
+	typeof(_plane) (q) = (_plane); \
+	(((p) == PIPE_A) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50080)) : \
+		(_MMIO(0x50090))) : \
+	(((p) == PIPE_B) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x50088)) : \
+		(_MMIO(0x50098))) : \
+	(((p) == PIPE_C) ? (((q) == PLANE_PRIMARY) ? (_MMIO(0x5008C)) : \
+		(_MMIO(0x5009C))) : \
+		(_MMIO(0x50080))))); })
+
+#define REG_50080_TO_PIPE(_reg) ({ \
+	typeof(_reg) (reg) = (_reg); \
+	(((reg) == 0x50080 || (reg) == 0x50090) ? (PIPE_A) : \
+	(((reg) == 0x50088 || (reg) == 0x50098) ? (PIPE_B) : \
+	(((reg) == 0x5008C || (reg) == 0x5009C) ? (PIPE_C) : \
+		(INVALID_PIPE)))); })
+
+#define REG_50080_TO_PLANE(_reg) ({ \
+	typeof(_reg) (reg) = (_reg); \
+	(((reg) == 0x50080 || (reg) == 0x50088 || (reg) == 0x5008C) ? \
+		(PLANE_PRIMARY) : \
+	(((reg) == 0x50090 || (reg) == 0x50098 || (reg) == 0x5009C) ? \
+		(PLANE_SPRITE0) : (I915_MAX_PLANES))); })

 #define GFX_MODE_BIT_SET_IN_MASK(val, bit) \
 	((((bit) & 0xffff0000) == 0) && !!((val) & (((bit) << 16))))
@@ -95,4 +126,7 @@
 #define RING_GFX_MODE(base) _MMIO((base) + 0x29c)
 #define VF_GUARDBAND _MMIO(0x83a4)

+/* define the effective range of MCHBAR register on Sandybridge+ */
+#define MCHBAR_MIRROR_REG_BASE _MMIO(MCHBAR_MIRROR_BASE_SNB + 0x4000)
 #endif
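A usage sketch for the new 0x50080-family helpers above; the offset/value pair would come from a trapped MMIO write, and the surrounding handler is hypothetical. The results follow directly from the macro definitions.

static void handle_reg_50080_write(struct intel_vgpu *vgpu, u32 offset, u32 val)
{
	enum pipe pipe = REG_50080_TO_PIPE(offset);	  /* 0x50098 -> PIPE_B */
	enum plane_id plane = REG_50080_TO_PLANE(offset); /* 0x50098 -> PLANE_SPRITE0 */
	int event = SKL_FLIP_EVENT(pipe, plane);	  /* -> SPRITE_B_FLIP_DONE */
	bool async = (val & REG50080_FLIP_TYPE_MASK) == REG50080_FLIP_TYPE_ASYNC;

	/* async: inject flip-done now; sync: park it until the next vblank */
	if (async)
		intel_vgpu_trigger_virtual_event(vgpu, event);
	else
		set_bit(event, vgpu->irq.flip_done_event[pipe]);
}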