Commit ff00d85b authored by Rodrigo Vivi

Merge tag 'gvt-next-2019-01-24' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2019-01-24

- split kvmgt as separate module (Zhenyu)
- Coffeelake GVT support (Fred)
- const treatment and conversion to kernel types (Jani)
Signed-off-by: Rodrigo Vivi <vivijim@rdvivi-cozumel.jf.intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20190124054048.GO7203@zhen-hp.sh.intel.com
parents 0cdc1d07 2e679d48
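For context, the kvmgt split below inverts the old loading scheme: rather than the GVT-g core requesting an MPT module at i915 load time (the removed intel_gvt_init_host()), a hypervisor backend is now built as its own module and registers itself with the core. A minimal sketch of such a backend under the new API, modeled on the kvmgt_init()/kvmgt_exit() hunks in this merge; the example_* names are hypothetical and the callback table is abbreviated:

/* Hypothetical MPT backend registering with the GVT-g core. Assumes the
 * gvt headers that declare struct intel_gvt_mpt, enum hypervisor_type and
 * the intel_gvt_{register,unregister}_hypervisor() API added by this merge.
 */
#include <linux/module.h>
#include "gvt.h"

static int example_host_init(struct device *dev, void *gvt, const void *ops)
{
	/* a real backend would set up its mdev/vfio plumbing here */
	return 0;
}

static void example_host_exit(struct device *dev)
{
}

static struct intel_gvt_mpt example_mpt = {
	.type = INTEL_GVT_HYPERVISOR_KVM, /* or INTEL_GVT_HYPERVISOR_XEN */
	.host_init = example_host_init,
	.host_exit = example_host_exit,
	/* ...remaining MPT callbacks from hypercall.h... */
};

static int __init example_init(void)
{
	/* The core validates .type, pins itself with try_module_get() and
	 * calls host_init(); see intel_gvt_register_hypervisor() below.
	 */
	if (intel_gvt_register_hypervisor(&example_mpt) < 0)
		return -ENODEV;
	return 0;
}

static void __exit example_exit(void)
{
	/* Calls host_exit() and drops the core's self-reference. */
	intel_gvt_unregister_hypervisor();
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");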
@@ -200,3 +200,4 @@ endif
 i915-y += intel_lpe_audio.o
 obj-$(CONFIG_DRM_I915) += i915.o
+obj-$(CONFIG_DRM_I915_GVT_KVMGT) += gvt/kvmgt.o
@@ -7,4 +7,3 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
 ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
 i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
-obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
@@ -55,10 +55,10 @@ struct sub_op_bits {
 	int low;
 };
 struct decode_info {
-	char *name;
+	const char *name;
 	int op_len;
 	int nr_sub_op;
-	struct sub_op_bits *sub_op;
+	const struct sub_op_bits *sub_op;
 };

 #define MAX_CMD_BUDGET 0x7fffffff
@@ -375,7 +375,7 @@ typedef int (*parser_cmd_handler)(struct parser_exec_state *s);
 #define ADDR_FIX_5(x1, x2, x3, x4, x5) (ADDR_FIX_1(x1) | ADDR_FIX_4(x2, x3, x4, x5))

 struct cmd_info {
-	char *name;
+	const char *name;
 	u32 opcode;
 #define F_LEN_MASK (1U<<0)
@@ -399,10 +399,10 @@ struct cmd_info {
 #define R_VECS (1 << VECS)
 #define R_ALL (R_RCS | R_VCS | R_BCS | R_VECS)
 	/* rings that support this cmd: BLT/RCS/VCS/VECS */
-	uint16_t rings;
+	u16 rings;
 	/* devices that support this cmd: SNB/IVB/HSW/... */
-	uint16_t devices;
+	u16 devices;
 	/* which DWords are address that need fix up.
 	 * bit 0 means a 32-bit non address operand in command
@@ -412,20 +412,20 @@ struct cmd_info {
 	 * No matter the address length, each address only takes
 	 * one bit in the bitmap.
 	 */
-	uint16_t addr_bitmap;
+	u16 addr_bitmap;
 	/* flag == F_LEN_CONST : command length
 	 * flag == F_LEN_VAR : length bias bits
 	 * Note: length is in DWord
 	 */
-	uint8_t len;
+	u8 len;
 	parser_cmd_handler handler;
 };

 struct cmd_entry {
 	struct hlist_node hlist;
-	struct cmd_info *info;
+	const struct cmd_info *info;
 };

 enum {
@@ -474,7 +474,7 @@ struct parser_exec_state {
 	int saved_buf_addr_type;
 	bool is_ctx_wa;
-	struct cmd_info *info;
+	const struct cmd_info *info;
 	struct intel_vgpu_workload *workload;
 };
@@ -485,12 +485,12 @@ struct parser_exec_state {
 static unsigned long bypass_scan_mask = 0;

 /* ring ALL, type = 0 */
-static struct sub_op_bits sub_op_mi[] = {
+static const struct sub_op_bits sub_op_mi[] = {
 	{31, 29},
 	{28, 23},
 };

-static struct decode_info decode_info_mi = {
+static const struct decode_info decode_info_mi = {
 	"MI",
 	OP_LEN_MI,
 	ARRAY_SIZE(sub_op_mi),
@@ -498,12 +498,12 @@ static struct decode_info decode_info_mi = {
 };

 /* ring RCS, command type 2 */
-static struct sub_op_bits sub_op_2d[] = {
+static const struct sub_op_bits sub_op_2d[] = {
 	{31, 29},
 	{28, 22},
 };

-static struct decode_info decode_info_2d = {
+static const struct decode_info decode_info_2d = {
 	"2D",
 	OP_LEN_2D,
 	ARRAY_SIZE(sub_op_2d),
@@ -511,14 +511,14 @@ static struct decode_info decode_info_2d = {
 };

 /* ring RCS, command type 3 */
-static struct sub_op_bits sub_op_3d_media[] = {
+static const struct sub_op_bits sub_op_3d_media[] = {
 	{31, 29},
 	{28, 27},
 	{26, 24},
 	{23, 16},
 };

-static struct decode_info decode_info_3d_media = {
+static const struct decode_info decode_info_3d_media = {
 	"3D_Media",
 	OP_LEN_3D_MEDIA,
 	ARRAY_SIZE(sub_op_3d_media),
@@ -526,7 +526,7 @@ static struct decode_info decode_info_3d_media = {
 };

 /* ring VCS, command type 3 */
-static struct sub_op_bits sub_op_mfx_vc[] = {
+static const struct sub_op_bits sub_op_mfx_vc[] = {
 	{31, 29},
 	{28, 27},
 	{26, 24},
@@ -534,7 +534,7 @@ static struct sub_op_bits sub_op_mfx_vc[] = {
 	{20, 16},
 };

-static struct decode_info decode_info_mfx_vc = {
+static const struct decode_info decode_info_mfx_vc = {
 	"MFX_VC",
 	OP_LEN_MFX_VC,
 	ARRAY_SIZE(sub_op_mfx_vc),
@@ -542,7 +542,7 @@ static struct decode_info decode_info_mfx_vc = {
 };

 /* ring VECS, command type 3 */
-static struct sub_op_bits sub_op_vebox[] = {
+static const struct sub_op_bits sub_op_vebox[] = {
 	{31, 29},
 	{28, 27},
 	{26, 24},
@@ -550,14 +550,14 @@ static struct sub_op_bits sub_op_vebox[] = {
 	{20, 16},
 };

-static struct decode_info decode_info_vebox = {
+static const struct decode_info decode_info_vebox = {
 	"VEBOX",
 	OP_LEN_VEBOX,
 	ARRAY_SIZE(sub_op_vebox),
 	sub_op_vebox,
 };

-static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
+static const struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 	[RCS] = {
 		&decode_info_mi,
 		NULL,
@@ -616,7 +616,7 @@ static struct decode_info *ring_decode_info[I915_NUM_ENGINES][8] = {
 static inline u32 get_opcode(u32 cmd, int ring_id)
 {
-	struct decode_info *d_info;
+	const struct decode_info *d_info;
 	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
 	if (d_info == NULL)
@@ -625,7 +625,7 @@ static inline u32 get_opcode(u32 cmd, int ring_id)
 	return cmd >> (32 - d_info->op_len);
 }

-static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
+static inline const struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
 		unsigned int opcode, int ring_id)
 {
 	struct cmd_entry *e;
@@ -638,7 +638,7 @@ static inline struct cmd_info *find_cmd_entry(struct intel_gvt *gvt,
 	return NULL;
 }

-static inline struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
+static inline const struct cmd_info *get_cmd_info(struct intel_gvt *gvt,
 		u32 cmd, int ring_id)
 {
 	u32 opcode;
@@ -657,7 +657,7 @@ static inline u32 sub_op_val(u32 cmd, u32 hi, u32 low)
 static inline void print_opcode(u32 cmd, int ring_id)
 {
-	struct decode_info *d_info;
+	const struct decode_info *d_info;
 	int i;
 	d_info = ring_decode_info[ring_id][CMD_TYPE(cmd)];
@@ -776,7 +776,7 @@ static inline int ip_gma_advance(struct parser_exec_state *s,
 	return 0;
 }

-static inline int get_cmd_length(struct cmd_info *info, u32 cmd)
+static inline int get_cmd_length(const struct cmd_info *info, u32 cmd)
 {
 	if ((info->flag & F_LEN_MASK) == F_LEN_CONST)
 		return info->len;
@@ -901,7 +901,8 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 	 * It's good enough to support initializing mmio by lri command in
 	 * vgpu inhibit context on KBL.
 	 */
-	if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+	if ((IS_KABYLAKE(s->vgpu->gvt->dev_priv)
+		|| IS_COFFEELAKE(s->vgpu->gvt->dev_priv)) &&
 	    intel_gvt_mmio_is_in_ctx(gvt, offset) &&
 	    !strncmp(cmd, "lri", 3)) {
 		intel_gvt_hypervisor_read_gpa(s->vgpu,
@@ -1280,9 +1281,7 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 	if (!info->async_flip)
 		return 0;

-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
 		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
@@ -1310,9 +1309,7 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
 		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
@@ -1336,9 +1333,7 @@ static int decode_mi_display_flip(struct parser_exec_state *s,
 	if (IS_BROADWELL(dev_priv))
 		return gen8_decode_mi_display_flip(s, info);
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 9)
 		return skl_decode_mi_display_flip(s, info);

 	return -ENODEV;
@@ -1643,8 +1638,8 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 static int find_bb_size(struct parser_exec_state *s, unsigned long *bb_size)
 {
 	unsigned long gma = 0;
-	struct cmd_info *info;
-	uint32_t cmd_len = 0;
+	const struct cmd_info *info;
+	u32 cmd_len = 0;
 	bool bb_end = false;
 	struct intel_vgpu *vgpu = s->vgpu;
 	u32 cmd;
@@ -1842,7 +1837,7 @@ static int cmd_handler_mi_batch_buffer_start(struct parser_exec_state *s)
 static int mi_noop_index;

-static struct cmd_info cmd_info[] = {
+static const struct cmd_info cmd_info[] = {
 	{"MI_NOOP", OP_MI_NOOP, F_LEN_CONST, R_ALL, D_ALL, 0, 1, NULL},
 	{"MI_SET_PREDICATE", OP_MI_SET_PREDICATE, F_LEN_CONST, R_ALL, D_ALL,
@@ -2521,7 +2516,7 @@ static void add_cmd_entry(struct intel_gvt *gvt, struct cmd_entry *e)
 static int cmd_parser_exec(struct parser_exec_state *s)
 {
 	struct intel_vgpu *vgpu = s->vgpu;
-	struct cmd_info *info;
+	const struct cmd_info *info;
 	u32 cmd;
 	int ret = 0;
@@ -2683,7 +2678,7 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 			I915_GTT_PAGE_SIZE)))
 		return -EINVAL;
-	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
+	ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(u32);
 	ring_size = round_up(wa_ctx->indirect_ctx.size + CACHELINE_BYTES,
 			PAGE_SIZE);
 	gma_head = wa_ctx->indirect_ctx.guest_gma;
@@ -2850,7 +2845,7 @@ static int shadow_indirect_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 static int combine_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 {
-	uint32_t per_ctx_start[CACHELINE_DWORDS] = {0};
+	u32 per_ctx_start[CACHELINE_DWORDS] = {0};
 	unsigned char *bb_start_sva;
 	if (!wa_ctx->per_ctx.valid)
@@ -2895,10 +2890,10 @@ int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
 	return 0;
 }

-static struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
+static const struct cmd_info *find_cmd_entry_any_ring(struct intel_gvt *gvt,
 		unsigned int opcode, unsigned long rings)
 {
-	struct cmd_info *info = NULL;
+	const struct cmd_info *info = NULL;
 	unsigned int ring;
 	for_each_set_bit(ring, &rings, I915_NUM_ENGINES) {
@@ -2913,7 +2908,7 @@ static int init_cmd_table(struct intel_gvt *gvt)
 {
 	int i;
 	struct cmd_entry *e;
-	struct cmd_info *info;
+	const struct cmd_info *info;
 	unsigned int gen_type;
 	gen_type = intel_gvt_get_device_type(gvt);
...
@@ -198,7 +198,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv)) {
 		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 				SDE_PORTE_HOTPLUG_SPT);
 		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
@@ -273,7 +274,8 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 	}
-	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
+	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv)) &&
 	    intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
 		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
 	}
@@ -453,7 +455,8 @@ void intel_vgpu_clean_display(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv))
 		clean_virtual_dp_monitor(vgpu, PORT_D);
 	else
 		clean_virtual_dp_monitor(vgpu, PORT_B);
@@ -476,7 +479,8 @@ int intel_vgpu_init_display(struct intel_vgpu *vgpu, u64 resolution)
 	intel_vgpu_init_i2c_edid(vgpu);
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
+	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv) ||
+	    IS_COFFEELAKE(dev_priv))
 		return setup_virtual_dp_monitor(vgpu, PORT_D, GVT_DP_D,
 						resolution);
 	else
...
@@ -29,7 +29,6 @@
  */
 #include <linux/dma-buf.h>
-#include <drm/drmP.h>
 #include <linux/vfio.h>
 #include "i915_drv.h"
@@ -164,9 +163,7 @@ static struct drm_i915_gem_object *vgpu_create_gem(struct drm_device *dev,
 	obj->read_domains = I915_GEM_DOMAIN_GTT;
 	obj->write_domain = 0;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		unsigned int tiling_mode = 0;
 		unsigned int stride = 0;
...
@@ -77,16 +77,32 @@ static unsigned char edid_get_byte(struct intel_vgpu *vgpu)
 	return chr;
 }

+static inline int cnp_get_port_from_gmbus0(u32 gmbus0)
+{
+	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
+	int port = -EINVAL;
+
+	if (port_select == GMBUS_PIN_1_BXT)
+		port = PORT_B;
+	else if (port_select == GMBUS_PIN_2_BXT)
+		port = PORT_C;
+	else if (port_select == GMBUS_PIN_3_BXT)
+		port = PORT_D;
+	else if (port_select == GMBUS_PIN_4_CNP)
+		port = PORT_E;
+	return port;
+}
+
 static inline int bxt_get_port_from_gmbus0(u32 gmbus0)
 {
 	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
 	int port = -EINVAL;

-	if (port_select == 1)
+	if (port_select == GMBUS_PIN_1_BXT)
 		port = PORT_B;
-	else if (port_select == 2)
+	else if (port_select == GMBUS_PIN_2_BXT)
 		port = PORT_C;
-	else if (port_select == 3)
+	else if (port_select == GMBUS_PIN_3_BXT)
 		port = PORT_D;
 	return port;
 }
@@ -96,13 +112,13 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
 	int port_select = gmbus0 & _GMBUS_PIN_SEL_MASK;
 	int port = -EINVAL;

-	if (port_select == 2)
+	if (port_select == GMBUS_PIN_VGADDC)
 		port = PORT_E;
-	else if (port_select == 4)
+	else if (port_select == GMBUS_PIN_DPC)
 		port = PORT_C;
-	else if (port_select == 5)
+	else if (port_select == GMBUS_PIN_DPB)
 		port = PORT_B;
-	else if (port_select == 6)
+	else if (port_select == GMBUS_PIN_DPD)
 		port = PORT_D;
 	return port;
 }
@@ -133,6 +149,8 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 	if (IS_BROXTON(dev_priv))
 		port = bxt_get_port_from_gmbus0(pin_select);
+	else if (IS_COFFEELAKE(dev_priv))
+		port = cnp_get_port_from_gmbus0(pin_select);
 	else
 		port = get_port_from_gmbus0(pin_select);
 	if (WARN_ON(port < 0))
...
@@ -151,9 +151,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;

-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		switch (tiled) {
 		case PLANE_CTL_TILED_LINEAR:
 			stride = stride_reg * 64;
@@ -217,9 +215,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (!plane->enabled)
 		return -ENODEV;

-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) {
+	if (INTEL_GEN(dev_priv) >= 9) {
 		plane->tiled = val & PLANE_CTL_TILED_MASK;
 		fmt = skl_format_to_drm(
 			val & PLANE_CTL_FORMAT_MASK,
@@ -260,9 +256,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	}

 	plane->stride = intel_vgpu_get_stride(vgpu, pipe, plane->tiled,
-		(IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv)) ?
+		(INTEL_GEN(dev_priv) >= 9) ?
 		(_PRI_PLANE_STRIDE_MASK >> 6) :
 		_PRI_PLANE_STRIDE_MASK, plane->bpp);
...
@@ -187,52 +187,6 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.write_protect_handler = intel_vgpu_page_track_handler,
 };

-/**
- * intel_gvt_init_host - Load MPT modules and detect if we're running in host
- *
- * This function is called at the driver loading stage. If failed to find a
- * loadable MPT module or detect currently we're running in a VM, then GVT-g
- * will be disabled
- *
- * Returns:
- * Zero on success, negative error code if failed.
- *
- */
-int intel_gvt_init_host(void)
-{
-	if (intel_gvt_host.initialized)
-		return 0;
-	/* Xen DOM U */
-	if (xen_domain() && !xen_initial_domain())
-		return -ENODEV;
-	/* Try to load MPT modules for hypervisors */
-	if (xen_initial_domain()) {
-		/* In Xen dom0 */
-		intel_gvt_host.mpt = try_then_request_module(
-				symbol_get(xengt_mpt), "xengt");
-		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
-	} else {
-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-		/* not in Xen. Try KVMGT */
-		intel_gvt_host.mpt = try_then_request_module(
-				symbol_get(kvmgt_mpt), "kvmgt");
-		intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
-#endif
-	}
-	/* Fail to load MPT modules - bail out */
-	if (!intel_gvt_host.mpt)
-		return -EINVAL;
-	gvt_dbg_core("Running with hypervisor %s in host mode\n",
-			supported_hypervisors[intel_gvt_host.hypervisor_type]);
-	intel_gvt_host.initialized = true;
-	return 0;
-}
-
 static void init_device_info(struct intel_gvt *gvt)
 {
 	struct intel_gvt_device_info *info = &gvt->device_info;
@@ -316,7 +270,6 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
 		return;

 	intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-	intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
 	intel_gvt_cleanup_vgpu_type_groups(gvt);
 	intel_gvt_clean_vgpu_types(gvt);
@@ -352,13 +305,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	struct intel_vgpu *vgpu;
 	int ret;

-	/*
-	 * Cannot initialize GVT device without intel_gvt_host gets
-	 * initialized first.
-	 */
-	if (WARN_ON(!intel_gvt_host.initialized))
-		return -EINVAL;
-
 	if (WARN_ON(dev_priv->gvt))
 		return -EEXIST;
@@ -420,13 +366,6 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 		goto out_clean_types;
 	}

-	ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
-				&intel_gvt_ops);
-	if (ret) {
-		gvt_err("failed to register gvt-g host device: %d\n", ret);
-		goto out_clean_types;
-	}
-
 	vgpu = intel_gvt_create_idle_vgpu(gvt);
 	if (IS_ERR(vgpu)) {
 		ret = PTR_ERR(vgpu);
@@ -441,6 +380,8 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	gvt_dbg_core("gvt device initialization is done\n");
 	dev_priv->gvt = gvt;
+	intel_gvt_host.dev = &dev_priv->drm.pdev->dev;
+	intel_gvt_host.initialized = true;
 	return 0;

 out_clean_types:
@@ -467,6 +408,45 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
 	return ret;
 }

-#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
-MODULE_SOFTDEP("pre: kvmgt");
-#endif
+int
+intel_gvt_register_hypervisor(struct intel_gvt_mpt *m)
+{
+	int ret;
+	void *gvt;
+
+	if (!intel_gvt_host.initialized)
+		return -ENODEV;
+
+	if (m->type != INTEL_GVT_HYPERVISOR_KVM &&
+	    m->type != INTEL_GVT_HYPERVISOR_XEN)
+		return -EINVAL;
+
+	/* Get a reference for device model module */
+	if (!try_module_get(THIS_MODULE))
+		return -ENODEV;
+
+	intel_gvt_host.mpt = m;
+	intel_gvt_host.hypervisor_type = m->type;
+	gvt = (void *)kdev_to_i915(intel_gvt_host.dev)->gvt;
+
+	ret = intel_gvt_hypervisor_host_init(intel_gvt_host.dev, gvt,
+					     &intel_gvt_ops);
+	if (ret < 0) {
+		gvt_err("Failed to init %s hypervisor module\n",
+			supported_hypervisors[intel_gvt_host.hypervisor_type]);
+		module_put(THIS_MODULE);
+		return -ENODEV;
+	}
+
+	gvt_dbg_core("Running with hypervisor %s in host mode\n",
+		     supported_hypervisors[intel_gvt_host.hypervisor_type]);
+	return 0;
+}
+EXPORT_SYMBOL_GPL(intel_gvt_register_hypervisor);
+
+void
+intel_gvt_unregister_hypervisor(void)
+{
+	intel_gvt_hypervisor_host_exit(intel_gvt_host.dev);
+	module_put(THIS_MODULE);
+}
+EXPORT_SYMBOL_GPL(intel_gvt_unregister_hypervisor);
@@ -52,12 +52,8 @@
 #define GVT_MAX_VGPU 8

-enum {
-	INTEL_GVT_HYPERVISOR_XEN = 0,
-	INTEL_GVT_HYPERVISOR_KVM,
-};
-
 struct intel_gvt_host {
+	struct device *dev;
 	bool initialized;
 	int hypervisor_type;
 	struct intel_gvt_mpt *mpt;
...
@@ -57,6 +57,8 @@ unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
 		return D_KBL;
 	else if (IS_BROXTON(gvt->dev_priv))
 		return D_BXT;
+	else if (IS_COFFEELAKE(gvt->dev_priv))
+		return D_CFL;

 	return 0;
 }
@@ -276,14 +278,12 @@ static int mul_force_wake_write(struct intel_vgpu *vgpu,
 		unsigned int offset, void *p_data, unsigned int bytes)
 {
 	u32 old, new;
-	uint32_t ack_reg_offset;
+	u32 ack_reg_offset;

 	old = vgpu_vreg(vgpu, offset);
 	new = CALC_MODE_MASK_REG(old, *(u32 *)p_data);

-	if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
-		|| IS_BROXTON(vgpu->gvt->dev_priv)) {
+	if (INTEL_GEN(vgpu->gvt->dev_priv) >= 9) {
 		switch (offset) {
 		case FORCEWAKE_RENDER_GEN9_REG:
 			ack_reg_offset = FORCEWAKE_ACK_RENDER_GEN9_REG;
@@ -833,7 +833,7 @@ static int dp_aux_ch_ctl_trans_done(struct intel_vgpu *vgpu, u32 value,
 }

 static void dp_aux_ch_ctl_link_training(struct intel_vgpu_dpcd_data *dpcd,
-		uint8_t t)
+		u8 t)
 {
 	if ((t & DPCD_TRAINING_PATTERN_SET_MASK) == DPCD_TRAINING_PATTERN_1) {
 		/* training pattern 1 for CR */
@@ -889,9 +889,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	write_vreg(vgpu, offset, p_data, bytes);
 	data = vgpu_vreg(vgpu, offset);

-	if ((IS_SKYLAKE(vgpu->gvt->dev_priv)
-		|| IS_KABYLAKE(vgpu->gvt->dev_priv)
-		|| IS_BROXTON(vgpu->gvt->dev_priv))
+	if ((INTEL_GEN(vgpu->gvt->dev_priv) >= 9)
 		&& offset != _REG_SKL_DP_AUX_CH_CTL(port_index)) {
 		/* SKL DPB/C/D aux ctl register changed */
 		return 0;
@@ -919,7 +917,7 @@ static int dp_aux_ch_ctl_mmio_write(struct intel_vgpu *vgpu,
 	if (op == GVT_AUX_NATIVE_WRITE) {
 		int t;
-		uint8_t buf[16];
+		u8 buf[16];

 		if ((addr + len + 1) >= DPCD_SIZE) {
 			/*
@@ -1407,7 +1405,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 	switch (cmd) {
 	case GEN9_PCODE_READ_MEM_LATENCY:
 		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv)) {
+			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
+			 || IS_COFFEELAKE(vgpu->gvt->dev_priv)) {
 			/**
 			 * "Read memory latency" command on gen9.
 			 * Below memory latency values are read
@@ -1431,7 +1430,8 @@ static int mailbox_write(struct intel_vgpu *vgpu, unsigned int offset,
 		break;
 	case SKL_PCODE_CDCLK_CONTROL:
 		if (IS_SKYLAKE(vgpu->gvt->dev_priv)
-			 || IS_KABYLAKE(vgpu->gvt->dev_priv))
+			 || IS_KABYLAKE(vgpu->gvt->dev_priv)
+			 || IS_COFFEELAKE(vgpu->gvt->dev_priv))
 			*data0 = SKL_CDCLK_READY_FOR_CHANGE;
 		break;
 	case GEN6_PCODE_READ_RC6VIDS:
@@ -3041,8 +3041,8 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
 	MMIO_DFH(GEN9_WM_CHICKEN3, D_SKL_PLUS, F_MODE_MASK | F_CMD_ACCESS,
 		 NULL, NULL);

-	MMIO_D(_MMIO(0x4ab8), D_KBL);
-	MMIO_D(_MMIO(0x2248), D_KBL | D_SKL);
+	MMIO_D(_MMIO(0x4ab8), D_KBL | D_CFL);
+	MMIO_D(_MMIO(0x2248), D_SKL_PLUS);

 	return 0;
 }
@@ -3302,7 +3302,8 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
 		if (ret)
 			goto err;
 	} else if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)) {
+		|| IS_KABYLAKE(dev_priv)
+		|| IS_COFFEELAKE(dev_priv)) {
 		ret = init_broadwell_mmio_info(gvt);
 		if (ret)
 			goto err;
...
@@ -33,13 +33,19 @@
 #ifndef _GVT_HYPERCALL_H_
 #define _GVT_HYPERCALL_H_

+enum hypervisor_type {
+	INTEL_GVT_HYPERVISOR_XEN = 0,
+	INTEL_GVT_HYPERVISOR_KVM,
+};
+
 /*
  * Specific GVT-g MPT modules function collections. Currently GVT-g supports
  * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
  */
 struct intel_gvt_mpt {
+	enum hypervisor_type type;
 	int (*host_init)(struct device *dev, void *gvt, const void *ops);
-	void (*host_exit)(struct device *dev, void *gvt);
+	void (*host_exit)(struct device *dev);
 	int (*attach_vgpu)(void *vgpu, unsigned long *handle);
 	void (*detach_vgpu)(unsigned long handle);
 	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
@@ -67,6 +73,5 @@ struct intel_gvt_mpt {
 };

 extern struct intel_gvt_mpt xengt_mpt;
-extern struct intel_gvt_mpt kvmgt_mpt;

 #endif /* _GVT_HYPERCALL_H_ */
@@ -581,9 +581,7 @@ static void gen8_init_irq(
 		SET_BIT_INFO(irq, 4, PRIMARY_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
 		SET_BIT_INFO(irq, 5, SPRITE_C_FLIP_DONE, INTEL_GVT_IRQ_INFO_DE_PIPE_C);
-	} else if (IS_SKYLAKE(gvt->dev_priv)
-			|| IS_KABYLAKE(gvt->dev_priv)
-			|| IS_BROXTON(gvt->dev_priv)) {
+	} else if (INTEL_GEN(gvt->dev_priv) >= 9) {
 		SET_BIT_INFO(irq, 25, AUX_CHANNEL_B, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 26, AUX_CHANNEL_C, INTEL_GVT_IRQ_INFO_DE_PORT);
 		SET_BIT_INFO(irq, 27, AUX_CHANNEL_D, INTEL_GVT_IRQ_INFO_DE_PORT);
...
@@ -627,6 +627,12 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 		goto undo_iommu;
 	}

+	/* Take a module reference as mdev core doesn't take
+	 * a reference for vendor driver.
+	 */
+	if (!try_module_get(THIS_MODULE))
+		goto undo_group;
+
 	ret = kvmgt_guest_init(mdev);
 	if (ret)
 		goto undo_group;
@@ -679,6 +685,9 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 					&vgpu->vdev.group_notifier);
 	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);

+	/* dereference module reference taken at open */
+	module_put(THIS_MODULE);
+
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
@@ -703,7 +712,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 	__intel_vgpu_release(vgpu);
 }

-static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
+static u64 intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 {
 	u32 start_lo, start_hi;
 	u32 mem_type;
@@ -730,10 +739,10 @@ static uint64_t intel_vgpu_get_bar_addr(struct intel_vgpu *vgpu, int bar)
 	return ((u64)start_hi << 32) | start_lo;
 }

-static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
+static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, u64 off,
 			     void *buf, unsigned int count, bool is_write)
 {
-	uint64_t bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
+	u64 bar_start = intel_vgpu_get_bar_addr(vgpu, bar);
 	int ret;

 	if (is_write)
@@ -745,13 +754,13 @@ static int intel_vgpu_bar_rw(struct intel_vgpu *vgpu, int bar, uint64_t off,
 	return ret;
 }

-static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, uint64_t off)
+static inline bool intel_vgpu_in_aperture(struct intel_vgpu *vgpu, u64 off)
 {
 	return off >= vgpu_aperture_offset(vgpu) &&
 	       off < vgpu_aperture_offset(vgpu) + vgpu_aperture_sz(vgpu);
 }

-static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, uint64_t off,
+static int intel_vgpu_aperture_rw(struct intel_vgpu *vgpu, u64 off,
 		void *buf, unsigned long count, bool is_write)
 {
 	void *aperture_va;
@@ -783,7 +792,7 @@ static ssize_t intel_vgpu_rw(struct mdev_device *mdev, char *buf,
 {
 	struct intel_vgpu *vgpu = mdev_get_drvdata(mdev);
 	unsigned int index = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
-	uint64_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
+	u64 pos = *ppos & VFIO_PCI_OFFSET_MASK;
 	int ret = -EINVAL;
@@ -1029,7 +1038,7 @@ static int intel_vgpu_get_irq_count(struct intel_vgpu *vgpu, int type)
 static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
 			unsigned int index, unsigned int start,
-			unsigned int count, uint32_t flags,
+			unsigned int count, u32 flags,
 			void *data)
 {
 	return 0;
@@ -1037,21 +1046,21 @@ static int intel_vgpu_set_intx_mask(struct intel_vgpu *vgpu,
 static int intel_vgpu_set_intx_unmask(struct intel_vgpu *vgpu,
 			unsigned int index, unsigned int start,
-			unsigned int count, uint32_t flags, void *data)
+			unsigned int count, u32 flags, void *data)
 {
 	return 0;
 }

 static int intel_vgpu_set_intx_trigger(struct intel_vgpu *vgpu,
 		unsigned int index, unsigned int start, unsigned int count,
-		uint32_t flags, void *data)
+		u32 flags, void *data)
 {
 	return 0;
 }

 static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
 		unsigned int index, unsigned int start, unsigned int count,
-		uint32_t flags, void *data)
+		u32 flags, void *data)
 {
 	struct eventfd_ctx *trigger;
@@ -1070,12 +1079,12 @@ static int intel_vgpu_set_msi_trigger(struct intel_vgpu *vgpu,
 	return 0;
 }

-static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, uint32_t flags,
+static int intel_vgpu_set_irqs(struct intel_vgpu *vgpu, u32 flags,
 		unsigned int index, unsigned int start, unsigned int count,
 		void *data)
 {
 	int (*func)(struct intel_vgpu *vgpu, unsigned int index,
-			unsigned int start, unsigned int count, uint32_t flags,
+			unsigned int start, unsigned int count, u32 flags,
 			void *data) = NULL;

 	switch (index) {
@@ -1467,7 +1476,7 @@ static int kvmgt_host_init(struct device *dev, void *gvt, const void *ops)
 	return mdev_register_device(dev, &intel_vgpu_ops);
 }

-static void kvmgt_host_exit(struct device *dev, void *gvt)
+static void kvmgt_host_exit(struct device *dev)
 {
 	mdev_unregister_device(dev);
 }
@@ -1849,7 +1858,8 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
 	return ret;
 }

-struct intel_gvt_mpt kvmgt_mpt = {
+static struct intel_gvt_mpt kvmgt_mpt = {
+	.type = INTEL_GVT_HYPERVISOR_KVM,
 	.host_init = kvmgt_host_init,
 	.host_exit = kvmgt_host_exit,
 	.attach_vgpu = kvmgt_attach_vgpu,
@@ -1868,15 +1878,17 @@ struct intel_gvt_mpt kvmgt_mpt = {
 	.put_vfio_device = kvmgt_put_vfio_device,
 	.is_valid_gfn = kvmgt_is_valid_gfn,
 };
-EXPORT_SYMBOL_GPL(kvmgt_mpt);

 static int __init kvmgt_init(void)
 {
+	if (intel_gvt_register_hypervisor(&kvmgt_mpt) < 0)
+		return -ENODEV;
 	return 0;
 }

 static void __exit kvmgt_exit(void)
 {
+	intel_gvt_unregister_hypervisor();
 }

 module_init(kvmgt_init);
...
@@ -57,7 +57,7 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 	(reg >= gvt->device_info.gtt_start_offset \
 	 && reg < gvt->device_info.gtt_start_offset + gvt_ggtt_sz(gvt))

-static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
+static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, u64 pa,
 		void *p_data, unsigned int bytes, bool read)
 {
 	struct intel_gvt *gvt = NULL;
@@ -99,7 +99,7 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
  * Returns:
  * Zero on success, negative error code if failed
  */
-int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
@@ -171,7 +171,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
  * Returns:
  * Zero on success, negative error code if failed
  */
-int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
+int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
 		void *p_data, unsigned int bytes)
 {
 	struct intel_gvt *gvt = vgpu->gvt;
...
@@ -43,15 +43,16 @@ struct intel_vgpu;
 #define D_SKL	(1 << 1)
 #define D_KBL	(1 << 2)
 #define D_BXT	(1 << 3)
+#define D_CFL	(1 << 4)

-#define D_GEN9PLUS	(D_SKL | D_KBL | D_BXT)
-#define D_GEN8PLUS	(D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_GEN9PLUS	(D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_GEN8PLUS	(D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)

-#define D_SKL_PLUS	(D_SKL | D_KBL | D_BXT)
-#define D_BDW_PLUS	(D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_SKL_PLUS	(D_SKL | D_KBL | D_BXT | D_CFL)
+#define D_BDW_PLUS	(D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)

 #define D_PRE_SKL	(D_BDW)
-#define D_ALL	(D_BDW | D_SKL | D_KBL | D_BXT)
+#define D_ALL	(D_BDW | D_SKL | D_KBL | D_BXT | D_CFL)

 typedef int (*gvt_mmio_func)(struct intel_vgpu *, unsigned int, void *,
 			     unsigned int);
...
@@ -353,8 +353,7 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	 */
 	fw = intel_uncore_forcewake_for_reg(dev_priv, reg,
 					    FW_REG_READ | FW_REG_WRITE);
-	if (ring_id == RCS && (IS_SKYLAKE(dev_priv) ||
-			IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)))
+	if (ring_id == RCS && (INTEL_GEN(dev_priv) >= 9))
 		fw |= FORCEWAKE_RENDER;

 	intel_uncore_forcewake_get(dev_priv, fw);
@@ -391,7 +390,8 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;

-	if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)) && ring_id == RCS)
+	if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+		|| IS_COFFEELAKE(dev_priv)) && ring_id == RCS)
 		return;

 	if (!pre && !gen9_render_mocs.initialized)
@@ -457,9 +457,7 @@ static void switch_mmio(struct intel_vgpu *pre,
 	u32 old_v, new_v;

 	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
-	if (IS_SKYLAKE(dev_priv)
-		|| IS_KABYLAKE(dev_priv)
-		|| IS_BROXTON(dev_priv))
+	if (INTEL_GEN(dev_priv) >= 9)
 		switch_mocs(pre, next, ring_id);

 	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
@@ -471,8 +469,8 @@ static void switch_mmio(struct intel_vgpu *pre,
 		 * state image on kabylake, it's initialized by lri command and
 		 * save or restore with context together.
 		 */
-		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv))
-			&& mmio->in_context)
+		if ((IS_KABYLAKE(dev_priv) || IS_BROXTON(dev_priv)
+			|| IS_COFFEELAKE(dev_priv)) && mmio->in_context)
 			continue;

 		// save
@@ -565,9 +563,7 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
 {
 	struct engine_mmio *mmio;

-	if (IS_SKYLAKE(gvt->dev_priv) ||
-		IS_KABYLAKE(gvt->dev_priv) ||
-		IS_BROXTON(gvt->dev_priv))
+	if (INTEL_GEN(gvt->dev_priv) >= 9)
 		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
 	else
 		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
...
@@ -52,9 +52,8 @@
 static inline int intel_gvt_hypervisor_host_init(struct device *dev,
 			void *gvt, const void *ops)
 {
-	/* optional to provide */
 	if (!intel_gvt_host.mpt->host_init)
-		return 0;
+		return -ENODEV;

 	return intel_gvt_host.mpt->host_init(dev, gvt, ops);
 }
@@ -62,14 +61,13 @@ static inline int intel_gvt_hypervisor_host_init(struct device *dev,
 /**
  * intel_gvt_hypervisor_host_exit - exit GVT-g host side
  */
-static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
-			void *gvt)
+static inline void intel_gvt_hypervisor_host_exit(struct device *dev)
 {
 	/* optional to provide */
 	if (!intel_gvt_host.mpt->host_exit)
 		return;

-	intel_gvt_host.mpt->host_exit(dev, gvt);
+	intel_gvt_host.mpt->host_exit(dev);
 }

 /**
@@ -362,4 +360,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn(
 	return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
 }

+int intel_gvt_register_hypervisor(struct intel_gvt_mpt *);
+void intel_gvt_unregister_hypervisor(void);
+
 #endif /* _GVT_MPT_H_ */
@@ -94,7 +94,7 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
 {
 	struct vgpu_sched_data *vgpu_data;
 	struct list_head *pos;
-	static uint64_t stage_check;
+	static u64 stage_check;
 	int stage = stage_check++ % GVT_TS_BALANCE_STAGE_NUM;

 	/* The timeslice accumulation reset at stage 0, which is
...
@@ -299,7 +299,8 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
 	void *shadow_ring_buffer_va;
 	u32 *cs;

-	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915))
+	if ((IS_KABYLAKE(req->i915) || IS_BROXTON(req->i915)
+		|| IS_COFFEELAKE(req->i915))
 		&& is_inhibit_context(req->hw_context))
 		intel_vgpu_restore_inhibit_context(vgpu, req);
@@ -939,9 +940,7 @@ static int workload_thread(void *priv)
 	struct intel_vgpu_workload *workload = NULL;
 	struct intel_vgpu *vgpu = NULL;
 	int ret;
-	bool need_force_wake = IS_SKYLAKE(gvt->dev_priv)
-			|| IS_KABYLAKE(gvt->dev_priv)
-			|| IS_BROXTON(gvt->dev_priv);
+	bool need_force_wake = (INTEL_GEN(gvt->dev_priv) >= 9);
 	DEFINE_WAIT_FUNC(wait, woken_wake_function);

 	kfree(p);
...
@@ -61,7 +61,7 @@ struct shadow_indirect_ctx {
 	unsigned long guest_gma;
 	unsigned long shadow_gma;
 	void *shadow_va;
-	uint32_t size;
+	u32 size;
 };

 #define PER_CTX_ADDR_MASK 0xfffff000
...
@@ -228,7 +228,7 @@ TRACE_EVENT(oos_sync,
 TRACE_EVENT(gvt_command,
 	TP_PROTO(u8 vgpu_id, u8 ring_id, u32 ip_gma, u32 *cmd_va,
 		u32 cmd_len, u32 buf_type, u32 buf_addr_type,
-		void *workload, char *cmd_name),
+		void *workload, const char *cmd_name),

 	TP_ARGS(vgpu_id, ring_id, ip_gma, cmd_va, cmd_len, buf_type,
 		buf_addr_type, workload, cmd_name),
...
@@ -49,6 +49,9 @@ static bool is_supported_device(struct drm_i915_private *dev_priv)
 		return true;
 	if (IS_BROXTON(dev_priv))
 		return true;
+	if (IS_COFFEELAKE(dev_priv))
+		return true;

 	return false;
 }
@@ -105,15 +108,6 @@ int intel_gvt_init(struct drm_i915_private *dev_priv)
 		return -EIO;
 	}

-	/*
-	 * We're not in host or fail to find a MPT module, disable GVT-g
-	 */
-	ret = intel_gvt_init_host();
-	if (ret) {
-		DRM_DEBUG_DRIVER("Not in host or MPT modules not found\n");
-		goto bail;
-	}
-
 	ret = intel_gvt_init_device(dev_priv);
 	if (ret) {
 		DRM_DEBUG_DRIVER("Fail to init GVT device\n");
...
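A note on the Coffeelake plumbing above: device support is expressed as a bitmask, so once intel_gvt_get_device_type() reports D_CFL, every MMIO table entry whose mask includes D_CFL (directly, or via the widened D_SKL_PLUS/D_BDW_PLUS/D_ALL macros) automatically applies to CFL vGPUs. A toy, self-contained illustration of that matching rule; gvt_entry_applies() is a hypothetical helper, not a function from the driver:

#include <stdbool.h>
#include <stdio.h>

/* device flags as defined in the mmio header hunk after this merge */
#define D_BDW (1 << 0)
#define D_SKL (1 << 1)
#define D_KBL (1 << 2)
#define D_BXT (1 << 3)
#define D_CFL (1 << 4)
#define D_SKL_PLUS (D_SKL | D_KBL | D_BXT | D_CFL)

/* hypothetical helper: an entry applies iff the device's type bit is set */
static bool gvt_entry_applies(unsigned int entry_mask, unsigned int dev_type)
{
	return (entry_mask & dev_type) != 0;
}

int main(void)
{
	/* 0x4ab8 is now registered with D_KBL | D_CFL, so it matches on CFL */
	printf("%d\n", gvt_entry_applies(D_KBL | D_CFL, D_CFL)); /* prints 1 */
	/* 0x2248 moved from D_KBL | D_SKL to D_SKL_PLUS, covering CFL too */
	printf("%d\n", gvt_entry_applies(D_SKL_PLUS, D_CFL));    /* prints 1 */
	return 0;
}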