Commit e329ef67 authored by Rodrigo Vivi

Merge tag 'gvt-next-2017-12-22' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2017-12-22:

- more mmio switch optimization (Weinan)
- cleanup i915_reg_t vs. offset usage (Zhenyu)
- move write protect handler out of mmio handler (Zhenyu)
Signed-off-by: Rodrigo Vivi <rodrigo.vivi@intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20171222085141.vgewlvvni37dljdt@zhen-hp.sh.intel.com
parents 757fffcf 4fafba2d
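
The bulk of the diff below is the i915_reg_t vs. offset cleanup: GVT used to funnel both typed registers and raw numeric offsets through a single vgpu_vreg() macro, and the new _t variants make the distinction explicit. As a rough standalone illustration of why the typed wrapper helps (simplified names and types, not the kernel's actual definitions):

#include <stdint.h>
#include <stdio.h>

/* Simplified stand-in for i915_reg_t: wrapping the offset in a struct
 * keeps callers from passing a bare integer where a typed register is
 * expected (and vice versa) without a compile error. */
typedef struct { uint32_t reg; } reg_t;

static inline uint32_t reg_offset(reg_t r) { return r.reg; }

static uint8_t vreg_space[0x10000];	/* fake vGPU register block */

/* Typed accessor: only accepts a reg_t. */
#define vreg_t(r)	(*(uint32_t *)(vreg_space + reg_offset(r)))
/* Raw accessor: for code paths that really do work on plain offsets,
 * e.g. offsets parsed out of a guest command buffer. */
#define vreg(off)	(*(uint32_t *)(vreg_space + (off)))

int main(void)
{
	reg_t mocs0 = { 0xc800 };	/* first RCS MOCS register */

	vreg_t(mocs0) = 0x12345678;	/* typed path */
	vreg(0xc800) |= 1;		/* raw-offset path, same storage */
	/* vreg_t(0xc800) would no longer compile - that is the point. */
	printf("0x%08x\n", vreg(0xc800));
	return 0;
}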
@@ -825,6 +825,21 @@ static int force_nonpriv_reg_handler(struct parser_exec_state *s,
 	return 0;
 }
 
+static inline bool is_mocs_mmio(unsigned int offset)
+{
+	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
+		((offset >= 0xb020) && (offset <= 0xb0a0));
+}
+
+static int mocs_cmd_reg_handler(struct parser_exec_state *s,
+				unsigned int offset, unsigned int index)
+{
+	if (!is_mocs_mmio(offset))
+		return -EINVAL;
+	vgpu_vreg(s->vgpu, offset) = cmd_val(s, index + 1);
+	return 0;
+}
+
 static int cmd_reg_handler(struct parser_exec_state *s,
 	unsigned int offset, unsigned int index, char *cmd)
 {
@@ -848,6 +863,10 @@ static int cmd_reg_handler(struct parser_exec_state *s,
 		return 0;
 	}
 
+	if (is_mocs_mmio(offset) &&
+	    mocs_cmd_reg_handler(s, offset, index))
+		return -EINVAL;
+
 	if (is_force_nonpriv_mmio(offset) &&
 	    force_nonpriv_reg_handler(s, offset, index))
 		return -EPERM;
@@ -1220,13 +1239,13 @@ static int gen8_check_mi_display_flip(struct parser_exec_state *s,
 		return 0;
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		stride = vgpu_vreg(s->vgpu, info->stride_reg) & GENMASK(9, 0);
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) &
+		stride = vgpu_vreg_t(s->vgpu, info->stride_reg) & GENMASK(9, 0);
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) &
 				GENMASK(12, 10)) >> 10;
 	} else {
-		stride = (vgpu_vreg(s->vgpu, info->stride_reg) &
+		stride = (vgpu_vreg_t(s->vgpu, info->stride_reg) &
 				GENMASK(15, 6)) >> 6;
-		tile = (vgpu_vreg(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
+		tile = (vgpu_vreg_t(s->vgpu, info->ctrl_reg) & (1 << 10)) >> 10;
 	}
 
 	if (stride != info->stride_val)
@@ -1245,21 +1264,21 @@ static int gen8_update_plane_mmio_from_mi_display_flip(
 	struct drm_i915_private *dev_priv = s->vgpu->gvt->dev_priv;
 	struct intel_vgpu *vgpu = s->vgpu;
 
-	set_mask_bits(&vgpu_vreg(vgpu, info->surf_reg), GENMASK(31, 12),
+	set_mask_bits(&vgpu_vreg_t(vgpu, info->surf_reg), GENMASK(31, 12),
 		      info->surf_val << 12);
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(9, 0),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(9, 0),
 			      info->stride_val);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(12, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(12, 10),
 			      info->tile_val << 10);
 	} else {
-		set_mask_bits(&vgpu_vreg(vgpu, info->stride_reg), GENMASK(15, 6),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->stride_reg), GENMASK(15, 6),
 			      info->stride_val << 6);
-		set_mask_bits(&vgpu_vreg(vgpu, info->ctrl_reg), GENMASK(10, 10),
+		set_mask_bits(&vgpu_vreg_t(vgpu, info->ctrl_reg), GENMASK(10, 10),
 			      info->tile_val << 10);
 	}
 
-	vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
+	vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(info->pipe))++;
 	intel_vgpu_trigger_virtual_event(vgpu, info->event);
 	return 0;
 }
......
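The is_mocs_mmio() range check above covers the per-engine MOCS control tables (0xc800–0xcff8) and the L3CC table (0xb020–0xb0a0); LRI writes that land in those ranges are now latched straight into the vGPU's virtual registers. A quick self-test of the range logic, lifted verbatim from the hunk above into a standalone program:

#include <assert.h>
#include <stdbool.h>

static inline bool is_mocs_mmio(unsigned int offset)
{
	return ((offset >= 0xc800) && (offset <= 0xcff8)) ||
		((offset >= 0xb020) && (offset <= 0xb0a0));
}

int main(void)
{
	assert(is_mocs_mmio(0xc800));	/* first engine MOCS entry */
	assert(is_mocs_mmio(0xcff8));	/* last engine MOCS entry */
	assert(is_mocs_mmio(0xb020));	/* first L3CC entry */
	assert(!is_mocs_mmio(0xb0a4));	/* just past the L3CC table */
	assert(!is_mocs_mmio(0x2358));	/* unrelated register */
	return 0;
}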
@@ -59,7 +59,7 @@ static int edp_pipe_is_enabled(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	if (!(vgpu_vreg(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
+	if (!(vgpu_vreg_t(vgpu, PIPECONF(_PIPE_EDP)) & PIPECONF_ENABLE))
 		return 0;
 
 	if (!(vgpu_vreg(vgpu, _TRANS_DDI_FUNC_CTL_EDP) & TRANS_DDI_FUNC_ENABLE))
@@ -74,7 +74,7 @@ int pipe_is_enabled(struct intel_vgpu *vgpu, int pipe)
 	if (WARN_ON(pipe < PIPE_A || pipe >= I915_MAX_PIPES))
 		return -EINVAL;
 
-	if (vgpu_vreg(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
+	if (vgpu_vreg_t(vgpu, PIPECONF(pipe)) & PIPECONF_ENABLE)
 		return 1;
 
 	if (edp_pipe_is_enabled(vgpu) &&
@@ -169,103 +169,105 @@ static u8 dpcd_fix_data[DPCD_HEADER_SIZE] = {
 static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
+	vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTB_HOTPLUG_CPT |
 			SDE_PORTC_HOTPLUG_CPT |
 			SDE_PORTD_HOTPLUG_CPT);
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
-		vgpu_vreg(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
+		vgpu_vreg_t(vgpu, SDEISR) &= ~(SDE_PORTA_HOTPLUG_SPT |
 				SDE_PORTE_HOTPLUG_SPT);
-		vgpu_vreg(vgpu, SKL_FUSE_STATUS) |=
+		vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
 				SKL_FUSE_DOWNLOAD_STATUS |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
 				SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
-		vgpu_vreg(vgpu, LCPLL1_CTL) |=
+		vgpu_vreg_t(vgpu, LCPLL1_CTL) |=
 				LCPLL_PLL_ENABLE |
 				LCPLL_PLL_LOCK;
-		vgpu_vreg(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
+		vgpu_vreg_t(vgpu, LCPLL2_CTL) |= LCPLL_PLL_ENABLE;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_B << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_B)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_B)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTB_HOTPLUG_CPT;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTC_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_C << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_C)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_C)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_D)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTD_HOTPLUG_CPT;
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
 			~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
 			TRANS_DDI_PORT_MASK);
-		vgpu_vreg(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
+		vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
 			(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
 			(PORT_D << TRANS_DDI_PORT_SHIFT) |
 			TRANS_DDI_FUNC_ENABLE);
 		if (IS_BROADWELL(dev_priv)) {
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) &=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) &=
 				~PORT_CLK_SEL_MASK;
-			vgpu_vreg(vgpu, PORT_CLK_SEL(PORT_D)) |=
+			vgpu_vreg_t(vgpu, PORT_CLK_SEL(PORT_D)) |=
 				PORT_CLK_SEL_LCPLL_810;
 		}
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
-		vgpu_vreg(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) |= DDI_BUF_CTL_ENABLE;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_D)) &= ~DDI_BUF_IS_IDLE;
+		vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDID_DETECTED;
 	}
 
 	if ((IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) &&
 			intel_vgpu_has_monitor_on_port(vgpu, PORT_E)) {
-		vgpu_vreg(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
+		vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTE_HOTPLUG_SPT;
 	}
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
 		if (IS_BROADWELL(dev_priv))
-			vgpu_vreg(vgpu, GEN8_DE_PORT_ISR) |=
+			vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
 				GEN8_PORT_DP_A_HOTPLUG;
 		else
-			vgpu_vreg(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
+			vgpu_vreg_t(vgpu, SDEISR) |= SDE_PORTA_HOTPLUG_SPT;
 
-		vgpu_vreg(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
+		vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |= DDI_INIT_DISPLAY_DETECTED;
 	}
 
 	/* Clear host CRT status, so guest couldn't detect this host CRT. */
 	if (IS_BROADWELL(dev_priv))
-		vgpu_vreg(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+		vgpu_vreg_t(vgpu, PCH_ADPA) &= ~ADPA_CRT_HOTPLUG_MONITOR_MASK;
+
+	vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
 }
 
 static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
@@ -282,7 +284,6 @@ static void clean_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num)
 static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 		int type, unsigned int resolution)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 	struct intel_vgpu_port *port = intel_vgpu_port(vgpu, port_num);
 
 	if (WARN_ON(resolution >= GVT_EDID_NUM))
@@ -308,7 +309,7 @@ static int setup_virtual_dp_monitor(struct intel_vgpu *vgpu, int port_num,
 	port->type = type;
 
 	emulate_monitor_status_change(vgpu);
-	vgpu_vreg(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
+
 	return 0;
 }
@@ -368,12 +369,12 @@ static void emulate_vblank_on_pipe(struct intel_vgpu *vgpu, int pipe)
 		if (!pipe_is_enabled(vgpu, pipe))
 			continue;
 
-		vgpu_vreg(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
+		vgpu_vreg_t(vgpu, PIPE_FLIPCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, event);
 	}
 
 	if (pipe_is_enabled(vgpu, pipe)) {
-		vgpu_vreg(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
+		vgpu_vreg_t(vgpu, PIPE_FRMCOUNT_G4X(pipe))++;
 		intel_vgpu_trigger_virtual_event(vgpu, vblank_event[pipe]);
 	}
 }
......
@@ -95,9 +95,9 @@ static inline int get_port_from_gmbus0(u32 gmbus0)
 static void reset_gmbus_controller(struct intel_vgpu *vgpu)
 {
-	vgpu_vreg(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) = GMBUS_HW_RDY;
 	if (!vgpu->display.i2c_edid.edid_available)
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
 	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
 }
@@ -123,16 +123,16 @@ static int gmbus0_mmio_write(struct intel_vgpu *vgpu,
 	vgpu->display.i2c_edid.state = I2C_GMBUS;
 	vgpu->display.i2c_edid.gmbus.phase = GMBUS_IDLE_PHASE;
 
-	vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
-	vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+	vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY | GMBUS_HW_WAIT_PHASE;
 
 	if (intel_vgpu_has_monitor_on_port(vgpu, port) &&
 	    !intel_vgpu_port_is_dp(vgpu, port)) {
 		vgpu->display.i2c_edid.port = port;
 		vgpu->display.i2c_edid.edid_available = true;
-		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_SATOER;
 	} else
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_SATOER;
 	return 0;
 }
@@ -159,8 +159,8 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 	 * 2) HW_RDY bit asserted
 	 */
 	if (wvalue & GMBUS_SW_CLR_INT) {
-		vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_INT;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_HW_RDY;
 	}
 
 	/* For virtualization, we suppose that HW is always ready,
@@ -208,7 +208,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 			 * visible in gmbus interface)
 			 */
 			i2c_edid->gmbus.phase = GMBUS_IDLE_PHASE;
-			vgpu_vreg(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
+			vgpu_vreg_t(vgpu, PCH_GMBUS2) &= ~GMBUS_ACTIVE;
 		}
 		break;
 	case NIDX_NS_W:
@@ -220,7 +220,7 @@ static int gmbus1_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
 		 * START (-->INDEX) -->DATA
 		 */
 		i2c_edid->gmbus.phase = GMBUS_DATA_PHASE;
-		vgpu_vreg(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
+		vgpu_vreg_t(vgpu, PCH_GMBUS2) |= GMBUS_ACTIVE;
 		break;
 	default:
 		gvt_vgpu_err("Unknown/reserved GMBUS cycle detected!\n");
@@ -256,7 +256,7 @@ static int gmbus3_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 	u32 reg_data = 0;
 
 	/* Data can only be received if previous settings correct */
-	if (vgpu_vreg(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
+	if (vgpu_vreg_t(vgpu, PCH_GMBUS1) & GMBUS_SLAVE_READ) {
 		if (byte_left <= 0) {
 			memcpy(p_data, &vgpu_vreg(vgpu, offset), bytes);
 			return 0;
......
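The GMBUS hunks are the same mechanical conversion: PCH_GMBUS1/PCH_GMBUS2 are typed registers, so every access moves to vgpu_vreg_t(); only the memcpy on a caller-supplied raw offset keeps plain vgpu_vreg(). For orientation, a toy model of the GMBUS2 status bookkeeping the emulator performs (bit positions as in i915_reg.h of that era, but treat them as illustrative):

#include <assert.h>
#include <stdbool.h>
#include <stdint.h>

#define GMBUS_HW_RDY	(1u << 11)	/* controller ready */
#define GMBUS_SATOER	(1u << 10)	/* slave ack timeout error */

static uint32_t gmbus2;			/* virtual GMBUS2 register */

/* Mirrors reset_gmbus_controller(): report ready, and raise the ack
 * timeout bit when there is no EDID-bearing monitor to talk to. */
static void reset_gmbus(bool edid_available)
{
	gmbus2 = GMBUS_HW_RDY;
	if (!edid_available)
		gmbus2 |= GMBUS_SATOER;
}

int main(void)
{
	reset_gmbus(false);
	assert(gmbus2 & GMBUS_SATOER);
	reset_gmbus(true);
	assert((gmbus2 & GMBUS_HW_RDY) && !(gmbus2 & GMBUS_SATOER));
	return 0;
}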
@@ -147,7 +147,7 @@ static u32 intel_vgpu_get_stride(struct intel_vgpu *vgpu, int pipe,
 {
 	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
 
-	u32 stride_reg = vgpu_vreg(vgpu, DSPSTRIDE(pipe)) & stride_mask;
+	u32 stride_reg = vgpu_vreg_t(vgpu, DSPSTRIDE(pipe)) & stride_mask;
 	u32 stride = stride_reg;
 
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv)) {
@@ -209,7 +209,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
 
-	val = vgpu_vreg(vgpu, DSPCNTR(pipe));
+	val = vgpu_vreg_t(vgpu, DSPCNTR(pipe));
 	plane->enabled = !!(val & DISPLAY_PLANE_ENABLE);
 	if (!plane->enabled)
 		return -ENODEV;
@@ -244,7 +244,7 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 	plane->hw_format = fmt;
 
-	plane->base = vgpu_vreg(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, DSPSURF(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 			     (unsigned long)plane->base);
@@ -263,14 +263,14 @@ int intel_vgpu_decode_primary_plane(struct intel_vgpu *vgpu,
 			(_PRI_PLANE_STRIDE_MASK >> 6) :
 				_PRI_PLANE_STRIDE_MASK, plane->bpp);
 
-	plane->width = (vgpu_vreg(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
+	plane->width = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) & _PIPE_H_SRCSZ_MASK) >>
 		_PIPE_H_SRCSZ_SHIFT;
 	plane->width += 1;
-	plane->height = (vgpu_vreg(vgpu, PIPESRC(pipe)) &
+	plane->height = (vgpu_vreg_t(vgpu, PIPESRC(pipe)) &
 			_PIPE_V_SRCSZ_MASK) >> _PIPE_V_SRCSZ_SHIFT;
 	plane->height += 1;	/* raw height is one minus the real value */
 
-	val = vgpu_vreg(vgpu, DSPTILEOFF(pipe));
+	val = vgpu_vreg_t(vgpu, DSPTILEOFF(pipe));
 	plane->x_offset = (val & _PRI_PLANE_X_OFF_MASK) >>
 		_PRI_PLANE_X_OFF_SHIFT;
 	plane->y_offset = (val & _PRI_PLANE_Y_OFF_MASK) >>
@@ -344,7 +344,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
 
-	val = vgpu_vreg(vgpu, CURCNTR(pipe));
+	val = vgpu_vreg_t(vgpu, CURCNTR(pipe));
 	mode = val & CURSOR_MODE;
 	plane->enabled = (mode != CURSOR_MODE_DISABLE);
 	if (!plane->enabled)
@@ -370,7 +370,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 	gvt_dbg_core("alpha_plane=0x%x, alpha_force=0x%x\n",
 		     alpha_plane, alpha_force);
 
-	plane->base = vgpu_vreg(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, CURBASE(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 			     (unsigned long)plane->base);
@@ -384,7 +384,7 @@ int intel_vgpu_decode_cursor_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
 
-	val = vgpu_vreg(vgpu, CURPOS(pipe));
+	val = vgpu_vreg_t(vgpu, CURPOS(pipe));
 	plane->x_pos = (val & _CURSOR_POS_X_MASK) >> _CURSOR_POS_X_SHIFT;
 	plane->x_sign = (val & _CURSOR_SIGN_X_MASK) >> _CURSOR_SIGN_X_SHIFT;
 	plane->y_pos = (val & _CURSOR_POS_Y_MASK) >> _CURSOR_POS_Y_SHIFT;
@@ -424,7 +424,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	if (pipe >= I915_MAX_PIPES)
 		return -ENODEV;
 
-	val = vgpu_vreg(vgpu, SPRCTL(pipe));
+	val = vgpu_vreg_t(vgpu, SPRCTL(pipe));
 	plane->enabled = !!(val & SPRITE_ENABLE);
 	if (!plane->enabled)
 		return -ENODEV;
@@ -475,7 +475,7 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->drm_format = drm_format;
 
-	plane->base = vgpu_vreg(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
+	plane->base = vgpu_vreg_t(vgpu, SPRSURF(pipe)) & I915_GTT_PAGE_MASK;
 	if (!intel_gvt_ggtt_validate_range(vgpu, plane->base, 0)) {
 		gvt_vgpu_err("invalid gma address: %lx\n",
 			     (unsigned long)plane->base);
@@ -489,10 +489,10 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 		return -EINVAL;
 	}
 
-	plane->stride = vgpu_vreg(vgpu, SPRSTRIDE(pipe)) &
+	plane->stride = vgpu_vreg_t(vgpu, SPRSTRIDE(pipe)) &
 				_SPRITE_STRIDE_MASK;
 
-	val = vgpu_vreg(vgpu, SPRSIZE(pipe));
+	val = vgpu_vreg_t(vgpu, SPRSIZE(pipe));
 	plane->height = (val & _SPRITE_SIZE_HEIGHT_MASK) >>
 		_SPRITE_SIZE_HEIGHT_SHIFT;
 	plane->width = (val & _SPRITE_SIZE_WIDTH_MASK) >>
@@ -500,11 +500,11 @@ int intel_vgpu_decode_sprite_plane(struct intel_vgpu *vgpu,
 	plane->height += 1;	/* raw height is one minus the real value */
 	plane->width += 1;	/* raw width is one minus the real value */
 
-	val = vgpu_vreg(vgpu, SPRPOS(pipe));
+	val = vgpu_vreg_t(vgpu, SPRPOS(pipe));
 	plane->x_pos = (val & _SPRITE_POS_X_MASK) >> _SPRITE_POS_X_SHIFT;
 	plane->y_pos = (val & _SPRITE_POS_Y_MASK) >> _SPRITE_POS_Y_SHIFT;
 
-	val = vgpu_vreg(vgpu, SPROFFSET(pipe));
+	val = vgpu_vreg_t(vgpu, SPROFFSET(pipe));
 	plane->x_offset = (val & _SPRITE_OFFSET_START_X_MASK) >>
 		_SPRITE_OFFSET_START_X_SHIFT;
 	plane->y_offset = (val & _SPRITE_OFFSET_START_Y_MASK) >>
......
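All of the plane-decoder changes read whole registers through the typed macro and then pull fields out with masks and shifts; the "+= 1" lines exist because the hardware stores sizes minus one. A minimal worked example of that decode pattern (mask and shift values here are made up for the demo, not the real _PIPE_*_SRCSZ ones):

#include <assert.h>
#include <stdint.h>

/* Illustrative field layout: width-1 in bits 27:16, height-1 in 11:0. */
#define H_SRCSZ_MASK	0x0fff0000u
#define H_SRCSZ_SHIFT	16
#define V_SRCSZ_MASK	0x00000fffu

int main(void)
{
	uint32_t pipesrc = ((1920u - 1) << H_SRCSZ_SHIFT) | (1080u - 1);

	uint32_t width  = ((pipesrc & H_SRCSZ_MASK) >> H_SRCSZ_SHIFT) + 1;
	uint32_t height = (pipesrc & V_SRCSZ_MASK) + 1;

	assert(width == 1920 && height == 1080);
	return 0;
}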
@@ -1968,6 +1968,39 @@ int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
 	return ret;
 }
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+				     void *p_data, unsigned int bytes)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	int ret = 0;
+
+	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
+		struct intel_vgpu_page_track *t;
+
+		mutex_lock(&gvt->lock);
+
+		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
+		if (t) {
+			if (unlikely(vgpu->failsafe)) {
+				/* remove write protection to prevent future traps */
+				intel_vgpu_clean_page_track(vgpu, t);
+			} else {
+				ret = t->handler(t, pa, p_data, bytes);
+				if (ret) {
+					gvt_err("guest page write error %d, "
+						"gfn 0x%lx, pa 0x%llx, "
+						"var 0x%x, len %d\n",
+						ret, t->gfn, pa,
+						*(u32 *)p_data, bytes);
+				}
+			}
+		}
+		mutex_unlock(&gvt->lock);
+	}
+	return ret;
+}
+
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 		intel_gvt_gtt_type_t type)
 {
@@ -2244,7 +2277,7 @@ struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
@@ -2279,7 +2312,7 @@ int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 		int page_table_level)
 {
-	u64 *pdp = (u64 *)&vgpu_vreg64(vgpu, vgtif_reg(pdp[0]));
+	u64 *pdp = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
 	struct intel_vgpu_mm *mm;
 
 	if (WARN_ON((page_table_level != 4) && (page_table_level != 3)))
......
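This is the "move write protect handler out of mmio handler" item from the tag: the tracked-page lookup and dispatch now live in a dedicated intel_vgpu_write_protect_handler() instead of being buried in the generic MMIO read/write paths. A stripped-down model of the dispatch it performs (hypothetical types, and a linear lookup standing in for the real tracked-page table):

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/* Hypothetical miniature of a tracked guest page. */
struct page_track {
	uint64_t gfn;				/* guest frame number */
	int (*handler)(struct page_track *t, uint64_t pa,
		       void *p_data, unsigned int bytes);
};

static struct page_track *find_tracked_page(struct page_track *tbl,
					    unsigned int n, uint64_t gfn)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		if (tbl[i].gfn == gfn)
			return &tbl[i];
	return NULL;			/* not tracked: not our write */
}

static int ppgtt_write(struct page_track *t, uint64_t pa,
		       void *p_data, unsigned int bytes)
{
	printf("shadow %u-byte write, gfn 0x%llx, pa 0x%llx\n",
	       bytes, (unsigned long long)t->gfn, (unsigned long long)pa);
	return 0;
}

int main(void)
{
	struct page_track tracked[] = {
		{ .gfn = 0x1234, .handler = ppgtt_write },
	};
	uint64_t pa = (0x1234ULL << PAGE_SHIFT) | 0x18;
	uint32_t val = 0xdeadbeef;
	struct page_track *t;

	t = find_tracked_page(tracked, 1, pa >> PAGE_SHIFT);
	if (t)
		t->handler(t, pa, &val, sizeof(val));
	return 0;
}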
@@ -308,4 +308,7 @@ int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
 	unsigned int off, void *p_data, unsigned int bytes);
 
+int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
+				     void *p_data, unsigned int bytes);
+
 #endif /* _GVT_GTT_H_ */
@@ -183,6 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
 	.get_gvt_attrs = intel_get_gvt_attrs,
 	.vgpu_query_plane = intel_vgpu_query_plane,
 	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
+	.write_protect_handler = intel_vgpu_write_protect_handler,
 };
 
 /**
......
@@ -412,23 +412,20 @@ void intel_vgpu_free_resource(struct intel_vgpu *vgpu);
 void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
 		u32 fence, u64 value);
 
-/* Macros for easily accessing vGPU virtual/shadow register */
-#define vgpu_vreg(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg8(vgpu, reg) \
-	(*(u8 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg16(vgpu, reg) \
-	(*(u16 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_vreg64(vgpu, reg) \
-	(*(u64 *)(vgpu->mmio.vreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg(vgpu, reg) \
-	(*(u32 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg8(vgpu, reg) \
-	(*(u8 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg16(vgpu, reg) \
-	(*(u16 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
-#define vgpu_sreg64(vgpu, reg) \
-	(*(u64 *)(vgpu->mmio.sreg + INTEL_GVT_MMIO_OFFSET(reg)))
+/* Macros for easily accessing vGPU virtual/shadow register.
+   Explicitly separate use for typed MMIO reg or real offset. */
+#define vgpu_vreg_t(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg(vgpu, offset) \
+	(*(u32 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_vreg64_t(vgpu, reg) \
+	(*(u64 *)(vgpu->mmio.vreg + i915_mmio_reg_offset(reg)))
+#define vgpu_vreg64(vgpu, offset) \
+	(*(u64 *)(vgpu->mmio.vreg + (offset)))
+#define vgpu_sreg_t(vgpu, reg) \
+	(*(u32 *)(vgpu->mmio.sreg + i915_mmio_reg_offset(reg)))
+#define vgpu_sreg(vgpu, offset) \
+	(*(u32 *)(vgpu->mmio.sreg + (offset)))
 
 #define for_each_active_vgpu(gvt, vgpu, id) \
 	idr_for_each_entry((&(gvt)->vgpu_idr), (vgpu), (id)) \
@@ -549,6 +546,8 @@ struct intel_gvt_ops {
 			struct attribute_group ***intel_vgpu_type_groups);
 	int (*vgpu_query_plane)(struct intel_vgpu *vgpu, void *);
 	int (*vgpu_get_dmabuf)(struct intel_vgpu *vgpu, unsigned int);
+	int (*write_protect_handler)(struct intel_vgpu *, u64, void *,
+				unsigned int);
 };
......
This diff is collapsed.
@@ -1360,8 +1360,8 @@ static void kvmgt_page_track_write(struct kvm_vcpu *vcpu, gpa_t gpa,
 			struct kvmgt_guest_info, track_node);
 
 	if (kvmgt_gfn_is_write_protected(info, gpa_to_gfn(gpa)))
-		intel_gvt_ops->emulate_mmio_write(info->vgpu, gpa,
-					(void *)val, len);
+		intel_gvt_ops->write_protect_handler(info->vgpu, gpa,
+						     (void *)val, len);
 }
 
 static void kvmgt_page_track_flush_slot(struct kvm *kvm,
......
@@ -117,25 +117,6 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
 		else
 			memcpy(pt, p_data, bytes);
 
-	} else if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		/* Since we enter the failsafe mode early during guest boot,
-		 * guest may not have chance to set up its ppgtt table, so
-		 * there should not be any wp pages for guest. Keep the wp
-		 * related code here in case we need to handle it in furture.
-		 */
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			/* remove write protection to prevent furture traps */
-			intel_vgpu_clean_page_track(vgpu, t);
-			if (read)
-				intel_gvt_hypervisor_read_gpa(vgpu, pa,
-						p_data, bytes);
-			else
-				intel_gvt_hypervisor_write_gpa(vgpu, pa,
-						p_data, bytes);
-		}
 	}
 	mutex_unlock(&gvt->lock);
 }
@@ -168,23 +149,6 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = intel_gvt_hypervisor_read_gpa(vgpu, pa,
-					p_data, bytes);
-			if (ret) {
-				gvt_vgpu_err("guest page read error %d, "
-					"gfn 0x%lx, pa 0x%llx, var 0x%x, len %d\n",
-					ret, t->gfn, pa, *(u32 *)p_data,
-					bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -263,23 +227,6 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
 		goto out;
 	}
 
-	if (atomic_read(&vgpu->gtt.n_tracked_guest_page)) {
-		struct intel_vgpu_page_track *t;
-
-		t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-		if (t) {
-			ret = t->handler(t, pa, p_data, bytes);
-			if (ret) {
-				gvt_err("guest page write error %d, "
-					"gfn 0x%lx, pa 0x%llx, "
-					"var 0x%x, len %d\n",
-					ret, t->gfn, pa,
-					*(u32 *)p_data, bytes);
-			}
-			goto out;
-		}
-	}
-
 	offset = intel_vgpu_gpa_to_mmio_offset(vgpu, pa);
 
 	if (WARN_ON(bytes > 8))
@@ -336,10 +283,10 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
 		memcpy(vgpu->mmio.vreg, mmio, info->mmio_size);
 		memcpy(vgpu->mmio.sreg, mmio, info->mmio_size);
 
-		vgpu_vreg(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_THREAD_STATUS_REG) = 0;
 
 		/* set the bit 0:2 (Core C-State) to C0 */
-		vgpu_vreg(vgpu, GEN6_GT_CORE_STATUS) = 0;
+		vgpu_vreg_t(vgpu, GEN6_GT_CORE_STATUS) = 0;
 
 		vgpu->mmio.disable_warn_untrack = false;
 	} else {
......
@@ -76,13 +76,6 @@ int intel_gvt_for_each_tracked_mmio(struct intel_gvt *gvt,
 	int (*handler)(struct intel_gvt *gvt, u32 offset, void *data),
 	void *data);
 
-#define INTEL_GVT_MMIO_OFFSET(reg) ({ \
-	typeof(reg) __reg = reg; \
-	u32 *offset = (u32 *)&__reg; \
-	*offset; \
-})
-
 int intel_vgpu_init_mmio(struct intel_vgpu *vgpu);
 void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr);
 void intel_vgpu_clean_mmio(struct intel_vgpu *vgpu);
......
@@ -149,8 +149,41 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {
 	{ /* Terminated */ }
 };
 
-static u32 gen9_render_mocs[I915_NUM_ENGINES][64];
-static u32 gen9_render_mocs_L3[32];
+static struct {
+	bool initialized;
+	u32 control_table[I915_NUM_ENGINES][64];
+	u32 l3cc_table[32];
+} gen9_render_mocs;
+
+static void load_render_mocs(struct drm_i915_private *dev_priv)
+{
+	i915_reg_t offset;
+	u32 regs[] = {
+		[RCS] = 0xc800,
+		[VCS] = 0xc900,
+		[VCS2] = 0xca00,
+		[BCS] = 0xcc00,
+		[VECS] = 0xcb00,
+	};
+	int ring_id, i;
+
+	for (ring_id = 0; ring_id < I915_NUM_ENGINES; ring_id++) {
+		offset.reg = regs[ring_id];
+		for (i = 0; i < 64; i++) {
+			gen9_render_mocs.control_table[ring_id][i] =
+				I915_READ_FW(offset);
+			offset.reg += 4;
+		}
+	}
+
+	offset.reg = 0xb020;
+	for (i = 0; i < 32; i++) {
+		gen9_render_mocs.l3cc_table[i] =
+			I915_READ_FW(offset);
+		offset.reg += 4;
+	}
+
+	gen9_render_mocs.initialized = true;
+}
 
 static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 {
@@ -191,17 +224,20 @@ static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
 	if (wait_for_atomic((I915_READ_FW(reg) == 0), 50))
 		gvt_vgpu_err("timeout in invalidate ring (%d) tlb\n", ring_id);
 	else
-		vgpu_vreg(vgpu, regs[ring_id]) = 0;
+		vgpu_vreg_t(vgpu, reg) = 0;
 
 	intel_uncore_forcewake_put(dev_priv, fw);
 
 	gvt_dbg_core("invalidate TLB for ring %d\n", ring_id);
 }
 
-static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
+static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
+			int ring_id)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
+	struct drm_i915_private *dev_priv;
 	i915_reg_t offset, l3_offset;
+	u32 old_v, new_v;
+
 	u32 regs[] = {
 		[RCS] = 0xc800,
 		[VCS] = 0xc900,
@@ -211,54 +247,45 @@ static void load_mocs(struct intel_vgpu *vgpu, int ring_id)
 	};
 	int i;
 
+	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
 	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
 		return;
 
+	if (!pre && !gen9_render_mocs.initialized)
+		load_render_mocs(dev_priv);
+
 	offset.reg = regs[ring_id];
 	for (i = 0; i < 64; i++) {
-		gen9_render_mocs[ring_id][i] = I915_READ_FW(offset);
-		I915_WRITE_FW(offset, vgpu_vreg(vgpu, offset));
-		offset.reg += 4;
-	}
-
-	if (ring_id == RCS) {
-		l3_offset.reg = 0xb020;
-		for (i = 0; i < 32; i++) {
-			gen9_render_mocs_L3[i] = I915_READ_FW(l3_offset);
-			I915_WRITE_FW(l3_offset, vgpu_vreg(vgpu, l3_offset));
-			l3_offset.reg += 4;
-		}
-	}
-}
-
-static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	i915_reg_t offset, l3_offset;
-	u32 regs[] = {
-		[RCS] = 0xc800,
-		[VCS] = 0xc900,
-		[VCS2] = 0xca00,
-		[BCS] = 0xcc00,
-		[VECS] = 0xcb00,
-	};
-	int i;
-
-	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
-		return;
-
-	offset.reg = regs[ring_id];
-	for (i = 0; i < 64; i++) {
-		vgpu_vreg(vgpu, offset) = I915_READ_FW(offset);
-		I915_WRITE_FW(offset, gen9_render_mocs[ring_id][i]);
+		if (pre)
+			old_v = vgpu_vreg_t(pre, offset);
+		else
+			old_v = gen9_render_mocs.control_table[ring_id][i];
+		if (next)
+			new_v = vgpu_vreg_t(next, offset);
+		else
+			new_v = gen9_render_mocs.control_table[ring_id][i];
+
+		if (old_v != new_v)
+			I915_WRITE_FW(offset, new_v);
+
 		offset.reg += 4;
 	}
 
 	if (ring_id == RCS) {
 		l3_offset.reg = 0xb020;
 		for (i = 0; i < 32; i++) {
-			vgpu_vreg(vgpu, l3_offset) = I915_READ_FW(l3_offset);
-			I915_WRITE_FW(l3_offset, gen9_render_mocs_L3[i]);
+			if (pre)
+				old_v = vgpu_vreg_t(pre, l3_offset);
+			else
+				old_v = gen9_render_mocs.l3cc_table[i];
+			if (next)
+				new_v = vgpu_vreg_t(next, l3_offset);
+			else
+				new_v = gen9_render_mocs.l3cc_table[i];
+
+			if (old_v != new_v)
+				I915_WRITE_FW(l3_offset, new_v);
+
 			l3_offset.reg += 4;
 		}
 	}
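
This rework is the "more mmio switch optimization" item: instead of a save pass plus a restore pass that unconditionally rewrites every MOCS entry, switch_mocs() computes the outgoing and incoming values and touches the hardware only where they differ, falling back to the lazily captured host snapshot when either side is the host. The core idea, as a hedged standalone sketch (mmio_write() is a stand-in for I915_WRITE_FW()):

#include <stdint.h>
#include <stdio.h>

static void mmio_write(uint32_t offset, uint32_t val)
{
	printf("write 0x%04x <- 0x%08x\n", offset, val);
}

/* Switch one register table between two owners: write an entry only
 * when the value the outgoing owner leaves behind differs from the
 * value the incoming owner expects. */
static void switch_table(uint32_t base, const uint32_t *old_v,
			 const uint32_t *new_v, unsigned int count)
{
	unsigned int i;

	for (i = 0; i < count; i++)
		if (old_v[i] != new_v[i])
			mmio_write(base + 4 * i, new_v[i]);
}

int main(void)
{
	uint32_t pre[4]  = { 1, 2, 3, 4 };	/* outgoing vGPU's view */
	uint32_t next[4] = { 1, 9, 3, 4 };	/* incoming vGPU's view */

	switch_table(0xc800, pre, next, 4);	/* only entry 1 is written */
	return 0;
}

In the worst case this degenerates to a full rewrite, but vGPUs that share most MOCS settings skip nearly all of the expensive uncore writes.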
@@ -266,84 +293,77 @@ static void restore_mocs(struct intel_vgpu *vgpu, int ring_id)
 #define CTX_CONTEXT_CONTROL_VAL	0x03
 
-/* Switch ring mmio values (context) from host to a vgpu. */
-static void switch_mmio_to_vgpu(struct intel_vgpu *vgpu, int ring_id)
+/* Switch ring mmio values (context). */
+static void switch_mmio(struct intel_vgpu *pre,
+			struct intel_vgpu *next,
+			int ring_id)
 {
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct intel_vgpu_submission *s = &vgpu->submission;
-	u32 *reg_state = s->shadow_ctx->engine[ring_id].lrc_reg_state;
-	u32 ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+	struct drm_i915_private *dev_priv;
+	struct intel_vgpu_submission *s;
+	u32 *reg_state, ctx_ctrl;
 	u32 inhibit_mask =
 		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
 	struct engine_mmio *mmio;
-	u32 v;
-
-	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		load_mocs(vgpu, ring_id);
-
-	mmio = vgpu->gvt->engine_mmio_list;
-	while (i915_mmio_reg_offset((mmio++)->reg)) {
-		if (mmio->ring_id != ring_id)
-			continue;
-
-		mmio->value = I915_READ_FW(mmio->reg);
-
-		/*
-		 * if it is an inhibit context, load in_context mmio
-		 * into HW by mmio write. If it is not, skip this mmio
-		 * write.
-		 */
-		if (mmio->in_context &&
-		    (ctx_ctrl & inhibit_mask) != inhibit_mask)
-			continue;
-
-		if (mmio->mask)
-			v = vgpu_vreg(vgpu, mmio->reg) | (mmio->mask << 16);
-		else
-			v = vgpu_vreg(vgpu, mmio->reg);
-
-		I915_WRITE_FW(mmio->reg, v);
-
-		trace_render_mmio(vgpu->id, "load",
-				  i915_mmio_reg_offset(mmio->reg),
-				  mmio->value, v);
-	}
-
-	handle_tlb_pending_event(vgpu, ring_id);
-}
-
-/* Switch ring mmio values (context) from vgpu to host. */
-static void switch_mmio_to_host(struct intel_vgpu *vgpu, int ring_id)
-{
-	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
-	struct engine_mmio *mmio;
-	u32 v;
+	u32 old_v, new_v;
 
+	dev_priv = pre ? pre->gvt->dev_priv : next->gvt->dev_priv;
 	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
-		restore_mocs(vgpu, ring_id);
+		switch_mocs(pre, next, ring_id);
 
-	mmio = vgpu->gvt->engine_mmio_list;
+	mmio = dev_priv->gvt->engine_mmio_list;
 	while (i915_mmio_reg_offset((mmio++)->reg)) {
 		if (mmio->ring_id != ring_id)
 			continue;
-
-		vgpu_vreg(vgpu, mmio->reg) = I915_READ_FW(mmio->reg);
-
-		if (mmio->mask) {
-			vgpu_vreg(vgpu, mmio->reg) &= ~(mmio->mask << 16);
-			v = mmio->value | (mmio->mask << 16);
+		// save
+		if (pre) {
+			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
+			if (mmio->mask)
+				vgpu_vreg_t(pre, mmio->reg) &=
+						~(mmio->mask << 16);
+			old_v = vgpu_vreg_t(pre, mmio->reg);
 		} else
-			v = mmio->value;
-
-		if (mmio->in_context)
-			continue;
+			old_v = mmio->value = I915_READ_FW(mmio->reg);
+
+		// restore
+		if (next) {
+			s = &next->submission;
+			reg_state =
+				s->shadow_ctx->engine[ring_id].lrc_reg_state;
+			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
+			/*
+			 * if it is an inhibit context, load in_context mmio
+			 * into HW by mmio write. If it is not, skip this mmio
+			 * write.
+			 */
+			if (mmio->in_context &&
+			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
+				continue;
+
+			if (mmio->mask)
+				new_v = vgpu_vreg_t(next, mmio->reg) |
+							(mmio->mask << 16);
+			else
+				new_v = vgpu_vreg_t(next, mmio->reg);
+		} else {
+			if (mmio->in_context)
+				continue;
+			if (mmio->mask)
+				new_v = mmio->value | (mmio->mask << 16);
+			else
+				new_v = mmio->value;
+		}
 
-		I915_WRITE_FW(mmio->reg, v);
+		I915_WRITE_FW(mmio->reg, new_v);
 
-		trace_render_mmio(vgpu->id, "restore",
+		trace_render_mmio(pre ? pre->id : 0,
+				  next ? next->id : 0,
+				  "switch",
 				  i915_mmio_reg_offset(mmio->reg),
-				  mmio->value, v);
+				  old_v, new_v);
 	}
+
+	if (next)
+		handle_tlb_pending_event(next, ring_id);
 }
/** /**
...@@ -374,17 +394,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre, ...@@ -374,17 +394,7 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
* handle forcewake mannually. * handle forcewake mannually.
*/ */
intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_get(dev_priv, FORCEWAKE_ALL);
switch_mmio(pre, next, ring_id);
/**
* TODO: Optimize for vGPU to vGPU switch by merging
* switch_mmio_to_host() and switch_mmio_to_vgpu().
*/
if (pre)
switch_mmio_to_host(pre, ring_id);
if (next)
switch_mmio_to_vgpu(next, ring_id);
intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL); intel_uncore_forcewake_put(dev_priv, FORCEWAKE_ALL);
} }
......
@@ -330,13 +330,14 @@ TRACE_EVENT(inject_msi,
 );
 
 TRACE_EVENT(render_mmio,
-	TP_PROTO(int id, char *action, unsigned int reg,
+	TP_PROTO(int old_id, int new_id, char *action, unsigned int reg,
 		 unsigned int old_val, unsigned int new_val),
 
-	TP_ARGS(id, action, reg, new_val, old_val),
+	TP_ARGS(old_id, new_id, action, reg, new_val, old_val),
 
 	TP_STRUCT__entry(
-		__field(int, id)
+		__field(int, old_id)
+		__field(int, new_id)
 		__array(char, buf, GVT_TEMP_STR_LEN)
 		__field(unsigned int, reg)
 		__field(unsigned int, old_val)
@@ -344,15 +345,17 @@ TRACE_EVENT(render_mmio,
 	),
 
 	TP_fast_assign(
-		__entry->id = id;
+		__entry->old_id = old_id;
+		__entry->new_id = new_id;
 		snprintf(__entry->buf, GVT_TEMP_STR_LEN, "%s", action);
 		__entry->reg = reg;
 		__entry->old_val = old_val;
 		__entry->new_val = new_val;
 	),
 
-	TP_printk("VM%u %s reg %x, old %08x new %08x\n",
-		  __entry->id, __entry->buf, __entry->reg,
+	TP_printk("VM%u -> VM%u %s reg %x, old %08x new %08x\n",
+		  __entry->old_id, __entry->new_id,
+		  __entry->buf, __entry->reg,
 		  __entry->old_val, __entry->new_val)
 );
......
@@ -38,25 +38,25 @@
 void populate_pvinfo_page(struct intel_vgpu *vgpu)
 {
 	/* setup the ballooning information */
-	vgpu_vreg64(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
-	vgpu_vreg(vgpu, vgtif_reg(version_major)) = 1;
-	vgpu_vreg(vgpu, vgtif_reg(version_minor)) = 0;
-	vgpu_vreg(vgpu, vgtif_reg(display_ready)) = 0;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
-	vgpu_vreg(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
+	vgpu_vreg64_t(vgpu, vgtif_reg(magic)) = VGT_MAGIC;
+	vgpu_vreg_t(vgpu, vgtif_reg(version_major)) = 1;
+	vgpu_vreg_t(vgpu, vgtif_reg(version_minor)) = 0;
+	vgpu_vreg_t(vgpu, vgtif_reg(display_ready)) = 0;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_id)) = vgpu->id;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) = VGT_CAPS_FULL_48BIT_PPGTT;
+	vgpu_vreg_t(vgpu, vgtif_reg(vgt_caps)) |= VGT_CAPS_HWSP_EMULATION;
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.base)) =
 		vgpu_aperture_gmadr_base(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.mappable_gmadr.size)) =
 		vgpu_aperture_sz(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.base)) =
 		vgpu_hidden_gmadr_base(vgpu);
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.nonmappable_gmadr.size)) =
 		vgpu_hidden_sz(vgpu);
 
-	vgpu_vreg(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
+	vgpu_vreg_t(vgpu, vgtif_reg(avail_rs.fence_num)) = vgpu_fence_sz(vgpu);
 
 	gvt_dbg_core("Populate PVINFO PAGE for vGPU %d\n", vgpu->id);
 	gvt_dbg_core("aperture base [GMADR] 0x%llx size 0x%llx\n",
......