Commit 69d5c4b5 authored by Jani Nikula

Merge tag 'gvt-next-2020-11-23' of https://github.com/intel/gvt-linux into drm-intel-next-queued

gvt-next-2020-11-23

- Fix host suspend/resume with vGPU (Colin)
- Optimize idr init (Varma)
- Change intel_gvt_mpt as const (Julian)
- One comment error fix (Yan)
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
From: Zhenyu Wang <zhenyuw@linux.intel.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20201123090517.GC16939@zhen-hp.sh.intel.com
parents f287c536 9a3a238b
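In outline, the suspend/resume fix below threads a new resume hook from i915 into GVT; a sketch of the call chain, assembled from the hunks that follow:

/*
* i915_drm_resume()
*   -> intel_gvt_resume(dev_priv)            // no-op unless GVT is active
*      -> intel_gvt_pm_resume(gvt)
*         -> intel_gvt_restore_fence(gvt)    // rewrite fence regs from vregs
*         -> intel_gvt_restore_mmio(gvt)     // replay F_PM_SAVE-tagged regs
*         -> intel_gvt_restore_ggtt(gvt)     // rewrite saved host GGTT PTEs
*/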
@@ -173,22 +173,162 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
int pipe;
if (IS_BROXTON(dev_priv)) {
enum transcoder trans;
enum port port;
/* Clear PIPE, DDI, PHY, HPD before setting new */
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~(GEN8_DE_PORT_HOTPLUG(HPD_PORT_A) |
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B) |
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C));
for_each_pipe(dev_priv, pipe) {
vgpu_vreg_t(vgpu, PIPECONF(pipe)) &=
~(PIPECONF_ENABLE | I965_PIPECONF_ACTIVE);
vgpu_vreg_t(vgpu, DSPCNTR(pipe)) &= ~DISPLAY_PLANE_ENABLE;
vgpu_vreg_t(vgpu, SPRCTL(pipe)) &= ~SPRITE_ENABLE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) &= ~MCURSOR_MODE;
vgpu_vreg_t(vgpu, CURCNTR(pipe)) |= MCURSOR_MODE_DISABLE;
}
for (trans = TRANSCODER_A; trans <= TRANSCODER_EDP; trans++) {
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(trans)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK | TRANS_DDI_FUNC_ENABLE);
}
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) &=
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
for (port = PORT_A; port <= PORT_C; port++) {
vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) &=
~BXT_PHY_LANE_ENABLED;
vgpu_vreg_t(vgpu, BXT_PHY_CTL(port)) |=
(BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK);
vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(port)) &=
~(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
PORT_PLL_REF_SEL | PORT_PLL_LOCK |
PORT_PLL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) &=
~(DDI_INIT_DISPLAY_DETECTED |
DDI_BUF_CTL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(port)) |= DDI_BUF_IS_IDLE;
}
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) &= ~(BIT(0) | BIT(1));
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) &=
~PHY_POWER_GOOD;
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) &=
~PHY_POWER_GOOD;
vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) &= ~BIT(30);
vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) &= ~BIT(30);
vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, SFUSE_STRAP) &= ~SFUSE_STRAP_DDIC_DETECTED;
/*
* Only one pipe is enabled in the current vGPU display; PIPE_A is
* tied to TRANSCODER_A in HW, so it's safe to assume PIPE_A and
* TRANSCODER_A can be enabled. PORT_x depends on the input of
* setup_virtual_dp_monitor.
*/
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= PIPECONF_ENABLE;
vgpu_vreg_t(vgpu, PIPECONF(PIPE_A)) |= I965_PIPECONF_ACTIVE;
/*
* Golden M/N are calculated based on:
* 24 bpp, 4 lanes, 154000 pixel clk (from virtual EDID),
* DP link clk 1620 MHz and non-constant_n.
* TODO: calculate DP link symbol clk and stream clk m/n.
*/
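/*
* Worked check of the values below (a sketch, assuming the usual i915
* data M/N ratio (pixel_clk * bpp) / (link_clk * lanes * 8), with
* pixel_clk = 154000 kHz and link_clk = 162000 kHz for the 1.62 GHz rate):
*   (154000 * 24) / (162000 * 4 * 8) = 77/108
*   DATA_M1 ~= 77/108 * 0x800000 = 0x5b425e, with DATA_N1 = 0x800000
*/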
vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) = 63 << TU_SIZE_SHIFT;
vgpu_vreg_t(vgpu, PIPE_DATA_M1(TRANSCODER_A)) |= 0x5b425e;
vgpu_vreg_t(vgpu, PIPE_DATA_N1(TRANSCODER_A)) = 0x800000;
vgpu_vreg_t(vgpu, PIPE_LINK_M1(TRANSCODER_A)) = 0x3cd6e;
vgpu_vreg_t(vgpu, PIPE_LINK_N1(TRANSCODER_A)) = 0x80000;
/* Enable per-DDI/PORT vreg */
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(1);
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY1)) |=
PHY_POWER_GOOD;
vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY1)) |=
BIT(30);
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) |=
BXT_PHY_LANE_ENABLED;
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_A)) &=
~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK);
vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_A)) |=
(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
PORT_PLL_REF_SEL | PORT_PLL_LOCK |
PORT_PLL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) |=
(DDI_BUF_CTL_ENABLE | DDI_INIT_DISPLAY_DETECTED);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_A)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_EDP)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
PHY_POWER_GOOD;
vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
BIT(30);
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) |=
BXT_PHY_LANE_ENABLED;
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_B)) &=
~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK);
vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_B)) |=
(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
PORT_PLL_REF_SEL | PORT_PLL_LOCK |
PORT_PLL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) |=
DDI_BUF_CTL_ENABLE;
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_B)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |= SFUSE_STRAP_DDIC_DETECTED;
vgpu_vreg_t(vgpu, BXT_P_CR_GT_DISP_PWRON) |= BIT(0);
vgpu_vreg_t(vgpu, BXT_PORT_CL1CM_DW0(DPIO_PHY0)) |=
PHY_POWER_GOOD;
vgpu_vreg_t(vgpu, BXT_PHY_CTL_FAMILY(DPIO_PHY0)) |=
BIT(30);
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
BXT_PHY_LANE_ENABLED;
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) &=
~(BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK);
vgpu_vreg_t(vgpu, BXT_PORT_PLL_ENABLE(PORT_C)) |=
(PORT_PLL_POWER_STATE | PORT_PLL_POWER_ENABLE |
PORT_PLL_REF_SEL | PORT_PLL_LOCK |
PORT_PLL_ENABLE);
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) |=
DDI_BUF_CTL_ENABLE;
vgpu_vreg_t(vgpu, DDI_BUF_CTL(PORT_C)) &=
~DDI_BUF_IS_IDLE;
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
(TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
@@ -520,6 +660,45 @@ void intel_vgpu_emulate_hotplug(struct intel_vgpu *vgpu, bool connected)
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTD_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_D_HOTPLUG);
} else if (IS_BROXTON(i915)) {
if (connected) {
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) |=
SFUSE_STRAP_DDIC_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) |=
GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
} else {
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_A)) {
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_A);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_B)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIB_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_B);
}
if (intel_vgpu_has_monitor_on_port(vgpu, PORT_C)) {
vgpu_vreg_t(vgpu, SFUSE_STRAP) &=
~SFUSE_STRAP_DDIC_DETECTED;
vgpu_vreg_t(vgpu, GEN8_DE_PORT_ISR) &=
~GEN8_DE_PORT_HOTPLUG(HPD_PORT_C);
}
}
vgpu_vreg_t(vgpu, PCH_PORT_HOTPLUG) |=
PORTB_HOTPLUG_STATUS_MASK;
intel_vgpu_trigger_virtual_event(vgpu, DP_B_HOTPLUG);
}
}
...
@@ -636,9 +636,18 @@ static void ggtt_set_host_entry(struct intel_vgpu_mm *mm,
struct intel_gvt_gtt_entry *entry, unsigned long index)
{
struct intel_gvt_gtt_pte_ops *pte_ops = mm->vgpu->gvt->gtt.pte_ops;
unsigned long offset = index;
GEM_BUG_ON(mm->type != INTEL_GVT_MM_GGTT);
if (vgpu_gmadr_is_aperture(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
offset -= (vgpu_aperture_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
mm->ggtt_mm.host_ggtt_aperture[offset] = entry->val64;
} else if (vgpu_gmadr_is_hidden(mm->vgpu, index << I915_GTT_PAGE_SHIFT)) {
offset -= (vgpu_hidden_gmadr_base(mm->vgpu) >> PAGE_SHIFT);
mm->ggtt_mm.host_ggtt_hidden[offset] = entry->val64;
}
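/*
* Example with a hypothetical layout: if vgpu_aperture_gmadr_base()
* returned 0x10000000 (pfn 0x10000), GGTT index 0x10100 would be
* saved to host_ggtt_aperture[0x100].
*/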
pte_ops->set_entry(NULL, entry, index, false, 0, mm->vgpu);
}
@@ -1944,6 +1953,21 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
return ERR_PTR(-ENOMEM);
}
mm->ggtt_mm.host_ggtt_aperture = vzalloc((vgpu_aperture_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
if (!mm->ggtt_mm.host_ggtt_aperture) {
vfree(mm->ggtt_mm.virtual_ggtt);
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
mm->ggtt_mm.host_ggtt_hidden = vzalloc((vgpu_hidden_sz(vgpu) >> PAGE_SHIFT) * sizeof(u64));
if (!mm->ggtt_mm.host_ggtt_hidden) {
vfree(mm->ggtt_mm.host_ggtt_aperture);
vfree(mm->ggtt_mm.virtual_ggtt);
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
return mm;
}
@@ -1971,6 +1995,8 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
vfree(mm->ggtt_mm.host_ggtt_aperture);
vfree(mm->ggtt_mm.host_ggtt_hidden);
}
vgpu_free_mm(mm);
@@ -2852,3 +2878,41 @@ void intel_vgpu_reset_gtt(struct intel_vgpu *vgpu)
intel_vgpu_destroy_all_ppgtt_mm(vgpu);
intel_vgpu_reset_ggtt(vgpu, true);
}
/**
* intel_gvt_restore_ggtt - restore all vGPUs' GGTT entries
* @gvt: intel gvt device
*
* This function is called at the driver resume stage to restore
* GGTT entries of every vGPU.
*
*/
void intel_gvt_restore_ggtt(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
struct intel_vgpu_mm *mm;
int id;
gen8_pte_t pte;
u32 idx, num_low, num_hi, offset;
/* Restore dirty host ggtt for all vGPUs */
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
mm = vgpu->gtt.ggtt_mm;
num_low = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
offset = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
for (idx = 0; idx < num_low; idx++) {
pte = mm->ggtt_mm.host_ggtt_aperture[idx];
if (pte & _PAGE_PRESENT)
write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
}
num_hi = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
offset = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
for (idx = 0; idx < num_hi; idx++) {
pte = mm->ggtt_mm.host_ggtt_hidden[idx];
if (pte & _PAGE_PRESENT)
write_pte64(vgpu->gvt->gt->ggtt, offset + idx, pte);
}
}
}
@@ -164,6 +164,9 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
/* Save/restore for PM */
u64 *host_ggtt_aperture;
u64 *host_ggtt_hidden;
struct list_head partial_pte_list;
} ggtt_mm;
};
@@ -280,5 +283,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
unsigned int off, void *p_data, unsigned int bytes);
void intel_vgpu_destroy_all_ppgtt_mm(struct intel_vgpu *vgpu);
void intel_gvt_restore_ggtt(struct intel_gvt *gvt);
#endif /* _GVT_GTT_H_ */
@@ -312,7 +312,7 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
gvt_dbg_core("init gvt device\n");
idr_init_base(&gvt->vgpu_idr, 1);
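/* vGPU IDs are allocated starting from 1, so a base of 1 lets the IDR
* skip tracking the never-used ID 0 (the "optimize idr init" item above).
*/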
spin_lock_init(&gvt->scheduler.mmio_context_lock);
mutex_init(&gvt->lock);
mutex_init(&gvt->sched_lock);
@@ -406,7 +406,16 @@ int intel_gvt_init_device(struct drm_i915_private *i915)
}
int
intel_gvt_pm_resume(struct intel_gvt *gvt)
{
intel_gvt_restore_fence(gvt);
intel_gvt_restore_mmio(gvt);
intel_gvt_restore_ggtt(gvt);
return 0;
}
int
intel_gvt_register_hypervisor(const struct intel_gvt_mpt *m)
{
int ret;
void *gvt;
...
@@ -56,7 +56,7 @@ struct intel_gvt_host {
struct device *dev;
bool initialized;
int hypervisor_type;
const struct intel_gvt_mpt *mpt;
};
extern struct intel_gvt_host intel_gvt_host;
@@ -255,7 +255,9 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg requires save & restore during host PM suspend/resume */
#define F_PM_SAVE (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
/* This reg is in GVT's mmio save-restore list and in hardware
* logical context image
@@ -685,6 +687,7 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_init(struct intel_gvt *gvt);
void intel_gvt_debugfs_clean(struct intel_gvt *gvt);
int intel_gvt_pm_resume(struct intel_gvt *gvt);
#include "trace.h" #include "trace.h"
#include "mpt.h" #include "mpt.h"
......
@@ -3091,9 +3091,10 @@ static int init_skl_mmio_info(struct intel_gvt *gvt)
MMIO_DFH(TRVATTL3PTRDW(2), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVATTL3PTRDW(3), D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRVADR, D_SKL_PLUS, F_CMD_ACCESS, NULL, NULL);
MMIO_DFH(TRTTE, D_SKL_PLUS, F_CMD_ACCESS | F_PM_SAVE,
NULL, gen9_trtte_write);
MMIO_DFH(_MMIO(0x4dfc), D_SKL_PLUS, F_PM_SAVE,
NULL, gen9_trtt_chicken_write);
MMIO_D(_MMIO(0x46430), D_SKL_PLUS);
@@ -3630,3 +3631,40 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
intel_vgpu_default_mmio_read(vgpu, offset, pdata, bytes) :
intel_vgpu_default_mmio_write(vgpu, offset, pdata, bytes);
}
void intel_gvt_restore_fence(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int i, id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
mmio_hw_access_pre(gvt->gt);
for (i = 0; i < vgpu_fence_sz(vgpu); i++)
intel_vgpu_write_fence(vgpu, i, vgpu_vreg64(vgpu, fence_num_to_offset(i)));
mmio_hw_access_post(gvt->gt);
}
}
static inline int mmio_pm_restore_handler(struct intel_gvt *gvt,
u32 offset, void *data)
{
struct intel_vgpu *vgpu = data;
struct drm_i915_private *dev_priv = gvt->gt->i915;
if (gvt->mmio.mmio_attribute[offset >> 2] & F_PM_SAVE)
I915_WRITE(_MMIO(offset), vgpu_vreg(vgpu, offset));
return 0;
}
void intel_gvt_restore_mmio(struct intel_gvt *gvt)
{
struct intel_vgpu *vgpu;
int id;
idr_for_each_entry(&(gvt)->vgpu_idr, vgpu, id) {
mmio_hw_access_pre(gvt->gt);
intel_gvt_for_each_tracked_mmio(gvt, mmio_pm_restore_handler, vgpu);
mmio_hw_access_post(gvt->gt);
}
}
@@ -2099,7 +2099,7 @@ static bool kvmgt_is_valid_gfn(unsigned long handle, unsigned long gfn)
return ret;
}
static const struct intel_gvt_mpt kvmgt_mpt = {
.type = INTEL_GVT_HYPERVISOR_KVM,
.host_init = kvmgt_host_init,
.host_exit = kvmgt_host_exit,
...
@@ -280,6 +280,11 @@ void intel_vgpu_reset_mmio(struct intel_vgpu *vgpu, bool dmlr)
vgpu_vreg_t(vgpu, BXT_PHY_CTL(PORT_C)) |=
BXT_PHY_CMNLANE_POWERDOWN_ACK |
BXT_PHY_LANE_POWERDOWN_ACK;
vgpu_vreg_t(vgpu, SKL_FUSE_STATUS) |=
SKL_FUSE_DOWNLOAD_STATUS |
SKL_FUSE_PG_DIST_STATUS(SKL_PG0) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG1) |
SKL_FUSE_PG_DIST_STATUS(SKL_PG2);
}
} else {
#define GVT_GEN8_MMIO_RESET_OFFSET (0x44200)
...
@@ -104,4 +104,8 @@ int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
void intel_gvt_restore_fence(struct intel_gvt *gvt);
void intel_gvt_restore_mmio(struct intel_gvt *gvt);
#endif
@@ -392,7 +392,7 @@ static inline bool intel_gvt_hypervisor_is_valid_gfn(
return intel_gvt_host.mpt->is_valid_gfn(vgpu->handle, gfn);
}
int intel_gvt_register_hypervisor(const struct intel_gvt_mpt *);
void intel_gvt_unregister_hypervisor(void);
#endif /* _GVT_MPT_H_ */
@@ -393,7 +393,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init_base(&vgpu->object_idr, 1);
intel_vgpu_init_cfg_space(vgpu, param->primary);
vgpu->d3_entered = false;
...
@@ -1271,6 +1271,8 @@ static int i915_drm_resume(struct drm_device *dev)
intel_power_domains_enable(dev_priv);
intel_gvt_resume(dev_priv);
enable_rpm_wakeref_asserts(&dev_priv->runtime_pm);
return 0;
...
@@ -24,6 +24,7 @@
#include "i915_drv.h"
#include "i915_vgpu.h"
#include "intel_gvt.h"
#include "gvt/gvt.h"
/**
* DOC: Intel GVT-g host support
@@ -147,3 +148,17 @@ void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
intel_gvt_clean_device(dev_priv);
}
/**
* intel_gvt_resume - GVT resume routine wrapper
*
* @dev_priv: drm i915 private *
*
* This function is called at the i915 driver resume stage to restore required
* HW status for GVT so that vGPUs can continue running after resume.
*/
void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
if (intel_gvt_active(dev_priv))
intel_gvt_pm_resume(dev_priv->gvt);
}
@@ -33,6 +33,7 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv);
void intel_gvt_clean_device(struct drm_i915_private *dev_priv);
int intel_gvt_init_host(void);
void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv);
void intel_gvt_resume(struct drm_i915_private *dev_priv);
#else
static inline int intel_gvt_init(struct drm_i915_private *dev_priv)
{
@@ -46,6 +47,10 @@ static inline void intel_gvt_driver_remove(struct drm_i915_private *dev_priv)
static inline void intel_gvt_sanitize_options(struct drm_i915_private *dev_priv)
{
}
static inline void intel_gvt_resume(struct drm_i915_private *dev_priv)
{
}
#endif
#endif /* _INTEL_GVT_H_ */