Commit 8be8f4a9 authored by Daniel Vetter

Merge tag 'gvt-next-kvmgt-framework' of https://github.com/01org/gvt-linux into drm-intel-next-queued

Zhenyu Wang writes:

gvt-next-kvmgt-framework

This adds the initial KVMGT framework based on the GVT-g MPT (Mediated Passthrough) interface.
Signed-off-by: Daniel Vetter <daniel.vetter@intel.com>
parents 0c40ce13 f30437c5
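The diff below reworks the MPT (mediated pass-through) abstraction so a KVM/VFIO backend can plug in: the config-space and MMIO emulation entry points now take a typed struct intel_vgpu * instead of void *, a host_init/host_exit pair plus a struct intel_gvt_ops callback table are introduced, several MPT hooks become optional, and vGPU type management is added. For orientation only, here is a hypothetical sketch of the shape such an MPT backend takes, inferred from the struct intel_gvt_mpt changes and the symbol_get(kvmgt_mpt) lookup in this diff; it is not the contents of the new kvmgt.c (whose diff is collapsed below), and the example_* names, the include line and the EXPORT_SYMBOL_GPL choice are illustrative assumptions.

/* Hypothetical MPT backend skeleton; hook bodies are placeholders. */
#include <linux/device.h>
#include <linux/module.h>
#include "gvt.h"        /* assumed include for struct intel_gvt_mpt / intel_gvt_ops */

static const struct intel_gvt_ops *gvt_ops;     /* callback table handed over by GVT core */

static int example_detect_host(void)
{
        return 0;       /* pretend the host side is usable */
}

static int example_host_init(struct device *dev, void *gvt, const void *ops)
{
        gvt_ops = ops;  /* keep the ops to call vgpu_create()/vgpu_destroy() later */
        return 0;
}

static void example_host_exit(struct device *dev, void *gvt)
{
        gvt_ops = NULL;
}

/*
 * Optional hooks (attach_vgpu, map_gfn_to_mfn, set_trap_area, ...) may stay
 * NULL; the new wrappers in mpt.h treat missing hooks as no-ops.  The symbol
 * must be exported so that symbol_get(kvmgt_mpt) in intel_gvt_init_host()
 * can resolve it.
 */
struct intel_gvt_mpt kvmgt_mpt = {
        .detect_host = example_detect_host,
        .host_init = example_host_init,
        .host_exit = example_host_exit,
};
EXPORT_SYMBOL_GPL(kvmgt_mpt);

MODULE_LICENSE("GPL");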
@@ -107,6 +107,15 @@ config DRM_I915_GVT
          If in doubt, say "N".

config DRM_I915_GVT_KVMGT
        tristate "Enable KVM/VFIO support for Intel GVT-g"
        depends on DRM_I915_GVT
        depends on KVM
        default n
        help
          Choose this option if you want to enable KVMGT support for
          Intel GVT-g.

menu "drm/i915 Debugging"
        depends on DRM_I915
        depends on EXPERT
...
@@ -3,5 +3,8 @@ GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
        interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
        execlist.o scheduler.o sched_policy.o render.o cmd_parser.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))

CFLAGS_kvmgt.o := -Wno-unused-function
obj-$(CONFIG_DRM_I915_GVT_KVMGT) += $(GVT_DIR)/kvmgt.o
@@ -47,11 +47,9 @@ enum {
 * Returns:
 * Zero on success, negative error code if failed.
 */
-int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
{
-       struct intel_vgpu *vgpu = __vgpu;
        if (WARN_ON(bytes > 4))
                return -EINVAL;

@@ -82,9 +80,8 @@ static int map_aperture(struct intel_vgpu *vgpu, bool map)
        ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu, first_gfn,
                        first_mfn,
-                       vgpu_aperture_sz(vgpu)
-                       >> PAGE_SHIFT, map,
-                       GVT_MAP_APERTURE);
                        vgpu_aperture_sz(vgpu) >>
                        PAGE_SHIFT, map);
        if (ret)
                return ret;

@@ -235,10 +232,9 @@ static int emulate_pci_bar_write(struct intel_vgpu *vgpu, unsigned int offset,
 * Returns:
 * Zero on success, negative error code if failed.
 */
-int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes)
{
-       struct intel_vgpu *vgpu = __vgpu;
        int ret;

        if (WARN_ON(bytes > 4))
...
@@ -974,7 +974,7 @@ static int ppgtt_populate_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
}

static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
-               struct intel_gvt_gtt_entry *we, unsigned long index)
                unsigned long index)
{
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu_shadow_page *sp = &spt->shadow_page;

@@ -983,25 +983,26 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
        struct intel_gvt_gtt_entry e;
        int ret;

-       trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type,
-                       we->val64, index);
        ppgtt_get_shadow_entry(spt, &e, index);
        trace_gpt_change(spt->vgpu->id, "remove", spt, sp->type, e.val64,
                        index);

        if (!ops->test_present(&e))
                return 0;

        if (ops->get_pfn(&e) == vgpu->gtt.scratch_pt[sp->type].page_mfn)
                return 0;

-       if (gtt_type_is_pt(get_next_pt_type(we->type))) {
-               struct intel_vgpu_guest_page *g =
-                       intel_vgpu_find_guest_page(vgpu, ops->get_pfn(we));
-               if (!g) {
        if (gtt_type_is_pt(get_next_pt_type(e.type))) {
                struct intel_vgpu_ppgtt_spt *s =
                        ppgtt_find_shadow_page(vgpu, ops->get_pfn(&e));
                if (!s) {
                        gvt_err("fail to find guest page\n");
                        ret = -ENXIO;
                        goto fail;
                }
-               ret = ppgtt_invalidate_shadow_page(guest_page_to_ppgtt_spt(g));
                ret = ppgtt_invalidate_shadow_page(s);
                if (ret)
                        goto fail;
        }

@@ -1010,7 +1011,7 @@ static int ppgtt_handle_guest_entry_removal(struct intel_vgpu_guest_page *gpt,
        return 0;
fail:
        gvt_err("vgpu%d: fail: shadow page %p guest entry 0x%llx type %d\n",
-                       vgpu->id, spt, we->val64, we->type);
                        vgpu->id, spt, e.val64, e.type);
        return ret;
}
@@ -1231,23 +1232,16 @@ static int ppgtt_handle_guest_write_page_table(
        struct intel_vgpu_ppgtt_spt *spt = guest_page_to_ppgtt_spt(gpt);
        struct intel_vgpu *vgpu = spt->vgpu;
        struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-       struct intel_gvt_gtt_entry ge;
-       int old_present, new_present;
        int ret;
        int new_present;

-       ppgtt_get_guest_entry(spt, &ge, index);
-       old_present = ops->test_present(&ge);
        new_present = ops->test_present(we);

-       ppgtt_set_guest_entry(spt, we, index);
        ret = ppgtt_handle_guest_entry_removal(gpt, index);
        if (ret)
                goto fail;

-       if (old_present) {
-               ret = ppgtt_handle_guest_entry_removal(gpt, &ge, index);
-               if (ret)
-                       goto fail;
-       }
        if (new_present) {
                ret = ppgtt_handle_guest_entry_add(gpt, we, index);
                if (ret)

@@ -1293,7 +1287,7 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
{
        struct list_head *pos, *n;
        struct intel_vgpu_ppgtt_spt *spt;
-       struct intel_gvt_gtt_entry ge, e;
        struct intel_gvt_gtt_entry ge;
        unsigned long index;
        int ret;

@@ -1304,9 +1298,6 @@ int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu)
        for_each_set_bit(index, spt->post_shadow_bitmap,
                        GTT_ENTRY_NUM_IN_ONE_PAGE) {
                ppgtt_get_guest_entry(spt, &ge, index);
-               e = ge;
-               e.val64 = 0;
-               ppgtt_set_guest_entry(spt, &e, index);

                ret = ppgtt_handle_guest_write_page_table(
                                &spt->guest_page, &ge, index);

@@ -1334,8 +1325,6 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
        index = (pa & (PAGE_SIZE - 1)) >> info->gtt_entry_size_shift;

        ppgtt_get_guest_entry(spt, &we, index);
-       memcpy((void *)&we.val64 + (pa & (info->gtt_entry_size - 1)),
-                       p_data, bytes);

        ops->test_pse(&we);

@@ -1344,19 +1333,13 @@ static int ppgtt_handle_guest_write_page_table_bytes(void *gp,
                if (ret)
                        return ret;
        } else {
-               struct intel_gvt_gtt_entry ge;
-
-               ppgtt_get_guest_entry(spt, &ge, index);
-
                if (!test_bit(index, spt->post_shadow_bitmap)) {
-                       ret = ppgtt_handle_guest_entry_removal(gpt,
-                                       &ge, index);
                        ret = ppgtt_handle_guest_entry_removal(gpt, index);
                        if (ret)
                                return ret;
                }

                ppgtt_set_post_shadow(spt, index);
-               ppgtt_set_guest_entry(spt, &we, index);
        }

        if (!enable_out_of_sync)
...
@@ -44,11 +44,14 @@ static const char * const supported_hypervisors[] = {
        [INTEL_GVT_HYPERVISOR_KVM] = "KVM",
};

-struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops = {
static const struct intel_gvt_ops intel_gvt_ops = {
        .emulate_cfg_read = intel_vgpu_emulate_cfg_read,
        .emulate_cfg_write = intel_vgpu_emulate_cfg_write,
        .emulate_mmio_read = intel_vgpu_emulate_mmio_read,
        .emulate_mmio_write = intel_vgpu_emulate_mmio_write,
        .vgpu_create = intel_gvt_create_vgpu,
        .vgpu_destroy = intel_gvt_destroy_vgpu,
        .vgpu_reset = intel_gvt_reset_vgpu,
};

/**

@@ -81,10 +84,12 @@ int intel_gvt_init_host(void)
                                symbol_get(xengt_mpt), "xengt");
                intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_XEN;
        } else {
#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
                /* not in Xen. Try KVMGT */
                intel_gvt_host.mpt = try_then_request_module(
-                               symbol_get(kvmgt_mpt), "kvm");
                                symbol_get(kvmgt_mpt), "kvmgt");
                intel_gvt_host.hypervisor_type = INTEL_GVT_HYPERVISOR_KVM;
#endif
        }

        /* Fail to load MPT modules - bail out */

@@ -193,6 +198,9 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
        intel_gvt_clean_mmio_info(gvt);
        intel_gvt_free_firmware(gvt);

        intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
        intel_gvt_clean_vgpu_types(gvt);

        kfree(dev_priv->gvt);
        dev_priv->gvt = NULL;
}

@@ -270,10 +278,25 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
        if (ret)
                goto out_clean_cmd_parser;

-       gvt_dbg_core("gvt device creation is done\n");
        ret = intel_gvt_init_vgpu_types(gvt);
        if (ret)
                goto out_clean_thread;

        ret = intel_gvt_hypervisor_host_init(&dev_priv->drm.pdev->dev, gvt,
                        &intel_gvt_ops);
        if (ret) {
                gvt_err("failed to register gvt-g host device: %d\n", ret);
                goto out_clean_types;
        }

        gvt_dbg_core("gvt device initialization is done\n");
        dev_priv->gvt = gvt;
        return 0;

out_clean_types:
        intel_gvt_clean_vgpu_types(gvt);

out_clean_thread:
        clean_service_thread(gvt);

out_clean_cmd_parser:
        intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
...
@@ -161,6 +161,20 @@ struct intel_vgpu {
        DECLARE_BITMAP(tlb_handle_pending, I915_NUM_ENGINES);
        struct i915_gem_context *shadow_ctx;
        struct notifier_block shadow_ctx_notifier_block;

#if IS_ENABLED(CONFIG_DRM_I915_GVT_KVMGT)
        struct {
                struct device *mdev;
                struct vfio_region *region;
                int num_regions;
                struct eventfd_ctx *intx_trigger;
                struct eventfd_ctx *msi_trigger;
                struct rb_root cache;
                struct mutex cache_lock;
                void *vfio_group;
                struct notifier_block iommu_notifier;
        } vdev;
#endif
};

struct intel_gvt_gm {

@@ -190,6 +204,16 @@ struct intel_gvt_opregion {
        u32 opregion_pa;
};

#define NR_MAX_INTEL_VGPU_TYPES 20
struct intel_vgpu_type {
char name[16];
unsigned int max_instance;
unsigned int avail_instance;
unsigned int low_gm_size;
unsigned int high_gm_size;
unsigned int fence;
};
struct intel_gvt {
        struct mutex lock;
        struct drm_i915_private *dev_priv;

@@ -205,6 +229,8 @@ struct intel_gvt {
        struct intel_gvt_opregion opregion;
        struct intel_gvt_workload_scheduler scheduler;
        DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
        struct intel_vgpu_type *types;
        unsigned int num_types;

        struct task_struct *service_thread;
        wait_queue_head_t service_thread_wq;

@@ -230,6 +256,14 @@ static inline void intel_gvt_request_service(struct intel_gvt *gvt,
void intel_gvt_free_firmware(struct intel_gvt *gvt);
int intel_gvt_load_firmware(struct intel_gvt *gvt);

/* Aperture/GM space definitions for GVT device */
#define MB_TO_BYTES(mb) ((mb) << 20ULL)
#define BYTES_TO_MB(b) ((b) >> 20ULL)
#define HOST_LOW_GM_SIZE MB_TO_BYTES(128)
#define HOST_HIGH_GM_SIZE MB_TO_BYTES(384)
#define HOST_FENCE 4
/* Aperture/GM space definitions for GVT device */
#define gvt_aperture_sz(gvt) (gvt->dev_priv->ggtt.mappable_end)
#define gvt_aperture_pa_base(gvt) (gvt->dev_priv->ggtt.mappable_base)

@@ -330,11 +364,14 @@ static inline void intel_vgpu_write_pci_bar(struct intel_vgpu *vgpu,
        }
}

-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
-               struct intel_vgpu_creation_params *
-               param);
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt);
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt);

struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
                struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);

/* validating GM functions */
#define vgpu_gmadr_is_aperture(vgpu, gmadr) \

@@ -369,10 +406,10 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
                unsigned long *g_index);

-int intel_vgpu_emulate_cfg_read(void *__vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_read(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);
-int intel_vgpu_emulate_cfg_write(void *__vgpu, unsigned int offset,
int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
                void *p_data, unsigned int bytes);

void intel_gvt_clean_opregion(struct intel_gvt *gvt);

@@ -385,6 +422,22 @@ int intel_vgpu_emulate_opregion_request(struct intel_vgpu *vgpu, u32 swsci);
int setup_vgpu_mmio(struct intel_vgpu *vgpu);
void populate_pvinfo_page(struct intel_vgpu *vgpu);

struct intel_gvt_ops {
int (*emulate_cfg_read)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_cfg_write)(struct intel_vgpu *, unsigned int, void *,
unsigned int);
int (*emulate_mmio_read)(struct intel_vgpu *, u64, void *,
unsigned int);
int (*emulate_mmio_write)(struct intel_vgpu *, u64, void *,
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
void (*vgpu_destroy)(struct intel_vgpu *);
void (*vgpu_reset)(struct intel_vgpu *);
};
#include "mpt.h"

#endif
@@ -33,21 +33,14 @@
#ifndef _GVT_HYPERCALL_H_
#define _GVT_HYPERCALL_H_

-struct intel_gvt_io_emulation_ops {
-       int (*emulate_cfg_read)(void *, unsigned int, void *, unsigned int);
-       int (*emulate_cfg_write)(void *, unsigned int, void *, unsigned int);
-       int (*emulate_mmio_read)(void *, u64, void *, unsigned int);
-       int (*emulate_mmio_write)(void *, u64, void *, unsigned int);
-};
-
-extern struct intel_gvt_io_emulation_ops intel_gvt_io_emulation_ops;
-
/*
 * Specific GVT-g MPT modules function collections. Currently GVT-g supports
 * both Xen and KVM by providing dedicated hypervisor-related MPT modules.
 */
struct intel_gvt_mpt {
        int (*detect_host)(void);
        int (*host_init)(struct device *dev, void *gvt, const void *ops);
        void (*host_exit)(struct device *dev, void *gvt);
        int (*attach_vgpu)(void *vgpu, unsigned long *handle);
        void (*detach_vgpu)(unsigned long handle);
        int (*inject_msi)(unsigned long handle, u32 addr, u16 data);

@@ -60,8 +53,7 @@ struct intel_gvt_mpt {
                        unsigned long len);
        unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
        int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
-                       unsigned long mfn, unsigned int nr, bool map,
-                       int type);
                        unsigned long mfn, unsigned int nr, bool map);
        int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
                        bool map);
};
...
This diff is collapsed.
@@ -67,10 +67,9 @@ int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa)
 * Returns:
 * Zero on success, negative error code if failed
 */
-int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
-       struct intel_vgpu *vgpu = __vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;

@@ -179,10 +178,9 @@ int intel_vgpu_emulate_mmio_read(void *__vgpu, uint64_t pa,
 * Returns:
 * Zero on success, negative error code if failed
 */
-int intel_vgpu_emulate_mmio_write(void *__vgpu, uint64_t pa,
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
                void *p_data, unsigned int bytes)
{
-       struct intel_vgpu *vgpu = __vgpu;
        struct intel_gvt *gvt = vgpu->gvt;
        struct intel_gvt_mmio_info *mmio;
        unsigned int offset = 0;
...
@@ -87,10 +87,11 @@ struct intel_gvt_mmio_info *intel_gvt_find_mmio_info(struct intel_gvt *gvt,
})

int intel_vgpu_gpa_to_mmio_offset(struct intel_vgpu *vgpu, u64 gpa);

-int intel_vgpu_emulate_mmio_read(void *__vgpu, u64 pa, void *p_data,
-               unsigned int bytes);
-int intel_vgpu_emulate_mmio_write(void *__vgpu, u64 pa, void *p_data,
-               unsigned int bytes);
int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
                void *p_data, unsigned int bytes);
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
                void *p_data, unsigned int bytes);

bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
                unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
...
@@ -55,6 +55,35 @@ static inline int intel_gvt_hypervisor_detect_host(void)
        return intel_gvt_host.mpt->detect_host();
}
/**
* intel_gvt_hypervisor_host_init - init GVT-g host side
*
* Returns:
* Zero on success, negative error code if failed
*/
static inline int intel_gvt_hypervisor_host_init(struct device *dev,
void *gvt, const void *ops)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_init)
return 0;
return intel_gvt_host.mpt->host_init(dev, gvt, ops);
}
/**
* intel_gvt_hypervisor_host_exit - exit GVT-g host side
*/
static inline void intel_gvt_hypervisor_host_exit(struct device *dev,
void *gvt)
{
/* optional to provide */
if (!intel_gvt_host.mpt->host_exit)
return;
intel_gvt_host.mpt->host_exit(dev, gvt);
}
/**
 * intel_gvt_hypervisor_attach_vgpu - call hypervisor to initialize vGPU
 * related stuffs inside hypervisor.
 *

@@ -64,6 +93,10 @@ static inline int intel_gvt_hypervisor_detect_host(void)
 */
static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
{
        /* optional to provide */
        if (!intel_gvt_host.mpt->attach_vgpu)
                return 0;

        return intel_gvt_host.mpt->attach_vgpu(vgpu, &vgpu->handle);
}

@@ -76,6 +109,10 @@ static inline int intel_gvt_hypervisor_attach_vgpu(struct intel_vgpu *vgpu)
 */
static inline void intel_gvt_hypervisor_detach_vgpu(struct intel_vgpu *vgpu)
{
        /* optional to provide */
        if (!intel_gvt_host.mpt->detach_vgpu)
                return;

        intel_gvt_host.mpt->detach_vgpu(vgpu->handle);
}

@@ -224,11 +261,6 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
        return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}

-enum {
-       GVT_MAP_APERTURE = 0,
-       GVT_MAP_OPREGION,
-};
-
/**
 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
 * @vgpu: a vGPU

@@ -236,7 +268,6 @@ enum {
 * @mfn: host PFN
 * @nr: amount of PFNs
 * @map: map or unmap
- * @type: map type
 *
 * Returns:
 * Zero on success, negative error code if failed.

@@ -244,10 +275,14 @@ enum {
static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
                struct intel_vgpu *vgpu, unsigned long gfn,
                unsigned long mfn, unsigned int nr,
-               bool map, int type)
                bool map)
{
        /* a MPT implementation could have MMIO mapped elsewhere */
        if (!intel_gvt_host.mpt->map_gfn_to_mfn)
                return 0;

        return intel_gvt_host.mpt->map_gfn_to_mfn(vgpu->handle, gfn, mfn, nr,
-                       map, type);
                        map);
}

/**

@@ -263,6 +298,10 @@ static inline int intel_gvt_hypervisor_map_gfn_to_mfn(
static inline int intel_gvt_hypervisor_set_trap_area(
                struct intel_vgpu *vgpu, u64 start, u64 end, bool map)
{
        /* a MPT implementation could have MMIO trapped elsewhere */
        if (!intel_gvt_host.mpt->set_trap_area)
                return 0;

        return intel_gvt_host.mpt->set_trap_area(vgpu->handle, start, end, map);
}
...
@@ -73,7 +73,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
                }
                ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
                                vgpu_opregion(vgpu)->gfn[i],
-                               mfn, 1, map, GVT_MAP_OPREGION);
                                mfn, 1, map);
                if (ret) {
                        gvt_err("fail to map GFN to MFN, errno: %d\n", ret);
                        return ret;

@@ -89,28 +89,18 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 */
void intel_vgpu_clean_opregion(struct intel_vgpu *vgpu)
{
-       int i;
-
        gvt_dbg_core("vgpu%d: clean vgpu opregion\n", vgpu->id);

        if (!vgpu_opregion(vgpu)->va)
                return;

-       if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-               vunmap(vgpu_opregion(vgpu)->va);
-               for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-                       if (vgpu_opregion(vgpu)->pages[i]) {
-                               put_page(vgpu_opregion(vgpu)->pages[i]);
-                               vgpu_opregion(vgpu)->pages[i] = NULL;
-                       }
-               }
-       } else {
        if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_XEN) {
                map_vgpu_opregion(vgpu, false);
                free_pages((unsigned long)vgpu_opregion(vgpu)->va,
                                INTEL_GVT_OPREGION_PORDER);
-       }

                vgpu_opregion(vgpu)->va = NULL;
        }
}

/**

@@ -137,22 +127,8 @@ int intel_vgpu_init_opregion(struct intel_vgpu *vgpu, u32 gpa)
                ret = map_vgpu_opregion(vgpu, true);
                if (ret)
                        return ret;
-       } else {
-               gvt_dbg_core("emulate opregion from userspace\n");
-
-               /*
-                * If opregion pages are not allocated from host kenrel,
-                * most of the params are meaningless
-                */
-               ret = intel_gvt_hypervisor_map_gfn_to_mfn(vgpu,
-                               0, /* not used */
-                               0, /* not used */
-                               2, /* not used */
-                               1,
-                               GVT_MAP_OPREGION);
-               if (ret)
-                       return ret;
        }

        return 0;
}
...
@@ -89,15 +89,15 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
-               dst = kmap_atomic(page);
                dst = kmap(page);
                intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
                                GTT_PAGE_SIZE);
-               kunmap_atomic(dst);
                kunmap(page);
                i++;
        }

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap_atomic(page);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_read_gpa(vgpu, workload->ring_context_gpa \

@@ -123,7 +123,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

-       kunmap_atomic(shadow_ring_context);
        kunmap(page);
        return 0;
}

@@ -318,10 +318,10 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                }

                page = i915_gem_object_get_page(ctx_obj, LRC_PPHWSP_PN + i);
-               src = kmap_atomic(page);
                src = kmap(page);
                intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
                                GTT_PAGE_SIZE);
-               kunmap_atomic(src);
                kunmap(page);
                i++;
        }

@@ -329,7 +329,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                        RING_CTX_OFF(ring_header.val), &workload->rb_tail, 4);

        page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
-       shadow_ring_context = kmap_atomic(page);
        shadow_ring_context = kmap(page);

#define COPY_REG(name) \
        intel_gvt_hypervisor_write_gpa(vgpu, workload->ring_context_gpa + \

@@ -347,7 +347,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
                        sizeof(*shadow_ring_context),
                        GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

-       kunmap_atomic(shadow_ring_context);
        kunmap(page);
}

static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
...
@@ -132,6 +132,106 @@ void populate_pvinfo_page(struct intel_vgpu *vgpu)

        WARN_ON(sizeof(struct vgt_if) != VGT_PVINFO_SIZE);
}
/**
* intel_gvt_init_vgpu_types - initialize vGPU type list
* @gvt : GVT device
*
* Initialize vGPU type list based on available resource.
*
*/
int intel_gvt_init_vgpu_types(struct intel_gvt *gvt)
{
unsigned int num_types;
unsigned int i, low_avail;
unsigned int min_low;
/* vGPU type name is defined as GVTg_Vx_y which contains
* physical GPU generation type and 'y' means maximum vGPU
* instances user can create on one physical GPU for this
* type.
*
* Depend on physical SKU resource, might see vGPU types like
* GVTg_V4_8, GVTg_V4_4, GVTg_V4_2, etc. We can create
* different types of vGPU on same physical GPU depending on
* available resource. Each vGPU type will have "avail_instance"
* to indicate how many vGPU instance can be created for this
* type.
*
* Currently use static size here as we init type earlier..
*/
low_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE;
num_types = 4;
gvt->types = kzalloc(num_types * sizeof(struct intel_vgpu_type),
GFP_KERNEL);
if (!gvt->types)
return -ENOMEM;
min_low = MB_TO_BYTES(32);
for (i = 0; i < num_types; ++i) {
if (low_avail / min_low == 0)
break;
gvt->types[i].low_gm_size = min_low;
gvt->types[i].high_gm_size = 3 * gvt->types[i].low_gm_size;
gvt->types[i].fence = 4;
gvt->types[i].max_instance = low_avail / min_low;
gvt->types[i].avail_instance = gvt->types[i].max_instance;
if (IS_GEN8(gvt->dev_priv))
sprintf(gvt->types[i].name, "GVTg_V4_%u",
gvt->types[i].max_instance);
else if (IS_GEN9(gvt->dev_priv))
sprintf(gvt->types[i].name, "GVTg_V5_%u",
gvt->types[i].max_instance);
min_low <<= 1;
gvt_dbg_core("type[%d]: %s max %u avail %u low %u high %u fence %u\n",
i, gvt->types[i].name, gvt->types[i].max_instance,
gvt->types[i].avail_instance,
gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence);
}
gvt->num_types = i;
return 0;
}
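With the static sizes used above (MB_TO_BYTES(256) minus HOST_LOW_GM_SIZE leaves 128 MB of low GM, and min_low starts at 32 MB), the loop produces three types of 4, 2 and 1 instances, i.e. GVTg_V4_4, GVTg_V4_2 and GVTg_V4_1 on a Gen8 part (GVTg_V5_* on Gen9), each with high_gm_size three times its low_gm_size and 4 fence registers; the fourth iteration stops because 128 MB / 256 MB truncates to zero, so num_types ends up as 3.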
void intel_gvt_clean_vgpu_types(struct intel_gvt *gvt)
{
kfree(gvt->types);
}
static void intel_gvt_update_vgpu_types(struct intel_gvt *gvt)
{
int i;
unsigned int low_gm_avail, high_gm_avail, fence_avail;
unsigned int low_gm_min, high_gm_min, fence_min, total_min;
/* Need to depend on maxium hw resource size but keep on
* static config for now.
*/
low_gm_avail = MB_TO_BYTES(256) - HOST_LOW_GM_SIZE -
gvt->gm.vgpu_allocated_low_gm_size;
high_gm_avail = MB_TO_BYTES(256) * 3 - HOST_HIGH_GM_SIZE -
gvt->gm.vgpu_allocated_high_gm_size;
fence_avail = gvt_fence_sz(gvt) - HOST_FENCE -
gvt->fence.vgpu_allocated_fence_num;
for (i = 0; i < gvt->num_types; i++) {
low_gm_min = low_gm_avail / gvt->types[i].low_gm_size;
high_gm_min = high_gm_avail / gvt->types[i].high_gm_size;
fence_min = fence_avail / gvt->types[i].fence;
total_min = min(min(low_gm_min, high_gm_min), fence_min);
gvt->types[i].avail_instance = min(gvt->types[i].max_instance,
total_min);
gvt_dbg_core("update type[%d]: %s max %u avail %u low %u high %u fence %u\n",
i, gvt->types[i].name, gvt->types[i].max_instance,
gvt->types[i].avail_instance, gvt->types[i].low_gm_size,
gvt->types[i].high_gm_size, gvt->types[i].fence);
}
}
/**
 * intel_gvt_destroy_vgpu - destroy a virtual GPU
 * @vgpu: virtual GPU

@@ -166,20 +266,11 @@ void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu)
        clean_vgpu_mmio(vgpu);
        vfree(vgpu);

        intel_gvt_update_vgpu_types(gvt);

        mutex_unlock(&gvt->lock);
}

-/**
- * intel_gvt_create_vgpu - create a virtual GPU
- * @gvt: GVT device
- * @param: vGPU creation parameters
- *
- * This function is called when user wants to create a virtual GPU.
- *
- * Returns:
- * pointer to intel_vgpu, error pointer if failed.
- */
-struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
                struct intel_vgpu_creation_params *param)
{
        struct intel_vgpu *vgpu;

@@ -224,15 +315,9 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        if (ret)
                goto out_detach_hypervisor_vgpu;

-       if (intel_gvt_host.hypervisor_type == INTEL_GVT_HYPERVISOR_KVM) {
-               ret = intel_vgpu_init_opregion(vgpu, 0);
-               if (ret)
-                       goto out_clean_gtt;
-       }
-
        ret = intel_vgpu_init_display(vgpu);
        if (ret)
-               goto out_clean_opregion;
                goto out_clean_gtt;

        ret = intel_vgpu_init_execlist(vgpu);
        if (ret)

@@ -257,8 +342,6 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        intel_vgpu_clean_execlist(vgpu);
out_clean_display:
        intel_vgpu_clean_display(vgpu);
-out_clean_opregion:
-       intel_vgpu_clean_opregion(vgpu);
out_clean_gtt:
        intel_vgpu_clean_gtt(vgpu);
out_detach_hypervisor_vgpu:

@@ -272,3 +355,49 @@ struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
        mutex_unlock(&gvt->lock);
        return ERR_PTR(ret);
}
/**
* intel_gvt_create_vgpu - create a virtual GPU
* @gvt: GVT device
* @type: type of the vGPU to create
*
* This function is called when user wants to create a virtual GPU.
*
* Returns:
* pointer to intel_vgpu, error pointer if failed.
*/
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type)
{
struct intel_vgpu_creation_params param;
struct intel_vgpu *vgpu;
param.handle = 0;
param.low_gm_sz = type->low_gm_size;
param.high_gm_sz = type->high_gm_size;
param.fence_sz = type->fence;
/* XXX current param based on MB */
param.low_gm_sz = BYTES_TO_MB(param.low_gm_sz);
param.high_gm_sz = BYTES_TO_MB(param.high_gm_sz);
vgpu = __intel_gvt_create_vgpu(gvt, &param);
if (IS_ERR(vgpu))
return vgpu;
/* calculate left instance change for types */
intel_gvt_update_vgpu_types(gvt);
return vgpu;
}
/**
* intel_gvt_reset_vgpu - reset a virtual GPU
* @vgpu: virtual GPU
*
* This function is called when user wants to reset a virtual GPU.
*
*/
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu)
{
}