Commit ade4d441 authored by Jani Nikula

Merge tag 'gvt-fixes-2016-12-26' of https://github.com/01org/gvt-linux into drm-intel-fixes

From Zhenyu, "This is current GVT-g device model fixes for 4.10. I need
to base on v4.10-rc1 for merged vfio and KVMGT support."
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parents 7ce7d89f 4e0203ba
@@ -123,6 +123,7 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 	u8 changed = old ^ new;
 	int ret;
 
+	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	if (!(changed & PCI_COMMAND_MEMORY))
 		return 0;
 
@@ -142,7 +143,6 @@ static int emulate_pci_command_write(struct intel_vgpu *vgpu,
 			return ret;
 	}
 
-	memcpy(vgpu_cfg_space(vgpu) + offset, p_data, bytes);
 	return 0;
 }
 
@@ -240,7 +240,7 @@ int intel_vgpu_emulate_cfg_write(struct intel_vgpu *vgpu, unsigned int offset,
 	if (WARN_ON(bytes > 4))
 		return -EINVAL;
 
-	if (WARN_ON(offset + bytes >= INTEL_GVT_MAX_CFG_SPACE_SZ))
+	if (WARN_ON(offset + bytes > INTEL_GVT_MAX_CFG_SPACE_SZ))
 		return -EINVAL;
 
 	/* First check if it's PCI_COMMAND */
...
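The last hunk is an off-by-one fix: a write that ends exactly at INTEL_GVT_MAX_CFG_SPACE_SZ is still in bounds, and the old `>=` test rejected it. A minimal userspace sketch of the corrected check (the 256-byte size and the names here are illustrative, not the driver's constants):

#include <assert.h>

#define CFG_SPACE_SZ 256	/* illustrative size, not the driver's constant */

/* Accept a write of `bytes` at `offset` only if it stays inside the space. */
static int write_in_bounds(unsigned int offset, unsigned int bytes)
{
	/* offset + bytes == CFG_SPACE_SZ is legal: the write ends exactly
	 * at the last byte. The old `>=` check wrongly rejected it. */
	return offset + bytes <= CFG_SPACE_SZ;
}

int main(void)
{
	assert(write_in_bounds(CFG_SPACE_SZ - 4, 4));	/* last dword: now accepted */
	assert(!write_in_bounds(CFG_SPACE_SZ - 3, 4));	/* overruns by one byte */
	return 0;
}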
@@ -1998,6 +1998,8 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 	INIT_LIST_HEAD(&gtt->oos_page_list_head);
 	INIT_LIST_HEAD(&gtt->post_shadow_list_head);
 
+	intel_vgpu_reset_ggtt(vgpu);
+
 	ggtt_mm = intel_vgpu_create_mm(vgpu, INTEL_GVT_MM_GGTT,
 			NULL, 1, 0);
 	if (IS_ERR(ggtt_mm)) {
@@ -2206,6 +2208,7 @@ int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
 int intel_gvt_init_gtt(struct intel_gvt *gvt)
 {
 	int ret;
+	void *page_addr;
 
 	gvt_dbg_core("init gtt\n");
 
@@ -2218,6 +2221,23 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
 		return -ENODEV;
 	}
 
+	gvt->gtt.scratch_ggtt_page =
+		alloc_page(GFP_KERNEL | GFP_ATOMIC | __GFP_ZERO);
+	if (!gvt->gtt.scratch_ggtt_page) {
+		gvt_err("fail to allocate scratch ggtt page\n");
+		return -ENOMEM;
+	}
+
+	page_addr = page_address(gvt->gtt.scratch_ggtt_page);
+
+	gvt->gtt.scratch_ggtt_mfn =
+		intel_gvt_hypervisor_virt_to_mfn(page_addr);
+	if (gvt->gtt.scratch_ggtt_mfn == INTEL_GVT_INVALID_ADDR) {
+		gvt_err("fail to translate scratch ggtt page\n");
+		__free_page(gvt->gtt.scratch_ggtt_page);
+		return -EFAULT;
+	}
+
 	if (enable_out_of_sync) {
 		ret = setup_spt_oos(gvt);
 		if (ret) {
@@ -2239,6 +2259,41 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
  */
 void intel_gvt_clean_gtt(struct intel_gvt *gvt)
 {
+	__free_page(gvt->gtt.scratch_ggtt_page);
+
 	if (enable_out_of_sync)
 		clean_spt_oos(gvt);
 }
+
+/**
+ * intel_vgpu_reset_ggtt - reset the GGTT entry
+ * @vgpu: a vGPU
+ *
+ * This function is called at the vGPU create stage
+ * to reset all the GGTT entries.
+ *
+ */
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu)
+{
+	struct intel_gvt *gvt = vgpu->gvt;
+	struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
+	u32 index;
+	u32 offset;
+	u32 num_entries;
+	struct intel_gvt_gtt_entry e;
+
+	memset(&e, 0, sizeof(struct intel_gvt_gtt_entry));
+	e.type = GTT_TYPE_GGTT_PTE;
+	ops->set_pfn(&e, gvt->gtt.scratch_ggtt_mfn);
+	e.val64 |= _PAGE_PRESENT;
+
+	index = vgpu_aperture_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_aperture_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+
+	index = vgpu_hidden_gmadr_base(vgpu) >> PAGE_SHIFT;
+	num_entries = vgpu_hidden_sz(vgpu) >> PAGE_SHIFT;
+	for (offset = 0; offset < num_entries; offset++)
+		ops->set_entry(NULL, &e, index + offset, false, 0, vgpu);
+}
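intel_vgpu_reset_ggtt() builds a single template PTE pointing at the shared scratch page and stamps it across both the aperture and the hidden GM range, so a freshly created vGPU never sees stale GGTT translations. A userspace sketch of the same fill pattern, with hypothetical stand-ins for the GVT entry type and table (sizes and names are illustrative):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SHIFT	12
#define PRESENT_BIT	0x1ULL
#define NUM_PTES	16	/* illustrative table size */

/* Hypothetical stand-in for the GVT GTT entry. */
struct gtt_entry { uint64_t val64; };

static uint64_t gtt[NUM_PTES];	/* the "GGTT" being reset */

static void set_entry(unsigned int index, const struct gtt_entry *e)
{
	gtt[index] = e->val64;
}

/* Point every PTE in [base, base + num) at one scratch page,
 * mirroring how one template entry is stamped over a whole range. */
static void reset_range(uint64_t scratch_pfn, unsigned int base, unsigned int num)
{
	struct gtt_entry e;
	unsigned int i;

	memset(&e, 0, sizeof(e));
	e.val64 = (scratch_pfn << PAGE_SHIFT) | PRESENT_BIT;

	for (i = 0; i < num; i++)
		set_entry(base + i, &e);
}

int main(void)
{
	reset_range(0xabcd, 0, 8);	/* "aperture" half */
	reset_range(0xabcd, 8, 8);	/* "hidden" half */
	printf("gtt[0] = %#llx\n", (unsigned long long)gtt[0]);
	return 0;
}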
@@ -81,6 +81,9 @@ struct intel_gvt_gtt {
 	struct list_head oos_page_use_list_head;
 	struct list_head oos_page_free_list_head;
 	struct list_head mm_lru_list_head;
+
+	struct page *scratch_ggtt_page;
+	unsigned long scratch_ggtt_mfn;
 };
 
 enum {
@@ -202,6 +205,7 @@ struct intel_vgpu_gtt {
 extern int intel_vgpu_init_gtt(struct intel_vgpu *vgpu);
 extern void intel_vgpu_clean_gtt(struct intel_vgpu *vgpu);
+void intel_vgpu_reset_ggtt(struct intel_vgpu *vgpu);
 
 extern int intel_gvt_init_gtt(struct intel_gvt *gvt);
 extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
...
@@ -175,6 +175,7 @@ struct intel_vgpu {
 		struct notifier_block group_notifier;
 		struct kvm *kvm;
 		struct work_struct release_work;
+		atomic_t released;
 	} vdev;
 #endif
 };
...
@@ -114,12 +114,15 @@ static struct gvt_dma *__gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 static kvm_pfn_t gvt_cache_find(struct intel_vgpu *vgpu, gfn_t gfn)
 {
 	struct gvt_dma *entry;
+	kvm_pfn_t pfn;
 
 	mutex_lock(&vgpu->vdev.cache_lock);
+
 	entry = __gvt_cache_find(vgpu, gfn);
-	mutex_unlock(&vgpu->vdev.cache_lock);
+	pfn = (entry == NULL) ? 0 : entry->pfn;
 
-	return entry == NULL ? 0 : entry->pfn;
+	mutex_unlock(&vgpu->vdev.cache_lock);
+	return pfn;
 }
 
 static void gvt_cache_add(struct intel_vgpu *vgpu, gfn_t gfn, kvm_pfn_t pfn)
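The gvt_cache_find() change fixes a use-after-unlock: entry->pfn was read after mutex_unlock(), so a concurrent cache removal could free the entry first. The fix copies the value into a local while the lock is still held. A pthreads sketch of that discipline, with hypothetical names:

#include <pthread.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical cache entry keyed by guest frame number (gfn). */
struct cache_entry {
	uint64_t gfn;
	uint64_t pfn;
	struct cache_entry *next;
};

static struct cache_entry *cache_head;
static pthread_mutex_t cache_lock = PTHREAD_MUTEX_INITIALIZER;

static uint64_t cache_find(uint64_t gfn)
{
	struct cache_entry *e;
	uint64_t pfn;

	pthread_mutex_lock(&cache_lock);
	for (e = cache_head; e; e = e->next)
		if (e->gfn == gfn)
			break;
	/* Copy the result out while the lock still keeps `e` alive;
	 * dereferencing `e` after unlock would race with removal. */
	pfn = e ? e->pfn : 0;
	pthread_mutex_unlock(&cache_lock);

	return pfn;
}

int main(void)
{
	static struct cache_entry ent = { .gfn = 42, .pfn = 0x1234 };

	cache_head = &ent;
	printf("pfn for gfn 42: %#llx\n", (unsigned long long)cache_find(42));
	return 0;
}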
@@ -497,7 +500,16 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 		goto undo_iommu;
 	}
 
-	return kvmgt_guest_init(mdev);
+	ret = kvmgt_guest_init(mdev);
+	if (ret)
+		goto undo_group;
+
+	atomic_set(&vgpu->vdev.released, 0);
+	return ret;
+
+undo_group:
+	vfio_unregister_notifier(&mdev->dev, VFIO_GROUP_NOTIFY,
+					&vgpu->vdev.group_notifier);
 
 undo_iommu:
 	vfio_unregister_notifier(&mdev->dev, VFIO_IOMMU_NOTIFY,
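The intel_vgpu_open() change adds an undo_group label so a kvmgt_guest_init() failure also unregisters the group notifier instead of leaking it. A sketch of this goto-unwind error-handling pattern, with hypothetical acquire/release pairs standing in for the notifier setup:

#include <stdio.h>

static int acquire_iommu(void)  { puts("iommu registered");  return 0; }
static void release_iommu(void) { puts("iommu unregistered"); }
static int acquire_group(void)  { puts("group registered");  return 0; }
static void release_group(void) { puts("group unregistered"); }
static int guest_init(void)     { return -1; /* force a failure */ }

static int open_device(void)
{
	int ret;

	ret = acquire_iommu();
	if (ret)
		goto out;

	ret = acquire_group();
	if (ret)
		goto undo_iommu;

	ret = guest_init();
	if (ret)
		goto undo_group;	/* the fix: unwind the group step too */

	return 0;

undo_group:
	release_group();
undo_iommu:
	release_iommu();
out:
	return ret;
}

int main(void)
{
	return open_device() ? 1 : 0;
}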
@@ -509,17 +521,26 @@ static int intel_vgpu_open(struct mdev_device *mdev)
 static void __intel_vgpu_release(struct intel_vgpu *vgpu)
 {
 	struct kvmgt_guest_info *info;
+	int ret;
 
 	if (!handle_valid(vgpu->handle))
 		return;
 
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
+	if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
+		return;
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_IOMMU_NOTIFY,
 					&vgpu->vdev.iommu_notifier);
-	vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
+	WARN(ret, "vfio_unregister_notifier for iommu failed: %d\n", ret);
+
+	ret = vfio_unregister_notifier(&vgpu->vdev.mdev->dev, VFIO_GROUP_NOTIFY,
 					&vgpu->vdev.group_notifier);
+	WARN(ret, "vfio_unregister_notifier for group failed: %d\n", ret);
 
 	info = (struct kvmgt_guest_info *)vgpu->handle;
 	kvmgt_guest_exit(info);
+
+	vgpu->vdev.kvm = NULL;
 	vgpu->handle = 0;
 }
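__intel_vgpu_release() can be reached twice, from the device release path and from the group-notifier work item; atomic_cmpxchg() on the new released flag returns the old value, so exactly one caller sees 0 and performs the teardown. A C11 sketch of the release-once pattern:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int released;	/* 0 = still open, 1 = torn down */

static void release_once(const char *who)
{
	int expected = 0;

	/* Only the first caller swaps 0 -> 1 and runs the teardown;
	 * everyone else sees the swap fail and returns early. */
	if (!atomic_compare_exchange_strong(&released, &expected, 1)) {
		printf("%s: already released, skipping\n", who);
		return;
	}
	printf("%s: performing teardown\n", who);
}

int main(void)
{
	release_once("fd release");		/* tears down */
	release_once("group notifier");	/* returns early */
	return 0;
}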
@@ -534,6 +555,7 @@ static void intel_vgpu_release_work(struct work_struct *work)
 {
 	struct intel_vgpu *vgpu = container_of(work, struct intel_vgpu,
 					vdev.release_work);
+
 	__intel_vgpu_release(vgpu);
 }
@@ -1134,6 +1156,10 @@ static int kvmgt_write_protect_add(unsigned long handle, u64 gfn)
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
@@ -1164,6 +1190,10 @@ static int kvmgt_write_protect_remove(unsigned long handle, u64 gfn)
 	idx = srcu_read_lock(&kvm->srcu);
 	slot = gfn_to_memslot(kvm, gfn);
+	if (!slot) {
+		srcu_read_unlock(&kvm->srcu, idx);
+		return -EINVAL;
+	}
 
 	spin_lock(&kvm->mmu_lock);
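Both write-protect helpers now handle a failed gfn_to_memslot() lookup, and the important detail is dropping the SRCU read lock on the error path rather than returning with the read-side critical section still open. A pthreads rwlock sketch of the same unlock-on-every-path rule, names hypothetical:

#include <errno.h>
#include <pthread.h>
#include <stddef.h>

static pthread_rwlock_t slots_lock = PTHREAD_RWLOCK_INITIALIZER;

struct memslot { unsigned long base; };

/* Hypothetical lookup that may fail. */
static struct memslot *find_slot(unsigned long gfn)
{
	(void)gfn;
	return NULL;	/* simulate a gfn with no backing slot */
}

static int protect_gfn(unsigned long gfn)
{
	struct memslot *slot;

	pthread_rwlock_rdlock(&slots_lock);

	slot = find_slot(gfn);
	if (!slot) {
		/* The fix: release the read lock on every return path. */
		pthread_rwlock_unlock(&slots_lock);
		return -EINVAL;
	}

	/* ... operate on the slot under the lock ... */
	pthread_rwlock_unlock(&slots_lock);
	return 0;
}

int main(void)
{
	return protect_gfn(0x1000) == -EINVAL ? 0 : 1;
}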
@@ -1311,18 +1341,14 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
 static bool kvmgt_guest_exit(struct kvmgt_guest_info *info)
 {
-	struct intel_vgpu *vgpu;
-
 	if (!info) {
 		gvt_err("kvmgt_guest_info invalid\n");
 		return false;
 	}
 
-	vgpu = info->vgpu;
-
 	kvm_page_track_unregister_notifier(info->kvm, &info->track_node);
 	kvmgt_protect_table_destroy(info);
-	gvt_cache_destroy(vgpu);
+	gvt_cache_destroy(info->vgpu);
 	vfree(info);
 
 	return true;
...
@@ -65,7 +65,7 @@ static int map_vgpu_opregion(struct intel_vgpu *vgpu, bool map)
 	int i, ret;
 
 	for (i = 0; i < INTEL_GVT_OPREGION_PAGES; i++) {
-		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)
+		mfn = intel_gvt_hypervisor_virt_to_mfn(vgpu_opregion(vgpu)->va
 			+ i * PAGE_SIZE);
 		if (mfn == INTEL_GVT_INVALID_ADDR) {
 			gvt_err("fail to get MFN from VA\n");
...
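The opregion fix matters because vgpu_opregion(vgpu) evaluates to a struct pointer, so `+ i * PAGE_SIZE` advanced it in units of the struct size rather than bytes; the intended base is the mapped va member. A small illustration with a hypothetical descriptor type:

#include <stddef.h>
#include <stdio.h>

#define PAGE_SIZE 4096

/* Hypothetical stand-in for the vGPU opregion descriptor. */
struct vgpu_opregion {
	void *va;		/* base VA of the mapped opregion pages */
	unsigned long gpa;
};

int main(void)
{
	static char backing[3 * PAGE_SIZE];
	struct vgpu_opregion op = { .va = backing };
	struct vgpu_opregion *p = &op;
	int i = 1;

	/* Buggy: struct-pointer arithmetic advances in units of
	 * sizeof(struct vgpu_opregion), not bytes. */
	size_t buggy_stride = sizeof(*p) * (size_t)i * PAGE_SIZE;
	/* Fixed: byte arithmetic from the mapped base address. */
	void *right = (char *)p->va + (size_t)i * PAGE_SIZE;

	printf("buggy stride would be %zu bytes, intended %d bytes\n",
	       buggy_stride, i * PAGE_SIZE);
	printf("correct page address: %p\n", right);
	return 0;
}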