Commit e502a2af authored by Changbin Du, committed by Zhenyu Wang

drm/i915/gvt: Provide generic page_track infrastructure for write-protected page

This patch provides a generic page_track infrastructure for write-protected
guest pages. The old page_track logic has been rewritten and now lives in a
new standalone page_track.c. This page-track infrastructure can be used by
both vGUC and GTT shadowing.

The important change is that it uses a radix tree instead of a hash table,
since we do not have a predictable number of pages that will be tracked.
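
(Illustrative sketch only, not part of this patch: what the gfn-keyed radix
tree looks like from a client's point of view. The helper names example_find
and example_register are hypothetical; the real API added by this patch is in
page_track.c below.)

/* One radix tree per vGPU, indexed directly by the guest page frame number. */
static struct intel_vgpu_page_track *example_find(struct intel_vgpu *vgpu,
        unsigned long gfn)
{
        /* Lookup cost follows the tree height, not the number of tracked pages. */
        return radix_tree_lookup(&vgpu->page_track_tree, gfn);
}

static int example_register(struct intel_vgpu *vgpu, unsigned long gfn,
        struct intel_vgpu_page_track *track)
{
        /* The gfn itself is the index; there are no hash buckets or chains to walk. */
        return radix_tree_insert(&vgpu->page_track_tree, gfn, track);
}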

Here is some performance data (duration in us) for looking up an element:
Before: (aka. intel_vgpu_find_tracked_page)
 0.091 0.089 0.090 ... 0.093 0.091 0.087 ... 0.292 0.285 0.292 0.291
After: (aka. intel_vgpu_find_page_track)
 0.104 0.105 0.100 0.102 0.102 0.100 ... 0.101 0.101 0.105 0.105

The hash table performs well at the beginning, but degrades as more pages are
tracked, even when no 3D application is running. As expected, the radix tree
lookup time stays stable and fast.

The overall benchmark (tested with the Heaven benchmark) improves only
marginally, since this path is not the bottleneck. What we gain from this
change is scalability.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 09475728
@@ -3,7 +3,7 @@ GVT_DIR := gvt
 GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
        interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
        execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
-       fb_decoder.o dmabuf.o
+       fb_decoder.o dmabuf.o page_track.o

 ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
 i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
...
@@ -559,7 +559,7 @@ static inline int ppgtt_spt_get_entry(
                return -EINVAL;

        ret = ops->get_entry(page_table, e, index, guest,
-                       spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
+                       spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
                        spt->vgpu);
        if (ret)
                return ret;
@@ -587,7 +587,7 @@ static inline int ppgtt_spt_set_entry(
                        type, e->type, index, e->val64);
        return ops->set_entry(page_table, e, index, guest,
-                       spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
+                       spt->guest_page.gfn << I915_GTT_PAGE_SHIFT,
                        spt->vgpu);
 }
@@ -607,9 +607,6 @@ static inline int ppgtt_spt_set_entry(
        ppgtt_spt_set_entry(spt, spt->shadow_page.vaddr, \
                spt->shadow_page.type, e, index, false)

-#define page_track_to_ppgtt_spt(ptr) \
-       container_of(ptr, struct intel_vgpu_ppgtt_spt, guest_page.track)
-
 static void *alloc_spt(gfp_t gfp_mask)
 {
        struct intel_vgpu_ppgtt_spt *spt;
@@ -632,30 +629,6 @@ static void free_spt(struct intel_vgpu_ppgtt_spt *spt)
        kfree(spt);
 }

-/**
- * intel_vgpu_find_tracked_page - find a tracked guest page
- * @vgpu: a vGPU
- * @gfn: guest memory page frame number
- *
- * This function is called when the emulation layer wants to figure out if a
- * trapped GFN is a tracked guest page.
- *
- * Returns:
- * Pointer to page track data structure, NULL if not found.
- */
-struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
-               struct intel_vgpu *vgpu, unsigned long gfn)
-{
-       struct intel_vgpu_page_track *t;
-
-       hash_for_each_possible(vgpu->gtt.tracked_guest_page_hash_table,
-                       t, node, gfn) {
-               if (t->gfn == gfn)
-                       return t;
-       }
-       return NULL;
-}
-
 static int detach_oos_page(struct intel_vgpu *vgpu,
                struct intel_vgpu_oos_page *oos_page);
@@ -673,12 +646,7 @@ static void ppgtt_free_spt(struct intel_vgpu_ppgtt_spt *spt)
        if (spt->guest_page.oos_page)
                detach_oos_page(spt->vgpu, spt->guest_page.oos_page);

-       if (!hlist_unhashed(&spt->guest_page.track.node))
-               hash_del(&spt->guest_page.track.node);
-       if (spt->guest_page.track.tracked)
-               intel_gvt_hypervisor_disable_page_track(spt->vgpu,
-                               spt->guest_page.track.gfn);
+       intel_vgpu_unregister_page_track(spt->vgpu, spt->guest_page.gfn);

        list_del_init(&spt->post_shadow_list);
        free_spt(spt);
@@ -698,21 +666,18 @@ static int ppgtt_handle_guest_write_page_table_bytes(
                struct intel_vgpu_ppgtt_spt *spt,
                u64 pa, void *p_data, int bytes);

-static int ppgtt_write_protection_handler(void *data, u64 pa,
-               void *p_data, int bytes)
+static int ppgtt_write_protection_handler(
+               struct intel_vgpu_page_track *page_track,
+               u64 gpa, void *data, int bytes)
 {
-       struct intel_vgpu_page_track *t = data;
-       struct intel_vgpu_ppgtt_spt *spt = page_track_to_ppgtt_spt(t);
+       struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;
        int ret;

        if (bytes != 4 && bytes != 8)
                return -EINVAL;

-       if (!t->tracked)
-               return -EINVAL;
-
-       ret = ppgtt_handle_guest_write_page_table_bytes(spt,
-                       pa, p_data, bytes);
+       ret = ppgtt_handle_guest_write_page_table_bytes(spt, gpa, data, bytes);
        if (ret)
                return ret;

        return ret;
@@ -724,9 +689,9 @@ static struct intel_vgpu_ppgtt_spt *intel_vgpu_find_spt_by_gfn(
 {
        struct intel_vgpu_page_track *track;

-       track = intel_vgpu_find_tracked_page(vgpu, gfn);
-       if (track)
-               return page_track_to_ppgtt_spt(track);
+       track = intel_vgpu_find_page_track(vgpu, gfn);
+       if (track && track->handler == ppgtt_write_protection_handler)
+               return track->priv_data;

        return NULL;
 }
@@ -752,6 +717,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
        struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;
        struct intel_vgpu_ppgtt_spt *spt = NULL;
        dma_addr_t daddr;
+       int ret;

 retry:
        spt = alloc_spt(GFP_KERNEL | __GFP_ZERO);
@@ -787,10 +753,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_alloc_spt(
        spt->guest_page.type = type;
        spt->guest_page.gfn = gfn;
-       spt->guest_page.track.gfn = gfn;
-       spt->guest_page.track.handler = ppgtt_write_protection_handler;
-       hash_add(vgpu->gtt.tracked_guest_page_hash_table,
-                       &spt->guest_page.track.node, gfn);
+       ret = intel_vgpu_register_page_track(vgpu, spt->guest_page.gfn,
+                       ppgtt_write_protection_handler, spt);
+       if (ret) {
+               free_spt(spt);
+               dma_unmap_page(kdev, daddr, PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
+               return ERR_PTR(ret);
+       }

        INIT_HLIST_NODE(&spt->node);
        hash_add(vgpu->gtt.spt_hash_table, &spt->node, spt->shadow_page.mfn);
@@ -926,11 +895,10 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_populate_spt_by_guest_entry(
                goto fail;
        }

-       ret = intel_gvt_hypervisor_enable_page_track(vgpu, spt->guest_page.track.gfn);
+       ret = intel_vgpu_enable_page_track(vgpu, spt->guest_page.gfn);
        if (ret)
                goto fail;

-       spt->guest_page.track.tracked = true;
        ret = ppgtt_populate_spt(spt);
        if (ret)
                goto fail;
@@ -1002,7 +970,7 @@ static int ppgtt_populate_spt(struct intel_vgpu_ppgtt_spt *spt)
        int ret;

        trace_spt_change(spt->vgpu->id, "born", spt,
-                       spt->guest_page.track.gfn, spt->shadow_page.type);
+                       spt->guest_page.gfn, spt->shadow_page.type);

        for_each_present_guest_entry(spt, &ge, i) {
                if (gtt_type_is_pt(get_next_pt_type(ge.type))) {
@@ -1197,10 +1165,9 @@ static int ppgtt_set_guest_page_sync(struct intel_vgpu_ppgtt_spt *spt)
        struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
        int ret;

-       ret = intel_gvt_hypervisor_enable_page_track(spt->vgpu, spt->guest_page.track.gfn);
+       ret = intel_vgpu_enable_page_track(spt->vgpu, spt->guest_page.gfn);
        if (ret)
                return ret;

-       spt->guest_page.track.tracked = true;
        trace_oos_change(spt->vgpu->id, "set page sync", oos_page->id,
                        spt, spt->guest_page.type);
@@ -1236,7 +1203,6 @@
 static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
 {
        struct intel_vgpu_oos_page *oos_page = spt->guest_page.oos_page;
-       int ret;

        if (WARN(!oos_page, "shadow PPGTT page should have a oos page\n"))
                return -EINVAL;
@@ -1245,11 +1211,7 @@ static int ppgtt_set_guest_page_oos(struct intel_vgpu_ppgtt_spt *spt)
                        spt, spt->guest_page.type);

        list_add_tail(&oos_page->vm_list, &spt->vgpu->gtt.oos_page_list_head);
-       ret = intel_gvt_hypervisor_disable_page_track(spt->vgpu, spt->guest_page.track.gfn);
-       if (ret)
-               return ret;
-       spt->guest_page.track.tracked = false;
-       return 0;
+       return intel_vgpu_disable_page_track(spt->vgpu, spt->guest_page.gfn);
 }

 /**
@@ -1918,38 +1880,6 @@ int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
        return ret;
 }

-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
-               void *p_data, unsigned int bytes)
-{
-       struct intel_gvt *gvt = vgpu->gvt;
-       int ret = 0;
-       struct intel_vgpu_page_track *t;
-
-       mutex_lock(&gvt->lock);
-
-       t = intel_vgpu_find_tracked_page(vgpu, pa >> PAGE_SHIFT);
-       if (t) {
-               if (unlikely(vgpu->failsafe)) {
-                       /* remove write protection to prevent furture traps */
-                       intel_gvt_hypervisor_disable_page_track(vgpu, t->gfn);
-               } else {
-                       ret = t->handler(t, pa, p_data, bytes);
-                       if (ret) {
-                               gvt_err("guest page write error %d, "
-                                       "gfn 0x%lx, pa 0x%llx, "
-                                       "var 0x%x, len %d\n",
-                                       ret, t->gfn, pa,
-                                       *(u32 *)p_data, bytes);
-                       }
-               }
-       }
-       mutex_unlock(&gvt->lock);
-       return ret;
-}
-
 static int alloc_scratch_pages(struct intel_vgpu *vgpu,
                intel_gvt_gtt_type_t type)
 {
@@ -2064,7 +1994,6 @@ int intel_vgpu_init_gtt(struct intel_vgpu *vgpu)
 {
        struct intel_vgpu_gtt *gtt = &vgpu->gtt;

-       hash_init(gtt->tracked_guest_page_hash_table);
        hash_init(gtt->spt_hash_table);
        INIT_LIST_HEAD(&gtt->ppgtt_mm_list_head);
...
@@ -187,8 +187,6 @@ struct intel_vgpu_gtt {
        unsigned long active_ppgtt_mm_bitmap;
        struct list_head ppgtt_mm_list_head;
        DECLARE_HASHTABLE(spt_hash_table, INTEL_GVT_GTT_HASH_BITS);
-       DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-       atomic_t n_tracked_guest_page;
        struct list_head oos_page_list_head;
        struct list_head post_shadow_list_head;
        struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
@@ -205,14 +203,6 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
 extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
                int page_table_level, void *root_entry);

-struct intel_vgpu_page_track {
-       struct hlist_node node;
-       bool tracked;
-       unsigned long gfn;
-       int (*handler)(void *, u64, void *, int);
-       void *data;
-};
-
 struct intel_vgpu_oos_page {
        struct intel_vgpu_ppgtt_spt *spt;
        struct list_head list;
@@ -240,7 +230,6 @@ struct intel_vgpu_ppgtt_spt {
                intel_gvt_gtt_type_t type;
                unsigned long gfn;
                unsigned long write_cnt;
-               struct intel_vgpu_page_track track;
                struct intel_vgpu_oos_page *oos_page;
        } guest_page;
@@ -273,7 +262,4 @@ int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
 int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
        unsigned int off, void *p_data, unsigned int bytes);

-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
-       void *p_data, unsigned int bytes);
-
 #endif /* _GVT_GTT_H_ */
@@ -183,7 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
        .get_gvt_attrs = intel_get_gvt_attrs,
        .vgpu_query_plane = intel_vgpu_query_plane,
        .vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
-       .write_protect_handler = intel_vgpu_write_protect_handler,
+       .write_protect_handler = intel_vgpu_page_track_handler,
 };

 /**
...
@@ -48,6 +48,7 @@
 #include "cmd_parser.h"
 #include "fb_decoder.h"
 #include "dmabuf.h"
+#include "page_track.h"

 #define GVT_MAX_VGPU 8
@@ -190,6 +191,7 @@ struct intel_vgpu {
        struct intel_vgpu_opregion opregion;
        struct intel_vgpu_display display;
        struct intel_vgpu_submission submission;
+       struct radix_tree_root page_track_tree;
        u32 hws_pga[I915_NUM_ENGINES];

        struct dentry *debugfs;
...
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "i915_drv.h"
#include "gvt.h"
/**
* intel_vgpu_find_page_track - find the page-track record of a guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* Pointer to struct intel_vgpu_page_track if found, NULL otherwise.
*/
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return radix_tree_lookup(&vgpu->page_track_tree, gfn);
}
/**
* intel_vgpu_register_page_track - register a guest page to be tracked
* @vgpu: a vGPU
* @gfn: the gfn of guest page
* @handler: the callback invoked when the tracked page is written
* @priv: caller private data, stored in the track record
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
gvt_page_track_handler_t handler, void *priv)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (track)
return -EEXIST;
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (!track)
return -ENOMEM;
track->handler = handler;
track->priv_data = priv;
ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
if (ret) {
kfree(track);
return ret;
}
return 0;
}
/**
* intel_vgpu_unregister_page_track - unregister the tracked guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
*/
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
unsigned long gfn)
{
struct intel_vgpu_page_track *track;
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
kfree(track);
}
}
/**
* intel_vgpu_enable_page_track - set write-protection on guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (!track)
return -ENXIO;
if (track->tracked)
return 0;
ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
return 0;
}
/**
* intel_vgpu_disable_page_track - cancel write-protection on guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (!track)
return -ENXIO;
if (!track->tracked)
return 0;
ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
return 0;
}
/**
* intel_vgpu_page_track_handler - called on a write to a write-protected page
* @vgpu: a vGPU
* @gpa: the gpa of this write
* @data: the data being written
* @bytes: the length of this write
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
mutex_lock(&gvt->lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
        mutex_unlock(&gvt->lock);
        return 0;
}
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
gvt_err("guest page write error, gpa %llx\n", gpa);
}
mutex_unlock(&gvt->lock);
return ret;
}
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _GVT_PAGE_TRACK_H_
#define _GVT_PAGE_TRACK_H_
struct intel_vgpu_page_track;
typedef int (*gvt_page_track_handler_t)(
struct intel_vgpu_page_track *page_track,
u64 gpa, void *data, int bytes);
/* Track record for a write-protected guest page. */
struct intel_vgpu_page_track {
gvt_page_track_handler_t handler;
bool tracked;
void *priv_data;
};
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
unsigned long gfn, gvt_page_track_handler_t handler,
void *priv);
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
unsigned long gfn);
int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes);
#endif
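
(Illustrative usage sketch, not part of the patch: how a client such as the GTT
shadowing code is expected to drive this API. The names example_wp_handler and
example_track_gfn are hypothetical placeholders.)

/* Handler invoked by intel_vgpu_page_track_handler() on a trapped write. */
static int example_wp_handler(struct intel_vgpu_page_track *page_track,
        u64 gpa, void *data, int bytes)
{
        /* priv_data carries whatever pointer the client registered, e.g. an spt. */
        struct intel_vgpu_ppgtt_spt *spt = page_track->priv_data;

        /* emulate or re-shadow the guest write against spt here */
        return spt ? 0 : -EINVAL;
}

static int example_track_gfn(struct intel_vgpu *vgpu, unsigned long gfn,
        struct intel_vgpu_ppgtt_spt *spt)
{
        int ret;

        /* Register the gfn with a handler, then arm write protection for it. */
        ret = intel_vgpu_register_page_track(vgpu, gfn, example_wp_handler, spt);
        if (ret)
                return ret;
        return intel_vgpu_enable_page_track(vgpu, gfn);
}

When the guest later writes the protected page, intel_vgpu_page_track_handler()
(wired up as intel_gvt_ops.write_protect_handler in gvt.c above) looks the gfn
up in the radix tree and dispatches to the registered handler;
intel_vgpu_unregister_page_track() removes the record and drops the write
protection.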
@@ -354,6 +354,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
        vgpu->gvt = gvt;
        vgpu->sched_ctl.weight = param->weight;
        INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+       INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
        idr_init(&vgpu->object_idr);
        intel_vgpu_init_cfg_space(vgpu, param->primary);
...