Commit 4b2dbbc2 authored by Changbin Du, committed by Zhenyu Wang

drm/i915/gvt: Add careful checking in GTT walker paths

While debugging the GTT code, I found that intel_vgpu_gma_to_gpa() would
translate any given GMA, even one that is not valid. This is because the
GTT ops suppress possible errors, which can result in an invalid PT entry
being handed back to the upper caller.

This patch changes the prototype of the pte ops to propagate a status
code to callers, and makes sure the GTT walker stops as early as an
error is detected, preventing undefined behavior.

Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 36ed7e97
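
At its core, the patch replaces pointer-returning PTE accessors that swallow errors with int-returning accessors whose status every walker must check. Before reading the diff, here is a minimal standalone sketch of that pattern; it is illustrative only, not kernel code: gtt_entry, read_entry_old/read_entry_new, and walk_one_level are made-up names.

#include <stdio.h>
#include <errno.h>

struct gtt_entry {
        unsigned long long val64;
};

/* Old shape: return the entry pointer and merely warn on failure.
 * The caller cannot tell a good read from a failed one. */
static struct gtt_entry *read_entry_old(struct gtt_entry *e, int fail)
{
        if (fail) {
                fprintf(stderr, "WARN: read failed, caller never sees it\n");
                return e;       /* stale e->val64 flows onward */
        }
        e->val64 = 0x1000;
        return e;
}

/* New shape: return a status code so the caller can bail out. */
static int read_entry_new(struct gtt_entry *e, int fail)
{
        if (fail)
                return -EINVAL;
        e->val64 = 0x1000;
        return 0;
}

/* A walker in the new shape stops at the first failed level,
 * mirroring the "if (ret) goto err;" pattern in the patch. */
static int walk_one_level(int fail)
{
        struct gtt_entry e = { 0 };
        int ret;

        ret = read_entry_new(&e, fail);
        if (ret)
                return ret;     /* propagate, do not translate */
        printf("entry = 0x%llx\n", e.val64);
        return 0;
}

int main(void)
{
        struct gtt_entry e = { 0 };

        read_entry_old(&e, 1);  /* "succeeds" despite the failed read */
        walk_one_level(0);      /* really succeeds, prints the entry */
        return walk_one_level(1) == -EINVAL ? 0 : 1;
}

The same shape appears throughout the diff below as "ret = ...; if (ret) goto err;" (or "return ret;") at every level of the GTT walk.
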
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -259,7 +259,7 @@ static void write_pte64(struct drm_i915_private *dev_priv,
         writeq(pte, addr);
 }
 
-static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
+static inline int gtt_get_entry64(void *pt,
                 struct intel_gvt_gtt_entry *e,
                 unsigned long index, bool hypervisor_access, unsigned long gpa,
                 struct intel_vgpu *vgpu)
@@ -268,22 +268,23 @@ static inline struct intel_gvt_gtt_entry *gtt_get_entry64(void *pt,
         int ret;
 
         if (WARN_ON(info->gtt_entry_size != 8))
-                return e;
+                return -EINVAL;
 
         if (hypervisor_access) {
                 ret = intel_gvt_hypervisor_read_gpa(vgpu, gpa +
                                 (index << info->gtt_entry_size_shift),
                                 &e->val64, 8);
-                WARN_ON(ret);
+                if (WARN_ON(ret))
+                        return ret;
         } else if (!pt) {
                 e->val64 = read_pte64(vgpu->gvt->dev_priv, index);
         } else {
                 e->val64 = *((u64 *)pt + index);
         }
-        return e;
+        return 0;
 }
 
-static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
+static inline int gtt_set_entry64(void *pt,
                 struct intel_gvt_gtt_entry *e,
                 unsigned long index, bool hypervisor_access, unsigned long gpa,
                 struct intel_vgpu *vgpu)
@@ -292,19 +293,20 @@ static inline struct intel_gvt_gtt_entry *gtt_set_entry64(void *pt,
         int ret;
 
         if (WARN_ON(info->gtt_entry_size != 8))
-                return e;
+                return -EINVAL;
 
         if (hypervisor_access) {
                 ret = intel_gvt_hypervisor_write_gpa(vgpu, gpa +
                                 (index << info->gtt_entry_size_shift),
                                 &e->val64, 8);
-                WARN_ON(ret);
+                if (WARN_ON(ret))
+                        return ret;
         } else if (!pt) {
                 write_pte64(vgpu->gvt->dev_priv, index, e->val64);
         } else {
                 *((u64 *)pt + index) = e->val64;
         }
-        return e;
+        return 0;
 }
 
 #define GTT_HAW 46
@@ -445,21 +447,25 @@ static int gtt_entry_p2m(struct intel_vgpu *vgpu, struct intel_gvt_gtt_entry *p,
 /*
  * MM helpers.
  */
-struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_get_entry(struct intel_vgpu_mm *mm,
                 void *page_table, struct intel_gvt_gtt_entry *e,
                 unsigned long index)
 {
         struct intel_gvt *gvt = mm->vgpu->gvt;
         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+        int ret;
 
         e->type = mm->page_table_entry_type;
 
-        ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+        ret = ops->get_entry(page_table, e, index, false, 0, mm->vgpu);
+        if (ret)
+                return ret;
+
         ops->test_pse(e);
-        return e;
+        return 0;
 }
 
-struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
+int intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
                 void *page_table, struct intel_gvt_gtt_entry *e,
                 unsigned long index)
 {
@@ -472,7 +478,7 @@ struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(struct intel_vgpu_mm *mm,
 /*
  * PPGTT shadow page table helpers.
  */
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
+static inline int ppgtt_spt_get_entry(
                 struct intel_vgpu_ppgtt_spt *spt,
                 void *page_table, int type,
                 struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -480,20 +486,24 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_get_entry(
 {
         struct intel_gvt *gvt = spt->vgpu->gvt;
         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
+        int ret;
 
         e->type = get_entry_type(type);
 
         if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
-                return e;
+                return -EINVAL;
 
-        ops->get_entry(page_table, e, index, guest,
+        ret = ops->get_entry(page_table, e, index, guest,
                         spt->guest_page.gfn << GTT_PAGE_SHIFT,
                         spt->vgpu);
+        if (ret)
+                return ret;
+
         ops->test_pse(e);
-        return e;
+        return 0;
 }
 
-static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
+static inline int ppgtt_spt_set_entry(
                 struct intel_vgpu_ppgtt_spt *spt,
                 void *page_table, int type,
                 struct intel_gvt_gtt_entry *e, unsigned long index,
@@ -503,7 +513,7 @@ static inline struct intel_gvt_gtt_entry *ppgtt_spt_set_entry(
         struct intel_gvt_gtt_pte_ops *ops = gvt->gtt.pte_ops;
 
         if (WARN(!gtt_type_is_entry(e->type), "invalid entry type\n"))
-                return e;
+                return -EINVAL;
 
         return ops->set_entry(page_table, e, index, guest,
                         spt->guest_page.gfn << GTT_PAGE_SHIFT,
@@ -792,13 +802,13 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
 
 #define for_each_present_guest_entry(spt, e, i) \
         for (i = 0; i < pt_entries(spt); i++) \
-                if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
-                        ppgtt_get_guest_entry(spt, e, i)))
+                if (!ppgtt_get_guest_entry(spt, e, i) && \
+                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
 #define for_each_present_shadow_entry(spt, e, i) \
         for (i = 0; i < pt_entries(spt); i++) \
-                if (spt->vgpu->gvt->gtt.pte_ops->test_present( \
-                        ppgtt_get_shadow_entry(spt, e, i)))
+                if (!ppgtt_get_shadow_entry(spt, e, i) && \
+                    spt->vgpu->gvt->gtt.pte_ops->test_present(e))
 
 static void ppgtt_get_shadow_page(struct intel_vgpu_ppgtt_spt *spt)
 {
@@ -1713,8 +1723,10 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
                 if (!vgpu_gmadr_is_valid(vgpu, gma))
                         goto err;
 
-                ggtt_get_guest_entry(mm, &e,
-                        gma_ops->gma_to_ggtt_pte_index(gma));
+                ret = ggtt_get_guest_entry(mm, &e,
+                                gma_ops->gma_to_ggtt_pte_index(gma));
+                if (ret)
+                        goto err;
+
                 gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
                         + (gma & ~GTT_PAGE_MASK);
@@ -1724,7 +1736,9 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
 
         switch (mm->page_table_level) {
         case 4:
-                ppgtt_get_shadow_root_entry(mm, &e, 0);
+                ret = ppgtt_get_shadow_root_entry(mm, &e, 0);
+                if (ret)
+                        goto err;
                 gma_index[0] = gma_ops->gma_to_pml4_index(gma);
                 gma_index[1] = gma_ops->gma_to_l4_pdp_index(gma);
                 gma_index[2] = gma_ops->gma_to_pde_index(gma);
@@ -1732,15 +1746,19 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
                 index = 4;
                 break;
         case 3:
-                ppgtt_get_shadow_root_entry(mm, &e,
-                        gma_ops->gma_to_l3_pdp_index(gma));
+                ret = ppgtt_get_shadow_root_entry(mm, &e,
+                                gma_ops->gma_to_l3_pdp_index(gma));
+                if (ret)
+                        goto err;
                 gma_index[0] = gma_ops->gma_to_pde_index(gma);
                 gma_index[1] = gma_ops->gma_to_pte_index(gma);
                 index = 2;
                 break;
         case 2:
-                ppgtt_get_shadow_root_entry(mm, &e,
-                        gma_ops->gma_to_pde_index(gma));
+                ret = ppgtt_get_shadow_root_entry(mm, &e,
+                                gma_ops->gma_to_pde_index(gma));
+                if (ret)
+                        goto err;
                 gma_index[0] = gma_ops->gma_to_pte_index(gma);
                 index = 1;
                 break;
@@ -1755,6 +1773,11 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
                                 (i == index - 1));
                 if (ret)
                         goto err;
+
+                if (!pte_ops->test_present(&e)) {
+                        gvt_dbg_core("GMA 0x%lx is not present\n", gma);
+                        goto err;
+                }
         }
 
         gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -49,14 +49,18 @@ struct intel_gvt_gtt_entry {
 };
 
 struct intel_gvt_gtt_pte_ops {
-        struct intel_gvt_gtt_entry *(*get_entry)(void *pt,
-                struct intel_gvt_gtt_entry *e,
-                unsigned long index, bool hypervisor_access, unsigned long gpa,
-                struct intel_vgpu *vgpu);
-        struct intel_gvt_gtt_entry *(*set_entry)(void *pt,
-                struct intel_gvt_gtt_entry *e,
-                unsigned long index, bool hypervisor_access, unsigned long gpa,
-                struct intel_vgpu *vgpu);
+        int (*get_entry)(void *pt,
+                         struct intel_gvt_gtt_entry *e,
+                         unsigned long index,
+                         bool hypervisor_access,
+                         unsigned long gpa,
+                         struct intel_vgpu *vgpu);
+        int (*set_entry)(void *pt,
+                         struct intel_gvt_gtt_entry *e,
+                         unsigned long index,
+                         bool hypervisor_access,
+                         unsigned long gpa,
+                         struct intel_vgpu *vgpu);
         bool (*test_present)(struct intel_gvt_gtt_entry *e);
         void (*clear_present)(struct intel_gvt_gtt_entry *e);
         bool (*test_pse)(struct intel_gvt_gtt_entry *e);
@@ -143,12 +147,12 @@ struct intel_vgpu_mm {
         struct intel_vgpu *vgpu;
 };
 
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_get_entry(
+extern int intel_vgpu_mm_get_entry(
                 struct intel_vgpu_mm *mm,
                 void *page_table, struct intel_gvt_gtt_entry *e,
                 unsigned long index);
 
-extern struct intel_gvt_gtt_entry *intel_vgpu_mm_set_entry(
+extern int intel_vgpu_mm_set_entry(
                 struct intel_vgpu_mm *mm,
                 void *page_table, struct intel_gvt_gtt_entry *e,
                 unsigned long index);