Commit 9556e118 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: Use I915_GTT_PAGE_SIZE

As there is already an I915_GTT_PAGE_SIZE macro in i915, let GVT-g use it
as well. This patch also renames some GTT macros with an additional I915_ prefix.
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
parent 62a6a537
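For reference before the diff: a minimal, self-contained sketch (not part of the patch) of the page math these hunks exercise. The macro values mirror the gvt/gtt.h definitions in this patch; in the i915 tree, I915_GTT_PAGE_SIZE itself comes from i915, and the sample gma/pfn values below are made up.

#include <stdio.h>

/* Mirrors the definitions this patch leaves in gvt/gtt.h; the SIZE
 * macro is redefined locally only so this sketch compiles standalone.
 */
#define I915_GTT_PAGE_SHIFT 12
#define I915_GTT_PAGE_SIZE  (1UL << I915_GTT_PAGE_SHIFT)   /* 4 KiB */
#define I915_GTT_PAGE_MASK  (~(I915_GTT_PAGE_SIZE - 1))

int main(void)
{
    unsigned long gma = 0x12345678; /* made-up graphics memory address */
    unsigned long pfn = 0xabcd;     /* made-up page frame number */

    /* PTE index for a gma (cf. gma_to_ggtt_pte_index below) */
    unsigned long index = gma >> I915_GTT_PAGE_SHIFT;

    /* byte offset within one 4 KiB page (cf. copy_gma_to_hva) */
    unsigned long offset = gma & (I915_GTT_PAGE_SIZE - 1);

    /* rebuild an address from pfn plus the in-page offset
     * (cf. intel_vgpu_gma_to_gpa)
     */
    unsigned long gpa = (pfn << I915_GTT_PAGE_SHIFT)
                + (gma & ~I915_GTT_PAGE_MASK);

    printf("index=%#lx offset=%#lx gpa=%#lx\n", index, offset, gpa);
    return 0;
}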
@@ -1396,7 +1396,7 @@ static inline int cmd_address_audit(struct parser_exec_state *s,
     }
     if (index_mode) {
-        if (guest_gma >= GTT_PAGE_SIZE / sizeof(u64)) {
+        if (guest_gma >= I915_GTT_PAGE_SIZE / sizeof(u64)) {
             ret = -EFAULT;
             goto err;
         }
@@ -1563,10 +1563,10 @@ static int copy_gma_to_hva(struct intel_vgpu *vgpu, struct intel_vgpu_mm *mm,
         return -EFAULT;
     }
-    offset = gma & (GTT_PAGE_SIZE - 1);
-    copy_len = (end_gma - gma) >= (GTT_PAGE_SIZE - offset) ?
-            GTT_PAGE_SIZE - offset : end_gma - gma;
+    offset = gma & (I915_GTT_PAGE_SIZE - 1);
+    copy_len = (end_gma - gma) >= (I915_GTT_PAGE_SIZE - offset) ?
+            I915_GTT_PAGE_SIZE - offset : end_gma - gma;
     intel_gvt_hypervisor_read_gpa(vgpu, gpa, va + len, copy_len);
@@ -2540,7 +2540,7 @@ static int scan_workload(struct intel_vgpu_workload *workload)
     int ret = 0;

     /* ring base is page aligned */
-    if (WARN_ON(!IS_ALIGNED(workload->rb_start, GTT_PAGE_SIZE)))
+    if (WARN_ON(!IS_ALIGNED(workload->rb_start, I915_GTT_PAGE_SIZE)))
         return -EINVAL;

     gma_head = workload->rb_start + workload->rb_head;
@@ -2589,7 +2589,8 @@ static int scan_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
             wa_ctx);

     /* ring base is page aligned */
-    if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma, GTT_PAGE_SIZE)))
+    if (WARN_ON(!IS_ALIGNED(wa_ctx->indirect_ctx.guest_gma,
+                    I915_GTT_PAGE_SIZE)))
         return -EINVAL;

     ring_tail = wa_ctx->indirect_ctx.size + 3 * sizeof(uint32_t);
...
@@ -94,12 +94,12 @@ int intel_gvt_ggtt_index_g2h(struct intel_vgpu *vgpu, unsigned long g_index,
     u64 h_addr;
     int ret;

-    ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << GTT_PAGE_SHIFT,
+    ret = intel_gvt_ggtt_gmadr_g2h(vgpu, g_index << I915_GTT_PAGE_SHIFT,
                        &h_addr);
     if (ret)
         return ret;

-    *h_index = h_addr >> GTT_PAGE_SHIFT;
+    *h_index = h_addr >> I915_GTT_PAGE_SHIFT;
     return 0;
 }
@@ -109,12 +109,12 @@ int intel_gvt_ggtt_h2g_index(struct intel_vgpu *vgpu, unsigned long h_index,
     u64 g_addr;
     int ret;

-    ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << GTT_PAGE_SHIFT,
+    ret = intel_gvt_ggtt_gmadr_h2g(vgpu, h_index << I915_GTT_PAGE_SHIFT,
                        &g_addr);
     if (ret)
         return ret;

-    *g_index = g_addr >> GTT_PAGE_SHIFT;
+    *g_index = g_addr >> I915_GTT_PAGE_SHIFT;
     return 0;
 }
@@ -382,7 +382,7 @@ static void gtt_entry_clear_present(struct intel_gvt_gtt_entry *e)
  */
 static unsigned long gma_to_ggtt_pte_index(unsigned long gma)
 {
-    unsigned long x = (gma >> GTT_PAGE_SHIFT);
+    unsigned long x = (gma >> I915_GTT_PAGE_SHIFT);

     trace_gma_index(__func__, gma, x);
     return x;
@@ -494,7 +494,7 @@ static inline int ppgtt_spt_get_entry(
         return -EINVAL;

     ret = ops->get_entry(page_table, e, index, guest,
-            spt->guest_page.track.gfn << GTT_PAGE_SHIFT,
+            spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
             spt->vgpu);
     if (ret)
         return ret;
@@ -516,7 +516,7 @@ static inline int ppgtt_spt_set_entry(
         return -EINVAL;

     return ops->set_entry(page_table, e, index, guest,
-            spt->guest_page.track.gfn << GTT_PAGE_SHIFT,
+            spt->guest_page.track.gfn << I915_GTT_PAGE_SHIFT,
             spt->vgpu);
 }
@@ -649,7 +649,7 @@ static inline int init_shadow_page(struct intel_vgpu *vgpu,
     INIT_HLIST_NODE(&p->node);

-    p->mfn = daddr >> GTT_PAGE_SHIFT;
+    p->mfn = daddr >> I915_GTT_PAGE_SHIFT;
     hash_add(vgpu->gtt.shadow_page_hash_table, &p->node, p->mfn);
     return 0;
 }
@@ -659,7 +659,7 @@ static inline void clean_shadow_page(struct intel_vgpu *vgpu,
 {
     struct device *kdev = &vgpu->gvt->dev_priv->drm.pdev->dev;

-    dma_unmap_page(kdev, p->mfn << GTT_PAGE_SHIFT, 4096,
+    dma_unmap_page(kdev, p->mfn << I915_GTT_PAGE_SHIFT, 4096,
             PCI_DMA_BIDIRECTIONAL);

     if (!hlist_unhashed(&p->node))
@@ -818,7 +818,7 @@ static struct intel_vgpu_ppgtt_spt *ppgtt_find_shadow_page(
     ((spt)->vgpu->gvt->device_info.gtt_entry_size_shift)

 #define pt_entries(spt) \
-    (GTT_PAGE_SIZE >> pt_entry_size_shift(spt))
+    (I915_GTT_PAGE_SIZE >> pt_entry_size_shift(spt))

 #define for_each_present_guest_entry(spt, e, i) \
     for (i = 0; i < pt_entries(spt); i++) \
@@ -1101,8 +1101,8 @@ static int sync_oos_page(struct intel_vgpu *vgpu,
     old.type = new.type = get_entry_type(spt->guest_page_type);
     old.val64 = new.val64 = 0;

-    for (index = 0; index < (GTT_PAGE_SIZE >> info->gtt_entry_size_shift);
-        index++) {
+    for (index = 0; index < (I915_GTT_PAGE_SIZE >>
+                info->gtt_entry_size_shift); index++) {
         ops->get_entry(oos_page->mem, &old, index, false, 0, vgpu);
         ops->get_entry(NULL, &new, index, true,
             oos_page->guest_page->track.gfn << PAGE_SHIFT, vgpu);
@@ -1156,8 +1156,8 @@ static int attach_oos_page(struct intel_vgpu *vgpu,
     int ret;

     ret = intel_gvt_hypervisor_read_gpa(vgpu,
-            gpt->track.gfn << GTT_PAGE_SHIFT,
-            oos_page->mem, GTT_PAGE_SIZE);
+            gpt->track.gfn << I915_GTT_PAGE_SHIFT,
+            oos_page->mem, I915_GTT_PAGE_SIZE);
     if (ret)
         return ret;
@@ -1439,7 +1439,7 @@ static int gen8_mm_alloc_page_table(struct intel_vgpu_mm *mm)
         mm->shadow_page_table = mem + mm->page_table_entry_size;
     } else if (mm->type == INTEL_GVT_MM_GGTT) {
         mm->page_table_entry_cnt =
-            (gvt_ggtt_gm_sz(gvt) >> GTT_PAGE_SHIFT);
+            (gvt_ggtt_gm_sz(gvt) >> I915_GTT_PAGE_SHIFT);
         mm->page_table_entry_size = mm->page_table_entry_cnt *
             info->gtt_entry_size;
         mem = vzalloc(mm->page_table_entry_size);
@@ -1761,8 +1761,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
                 gma_ops->gma_to_ggtt_pte_index(gma));
         if (ret)
             goto err;
-        gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
-            + (gma & ~GTT_PAGE_MASK);
+        gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+            + (gma & ~I915_GTT_PAGE_MASK);

         trace_gma_translate(vgpu->id, "ggtt", 0, 0, gma, gpa);
         return gpa;
@@ -1814,8 +1814,8 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm, unsigned long gma)
         }
     }

-    gpa = (pte_ops->get_pfn(&e) << GTT_PAGE_SHIFT)
-        + (gma & ~GTT_PAGE_MASK);
+    gpa = (pte_ops->get_pfn(&e) << I915_GTT_PAGE_SHIFT)
+        + (gma & ~I915_GTT_PAGE_MASK);

     trace_gma_translate(vgpu->id, "ppgtt", 0,
             mm->page_table_level, gma, gpa);
@@ -1883,7 +1883,7 @@ static int emulate_gtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
     if (bytes != 4 && bytes != 8)
         return -EINVAL;

-    gma = g_gtt_index << GTT_PAGE_SHIFT;
+    gma = g_gtt_index << I915_GTT_PAGE_SHIFT;

     /* the VM may configure the whole GM space when ballooning is used */
     if (!vgpu_gmadr_is_valid(vgpu, gma))
@@ -1946,7 +1946,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
 {
     struct intel_vgpu_gtt *gtt = &vgpu->gtt;
     struct intel_gvt_gtt_pte_ops *ops = vgpu->gvt->gtt.pte_ops;
-    int page_entry_num = GTT_PAGE_SIZE >>
+    int page_entry_num = I915_GTT_PAGE_SIZE >>
                 vgpu->gvt->device_info.gtt_entry_size_shift;
     void *scratch_pt;
     int i;
@@ -1970,7 +1970,7 @@ static int alloc_scratch_pages(struct intel_vgpu *vgpu,
         return -ENOMEM;
     }
     gtt->scratch_pt[type].page_mfn =
-        (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+        (unsigned long)(daddr >> I915_GTT_PAGE_SHIFT);
     gtt->scratch_pt[type].page = virt_to_page(scratch_pt);
     gvt_dbg_mm("vgpu%d create scratch_pt: type %d mfn=0x%lx\n",
             vgpu->id, type, gtt->scratch_pt[type].page_mfn);
@@ -2013,7 +2013,7 @@ static int release_scratch_page_tree(struct intel_vgpu *vgpu)
     for (i = GTT_TYPE_PPGTT_PTE_PT; i < GTT_TYPE_MAX; i++) {
         if (vgpu->gtt.scratch_pt[i].page != NULL) {
             daddr = (dma_addr_t)(vgpu->gtt.scratch_pt[i].page_mfn <<
-                    GTT_PAGE_SHIFT);
+                    I915_GTT_PAGE_SHIFT);
             dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
             __free_page(vgpu->gtt.scratch_pt[i].page);
             vgpu->gtt.scratch_pt[i].page = NULL;
@@ -2310,7 +2310,8 @@ int intel_gvt_init_gtt(struct intel_gvt *gvt)
         return -ENOMEM;
     }

     gvt->gtt.scratch_ggtt_page = virt_to_page(page);
-    gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >> GTT_PAGE_SHIFT);
+    gvt->gtt.scratch_ggtt_mfn = (unsigned long)(daddr >>
+                    I915_GTT_PAGE_SHIFT);

     if (enable_out_of_sync) {
         ret = setup_spt_oos(gvt);
{ {
struct device *dev = &gvt->dev_priv->drm.pdev->dev; struct device *dev = &gvt->dev_priv->drm.pdev->dev;
dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn << dma_addr_t daddr = (dma_addr_t)(gvt->gtt.scratch_ggtt_mfn <<
GTT_PAGE_SHIFT); I915_GTT_PAGE_SHIFT);
dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL); dma_unmap_page(dev, daddr, 4096, PCI_DMA_BIDIRECTIONAL);
......
@@ -34,9 +34,8 @@
 #ifndef _GVT_GTT_H_
 #define _GVT_GTT_H_

-#define GTT_PAGE_SHIFT      12
-#define GTT_PAGE_SIZE       (1UL << GTT_PAGE_SHIFT)
-#define GTT_PAGE_MASK       (~(GTT_PAGE_SIZE-1))
+#define I915_GTT_PAGE_SHIFT 12
+#define I915_GTT_PAGE_MASK  (~(I915_GTT_PAGE_SIZE - 1))

 struct intel_vgpu_mm;
@@ -245,7 +244,7 @@ struct intel_vgpu_oos_page {
     struct list_head list;
     struct list_head vm_list;
     int id;
-    unsigned char mem[GTT_PAGE_SIZE];
+    unsigned char mem[I915_GTT_PAGE_SIZE];
 };

 #define GTT_ENTRY_NUM_IN_ONE_PAGE 512
...
@@ -74,6 +74,7 @@
 #define RB_HEAD_OFF_MASK    ((1U << 21) - (1U << 2))
 #define RB_TAIL_OFF_MASK    ((1U << 21) - (1U << 3))
 #define RB_TAIL_SIZE_MASK   ((1U << 21) - (1U << 12))
-#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + GTT_PAGE_SIZE)
+#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
+        I915_GTT_PAGE_SIZE)

 #endif
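A quick worked example of the _RING_CTL_BUF_SIZE macro above (not from the patch; the ctl value is made up): with a 12-bit page shift, the decoded ring-buffer size is the masked tail-size field plus one 4 KiB page.

#include <assert.h>

#define RB_TAIL_SIZE_MASK   ((1U << 21) - (1U << 12))   /* 0x001ff000 */
#define I915_GTT_PAGE_SIZE  (1UL << 12)                 /* local stand-in */
#define _RING_CTL_BUF_SIZE(ctl) (((ctl) & RB_TAIL_SIZE_MASK) + \
        I915_GTT_PAGE_SIZE)

int main(void)
{
    /* made-up ring control value: tail-size field is 0x1f000 (124 KiB) */
    unsigned int ctl = 0x0001f001;

    /* masked field plus one 4 KiB page -> 0x20000 (128 KiB) */
    assert(_RING_CTL_BUF_SIZE(ctl) == 0x20000);
    return 0;
}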
@@ -81,7 +81,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
     while (i < context_page_num) {
         context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                 (u32)((workload->ctx_desc.lrca + i) <<
-                GTT_PAGE_SHIFT));
+                I915_GTT_PAGE_SHIFT));
         if (context_gpa == INTEL_GVT_INVALID_ADDR) {
             gvt_vgpu_err("Invalid guest context descriptor\n");
             return -EFAULT;
@@ -90,7 +90,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
         page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
         dst = kmap(page);
         intel_gvt_hypervisor_read_gpa(vgpu, context_gpa, dst,
-                GTT_PAGE_SIZE);
+                I915_GTT_PAGE_SIZE);
         kunmap(page);
         i++;
     }
@@ -120,7 +120,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
             sizeof(*shadow_ring_context),
             (void *)shadow_ring_context +
             sizeof(*shadow_ring_context),
-            GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+            I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));

     kunmap(page);
     return 0;
@@ -635,7 +635,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
     while (i < context_page_num) {
         context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
                 (u32)((workload->ctx_desc.lrca + i) <<
-                GTT_PAGE_SHIFT));
+                I915_GTT_PAGE_SHIFT));
         if (context_gpa == INTEL_GVT_INVALID_ADDR) {
             gvt_vgpu_err("invalid guest context descriptor\n");
             return;
@@ -644,7 +644,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
         page = i915_gem_object_get_page(ctx_obj, LRC_HEADER_PAGES + i);
         src = kmap(page);
         intel_gvt_hypervisor_write_gpa(vgpu, context_gpa, src,
-                GTT_PAGE_SIZE);
+                I915_GTT_PAGE_SIZE);
         kunmap(page);
         i++;
     }
@@ -669,7 +669,7 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
             sizeof(*shadow_ring_context),
             (void *)shadow_ring_context +
             sizeof(*shadow_ring_context),
-            GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
+            I915_GTT_PAGE_SIZE - sizeof(*shadow_ring_context));
     kunmap(page);
 }
@@ -1198,7 +1198,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
     int ret;

     ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
-            (u32)((desc->lrca + 1) << GTT_PAGE_SHIFT));
+            (u32)((desc->lrca + 1) << I915_GTT_PAGE_SHIFT));
     if (ring_context_gpa == INTEL_GVT_INVALID_ADDR) {
         gvt_vgpu_err("invalid guest context LRCA: %x\n", desc->lrca);
         return ERR_PTR(-EINVAL);
...