Commit ede9d0cf authored by Changbin Du, committed by Zhenyu Wang

drm/i915/gvt: Rework shadow graphic memory management code

This is a big one: the GVT shadow graphic memory management code is
heavily refined. The new code is more straightforward and smaller.

The struct intel_vgpu_mm is restructured to be clearly defined and to
use accurate names, and the original fields that were genuinely
redundant are removed.

Now we only manage ppgtt mm objects with mm->ppgtt_mm.lru_list. There is
no need to mix ppgtt and ggtt together, since one vGPU has only one ggtt object.
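
For illustration, a minimal sketch of the resulting LRU bookkeeping; the
helper name here is hypothetical and locking is omitted:

        /* Sketch: keep a PPGTT mm hot on the per-device LRU when it is used. */
        static void ppgtt_mm_touch_lru(struct intel_vgpu_mm *mm)
        {
                struct intel_gvt *gvt = mm->vgpu->gvt;

                if (mm->type != INTEL_GVT_MM_PPGTT)
                        return;

                list_move(&mm->ppgtt_mm.lru_list,
                          &gvt->gtt.ppgtt_mm_lru_list_head);
        }

Reclaim can then walk gvt->gtt.ppgtt_mm_lru_list_head from the tail and tear
down shadow page tables of unpinned ppgtt mms only; the single ggtt mm never
appears on this list.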

v4: Don't invoke ppgtt_free_all_shadow_page before intel_vgpu_destroy_all_ppgtt_mm.
v3: Add GVT_RING_CTX_NR_PDPS to avoid confusion about the PDPs.
v2: Split some changes into small standalone patches.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 1f267a57
(One file's diff in this commit is collapsed and not shown here.)
@@ -84,17 +84,12 @@ struct intel_gvt_gtt {
         void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
         struct list_head oos_page_use_list_head;
         struct list_head oos_page_free_list_head;
-        struct list_head mm_lru_list_head;
+        struct list_head ppgtt_mm_lru_list_head;
         struct page *scratch_page;
         unsigned long scratch_mfn;
 };
 
-enum {
-        INTEL_GVT_MM_GGTT = 0,
-        INTEL_GVT_MM_PPGTT,
-};
-
 typedef enum {
         GTT_TYPE_INVALID = -1,
@@ -125,26 +120,39 @@ typedef enum {
         GTT_TYPE_MAX,
 } intel_gvt_gtt_type_t;
 
-struct intel_vgpu_mm {
-        int type;
-        bool initialized;
-        bool shadowed;
+enum intel_gvt_mm_type {
+        INTEL_GVT_MM_GGTT,
+        INTEL_GVT_MM_PPGTT,
+};
 
-        int page_table_entry_type;
-        u32 page_table_entry_size;
-        u32 page_table_entry_cnt;
-        void *virtual_page_table;
-        void *shadow_page_table;
+#define GVT_RING_CTX_NR_PDPS    GEN8_3LVL_PDPES
 
-        int page_table_level;
-        bool has_shadow_page_table;
-        u32 pde_base_index;
+struct intel_vgpu_mm {
+        enum intel_gvt_mm_type type;
+        struct intel_vgpu *vgpu;
 
-        struct list_head list;
         struct kref ref;
         atomic_t pincount;
 
-        struct list_head lru_list;
-        struct intel_vgpu *vgpu;
+        union {
+                struct {
+                        intel_gvt_gtt_type_t root_entry_type;
+                        /*
+                         * The 4 PDPs in ring context. For 48bit addressing,
+                         * only PDP0 is valid and points to PML4. For 32bit
+                         * addressing, all 4 are used as true PDPs.
+                         */
+                        u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
+                        u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
+                        bool shadowed;
+
+                        struct list_head list;
+                        struct list_head lru_list;
+                } ppgtt_mm;
+                struct {
+                        void *virtual_ggtt;
+                } ggtt_mm;
+        };
 };
 
 extern int intel_vgpu_mm_get_entry(
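
With the union in place, each mm instance carries only the state that matches
its type, so any access should first check mm->type. A minimal, hypothetical
accessor illustrating that discipline (not part of this patch):

        /* Sketch: return PPGTT-only state, refusing the GGTT mm. */
        static u64 *mm_guest_pdps_or_null(struct intel_vgpu_mm *mm)
        {
                if (mm->type != INTEL_GVT_MM_PPGTT)
                        return NULL; /* a GGTT mm only has ggtt_mm.virtual_ggtt */

                return mm->ppgtt_mm.guest_pdps;
        }

The entry macros in the next hunk encode the same rule: the ggtt_* macros
touch only ggtt_mm.virtual_ggtt, while the ppgtt_* root-entry macros touch
only the guest and shadow PDP arrays.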
@@ -158,32 +166,31 @@ extern int intel_vgpu_mm_set_entry(
                 unsigned long index);
 
 #define ggtt_get_guest_entry(mm, e, index) \
-        intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+        intel_vgpu_mm_get_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)
 
 #define ggtt_set_guest_entry(mm, e, index) \
-        intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+        intel_vgpu_mm_set_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)
 
 #define ggtt_get_shadow_entry(mm, e, index) \
-        intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+        intel_vgpu_mm_get_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)
 
 #define ggtt_set_shadow_entry(mm, e, index) \
-        intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+        intel_vgpu_mm_set_entry(mm, mm->ggtt_mm.virtual_ggtt, e, index)
 
 #define ppgtt_get_guest_root_entry(mm, e, index) \
-        intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
+        intel_vgpu_mm_get_entry(mm, mm->ppgtt_mm.guest_pdps, e, index)
 
 #define ppgtt_set_guest_root_entry(mm, e, index) \
-        intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
+        intel_vgpu_mm_set_entry(mm, mm->ppgtt_mm.guest_pdps, e, index)
 
 #define ppgtt_get_shadow_root_entry(mm, e, index) \
-        intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
+        intel_vgpu_mm_get_entry(mm, mm->ppgtt_mm.shadow_pdps, e, index)
 
 #define ppgtt_set_shadow_root_entry(mm, e, index) \
-        intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
+        intel_vgpu_mm_set_entry(mm, mm->ppgtt_mm.shadow_pdps, e, index)
 
-extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
-                int mm_type, void *virtual_page_table, int page_table_level,
-                u32 pde_base_index);
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+                intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+
 extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
 
 struct intel_vgpu_guest_page;
@@ -196,7 +203,7 @@ struct intel_vgpu_scratch_pt {
 struct intel_vgpu_gtt {
         struct intel_vgpu_mm *ggtt_mm;
         unsigned long active_ppgtt_mm_bitmap;
-        struct list_head mm_list_head;
+        struct list_head ppgtt_mm_list_head;
         DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
         DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
         atomic_t n_tracked_guest_page;
@@ -294,13 +301,12 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
                 unsigned long gma);
 
 struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
-                int page_table_level, void *root_entry);
+                u64 pdps[]);
 
 int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
-                int page_table_level);
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
-                int page_table_level);
+                intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);
 
 int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
         unsigned int off, void *p_data, unsigned int bytes);
...
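
Since the root level is now stored in the mm itself as root_entry_type,
lookup can key on the guest PDPs alone. A simplified sketch of such a
matching walk, assuming ppgtt_mm.list is the link into
vgpu->gtt.ppgtt_mm_list_head (hypothetical function name; the in-tree
intel_vgpu_find_ppgtt_mm may differ in details, e.g. comparing only PDP0
for 4-level roots):

        /* Sketch: find an existing PPGTT mm by its guest PDP roots. */
        static struct intel_vgpu_mm *find_ppgtt_mm(struct intel_vgpu *vgpu,
                        u64 pdps[])
        {
                struct intel_vgpu_mm *mm;
                struct list_head *pos;

                list_for_each(pos, &vgpu->gtt.ppgtt_mm_list_head) {
                        mm = container_of(pos, struct intel_vgpu_mm,
                                          ppgtt_mm.list);
                        if (!memcmp(mm->ppgtt_mm.guest_pdps, pdps,
                                    sizeof(mm->ppgtt_mm.guest_pdps)))
                                return mm;
                }
                return NULL;
        }
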
@@ -1139,20 +1139,27 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
 
 static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
 {
+        u64 *pdps;
         int ret = 0;
 
+        pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));
+
         switch (notification) {
         case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
-                ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
+                ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu,
+                                GTT_TYPE_PPGTT_ROOT_L3_ENTRY,
+                                pdps);
                 break;
         case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
-                ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
+                ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, pdps);
                 break;
         case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
-                ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
+                ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu,
+                                GTT_TYPE_PPGTT_ROOT_L4_ENTRY,
+                                pdps);
                 break;
         case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
-                ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
+                ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, pdps);
                 break;
         case VGT_G2V_EXECLIST_CONTEXT_CREATE:
         case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
...
@@ -76,10 +76,9 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
                 else
                         intel_vgpu_default_mmio_write(vgpu, offset, p_data,
                                         bytes);
-        } else if (reg_is_gtt(gvt, offset) &&
-                   vgpu->gtt.ggtt_mm->virtual_page_table) {
+        } else if (reg_is_gtt(gvt, offset)) {
                 offset -= gvt->device_info.gtt_start_offset;
-                pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+                pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
                 if (read)
                         memcpy(p_data, pt, bytes);
                 else
...
@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
 #undef COPY_REG
 
         set_context_pdp_root_pointer(shadow_ring_context,
-                        workload->shadow_mm->shadow_page_table);
+                        (void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);
 
         intel_gvt_hypervisor_read_gpa(vgpu,
                         workload->ring_context_gpa +
@@ -1181,27 +1181,30 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
         struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
         struct intel_vgpu_mm *mm;
         struct intel_vgpu *vgpu = workload->vgpu;
-        int page_table_level;
-        u32 pdp[8];
+        intel_gvt_gtt_type_t root_entry_type;
+        u64 pdps[GVT_RING_CTX_NR_PDPS];
 
-        if (desc->addressing_mode == 1) { /* legacy 32-bit */
-                page_table_level = 3;
-        } else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-                page_table_level = 4;
-        } else {
+        switch (desc->addressing_mode) {
+        case 1: /* legacy 32-bit */
+                root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+                break;
+        case 3: /* legacy 64-bit */
+                root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+                break;
+        default:
                 gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
                 return -EINVAL;
         }
 
-        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+        read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);
 
-        mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
+        mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, pdps);
         if (mm) {
                 intel_gvt_mm_reference(mm);
         } else {
-                mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-                                pdp, page_table_level, 0);
+                mm = intel_vgpu_create_ppgtt_mm(workload->vgpu, root_entry_type,
+                                pdps);
                 if (IS_ERR(mm)) {
                         gvt_vgpu_err("fail to create mm object.\n");
                         return PTR_ERR(mm);
...
@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
 );
 
 TRACE_EVENT(gma_translate,
-        TP_PROTO(int id, char *type, int ring_id, int pt_level,
+        TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
                 unsigned long gma, unsigned long gpa),
 
-        TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+        TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
 
         TP_STRUCT__entry(
                 __array(char, buf, MAX_BUF_LEN)
@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
         TP_fast_assign(
                 snprintf(__entry->buf, MAX_BUF_LEN,
-                        "VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
-                        id, type, ring_id, pt_level, gma, gpa);
+                        "VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
+                        id, type, ring_id, root_entry_type, gma, gpa);
         ),
 
         TP_printk("%s", __entry->buf)
...