Commit 963976cf authored by Dave Airlie

Merge tag 'drm-intel-next-2018-03-08' of git://anongit.freedesktop.org/drm/drm-intel into drm-next

UAPI Changes:

- Query uAPI interface (used for GPU topology information currently)
	* Mesa: https://patchwork.freedesktop.org/series/38795/
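
  A rough userspace sketch of the query uAPI's intended two-pass call pattern
  (probe the blob size, then fetch it). Struct, ioctl and query-id names follow
  the uAPI proposed in this series; the header paths and error handling are
  illustrative assumptions, not part of this commit.

	#include <stdint.h>
	#include <stdlib.h>
	#include <xf86drm.h>
	#include <drm/i915_drm.h>

	static void *query_topology(int drm_fd, int *len_out)
	{
		struct drm_i915_query_item item = {
			.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
		};
		struct drm_i915_query query = {
			.num_items = 1,
			.items_ptr = (uintptr_t)&item,
		};
		void *buf;

		/* First pass: item.length == 0 asks the kernel for the blob size. */
		if (drmIoctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
			return NULL;

		buf = calloc(1, item.length);
		if (!buf)
			return NULL;
		item.data_ptr = (uintptr_t)buf;

		/* Second pass: the kernel fills the topology blob. */
		if (drmIoctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {
			free(buf);
			return NULL;
		}

		*len_out = item.length;
		return buf; /* begins with struct drm_i915_query_topology_info */
	}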

Driver Changes:

- Increase PSR2 size for CNL (DK)
- Avoid retraining LSPCON link unnecessarily (Ville)
- Decrease request signaling latency (Chris)
- GuC error capture fix (Daniele)

* tag 'drm-intel-next-2018-03-08' of git://anongit.freedesktop.org/drm/drm-intel: (127 commits)
  drm/i915: Update DRIVER_DATE to 20180308
  drm/i915: add schedule out notification of preempted but completed request
  drm/i915: expose rcs topology through query uAPI
  drm/i915: add query uAPI
  drm/i915: add rcs topology to error state
  drm/i915/debugfs: add rcs topology entry
  drm/i915/debugfs: reuse max slice/subslices already stored in sseu
  drm/i915: store all subslice masks
  drm/i915/guc: work around gcc-4.4.4 union initializer issue
  drm/i915/cnl: Add Wa_2201832410
  drm/i915/icl: Gen11 forcewake support
  drm/i915/icl: Add Indirect Context Offset for Gen11
  drm/i915/icl: Enhanced execution list support
  drm/i915/icl: new context descriptor support
  drm/i915/icl: Correctly initialize the Gen11 engines
  drm/i915: Assert that the request is indeed complete when signaled from irq
  drm/i915: Handle changing enable_fbc parameter at runtime better.
  drm/i915: Track whether the DP link is trained or not
  drm/i915: Nuke intel_dp->channel_eq_status
  drm/i915: Move SST DP link retraining into the ->post_hotplug() hook
  ...
parents 6fa7324a cf07a60f
@@ -450,5 +450,12 @@ See drivers/gpu/drm/amd/display/TODO for tasks.

Contact: Harry Wentland, Alex Deucher

+i915
+----
+
+- Our early/late pm callbacks could be removed in favour of using
+  device_link_add to model the dependency between i915 and snd_had. See
+  https://dri.freedesktop.org/docs/drm/driver-api/device_link.html

Outside DRM
===========
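For the i915 TODO item added above, a minimal sketch of what a device_link_add()-based dependency could look like. The snd_hda side, the helper name and the flag choice are assumptions for illustration, not an actual patch.

	#include <linux/device.h>
	#include <linux/errno.h>

	/* Hypothetical consumer-side setup, run once both devices exist. */
	static int snd_hda_link_to_i915(struct device *hda_dev,
					struct device *i915_dev)
	{
		struct device_link *link;

		/*
		 * Make snd_hda a runtime-PM consumer of i915: i915 is resumed
		 * before the audio controller and suspended after it, which is
		 * what the early/late PM callbacks currently guarantee by hand.
		 */
		link = device_link_add(hda_dev, i915_dev,
				       DL_FLAG_PM_RUNTIME | DL_FLAG_RPM_ACTIVE);

		return link ? 0 : -ENODEV;
	}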
@@ -1247,12 +1247,15 @@ void drm_crtc_vblank_on(struct drm_crtc *crtc)
EXPORT_SYMBOL(drm_crtc_vblank_on);

/**
- * drm_vblank_restore - estimated vblanks using timestamps and update it.
+ * drm_vblank_restore - estimate missed vblanks and update vblank count.
+ * @dev: DRM device
+ * @pipe: CRTC index
 *
 * Power management features can cause frame counter resets between vblank
- * disable and enable. Drivers can then use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
- * the last &drm_crtc_funcs.disable_vblank.
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
 *
 * This function is the legacy version of drm_crtc_vblank_restore().
 */
@@ -1293,11 +1296,14 @@ void drm_vblank_restore(struct drm_device *dev, unsigned int pipe)
EXPORT_SYMBOL(drm_vblank_restore);

/**
- * drm_crtc_vblank_restore - estimate vblanks using timestamps and update it.
+ * drm_crtc_vblank_restore - estimate missed vblanks and update vblank count.
+ * @crtc: CRTC in question
 *
 * Power management features can cause frame counter resets between vblank
- * disable and enable. Drivers can then use this function in their
- * &drm_crtc_funcs.enable_vblank implementation to estimate the vblanks since
- * the last &drm_crtc_funcs.disable_vblank.
+ * disable and enable. Drivers can use this function in their
+ * &drm_crtc_funcs.enable_vblank implementation to estimate missed vblanks since
+ * the last &drm_crtc_funcs.disable_vblank using timestamps and update the
+ * vblank counter.
 */
void drm_crtc_vblank_restore(struct drm_crtc *crtc)
{
...
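The reworded kerneldoc above says drivers should call this from their enable_vblank hook; a minimal, hypothetical hook illustrating that usage (all my_* names are made up, only drm_crtc_vblank_restore() and the &drm_crtc_funcs hooks come from the code above) might look like:

	#include <drm/drm_crtc.h>
	#include <drm/drm_vblank.h>

	/* Hypothetical hardware helper standing in for the driver's irq path. */
	static void my_hw_enable_vblank_irq(struct drm_crtc *crtc)
	{
		/* program the hardware's vblank interrupt enable here */
	}

	static int my_crtc_enable_vblank(struct drm_crtc *crtc)
	{
		/*
		 * The frame counter may have been reset while vblank interrupts
		 * were off (e.g. across power gating), so estimate the missed
		 * vblanks from timestamps and update the vblank count before
		 * re-enabling the interrupt.
		 */
		drm_crtc_vblank_restore(crtc);

		my_hw_enable_vblank_irq(crtc);
		return 0;
	}

	static const struct drm_crtc_funcs my_crtc_funcs = {
		/* ...the driver's other hooks... */
		.enable_vblank = my_crtc_enable_vblank,
	};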
@@ -63,13 +63,14 @@ i915-y += i915_cmd_parser.o \
	  i915_gem.o \
	  i915_gem_object.o \
	  i915_gem_render_state.o \
-	  i915_gem_request.o \
	  i915_gem_shrinker.o \
	  i915_gem_stolen.o \
	  i915_gem_tiling.o \
	  i915_gem_timeline.o \
	  i915_gem_userptr.o \
	  i915_gemfs.o \
+	  i915_query.o \
+	  i915_request.o \
	  i915_trace_points.o \
	  i915_vma.o \
	  intel_breadcrumbs.o \
@@ -89,7 +90,8 @@ i915-y += intel_uc.o \
	  intel_guc_fw.o \
	  intel_guc_log.o \
	  intel_guc_submission.o \
-	  intel_huc.o
+	  intel_huc.o \
+	  intel_huc_fw.o

# autogenerated null render state
i915-y += intel_renderstate_gen6.o \
...
@@ -3,7 +3,7 @@ GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
	interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
	execlist.o scheduler.o sched_policy.o mmio_context.o cmd_parser.o debugfs.o \
-	fb_decoder.o dmabuf.o
+	fb_decoder.o dmabuf.o page_track.o

ccflags-y += -I$(src) -I$(src)/$(GVT_DIR)
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
...
@@ -459,7 +459,7 @@ int intel_vgpu_get_dmabuf(struct intel_vgpu *vgpu, unsigned int dmabuf_id)
	obj = vgpu_create_gem(dev, dmabuf_obj->info);
	if (obj == NULL) {
-		gvt_vgpu_err("create gvt gem obj failed:%d\n", vgpu->id);
+		gvt_vgpu_err("create gvt gem obj failed\n");
		ret = -ENOMEM;
		goto out;
	}
...
This diff is collapsed.
@@ -39,7 +39,6 @@
struct intel_vgpu_mm;

-#define INTEL_GVT_GTT_HASH_BITS 8
#define INTEL_GVT_INVALID_ADDR (~0UL)

struct intel_gvt_gtt_entry {
@@ -84,17 +83,12 @@ struct intel_gvt_gtt {
	void (*mm_free_page_table)(struct intel_vgpu_mm *mm);
	struct list_head oos_page_use_list_head;
	struct list_head oos_page_free_list_head;
-	struct list_head mm_lru_list_head;
+	struct list_head ppgtt_mm_lru_list_head;
	struct page *scratch_page;
	unsigned long scratch_mfn;
};

-enum {
-	INTEL_GVT_MM_GGTT = 0,
-	INTEL_GVT_MM_PPGTT,
-};
-
typedef enum {
	GTT_TYPE_INVALID = -1,
@@ -125,66 +119,60 @@ typedef enum {
	GTT_TYPE_MAX,
} intel_gvt_gtt_type_t;

-struct intel_vgpu_mm {
-	int type;
-	bool initialized;
-	bool shadowed;
-	int page_table_entry_type;
-	u32 page_table_entry_size;
-	u32 page_table_entry_cnt;
-	void *virtual_page_table;
-	void *shadow_page_table;
-	int page_table_level;
-	bool has_shadow_page_table;
-	u32 pde_base_index;
-	struct list_head list;
+enum intel_gvt_mm_type {
+	INTEL_GVT_MM_GGTT,
+	INTEL_GVT_MM_PPGTT,
+};
+
+#define GVT_RING_CTX_NR_PDPS	GEN8_3LVL_PDPES
+
+struct intel_vgpu_mm {
+	enum intel_gvt_mm_type type;
+	struct intel_vgpu *vgpu;

	struct kref ref;
	atomic_t pincount;

-	struct list_head lru_list;
-	struct intel_vgpu *vgpu;
-};
-
-extern int intel_vgpu_mm_get_entry(
-		struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index);
-
-extern int intel_vgpu_mm_set_entry(
-		struct intel_vgpu_mm *mm,
-		void *page_table, struct intel_gvt_gtt_entry *e,
-		unsigned long index);
-
-#define ggtt_get_guest_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_set_guest_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
-
-#define ggtt_get_shadow_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
-
-#define ggtt_set_shadow_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
-
-#define ppgtt_get_guest_root_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->virtual_page_table, e, index)
-
-#define ppgtt_set_guest_root_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->virtual_page_table, e, index)
-
-#define ppgtt_get_shadow_root_entry(mm, e, index) \
-	intel_vgpu_mm_get_entry(mm, mm->shadow_page_table, e, index)
-
-#define ppgtt_set_shadow_root_entry(mm, e, index) \
-	intel_vgpu_mm_set_entry(mm, mm->shadow_page_table, e, index)
-
-extern struct intel_vgpu_mm *intel_vgpu_create_mm(struct intel_vgpu *vgpu,
-		int mm_type, void *virtual_page_table, int page_table_level,
-		u32 pde_base_index);
-extern void intel_vgpu_destroy_mm(struct kref *mm_ref);
+	union {
+		struct {
+			intel_gvt_gtt_type_t root_entry_type;
+			/*
+			 * The 4 PDPs in ring context. For 48bit addressing,
+			 * only PDP0 is valid and points to PML4. For 32bit
+			 * addressing, all 4 are used as true PDPs.
+			 */
+			u64 guest_pdps[GVT_RING_CTX_NR_PDPS];
+			u64 shadow_pdps[GVT_RING_CTX_NR_PDPS];
+			bool shadowed;
+
+			struct list_head list;
+			struct list_head lru_list;
+		} ppgtt_mm;
+		struct {
+			void *virtual_ggtt;
+		} ggtt_mm;
+	};
+};
+
+struct intel_vgpu_mm *intel_vgpu_create_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+
+static inline void intel_vgpu_mm_get(struct intel_vgpu_mm *mm)
+{
+	kref_get(&mm->ref);
+}
+
+void _intel_vgpu_mm_release(struct kref *mm_ref);
+
+static inline void intel_vgpu_mm_put(struct intel_vgpu_mm *mm)
+{
+	kref_put(&mm->ref, _intel_vgpu_mm_release);
+}
+
+static inline void intel_vgpu_destroy_mm(struct intel_vgpu_mm *mm)
+{
+	intel_vgpu_mm_put(mm);
+}

struct intel_vgpu_guest_page;
@@ -196,10 +184,8 @@ struct intel_vgpu_scratch_pt {
struct intel_vgpu_gtt {
	struct intel_vgpu_mm *ggtt_mm;
	unsigned long active_ppgtt_mm_bitmap;
-	struct list_head mm_list_head;
-	DECLARE_HASHTABLE(shadow_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-	DECLARE_HASHTABLE(tracked_guest_page_hash_table, INTEL_GVT_GTT_HASH_BITS);
-	atomic_t n_tracked_guest_page;
+	struct list_head ppgtt_mm_list_head;
+	struct radix_tree_root spt_tree;
	struct list_head oos_page_list_head;
	struct list_head post_shadow_list_head;
	struct intel_vgpu_scratch_pt scratch_pt[GTT_TYPE_MAX];
@@ -216,32 +202,8 @@ extern void intel_gvt_clean_gtt(struct intel_gvt *gvt);
extern struct intel_vgpu_mm *intel_gvt_find_ppgtt_mm(struct intel_vgpu *vgpu,
		int page_table_level, void *root_entry);

-struct intel_vgpu_oos_page;
-
-struct intel_vgpu_shadow_page {
-	void *vaddr;
-	struct page *page;
-	int type;
-	struct hlist_node node;
-	unsigned long mfn;
-};
-
-struct intel_vgpu_page_track {
-	struct hlist_node node;
-	bool tracked;
-	unsigned long gfn;
-	int (*handler)(void *, u64, void *, int);
-	void *data;
-};
-
-struct intel_vgpu_guest_page {
-	struct intel_vgpu_page_track track;
-	unsigned long write_cnt;
-	struct intel_vgpu_oos_page *oos_page;
-};
-
struct intel_vgpu_oos_page {
-	struct intel_vgpu_guest_page *guest_page;
+	struct intel_vgpu_ppgtt_spt *spt;
	struct list_head list;
	struct list_head vm_list;
	int id;
@@ -250,42 +212,33 @@ struct intel_vgpu_oos_page {

#define GTT_ENTRY_NUM_IN_ONE_PAGE 512

+/* Represent a vgpu shadow page table. */
struct intel_vgpu_ppgtt_spt {
-	struct intel_vgpu_shadow_page shadow_page;
-	struct intel_vgpu_guest_page guest_page;
-	int guest_page_type;
	atomic_t refcount;
	struct intel_vgpu *vgpu;
-	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
-	struct list_head post_shadow_list;
-};
-
-int intel_vgpu_init_page_track(struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t,
-		unsigned long gfn,
-		int (*handler)(void *gp, u64, void *, int),
-		void *data);
-
-void intel_vgpu_clean_page_track(struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t);
-
-struct intel_vgpu_page_track *intel_vgpu_find_tracked_page(
-		struct intel_vgpu *vgpu, unsigned long gfn);
+
+	struct {
+		intel_gvt_gtt_type_t type;
+		void *vaddr;
+		struct page *page;
+		unsigned long mfn;
+	} shadow_page;
+
+	struct {
+		intel_gvt_gtt_type_t type;
+		unsigned long gfn;
+		unsigned long write_cnt;
+		struct intel_vgpu_oos_page *oos_page;
+	} guest_page;
+
+	DECLARE_BITMAP(post_shadow_bitmap, GTT_ENTRY_NUM_IN_ONE_PAGE);
+	struct list_head post_shadow_list;
+};

int intel_vgpu_sync_oos_pages(struct intel_vgpu *vgpu);

int intel_vgpu_flush_post_shadow(struct intel_vgpu *vgpu);

-static inline void intel_gvt_mm_reference(struct intel_vgpu_mm *mm)
-{
-	kref_get(&mm->ref);
-}
-
-static inline void intel_gvt_mm_unreference(struct intel_vgpu_mm *mm)
-{
-	kref_put(&mm->ref, intel_vgpu_destroy_mm);
-}
-
int intel_vgpu_pin_mm(struct intel_vgpu_mm *mm);

void intel_vgpu_unpin_mm(struct intel_vgpu_mm *mm);
@@ -294,21 +247,17 @@ unsigned long intel_vgpu_gma_to_gpa(struct intel_vgpu_mm *mm,
		unsigned long gma);

struct intel_vgpu_mm *intel_vgpu_find_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level, void *root_entry);
+		u64 pdps[]);

-int intel_vgpu_g2v_create_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level);
-
-int intel_vgpu_g2v_destroy_ppgtt_mm(struct intel_vgpu *vgpu,
-		int page_table_level);
+struct intel_vgpu_mm *intel_vgpu_get_ppgtt_mm(struct intel_vgpu *vgpu,
+		intel_gvt_gtt_type_t root_entry_type, u64 pdps[]);
+
+int intel_vgpu_put_ppgtt_mm(struct intel_vgpu *vgpu, u64 pdps[]);

-int intel_vgpu_emulate_gtt_mmio_read(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_read(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

-int intel_vgpu_emulate_gtt_mmio_write(struct intel_vgpu *vgpu,
+int intel_vgpu_emulate_ggtt_mmio_write(struct intel_vgpu *vgpu,
	unsigned int off, void *p_data, unsigned int bytes);

-int intel_vgpu_write_protect_handler(struct intel_vgpu *vgpu, u64 pa,
-		void *p_data, unsigned int bytes);
-
#endif /* _GVT_GTT_H_ */
@@ -183,7 +183,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
	.get_gvt_attrs = intel_get_gvt_attrs,
	.vgpu_query_plane = intel_vgpu_query_plane,
	.vgpu_get_dmabuf = intel_vgpu_get_dmabuf,
-	.write_protect_handler = intel_vgpu_write_protect_handler,
+	.write_protect_handler = intel_vgpu_page_track_handler,
};

/**
...
@@ -48,6 +48,7 @@
#include "cmd_parser.h"
#include "fb_decoder.h"
#include "dmabuf.h"
+#include "page_track.h"

#define GVT_MAX_VGPU 8
@@ -131,11 +132,9 @@ struct intel_vgpu_opregion {
#define vgpu_opregion(vgpu) (&(vgpu->opregion))

-#define INTEL_GVT_MAX_PORT 5
-
struct intel_vgpu_display {
	struct intel_vgpu_i2c_edid i2c_edid;
-	struct intel_vgpu_port ports[INTEL_GVT_MAX_PORT];
+	struct intel_vgpu_port ports[I915_MAX_PORTS];
	struct intel_vgpu_sbi sbi;
};
@@ -190,6 +189,7 @@ struct intel_vgpu {
	struct intel_vgpu_opregion opregion;
	struct intel_vgpu_display display;
	struct intel_vgpu_submission submission;
+	struct radix_tree_root page_track_tree;
	u32 hws_pga[I915_NUM_ENGINES];

	struct dentry *debugfs;
@@ -201,8 +201,16 @@ struct intel_vgpu {
	int num_regions;
	struct eventfd_ctx *intx_trigger;
	struct eventfd_ctx *msi_trigger;
-	struct rb_root cache;
+
+	/*
+	 * Two caches are used to avoid mapping duplicated pages (eg.
+	 * scratch pages). This helps to reduce dma setup overhead.
+	 */
+	struct rb_root gfn_cache;
+	struct rb_root dma_addr_cache;
+	unsigned long nr_cache_entries;
	struct mutex cache_lock;
+
	struct notifier_block iommu_notifier;
	struct notifier_block group_notifier;
	struct kvm *kvm;
@@ -308,7 +316,10 @@ struct intel_gvt {
	wait_queue_head_t service_thread_wq;
	unsigned long service_request;

-	struct engine_mmio *engine_mmio_list;
+	struct {
+		struct engine_mmio *mmio;
+		int ctx_mmio_count[I915_NUM_ENGINES];
+	} engine_mmio_list;

	struct dentry *debugfs_root;
};
...
@@ -188,7 +188,9 @@ void enter_failsafe_mode(struct intel_vgpu *vgpu, int reason)
static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		unsigned int fence_num, void *p_data, unsigned int bytes)
{
-	if (fence_num >= vgpu_fence_sz(vgpu)) {
+	unsigned int max_fence = vgpu_fence_sz(vgpu);
+
+	if (fence_num >= max_fence) {

		/* When guest access oob fence regs without access
		 * pv_info first, we treat guest not supporting GVT,
@@ -201,7 +203,7 @@ static int sanitize_fence_mmio_access(struct intel_vgpu *vgpu,
		if (!vgpu->mmio.disable_warn_untrack) {
			gvt_vgpu_err("found oob fence register access\n");
			gvt_vgpu_err("total fence %d, access fence %d\n",
-					vgpu_fence_sz(vgpu), fence_num);
+					max_fence, fence_num);
		}
		memset(p_data, 0, bytes);
		return -EINVAL;
@@ -1139,21 +1141,21 @@ static int pvinfo_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,

static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
{
-	int ret = 0;
+	intel_gvt_gtt_type_t root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+	struct intel_vgpu_mm *mm;
+	u64 *pdps;
+
+	pdps = (u64 *)&vgpu_vreg64_t(vgpu, vgtif_reg(pdp[0]));

	switch (notification) {
	case VGT_G2V_PPGTT_L3_PAGE_TABLE_CREATE:
-		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 3);
-		break;
-	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
-		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 3);
-		break;
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_CREATE:
-		ret = intel_vgpu_g2v_create_ppgtt_mm(vgpu, 4);
-		break;
+		mm = intel_vgpu_get_ppgtt_mm(vgpu, root_entry_type, pdps);
+		return PTR_ERR_OR_ZERO(mm);
+	case VGT_G2V_PPGTT_L3_PAGE_TABLE_DESTROY:
	case VGT_G2V_PPGTT_L4_PAGE_TABLE_DESTROY:
-		ret = intel_vgpu_g2v_destroy_ppgtt_mm(vgpu, 4);
-		break;
+		return intel_vgpu_put_ppgtt_mm(vgpu, pdps);
	case VGT_G2V_EXECLIST_CONTEXT_CREATE:
	case VGT_G2V_EXECLIST_CONTEXT_DESTROY:
	case 1:	/* Remove this in guest driver. */
@@ -1161,7 +1163,7 @@ static int handle_g2v_notification(struct intel_vgpu *vgpu, int notification)
	default:
		gvt_vgpu_err("Invalid PV notification %d\n", notification);
	}
-	return ret;
+	return 0;
}

static int send_display_ready_uevent(struct intel_vgpu *vgpu, int ready)
@@ -1389,8 +1391,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
	int ring_id = intel_gvt_render_mmio_to_ring_id(vgpu->gvt, offset);

	if (!intel_gvt_ggtt_validate_range(vgpu, value, I915_GTT_PAGE_SIZE)) {
-		gvt_vgpu_err("VM(%d) write invalid HWSP address, reg:0x%x, value:0x%x\n",
-			      vgpu->id, offset, value);
+		gvt_vgpu_err("write invalid HWSP address, reg:0x%x, value:0x%x\n",
+			      offset, value);
		return -EINVAL;
	}
	/*
@@ -1399,8 +1401,8 @@ static int hws_pga_write(struct intel_vgpu *vgpu, unsigned int offset,
	 * support BDW, SKL or other platforms with same HWSP registers.
	 */
	if (unlikely(ring_id < 0 || ring_id >= I915_NUM_ENGINES)) {
-		gvt_vgpu_err("VM(%d) access unknown hardware status page register:0x%x\n",
-			      vgpu->id, offset);
+		gvt_vgpu_err("access unknown hardware status page register:0x%x\n",
+			      offset);
		return -EINVAL;
	}
	vgpu->hws_pga[ring_id] = value;
...
@@ -44,13 +44,18 @@ struct intel_gvt_mpt {
	void (*detach_vgpu)(unsigned long handle);
	int (*inject_msi)(unsigned long handle, u32 addr, u16 data);
	unsigned long (*from_virt_to_mfn)(void *p);
-	int (*set_wp_page)(unsigned long handle, u64 gfn);
-	int (*unset_wp_page)(unsigned long handle, u64 gfn);
+	int (*enable_page_track)(unsigned long handle, u64 gfn);
+	int (*disable_page_track)(unsigned long handle, u64 gfn);
	int (*read_gpa)(unsigned long handle, unsigned long gpa, void *buf,
			unsigned long len);
	int (*write_gpa)(unsigned long handle, unsigned long gpa, void *buf,
			 unsigned long len);
	unsigned long (*gfn_to_mfn)(unsigned long handle, unsigned long gfn);
+	int (*dma_map_guest_page)(unsigned long handle, unsigned long gfn,
+				  dma_addr_t *dma_addr);
+	void (*dma_unmap_guest_page)(unsigned long handle, dma_addr_t dma_addr);
	int (*map_gfn_to_mfn)(unsigned long handle, unsigned long gfn,
			      unsigned long mfn, unsigned int nr, bool map);
	int (*set_trap_area)(unsigned long handle, u64 start, u64 end,
...
This diff is collapsed.
@@ -76,10 +76,9 @@ static void failsafe_emulate_mmio_rw(struct intel_vgpu *vgpu, uint64_t pa,
		else
			intel_vgpu_default_mmio_write(vgpu, offset, p_data,
						      bytes);
-	} else if (reg_is_gtt(gvt, offset) &&
-			vgpu->gtt.ggtt_mm->virtual_page_table) {
+	} else if (reg_is_gtt(gvt, offset)) {
		offset -= gvt->device_info.gtt_start_offset;
-		pt = vgpu->gtt.ggtt_mm->virtual_page_table + offset;
+		pt = vgpu->gtt.ggtt_mm->ggtt_mm.virtual_ggtt + offset;
		if (read)
			memcpy(p_data, pt, bytes);
		else
@@ -125,7 +124,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, uint64_t pa,
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

-		ret = intel_vgpu_emulate_gtt_mmio_read(vgpu, offset,
+		ret = intel_vgpu_emulate_ggtt_mmio_read(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
@@ -198,7 +197,7 @@ int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, uint64_t pa,
		if (WARN_ON(!reg_is_gtt(gvt, offset + bytes - 1)))
			goto err;

-		ret = intel_vgpu_emulate_gtt_mmio_write(vgpu, offset,
+		ret = intel_vgpu_emulate_ggtt_mmio_write(vgpu, offset,
				p_data, bytes);
		if (ret)
			goto err;
...
@@ -50,6 +50,8 @@
#define RING_GFX_MODE(base)	_MMIO((base) + 0x29c)
#define VF_GUARDBAND		_MMIO(0x83a4)

+#define GEN9_MOCS_SIZE		64
+
/* Raw offset is appended to each line for convenience. */
static struct engine_mmio gen8_engine_mmio_list[] __cacheline_aligned = {
	{RCS, GFX_MODE_GEN7, 0xffff, false}, /* 0x229c */
@@ -151,8 +153,8 @@ static struct engine_mmio gen9_engine_mmio_list[] __cacheline_aligned = {

static struct {
	bool initialized;
-	u32 control_table[I915_NUM_ENGINES][64];
-	u32 l3cc_table[32];
+	u32 control_table[I915_NUM_ENGINES][GEN9_MOCS_SIZE];
+	u32 l3cc_table[GEN9_MOCS_SIZE / 2];
} gen9_render_mocs;

static void load_render_mocs(struct drm_i915_private *dev_priv)
@@ -169,7 +171,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)

	for (ring_id = 0; ring_id < ARRAY_SIZE(regs); ring_id++) {
		offset.reg = regs[ring_id];
-		for (i = 0; i < 64; i++) {
+		for (i = 0; i < GEN9_MOCS_SIZE; i++) {
			gen9_render_mocs.control_table[ring_id][i] =
				I915_READ_FW(offset);
			offset.reg += 4;
@@ -177,7 +179,7 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
	}

	offset.reg = 0xb020;
-	for (i = 0; i < 32; i++) {
+	for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
		gen9_render_mocs.l3cc_table[i] =
			I915_READ_FW(offset);
		offset.reg += 4;
@@ -185,6 +187,153 @@ static void load_render_mocs(struct drm_i915_private *dev_priv)
	gen9_render_mocs.initialized = true;
}
static int
restore_context_mmio_for_inhibit(struct intel_vgpu *vgpu,
struct i915_request *req)
{
u32 *cs;
int ret;
struct engine_mmio *mmio;
struct intel_gvt *gvt = vgpu->gvt;
int ring_id = req->engine->id;
int count = gvt->engine_mmio_list.ctx_mmio_count[ring_id];
if (count == 0)
return 0;
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
cs = intel_ring_begin(req, count * 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(count);
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
if (mmio->ring_id != ring_id ||
!mmio->in_context)
continue;
*cs++ = i915_mmio_reg_offset(mmio->reg);
*cs++ = vgpu_vreg_t(vgpu, mmio->reg) |
(mmio->mask << 16);
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, ring_id);
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = req->engine->emit_flush(req, EMIT_BARRIER);
if (ret)
return ret;
return 0;
}
static int
restore_render_mocs_control_for_inhibit(struct intel_vgpu *vgpu,
struct i915_request *req)
{
unsigned int index;
u32 *cs;
cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE);
for (index = 0; index < GEN9_MOCS_SIZE; index++) {
*cs++ = i915_mmio_reg_offset(GEN9_GFX_MOCS(index));
*cs++ = vgpu_vreg_t(vgpu, GEN9_GFX_MOCS(index));
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, req->engine->id);
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
static int
restore_render_mocs_l3cc_for_inhibit(struct intel_vgpu *vgpu,
struct i915_request *req)
{
unsigned int index;
u32 *cs;
cs = intel_ring_begin(req, 2 * GEN9_MOCS_SIZE / 2 + 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_LOAD_REGISTER_IMM(GEN9_MOCS_SIZE / 2);
for (index = 0; index < GEN9_MOCS_SIZE / 2; index++) {
*cs++ = i915_mmio_reg_offset(GEN9_LNCFCMOCS(index));
*cs++ = vgpu_vreg_t(vgpu, GEN9_LNCFCMOCS(index));
gvt_dbg_core("add lri reg pair 0x%x:0x%x in inhibit ctx, vgpu:%d, rind_id:%d\n",
*(cs-2), *(cs-1), vgpu->id, req->engine->id);
}
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return 0;
}
/*
* Use lri command to initialize the mmio which is in context state image for
* inhibit context, it contains tracked engine mmio, render_mocs and
* render_mocs_l3cc.
*/
int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
struct i915_request *req)
{
int ret;
u32 *cs;
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_DISABLE;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
ret = restore_context_mmio_for_inhibit(vgpu, req);
if (ret)
goto out;
/* no MOCS register in context except render engine */
if (req->engine->id != RCS)
goto out;
ret = restore_render_mocs_control_for_inhibit(vgpu, req);
if (ret)
goto out;
ret = restore_render_mocs_l3cc_for_inhibit(vgpu, req);
if (ret)
goto out;
out:
cs = intel_ring_begin(req, 2);
if (IS_ERR(cs))
return PTR_ERR(cs);
*cs++ = MI_ARB_ON_OFF | MI_ARB_ENABLE;
*cs++ = MI_NOOP;
intel_ring_advance(req, cs);
return ret;
}
static void handle_tlb_pending_event(struct intel_vgpu *vgpu, int ring_id)
{
	struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -251,11 +400,14 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,
	if (WARN_ON(ring_id >= ARRAY_SIZE(regs)))
		return;

+	if (IS_KABYLAKE(dev_priv) && ring_id == RCS)
+		return;
+
	if (!pre && !gen9_render_mocs.initialized)
		load_render_mocs(dev_priv);

	offset.reg = regs[ring_id];
-	for (i = 0; i < 64; i++) {
+	for (i = 0; i < GEN9_MOCS_SIZE; i++) {
		if (pre)
			old_v = vgpu_vreg_t(pre, offset);
		else
@@ -273,7 +425,7 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,

	if (ring_id == RCS) {
		l3_offset.reg = 0xb020;
-		for (i = 0; i < 32; i++) {
+		for (i = 0; i < GEN9_MOCS_SIZE / 2; i++) {
			if (pre)
				old_v = vgpu_vreg_t(pre, l3_offset);
			else
@@ -293,6 +445,16 @@ static void switch_mocs(struct intel_vgpu *pre, struct intel_vgpu *next,

#define CTX_CONTEXT_CONTROL_VAL	0x03

+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id)
+{
+	u32 *reg_state = ctx->engine[ring_id].lrc_reg_state;
+	u32 inhibit_mask =
+		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
+
+	return inhibit_mask ==
+		(reg_state[CTX_CONTEXT_CONTROL_VAL] & inhibit_mask);
+}
+
/* Switch ring mmio values (context). */
static void switch_mmio(struct intel_vgpu *pre,
			struct intel_vgpu *next,
@@ -300,9 +462,6 @@ static void switch_mmio(struct intel_vgpu *pre,
{
	struct drm_i915_private *dev_priv;
	struct intel_vgpu_submission *s;
-	u32 *reg_state, ctx_ctrl;
-	u32 inhibit_mask =
-		_MASKED_BIT_ENABLE(CTX_CTRL_ENGINE_CTX_RESTORE_INHIBIT);
	struct engine_mmio *mmio;
	u32 old_v, new_v;
@@ -310,10 +469,18 @@ static void switch_mmio(struct intel_vgpu *pre,
	if (IS_SKYLAKE(dev_priv) || IS_KABYLAKE(dev_priv))
		switch_mocs(pre, next, ring_id);

-	for (mmio = dev_priv->gvt->engine_mmio_list;
+	for (mmio = dev_priv->gvt->engine_mmio_list.mmio;
	     i915_mmio_reg_valid(mmio->reg); mmio++) {
		if (mmio->ring_id != ring_id)
			continue;
+		/*
+		 * No need to do save or restore of the mmio which is in context
+		 * state image on kabylake, it's initialized by lri command and
+		 * save or restore with context together.
+		 */
+		if (IS_KABYLAKE(dev_priv) && mmio->in_context)
+			continue;
+
		// save
		if (pre) {
			vgpu_vreg_t(pre, mmio->reg) = I915_READ_FW(mmio->reg);
@@ -327,16 +494,13 @@ static void switch_mmio(struct intel_vgpu *pre,
		// restore
		if (next) {
			s = &next->submission;
-			reg_state =
-				s->shadow_ctx->engine[ring_id].lrc_reg_state;
-			ctx_ctrl = reg_state[CTX_CONTEXT_CONTROL_VAL];
			/*
-			 * if it is an inhibit context, load in_context mmio
-			 * into HW by mmio write. If it is not, skip this mmio
-			 * write.
+			 * No need to restore the mmio which is in context state
+			 * image if it's not inhibit context, it will restore
+			 * itself.
			 */
			if (mmio->in_context &&
-			    (ctx_ctrl & inhibit_mask) != inhibit_mask)
+			    !is_inhibit_context(s->shadow_ctx, ring_id))
				continue;

			if (mmio->mask)
@@ -405,8 +569,16 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,
 */
void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
{
+	struct engine_mmio *mmio;
+
	if (IS_SKYLAKE(gvt->dev_priv) || IS_KABYLAKE(gvt->dev_priv))
-		gvt->engine_mmio_list = gen9_engine_mmio_list;
+		gvt->engine_mmio_list.mmio = gen9_engine_mmio_list;
	else
-		gvt->engine_mmio_list = gen8_engine_mmio_list;
+		gvt->engine_mmio_list.mmio = gen8_engine_mmio_list;
+
+	for (mmio = gvt->engine_mmio_list.mmio;
+	     i915_mmio_reg_valid(mmio->reg); mmio++) {
+		if (mmio->in_context)
+			gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+	}
}
@@ -49,4 +49,9 @@ void intel_gvt_switch_mmio(struct intel_vgpu *pre,

void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt);

+bool is_inhibit_context(struct i915_gem_context *ctx, int ring_id);
+
+int intel_vgpu_restore_inhibit_context(struct intel_vgpu *vgpu,
+				       struct i915_request *req);
+
#endif
@@ -154,54 +154,31 @@ static inline unsigned long intel_gvt_hypervisor_virt_to_mfn(void *p)
}

/**
- * intel_gvt_hypervisor_enable - set a guest page to write-protected
+ * intel_gvt_hypervisor_enable_page_track - track a guest page
 * @vgpu: a vGPU
- * @t: page track data structure
+ * @gfn: the gfn of guest
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_enable_page_track(
-		struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t)
+		struct intel_vgpu *vgpu, unsigned long gfn)
{
-	int ret;
-
-	if (t->tracked)
-		return 0;
-
-	ret = intel_gvt_host.mpt->set_wp_page(vgpu->handle, t->gfn);
-	if (ret)
-		return ret;
-	t->tracked = true;
-	atomic_inc(&vgpu->gtt.n_tracked_guest_page);
-	return 0;
+	return intel_gvt_host.mpt->enable_page_track(vgpu->handle, gfn);
}

/**
- * intel_gvt_hypervisor_disable_page_track - remove the write-protection of a
- * guest page
+ * intel_gvt_hypervisor_disable_page_track - untrack a guest page
 * @vgpu: a vGPU
- * @t: page track data structure
+ * @gfn: the gfn of guest
 *
 * Returns:
 * Zero on success, negative error code if failed.
 */
static inline int intel_gvt_hypervisor_disable_page_track(
-		struct intel_vgpu *vgpu,
-		struct intel_vgpu_page_track *t)
+		struct intel_vgpu *vgpu, unsigned long gfn)
{
-	int ret;
-
-	if (!t->tracked)
-		return 0;
-
-	ret = intel_gvt_host.mpt->unset_wp_page(vgpu->handle, t->gfn);
-	if (ret)
-		return ret;
-	t->tracked = false;
-	atomic_dec(&vgpu->gtt.n_tracked_guest_page);
-	return 0;
+	return intel_gvt_host.mpt->disable_page_track(vgpu->handle, gfn);
}

/**
@@ -250,6 +227,34 @@ static inline unsigned long intel_gvt_hypervisor_gfn_to_mfn(
	return intel_gvt_host.mpt->gfn_to_mfn(vgpu->handle, gfn);
}

+/**
+ * intel_gvt_hypervisor_dma_map_guest_page - setup dma map for guest page
+ * @vgpu: a vGPU
+ * @gfn: guest pfn
+ * @dma_addr: retrieve allocated dma addr
+ *
+ * Returns:
+ * 0 on success, negative error code if failed.
+ */
+static inline int intel_gvt_hypervisor_dma_map_guest_page(
+		struct intel_vgpu *vgpu, unsigned long gfn,
+		dma_addr_t *dma_addr)
+{
+	return intel_gvt_host.mpt->dma_map_guest_page(vgpu->handle, gfn,
+						      dma_addr);
+}
+
+/**
+ * intel_gvt_hypervisor_dma_unmap_guest_page - cancel dma map for guest page
+ * @vgpu: a vGPU
+ * @dma_addr: the mapped dma addr
+ */
+static inline void intel_gvt_hypervisor_dma_unmap_guest_page(
+		struct intel_vgpu *vgpu, dma_addr_t dma_addr)
+{
+	intel_gvt_host.mpt->dma_unmap_guest_page(vgpu->handle, dma_addr);
+}
+
/**
 * intel_gvt_hypervisor_map_gfn_to_mfn - map a GFN region to MFN
 * @vgpu: a vGPU
...
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "i915_drv.h"
#include "gvt.h"
/**
* intel_vgpu_find_page_track - find page track record of guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* A pointer to struct intel_vgpu_page_track if found, else NULL returned.
*/
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
struct intel_vgpu *vgpu, unsigned long gfn)
{
return radix_tree_lookup(&vgpu->page_track_tree, gfn);
}
/**
* intel_vgpu_register_page_track - register a guest page to be tracked
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu, unsigned long gfn,
gvt_page_track_handler_t handler, void *priv)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (track)
return -EEXIST;
track = kzalloc(sizeof(*track), GFP_KERNEL);
if (!track)
return -ENOMEM;
track->handler = handler;
track->priv_data = priv;
ret = radix_tree_insert(&vgpu->page_track_tree, gfn, track);
if (ret) {
kfree(track);
return ret;
}
return 0;
}
/**
* intel_vgpu_unregister_page_track - unregister the tracked guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
*/
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
unsigned long gfn)
{
struct intel_vgpu_page_track *track;
track = radix_tree_delete(&vgpu->page_track_tree, gfn);
if (track) {
if (track->tracked)
intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
kfree(track);
}
}
/**
* intel_vgpu_enable_page_track - set write-protection on guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (!track)
return -ENXIO;
if (track->tracked)
return 0;
ret = intel_gvt_hypervisor_enable_page_track(vgpu, gfn);
if (ret)
return ret;
track->tracked = true;
return 0;
}
/**
* intel_vgpu_disable_page_track - cancel write-protection on guest page
* @vgpu: a vGPU
* @gfn: the gfn of guest page
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn)
{
struct intel_vgpu_page_track *track;
int ret;
track = intel_vgpu_find_page_track(vgpu, gfn);
if (!track)
return -ENXIO;
if (!track->tracked)
return 0;
ret = intel_gvt_hypervisor_disable_page_track(vgpu, gfn);
if (ret)
return ret;
track->tracked = false;
return 0;
}
/**
* intel_vgpu_page_track_handler - called when write to write-protected page
* @vgpu: a vGPU
* @gpa: the gpa of this write
* @data: the written data
* @bytes: the length of this write
*
* Returns:
* zero on success, negative error code if failed.
*/
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes)
{
struct intel_gvt *gvt = vgpu->gvt;
struct intel_vgpu_page_track *page_track;
int ret = 0;
mutex_lock(&gvt->lock);
page_track = intel_vgpu_find_page_track(vgpu, gpa >> PAGE_SHIFT);
if (!page_track) {
ret = -ENXIO;
goto out;
}
if (unlikely(vgpu->failsafe)) {
/* Remove write protection to prevent future traps. */
intel_vgpu_disable_page_track(vgpu, gpa >> PAGE_SHIFT);
} else {
ret = page_track->handler(page_track, gpa, data, bytes);
if (ret)
gvt_err("guest page write error, gpa %llx\n", gpa);
}
out:
mutex_unlock(&gvt->lock);
return ret;
}
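The page-track API above is consumed by the GTT shadow-page code elsewhere in this series; a hypothetical caller, built only from the signatures declared in this new file (the handler name and its body are illustrative, not taken from gtt.c), might look like:

	#include "gvt.h"	/* pulls in page_track.h per this series */

	/* Hypothetical write-protect handler; the real users live in gtt.c. */
	static int sample_wp_handler(struct intel_vgpu_page_track *page_track,
				     u64 gpa, void *data, int bytes)
	{
		/* page_track->priv_data carries whatever was registered below. */
		return 0;
	}

	static int sample_track_gfn(struct intel_vgpu *vgpu, unsigned long gfn,
				    void *priv)
	{
		int ret;

		/* Remember the gfn and its handler in the radix tree... */
		ret = intel_vgpu_register_page_track(vgpu, gfn,
						     sample_wp_handler, priv);
		if (ret)
			return ret;

		/* ...then ask the hypervisor to write-protect the page. */
		ret = intel_vgpu_enable_page_track(vgpu, gfn);
		if (ret)
			intel_vgpu_unregister_page_track(vgpu, gfn);

		return ret;
	}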
/*
* Copyright(c) 2011-2017 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
*/
#ifndef _GVT_PAGE_TRACK_H_
#define _GVT_PAGE_TRACK_H_
struct intel_vgpu_page_track;
typedef int (*gvt_page_track_handler_t)(
struct intel_vgpu_page_track *page_track,
u64 gpa, void *data, int bytes);
/* Track record for a write-protected guest page. */
struct intel_vgpu_page_track {
gvt_page_track_handler_t handler;
bool tracked;
void *priv_data;
};
struct intel_vgpu_page_track *intel_vgpu_find_page_track(
struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_register_page_track(struct intel_vgpu *vgpu,
unsigned long gfn, gvt_page_track_handler_t handler,
void *priv);
void intel_vgpu_unregister_page_track(struct intel_vgpu *vgpu,
unsigned long gfn);
int intel_vgpu_enable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_disable_page_track(struct intel_vgpu *vgpu, unsigned long gfn);
int intel_vgpu_page_track_handler(struct intel_vgpu *vgpu, u64 gpa,
void *data, unsigned int bytes);
#endif
@@ -103,9 +103,8 @@ static void gvt_balance_timeslice(struct gvt_sched_data *sched_data)
		list_for_each(pos, &sched_data->lru_runq_head) {
			vgpu_data = container_of(pos, struct vgpu_sched_data, lru_list);

-			fair_timeslice = ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS) *
-						vgpu_data->sched_ctl.weight /
-						total_weight;
+			fair_timeslice = ktime_divns(ms_to_ktime(GVT_TS_BALANCE_PERIOD_MS),
+						     total_weight) * vgpu_data->sched_ctl.weight;

			vgpu_data->allocated_ts = fair_timeslice;
			vgpu_data->left_ts = vgpu_data->allocated_ts;
...
@@ -113,7 +113,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
#undef COPY_REG

	set_context_pdp_root_pointer(shadow_ring_context,
-			workload->shadow_mm->shadow_page_table);
+			(void *)workload->shadow_mm->ppgtt_mm.shadow_pdps);

	intel_gvt_hypervisor_read_gpa(vgpu,
			workload->ring_context_gpa +
@@ -126,7 +126,7 @@ static int populate_shadow_context(struct intel_vgpu_workload *workload)
	return 0;
}

-static inline bool is_gvt_request(struct drm_i915_gem_request *req)
+static inline bool is_gvt_request(struct i915_request *req)
{
	return i915_gem_context_force_single_submission(req->ctx);
}
@@ -148,7 +148,7 @@ static void save_ring_hw_state(struct intel_vgpu *vgpu, int ring_id)
static int shadow_context_status_change(struct notifier_block *nb,
		unsigned long action, void *data)
{
-	struct drm_i915_gem_request *req = (struct drm_i915_gem_request *)data;
+	struct i915_request *req = data;
	struct intel_gvt *gvt = container_of(nb, struct intel_gvt,
				shadow_ctx_notifier_block[req->engine->id]);
	struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
@@ -225,6 +225,11 @@ static int copy_workload_to_ring_buffer(struct intel_vgpu_workload *workload)
	struct intel_vgpu *vgpu = workload->vgpu;
	void *shadow_ring_buffer_va;
	u32 *cs;
+	struct i915_request *req = workload->req;
+
+	if (IS_KABYLAKE(req->i915) &&
+	    is_inhibit_context(req->ctx, req->engine->id))
+		intel_vgpu_restore_inhibit_context(vgpu, req);

	/* allocate shadow ring buffer */
	cs = intel_ring_begin(workload->req, workload->rb_len / sizeof(u32));
@@ -333,13 +338,13 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)
	int ring_id = workload->ring_id;
	struct drm_i915_private *dev_priv = workload->vgpu->gvt->dev_priv;
	struct intel_engine_cs *engine = dev_priv->engine[ring_id];
-	struct drm_i915_gem_request *rq;
+	struct i915_request *rq;
	struct intel_vgpu *vgpu = workload->vgpu;
	struct intel_vgpu_submission *s = &vgpu->submission;
	struct i915_gem_context *shadow_ctx = s->shadow_ctx;
	int ret;

-	rq = i915_gem_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
+	rq = i915_request_alloc(dev_priv->engine[ring_id], shadow_ctx);
	if (IS_ERR(rq)) {
		gvt_vgpu_err("fail to allocate gem request\n");
		ret = PTR_ERR(rq);
@@ -348,7 +353,7 @@ static int intel_gvt_generate_request(struct intel_vgpu_workload *workload)

	gvt_dbg_sched("ring id %d get i915 gem request %p\n", ring_id, rq);

-	workload->req = i915_gem_request_get(rq);
+	workload->req = i915_request_get(rq);
	ret = copy_workload_to_ring_buffer(workload);
	if (ret)
		goto err_unpin;
@@ -582,7 +587,7 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
	if (!IS_ERR_OR_NULL(workload->req)) {
		gvt_dbg_sched("ring id %d submit workload to i915 %p\n",
			      ring_id, workload->req);
-		i915_add_request(workload->req);
+		i915_request_add(workload->req);
		workload->dispatched = true;
	}
@@ -769,7 +774,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
		workload->status = 0;
	}

-	i915_gem_request_put(fetch_and_zero(&workload->req));
+	i915_request_put(fetch_and_zero(&workload->req));

	if (!workload->status && !(vgpu->resetting_eng &
				   ENGINE_MASK(ring_id))) {
@@ -886,7 +891,7 @@ static int workload_thread(void *priv)
		gvt_dbg_sched("ring id %d wait workload %p\n",
			      workload->ring_id, workload);
-		i915_wait_request(workload->req, 0, MAX_SCHEDULE_TIMEOUT);
+		i915_request_wait(workload->req, 0, MAX_SCHEDULE_TIMEOUT);

complete:
		gvt_dbg_sched("will complete workload %p, status: %d\n",
@@ -1132,7 +1137,7 @@ void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload)
	struct intel_vgpu_submission *s = &workload->vgpu->submission;

	if (workload->shadow_mm)
-		intel_gvt_mm_unreference(workload->shadow_mm);
+		intel_vgpu_mm_put(workload->shadow_mm);

	kmem_cache_free(s->workloads, workload);
}
@@ -1181,32 +1186,27 @@ static int prepare_mm(struct intel_vgpu_workload *workload)
	struct execlist_ctx_descriptor_format *desc = &workload->ctx_desc;
	struct intel_vgpu_mm *mm;
	struct intel_vgpu *vgpu = workload->vgpu;
-	int page_table_level;
-	u32 pdp[8];
+	intel_gvt_gtt_type_t root_entry_type;
+	u64 pdps[GVT_RING_CTX_NR_PDPS];

-	if (desc->addressing_mode == 1) { /* legacy 32-bit */
-		page_table_level = 3;
-	} else if (desc->addressing_mode == 3) { /* legacy 64 bit */
-		page_table_level = 4;
-	} else {
+	switch (desc->addressing_mode) {
+	case 1: /* legacy 32-bit */
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L3_ENTRY;
+		break;
+	case 3: /* legacy 64-bit */
+		root_entry_type = GTT_TYPE_PPGTT_ROOT_L4_ENTRY;
+		break;
+	default:
		gvt_vgpu_err("Advanced Context mode(SVM) is not supported!\n");
		return -EINVAL;
	}

-	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, pdp);
+	read_guest_pdps(workload->vgpu, workload->ring_context_gpa, (void *)pdps);

-	mm = intel_vgpu_find_ppgtt_mm(workload->vgpu, page_table_level, pdp);
-	if (mm) {
-		intel_gvt_mm_reference(mm);
-	} else {
-		mm = intel_vgpu_create_mm(workload->vgpu, INTEL_GVT_MM_PPGTT,
-				pdp, page_table_level, 0);
-		if (IS_ERR(mm)) {
-			gvt_vgpu_err("fail to create mm object.\n");
-			return PTR_ERR(mm);
-		}
-	}
+	mm = intel_vgpu_get_ppgtt_mm(workload->vgpu, root_entry_type, pdps);
+	if (IS_ERR(mm))
+		return PTR_ERR(mm);
+
	workload->shadow_mm = mm;
	return 0;
}
...
@@ -80,7 +80,7 @@ struct intel_shadow_wa_ctx {
 struct intel_vgpu_workload {
 	struct intel_vgpu *vgpu;
 	int ring_id;
-	struct drm_i915_gem_request *req;
+	struct i915_request *req;
 	/* if this workload has been dispatched to i915? */
 	bool dispatched;
 	bool shadowed;
...
@@ -113,10 +113,10 @@ TRACE_EVENT(gma_index,
 );
 TRACE_EVENT(gma_translate,
-	TP_PROTO(int id, char *type, int ring_id, int pt_level,
+	TP_PROTO(int id, char *type, int ring_id, int root_entry_type,
 		unsigned long gma, unsigned long gpa),
-	TP_ARGS(id, type, ring_id, pt_level, gma, gpa),
+	TP_ARGS(id, type, ring_id, root_entry_type, gma, gpa),
 	TP_STRUCT__entry(
 		__array(char, buf, MAX_BUF_LEN)
@@ -124,8 +124,8 @@ TRACE_EVENT(gma_translate,
 	TP_fast_assign(
 		snprintf(__entry->buf, MAX_BUF_LEN,
-			"VM%d %s ring %d pt_level %d gma 0x%lx -> gpa 0x%lx\n",
-			id, type, ring_id, pt_level, gma, gpa);
+			"VM%d %s ring %d root_entry_type %d gma 0x%lx -> gpa 0x%lx\n",
+			id, type, ring_id, root_entry_type, gma, gpa);
 	),
 	TP_printk("%s", __entry->buf)
@@ -168,7 +168,7 @@ TRACE_EVENT(spt_change,
 	TP_printk("%s", __entry->buf)
 );
-TRACE_EVENT(gpt_change,
+TRACE_EVENT(spt_guest_change,
 	TP_PROTO(int id, const char *tag, void *spt, int type, u64 v,
 		unsigned long index),
...
@@ -354,6 +354,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
 	vgpu->gvt = gvt;
 	vgpu->sched_ctl.weight = param->weight;
 	INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
+	INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
 	idr_init(&vgpu->object_idr);
 	intel_vgpu_init_cfg_space(vgpu, param->primary);
...
@@ -49,6 +49,7 @@
 #include "i915_drv.h"
 #include "i915_trace.h"
 #include "i915_pmu.h"
+#include "i915_query.h"
 #include "i915_vgpu.h"
 #include "intel_drv.h"
 #include "intel_uc.h"
@@ -428,7 +429,7 @@ static int i915_getparam_ioctl(struct drm_device *dev, void *data,
 			return -ENODEV;
 		break;
 	case I915_PARAM_SUBSLICE_MASK:
-		value = INTEL_INFO(dev_priv)->sseu.subslice_mask;
+		value = INTEL_INFO(dev_priv)->sseu.subslice_mask[0];
 		if (!value)
 			return -ENODEV;
 		break;
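
The getparam hunk above now answers I915_PARAM_SUBSLICE_MASK with a single int taken from subslice_mask[0], i.e. the subslice mask of slice 0 only. A minimal userspace sketch of that call follows; it assumes the drm_i915_getparam definitions from libdrm's i915_drm.h, and the helper itself is hypothetical, not code from the kernel or any userspace project.

/*
 * Hypothetical helper: read I915_PARAM_SUBSLICE_MASK through
 * DRM_IOCTL_I915_GETPARAM.  The kernel fills *mask_out with a single
 * int, which (per the hunk above) describes slice 0 only.
 */
#include <sys/ioctl.h>
#include <i915_drm.h>	/* libdrm uAPI header; include path is assumed */

static int get_subslice_mask(int drm_fd, int *mask_out)
{
	struct drm_i915_getparam gp = {
		.param = I915_PARAM_SUBSLICE_MASK,
		.value = mask_out,
	};

	return ioctl(drm_fd, DRM_IOCTL_I915_GETPARAM, &gp);	/* 0 on success */
}
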
@@ -808,7 +809,7 @@ static int i915_workqueues_init(struct drm_i915_private *dev_priv)
 	/*
 	 * The i915 workqueue is primarily used for batched retirement of
 	 * requests (and thus managing bo) once the task has been completed
-	 * by the GPU. i915_gem_retire_requests() is called directly when we
+	 * by the GPU. i915_retire_requests() is called directly when we
 	 * need high-priority retirement, such as waiting for an explicit
 	 * bo.
 	 *
@@ -1992,7 +1993,7 @@ void i915_reset(struct drm_i915_private *i915, unsigned int flags)
 	add_taint(TAINT_WARN, LOCKDEP_STILL_OK);
 error:
 	i915_gem_set_wedged(i915);
-	i915_gem_retire_requests(i915);
+	i915_retire_requests(i915);
 	intel_gpu_reset(i915, ALL_ENGINES);
 	goto finish;
 }
@@ -2019,7 +2020,7 @@ static inline int intel_gt_reset_engine(struct drm_i915_private *dev_priv,
 int i915_reset_engine(struct intel_engine_cs *engine, unsigned int flags)
 {
 	struct i915_gpu_error *error = &engine->i915->gpu_error;
-	struct drm_i915_gem_request *active_request;
+	struct i915_request *active_request;
 	int ret;
 	GEM_BUG_ON(!test_bit(I915_RESET_ENGINE + engine->id, &error->flags));
@@ -2575,7 +2576,7 @@ static int intel_runtime_suspend(struct device *kdev)
 	 */
 	i915_gem_runtime_suspend(dev_priv);
-	intel_guc_suspend(dev_priv);
+	intel_uc_suspend(dev_priv);
 	intel_runtime_pm_disable_interrupts(dev_priv);
@@ -2597,7 +2598,7 @@ static int intel_runtime_suspend(struct device *kdev)
 		intel_runtime_pm_enable_interrupts(dev_priv);
-		intel_guc_resume(dev_priv);
+		intel_uc_resume(dev_priv);
 		i915_gem_init_swizzling(dev_priv);
 		i915_gem_restore_fences(dev_priv);
@@ -2683,7 +2684,7 @@ static int intel_runtime_resume(struct device *kdev)
 	intel_runtime_pm_enable_interrupts(dev_priv);
-	intel_guc_resume(dev_priv);
+	intel_uc_resume(dev_priv);
 	/*
 	 * No point of rolling back things in case of an error, as the best
@@ -2832,6 +2833,7 @@ static const struct drm_ioctl_desc i915_ioctls[] = {
 	DRM_IOCTL_DEF_DRV(I915_PERF_OPEN, i915_perf_open_ioctl, DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_ADD_CONFIG, i915_perf_add_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 	DRM_IOCTL_DEF_DRV(I915_PERF_REMOVE_CONFIG, i915_perf_remove_config_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
+	DRM_IOCTL_DEF_DRV(I915_QUERY, i915_query_ioctl, DRM_UNLOCKED|DRM_RENDER_ALLOW),
 };
 static struct drm_driver driver = {
...
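
The ioctl table above registers the new I915_QUERY ioctl. The expected userspace pattern is a two-pass call: first with the item's length set to 0 so the kernel reports the required buffer size, then again with a buffer of that size. Below is a minimal, hypothetical sketch of that pattern; it assumes the drm_i915_query / drm_i915_query_item layout and the DRM_I915_QUERY_TOPOLOGY_INFO id from the matching i915_drm.h uAPI header, and is not code from the kernel or Mesa.

/*
 * Hypothetical two-pass query: probe the required size, allocate,
 * then repeat the ioctl to have the kernel fill the buffer.
 */
#include <stdint.h>
#include <stdlib.h>
#include <sys/ioctl.h>
#include <i915_drm.h>	/* uAPI header providing drm_i915_query{,_item} (assumed) */

static void *query_topology(int drm_fd, int32_t *len_out)
{
	struct drm_i915_query_item item = {
		.query_id = DRM_I915_QUERY_TOPOLOGY_INFO,
	};
	struct drm_i915_query query = {
		.num_items = 1,
		.items_ptr = (uintptr_t)&item,
	};
	void *buf;

	/* First pass: item.length == 0 asks the kernel for the size. */
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query) || item.length <= 0)
		return NULL;

	buf = calloc(1, item.length);
	if (!buf)
		return NULL;

	/* Second pass: same item, now pointing at a buffer of that size. */
	item.data_ptr = (uintptr_t)buf;
	if (ioctl(drm_fd, DRM_IOCTL_I915_QUERY, &query)) {
		free(buf);
		return NULL;
	}

	*len_out = item.length;
	return buf;	/* caller interprets the topology data */
}
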
@@ -71,9 +71,9 @@
 #include "i915_gem_fence_reg.h"
 #include "i915_gem_object.h"
 #include "i915_gem_gtt.h"
-#include "i915_gem_request.h"
 #include "i915_gem_timeline.h"
+#include "i915_request.h"
 #include "i915_vma.h"
 #include "intel_gvt.h"
@@ -83,8 +83,8 @@
 #define DRIVER_NAME		"i915"
 #define DRIVER_DESC		"Intel Graphics"
-#define DRIVER_DATE		"20180221"
-#define DRIVER_TIMESTAMP	1519219289
+#define DRIVER_DATE		"20180308"
+#define DRIVER_TIMESTAMP	1520513379
 /* Use I915_STATE_WARN(x) and I915_STATE_WARN_ON() (rather than WARN() and
  * WARN_ON()) for hw state sanity checks to check for unexpected conditions
@@ -1231,7 +1231,7 @@ struct i915_gpu_error {
 	 *
 	 * #I915_WEDGED - If reset fails and we can no longer use the GPU,
 	 * we set the #I915_WEDGED bit. Prior to command submission, e.g.
-	 * i915_gem_request_alloc(), this bit is checked and the sequence
+	 * i915_request_alloc(), this bit is checked and the sequence
 	 * aborted (with -EIO reported to userspace) if set.
 	 */
 	unsigned long flags;
@@ -2103,6 +2103,7 @@ struct drm_i915_private {
 		 */
 		struct ida hw_ida;
 #define MAX_CONTEXT_HW_ID (1<<21) /* exclusive */
+#define GEN11_MAX_CONTEXT_HW_ID (1<<11) /* exclusive */
 	} contexts;
 	u32 fdi_rx_config;
@@ -2746,6 +2747,9 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define BLT_RING	ENGINE_MASK(BCS)
 #define VEBOX_RING	ENGINE_MASK(VECS)
 #define BSD2_RING	ENGINE_MASK(VCS2)
+#define BSD3_RING	ENGINE_MASK(VCS3)
+#define BSD4_RING	ENGINE_MASK(VCS4)
+#define VEBOX2_RING	ENGINE_MASK(VECS2)
 #define ALL_ENGINES	(~0)
 #define HAS_ENGINE(dev_priv, id) \
@@ -2768,6 +2772,8 @@ intel_info(const struct drm_i915_private *dev_priv)
 #define HAS_LOGICAL_RING_CONTEXTS(dev_priv) \
 		((dev_priv)->info.has_logical_ring_contexts)
+#define HAS_LOGICAL_RING_ELSQ(dev_priv) \
+		((dev_priv)->info.has_logical_ring_elsq)
 #define HAS_LOGICAL_RING_PREEMPTION(dev_priv) \
 		((dev_priv)->info.has_logical_ring_preemption)
@@ -2788,9 +2794,10 @@ intel_info(const struct drm_i915_private *dev_priv)
 /* Early gen2 have a totally busted CS tlb and require pinned batches. */
 #define HAS_BROKEN_CS_TLB(dev_priv)	(IS_I830(dev_priv) || IS_I845G(dev_priv))
-/* WaRsDisableCoarsePowerGating:skl,bxt */
+/* WaRsDisableCoarsePowerGating:skl,cnl */
 #define NEEDS_WaRsDisableCoarsePowerGating(dev_priv) \
-	(IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
+	(IS_CANNONLAKE(dev_priv) || \
+	 IS_SKL_GT3(dev_priv) || IS_SKL_GT4(dev_priv))
 /*
  * dp aux and gmbus irq on gen4 seems to be able to generate legacy interrupts
@@ -3329,7 +3336,7 @@ i915_gem_obj_finish_shmem_access(struct drm_i915_gem_object *obj)
 int __must_check i915_mutex_lock_interruptible(struct drm_device *dev);
 void i915_vma_move_to_active(struct i915_vma *vma,
-			     struct drm_i915_gem_request *req,
+			     struct i915_request *rq,
			     unsigned int flags);
 int i915_gem_dumb_create(struct drm_file *file_priv,
			 struct drm_device *dev,
@@ -3344,11 +3351,9 @@ void i915_gem_track_fb(struct drm_i915_gem_object *old,
 int __must_check i915_gem_set_global_seqno(struct drm_device *dev, u32 seqno);
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_find_active_request(struct intel_engine_cs *engine);
-void i915_gem_retire_requests(struct drm_i915_private *dev_priv);
 static inline bool i915_reset_backoff(struct i915_gpu_error *error)
 {
	return unlikely(test_bit(I915_RESET_BACKOFF, &error->flags));
@@ -3380,7 +3385,7 @@ static inline u32 i915_reset_engine_count(struct i915_gpu_error *error,
	return READ_ONCE(error->reset_engine_count[engine->id]);
 }
-struct drm_i915_gem_request *
+struct i915_request *
 i915_gem_reset_prepare_engine(struct intel_engine_cs *engine);
 int i915_gem_reset_prepare(struct drm_i915_private *dev_priv);
 void i915_gem_reset(struct drm_i915_private *dev_priv);
@@ -3389,7 +3394,7 @@ void i915_gem_reset_finish(struct drm_i915_private *dev_priv);
 void i915_gem_set_wedged(struct drm_i915_private *dev_priv);
 bool i915_gem_unset_wedged(struct drm_i915_private *dev_priv);
 void i915_gem_reset_engine(struct intel_engine_cs *engine,
-			   struct drm_i915_gem_request *request);
+			   struct i915_request *request);
 void i915_gem_init_mmio(struct drm_i915_private *i915);
 int __must_check i915_gem_init(struct drm_i915_private *dev_priv);
@@ -4008,9 +4013,9 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms)
 }
 static inline bool
-__i915_request_irq_complete(const struct drm_i915_gem_request *req)
+__i915_request_irq_complete(const struct i915_request *rq)
 {
-	struct intel_engine_cs *engine = req->engine;
+	struct intel_engine_cs *engine = rq->engine;
	u32 seqno;
	/* Note that the engine may have wrapped around the seqno, and
@@ -4019,7 +4024,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
	 * this by kicking all the waiters before resetting the seqno
	 * in hardware, and also signal the fence.
	 */
-	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &req->fence.flags))
+	if (test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &rq->fence.flags))
		return true;
	/* The request was dequeued before we were awoken. We check after
@@ -4028,14 +4033,14 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
	 * the request execution are sufficient to ensure that a check
	 * after reading the value from hw matches this request.
	 */
-	seqno = i915_gem_request_global_seqno(req);
+	seqno = i915_request_global_seqno(rq);
	if (!seqno)
		return false;
	/* Before we do the heavier coherent read of the seqno,
	 * check the value (hopefully) in the CPU cacheline.
	 */
-	if (__i915_gem_request_completed(req, seqno))
+	if (__i915_request_completed(rq, seqno))
		return true;
	/* Ensure our read of the seqno is coherent so that we
@@ -4084,7 +4089,7 @@ __i915_request_irq_complete(const struct drm_i915_gem_request *req)
		wake_up_process(b->irq_wait->tsk);
		spin_unlock_irq(&b->irq_lock);
-		if (__i915_gem_request_completed(req, seqno))
+		if (__i915_request_completed(rq, seqno))
			return true;
	}
...
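
The hunks above and in the GVT scheduler reflect the request-API rename: struct drm_i915_gem_request becomes struct i915_request, and helpers such as i915_gem_request_alloc(), i915_add_request(), i915_wait_request() and i915_gem_request_put() become i915_request_alloc(), i915_request_add(), i915_request_wait() and i915_request_put(). The sketch below strings the renamed calls together in the order the scheduler hunks use them; it is a hypothetical in-kernel helper for illustration only, with the i915_request_get()/i915_request_put() pairing assumed for holding the request across the wait and all locking requirements omitted. It is not code from the driver.

/*
 * Illustrative only: how the renamed request API fits together.
 * Old names from the hunks above are noted in the comments.
 */
#include "i915_request.h"

static int example_submit_and_wait(struct intel_engine_cs *engine,
				   struct i915_gem_context *ctx)
{
	struct i915_request *rq;	/* was struct drm_i915_gem_request */
	long ret;

	rq = i915_request_alloc(engine, ctx);	/* was i915_gem_request_alloc() */
	if (IS_ERR(rq))
		return PTR_ERR(rq);

	i915_request_get(rq);			/* assumed: hold a reference across the wait */
	i915_request_add(rq);			/* was i915_add_request() */

	ret = i915_request_wait(rq, 0, MAX_SCHEDULE_TIMEOUT);	/* was i915_wait_request() */
	i915_request_put(rq);			/* was i915_gem_request_put() */

	return ret < 0 ? ret : 0;
}
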
@@ -29,7 +29,10 @@
 #ifdef CONFIG_DRM_I915_DEBUG_GEM
 #define GEM_BUG_ON(condition) do { if (unlikely((condition))) { \
-		printk(KERN_ERR "GEM_BUG_ON(%s)\n", __stringify(condition)); \
+		pr_err("%s:%d GEM_BUG_ON(%s)\n", \
+		       __func__, __LINE__, __stringify(condition)); \
+		GEM_TRACE("%s:%d GEM_BUG_ON(%s)\n", \
+			  __func__, __LINE__, __stringify(condition)); \
		BUG(); \
		} \
	} while(0)
@@ -54,6 +57,6 @@
 #define GEM_TRACE(...) do { } while (0)
 #endif
-#define I915_NUM_ENGINES 5
+#define I915_NUM_ENGINES 8
 #endif /* __I915_GEM_H__ */
@@ -119,7 +119,7 @@ i915_gem_batch_pool_get(struct i915_gem_batch_pool *pool,
		if (!reservation_object_test_signaled_rcu(resv, true))
			break;
-		i915_gem_retire_requests(pool->engine->i915);
+		i915_retire_requests(pool->engine->i915);
		GEM_BUG_ON(i915_gem_object_is_active(obj));
		/*
...