Commit 45d9f439 authored by Jani Nikula

Merge tag 'gvt-fixes-2017-01-25' of https://github.com/01org/gvt-linux into drm-intel-fixes

gvt-fixes-2017-01-25

- re-enable shadow batch buffer scanning for security; it had been wrongly turned off
- fix kvmgt/mdev attribute typo (available_instance -> available_instances) for a correct sysfs ABI
- move the GVT mailing list to intel-gvt-dev@lists.freedesktop.org
Signed-off-by: Jani Nikula <jani.nikula@intel.com>
parents 2f5db26c ba7addcd
@@ -4153,7 +4153,7 @@ F: Documentation/gpu/i915.rst
 INTEL GVT-g DRIVERS (Intel GPU Virtualization)
 M: Zhenyu Wang <zhenyuw@linux.intel.com>
 M: Zhi Wang <zhi.a.wang@intel.com>
-L: igvt-g-dev@lists.01.org
+L: intel-gvt-dev@lists.freedesktop.org
 L: intel-gfx@lists.freedesktop.org
 W: https://01.org/igvt-g
 T: git https://github.com/01org/gvt-linux.git
...
@@ -481,7 +481,6 @@ struct parser_exec_state {
 	(s->vgpu->gvt->device_info.gmadr_bytes_in_cmd >> 2)
 
 static unsigned long bypass_scan_mask = 0;
-static bool bypass_batch_buffer_scan = true;
 
 /* ring ALL, type = 0 */
 static struct sub_op_bits sub_op_mi[] = {
@@ -1525,9 +1524,6 @@ static int batch_buffer_needs_scan(struct parser_exec_state *s)
 {
 	struct intel_gvt *gvt = s->vgpu->gvt;
 
-	if (bypass_batch_buffer_scan)
-		return 0;
-
 	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
 		/* BDW decides privilege based on address space */
 		if (cmd_val(s, 0) & (1 << 8))
...
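An aside on what the revert leaves behind: with the bypass flag gone, batch_buffer_needs_scan() reduces to the shape below. This is reconstructed from the hunk above; the trailing return 1 is a presumption from the function's contract (non-zero means "scan this batch") and is not shown in the diff.

static int batch_buffer_needs_scan(struct parser_exec_state *s)
{
	struct intel_gvt *gvt = s->vgpu->gvt;

	if (IS_BROADWELL(gvt->dev_priv) || IS_SKYLAKE(gvt->dev_priv)) {
		/* BDW decides privilege based on address space: bit 8 of
		 * the first command DWORD selects PPGTT, i.e. a
		 * non-privileged batch the scanner can skip.
		 */
		if (cmd_val(s, 0) & (1 << 8))
			return 0;
	}

	return 1;	/* presumed default: scan the batch buffer */
}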
@@ -364,58 +364,30 @@ static void free_workload(struct intel_vgpu_workload *workload)
 #define get_desc_from_elsp_dwords(ed, i) \
 	((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
 
-#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
-#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
-static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
-		unsigned long add, int gmadr_bytes)
-{
-	if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
-		return -1;
-
-	*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
-		BATCH_BUFFER_ADDR_MASK;
-	if (gmadr_bytes == 8) {
-		*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
-			add & BATCH_BUFFER_ADDR_HIGH_MASK;
-	}
-
-	return 0;
-}
-
 static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
 {
-	int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	const int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
+	struct intel_shadow_bb_entry *entry_obj;
 
 	/* pin the gem object to ggtt */
-	if (!list_empty(&workload->shadow_bb)) {
-		struct intel_shadow_bb_entry *entry_obj =
-			list_first_entry(&workload->shadow_bb,
-					 struct intel_shadow_bb_entry,
-					 list);
-		struct intel_shadow_bb_entry *temp;
+	list_for_each_entry(entry_obj, &workload->shadow_bb, list) {
+		struct i915_vma *vma;
 
-		list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
-				list) {
-			struct i915_vma *vma;
-
-			vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
-					4, 0);
-			if (IS_ERR(vma)) {
-				gvt_err("Cannot pin\n");
-				return;
-			}
-
-			/* FIXME: we are not tracking our pinned VMA leaving it
-			 * up to the core to fix up the stray pin_count upon
-			 * free.
-			 */
-
-			/* update the relocate gma with shadow batch buffer*/
-			set_gma_to_bb_cmd(entry_obj,
-					i915_ggtt_offset(vma),
-					gmadr_bytes);
+		vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0, 4, 0);
+		if (IS_ERR(vma)) {
+			gvt_err("Cannot pin\n");
+			return;
 		}
+
+		/* FIXME: we are not tracking our pinned VMA leaving it
+		 * up to the core to fix up the stray pin_count upon
+		 * free.
+		 */
+
+		/* update the relocate gma with shadow batch buffer*/
+		entry_obj->bb_start_cmd_va[1] = i915_ggtt_offset(vma);
+		if (gmadr_bytes == 8)
+			entry_obj->bb_start_cmd_va[2] = 0;
 	}
 }
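The rewritten loop patches the guest's MI_BATCH_BUFFER_START command in place: with bb_start_cmd_va now typed u32 * (see the header hunk at the end of this diff), DWORD 1 receives the shadow buffer's GGTT offset, and DWORD 2, present when gmadr_bytes == 8, is zeroed since a GGTT offset fits in 32 bits. A minimal user-space sketch of that DWORD-indexed patching follows; the opcode value and the helper name are illustrative, not taken from the patch:

#include <stdint.h>
#include <stdio.h>

/* Illustrative only: patch a batch-buffer start command by DWORD index. */
static void relocate_bb_start(uint32_t *cmd, uint64_t gma, int gmadr_bytes)
{
	cmd[1] = (uint32_t)gma;			/* low 32 address bits */
	if (gmadr_bytes == 8)
		cmd[2] = (uint32_t)(gma >> 32);	/* high bits; 0 for a GGTT offset */
}

int main(void)
{
	uint32_t cmd[3] = { 0x18800001, 0, 0 };	/* placeholder opcode DWORD */

	relocate_bb_start(cmd, 0x12345000ULL, 8);
	printf("dw1=0x%08x dw2=0x%08x\n", cmd[1], cmd[2]);
	return 0;
}

Compared with the removed set_gma_to_bb_cmd(), indexing DWORDs directly avoids the byte-offset pointer casts that the old BATCH_BUFFER_ADDR_*_MASK machinery needed.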
@@ -826,7 +798,7 @@ int intel_vgpu_init_execlist(struct intel_vgpu *vgpu)
 		INIT_LIST_HEAD(&vgpu->workload_q_head[i]);
 	}
 
-	vgpu->workloads = kmem_cache_create("gvt-g vgpu workload",
+	vgpu->workloads = kmem_cache_create("gvt-g_vgpu_workload",
 			sizeof(struct intel_vgpu_workload), 0,
 			SLAB_HWCACHE_ALIGN,
 			NULL);
...
@@ -230,8 +230,8 @@ static struct intel_vgpu_type *intel_gvt_find_vgpu_type(struct intel_gvt *gvt,
 	return NULL;
 }
 
-static ssize_t available_instance_show(struct kobject *kobj, struct device *dev,
-		char *buf)
+static ssize_t available_instances_show(struct kobject *kobj,
+		struct device *dev, char *buf)
 {
 	struct intel_vgpu_type *type;
 	unsigned int num = 0;
@@ -269,12 +269,12 @@ static ssize_t description_show(struct kobject *kobj, struct device *dev,
 			type->fence);
 }
 
-static MDEV_TYPE_ATTR_RO(available_instance);
+static MDEV_TYPE_ATTR_RO(available_instances);
 static MDEV_TYPE_ATTR_RO(device_api);
 static MDEV_TYPE_ATTR_RO(description);
 
 static struct attribute *type_attrs[] = {
-	&mdev_type_attr_available_instance.attr,
+	&mdev_type_attr_available_instances.attr,
 	&mdev_type_attr_device_api.attr,
 	&mdev_type_attr_description.attr,
 	NULL,
...
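Why the show() function had to be renamed along with the attribute: MDEV_TYPE_ATTR_RO() glues the attribute name to its show() routine. Per my reading of include/linux/mdev.h of that era (treat the exact expansion as an assumption), the macro unfolds roughly as:

/* MDEV_TYPE_ATTR_RO(available_instances) expands along these lines: */
struct mdev_type_attribute mdev_type_attr_available_instances =
	__ATTR_RO(available_instances);

/* __ATTR_RO(_name) wires .show to _name##_show, so the attribute only
 * links against a function named available_instances_show, and the
 * sysfs file a vGPU type exposes becomes "available_instances", the
 * name the mdev ABI documents. The old "available_instance" spelling
 * was therefore a user-visible ABI bug, not just a source typo.
 */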
@@ -113,7 +113,7 @@ struct intel_shadow_bb_entry {
 	struct drm_i915_gem_object *obj;
 	void *va;
 	unsigned long len;
-	void *bb_start_cmd_va;
+	u32 *bb_start_cmd_va;
 };
 
 #define workload_q_head(vgpu, ring_id) \
...