Commit be1da707 authored by Zhi Wang, committed by Zhenyu Wang

drm/i915/gvt: vGPU command scanner

This patch introduces a command scanner to scan guest command buffers.
Signed-off-by: Yulei Zhang <yulei.zhang@intel.com>
Signed-off-by: Zhi Wang <zhi.a.wang@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 17865713
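The commit message only summarizes the feature, and the new cmd_parser.c itself is collapsed below, so here is a minimal, hypothetical sketch of the loop a guest command scanner of this kind runs over a ring buffer or batch buffer: decode the command at the current instruction pointer, look it up in a command table, run its handler (which may audit or patch the command), and advance by the command length. The names example_*, struct example_cmd_info and the lookup callback are illustrative only; the real structure lives in cmd_parser.c and may differ.

#include <linux/types.h>
#include <linux/errno.h>

struct example_cmd_info {
	u32 opcode;
	u32 len;				/* command length in dwords */
	int (*handler)(u32 *cmd);		/* may audit or patch the command */
};

static int example_scan_buffer(u32 *va, unsigned long len_dw,
			       const struct example_cmd_info *(*lookup)(u32 header))
{
	unsigned long ip = 0;

	while (ip < len_dw) {
		const struct example_cmd_info *info = lookup(va[ip]);

		if (!info)
			return -EBADRQC;	/* unknown command: reject the buffer */
		if (info->handler) {
			int ret = info->handler(&va[ip]);

			if (ret)
				return ret;
		}
		ip += info->len;
	}
	return 0;
}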
GVT_DIR := gvt
GVT_SOURCE := gvt.o aperture_gm.o handlers.o vgpu.o trace_points.o firmware.o \
interrupt.o gtt.o cfg_space.o opregion.o mmio.o display.o edid.o \
execlist.o scheduler.o sched_policy.o render.o cmd_parser.o
ccflags-y += -I$(src) -I$(src)/$(GVT_DIR) -Wall
i915-y += $(addprefix $(GVT_DIR)/, $(GVT_SOURCE))
This diff is collapsed (the new cmd_parser.c, which implements the scanner, is not expanded here).
/*
* Copyright(c) 2011-2016 Intel Corporation. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice (including the next
* paragraph) shall be included in all copies or substantial portions of the
* Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
* Authors:
* Ke Yu
* Kevin Tian <kevin.tian@intel.com>
* Zhiyuan Lv <zhiyuan.lv@intel.com>
*
* Contributors:
* Min He <min.he@intel.com>
* Ping Gao <ping.a.gao@intel.com>
* Tina Zhang <tina.zhang@intel.com>
* Yulei Zhang <yulei.zhang@intel.com>
* Zhi Wang <zhi.a.wang@intel.com>
*
*/
#ifndef _GVT_CMD_PARSER_H_
#define _GVT_CMD_PARSER_H_
#define GVT_CMD_HASH_BITS 7
void intel_gvt_clean_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_init_cmd_parser(struct intel_gvt *gvt);
int intel_gvt_scan_and_shadow_workload(struct intel_vgpu_workload *workload);
int intel_gvt_scan_and_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx);
#endif
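Not part of the patch: the header above only sizes the hash (GVT_CMD_HASH_BITS = 7, i.e. 1 << 7 = 128 buckets for the cmd_table added to struct intel_gvt in gvt.h below). A minimal sketch of how such an opcode-keyed table can be filled and searched with the standard linux/hashtable.h helpers; struct example_entry and the example_* helpers are illustrative names, the real entry type lives in the collapsed cmd_parser.c, and intel_gvt_init_cmd_parser() would be expected to hash_init() the table.

#include <linux/hashtable.h>
#include "gvt.h"			/* for struct intel_gvt and its cmd_table */

struct example_entry {
	struct hlist_node hlist;
	u32 opcode;
};

static void example_add_cmd(struct intel_gvt *gvt, struct example_entry *e)
{
	hash_add(gvt->cmd_table, &e->hlist, e->opcode);
}

static struct example_entry *example_find_cmd(struct intel_gvt *gvt, u32 opcode)
{
	struct example_entry *e;

	hash_for_each_possible(gvt->cmd_table, e, hlist, opcode)
		if (e->opcode == opcode)
			return e;
	return NULL;
}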
...@@ -51,4 +51,7 @@
#define gvt_dbg_render(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: render: "fmt, ##args)
#define gvt_dbg_cmd(fmt, args...) \
DRM_DEBUG_DRIVER("gvt: cmd: "fmt, ##args)
#endif
...@@ -363,6 +363,109 @@ static void free_workload(struct intel_vgpu_workload *workload)
#define get_desc_from_elsp_dwords(ed, i) \
((struct execlist_ctx_descriptor_format *)&((ed)->data[i * 2]))
#define BATCH_BUFFER_ADDR_MASK ((1UL << 32) - (1U << 2))
#define BATCH_BUFFER_ADDR_HIGH_MASK ((1UL << 16) - (1U))
static int set_gma_to_bb_cmd(struct intel_shadow_bb_entry *entry_obj,
unsigned long add, int gmadr_bytes)
{
if (WARN_ON(gmadr_bytes != 4 && gmadr_bytes != 8))
return -1;
*((u32 *)(entry_obj->bb_start_cmd_va + (1 << 2))) = add &
BATCH_BUFFER_ADDR_MASK;
if (gmadr_bytes == 8) {
*((u32 *)(entry_obj->bb_start_cmd_va + (2 << 2))) =
add & BATCH_BUFFER_ADDR_HIGH_MASK;
}
return 0;
}
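The two masks above select which bits of the shadow batch buffer's graphics address land in the MI_BATCH_BUFFER_START command recorded in bb_start_cmd_va: set_gma_to_bb_cmd() rewrites dword 1 (byte offset 1 << 2 = 4) with the low address bits, and, when the platform encodes 8-byte graphics addresses (gmadr_bytes == 8), dword 2 (byte offset 2 << 2 = 8) as well. A minimal compile-time sketch of the mask arithmetic, not part of the patch:

#include <linux/bug.h>

static inline void example_check_bb_start_masks(void)
{
	BUILD_BUG_ON(BATCH_BUFFER_ADDR_MASK != 0xfffffffcUL);		/* bits 31:2 */
	BUILD_BUG_ON(BATCH_BUFFER_ADDR_HIGH_MASK != 0x0000ffffUL);	/* bits 15:0 */
}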
static void prepare_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
int gmadr_bytes = workload->vgpu->gvt->device_info.gmadr_bytes_in_cmd;
struct i915_vma *vma;
unsigned long gma;
/* pin the gem object to ggtt */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
vma = i915_gem_object_ggtt_pin(entry_obj->obj, NULL, 0,
0, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin\n");
return;
}
i915_gem_object_unpin_pages(entry_obj->obj);
/* update the relocate gma with shadow batch buffer*/
gma = i915_gem_object_ggtt_offset(entry_obj->obj, NULL);
WARN_ON(!IS_ALIGNED(gma, 4));
set_gma_to_bb_cmd(entry_obj, gma, gmadr_bytes);
}
}
}
static int update_wa_ctx_2_shadow_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
int ring_id = wa_ctx->workload->ring_id;
struct i915_gem_context *shadow_ctx =
wa_ctx->workload->vgpu->shadow_ctx;
struct drm_i915_gem_object *ctx_obj =
shadow_ctx->engine[ring_id].state->obj;
struct execlist_ring_context *shadow_ring_context;
struct page *page;
page = i915_gem_object_get_page(ctx_obj, LRC_STATE_PN);
shadow_ring_context = kmap_atomic(page);
shadow_ring_context->bb_per_ctx_ptr.val =
(shadow_ring_context->bb_per_ctx_ptr.val &
(~PER_CTX_ADDR_MASK)) | wa_ctx->per_ctx.shadow_gma;
shadow_ring_context->rcs_indirect_ctx.val =
(shadow_ring_context->rcs_indirect_ctx.val &
(~INDIRECT_CTX_ADDR_MASK)) | wa_ctx->indirect_ctx.shadow_gma;
kunmap_atomic(shadow_ring_context);
return 0;
}
static void prepare_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
struct i915_vma *vma;
unsigned long gma;
unsigned char *per_ctx_va =
(unsigned char *)wa_ctx->indirect_ctx.shadow_va +
wa_ctx->indirect_ctx.size;
if (wa_ctx->indirect_ctx.size == 0)
return;
vma = i915_gem_object_ggtt_pin(wa_ctx->indirect_ctx.obj, NULL, 0, 0, 0);
if (IS_ERR(vma)) {
gvt_err("Cannot pin indirect ctx obj\n");
return;
}
i915_gem_object_unpin_pages(wa_ctx->indirect_ctx.obj);
gma = i915_gem_object_ggtt_offset(wa_ctx->indirect_ctx.obj, NULL);
WARN_ON(!IS_ALIGNED(gma, CACHELINE_BYTES));
wa_ctx->indirect_ctx.shadow_gma = gma;
wa_ctx->per_ctx.shadow_gma = *((unsigned int *)per_ctx_va + 1);
memset(per_ctx_va, 0, CACHELINE_BYTES);
update_wa_ctx_2_shadow_ctx(wa_ctx);
}
static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
...@@ -372,6 +475,8 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
intel_vgpu_pin_mm(workload->shadow_mm);
intel_vgpu_sync_oos_pages(workload->vgpu);
intel_vgpu_flush_post_shadow(workload->vgpu);
prepare_shadow_batch_buffer(workload);
prepare_shadow_wa_ctx(&workload->wa_ctx);
if (!workload->emulate_schedule_in)
return 0;
...@@ -381,6 +486,35 @@ static int prepare_execlist_workload(struct intel_vgpu_workload *workload)
return emulate_execlist_schedule_in(&vgpu->execlist[ring_id], ctx);
}
static void release_shadow_batch_buffer(struct intel_vgpu_workload *workload)
{
/* release all the shadow batch buffer */
if (!list_empty(&workload->shadow_bb)) {
struct intel_shadow_bb_entry *entry_obj =
list_first_entry(&workload->shadow_bb,
struct intel_shadow_bb_entry,
list);
struct intel_shadow_bb_entry *temp;
list_for_each_entry_safe(entry_obj, temp, &workload->shadow_bb,
list) {
drm_gem_object_unreference(&(entry_obj->obj->base));
kvfree(entry_obj->va);
list_del(&entry_obj->list);
kfree(entry_obj);
}
}
}
static void release_shadow_wa_ctx(struct intel_shadow_wa_ctx *wa_ctx)
{
if (wa_ctx->indirect_ctx.size == 0)
return;
drm_gem_object_unreference(&(wa_ctx->indirect_ctx.obj->base));
kvfree(wa_ctx->indirect_ctx.shadow_va);
}
static int complete_execlist_workload(struct intel_vgpu_workload *workload)
{
struct intel_vgpu *vgpu = workload->vgpu;
...@@ -394,6 +528,9 @@ static int complete_execlist_workload(struct intel_vgpu_workload *workload)
gvt_dbg_el("complete workload %p status %d\n", workload,
workload->status);
release_shadow_batch_buffer(workload);
release_shadow_wa_ctx(&workload->wa_ctx);
if (workload->status || vgpu->resetting)
goto out;
...@@ -487,7 +624,7 @@ bool submit_context(struct intel_vgpu *vgpu, int ring_id,
struct intel_vgpu_workload *last_workload = get_last_workload(q);
struct intel_vgpu_workload *workload = NULL;
u64 ring_context_gpa;
u32 head, tail, start, ctl, ctx_ctl, per_ctx, indirect_ctx;
int ret;
ring_context_gpa = intel_vgpu_gma_to_gpa(vgpu->gtt.ggtt_mm,
...@@ -532,6 +669,7 @@ bool submit_context(struct intel_vgpu *vgpu, int ring_id,
RING_CTX_OFF(ctx_ctrl.val), &ctx_ctl, 4);
INIT_LIST_HEAD(&workload->list);
INIT_LIST_HEAD(&workload->shadow_bb);
init_waitqueue_head(&workload->shadow_ctx_status_wq);
atomic_set(&workload->shadow_ctx_active, 0);
...@@ -549,6 +687,24 @@ bool submit_context(struct intel_vgpu *vgpu, int ring_id,
workload->status = -EINPROGRESS;
workload->emulate_schedule_in = emulate_schedule_in;
if (ring_id == RCS) {
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(bb_per_ctx_ptr.val), &per_ctx, 4);
intel_gvt_hypervisor_read_gpa(vgpu, ring_context_gpa +
RING_CTX_OFF(rcs_indirect_ctx.val), &indirect_ctx, 4);
workload->wa_ctx.indirect_ctx.guest_gma =
indirect_ctx & INDIRECT_CTX_ADDR_MASK;
workload->wa_ctx.indirect_ctx.size =
(indirect_ctx & INDIRECT_CTX_SIZE_MASK) *
CACHELINE_BYTES;
workload->wa_ctx.per_ctx.guest_gma =
per_ctx & PER_CTX_ADDR_MASK;
workload->wa_ctx.workload = workload;
WARN_ON(workload->wa_ctx.indirect_ctx.size && !(per_ctx & 0x1));
}
if (emulate_schedule_in)
memcpy(&workload->elsp_dwords,
&vgpu->execlist[ring_id].elsp_dwords,
...
...@@ -112,6 +112,8 @@ static void init_device_info(struct intel_gvt *gvt)
info->gtt_start_offset = 8 * 1024 * 1024;
info->gtt_entry_size = 8;
info->gtt_entry_size_shift = 3;
info->gmadr_bytes_in_cmd = 8;
info->max_surface_size = 36 * 1024 * 1024;
}
}
...@@ -177,6 +179,7 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
return;
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
intel_gvt_clean_sched_policy(gvt);
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_opregion(gvt);
...@@ -249,14 +252,20 @@ int intel_gvt_init_device(struct drm_i915_private *dev_priv)
if (ret)
goto out_clean_workload_scheduler;
ret = intel_gvt_init_cmd_parser(gvt);
if (ret)
goto out_clean_sched_policy;
ret = init_service_thread(gvt);
if (ret)
goto out_clean_cmd_parser;
gvt_dbg_core("gvt device creation is done\n");
gvt->initialized = true;
return 0;
out_clean_cmd_parser:
intel_gvt_clean_cmd_parser(gvt);
out_clean_sched_policy:
intel_gvt_clean_sched_policy(gvt);
out_clean_workload_scheduler:
...
...@@ -45,6 +45,7 @@
#include "scheduler.h"
#include "sched_policy.h"
#include "render.h"
#include "cmd_parser.h"
#define GVT_MAX_VGPU 8
...@@ -71,6 +72,8 @@ struct intel_gvt_device_info {
u32 gtt_start_offset;
u32 gtt_entry_size;
u32 gtt_entry_size_shift;
int gmadr_bytes_in_cmd;
u32 max_surface_size;
};
/* GM resources owned by a vGPU */
...@@ -203,6 +206,7 @@ struct intel_gvt {
struct intel_gvt_gtt gtt;
struct intel_gvt_opregion opregion;
struct intel_gvt_workload_scheduler scheduler;
DECLARE_HASHTABLE(cmd_table, GVT_CMD_HASH_BITS);
struct task_struct *service_thread;
wait_queue_head_t service_thread_wq;
...
...@@ -226,4 +226,8 @@ int intel_vgpu_reg_master_irq_handler(struct intel_vgpu *vgpu,
int intel_vgpu_reg_imr_handler(struct intel_vgpu *vgpu,
unsigned int reg, void *p_data, unsigned int bytes);
int gvt_ring_id_to_pipe_control_notify_event(int ring_id);
int gvt_ring_id_to_mi_flush_dw_event(int ring_id);
int gvt_ring_id_to_mi_user_interrupt_event(int ring_id);
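/*
 * Not part of the patch: the three declarations above are how the command
 * scanner is expected to translate a ring id into the virtual interrupt
 * event to record when it sees PIPE_CONTROL notify, MI_FLUSH_DW or
 * MI_USER_INTERRUPT commands; scheduler.c (below) then triggers every event
 * set in workload->pending_events once the workload completes.
 */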
#endif /* _GVT_INTERRUPT_H_ */
...@@ -185,6 +185,14 @@ static int dispatch_workload(struct intel_vgpu_workload *workload)
mutex_lock(&gvt->lock);
ret = intel_gvt_scan_and_shadow_workload(workload);
if (ret)
goto err;
ret = intel_gvt_scan_and_shadow_wa_ctx(&workload->wa_ctx);
if (ret)
goto err;
ret = populate_shadow_context(workload);
if (ret)
goto err;
...@@ -345,6 +353,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
{
struct intel_gvt_workload_scheduler *scheduler = &gvt->scheduler;
struct intel_vgpu_workload *workload;
int event;
mutex_lock(&gvt->lock);
...@@ -355,6 +364,11 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
!atomic_read(&workload->shadow_ctx_active));
update_guest_context(workload);
for_each_set_bit(event, workload->pending_events,
INTEL_GVT_EVENT_MAX)
intel_vgpu_trigger_virtual_event(workload->vgpu,
event);
}
gvt_dbg_sched("ring id %d complete workload %p status %d\n",
...
...@@ -50,6 +50,29 @@ struct intel_gvt_workload_scheduler {
struct intel_gvt_sched_policy_ops *sched_ops;
};
#define INDIRECT_CTX_ADDR_MASK 0xffffffc0
#define INDIRECT_CTX_SIZE_MASK 0x3f
struct shadow_indirect_ctx {
struct drm_i915_gem_object *obj;
unsigned long guest_gma;
unsigned long shadow_gma;
void *shadow_va;
uint32_t size;
};
#define PER_CTX_ADDR_MASK 0xfffff000
struct shadow_per_ctx {
unsigned long guest_gma;
unsigned long shadow_gma;
};
struct intel_shadow_wa_ctx {
struct intel_vgpu_workload *workload;
struct shadow_indirect_ctx indirect_ctx;
struct shadow_per_ctx per_ctx;
};
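Not part of the patch, but a compact restatement of the register layout implied by the masks above and by submit_context() in execlist.c: the RCS indirect (workaround) context register carries a cacheline-aligned graphics address in bits 31:6 and a size in cachelines in bits 5:0, while the per-context pointer register carries a page-aligned address (bits 31:12). A minimal decode sketch; example_decode_indirect_ctx is an illustrative name and CACHELINE_BYTES is the existing i915 constant:

static inline void example_decode_indirect_ctx(u32 reg_val,
					       unsigned long *gma,
					       u32 *size_in_bytes)
{
	*gma = reg_val & INDIRECT_CTX_ADDR_MASK;		/* bits 31:6 */
	*size_in_bytes = (reg_val & INDIRECT_CTX_SIZE_MASK) *
			 CACHELINE_BYTES;			/* bits 5:0, in cachelines */
}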
struct intel_vgpu_workload {
struct intel_vgpu *vgpu;
int ring_id;
...@@ -65,16 +88,32 @@ struct intel_vgpu_workload {
int (*complete)(struct intel_vgpu_workload *);
struct list_head list;
DECLARE_BITMAP(pending_events, INTEL_GVT_EVENT_MAX);
void *shadow_ring_buffer_va;
/* execlist context information */
struct execlist_ctx_descriptor_format ctx_desc;
struct execlist_ring_context *ring_context;
unsigned long rb_head, rb_tail, rb_ctl, rb_start, rb_len;
bool restore_inhibit;
struct intel_vgpu_elsp_dwords elsp_dwords;
bool emulate_schedule_in;
atomic_t shadow_ctx_active;
wait_queue_head_t shadow_ctx_status_wq;
u64 ring_context_gpa;
/* shadow batch buffer */
struct list_head shadow_bb;
struct intel_shadow_wa_ctx wa_ctx;
};
/* Intel shadow batch buffer is a i915 gem object */
struct intel_shadow_bb_entry {
struct list_head list;
struct drm_i915_gem_object *obj;
void *va;
unsigned long len;
void *bb_start_cmd_va;
};
#define workload_q_head(vgpu, ring_id) \
...
...@@ -224,6 +224,58 @@ TRACE_EVENT(oos_sync,
TP_printk("%s", __entry->buf)
);
#define MAX_CMD_STR_LEN 256
TRACE_EVENT(gvt_command,
TP_PROTO(u8 vm_id, u8 ring_id, u32 ip_gma, u32 *cmd_va, u32 cmd_len, bool ring_buffer_cmd, cycles_t cost_pre_cmd_handler, cycles_t cost_cmd_handler),
TP_ARGS(vm_id, ring_id, ip_gma, cmd_va, cmd_len, ring_buffer_cmd, cost_pre_cmd_handler, cost_cmd_handler),
TP_STRUCT__entry(
__field(u8, vm_id)
__field(u8, ring_id)
__field(int, i)
__array(char, tmp_buf, MAX_CMD_STR_LEN)
__array(char, cmd_str, MAX_CMD_STR_LEN)
),
TP_fast_assign(
__entry->vm_id = vm_id;
__entry->ring_id = ring_id;
__entry->cmd_str[0] = '\0';
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "VM(%d) Ring(%d): %s ip(%08x) pre handler cost (%llu), handler cost (%llu) ", vm_id, ring_id, ring_buffer_cmd ? "RB":"BB", ip_gma, cost_pre_cmd_handler, cost_cmd_handler);
strcat(__entry->cmd_str, __entry->tmp_buf);
__entry->i = 0;
while (cmd_len > 0) {
if (cmd_len >= 8) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x %08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3],
cmd_va[__entry->i+4], cmd_va[__entry->i+5], cmd_va[__entry->i+6], cmd_va[__entry->i+7]);
__entry->i += 8;
cmd_len -= 8;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 4) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x %08x %08x ",
cmd_va[__entry->i], cmd_va[__entry->i+1], cmd_va[__entry->i+2], cmd_va[__entry->i+3]);
__entry->i += 4;
cmd_len -= 4;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len >= 2) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x %08x ", cmd_va[__entry->i], cmd_va[__entry->i+1]);
__entry->i += 2;
cmd_len -= 2;
strcat(__entry->cmd_str, __entry->tmp_buf);
} else if (cmd_len == 1) {
snprintf(__entry->tmp_buf, MAX_CMD_STR_LEN, "%08x ", cmd_va[__entry->i]);
__entry->i += 1;
cmd_len -= 1;
strcat(__entry->cmd_str, __entry->tmp_buf);
}
}
strcat(__entry->cmd_str, "\n");
),
TP_printk("%s", __entry->cmd_str)
);
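Not part of the patch: TRACE_EVENT(gvt_command, ...) above generates a trace_gvt_command() call with the TP_PROTO signature, which the scanner can invoke once per decoded command; the wrapper and argument names below are illustrative only.

static void example_trace_one_cmd(u8 vm_id, u8 ring_id, u32 ip_gma,
				  u32 *cmd_va, u32 cmd_len,
				  bool in_ring_buffer,
				  cycles_t t_pre, cycles_t t_handler)
{
	trace_gvt_command(vm_id, ring_id, ip_gma, cmd_va, cmd_len,
			  in_ring_buffer, t_pre, t_handler);
}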
#endif /* _GVT_TRACE_H_ */
/* This part must be out of protection */
...