Commit 5c6d4c67 authored by Changbin Du, committed by Zhenyu Wang

drm/i915/gvt: Make the MMIO attribute wrappers be inline

Function calls are expensive. I have seen obvious overhead from calls to
these wrappers in perf data, especially on the cmd parser side.
So make these simple wrappers inline to kill those calls altogether.
Signed-off-by: Changbin Du <changbin.du@intel.com>
Signed-off-by: Zhenyu Wang <zhenyuw@linux.intel.com>
parent 56a78de5
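The wrappers changed here are single byte-load-and-bit-test operations, so the call/return around each out-of-line use can dominate on hot paths such as the command parser. A rough sketch of the kind of caller involved (hypothetical; the real parser is not part of this diff, and audit_cmd_regs with its arguments is made up for illustration):

/* Hypothetical hot loop: check every register an incoming command touches.
 * With the wrapper out of line this is one call per register; once it is
 * static inline in gvt.h each check is just a byte load and a bit test. */
static int audit_cmd_regs(struct intel_gvt *gvt,
		const unsigned int *reg, int nr_regs)
{
	int i;

	for (i = 0; i < nr_regs; i++) {
		if (!intel_gvt_mmio_is_cmd_access(gvt, reg[i]))
			return -EINVAL;	/* register not allowed in command buffers */
		intel_gvt_mmio_set_cmd_accessed(gvt, reg[i]);
	}
	return 0;
}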
@@ -199,6 +199,21 @@ struct intel_gvt_fence {
struct intel_gvt_mmio {
u8 *mmio_attribute;
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
DECLARE_HASHTABLE(mmio_info_table, INTEL_GVT_MMIO_HASH_BITS);
};
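mmio_attribute holds one byte of F_* flags per 4-byte MMIO register: registers are 4-byte aligned, so offset >> 2 picks a unique slot; flags are OR'd in when the register table is built and tested with a bitwise AND afterwards. A minimal sketch of that indexing (0x2358 is a made-up offset, not a real register):

	u8 *attr = gvt->mmio.mmio_attribute;

	/* at table-build time: record what the register allows */
	attr[0x2358 >> 2] |= F_CMD_ACCESS | F_MODE_MASK;

	/* at run time: a byte load plus a bit test answers the query */
	if (attr[0x2358 >> 2] & F_CMD_ACCESS)
		; /* the register may legally appear in GPU command buffers */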
@@ -487,6 +502,69 @@ static inline void mmio_hw_access_post(struct drm_i915_private *dev_priv)
intel_runtime_pm_put(dev_priv);
}
/**
* intel_gvt_mmio_set_accessed - mark an MMIO as accessed
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline void intel_gvt_mmio_set_accessed(
struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_ACCESSED;
}
/**
* intel_gvt_mmio_is_cmd_access - check if an MMIO can be accessed by GPU commands
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline bool intel_gvt_mmio_is_cmd_access(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_CMD_ACCESS;
}
/**
* intel_gvt_mmio_is_unalign - check if an MMIO can be accessed with an unaligned address
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline bool intel_gvt_mmio_is_unalign(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_UNALIGN;
}
/**
* intel_gvt_mmio_set_cmd_accessed - mark an MMIO as accessed by GPU commands
* @gvt: a GVT device
* @offset: register offset
*
*/
static inline void intel_gvt_mmio_set_cmd_accessed(
struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |= F_CMD_ACCESSED;
}
/**
* intel_gvt_mmio_has_mode_mask - check if an MMIO has a mode mask
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* True if the MMIO has a mode mask in its higher 16 bits, false otherwise.
*
*/
static inline bool intel_gvt_mmio_has_mode_mask(
struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
#include "trace.h"
#include "mpt.h"
......
@@ -47,21 +47,6 @@
#define PCH_PP_OFF_DELAYS _MMIO(0xc720c)
#define PCH_PP_DIVISOR _MMIO(0xc7210)
/* Register contains RO bits */
#define F_RO (1 << 0)
/* Register contains graphics address */
#define F_GMADR (1 << 1)
/* Mode mask registers with high 16 bits as the mask bits */
#define F_MODE_MASK (1 << 2)
/* This reg can be accessed by GPU commands */
#define F_CMD_ACCESS (1 << 3)
/* This reg has been accessed by a VM */
#define F_ACCESSED (1 << 4)
/* This reg has been accessed through GPU commands */
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
unsigned long intel_gvt_get_device_type(struct intel_gvt *gvt)
{
if (IS_BROADWELL(gvt->dev_priv))
@@ -2952,71 +2937,6 @@ int intel_gvt_setup_mmio_info(struct intel_gvt *gvt)
return ret;
}
/**
* intel_gvt_mmio_set_accessed - mark a MMIO has been accessed
* @gvt: a GVT device
* @offset: register offset
*
*/
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |=
F_ACCESSED;
}
/**
* intel_gvt_mmio_is_cmd_accessed - mark a MMIO could be accessed by command
* @gvt: a GVT device
* @offset: register offset
*
*/
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_CMD_ACCESS;
}
/**
* intel_gvt_mmio_is_unalign - mark a MMIO could be accessed unaligned
* @gvt: a GVT device
* @offset: register offset
*
*/
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt,
unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_UNALIGN;
}
/**
* intel_gvt_mmio_set_cmd_accessed - mark a MMIO has been accessed by command
* @gvt: a GVT device
* @offset: register offset
*
*/
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset)
{
gvt->mmio.mmio_attribute[offset >> 2] |=
F_CMD_ACCESSED;
}
/**
* intel_gvt_mmio_has_mode_mask - if a MMIO has a mode mask
* @gvt: a GVT device
* @offset: register offset
*
* Returns:
* True if a MMIO has a mode mask in its higher 16 bits, false if it isn't.
*
*/
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset)
{
return gvt->mmio.mmio_attribute[offset >> 2] &
F_MODE_MASK;
}
/**
* intel_vgpu_default_mmio_read - default MMIO read handler
......
@@ -87,13 +87,7 @@ int intel_vgpu_emulate_mmio_read(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes);
int intel_vgpu_emulate_mmio_write(struct intel_vgpu *vgpu, u64 pa,
void *p_data, unsigned int bytes);
bool intel_gvt_mmio_is_cmd_access(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_is_unalign(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_accessed(struct intel_gvt *gvt, unsigned int offset);
void intel_gvt_mmio_set_cmd_accessed(struct intel_gvt *gvt,
unsigned int offset);
bool intel_gvt_mmio_has_mode_mask(struct intel_gvt *gvt, unsigned int offset);
int intel_vgpu_default_mmio_read(struct intel_vgpu *vgpu, unsigned int offset,
void *p_data, unsigned int bytes);
int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
......