Commit 8cda7a4f authored by Alex Deucher

drm/amdgpu/UAPI: add new CTX OP to get/set stable pstates

Add a new CTX ioctl operation to set stable pstates for profiling.
When creating traces for tools like RGP, using SPM, or doing
performance profiling, it is necessary to enable a special
stable profiling power state on the GPU.  These profiling
states set fixed clocks and disable certain other power
features, such as powergating, that may impact the results.

Historically, these profiling pstates were enabled via sysfs,
but this adds an interface to enable them via the CTX ioctl
from the application (see the usage sketch below, before the
diff).  Since the power state is global, only one application
can set it at a time; if multiple applications try to use it,
only the first will get it and the ioctl will return -EBUSY
for the others.  The sysfs interface will override whatever
has been set by this interface.

Mesa MR: https://gitlab.freedesktop.org/mesa/drm/-/merge_requests/207

v2: don't default r = 0;
v3: rebase on Evan's PM cleanup
Reviewed-by: Evan Quan <evan.quan@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent e281d594
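
As a rough userspace sketch (not part of this patch), an application could
request a stable pstate through the new op as shown below.  This assumes
libdrm's drmCommandWriteRead() and an already-created context handle; the
helper name set_stable_pstate() and the error handling are illustrative only.

#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "amdgpu_drm.h"

/* Illustrative helper: ask the kernel to pin a stable profiling pstate
 * for the context identified by ctx_id.  Returns 0 on success, -EBUSY
 * if another context already owns the stable pstate, negative errno
 * otherwise. */
static int set_stable_pstate(int fd, uint32_t ctx_id, uint32_t pstate)
{
	union drm_amdgpu_ctx args;

	memset(&args, 0, sizeof(args));
	args.in.op = AMDGPU_CTX_OP_SET_STABLE_PSTATE;
	args.in.ctx_id = ctx_id;
	args.in.flags = pstate;	/* e.g. AMDGPU_CTX_STABLE_PSTATE_PEAK */

	return drmCommandWriteRead(fd, DRM_AMDGPU_CTX, &args, sizeof(args));
}

Passing AMDGPU_CTX_STABLE_PSTATE_NONE releases the state and restores
automatic power management; AMDGPU_CTX_OP_GET_STABLE_PSTATE with flags set
to 0 reads the current setting back through args.out.pstate.flags.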
@@ -237,6 +237,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
	ctx->vram_lost_counter = atomic_read(&adev->vram_lost_counter);
	ctx->init_priority = priority;
	ctx->override_priority = AMDGPU_CTX_PRIORITY_UNSET;
	ctx->stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;

	return 0;
}
@@ -255,6 +256,86 @@ static void amdgpu_ctx_fini_entity(struct amdgpu_ctx_entity *entity)
	kfree(entity);
}

static int amdgpu_ctx_get_stable_pstate(struct amdgpu_ctx *ctx,
					u32 *stable_pstate)
{
	struct amdgpu_device *adev;
	enum amd_dpm_forced_level current_level;

	if (!ctx)
		return -EINVAL;

	/* only dereference ctx after the NULL check above */
	adev = ctx->adev;
	current_level = amdgpu_dpm_get_performance_level(adev);

	switch (current_level) {
	case AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_STANDARD;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK;
		break;
	case AMD_DPM_FORCED_LEVEL_PROFILE_PEAK:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_PEAK;
		break;
	default:
		*stable_pstate = AMDGPU_CTX_STABLE_PSTATE_NONE;
		break;
	}
	return 0;
}

static int amdgpu_ctx_set_stable_pstate(struct amdgpu_ctx *ctx,
					u32 stable_pstate)
{
	struct amdgpu_device *adev;
	enum amd_dpm_forced_level level;
	int r;

	if (!ctx)
		return -EINVAL;

	adev = ctx->adev;
	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (adev->pm.stable_pstate_ctx && adev->pm.stable_pstate_ctx != ctx) {
		r = -EBUSY;
		goto done;
	}

	switch (stable_pstate) {
	case AMDGPU_CTX_STABLE_PSTATE_NONE:
		level = AMD_DPM_FORCED_LEVEL_AUTO;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_STANDARD:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK;
		break;
	case AMDGPU_CTX_STABLE_PSTATE_PEAK:
		level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK;
		break;
	default:
		r = -EINVAL;
		goto done;
	}

	r = amdgpu_dpm_force_performance_level(adev, level);

	if (level == AMD_DPM_FORCED_LEVEL_AUTO)
		adev->pm.stable_pstate_ctx = NULL;
	else
		adev->pm.stable_pstate_ctx = ctx;
done:
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	return r;
}

static void amdgpu_ctx_fini(struct kref *ref)
{
	struct amdgpu_ctx *ctx = container_of(ref, struct amdgpu_ctx, refcount);
@@ -270,7 +351,7 @@ static void amdgpu_ctx_fini(struct kref *ref)
			ctx->entities[i][j] = NULL;
		}
	}

	amdgpu_ctx_set_stable_pstate(ctx, AMDGPU_CTX_STABLE_PSTATE_NONE);
	mutex_destroy(&ctx->lock);
	kfree(ctx);
}
@@ -467,11 +548,41 @@ static int amdgpu_ctx_query2(struct amdgpu_device *adev,
	return 0;
}

static int amdgpu_ctx_stable_pstate(struct amdgpu_device *adev,
				    struct amdgpu_fpriv *fpriv, uint32_t id,
				    bool set, u32 *stable_pstate)
{
	struct amdgpu_ctx *ctx;
	struct amdgpu_ctx_mgr *mgr;
	int r;

	if (!fpriv)
		return -EINVAL;

	mgr = &fpriv->ctx_mgr;
	mutex_lock(&mgr->lock);
	ctx = idr_find(&mgr->ctx_handles, id);
	if (!ctx) {
		mutex_unlock(&mgr->lock);
		return -EINVAL;
	}

	if (set)
		r = amdgpu_ctx_set_stable_pstate(ctx, *stable_pstate);
	else
		r = amdgpu_ctx_get_stable_pstate(ctx, stable_pstate);

	mutex_unlock(&mgr->lock);
	return r;
}

int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
		     struct drm_file *filp)
{
	int r;
	uint32_t id;
	uint32_t id, stable_pstate;
	int32_t priority;

	union drm_amdgpu_ctx *args = data;
@@ -500,6 +611,20 @@ int amdgpu_ctx_ioctl(struct drm_device *dev, void *data,
	case AMDGPU_CTX_OP_QUERY_STATE2:
		r = amdgpu_ctx_query2(adev, fpriv, id, &args->out);
		break;
	case AMDGPU_CTX_OP_GET_STABLE_PSTATE:
		if (args->in.flags)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, false, &stable_pstate);
		if (!r)
			args->out.pstate.flags = stable_pstate;
		break;
	case AMDGPU_CTX_OP_SET_STABLE_PSTATE:
		if (args->in.flags & ~AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK)
			return -EINVAL;
		stable_pstate = args->in.flags & AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK;
		if (stable_pstate > AMDGPU_CTX_STABLE_PSTATE_PEAK)
			return -EINVAL;
		r = amdgpu_ctx_stable_pstate(adev, fpriv, id, true, &stable_pstate);
		break;
	default:
		return -EINVAL;
	}
@@ -53,6 +53,7 @@ struct amdgpu_ctx {
	atomic_t		guilty;
	unsigned long		ras_counter_ce;
	unsigned long		ras_counter_ue;
	uint32_t		stable_pstate;
};

struct amdgpu_ctx_mgr {
@@ -3510,6 +3510,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
	init_rwsem(&adev->reset_sem);
	mutex_init(&adev->psp.mutex);
	mutex_init(&adev->notifier_lock);
	mutex_init(&adev->pm.stable_pstate_ctx_lock);

	amdgpu_device_init_apu_flags(adev);
@@ -336,11 +336,16 @@ static ssize_t amdgpu_set_power_dpm_force_performance_level(struct device *dev,
		return ret;
	}

	mutex_lock(&adev->pm.stable_pstate_ctx_lock);
	if (amdgpu_dpm_force_performance_level(adev, level)) {
		pm_runtime_mark_last_busy(ddev->dev);
		pm_runtime_put_autosuspend(ddev->dev);
		mutex_unlock(&adev->pm.stable_pstate_ctx_lock);
		return -EINVAL;
	}
	/* override whatever a user ctx may have set */
	adev->pm.stable_pstate_ctx = NULL;
	mutex_unlock(&adev->pm.stable_pstate_ctx_lock);

	pm_runtime_mark_last_busy(ddev->dev);
	pm_runtime_put_autosuspend(ddev->dev);
@@ -338,6 +338,9 @@ struct amdgpu_pm {
	uint32_t smu_debug_mask;
	bool pp_force_state_enabled;

	/* serializes stable-pstate updates from the CTX ioctl and sysfs */
	struct mutex		stable_pstate_ctx_lock;
	struct amdgpu_ctx	*stable_pstate_ctx;
};

u32 amdgpu_dpm_get_vblank_time(struct amdgpu_device *adev);
@@ -206,6 +206,8 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_OP_FREE_CTX	2
#define AMDGPU_CTX_OP_QUERY_STATE	3
#define AMDGPU_CTX_OP_QUERY_STATE2	4
#define AMDGPU_CTX_OP_GET_STABLE_PSTATE	5
#define AMDGPU_CTX_OP_SET_STABLE_PSTATE	6

/* GPU reset status */
#define AMDGPU_CTX_NO_RESET	0
@@ -238,10 +240,18 @@ union drm_amdgpu_bo_list {
#define AMDGPU_CTX_PRIORITY_HIGH	512
#define AMDGPU_CTX_PRIORITY_VERY_HIGH	1023

/* select a stable profiling pstate for perfmon tools */
#define AMDGPU_CTX_STABLE_PSTATE_FLAGS_MASK	0xf
#define AMDGPU_CTX_STABLE_PSTATE_NONE	0
#define AMDGPU_CTX_STABLE_PSTATE_STANDARD	1
#define AMDGPU_CTX_STABLE_PSTATE_MIN_SCLK	2
#define AMDGPU_CTX_STABLE_PSTATE_MIN_MCLK	3
#define AMDGPU_CTX_STABLE_PSTATE_PEAK	4

struct drm_amdgpu_ctx_in {
	/** AMDGPU_CTX_OP_* */
	__u32	op;
	/** For future use, no flags defined so far */
	/** Flags */
	__u32	flags;
	__u32	ctx_id;
	/** AMDGPU_CTX_PRIORITY_* */
@@ -262,6 +272,11 @@ union drm_amdgpu_ctx_out {
		/** Reset status since the last call of the ioctl. */
		__u32	reset_status;
	} state;

	struct {
		__u32	flags;
		__u32	_pad;
	} pstate;
};