Commit b5e161e4 authored by Nicholas Kazlauskas, committed by Alex Deucher

drm/amd/display: Add shared firmware state for DMUB IPS handshake

[Why]
Read-modify-write hazards can occur when using a single scratch register
shared between driver and firmware, leading to the driver accessing DCN
while in IPS2 and hanging the system.

[How]
Add infrastructure for using REGION6 as a shared firmware state between
driver and firmware. This region is uncachable.

Replace the existing get/set idle calls with reads/writes to the
(volatile) shared firmware state blocks, which are separated by at
least a cache line between firmware and driver.

Remove the workarounds that were required to rewrite and recheck state
to guard against read-modify-write hazards.
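
In rough terms, the driver side of the new handshake looks like the
following sketch (based on the structures and call sites added in this
change; eval/entry delays and error handling omitted):

    /* Driver-side view of the uncached shared state (illustrative only). */
    volatile struct dmub_shared_state_ips_driver *ips_driver =
        &dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
    volatile const struct dmub_shared_state_ips_fw *ips_fw =
        &dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;

    /* Idle entry: publish which low power states the driver allows. */
    union dmub_shared_state_ips_driver_signals new_signals = { 0 };
    new_signals.bits.allow_ips1 = 1;
    new_signals.bits.allow_ips2 = 1;
    ips_driver->signals = new_signals;

    /* Idle exit: clear the allow bits, then poll the firmware commit bits. */
    ips_driver->signals.all = 0;
    while (ips_fw->signals.bits.ips2_commit)
        udelay(1);

Because driver and firmware each write only their own block, the scratch
register read-modify-write hazard no longer applies.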
Reviewed-by: Charlene Liu <charlene.liu@amd.com>
Acked-by: Aurabindo Pillai <aurabindo.pillai@amd.com>
Signed-off-by: Nicholas Kazlauskas <nicholas.kazlauskas@amd.com>
Tested-by: Daniel Wheeler <daniel.wheeler@amd.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
parent 616b3946
......@@ -2128,7 +2128,8 @@ static int dm_dmub_sw_init(struct amdgpu_device *adev)
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_4_MAILBOX
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_5_TRACEBUFF
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_6_FW_STATE
DMUB_WINDOW_MEMORY_TYPE_FB //DMUB_WINDOW_7_SCRATCH_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_7_SCRATCH_MEM
DMUB_WINDOW_MEMORY_TYPE_FB, //DMUB_WINDOW_SHARED_STATE
};
int r;
......
......@@ -1198,6 +1198,7 @@ bool dc_dmub_srv_is_hw_pwr_up(struct dc_dmub_srv *dc_dmub_srv, bool wait)
static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
{
struct dc_dmub_srv *dc_dmub_srv;
union dmub_rb_cmd cmd = {0};
if (dc->debug.dmcub_emulation)
......@@ -1206,6 +1207,8 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
dc_dmub_srv = dc->ctx->dmub_srv;
memset(&cmd, 0, sizeof(cmd));
cmd.idle_opt_notify_idle.header.type = DMUB_CMD__IDLE_OPT;
cmd.idle_opt_notify_idle.header.sub_type = DMUB_CMD__IDLE_OPT_DCN_NOTIFY_IDLE;
......@@ -1216,10 +1219,32 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
cmd.idle_opt_notify_idle.cntl_data.driver_idle = allow_idle;
if (allow_idle) {
volatile struct dmub_shared_state_ips_driver *ips_driver =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals new_signals;
dc_dmub_srv_wait_idle(dc->ctx->dmub_srv);
if (dc->hwss.set_idle_state)
dc->hwss.set_idle_state(dc, true);
memset(&new_signals, 0, sizeof(new_signals));
if (dc->config.disable_ips == DMUB_IPS_ENABLE ||
dc->config.disable_ips == DMUB_IPS_DISABLE_DYNAMIC) {
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
new_signals.bits.allow_z10 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS1) {
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2) {
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
} else if (dc->config.disable_ips == DMUB_IPS_DISABLE_IPS2_Z10) {
new_signals.bits.allow_pg = 1;
new_signals.bits.allow_ips1 = 1;
new_signals.bits.allow_ips2 = 1;
}
ips_driver->signals = new_signals;
}
/* NOTE: This does not use the "wake" interface since this is part of the wake path. */
......@@ -1229,8 +1254,7 @@ static void dc_dmub_srv_notify_idle(const struct dc *dc, bool allow_idle)
static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
{
uint32_t allow_state = 0;
uint32_t commit_state = 0;
struct dc_dmub_srv *dc_dmub_srv;
if (dc->debug.dmcub_emulation)
return;
......@@ -1238,61 +1262,44 @@ static void dc_dmub_srv_exit_low_power_state(const struct dc *dc)
if (!dc->ctx->dmub_srv || !dc->ctx->dmub_srv->dmub)
return;
if (dc->hwss.get_idle_state &&
dc->hwss.set_idle_state &&
dc->clk_mgr->funcs->exit_low_power_state) {
dc_dmub_srv = dc->ctx->dmub_srv;
allow_state = dc->hwss.get_idle_state(dc);
dc->hwss.set_idle_state(dc, false);
if (dc->clk_mgr->funcs->exit_low_power_state) {
volatile const struct dmub_shared_state_ips_fw *ips_fw =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_FW].data.ips_fw;
volatile struct dmub_shared_state_ips_driver *ips_driver =
&dc_dmub_srv->dmub->shared_state[DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER].data.ips_driver;
union dmub_shared_state_ips_driver_signals prev_driver_signals = ips_driver->signals;
if (!(allow_state & DMUB_IPS2_ALLOW_MASK)) {
// Wait for evaluation time
for (;;) {
udelay(dc->debug.ips2_eval_delay_us);
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_ALLOW_MASK)
break;
ips_driver->signals.all = 0;
/* allow was still set, retry eval delay */
dc->hwss.set_idle_state(dc, false);
}
if (prev_driver_signals.bits.allow_ips2) {
udelay(dc->debug.ips2_eval_delay_us);
if (!(commit_state & DMUB_IPS2_COMMIT_MASK)) {
if (ips_fw->signals.bits.ips2_commit) {
// Tell PMFW to exit low power state
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
// Wait for IPS2 entry upper bound
udelay(dc->debug.ips2_entry_delay_us);
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS2_COMMIT_MASK)
break;
dc->clk_mgr->funcs->exit_low_power_state(dc->clk_mgr);
while (ips_fw->signals.bits.ips2_commit)
udelay(1);
}
if (!dc_dmub_srv_is_hw_pwr_up(dc->ctx->dmub_srv, true))
ASSERT(0);
/* TODO: See if we can return early here - IPS2 should go
* back directly to IPS0 and clear the flags, but it will
* be safer to directly notify DMCUB of this.
*/
allow_state = dc->hwss.get_idle_state(dc);
dmub_srv_sync_inbox1(dc->ctx->dmub_srv->dmub);
}
}
dc_dmub_srv_notify_idle(dc, false);
if (!(allow_state & DMUB_IPS1_ALLOW_MASK)) {
for (;;) {
commit_state = dc->hwss.get_idle_state(dc);
if (commit_state & DMUB_IPS1_COMMIT_MASK)
break;
if (prev_driver_signals.bits.allow_ips1) {
while (ips_fw->signals.bits.ips1_commit)
udelay(1);
}
}
}
......
......@@ -125,6 +125,7 @@ enum dmub_window_id {
DMUB_WINDOW_5_TRACEBUFF,
DMUB_WINDOW_6_FW_STATE,
DMUB_WINDOW_7_SCRATCH_MEM,
DMUB_WINDOW_SHARED_STATE,
DMUB_WINDOW_TOTAL,
};
......@@ -368,7 +369,8 @@ struct dmub_srv_hw_funcs {
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6);
const struct dmub_window *cw6,
const struct dmub_window *region6);
void (*setup_mailbox)(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
......@@ -461,6 +463,7 @@ struct dmub_srv_create_params {
* @user_ctx: user provided context for the dmub_srv
* @fw_version: the current firmware version, if any
* @is_virtual: false if hardware support only
* @shared_state: dmub shared state between firmware and driver
* @fw_state: dmub firmware state pointer
*/
struct dmub_srv {
......@@ -469,6 +472,7 @@ struct dmub_srv {
uint32_t fw_version;
bool is_virtual;
struct dmub_fb scratch_mem_fb;
volatile struct dmub_shared_state_feature_block *shared_state;
volatile const struct dmub_fw_state *fw_state;
/* private: internal use only */
......
......@@ -508,6 +508,8 @@ struct dmub_visual_confirm_color {
* @trace_buffer_size: size of the tracebuffer region
* @fw_version: the firmware version information
* @dal_fw: 1 if the firmware is DAL
* @shared_state_size: size of the shared state region in bytes
* @shared_state_features: number of shared state features
*/
struct dmub_fw_meta_info {
uint32_t magic_value; /**< magic value identifying DMUB firmware meta info */
......@@ -516,6 +518,9 @@ struct dmub_fw_meta_info {
uint32_t fw_version; /**< the firmware version information */
uint8_t dal_fw; /**< 1 if the firmware is DAL */
uint8_t reserved[3]; /**< padding bits */
uint32_t shared_state_size; /**< size of the shared state region in bytes */
uint16_t shared_state_features; /**< number of shared state features */
uint16_t reserved2; /**< padding bytes */
};
/**
......@@ -659,6 +664,116 @@ enum dmub_fw_boot_options_bit {
DMUB_FW_BOOT_OPTION_BIT_OPTIMIZED_INIT_DONE = (1 << 2), /**< 1 if optimized init done */
};
//==============================================================================
//< DMUB_SHARED_STATE>==========================================================
//==============================================================================
/**
* Shared firmware state between driver and firmware for lockless communication
* in situations where the inbox/outbox may be unavailable.
*
* Each structure *must* be at most 256-bytes in size. The layout allocation is
* described below:
*
* [Header (256 Bytes)][Feature 1 (256 Bytes)][Feature 2 (256 Bytes)]...
*/
/**
* enum dmub_shared_state_feature_id - List of shared state features.
*/
enum dmub_shared_state_feature_id {
DMUB_SHARED_SHARE_FEATURE__INVALID = 0,
DMUB_SHARED_SHARE_FEATURE__IPS_FW = 1,
DMUB_SHARED_SHARE_FEATURE__IPS_DRIVER = 2,
DMUB_SHARED_STATE_FEATURE__LAST, /* Total number of features. */
};
/**
* union dmub_shared_state_ips_fw_signals - Firmware signals for IPS.
*/
union dmub_shared_state_ips_fw_signals {
struct {
uint32_t ips1_commit : 1; /**< 1 if in IPS1 */
uint32_t ips2_commit : 1; /**< 1 if in IPS2 */
uint32_t reserved_bits : 30; /**< Reserved */
} bits;
uint32_t all;
};
/**
* union dmub_shared_state_ips_driver_signals - Driver signals for IPS.
*/
union dmub_shared_state_ips_driver_signals {
struct {
uint32_t allow_pg : 1; /**< 1 if PG is allowed */
uint32_t allow_ips1 : 1; /**< 1 if IPS1 is allowed */
uint32_t allow_ips2 : 1; /**< 1 if IPS2 is allowed */
uint32_t allow_z10 : 1; /**< 1 if Z10 is allowed */
uint32_t reserved_bits : 28; /**< Reserved bits */
} bits;
uint32_t all;
};
/**
* IPS FW Version
*/
#define DMUB_SHARED_STATE__IPS_FW_VERSION 1
/**
* struct dmub_shared_state_ips_fw - Firmware state for IPS.
*/
struct dmub_shared_state_ips_fw {
union dmub_shared_state_ips_fw_signals signals; /**< 4 bytes, IPS signal bits */
uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
* IPS Driver Version
*/
#define DMUB_SHARED_STATE__IPS_DRIVER_VERSION 1
/**
* struct dmub_shared_state_ips_driver - Driver state for IPS.
*/
struct dmub_shared_state_ips_driver {
union dmub_shared_state_ips_driver_signals signals; /**< 4 bytes, IPS signal bits */
uint32_t reserved[61]; /**< Reserved, to be updated when adding new fields. */
}; /* 248-bytes, fixed */
/**
* struct dmub_shared_state_feature_common - Generic payload.
*/
struct dmub_shared_state_feature_common {
uint32_t padding[62];
}; /* 248-bytes, fixed */
/**
* struct dmub_shared_state_feature_header - Feature description.
*/
struct dmub_shared_state_feature_header {
uint16_t id; /**< Feature ID */
uint16_t version; /**< Feature version */
uint32_t reserved; /**< Reserved bytes. */
}; /* 8 bytes, fixed */
/**
* struct dmub_shared_state_feature_block - Feature block.
*/
struct dmub_shared_state_feature_block {
struct dmub_shared_state_feature_header header; /**< Shared state header. */
union dmub_shared_feature_state_union {
struct dmub_shared_state_feature_common common; /**< Generic data */
struct dmub_shared_state_ips_fw ips_fw; /**< IPS firmware state */
struct dmub_shared_state_ips_driver ips_driver; /**< IPS driver state */
} data; /**< Shared state data. */
}; /* 256-bytes, fixed */
/**
* Shared state size in bytes.
*/
#define DMUB_FW_HEADER_SHARED_STATE_SIZE \
((DMUB_SHARED_STATE_FEATURE__LAST + 1) * sizeof(struct dmub_shared_state_feature_block))
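/*
 * For example: with the three feature IDs above, DMUB_SHARED_STATE_FEATURE__LAST
 * is 3, so this evaluates to (3 + 1) * 256 = 1024 bytes.
 */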
//==============================================================================
//</DMUB_SHARED_STATE>=========================================================
//==============================================================================
......
......@@ -191,7 +191,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
union dmub_addr offset;
uint64_t fb_base, fb_offset;
......
......@@ -197,7 +197,8 @@ void dmub_dcn20_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6);
const struct dmub_window *cw6,
const struct dmub_window *region6);
void dmub_dcn20_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
......
......@@ -124,7 +124,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
union dmub_addr offset;
......
......@@ -43,7 +43,8 @@ void dmub_dcn30_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6);
const struct dmub_window *cw6,
const struct dmub_window *region6);
#endif /* _DMUB_DCN30_H_ */
......@@ -187,7 +187,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
union dmub_addr offset;
......
......@@ -199,7 +199,8 @@ void dmub_dcn31_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6);
const struct dmub_window *cw6,
const struct dmub_window *region6);
void dmub_dcn31_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
......
......@@ -216,7 +216,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
union dmub_addr offset;
......
......@@ -206,7 +206,8 @@ void dmub_dcn32_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6);
const struct dmub_window *cw6,
const struct dmub_window *region6);
void dmub_dcn32_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
......
......@@ -229,7 +229,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6)
const struct dmub_window *cw6,
const struct dmub_window *region6)
{
union dmub_addr offset;
......@@ -275,6 +276,15 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
REG_SET_2(DMCUB_REGION3_CW6_TOP_ADDRESS, 0,
DMCUB_REGION3_CW6_TOP_ADDRESS, cw6->region.top,
DMCUB_REGION3_CW6_ENABLE, 1);
offset = region6->offset;
REG_WRITE(DMCUB_REGION6_OFFSET, offset.u.low_part);
REG_WRITE(DMCUB_REGION6_OFFSET_HIGH, offset.u.high_part);
REG_SET_2(DMCUB_REGION6_TOP_ADDRESS, 0,
DMCUB_REGION6_TOP_ADDRESS,
region6->region.top - region6->region.base - 1,
DMCUB_REGION6_ENABLE, 1);
}
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
......
......@@ -89,6 +89,9 @@ struct dmub_srv;
DMUB_SR(DMCUB_REGION5_OFFSET) \
DMUB_SR(DMCUB_REGION5_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SR(DMCUB_REGION6_OFFSET) \
DMUB_SR(DMCUB_REGION6_OFFSET_HIGH) \
DMUB_SR(DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SR(DMCUB_SCRATCH0) \
DMUB_SR(DMCUB_SCRATCH1) \
DMUB_SR(DMCUB_SCRATCH2) \
......@@ -154,6 +157,8 @@ struct dmub_srv;
DMUB_SF(DMCUB_REGION4_TOP_ADDRESS, DMCUB_REGION4_ENABLE) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION5_TOP_ADDRESS, DMCUB_REGION5_ENABLE) \
DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_TOP_ADDRESS) \
DMUB_SF(DMCUB_REGION6_TOP_ADDRESS, DMCUB_REGION6_ENABLE) \
DMUB_SF(CC_DC_PIPE_DIS, DC_DMCUB_ENABLE) \
DMUB_SF(MMHUBBUB_SOFT_RESET, DMUIF_SOFT_RESET) \
DMUB_SF(DCN_VM_FB_LOCATION_BASE, FB_BASE) \
......@@ -214,7 +219,8 @@ void dmub_dcn35_setup_windows(struct dmub_srv *dmub,
const struct dmub_window *cw3,
const struct dmub_window *cw4,
const struct dmub_window *cw5,
const struct dmub_window *cw6);
const struct dmub_window *cw6,
const struct dmub_window *region6);
void dmub_dcn35_setup_mailbox(struct dmub_srv *dmub,
const struct dmub_region *inbox1);
......
......@@ -78,6 +78,7 @@
#define DMUB_CW6_BASE (0x66000000)
#define DMUB_REGION5_BASE (0xA0000000)
#define DMUB_REGION6_BASE (0xC0000000)
static struct dmub_srv_dcn32_regs dmub_srv_dcn32_regs;
static struct dmub_srv_dcn35_regs dmub_srv_dcn35_regs;
......@@ -480,6 +481,7 @@ enum dmub_status
window_sizes[DMUB_WINDOW_5_TRACEBUFF] = trace_buffer_size;
window_sizes[DMUB_WINDOW_6_FW_STATE] = fw_state_size;
window_sizes[DMUB_WINDOW_7_SCRATCH_MEM] = DMUB_SCRATCH_MEM_SIZE;
window_sizes[DMUB_WINDOW_SHARED_STATE] = DMUB_FW_HEADER_SHARED_STATE_SIZE;
out->fb_size =
dmub_srv_calc_regions_for_memory_type(params, out, window_sizes, DMUB_WINDOW_MEMORY_TYPE_FB);
......@@ -565,9 +567,10 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
struct dmub_fb *tracebuff_fb = params->fb[DMUB_WINDOW_5_TRACEBUFF];
struct dmub_fb *fw_state_fb = params->fb[DMUB_WINDOW_6_FW_STATE];
struct dmub_fb *scratch_mem_fb = params->fb[DMUB_WINDOW_7_SCRATCH_MEM];
struct dmub_fb *shared_state_fb = params->fb[DMUB_WINDOW_SHARED_STATE];
struct dmub_rb_init_params rb_params, outbox0_rb_params;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6;
struct dmub_window cw0, cw1, cw2, cw3, cw4, cw5, cw6, region6;
struct dmub_region inbox1, outbox1, outbox0;
if (!dmub->sw_init)
......@@ -652,10 +655,16 @@ enum dmub_status dmub_srv_hw_init(struct dmub_srv *dmub,
dmub->fw_state = fw_state_fb->cpu_addr;
region6.offset.quad_part = shared_state_fb->gpu_addr;
region6.region.base = DMUB_CW6_BASE;
region6.region.top = region6.region.base + shared_state_fb->size;
dmub->shared_state = shared_state_fb->cpu_addr;
dmub->scratch_mem_fb = *scratch_mem_fb;
if (dmub->hw_funcs.setup_windows)
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6);
dmub->hw_funcs.setup_windows(dmub, &cw2, &cw3, &cw4, &cw5, &cw6, &region6);
if (dmub->hw_funcs.setup_outbox0)
dmub->hw_funcs.setup_outbox0(dmub, &outbox0);
......