Commit 703db1f5 authored by Bjorn Andersson's avatar Bjorn Andersson

clk: qcom: rcg2: Cache CFG register updates for parked RCGs

As GDSCs are turned on and off some associated clocks are momentarily
enabled for house keeping purposes. For this, and similar, purposes the
"shared RCGs" will park the RCG on a source clock which is known to be
available.
When the RCG is parked, a safe clock source will be selected and
committed, then the original source would be written back and upon enable
the change back to the unparked source would be committed.

But starting with SM8350 this fails, as the value in CFG is committed by
the GDSC handshake and without a ticking parent the GDSC enablement will
time out.

This becomes a concrete problem if the runtime suspended state of a
device includes disabling such an RCG's parent clock. As the device
attempts to power up the domain again the RCG will fail to enable and
hence the GDSC enablement will fail, preventing the device from
returning from the suspended state.

This can be seen in e.g. the display stack during probe on SM8350.

To avoid this problem, the software needs to ensure that the RCG is
configured to an active parent clock while it is disabled. This is done
by caching the CFG register content while the shared RCG is parked on
this safe source.

Writes to M, N and D registers are committed as they are requested. New
helpers for get_parent() and recalc_rate() are extracted from their
previous implementations and __clk_rcg2_configure() is modified to allow
it to operate on the cached value.

Fixes: 7ef6f118 ("clk: qcom: Configure the RCGs to a safe source as needed")
Signed-off-by: Bjorn Andersson <bjorn.andersson@linaro.org>
Reviewed-by: Stephen Boyd <sboyd@kernel.org>
Link: https://lore.kernel.org/r/20220426212136.1543984-1-bjorn.andersson@linaro.org
parent d65d005f
...@@ -140,6 +140,7 @@ extern const struct clk_ops clk_dyn_rcg_ops; ...@@ -140,6 +140,7 @@ extern const struct clk_ops clk_dyn_rcg_ops;
* @freq_tbl: frequency table * @freq_tbl: frequency table
* @clkr: regmap clock handle * @clkr: regmap clock handle
* @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG * @cfg_off: defines the cfg register offset from the CMD_RCGR + CFG_REG
* @parked_cfg: cached value of the CFG register for parked RCGs
*/ */
struct clk_rcg2 { struct clk_rcg2 {
u32 cmd_rcgr; u32 cmd_rcgr;
...@@ -150,6 +151,7 @@ struct clk_rcg2 { ...@@ -150,6 +151,7 @@ struct clk_rcg2 {
const struct freq_tbl *freq_tbl; const struct freq_tbl *freq_tbl;
struct clk_regmap clkr; struct clk_regmap clkr;
u8 cfg_off; u8 cfg_off;
u32 parked_cfg;
}; };
#define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr) #define to_clk_rcg2(_hw) container_of(to_clk_regmap(_hw), struct clk_rcg2, clkr)
......
...@@ -73,16 +73,11 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw) ...@@ -73,16 +73,11 @@ static int clk_rcg2_is_enabled(struct clk_hw *hw)
return (cmd & CMD_ROOT_OFF) == 0; return (cmd & CMD_ROOT_OFF) == 0;
} }
static u8 clk_rcg2_get_parent(struct clk_hw *hw) static u8 __clk_rcg2_get_parent(struct clk_hw *hw, u32 cfg)
{ {
struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct clk_rcg2 *rcg = to_clk_rcg2(hw);
int num_parents = clk_hw_get_num_parents(hw); int num_parents = clk_hw_get_num_parents(hw);
u32 cfg; int i;
int i, ret;
ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (ret)
goto err;
cfg &= CFG_SRC_SEL_MASK; cfg &= CFG_SRC_SEL_MASK;
cfg >>= CFG_SRC_SEL_SHIFT; cfg >>= CFG_SRC_SEL_SHIFT;
...@@ -91,12 +86,27 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw) ...@@ -91,12 +86,27 @@ static u8 clk_rcg2_get_parent(struct clk_hw *hw)
if (cfg == rcg->parent_map[i].cfg) if (cfg == rcg->parent_map[i].cfg)
return i; return i;
err:
pr_debug("%s: Clock %s has invalid parent, using default.\n", pr_debug("%s: Clock %s has invalid parent, using default.\n",
__func__, clk_hw_get_name(hw)); __func__, clk_hw_get_name(hw));
return 0; return 0;
} }
static u8 clk_rcg2_get_parent(struct clk_hw *hw)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
u32 cfg;
int ret;
ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (ret) {
pr_debug("%s: Unable to read CFG register for %s\n",
__func__, clk_hw_get_name(hw));
return 0;
}
return __clk_rcg2_get_parent(hw, cfg);
}
static int update_config(struct clk_rcg2 *rcg) static int update_config(struct clk_rcg2 *rcg)
{ {
int count, ret; int count, ret;
...@@ -163,12 +173,10 @@ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div) ...@@ -163,12 +173,10 @@ calc_rate(unsigned long rate, u32 m, u32 n, u32 mode, u32 hid_div)
} }
static unsigned long static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) __clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate, u32 cfg)
{ {
struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct clk_rcg2 *rcg = to_clk_rcg2(hw);
u32 cfg, hid_div, m = 0, n = 0, mode = 0, mask; u32 hid_div, m = 0, n = 0, mode = 0, mask;
regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (rcg->mnd_width) { if (rcg->mnd_width) {
mask = BIT(rcg->mnd_width) - 1; mask = BIT(rcg->mnd_width) - 1;
...@@ -189,6 +197,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) ...@@ -189,6 +197,17 @@ clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
return calc_rate(parent_rate, m, n, mode, hid_div); return calc_rate(parent_rate, m, n, mode, hid_div);
} }
static unsigned long
clk_rcg2_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
u32 cfg;
regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
return __clk_rcg2_recalc_rate(hw, parent_rate, cfg);
}
static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f, static int _freq_tbl_determine_rate(struct clk_hw *hw, const struct freq_tbl *f,
struct clk_rate_request *req, struct clk_rate_request *req,
enum freq_policy policy) enum freq_policy policy)
...@@ -262,7 +281,8 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw, ...@@ -262,7 +281,8 @@ static int clk_rcg2_determine_floor_rate(struct clk_hw *hw,
return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR); return _freq_tbl_determine_rate(hw, rcg->freq_tbl, req, FLOOR);
} }
static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f,
u32 *_cfg)
{ {
u32 cfg, mask, d_val, not2d_val, n_minus_m; u32 cfg, mask, d_val, not2d_val, n_minus_m;
struct clk_hw *hw = &rcg->clkr.hw; struct clk_hw *hw = &rcg->clkr.hw;
...@@ -304,15 +324,27 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) ...@@ -304,15 +324,27 @@ static int __clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT; cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
if (rcg->mnd_width && f->n && (f->m != f->n)) if (rcg->mnd_width && f->n && (f->m != f->n))
cfg |= CFG_MODE_DUAL_EDGE; cfg |= CFG_MODE_DUAL_EDGE;
return regmap_update_bits(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg),
mask, cfg); *_cfg &= ~mask;
*_cfg |= cfg;
return 0;
} }
static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f) static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{ {
u32 cfg;
int ret; int ret;
ret = __clk_rcg2_configure(rcg, f); ret = regmap_read(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), &cfg);
if (ret)
return ret;
ret = __clk_rcg2_configure(rcg, f, &cfg);
if (ret)
return ret;
ret = regmap_write(rcg->clkr.regmap, RCG_CFG_OFFSET(rcg), cfg);
if (ret) if (ret)
return ret; return ret;
...@@ -979,11 +1011,12 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate, ...@@ -979,11 +1011,12 @@ static int clk_rcg2_shared_set_rate(struct clk_hw *hw, unsigned long rate,
return -EINVAL; return -EINVAL;
/* /*
* In case clock is disabled, update the CFG, M, N and D registers * In case clock is disabled, update the M, N and D registers, cache
* and don't hit the update bit of CMD register. * the CFG value in parked_cfg and don't hit the update bit of CMD
* register.
*/ */
if (!__clk_is_enabled(hw->clk)) if (!clk_hw_is_enabled(hw))
return __clk_rcg2_configure(rcg, f); return __clk_rcg2_configure(rcg, f, &rcg->parked_cfg);
return clk_rcg2_shared_force_enable_clear(hw, f); return clk_rcg2_shared_force_enable_clear(hw, f);
} }
...@@ -1007,6 +1040,11 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw) ...@@ -1007,6 +1040,11 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw)
if (ret) if (ret)
return ret; return ret;
/* Write back the stored configuration corresponding to current rate */
ret = regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, rcg->parked_cfg);
if (ret)
return ret;
ret = update_config(rcg); ret = update_config(rcg);
if (ret) if (ret)
return ret; return ret;
...@@ -1017,13 +1055,12 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw) ...@@ -1017,13 +1055,12 @@ static int clk_rcg2_shared_enable(struct clk_hw *hw)
static void clk_rcg2_shared_disable(struct clk_hw *hw) static void clk_rcg2_shared_disable(struct clk_hw *hw)
{ {
struct clk_rcg2 *rcg = to_clk_rcg2(hw); struct clk_rcg2 *rcg = to_clk_rcg2(hw);
u32 cfg;
/* /*
* Store current configuration as switching to safe source would clear * Store current configuration as switching to safe source would clear
* the SRC and DIV of CFG register * the SRC and DIV of CFG register
*/ */
regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &cfg); regmap_read(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, &rcg->parked_cfg);
/* /*
* Park the RCG at a safe configuration - sourced off of safe source. * Park the RCG at a safe configuration - sourced off of safe source.
...@@ -1041,17 +1078,52 @@ static void clk_rcg2_shared_disable(struct clk_hw *hw) ...@@ -1041,17 +1078,52 @@ static void clk_rcg2_shared_disable(struct clk_hw *hw)
update_config(rcg); update_config(rcg);
clk_rcg2_clear_force_enable(hw); clk_rcg2_clear_force_enable(hw);
}
/* Write back the stored configuration corresponding to current rate */ static u8 clk_rcg2_shared_get_parent(struct clk_hw *hw)
regmap_write(rcg->clkr.regmap, rcg->cmd_rcgr + CFG_REG, cfg); {
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
/* If the shared rcg is parked use the cached cfg instead */
if (!clk_hw_is_enabled(hw))
return __clk_rcg2_get_parent(hw, rcg->parked_cfg);
return clk_rcg2_get_parent(hw);
}
static int clk_rcg2_shared_set_parent(struct clk_hw *hw, u8 index)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
/* If the shared rcg is parked only update the cached cfg */
if (!clk_hw_is_enabled(hw)) {
rcg->parked_cfg &= ~CFG_SRC_SEL_MASK;
rcg->parked_cfg |= rcg->parent_map[index].cfg << CFG_SRC_SEL_SHIFT;
return 0;
}
return clk_rcg2_set_parent(hw, index);
}
static unsigned long
clk_rcg2_shared_recalc_rate(struct clk_hw *hw, unsigned long parent_rate)
{
struct clk_rcg2 *rcg = to_clk_rcg2(hw);
/* If the shared rcg is parked use the cached cfg instead */
if (!clk_hw_is_enabled(hw))
return __clk_rcg2_recalc_rate(hw, parent_rate, rcg->parked_cfg);
return clk_rcg2_recalc_rate(hw, parent_rate);
} }
const struct clk_ops clk_rcg2_shared_ops = { const struct clk_ops clk_rcg2_shared_ops = {
.enable = clk_rcg2_shared_enable, .enable = clk_rcg2_shared_enable,
.disable = clk_rcg2_shared_disable, .disable = clk_rcg2_shared_disable,
.get_parent = clk_rcg2_get_parent, .get_parent = clk_rcg2_shared_get_parent,
.set_parent = clk_rcg2_set_parent, .set_parent = clk_rcg2_shared_set_parent,
.recalc_rate = clk_rcg2_recalc_rate, .recalc_rate = clk_rcg2_shared_recalc_rate,
.determine_rate = clk_rcg2_determine_rate, .determine_rate = clk_rcg2_determine_rate,
.set_rate = clk_rcg2_shared_set_rate, .set_rate = clk_rcg2_shared_set_rate,
.set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent, .set_rate_and_parent = clk_rcg2_shared_set_rate_and_parent,
......
Markdown is supported
0%
or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment