Commit 7f891faf authored by Taniya Das, committed by Stephen Boyd

clk: qcom: clk-rcg2: Add support for duty-cycle for RCG

Root clock generators with an MND divider can change their duty cycle by
updating the 'D' register. Add clock ops that check the boundary conditions
and program the duty cycle requested by the consumer.
Signed-off-by: Taniya Das <tdas@codeaurora.org>
Link: https://lore.kernel.org/r/1619334502-9880-2-git-send-email-tdas@codeaurora.org
[sboyd@kernel.org: Remove _val everywhere]
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
parent 6efb943b
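
For context on how these ops get exercised: duty-cycle requests arrive through the generic clk API, i.e. clk_set_duty_cycle(), which lands in the new .set_duty_cycle op. The snippet below is a minimal consumer-side sketch, not part of this commit; the device, the clock name "gcc_foo_clk" and the function name are illustrative assumptions.

	#include <linux/clk.h>
	#include <linux/device.h>
	#include <linux/err.h>

	static int foo_request_duty_cycle(struct device *dev)
	{
		struct clk *clk;
		int ret;

		/* Illustrative clock name; any MND-capable RCG would do */
		clk = devm_clk_get(dev, "gcc_foo_clk");
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		ret = clk_prepare_enable(clk);
		if (ret)
			return ret;

		/* Ask for a 25 % duty cycle; non-MND RCGs return -EINVAL */
		return clk_set_duty_cycle(clk, 1, 4);
	}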
@@ -357,6 +357,83 @@ static int clk_rcg2_set_floor_rate_and_parent(struct clk_hw *hw,
	return __clk_rcg2_set_rate(hw, rate, FLOOR);
}

static int clk_rcg2_get_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask;

	if (!rcg->mnd_width) {
		/* 50 % duty-cycle for Non-MND RCGs */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	regmap_read(rcg->clkr.regmap, RCG_D_OFFSET(rcg), &not2d);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);
	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);

	if (!not2d && !m && !notn_m) {
		/* 50 % duty-cycle always */
		duty->num = 1;
		duty->den = 2;
		return 0;
	}

	mask = BIT(rcg->mnd_width) - 1;

	d = ~(not2d) & mask;
	d = DIV_ROUND_CLOSEST(d, 2);

	n = (~(notn_m) + m) & mask;

	duty->num = d;
	duty->den = n;

	return 0;
}

static int clk_rcg2_set_duty_cycle(struct clk_hw *hw, struct clk_duty *duty)
{
	struct clk_rcg2 *rcg = to_clk_rcg2(hw);
	u32 notn_m, n, m, d, not2d, mask, duty_per;
	int ret;

	/* Duty-cycle cannot be modified for non-MND RCGs */
	if (!rcg->mnd_width)
		return -EINVAL;

	mask = BIT(rcg->mnd_width) - 1;

	regmap_read(rcg->clkr.regmap, RCG_N_OFFSET(rcg), &notn_m);
	regmap_read(rcg->clkr.regmap, RCG_M_OFFSET(rcg), &m);

	n = (~(notn_m) + m) & mask;

	duty_per = (duty->num * 100) / duty->den;

	/* Calculate 2d value */
	d = DIV_ROUND_CLOSEST(n * duty_per * 2, 100);

	/* Check bit widths of 2d. If D is too big reduce duty cycle. */
	if (d > mask)
		d = mask;

	if ((d / 2) > (n - m))
		d = (n - m) * 2;
	else if ((d / 2) < (m / 2))
		d = m;

	not2d = ~d & mask;

	ret = regmap_update_bits(rcg->clkr.regmap, RCG_D_OFFSET(rcg), mask,
				 not2d);
	if (ret)
		return ret;

	return update_config(rcg);
}

const struct clk_ops clk_rcg2_ops = {
	.is_enabled = clk_rcg2_is_enabled,
	.get_parent = clk_rcg2_get_parent,
@@ -365,6 +442,8 @@ const struct clk_ops clk_rcg2_ops = {
	.determine_rate = clk_rcg2_determine_rate,
	.set_rate = clk_rcg2_set_rate,
	.set_rate_and_parent = clk_rcg2_set_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_ops);
@@ -376,6 +455,8 @@ const struct clk_ops clk_rcg2_floor_ops = {
	.determine_rate = clk_rcg2_determine_floor_rate,
	.set_rate = clk_rcg2_set_floor_rate,
	.set_rate_and_parent = clk_rcg2_set_floor_rate_and_parent,
	.get_duty_cycle = clk_rcg2_get_duty_cycle,
	.set_duty_cycle = clk_rcg2_set_duty_cycle,
};
EXPORT_SYMBOL_GPL(clk_rcg2_floor_ops);
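
As a worked example of the duty-cycle math above, with register values assumed purely for illustration: take mnd_width = 8 (mask = 0xff), M = 1 and N = 16, so the N register holds NOT(N - M) = 0xf0. A request for a 1/4 duty cycle gives duty_per = 25 and d = DIV_ROUND_CLOSEST(16 * 25 * 2, 100) = 8, which is 2 * D. Neither clamp triggers (d / 2 = 4 lies between m / 2 = 0 and n - m = 15), so not2d = ~8 & 0xff = 0xf7 is written to the D register. Reading back, clk_rcg2_get_duty_cycle() recovers d = 8 / 2 = 4 and n = (~0xf0 + 1) & 0xff = 16, i.e. a 4/16 = 25% duty cycle.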