Commit 2641ee13 authored by Cristian Marussi, committed by Stephen Boyd

clk: scmi: Allocate CLK operations dynamically

SCMI clock descriptors expose an increasing number of properties, which, in
turn, leads to a varying set of supported CLK operations being associated
with each clock.

Providing statically pre-defined CLK operations structs for all the
possible combinations of allowed clock features is becoming cumbersome and
error-prone.

Allocate the per-clock operations descriptors dynamically and populate them
with only the strictly needed set of operations, depending on the advertised
clock properties: one descriptor is created for each distinct combination of
clock operations, keeping the number of clk_ops structures to the bare
minimum needed.
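
For illustration only (this sketch is not part of the patch): a minimal
user-space model of the approach described above, where a feature bitmap
indexes a small database of lazily allocated ops descriptors so that each
distinct combination is allocated exactly once. All names below are
hypothetical stand-ins for the driver's scmi_clk_ops_alloc() and
scmi_clk_ops_select() pair.

#include <stdio.h>
#include <stdlib.h>

enum clk_feats {
	CLK_ATOMIC_SUPPORTED,		/* mirrors SCMI_CLK_ATOMIC_SUPPORTED */
	CLK_FEATS_COUNT
};

#define MAX_CLK_OPS (1U << CLK_FEATS_COUNT)

struct clk_ops {
	const char *enable_kind;	/* stand-in for enable/prepare callbacks */
};

/* Allocate one ops descriptor wired up for the requested feature bitmap. */
static struct clk_ops *ops_alloc(unsigned long feats_key)
{
	struct clk_ops *ops = calloc(1, sizeof(*ops));

	if (!ops)
		return NULL;
	ops->enable_kind = (feats_key & (1UL << CLK_ATOMIC_SUPPORTED)) ?
			   "atomic enable/disable" : "prepare/unprepare";
	return ops;
}

/* Return the cached ops for this combination, allocating on first use. */
static const struct clk_ops *ops_select(unsigned long feats_key,
					const struct clk_ops **db,
					size_t db_size)
{
	if (feats_key >= db_size)
		return NULL;
	if (!db[feats_key])
		db[feats_key] = ops_alloc(feats_key);
	return db[feats_key];
}

int main(void)
{
	const struct clk_ops *db[MAX_CLK_OPS] = {};
	const struct clk_ops *a, *b;

	a = ops_select(1UL << CLK_ATOMIC_SUPPORTED, db, MAX_CLK_OPS);
	b = ops_select(1UL << CLK_ATOMIC_SUPPORTED, db, MAX_CLK_OPS);

	/* Same feature combination reuses the single allocation. */
	printf("%s, reused=%d\n", a->enable_kind, a == b);
	return 0;
}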

CC: Michael Turquette <mturquette@baylibre.com>
CC: Stephen Boyd <sboyd@kernel.org>
CC: linux-clk@vger.kernel.org
Signed-off-by: Cristian Marussi <cristian.marussi@arm.com>
Link: https://lore.kernel.org/r/20240415163649.895268-2-cristian.marussi@arm.com
Reviewed-by: Sudeep Holla <sudeep.holla@arm.com>
Signed-off-by: Stephen Boyd <sboyd@kernel.org>
parent 4cece764
@@ -2,9 +2,10 @@
/*
* System Control and Power Interface (SCMI) Protocol based clock driver
*
* Copyright (C) 2018-2022 ARM Ltd.
* Copyright (C) 2018-2024 ARM Ltd.
*/
#include <linux/bits.h>
#include <linux/clk-provider.h>
#include <linux/device.h>
#include <linux/err.h>
@@ -16,6 +17,13 @@
#define NOT_ATOMIC false
#define ATOMIC true
enum scmi_clk_feats {
SCMI_CLK_ATOMIC_SUPPORTED,
SCMI_CLK_FEATS_COUNT
};
#define SCMI_MAX_CLK_OPS BIT(SCMI_CLK_FEATS_COUNT)
static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
struct scmi_clk {
@@ -158,42 +166,6 @@ static int scmi_clk_atomic_is_enabled(struct clk_hw *hw)
return !!enabled;
}
/*
* We can provide enable/disable/is_enabled atomic callbacks only if the
* underlying SCMI transport for an SCMI instance is configured to handle
* SCMI commands in an atomic manner.
*
* When no SCMI atomic transport support is available we instead provide only
* the prepare/unprepare API, as allowed by the clock framework when atomic
* calls are not available.
*
* Two distinct sets of clk_ops are provided since we could have multiple SCMI
* instances with different underlying transport quality, so they cannot be
* shared.
*/
static const struct clk_ops scmi_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
.prepare = scmi_clk_enable,
.unprepare = scmi_clk_disable,
.set_parent = scmi_clk_set_parent,
.get_parent = scmi_clk_get_parent,
.determine_rate = scmi_clk_determine_rate,
};
static const struct clk_ops scmi_atomic_clk_ops = {
.recalc_rate = scmi_clk_recalc_rate,
.round_rate = scmi_clk_round_rate,
.set_rate = scmi_clk_set_rate,
.enable = scmi_clk_atomic_enable,
.disable = scmi_clk_atomic_disable,
.is_enabled = scmi_clk_atomic_is_enabled,
.set_parent = scmi_clk_set_parent,
.get_parent = scmi_clk_get_parent,
.determine_rate = scmi_clk_determine_rate,
};
static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
const struct clk_ops *scmi_ops)
{
@@ -230,17 +202,129 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
return ret;
}
/**
* scmi_clk_ops_alloc() - Alloc and configure clock operations
* @dev: A device reference for devres
* @feats_key: A bitmap representing the desired clk_ops capabilities
*
* Allocate and configure a proper set of clock operations depending on the
* specifically required SCMI clock features.
*
* Return: A pointer to the allocated and configured clk_ops on success,
* or NULL on allocation failure.
*/
static const struct clk_ops *
scmi_clk_ops_alloc(struct device *dev, unsigned long feats_key)
{
struct clk_ops *ops;
ops = devm_kzalloc(dev, sizeof(*ops), GFP_KERNEL);
if (!ops)
return NULL;
/*
* We can provide enable/disable/is_enabled atomic callbacks only if the
* underlying SCMI transport for an SCMI instance is configured to
* handle SCMI commands in an atomic manner.
*
* When no SCMI atomic transport support is available we instead provide
* only the prepare/unprepare API, as allowed by the clock framework
* when atomic calls are not available.
*/
if (feats_key & BIT(SCMI_CLK_ATOMIC_SUPPORTED)) {
ops->enable = scmi_clk_atomic_enable;
ops->disable = scmi_clk_atomic_disable;
ops->is_enabled = scmi_clk_atomic_is_enabled;
} else {
ops->prepare = scmi_clk_enable;
ops->unprepare = scmi_clk_disable;
}
/* Rate ops */
ops->recalc_rate = scmi_clk_recalc_rate;
ops->round_rate = scmi_clk_round_rate;
ops->determine_rate = scmi_clk_determine_rate;
ops->set_rate = scmi_clk_set_rate;
/* Parent ops */
ops->get_parent = scmi_clk_get_parent;
ops->set_parent = scmi_clk_set_parent;
return ops;
}
/**
* scmi_clk_ops_select() - Select a proper set of clock operations
* @sclk: A reference to an SCMI clock descriptor
* @atomic_capable: A flag to indicate if atomic mode is supported by the
* transport
* @atomic_threshold_us: Platform atomic threshold value in microseconds:
* clk_ops are atomic when clock enable latency is less
* than this threshold
* @clk_ops_db: A reference to the array used as a database to store all the
* created clock operations combinations.
* @db_size: Maximum number of entries held by @clk_ops_db
*
* After having built a bitmap descriptor to represent the set of features
* needed by this SCMI clock, at first use it to lookup into the set of
* previously allocated clk_ops to check if a suitable combination of clock
* operations was already created; when no match is found allocate a brand new
* set of clk_ops satisfying the required combination of features and save it
* for future references.
*
* In this way only one set of clk_ops is ever created for each different
* combination that is effectively needed by a driver instance.
*
* Return: A pointer to the allocated and configured clk_ops on success, or
* NULL otherwise.
*/
static const struct clk_ops *
scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
unsigned int atomic_threshold_us,
const struct clk_ops **clk_ops_db, size_t db_size)
{
const struct scmi_clock_info *ci = sclk->info;
unsigned int feats_key = 0;
const struct clk_ops *ops;
/*
* Note that when transport is atomic but SCMI protocol did not
* specify (or support) an enable_latency associated with a
* clock, we default to use atomic operations mode.
*/
if (atomic_capable && ci->enable_latency <= atomic_threshold_us)
feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
if (WARN_ON(feats_key >= db_size))
return NULL;
/* Lookup previously allocated ops */
ops = clk_ops_db[feats_key];
if (ops)
return ops;
/* Did not find a pre-allocated clock_ops */
ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
if (!ops)
return NULL;
/* Store new ops combinations */
clk_ops_db[feats_key] = ops;
return ops;
}
static int scmi_clocks_probe(struct scmi_device *sdev)
{
int idx, count, err;
unsigned int atomic_threshold;
bool is_atomic;
unsigned int atomic_threshold_us;
bool transport_is_atomic;
struct clk_hw **hws;
struct clk_hw_onecell_data *clk_data;
struct device *dev = &sdev->dev;
struct device_node *np = dev->of_node;
const struct scmi_handle *handle = sdev->handle;
struct scmi_protocol_handle *ph;
const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS] = {};
if (!handle)
return -ENODEV;
@@ -264,7 +348,8 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
clk_data->num = count;
hws = clk_data->hws;
is_atomic = handle->is_transport_atomic(handle, &atomic_threshold);
transport_is_atomic = handle->is_transport_atomic(handle,
&atomic_threshold_us);
for (idx = 0; idx < count; idx++) {
struct scmi_clk *sclk;
@@ -286,15 +371,17 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
sclk->dev = dev;
/*
* Note that when transport is atomic but SCMI protocol did not
* specify (or support) an enable_latency associated with a
* clock, we default to use atomic operations mode.
* Note that the scmi_clk_ops_db is on the stack, not global,
* because it cannot be shared between multiple probe-sequences
* to avoid sharing the devm_ allocated clk_ops between multiple
* SCMI clk driver instances.
*/
if (is_atomic &&
sclk->info->enable_latency <= atomic_threshold)
scmi_ops = &scmi_atomic_clk_ops;
else
scmi_ops = &scmi_clk_ops;
scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
atomic_threshold_us,
scmi_clk_ops_db,
ARRAY_SIZE(scmi_clk_ops_db));
if (!scmi_ops)
return -ENOMEM;
/* Initialize clock parent data. */
if (sclk->info->num_parents > 0) {
@@ -318,8 +405,7 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
} else {
dev_dbg(dev, "Registered clock:%s%s\n",
sclk->info->name,
scmi_ops == &scmi_atomic_clk_ops ?
" (atomic ops)" : "");
scmi_ops->enable ? " (atomic ops)" : "");
hws[idx] = &sclk->hw;
}
}