[PATCH v2 1/5] clk: scmi: Allocate CLK operations dynamically

Stephen Boyd sboyd at kernel.org
Sun Apr 7 21:38:46 PDT 2024


Quoting Cristian Marussi (2024-03-25 14:00:21)
> diff --git a/drivers/clk/clk-scmi.c b/drivers/clk/clk-scmi.c
> index 8cbe24789c24..d5d369b052bd 100644
> --- a/drivers/clk/clk-scmi.c
> +++ b/drivers/clk/clk-scmi.c
> @@ -16,6 +16,14 @@
>  #define NOT_ATOMIC     false
>  #define ATOMIC         true
>  
> +enum scmi_clk_feats {
> +       SCMI_CLK_ATOMIC_SUPPORTED,
> +       SCMI_CLK_MAX_FEATS
> +};
> +
> +#define SCMI_MAX_CLK_OPS       (1 << SCMI_CLK_MAX_FEATS)
> +
> +static const struct clk_ops *clk_ops_db[SCMI_MAX_CLK_OPS];

Can it be 'scmi_clk_ops_db' for some namespacing?
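
Something like this, i.e. purely the rename:

	static const struct clk_ops *scmi_clk_ops_db[SCMI_MAX_CLK_OPS];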

>  static const struct scmi_clk_proto_ops *scmi_proto_clk_ops;
>  
>  struct scmi_clk {
> @@ -230,6 +202,106 @@ static int scmi_clk_ops_init(struct device *dev, struct scmi_clk *sclk,
>         return ret;
>  }
>  
> +/**
> + * scmi_clk_ops_alloc() - Alloc and configure clock operations
> + * @dev: A device reference for devres
> + * @feats_key: A bitmap representing the desired clk_ops capabilities.

Drop the period please because it's not consistent with the previous
argument description.
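
i.e. just the same text with the trailing period dropped:

	 * @dev: A device reference for devres
	 * @feats_key: A bitmap representing the desired clk_ops capabilities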

> + *
> + * Allocate and configure a proper set of clock operations depending on the
> + * specifically required SCMI clock features.
> + *
> + * Return: A pointer to the allocated and configured clk_ops on Success,

Lowercase 'Success'.

> +
> +/**
> + * scmi_clk_ops_select() - Select a proper set of clock operations
> + * @sclk: A reference to an SCMI clock descriptor
> + * @atomic_capable: A flag to indicate if atomic mode is supported by the
> + *                 transport
> + * @atomic_threshold: Platform atomic threshold value

Is this in nanoseconds, microseconds, or something else? Maybe a better
description would be "clk_ops are atomic when clk enable_latency is less
than X [time unit]".

> + *
> + * After having built a bitmap descriptor to represent the set of features
> + * needed by this SCMI clock, at first use it to lookup into the set of
> + * previously allocated clk_ops to check if a suitable combination of clock
> + * operations was already created; when no match is found allocate a brand new
> + * set of clk_ops satisfying the required combination of features and save it
> + * for future references.
> + *
> + * In this way only one set of clk_ops is ever created for each different
> + * combination that is effectively needed.
> + *
> + * Return: A pointer to the allocated and configured clk_ops on Success, or

Lowercase 'Success'.

> + *        NULL otherwise.
> + */
> +static const struct clk_ops *
> +scmi_clk_ops_select(struct scmi_clk *sclk, bool atomic_capable,
> +                   unsigned int atomic_threshold)
> +{
> +       const struct scmi_clock_info *ci = sclk->info;
> +       unsigned int feats_key = 0;
> +       const struct clk_ops *ops;
> +
> +       /*
> +        * Note that when transport is atomic but SCMI protocol did not
> +        * specify (or support) an enable_latency associated with a
> +        * clock, we default to use atomic operations mode.
> +        */
> +       if (atomic_capable && ci->enable_latency <= atomic_threshold)
> +               feats_key |= BIT(SCMI_CLK_ATOMIC_SUPPORTED);
> +

Can we have a static_assert() here that makes sure 'feats_key' can never
index past the end of clk_ops_db? Since 'feats_key' is only known at
runtime, the assert would have to be against its maximum possible value,
something like:

	static_assert(ARRAY_SIZE(clk_ops_db) >= BIT(SCMI_CLK_MAX_FEATS));
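
Or, if you'd rather also guard the runtime value itself, a simple check
would do (just a sketch):

	if (WARN_ON(feats_key >= ARRAY_SIZE(clk_ops_db)))
		return NULL;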

> +       /* Lookup previously allocated ops */
> +       ops = clk_ops_db[feats_key];
> +       if (!ops) {
> +               ops = scmi_clk_ops_alloc(sclk->dev, feats_key);
> +               if (!ops)
> +                       return NULL;

This could be less nested if the first lookup and the store below were
folded into scmi_clk_ops_alloc(), or with an early return when the
lookup already finds a match:

	ops = clk_ops_db[feats_key];
	if (ops)
		return ops;

	/* Didn't find one */
	ops = scmi_clk_ops_alloc(...)
	if (!ops)
		return NULL;

	clk_ops_db[feats_key] = ops;
	return ops;
		
> +
> +               /* Store new ops combinations */
> +               clk_ops_db[feats_key] = ops;
> +       }
> +
> +       return ops;
> +}
> +
>  static int scmi_clocks_probe(struct scmi_device *sdev)
>  {
>         int idx, count, err;
> @@ -285,16 +357,10 @@ static int scmi_clocks_probe(struct scmi_device *sdev)
>                 sclk->ph = ph;
>                 sclk->dev = dev;
>  
> -               /*
> -                * Note that when transport is atomic but SCMI protocol did not
> -                * specify (or support) an enable_latency associated with a
> -                * clock, we default to use atomic operations mode.
> -                */
> -               if (is_atomic &&
> -                   sclk->info->enable_latency <= atomic_threshold)
> -                       scmi_ops = &scmi_atomic_clk_ops;
> -               else
> -                       scmi_ops = &scmi_clk_ops;
> +               scmi_ops = scmi_clk_ops_select(sclk, is_atomic,

'is_atomic' should probably be 'transport_is_atomic' so this reads
easier.
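
Something like this (untested, just renaming the local used in
scmi_clocks_probe()):

	scmi_ops = scmi_clk_ops_select(sclk, transport_is_atomic,
				       atomic_threshold);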

> +                                              atomic_threshold);
> +               if (!scmi_ops)
> +                       return -ENOMEM;
>  
>                 /* Initialize clock parent data. */
>                 if (sclk->info->num_parents > 0) {


