[RFC 4/7] clk: add support for clock protection

Jerome Brunet jbrunet at baylibre.com
Tue Mar 21 11:33:27 PDT 2017


The patch adds clk_protect and clk_unprotect to the CCF API. These
functions allow a consumer to inform the system that the rate of a clock
is critical to its operation and that it cannot tolerate other consumers
changing the rate or introducing glitches while the clock is protected.

Signed-off-by: Jerome Brunet <jbrunet at baylibre.com>
---
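A minimal consumer usage sketch of the new API is shown below. The function
names, the clock handle and the rate value are hypothetical, purely for
illustration; this snippet is not part of the patch.

#include <linux/clk.h>

/* Hypothetical consumer keeping a glitch-sensitive rate locked */
static int foo_start_stream(struct clk *mclk)
{
	int ret;

	/*
	 * Set the glitch-sensitive rate. The protecting consumer can still
	 * change the rate later, as long as it is the only one holding the
	 * protection.
	 */
	ret = clk_set_rate(mclk, 24576000);
	if (ret)
		return ret;

	/*
	 * From here on, other consumers can no longer change the rate,
	 * reparent the clock or change its phase.
	 */
	clk_protect(mclk);

	return 0;
}

static void foo_stop_stream(struct clk *mclk)
{
	/* Must balance the earlier clk_protect() call */
	clk_unprotect(mclk);
}
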
 drivers/clk/clk.c            | 177 ++++++++++++++++++++++++++++++++++++++++---
 include/linux/clk-provider.h |   1 +
 include/linux/clk.h          |  29 +++++++
 3 files changed, 197 insertions(+), 10 deletions(-)

diff --git a/drivers/clk/clk.c b/drivers/clk/clk.c
index fa77a1841e0f..69db8cc15063 100644
--- a/drivers/clk/clk.c
+++ b/drivers/clk/clk.c
@@ -60,6 +60,7 @@ struct clk_core {
 	bool			orphan;
 	unsigned int		enable_count;
 	unsigned int		prepare_count;
+	unsigned int		protect_count;
 	unsigned long		min_rate;
 	unsigned long		max_rate;
 	unsigned long		accuracy;
@@ -84,6 +85,7 @@ struct clk {
 	const char *con_id;
 	unsigned long min_rate;
 	unsigned long max_rate;
+	unsigned long protect_ucount;
 	struct hlist_node clks_node;
 };
 
@@ -160,6 +162,11 @@ static bool clk_core_is_prepared(struct clk_core *core)
 	return core->ops->is_prepared(core->hw);
 }
 
+static bool clk_core_is_protected(struct clk_core *core)
+{
+	return core->protect_count;
+}
+
 static bool clk_core_is_enabled(struct clk_core *core)
 {
 	/*
@@ -328,6 +335,11 @@ bool clk_hw_is_prepared(const struct clk_hw *hw)
 	return clk_core_is_prepared(hw->core);
 }
 
+bool clk_hw_is_protected(const struct clk_hw *hw)
+{
+	return clk_core_is_protected(hw->core);
+}
+
 bool clk_hw_is_enabled(const struct clk_hw *hw)
 {
 	return clk_core_is_enabled(hw->core);
@@ -584,6 +596,89 @@ int clk_prepare(struct clk *clk)
 }
 EXPORT_SYMBOL_GPL(clk_prepare);
 
+static void clk_core_unprotect(struct clk_core *core)
+{
+	lockdep_assert_held(&prepare_lock);
+
+	if (!core)
+		return;
+
+	if (WARN_ON(core->protect_count == 0))
+		return;
+
+	if (--core->protect_count > 0)
+		return;
+
+	clk_core_unprotect(core->parent);
+}
+
+/**
+ * clk_unprotect - unprotect a clock source
+ * @clk: the clk being unprotected
+ *
+ * clk_unprotect shall be used when a consumer no longer depends on the clock
+ * rate and can tolerate glitches. As with clk_unprepare and clk_disable,
+ * calls to clk_unprotect must be balanced with calls to clk_protect.
+ * clk_unprotect may sleep.
+ */
+void clk_unprotect(struct clk *clk)
+{
+	if (!clk)
+		return;
+
+	clk_prepare_lock();
+
+	if (WARN_ON(clk->protect_ucount == 0)) {
+		/*
+		 * There is something wrong with this consumer protect count.
+		 * Stop here before messing with the provider
+		 */
+		clk_prepare_unlock();
+		return;
+	}
+
+	clk_core_unprotect(clk->core);
+	clk->protect_ucount--;
+	clk_prepare_unlock();
+}
+EXPORT_SYMBOL_GPL(clk_unprotect);
+
+static void clk_core_protect(struct clk_core *core)
+{
+	lockdep_assert_held(&prepare_lock);
+
+	if (!core)
+		return;
+
+	if (core->protect_count == 0)
+		clk_core_protect(core->parent);
+
+	core->protect_count++;
+}
+
+/**
+ * clk_protect - protect a clock source
+ * @clk: the clk being protected
+ *
+ * clk_protect can be used when a consumer depends on the clock rate and can't
+ * tolerate any glitches. The consumer protecting the clock can still make
+ * adjustments to the clock, if it is the only one protecting it. Other
+ * consumers can still use the clock but won't be able to adjust the rate or
+ * reparent the clock while it is protected.
+ * clk_protect may sleep.
+ */
+void clk_protect(struct clk *clk)
+{
+	if (!clk)
+		return;
+
+	clk_prepare_lock();
+	clk_core_protect(clk->core);
+	clk->protect_ucount++;
+	clk_prepare_unlock();
+}
+EXPORT_SYMBOL_GPL(clk_protect);
+
 static void clk_core_disable(struct clk_core *core)
 {
 	lockdep_assert_held(&enable_lock);
@@ -838,7 +933,9 @@ static int clk_core_determine_round(struct clk_core *core,
 {
 	long rate;
 
-	if (core->ops->determine_rate) {
+	if (clk_core_is_protected(core)) {
+		req->rate = core->rate;
+	} else if (core->ops->determine_rate) {
 		return core->ops->determine_rate(core->hw, req);
 	} else if (core->ops->round_rate) {
 		rate = core->ops->round_rate(core->hw, req->rate,
@@ -1381,7 +1478,7 @@ static struct clk_core *clk_calc_new_rates(struct clk_core *core,
 		req.min_rate = min_rate;
 		req.max_rate = max_rate;
 
-		clk_core_init_rate_req(core, req);
+		clk_core_init_rate_req(core, &req);
 
 		ret = clk_core_determine_round(core, &req);
 		if (ret < 0)
@@ -1637,8 +1734,14 @@ int clk_set_rate(struct clk *clk, unsigned long rate)
 	/* prevent racing with updates to the clock topology */
 	clk_prepare_lock();
 
+	if (clk->protect_ucount)
+		clk_core_unprotect(clk->core);
+
 	ret = clk_core_set_rate_nolock(clk->core, rate);
 
+	if (clk->protect_ucount)
+		clk_core_protect(clk->core);
+
 	clk_prepare_unlock();
 
 	return ret;
@@ -1669,12 +1772,18 @@ int clk_set_rate_range(struct clk *clk, unsigned long min, unsigned long max)
 
 	clk_prepare_lock();
 
+	if (clk->protect_ucount)
+		clk_core_unprotect(clk->core);
+
 	if (min != clk->min_rate || max != clk->max_rate) {
 		clk->min_rate = min;
 		clk->max_rate = max;
 		ret = clk_core_set_rate_nolock(clk->core, clk->core->req_rate);
 	}
 
+	if (clk->protect_ucount)
+		clk_core_protect(clk->core);
+
 	clk_prepare_unlock();
 
 	return ret;
@@ -1815,6 +1924,9 @@ static int clk_core_set_parent(struct clk_core *core, struct clk_core *parent)
 	if ((core->flags & CLK_SET_PARENT_GATE) && core->prepare_count)
 		return -EBUSY;
 
+	if (clk_core_is_protected(core))
+		return -EBUSY;
+
 	/* try finding the new parent index */
 	if (parent) {
 		p_index = clk_fetch_parent_index(core, parent);
@@ -1878,11 +1990,24 @@ static int clk_core_set_parent_lock(struct clk_core *core,
  */
 int clk_set_parent(struct clk *clk, struct clk *parent)
 {
+	int ret;
+
 	if (!clk)
 		return 0;
 
-	return clk_core_set_parent_lock(clk->core,
-					parent ? parent->core : NULL);
+	clk_prepare_lock();
+
+	if (clk->protect_ucount)
+		clk_core_unprotect(clk->core);
+
+	ret = clk_core_set_parent(clk->core, parent ? parent->core : NULL);
+
+	if (clk->protect_ucount)
+		clk_core_protect(clk->core);
+
+	clk_prepare_unlock();
+
+	return ret;
 }
 EXPORT_SYMBOL_GPL(clk_set_parent);
 
@@ -1893,7 +2018,10 @@ static int clk_core_set_phase(struct clk_core *core, int degrees)
 	if (!core)
 		return 0;
 
-	trace_clk_set_phase(clk->core, degrees);
+	if (clk_core_is_protected(core))
+		return -EBUSY;
+
+	trace_clk_set_phase(core, degrees);
 
 	if (core->ops->set_phase)
 		ret = core->ops->set_phase(core->hw, degrees);
@@ -1936,7 +2064,15 @@ int clk_set_phase(struct clk *clk, int degrees)
 		degrees += 360;
 
 	clk_prepare_lock();
+
+	if (clk->protect_ucount)
+		clk_core_unprotect(clk->core);
+
 	ret = clk_core_set_phase(clk->core, degrees);
+
+	if (clk->protect_ucount)
+		clk_core_protect(clk->core);
+
 	clk_prepare_unlock();
 
 	return ret;
@@ -2023,11 +2159,12 @@ static void clk_summary_show_one(struct seq_file *s, struct clk_core *c,
 	if (!c)
 		return;
 
-	seq_printf(s, "%*s%-*s %11d %12d %11lu %10lu %-3d\n",
+	seq_printf(s, "%*s%-*s %11d %12d %12d %11lu %10lu %-3d\n",
 		   level * 3 + 1, "",
 		   30 - level * 3, c->name,
-		   c->enable_count, c->prepare_count, clk_core_get_rate(c),
-		   clk_core_get_accuracy(c), clk_core_get_phase(c));
+		   c->enable_count, c->prepare_count, c->protect_count,
+		   clk_core_get_rate(c), clk_core_get_accuracy(c),
+		   clk_core_get_phase(c));
 }
 
 static void clk_summary_show_subtree(struct seq_file *s, struct clk_core *c,
@@ -2049,8 +2186,8 @@ static int clk_summary_show(struct seq_file *s, void *data)
 	struct clk_core *c;
 	struct hlist_head **lists = (struct hlist_head **)s->private;
 
-	seq_puts(s, "   clock                         enable_cnt  prepare_cnt        rate   accuracy   phase\n");
-	seq_puts(s, "----------------------------------------------------------------------------------------\n");
+	seq_puts(s, "   clock                         enable_cnt  prepare_cnt  protect_cnt        rate   accuracy   phase\n");
+	seq_puts(s, "----------------------------------------------------------------------------------------------------\n");
 
 	clk_prepare_lock();
 
@@ -2085,6 +2222,7 @@ static void clk_dump_one(struct seq_file *s, struct clk_core *c, int level)
 	seq_printf(s, "\"%s\": { ", c->name);
 	seq_printf(s, "\"enable_count\": %d,", c->enable_count);
 	seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
+	seq_printf(s, "\"protect_count\": %d,", c->protect_count);
 	seq_printf(s, "\"rate\": %lu,", clk_core_get_rate(c));
 	seq_printf(s, "\"accuracy\": %lu,", clk_core_get_accuracy(c));
 	seq_printf(s, "\"phase\": %d", clk_core_get_phase(c));
@@ -2191,6 +2329,11 @@ static int clk_debug_create_one(struct clk_core *core, struct dentry *pdentry)
 	if (!d)
 		goto err_out;
 
+	d = debugfs_create_u32("clk_protect_count", S_IRUGO, core->dentry,
+			(u32 *)&core->protect_count);
+	if (!d)
+		goto err_out;
+
 	d = debugfs_create_u32("clk_notifier_count", S_IRUGO, core->dentry,
 			(u32 *)&core->notifier_count);
 	if (!d)
@@ -2747,6 +2890,11 @@ void clk_unregister(struct clk *clk)
 	if (clk->core->prepare_count)
 		pr_warn("%s: unregistering prepared clock: %s\n",
 					__func__, clk->core->name);
+
+	if (clk->core->protect_count)
+		pr_warn("%s: unregistering protected clock: %s\n",
+					__func__, clk->core->name);
+
 	kref_put(&clk->core->ref, __clk_release);
 unlock:
 	clk_prepare_unlock();
@@ -2905,6 +3053,15 @@ void __clk_put(struct clk *clk)
 
 	clk_prepare_lock();
 
+	/* Protect count not balanced: warn and sanitize */
+	if (clk->protect_ucount) {
+		pr_warn("%s: releasing protected clock: %s\n",
+					__func__, clk->core->name);
+
+		for (; clk->protect_ucount; clk->protect_ucount--)
+			clk_core_unprotect(clk->core);
+	}
+
 	hlist_del(&clk->clks_node);
 	if (clk->min_rate > clk->core->req_rate ||
 	    clk->max_rate < clk->core->req_rate)
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index a428aec36ace..705a158d9b8f 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -739,6 +739,7 @@ unsigned long clk_hw_get_rate(const struct clk_hw *hw);
 unsigned long __clk_get_flags(struct clk *clk);
 unsigned long clk_hw_get_flags(const struct clk_hw *hw);
 bool clk_hw_is_prepared(const struct clk_hw *hw);
+bool clk_hw_is_protected(const struct clk_hw *hw);
 bool clk_hw_is_enabled(const struct clk_hw *hw);
 bool __clk_is_enabled(struct clk *clk);
 struct clk *__clk_lookup(const char *name);
diff --git a/include/linux/clk.h b/include/linux/clk.h
index e9d36b3e49de..90b72ead4411 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -265,6 +265,30 @@ struct clk *devm_clk_get(struct device *dev, const char *id);
  */
 struct clk *devm_get_clk_from_child(struct device *dev,
 				    struct device_node *np, const char *con_id);
+/**
+ * clk_protect - inform the system when the clock source should be protected.
+ * @clk: clock source
+ *
+ * This function informs the system that the consumer protecting the clock
+ * depends on the rate of the clock source and can't tolerate any glitches
+ * introduced by further clock rate changes or re-parenting of the clock source.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_protect(struct clk *clk);
+
+/**
+ * clk_unprotect - release the protection of the clock source.
+ * @clk: clock source
+ *
+ * This function informs the system that the consumer previously protecting the
+ * clock source can now deal with other consumers altering the clock source.
+ *
+ * The caller must balance the number of protect and unprotect calls.
+ *
+ * Must not be called from within atomic context.
+ */
+void clk_unprotect(struct clk *clk);
 
 /**
  * clk_enable - inform the system when the clock source should be running.
@@ -460,6 +484,11 @@ static inline void clk_put(struct clk *clk) {}
 
 static inline void devm_clk_put(struct device *dev, struct clk *clk) {}
 
+
+static inline void clk_protect(struct clk *clk) {}
+
+static inline void clk_unprotect(struct clk *clk) {}
+
 static inline int clk_enable(struct clk *clk)
 {
 	return 0;
-- 
2.9.3