/*
 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Standard functionality for the common clock API.  See Documentation/clk.txt
 */

#include <linux/clk-private.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/err.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/device.h>
#include <linux/init.h>

static DEFINE_SPINLOCK(enable_lock);
static DEFINE_MUTEX(prepare_lock);

static HLIST_HEAD(clk_root_list);
static HLIST_HEAD(clk_orphan_list);
static LIST_HEAD(clk_notifier_list);

/***    debugfs support    ***/

#ifdef CONFIG_COMMON_CLK_DEBUG
#include <linux/debugfs.h>

static struct dentry *rootdir;
static struct dentry *orphandir;
static int inited = 0;

static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
{
        if (!c)
                return;

        seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
                   level * 3 + 1, "",
                   30 - level * 3, c->name,
                   c->enable_count, c->prepare_count, c->rate);
        seq_printf(s, "\n");
}

static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
                                     int level)
{
        struct clk *child;
        struct hlist_node *tmp;

        if (!c)
                return;

        clk_summary_show_one(s, c, level);

        hlist_for_each_entry(child, tmp, &c->children, child_node)
                clk_summary_show_subtree(s, child, level + 1);
}

static int clk_summary_show(struct seq_file *s, void *data)
{
        struct clk *c;
        struct hlist_node *tmp;

        seq_printf(s, "   clock                        enable_cnt  prepare_cnt  rate\n");
        seq_printf(s, "---------------------------------------------------------------------\n");

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
                clk_summary_show_subtree(s, c, 0);

        hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
                clk_summary_show_subtree(s, c, 0);

        mutex_unlock(&prepare_lock);

        return 0;
}

static int clk_summary_open(struct inode *inode, struct file *file)
{
        return single_open(file, clk_summary_show, inode->i_private);
}

static const struct file_operations clk_summary_fops = {
        .open           = clk_summary_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

static void clk_dump_one(struct seq_file *s, struct clk *c, int level)
{
        if (!c)
                return;

        seq_printf(s, "\"%s\": { ", c->name);
        seq_printf(s, "\"enable_count\": %d,", c->enable_count);
        seq_printf(s, "\"prepare_count\": %d,", c->prepare_count);
        seq_printf(s, "\"rate\": %lu", c->rate);
}

static void clk_dump_subtree(struct seq_file *s, struct clk *c, int level)
{
        struct clk *child;
        struct hlist_node *tmp;

        if (!c)
                return;

        clk_dump_one(s, c, level);

        hlist_for_each_entry(child, tmp, &c->children, child_node) {
                seq_printf(s, ",");
                clk_dump_subtree(s, child, level + 1);
        }

        seq_printf(s, "}");
}

static int clk_dump(struct seq_file *s, void *data)
{
        struct clk *c;
        struct hlist_node *tmp;
        bool first_node = true;

        seq_printf(s, "{");

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(c, tmp, &clk_root_list, child_node) {
                if (!first_node)
                        seq_printf(s, ",");
                first_node = false;
                clk_dump_subtree(s, c, 0);
        }

        hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node) {
                seq_printf(s, ",");
                clk_dump_subtree(s, c, 0);
        }

        mutex_unlock(&prepare_lock);

        seq_printf(s, "}");
        return 0;
}

static int clk_dump_open(struct inode *inode, struct file *file)
{
        return single_open(file, clk_dump, inode->i_private);
}

static const struct file_operations clk_dump_fops = {
        .open           = clk_dump_open,
        .read           = seq_read,
        .llseek         = seq_lseek,
        .release        = single_release,
};

/* caller must hold prepare_lock */
static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
{
        struct dentry *d;
        int ret = -ENOMEM;

        if (!clk || !pdentry) {
                ret = -EINVAL;
                goto out;
        }

        d = debugfs_create_dir(clk->name, pdentry);
        if (!d)
                goto out;

        clk->dentry = d;

        d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
                        (u32 *)&clk->rate);
        if (!d)
                goto err_out;

        d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
                        (u32 *)&clk->flags);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->prepare_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->enable_count);
        if (!d)
                goto err_out;

        d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
                        (u32 *)&clk->notifier_count);
        if (!d)
                goto err_out;

        ret = 0;
        goto out;

err_out:
        debugfs_remove(clk->dentry);
out:
        return ret;
}

/* caller must hold prepare_lock */
static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
{
        struct clk *child;
        struct hlist_node *tmp;
        int ret = -EINVAL;

        if (!clk || !pdentry)
                goto out;

        ret = clk_debug_create_one(clk, pdentry);
        if (ret)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_debug_create_subtree(child, clk->dentry);

        ret = 0;

out:
        return ret;
}

/**
 * clk_debug_register - add a clk node to the debugfs clk tree
 * @clk: the clk being added to the debugfs clk tree
 *
 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
 * initialized.  Otherwise it bails out early since the debugfs clk tree
 * will be created lazily by clk_debug_init as part of a late_initcall.
 *
 * Caller must hold prepare_lock.  Only clk_init calls this function (so
 * far), so that requirement is taken care of.
 */
static int clk_debug_register(struct clk *clk)
{
        struct clk *parent;
        struct dentry *pdentry;
        int ret = 0;

        if (!inited)
                goto out;

        parent = clk->parent;

        /*
         * Check to see if a clk is a root clk.  Also check that it is
         * safe to add this clk to debugfs
         */
        if (!parent)
                if (clk->flags & CLK_IS_ROOT)
                        pdentry = rootdir;
                else
                        pdentry = orphandir;
        else
                if (parent->dentry)
                        pdentry = parent->dentry;
                else
                        goto out;

        ret = clk_debug_create_subtree(clk, pdentry);

out:
        return ret;
}

/**
 * clk_debug_init - lazily create the debugfs clk tree visualization
 *
 * clks are often initialized very early during boot before memory can
 * be dynamically allocated and well before debugfs is setup.
 * clk_debug_init walks the clk tree hierarchy while holding
 * prepare_lock and creates the topology as part of a late_initcall,
 * thus ensuring that clks initialized very early will still be
 * represented in the debugfs clk tree.  This function should only be
 * called once at boot-time, and all other clks added dynamically will
 * be done so with clk_debug_register.
 */
static int __init clk_debug_init(void)
{
        struct clk *clk;
        struct hlist_node *tmp;
        struct dentry *d;

        rootdir = debugfs_create_dir("clk", NULL);
        if (!rootdir)
                return -ENOMEM;

        d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
                                &clk_summary_fops);
        if (!d)
                return -ENOMEM;

        d = debugfs_create_file("clk_dump", S_IRUGO, rootdir, NULL,
                                &clk_dump_fops);
        if (!d)
                return -ENOMEM;

        orphandir = debugfs_create_dir("orphans", rootdir);
        if (!orphandir)
                return -ENOMEM;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_debug_create_subtree(clk, rootdir);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_debug_create_subtree(clk, orphandir);

        inited = 1;

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_debug_init);
#else
static inline int clk_debug_register(struct clk *clk) { return 0; }
#endif

/* caller must hold prepare_lock */
static void clk_disable_unused_subtree(struct clk *clk)
{
        struct clk *child;
        struct hlist_node *tmp;
        unsigned long flags;

        if (!clk)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_disable_unused_subtree(child);

        spin_lock_irqsave(&enable_lock, flags);

        if (clk->enable_count)
                goto unlock_out;

        if (clk->flags & CLK_IGNORE_UNUSED)
                goto unlock_out;

        /*
         * some gate clocks have special needs during the disable-unused
         * sequence.  call .disable_unused if available, otherwise fall
         * back to .disable
         */
        if (__clk_is_enabled(clk)) {
                if (clk->ops->disable_unused)
                        clk->ops->disable_unused(clk->hw);
                else if (clk->ops->disable)
                        clk->ops->disable(clk->hw);
        }

unlock_out:
        spin_unlock_irqrestore(&enable_lock, flags);

out:
        return;
}

static int clk_disable_unused(void)
{
        struct clk *clk;
        struct hlist_node *tmp;

        mutex_lock(&prepare_lock);

        hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
                clk_disable_unused_subtree(clk);

        hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
                clk_disable_unused_subtree(clk);

        mutex_unlock(&prepare_lock);

        return 0;
}
late_initcall(clk_disable_unused);

/***    helper functions    ***/

const char *__clk_get_name(struct clk *clk)
{
        return !clk ? NULL : clk->name;
}
EXPORT_SYMBOL_GPL(__clk_get_name);

struct clk_hw *__clk_get_hw(struct clk *clk)
{
        return !clk ? NULL : clk->hw;
}

u8 __clk_get_num_parents(struct clk *clk)
{
        return !clk ? 0 : clk->num_parents;
}

struct clk *__clk_get_parent(struct clk *clk)
{
        return !clk ? NULL : clk->parent;
}

unsigned int __clk_get_enable_count(struct clk *clk)
{
        return !clk ? 0 : clk->enable_count;
}

unsigned int __clk_get_prepare_count(struct clk *clk)
{
        return !clk ? 0 : clk->prepare_count;
}

unsigned long __clk_get_rate(struct clk *clk)
{
        unsigned long ret;

        if (!clk) {
                ret = 0;
                goto out;
        }

        ret = clk->rate;

        if (clk->flags & CLK_IS_ROOT)
                goto out;

        if (!clk->parent)
                ret = 0;

out:
        return ret;
}

unsigned long __clk_get_flags(struct clk *clk)
{
        return !clk ? 0 : clk->flags;
}

bool __clk_is_enabled(struct clk *clk)
{
        int ret;

        if (!clk)
                return false;

        /*
         * .is_enabled is only mandatory for clocks that gate;
         * fall back to the software usage counter if .is_enabled is missing
         */
        if (!clk->ops->is_enabled) {
                ret = clk->enable_count ? 1 : 0;
                goto out;
        }

        ret = clk->ops->is_enabled(clk->hw);
out:
        return !!ret;
}

static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
{
        struct clk *child;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!strcmp(clk->name, name))
                return clk;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_lookup_subtree(name, child);
                if (ret)
                        return ret;
        }

        return NULL;
}

struct clk *__clk_lookup(const char *name)
{
        struct clk *root_clk;
        struct clk *ret;
        struct hlist_node *tmp;

        if (!name)
                return NULL;

        /* search the 'proper' clk tree first */
        hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        /* if not found, then search the orphan tree */
        hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
                ret = __clk_lookup_subtree(name, root_clk);
                if (ret)
                        return ret;
        }

        return NULL;
}

/***    clk api    ***/

void __clk_unprepare(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(clk->prepare_count == 0))
                return;

        if (--clk->prepare_count > 0)
                return;

        WARN_ON(clk->enable_count > 0);

        if (clk->ops->unprepare)
                clk->ops->unprepare(clk->hw);

        __clk_unprepare(clk->parent);
}

/**
 * clk_unprepare - undo preparation of a clock source
 * @clk: the clk being unprepared
 *
 * clk_unprepare may sleep, which differentiates it from clk_disable.  In a
 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk gate operation may require a fast and a
 * slow part.  It is for this reason that clk_unprepare and clk_disable are
 * not mutually exclusive.  In fact clk_disable must be called before
 * clk_unprepare.
 */
void clk_unprepare(struct clk *clk)
{
        mutex_lock(&prepare_lock);
        __clk_unprepare(clk);
        mutex_unlock(&prepare_lock);
}
EXPORT_SYMBOL_GPL(clk_unprepare);

int __clk_prepare(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (clk->prepare_count == 0) {
                ret = __clk_prepare(clk->parent);
                if (ret)
                        return ret;

                if (clk->ops->prepare) {
                        ret = clk->ops->prepare(clk->hw);
                        if (ret) {
                                __clk_unprepare(clk->parent);
                                return ret;
                        }
                }
        }

        clk->prepare_count++;

        return 0;
}

/**
 * clk_prepare - prepare a clock source
 * @clk: the clk being prepared
 *
 * clk_prepare may sleep, which differentiates it from clk_enable.  In a
 * simple case, clk_prepare can be used instead of clk_enable to ungate a clk
 * if the operation may sleep.  One example is a clk which is accessed over
 * I2C.  In the complex case a clk ungate operation may require a fast and a
 * slow part.  It is for this reason that clk_prepare and clk_enable are not
 * mutually exclusive.  In fact clk_prepare must be called before clk_enable.
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_prepare(struct clk *clk)
{
        int ret;

        mutex_lock(&prepare_lock);
        ret = __clk_prepare(clk);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_prepare);

static void __clk_disable(struct clk *clk)
{
        if (!clk)
                return;

        if (WARN_ON(IS_ERR(clk)))
                return;

        if (WARN_ON(clk->enable_count == 0))
                return;

        if (--clk->enable_count > 0)
                return;

        if (clk->ops->disable)
                clk->ops->disable(clk->hw);

        __clk_disable(clk->parent);
}

/**
 * clk_disable - gate a clock
 * @clk: the clk being gated
 *
 * clk_disable must not sleep, which differentiates it from clk_unprepare.  In
 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
 * clk if the operation is fast and will never sleep.  One example is a
 * SoC-internal clk which is controlled via simple register writes.  In the
 * complex case a clk gate operation may require a fast and a slow part.  It
 * is for this reason that clk_unprepare and clk_disable are not mutually
 * exclusive.  In fact clk_disable must be called before clk_unprepare.
 */
void clk_disable(struct clk *clk)
{
        unsigned long flags;

        spin_lock_irqsave(&enable_lock, flags);
        __clk_disable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);
}
EXPORT_SYMBOL_GPL(clk_disable);

static int __clk_enable(struct clk *clk)
{
        int ret = 0;

        if (!clk)
                return 0;

        if (WARN_ON(clk->prepare_count == 0))
                return -ESHUTDOWN;

        if (clk->enable_count == 0) {
                ret = __clk_enable(clk->parent);
                if (ret)
                        return ret;

                if (clk->ops->enable) {
                        ret = clk->ops->enable(clk->hw);
                        if (ret) {
                                __clk_disable(clk->parent);
                                return ret;
                        }
                }
        }

        clk->enable_count++;
        return 0;
}

/**
 * clk_enable - ungate a clock
 * @clk: the clk being ungated
 *
 * clk_enable must not sleep, which differentiates it from clk_prepare.  In a
 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
 * if the operation will never sleep.  One example is a SoC-internal clk which
 * is controlled via simple register writes.  In the complex case a clk ungate
 * operation may require a fast and a slow part.  It is for this reason that
 * clk_enable and clk_prepare are not mutually exclusive.  In fact clk_prepare
 * must be called before clk_enable.  Returns 0 on success, a negative error
 * code otherwise.
 */
int clk_enable(struct clk *clk)
{
        unsigned long flags;
        int ret;

        spin_lock_irqsave(&enable_lock, flags);
        ret = __clk_enable(clk);
        spin_unlock_irqrestore(&enable_lock, flags);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_enable);

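/*
 * Example: the canonical consumer sequence pairs prepare/enable on the way
 * up and disable/unprepare on the way down.  This is an illustrative sketch,
 * not code from this file; the device and the "uart" clock name are
 * hypothetical.
 *
 *      struct clk *clk = clk_get(dev, "uart");
 *      int ret;
 *
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 *
 *      ret = clk_prepare(clk);         // may sleep; non-atomic context only
 *      if (ret)
 *              return ret;
 *
 *      ret = clk_enable(clk);          // must not sleep; atomic-safe
 *      if (ret) {
 *              clk_unprepare(clk);
 *              return ret;
 *      }
 *
 *      // ... use the hardware ...
 *
 *      clk_disable(clk);               // fast part first
 *      clk_unprepare(clk);             // then the sleepable part
 */
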
/**
 * __clk_round_rate - round the given rate for a clk
 * @clk: round the rate of this clock
 * @rate: the rate which is to be rounded
 *
 * Caller must hold prepare_lock.  Useful for clk_ops such as .set_rate
 */
unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long parent_rate = 0;

        if (!clk)
                return 0;

        if (!clk->ops->round_rate) {
                if (clk->flags & CLK_SET_RATE_PARENT)
                        return __clk_round_rate(clk->parent, rate);
                else
                        return clk->rate;
        }

        if (clk->parent)
                parent_rate = clk->parent->rate;

        return clk->ops->round_rate(clk->hw, rate, &parent_rate);
}

/**
 * clk_round_rate - round the given rate for a clk
 * @clk: the clk for which we are rounding a rate
 * @rate: the rate which is to be rounded
 *
 * Takes in a rate as input and rounds it to a rate that the clk can actually
 * use, which is then returned.  If clk doesn't support the round_rate
 * operation then the clk's cached rate is returned (or, with
 * CLK_SET_RATE_PARENT, the rounded parent rate).
 */
long clk_round_rate(struct clk *clk, unsigned long rate)
{
        unsigned long ret;

        mutex_lock(&prepare_lock);
        ret = __clk_round_rate(clk, rate);
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_round_rate);

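/*
 * Example: query the achievable rate before committing to it.  Illustrative
 * sketch only; the 48 MHz target is hypothetical.
 *
 *      long rounded = clk_round_rate(clk, 48000000);
 *
 *      // rounded now holds the rate that clk_set_rate(clk, 48000000) would
 *      // actually produce, without touching the hardware; a consumer can
 *      // reject it if it is too far from the requested rate.
 */
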
/**
 * __clk_notify - call clk notifier chain
 * @clk: struct clk * that is changing rate
 * @msg: clk notifier type (see include/linux/clk.h)
 * @old_rate: old clk rate
 * @new_rate: new clk rate
 *
 * Triggers a notifier call chain on the clk rate-change notification
 * for 'clk'.  Passes a pointer to the struct clk and the previous
 * and current rates to the notifier callback.  Intended to be called by
 * internal clock code only.  Returns NOTIFY_DONE from the last driver
 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
 * a driver returns that.
 */
static int __clk_notify(struct clk *clk, unsigned long msg,
                unsigned long old_rate, unsigned long new_rate)
{
        struct clk_notifier *cn;
        struct clk_notifier_data cnd;
        int ret = NOTIFY_DONE;

        cnd.clk = clk;
        cnd.old_rate = old_rate;
        cnd.new_rate = new_rate;

        list_for_each_entry(cn, &clk_notifier_list, node) {
                if (cn->clk == clk) {
                        ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
                                        &cnd);
                        break;
                }
        }

        return ret;
}

/**
 * __clk_recalc_rates
 * @clk: first clk in the subtree
 * @msg: notification type (see include/linux/clk.h)
 *
 * Walks the subtree of clks starting with clk and recalculates rates as it
 * goes.  Note that if a clk does not implement the .recalc_rate callback then
 * it is assumed that the clock will take on the rate of its parent.
 *
 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification,
 * if necessary.
 *
 * Caller must hold prepare_lock.
 */
static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
{
        unsigned long old_rate;
        unsigned long parent_rate = 0;
        struct hlist_node *tmp;
        struct clk *child;

        old_rate = clk->rate;

        if (clk->parent)
                parent_rate = clk->parent->rate;

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                clk->rate = parent_rate;

        /*
         * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
         * & ABORT_RATE_CHANGE notifiers
         */
        if (clk->notifier_count && msg)
                __clk_notify(clk, msg, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                __clk_recalc_rates(child, msg);
}

/**
 * clk_get_rate - return the rate of clk
 * @clk: the clk whose rate is being returned
 *
 * Simply returns the cached rate of the clk, unless the CLK_GET_RATE_NOCACHE
 * flag is set, in which case a recalc_rate is issued first.
 * If clk is NULL then returns 0.
 */
unsigned long clk_get_rate(struct clk *clk)
{
        unsigned long rate;

        mutex_lock(&prepare_lock);

        if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
                __clk_recalc_rates(clk, 0);

        rate = __clk_get_rate(clk);
        mutex_unlock(&prepare_lock);

        return rate;
}
EXPORT_SYMBOL_GPL(clk_get_rate);

/**
 * __clk_speculate_rates
 * @clk: first clk in the subtree
 * @parent_rate: the "future" rate of clk's parent
 *
 * Walks the subtree of clks starting with clk, speculating rates as it
 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
 *
 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
 * pre-rate change notifications and returns early if no clks in the
 * subtree have subscribed to the notifications.  Note that if a clk does not
 * implement the .recalc_rate callback then it is assumed that the clock will
 * take on the rate of its parent.
 *
 * Caller must hold prepare_lock.
 */
static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
{
        struct hlist_node *tmp;
        struct clk *child;
        unsigned long new_rate;
        int ret = NOTIFY_DONE;

        if (clk->ops->recalc_rate)
                new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
        else
                new_rate = parent_rate;

        /* abort the rate change if a driver returns NOTIFY_BAD */
        if (clk->notifier_count)
                ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);

        if (ret == NOTIFY_BAD)
                goto out;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                ret = __clk_speculate_rates(child, new_rate);
                if (ret == NOTIFY_BAD)
                        break;
        }

out:
        return ret;
}

static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
{
        struct clk *child;
        struct hlist_node *tmp;

        clk->new_rate = new_rate;

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                if (child->ops->recalc_rate)
                        child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
                else
                        child->new_rate = new_rate;
                clk_calc_subtree(child, child->new_rate);
        }
}

/*
 * calculate the new rates returning the topmost clock that has to be
 * changed.
 */
static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
{
        struct clk *top = clk;
        unsigned long best_parent_rate = 0;
        unsigned long new_rate;

        /* sanity */
        if (IS_ERR_OR_NULL(clk))
                return NULL;

        /* save parent rate, if it exists */
        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        /* never propagate up to the parent */
        if (!(clk->flags & CLK_SET_RATE_PARENT)) {
                if (!clk->ops->round_rate) {
                        clk->new_rate = clk->rate;
                        return NULL;
                }
                new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
                goto out;
        }

        /* need clk->parent from here on out */
        if (!clk->parent) {
                pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
                return NULL;
        }

        if (!clk->ops->round_rate) {
                top = clk_calc_new_rates(clk->parent, rate);
                new_rate = clk->parent->new_rate;

                goto out;
        }

        new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);

        if (best_parent_rate != clk->parent->rate) {
                top = clk_calc_new_rates(clk->parent, best_parent_rate);

                goto out;
        }

out:
        clk_calc_subtree(clk, new_rate);

        return top;
}

/*
 * Notify about rate changes in a subtree.  Always walk down the whole tree
 * so that in case of an error we can walk down the whole tree again and
 * abort the change.
 */
static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
{
        struct hlist_node *tmp;
        struct clk *child, *fail_clk = NULL;
        int ret = NOTIFY_DONE;

        if (clk->rate == clk->new_rate)
                return NULL;

        if (clk->notifier_count) {
                ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
                if (ret == NOTIFY_BAD)
                        fail_clk = clk;
        }

        hlist_for_each_entry(child, tmp, &clk->children, child_node) {
                clk = clk_propagate_rate_change(child, event);
                if (clk)
                        fail_clk = clk;
        }

        return fail_clk;
}

/*
 * walk down a subtree and set the new rates, notifying the rate
 * change on the way
 */
static void clk_change_rate(struct clk *clk)
{
        struct clk *child;
        unsigned long old_rate;
        unsigned long best_parent_rate = 0;
        struct hlist_node *tmp;

        old_rate = clk->rate;

        if (clk->parent)
                best_parent_rate = clk->parent->rate;

        if (clk->ops->set_rate)
                clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);

        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
        else
                clk->rate = best_parent_rate;

        if (clk->notifier_count && old_rate != clk->rate)
                __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);

        hlist_for_each_entry(child, tmp, &clk->children, child_node)
                clk_change_rate(child);
}

/**
 * clk_set_rate - specify a new rate for clk
 * @clk: the clk whose rate is being changed
 * @rate: the new rate for clk
 *
 * In the simplest case clk_set_rate will only adjust the rate of clk.
 *
 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
 * propagate up to clk's parent; whether or not this happens depends on the
 * outcome of clk's .round_rate implementation.  If *parent_rate is unchanged
 * after calling .round_rate then upstream parent propagation is ignored.  If
 * *parent_rate comes back with a new rate for clk's parent then we propagate
 * up to clk's parent and set its rate.  Upward propagation will continue
 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
 * .round_rate stops requesting changes to clk's parent_rate.
 *
 * Rate changes are accomplished via tree traversal that also recalculates the
 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
int clk_set_rate(struct clk *clk, unsigned long rate)
{
        struct clk *top, *fail_clk;
        int ret = 0;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        /* bail early if nothing to do */
        if (rate == clk->rate)
                goto out;

        if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
                ret = -EBUSY;
                goto out;
        }

        /* calculate new rates and get the topmost changed clock */
        top = clk_calc_new_rates(clk, rate);
        if (!top) {
                ret = -EINVAL;
                goto out;
        }

        /* notify that we are about to change rates */
        fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
        if (fail_clk) {
                pr_warn("%s: failed to set %s rate\n", __func__,
                                fail_clk->name);
                clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
                ret = -EBUSY;
                goto out;
        }

        /* change the rates */
        clk_change_rate(top);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_rate);

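/*
 * Example: a consumer changing a clock's rate.  Illustrative sketch; the
 * device and the target rate are hypothetical.  Note that a clk carrying
 * CLK_SET_RATE_GATE fails here with -EBUSY while it is prepared.
 *
 *      ret = clk_set_rate(clk, 148500000);
 *      if (ret)
 *              dev_err(dev, "failed to set pixel clock: %d\n", ret);
 *      else
 *              actual = clk_get_rate(clk);     // re-read; hw may round
 */
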
/**
 * clk_get_parent - return the parent of a clk
 * @clk: the clk whose parent gets returned
 *
 * Simply returns clk->parent.  Returns NULL if clk is NULL.
 */
struct clk *clk_get_parent(struct clk *clk)
{
        struct clk *parent;

        mutex_lock(&prepare_lock);
        parent = __clk_get_parent(clk);
        mutex_unlock(&prepare_lock);

        return parent;
}
EXPORT_SYMBOL_GPL(clk_get_parent);

/*
 * .get_parent is mandatory for clocks with multiple possible parents.  It is
 * optional for single-parent clocks.  Always call .get_parent if it is
 * available and WARN if it is missing for multi-parent clocks.
 *
 * For single-parent clocks without .get_parent, first check to see if the
 * .parents array exists, and if so use it to avoid an expensive tree
 * traversal.  If .parents does not exist then walk the tree with __clk_lookup.
 */
static struct clk *__clk_init_parent(struct clk *clk)
{
        struct clk *ret = NULL;
        u8 index;

        /* handle the trivial cases */

        if (!clk->num_parents)
                goto out;

        if (clk->num_parents == 1) {
                if (IS_ERR_OR_NULL(clk->parent))
                        ret = clk->parent = __clk_lookup(clk->parent_names[0]);
                ret = clk->parent;
                goto out;
        }

        if (!clk->ops->get_parent) {
                WARN(!clk->ops->get_parent,
                        "%s: multi-parent clocks must implement .get_parent\n",
                        __func__);
                goto out;
        }

        /*
         * Do our best to cache parent clocks in clk->parents.  This prevents
         * unnecessary and expensive calls to __clk_lookup.  We don't set
         * clk->parent here; that is done by the calling function
         */

        index = clk->ops->get_parent(clk->hw);

        if (!clk->parents)
                clk->parents =
                        kzalloc((sizeof(struct clk*) * clk->num_parents),
                                        GFP_KERNEL);

        if (!clk->parents)
                ret = __clk_lookup(clk->parent_names[index]);
        else if (!clk->parents[index])
                ret = clk->parents[index] =
                        __clk_lookup(clk->parent_names[index]);
        else
                ret = clk->parents[index];

out:
        return ret;
}

void __clk_reparent(struct clk *clk, struct clk *new_parent)
{
#ifdef CONFIG_COMMON_CLK_DEBUG
        struct dentry *d;
        struct dentry *new_parent_d;
#endif

        if (!clk || !new_parent)
                return;

        hlist_del(&clk->child_node);

        if (new_parent)
                hlist_add_head(&clk->child_node, &new_parent->children);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

#ifdef CONFIG_COMMON_CLK_DEBUG
        if (!inited)
                goto out;

        if (new_parent)
                new_parent_d = new_parent->dentry;
        else
                new_parent_d = orphandir;

        d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
                        new_parent_d, clk->name);
        if (d)
                clk->dentry = d;
        else
                pr_debug("%s: failed to rename debugfs entry for %s\n",
                                __func__, clk->name);
out:
#endif

        clk->parent = new_parent;

        __clk_recalc_rates(clk, POST_RATE_CHANGE);
}

static int __clk_set_parent(struct clk *clk, struct clk *parent)
{
        struct clk *old_parent;
        unsigned long flags;
        int ret = -EINVAL;
        u8 i;

        old_parent = clk->parent;

        if (!clk->parents)
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                GFP_KERNEL);

        /*
         * find index of new parent clock using cached parent ptrs,
         * or if not yet cached, use string name comparison and cache
         * them now to avoid future calls to __clk_lookup.
         */
        for (i = 0; i < clk->num_parents; i++) {
                if (clk->parents && clk->parents[i] == parent)
                        break;
                else if (!strcmp(clk->parent_names[i], parent->name)) {
                        if (clk->parents)
                                clk->parents[i] = __clk_lookup(parent->name);
                        break;
                }
        }

        if (i == clk->num_parents) {
                pr_debug("%s: clock %s is not a possible parent of clock %s\n",
                                __func__, parent->name, clk->name);
                goto out;
        }

        /* migrate prepare and enable */
        if (clk->prepare_count)
                __clk_prepare(parent);

        /* FIXME replace with clk_is_enabled(clk) someday */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_enable(parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        /* change clock input source */
        ret = clk->ops->set_parent(clk->hw, i);

        /* clean up old prepare and enable */
        spin_lock_irqsave(&enable_lock, flags);
        if (clk->enable_count)
                __clk_disable(old_parent);
        spin_unlock_irqrestore(&enable_lock, flags);

        if (clk->prepare_count)
                __clk_unprepare(old_parent);

out:
        return ret;
}

/**
 * clk_set_parent - switch the parent of a mux clk
 * @clk: the mux clk whose input we are switching
 * @parent: the new input to clk
 *
 * Re-parent clk to use parent as its new input source.  If clk has the
 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
 * operation to succeed.  After successfully changing clk's parent
 * clk_set_parent will update the clk topology, sysfs topology and
 * propagate rate recalculation via __clk_recalc_rates.  Returns 0 on
 * success, a negative error code otherwise.
 */
int clk_set_parent(struct clk *clk, struct clk *parent)
{
        int ret = 0;

        if (!clk || !clk->ops)
                return -EINVAL;

        if (!clk->ops->set_parent)
                return -ENOSYS;

        /* prevent racing with updates to the clock topology */
        mutex_lock(&prepare_lock);

        if (clk->parent == parent)
                goto out;

        /* propagate PRE_RATE_CHANGE notifications */
        if (clk->notifier_count)
                ret = __clk_speculate_rates(clk, parent->rate);

        /* abort if a driver objects */
        if (ret == NOTIFY_STOP)
                goto out;

        /* only re-parent if the clock is not in use */
        if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
                ret = -EBUSY;
        else
                ret = __clk_set_parent(clk, parent);

        /* propagate ABORT_RATE_CHANGE if .set_parent failed */
        if (ret) {
                __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
                goto out;
        }

        /* propagate rate recalculation downstream */
        __clk_reparent(clk, parent);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_set_parent);

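/*
 * Example: switching a mux clock between two upstream PLLs, e.g. to move a
 * consumer onto a slower parent before entering a low-power state.
 * Illustrative sketch; the clock names are hypothetical.
 *
 *      struct clk *mux = clk_get(dev, "cpu_mux");
 *      struct clk *slow = clk_get(dev, "pll_fixed");
 *
 *      ret = clk_set_parent(mux, slow);
 *      if (ret)        // -EBUSY if CLK_SET_PARENT_GATE is set and mux is prepared
 *              dev_err(dev, "reparent failed: %d\n", ret);
 */
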
/**
 * __clk_init - initialize the data structures in a struct clk
 * @dev: device initializing this clk, placeholder for now
 * @clk: clk being initialized
 *
 * Initializes the lists in struct clk, queries the hardware for the
 * parent and rate and sets them both.
 */
int __clk_init(struct device *dev, struct clk *clk)
{
        int i, ret = 0;
        struct clk *orphan;
        struct hlist_node *tmp, *tmp2;

        if (!clk)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* check to see if a clock with this name is already registered */
        if (__clk_lookup(clk->name)) {
                pr_debug("%s: clk %s already initialized\n",
                                __func__, clk->name);
                ret = -EEXIST;
                goto out;
        }

        /* check that clk_ops are sane.  See Documentation/clk.txt */
        if (clk->ops->set_rate &&
                        !(clk->ops->round_rate && clk->ops->recalc_rate)) {
                pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        if (clk->ops->set_parent && !clk->ops->get_parent) {
                pr_warning("%s: %s must implement .get_parent & .set_parent\n",
                                __func__, clk->name);
                ret = -EINVAL;
                goto out;
        }

        /* throw a WARN if any entries in parent_names are NULL */
        for (i = 0; i < clk->num_parents; i++)
                WARN(!clk->parent_names[i],
                                "%s: invalid NULL in %s's .parent_names\n",
                                __func__, clk->name);

        /*
         * Allocate an array of struct clk *'s to avoid unnecessary string
         * look-ups of clk's possible parents.  This can fail for clocks passed
         * in to clk_init during early boot; thus any access to clk->parents[]
         * must always check for a NULL pointer and try to populate it if
         * necessary.
         *
         * If clk->parents is not NULL we skip this entire block.  This allows
         * for clock drivers to statically initialize clk->parents.
         */
        if (clk->num_parents > 1 && !clk->parents) {
                clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
                                GFP_KERNEL);
                /*
                 * __clk_lookup returns NULL for parents that have not been
                 * clk_init'd; thus any access to clk->parents[] must check
                 * for a NULL pointer.  We can always perform lazy lookups for
                 * missing parents later on.
                 */
                if (clk->parents)
                        for (i = 0; i < clk->num_parents; i++)
                                clk->parents[i] =
                                        __clk_lookup(clk->parent_names[i]);
        }

        clk->parent = __clk_init_parent(clk);

        /*
         * Populate clk->parent if parent has already been __clk_init'd.  If
         * parent has not yet been __clk_init'd then place clk in the orphan
         * list.  If clk has set the CLK_IS_ROOT flag then place it in the root
         * clk list.
         *
         * Every time a new clk is clk_init'd then we walk the list of orphan
         * clocks and re-parent any that are children of the clock currently
         * being clk_init'd.
         */
        if (clk->parent)
                hlist_add_head(&clk->child_node,
                                &clk->parent->children);
        else if (clk->flags & CLK_IS_ROOT)
                hlist_add_head(&clk->child_node, &clk_root_list);
        else
                hlist_add_head(&clk->child_node, &clk_orphan_list);

        /*
         * Set clk's rate.  The preferred method is to use .recalc_rate.  For
         * simple clocks and lazy developers the default fallback is to use the
         * parent's rate.  If a clock doesn't have a parent (or is orphaned)
         * then rate is set to zero.
         */
        if (clk->ops->recalc_rate)
                clk->rate = clk->ops->recalc_rate(clk->hw,
                                __clk_get_rate(clk->parent));
        else if (clk->parent)
                clk->rate = clk->parent->rate;
        else
                clk->rate = 0;

        /*
         * walk the list of orphan clocks and reparent any that are children of
         * this clock
         */
        hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
                if (orphan->ops->get_parent) {
                        i = orphan->ops->get_parent(orphan->hw);
                        if (!strcmp(clk->name, orphan->parent_names[i]))
                                __clk_reparent(orphan, clk);
                        continue;
                }

                for (i = 0; i < orphan->num_parents; i++)
                        if (!strcmp(clk->name, orphan->parent_names[i])) {
                                __clk_reparent(orphan, clk);
                                break;
                        }
        }

        /*
         * optional platform-specific magic
         *
         * The .init callback is not used by any of the basic clock types, but
         * exists for weird hardware that must perform initialization magic.
         * Please consider other ways of solving initialization problems before
         * using this callback, as its use is discouraged.
         */
        if (clk->ops->init)
                clk->ops->init(clk->hw);

        clk_debug_register(clk);

out:
        mutex_unlock(&prepare_lock);

        return ret;
}

/**
 * __clk_register - register a clock and return a cookie.
 *
 * Same as clk_register, except that the .clk field inside hw shall point to a
 * preallocated (generally statically allocated) struct clk.  None of the
 * fields of the struct clk need to be initialized.
 *
 * The data pointed to by .init and .clk field shall NOT be marked as init
 * data.
 *
 * __clk_register is only exposed via clk-private.h and is intended for use
 * with very large numbers of clocks that need to be statically initialized.
 * It is a layering violation to include clk-private.h from any code which
 * implements a clock's .ops; as such any statically initialized clock data
 * MUST be in a separate C file from the logic that implements its operations.
 * Returns 0 on success, otherwise an error code.
 */
struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
{
        int ret;
        struct clk *clk;

        clk = hw->clk;
        clk->name = hw->init->name;
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->parent_names = hw->init->parent_names;
        clk->num_parents = hw->init->num_parents;

        ret = __clk_init(dev, clk);
        if (ret)
                return ERR_PTR(ret);

        return clk;
}
EXPORT_SYMBOL_GPL(__clk_register);

static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
{
        int i, ret;

        clk->name = kstrdup(hw->init->name, GFP_KERNEL);
        if (!clk->name) {
                pr_err("%s: could not allocate clk->name\n", __func__);
                ret = -ENOMEM;
                goto fail_name;
        }
        clk->ops = hw->init->ops;
        clk->hw = hw;
        clk->flags = hw->init->flags;
        clk->num_parents = hw->init->num_parents;
        hw->clk = clk;

        /* allocate local copy in case parent_names is __initdata */
        clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
                        GFP_KERNEL);
        if (!clk->parent_names) {
                pr_err("%s: could not allocate clk->parent_names\n", __func__);
                ret = -ENOMEM;
                goto fail_parent_names;
        }

        /* copy each string name in case parent_names is __initdata */
        for (i = 0; i < clk->num_parents; i++) {
                clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
                                                GFP_KERNEL);
                if (!clk->parent_names[i]) {
                        pr_err("%s: could not copy parent_names\n", __func__);
                        ret = -ENOMEM;
                        goto fail_parent_names_copy;
                }
        }

        ret = __clk_init(dev, clk);
        if (!ret)
                return 0;

fail_parent_names_copy:
        while (--i >= 0)
                kfree(clk->parent_names[i]);
        kfree(clk->parent_names);
fail_parent_names:
        kfree(clk->name);
fail_name:
        return ret;
}

/**
 * clk_register - allocate a new clock, register it and return an opaque cookie
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * clk_register is the primary interface for populating the clock tree with
 * new clock nodes.  It returns a pointer to the newly allocated struct clk
 * which cannot be dereferenced by driver code but may be used in conjunction
 * with the rest of the clock API.  In the event of an error clk_register will
 * return an error code; drivers must test for an error code after calling
 * clk_register.
 */
struct clk *clk_register(struct device *dev, struct clk_hw *hw)
{
        int ret;
        struct clk *clk;

        clk = kzalloc(sizeof(*clk), GFP_KERNEL);
        if (!clk) {
                pr_err("%s: could not allocate clk\n", __func__);
                ret = -ENOMEM;
                goto fail_out;
        }

        ret = _clk_register(dev, hw, clk);
        if (!ret)
                return clk;

        kfree(clk);
fail_out:
        return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(clk_register);

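/*
 * Example: registering a clock from driver code by attaching a struct
 * clk_init_data to the hw pointer.  Illustrative sketch assuming a
 * hypothetical "foo" driver with its own clk_ops.
 *
 *      static const char *foo_parents[] = { "pll1", "pll2" };
 *
 *      static struct clk_init_data foo_init = {
 *              .name = "foo_mux",
 *              .ops = &foo_clk_ops,
 *              .parent_names = foo_parents,
 *              .num_parents = ARRAY_SIZE(foo_parents),
 *              .flags = CLK_SET_RATE_PARENT,
 *      };
 *
 *      foo->hw.init = &foo_init;
 *      clk = clk_register(dev, &foo->hw);
 *      if (IS_ERR(clk))
 *              return PTR_ERR(clk);
 */
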
/**
 * clk_unregister - unregister a currently registered clock
 * @clk: clock to unregister
 *
 * Currently unimplemented.
 */
void clk_unregister(struct clk *clk) {}
EXPORT_SYMBOL_GPL(clk_unregister);

static void devm_clk_release(struct device *dev, void *res)
{
        clk_unregister(res);
}

/**
 * devm_clk_register - resource managed clk_register()
 * @dev: device that is registering this clock
 * @hw: link to hardware-specific clock data
 *
 * Managed clk_register().  Clocks returned from this function are
 * automatically clk_unregister()ed on driver detach.  See clk_register() for
 * more information.
 */
struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
{
        struct clk *clk;
        int ret;

        clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
        if (!clk)
                return ERR_PTR(-ENOMEM);

        ret = _clk_register(dev, hw, clk);
        if (!ret) {
                devres_add(dev, clk);
        } else {
                devres_free(clk);
                clk = ERR_PTR(ret);
        }

        return clk;
}
EXPORT_SYMBOL_GPL(devm_clk_register);

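/*
 * Example: in a platform driver's probe the devm_ variant removes the need
 * for explicit unregistration in the error and remove paths.  Illustrative
 * sketch; "foo" is hypothetical.
 *
 *      static int foo_probe(struct platform_device *pdev)
 *      {
 *              struct clk *clk;
 *
 *              clk = devm_clk_register(&pdev->dev, &foo_hw);
 *              if (IS_ERR(clk))
 *                      return PTR_ERR(clk);
 *              // no cleanup needed: unregistered automatically on detach
 *              return 0;
 *      }
 */
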
static int devm_clk_match(struct device *dev, void *res, void *data)
{
        struct clk *c = res;

        if (WARN_ON(!c))
                return 0;
        return c == data;
}

/**
 * devm_clk_unregister - resource managed clk_unregister()
 * @clk: clock to unregister
 *
 * Deallocate a clock allocated with devm_clk_register().  Normally
 * this function will not need to be called and the resource management
 * code will ensure that the resource is freed.
 */
void devm_clk_unregister(struct device *dev, struct clk *clk)
{
        WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
}
EXPORT_SYMBOL_GPL(devm_clk_unregister);

/***    clk rate change notifiers    ***/

/**
 * clk_notifier_register - add a clk rate change notifier
 * @clk: struct clk * to watch
 * @nb: struct notifier_block * with callback info
 *
 * Request notification when clk's rate changes.  This uses an SRCU
 * notifier because we want it to block and notifier unregistrations are
 * uncommon.  The callbacks associated with the notifier must not
 * re-enter into the clk framework by calling any top-level clk APIs;
 * doing so would take the prepare_lock mutex recursively and deadlock.
 *
 * Pre-change notifier callbacks will be passed the current, pre-change
 * rate of the clk via struct clk_notifier_data.old_rate.  The new,
 * post-change rate of the clk is passed via struct
 * clk_notifier_data.new_rate.
 *
 * Post-change notifiers will pass the now-current, post-change rate of
 * the clk in both struct clk_notifier_data.old_rate and struct
 * clk_notifier_data.new_rate.
 *
 * Abort-change notifiers are effectively the opposite of pre-change
 * notifiers: the original pre-change clk rate is passed in via struct
 * clk_notifier_data.new_rate and the failed post-change rate is passed
 * in via struct clk_notifier_data.old_rate.
 *
 * clk_notifier_register() must be called from non-atomic context.
 * Returns -EINVAL if called with null arguments, -ENOMEM upon
 * allocation failure; otherwise, passes along the return value of
 * srcu_notifier_chain_register().
 */
int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn;
        int ret = -ENOMEM;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        /* search the list of notifiers for this clk */
        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        /* if clk wasn't in the notifier list, allocate new clk_notifier */
        if (cn->clk != clk) {
                cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
                if (!cn)
                        goto out;

                cn->clk = clk;
                srcu_init_notifier_head(&cn->notifier_head);

                list_add(&cn->node, &clk_notifier_list);
        }

        ret = srcu_notifier_chain_register(&cn->notifier_head, nb);

        clk->notifier_count++;

out:
        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_register);

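/*
 * Example: a rate-change notifier callback.  Illustrative sketch; the "foo"
 * names and FOO_MAX_RATE limit are hypothetical.  The callback runs with
 * prepare_lock held, so it must not call back into the top-level clk API.
 *
 *      static int foo_clk_notify(struct notifier_block *nb,
 *                                unsigned long event, void *data)
 *      {
 *              struct clk_notifier_data *cnd = data;
 *
 *              switch (event) {
 *              case PRE_RATE_CHANGE:
 *                      if (cnd->new_rate > FOO_MAX_RATE)
 *                              return NOTIFY_BAD;      // veto the change
 *                      return NOTIFY_OK;
 *              case POST_RATE_CHANGE:
 *                      foo_reprogram_dividers(cnd->new_rate);
 *                      return NOTIFY_OK;
 *              default:
 *                      return NOTIFY_DONE;
 *              }
 *      }
 *
 *      // with foo_nb.notifier_call = foo_clk_notify:
 *      clk_notifier_register(clk, &foo_nb);
 */
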
/**
 * clk_notifier_unregister - remove a clk rate change notifier
 * @clk: struct clk *
 * @nb: struct notifier_block * with callback info
 *
 * Request no further notification for changes to 'clk' and frees memory
 * allocated in clk_notifier_register.
 *
 * Returns -EINVAL if called with null arguments; otherwise, passes
 * along the return value of srcu_notifier_chain_unregister().
 */
int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
{
        struct clk_notifier *cn = NULL;
        int ret = -EINVAL;

        if (!clk || !nb)
                return -EINVAL;

        mutex_lock(&prepare_lock);

        list_for_each_entry(cn, &clk_notifier_list, node)
                if (cn->clk == clk)
                        break;

        if (cn->clk == clk) {
                ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);

                clk->notifier_count--;

                /* XXX the notifier code should handle this better */
                if (!cn->notifier_head.head) {
                        srcu_cleanup_notifier_head(&cn->notifier_head);
                        list_del(&cn->node);
                        kfree(cn);
                }
        } else {
                ret = -ENOENT;
        }

        mutex_unlock(&prepare_lock);

        return ret;
}
EXPORT_SYMBOL_GPL(clk_notifier_unregister);

#ifdef CONFIG_OF
/**
 * struct of_clk_provider - Clock provider registration structure
 * @link: Entry in global list of clock providers
 * @node: Pointer to device tree node of clock provider
 * @get: Get clock callback.  Returns NULL or a struct clk for the
 *       given clock specifier
 * @data: context pointer to be passed into @get callback
 */
struct of_clk_provider {
        struct list_head link;

        struct device_node *node;
        struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
        void *data;
};

extern struct of_device_id __clk_of_table[];

static const struct of_device_id __clk_of_table_sentinel
        __used __section(__clk_of_table_end);

static LIST_HEAD(of_clk_providers);
static DEFINE_MUTEX(of_clk_lock);

struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
                                  void *data)
{
        return data;
}
EXPORT_SYMBOL_GPL(of_clk_src_simple_get);

struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
{
        struct clk_onecell_data *clk_data = data;
        unsigned int idx = clkspec->args[0];

        if (idx >= clk_data->clk_num) {
                pr_err("%s: invalid clock index %d\n", __func__, idx);
                return ERR_PTR(-EINVAL);
        }

        return clk_data->clks[idx];
}
EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);

/**
 * of_clk_add_provider() - Register a clock provider for a node
 * @np: Device node pointer associated with clock provider
 * @clk_src_get: callback for decoding clock
 * @data: context pointer for @clk_src_get callback.
 */
int of_clk_add_provider(struct device_node *np,
                        struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
                                                   void *data),
                        void *data)
{
        struct of_clk_provider *cp;

        cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
        if (!cp)
                return -ENOMEM;

        cp->node = of_node_get(np);
        cp->data = data;
        cp->get = clk_src_get;

        mutex_lock(&of_clk_lock);
        list_add(&cp->link, &of_clk_providers);
        mutex_unlock(&of_clk_lock);
        pr_debug("Added clock from %s\n", np->full_name);

        return 0;
}
EXPORT_SYMBOL_GPL(of_clk_add_provider);

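/*
 * Example: a device tree clock controller exposing several clocks through
 * the generic one-cell mapping.  Illustrative sketch; the "foo" names are
 * hypothetical.
 *
 *      static struct clk *foo_clks[FOO_NR_CLKS];
 *      static struct clk_onecell_data foo_clk_data = {
 *              .clks = foo_clks,
 *              .clk_num = ARRAY_SIZE(foo_clks),
 *      };
 *
 *      // after populating foo_clks[] via clk_register():
 *      of_clk_add_provider(np, of_clk_src_onecell_get, &foo_clk_data);
 *
 * A provider with exactly one clock (#clock-cells = <0>) can instead pass
 * of_clk_src_simple_get with the struct clk itself as @data.
 */
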
/**
 * of_clk_del_provider() - Remove a previously registered clock provider
 * @np: Device node pointer associated with clock provider
 */
void of_clk_del_provider(struct device_node *np)
{
        struct of_clk_provider *cp;

        mutex_lock(&of_clk_lock);
        list_for_each_entry(cp, &of_clk_providers, link) {
                if (cp->node == np) {
                        list_del(&cp->link);
                        of_node_put(cp->node);
                        kfree(cp);
                        break;
                }
        }
        mutex_unlock(&of_clk_lock);
}
EXPORT_SYMBOL_GPL(of_clk_del_provider);

struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
{
        struct of_clk_provider *provider;
        struct clk *clk = ERR_PTR(-ENOENT);

        /* Check if we have such a provider in our array */
        mutex_lock(&of_clk_lock);
        list_for_each_entry(provider, &of_clk_providers, link) {
                if (provider->node == clkspec->np)
                        clk = provider->get(clkspec, provider->data);
                if (!IS_ERR(clk))
                        break;
        }
        mutex_unlock(&of_clk_lock);

        return clk;
}

const char *of_clk_get_parent_name(struct device_node *np, int index)
{
        struct of_phandle_args clkspec;
        const char *clk_name;
        int rc;

        if (index < 0)
                return NULL;

        rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
                                        &clkspec);
        if (rc)
                return NULL;

        if (of_property_read_string_index(clkspec.np, "clock-output-names",
                                          clkspec.args_count ? clkspec.args[0] : 0,
                                          &clk_name) < 0)
                clk_name = clkspec.np->name;

        of_node_put(clkspec.np);
        return clk_name;
}
EXPORT_SYMBOL_GPL(of_clk_get_parent_name);

/**
 * of_clk_init() - Scan and init clock providers from the DT
 * @matches: array of compatible values and init functions for providers.
 *
 * This function scans the device tree for matching clock providers and
 * calls their initialization functions.
 */
void __init of_clk_init(const struct of_device_id *matches)
{
        struct device_node *np;

        if (!matches)
                matches = __clk_of_table;

        for_each_matching_node(np, matches) {
                const struct of_device_id *match = of_match_node(matches, np);
                of_clk_init_cb_t clk_init_cb = match->data;
                clk_init_cb(np);
        }
}
#endif

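/*
 * Example: an early platform clock driver hooking into of_clk_init().
 * Illustrative sketch assuming the CLK_OF_DECLARE() helper from
 * <linux/clk-provider.h>, which places an entry in __clk_of_table; the
 * "foo" names and compatible string are hypothetical.
 *
 *      static void __init foo_clocks_init(struct device_node *np)
 *      {
 *              // register clks, then of_clk_add_provider(np, ...)
 *      }
 *      CLK_OF_DECLARE(foo, "acme,foo-clocks", foo_clocks_init);
 *
 * Machine code then calls of_clk_init(NULL) early during boot to match and
 * initialize all such providers from the device tree.
 */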