2 * Copyright (C) 2010-2011 Canonical Ltd <jeremy.kerr@canonical.com>
3 * Copyright (C) 2011-2012 Linaro Ltd <mturquette@linaro.org>
5 * This program is free software; you can redistribute it and/or modify
6 * it under the terms of the GNU General Public License version 2 as
7 * published by the Free Software Foundation.
9 * Standard functionality for the common clock API. See Documentation/clk.txt
12 #include <linux/clk-private.h>
13 #include <linux/module.h>
14 #include <linux/mutex.h>
15 #include <linux/spinlock.h>
16 #include <linux/err.h>
17 #include <linux/list.h>
18 #include <linux/slab.h>
20 #include <linux/device.h>
22 static DEFINE_SPINLOCK(enable_lock);
23 static DEFINE_MUTEX(prepare_lock);
25 static HLIST_HEAD(clk_root_list);
26 static HLIST_HEAD(clk_orphan_list);
27 static LIST_HEAD(clk_notifier_list);
29 /*** debugfs support ***/
31 #ifdef CONFIG_COMMON_CLK_DEBUG
32 #include <linux/debugfs.h>
34 static struct dentry *rootdir;
35 static struct dentry *orphandir;
36 static int inited = 0;
38 static void clk_summary_show_one(struct seq_file *s, struct clk *c, int level)
43 seq_printf(s, "%*s%-*s %-11d %-12d %-10lu",
45 30 - level * 3, c->name,
46 c->enable_count, c->prepare_count, c->rate);
50 static void clk_summary_show_subtree(struct seq_file *s, struct clk *c,
54 struct hlist_node *tmp;
59 clk_summary_show_one(s, c, level);
61 hlist_for_each_entry(child, tmp, &c->children, child_node)
62 clk_summary_show_subtree(s, child, level + 1);
65 static int clk_summary_show(struct seq_file *s, void *data)
68 struct hlist_node *tmp;
70 seq_printf(s, " clock enable_cnt prepare_cnt rate\n");
71 seq_printf(s, "---------------------------------------------------------------------\n");
73 mutex_lock(&prepare_lock);
75 hlist_for_each_entry(c, tmp, &clk_root_list, child_node)
76 clk_summary_show_subtree(s, c, 0);
78 hlist_for_each_entry(c, tmp, &clk_orphan_list, child_node)
79 clk_summary_show_subtree(s, c, 0);
81 mutex_unlock(&prepare_lock);
87 static int clk_summary_open(struct inode *inode, struct file *file)
89 return single_open(file, clk_summary_show, inode->i_private);
92 static const struct file_operations clk_summary_fops = {
93 .open = clk_summary_open,
96 .release = single_release,
99 /* caller must hold prepare_lock */
100 static int clk_debug_create_one(struct clk *clk, struct dentry *pdentry)
105 if (!clk || !pdentry) {
110 d = debugfs_create_dir(clk->name, pdentry);
116 d = debugfs_create_u32("clk_rate", S_IRUGO, clk->dentry,
121 d = debugfs_create_x32("clk_flags", S_IRUGO, clk->dentry,
126 d = debugfs_create_u32("clk_prepare_count", S_IRUGO, clk->dentry,
127 (u32 *)&clk->prepare_count);
131 d = debugfs_create_u32("clk_enable_count", S_IRUGO, clk->dentry,
132 (u32 *)&clk->enable_count);
136 d = debugfs_create_u32("clk_notifier_count", S_IRUGO, clk->dentry,
137 (u32 *)&clk->notifier_count);
145 debugfs_remove(clk->dentry);
150 /* caller must hold prepare_lock */
151 static int clk_debug_create_subtree(struct clk *clk, struct dentry *pdentry)
154 struct hlist_node *tmp;
157 if (!clk || !pdentry)
160 ret = clk_debug_create_one(clk, pdentry);
165 hlist_for_each_entry(child, tmp, &clk->children, child_node)
166 clk_debug_create_subtree(child, clk->dentry);
174 * clk_debug_register - add a clk node to the debugfs clk tree
175 * @clk: the clk being added to the debugfs clk tree
177 * Dynamically adds a clk to the debugfs clk tree if debugfs has been
178 * initialized. Otherwise it bails out early since the debugfs clk tree
179 * will be created lazily by clk_debug_init as part of a late_initcall.
181 * Caller must hold prepare_lock. Only clk_init calls this function (so
182 * far), so this requirement is taken care of.
184 static int clk_debug_register(struct clk *clk)
187 struct dentry *pdentry;
193 parent = clk->parent;
196 * Check to see if a clk is a root clk. Also check that it is
197 * safe to add this clk to debugfs
200 if (clk->flags & CLK_IS_ROOT)
206 pdentry = parent->dentry;
210 ret = clk_debug_create_subtree(clk, pdentry);
217 * clk_debug_init - lazily create the debugfs clk tree visualization
219 * clks are often initialized very early during boot before memory can
220 * be dynamically allocated and well before debugfs is setup.
221 * clk_debug_init walks the clk tree hierarchy while holding
222 * prepare_lock and creates the topology as part of a late_initcall,
223 * thus ensuring that clks initialized very early will still be
224 * represented in the debugfs clk tree. This function should only be
225 * called once at boot-time; clks added dynamically afterwards will be
226 * registered via clk_debug_register.
228 static int __init clk_debug_init(void)
231 struct hlist_node *tmp;
234 rootdir = debugfs_create_dir("clk", NULL);
239 d = debugfs_create_file("clk_summary", S_IRUGO, rootdir, NULL,
244 orphandir = debugfs_create_dir("orphans", rootdir);
249 mutex_lock(&prepare_lock);
251 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
252 clk_debug_create_subtree(clk, rootdir);
254 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
255 clk_debug_create_subtree(clk, orphandir);
259 mutex_unlock(&prepare_lock);
263 late_initcall(clk_debug_init);
265 static inline int clk_debug_register(struct clk *clk) { return 0; }
268 /* caller must hold prepare_lock */
269 static void clk_disable_unused_subtree(struct clk *clk)
272 struct hlist_node *tmp;
278 hlist_for_each_entry(child, tmp, &clk->children, child_node)
279 clk_disable_unused_subtree(child);
281 spin_lock_irqsave(&enable_lock, flags);
283 if (clk->enable_count)
286 if (clk->flags & CLK_IGNORE_UNUSED)
290 * some gate clocks have special needs during the disable-unused
291 * sequence. call .disable_unused if available, otherwise fall
294 if (__clk_is_enabled(clk)) {
295 if (clk->ops->disable_unused)
296 clk->ops->disable_unused(clk->hw);
297 else if (clk->ops->disable)
298 clk->ops->disable(clk->hw);
302 spin_unlock_irqrestore(&enable_lock, flags);
308 static int clk_disable_unused(void)
311 struct hlist_node *tmp;
313 mutex_lock(&prepare_lock);
315 hlist_for_each_entry(clk, tmp, &clk_root_list, child_node)
316 clk_disable_unused_subtree(clk);
318 hlist_for_each_entry(clk, tmp, &clk_orphan_list, child_node)
319 clk_disable_unused_subtree(clk);
321 mutex_unlock(&prepare_lock);
325 late_initcall(clk_disable_unused);
327 /*** helper functions ***/
329 const char *__clk_get_name(struct clk *clk)
331 return !clk ? NULL : clk->name;
333 EXPORT_SYMBOL_GPL(__clk_get_name);
335 struct clk_hw *__clk_get_hw(struct clk *clk)
337 return !clk ? NULL : clk->hw;
340 u8 __clk_get_num_parents(struct clk *clk)
342 return !clk ? 0 : clk->num_parents;
345 struct clk *__clk_get_parent(struct clk *clk)
347 return !clk ? NULL : clk->parent;
350 unsigned int __clk_get_enable_count(struct clk *clk)
352 return !clk ? 0 : clk->enable_count;
355 unsigned int __clk_get_prepare_count(struct clk *clk)
357 return !clk ? 0 : clk->prepare_count;
360 unsigned long __clk_get_rate(struct clk *clk)
371 if (clk->flags & CLK_IS_ROOT)
381 unsigned long __clk_get_flags(struct clk *clk)
383 return !clk ? 0 : clk->flags;
386 bool __clk_is_enabled(struct clk *clk)
394 * .is_enabled is only mandatory for clocks that gate
395 * fall back to software usage counter if .is_enabled is missing
397 if (!clk->ops->is_enabled) {
398 ret = clk->enable_count ? 1 : 0;
402 ret = clk->ops->is_enabled(clk->hw);
407 static struct clk *__clk_lookup_subtree(const char *name, struct clk *clk)
411 struct hlist_node *tmp;
413 if (!strcmp(clk->name, name))
416 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
417 ret = __clk_lookup_subtree(name, child);
425 struct clk *__clk_lookup(const char *name)
427 struct clk *root_clk;
429 struct hlist_node *tmp;
434 /* search the 'proper' clk tree first */
435 hlist_for_each_entry(root_clk, tmp, &clk_root_list, child_node) {
436 ret = __clk_lookup_subtree(name, root_clk);
441 /* if not found, then search the orphan tree */
442 hlist_for_each_entry(root_clk, tmp, &clk_orphan_list, child_node) {
443 ret = __clk_lookup_subtree(name, root_clk);
453 void __clk_unprepare(struct clk *clk)
458 if (WARN_ON(clk->prepare_count == 0))
461 if (--clk->prepare_count > 0)
464 WARN_ON(clk->enable_count > 0);
466 if (clk->ops->unprepare)
467 clk->ops->unprepare(clk->hw);
469 __clk_unprepare(clk->parent);
473 * clk_unprepare - undo preparation of a clock source
474 * @clk: the clk being unprepared
476 * clk_unprepare may sleep, which differentiates it from clk_disable. In a
477 * simple case, clk_unprepare can be used instead of clk_disable to gate a clk
478 * if the operation may sleep. One example is a clk which is accessed over
479 * I2C. In the complex case a clk gate operation may require a fast and a slow
480 * part. It is for this reason that clk_unprepare and clk_disable are not mutually
481 * exclusive. In fact clk_disable must be called before clk_unprepare.
483 void clk_unprepare(struct clk *clk)
485 mutex_lock(&prepare_lock);
486 __clk_unprepare(clk);
487 mutex_unlock(&prepare_lock);
489 EXPORT_SYMBOL_GPL(clk_unprepare);
491 int __clk_prepare(struct clk *clk)
498 if (clk->prepare_count == 0) {
499 ret = __clk_prepare(clk->parent);
503 if (clk->ops->prepare) {
504 ret = clk->ops->prepare(clk->hw);
506 __clk_unprepare(clk->parent);
512 clk->prepare_count++;
518 * clk_prepare - prepare a clock source
519 * @clk: the clk being prepared
521 * clk_prepare may sleep, which differentiates it from clk_enable. In a simple
522 * case, clk_prepare can be used instead of clk_enable to ungate a clk if the
523 * operation may sleep. One example is a clk which is accessed over I2C. In
524 * the complex case a clk ungate operation may require a fast and a slow part.
525 * It is for this reason that clk_prepare and clk_enable are not mutually
526 * exclusive. In fact clk_prepare must be called before clk_enable.
527 * Returns 0 on success, or a negative error code otherwise.
529 int clk_prepare(struct clk *clk)
533 mutex_lock(&prepare_lock);
534 ret = __clk_prepare(clk);
535 mutex_unlock(&prepare_lock);
539 EXPORT_SYMBOL_GPL(clk_prepare);
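/*
 * Illustrative sketch, not part of this file: the consumer-side ordering
 * required by the comments above.  "my_clk" is a hypothetical clock handle
 * obtained earlier via clk_get(); error handling is abbreviated.
 *
 *	ret = clk_prepare(my_clk);	(process context, may sleep)
 *	if (ret)
 *		return ret;
 *	ret = clk_enable(my_clk);	(atomic, must not sleep)
 *	if (ret) {
 *		clk_unprepare(my_clk);
 *		return ret;
 *	}
 */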
541 static void __clk_disable(struct clk *clk)
546 if (WARN_ON(IS_ERR(clk)))
549 if (WARN_ON(clk->enable_count == 0))
552 if (--clk->enable_count > 0)
555 if (clk->ops->disable)
556 clk->ops->disable(clk->hw);
558 __clk_disable(clk->parent);
562 * clk_disable - gate a clock
563 * @clk: the clk being gated
565 * clk_disable must not sleep, which differentiates it from clk_unprepare. In
566 * a simple case, clk_disable can be used instead of clk_unprepare to gate a
567 * clk if the operation is fast and will never sleep. One example is a
568 * SoC-internal clk which is controlled via simple register writes. In the
569 * complex case a clk gate operation may require a fast and a slow part. It is
570 * for this reason that clk_unprepare and clk_disable are not mutually exclusive.
571 * In fact clk_disable must be called before clk_unprepare.
573 void clk_disable(struct clk *clk)
577 spin_lock_irqsave(&enable_lock, flags);
579 spin_unlock_irqrestore(&enable_lock, flags);
581 EXPORT_SYMBOL_GPL(clk_disable);
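/*
 * Illustrative sketch, not part of this file: teardown mirrors the
 * prepare/enable sequence in reverse, so clk_disable() comes before
 * clk_unprepare().  "my_clk" is a hypothetical clock handle.
 *
 *	clk_disable(my_clk);		(atomic, must not sleep)
 *	clk_unprepare(my_clk);		(may sleep)
 */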
583 static int __clk_enable(struct clk *clk)
590 if (WARN_ON(clk->prepare_count == 0))
593 if (clk->enable_count == 0) {
594 ret = __clk_enable(clk->parent);
599 if (clk->ops->enable) {
600 ret = clk->ops->enable(clk->hw);
602 __clk_disable(clk->parent);
613 * clk_enable - ungate a clock
614 * @clk: the clk being ungated
616 * clk_enable must not sleep, which differentiates it from clk_prepare. In a
617 * simple case, clk_enable can be used instead of clk_prepare to ungate a clk
618 * if the operation will never sleep. One example is a SoC-internal clk which
619 * is controlled via simple register writes. In the complex case a clk ungate
620 * operation may require a fast and a slow part. It is for this reason that
621 * clk_enable and clk_prepare are not mutually exclusive. In fact clk_prepare
622 * must be called before clk_enable. Returns 0 on success, or a negative error code otherwise.
625 int clk_enable(struct clk *clk)
630 spin_lock_irqsave(&enable_lock, flags);
631 ret = __clk_enable(clk);
632 spin_unlock_irqrestore(&enable_lock, flags);
636 EXPORT_SYMBOL_GPL(clk_enable);
639 * __clk_round_rate - round the given rate for a clk
640 * @clk: round the rate of this clock
642 * Caller must hold prepare_lock. Useful for clk_ops such as .set_rate
644 unsigned long __clk_round_rate(struct clk *clk, unsigned long rate)
646 unsigned long parent_rate = 0;
651 if (!clk->ops->round_rate) {
652 if (clk->flags & CLK_SET_RATE_PARENT)
653 return __clk_round_rate(clk->parent, rate);
659 parent_rate = clk->parent->rate;
661 return clk->ops->round_rate(clk->hw, rate, &parent_rate);
665 * clk_round_rate - round the given rate for a clk
666 * @clk: the clk for which we are rounding a rate
667 * @rate: the rate which is to be rounded
669 * Takes in a rate as input and rounds it to a rate that the clk can actually
670 * use, which is then returned. If clk doesn't support the round_rate operation
671 * then the parent rate is returned.
673 long clk_round_rate(struct clk *clk, unsigned long rate)
677 mutex_lock(&prepare_lock);
678 ret = __clk_round_rate(clk, rate);
679 mutex_unlock(&prepare_lock);
683 EXPORT_SYMBOL_GPL(clk_round_rate);
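/*
 * Illustrative sketch, not part of this file: a consumer can use
 * clk_round_rate() to learn what rate the hardware would actually provide
 * before committing to it with clk_set_rate().  "my_clk", "dev" and the
 * 48 MHz target are hypothetical.
 *
 *	long rounded = clk_round_rate(my_clk, 48000000);
 *	if (rounded > 0 && rounded != 48000000)
 *		dev_dbg(dev, "48 MHz unavailable, closest is %ld Hz\n",
 *			rounded);
 */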
686 * __clk_notify - call clk notifier chain
687 * @clk: struct clk * that is changing rate
688 * @msg: clk notifier type (see include/linux/clk.h)
689 * @old_rate: old clk rate
690 * @new_rate: new clk rate
692 * Triggers a notifier call chain on the clk rate-change notification
693 * for 'clk'. Passes a pointer to the struct clk and the previous
694 * and current rates to the notifier callback. Intended to be called by
695 * internal clock code only. Returns NOTIFY_DONE from the last driver
696 * called if all went well, or NOTIFY_STOP or NOTIFY_BAD immediately if
697 * a driver returns that.
699 static int __clk_notify(struct clk *clk, unsigned long msg,
700 unsigned long old_rate, unsigned long new_rate)
702 struct clk_notifier *cn;
703 struct clk_notifier_data cnd;
704 int ret = NOTIFY_DONE;
707 cnd.old_rate = old_rate;
708 cnd.new_rate = new_rate;
710 list_for_each_entry(cn, &clk_notifier_list, node) {
711 if (cn->clk == clk) {
712 ret = srcu_notifier_call_chain(&cn->notifier_head, msg,
723 * @clk: first clk in the subtree
724 * @msg: notification type (see include/linux/clk.h)
726 * Walks the subtree of clks starting with clk and recalculates rates as it
727 * goes. Note that if a clk does not implement the .recalc_rate callback then
728 * it is assumed that the clock will take on the rate of its parent.
730 * clk_recalc_rates also propagates the POST_RATE_CHANGE notification, if necessary.
733 * Caller must hold prepare_lock.
735 static void __clk_recalc_rates(struct clk *clk, unsigned long msg)
737 unsigned long old_rate;
738 unsigned long parent_rate = 0;
739 struct hlist_node *tmp;
742 old_rate = clk->rate;
745 parent_rate = clk->parent->rate;
747 if (clk->ops->recalc_rate)
748 clk->rate = clk->ops->recalc_rate(clk->hw, parent_rate);
750 clk->rate = parent_rate;
753 * ignore NOTIFY_STOP and NOTIFY_BAD return values for POST_RATE_CHANGE
754 * & ABORT_RATE_CHANGE notifiers
756 if (clk->notifier_count && msg)
757 __clk_notify(clk, msg, old_rate, clk->rate);
759 hlist_for_each_entry(child, tmp, &clk->children, child_node)
760 __clk_recalc_rates(child, msg);
764 * clk_get_rate - return the rate of clk
765 * @clk: the clk whose rate is being returned
767 * Simply returns the cached rate of the clk, unless CLK_GET_RATE_NOCACHE flag
768 * is set, which means a recalc_rate will be issued.
769 * If clk is NULL then returns 0.
771 unsigned long clk_get_rate(struct clk *clk)
775 mutex_lock(&prepare_lock);
777 if (clk && (clk->flags & CLK_GET_RATE_NOCACHE))
778 __clk_recalc_rates(clk, 0);
780 rate = __clk_get_rate(clk);
781 mutex_unlock(&prepare_lock);
785 EXPORT_SYMBOL_GPL(clk_get_rate);
788 * __clk_speculate_rates
789 * @clk: first clk in the subtree
790 * @parent_rate: the "future" rate of clk's parent
792 * Walks the subtree of clks starting with clk, speculating rates as it
793 * goes and firing off PRE_RATE_CHANGE notifications as necessary.
795 * Unlike clk_recalc_rates, clk_speculate_rates exists only for sending
796 * pre-rate change notifications and returns early if no clks in the
797 * subtree have subscribed to the notifications. Note that if a clk does not
798 * implement the .recalc_rate callback then it is assumed that the clock will
799 * take on the rate of its parent.
801 * Caller must hold prepare_lock.
803 static int __clk_speculate_rates(struct clk *clk, unsigned long parent_rate)
805 struct hlist_node *tmp;
807 unsigned long new_rate;
808 int ret = NOTIFY_DONE;
810 if (clk->ops->recalc_rate)
811 new_rate = clk->ops->recalc_rate(clk->hw, parent_rate);
813 new_rate = parent_rate;
815 /* abort the rate change if a driver returns NOTIFY_BAD */
816 if (clk->notifier_count)
817 ret = __clk_notify(clk, PRE_RATE_CHANGE, clk->rate, new_rate);
819 if (ret == NOTIFY_BAD)
822 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
823 ret = __clk_speculate_rates(child, new_rate);
824 if (ret == NOTIFY_BAD)
832 static void clk_calc_subtree(struct clk *clk, unsigned long new_rate)
835 struct hlist_node *tmp;
837 clk->new_rate = new_rate;
839 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
840 if (child->ops->recalc_rate)
841 child->new_rate = child->ops->recalc_rate(child->hw, new_rate);
843 child->new_rate = new_rate;
844 clk_calc_subtree(child, child->new_rate);
849 * calculate the new rates returning the topmost clock that has to be
852 static struct clk *clk_calc_new_rates(struct clk *clk, unsigned long rate)
854 struct clk *top = clk;
855 unsigned long best_parent_rate = 0;
856 unsigned long new_rate;
859 if (IS_ERR_OR_NULL(clk))
862 /* save parent rate, if it exists */
864 best_parent_rate = clk->parent->rate;
866 /* never propagate up to the parent */
867 if (!(clk->flags & CLK_SET_RATE_PARENT)) {
868 if (!clk->ops->round_rate) {
869 clk->new_rate = clk->rate;
872 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
876 /* need clk->parent from here on out */
878 pr_debug("%s: %s has NULL parent\n", __func__, clk->name);
882 if (!clk->ops->round_rate) {
883 top = clk_calc_new_rates(clk->parent, rate);
884 new_rate = clk->parent->new_rate;
889 new_rate = clk->ops->round_rate(clk->hw, rate, &best_parent_rate);
891 if (best_parent_rate != clk->parent->rate) {
892 top = clk_calc_new_rates(clk->parent, best_parent_rate);
898 clk_calc_subtree(clk, new_rate);
904 * Notify about rate changes in a subtree. Always walk down the whole tree
905 * so that in case of an error we can walk down the whole tree again and
908 static struct clk *clk_propagate_rate_change(struct clk *clk, unsigned long event)
910 struct hlist_node *tmp;
911 struct clk *child, *fail_clk = NULL;
912 int ret = NOTIFY_DONE;
914 if (clk->rate == clk->new_rate)
917 if (clk->notifier_count) {
918 ret = __clk_notify(clk, event, clk->rate, clk->new_rate);
919 if (ret == NOTIFY_BAD)
923 hlist_for_each_entry(child, tmp, &clk->children, child_node) {
924 clk = clk_propagate_rate_change(child, event);
933 * walk down a subtree and set the new rates notifying the rate
936 static void clk_change_rate(struct clk *clk)
939 unsigned long old_rate;
940 unsigned long best_parent_rate = 0;
941 struct hlist_node *tmp;
943 old_rate = clk->rate;
946 best_parent_rate = clk->parent->rate;
948 if (clk->ops->set_rate)
949 clk->ops->set_rate(clk->hw, clk->new_rate, best_parent_rate);
951 if (clk->ops->recalc_rate)
952 clk->rate = clk->ops->recalc_rate(clk->hw, best_parent_rate);
954 clk->rate = best_parent_rate;
956 if (clk->notifier_count && old_rate != clk->rate)
957 __clk_notify(clk, POST_RATE_CHANGE, old_rate, clk->rate);
959 hlist_for_each_entry(child, tmp, &clk->children, child_node)
960 clk_change_rate(child);
964 * clk_set_rate - specify a new rate for clk
965 * @clk: the clk whose rate is being changed
966 * @rate: the new rate for clk
968 * In the simplest case clk_set_rate will only adjust the rate of clk.
970 * Setting the CLK_SET_RATE_PARENT flag allows the rate change operation to
971 * propagate up to clk's parent; whether or not this happens depends on the
972 * outcome of clk's .round_rate implementation. If *parent_rate is unchanged
973 * after calling .round_rate then upstream parent propagation is ignored. If
974 * *parent_rate comes back with a new rate for clk's parent then we propagate
975 * up to clk's parent and set its rate. Upward propagation will continue
976 * until either a clk does not support the CLK_SET_RATE_PARENT flag or
977 * .round_rate stops requesting changes to clk's parent_rate.
979 * Rate changes are accomplished via tree traversal that also recalculates the
980 * rates for the clocks and fires off POST_RATE_CHANGE notifiers.
982 * Returns 0 on success, or a negative error code otherwise.
984 int clk_set_rate(struct clk *clk, unsigned long rate)
986 struct clk *top, *fail_clk;
989 /* prevent racing with updates to the clock topology */
990 mutex_lock(&prepare_lock);
992 /* bail early if nothing to do */
993 if (rate == clk->rate)
996 if ((clk->flags & CLK_SET_RATE_GATE) && clk->prepare_count) {
1001 /* calculate new rates and get the topmost changed clock */
1002 top = clk_calc_new_rates(clk, rate);
1008 /* notify that we are about to change rates */
1009 fail_clk = clk_propagate_rate_change(top, PRE_RATE_CHANGE);
1011 pr_warn("%s: failed to set %s rate\n", __func__,
1013 clk_propagate_rate_change(top, ABORT_RATE_CHANGE);
1018 /* change the rates */
1019 clk_change_rate(top);
1021 mutex_unlock(&prepare_lock);
1025 mutex_unlock(&prepare_lock);
1029 EXPORT_SYMBOL_GPL(clk_set_rate);
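/*
 * Illustrative sketch, not part of this file: a typical consumer call for
 * changing a rate.  "my_clk", "dev" and the 96 MHz target are hypothetical;
 * note that a clock carrying CLK_SET_RATE_GATE must be unprepared before
 * this call can succeed.
 *
 *	ret = clk_set_rate(my_clk, 96000000);
 *	if (ret)
 *		dev_err(dev, "failed to set rate: %d\n", ret);
 *	else
 *		dev_dbg(dev, "rate is now %lu Hz\n", clk_get_rate(my_clk));
 */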
1032 * clk_get_parent - return the parent of a clk
1033 * @clk: the clk whose parent gets returned
1035 * Simply returns clk->parent. Returns NULL if clk is NULL.
1037 struct clk *clk_get_parent(struct clk *clk)
1041 mutex_lock(&prepare_lock);
1042 parent = __clk_get_parent(clk);
1043 mutex_unlock(&prepare_lock);
1047 EXPORT_SYMBOL_GPL(clk_get_parent);
1050 * .get_parent is mandatory for clocks with multiple possible parents. It is
1051 * optional for single-parent clocks. Always call .get_parent if it is
1052 * available and WARN if it is missing for multi-parent clocks.
1054 * For single-parent clocks without .get_parent, first check to see if the
1055 * .parents array exists, and if so use it to avoid an expensive tree
1056 * traversal. If .parents does not exist then walk the tree with __clk_lookup.
1058 static struct clk *__clk_init_parent(struct clk *clk)
1060 struct clk *ret = NULL;
1063 /* handle the trivial cases */
1065 if (!clk->num_parents)
1068 if (clk->num_parents == 1) {
1069 if (IS_ERR_OR_NULL(clk->parent))
1070 ret = clk->parent = __clk_lookup(clk->parent_names[0]);
1075 if (!clk->ops->get_parent) {
1076 WARN(!clk->ops->get_parent,
1077 "%s: multi-parent clocks must implement .get_parent\n",
1083 * Do our best to cache parent clocks in clk->parents. This prevents
1084 * unnecessary and expensive calls to __clk_lookup. We don't set
1085 * clk->parent here; that is done by the calling function
1088 index = clk->ops->get_parent(clk->hw);
1092 kzalloc((sizeof(struct clk*) * clk->num_parents),
1096 ret = __clk_lookup(clk->parent_names[index]);
1097 else if (!clk->parents[index])
1098 ret = clk->parents[index] =
1099 __clk_lookup(clk->parent_names[index]);
1101 ret = clk->parents[index];
1107 void __clk_reparent(struct clk *clk, struct clk *new_parent)
1109 #ifdef CONFIG_COMMON_CLK_DEBUG
1111 struct dentry *new_parent_d;
1114 if (!clk || !new_parent)
1117 hlist_del(&clk->child_node);
1120 hlist_add_head(&clk->child_node, &new_parent->children);
1122 hlist_add_head(&clk->child_node, &clk_orphan_list);
1124 #ifdef CONFIG_COMMON_CLK_DEBUG
1129 new_parent_d = new_parent->dentry;
1131 new_parent_d = orphandir;
1133 d = debugfs_rename(clk->dentry->d_parent, clk->dentry,
1134 new_parent_d, clk->name);
1138 pr_debug("%s: failed to rename debugfs entry for %s\n",
1139 __func__, clk->name);
1143 clk->parent = new_parent;
1145 __clk_recalc_rates(clk, POST_RATE_CHANGE);
1148 static int __clk_set_parent(struct clk *clk, struct clk *parent)
1150 struct clk *old_parent;
1151 unsigned long flags;
1155 old_parent = clk->parent;
1158 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1162 * find index of new parent clock using cached parent ptrs,
1163 * or if not yet cached, use string name comparison and cache
1164 * them now to avoid future calls to __clk_lookup.
1166 for (i = 0; i < clk->num_parents; i++) {
1167 if (clk->parents && clk->parents[i] == parent)
1169 else if (!strcmp(clk->parent_names[i], parent->name)) {
1171 clk->parents[i] = __clk_lookup(parent->name);
1176 if (i == clk->num_parents) {
1177 pr_debug("%s: clock %s is not a possible parent of clock %s\n",
1178 __func__, parent->name, clk->name);
1182 /* migrate prepare and enable */
1183 if (clk->prepare_count)
1184 __clk_prepare(parent);
1186 /* FIXME replace with clk_is_enabled(clk) someday */
1187 spin_lock_irqsave(&enable_lock, flags);
1188 if (clk->enable_count)
1189 __clk_enable(parent);
1190 spin_unlock_irqrestore(&enable_lock, flags);
1192 /* change clock input source */
1193 ret = clk->ops->set_parent(clk->hw, i);
1195 /* clean up old prepare and enable */
1196 spin_lock_irqsave(&enable_lock, flags);
1197 if (clk->enable_count)
1198 __clk_disable(old_parent);
1199 spin_unlock_irqrestore(&enable_lock, flags);
1201 if (clk->prepare_count)
1202 __clk_unprepare(old_parent);
1209 * clk_set_parent - switch the parent of a mux clk
1210 * @clk: the mux clk whose input we are switching
1211 * @parent: the new input to clk
1213 * Re-parent clk to use parent as its new input source. If clk has the
1214 * CLK_SET_PARENT_GATE flag set then clk must be gated for this
1215 * operation to succeed. After successfully changing clk's parent
1216 * clk_set_parent will update the clk topology, debugfs topology and
1217 * propagate rate recalculation via __clk_recalc_rates. Returns 0 on
1218 * success, or a negative error code otherwise.
1220 int clk_set_parent(struct clk *clk, struct clk *parent)
1224 if (!clk || !clk->ops)
1227 if (!clk->ops->set_parent)
1230 /* prevent racing with updates to the clock topology */
1231 mutex_lock(&prepare_lock);
1233 if (clk->parent == parent)
1236 /* propagate PRE_RATE_CHANGE notifications */
1237 if (clk->notifier_count)
1238 ret = __clk_speculate_rates(clk, parent->rate);
1240 /* abort if a driver objects */
1241 if (ret == NOTIFY_STOP)
1244 /* only re-parent if the clock is not in use */
1245 if ((clk->flags & CLK_SET_PARENT_GATE) && clk->prepare_count)
1248 ret = __clk_set_parent(clk, parent);
1250 /* propagate ABORT_RATE_CHANGE if .set_parent failed */
1252 __clk_recalc_rates(clk, ABORT_RATE_CHANGE);
1256 /* propagate rate recalculation downstream */
1257 __clk_reparent(clk, parent);
1260 mutex_unlock(&prepare_lock);
1264 EXPORT_SYMBOL_GPL(clk_set_parent);
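/*
 * Illustrative sketch, not part of this file: switching a mux clock to a
 * different input.  "uart_mux", "pll2" and "dev" are hypothetical; a mux
 * carrying CLK_SET_PARENT_GATE must be unprepared before re-parenting.
 *
 *	struct clk *new_parent = clk_get(dev, "pll2");
 *	if (!IS_ERR(new_parent)) {
 *		ret = clk_set_parent(uart_mux, new_parent);
 *		if (ret)
 *			dev_err(dev, "re-parenting failed: %d\n", ret);
 *	}
 */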
1267 * __clk_init - initialize the data structures in a struct clk
1268 * @dev: device initializing this clk, placeholder for now
1269 * @clk: clk being initialized
1271 * Initializes the lists in struct clk, queries the hardware for the
1272 * parent and rate and sets them both.
1274 int __clk_init(struct device *dev, struct clk *clk)
1278 struct hlist_node *tmp, *tmp2;
1283 mutex_lock(&prepare_lock);
1285 /* check to see if a clock with this name is already registered */
1286 if (__clk_lookup(clk->name)) {
1287 pr_debug("%s: clk %s already initialized\n",
1288 __func__, clk->name);
1293 /* check that clk_ops are sane. See Documentation/clk.txt */
1294 if (clk->ops->set_rate &&
1295 !(clk->ops->round_rate && clk->ops->recalc_rate)) {
1296 pr_warning("%s: %s must implement .round_rate & .recalc_rate\n",
1297 __func__, clk->name);
1302 if (clk->ops->set_parent && !clk->ops->get_parent) {
1303 pr_warning("%s: %s must implement .get_parent & .set_parent\n",
1304 __func__, clk->name);
1309 /* throw a WARN if any entries in parent_names are NULL */
1310 for (i = 0; i < clk->num_parents; i++)
1311 WARN(!clk->parent_names[i],
1312 "%s: invalid NULL in %s's .parent_names\n",
1313 __func__, clk->name);
1316 * Allocate an array of struct clk *'s to avoid unnecessary string
1317 * look-ups of clk's possible parents. This can fail for clocks passed
1318 * in to clk_init during early boot; thus any access to clk->parents[]
1319 * must always check for a NULL pointer and try to populate it if
1322 * If clk->parents is not NULL we skip this entire block. This allows
1323 * for clock drivers to statically initialize clk->parents.
1325 if (clk->num_parents > 1 && !clk->parents) {
1326 clk->parents = kzalloc((sizeof(struct clk*) * clk->num_parents),
1329 * __clk_lookup returns NULL for parents that have not been
1330 * clk_init'd; thus any access to clk->parents[] must check
1331 * for a NULL pointer. We can always perform lazy lookups for
1332 * missing parents later on.
1335 for (i = 0; i < clk->num_parents; i++)
1337 __clk_lookup(clk->parent_names[i]);
1340 clk->parent = __clk_init_parent(clk);
1343 * Populate clk->parent if parent has already been __clk_init'd. If
1344 * parent has not yet been __clk_init'd then place clk in the orphan
1345 * list. If clk has set the CLK_IS_ROOT flag then place it in the root
1348 * Every time a new clk is clk_init'd then we walk the list of orphan
1349 * clocks and re-parent any that are children of the clock currently
1353 hlist_add_head(&clk->child_node,
1354 &clk->parent->children);
1355 else if (clk->flags & CLK_IS_ROOT)
1356 hlist_add_head(&clk->child_node, &clk_root_list);
1358 hlist_add_head(&clk->child_node, &clk_orphan_list);
1361 * Set clk's rate. The preferred method is to use .recalc_rate. For
1362 * simple clocks and lazy developers the default fallback is to use the
1363 * parent's rate. If a clock doesn't have a parent (or is orphaned)
1364 * then rate is set to zero.
1366 if (clk->ops->recalc_rate)
1367 clk->rate = clk->ops->recalc_rate(clk->hw,
1368 __clk_get_rate(clk->parent));
1369 else if (clk->parent)
1370 clk->rate = clk->parent->rate;
1375 * walk the list of orphan clocks and reparent any that are children of
1378 hlist_for_each_entry_safe(orphan, tmp, tmp2, &clk_orphan_list, child_node) {
1379 if (orphan->ops->get_parent) {
1380 i = orphan->ops->get_parent(orphan->hw);
1381 if (!strcmp(clk->name, orphan->parent_names[i]))
1382 __clk_reparent(orphan, clk);
1386 for (i = 0; i < orphan->num_parents; i++)
1387 if (!strcmp(clk->name, orphan->parent_names[i])) {
1388 __clk_reparent(orphan, clk);
1394 * optional platform-specific magic
1396 * The .init callback is not used by any of the basic clock types, but
1397 * exists for weird hardware that must perform initialization magic.
1398 * Please consider other ways of solving initialization problems before
1399 * using this callback, as its use is discouraged.
1402 clk->ops->init(clk->hw);
1404 clk_debug_register(clk);
1407 mutex_unlock(&prepare_lock);
1413 * __clk_register - register a clock and return a cookie.
1415 * Same as clk_register, except that the .clk field inside hw shall point to a
1416 * preallocated (generally statically allocated) struct clk. None of the fields
1417 * of the struct clk need to be initialized.
1419 * The data pointed to by .init and .clk field shall NOT be marked as init
1422 * __clk_register is only exposed via clk-private.h and is intended for use with
1423 * very large numbers of clocks that need to be statically initialized. It is
1424 * a layering violation to include clk-private.h from any code which implements
1425 * a clock's .ops; as such any statically initialized clock data MUST be in a
1426 * separate C file from the logic that implements its operations. Returns the
1427 * registered struct clk on success, or an ERR_PTR() encoding the error otherwise.
1429 struct clk *__clk_register(struct device *dev, struct clk_hw *hw)
1435 clk->name = hw->init->name;
1436 clk->ops = hw->init->ops;
1438 clk->flags = hw->init->flags;
1439 clk->parent_names = hw->init->parent_names;
1440 clk->num_parents = hw->init->num_parents;
1442 ret = __clk_init(dev, clk);
1444 return ERR_PTR(ret);
1448 EXPORT_SYMBOL_GPL(__clk_register);
1450 static int _clk_register(struct device *dev, struct clk_hw *hw, struct clk *clk)
1454 clk->name = kstrdup(hw->init->name, GFP_KERNEL);
1456 pr_err("%s: could not allocate clk->name\n", __func__);
1460 clk->ops = hw->init->ops;
1462 clk->flags = hw->init->flags;
1463 clk->num_parents = hw->init->num_parents;
1466 /* allocate local copy in case parent_names is __initdata */
1467 clk->parent_names = kzalloc((sizeof(char*) * clk->num_parents),
1470 if (!clk->parent_names) {
1471 pr_err("%s: could not allocate clk->parent_names\n", __func__);
1473 goto fail_parent_names;
1477 /* copy each string name in case parent_names is __initdata */
1478 for (i = 0; i < clk->num_parents; i++) {
1479 clk->parent_names[i] = kstrdup(hw->init->parent_names[i],
1481 if (!clk->parent_names[i]) {
1482 pr_err("%s: could not copy parent_names\n", __func__);
1484 goto fail_parent_names_copy;
1488 ret = __clk_init(dev, clk);
1492 fail_parent_names_copy:
1494 kfree(clk->parent_names[i]);
1495 kfree(clk->parent_names);
1503 * clk_register - allocate a new clock, register it and return an opaque cookie
1504 * @dev: device that is registering this clock
1505 * @hw: link to hardware-specific clock data
1507 * clk_register is the primary interface for populating the clock tree with new
1508 * clock nodes. It returns a pointer to the newly allocated struct clk which
1509 * cannot be dereferenced by driver code but may be used in conjunction with the
1510 * rest of the clock API. In the event of an error clk_register will return an
1511 * error pointer; drivers must check for it with IS_ERR() after calling clk_register.
1513 struct clk *clk_register(struct device *dev, struct clk_hw *hw)
1518 clk = kzalloc(sizeof(*clk), GFP_KERNEL);
1520 pr_err("%s: could not allocate clk\n", __func__);
1525 ret = _clk_register(dev, hw, clk);
1531 return ERR_PTR(ret);
1533 EXPORT_SYMBOL_GPL(clk_register);
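/*
 * Illustrative sketch, not part of this file: registering a clock from a
 * platform driver.  "my_clk_ops" and "my_hw" are hypothetical objects
 * supplied by the clock driver; only the clk_init_data/clk_hw wiring shown
 * here is part of this interface.
 *
 *	static struct clk_init_data my_init = {
 *		.name = "my_clk",
 *		.ops = &my_clk_ops,
 *		.parent_names = (const char *[]){ "osc24m" },
 *		.num_parents = 1,
 *	};
 *
 *	my_hw.init = &my_init;
 *	clk = clk_register(dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */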
1536 * clk_unregister - unregister a currently registered clock
1537 * @clk: clock to unregister
1539 * Currently unimplemented.
1541 void clk_unregister(struct clk *clk) {}
1542 EXPORT_SYMBOL_GPL(clk_unregister);
1544 static void devm_clk_release(struct device *dev, void *res)
1546 clk_unregister(res);
1550 * devm_clk_register - resource managed clk_register()
1551 * @dev: device that is registering this clock
1552 * @hw: link to hardware-specific clock data
1554 * Managed clk_register(). Clocks returned from this function are
1555 * automatically clk_unregister()ed on driver detach. See clk_register() for
1558 struct clk *devm_clk_register(struct device *dev, struct clk_hw *hw)
1563 clk = devres_alloc(devm_clk_release, sizeof(*clk), GFP_KERNEL);
1565 return ERR_PTR(-ENOMEM);
1567 ret = _clk_register(dev, hw, clk);
1569 devres_add(dev, clk);
1577 EXPORT_SYMBOL_GPL(devm_clk_register);
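/*
 * Illustrative sketch, not part of this file: the devres variant removes the
 * need for an explicit clk_unregister() in the driver's remove path.
 * "my_hw" is a hypothetical clk_hw set up exactly as for clk_register().
 *
 *	clk = devm_clk_register(&pdev->dev, &my_hw);
 *	if (IS_ERR(clk))
 *		return PTR_ERR(clk);
 */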
1579 static int devm_clk_match(struct device *dev, void *res, void *data)
1581 struct clk *c = res;
1588 * devm_clk_unregister - resource managed clk_unregister()
1589 * @clk: clock to unregister
1591 * Deallocate a clock allocated with devm_clk_register(). Normally
1592 * this function will not need to be called and the resource management
1593 * code will ensure that the resource is freed.
1595 void devm_clk_unregister(struct device *dev, struct clk *clk)
1597 WARN_ON(devres_release(dev, devm_clk_release, devm_clk_match, clk));
1599 EXPORT_SYMBOL_GPL(devm_clk_unregister);
1601 /*** clk rate change notifiers ***/
1604 * clk_notifier_register - add a clk rate change notifier
1605 * @clk: struct clk * to watch
1606 * @nb: struct notifier_block * with callback info
1608 * Request notification when clk's rate changes. This uses an SRCU
1609 * notifier because we want it to block and notifier unregistrations are
1610 * uncommon. The callbacks associated with the notifier must not
1611 * re-enter into the clk framework by calling any top-level clk APIs;
1612 * this would recursively acquire the prepare_lock mutex and deadlock.
1614 * Pre-change notifier callbacks will be passed the current, pre-change
1615 * rate of the clk via struct clk_notifier_data.old_rate. The new,
1616 * post-change rate of the clk is passed via struct
1617 * clk_notifier_data.new_rate.
1619 * Post-change notifiers will pass the now-current, post-change rate of
1620 * the clk in both struct clk_notifier_data.old_rate and struct
1621 * clk_notifier_data.new_rate.
1623 * Abort-change notifiers are effectively the opposite of pre-change
1624 * notifiers: the original pre-change clk rate is passed in via struct
1625 * clk_notifier_data.new_rate and the failed post-change rate is passed
1626 * in via struct clk_notifier_data.old_rate.
1628 * clk_notifier_register() must be called from non-atomic context.
1629 * Returns -EINVAL if called with null arguments, -ENOMEM upon
1630 * allocation failure; otherwise, passes along the return value of
1631 * srcu_notifier_chain_register().
1633 int clk_notifier_register(struct clk *clk, struct notifier_block *nb)
1635 struct clk_notifier *cn;
1641 mutex_lock(&prepare_lock);
1643 /* search the list of notifiers for this clk */
1644 list_for_each_entry(cn, &clk_notifier_list, node)
1648 /* if clk wasn't in the notifier list, allocate new clk_notifier */
1649 if (cn->clk != clk) {
1650 cn = kzalloc(sizeof(struct clk_notifier), GFP_KERNEL);
1655 srcu_init_notifier_head(&cn->notifier_head);
1657 list_add(&cn->node, &clk_notifier_list);
1660 ret = srcu_notifier_chain_register(&cn->notifier_head, nb);
1662 clk->notifier_count++;
1665 mutex_unlock(&prepare_lock);
1669 EXPORT_SYMBOL_GPL(clk_notifier_register);
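/*
 * Illustrative sketch, not part of this file: a consumer-side rate-change
 * notifier.  "my_clk_notifier_cb", "my_nb" and the 100 MHz limit are
 * hypothetical; the msg values and clk_notifier_data layout are those
 * documented above.
 *
 *	static int my_clk_notifier_cb(struct notifier_block *nb,
 *				      unsigned long msg, void *data)
 *	{
 *		struct clk_notifier_data *cnd = data;
 *
 *		if (msg == PRE_RATE_CHANGE && cnd->new_rate > 100000000)
 *			return NOTIFY_BAD;	(veto the proposed rate)
 *		if (msg == POST_RATE_CHANGE)
 *			pr_debug("rate is now %lu\n", cnd->new_rate);
 *		return NOTIFY_OK;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_clk_notifier_cb,
 *	};
 *
 *	ret = clk_notifier_register(my_clk, &my_nb);
 */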
1672 * clk_notifier_unregister - remove a clk rate change notifier
1673 * @clk: struct clk *
1674 * @nb: struct notifier_block * with callback info
1676 * Requests no further notification for changes to 'clk' and frees memory
1677 * allocated in clk_notifier_register.
1679 * Returns -EINVAL if called with null arguments; otherwise, passes
1680 * along the return value of srcu_notifier_chain_unregister().
1682 int clk_notifier_unregister(struct clk *clk, struct notifier_block *nb)
1684 struct clk_notifier *cn = NULL;
1690 mutex_lock(&prepare_lock);
1692 list_for_each_entry(cn, &clk_notifier_list, node)
1696 if (cn->clk == clk) {
1697 ret = srcu_notifier_chain_unregister(&cn->notifier_head, nb);
1699 clk->notifier_count--;
1701 /* XXX the notifier code should handle this better */
1702 if (!cn->notifier_head.head) {
1703 srcu_cleanup_notifier_head(&cn->notifier_head);
1711 mutex_unlock(&prepare_lock);
1715 EXPORT_SYMBOL_GPL(clk_notifier_unregister);
1719 * struct of_clk_provider - Clock provider registration structure
1720 * @link: Entry in global list of clock providers
1721 * @node: Pointer to device tree node of clock provider
1722 * @get: Get clock callback. Returns NULL or a struct clk for the
1723 * given clock specifier
1724 * @data: context pointer to be passed into @get callback
1726 struct of_clk_provider {
1727 struct list_head link;
1729 struct device_node *node;
1730 struct clk *(*get)(struct of_phandle_args *clkspec, void *data);
1734 static LIST_HEAD(of_clk_providers);
1735 static DEFINE_MUTEX(of_clk_lock);
1737 struct clk *of_clk_src_simple_get(struct of_phandle_args *clkspec,
1742 EXPORT_SYMBOL_GPL(of_clk_src_simple_get);
1744 struct clk *of_clk_src_onecell_get(struct of_phandle_args *clkspec, void *data)
1746 struct clk_onecell_data *clk_data = data;
1747 unsigned int idx = clkspec->args[0];
1749 if (idx >= clk_data->clk_num) {
1750 pr_err("%s: invalid clock index %d\n", __func__, idx);
1751 return ERR_PTR(-EINVAL);
1754 return clk_data->clks[idx];
1756 EXPORT_SYMBOL_GPL(of_clk_src_onecell_get);
1759 * of_clk_add_provider() - Register a clock provider for a node
1760 * @np: Device node pointer associated with clock provider
1761 * @clk_src_get: callback for decoding clock
1762 * @data: context pointer for @clk_src_get callback.
1764 int of_clk_add_provider(struct device_node *np,
1765 struct clk *(*clk_src_get)(struct of_phandle_args *clkspec,
1769 struct of_clk_provider *cp;
1771 cp = kzalloc(sizeof(struct of_clk_provider), GFP_KERNEL);
1775 cp->node = of_node_get(np);
1777 cp->get = clk_src_get;
1779 mutex_lock(&of_clk_lock);
1780 list_add(&cp->link, &of_clk_providers);
1781 mutex_unlock(&of_clk_lock);
1782 pr_debug("Added clock from %s\n", np->full_name);
1786 EXPORT_SYMBOL_GPL(of_clk_add_provider);
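/*
 * Illustrative sketch, not part of this file: registering a one-cell
 * provider for a SoC clock-controller node.  "soc_clks", "NR_SOC_CLKS" and
 * "np" are hypothetical; of_clk_src_onecell_get() above performs the index
 * lookup at clk_get() time.
 *
 *	static struct clk *soc_clks[NR_SOC_CLKS];
 *	static struct clk_onecell_data soc_clk_data = {
 *		.clks = soc_clks,
 *		.clk_num = ARRAY_SIZE(soc_clks),
 *	};
 *
 *	ret = of_clk_add_provider(np, of_clk_src_onecell_get, &soc_clk_data);
 */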
1789 * of_clk_del_provider() - Remove a previously registered clock provider
1790 * @np: Device node pointer associated with clock provider
1792 void of_clk_del_provider(struct device_node *np)
1794 struct of_clk_provider *cp;
1796 mutex_lock(&of_clk_lock);
1797 list_for_each_entry(cp, &of_clk_providers, link) {
1798 if (cp->node == np) {
1799 list_del(&cp->link);
1800 of_node_put(cp->node);
1805 mutex_unlock(&of_clk_lock);
1807 EXPORT_SYMBOL_GPL(of_clk_del_provider);
1809 struct clk *of_clk_get_from_provider(struct of_phandle_args *clkspec)
1811 struct of_clk_provider *provider;
1812 struct clk *clk = ERR_PTR(-ENOENT);
1814 /* Check if we have such a provider in our array */
1815 mutex_lock(&of_clk_lock);
1816 list_for_each_entry(provider, &of_clk_providers, link) {
1817 if (provider->node == clkspec->np)
1818 clk = provider->get(clkspec, provider->data);
1822 mutex_unlock(&of_clk_lock);
1827 const char *of_clk_get_parent_name(struct device_node *np, int index)
1829 struct of_phandle_args clkspec;
1830 const char *clk_name;
1836 rc = of_parse_phandle_with_args(np, "clocks", "#clock-cells", index,
1841 if (of_property_read_string_index(clkspec.np, "clock-output-names",
1842 clkspec.args_count ? clkspec.args[0] : 0,
1844 clk_name = clkspec.np->name;
1846 of_node_put(clkspec.np);
1849 EXPORT_SYMBOL_GPL(of_clk_get_parent_name);
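/*
 * Illustrative sketch, not part of this file: a DT-based clock driver can
 * use of_clk_get_parent_name() to fill in parent_names before registering
 * its clocks.  "np" is the provider's device_node; the fallback message is
 * hypothetical.
 *
 *	const char *parent = of_clk_get_parent_name(np, 0);
 *	if (!parent)
 *		pr_warn("%s: no parent clock found\n", np->full_name);
 */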
1852 * of_clk_init() - Scan and init clock providers from the DT
1853 * @matches: array of compatible values and init functions for providers.
1855 * This function scans the device tree for matching clock providers and
1856 * calls their initialization functions
1858 void __init of_clk_init(const struct of_device_id *matches)
1860 struct device_node *np;
1862 for_each_matching_node(np, matches) {
1863 const struct of_device_id *match = of_match_node(matches, np);
1864 of_clk_init_cb_t clk_init_cb = match->data;