// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 *
 * Dong Aisheng <aisheng.dong@nxp.com>
 */
9 #include <linux/clk-provider.h>
10 #include <linux/device.h>
11 #include <linux/export.h>
13 #include <linux/slab.h>
15 static int __must_check of_clk_bulk_get(struct device_node *np, int num_clks,
16 struct clk_bulk_data *clks)
21 for (i = 0; i < num_clks; i++)
24 for (i = 0; i < num_clks; i++) {
25 clks[i].clk = of_clk_get(np, i);
26 if (IS_ERR(clks[i].clk)) {
27 ret = PTR_ERR(clks[i].clk);
28 pr_err("%pOF: Failed to get clk index: %d ret: %d\n",
38 clk_bulk_put(i, clks);
43 static int __must_check of_clk_bulk_get_all(struct device_node *np,
44 struct clk_bulk_data **clks)
46 struct clk_bulk_data *clk_bulk;
50 num_clks = of_clk_get_parent_count(np);
54 clk_bulk = kmalloc_array(num_clks, sizeof(*clk_bulk), GFP_KERNEL);
58 ret = of_clk_bulk_get(np, num_clks, clk_bulk);
69 void clk_bulk_put(int num_clks, struct clk_bulk_data *clks)
71 while (--num_clks >= 0) {
72 clk_put(clks[num_clks].clk);
73 clks[num_clks].clk = NULL;
76 EXPORT_SYMBOL_GPL(clk_bulk_put);
78 int __must_check clk_bulk_get(struct device *dev, int num_clks,
79 struct clk_bulk_data *clks)
84 for (i = 0; i < num_clks; i++)
87 for (i = 0; i < num_clks; i++) {
88 clks[i].clk = clk_get(dev, clks[i].id);
89 if (IS_ERR(clks[i].clk)) {
90 ret = PTR_ERR(clks[i].clk);
91 if (ret != -EPROBE_DEFER)
92 dev_err(dev, "Failed to get clk '%s': %d\n",
102 clk_bulk_put(i, clks);
106 EXPORT_SYMBOL(clk_bulk_get);
/*
 * clk_bulk_put_all - release a table allocated by clk_bulk_get_all()
 * @num_clks:	number of entries in @clks
 * @clks:	the kmalloc'd clk_bulk_data table to release and free
 *
 * Safe to call with a NULL or error pointer (no-op in that case).
 */
void clk_bulk_put_all(int num_clks, struct clk_bulk_data *clks)
{
	if (IS_ERR_OR_NULL(clks))
		return;

	clk_bulk_put(num_clks, clks);

	/* The table itself was allocated by of_clk_bulk_get_all(). */
	kfree(clks);
}
EXPORT_SYMBOL(clk_bulk_put_all);
119 int __must_check clk_bulk_get_all(struct device *dev,
120 struct clk_bulk_data **clks)
122 struct device_node *np = dev_of_node(dev);
127 return of_clk_bulk_get_all(np, clks);
129 EXPORT_SYMBOL(clk_bulk_get_all);
131 #ifdef CONFIG_HAVE_CLK_PREPARE
134 * clk_bulk_unprepare - undo preparation of a set of clock sources
135 * @num_clks: the number of clk_bulk_data
136 * @clks: the clk_bulk_data table being unprepared
138 * clk_bulk_unprepare may sleep, which differentiates it from clk_bulk_disable.
139 * Returns 0 on success, -EERROR otherwise.
141 void clk_bulk_unprepare(int num_clks, const struct clk_bulk_data *clks)
143 while (--num_clks >= 0)
144 clk_unprepare(clks[num_clks].clk);
146 EXPORT_SYMBOL_GPL(clk_bulk_unprepare);
149 * clk_bulk_prepare - prepare a set of clocks
150 * @num_clks: the number of clk_bulk_data
151 * @clks: the clk_bulk_data table being prepared
153 * clk_bulk_prepare may sleep, which differentiates it from clk_bulk_enable.
154 * Returns 0 on success, -EERROR otherwise.
156 int __must_check clk_bulk_prepare(int num_clks,
157 const struct clk_bulk_data *clks)
162 for (i = 0; i < num_clks; i++) {
163 ret = clk_prepare(clks[i].clk);
165 pr_err("Failed to prepare clk '%s': %d\n",
174 clk_bulk_unprepare(i, clks);
178 EXPORT_SYMBOL_GPL(clk_bulk_prepare);
180 #endif /* CONFIG_HAVE_CLK_PREPARE */
183 * clk_bulk_disable - gate a set of clocks
184 * @num_clks: the number of clk_bulk_data
185 * @clks: the clk_bulk_data table being gated
187 * clk_bulk_disable must not sleep, which differentiates it from
188 * clk_bulk_unprepare. clk_bulk_disable must be called before
189 * clk_bulk_unprepare.
191 void clk_bulk_disable(int num_clks, const struct clk_bulk_data *clks)
194 while (--num_clks >= 0)
195 clk_disable(clks[num_clks].clk);
197 EXPORT_SYMBOL_GPL(clk_bulk_disable);
200 * clk_bulk_enable - ungate a set of clocks
201 * @num_clks: the number of clk_bulk_data
202 * @clks: the clk_bulk_data table being ungated
204 * clk_bulk_enable must not sleep
205 * Returns 0 on success, -EERROR otherwise.
207 int __must_check clk_bulk_enable(int num_clks, const struct clk_bulk_data *clks)
212 for (i = 0; i < num_clks; i++) {
213 ret = clk_enable(clks[i].clk);
215 pr_err("Failed to enable clk '%s': %d\n",
224 clk_bulk_disable(i, clks);
228 EXPORT_SYMBOL_GPL(clk_bulk_enable);