/*
 * Copyright (c) 2013 Samsung Electronics Co., Ltd.
 *		http://www.samsung.com
 *
 * Amit Daniel Kachhap <amit.daniel@samsung.com>
 *
 * EXYNOS5440 - CPU frequency scaling support
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/clk.h>
#include <linux/cpu.h>
#include <linux/cpufreq.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm_opp.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
/* Register definitions */
#define XMU_DVFS_CTRL		0x0060
#define XMU_PMU_P0_7		0x0064
#define XMU_C0_3_PSTATE		0x0090
#define XMU_P_LIMIT		0x00a0
#define XMU_P_STATUS		0x00a4
#define XMU_PMUEVTEN		0x00d0
#define XMU_PMUIRQEN		0x00d4
#define XMU_PMUIRQ		0x00d8

/* PMU mask and shift definations */
#define P_VALUE_MASK		0x7

#define XMU_DVFS_CTRL_EN_SHIFT	0

#define P0_7_CPUCLKDEV_SHIFT	21
#define P0_7_CPUCLKDEV_MASK	0x7
#define P0_7_ATBCLKDEV_SHIFT	18
#define P0_7_ATBCLKDEV_MASK	0x7
#define P0_7_CSCLKDEV_SHIFT	15
#define P0_7_CSCLKDEV_MASK	0x7
#define P0_7_CPUEMA_SHIFT	28
#define P0_7_CPUEMA_MASK	0xf
#define P0_7_L2EMA_SHIFT	24
#define P0_7_L2EMA_MASK		0xf
#define P0_7_VDD_SHIFT		8
#define P0_7_VDD_MASK		0x7f
#define P0_7_FREQ_SHIFT		0
#define P0_7_FREQ_MASK		0xff

#define C0_3_PSTATE_VALID_SHIFT	8
#define C0_3_PSTATE_CURR_SHIFT	4
#define C0_3_PSTATE_NEW_SHIFT	0

#define PSTATE_CHANGED_EVTEN_SHIFT	0

#define PSTATE_CHANGED_IRQEN_SHIFT	0

#define PSTATE_CHANGED_SHIFT		0

/* some constant values for clock divider calculation */
#define CPU_DIV_FREQ_MAX	500
#define CPU_DBG_FREQ_MAX	375
#define CPU_ATB_FREQ_MAX	500

#define PMIC_LOW_VOLT		0x30
#define PMIC_HIGH_VOLT		0x28

#define CPUEMA_HIGH		0x2
#define CPUEMA_MID		0x4
#define CPUEMA_LOW		0x7

#define L2EMA_HIGH		0x1
#define L2EMA_MID		0x3
#define L2EMA_LOW		0x4

/* frequency unit is 20MHZ */
#define FREQ_UNIT		20
#define MAX_VOLTAGE		1550000 /* In microvolt */
#define VOLTAGE_STEP		12500	/* In microvolt */

#define CPUFREQ_NAME		"exynos5440_dvfs"
#define DEF_TRANS_LATENCY	100000
/* Performance-state indices; the XMU exposes at most eight P-states (L0..L7). */
enum cpufreq_level_index {
	L0, L1, L2, L3, L4,
	L5, L6, L7, L8, L9,
};
#define CPUFREQ_LEVEL_END	(L7 + 1)
98 struct exynos_dvfs_data {
100 struct resource *mem;
103 unsigned int latency;
104 struct cpufreq_frequency_table *freq_table;
105 unsigned int freq_count;
108 struct work_struct irq_work;
/* Single driver instance, allocated in exynos_cpufreq_probe(). */
static struct exynos_dvfs_data *dvfs_info;
/* Serializes frequency transitions against the IRQ work handler. */
static DEFINE_MUTEX(cpufreq_lock);
/* Shared old/new frequency pair passed to cpufreq transition notifiers. */
static struct cpufreq_freqs freqs;
115 static int init_div_table(void)
117 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
118 unsigned int tmp, clk_div, ema_div, freq, volt_id;
120 struct dev_pm_opp *opp;
123 for (i = 0; freq_tbl[i].frequency != CPUFREQ_TABLE_END; i++) {
125 opp = dev_pm_opp_find_freq_exact(dvfs_info->dev,
126 freq_tbl[i].frequency * 1000, true);
129 dev_err(dvfs_info->dev,
130 "failed to find valid OPP for %u KHZ\n",
131 freq_tbl[i].frequency);
135 freq = freq_tbl[i].frequency / 1000; /* In MHZ */
136 clk_div = ((freq / CPU_DIV_FREQ_MAX) & P0_7_CPUCLKDEV_MASK)
137 << P0_7_CPUCLKDEV_SHIFT;
138 clk_div |= ((freq / CPU_ATB_FREQ_MAX) & P0_7_ATBCLKDEV_MASK)
139 << P0_7_ATBCLKDEV_SHIFT;
140 clk_div |= ((freq / CPU_DBG_FREQ_MAX) & P0_7_CSCLKDEV_MASK)
141 << P0_7_CSCLKDEV_SHIFT;
144 volt_id = dev_pm_opp_get_voltage(opp);
145 volt_id = (MAX_VOLTAGE - volt_id) / VOLTAGE_STEP;
146 if (volt_id < PMIC_HIGH_VOLT) {
147 ema_div = (CPUEMA_HIGH << P0_7_CPUEMA_SHIFT) |
148 (L2EMA_HIGH << P0_7_L2EMA_SHIFT);
149 } else if (volt_id > PMIC_LOW_VOLT) {
150 ema_div = (CPUEMA_LOW << P0_7_CPUEMA_SHIFT) |
151 (L2EMA_LOW << P0_7_L2EMA_SHIFT);
153 ema_div = (CPUEMA_MID << P0_7_CPUEMA_SHIFT) |
154 (L2EMA_MID << P0_7_L2EMA_SHIFT);
157 tmp = (clk_div | ema_div | (volt_id << P0_7_VDD_SHIFT)
158 | ((freq / FREQ_UNIT) << P0_7_FREQ_SHIFT));
160 __raw_writel(tmp, dvfs_info->base + XMU_PMU_P0_7 + 4 * i);
167 static void exynos_enable_dvfs(unsigned int cur_frequency)
169 unsigned int tmp, i, cpu;
170 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
172 __raw_writel(0, dvfs_info->base + XMU_DVFS_CTRL);
174 /* Enable PSTATE Change Event */
175 tmp = __raw_readl(dvfs_info->base + XMU_PMUEVTEN);
176 tmp |= (1 << PSTATE_CHANGED_EVTEN_SHIFT);
177 __raw_writel(tmp, dvfs_info->base + XMU_PMUEVTEN);
179 /* Enable PSTATE Change IRQ */
180 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQEN);
181 tmp |= (1 << PSTATE_CHANGED_IRQEN_SHIFT);
182 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQEN);
184 /* Set initial performance index */
185 for (i = 0; freq_table[i].frequency != CPUFREQ_TABLE_END; i++)
186 if (freq_table[i].frequency == cur_frequency)
189 if (freq_table[i].frequency == CPUFREQ_TABLE_END) {
190 dev_crit(dvfs_info->dev, "Boot up frequency not supported\n");
191 /* Assign the highest frequency */
193 cur_frequency = freq_table[i].frequency;
196 dev_info(dvfs_info->dev, "Setting dvfs initial frequency = %uKHZ",
199 for (cpu = 0; cpu < CONFIG_NR_CPUS; cpu++) {
200 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
201 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
202 tmp |= (i << C0_3_PSTATE_NEW_SHIFT);
203 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + cpu * 4);
207 __raw_writel(1 << XMU_DVFS_CTRL_EN_SHIFT,
208 dvfs_info->base + XMU_DVFS_CTRL);
211 static int exynos_target(struct cpufreq_policy *policy, unsigned int index)
215 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
217 mutex_lock(&cpufreq_lock);
219 freqs.old = policy->cur;
220 freqs.new = freq_table[index].frequency;
222 cpufreq_notify_transition(policy, &freqs, CPUFREQ_PRECHANGE);
224 /* Set the target frequency in all C0_3_PSTATE register */
225 for_each_cpu(i, policy->cpus) {
226 tmp = __raw_readl(dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
227 tmp &= ~(P_VALUE_MASK << C0_3_PSTATE_NEW_SHIFT);
228 tmp |= (index << C0_3_PSTATE_NEW_SHIFT);
230 __raw_writel(tmp, dvfs_info->base + XMU_C0_3_PSTATE + i * 4);
232 mutex_unlock(&cpufreq_lock);
236 static void exynos_cpufreq_work(struct work_struct *work)
238 unsigned int cur_pstate, index;
239 struct cpufreq_policy *policy = cpufreq_cpu_get(0); /* boot CPU */
240 struct cpufreq_frequency_table *freq_table = dvfs_info->freq_table;
242 /* Ensure we can access cpufreq structures */
243 if (unlikely(dvfs_info->dvfs_enabled == false))
246 mutex_lock(&cpufreq_lock);
247 freqs.old = policy->cur;
249 cur_pstate = __raw_readl(dvfs_info->base + XMU_P_STATUS);
250 if (cur_pstate >> C0_3_PSTATE_VALID_SHIFT & 0x1)
251 index = (cur_pstate >> C0_3_PSTATE_CURR_SHIFT) & P_VALUE_MASK;
253 index = (cur_pstate >> C0_3_PSTATE_NEW_SHIFT) & P_VALUE_MASK;
255 if (likely(index < dvfs_info->freq_count)) {
256 freqs.new = freq_table[index].frequency;
258 dev_crit(dvfs_info->dev, "New frequency out of range\n");
259 freqs.new = freqs.old;
261 cpufreq_notify_transition(policy, &freqs, CPUFREQ_POSTCHANGE);
263 cpufreq_cpu_put(policy);
264 mutex_unlock(&cpufreq_lock);
266 enable_irq(dvfs_info->irq);
269 static irqreturn_t exynos_cpufreq_irq(int irq, void *id)
273 tmp = __raw_readl(dvfs_info->base + XMU_PMUIRQ);
274 if (tmp >> PSTATE_CHANGED_SHIFT & 0x1) {
275 __raw_writel(tmp, dvfs_info->base + XMU_PMUIRQ);
276 disable_irq_nosync(irq);
277 schedule_work(&dvfs_info->irq_work);
282 static void exynos_sort_descend_freq_table(void)
284 struct cpufreq_frequency_table *freq_tbl = dvfs_info->freq_table;
286 unsigned int tmp_freq;
288 * Exynos5440 clock controller state logic expects the cpufreq table to
289 * be in descending order. But the OPP library constructs the table in
290 * ascending order. So to make the table descending we just need to
291 * swap the i element with the N - i element.
293 for (i = 0; i < dvfs_info->freq_count / 2; i++) {
294 index = dvfs_info->freq_count - i - 1;
295 tmp_freq = freq_tbl[i].frequency;
296 freq_tbl[i].frequency = freq_tbl[index].frequency;
297 freq_tbl[index].frequency = tmp_freq;
301 static int exynos_cpufreq_cpu_init(struct cpufreq_policy *policy)
303 policy->clk = dvfs_info->cpu_clk;
304 return cpufreq_generic_init(policy, dvfs_info->freq_table,
308 static struct cpufreq_driver exynos_driver = {
309 .flags = CPUFREQ_STICKY | CPUFREQ_ASYNC_NOTIFICATION |
310 CPUFREQ_NEED_INITIAL_FREQ_CHECK,
311 .verify = cpufreq_generic_frequency_table_verify,
312 .target_index = exynos_target,
313 .get = cpufreq_generic_get,
314 .init = exynos_cpufreq_cpu_init,
315 .exit = cpufreq_generic_exit,
316 .name = CPUFREQ_NAME,
317 .attr = cpufreq_generic_attr,
320 static const struct of_device_id exynos_cpufreq_match[] = {
322 .compatible = "samsung,exynos5440-cpufreq",
326 MODULE_DEVICE_TABLE(of, exynos_cpufreq_match);
328 static int exynos_cpufreq_probe(struct platform_device *pdev)
331 struct device_node *np;
333 unsigned int cur_frequency;
335 np = pdev->dev.of_node;
339 dvfs_info = devm_kzalloc(&pdev->dev, sizeof(*dvfs_info), GFP_KERNEL);
345 dvfs_info->dev = &pdev->dev;
347 ret = of_address_to_resource(np, 0, &res);
351 dvfs_info->base = devm_ioremap_resource(dvfs_info->dev, &res);
352 if (IS_ERR(dvfs_info->base)) {
353 ret = PTR_ERR(dvfs_info->base);
357 dvfs_info->irq = irq_of_parse_and_map(np, 0);
358 if (!dvfs_info->irq) {
359 dev_err(dvfs_info->dev, "No cpufreq irq found\n");
364 ret = of_init_opp_table(dvfs_info->dev);
366 dev_err(dvfs_info->dev, "failed to init OPP table: %d\n", ret);
370 ret = dev_pm_opp_init_cpufreq_table(dvfs_info->dev,
371 &dvfs_info->freq_table);
373 dev_err(dvfs_info->dev,
374 "failed to init cpufreq table: %d\n", ret);
377 dvfs_info->freq_count = dev_pm_opp_get_opp_count(dvfs_info->dev);
378 exynos_sort_descend_freq_table();
380 if (of_property_read_u32(np, "clock-latency", &dvfs_info->latency))
381 dvfs_info->latency = DEF_TRANS_LATENCY;
383 dvfs_info->cpu_clk = devm_clk_get(dvfs_info->dev, "armclk");
384 if (IS_ERR(dvfs_info->cpu_clk)) {
385 dev_err(dvfs_info->dev, "Failed to get cpu clock\n");
386 ret = PTR_ERR(dvfs_info->cpu_clk);
390 cur_frequency = clk_get_rate(dvfs_info->cpu_clk);
391 if (!cur_frequency) {
392 dev_err(dvfs_info->dev, "Failed to get clock rate\n");
396 cur_frequency /= 1000;
398 INIT_WORK(&dvfs_info->irq_work, exynos_cpufreq_work);
399 ret = devm_request_irq(dvfs_info->dev, dvfs_info->irq,
400 exynos_cpufreq_irq, IRQF_TRIGGER_NONE,
401 CPUFREQ_NAME, dvfs_info);
403 dev_err(dvfs_info->dev, "Failed to register IRQ\n");
407 ret = init_div_table();
409 dev_err(dvfs_info->dev, "Failed to initialise div table\n");
413 exynos_enable_dvfs(cur_frequency);
414 ret = cpufreq_register_driver(&exynos_driver);
416 dev_err(dvfs_info->dev,
417 "%s: failed to register cpufreq driver\n", __func__);
422 dvfs_info->dvfs_enabled = true;
426 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
429 dev_err(&pdev->dev, "%s: failed initialization\n", __func__);
433 static int exynos_cpufreq_remove(struct platform_device *pdev)
435 cpufreq_unregister_driver(&exynos_driver);
436 dev_pm_opp_free_cpufreq_table(dvfs_info->dev, &dvfs_info->freq_table);
440 static struct platform_driver exynos_cpufreq_platdrv = {
442 .name = "exynos5440-cpufreq",
443 .owner = THIS_MODULE,
444 .of_match_table = exynos_cpufreq_match,
446 .probe = exynos_cpufreq_probe,
447 .remove = exynos_cpufreq_remove,
449 module_platform_driver(exynos_cpufreq_platdrv);
/* Module metadata */
MODULE_AUTHOR("Amit Daniel Kachhap <amit.daniel@samsung.com>");
MODULE_DESCRIPTION("Exynos5440 cpufreq driver");
MODULE_LICENSE("GPL");