// SPDX-License-Identifier: GPL-2.0
//
// Copyright (c) 2011-2014 Samsung Electronics Co., Ltd.
//		http://www.samsung.com
//
// Exynos - Suspend support
//
// Based on arch/arm/mach-s3c2410/pm.c
// Copyright (c) 2006 Simtec Electronics
//	Ben Dooks <ben@simtec.co.uk>
#include <linux/init.h>
#include <linux/suspend.h>
#include <linux/syscore_ops.h>
#include <linux/cpu_pm.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqdomain.h>
#include <linux/of_address.h>
#include <linux/err.h>
#include <linux/regulator/machine.h>
#include <linux/soc/samsung/exynos-pmu.h>
#include <linux/soc/samsung/exynos-regs-pmu.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/firmware.h>
#include <asm/mcpm.h>
#include <asm/smp_scu.h>
#include <asm/suspend.h>

#include "common.h"
#define REG_TABLE_END		(-1U)

#define EXYNOS5420_CPU_STATE	0x28
/**
 * struct exynos_wkup_irq - PMU IRQ to mask mapping
 * @hwirq: Hardware IRQ signal of the PMU
 * @mask: Mask in PMU wake-up mask register
 */
struct exynos_wkup_irq {
	unsigned int hwirq;
	u32 mask;
};
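/*
 * Each per-SoC wake-up table below ends with an empty (all-zero) sentinel
 * entry; exynos_irq_set_wake() walks a table until it reads a zero mask.
 */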
struct exynos_pm_data {
	const struct exynos_wkup_irq *wkup_irq;
	unsigned int wake_disable_mask;

	void (*pm_prepare)(void);
	void (*pm_resume_prepare)(void);
	void (*pm_resume)(void);
	int (*pm_suspend)(void);
	int (*cpu_suspend)(unsigned long);
};
/* Used only on Exynos542x/5800 */
struct exynos_pm_state {
	int cpu_state;
	unsigned int pmu_spare3;
	void __iomem *sysram_base;
	phys_addr_t sysram_phys;
	bool secure_firmware;
};

static const struct exynos_pm_data *pm_data __ro_after_init;
static struct exynos_pm_state pm_state;
/*
 * GIC wake-up support
 */

static u32 exynos_irqwake_intmask = 0xffffffff;
static const struct exynos_wkup_irq exynos3250_wkup_irq[] = {
	{ 73, BIT(1) }, /* RTC alarm */
	{ 74, BIT(2) }, /* RTC tick */
	{ /* sentinel */ },
};

static const struct exynos_wkup_irq exynos4_wkup_irq[] = {
	{ 44, BIT(1) }, /* RTC alarm */
	{ 45, BIT(2) }, /* RTC tick */
	{ /* sentinel */ },
};

static const struct exynos_wkup_irq exynos5250_wkup_irq[] = {
	{ 43, BIT(1) }, /* RTC alarm */
	{ 44, BIT(2) }, /* RTC tick */
	{ /* sentinel */ },
};
static u32 exynos_read_eint_wakeup_mask(void)
{
	return pmu_raw_readl(EXYNOS_EINT_WAKEUP_MASK);
}
static int exynos_irq_set_wake(struct irq_data *data, unsigned int state)
{
	const struct exynos_wkup_irq *wkup_irq;

	if (!pm_data->wkup_irq)
		return -ENOENT;
	wkup_irq = pm_data->wkup_irq;

	while (wkup_irq->mask) {
		if (wkup_irq->hwirq == data->hwirq) {
			if (!state)
				exynos_irqwake_intmask |= wkup_irq->mask;
			else
				exynos_irqwake_intmask &= ~wkup_irq->mask;
			return 0;
		}
		++wkup_irq;
	}

	return -ENOENT;
}
static struct irq_chip exynos_pmu_chip = {
	.name			= "PMU",
	.irq_eoi		= irq_chip_eoi_parent,
	.irq_mask		= irq_chip_mask_parent,
	.irq_unmask		= irq_chip_unmask_parent,
	.irq_retrigger		= irq_chip_retrigger_hierarchy,
	.irq_set_wake		= exynos_irq_set_wake,
#ifdef CONFIG_SMP
	.irq_set_affinity	= irq_chip_set_affinity_parent,
#endif
};
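/*
 * The PMU irqchip is stacked on top of the GIC as a hierarchical IRQ
 * domain: mask/unmask/eoi/affinity are forwarded to the parent GIC,
 * while irq_set_wake is intercepted above to build the PMU wake-up
 * interrupt mask that gets written out at suspend time.
 */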
static int exynos_pmu_domain_translate(struct irq_domain *d,
				       struct irq_fwspec *fwspec,
				       unsigned long *hwirq,
				       unsigned int *type)
{
	if (is_of_node(fwspec->fwnode)) {
		if (fwspec->param_count != 3)
			return -EINVAL;

		/* No PPI should point to this domain */
		if (fwspec->param[0] != 0)
			return -EINVAL;

		*hwirq = fwspec->param[1];
		*type = fwspec->param[2];
		return 0;
	}

	return -EINVAL;
}
static int exynos_pmu_domain_alloc(struct irq_domain *domain,
				   unsigned int virq,
				   unsigned int nr_irqs, void *data)
{
	struct irq_fwspec *fwspec = data;
	struct irq_fwspec parent_fwspec;
	irq_hw_number_t hwirq;
	int i;

	if (fwspec->param_count != 3)
		return -EINVAL;	/* Not GIC compliant */
	if (fwspec->param[0] != 0)
		return -EINVAL;	/* No PPI should point to this domain */

	hwirq = fwspec->param[1];

	for (i = 0; i < nr_irqs; i++)
		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
					      &exynos_pmu_chip, NULL);

	parent_fwspec = *fwspec;
	parent_fwspec.fwnode = domain->parent->fwnode;
	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs,
					    &parent_fwspec);
}
static const struct irq_domain_ops exynos_pmu_domain_ops = {
	.translate	= exynos_pmu_domain_translate,
	.alloc		= exynos_pmu_domain_alloc,
	.free		= irq_domain_free_irqs_common,
};
static int __init exynos_pmu_irq_init(struct device_node *node,
				      struct device_node *parent)
{
	struct irq_domain *parent_domain, *domain;

	if (!parent) {
		pr_err("%pOF: no parent, giving up\n", node);
		return -ENODEV;
	}

	parent_domain = irq_find_host(parent);
	if (!parent_domain) {
		pr_err("%pOF: unable to obtain parent domain\n", node);
		return -ENXIO;
	}

	pmu_base_addr = of_iomap(node, 0);

	if (!pmu_base_addr) {
		pr_err("%pOF: failed to find exynos pmu register\n", node);
		return -ENOMEM;
	}

	domain = irq_domain_add_hierarchy(parent_domain, 0, 0,
					  node, &exynos_pmu_domain_ops,
					  NULL);
	if (!domain) {
		iounmap(pmu_base_addr);
		pmu_base_addr = NULL;
		return -ENOMEM;
	}

	/*
	 * Clear the OF_POPULATED flag set in of_irq_init so that
	 * later the Exynos PMU platform device won't be skipped.
	 */
	of_node_clear_flag(node, OF_POPULATED);

	return 0;
}
#define EXYNOS_PMU_IRQ(symbol, name)	IRQCHIP_DECLARE(symbol, name, exynos_pmu_irq_init)

EXYNOS_PMU_IRQ(exynos3250_pmu_irq, "samsung,exynos3250-pmu");
EXYNOS_PMU_IRQ(exynos4210_pmu_irq, "samsung,exynos4210-pmu");
EXYNOS_PMU_IRQ(exynos4412_pmu_irq, "samsung,exynos4412-pmu");
EXYNOS_PMU_IRQ(exynos5250_pmu_irq, "samsung,exynos5250-pmu");
EXYNOS_PMU_IRQ(exynos5420_pmu_irq, "samsung,exynos5420-pmu");
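/*
 * IRQCHIP_DECLARE() registers exynos_pmu_irq_init against each compatible
 * string, so the PMU interrupt domain above is set up from irqchip_init()
 * early during boot when a matching device tree node is found.
 */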
static int exynos_cpu_do_idle(void)
{
	/* issue the standby signal into the pm unit. */
	cpu_do_idle();

	pr_info("Failed to suspend the system\n");
	return 1; /* Aborting suspend */
}
static void exynos_flush_cache_all(void)
{
	flush_cache_all();
	outer_flush_all();
}

static int exynos_cpu_suspend(unsigned long arg)
{
	exynos_flush_cache_all();
	return exynos_cpu_do_idle();
}

static int exynos3250_cpu_suspend(unsigned long arg)
{
	flush_cache_all();
	return exynos_cpu_do_idle();
}
static int exynos5420_cpu_suspend(unsigned long arg)
{
	/* MCPM works with HW CPU identifiers */
	unsigned int mpidr = read_cpuid_mpidr();
	unsigned int cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
	unsigned int cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);

	if (IS_ENABLED(CONFIG_EXYNOS_MCPM)) {
		mcpm_set_entry_vector(cpu, cluster, exynos_cpu_resume);
		mcpm_cpu_suspend();
	}

	pr_info("Failed to suspend the system\n");

	/* return value != 0 means failure */
	return 1;
}
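/*
 * Note: on a successful MCPM suspend mcpm_cpu_suspend() does not return,
 * because the calling CPU is powered down; reaching the pr_info() above
 * therefore always means the suspend attempt was aborted.
 */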
static void exynos_pm_set_wakeup_mask(void)
{
	/*
	 * Set wake-up mask registers.
	 * EXYNOS_EINT_WAKEUP_MASK is set by the pinctrl driver in late suspend.
	 */
	pmu_raw_writel(exynos_irqwake_intmask & ~BIT(31), S5P_WAKEUP_MASK);
}
static void exynos_pm_enter_sleep_mode(void)
{
	/* Set value of power down register for sleep mode */
	exynos_sys_powerdown_conf(SYS_SLEEP);
	pmu_raw_writel(EXYNOS_SLEEP_MAGIC, S5P_INFORM1);
}
static void exynos_pm_prepare(void)
{
	exynos_set_delayed_reset_assertion(false);

	/* Set wake-up mask registers */
	exynos_pm_set_wakeup_mask();

	exynos_pm_enter_sleep_mode();

	/* ensure at least INFORM0 has the resume address */
	pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
}
static void exynos3250_pm_prepare(void)
{
	unsigned int tmp;

	/* Set wake-up mask registers */
	exynos_pm_set_wakeup_mask();

	tmp = pmu_raw_readl(EXYNOS3_ARM_L2_OPTION);
	tmp &= ~EXYNOS5_OPTION_USE_RETENTION;
	pmu_raw_writel(tmp, EXYNOS3_ARM_L2_OPTION);

	exynos_pm_enter_sleep_mode();

	/* ensure at least INFORM0 has the resume address */
	pmu_raw_writel(__pa_symbol(exynos_cpu_resume), S5P_INFORM0);
}
static void exynos5420_pm_prepare(void)
{
	unsigned int tmp;

	/* Set wake-up mask registers */
	exynos_pm_set_wakeup_mask();

	pm_state.pmu_spare3 = pmu_raw_readl(S5P_PMU_SPARE3);
	/*
	 * The cpu state needs to be saved and restored so that the
	 * secondary CPUs will enter low power start. Though U-Boot
	 * sets the cpu state with the low power flag, the kernel
	 * needs to restore it in case the primary cpu fails to
	 * suspend for any reason.
	 */
	pm_state.cpu_state = readl_relaxed(pm_state.sysram_base +
					   EXYNOS5420_CPU_STATE);
	writel_relaxed(0x0, pm_state.sysram_base + EXYNOS5420_CPU_STATE);
	if (pm_state.secure_firmware)
		exynos_smc(SMC_CMD_REG, SMC_REG_ID_SFR_W(pm_state.sysram_phys +
							 EXYNOS5420_CPU_STATE),
			   0, 0);

	exynos_pm_enter_sleep_mode();

	/* ensure at least INFORM0 has the resume address */
	if (IS_ENABLED(CONFIG_EXYNOS_MCPM))
		pmu_raw_writel(__pa_symbol(mcpm_entry_point), S5P_INFORM0);

	tmp = pmu_raw_readl(EXYNOS_L2_OPTION(0));
	tmp &= ~EXYNOS_L2_USE_RETENTION;
	pmu_raw_writel(tmp, EXYNOS_L2_OPTION(0));

	tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1);
	tmp |= EXYNOS5420_UFS;
	pmu_raw_writel(tmp, EXYNOS5420_SFR_AXI_CGDIS1);

	tmp = pmu_raw_readl(EXYNOS5420_ARM_COMMON_OPTION);
	tmp &= ~EXYNOS5420_L2RSTDISABLE_VALUE;
	pmu_raw_writel(tmp, EXYNOS5420_ARM_COMMON_OPTION);

	tmp = pmu_raw_readl(EXYNOS5420_FSYS2_OPTION);
	tmp |= EXYNOS5420_EMULATION;
	pmu_raw_writel(tmp, EXYNOS5420_FSYS2_OPTION);

	tmp = pmu_raw_readl(EXYNOS5420_PSGEN_OPTION);
	tmp |= EXYNOS5420_EMULATION;
	pmu_raw_writel(tmp, EXYNOS5420_PSGEN_OPTION);
}
static int exynos_pm_suspend(void)
{
	exynos_pm_central_suspend();

	/* Setting SEQ_OPTION register */
	pmu_raw_writel(S5P_USE_STANDBY_WFI0 | S5P_USE_STANDBY_WFE0,
		       S5P_CENTRAL_SEQ_OPTION);

	if (read_cpuid_part() == ARM_CPU_PART_CORTEX_A9)
		exynos_cpu_save_register();

	return 0;
}
static int exynos5420_pm_suspend(void)
{
	u32 this_cluster;

	exynos_pm_central_suspend();

	/* Setting SEQ_OPTION register */

	this_cluster = MPIDR_AFFINITY_LEVEL(read_cpuid_mpidr(), 1);
	if (!this_cluster)
		pmu_raw_writel(EXYNOS5420_ARM_USE_STANDBY_WFI0,
			       S5P_CENTRAL_SEQ_OPTION);
	else
		pmu_raw_writel(EXYNOS5420_KFC_USE_STANDBY_WFI0,
			       S5P_CENTRAL_SEQ_OPTION);

	return 0;
}
static void exynos_pm_resume(void)
{
	u32 cpuid = read_cpuid_part();

	if (exynos_pm_central_resume())
		goto early_wakeup;

	if (cpuid == ARM_CPU_PART_CORTEX_A9)
		exynos_scu_enable();

	if (call_firmware_op(resume) == -ENOSYS
	    && cpuid == ARM_CPU_PART_CORTEX_A9)
		exynos_cpu_restore_register();

early_wakeup:

	/* Clear SLEEP mode set in INFORM1 */
	pmu_raw_writel(0x0, S5P_INFORM1);
	exynos_set_delayed_reset_assertion(true);
}
static void exynos3250_pm_resume(void)
{
	u32 cpuid = read_cpuid_part();

	if (exynos_pm_central_resume())
		goto early_wakeup;

	pmu_raw_writel(S5P_USE_STANDBY_WFI_ALL, S5P_CENTRAL_SEQ_OPTION);

	if (call_firmware_op(resume) == -ENOSYS
	    && cpuid == ARM_CPU_PART_CORTEX_A9)
		exynos_cpu_restore_register();

early_wakeup:

	/* Clear SLEEP mode set in INFORM1 */
	pmu_raw_writel(0x0, S5P_INFORM1);
}
static void exynos5420_prepare_pm_resume(void)
{
	unsigned int mpidr, cluster;

	mpidr = read_cpuid_mpidr();
	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);

	if (IS_ENABLED(CONFIG_EXYNOS_MCPM))
		WARN_ON(mcpm_cpu_powered_up());

	if (IS_ENABLED(CONFIG_HW_PERF_EVENTS) && cluster != 0) {
		/*
		 * When the system is resumed on the LITTLE/KFC core
		 * (cluster 1), the DSCR is not properly updated until the
		 * power is turned on also for cluster 0. Enable it for a
		 * while to propagate the SPNIDEN and SPIDEN signals from the
		 * Secure JTAG block and avoid an undefined instruction issue
		 * on CP14 reset.
		 */
		pmu_raw_writel(S5P_CORE_LOCAL_PWR_EN,
				EXYNOS_COMMON_CONFIGURATION(0));
		pmu_raw_writel(0,
				EXYNOS_COMMON_CONFIGURATION(0));
	}
}
static void exynos5420_pm_resume(void)
{
	u32 tmp;

	/* Restore the CPU0 low power state register */
	tmp = pmu_raw_readl(EXYNOS5_ARM_CORE0_SYS_PWR_REG);
	pmu_raw_writel(tmp | S5P_CORE_LOCAL_PWR_EN,
		       EXYNOS5_ARM_CORE0_SYS_PWR_REG);

	/* Restore the sysram cpu state register */
	writel_relaxed(pm_state.cpu_state,
		       pm_state.sysram_base + EXYNOS5420_CPU_STATE);
	if (pm_state.secure_firmware)
		exynos_smc(SMC_CMD_REG,
			   SMC_REG_ID_SFR_W(pm_state.sysram_phys +
					    EXYNOS5420_CPU_STATE),
			   EXYNOS_AFTR_MAGIC, 0);

	pmu_raw_writel(EXYNOS5420_USE_STANDBY_WFI_ALL,
			S5P_CENTRAL_SEQ_OPTION);

	if (exynos_pm_central_resume())
		goto early_wakeup;

	pmu_raw_writel(pm_state.pmu_spare3, S5P_PMU_SPARE3);

early_wakeup:

	tmp = pmu_raw_readl(EXYNOS5420_SFR_AXI_CGDIS1);
	tmp &= ~EXYNOS5420_UFS;
	pmu_raw_writel(tmp, EXYNOS5420_SFR_AXI_CGDIS1);

	tmp = pmu_raw_readl(EXYNOS5420_FSYS2_OPTION);
	tmp &= ~EXYNOS5420_EMULATION;
	pmu_raw_writel(tmp, EXYNOS5420_FSYS2_OPTION);

	tmp = pmu_raw_readl(EXYNOS5420_PSGEN_OPTION);
	tmp &= ~EXYNOS5420_EMULATION;
	pmu_raw_writel(tmp, EXYNOS5420_PSGEN_OPTION);

	/* Clear SLEEP mode set in INFORM1 */
	pmu_raw_writel(0x0, S5P_INFORM1);
}
static int exynos_suspend_enter(suspend_state_t state)
{
	u32 eint_wakeup_mask = exynos_read_eint_wakeup_mask();
	int ret;

	pr_debug("%s: suspending the system...\n", __func__);

	pr_debug("%s: wakeup masks: %08x,%08x\n", __func__,
		 exynos_irqwake_intmask, eint_wakeup_mask);

	if (exynos_irqwake_intmask == -1U
	    && eint_wakeup_mask == EXYNOS_EINT_WAKEUP_MASK_DISABLED) {
		pr_err("%s: No wake-up sources!\n", __func__);
		pr_err("%s: Aborting sleep\n", __func__);
		return -EINVAL;
	}

	if (pm_data->pm_prepare)
		pm_data->pm_prepare();
	flush_cache_all();

	ret = call_firmware_op(suspend);
	if (ret == -ENOSYS)
		ret = cpu_suspend(0, pm_data->cpu_suspend);
	if (ret)
		return ret;

	if (pm_data->pm_resume_prepare)
		pm_data->pm_resume_prepare();

	pr_debug("%s: wakeup stat: %08x\n", __func__,
		 pmu_raw_readl(S5P_WAKEUP_STAT));

	pr_debug("%s: resuming the system...\n", __func__);

	return 0;
}
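/*
 * The flow above tries a secure-firmware assisted suspend first; if the
 * firmware op is not implemented (-ENOSYS), it falls back to the generic
 * cpu_suspend() path using the per-SoC cpu_suspend handler. A non-zero
 * return value propagates an aborted suspend back to the PM core.
 */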
static int exynos_suspend_prepare(void)
{
	int ret;

	/*
	 * REVISIT: It would be better if the struct platform_suspend_ops
	 * .prepare handler received the suspend_state_t as a parameter to
	 * avoid hard-coding the suspend-to-mem state. It's safe to do
	 * it now only because the suspend_valid_only_mem function is
	 * used as the .valid callback to check whether a given state
	 * is supported by the platform anyway.
	 */
	ret = regulator_suspend_prepare(PM_SUSPEND_MEM);
	if (ret) {
		pr_err("Failed to prepare regulators for suspend (%d)\n", ret);
		return ret;
	}

	return 0;
}
static void exynos_suspend_finish(void)
{
	int ret;

	ret = regulator_suspend_finish();
	if (ret)
		pr_warn("Failed to resume regulators from suspend (%d)\n", ret);
}
static const struct platform_suspend_ops exynos_suspend_ops = {
	.enter		= exynos_suspend_enter,
	.prepare	= exynos_suspend_prepare,
	.finish		= exynos_suspend_finish,
	.valid		= suspend_valid_only_mem,
};
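/*
 * platform_suspend_ops glue: .prepare/.finish wrap the regulator suspend
 * state handling, while .enter drives the per-SoC sequence selected in
 * pm_data (wake-up masks, INFORM registers, cpu_suspend handler).
 */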
static const struct exynos_pm_data exynos3250_pm_data = {
	.wkup_irq	= exynos3250_wkup_irq,
	.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
	.pm_suspend	= exynos_pm_suspend,
	.pm_resume	= exynos3250_pm_resume,
	.pm_prepare	= exynos3250_pm_prepare,
	.cpu_suspend	= exynos3250_cpu_suspend,
};

static const struct exynos_pm_data exynos4_pm_data = {
	.wkup_irq	= exynos4_wkup_irq,
	.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
	.pm_suspend	= exynos_pm_suspend,
	.pm_resume	= exynos_pm_resume,
	.pm_prepare	= exynos_pm_prepare,
	.cpu_suspend	= exynos_cpu_suspend,
};

static const struct exynos_pm_data exynos5250_pm_data = {
	.wkup_irq	= exynos5250_wkup_irq,
	.wake_disable_mask = ((0xFF << 8) | (0x1F << 1)),
	.pm_suspend	= exynos_pm_suspend,
	.pm_resume	= exynos_pm_resume,
	.pm_prepare	= exynos_pm_prepare,
	.cpu_suspend	= exynos_cpu_suspend,
};

static const struct exynos_pm_data exynos5420_pm_data = {
	.wkup_irq	= exynos5250_wkup_irq,
	.wake_disable_mask = (0x7F << 7) | (0x1F << 1),
	.pm_resume_prepare = exynos5420_prepare_pm_resume,
	.pm_resume	= exynos5420_pm_resume,
	.pm_suspend	= exynos5420_pm_suspend,
	.pm_prepare	= exynos5420_pm_prepare,
	.cpu_suspend	= exynos5420_cpu_suspend,
};
static const struct of_device_id exynos_pmu_of_device_ids[] __initconst = {
	{
		.compatible = "samsung,exynos3250-pmu",
		.data = &exynos3250_pm_data,
	}, {
		.compatible = "samsung,exynos4210-pmu",
		.data = &exynos4_pm_data,
	}, {
		.compatible = "samsung,exynos4412-pmu",
		.data = &exynos4_pm_data,
	}, {
		.compatible = "samsung,exynos5250-pmu",
		.data = &exynos5250_pm_data,
	}, {
		.compatible = "samsung,exynos5420-pmu",
		.data = &exynos5420_pm_data,
	},
	{ /* sentinel */ },
};
static struct syscore_ops exynos_pm_syscore_ops;
void __init exynos_pm_init(void)
{
	const struct of_device_id *match;
	struct device_node *np;
	u32 tmp;

	np = of_find_matching_node_and_match(NULL, exynos_pmu_of_device_ids, &match);
	if (!np) {
		pr_err("Failed to find PMU node\n");
		return;
	}

	if (WARN_ON(!of_find_property(np, "interrupt-controller", NULL))) {
		pr_warn("Outdated DT detected, suspend/resume will NOT work\n");
		of_node_put(np);
		return;
	}
	of_node_put(np);

	pm_data = (const struct exynos_pm_data *) match->data;

	/* All wakeup disable */
	tmp = pmu_raw_readl(S5P_WAKEUP_MASK);
	tmp |= pm_data->wake_disable_mask;
	pmu_raw_writel(tmp, S5P_WAKEUP_MASK);

	exynos_pm_syscore_ops.suspend	= pm_data->pm_suspend;
	exynos_pm_syscore_ops.resume	= pm_data->pm_resume;

	register_syscore_ops(&exynos_pm_syscore_ops);
	suspend_set_ops(&exynos_suspend_ops);

	/*
	 * Applicable as of now only to Exynos542x. If booted under secure
	 * firmware, the non-secure region of sysram should be used.
	 */
	if (exynos_secure_firmware_available()) {
		pm_state.sysram_phys = sysram_base_phys;
		pm_state.sysram_base = sysram_ns_base_addr;
		pm_state.secure_firmware = true;
	} else {
		pm_state.sysram_base = sysram_base_addr;
	}
}