// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * processor_idle - idle state submodule to the ACPI processor driver
 *
 * Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 * Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 * Copyright (C) 2004, 2005 Dominik Brodowski <linux@brodo.de>
 * Copyright (C) 2004 Anil S Keshavamurthy <anil.s.keshavamurthy@intel.com>
 *			- Added processor hotplug support
 * Copyright (C) 2005 Venkatesh Pallipadi <venkatesh.pallipadi@intel.com>
 *			- Added support for C3 on SMP
 */

#define pr_fmt(fmt) "ACPI: " fmt

#include <linux/module.h>
#include <linux/acpi.h>
#include <linux/dmi.h>
#include <linux/sched.h>	/* need_resched() */
#include <linux/tick.h>
#include <linux/cpuidle.h>
#include <linux/cpu.h>
#include <acpi/processor.h>

/*
 * Include the apic definitions for x86 to have the APIC timer related defines
 * available also for UP (on SMP it gets magically included via linux/smp.h).
 * asm/acpi.h is not an option, as it would require more include magic. Also
 * creating an empty asm-ia64/apic.h would just trade pest vs. cholera.
 */
#ifdef CONFIG_X86
#include <asm/apic.h>
#endif

#define ACPI_PROCESSOR_CLASS	"processor"
#define _COMPONENT		ACPI_PROCESSOR_COMPONENT
ACPI_MODULE_NAME("processor_idle");
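
/*
 * With CONFIG_ARCH_HAS_CPU_RELAX, index 0 of the cpuidle state table is
 * taken by the polling state (see cpuidle_poll_state_init() in
 * acpi_processor_setup_cstates() below), so ACPI C-states start at 1.
 */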
#define ACPI_IDLE_STATE_START	(IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX) ? 1 : 0)

static unsigned int max_cstate __read_mostly = ACPI_PROCESSOR_MAX_POWER;
module_param(max_cstate, uint, 0000);
static unsigned int nocst __read_mostly;
module_param(nocst, uint, 0000);
static int bm_check_disable __read_mostly;
module_param(bm_check_disable, uint, 0000);
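
/*
 * latency_factor is used when registering C-states below to derive each
 * state's target residency from its exit latency:
 * target_residency = exit_latency * latency_factor.
 */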
static unsigned int latency_factor __read_mostly = 2;
module_param(latency_factor, uint, 0644);

static DEFINE_PER_CPU(struct cpuidle_device *, acpi_cpuidle_device);

struct cpuidle_driver acpi_idle_driver = {
	.name =		"acpi_idle",
	.owner =	THIS_MODULE,
};

#ifdef CONFIG_ACPI_PROCESSOR_CSTATE
static
DEFINE_PER_CPU(struct acpi_processor_cx * [CPUIDLE_STATE_MAX], acpi_cstate);

static int disabled_by_idle_boot_param(void)
{
	return boot_option_idle_override == IDLE_POLL ||
	       boot_option_idle_override == IDLE_HALT;
}

/*
 * IBM ThinkPad R40e crashes mysteriously when going into C2 or C3.
 * For now disable this. Probably a bug somewhere else.
 *
 * To skip this limit, boot/load with a large max_cstate limit.
 */
static int set_max_cstate(const struct dmi_system_id *id)
{
	if (max_cstate > ACPI_PROCESSOR_MAX_POWER)
		return 0;

	pr_notice("%s detected - limiting to C%ld max_cstate."
		  " Override with \"processor.max_cstate=%d\"\n", id->ident,
		  (long)id->driver_data, ACPI_PROCESSOR_MAX_POWER + 1);

	max_cstate = (long)id->driver_data;

	return 0;
}

static const struct dmi_system_id processor_power_dmi_table[] = {
	{ set_max_cstate, "Clevo 5600D", {
	  DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies LTD"),
	  DMI_MATCH(DMI_BIOS_VERSION, "SHE845M0.86C.0013.D.0302131307")},
	 (void *)2},
	{ set_max_cstate, "Pavilion zv5000", {
	  DMI_MATCH(DMI_SYS_VENDOR, "Hewlett-Packard"),
	  DMI_MATCH(DMI_PRODUCT_NAME, "Pavilion zv5000 (DS502A#ABA)")},
	 (void *)1},
	{ set_max_cstate, "Asus L8400B", {
	  DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK Computer Inc."),
	  DMI_MATCH(DMI_PRODUCT_NAME, "L8400B series Notebook PC")},
	 (void *)1},
	{},
};

/*
 * Callers should disable interrupts before the call and enable
 * interrupts after return.
 */
static void __cpuidle acpi_safe_halt(void)
{
	if (!tif_need_resched()) {
		safe_halt();
		local_irq_disable();
	}
}
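
/*
 * On x86 the local APIC timer may stop in deep C-states; the helpers in
 * the ARCH_APICTIMER_STOPS_ON_C3 block below decide when a CPU must
 * switch to the broadcast timer (tick_broadcast_enter()/exit()).
 */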

#ifdef ARCH_APICTIMER_STOPS_ON_C3

/*
 * Some BIOS implementations switch to C3 in the published C2 state.
 * This seems to be a common problem on AMD boxen, but other vendors
 * are affected too. We pick the most conservative approach: we assume
 * that the local APIC stops in both C2 and C3.
 */
static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cx)
{
	struct acpi_processor_power *pwr = &pr->power;
	u8 type = local_apic_timer_c2_ok ? ACPI_STATE_C3 : ACPI_STATE_C2;

	if (cpu_has(&cpu_data(pr->id), X86_FEATURE_ARAT))
		return;

	if (boot_cpu_has_bug(X86_BUG_AMD_APIC_C1E))
		type = ACPI_STATE_C1;

	/*
	 * Check if one of the previous states already marked the lapic
	 * unstable.
	 */
	if (pwr->timer_broadcast_on_state < state)
		return;

	if (cx->type >= type)
		pr->power.timer_broadcast_on_state = state;
}

static void __lapic_timer_propagate_broadcast(void *arg)
{
	struct acpi_processor *pr = (struct acpi_processor *)arg;

	if (pr->power.timer_broadcast_on_state < INT_MAX)
		tick_broadcast_enable();
	else
		tick_broadcast_disable();
}

static void lapic_timer_propagate_broadcast(struct acpi_processor *pr)
{
	smp_call_function_single(pr->id, __lapic_timer_propagate_broadcast,
				 (void *)pr, 1);
}

/* Power(C) State timer broadcast control */
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
	int state = cx - pr->power.states;

	if (state >= pr->power.timer_broadcast_on_state) {
		if (broadcast)
			tick_broadcast_enter();
		else
			tick_broadcast_exit();
	}
}

#else

static void lapic_timer_check_state(int state, struct acpi_processor *pr,
				    struct acpi_processor_cx *cstate) { }
static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { }
static void lapic_timer_state_broadcast(struct acpi_processor *pr,
					struct acpi_processor_cx *cx,
					int broadcast)
{
}

#endif

#if defined(CONFIG_X86)
static void tsc_check_state(int state)
{
	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_HYGON:
	case X86_VENDOR_AMD:
	case X86_VENDOR_INTEL:
	case X86_VENDOR_CENTAUR:
	case X86_VENDOR_ZHAOXIN:
		/*
		 * AMD Fam10h TSC will tick in all
		 * C/P/S0/S1 states when this bit is set.
		 */
		if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
			return;
		fallthrough;
	default:
		/* TSC could halt in idle, so notify users */
		if (state > ACPI_STATE_C1)
			mark_tsc_unstable("TSC halts in idle");
	}
}
#else
static void tsc_check_state(int state) { return; }
#endif

static int acpi_processor_get_power_info_fadt(struct acpi_processor *pr)
{
	if (!pr->pblk)
		return -ENODEV;

	/* if info is obtained from pblk/fadt, type equals state */
	pr->power.states[ACPI_STATE_C2].type = ACPI_STATE_C2;
	pr->power.states[ACPI_STATE_C3].type = ACPI_STATE_C3;

#ifndef CONFIG_HOTPLUG_CPU
	/*
	 * Check for P_LVL2_UP flag before entering C2 and above on
	 * an SMP system.
	 */
	if ((num_online_cpus() > 1) &&
	    !(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED))
		return -ENODEV;
#endif

	/* determine C2 and C3 address from pblk */
	pr->power.states[ACPI_STATE_C2].address = pr->pblk + 4;
	pr->power.states[ACPI_STATE_C3].address = pr->pblk + 5;

	/* determine latencies from FADT */
	pr->power.states[ACPI_STATE_C2].latency = acpi_gbl_FADT.c2_latency;
	pr->power.states[ACPI_STATE_C3].latency = acpi_gbl_FADT.c3_latency;

	/*
	 * FADT specified C2 latency must be less than or equal to
	 * 100 microseconds.
	 */
	if (acpi_gbl_FADT.c2_latency > ACPI_PROCESSOR_MAX_C2_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C2 latency too large [%d]\n",
				  acpi_gbl_FADT.c2_latency));
		/* invalidate C2 */
		pr->power.states[ACPI_STATE_C2].address = 0;
	}

	/*
	 * FADT supplied C3 latency must be less than or equal to
	 * 1000 microseconds.
	 */
	if (acpi_gbl_FADT.c3_latency > ACPI_PROCESSOR_MAX_C3_LATENCY) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 latency too large [%d]\n",
				  acpi_gbl_FADT.c3_latency));
		/* invalidate C3 */
		pr->power.states[ACPI_STATE_C3].address = 0;
	}

	ACPI_DEBUG_PRINT((ACPI_DB_INFO,
			  "lvl2[0x%08x] lvl3[0x%08x]\n",
			  pr->power.states[ACPI_STATE_C2].address,
			  pr->power.states[ACPI_STATE_C3].address));

	snprintf(pr->power.states[ACPI_STATE_C2].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL2 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C2].address);
	snprintf(pr->power.states[ACPI_STATE_C3].desc,
		 ACPI_CX_DESC_LEN, "ACPI P_LVL3 IOPORT 0x%x",
		 pr->power.states[ACPI_STATE_C3].address);

	return 0;
}
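
/*
 * When neither _CST nor the FADT/P_BLK yields a usable state, fall back to
 * a bare C1 entered via HLT; every processor must support C1.
 */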
static int acpi_processor_get_power_info_default(struct acpi_processor *pr)
{
	if (!pr->power.states[ACPI_STATE_C1].valid) {
		/* set the first C-State to C1 */
		/* all processors need to support C1 */
		pr->power.states[ACPI_STATE_C1].type = ACPI_STATE_C1;
		pr->power.states[ACPI_STATE_C1].valid = 1;
		pr->power.states[ACPI_STATE_C1].entry_method = ACPI_CSTATE_HALT;

		snprintf(pr->power.states[ACPI_STATE_C1].desc,
			 ACPI_CX_DESC_LEN, "ACPI HLT");
	}
	/* the C0 state only exists as a filler in our array */
	pr->power.states[ACPI_STATE_C0].valid = 1;

	return 0;
}

static int acpi_processor_get_power_info_cst(struct acpi_processor *pr)
{
	int ret;

	if (nocst)
		return -ENODEV;

	ret = acpi_processor_evaluate_cst(pr->handle, pr->id, &pr->power);
	if (ret)
		return ret;

	if (!pr->power.count)
		return -EFAULT;

	pr->flags.has_cst = 1;

	return 0;
}

static void acpi_processor_power_verify_c3(struct acpi_processor *pr,
					   struct acpi_processor_cx *cx)
{
	static int bm_check_flag = -1;
	static int bm_control_flag = -1;

	if (!cx->address)
		return;

	/*
	 * PIIX4 Erratum #18: We don't support C3 when Type-F (fast)
	 * DMA transfers are used by any ISA device to avoid livelock.
	 * Note that we could disable Type-F DMA (as recommended by
	 * the erratum), but this is known to disrupt certain ISA
	 * devices thus we take the conservative approach.
	 */
	else if (errata.piix4.fdma) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO,
				  "C3 not supported on PIIX4 with Type-F DMA\n"));
		return;
	}

	/* All the logic here assumes flags.bm_check is same across all CPUs */
	if (bm_check_flag == -1) {
		/* Determine whether bm_check is needed based on CPU */
		acpi_processor_power_init_bm_check(&(pr->flags), pr->id);
		bm_check_flag = pr->flags.bm_check;
		bm_control_flag = pr->flags.bm_control;
	} else {
		pr->flags.bm_check = bm_check_flag;
		pr->flags.bm_control = bm_control_flag;
	}

	if (pr->flags.bm_check) {
		if (!pr->flags.bm_control) {
			if (pr->flags.has_cst != 1) {
				/* bus mastering control is necessary */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support requires BM control\n"));
				return;
			} else {
				/* Here we enter C3 without bus mastering */
				ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					"C3 support without BM control\n"));
			}
		}
	} else {
		/*
		 * WBINVD should be set in fadt, for C3 state to be
		 * supported when bm_check is not required.
		 */
		if (!(acpi_gbl_FADT.flags & ACPI_FADT_WBINVD)) {
			ACPI_DEBUG_PRINT((ACPI_DB_INFO,
					  "Cache invalidation should work properly"
					  " for C3 to be enabled on SMP systems\n"));
			return;
		}
	}

	/*
	 * Otherwise we've met all of our C3 requirements.
	 * Normalize the C3 latency to expedite policy.  Enable
	 * checking of bus mastering status (bm_check) so we can
	 * use this in our C3 policy.
	 */
	cx->valid = 1;

	/*
	 * On older chipsets, BM_RLD needs to be set
	 * in order for Bus Master activity to wake the
	 * system from C3.  Newer chipsets handle DMA
	 * during C3 automatically and BM_RLD is a NOP.
	 * In either case, the proper way to
	 * handle BM_RLD is to set it and leave it set.
	 */
	acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_RLD, 1);
}

static int acpi_processor_power_verify(struct acpi_processor *pr)
{
	unsigned int i;
	unsigned int working = 0;

	pr->power.timer_broadcast_on_state = INT_MAX;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		struct acpi_processor_cx *cx = &pr->power.states[i];

		switch (cx->type) {
		case ACPI_STATE_C1:
			cx->valid = 1;
			break;

		case ACPI_STATE_C2:
			if (!cx->address)
				break;
			cx->valid = 1;
			break;

		case ACPI_STATE_C3:
			acpi_processor_power_verify_c3(pr, cx);
			break;
		}
		if (!cx->valid)
			continue;

		lapic_timer_check_state(i, pr, cx);
		tsc_check_state(cx->type);

		working++;
	}

	lapic_timer_propagate_broadcast(pr);

	return working;
}

static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	unsigned int i;
	int result;

	/* NOTE: the idle thread may not be running while calling
	 * this function */

	/* Zero initialize all the C-states info. */
	memset(pr->power.states, 0, sizeof(pr->power.states));

	result = acpi_processor_get_power_info_cst(pr);
	if (result == -ENODEV)
		result = acpi_processor_get_power_info_fadt(pr);

	if (result)
		return result;

	acpi_processor_get_power_info_default(pr);

	pr->power.count = acpi_processor_power_verify(pr);

	/*
	 * if one state of type C2 or C3 is available, mark this
	 * CPU as being "idle manageable"
	 */
	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER; i++) {
		if (pr->power.states[i].valid) {
			pr->power.count = i;
			pr->flags.power = 1;
		}
	}

	return 0;
}

/**
 * acpi_idle_bm_check - checks if bus master activity was detected
 */
static int acpi_idle_bm_check(void)
{
	u32 bm_status = 0;

	if (bm_check_disable)
		return 0;

	acpi_read_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, &bm_status);
	if (bm_status)
		acpi_write_bit_register(ACPI_BITREG_BUS_MASTER_STATUS, 1);
	/*
	 * PIIX4 Erratum #18: Note that BM_STS doesn't always reflect
	 * the true state of bus mastering activity; forcing us to
	 * manually check the BMIDEA bit of each IDE channel.
	 */
	else if (errata.piix4.bmisx) {
		if ((inb_p(errata.piix4.bmisx + 0x02) & 0x01)
		    || (inb_p(errata.piix4.bmisx + 0x0A) & 0x01))
			bm_status = 1;
	}
	return bm_status;
}
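
/*
 * Note: BM_STS, like the other ACPI PM1 status bits, is write-one-to-clear,
 * so the write of 1 in acpi_idle_bm_check() above acknowledges the detected
 * bus-master activity rather than setting the bit.
 */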

static void wait_for_freeze(void)
{
#ifdef CONFIG_X86
	/* No delay is needed if we are in guest */
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
		return;
#endif
	/* Dummy wait op - must do something useless after P_LVL2 read
	   because chipsets cannot guarantee that STPCLK# signal
	   gets asserted in time to freeze execution properly. */
	inl(acpi_gbl_FADT.xpm_timer_block.address);
}

/**
 * acpi_idle_do_entry - enter idle state using the appropriate method
 * @cx: cstate data
 *
 * Caller disables interrupt before call and enables interrupt after return.
 */
static void __cpuidle acpi_idle_do_entry(struct acpi_processor_cx *cx)
{
	if (cx->entry_method == ACPI_CSTATE_FFH) {
		/* Call into architectural FFH based C-state */
		acpi_processor_ffh_cstate_enter(cx);
	} else if (cx->entry_method == ACPI_CSTATE_HALT) {
		acpi_safe_halt();
	} else {
		/* IO port based C-state */
		inb(cx->address);
		wait_for_freeze();
	}
}
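
/*
 * ACPI_CSTATE_FFH denotes "functional fixed hardware" entry, which on x86
 * typically means MWAIT with a state-specific hint; the SYSTEMIO path
 * instead reads the state's P_LVLx I/O port to trigger entry.
 */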

/**
 * acpi_idle_play_dead - enters an ACPI state for long-term idle (i.e. off-lining)
 * @dev: the target CPU
 * @index: the index of suggested state
 */
static int acpi_idle_play_dead(struct cpuidle_device *dev, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	ACPI_FLUSH_CPU_CACHE();

	while (1) {

		if (cx->entry_method == ACPI_CSTATE_HALT)
			safe_halt();
		else if (cx->entry_method == ACPI_CSTATE_SYSTEMIO) {
			inb(cx->address);
			wait_for_freeze();
		} else
			return -ENODEV;
	}

	/* Never reached */
	return 0;
}
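
/*
 * With CPU hotplug enabled, a platform that lacks _CST and does not set
 * the FADT C2-on-MP flag only advertises C1 as safe once more than one
 * CPU is online, hence the fallback implemented below.
 */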
static bool acpi_idle_fallback_to_c1(struct acpi_processor *pr)
{
	return IS_ENABLED(CONFIG_HOTPLUG_CPU) && !pr->flags.has_cst &&
		!(acpi_gbl_FADT.flags & ACPI_FADT_C2_MP_SUPPORTED);
}

static int c3_cpu_count;
static DEFINE_RAW_SPINLOCK(c3_lock);

/**
 * acpi_idle_enter_bm - enters C3 with proper BM handling
 * @pr: Target processor
 * @cx: Target state context
 * @timer_bc: Whether or not to change timer mode to broadcast
 */
static void acpi_idle_enter_bm(struct acpi_processor *pr,
			       struct acpi_processor_cx *cx, bool timer_bc)
{
	acpi_unlazy_tlb(smp_processor_id());

	/*
	 * Must be done before busmaster disable as we might need to
	 * access HPET !
	 */
	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 1);

	/*
	 * disable bus master
	 * bm_check implies we need ARB_DIS
	 * bm_control implies whether we can do ARB_DIS
	 *
	 * That leaves a case where bm_check is set and bm_control is
	 * not set. In that case we cannot do much, we enter C3
	 * without doing anything.
	 */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		c3_cpu_count++;
		/* Disable bus master arbitration when all CPUs are in C3 */
		if (c3_cpu_count == num_online_cpus())
			acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 1);
		raw_spin_unlock(&c3_lock);
	}

	acpi_idle_do_entry(cx);

	/* Re-enable bus master arbitration */
	if (pr->flags.bm_control) {
		raw_spin_lock(&c3_lock);
		acpi_write_bit_register(ACPI_BITREG_ARB_DISABLE, 0);
		c3_cpu_count--;
		raw_spin_unlock(&c3_lock);
	}

	if (timer_bc)
		lapic_timer_state_broadcast(pr, cx, 0);
}
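
/*
 * acpi_idle_enter() may demote the requested state: to C1 when only C1 is
 * known to be safe on this SMP configuration, or to the driver's safe
 * state when bus-master activity makes C3 entry inadvisable.
 */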
static int acpi_idle_enter(struct cpuidle_device *dev,
			   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);
	struct acpi_processor *pr;

	pr = __this_cpu_read(processors);
	if (unlikely(!pr))
		return -EINVAL;

	if (cx->type != ACPI_STATE_C1) {
		if (acpi_idle_fallback_to_c1(pr) && num_online_cpus() > 1) {
			index = ACPI_IDLE_STATE_START;
			cx = per_cpu(acpi_cstate[index], dev->cpu);
		} else if (cx->type == ACPI_STATE_C3 && pr->flags.bm_check) {
			if (cx->bm_sts_skip || !acpi_idle_bm_check()) {
				acpi_idle_enter_bm(pr, cx, true);
				return index;
			} else if (drv->safe_state_index >= 0) {
				index = drv->safe_state_index;
				cx = per_cpu(acpi_cstate[index], dev->cpu);
			} else {
				acpi_safe_halt();
				return -EBUSY;
			}
		}
	}

	lapic_timer_state_broadcast(pr, cx, 1);

	if (cx->type == ACPI_STATE_C3)
		ACPI_FLUSH_CPU_CACHE();

	acpi_idle_do_entry(cx);

	lapic_timer_state_broadcast(pr, cx, 0);

	return index;
}

static void acpi_idle_enter_s2idle(struct cpuidle_device *dev,
				   struct cpuidle_driver *drv, int index)
{
	struct acpi_processor_cx *cx = per_cpu(acpi_cstate[index], dev->cpu);

	if (cx->type == ACPI_STATE_C3) {
		struct acpi_processor *pr = __this_cpu_read(processors);

		if (unlikely(!pr))
			return;

		if (pr->flags.bm_check) {
			acpi_idle_enter_bm(pr, cx, false);
			return;
		} else {
			ACPI_FLUSH_CPU_CACHE();
		}
	}
	acpi_idle_do_entry(cx);
}
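
/*
 * Populate the per-CPU acpi_cstate[] table so that the cpuidle state index
 * used by acpi_idle_enter() can be mapped back to this CPU's Cx data.
 */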
static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	int i, count = ACPI_IDLE_STATE_START;
	struct acpi_processor_cx *cx;

	if (max_cstate == 0)
		max_cstate = 1;

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		per_cpu(acpi_cstate[count], dev->cpu) = cx;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	if (!count)
		return -EINVAL;

	return 0;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	int i, count;
	struct acpi_processor_cx *cx;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (max_cstate == 0)
		max_cstate = 1;

	if (IS_ENABLED(CONFIG_ARCH_HAS_CPU_RELAX)) {
		cpuidle_poll_state_init(drv);
		count = 1;
	} else {
		count = 0;
	}

	for (i = 1; i < ACPI_PROCESSOR_MAX_POWER && i <= max_cstate; i++) {
		cx = &pr->power.states[i];

		if (!cx->valid)
			continue;

		state = &drv->states[count];
		snprintf(state->name, CPUIDLE_NAME_LEN, "C%d", i);
		strlcpy(state->desc, cx->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = cx->latency;
		state->target_residency = cx->latency * latency_factor;
		state->enter = acpi_idle_enter;

		state->flags = 0;
		if (cx->type == ACPI_STATE_C1 || cx->type == ACPI_STATE_C2) {
			state->enter_dead = acpi_idle_play_dead;
			drv->safe_state_index = count;
		}
		/*
		 * Halt-induced C1 is not good for ->enter_s2idle, because it
		 * re-enables interrupts on exit. Moreover, C1 is generally not
		 * particularly interesting from the suspend-to-idle angle, so
		 * avoid C1 and the situations in which we may need to fall back
		 * to it altogether.
		 */
		if (cx->type != ACPI_STATE_C1 && !acpi_idle_fallback_to_c1(pr))
			state->enter_s2idle = acpi_idle_enter_s2idle;

		count++;
		if (count == CPUIDLE_STATE_MAX)
			break;
	}

	drv->state_count = count;

	if (!count)
		return -EINVAL;

	return 0;
}

static inline void acpi_processor_cstate_first_run_checks(void)
{
	static int first_run;

	if (first_run)
		return;
	dmi_check_system(processor_power_dmi_table);
	max_cstate = acpi_processor_cstate_check(max_cstate);
	if (max_cstate < ACPI_C_STATES_MAX)
		pr_notice("processor limited to max C-state %d\n", max_cstate);
	first_run++;

	if (nocst)
		return;

	acpi_processor_claim_cst_control();
}
#else

static inline int disabled_by_idle_boot_param(void) { return 0; }
static inline void acpi_processor_cstate_first_run_checks(void) { }
static int acpi_processor_get_cstate_info(struct acpi_processor *pr)
{
	return -ENODEV;
}

static int acpi_processor_setup_cpuidle_cx(struct acpi_processor *pr,
					   struct cpuidle_device *dev)
{
	return -EINVAL;
}

static int acpi_processor_setup_cstates(struct acpi_processor *pr)
{
	return -EINVAL;
}

#endif /* CONFIG_ACPI_PROCESSOR_CSTATE */

struct acpi_lpi_states_array {
	unsigned int size;
	unsigned int composite_states_size;
	struct acpi_lpi_state *entries;
	struct acpi_lpi_state *composite_states[ACPI_PROCESSOR_MAX_POWER];
};

static int obj_get_integer(union acpi_object *obj, u32 *value)
{
	if (obj->type != ACPI_TYPE_INTEGER)
		return -EINVAL;

	*value = obj->integer.value;

	return 0;
}
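
/*
 * Per ACPI 6.x, _LPI returns {revision, level ID, count, LPI packages...};
 * each LPI package holds, in order: min residency, worst-case wake latency,
 * flags, arch context-loss flags, residency counter frequency, enabled
 * parent state, entry method, residency/usage counter registers and the
 * state name, which is what the index arithmetic below relies on.
 */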
static int acpi_processor_evaluate_lpi(acpi_handle handle,
				       struct acpi_lpi_states_array *info)
{
	acpi_status status;
	int ret = 0;
	int pkg_count, state_idx = 1, loop;
	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
	union acpi_object *lpi_data;
	struct acpi_lpi_state *lpi_state;

	status = acpi_evaluate_object(handle, "_LPI", NULL, &buffer);
	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_INFO, "No _LPI, giving up\n"));
		return -ENODEV;
	}

	lpi_data = buffer.pointer;

	/* There must be at least 4 elements = 3 elements + 1 package */
	if (!lpi_data || lpi_data->type != ACPI_TYPE_PACKAGE ||
	    lpi_data->package.count < 4) {
		pr_debug("not enough elements in _LPI\n");
		ret = -ENODATA;
		goto end;
	}

	pkg_count = lpi_data->package.elements[2].integer.value;

	/* Validate number of power states. */
	if (pkg_count < 1 || pkg_count != lpi_data->package.count - 3) {
		pr_debug("count given by _LPI is not valid\n");
		ret = -ENODATA;
		goto end;
	}

	lpi_state = kcalloc(pkg_count, sizeof(*lpi_state), GFP_KERNEL);
	if (!lpi_state) {
		ret = -ENOMEM;
		goto end;
	}

	info->size = pkg_count;
	info->entries = lpi_state;

	/* LPI States start at index 3 */
	for (loop = 3; state_idx <= pkg_count; loop++, state_idx++, lpi_state++) {
		union acpi_object *element, *pkg_elem, *obj;

		element = &lpi_data->package.elements[loop];
		if (element->type != ACPI_TYPE_PACKAGE || element->package.count < 7)
			continue;

		pkg_elem = element->package.elements;

		obj = pkg_elem + 6;
		if (obj->type == ACPI_TYPE_BUFFER) {
			struct acpi_power_register *reg;

			reg = (struct acpi_power_register *)obj->buffer.pointer;
			if (reg->space_id != ACPI_ADR_SPACE_SYSTEM_IO &&
			    reg->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE)
				continue;

			lpi_state->address = reg->address;
			lpi_state->entry_method =
				reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE ?
				ACPI_CSTATE_FFH : ACPI_CSTATE_SYSTEMIO;
		} else if (obj->type == ACPI_TYPE_INTEGER) {
			lpi_state->entry_method = ACPI_CSTATE_INTEGER;
			lpi_state->address = obj->integer.value;
		} else {
			continue;
		}

		/* elements[7,8] skipped for now i.e. Residency/Usage counter */

		obj = pkg_elem + 9;
		if (obj->type == ACPI_TYPE_STRING)
			strlcpy(lpi_state->desc, obj->string.pointer,
				ACPI_CX_DESC_LEN);

		lpi_state->index = state_idx;
		if (obj_get_integer(pkg_elem + 0, &lpi_state->min_residency)) {
			pr_debug("No min. residency found, assuming 10 us\n");
			lpi_state->min_residency = 10;
		}

		if (obj_get_integer(pkg_elem + 1, &lpi_state->wake_latency)) {
			pr_debug("No wakeup latency found, assuming 10 us\n");
			lpi_state->wake_latency = 10;
		}

		if (obj_get_integer(pkg_elem + 2, &lpi_state->flags))
			lpi_state->flags = 0;

		if (obj_get_integer(pkg_elem + 3, &lpi_state->arch_flags))
			lpi_state->arch_flags = 0;

		if (obj_get_integer(pkg_elem + 4, &lpi_state->res_cnt_freq))
			lpi_state->res_cnt_freq = 1;

		if (obj_get_integer(pkg_elem + 5, &lpi_state->enable_parent_state))
			lpi_state->enable_parent_state = 0;
	}

	acpi_handle_debug(handle, "Found %d power states\n", state_idx);
end:
	kfree(buffer.pointer);
	return ret;
}

/*
 * flat_state_cnt - the number of composite LPI states after the process of
 * flattening
 */
static int flat_state_cnt;

/**
 * combine_lpi_states - combine local and parent LPI states to form a composite LPI state
 *
 * @local: local LPI state
 * @parent: parent LPI state
 * @result: composite LPI state
 */
static bool combine_lpi_states(struct acpi_lpi_state *local,
			       struct acpi_lpi_state *parent,
			       struct acpi_lpi_state *result)
{
	if (parent->entry_method == ACPI_CSTATE_INTEGER) {
		if (!parent->address) /* 0 means autopromotable */
			return false;
		result->address = local->address + parent->address;
	} else {
		result->address = parent->address;
	}

	result->min_residency = max(local->min_residency, parent->min_residency);
	result->wake_latency = local->wake_latency + parent->wake_latency;
	result->enable_parent_state = parent->enable_parent_state;
	result->entry_method = local->entry_method;

	result->flags = parent->flags;
	result->arch_flags = parent->arch_flags;
	result->index = parent->index;

	strlcpy(result->desc, local->desc, ACPI_CX_DESC_LEN);
	strlcat(result->desc, "+", ACPI_CX_DESC_LEN);
	strlcat(result->desc, parent->desc, ACPI_CX_DESC_LEN);

	return true;
}

#define ACPI_LPI_STATE_FLAGS_ENABLED	BIT(0)

static void stash_composite_state(struct acpi_lpi_states_array *curr_level,
				  struct acpi_lpi_state *t)
{
	curr_level->composite_states[curr_level->composite_states_size++] = t;
}
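
/*
 * Flatten one level of the LPI hierarchy: leaf (processor) states are
 * copied as-is, while each deeper level is combined with every compatible
 * composite state built from the previous level.
 */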
static int flatten_lpi_states(struct acpi_processor *pr,
			      struct acpi_lpi_states_array *curr_level,
			      struct acpi_lpi_states_array *prev_level)
{
	int i, j, state_count = curr_level->size;
	struct acpi_lpi_state *p, *t = curr_level->entries;

	curr_level->composite_states_size = 0;
	for (j = 0; j < state_count; j++, t++) {
		struct acpi_lpi_state *flpi;

		if (!(t->flags & ACPI_LPI_STATE_FLAGS_ENABLED))
			continue;

		if (flat_state_cnt >= ACPI_PROCESSOR_MAX_POWER) {
			pr_warn("Limiting number of LPI states to max (%d)\n",
				ACPI_PROCESSOR_MAX_POWER);
			pr_warn("Please increase ACPI_PROCESSOR_MAX_POWER if needed.\n");
			break;
		}

		flpi = &pr->power.lpi_states[flat_state_cnt];

		if (!prev_level) { /* leaf/processor node */
			memcpy(flpi, t, sizeof(*t));
			stash_composite_state(curr_level, flpi);
			flat_state_cnt++;
			continue;
		}

		for (i = 0; i < prev_level->composite_states_size; i++) {
			p = prev_level->composite_states[i];
			if (t->index <= p->enable_parent_state &&
			    combine_lpi_states(p, t, flpi)) {
				stash_composite_state(curr_level, flpi);
				flat_state_cnt++;
				flpi++;
			}
		}
	}

	kfree(curr_level->entries);
	return 0;
}
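
/*
 * Walk up the ACPI processor-container hierarchy, evaluating _LPI at each
 * level and flattening the results into pr->power.lpi_states[].
 */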
static int acpi_processor_get_lpi_info(struct acpi_processor *pr)
{
	int ret, i;
	acpi_status status;
	acpi_handle handle = pr->handle, pr_ahandle;
	struct acpi_device *d = NULL;
	struct acpi_lpi_states_array info[2], *tmp, *prev, *curr;

	if (!osc_pc_lpi_support_confirmed)
		return -EOPNOTSUPP;

	if (!acpi_has_method(handle, "_LPI"))
		return -EINVAL;

	flat_state_cnt = 0;
	prev = &info[0];
	curr = &info[1];
	handle = pr->handle;
	ret = acpi_processor_evaluate_lpi(handle, prev);
	if (ret)
		return ret;
	flatten_lpi_states(pr, prev, NULL);

	status = acpi_get_parent(handle, &pr_ahandle);
	while (ACPI_SUCCESS(status)) {
		acpi_bus_get_device(pr_ahandle, &d);
		handle = pr_ahandle;

		if (strcmp(acpi_device_hid(d), ACPI_PROCESSOR_CONTAINER_HID))
			break;

		/* can be optional ? */
		if (!acpi_has_method(handle, "_LPI"))
			break;

		ret = acpi_processor_evaluate_lpi(handle, curr);
		if (ret)
			break;

		/* flatten all the LPI states in this level of hierarchy */
		flatten_lpi_states(pr, curr, prev);

		tmp = prev, prev = curr, curr = tmp;

		status = acpi_get_parent(handle, &pr_ahandle);
	}

	pr->power.count = flat_state_cnt;
	/* reset the index after flattening */
	for (i = 0; i < pr->power.count; i++)
		pr->power.lpi_states[i].index = i;

	/* Tell driver that _LPI is supported. */
	pr->flags.has_lpi = 1;
	pr->flags.power = 1;

	return 0;
}

int __weak acpi_processor_ffh_lpi_probe(unsigned int cpu)
{
	return -ENODEV;
}

int __weak acpi_processor_ffh_lpi_enter(struct acpi_lpi_state *lpi)
{
	return -ENODEV;
}

/**
 * acpi_idle_lpi_enter - enters an ACPI LPI state
 * @dev: the target CPU
 * @drv: cpuidle driver containing cpuidle state info
 * @index: index of target state
 *
 * Return: 0 for success or negative value for error
 */
static int acpi_idle_lpi_enter(struct cpuidle_device *dev,
			       struct cpuidle_driver *drv, int index)
{
	struct acpi_processor *pr;
	struct acpi_lpi_state *lpi;

	pr = __this_cpu_read(processors);

	if (unlikely(!pr))
		return -EINVAL;

	lpi = &pr->power.lpi_states[index];
	if (lpi->entry_method == ACPI_CSTATE_FFH)
		return acpi_processor_ffh_lpi_enter(lpi);

	return -EINVAL;
}

static int acpi_processor_setup_lpi_states(struct acpi_processor *pr)
{
	int i;
	struct acpi_lpi_state *lpi;
	struct cpuidle_state *state;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.has_lpi)
		return -EOPNOTSUPP;

	for (i = 0; i < pr->power.count && i < CPUIDLE_STATE_MAX; i++) {
		lpi = &pr->power.lpi_states[i];

		state = &drv->states[i];
		snprintf(state->name, CPUIDLE_NAME_LEN, "LPI-%d", i);
		strlcpy(state->desc, lpi->desc, CPUIDLE_DESC_LEN);
		state->exit_latency = lpi->wake_latency;
		state->target_residency = lpi->min_residency;
		if (lpi->arch_flags)
			state->flags |= CPUIDLE_FLAG_TIMER_STOP;
		state->enter = acpi_idle_lpi_enter;
		drv->safe_state_index = i;
	}

	drv->state_count = i;

	return 0;
}

/**
 * acpi_processor_setup_cpuidle_states - prepares and configures cpuidle
 * global state data i.e. idle routines
 *
 * @pr: the ACPI processor
 */
static int acpi_processor_setup_cpuidle_states(struct acpi_processor *pr)
{
	int i;
	struct cpuidle_driver *drv = &acpi_idle_driver;

	if (!pr->flags.power_setup_done || !pr->flags.power)
		return -EINVAL;

	drv->safe_state_index = -1;
	for (i = ACPI_IDLE_STATE_START; i < CPUIDLE_STATE_MAX; i++) {
		drv->states[i].name[0] = '\0';
		drv->states[i].desc[0] = '\0';
	}

	if (pr->flags.has_lpi)
		return acpi_processor_setup_lpi_states(pr);

	return acpi_processor_setup_cstates(pr);
}

/**
 * acpi_processor_setup_cpuidle_dev - prepares and configures CPUIDLE
 * device i.e. per-cpu data
 *
 * @pr: the ACPI processor
 * @dev: the cpuidle device
 */
static int acpi_processor_setup_cpuidle_dev(struct acpi_processor *pr,
					    struct cpuidle_device *dev)
{
	if (!pr->flags.power_setup_done || !pr->flags.power || !dev)
		return -EINVAL;

	dev->cpu = pr->id;
	if (pr->flags.has_lpi)
		return acpi_processor_ffh_lpi_probe(pr->id);

	return acpi_processor_setup_cpuidle_cx(pr, dev);
}
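
/* Prefer _LPI if the platform provides it; otherwise fall back to C-states. */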
static int acpi_processor_get_power_info(struct acpi_processor *pr)
{
	int ret;

	ret = acpi_processor_get_lpi_info(pr);
	if (ret)
		ret = acpi_processor_get_cstate_info(pr);

	return ret;
}

int acpi_processor_hotplug(struct acpi_processor *pr)
{
	int ret = 0;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	dev = per_cpu(acpi_cpuidle_device, pr->id);
	cpuidle_pause_and_lock();
	cpuidle_disable_device(dev);
	ret = acpi_processor_get_power_info(pr);
	if (!ret && pr->flags.power) {
		acpi_processor_setup_cpuidle_dev(pr, dev);
		ret = cpuidle_enable_device(dev);
	}
	cpuidle_resume_and_unlock();

	return ret;
}

int acpi_processor_power_state_has_changed(struct acpi_processor *pr)
{
	int cpu;
	struct acpi_processor *_pr;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	if (!pr->flags.power_setup_done)
		return -ENODEV;

	/*
	 * FIXME: Design the ACPI notification to make it once per
	 * system instead of once per-cpu.  This condition is a hack
	 * to make the code that updates C-States be called once.
	 */
	if (pr->id == 0 && cpuidle_get_driver() == &acpi_idle_driver) {

		/* Protect against cpu-hotplug */
		get_online_cpus();
		cpuidle_pause_and_lock();

		/* Disable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			dev = per_cpu(acpi_cpuidle_device, cpu);
			cpuidle_disable_device(dev);
		}

		/* Populate Updated C-state information */
		acpi_processor_get_power_info(pr);
		acpi_processor_setup_cpuidle_states(pr);

		/* Enable all cpuidle devices */
		for_each_online_cpu(cpu) {
			_pr = per_cpu(processors, cpu);
			if (!_pr || !_pr->flags.power_setup_done)
				continue;
			acpi_processor_get_power_info(_pr);
			if (_pr->flags.power) {
				dev = per_cpu(acpi_cpuidle_device, cpu);
				acpi_processor_setup_cpuidle_dev(_pr, dev);
				cpuidle_enable_device(dev);
			}
		}
		cpuidle_resume_and_unlock();
		put_online_cpus();
	}

	return 0;
}

static int acpi_processor_registered;

int acpi_processor_power_init(struct acpi_processor *pr)
{
	int retval;
	struct cpuidle_device *dev;

	if (disabled_by_idle_boot_param())
		return 0;

	acpi_processor_cstate_first_run_checks();

	if (!acpi_processor_get_power_info(pr))
		pr->flags.power_setup_done = 1;

	/*
	 * Install the idle handler if processor power management is supported.
	 * Note that the previously set idle handler will be used on platforms
	 * that only support C1.
	 */
	if (pr->flags.power) {
		/* Register acpi_idle_driver if not already registered */
		if (!acpi_processor_registered) {
			acpi_processor_setup_cpuidle_states(pr);
			retval = cpuidle_register_driver(&acpi_idle_driver);
			if (retval)
				return retval;
			pr_debug("%s registered with cpuidle\n",
				 acpi_idle_driver.name);
		}

		dev = kzalloc(sizeof(*dev), GFP_KERNEL);
		if (!dev)
			return -ENOMEM;
		per_cpu(acpi_cpuidle_device, pr->id) = dev;

		acpi_processor_setup_cpuidle_dev(pr, dev);

		/*
		 * Register per-cpu cpuidle_device.  The cpuidle driver must
		 * already be registered before registering the device.
		 */
		retval = cpuidle_register_device(dev);
		if (retval) {
			if (acpi_processor_registered == 0)
				cpuidle_unregister_driver(&acpi_idle_driver);
			return retval;
		}
		acpi_processor_registered++;
	}

	return 0;
}

int acpi_processor_power_exit(struct acpi_processor *pr)
{
	struct cpuidle_device *dev = per_cpu(acpi_cpuidle_device, pr->id);

	if (disabled_by_idle_boot_param())
		return 0;

	if (pr->flags.power) {
		cpuidle_unregister_device(dev);
		acpi_processor_registered--;
		if (acpi_processor_registered == 0)
			cpuidle_unregister_driver(&acpi_idle_driver);
	}

	pr->flags.power_setup_done = 0;

	return 0;
}