kernel/cpu.c
1 /* CPU control.
2  * (C) 2001, 2002, 2003, 2004 Rusty Russell
3  *
4  * This code is licensed under the GPL.
5  */
6 #include <linux/sched/mm.h>
7 #include <linux/proc_fs.h>
8 #include <linux/smp.h>
9 #include <linux/init.h>
10 #include <linux/notifier.h>
11 #include <linux/sched/signal.h>
12 #include <linux/sched/hotplug.h>
13 #include <linux/sched/isolation.h>
14 #include <linux/sched/task.h>
15 #include <linux/sched/smt.h>
16 #include <linux/unistd.h>
17 #include <linux/cpu.h>
18 #include <linux/oom.h>
19 #include <linux/rcupdate.h>
20 #include <linux/export.h>
21 #include <linux/bug.h>
22 #include <linux/kthread.h>
23 #include <linux/stop_machine.h>
24 #include <linux/mutex.h>
25 #include <linux/gfp.h>
26 #include <linux/suspend.h>
27 #include <linux/lockdep.h>
28 #include <linux/tick.h>
29 #include <linux/irq.h>
30 #include <linux/nmi.h>
31 #include <linux/smpboot.h>
32 #include <linux/relay.h>
33 #include <linux/slab.h>
34 #include <linux/percpu-rwsem.h>
35
36 #include <trace/events/power.h>
37 #define CREATE_TRACE_POINTS
38 #include <trace/events/cpuhp.h>
39
40 #include "smpboot.h"
41
42 /**
43  * struct cpuhp_cpu_state - Per cpu hotplug state storage
44  * @state:      The current cpu state
45  * @target:     The target state
46  * @thread:     Pointer to the hotplug thread
47  * @should_run: Thread should execute
48  * @rollback:   Perform a rollback
49  * @single:     Single callback invocation
50  * @bringup:    Single callback bringup or teardown selector
51  * @cb_state:   The state for a single callback (install/uninstall)
52  * @result:     Result of the operation
53  * @done_up:    Signal completion to the issuer of the task for cpu-up
54  * @done_down:  Signal completion to the issuer of the task for cpu-down
55  */
56 struct cpuhp_cpu_state {
57         enum cpuhp_state        state;
58         enum cpuhp_state        target;
59         enum cpuhp_state        fail;
60 #ifdef CONFIG_SMP
61         struct task_struct      *thread;
62         bool                    should_run;
63         bool                    rollback;
64         bool                    single;
65         bool                    bringup;
66         struct hlist_node       *node;
67         struct hlist_node       *last;
68         enum cpuhp_state        cb_state;
69         int                     result;
70         struct completion       done_up;
71         struct completion       done_down;
72 #endif
73 };
74
75 static DEFINE_PER_CPU(struct cpuhp_cpu_state, cpuhp_state) = {
76         .fail = CPUHP_INVALID,
77 };
78
79 #ifdef CONFIG_SMP
80 cpumask_t cpus_booted_once_mask;
81 #endif
82
83 #if defined(CONFIG_LOCKDEP) && defined(CONFIG_SMP)
84 static struct lockdep_map cpuhp_state_up_map =
85         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-up", &cpuhp_state_up_map);
86 static struct lockdep_map cpuhp_state_down_map =
87         STATIC_LOCKDEP_MAP_INIT("cpuhp_state-down", &cpuhp_state_down_map);
88
89
90 static inline void cpuhp_lock_acquire(bool bringup)
91 {
92         lock_map_acquire(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
93 }
94
95 static inline void cpuhp_lock_release(bool bringup)
96 {
97         lock_map_release(bringup ? &cpuhp_state_up_map : &cpuhp_state_down_map);
98 }
99 #else
100
101 static inline void cpuhp_lock_acquire(bool bringup) { }
102 static inline void cpuhp_lock_release(bool bringup) { }
103
104 #endif
105
106 /**
107  * struct cpuhp_step - Hotplug state machine step
108  * @name:       Name of the step
109  * @startup:    Startup function of the step
110  * @teardown:   Teardown function of the step
111  * @cant_stop:  Bringup/teardown can't be stopped at this step
112  */
113 struct cpuhp_step {
114         const char              *name;
115         union {
116                 int             (*single)(unsigned int cpu);
117                 int             (*multi)(unsigned int cpu,
118                                          struct hlist_node *node);
119         } startup;
120         union {
121                 int             (*single)(unsigned int cpu);
122                 int             (*multi)(unsigned int cpu,
123                                          struct hlist_node *node);
124         } teardown;
125         struct hlist_head       list;
126         bool                    cant_stop;
127         bool                    multi_instance;
128 };
129
130 static DEFINE_MUTEX(cpuhp_state_mutex);
131 static struct cpuhp_step cpuhp_hp_states[];
132
133 static struct cpuhp_step *cpuhp_get_step(enum cpuhp_state state)
134 {
135         return cpuhp_hp_states + state;
136 }
137
138 /**
139  * cpuhp_invoke_callback - Invoke the callbacks for a given state
140  * @cpu:        The cpu for which the callback should be invoked
141  * @state:      The state to do callbacks for
142  * @bringup:    True if the bringup callback should be invoked
143  * @node:       For multi-instance, do a single entry callback for install/remove
144  * @lastp:      For multi-instance rollback, remember how far we got
145  *
146  * Called from cpu hotplug and from the state register machinery.
147  */
148 static int cpuhp_invoke_callback(unsigned int cpu, enum cpuhp_state state,
149                                  bool bringup, struct hlist_node *node,
150                                  struct hlist_node **lastp)
151 {
152         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
153         struct cpuhp_step *step = cpuhp_get_step(state);
154         int (*cbm)(unsigned int cpu, struct hlist_node *node);
155         int (*cb)(unsigned int cpu);
156         int ret, cnt;
157
158         if (st->fail == state) {
159                 st->fail = CPUHP_INVALID;
160
161                 if (!(bringup ? step->startup.single : step->teardown.single))
162                         return 0;
163
164                 return -EAGAIN;
165         }
166
167         if (!step->multi_instance) {
168                 WARN_ON_ONCE(lastp && *lastp);
169                 cb = bringup ? step->startup.single : step->teardown.single;
170                 if (!cb)
171                         return 0;
172                 trace_cpuhp_enter(cpu, st->target, state, cb);
173                 ret = cb(cpu);
174                 trace_cpuhp_exit(cpu, st->state, state, ret);
175                 return ret;
176         }
177         cbm = bringup ? step->startup.multi : step->teardown.multi;
178         if (!cbm)
179                 return 0;
180
181         /* Single invocation for instance add/remove */
182         if (node) {
183                 WARN_ON_ONCE(lastp && *lastp);
184                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
185                 ret = cbm(cpu, node);
186                 trace_cpuhp_exit(cpu, st->state, state, ret);
187                 return ret;
188         }
189
190         /* State transition. Invoke on all instances */
191         cnt = 0;
192         hlist_for_each(node, &step->list) {
193                 if (lastp && node == *lastp)
194                         break;
195
196                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
197                 ret = cbm(cpu, node);
198                 trace_cpuhp_exit(cpu, st->state, state, ret);
199                 if (ret) {
200                         if (!lastp)
201                                 goto err;
202
203                         *lastp = node;
204                         return ret;
205                 }
206                 cnt++;
207         }
208         if (lastp)
209                 *lastp = NULL;
210         return 0;
211 err:
212         /* Rollback the instances if one failed */
213         cbm = !bringup ? step->startup.multi : step->teardown.multi;
214         if (!cbm)
215                 return ret;
216
217         hlist_for_each(node, &step->list) {
218                 if (!cnt--)
219                         break;
220
221                 trace_cpuhp_multi_enter(cpu, st->target, state, cbm, node);
222                 ret = cbm(cpu, node);
223                 trace_cpuhp_exit(cpu, st->state, state, ret);
224                 /*
225                  * Rollback must not fail.
226                  */
227                 WARN_ON_ONCE(ret);
228         }
229         return ret;
230 }
231
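/*
 * Example (illustrative only, not part of this file): for a multi_instance
 * state, each registered instance is an hlist_node embedded in the
 * subsystem's own object, and the callbacks typically recover that object
 * with container_of():
 *
 *	struct my_instance {
 *		struct hlist_node node;
 *		void *priv;
 *	};
 *
 *	static int my_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct my_instance *inst;
 *
 *		inst = container_of(node, struct my_instance, node);
 *		return my_setup_on_cpu(inst, cpu);
 *	}
 *
 * struct my_instance, my_online() and my_setup_on_cpu() are made-up names;
 * only the (cpu, node) callback signature is dictated by struct cpuhp_step.
 */
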
232 #ifdef CONFIG_SMP
233 static bool cpuhp_is_ap_state(enum cpuhp_state state)
234 {
235         /*
236          * The extra check for CPUHP_TEARDOWN_CPU is only for documentation
237          * purposes as that state is handled explicitly in cpu_down.
238          */
239         return state > CPUHP_BRINGUP_CPU && state != CPUHP_TEARDOWN_CPU;
240 }
241
242 static inline void wait_for_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
243 {
244         struct completion *done = bringup ? &st->done_up : &st->done_down;
245         wait_for_completion(done);
246 }
247
248 static inline void complete_ap_thread(struct cpuhp_cpu_state *st, bool bringup)
249 {
250         struct completion *done = bringup ? &st->done_up : &st->done_down;
251         complete(done);
252 }
253
254 /*
255  * The former STARTING/DYING states, run with IRQs disabled, must not fail.
256  */
257 static bool cpuhp_is_atomic_state(enum cpuhp_state state)
258 {
259         return CPUHP_AP_IDLE_DEAD <= state && state < CPUHP_AP_ONLINE;
260 }
261
262 /* Serializes the updates to cpu_online_mask, cpu_present_mask */
263 static DEFINE_MUTEX(cpu_add_remove_lock);
264 bool cpuhp_tasks_frozen;
265 EXPORT_SYMBOL_GPL(cpuhp_tasks_frozen);
266
267 /*
268  * The following two APIs (cpu_maps_update_begin/done) must be used when
269  * attempting to serialize the updates to cpu_online_mask & cpu_present_mask.
270  */
271 void cpu_maps_update_begin(void)
272 {
273         mutex_lock(&cpu_add_remove_lock);
274 }
275
276 void cpu_maps_update_done(void)
277 {
278         mutex_unlock(&cpu_add_remove_lock);
279 }
280
281 /*
282  * If set, cpu_up and cpu_down will return -EBUSY and do nothing.
283  * Should always be manipulated under cpu_add_remove_lock
284  */
285 static int cpu_hotplug_disabled;
286
287 #ifdef CONFIG_HOTPLUG_CPU
288
289 DEFINE_STATIC_PERCPU_RWSEM(cpu_hotplug_lock);
290
291 void cpus_read_lock(void)
292 {
293         percpu_down_read(&cpu_hotplug_lock);
294 }
295 EXPORT_SYMBOL_GPL(cpus_read_lock);
296
297 int cpus_read_trylock(void)
298 {
299         return percpu_down_read_trylock(&cpu_hotplug_lock);
300 }
301 EXPORT_SYMBOL_GPL(cpus_read_trylock);
302
303 void cpus_read_unlock(void)
304 {
305         percpu_up_read(&cpu_hotplug_lock);
306 }
307 EXPORT_SYMBOL_GPL(cpus_read_unlock);
308
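/*
 * Example (illustrative only, not part of this file): code that merely needs
 * the set of online CPUs to stay stable while it walks them takes the read
 * side of the hotplug lock:
 *
 *	unsigned int cpu;
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_per_cpu_work(cpu);
 *	cpus_read_unlock();
 *
 * do_per_cpu_work() is a made-up placeholder for the caller's own work.
 */
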
309 void cpus_write_lock(void)
310 {
311         percpu_down_write(&cpu_hotplug_lock);
312 }
313
314 void cpus_write_unlock(void)
315 {
316         percpu_up_write(&cpu_hotplug_lock);
317 }
318
319 void lockdep_assert_cpus_held(void)
320 {
321         /*
322          * We can't have hotplug operations before userspace starts running,
323          * and some init codepaths will knowingly not take the hotplug lock.
324          * This is all valid, so mute lockdep until it makes sense to report
325          * unheld locks.
326          */
327         if (system_state < SYSTEM_RUNNING)
328                 return;
329
330         percpu_rwsem_assert_held(&cpu_hotplug_lock);
331 }
332
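/*
 * Example (illustrative only, not part of this file): a helper that relies
 * on its caller holding the hotplug lock can document and enforce that with
 * the assertion above:
 *
 *	static void my_update_per_cpu_data(void)
 *	{
 *		unsigned int cpu;
 *
 *		lockdep_assert_cpus_held();
 *		for_each_online_cpu(cpu)
 *			my_update_one(cpu);
 *	}
 *
 * my_update_per_cpu_data() and my_update_one() are made-up names.
 */
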
333 static void lockdep_acquire_cpus_lock(void)
334 {
335         rwsem_acquire(&cpu_hotplug_lock.dep_map, 0, 0, _THIS_IP_);
336 }
337
338 static void lockdep_release_cpus_lock(void)
339 {
340         rwsem_release(&cpu_hotplug_lock.dep_map, _THIS_IP_);
341 }
342
343 /*
344  * Wait for currently running CPU hotplug operations to complete (if any) and
345  * disable future CPU hotplug (from sysfs). The 'cpu_add_remove_lock' protects
346  * the 'cpu_hotplug_disabled' flag. The same lock is also acquired by the
347  * hotplug path before performing hotplug operations. So acquiring that lock
348  * guarantees mutual exclusion from any currently running hotplug operations.
349  */
350 void cpu_hotplug_disable(void)
351 {
352         cpu_maps_update_begin();
353         cpu_hotplug_disabled++;
354         cpu_maps_update_done();
355 }
356 EXPORT_SYMBOL_GPL(cpu_hotplug_disable);
357
358 static void __cpu_hotplug_enable(void)
359 {
360         if (WARN_ONCE(!cpu_hotplug_disabled, "Unbalanced cpu hotplug enable\n"))
361                 return;
362         cpu_hotplug_disabled--;
363 }
364
365 void cpu_hotplug_enable(void)
366 {
367         cpu_maps_update_begin();
368         __cpu_hotplug_enable();
369         cpu_maps_update_done();
370 }
371 EXPORT_SYMBOL_GPL(cpu_hotplug_enable);
372
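/*
 * Example (illustrative only, not part of this file): a sleepable section
 * that must not race with CPU hotplug can temporarily disable it; the calls
 * must be balanced:
 *
 *	cpu_hotplug_disable();
 *	my_reconfigure_system();
 *	cpu_hotplug_enable();
 *
 * my_reconfigure_system() is a made-up placeholder for the caller's own work.
 */
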
373 #else
374
375 static void lockdep_acquire_cpus_lock(void)
376 {
377 }
378
379 static void lockdep_release_cpus_lock(void)
380 {
381 }
382
383 #endif  /* CONFIG_HOTPLUG_CPU */
384
385 /*
386  * Architectures that need SMT-specific errata handling during SMT hotplug
387  * should override this.
388  */
389 void __weak arch_smt_update(void) { }
390
391 #ifdef CONFIG_HOTPLUG_SMT
392 enum cpuhp_smt_control cpu_smt_control __read_mostly = CPU_SMT_ENABLED;
393
394 void __init cpu_smt_disable(bool force)
395 {
396         if (!cpu_smt_possible())
397                 return;
398
399         if (force) {
400                 pr_info("SMT: Force disabled\n");
401                 cpu_smt_control = CPU_SMT_FORCE_DISABLED;
402         } else {
403                 pr_info("SMT: disabled\n");
404                 cpu_smt_control = CPU_SMT_DISABLED;
405         }
406 }
407
408 /*
409  * The decision whether SMT is supported can only be done after the full
410  * CPU identification. Called from architecture code.
411  */
412 void __init cpu_smt_check_topology(void)
413 {
414         if (!topology_smt_supported())
415                 cpu_smt_control = CPU_SMT_NOT_SUPPORTED;
416 }
417
418 static int __init smt_cmdline_disable(char *str)
419 {
420         cpu_smt_disable(str && !strcmp(str, "force"));
421         return 0;
422 }
423 early_param("nosmt", smt_cmdline_disable);
424
425 static inline bool cpu_smt_allowed(unsigned int cpu)
426 {
427         if (cpu_smt_control == CPU_SMT_ENABLED)
428                 return true;
429
430         if (topology_is_primary_thread(cpu))
431                 return true;
432
433         /*
434          * On x86 it's required to boot all logical CPUs at least once so
435          * that the init code can get a chance to set CR4.MCE on each
436          * CPU. Otherwise, a broadcast MCE observing CR4.MCE=0b on any
437          * core will shut down the machine.
438          */
439         return !cpumask_test_cpu(cpu, &cpus_booted_once_mask);
440 }
441
442 /* Returns true if SMT is not supported or forcefully (irreversibly) disabled */
443 bool cpu_smt_possible(void)
444 {
445         return cpu_smt_control != CPU_SMT_FORCE_DISABLED &&
446                 cpu_smt_control != CPU_SMT_NOT_SUPPORTED;
447 }
448 EXPORT_SYMBOL_GPL(cpu_smt_possible);
449 #else
450 static inline bool cpu_smt_allowed(unsigned int cpu) { return true; }
451 #endif
452
453 static inline enum cpuhp_state
454 cpuhp_set_state(struct cpuhp_cpu_state *st, enum cpuhp_state target)
455 {
456         enum cpuhp_state prev_state = st->state;
457
458         st->rollback = false;
459         st->last = NULL;
460
461         st->target = target;
462         st->single = false;
463         st->bringup = st->state < target;
464
465         return prev_state;
466 }
467
468 static inline void
469 cpuhp_reset_state(struct cpuhp_cpu_state *st, enum cpuhp_state prev_state)
470 {
471         st->rollback = true;
472
473         /*
474          * If we have st->last we need to undo partial multi_instance of this
475          * state first. Otherwise start undo at the previous state.
476          */
477         if (!st->last) {
478                 if (st->bringup)
479                         st->state--;
480                 else
481                         st->state++;
482         }
483
484         st->target = prev_state;
485         st->bringup = !st->bringup;
486 }
487
488 /* Regular hotplug invocation of the AP hotplug thread */
489 static void __cpuhp_kick_ap(struct cpuhp_cpu_state *st)
490 {
491         if (!st->single && st->state == st->target)
492                 return;
493
494         st->result = 0;
495         /*
496          * Make sure the above stores are visible before should_run becomes
497          * true. Paired with the smp_mb() in cpuhp_thread_fun().
498          */
499         smp_mb();
500         st->should_run = true;
501         wake_up_process(st->thread);
502         wait_for_ap_thread(st, st->bringup);
503 }
504
505 static int cpuhp_kick_ap(struct cpuhp_cpu_state *st, enum cpuhp_state target)
506 {
507         enum cpuhp_state prev_state;
508         int ret;
509
510         prev_state = cpuhp_set_state(st, target);
511         __cpuhp_kick_ap(st);
512         if ((ret = st->result)) {
513                 cpuhp_reset_state(st, prev_state);
514                 __cpuhp_kick_ap(st);
515         }
516
517         return ret;
518 }
519
520 static int bringup_wait_for_ap(unsigned int cpu)
521 {
522         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
523
524         /* Wait for the CPU to reach CPUHP_AP_ONLINE_IDLE */
525         wait_for_ap_thread(st, true);
526         if (WARN_ON_ONCE((!cpu_online(cpu))))
527                 return -ECANCELED;
528
529         /* Unpark the hotplug thread of the target cpu */
530         kthread_unpark(st->thread);
531
532         /*
533          * SMT soft disabling on X86 requires bringing the CPU out of the
534          * BIOS 'wait for SIPI' state in order to set the CR4.MCE bit.  The
535          * CPU marked itself as booted_once in notify_cpu_starting() so the
536          * cpu_smt_allowed() check will now return false if this is not the
537          * primary sibling.
538          */
539         if (!cpu_smt_allowed(cpu))
540                 return -ECANCELED;
541
542         if (st->target <= CPUHP_AP_ONLINE_IDLE)
543                 return 0;
544
545         return cpuhp_kick_ap(st, st->target);
546 }
547
548 static int bringup_cpu(unsigned int cpu)
549 {
550         struct task_struct *idle = idle_thread_get(cpu);
551         int ret;
552
553         /*
554          * Some architectures have to walk the irq descriptors to
555          * set up the vector space for the cpu which comes online.
556          * Prevent irq alloc/free across the bringup.
557          */
558         irq_lock_sparse();
559
560         /* Arch-specific enabling code. */
561         ret = __cpu_up(cpu, idle);
562         irq_unlock_sparse();
563         if (ret)
564                 return ret;
565         return bringup_wait_for_ap(cpu);
566 }
567
568 static int finish_cpu(unsigned int cpu)
569 {
570         struct task_struct *idle = idle_thread_get(cpu);
571         struct mm_struct *mm = idle->active_mm;
572
573         /*
574          * idle_task_exit() will have switched to &init_mm, now
575          * clean up any remaining active_mm state.
576          */
577         if (mm != &init_mm)
578                 idle->active_mm = &init_mm;
579         mmdrop(mm);
580         return 0;
581 }
582
583 /*
584  * Hotplug state machine related functions
585  */
586
587 static void undo_cpu_up(unsigned int cpu, struct cpuhp_cpu_state *st)
588 {
589         for (st->state--; st->state > st->target; st->state--)
590                 cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
591 }
592
593 static inline bool can_rollback_cpu(struct cpuhp_cpu_state *st)
594 {
595         if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
596                 return true;
597         /*
598          * When CPU hotplug is disabled, taking the CPU down is not
599          * possible because takedown_cpu() and the architecture and
600          * subsystem specific mechanisms are not available. So the CPU
601          * which would be completely unplugged again needs to stay around
602          * in the current state.
603          */
604         return st->state <= CPUHP_BRINGUP_CPU;
605 }
606
607 static int cpuhp_up_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
608                               enum cpuhp_state target)
609 {
610         enum cpuhp_state prev_state = st->state;
611         int ret = 0;
612
613         while (st->state < target) {
614                 st->state++;
615                 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
616                 if (ret) {
617                         if (can_rollback_cpu(st)) {
618                                 st->target = prev_state;
619                                 undo_cpu_up(cpu, st);
620                         }
621                         break;
622                 }
623         }
624         return ret;
625 }
626
627 /*
628  * The cpu hotplug threads manage the bringup and teardown of the cpus
629  */
630 static void cpuhp_create(unsigned int cpu)
631 {
632         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
633
634         init_completion(&st->done_up);
635         init_completion(&st->done_down);
636 }
637
638 static int cpuhp_should_run(unsigned int cpu)
639 {
640         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
641
642         return st->should_run;
643 }
644
645 /*
646  * Execute teardown/startup callbacks on the plugged cpu. Also used to invoke
647  * callbacks when a state gets [un]installed at runtime.
648  *
649  * Each invocation of this function by the smpboot thread does a single AP
650  * state callback.
651  *
652  * It has 3 modes of operation:
653  *  - single: runs st->cb_state
654  *  - up:     runs ++st->state, while st->state < st->target
655  *  - down:   runs st->state--, while st->state > st->target
656  *
657  * When complete or on error, should_run is cleared and the completion is fired.
658  */
659 static void cpuhp_thread_fun(unsigned int cpu)
660 {
661         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
662         bool bringup = st->bringup;
663         enum cpuhp_state state;
664
665         if (WARN_ON_ONCE(!st->should_run))
666                 return;
667
668         /*
669          * ACQUIRE for the cpuhp_should_run() load of ->should_run. Ensures
670          * that if we see ->should_run we also see the rest of the state.
671          */
672         smp_mb();
673
674         /*
675          * The BP holds the hotplug lock, but we're now running on the AP;
676          * ensure that anybody asserting the lock is held will actually find
677          * it so.
678          */
679         lockdep_acquire_cpus_lock();
680         cpuhp_lock_acquire(bringup);
681
682         if (st->single) {
683                 state = st->cb_state;
684                 st->should_run = false;
685         } else {
686                 if (bringup) {
687                         st->state++;
688                         state = st->state;
689                         st->should_run = (st->state < st->target);
690                         WARN_ON_ONCE(st->state > st->target);
691                 } else {
692                         state = st->state;
693                         st->state--;
694                         st->should_run = (st->state > st->target);
695                         WARN_ON_ONCE(st->state < st->target);
696                 }
697         }
698
699         WARN_ON_ONCE(!cpuhp_is_ap_state(state));
700
701         if (cpuhp_is_atomic_state(state)) {
702                 local_irq_disable();
703                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
704                 local_irq_enable();
705
706                 /*
707                  * STARTING/DYING must not fail!
708                  */
709                 WARN_ON_ONCE(st->result);
710         } else {
711                 st->result = cpuhp_invoke_callback(cpu, state, bringup, st->node, &st->last);
712         }
713
714         if (st->result) {
715                 /*
716                  * If we fail on a rollback, we're up a creek without a
717                  * paddle, no way forward, no way back. We lose, thanks for
718                  * playing.
719                  */
720                 WARN_ON_ONCE(st->rollback);
721                 st->should_run = false;
722         }
723
724         cpuhp_lock_release(bringup);
725         lockdep_release_cpus_lock();
726
727         if (!st->should_run)
728                 complete_ap_thread(st, bringup);
729 }
730
731 /* Invoke a single callback on a remote cpu */
732 static int
733 cpuhp_invoke_ap_callback(int cpu, enum cpuhp_state state, bool bringup,
734                          struct hlist_node *node)
735 {
736         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
737         int ret;
738
739         if (!cpu_online(cpu))
740                 return 0;
741
742         cpuhp_lock_acquire(false);
743         cpuhp_lock_release(false);
744
745         cpuhp_lock_acquire(true);
746         cpuhp_lock_release(true);
747
748         /*
749          * If we are up and running, use the hotplug thread. For early calls
750          * we invoke the thread function directly.
751          */
752         if (!st->thread)
753                 return cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
754
755         st->rollback = false;
756         st->last = NULL;
757
758         st->node = node;
759         st->bringup = bringup;
760         st->cb_state = state;
761         st->single = true;
762
763         __cpuhp_kick_ap(st);
764
765         /*
766          * If we failed and did a partial, do a rollback.
767          */
768         if ((ret = st->result) && st->last) {
769                 st->rollback = true;
770                 st->bringup = !bringup;
771
772                 __cpuhp_kick_ap(st);
773         }
774
775         /*
776          * Clean up the leftovers so the next hotplug operation won't use stale
777          * data.
778          */
779         st->node = st->last = NULL;
780         return ret;
781 }
782
783 static int cpuhp_kick_ap_work(unsigned int cpu)
784 {
785         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
786         enum cpuhp_state prev_state = st->state;
787         int ret;
788
789         cpuhp_lock_acquire(false);
790         cpuhp_lock_release(false);
791
792         cpuhp_lock_acquire(true);
793         cpuhp_lock_release(true);
794
795         trace_cpuhp_enter(cpu, st->target, prev_state, cpuhp_kick_ap_work);
796         ret = cpuhp_kick_ap(st, st->target);
797         trace_cpuhp_exit(cpu, st->state, prev_state, ret);
798
799         return ret;
800 }
801
802 static struct smp_hotplug_thread cpuhp_threads = {
803         .store                  = &cpuhp_state.thread,
804         .create                 = &cpuhp_create,
805         .thread_should_run      = cpuhp_should_run,
806         .thread_fn              = cpuhp_thread_fun,
807         .thread_comm            = "cpuhp/%u",
808         .selfparking            = true,
809 };
810
811 void __init cpuhp_threads_init(void)
812 {
813         BUG_ON(smpboot_register_percpu_thread(&cpuhp_threads));
814         kthread_unpark(this_cpu_read(cpuhp_state.thread));
815 }
816
817 #ifdef CONFIG_HOTPLUG_CPU
818 #ifndef arch_clear_mm_cpumask_cpu
819 #define arch_clear_mm_cpumask_cpu(cpu, mm) cpumask_clear_cpu(cpu, mm_cpumask(mm))
820 #endif
821
822 /**
823  * clear_tasks_mm_cpumask - Safely clear tasks' mm_cpumask for a CPU
824  * @cpu: a CPU id
825  *
826  * This function walks all processes, finds a valid mm struct for each one and
827  * then clears a corresponding bit in mm's cpumask.  While this all sounds
828  * trivial, there are various non-obvious corner cases, which this function
829  * tries to solve in a safe manner.
830  *
831  * Also note that the function uses a somewhat relaxed locking scheme, so it may
832  * be called only for an already offlined CPU.
833  */
834 void clear_tasks_mm_cpumask(int cpu)
835 {
836         struct task_struct *p;
837
838         /*
839          * This function is called after the cpu is taken down and marked
840          * offline, so it's not like new tasks will ever get this cpu set in
841          * their mm mask. -- Peter Zijlstra
842          * Thus, we may use rcu_read_lock() here, instead of grabbing
843          * full-fledged tasklist_lock.
844          */
845         WARN_ON(cpu_online(cpu));
846         rcu_read_lock();
847         for_each_process(p) {
848                 struct task_struct *t;
849
850                 /*
851                  * Main thread might exit, but other threads may still have
852                  * a valid mm. Find one.
853                  */
854                 t = find_lock_task_mm(p);
855                 if (!t)
856                         continue;
857                 arch_clear_mm_cpumask_cpu(cpu, t->mm);
858                 task_unlock(t);
859         }
860         rcu_read_unlock();
861 }
862
863 /* Take this CPU down. */
864 static int take_cpu_down(void *_param)
865 {
866         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
867         enum cpuhp_state target = max((int)st->target, CPUHP_AP_OFFLINE);
868         int err, cpu = smp_processor_id();
869         int ret;
870
871         /* Ensure this CPU doesn't handle any more interrupts. */
872         err = __cpu_disable();
873         if (err < 0)
874                 return err;
875
876         /*
877          * We get here while we are in CPUHP_TEARDOWN_CPU state and we must not
878          * do this step again.
879          */
880         WARN_ON(st->state != CPUHP_TEARDOWN_CPU);
881         st->state--;
882         /* Invoke the former CPU_DYING callbacks */
883         for (; st->state > target; st->state--) {
884                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
885                 /*
886                  * DYING must not fail!
887                  */
888                 WARN_ON_ONCE(ret);
889         }
890
891         /* Give up timekeeping duties */
892         tick_handover_do_timer();
893         /* Remove CPU from timer broadcasting */
894         tick_offline_cpu(cpu);
895         /* Park the stopper thread */
896         stop_machine_park(cpu);
897         return 0;
898 }
899
900 static int takedown_cpu(unsigned int cpu)
901 {
902         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
903         int err;
904
905         /* Park the smpboot threads */
906         kthread_park(per_cpu_ptr(&cpuhp_state, cpu)->thread);
907
908         /*
909          * Prevent irq alloc/free while the dying cpu reorganizes the
910          * interrupt affinities.
911          */
912         irq_lock_sparse();
913
914         /*
915          * So now all preempt/rcu users must observe !cpu_active().
916          */
917         err = stop_machine_cpuslocked(take_cpu_down, NULL, cpumask_of(cpu));
918         if (err) {
919                 /* CPU refused to die */
920                 irq_unlock_sparse();
921                 /* Unpark the hotplug thread so we can rollback there */
922                 kthread_unpark(per_cpu_ptr(&cpuhp_state, cpu)->thread);
923                 return err;
924         }
925         BUG_ON(cpu_online(cpu));
926
927         /*
928          * The teardown callback for CPUHP_AP_SCHED_STARTING will have removed
929          * all runnable tasks from the CPU; only the idle task is left now
930          * that the migration thread is done doing the stop_machine thing.
931          *
932          * Wait for the stop thread to go away.
933          */
934         wait_for_ap_thread(st, false);
935         BUG_ON(st->state != CPUHP_AP_IDLE_DEAD);
936
937         /* Interrupts are moved away from the dying cpu, reenable alloc/free */
938         irq_unlock_sparse();
939
940         hotplug_cpu__broadcast_tick_pull(cpu);
941         /* This actually kills the CPU. */
942         __cpu_die(cpu);
943
944         tick_cleanup_dead_cpu(cpu);
945         rcutree_migrate_callbacks(cpu);
946         return 0;
947 }
948
949 static void cpuhp_complete_idle_dead(void *arg)
950 {
951         struct cpuhp_cpu_state *st = arg;
952
953         complete_ap_thread(st, false);
954 }
955
956 void cpuhp_report_idle_dead(void)
957 {
958         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
959
960         BUG_ON(st->state != CPUHP_AP_OFFLINE);
961         rcu_report_dead(smp_processor_id());
962         st->state = CPUHP_AP_IDLE_DEAD;
963         /*
964          * We cannot call complete after rcu_report_dead() so we delegate it
965          * to an online cpu.
966          */
967         smp_call_function_single(cpumask_first(cpu_online_mask),
968                                  cpuhp_complete_idle_dead, st, 0);
969 }
970
971 static void undo_cpu_down(unsigned int cpu, struct cpuhp_cpu_state *st)
972 {
973         for (st->state++; st->state < st->target; st->state++)
974                 cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
975 }
976
977 static int cpuhp_down_callbacks(unsigned int cpu, struct cpuhp_cpu_state *st,
978                                 enum cpuhp_state target)
979 {
980         enum cpuhp_state prev_state = st->state;
981         int ret = 0;
982
983         for (; st->state > target; st->state--) {
984                 ret = cpuhp_invoke_callback(cpu, st->state, false, NULL, NULL);
985                 if (ret) {
986                         st->target = prev_state;
987                         if (st->state < prev_state)
988                                 undo_cpu_down(cpu, st);
989                         break;
990                 }
991         }
992         return ret;
993 }
994
995 /* Requires cpu_add_remove_lock to be held */
996 static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,
997                            enum cpuhp_state target)
998 {
999         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1000         int prev_state, ret = 0;
1001
1002         if (num_online_cpus() == 1)
1003                 return -EBUSY;
1004
1005         if (!cpu_present(cpu))
1006                 return -EINVAL;
1007
1008         cpus_write_lock();
1009
1010         cpuhp_tasks_frozen = tasks_frozen;
1011
1012         prev_state = cpuhp_set_state(st, target);
1013         /*
1014          * If the current CPU state is in the range of the AP hotplug thread,
1015          * then we need to kick the thread.
1016          */
1017         if (st->state > CPUHP_TEARDOWN_CPU) {
1018                 st->target = max((int)target, CPUHP_TEARDOWN_CPU);
1019                 ret = cpuhp_kick_ap_work(cpu);
1020                 /*
1021                  * The AP side has done the error rollback already. Just
1022                  * return the error code..
1023                  */
1024                 if (ret)
1025                         goto out;
1026
1027                 /*
1028                  * We might have stopped still in the range of the AP hotplug
1029                  * thread. Nothing to do anymore.
1030                  */
1031                 if (st->state > CPUHP_TEARDOWN_CPU)
1032                         goto out;
1033
1034                 st->target = target;
1035         }
1036         /*
1037          * The AP brought itself down to CPUHP_TEARDOWN_CPU. So we need
1038          * to do the further cleanups.
1039          */
1040         ret = cpuhp_down_callbacks(cpu, st, target);
1041         if (ret && st->state == CPUHP_TEARDOWN_CPU && st->state < prev_state) {
1042                 cpuhp_reset_state(st, prev_state);
1043                 __cpuhp_kick_ap(st);
1044         }
1045
1046 out:
1047         cpus_write_unlock();
1048         /*
1049          * Do post unplug cleanup. This is still protected against
1050          * concurrent CPU hotplug via cpu_add_remove_lock.
1051          */
1052         lockup_detector_cleanup();
1053         arch_smt_update();
1054         return ret;
1055 }
1056
1057 static int cpu_down_maps_locked(unsigned int cpu, enum cpuhp_state target)
1058 {
1059         if (cpu_hotplug_disabled)
1060                 return -EBUSY;
1061         return _cpu_down(cpu, 0, target);
1062 }
1063
1064 static int cpu_down(unsigned int cpu, enum cpuhp_state target)
1065 {
1066         int err;
1067
1068         cpu_maps_update_begin();
1069         err = cpu_down_maps_locked(cpu, target);
1070         cpu_maps_update_done();
1071         return err;
1072 }
1073
1074 /**
1075  * cpu_device_down - Bring down a cpu device
1076  * @dev: Pointer to the cpu device to offline
1077  *
1078  * This function is meant to be used by device core cpu subsystem only.
1079  *
1080  * Other subsystems should use remove_cpu() instead.
1081  */
1082 int cpu_device_down(struct device *dev)
1083 {
1084         return cpu_down(dev->id, CPUHP_OFFLINE);
1085 }
1086
1087 int remove_cpu(unsigned int cpu)
1088 {
1089         int ret;
1090
1091         lock_device_hotplug();
1092         ret = device_offline(get_cpu_device(cpu));
1093         unlock_device_hotplug();
1094
1095         return ret;
1096 }
1097 EXPORT_SYMBOL_GPL(remove_cpu);
1098
1099 void smp_shutdown_nonboot_cpus(unsigned int primary_cpu)
1100 {
1101         unsigned int cpu;
1102         int error;
1103
1104         cpu_maps_update_begin();
1105
1106         /*
1107          * Make certain the cpu I'm about to reboot on is online.
1108          *
1109          * This is in line with what migrate_to_reboot_cpu() already does.
1110          */
1111         if (!cpu_online(primary_cpu))
1112                 primary_cpu = cpumask_first(cpu_online_mask);
1113
1114         for_each_online_cpu(cpu) {
1115                 if (cpu == primary_cpu)
1116                         continue;
1117
1118                 error = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
1119                 if (error) {
1120                         pr_err("Failed to offline CPU%d - error=%d\n",
1121                                 cpu, error);
1122                         break;
1123                 }
1124         }
1125
1126         /*
1127          * Ensure all but the reboot CPU are offline.
1128          */
1129         BUG_ON(num_online_cpus() > 1);
1130
1131         /*
1132          * Make sure the CPUs won't be enabled by someone else after this
1133          * point. Kexec will reboot to a new kernel shortly resetting
1134          * everything along the way.
1135          */
1136         cpu_hotplug_disabled++;
1137
1138         cpu_maps_update_done();
1139 }
1140
1141 #else
1142 #define takedown_cpu            NULL
1143 #endif /*CONFIG_HOTPLUG_CPU*/
1144
1145 /**
1146  * notify_cpu_starting(cpu) - Invoke the callbacks on the starting CPU
1147  * @cpu: cpu that just started
1148  *
1149  * It must be called by the arch code on the new cpu, before the new cpu
1150  * enables interrupts and before the "boot" cpu returns from __cpu_up().
1151  */
1152 void notify_cpu_starting(unsigned int cpu)
1153 {
1154         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1155         enum cpuhp_state target = min((int)st->target, CPUHP_AP_ONLINE);
1156         int ret;
1157
1158         rcu_cpu_starting(cpu);  /* Enables RCU usage on this CPU. */
1159         cpumask_set_cpu(cpu, &cpus_booted_once_mask);
1160         while (st->state < target) {
1161                 st->state++;
1162                 ret = cpuhp_invoke_callback(cpu, st->state, true, NULL, NULL);
1163                 /*
1164                  * STARTING must not fail!
1165                  */
1166                 WARN_ON_ONCE(ret);
1167         }
1168 }
1169
1170 /*
1171  * Called from the idle task. Wake up the controlling task which brings the
1172  * hotplug thread of the upcoming CPU up and then delegates the rest of the
1173  * online bringup to the hotplug thread.
1174  */
1175 void cpuhp_online_idle(enum cpuhp_state state)
1176 {
1177         struct cpuhp_cpu_state *st = this_cpu_ptr(&cpuhp_state);
1178
1179         /* Happens for the boot cpu */
1180         if (state != CPUHP_AP_ONLINE_IDLE)
1181                 return;
1182
1183         /*
1184          * Unpark the stopper thread before we start the idle loop (and start
1185          * scheduling); this ensures the stopper task is always available.
1186          */
1187         stop_machine_unpark(smp_processor_id());
1188
1189         st->state = CPUHP_AP_ONLINE_IDLE;
1190         complete_ap_thread(st, true);
1191 }
1192
1193 /* Requires cpu_add_remove_lock to be held */
1194 static int _cpu_up(unsigned int cpu, int tasks_frozen, enum cpuhp_state target)
1195 {
1196         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1197         struct task_struct *idle;
1198         int ret = 0;
1199
1200         cpus_write_lock();
1201
1202         if (!cpu_present(cpu)) {
1203                 ret = -EINVAL;
1204                 goto out;
1205         }
1206
1207         /*
1208          * The caller of cpu_up() might have raced with another
1209          * caller. Nothing to do.
1210          */
1211         if (st->state >= target)
1212                 goto out;
1213
1214         if (st->state == CPUHP_OFFLINE) {
1215                 /* Let it fail before we try to bring the cpu up */
1216                 idle = idle_thread_get(cpu);
1217                 if (IS_ERR(idle)) {
1218                         ret = PTR_ERR(idle);
1219                         goto out;
1220                 }
1221         }
1222
1223         cpuhp_tasks_frozen = tasks_frozen;
1224
1225         cpuhp_set_state(st, target);
1226         /*
1227          * If the current CPU state is in the range of the AP hotplug thread,
1228          * then we need to kick the thread once more.
1229          */
1230         if (st->state > CPUHP_BRINGUP_CPU) {
1231                 ret = cpuhp_kick_ap_work(cpu);
1232                 /*
1233                  * The AP side has done the error rollback already. Just
1234                  * return the error code..
1235                  */
1236                 if (ret)
1237                         goto out;
1238         }
1239
1240         /*
1241          * Try to reach the target state. We max out on the BP at
1242          * CPUHP_BRINGUP_CPU. After that the AP hotplug thread is
1243          * responsible for bringing it up to the target state.
1244          */
1245         target = min((int)target, CPUHP_BRINGUP_CPU);
1246         ret = cpuhp_up_callbacks(cpu, st, target);
1247 out:
1248         cpus_write_unlock();
1249         arch_smt_update();
1250         return ret;
1251 }
1252
1253 static int cpu_up(unsigned int cpu, enum cpuhp_state target)
1254 {
1255         int err = 0;
1256
1257         if (!cpu_possible(cpu)) {
1258                 pr_err("can't online cpu %d because it is not configured as may-hotadd at boot time\n",
1259                        cpu);
1260 #if defined(CONFIG_IA64)
1261                 pr_err("please check additional_cpus= boot parameter\n");
1262 #endif
1263                 return -EINVAL;
1264         }
1265
1266         err = try_online_node(cpu_to_node(cpu));
1267         if (err)
1268                 return err;
1269
1270         cpu_maps_update_begin();
1271
1272         if (cpu_hotplug_disabled) {
1273                 err = -EBUSY;
1274                 goto out;
1275         }
1276         if (!cpu_smt_allowed(cpu)) {
1277                 err = -EPERM;
1278                 goto out;
1279         }
1280
1281         err = _cpu_up(cpu, 0, target);
1282 out:
1283         cpu_maps_update_done();
1284         return err;
1285 }
1286
1287 /**
1288  * cpu_device_up - Bring up a cpu device
1289  * @dev: Pointer to the cpu device to online
1290  *
1291  * This function is meant to be used by device core cpu subsystem only.
1292  *
1293  * Other subsystems should use add_cpu() instead.
1294  */
1295 int cpu_device_up(struct device *dev)
1296 {
1297         return cpu_up(dev->id, CPUHP_ONLINE);
1298 }
1299
1300 int add_cpu(unsigned int cpu)
1301 {
1302         int ret;
1303
1304         lock_device_hotplug();
1305         ret = device_online(get_cpu_device(cpu));
1306         unlock_device_hotplug();
1307
1308         return ret;
1309 }
1310 EXPORT_SYMBOL_GPL(add_cpu);
1311
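/*
 * Example (illustrative only, not part of this file): a subsystem that needs
 * a CPU offline for a while and then back online would pair remove_cpu() and
 * add_cpu():
 *
 *	ret = remove_cpu(cpu);
 *	if (ret)
 *		return ret;
 *	my_exclusive_work(cpu);
 *	ret = add_cpu(cpu);
 *
 * my_exclusive_work() is a made-up placeholder for the caller's own work.
 */
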
1312 /**
1313  * bringup_hibernate_cpu - Bring up the CPU that we hibernated on
1314  * @sleep_cpu: The cpu we hibernated on and should be brought up.
1315  *
1316  * On some architectures like arm64, we can hibernate on any CPU, but on
1317  * wake up the CPU we hibernated on might be offline as a side effect of
1318  * using maxcpus= for example.
1319  */
1320 int bringup_hibernate_cpu(unsigned int sleep_cpu)
1321 {
1322         int ret;
1323
1324         if (!cpu_online(sleep_cpu)) {
1325                 pr_info("Hibernated on a CPU that is offline! Bringing CPU up.\n");
1326                 ret = cpu_up(sleep_cpu, CPUHP_ONLINE);
1327                 if (ret) {
1328                         pr_err("Failed to bring hibernate-CPU up!\n");
1329                         return ret;
1330                 }
1331         }
1332         return 0;
1333 }
1334
1335 void bringup_nonboot_cpus(unsigned int setup_max_cpus)
1336 {
1337         unsigned int cpu;
1338
1339         for_each_present_cpu(cpu) {
1340                 if (num_online_cpus() >= setup_max_cpus)
1341                         break;
1342                 if (!cpu_online(cpu))
1343                         cpu_up(cpu, CPUHP_ONLINE);
1344         }
1345 }
1346
1347 #ifdef CONFIG_PM_SLEEP_SMP
1348 static cpumask_var_t frozen_cpus;
1349
1350 int freeze_secondary_cpus(int primary)
1351 {
1352         int cpu, error = 0;
1353
1354         cpu_maps_update_begin();
1355         if (primary == -1) {
1356                 primary = cpumask_first(cpu_online_mask);
1357                 if (!housekeeping_cpu(primary, HK_FLAG_TIMER))
1358                         primary = housekeeping_any_cpu(HK_FLAG_TIMER);
1359         } else {
1360                 if (!cpu_online(primary))
1361                         primary = cpumask_first(cpu_online_mask);
1362         }
1363
1364         /*
1365          * We take down all of the non-boot CPUs in one shot to avoid races
1366          * with userspace trying to use CPU hotplug at the same time.
1367          */
1368         cpumask_clear(frozen_cpus);
1369
1370         pr_info("Disabling non-boot CPUs ...\n");
1371         for_each_online_cpu(cpu) {
1372                 if (cpu == primary)
1373                         continue;
1374
1375                 if (pm_wakeup_pending()) {
1376                         pr_info("Wakeup pending. Abort CPU freeze\n");
1377                         error = -EBUSY;
1378                         break;
1379                 }
1380
1381                 trace_suspend_resume(TPS("CPU_OFF"), cpu, true);
1382                 error = _cpu_down(cpu, 1, CPUHP_OFFLINE);
1383                 trace_suspend_resume(TPS("CPU_OFF"), cpu, false);
1384                 if (!error)
1385                         cpumask_set_cpu(cpu, frozen_cpus);
1386                 else {
1387                         pr_err("Error taking CPU%d down: %d\n", cpu, error);
1388                         break;
1389                 }
1390         }
1391
1392         if (!error)
1393                 BUG_ON(num_online_cpus() > 1);
1394         else
1395                 pr_err("Non-boot CPUs are not disabled\n");
1396
1397         /*
1398          * Make sure the CPUs won't be enabled by someone else. We need to do
1399          * this even in case of failure as all freeze_secondary_cpus() users are
1400          * supposed to do thaw_secondary_cpus() on the failure path.
1401          */
1402         cpu_hotplug_disabled++;
1403
1404         cpu_maps_update_done();
1405         return error;
1406 }
1407
1408 void __weak arch_thaw_secondary_cpus_begin(void)
1409 {
1410 }
1411
1412 void __weak arch_thaw_secondary_cpus_end(void)
1413 {
1414 }
1415
1416 void thaw_secondary_cpus(void)
1417 {
1418         int cpu, error;
1419
1420         /* Allow everyone to use the CPU hotplug again */
1421         cpu_maps_update_begin();
1422         __cpu_hotplug_enable();
1423         if (cpumask_empty(frozen_cpus))
1424                 goto out;
1425
1426         pr_info("Enabling non-boot CPUs ...\n");
1427
1428         arch_thaw_secondary_cpus_begin();
1429
1430         for_each_cpu(cpu, frozen_cpus) {
1431                 trace_suspend_resume(TPS("CPU_ON"), cpu, true);
1432                 error = _cpu_up(cpu, 1, CPUHP_ONLINE);
1433                 trace_suspend_resume(TPS("CPU_ON"), cpu, false);
1434                 if (!error) {
1435                         pr_info("CPU%d is up\n", cpu);
1436                         continue;
1437                 }
1438                 pr_warn("Error taking CPU%d up: %d\n", cpu, error);
1439         }
1440
1441         arch_thaw_secondary_cpus_end();
1442
1443         cpumask_clear(frozen_cpus);
1444 out:
1445         cpu_maps_update_done();
1446 }
1447
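/*
 * Example (illustrative only, not part of this file): callers are expected to
 * use freeze_secondary_cpus() and thaw_secondary_cpus() as a pair, and to call
 * the latter even when the former failed (see the comment in
 * freeze_secondary_cpus()):
 *
 *	error = freeze_secondary_cpus(-1);
 *	if (!error)
 *		my_single_cpu_work();
 *	thaw_secondary_cpus();
 *
 * Passing -1 lets freeze_secondary_cpus() pick a suitable primary CPU itself;
 * my_single_cpu_work() is a made-up placeholder.
 */
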
1448 static int __init alloc_frozen_cpus(void)
1449 {
1450         if (!alloc_cpumask_var(&frozen_cpus, GFP_KERNEL|__GFP_ZERO))
1451                 return -ENOMEM;
1452         return 0;
1453 }
1454 core_initcall(alloc_frozen_cpus);
1455
1456 /*
1457  * When callbacks for CPU hotplug notifications are being executed, we must
1458  * ensure that the state of the system with respect to the tasks being frozen
1459  * or not, as reported by the notification, remains unchanged *throughout the
1460  * duration* of the execution of the callbacks.
1461  * Hence we need to prevent the freezer from racing with regular CPU hotplug.
1462  *
1463  * This synchronization is implemented by mutually excluding regular CPU
1464  * hotplug and Suspend/Hibernate call paths by hooking onto the Suspend/
1465  * Hibernate notifications.
1466  */
1467 static int
1468 cpu_hotplug_pm_callback(struct notifier_block *nb,
1469                         unsigned long action, void *ptr)
1470 {
1471         switch (action) {
1472
1473         case PM_SUSPEND_PREPARE:
1474         case PM_HIBERNATION_PREPARE:
1475                 cpu_hotplug_disable();
1476                 break;
1477
1478         case PM_POST_SUSPEND:
1479         case PM_POST_HIBERNATION:
1480                 cpu_hotplug_enable();
1481                 break;
1482
1483         default:
1484                 return NOTIFY_DONE;
1485         }
1486
1487         return NOTIFY_OK;
1488 }
1489
1490
1491 static int __init cpu_hotplug_pm_sync_init(void)
1492 {
1493         /*
1494          * cpu_hotplug_pm_callback has higher priority than x86
1495          * bsp_pm_callback, which depends on cpu_hotplug_pm_callback
1496          * disabling cpu hotplug to avoid a cpu hotplug race.
1497          */
1498         pm_notifier(cpu_hotplug_pm_callback, 0);
1499         return 0;
1500 }
1501 core_initcall(cpu_hotplug_pm_sync_init);
1502
1503 #endif /* CONFIG_PM_SLEEP_SMP */
1504
1505 int __boot_cpu_id;
1506
1507 #endif /* CONFIG_SMP */
1508
1509 /* Boot processor state steps */
1510 static struct cpuhp_step cpuhp_hp_states[] = {
1511         [CPUHP_OFFLINE] = {
1512                 .name                   = "offline",
1513                 .startup.single         = NULL,
1514                 .teardown.single        = NULL,
1515         },
1516 #ifdef CONFIG_SMP
1517         [CPUHP_CREATE_THREADS] = {
1518                 .name                   = "threads:prepare",
1519                 .startup.single         = smpboot_create_threads,
1520                 .teardown.single        = NULL,
1521                 .cant_stop              = true,
1522         },
1523         [CPUHP_PERF_PREPARE] = {
1524                 .name                   = "perf:prepare",
1525                 .startup.single         = perf_event_init_cpu,
1526                 .teardown.single        = perf_event_exit_cpu,
1527         },
1528         [CPUHP_WORKQUEUE_PREP] = {
1529                 .name                   = "workqueue:prepare",
1530                 .startup.single         = workqueue_prepare_cpu,
1531                 .teardown.single        = NULL,
1532         },
1533         [CPUHP_HRTIMERS_PREPARE] = {
1534                 .name                   = "hrtimers:prepare",
1535                 .startup.single         = hrtimers_prepare_cpu,
1536                 .teardown.single        = hrtimers_dead_cpu,
1537         },
1538         [CPUHP_SMPCFD_PREPARE] = {
1539                 .name                   = "smpcfd:prepare",
1540                 .startup.single         = smpcfd_prepare_cpu,
1541                 .teardown.single        = smpcfd_dead_cpu,
1542         },
1543         [CPUHP_RELAY_PREPARE] = {
1544                 .name                   = "relay:prepare",
1545                 .startup.single         = relay_prepare_cpu,
1546                 .teardown.single        = NULL,
1547         },
1548         [CPUHP_SLAB_PREPARE] = {
1549                 .name                   = "slab:prepare",
1550                 .startup.single         = slab_prepare_cpu,
1551                 .teardown.single        = slab_dead_cpu,
1552         },
1553         [CPUHP_RCUTREE_PREP] = {
1554                 .name                   = "RCU/tree:prepare",
1555                 .startup.single         = rcutree_prepare_cpu,
1556                 .teardown.single        = rcutree_dead_cpu,
1557         },
1558         /*
1559          * On the tear-down path, timers_dead_cpu() must be invoked
1560          * before blk_mq_queue_reinit_notify() from notify_dead(),
1561          * otherwise a RCU stall occurs.
1562          * otherwise an RCU stall occurs.
1563         [CPUHP_TIMERS_PREPARE] = {
1564                 .name                   = "timers:prepare",
1565                 .startup.single         = timers_prepare_cpu,
1566                 .teardown.single        = timers_dead_cpu,
1567         },
1568         /* Kicks the plugged cpu into life */
1569         [CPUHP_BRINGUP_CPU] = {
1570                 .name                   = "cpu:bringup",
1571                 .startup.single         = bringup_cpu,
1572                 .teardown.single        = finish_cpu,
1573                 .cant_stop              = true,
1574         },
1575         /* Final state before CPU kills itself */
1576         [CPUHP_AP_IDLE_DEAD] = {
1577                 .name                   = "idle:dead",
1578         },
1579         /*
1580          * Last state before CPU enters the idle loop to die. Transient state
1581          * for synchronization.
1582          */
1583         [CPUHP_AP_OFFLINE] = {
1584                 .name                   = "ap:offline",
1585                 .cant_stop              = true,
1586         },
1587         /* First state is scheduler control. Interrupts are disabled */
1588         [CPUHP_AP_SCHED_STARTING] = {
1589                 .name                   = "sched:starting",
1590                 .startup.single         = sched_cpu_starting,
1591                 .teardown.single        = sched_cpu_dying,
1592         },
1593         [CPUHP_AP_RCUTREE_DYING] = {
1594                 .name                   = "RCU/tree:dying",
1595                 .startup.single         = NULL,
1596                 .teardown.single        = rcutree_dying_cpu,
1597         },
1598         [CPUHP_AP_SMPCFD_DYING] = {
1599                 .name                   = "smpcfd:dying",
1600                 .startup.single         = NULL,
1601                 .teardown.single        = smpcfd_dying_cpu,
1602         },
1603         /* Entry state on starting. Interrupts enabled from here on. Transient
1604          * state for synchronization */
1605         [CPUHP_AP_ONLINE] = {
1606                 .name                   = "ap:online",
1607         },
1608         /*
1609          * Handled on the control processor until the plugged processor manages
1610          * this itself.
1611          */
1612         [CPUHP_TEARDOWN_CPU] = {
1613                 .name                   = "cpu:teardown",
1614                 .startup.single         = NULL,
1615                 .teardown.single        = takedown_cpu,
1616                 .cant_stop              = true,
1617         },
1618         /* Handle smpboot threads park/unpark */
1619         [CPUHP_AP_SMPBOOT_THREADS] = {
1620                 .name                   = "smpboot/threads:online",
1621                 .startup.single         = smpboot_unpark_threads,
1622                 .teardown.single        = smpboot_park_threads,
1623         },
1624         [CPUHP_AP_IRQ_AFFINITY_ONLINE] = {
1625                 .name                   = "irq/affinity:online",
1626                 .startup.single         = irq_affinity_online_cpu,
1627                 .teardown.single        = NULL,
1628         },
1629         [CPUHP_AP_PERF_ONLINE] = {
1630                 .name                   = "perf:online",
1631                 .startup.single         = perf_event_init_cpu,
1632                 .teardown.single        = perf_event_exit_cpu,
1633         },
1634         [CPUHP_AP_WATCHDOG_ONLINE] = {
1635                 .name                   = "lockup_detector:online",
1636                 .startup.single         = lockup_detector_online_cpu,
1637                 .teardown.single        = lockup_detector_offline_cpu,
1638         },
1639         [CPUHP_AP_WORKQUEUE_ONLINE] = {
1640                 .name                   = "workqueue:online",
1641                 .startup.single         = workqueue_online_cpu,
1642                 .teardown.single        = workqueue_offline_cpu,
1643         },
1644         [CPUHP_AP_RCUTREE_ONLINE] = {
1645                 .name                   = "RCU/tree:online",
1646                 .startup.single         = rcutree_online_cpu,
1647                 .teardown.single        = rcutree_offline_cpu,
1648         },
1649 #endif
1650         /*
1651          * The dynamically registered state space is here
1652          */
1653
1654 #ifdef CONFIG_SMP
1655         /* Last state is scheduler control setting the cpu active */
1656         [CPUHP_AP_ACTIVE] = {
1657                 .name                   = "sched:active",
1658                 .startup.single         = sched_cpu_activate,
1659                 .teardown.single        = sched_cpu_deactivate,
1660         },
1661 #endif
1662
1663         /* CPU is fully up and running. */
1664         [CPUHP_ONLINE] = {
1665                 .name                   = "online",
1666                 .startup.single         = NULL,
1667                 .teardown.single        = NULL,
1668         },
1669 };
1670
1671 /* Sanity check for callbacks */
1672 static int cpuhp_cb_check(enum cpuhp_state state)
1673 {
1674         if (state <= CPUHP_OFFLINE || state >= CPUHP_ONLINE)
1675                 return -EINVAL;
1676         return 0;
1677 }
1678
1679 /*
1680  * Returns a free slot for dynamic state assignment in the range that belongs
1681  * to @state. The states are protected by the cpuhp_state_mutex and an empty
1682  * slot is identified by having no name assigned.
1683  */
1684 static int cpuhp_reserve_state(enum cpuhp_state state)
1685 {
1686         enum cpuhp_state i, end;
1687         struct cpuhp_step *step;
1688
1689         switch (state) {
1690         case CPUHP_AP_ONLINE_DYN:
1691                 step = cpuhp_hp_states + CPUHP_AP_ONLINE_DYN;
1692                 end = CPUHP_AP_ONLINE_DYN_END;
1693                 break;
1694         case CPUHP_BP_PREPARE_DYN:
1695                 step = cpuhp_hp_states + CPUHP_BP_PREPARE_DYN;
1696                 end = CPUHP_BP_PREPARE_DYN_END;
1697                 break;
1698         default:
1699                 return -EINVAL;
1700         }
1701
1702         for (i = state; i <= end; i++, step++) {
1703                 if (!step->name)
1704                         return i;
1705         }
1706         WARN(1, "No more dynamic states available for CPU hotplug\n");
1707         return -ENOSPC;
1708 }
1709
1710 static int cpuhp_store_callbacks(enum cpuhp_state state, const char *name,
1711                                  int (*startup)(unsigned int cpu),
1712                                  int (*teardown)(unsigned int cpu),
1713                                  bool multi_instance)
1714 {
1715         /* (Un)Install the callbacks for further cpu hotplug operations */
1716         struct cpuhp_step *sp;
1717         int ret = 0;
1718
1719         /*
1720          * If name is NULL, then the state gets removed.
1721          *
1722          * CPUHP_AP_ONLINE_DYN and CPUHP_BP_PREPARE_DYN are handed out on
1723          * the first allocation from these dynamic ranges, so the removal
1724          * would trigger a new allocation and clear the wrong (already
1725          * empty) state, leaving the callbacks of the to be cleared state
1726          * dangling, which causes wreckage on the next hotplug operation.
1727          */
1728         if (name && (state == CPUHP_AP_ONLINE_DYN ||
1729                      state == CPUHP_BP_PREPARE_DYN)) {
1730                 ret = cpuhp_reserve_state(state);
1731                 if (ret < 0)
1732                         return ret;
1733                 state = ret;
1734         }
1735         sp = cpuhp_get_step(state);
1736         if (name && sp->name)
1737                 return -EBUSY;
1738
1739         sp->startup.single = startup;
1740         sp->teardown.single = teardown;
1741         sp->name = name;
1742         sp->multi_instance = multi_instance;
1743         INIT_HLIST_HEAD(&sp->list);
1744         return ret;
1745 }
1746
1747 static void *cpuhp_get_teardown_cb(enum cpuhp_state state)
1748 {
1749         return cpuhp_get_step(state)->teardown.single;
1750 }
1751
1752 /*
1753  * Call the startup/teardown function for a step either on the AP or
1754  * on the current CPU.
1755  */
1756 static int cpuhp_issue_call(int cpu, enum cpuhp_state state, bool bringup,
1757                             struct hlist_node *node)
1758 {
1759         struct cpuhp_step *sp = cpuhp_get_step(state);
1760         int ret;
1761
1762         /*
1763          * If there's nothing to do, we're done.
1764          * Relies on the union for multi_instance.
1765          */
1766         if ((bringup && !sp->startup.single) ||
1767             (!bringup && !sp->teardown.single))
1768                 return 0;
1769         /*
1770          * The non-AP-bound callbacks can fail on bringup. On teardown,
1771          * e.g. module removal, we crash for now.
1772          */
1773 #ifdef CONFIG_SMP
1774         if (cpuhp_is_ap_state(state))
1775                 ret = cpuhp_invoke_ap_callback(cpu, state, bringup, node);
1776         else
1777                 ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1778 #else
1779         ret = cpuhp_invoke_callback(cpu, state, bringup, node, NULL);
1780 #endif
1781         BUG_ON(ret && !bringup);
1782         return ret;
1783 }
1784
1785 /*
1786  * Called from __cpuhp_setup_state on a recoverable failure.
1787  *
1788  * Note: The teardown callbacks for rollback are not allowed to fail!
1789  */
1790 static void cpuhp_rollback_install(int failedcpu, enum cpuhp_state state,
1791                                    struct hlist_node *node)
1792 {
1793         int cpu;
1794
1795         /* Roll back the already executed steps on the other cpus */
1796         for_each_present_cpu(cpu) {
1797                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1798                 int cpustate = st->state;
1799
1800                 if (cpu >= failedcpu)
1801                         break;
1802
1803                 /* Did we invoke the startup call on that cpu? */
1804                 if (cpustate >= state)
1805                         cpuhp_issue_call(cpu, state, false, node);
1806         }
1807 }
1808
1809 int __cpuhp_state_add_instance_cpuslocked(enum cpuhp_state state,
1810                                           struct hlist_node *node,
1811                                           bool invoke)
1812 {
1813         struct cpuhp_step *sp;
1814         int cpu;
1815         int ret;
1816
1817         lockdep_assert_cpus_held();
1818
1819         sp = cpuhp_get_step(state);
1820         if (sp->multi_instance == false)
1821                 return -EINVAL;
1822
1823         mutex_lock(&cpuhp_state_mutex);
1824
1825         if (!invoke || !sp->startup.multi)
1826                 goto add_node;
1827
1828         /*
1829          * Try to call the startup callback for each present cpu
1830          * depending on the hotplug state of the cpu.
1831          */
1832         for_each_present_cpu(cpu) {
1833                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1834                 int cpustate = st->state;
1835
1836                 if (cpustate < state)
1837                         continue;
1838
1839                 ret = cpuhp_issue_call(cpu, state, true, node);
1840                 if (ret) {
1841                         if (sp->teardown.multi)
1842                                 cpuhp_rollback_install(cpu, state, node);
1843                         goto unlock;
1844                 }
1845         }
1846 add_node:
1847         ret = 0;
1848         hlist_add_head(node, &sp->list);
1849 unlock:
1850         mutex_unlock(&cpuhp_state_mutex);
1851         return ret;
1852 }
1853
1854 int __cpuhp_state_add_instance(enum cpuhp_state state, struct hlist_node *node,
1855                                bool invoke)
1856 {
1857         int ret;
1858
1859         cpus_read_lock();
1860         ret = __cpuhp_state_add_instance_cpuslocked(state, node, invoke);
1861         cpus_read_unlock();
1862         return ret;
1863 }
1864 EXPORT_SYMBOL_GPL(__cpuhp_state_add_instance);
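/*
 * Usage sketch (illustrative only, not part of this file): a driver that
 * manages several device instances registers one multi-instance state and
 * then hooks up one hlist_node per instance. All "foo" identifiers and the
 * per-cpu helpers are hypothetical.
 *
 *	static enum cpuhp_state foo_hp_online;
 *
 *	struct foo_inst {
 *		struct hlist_node cpuhp_node;
 *	};
 *
 *	static int foo_cpu_online(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo_inst *inst;
 *
 *		inst = hlist_entry(node, struct foo_inst, cpuhp_node);
 *		return foo_init_on_cpu(inst, cpu);
 *	}
 *
 *	static int foo_cpu_offline(unsigned int cpu, struct hlist_node *node)
 *	{
 *		struct foo_inst *inst;
 *
 *		inst = hlist_entry(node, struct foo_inst, cpuhp_node);
 *		foo_exit_on_cpu(inst, cpu);
 *		return 0;
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN, "foo:online",
 *					      foo_cpu_online, foo_cpu_offline);
 *		if (ret < 0)
 *			return ret;
 *		foo_hp_online = ret;
 *		return 0;
 *	}
 *
 * Each instance is then added with cpuhp_state_add_instance(foo_hp_online,
 * &inst->cpuhp_node) and removed again with cpuhp_state_remove_instance().
 */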
1865
1866 /**
1867  * __cpuhp_setup_state_cpuslocked - Set up the callbacks for a hotplug machine state
1868  * @state:              The state to setup
1869  * @invoke:             If true, the startup function is invoked for cpus where
1870  *                      cpu state >= @state
1871  * @startup:            startup callback function
1872  * @teardown:           teardown callback function
1873  * @multi_instance:     State is set up for multiple instances which get
1874  *                      added afterwards.
1875  *
1876  * The caller needs to hold cpus read locked while calling this function.
1877  * Returns:
1878  *   On success:
1879  *      Positive state number if @state is CPUHP_AP_ONLINE_DYN
1880  *      0 for all other states
1881  *   On failure: proper (negative) error code
1882  */
1883 int __cpuhp_setup_state_cpuslocked(enum cpuhp_state state,
1884                                    const char *name, bool invoke,
1885                                    int (*startup)(unsigned int cpu),
1886                                    int (*teardown)(unsigned int cpu),
1887                                    bool multi_instance)
1888 {
1889         int cpu, ret = 0;
1890         bool dynstate;
1891
1892         lockdep_assert_cpus_held();
1893
1894         if (cpuhp_cb_check(state) || !name)
1895                 return -EINVAL;
1896
1897         mutex_lock(&cpuhp_state_mutex);
1898
1899         ret = cpuhp_store_callbacks(state, name, startup, teardown,
1900                                     multi_instance);
1901
1902         dynstate = state == CPUHP_AP_ONLINE_DYN;
1903         if (ret > 0 && dynstate) {
1904                 state = ret;
1905                 ret = 0;
1906         }
1907
1908         if (ret || !invoke || !startup)
1909                 goto out;
1910
1911         /*
1912          * Try to call the startup callback for each present cpu
1913          * depending on the hotplug state of the cpu.
1914          */
1915         for_each_present_cpu(cpu) {
1916                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1917                 int cpustate = st->state;
1918
1919                 if (cpustate < state)
1920                         continue;
1921
1922                 ret = cpuhp_issue_call(cpu, state, true, NULL);
1923                 if (ret) {
1924                         if (teardown)
1925                                 cpuhp_rollback_install(cpu, state, NULL);
1926                         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
1927                         goto out;
1928                 }
1929         }
1930 out:
1931         mutex_unlock(&cpuhp_state_mutex);
1932         /*
1933          * If the requested state is CPUHP_AP_ONLINE_DYN, return the
1934          * dynamically allocated state in case of success.
1935          */
1936         if (!ret && dynstate)
1937                 return state;
1938         return ret;
1939 }
1940 EXPORT_SYMBOL(__cpuhp_setup_state_cpuslocked);
1941
1942 int __cpuhp_setup_state(enum cpuhp_state state,
1943                         const char *name, bool invoke,
1944                         int (*startup)(unsigned int cpu),
1945                         int (*teardown)(unsigned int cpu),
1946                         bool multi_instance)
1947 {
1948         int ret;
1949
1950         cpus_read_lock();
1951         ret = __cpuhp_setup_state_cpuslocked(state, name, invoke, startup,
1952                                              teardown, multi_instance);
1953         cpus_read_unlock();
1954         return ret;
1955 }
1956 EXPORT_SYMBOL(__cpuhp_setup_state);
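/*
 * Usage sketch (illustrative only, not part of this file): the common way to
 * claim a slot in the dynamic range is the cpuhp_setup_state() wrapper, which
 * takes the cpus read lock itself and invokes the startup callback on the
 * already online CPUs. The "foo" identifiers and helpers are hypothetical.
 *
 *	static enum cpuhp_state foo_hp_state;
 *
 *	static int foo_online(unsigned int cpu)
 *	{
 *		return foo_start_on_cpu(cpu);
 *	}
 *
 *	static int foo_offline(unsigned int cpu)
 *	{
 *		foo_stop_on_cpu(cpu);
 *		return 0;
 *	}
 *
 *	static int __init foo_init(void)
 *	{
 *		int ret;
 *
 *		ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "foo:online",
 *					foo_online, foo_offline);
 *		if (ret < 0)
 *			return ret;
 *		foo_hp_state = ret;
 *		return 0;
 *	}
 */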
1957
1958 int __cpuhp_state_remove_instance(enum cpuhp_state state,
1959                                   struct hlist_node *node, bool invoke)
1960 {
1961         struct cpuhp_step *sp = cpuhp_get_step(state);
1962         int cpu;
1963
1964         BUG_ON(cpuhp_cb_check(state));
1965
1966         if (!sp->multi_instance)
1967                 return -EINVAL;
1968
1969         cpus_read_lock();
1970         mutex_lock(&cpuhp_state_mutex);
1971
1972         if (!invoke || !cpuhp_get_teardown_cb(state))
1973                 goto remove;
1974         /*
1975          * Call the teardown callback for each present cpu depending
1976          * on the hotplug state of the cpu. This function is not
1977          * allowed to fail currently!
1978          */
1979         for_each_present_cpu(cpu) {
1980                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
1981                 int cpustate = st->state;
1982
1983                 if (cpustate >= state)
1984                         cpuhp_issue_call(cpu, state, false, node);
1985         }
1986
1987 remove:
1988         hlist_del(node);
1989         mutex_unlock(&cpuhp_state_mutex);
1990         cpus_read_unlock();
1991
1992         return 0;
1993 }
1994 EXPORT_SYMBOL_GPL(__cpuhp_state_remove_instance);
1995
1996 /**
1997  * __cpuhp_remove_state_cpuslocked - Remove the callbacks for a hotplug machine state
1998  * @state:      The state to remove
1999  * @invoke:     If true, the teardown function is invoked for cpus where
2000  *              cpu state >= @state
2001  *
2002  * The caller needs to hold cpus read locked while calling this function.
2003  * The teardown callback is currently not allowed to fail. Think
2004  * about module removal!
2005  */
2006 void __cpuhp_remove_state_cpuslocked(enum cpuhp_state state, bool invoke)
2007 {
2008         struct cpuhp_step *sp = cpuhp_get_step(state);
2009         int cpu;
2010
2011         BUG_ON(cpuhp_cb_check(state));
2012
2013         lockdep_assert_cpus_held();
2014
2015         mutex_lock(&cpuhp_state_mutex);
2016         if (sp->multi_instance) {
2017                 WARN(!hlist_empty(&sp->list),
2018                      "Error: Removing state %d which has instances left.\n",
2019                      state);
2020                 goto remove;
2021         }
2022
2023         if (!invoke || !cpuhp_get_teardown_cb(state))
2024                 goto remove;
2025
2026         /*
2027          * Call the teardown callback for each present cpu depending
2028          * on the hotplug state of the cpu. This function is not
2029          * allowed to fail currently!
2030          */
2031         for_each_present_cpu(cpu) {
2032                 struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, cpu);
2033                 int cpustate = st->state;
2034
2035                 if (cpustate >= state)
2036                         cpuhp_issue_call(cpu, state, false, NULL);
2037         }
2038 remove:
2039         cpuhp_store_callbacks(state, NULL, NULL, NULL, false);
2040         mutex_unlock(&cpuhp_state_mutex);
2041 }
2042 EXPORT_SYMBOL(__cpuhp_remove_state_cpuslocked);
2043
2044 void __cpuhp_remove_state(enum cpuhp_state state, bool invoke)
2045 {
2046         cpus_read_lock();
2047         __cpuhp_remove_state_cpuslocked(state, invoke);
2048         cpus_read_unlock();
2049 }
2050 EXPORT_SYMBOL(__cpuhp_remove_state);
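/*
 * Matching teardown sketch for the registration example above (illustrative
 * only): dropping the dynamically allocated state invokes the teardown
 * callback on all online CPUs and frees the slot for reuse.
 *
 *	static void __exit foo_exit(void)
 *	{
 *		cpuhp_remove_state(foo_hp_state);
 *	}
 */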
2051
2052 #ifdef CONFIG_HOTPLUG_SMT
2053 static void cpuhp_offline_cpu_device(unsigned int cpu)
2054 {
2055         struct device *dev = get_cpu_device(cpu);
2056
2057         dev->offline = true;
2058         /* Tell user space about the state change */
2059         kobject_uevent(&dev->kobj, KOBJ_OFFLINE);
2060 }
2061
2062 static void cpuhp_online_cpu_device(unsigned int cpu)
2063 {
2064         struct device *dev = get_cpu_device(cpu);
2065
2066         dev->offline = false;
2067         /* Tell user space about the state change */
2068         kobject_uevent(&dev->kobj, KOBJ_ONLINE);
2069 }
2070
2071 int cpuhp_smt_disable(enum cpuhp_smt_control ctrlval)
2072 {
2073         int cpu, ret = 0;
2074
2075         cpu_maps_update_begin();
2076         for_each_online_cpu(cpu) {
2077                 if (topology_is_primary_thread(cpu))
2078                         continue;
2079                 ret = cpu_down_maps_locked(cpu, CPUHP_OFFLINE);
2080                 if (ret)
2081                         break;
2082                 /*
2083                  * As this needs to hold the cpu maps lock it's impossible
2084                  * to call device_offline() because that ends up calling
2085                  * cpu_down(), which takes the cpu maps lock. The cpu maps lock
2086                  * needs to be held as this might race against in-kernel
2087                  * abusers of the hotplug machinery (thermal management).
2088                  *
2089                  * So nothing would update device:offline state. That would
2090                  * leave the sysfs entry stale and prevent onlining after
2091                  * smt control has been changed to 'off' again. This is
2092                  * called under the sysfs hotplug lock, so it is properly
2093                  * serialized against the regular offline usage.
2094                  */
2095                 cpuhp_offline_cpu_device(cpu);
2096         }
2097         if (!ret)
2098                 cpu_smt_control = ctrlval;
2099         cpu_maps_update_done();
2100         return ret;
2101 }
2102
2103 int cpuhp_smt_enable(void)
2104 {
2105         int cpu, ret = 0;
2106
2107         cpu_maps_update_begin();
2108         cpu_smt_control = CPU_SMT_ENABLED;
2109         for_each_present_cpu(cpu) {
2110                 /* Skip online CPUs and CPUs on offline nodes */
2111                 if (cpu_online(cpu) || !node_online(cpu_to_node(cpu)))
2112                         continue;
2113                 ret = _cpu_up(cpu, 0, CPUHP_ONLINE);
2114                 if (ret)
2115                         break;
2116                 /* See comment in cpuhp_smt_disable() */
2117                 cpuhp_online_cpu_device(cpu);
2118         }
2119         cpu_maps_update_done();
2120         return ret;
2121 }
2122 #endif
2123
2124 #if defined(CONFIG_SYSFS) && defined(CONFIG_HOTPLUG_CPU)
2125 static ssize_t show_cpuhp_state(struct device *dev,
2126                                 struct device_attribute *attr, char *buf)
2127 {
2128         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2129
2130         return sprintf(buf, "%d\n", st->state);
2131 }
2132 static DEVICE_ATTR(state, 0444, show_cpuhp_state, NULL);
2133
2134 static ssize_t write_cpuhp_target(struct device *dev,
2135                                   struct device_attribute *attr,
2136                                   const char *buf, size_t count)
2137 {
2138         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2139         struct cpuhp_step *sp;
2140         int target, ret;
2141
2142         ret = kstrtoint(buf, 10, &target);
2143         if (ret)
2144                 return ret;
2145
2146 #ifdef CONFIG_CPU_HOTPLUG_STATE_CONTROL
2147         if (target < CPUHP_OFFLINE || target > CPUHP_ONLINE)
2148                 return -EINVAL;
2149 #else
2150         if (target != CPUHP_OFFLINE && target != CPUHP_ONLINE)
2151                 return -EINVAL;
2152 #endif
2153
2154         ret = lock_device_hotplug_sysfs();
2155         if (ret)
2156                 return ret;
2157
2158         mutex_lock(&cpuhp_state_mutex);
2159         sp = cpuhp_get_step(target);
2160         ret = !sp->name || sp->cant_stop ? -EINVAL : 0;
2161         mutex_unlock(&cpuhp_state_mutex);
2162         if (ret)
2163                 goto out;
2164
2165         if (st->state < target)
2166                 ret = cpu_up(dev->id, target);
2167         else
2168                 ret = cpu_down(dev->id, target);
2169 out:
2170         unlock_device_hotplug();
2171         return ret ? ret : count;
2172 }
2173
2174 static ssize_t show_cpuhp_target(struct device *dev,
2175                                  struct device_attribute *attr, char *buf)
2176 {
2177         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2178
2179         return sprintf(buf, "%d\n", st->target);
2180 }
2181 static DEVICE_ATTR(target, 0644, show_cpuhp_target, write_cpuhp_target);
2182
2183
2184 static ssize_t write_cpuhp_fail(struct device *dev,
2185                                 struct device_attribute *attr,
2186                                 const char *buf, size_t count)
2187 {
2188         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2189         struct cpuhp_step *sp;
2190         int fail, ret;
2191
2192         ret = kstrtoint(buf, 10, &fail);
2193         if (ret)
2194                 return ret;
2195
2196         if (fail < CPUHP_OFFLINE || fail > CPUHP_ONLINE)
2197                 return -EINVAL;
2198
2199         /*
2200          * Cannot fail STARTING/DYING callbacks.
2201          */
2202         if (cpuhp_is_atomic_state(fail))
2203                 return -EINVAL;
2204
2205         /*
2206          * Cannot fail anything that doesn't have callbacks.
2207          */
2208         mutex_lock(&cpuhp_state_mutex);
2209         sp = cpuhp_get_step(fail);
2210         if (!sp->startup.single && !sp->teardown.single)
2211                 ret = -EINVAL;
2212         mutex_unlock(&cpuhp_state_mutex);
2213         if (ret)
2214                 return ret;
2215
2216         st->fail = fail;
2217
2218         return count;
2219 }
2220
2221 static ssize_t show_cpuhp_fail(struct device *dev,
2222                                struct device_attribute *attr, char *buf)
2223 {
2224         struct cpuhp_cpu_state *st = per_cpu_ptr(&cpuhp_state, dev->id);
2225
2226         return sprintf(buf, "%d\n", st->fail);
2227 }
2228
2229 static DEVICE_ATTR(fail, 0644, show_cpuhp_fail, write_cpuhp_fail);
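/*
 * The attributes above are grouped under the per-cpu "hotplug" directory,
 * i.e. /sys/devices/system/cpu/cpuN/hotplug/{state,target,fail}. Illustrative
 * shell usage (intermediate targets other than offline/online additionally
 * require CONFIG_CPU_HOTPLUG_STATE_CONTROL):
 *
 *	cat /sys/devices/system/cpu/cpu1/hotplug/state
 *	echo 0 > /sys/devices/system/cpu/cpu1/hotplug/target
 *	echo <state nr> > /sys/devices/system/cpu/cpu1/hotplug/fail
 */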
2230
2231 static struct attribute *cpuhp_cpu_attrs[] = {
2232         &dev_attr_state.attr,
2233         &dev_attr_target.attr,
2234         &dev_attr_fail.attr,
2235         NULL
2236 };
2237
2238 static const struct attribute_group cpuhp_cpu_attr_group = {
2239         .attrs = cpuhp_cpu_attrs,
2240         .name = "hotplug",
2242 };
2243
2244 static ssize_t show_cpuhp_states(struct device *dev,
2245                                  struct device_attribute *attr, char *buf)
2246 {
2247         ssize_t cur, res = 0;
2248         int i;
2249
2250         mutex_lock(&cpuhp_state_mutex);
2251         for (i = CPUHP_OFFLINE; i <= CPUHP_ONLINE; i++) {
2252                 struct cpuhp_step *sp = cpuhp_get_step(i);
2253
2254                 if (sp->name) {
2255                         cur = sprintf(buf, "%3d: %s\n", i, sp->name);
2256                         buf += cur;
2257                         res += cur;
2258                 }
2259         }
2260         mutex_unlock(&cpuhp_state_mutex);
2261         return res;
2262 }
2263 static DEVICE_ATTR(states, 0444, show_cpuhp_states, NULL);
2264
2265 static struct attribute *cpuhp_cpu_root_attrs[] = {
2266         &dev_attr_states.attr,
2267         NULL
2268 };
2269
2270 static const struct attribute_group cpuhp_cpu_root_attr_group = {
2271         .attrs = cpuhp_cpu_root_attrs,
2272         .name = "hotplug",
2274 };
2275
2276 #ifdef CONFIG_HOTPLUG_SMT
2277
2278 static ssize_t
2279 __store_smt_control(struct device *dev, struct device_attribute *attr,
2280                     const char *buf, size_t count)
2281 {
2282         int ctrlval, ret;
2283
2284         if (sysfs_streq(buf, "on"))
2285                 ctrlval = CPU_SMT_ENABLED;
2286         else if (sysfs_streq(buf, "off"))
2287                 ctrlval = CPU_SMT_DISABLED;
2288         else if (sysfs_streq(buf, "forceoff"))
2289                 ctrlval = CPU_SMT_FORCE_DISABLED;
2290         else
2291                 return -EINVAL;
2292
2293         if (cpu_smt_control == CPU_SMT_FORCE_DISABLED)
2294                 return -EPERM;
2295
2296         if (cpu_smt_control == CPU_SMT_NOT_SUPPORTED)
2297                 return -ENODEV;
2298
2299         ret = lock_device_hotplug_sysfs();
2300         if (ret)
2301                 return ret;
2302
2303         if (ctrlval != cpu_smt_control) {
2304                 switch (ctrlval) {
2305                 case CPU_SMT_ENABLED:
2306                         ret = cpuhp_smt_enable();
2307                         break;
2308                 case CPU_SMT_DISABLED:
2309                 case CPU_SMT_FORCE_DISABLED:
2310                         ret = cpuhp_smt_disable(ctrlval);
2311                         break;
2312                 }
2313         }
2314
2315         unlock_device_hotplug();
2316         return ret ? ret : count;
2317 }
2318
2319 #else /* !CONFIG_HOTPLUG_SMT */
2320 static ssize_t
2321 __store_smt_control(struct device *dev, struct device_attribute *attr,
2322                     const char *buf, size_t count)
2323 {
2324         return -ENODEV;
2325 }
2326 #endif /* CONFIG_HOTPLUG_SMT */
2327
2328 static const char *smt_states[] = {
2329         [CPU_SMT_ENABLED]               = "on",
2330         [CPU_SMT_DISABLED]              = "off",
2331         [CPU_SMT_FORCE_DISABLED]        = "forceoff",
2332         [CPU_SMT_NOT_SUPPORTED]         = "notsupported",
2333         [CPU_SMT_NOT_IMPLEMENTED]       = "notimplemented",
2334 };
2335
2336 static ssize_t
2337 show_smt_control(struct device *dev, struct device_attribute *attr, char *buf)
2338 {
2339         const char *state = smt_states[cpu_smt_control];
2340
2341         return snprintf(buf, PAGE_SIZE - 2, "%s\n", state);
2342 }
2343
2344 static ssize_t
2345 store_smt_control(struct device *dev, struct device_attribute *attr,
2346                   const char *buf, size_t count)
2347 {
2348         return __store_smt_control(dev, attr, buf, count);
2349 }
2350 static DEVICE_ATTR(control, 0644, show_smt_control, store_smt_control);
2351
2352 static ssize_t
2353 show_smt_active(struct device *dev, struct device_attribute *attr, char *buf)
2354 {
2355         return snprintf(buf, PAGE_SIZE - 2, "%d\n", sched_smt_active());
2356 }
2357 static DEVICE_ATTR(active, 0444, show_smt_active, NULL);
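/*
 * These attributes land in /sys/devices/system/cpu/smt/. "control" accepts
 * the strings handled by __store_smt_control() above ("on", "off",
 * "forceoff"), while "active" reports whether SMT siblings are currently
 * scheduled, as returned by sched_smt_active().
 */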
2358
2359 static struct attribute *cpuhp_smt_attrs[] = {
2360         &dev_attr_control.attr,
2361         &dev_attr_active.attr,
2362         NULL
2363 };
2364
2365 static const struct attribute_group cpuhp_smt_attr_group = {
2366         .attrs = cpuhp_smt_attrs,
2367         .name = "smt",
2369 };
2370
2371 static int __init cpu_smt_sysfs_init(void)
2372 {
2373         return sysfs_create_group(&cpu_subsys.dev_root->kobj,
2374                                   &cpuhp_smt_attr_group);
2375 }
2376
2377 static int __init cpuhp_sysfs_init(void)
2378 {
2379         int cpu, ret;
2380
2381         ret = cpu_smt_sysfs_init();
2382         if (ret)
2383                 return ret;
2384
2385         ret = sysfs_create_group(&cpu_subsys.dev_root->kobj,
2386                                  &cpuhp_cpu_root_attr_group);
2387         if (ret)
2388                 return ret;
2389
2390         for_each_possible_cpu(cpu) {
2391                 struct device *dev = get_cpu_device(cpu);
2392
2393                 if (!dev)
2394                         continue;
2395                 ret = sysfs_create_group(&dev->kobj, &cpuhp_cpu_attr_group);
2396                 if (ret)
2397                         return ret;
2398         }
2399         return 0;
2400 }
2401 device_initcall(cpuhp_sysfs_init);
2402 #endif /* CONFIG_SYSFS && CONFIG_HOTPLUG_CPU */
2403
2404 /*
2405  * cpu_bit_bitmap[] is a special, "compressed" data structure that
2406  * represents, for each nr, the NR_CPUS-bit value with only bit nr set (1<<nr).
2407  *
2408  * It is used by cpumask_of() to get a constant address to a CPU
2409  * mask value that has a single bit set only.
2410  */
2411
2412 /* cpu_bit_bitmap[0] is empty - so we can back into it */
2413 #define MASK_DECLARE_1(x)       [x+1][0] = (1UL << (x))
2414 #define MASK_DECLARE_2(x)       MASK_DECLARE_1(x), MASK_DECLARE_1(x+1)
2415 #define MASK_DECLARE_4(x)       MASK_DECLARE_2(x), MASK_DECLARE_2(x+2)
2416 #define MASK_DECLARE_8(x)       MASK_DECLARE_4(x), MASK_DECLARE_4(x+4)
2417
2418 const unsigned long cpu_bit_bitmap[BITS_PER_LONG+1][BITS_TO_LONGS(NR_CPUS)] = {
2419
2420         MASK_DECLARE_8(0),      MASK_DECLARE_8(8),
2421         MASK_DECLARE_8(16),     MASK_DECLARE_8(24),
2422 #if BITS_PER_LONG > 32
2423         MASK_DECLARE_8(32),     MASK_DECLARE_8(40),
2424         MASK_DECLARE_8(48),     MASK_DECLARE_8(56),
2425 #endif
2426 };
2427 EXPORT_SYMBOL_GPL(cpu_bit_bitmap);
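/*
 * Sketch of how this table is consumed (an assumption about the cpumask_of()
 * implementation in <linux/cpumask.h>, shown only for illustration): a
 * constant single-bit mask for @cpu is obtained by pointing into the row for
 * the bit position and backing the pointer up by the word index, which is
 * why row 0 must stay empty.
 *
 *	const unsigned long *p = cpu_bit_bitmap[1 + cpu % BITS_PER_LONG];
 *
 *	p -= cpu / BITS_PER_LONG;
 *	return to_cpumask(p);
 */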
2428
2429 const DECLARE_BITMAP(cpu_all_bits, NR_CPUS) = CPU_BITS_ALL;
2430 EXPORT_SYMBOL(cpu_all_bits);
2431
2432 #ifdef CONFIG_INIT_ALL_POSSIBLE
2433 struct cpumask __cpu_possible_mask __read_mostly
2434         = {CPU_BITS_ALL};
2435 #else
2436 struct cpumask __cpu_possible_mask __read_mostly;
2437 #endif
2438 EXPORT_SYMBOL(__cpu_possible_mask);
2439
2440 struct cpumask __cpu_online_mask __read_mostly;
2441 EXPORT_SYMBOL(__cpu_online_mask);
2442
2443 struct cpumask __cpu_present_mask __read_mostly;
2444 EXPORT_SYMBOL(__cpu_present_mask);
2445
2446 struct cpumask __cpu_active_mask __read_mostly;
2447 EXPORT_SYMBOL(__cpu_active_mask);
2448
2449 atomic_t __num_online_cpus __read_mostly;
2450 EXPORT_SYMBOL(__num_online_cpus);
2451
2452 void init_cpu_present(const struct cpumask *src)
2453 {
2454         cpumask_copy(&__cpu_present_mask, src);
2455 }
2456
2457 void init_cpu_possible(const struct cpumask *src)
2458 {
2459         cpumask_copy(&__cpu_possible_mask, src);
2460 }
2461
2462 void init_cpu_online(const struct cpumask *src)
2463 {
2464         cpumask_copy(&__cpu_online_mask, src);
2465 }
2466
2467 void set_cpu_online(unsigned int cpu, bool online)
2468 {
2469         /*
2470          * atomic_inc/dec() is required to handle the horrid abuse of this
2471          * function by the reboot and kexec code which invoke it from
2472          * IPI/NMI broadcasts when shutting down CPUs. Invocation from
2473          * regular CPU hotplug is properly serialized.
2474          *
2475          * Note that the fact that __num_online_cpus is of type atomic_t
2476          * does not protect readers which are not serialized against
2477          * concurrent hotplug operations.
2478          */
2479         if (online) {
2480                 if (!cpumask_test_and_set_cpu(cpu, &__cpu_online_mask))
2481                         atomic_inc(&__num_online_cpus);
2482         } else {
2483                 if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
2484                         atomic_dec(&__num_online_cpus);
2485         }
2486 }
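/*
 * Reader side sketch for the note above (illustrative only): a reader that
 * needs a stable view of the online mask has to serialize against hotplug
 * itself; num_online_cpus() alone is only a snapshot. do_something() is a
 * hypothetical per-cpu operation.
 *
 *	unsigned int cpu;
 *
 *	cpus_read_lock();
 *	for_each_online_cpu(cpu)
 *		do_something(cpu);
 *	cpus_read_unlock();
 */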
2487
2488 /*
2489  * Activate the first processor.
2490  */
2491 void __init boot_cpu_init(void)
2492 {
2493         int cpu = smp_processor_id();
2494
2495         /* Mark the boot cpu "present", "online" etc for SMP and UP case */
2496         set_cpu_online(cpu, true);
2497         set_cpu_active(cpu, true);
2498         set_cpu_present(cpu, true);
2499         set_cpu_possible(cpu, true);
2500
2501 #ifdef CONFIG_SMP
2502         __boot_cpu_id = cpu;
2503 #endif
2504 }
2505
2506 /*
2507  * Must be called _AFTER_ setting up the per_cpu areas
2508  */
2509 void __init boot_cpu_hotplug_init(void)
2510 {
2511 #ifdef CONFIG_SMP
2512         cpumask_set_cpu(smp_processor_id(), &cpus_booted_once_mask);
2513 #endif
2514         this_cpu_write(cpuhp_state.state, CPUHP_ONLINE);
2515 }
2516
2517 /*
2518  * These are used for a global "mitigations=" cmdline option for toggling
2519  * optional CPU mitigations.
2520  */
2521 enum cpu_mitigations {
2522         CPU_MITIGATIONS_OFF,
2523         CPU_MITIGATIONS_AUTO,
2524         CPU_MITIGATIONS_AUTO_NOSMT,
2525 };
2526
2527 static enum cpu_mitigations cpu_mitigations __ro_after_init =
2528         CPU_MITIGATIONS_AUTO;
2529
2530 static int __init mitigations_parse_cmdline(char *arg)
2531 {
2532         if (!strcmp(arg, "off"))
2533                 cpu_mitigations = CPU_MITIGATIONS_OFF;
2534         else if (!strcmp(arg, "auto"))
2535                 cpu_mitigations = CPU_MITIGATIONS_AUTO;
2536         else if (!strcmp(arg, "auto,nosmt"))
2537                 cpu_mitigations = CPU_MITIGATIONS_AUTO_NOSMT;
2538         else
2539                 pr_crit("Unsupported mitigations=%s, system may still be vulnerable\n",
2540                         arg);
2541
2542         return 0;
2543 }
2544 early_param("mitigations", mitigations_parse_cmdline);
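/*
 * Example kernel command lines for the option parsed above (illustrative):
 *
 *	mitigations=off
 *	mitigations=auto
 *	mitigations=auto,nosmt
 */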
2545
2546 /* mitigations=off */
2547 bool cpu_mitigations_off(void)
2548 {
2549         return cpu_mitigations == CPU_MITIGATIONS_OFF;
2550 }
2551 EXPORT_SYMBOL_GPL(cpu_mitigations_off);
2552
2553 /* mitigations=auto,nosmt */
2554 bool cpu_mitigations_auto_nosmt(void)
2555 {
2556         return cpu_mitigations == CPU_MITIGATIONS_AUTO_NOSMT;
2557 }
2558 EXPORT_SYMBOL_GPL(cpu_mitigations_auto_nosmt);