kernel/up.c: uniprocessor (UP) stubs for the cross-CPU call API implemented by kernel/smp.c
[linux-2.6-microblaze.git] / kernel / up.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Uniprocessor-only support functions.  The counterpart to kernel/smp.c
4  */
5
6 #include <linux/interrupt.h>
7 #include <linux/kernel.h>
8 #include <linux/export.h>
9 #include <linux/smp.h>
10 #include <linux/hypervisor.h>
11
/*
 * UP stub of smp_call_function_single(): CPU 0 is the only CPU, so just
 * invoke @func(@info) right here with interrupts disabled.  The call is
 * inherently synchronous on UP, so @wait is ignored.
 *
 * Warns (but still runs @func locally) if a CPU other than 0 is asked for.
 * Always returns 0.
 */
int smp_call_function_single(int cpu, void (*func) (void *info), void *info,
				int wait)
{
	unsigned long irqflags;

	/* There is no CPU other than 0 on a uniprocessor kernel. */
	WARN_ON(cpu != 0);

	local_irq_save(irqflags);
	func(info);
	local_irq_restore(irqflags);

	return 0;
}
EXPORT_SYMBOL(smp_call_function_single);
26
27 int smp_call_function_single_async(int cpu, call_single_data_t *csd)
28 {
29         unsigned long flags;
30
31         local_irq_save(flags);
32         csd->func(csd->info);
33         local_irq_restore(flags);
34         return 0;
35 }
36 EXPORT_SYMBOL(smp_call_function_single_async);
37
38 void on_each_cpu(smp_call_func_t func, void *info, int wait)
39 {
40         unsigned long flags;
41
42         local_irq_save(flags);
43         func(info);
44         local_irq_restore(flags);
45 }
46 EXPORT_SYMBOL(on_each_cpu);
47
48 /*
49  * Note we still need to test the mask even for UP
50  * because we actually can get an empty mask from
51  * code that on SMP might call us without the local
52  * CPU in the mask.
53  */
54 void on_each_cpu_mask(const struct cpumask *mask,
55                       smp_call_func_t func, void *info, bool wait)
56 {
57         unsigned long flags;
58
59         if (cpumask_test_cpu(0, mask)) {
60                 local_irq_save(flags);
61                 func(info);
62                 local_irq_restore(flags);
63         }
64 }
65 EXPORT_SYMBOL(on_each_cpu_mask);
66
67 /*
68  * Preemption is disabled here to make sure the cond_func is called under the
69  * same condtions in UP and SMP.
70  */
71 void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
72                            void *info, bool wait, const struct cpumask *mask)
73 {
74         unsigned long flags;
75
76         preempt_disable();
77         if (cond_func(0, info)) {
78                 local_irq_save(flags);
79                 func(info);
80                 local_irq_restore(flags);
81         }
82         preempt_enable();
83 }
84 EXPORT_SYMBOL(on_each_cpu_cond_mask);
85
86 void on_each_cpu_cond(smp_cond_func_t cond_func, smp_call_func_t func,
87                       void *info, bool wait)
88 {
89         on_each_cpu_cond_mask(cond_func, func, info, wait, NULL);
90 }
91 EXPORT_SYMBOL(on_each_cpu_cond);
92
93 int smp_call_on_cpu(unsigned int cpu, int (*func)(void *), void *par, bool phys)
94 {
95         int ret;
96
97         if (cpu != 0)
98                 return -ENXIO;
99
100         if (phys)
101                 hypervisor_pin_vcpu(0);
102         ret = func(par);
103         if (phys)
104                 hypervisor_pin_vcpu(-1);
105
106         return ret;
107 }
108 EXPORT_SYMBOL_GPL(smp_call_on_cpu);