arch/loongarch/kernel/acpi.c
// SPDX-License-Identifier: GPL-2.0
/*
 * acpi.c - Architecture-Specific Low-Level ACPI Boot Support
 *
 * Author: Jianmin Lv <lvjianmin@loongson.cn>
 *         Huacai Chen <chenhuacai@loongson.cn>
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */

#include <linux/init.h>
#include <linux/acpi.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/memblock.h>
#include <linux/serial_core.h>
#include <asm/io.h>
#include <asm/numa.h>
#include <asm/loongson.h>

int acpi_disabled;
EXPORT_SYMBOL(acpi_disabled);
int acpi_noirq;
int acpi_pci_disabled;
EXPORT_SYMBOL(acpi_pci_disabled);
int acpi_strict = 1; /* We have no workarounds on LoongArch */
int num_processors;
int disabled_cpus;
enum acpi_irq_model_id acpi_irq_model = ACPI_IRQ_MODEL_PLATFORM;

u64 acpi_saved_sp;

#define MAX_CORE_PIC 256

#define PREFIX                  "ACPI: "

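/*
 * Map an ACPI GSI to a Linux IRQ number by registering the GSI with the
 * default trigger/polarity. Returns 0 and stores the IRQ in *irqp on
 * success, -EINVAL on failure.
 */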
int acpi_gsi_to_irq(u32 gsi, unsigned int *irqp)
{
        int irq;

        irq = acpi_register_gsi(NULL, gsi, -1, -1);
        if (irq < 0)
                return -EINVAL;
        if (irqp)
                *irqp = irq;

        return 0;
}
EXPORT_SYMBOL_GPL(acpi_gsi_to_irq);

int acpi_isa_irq_to_gsi(unsigned int isa_irq, u32 *gsi)
{
        if (gsi)
                *gsi = isa_irq;
        return 0;
}

/*
 * success: return IRQ number (>=0)
 * failure: return < 0
 */
int acpi_register_gsi(struct device *dev, u32 gsi, int trigger, int polarity)
{
        struct irq_fwspec fwspec;

        switch (gsi) {
        case GSI_MIN_CPU_IRQ ... GSI_MAX_CPU_IRQ:
                fwspec.fwnode = liointc_domain->fwnode;
                fwspec.param[0] = gsi - GSI_MIN_CPU_IRQ;
                fwspec.param_count = 1;

                return irq_create_fwspec_mapping(&fwspec);

        case GSI_MIN_LPC_IRQ ... GSI_MAX_LPC_IRQ:
                if (!pch_lpc_domain)
                        return -EINVAL;

                fwspec.fwnode = pch_lpc_domain->fwnode;
                fwspec.param[0] = gsi - GSI_MIN_LPC_IRQ;
                fwspec.param[1] = acpi_dev_get_irq_type(trigger, polarity);
                fwspec.param_count = 2;

                return irq_create_fwspec_mapping(&fwspec);

        case GSI_MIN_PCH_IRQ ... GSI_MAX_PCH_IRQ:
                if (!pch_pic_domain[0])
                        return -EINVAL;

                fwspec.fwnode = pch_pic_domain[0]->fwnode;
                fwspec.param[0] = gsi - GSI_MIN_PCH_IRQ;
                fwspec.param[1] = IRQ_TYPE_LEVEL_HIGH;
                fwspec.param_count = 2;

                return irq_create_fwspec_mapping(&fwspec);
        }

        return -EINVAL;
}
EXPORT_SYMBOL_GPL(acpi_register_gsi);

void acpi_unregister_gsi(u32 gsi)
{

}
EXPORT_SYMBOL_GPL(acpi_unregister_gsi);

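/*
 * Temporarily map/unmap an ACPI table during early boot, before the
 * regular ioremap() machinery is available.
 */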
void __init __iomem *__acpi_map_table(unsigned long phys, unsigned long size)
{
        if (!phys || !size)
                return NULL;

        return early_memremap(phys, size);
}

void __init __acpi_unmap_table(void __iomem *map, unsigned long size)
{
        if (!map || !size)
                return;

        early_memunmap(map, size);
}

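/*
 * Map ACPI-provided physical ranges for the ACPI core: use a cached
 * mapping for ranges backed by RAM, an uncached one otherwise.
 */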
void __init __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size)
{
        if (!memblock_is_memory(phys))
                return ioremap(phys, size);
        else
                return ioremap_cache(phys, size);
}

void __init acpi_boot_table_init(void)
{
        /*
         * If acpi_disabled, bail out
         */
        if (acpi_disabled)
                return;

        /*
         * Initialize the ACPI boot-time table parser.
         */
        if (acpi_table_init()) {
                disable_acpi();
                return;
        }
}

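/*
 * Record a MADT-enumerated CPU: pick a logical CPU number, mark it
 * possible/present if enabled, and fill in the physical<->logical maps.
 */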
static int set_processor_mask(u32 id, u32 flags)
{
        int cpu, cpuid = id;

        if (num_processors >= nr_cpu_ids) {
                pr_warn(PREFIX "nr_cpus/possible_cpus limit of %i reached. processor 0x%x ignored.\n",
                        nr_cpu_ids, cpuid);
                return -ENODEV;
        }

        if (cpuid == loongson_sysconf.boot_cpu_id)
                cpu = 0;
        else
                cpu = cpumask_next_zero(-1, cpu_present_mask);

        if (flags & ACPI_MADT_ENABLED) {
                num_processors++;
                set_cpu_possible(cpu, true);
                set_cpu_present(cpu, true);
                __cpu_number_map[cpuid] = cpu;
                __cpu_logical_map[cpu] = cpuid;
        } else {
                disabled_cpus++;
        }

        return cpu;
}

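/*
 * Reset the physical<->logical CPU maps and propagate the enumerated
 * CPU count to loongson_sysconf.
 */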
static void __init acpi_process_madt(void)
{
        int i;

        for (i = 0; i < NR_CPUS; i++) {
                __cpu_number_map[i] = -1;
                __cpu_logical_map[i] = -1;
        }

        loongson_sysconf.nr_cpus = num_processors;
}

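/*
 * Finish ACPI boot-time setup: record the boot CPU id, process the MADT
 * and parse the SPCR console table.
 */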
int __init acpi_boot_init(void)
{
        /*
         * If acpi_disabled, bail out
         */
        if (acpi_disabled)
                return -1;

        loongson_sysconf.boot_cpu_id = read_csr_cpuid();

        /*
         * Process the Multiple APIC Description Table (MADT), if present
         */
        acpi_process_madt();

        /* Do not enable ACPI SPCR console by default */
        acpi_parse_spcr(earlycon_acpi_spcr_enable, false);

        return 0;
}

#ifdef CONFIG_ACPI_NUMA

static __init int setup_node(int pxm)
{
        return acpi_map_pxm_to_node(pxm);
}

/*
 * Callback for SLIT parsing.  pxm_to_node() returns NUMA_NO_NODE for
 * I/O localities since SRAT does not list them.  I/O localities are
 * not supported at this point.
 */
unsigned int numa_distance_cnt;

static inline unsigned int get_numa_distances_cnt(struct acpi_table_slit *slit)
{
        return slit->locality_count;
}

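/*
 * Record one entry of the SLIT distance matrix, rejecting values that do
 * not fit in a u8 or that give a node a non-local distance to itself.
 */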
void __init numa_set_distance(int from, int to, int distance)
{
        if ((u8)distance != distance || (from == to && distance != LOCAL_DISTANCE)) {
                pr_warn_once("Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
                                from, to, distance);
                return;
        }

        node_distances[from][to] = distance;
}

/* Callback for Proximity Domain -> CPUID mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_srat_cpu_affinity *pa)
{
        int pxm, node;

        if (srat_disabled())
                return;
        if (pa->header.length != sizeof(struct acpi_srat_cpu_affinity)) {
                bad_srat();
                return;
        }
        if ((pa->flags & ACPI_SRAT_CPU_ENABLED) == 0)
                return;
        pxm = pa->proximity_domain_lo;
        if (acpi_srat_revision >= 2) {
                pxm |= (pa->proximity_domain_hi[0] << 8);
                pxm |= (pa->proximity_domain_hi[1] << 16);
                pxm |= (pa->proximity_domain_hi[2] << 24);
        }
        node = setup_node(pxm);
        if (node < 0) {
                pr_err("SRAT: Too many proximity domains %x\n", pxm);
                bad_srat();
                return;
        }

        if (pa->apic_id >= CONFIG_NR_CPUS) {
                pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u skipped apicid that is too big\n",
                                pxm, pa->apic_id, node);
                return;
        }

        early_numa_add_cpu(pa->apic_id, node);

        set_cpuid_to_node(pa->apic_id, node);
        node_set(node, numa_nodes_parsed);
        pr_info("SRAT: PXM %u -> CPU 0x%02x -> Node %u\n", pxm, pa->apic_id, node);
}

void __init acpi_numa_arch_fixup(void) {}
#endif

void __init arch_reserve_mem_area(acpi_physical_address addr, size_t size)
{
        memblock_reserve(addr, size);
}

#ifdef CONFIG_ACPI_HOTPLUG_CPU

#include <acpi/processor.h>

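/*
 * Bind a hot-added CPU to the NUMA node described by its ACPI handle,
 * when NUMA support is enabled.
 */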
static int __ref acpi_map_cpu2node(acpi_handle handle, int cpu, int physid)
{
#ifdef CONFIG_ACPI_NUMA
        int nid;

        nid = acpi_get_node(handle);
        if (nid != NUMA_NO_NODE) {
                set_cpuid_to_node(physid, nid);
                node_set(nid, numa_nodes_parsed);
                set_cpu_numa_node(cpu, nid);
                cpumask_set_cpu(cpu, cpumask_of_node(nid));
        }
#endif
        return 0;
}

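/*
 * Called on ACPI CPU hot-add: allocate a logical CPU number for the new
 * core and attach it to its NUMA node.
 */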
int acpi_map_cpu(acpi_handle handle, phys_cpuid_t physid, u32 acpi_id, int *pcpu)
{
        int cpu;

        cpu = set_processor_mask(physid, ACPI_MADT_ENABLED);
        if (cpu < 0) {
                pr_info(PREFIX "Unable to map lapic to logical cpu number\n");
                return cpu;
        }

        acpi_map_cpu2node(handle, cpu, physid);

        *pcpu = cpu;

        return 0;
}
EXPORT_SYMBOL(acpi_map_cpu);

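/*
 * Called on ACPI CPU hot-remove: drop the CPU from the present mask and
 * forget its NUMA binding.
 */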
int acpi_unmap_cpu(int cpu)
{
#ifdef CONFIG_ACPI_NUMA
        set_cpuid_to_node(cpu_logical_map(cpu), NUMA_NO_NODE);
#endif
        set_cpu_present(cpu, false);
        num_processors--;

        pr_info("cpu%d hot remove!\n", cpu);

        return 0;
}
EXPORT_SYMBOL(acpi_unmap_cpu);

#endif /* CONFIG_ACPI_HOTPLUG_CPU */