// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */

#define pr_fmt(fmt) "ACPI: OSL: " fmt

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <asm/io.h>
#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

/* Definitions for ACPI_DEBUG_PRINT() */
#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
        struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif                          /*ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
                                      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
                                      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
        struct list_head list;
        void __iomem *virt;
        acpi_physical_address phys;
        acpi_size size;
        union {
                unsigned long refcount;
                struct rcu_work rwork;
        } track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region (struct acpi_generic_address *gas,
        unsigned int length, char *desc)
{
        u64 addr;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !length)
                return;

        /* Resources are never freed */
        if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                request_region(addr, length, desc);
        else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
        va_list args;
        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                if (printk_get_level(buffer))
                        printk("%s", buffer);
                else
                        printk(KERN_CONT "%s", buffer);
        }
#else
        if (acpi_debugger_write_log(buffer) < 0) {
                if (printk_get_level(buffer))
                        printk("%s", buffer);
                else
                        printk(KERN_CONT "%s", buffer);
        }
#endif
}

#ifdef CONFIG_KEXEC
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
        return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
#endif

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
        acpi_physical_address pa;

#ifdef CONFIG_KEXEC
        /*
         * We may have been provided with an RSDP on the command line,
         * but if a malicious user has done so they may be pointing us
         * at modified ACPI tables that could alter kernel behaviour -
         * so, we check the lockdown status before making use of
         * it. If we trust it then also stash it in an architecture
         * specific location (if appropriate) so it can be carried
         * over further kexec()s.
         */
        if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
                acpi_arch_set_root_pointer(acpi_rsdp);
                return acpi_rsdp;
        }
#endif
        pa = acpi_arch_get_root_pointer();
        if (pa)
                return pa;

        if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                pr_err("System description tables not found\n");
        } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_find_root_pointer(&pa);
        }

        return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
                if (map->phys <= phys &&
                    phys + size <= map->phys + map->size)
                        return map;

        return NULL;
}
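
/*
 * Containment check, illustrated with made-up numbers (not part of the
 * driver): an entry with map->phys == 0x1000 and map->size == 0x2000
 * covers [0x1000, 0x3000), so acpi_map_lookup(0x1800, 0x100) returns it
 * (0x1000 <= 0x1800 and 0x1900 <= 0x3000), while a request straddling
 * the end of the mapping, e.g. acpi_map_lookup(0x2f00, 0x200), falls
 * through and returns NULL.
 */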

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;

        map = acpi_map_lookup(phys, size);
        if (map)
                return map->virt + (phys - map->phys);

        return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;
        void __iomem *virt = NULL;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(phys, size);
        if (map) {
                virt = map->virt + (phys - map->phys);
                map->track.refcount++;
        }
        mutex_unlock(&acpi_ioremap_lock);
        return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
                if (map->virt <= virt &&
                    virt + size <= map->virt + map->size)
                        return map;

        return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz,
                              bool memory)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn)) {
                if (pg_sz > PAGE_SIZE)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
                return memory ? acpi_os_memmap(pg_off, pg_sz) :
                                acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn))
                kunmap(pfn_to_page(pfn));
        else
                iounmap(vaddr);
}

/**
 * __acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 * @memory: true if remapping memory, false if IO
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
static void __iomem __ref
*__acpi_os_map_iomem(acpi_physical_address phys, acpi_size size, bool memory)
{
        struct acpi_ioremap *map;
        void __iomem *virt;
        acpi_physical_address pg_off;
        acpi_size pg_sz;

        if (phys > ULONG_MAX) {
                pr_err("Cannot map memory that high: 0x%llx\n", phys);
                return NULL;
        }

        if (!acpi_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);

        mutex_lock(&acpi_ioremap_lock);
        /* Check if there's a suitable mapping already. */
        map = acpi_map_lookup(phys, size);
        if (map) {
                map->track.refcount++;
                goto out;
        }

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return NULL;
        }

        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
        virt = acpi_map(phys, size, memory);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
                return NULL;
        }

        INIT_LIST_HEAD(&map->list);
        map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
        map->phys = pg_off;
        map->size = pg_sz;
        map->track.refcount = 1;

        list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
}
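
/*
 * Reference-counting sketch with hypothetical addresses (illustrative
 * only): lookups that land in the same page-aligned entry share it
 * instead of creating a second mapping, so teardown needs a matching
 * number of unmap calls:
 *
 *      a = acpi_os_map_iomem(0x1004, 8);  creates the entry, refcount 1
 *      b = acpi_os_map_iomem(0x1008, 4);  reuses the entry,  refcount 2
 *      acpi_os_unmap_iomem(b, 4);         refcount 1, entry kept
 *      acpi_os_unmap_iomem(a, 8);         refcount 0, removal queued
 */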

void __iomem *__ref
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
        return __acpi_os_map_iomem(phys, size, false);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        return (void *)__acpi_os_map_iomem(phys, size, true);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

static void acpi_os_map_remove(struct work_struct *work)
{
        struct acpi_ioremap *map = container_of(to_rcu_work(work),
                                                struct acpi_ioremap,
                                                track.rwork);

        acpi_unmap(map->phys, map->virt);
        kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
        if (--map->track.refcount)
                return;

        list_del_rcu(&map->list);

        INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
        queue_rcu_work(system_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        if (!acpi_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }

        mutex_lock(&acpi_ioremap_lock);

        map = acpi_map_lookup_virt(virt, size);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                WARN(true, "ACPI: %s: bad address %p\n", __func__, virt);
                return;
        }
        acpi_os_drop_map_ref(map);

        mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
        acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);
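
/*
 * Typical usage sketch (hypothetical caller, for illustration only):
 *
 *      void __iomem *p = acpi_os_map_iomem(pa, len);
 *
 *      if (p) {
 *              u32 v = readl(p);
 *              ...
 *              acpi_os_unmap_iomem(p, len);
 *      }
 *
 * The size passed to acpi_os_unmap_iomem() should match the one used for
 * the corresponding map call, since the entry is looked up by virtual
 * address range.
 */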

void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return NULL;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return NULL;

        return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        struct acpi_ioremap *map;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return;

        mutex_lock(&acpi_ioremap_lock);

        map = acpi_map_lookup(addr, gas->bit_width / 8);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return;
        }
        acpi_os_drop_map_ref(map);

        mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
        acpi_rev_override = true;
        return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override       false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                pr_info("Overriding _OS definition to '%s'\n", acpi_os_name);
                *new_val = acpi_os_name;
        }

        if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
                pr_info("Overriding _REV return value to 5\n");
                *new_val = (char *)5;
        }

        return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        u32 handled;

        handled = (*acpi_irq_handler) (acpi_irq_context);

        if (handled) {
                acpi_irq_handled++;
                return IRQ_HANDLED;
        } else {
                acpi_irq_not_handled++;
                return IRQ_NONE;
        }
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        acpi_irq_stats_init();

        /*
         * ACPI interrupts different from the SCI in our copy of the FADT are
         * not supported.
         */
        if (gsi != acpi_gbl_FADT.sci_interrupt)
                return AE_BAD_PARAMETER;

        if (acpi_irq_handler)
                return AE_ALREADY_ACQUIRED;

        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                pr_err("SCI (ACPI GSI %d) not registered\n", gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
                pr_err("SCI (IRQ%d) allocation failed\n", irq);
                acpi_irq_handler = NULL;
                return AE_NOT_ACQUIRED;
        }
        acpi_sci_irq = irq;

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
        if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
                return AE_BAD_PARAMETER;

        free_irq(acpi_sci_irq, acpi_irq);
        acpi_irq_handler = NULL;
        acpi_sci_irq = INVALID_ACPI_IRQ;

        return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
        msleep(ms);
}

void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
        return (get_jiffies_64() - INITIAL_JIFFIES) *
                (ACPI_100NSEC_PER_SEC / HZ);
}
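
/*
 * Worked example: with HZ == 1000, ACPI_100NSEC_PER_SEC / HZ is
 * 10,000,000 / 1,000 == 10,000, so the returned value advances by
 * 10,000 units of 100ns (one millisecond) per jiffy.  Subtracting
 * INITIAL_JIFFIES makes the counter start near zero at boot rather
 * than at the jiffies wraparound-test offset.
 */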

acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        case 64:
                *(u64 *) value = readq(virt_addr);
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;
        u64 dummy;
        int error;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        if (!value)
                value = &dummy;

        error = acpi_os_read_iomem(virt_addr, value, width);
        BUG_ON(error);

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        case 64:
                writeq(value, virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

#ifdef CONFIG_PCI
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               u64 *value, u32 width)
{
        int result, size;
        u32 value32;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_read(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, &value32);
        *value = value32;

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                u64 value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_write(pci_id->segment, pci_id->bus,
                                PCI_DEVFN(pci_id->device, pci_id->function),
                                reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}
#endif
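
/*
 * For reference: PCI_DEVFN() packs the ACPI-provided device and
 * function numbers into the single devfn byte used by the raw
 * accessors above, as PCI_DEVFN(dev, fn) == (dev << 3) | fn; e.g.
 * device 0x1f, function 3 becomes devfn 0xfb.
 */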

static void acpi_os_execute_deferred(struct work_struct *work)
{
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
                           const struct acpi_debugger_ops *ops)
{
        int ret = 0;

        mutex_lock(&acpi_debugger.lock);
        if (acpi_debugger.ops) {
                ret = -EBUSY;
                goto err_lock;
        }

        acpi_debugger.owner = owner;
        acpi_debugger.ops = ops;

err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
        mutex_lock(&acpi_debugger.lock);
        if (ops == acpi_debugger.ops) {
                acpi_debugger.ops = NULL;
                acpi_debugger.owner = NULL;
        }
        mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
        int ret;
        int (*func)(acpi_osd_exec_callback, void *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->create_thread;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(function, context);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
        ssize_t ret;
        ssize_t (*func)(const char *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->write_log;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(msg);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
        ssize_t ret;
        ssize_t (*func)(char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->read_cmd;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(buffer, buffer_length);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int acpi_debugger_wait_command_ready(void)
{
        int ret;
        int (*func)(bool, char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->wait_command_ready;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(acpi_gbl_method_executing,
                   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int acpi_debugger_notify_command_complete(void)
{
        int ret;
        int (*func)(void);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->notify_command_complete;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func();

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int __init acpi_debugger_init(void)
{
        mutex_init(&acpi_debugger.lock);
        acpi_debugger_initialized = true;
        return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        int ret;
        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (type == OSL_DEBUGGER_MAIN_THREAD) {
                ret = acpi_debugger_create_thread(function, context);
                if (ret) {
                        pr_err("Kernel thread creation failed\n");
                        status = AE_ERROR;
                }
                goto out_thread;
        }

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = function;
        dpc->context = context;

        /*
         * To prevent lockdep from complaining unnecessarily, make sure that
         * there is a different static lockdep key for each workqueue by using
         * INIT_WORK() for each of them separately.
         */
        if (type == OSL_NOTIFY_HANDLER) {
                queue = kacpi_notify_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else if (type == OSL_GPE_HANDLER) {
                queue = kacpid_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else {
                pr_err("Unsupported os_execute type %d.\n", type);
                status = AE_ERROR;
        }

        if (ACPI_FAILURE(status))
                goto err_workqueue;

        /*
         * On some machines, a software-initiated SMI causes corruption unless
         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
         * typically it's done in GPE-related methods that are run via
         * workqueues, so we can avoid the known corruption cases by always
         * queueing on CPU 0.
         */
        ret = queue_work_on(0, queue, &dpc->work);
        if (!ret) {
                pr_err("Unable to queue work\n");
                status = AE_ERROR;
        }
err_workqueue:
        if (ACPI_FAILURE(status))
                kfree(dpc);
out_thread:
        return status;
}
EXPORT_SYMBOL(acpi_os_execute);
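
/*
 * Caller sketch (hypothetical names, for illustration only):
 *
 *      static void my_gpe_method(void *context)
 *      {
 *              ... runs later in process context, on CPU 0 ...
 *      }
 *
 *      status = acpi_os_execute(OSL_GPE_HANDLER, my_gpe_method, ctx);
 *
 * On success, the struct acpi_os_dpc wrapper allocated above is freed
 * by acpi_os_execute_deferred() once the callback has run.
 */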

void acpi_os_wait_events_complete(void)
{
        /*
         * Make sure the GPE handler or the fixed event handler is not used
         * on another CPU after removal.
         */
        if (acpi_sci_irq_valid())
                synchronize_hardirq(acpi_sci_irq);
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
        struct work_struct work;
        struct acpi_device *adev;
        u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

        acpi_os_wait_events_complete();
        acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
        struct acpi_hp_work *hpw;

        acpi_handle_debug(adev->handle,
                          "Scheduling hotplug event %u for deferred handling\n",
                          src);

        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;

        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
        hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpi_notify_wq etc., because
         * the hotplug code may call driver .remove() functions, which may
         * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
         * these workqueues.
         */
        if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
                kfree(hpw);
                return AE_ERROR;
        }
        return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
        return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
        struct semaphore *sem = NULL;

        sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

/*
 * TODO: A better way to delete semaphores?  Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */

acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        BUG_ON(!list_empty(&sem->wait_list));
        kfree(sem);
        sem = NULL;

        return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        long jiffies;
        int ret = 0;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (timeout == ACPI_WAIT_FOREVER)
                jiffies = MAX_SCHEDULE_TIMEOUT;
        else
                jiffies = msecs_to_jiffies(timeout);

        ret = down_timeout(sem, jiffies);
        if (ret)
                status = AE_TIME;

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}
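
/*
 * Lifecycle sketch (illustrative only) of the semaphore calls as ACPICA
 * uses them:
 *
 *      acpi_handle sem;
 *
 *      acpi_os_create_semaphore(1, 1, &sem);
 *      if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, ACPI_WAIT_FOREVER))) {
 *              ... critical section ...
 *              acpi_os_signal_semaphore(sem, 1);
 *      }
 *      acpi_os_delete_semaphore(sem);
 *
 * A finite timeout is given in milliseconds and converted with
 * msecs_to_jiffies() above; only single-unit operations are supported.
 */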

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, buffer_length);

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#else
        int ret;

        ret = acpi_debugger_read_cmd(buffer, buffer_length);
        if (ret < 0)
                return AE_ERROR;
        if (bytes_read)
                *bytes_read = ret;
#endif

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
        int ret;

        ret = acpi_debugger_wait_command_ready();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
        int ret;

        ret = acpi_debugger_notify_command_complete();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                pr_err("Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * AML debugger into the kernel debugger its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
        acpi_gbl_auto_serialize_methods = FALSE;
        pr_info("Auto-serialization disabled\n");

        return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/* Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> a driver trying to access the resources will not load
 *   - lax              (1)
 *     -> a driver trying to access the resources will load, but you
 *        get a system message warning that something might go wrong
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
        if (str == NULL || *str == '\0')
                return 0;

        if (!strcmp("strict", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
        else if (!strcmp("lax", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
        else if (!strcmp("no", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_NO;

        return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
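
/*
 * Example: booting with "acpi_enforce_resources=lax" on the kernel
 * command line lets a native driver bind despite an overlap with an
 * ACPI OperationRegion, logging a warning instead of failing with
 * -EBUSY as the default "strict" mode does.
 */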

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
        acpi_adr_space_type space_id;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;

        if (res->flags & IORESOURCE_IO)
                space_id = ACPI_ADR_SPACE_SYSTEM_IO;
        else if (res->flags & IORESOURCE_MEM)
                space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;
        else
                return 0;

        if (!acpi_check_address_range(space_id, res->start, resource_size(res), 1))
                return 0;

        pr_info("Resource conflict; ACPI support missing from driver?\n");

        if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
                return -EBUSY;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
                pr_notice("Resource conflict: System may be unstable or behave erratically\n");

        return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name)
{
        struct resource res = DEFINE_RES_IO_NAMED(start, n, name);

        return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
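
/*
 * Driver-side sketch (hypothetical driver, for illustration only): a
 * probe routine would typically gate its I/O resource claim on this
 * check:
 *
 *      ret = acpi_check_region(io_base, io_len, "mydrv");
 *      if (ret < 0)
 *              return ret;     (-EBUSY under acpi_enforce_resources=strict)
 *
 *      if (!request_region(io_base, io_len, "mydrv"))
 *              return -EBUSY;
 */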

static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
                                              void *_res, void **return_value)
{
        struct acpi_mem_space_context **mem_ctx;
        union acpi_operand_object *handler_obj;
        union acpi_operand_object *region_obj2;
        union acpi_operand_object *region_obj;
        struct resource *res = _res;
        acpi_status status;

        region_obj = acpi_ns_get_attached_object(handle);
        if (!region_obj)
                return AE_OK;

        handler_obj = region_obj->region.handler;
        if (!handler_obj)
                return AE_OK;

        if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return AE_OK;

        if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
                return AE_OK;

        region_obj2 = acpi_ns_get_secondary_object(region_obj);
        if (!region_obj2)
                return AE_OK;

        mem_ctx = (void *)&region_obj2->extra.region_context;

        if (!(mem_ctx[0]->address >= res->start &&
              mem_ctx[0]->address < res->end))
                return AE_OK;

        status = handler_obj->address_space.setup(region_obj,
                                                  ACPI_REGION_DEACTIVATE,
                                                  NULL, (void **)mem_ctx);
        if (ACPI_SUCCESS(status))
                region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

        return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on a
 * memory region that may overlap with operation regions, primarily allowing
 * them to safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped next time
 * they are accessed, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
                                u32 level)
{
        acpi_status status;

        if (!(res->flags & IORESOURCE_MEM))
                return AE_TYPE;

        status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
                                     acpi_deactivate_mem_region, NULL,
                                     res, NULL);
        if (ACPI_FAILURE(status))
                return status;

        /*
         * Wait for all of the mappings queued up for removal by
         * acpi_deactivate_mem_region() to actually go away.
         */
        synchronize_rcu();
        rcu_barrier();
        flush_scheduled_work();

        return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_release_memory);

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
        return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
        __acquires(lockp)
{
        acpi_cpu_flags flags;
        spin_lock_irqsave(lockp, flags);
        return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
        __releases(lockp)
{
        spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
        *cache = kmem_cache_create(name, size, 0, 0, NULL);
        if (*cache == NULL)
                return AE_ERROR;
        else
                return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
        kmem_cache_shrink(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
        kmem_cache_destroy(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
        kmem_cache_free(cache, object);
        return (AE_OK);
}
#endif

static int __init acpi_no_static_ssdt_setup(char *s)
{
        acpi_gbl_disable_ssdt_table_install = TRUE;
        pr_info("Static SSDT installation disabled\n");

        return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
        pr_notice("Predefined validation mechanism disabled\n");
        acpi_gbl_disable_auto_repair = TRUE;

        return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

        acpi_gbl_xgpe0_block_logical_address =
                (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_gbl_xgpe1_block_logical_address =
                (unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
                /*
                 * Use acpi_os_map_generic_address to pre-map the reset
                 * register if it's in system memory.
                 */
                void *rv;

                rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
                pr_debug("%s: Reset register mapping %s\n", __func__,
                         rv ? "successful" : "failed");
        }
        acpi_os_initialized = true;

        return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
        BUG_ON(!kacpid_wq);
        BUG_ON(!kacpi_notify_wq);
        BUG_ON(!kacpi_hotplug_wq);
        acpi_osi_init();
        return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                 acpi_irq_handler);
        }

        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_gbl_xgpe0_block_logical_address = 0UL;
        acpi_gbl_xgpe1_block_logical_address = 0UL;

        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
                acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);
        destroy_workqueue(kacpi_hotplug_wq);

        return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
                                  u32 pm1b_control)
{
        int rc = 0;
        if (__acpi_os_prepare_sleep)
                rc = __acpi_os_prepare_sleep(sleep_state,
                                             pm1a_control, pm1b_control);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_TERMINATE;

        return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                               u32 pm1a_ctrl, u32 pm1b_ctrl))
{
        __acpi_os_prepare_sleep = func;
}
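
/*
 * Hook sketch (hypothetical, for illustration; Xen registers a hook of
 * this kind to hand the final sleep transition to the hypervisor):
 *
 *      static int my_prepare_sleep(u8 state, u32 pm1a, u32 pm1b)
 *      {
 *              if (cannot_sleep)
 *                      return -ENODEV;  (mapped to AE_ERROR above)
 *              if (handled_elsewhere)
 *                      return 1;        (mapped to AE_CTRL_TERMINATE,
 *                                        skipping the register writes)
 *              return 0;                (continue normally)
 *      }
 *
 *      acpi_os_set_prepare_sleep(my_prepare_sleep);
 */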

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
                                  u32 val_b)
{
        int rc = 0;
        if (__acpi_os_prepare_extended_sleep)
                rc = __acpi_os_prepare_extended_sleep(sleep_state,
                                             val_a, val_b);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_TERMINATE;

        return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
                                  u32 val_b)
{
        return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
                               u32 val_a, u32 val_b))
{
        __acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
                                u32 reg_a_value, u32 reg_b_value)
{
        acpi_status status;

        if (acpi_gbl_reduced_hardware)
                status = acpi_os_prepare_extended_sleep(sleep_state,
                                                        reg_a_value,
                                                        reg_b_value);
        else
                status = acpi_os_prepare_sleep(sleep_state,
                                               reg_a_value, reg_b_value);
        return status;
}