/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 *
 *  This program is free software; you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation; either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 * ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>

#include <asm/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#define _COMPONENT              ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");
struct acpi_os_dpc {
        acpi_osd_exec_callback function;
        void *context;
        struct work_struct work;
};

#ifdef CONFIG_ACPI_CUSTOM_DSDT
#include CONFIG_ACPI_CUSTOM_DSDT_FILE
#endif

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif                          /* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
                                      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
                                               u32 val_b);
static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
        struct list_head list;
        void __iomem *virt;
        acpi_physical_address phys;
        acpi_size size;
        unsigned long refcount;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);

static void __init acpi_osi_setup_late(void);
/*
 * The story of _OSI(Linux)
 *
 * From pre-history through Linux-2.6.22,
 * Linux responded TRUE upon a BIOS _OSI(Linux) query.
 *
 * Unfortunately, reference BIOS writers got wind of this
 * and put _OSI(Linux) in their example code, quickly exposing
 * this string as ill-conceived and opening the door to
 * an unbounded number of BIOS incompatibilities.
 *
 * For example, _OSI(Linux) was used on resume to re-POST a
 * video card on one system, because Linux at that time
 * could not do a speedy restore in its native driver.
 * But then upon gaining quick native restore capability,
 * Linux had no way to tell the BIOS to skip the time-consuming
 * POST -- putting Linux at a permanent performance disadvantage.
 * On another system, the BIOS writer used _OSI(Linux)
 * to infer native OS support for IPMI!  On other systems,
 * _OSI(Linux) simply got in the way of Linux claiming to
 * be compatible with other operating systems, exposing
 * BIOS issues such as skipped device initialization.
 *
 * So "Linux" turned out to be a really poor choice of
 * OSI string, and from Linux-2.6.23 onward we respond FALSE.
 *
 * BIOS writers should NOT query _OSI(Linux) on future systems.
 * Linux will complain on the console when it sees it, and return FALSE.
 * To get Linux to return TRUE for your system will require
 * a kernel source update to add a DMI entry,
 * or a boot with "acpi_osi=Linux".
 */
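
/*
 * Illustrative sketch (not a quirk shipped in this file): a DMI-based
 * opt-in would call acpi_dmi_osi_linux() below from a dmi_system_id
 * callback, roughly like this -- "dmi_enable_osi_linux" and the match
 * strings are hypothetical placeholders:
 *
 *      static int __init dmi_enable_osi_linux(const struct dmi_system_id *d)
 *      {
 *              acpi_dmi_osi_linux(1, d);       / enable /
 *              return 0;
 *      }
 *
 * The command-line equivalent is simply booting with "acpi_osi=Linux".
 */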
static struct osi_linux {
        unsigned int    enable:1;
        unsigned int    dmi:1;
        unsigned int    cmdline:1;
        unsigned int    default_disabling:1;
} osi_linux = {0, 0, 0, 0};
static u32 acpi_osi_handler(acpi_string interface, u32 supported)
{
        if (!strcmp("Linux", interface)) {

                printk_once(KERN_NOTICE FW_BUG PREFIX
                            "BIOS _OSI(Linux) query %s%s\n",
                            osi_linux.enable ? "honored" : "ignored",
                            osi_linux.cmdline ? " via cmdline" :
                            osi_linux.dmi ? " via DMI" : "");
        }

        if (!strcmp("Darwin", interface)) {
                /*
                 * Apple firmware will behave poorly if it receives positive
                 * answers to "Darwin" and any other OS. Respond positively
                 * to Darwin and then disable all other vendor strings.
                 */
                acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);
                supported = ACPI_UINT32_MAX;
        }

        return supported;
}
static void __init acpi_request_region (struct acpi_generic_address *gas,
        unsigned int length, char *desc)
{
        u64 addr;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !length)
                return;

        /* Resources are never freed */
        if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
                request_region(addr, length, desc);
        else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                request_mem_region(addr, length, desc);
}
static int __init acpi_reserve_resources(void)
{
        acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1a_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
                "ACPI PM1b_EVT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1a_CNT_BLK");

        acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
                "ACPI PM1b_CNT_BLK");

        if (acpi_gbl_FADT.pm_timer_length == 4)
                acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

        acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
                "ACPI PM2_CNT_BLK");

        /* Length of GPE blocks must be a non-negative multiple of 2 */

        if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
                               acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

        if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
                acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
                               acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

        return 0;
}
fs_initcall_sync(acpi_reserve_resources);
void acpi_os_printf(const char *fmt, ...)
{
        va_list args;

        va_start(args, fmt);
        acpi_os_vprintf(fmt, args);
        va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
        static char buffer[512];

        vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                kdb_printf("%s", buffer);
        } else {
                printk(KERN_CONT "%s", buffer);
        }
#else
        if (acpi_debugger_write_log(buffer) < 0)
                printk(KERN_CONT "%s", buffer);
#endif
}
static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
        if (kstrtoul(arg, 16, &acpi_rsdp))
                return -EINVAL;
        return 0;
}
early_param("acpi_rsdp", setup_acpi_rsdp);

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
        if (acpi_rsdp)
                return acpi_rsdp;

        if (efi_enabled(EFI_CONFIG_TABLES)) {
                if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi20;
                else if (efi.acpi != EFI_INVALID_TABLE_ADDR)
                        return efi.acpi;
                else {
                        printk(KERN_ERR PREFIX
                               "System description tables not found\n");
                        return 0;
                }
        } else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
                acpi_physical_address pa = 0;

                acpi_find_root_pointer(&pa);
                return pa;
        }

        return 0;
}
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->phys <= phys &&
                    phys + size <= map->phys + map->size)
                        return map;

        return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;

        map = acpi_map_lookup(phys, size);
        if (map)
                return map->virt + (phys - map->phys);

        return NULL;
}
void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
        struct acpi_ioremap *map;
        void __iomem *virt = NULL;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(phys, size);
        if (map) {
                virt = map->virt + (phys - map->phys);
                map->refcount++;
        }
        mutex_unlock(&acpi_ioremap_lock);
        return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
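
/*
 * Usage sketch (an assumption, mirroring how acpi_os_read_memory() below
 * consumes the lookup): callers that cannot take acpi_ioremap_lock, e.g.
 * in interrupt context, rely on the RCU read lock instead:
 *
 *      rcu_read_lock();
 *      vaddr = acpi_map_vaddr_lookup(phys, size);
 *      if (vaddr)
 *              val = readb(vaddr);
 *      rcu_read_unlock();
 */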
/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        list_for_each_entry_rcu(map, &acpi_ioremaps, list)
                if (map->virt <= virt &&
                    virt + size <= map->virt + map->size)
                        return map;

        return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn)) {
                if (pg_sz > PAGE_SIZE)
                        return NULL;
                return (void __iomem __force *)kmap(pfn_to_page(pfn));
        } else
                return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
        unsigned long pfn;

        pfn = pg_off >> PAGE_SHIFT;
        if (should_use_kmap(pfn))
                kunmap(pfn_to_page(pfn));
        else
                iounmap(vaddr);
}
/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings.  If found, get a reference to it and return a pointer to it (its
 * virtual address).  If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem *__init_refok
acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
        struct acpi_ioremap *map;
        void __iomem *virt;
        acpi_physical_address pg_off;
        acpi_size pg_sz;

        if (phys > ULONG_MAX) {
                printk(KERN_ERR PREFIX "Cannot map memory that high\n");
                return NULL;
        }

        if (!acpi_gbl_permanent_mmap)
                return __acpi_map_table((unsigned long)phys, size);

        mutex_lock(&acpi_ioremap_lock);
        /* Check if there's a suitable mapping already. */
        map = acpi_map_lookup(phys, size);
        if (map) {
                map->refcount++;
                goto out;
        }

        map = kzalloc(sizeof(*map), GFP_KERNEL);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return NULL;
        }

        pg_off = round_down(phys, PAGE_SIZE);
        pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
        virt = acpi_map(pg_off, pg_sz);
        if (!virt) {
                mutex_unlock(&acpi_ioremap_lock);
                kfree(map);
                return NULL;
        }

        INIT_LIST_HEAD(&map->list);
        map->virt = virt;
        map->phys = pg_off;
        map->size = pg_sz;
        map->refcount = 1;

        list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
        mutex_unlock(&acpi_ioremap_lock);
        return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);
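
/*
 * Typical caller pattern (illustrative sketch only): each successful
 * acpi_os_map_iomem() must be balanced by acpi_os_unmap_iomem() with the
 * same size so the reference count can drop back to zero:
 *
 *      void __iomem *p = acpi_os_map_iomem(phys, len);
 *      if (p) {
 *              u32 v = readl(p);
 *              acpi_os_unmap_iomem(p, len);
 *      }
 */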
void *__init_refok
acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
        return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
        if (!--map->refcount)
                list_del_rcu(&map->list);
}

static void acpi_os_map_cleanup(struct acpi_ioremap *map)
{
        if (!map->refcount) {
                synchronize_rcu_expedited();
                acpi_unmap(map->phys, map->virt);
                kfree(map);
        }
}
/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and unmap it if there are no more active
 * references to it.
 *
 * During early init (when acpi_gbl_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done.  Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * here.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
        struct acpi_ioremap *map;

        if (!acpi_gbl_permanent_mmap) {
                __acpi_unmap_table(virt, size);
                return;
        }

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup_virt(virt, size);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
        return acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __init early_acpi_os_unmap_memory(void __iomem *virt, acpi_size size)
{
        if (!acpi_gbl_permanent_mmap)
                __acpi_unmap_table(virt, size);
}
int acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        void __iomem *virt;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return 0;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return -EINVAL;

        virt = acpi_os_map_iomem(addr, gas->bit_width / 8);
        if (!virt)
                return -EIO;

        return 0;
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
        u64 addr;
        struct acpi_ioremap *map;

        if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
                return;

        /* Handle possible alignment issues */
        memcpy(&addr, &gas->address, sizeof(addr));
        if (!addr || !gas->bit_width)
                return;

        mutex_lock(&acpi_ioremap_lock);
        map = acpi_map_lookup(addr, gas->bit_width / 8);
        if (!map) {
                mutex_unlock(&acpi_ioremap_lock);
                return;
        }
        acpi_os_drop_map_ref(map);
        mutex_unlock(&acpi_ioremap_lock);

        acpi_os_map_cleanup(map);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);
#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address * phys)
{
        if (!phys || !virt)
                return AE_BAD_PARAMETER;

        *phys = virt_to_phys(virt);

        return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
        acpi_rev_override = true;
        return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override       false
#endif
#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
                            acpi_string *new_val)
{
        if (!init_val || !new_val)
                return AE_BAD_PARAMETER;

        *new_val = NULL;
        if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
                printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
                       acpi_os_name);
                *new_val = acpi_os_name;
        }

        if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
                printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
                *new_val = (char *)5;
        }

        return AE_OK;
}
#ifdef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
#include <linux/earlycpio.h>
#include <linux/memblock.h>

static u64 acpi_tables_addr;
static int all_tables_size;

/* Copied from acpica/tbutils.c:acpi_tb_checksum() */
static u8 __init acpi_table_checksum(u8 *buffer, u32 length)
{
        u8 sum = 0;
        u8 *end = buffer + length;

        while (buffer < end)
                sum = (u8) (sum + *(buffer++));
        return sum;
}
/* All but ACPI_SIG_RSDP and ACPI_SIG_FACS: */
static const char * const table_sigs[] = {
        ACPI_SIG_BERT, ACPI_SIG_CPEP, ACPI_SIG_ECDT, ACPI_SIG_EINJ,
        ACPI_SIG_ERST, ACPI_SIG_HEST, ACPI_SIG_MADT, ACPI_SIG_MSCT,
        ACPI_SIG_SBST, ACPI_SIG_SLIT, ACPI_SIG_SRAT, ACPI_SIG_ASF,
        ACPI_SIG_BOOT, ACPI_SIG_DBGP, ACPI_SIG_DMAR, ACPI_SIG_HPET,
        ACPI_SIG_IBFT, ACPI_SIG_IVRS, ACPI_SIG_MCFG, ACPI_SIG_MCHI,
        ACPI_SIG_SLIC, ACPI_SIG_SPCR, ACPI_SIG_SPMI, ACPI_SIG_TCPA,
        ACPI_SIG_UEFI, ACPI_SIG_WAET, ACPI_SIG_WDAT, ACPI_SIG_WDDT,
        ACPI_SIG_WDRT, ACPI_SIG_DSDT, ACPI_SIG_FADT, ACPI_SIG_PSDT,
        ACPI_SIG_RSDT, ACPI_SIG_XSDT, ACPI_SIG_SSDT, NULL };

#define ACPI_HEADER_SIZE sizeof(struct acpi_table_header)

#define ACPI_OVERRIDE_TABLES 64
static struct cpio_data __initdata acpi_initrd_files[ACPI_OVERRIDE_TABLES];

#define MAP_CHUNK_SIZE   (NR_FIX_BTMAPS << PAGE_SHIFT)
void __init acpi_initrd_override(void *data, size_t size)
{
        int sig, no, table_nr = 0, total_offset = 0;
        long offset = 0;
        struct acpi_table_header *table;
        char cpio_path[32] = "kernel/firmware/acpi/";
        struct cpio_data file;

        if (data == NULL || size == 0)
                return;

        for (no = 0; no < ACPI_OVERRIDE_TABLES; no++) {
                file = find_cpio_data(cpio_path, data, size, &offset);
                if (!file.data)
                        break;

                data += offset;
                size -= offset;

                if (file.size < sizeof(struct acpi_table_header)) {
                        pr_err("ACPI OVERRIDE: Table smaller than ACPI header [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }

                table = file.data;

                for (sig = 0; table_sigs[sig]; sig++)
                        if (!memcmp(table->signature, table_sigs[sig], 4))
                                break;

                if (!table_sigs[sig]) {
                        pr_err("ACPI OVERRIDE: Unknown signature [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }
                if (file.size != table->length) {
                        pr_err("ACPI OVERRIDE: File length does not match table length [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }
                if (acpi_table_checksum(file.data, table->length)) {
                        pr_err("ACPI OVERRIDE: Bad table checksum [%s%s]\n",
                                cpio_path, file.name);
                        continue;
                }

                pr_info("%4.4s ACPI table found in initrd [%s%s][0x%x]\n",
                        table->signature, cpio_path, file.name, table->length);

                all_tables_size += table->length;
                acpi_initrd_files[table_nr].data = file.data;
                acpi_initrd_files[table_nr].size = file.size;
                table_nr++;
        }
        if (table_nr == 0)
                return;

        acpi_tables_addr =
                memblock_find_in_range(0, max_low_pfn_mapped << PAGE_SHIFT,
                                       all_tables_size, PAGE_SIZE);
        if (!acpi_tables_addr) {
                WARN_ON(1);
                return;
        }
        /*
         * Only calling e820_add_reserve does not work and the
         * tables are invalid (memory got used) later.
         * memblock_reserve works as expected and the tables won't get modified.
         * But it's not enough on X86 because ioremap will
         * complain later (used by acpi_os_map_memory) that the pages
         * that should get mapped are not marked "reserved".
         * Both memblock_reserve and e820_add_region (via arch_reserve_mem_area)
         * work fine.
         */
        memblock_reserve(acpi_tables_addr, all_tables_size);
        arch_reserve_mem_area(acpi_tables_addr, all_tables_size);

        /*
         * early_ioremap only can remap 256k one time. If we map all
         * tables one time, we will hit the limit. Need to map chunks
         * one by one during copying the same as that in relocate_initrd().
         */
        for (no = 0; no < table_nr; no++) {
                unsigned char *src_p = acpi_initrd_files[no].data;
                phys_addr_t size = acpi_initrd_files[no].size;
                phys_addr_t dest_addr = acpi_tables_addr + total_offset;
                phys_addr_t slop, clen;
                char *dest_p;

                total_offset += size;

                while (size) {
                        slop = dest_addr & ~PAGE_MASK;
                        clen = size;
                        if (clen > MAP_CHUNK_SIZE - slop)
                                clen = MAP_CHUNK_SIZE - slop;
                        dest_p = early_ioremap(dest_addr & PAGE_MASK,
                                               clen + slop);
                        memcpy(dest_p + slop, src_p, clen);
                        early_iounmap(dest_p, clen + slop);
                        src_p += clen;
                        dest_addr += clen;
                        size -= clen;
                }
        }
}
#endif /* CONFIG_ACPI_INITRD_TABLE_OVERRIDE */
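
/*
 * How an override table reaches the code above (illustrative sketch,
 * assuming the documented cpio convention; paths and file names are
 * examples): the table is packed into an uncompressed cpio archive under
 * kernel/firmware/acpi/ and prepended to the regular initrd, e.g.:
 *
 *      mkdir -p kernel/firmware/acpi
 *      cp DSDT.aml kernel/firmware/acpi/
 *      find kernel | cpio -H newc --create > acpi_override
 *      cat acpi_override /boot/initrd.img > /boot/initrd_new.img
 */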
static void acpi_table_taint(struct acpi_table_header *table)
{
        pr_warn(PREFIX
                "Override [%4.4s-%8.8s], this is unsafe: tainting kernel\n",
                table->signature, table->oem_table_id);
        add_taint(TAINT_OVERRIDDEN_ACPI_TABLE, LOCKDEP_NOW_UNRELIABLE);
}

acpi_status
acpi_os_table_override(struct acpi_table_header *existing_table,
                       struct acpi_table_header **new_table)
{
        if (!existing_table || !new_table)
                return AE_BAD_PARAMETER;

        *new_table = NULL;

#ifdef CONFIG_ACPI_CUSTOM_DSDT
        if (strncmp(existing_table->signature, "DSDT", 4) == 0)
                *new_table = (struct acpi_table_header *)AmlCode;
#endif
        if (*new_table != NULL)
                acpi_table_taint(existing_table);
        return AE_OK;
}
acpi_status
acpi_os_physical_table_override(struct acpi_table_header *existing_table,
                                acpi_physical_address *address,
                                u32 *table_length)
{
#ifndef CONFIG_ACPI_INITRD_TABLE_OVERRIDE
        *table_length = 0;
        *address = 0;
        return AE_OK;
#else
        int table_offset = 0;
        struct acpi_table_header *table;

        *table_length = 0;
        *address = 0;

        if (!acpi_tables_addr)
                return AE_OK;

        do {
                if (table_offset + ACPI_HEADER_SIZE > all_tables_size) {
                        WARN_ON(1);
                        return AE_OK;
                }

                table = acpi_os_map_memory(acpi_tables_addr + table_offset,
                                           ACPI_HEADER_SIZE);

                if (table_offset + table->length > all_tables_size) {
                        acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                        WARN_ON(1);
                        return AE_OK;
                }

                table_offset += table->length;

                if (memcmp(existing_table->signature, table->signature, 4)) {
                        acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                        continue;
                }

                /* Only override tables with matching oem id */
                if (memcmp(table->oem_table_id, existing_table->oem_table_id,
                           ACPI_OEM_TABLE_ID_SIZE)) {
                        acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                        continue;
                }

                table_offset -= table->length;
                *table_length = table->length;
                acpi_os_unmap_memory(table, ACPI_HEADER_SIZE);
                *address = acpi_tables_addr + table_offset;
                break;
        } while (table_offset + ACPI_HEADER_SIZE < all_tables_size);

        if (*address != 0)
                acpi_table_taint(existing_table);
        return AE_OK;
#endif
}
static irqreturn_t acpi_irq(int irq, void *dev_id)
{
        u32 handled;

        handled = (*acpi_irq_handler) (acpi_irq_context);

        if (handled) {
                acpi_irq_handled++;
                return IRQ_HANDLED;
        } else {
                acpi_irq_not_handled++;
                return IRQ_NONE;
        }
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
                                  void *context)
{
        unsigned int irq;

        acpi_irq_stats_init();

        /*
         * ACPI interrupts different from the SCI in our copy of the FADT are
         * not supported.
         */
        if (gsi != acpi_gbl_FADT.sci_interrupt)
                return AE_BAD_PARAMETER;

        if (acpi_irq_handler)
                return AE_ALREADY_ACQUIRED;

        if (acpi_gsi_to_irq(gsi, &irq) < 0) {
                printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
                       gsi);
                return AE_OK;
        }

        acpi_irq_handler = handler;
        acpi_irq_context = context;
        if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
                printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
                acpi_irq_handler = NULL;
                return AE_NOT_ACQUIRED;
        }
        acpi_sci_irq = irq;

        return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
        if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
                return AE_BAD_PARAMETER;

        free_irq(acpi_sci_irq, acpi_irq);
        acpi_irq_handler = NULL;
        acpi_sci_irq = INVALID_ACPI_IRQ;

        return AE_OK;
}
/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
        msleep(ms);
}

void acpi_os_stall(u32 us)
{
        while (us) {
                u32 delay = 1000;

                if (delay > us)
                        delay = us;
                udelay(delay);
                touch_nmi_watchdog();
                us -= delay;
        }
}

/*
 * Support ACPI 3.0 AML Timer operand
 * Returns 64-bit free-running, monotonically increasing timer
 * with 100ns granularity
 */
u64 acpi_os_get_timer(void)
{
        u64 time_ns = ktime_to_ns(ktime_get());
        do_div(time_ns, 100);
        return time_ns;
}
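
/*
 * Worked example: ktime_get() returns nanoseconds, so dividing by 100
 * converts to the 100ns ticks the AML Timer operand requires.  One
 * second of elapsed time (1,000,000,000 ns) reads as 10,000,000 ticks,
 * and at that rate the 64-bit count only wraps after roughly 58,000
 * years, satisfying the "free-running, monotonically increasing"
 * requirement for all practical purposes.
 */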
acpi_status acpi_os_read_port(acpi_io_address port, u32 * value, u32 width)
{
        u32 dummy;

        if (!value)
                value = &dummy;

        *value = 0;
        if (width <= 8) {
                *(u8 *) value = inb(port);
        } else if (width <= 16) {
                *(u16 *) value = inw(port);
        } else if (width <= 32) {
                *(u32 *) value = inl(port);
        } else {
                BUG();
        }

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
        if (width <= 8) {
                outb(value, port);
        } else if (width <= 16) {
                outw(value, port);
        } else if (width <= 32) {
                outl(value, port);
        } else {
                BUG();
        }

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_write_port);
acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;
        u64 dummy;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        if (!value)
                value = &dummy;

        switch (width) {
        case 8:
                *(u8 *) value = readb(virt_addr);
                break;
        case 16:
                *(u16 *) value = readw(virt_addr);
                break;
        case 32:
                *(u32 *) value = readl(virt_addr);
                break;
        case 64:
                *(u64 *) value = readq(virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
        void __iomem *virt_addr;
        unsigned int size = width / 8;
        bool unmap = false;

        rcu_read_lock();
        virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
        if (!virt_addr) {
                rcu_read_unlock();
                virt_addr = acpi_os_ioremap(phys_addr, size);
                if (!virt_addr)
                        return AE_BAD_ADDRESS;
                unmap = true;
        }

        switch (width) {
        case 8:
                writeb(value, virt_addr);
                break;
        case 16:
                writew(value, virt_addr);
                break;
        case 32:
                writel(value, virt_addr);
                break;
        case 64:
                writeq(value, virt_addr);
                break;
        default:
                BUG();
        }

        if (unmap)
                iounmap(virt_addr);
        else
                rcu_read_unlock();

        return AE_OK;
}
acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                               u64 *value, u32 width)
{
        int result, size;
        u32 value32;

        if (!value)
                return AE_BAD_PARAMETER;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_read(pci_id->segment, pci_id->bus,
                              PCI_DEVFN(pci_id->device, pci_id->function),
                              reg, size, &value32);
        *value = value32;

        return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id * pci_id, u32 reg,
                                u64 value, u32 width)
{
        int result, size;

        switch (width) {
        case 8:
                size = 1;
                break;
        case 16:
                size = 2;
                break;
        case 32:
                size = 4;
                break;
        default:
                return AE_ERROR;
        }

        result = raw_pci_write(pci_id->segment, pci_id->bus,
                               PCI_DEVFN(pci_id->device, pci_id->function),
                               reg, size, value);

        return (result ? AE_ERROR : AE_OK);
}
static void acpi_os_execute_deferred(struct work_struct *work)
{
        struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

        dpc->function(dpc->context);
        kfree(dpc);
}
#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
                           const struct acpi_debugger_ops *ops)
{
        int ret = 0;

        mutex_lock(&acpi_debugger.lock);
        if (acpi_debugger.ops) {
                ret = -EBUSY;
                goto err_lock;
        }

        acpi_debugger.owner = owner;
        acpi_debugger.ops = ops;

err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
        mutex_lock(&acpi_debugger.lock);
        if (ops == acpi_debugger.ops) {
                acpi_debugger.ops = NULL;
                acpi_debugger.owner = NULL;
        }
        mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);
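
/*
 * Registration sketch for a debugger back end (illustrative; the ops
 * names match the fields dereferenced by the helpers below, while
 * everything prefixed "my_" is a hypothetical placeholder):
 *
 *      static const struct acpi_debugger_ops my_dbg_ops = {
 *              .create_thread           = my_create_thread,
 *              .read_cmd                = my_read_cmd,
 *              .write_log               = my_write_log,
 *              .wait_command_ready      = my_wait_command_ready,
 *              .notify_command_complete = my_notify_command_complete,
 *      };
 *
 *      ret = acpi_register_debugger(THIS_MODULE, &my_dbg_ops);
 *      ...
 *      acpi_unregister_debugger(&my_dbg_ops);
 */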
int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
        int ret;
        int (*func)(acpi_osd_exec_callback, void *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->create_thread;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(function, context);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
ssize_t acpi_debugger_write_log(const char *msg)
{
        ssize_t ret;
        ssize_t (*func)(const char *);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->write_log;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(msg);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
        ssize_t ret;
        ssize_t (*func)(char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->read_cmd;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(buffer, buffer_length);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
int acpi_debugger_wait_command_ready(void)
{
        int ret;
        int (*func)(bool, char *, size_t);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->wait_command_ready;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func(acpi_gbl_method_executing,
                   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}
int acpi_debugger_notify_command_complete(void)
{
        int ret;
        int (*func)(void);
        struct module *owner;

        if (!acpi_debugger_initialized)
                return -ENODEV;
        mutex_lock(&acpi_debugger.lock);
        if (!acpi_debugger.ops) {
                ret = -ENODEV;
                goto err_lock;
        }
        if (!try_module_get(acpi_debugger.owner)) {
                ret = -ENODEV;
                goto err_lock;
        }
        func = acpi_debugger.ops->notify_command_complete;
        owner = acpi_debugger.owner;
        mutex_unlock(&acpi_debugger.lock);

        ret = func();

        mutex_lock(&acpi_debugger.lock);
        module_put(owner);
err_lock:
        mutex_unlock(&acpi_debugger.lock);
        return ret;
}

int __init acpi_debugger_init(void)
{
        mutex_init(&acpi_debugger.lock);
        acpi_debugger_initialized = true;
        return 0;
}
#endif /* CONFIG_ACPI_DEBUGGER */
/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/
acpi_status acpi_os_execute(acpi_execute_type type,
                            acpi_osd_exec_callback function, void *context)
{
        acpi_status status = AE_OK;
        struct acpi_os_dpc *dpc;
        struct workqueue_struct *queue;
        int ret;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                          "Scheduling function [%p(%p)] for deferred execution.\n",
                          function, context));

        if (type == OSL_DEBUGGER_MAIN_THREAD) {
                ret = acpi_debugger_create_thread(function, context);
                if (ret) {
                        pr_err("Call to kthread_create() failed.\n");
                        status = AE_ERROR;
                }
                goto out_thread;
        }

        /*
         * Allocate/initialize DPC structure.  Note that this memory will be
         * freed by the callee.  The kernel handles the work_struct list in a
         * way that allows us to also free its memory inside the callee.
         * Because we may want to schedule several tasks with different
         * parameters we can't use the approach some kernel code uses of
         * having a static work_struct.
         */

        dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
        if (!dpc)
                return AE_NO_MEMORY;

        dpc->function = function;
        dpc->context = context;

        /*
         * To prevent lockdep from complaining unnecessarily, make sure that
         * there is a different static lockdep key for each workqueue by using
         * INIT_WORK() for each of them separately.
         */
        if (type == OSL_NOTIFY_HANDLER) {
                queue = kacpi_notify_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else if (type == OSL_GPE_HANDLER) {
                queue = kacpid_wq;
                INIT_WORK(&dpc->work, acpi_os_execute_deferred);
        } else {
                pr_err("Unsupported os_execute type %d.\n", type);
                status = AE_ERROR;
        }

        if (ACPI_FAILURE(status))
                goto err_workqueue;

        /*
         * On some machines, a software-initiated SMI causes corruption unless
         * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
         * typically it's done in GPE-related methods that are run via
         * workqueues, so we can avoid the known corruption cases by always
         * queueing on CPU 0.
         */
        ret = queue_work_on(0, queue, &dpc->work);
        if (!ret) {
                printk(KERN_ERR PREFIX
                       "Call to queue_work() failed.\n");
                status = AE_ERROR;
        }
err_workqueue:
        if (ACPI_FAILURE(status))
                kfree(dpc);
out_thread:
        return status;
}
EXPORT_SYMBOL(acpi_os_execute);
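
/*
 * Usage sketch (an assumption, not a caller from this file): the callback
 * runs exactly once on the chosen workqueue, and the DPC wrapper is freed
 * automatically after it returns:
 *
 *      static void my_notify_fn(void *context)
 *      {
 *              struct acpi_device *adev = context;
 *              ...
 *      }
 *
 *      status = acpi_os_execute(OSL_NOTIFY_HANDLER, my_notify_fn, adev);
 */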
void acpi_os_wait_events_complete(void)
{
        /*
         * Make sure the GPE handler or the fixed event handler is not used
         * on another CPU after removal.
         */
        if (acpi_sci_irq_valid())
                synchronize_hardirq(acpi_sci_irq);
        flush_workqueue(kacpid_wq);
        flush_workqueue(kacpi_notify_wq);
}

struct acpi_hp_work {
        struct work_struct work;
        struct acpi_device *adev;
        u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
        struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

        acpi_os_wait_events_complete();
        acpi_device_hotplug(hpw->adev, hpw->src);
        kfree(hpw);
}
acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
        struct acpi_hp_work *hpw;

        ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
                  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
                  adev, src));

        hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
        if (!hpw)
                return AE_NO_MEMORY;

        INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
        hpw->adev = adev;
        hpw->src = src;
        /*
         * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
         * the hotplug code may call driver .remove() functions, which may
         * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
         * these workqueues.
         */
        if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
                kfree(hpw);
                return AE_ERROR;
        }
        return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
        return queue_work(kacpi_hotplug_wq, work);
}
acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle * handle)
{
        struct semaphore *sem = NULL;

        sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
        if (!sem)
                return AE_NO_MEMORY;

        sema_init(sem, initial_units);

        *handle = (acpi_handle *) sem;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
                          *handle, initial_units));

        return AE_OK;
}

/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.  Should
 * we at least check for blocked threads and signal/cancel them?
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!sem)
                return AE_BAD_PARAMETER;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

        BUG_ON(!list_empty(&sem->wait_list));
        kfree(sem);
        sem = NULL;

        return AE_OK;
}
/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
        acpi_status status = AE_OK;
        struct semaphore *sem = (struct semaphore *)handle;
        long jiffies;
        int ret = 0;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
                          handle, units, timeout));

        if (timeout == ACPI_WAIT_FOREVER)
                jiffies = MAX_SCHEDULE_TIMEOUT;
        else
                jiffies = msecs_to_jiffies(timeout);

        ret = down_timeout(sem, jiffies);
        if (ret)
                status = AE_TIME;

        if (ACPI_FAILURE(status)) {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Failed to acquire semaphore[%p|%d|%d], %s",
                                  handle, units, timeout,
                                  acpi_format_exception(status)));
        } else {
                ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
                                  "Acquired semaphore[%p|%d|%d]", handle,
                                  units, timeout));
        }

        return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
        struct semaphore *sem = (struct semaphore *)handle;

        if (!acpi_os_initialized)
                return AE_OK;

        if (!sem || (units < 1))
                return AE_BAD_PARAMETER;

        if (units > 1)
                return AE_SUPPORT;

        ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
                          units));

        up(sem);

        return AE_OK;
}
acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
        if (acpi_in_debugger) {
                u32 chars;

                kdb_read(buffer, buffer_length);

                /* remove the CR kdb includes */
                chars = strlen(buffer) - 1;
                buffer[chars] = '\0';
        }
#else
        int ret;

        ret = acpi_debugger_read_cmd(buffer, buffer_length);
        if (ret < 0)
                return AE_ERROR;
        if (bytes_read)
                *bytes_read = ret;
#endif

        return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
        int ret;

        ret = acpi_debugger_wait_command_ready();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
        int ret;

        ret = acpi_debugger_notify_command_complete();
        if (ret < 0)
                return AE_ERROR;
        return AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
        switch (function) {
        case ACPI_SIGNAL_FATAL:
                printk(KERN_ERR PREFIX "Fatal opcode executed\n");
                break;
        case ACPI_SIGNAL_BREAKPOINT:
                /*
                 * AML Breakpoint
                 * ACPI spec. says to treat it as a NOP unless
                 * you are debugging.  So if/when we integrate
                 * AML debugger into the kernel debugger its
                 * hook will go here.  But until then it is
                 * not useful to print anything on breakpoints.
                 */
                break;
        default:
                break;
        }

        return AE_OK;
}
static int __init acpi_os_name_setup(char *str)
{
        char *p = acpi_os_name;
        int count = ACPI_MAX_OVERRIDE_LEN - 1;

        if (!str || !*str)
                return 0;

        for (; count-- && *str; str++) {
                if (isalnum(*str) || *str == ' ' || *str == ':')
                        *p++ = *str;
                else if (*str == '\'' || *str == '"')
                        continue;
                else
                        break;
        }
        *p = 0;

        return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);
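
/*
 * Example (illustrative): booting with
 *
 *      acpi_os_name="Microsoft Windows NT"
 *
 * makes the predefined _OS object report that string instead.  Per the
 * filter above, quote characters are stripped and the copy stops at the
 * first character that is not alphanumeric, ' ' or ':'.
 */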
#define OSI_STRING_LENGTH_MAX 64        /* arbitrary */
#define OSI_STRING_ENTRIES_MAX 16       /* arbitrary */

struct osi_setup_entry {
        char string[OSI_STRING_LENGTH_MAX];
        bool enable;
};

static struct osi_setup_entry
                osi_setup_entries[OSI_STRING_ENTRIES_MAX] __initdata = {
        {"Module Device", true},
        {"Processor Device", true},
        {"3.0 _SCP Extensions", true},
        {"Processor Aggregator Device", true},
};
void __init acpi_osi_setup(char *str)
{
        struct osi_setup_entry *osi;
        bool enable = true;
        int i;

        if (!acpi_gbl_create_osi_method)
                return;

        if (str == NULL || *str == '\0') {
                printk(KERN_INFO PREFIX "_OSI method disabled\n");
                acpi_gbl_create_osi_method = FALSE;
                return;
        }

        if (*str == '!') {
                str++;
                if (*str == '\0') {
                        osi_linux.default_disabling = 1;
                        return;
                } else if (*str == '*') {
                        acpi_update_interfaces(ACPI_DISABLE_ALL_STRINGS);
                        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                                osi = &osi_setup_entries[i];
                                osi->enable = false;
                        }
                        return;
                }
                enable = false;
        }

        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                osi = &osi_setup_entries[i];
                if (!strcmp(osi->string, str)) {
                        osi->enable = enable;
                        break;
                } else if (osi->string[0] == '\0') {
                        osi->enable = enable;
                        strncpy(osi->string, str, OSI_STRING_LENGTH_MAX);
                        break;
                }
        }
}
static void __init set_osi_linux(unsigned int enable)
{
        if (osi_linux.enable != enable)
                osi_linux.enable = enable;

        if (osi_linux.enable)
                acpi_osi_setup("Linux");
        else
                acpi_osi_setup("!Linux");
}

static void __init acpi_cmdline_osi_linux(unsigned int enable)
{
        osi_linux.cmdline = 1;  /* cmdline set the default and override DMI */
        osi_linux.dmi = 0;
        set_osi_linux(enable);
}

void __init acpi_dmi_osi_linux(int enable, const struct dmi_system_id *d)
{
        printk(KERN_NOTICE PREFIX "DMI detected: %s\n", d->ident);

        if (enable == -1)
                return;

        osi_linux.dmi = 1;      /* DMI knows that this box asks OSI(Linux) */
        set_osi_linux(enable);
}
/*
 * Modify the list of "OS Interfaces" reported to BIOS via _OSI
 *
 * empty string disables _OSI
 * string starting with '!' disables that string
 * otherwise string is added to list, augmenting built-in strings
 */
static void __init acpi_osi_setup_late(void)
{
        struct osi_setup_entry *osi;
        char *str;
        int i;
        acpi_status status;

        if (osi_linux.default_disabling) {
                status = acpi_update_interfaces(ACPI_DISABLE_ALL_VENDOR_STRINGS);

                if (ACPI_SUCCESS(status))
                        printk(KERN_INFO PREFIX "Disabled all _OSI OS vendors\n");
        }

        for (i = 0; i < OSI_STRING_ENTRIES_MAX; i++) {
                osi = &osi_setup_entries[i];
                str = osi->string;

                if (*str == '\0')
                        break;
                if (osi->enable) {
                        status = acpi_install_interface(str);

                        if (ACPI_SUCCESS(status))
                                printk(KERN_INFO PREFIX "Added _OSI(%s)\n", str);
                } else {
                        status = acpi_remove_interface(str);

                        if (ACPI_SUCCESS(status))
                                printk(KERN_INFO PREFIX "Deleted _OSI(%s)\n", str);
                }
        }
}
static int __init osi_setup(char *str)
{
        if (str && !strcmp("Linux", str))
                acpi_cmdline_osi_linux(1);
        else if (str && !strcmp("!Linux", str))
                acpi_cmdline_osi_linux(0);
        else
                acpi_osi_setup(str);

        return 1;
}

__setup("acpi_osi=", osi_setup);
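
/*
 * Examples, matching the grammar implemented in acpi_osi_setup() above:
 *
 *      acpi_osi=                       disable the _OSI method entirely
 *      acpi_osi=!                      disable all built-in OS vendor strings
 *      acpi_osi=!*                     disable all strings, built-in and added
 *      acpi_osi="Windows 2009"         answer TRUE to _OSI("Windows 2009")
 *      acpi_osi="!Windows 2009"        answer FALSE to _OSI("Windows 2009")
 *      acpi_osi=Linux                  re-enable the legacy _OSI(Linux) answer
 */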
/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
        acpi_gbl_auto_serialize_methods = FALSE;
        pr_info("ACPI: auto-serialization disabled\n");

        return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);
/* Check of resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and System Memory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *     get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
static int __init acpi_enforce_resources_setup(char *str)
{
        if (str == NULL || *str == '\0')
                return 0;

        if (!strcmp("strict", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
        else if (!strcmp("lax", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
        else if (!strcmp("no", str))
                acpi_enforce_resources = ENFORCE_RESOURCES_NO;

        return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);
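
/*
 * Example (illustrative): booting with
 *
 *      acpi_enforce_resources=lax
 *
 * lets a native driver bind even when its I/O range overlaps an ACPI
 * OperationRegion; the conflict is only logged, instead of failing the
 * driver load as the default "strict" mode does.
 */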
/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
        acpi_adr_space_type space_id;
        acpi_size length;
        u8 warn = 0;
        int clash = 0;

        if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
                return 0;
        if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
                return 0;

        if (res->flags & IORESOURCE_IO)
                space_id = ACPI_ADR_SPACE_SYSTEM_IO;
        else
                space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

        length = resource_size(res);
        if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
                warn = 1;
        clash = acpi_check_address_range(space_id, res->start, length, warn);

        if (clash) {
                if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
                        if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
                                printk(KERN_NOTICE "ACPI: This conflict may"
                                       " cause random problems and system"
                                       " instability\n");
                        printk(KERN_INFO "ACPI: If an ACPI driver is available"
                               " for this device, you should use it instead of"
                               " the native driver\n");
                }
                if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
                        return -EBUSY;
        }
        return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);
int acpi_check_region(resource_size_t start, resource_size_t n,
                      const char *name)
{
        struct resource res = {
                .start = start,
                .end = start + n - 1,
                .name = name,
                .flags = IORESOURCE_IO,
        };

        return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
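
/*
 * Driver-side usage sketch (an assumption; "mydrv" and the variables are
 * hypothetical): a native driver can probe for an AML conflict before
 * claiming a legacy I/O range, and bail out if strict enforcement
 * reports -EBUSY:
 *
 *      if (acpi_check_region(io_base, io_len, "mydrv"))
 *              return -EBUSY;
 */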
/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
        return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

bool acpi_osi_is_win8(void)
{
        return acpi_gbl_osi_data >= ACPI_OSI_WIN_8;
}
EXPORT_SYMBOL(acpi_osi_is_win8);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
        ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
        acpi_cpu_flags flags;
        spin_lock_irqsave(lockp, flags);
        return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
        spin_unlock_irqrestore(lockp, flags);
}
#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t ** cache)
{
        *cache = kmem_cache_create(name, size, 0, 0, NULL);
        if (*cache == NULL)
                return AE_ERROR;
        else
                return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t * cache)
{
        kmem_cache_shrink(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t * cache)
{
        kmem_cache_destroy(cache);
        return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t * cache, void *object)
{
        kmem_cache_free(cache, object);
        return (AE_OK);
}
#endif
static int __init acpi_no_static_ssdt_setup(char *s)
{
        acpi_gbl_disable_ssdt_table_install = TRUE;
        pr_info("ACPI: static SSDT installation disabled\n");

        return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
        printk(KERN_NOTICE PREFIX
               "ACPI: Predefined validation mechanism disabled\n");
        acpi_gbl_disable_auto_repair = TRUE;

        return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);
acpi_status __init acpi_os_initialize(void)
{
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);
        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
                /*
                 * Use acpi_os_map_generic_address to pre-map the reset
                 * register if it's in system memory.
                 */
                int rv;

                rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
                pr_debug(PREFIX "%s: map reset_reg status %d\n", __func__, rv);
        }
        acpi_os_initialized = true;

        return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
        kacpid_wq = alloc_workqueue("kacpid", 0, 1);
        kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
        kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
        BUG_ON(!kacpid_wq);
        BUG_ON(!kacpi_notify_wq);
        BUG_ON(!kacpi_hotplug_wq);
        acpi_install_interface_handler(acpi_osi_handler);
        acpi_osi_setup_late();
        return AE_OK;
}
acpi_status acpi_os_terminate(void)
{
        if (acpi_irq_handler) {
                acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
                                                 acpi_irq_handler);
        }

        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
        acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
        if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
                acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

        destroy_workqueue(kacpid_wq);
        destroy_workqueue(kacpi_notify_wq);
        destroy_workqueue(kacpi_hotplug_wq);

        return AE_OK;
}
acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
                                  u32 pm1b_control)
{
        int rc = 0;

        if (__acpi_os_prepare_sleep)
                rc = __acpi_os_prepare_sleep(sleep_state,
                                             pm1a_control, pm1b_control);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_SKIP;

        return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
                               u32 pm1a_ctrl, u32 pm1b_ctrl))
{
        __acpi_os_prepare_sleep = func;
}
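
/*
 * Hook usage sketch (illustrative; "my_prepare_sleep" stands in for a
 * hypothetical callback such as a hypervisor or TXT layer might
 * register):
 *
 *      static int my_prepare_sleep(u8 state, u32 pm1a_ctrl, u32 pm1b_ctrl)
 *      {
 *              ...
 *              return 0;
 *      }
 *
 *      acpi_os_set_prepare_sleep(my_prepare_sleep);
 *
 * Per acpi_os_prepare_sleep() above, a negative return aborts the sleep
 * with AE_ERROR and a positive return yields AE_CTRL_SKIP, telling the
 * caller to skip the normal PM1x control register writes.
 */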
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
                                           u32 val_b)
{
        int rc = 0;

        if (__acpi_os_prepare_extended_sleep)
                rc = __acpi_os_prepare_extended_sleep(sleep_state,
                                                      val_a, val_b);
        if (rc < 0)
                return AE_ERROR;
        else if (rc > 0)
                return AE_CTRL_SKIP;

        return AE_OK;
}

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
                                        u32 val_a, u32 val_b))
{
        __acpi_os_prepare_extended_sleep = func;
}