// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  acpi_osl.c - OS-dependent functions ($Revision: 83 $)
 *
 *  Copyright (C) 2000       Andrew Henroid
 *  Copyright (C) 2001, 2002 Andy Grover <andrew.grover@intel.com>
 *  Copyright (C) 2001, 2002 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
 *  Copyright (c) 2008 Intel Corporation
 *   Author: Matthew Wilcox <willy@linux.intel.com>
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <linux/lockdep.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/kmod.h>
#include <linux/delay.h>
#include <linux/workqueue.h>
#include <linux/nmi.h>
#include <linux/acpi.h>
#include <linux/efi.h>
#include <linux/ioport.h>
#include <linux/list.h>
#include <linux/jiffies.h>
#include <linux/semaphore.h>
#include <linux/security.h>

#include <linux/uaccess.h>
#include <linux/io-64-nonatomic-lo-hi.h>

#include "acpica/accommon.h"
#include "acpica/acnamesp.h"
#include "internal.h"

#define _COMPONENT		ACPI_OS_SERVICES
ACPI_MODULE_NAME("osl");

struct acpi_os_dpc {
	acpi_osd_exec_callback function;
	void *context;
	struct work_struct work;
};

#ifdef ENABLE_DEBUGGER
#include <linux/kdb.h>

/* stuff for debugger support */
int acpi_in_debugger;
EXPORT_SYMBOL(acpi_in_debugger);
#endif				/* ENABLE_DEBUGGER */

static int (*__acpi_os_prepare_sleep)(u8 sleep_state, u32 pm1a_ctrl,
				      u32 pm1b_ctrl);
static int (*__acpi_os_prepare_extended_sleep)(u8 sleep_state, u32 val_a,
				      u32 val_b);

static acpi_osd_handler acpi_irq_handler;
static void *acpi_irq_context;
static struct workqueue_struct *kacpid_wq;
static struct workqueue_struct *kacpi_notify_wq;
static struct workqueue_struct *kacpi_hotplug_wq;
static bool acpi_os_initialized;
unsigned int acpi_sci_irq = INVALID_ACPI_IRQ;
bool acpi_permanent_mmap = false;

/*
 * This list of permanent mappings is for memory that may be accessed from
 * interrupt context, where we can't do the ioremap().
 */
struct acpi_ioremap {
	struct list_head list;
	void __iomem *virt;
	acpi_physical_address phys;
	acpi_size size;
	union {
		unsigned long refcount;
		struct rcu_work rwork;
	} track;
};

static LIST_HEAD(acpi_ioremaps);
static DEFINE_MUTEX(acpi_ioremap_lock);
#define acpi_ioremap_lock_held() lock_is_held(&acpi_ioremap_lock.dep_map)

static void __init acpi_request_region (struct acpi_generic_address *gas,
	unsigned int length, char *desc)
{
	u64 addr;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !length)
		return;

	/* Resources are never freed */
	if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_IO)
		request_region(addr, length, desc);
	else if (gas->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		request_mem_region(addr, length, desc);
}

static int __init acpi_reserve_resources(void)
{
	acpi_request_region(&acpi_gbl_FADT.xpm1a_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1a_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_event_block, acpi_gbl_FADT.pm1_event_length,
		"ACPI PM1b_EVT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1a_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1a_CNT_BLK");

	acpi_request_region(&acpi_gbl_FADT.xpm1b_control_block, acpi_gbl_FADT.pm1_control_length,
		"ACPI PM1b_CNT_BLK");

	if (acpi_gbl_FADT.pm_timer_length == 4)
		acpi_request_region(&acpi_gbl_FADT.xpm_timer_block, 4, "ACPI PM_TMR");

	acpi_request_region(&acpi_gbl_FADT.xpm2_control_block, acpi_gbl_FADT.pm2_control_length,
		"ACPI PM2_CNT_BLK");

	/* Length of GPE blocks must be a non-negative multiple of 2 */

	if (!(acpi_gbl_FADT.gpe0_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe0_block,
			       acpi_gbl_FADT.gpe0_block_length, "ACPI GPE0_BLK");

	if (!(acpi_gbl_FADT.gpe1_block_length & 0x1))
		acpi_request_region(&acpi_gbl_FADT.xgpe1_block,
			       acpi_gbl_FADT.gpe1_block_length, "ACPI GPE1_BLK");

	return 0;
}
fs_initcall_sync(acpi_reserve_resources);

void acpi_os_printf(const char *fmt, ...)
{
	va_list args;

	va_start(args, fmt);
	acpi_os_vprintf(fmt, args);
	va_end(args);
}
EXPORT_SYMBOL(acpi_os_printf);

void acpi_os_vprintf(const char *fmt, va_list args)
{
	static char buffer[512];

	vsprintf(buffer, fmt, args);

#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		kdb_printf("%s", buffer);
	} else {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#else
	if (acpi_debugger_write_log(buffer) < 0) {
		if (printk_get_level(buffer))
			printk("%s", buffer);
		else
			printk(KERN_CONT "%s", buffer);
	}
#endif
}

static unsigned long acpi_rsdp;
static int __init setup_acpi_rsdp(char *arg)
{
	return kstrtoul(arg, 16, &acpi_rsdp);
}
early_param("acpi_rsdp", setup_acpi_rsdp);
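
/*
 * Illustrative note (not from the original file): the parameter above is
 * given on the kernel command line as a hexadecimal physical address, for
 * example "acpi_rsdp=0xe0000". It is only honored when the kernel is not
 * locked down; see the LOCKDOWN_ACPI_TABLES check below.
 */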

acpi_physical_address __init acpi_os_get_root_pointer(void)
{
	acpi_physical_address pa;

	/*
	 * We may have been provided with an RSDP on the command line,
	 * but if a malicious user has done so they may be pointing us
	 * at modified ACPI tables that could alter kernel behaviour -
	 * so, we check the lockdown status before making use of
	 * it. If we trust it then also stash it in an architecture
	 * specific location (if appropriate) so it can be carried
	 * over further kexec()s.
	 */
	if (acpi_rsdp && !security_locked_down(LOCKDOWN_ACPI_TABLES)) {
		acpi_arch_set_root_pointer(acpi_rsdp);
		return acpi_rsdp;
	}

	pa = acpi_arch_get_root_pointer();
	if (pa)
		return pa;

	if (efi_enabled(EFI_CONFIG_TABLES)) {
		if (efi.acpi20 != EFI_INVALID_TABLE_ADDR)
			return efi.acpi20;
		if (efi.acpi != EFI_INVALID_TABLE_ADDR)
			return efi.acpi;
		pr_err(PREFIX "System description tables not found\n");
	} else if (IS_ENABLED(CONFIG_ACPI_LEGACY_TABLES_LOOKUP)) {
		acpi_find_root_pointer(&pa);
	}

	return pa;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->phys <= phys &&
		    phys + size <= map->phys + map->size)
			return map;

	return NULL;
}

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static void __iomem *
acpi_map_vaddr_lookup(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;

	map = acpi_map_lookup(phys, size);
	if (map)
		return map->virt + (phys - map->phys);

	return NULL;
}

void __iomem *acpi_os_get_iomem(acpi_physical_address phys, unsigned int size)
{
	struct acpi_ioremap *map;
	void __iomem *virt = NULL;

	mutex_lock(&acpi_ioremap_lock);
	map = acpi_map_lookup(phys, size);
	if (map) {
		virt = map->virt + (phys - map->phys);
		map->track.refcount++;
	}
	mutex_unlock(&acpi_ioremap_lock);
	return virt;
}
EXPORT_SYMBOL_GPL(acpi_os_get_iomem);
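
/*
 * Illustrative sketch (not part of the original file, compiled out): the
 * intended usage pattern for acpi_os_get_iomem(). The lookup itself takes
 * acpi_ioremap_lock, so it must run in process context; the pointer it
 * returns can then be used for plain MMIO accesses from interrupt context,
 * which is the point of keeping these mappings permanent. The register
 * address and the probe/IRQ split below are hypothetical.
 */
#if 0
static void __iomem *example_reg;	/* saved at probe time */

static int example_probe(acpi_physical_address phys)
{
	/* Process context: may sleep on acpi_ioremap_lock. */
	example_reg = acpi_os_get_iomem(phys, sizeof(u32));
	return example_reg ? 0 : -ENODEV;
}

static u32 example_irq_read(void)
{
	/* Interrupt context: no mapping work, just an MMIO read. */
	return readl(example_reg);
}
#endif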

/* Must be called with 'acpi_ioremap_lock' or RCU read lock held. */
static struct acpi_ioremap *
acpi_map_lookup_virt(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	list_for_each_entry_rcu(map, &acpi_ioremaps, list, acpi_ioremap_lock_held())
		if (map->virt <= virt &&
		    virt + size <= map->virt + map->size)
			return map;

	return NULL;
}

#if defined(CONFIG_IA64) || defined(CONFIG_ARM64)
/* ioremap will take care of cache attributes */
#define should_use_kmap(pfn)   0
#else
#define should_use_kmap(pfn)   page_is_ram(pfn)
#endif

static void __iomem *acpi_map(acpi_physical_address pg_off, unsigned long pg_sz)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn)) {
		if (pg_sz > PAGE_SIZE)
			return NULL;
		return (void __iomem __force *)kmap(pfn_to_page(pfn));
	} else
		return acpi_os_ioremap(pg_off, pg_sz);
}

static void acpi_unmap(acpi_physical_address pg_off, void __iomem *vaddr)
{
	unsigned long pfn;

	pfn = pg_off >> PAGE_SHIFT;
	if (should_use_kmap(pfn))
		kunmap(pfn_to_page(pfn));
	else
		iounmap(vaddr);
}

/**
 * acpi_os_map_iomem - Get a virtual address for a given physical address range.
 * @phys: Start of the physical address range to map.
 * @size: Size of the physical address range to map.
 *
 * Look up the given physical address range in the list of existing ACPI memory
 * mappings. If found, get a reference to it and return a pointer to it (its
 * virtual address). If not found, map it, add it to that list and return a
 * pointer to it.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_map_table() to get the job done.
 */
void __iomem __ref
*acpi_os_map_iomem(acpi_physical_address phys, acpi_size size)
{
	struct acpi_ioremap *map;
	void __iomem *virt;
	acpi_physical_address pg_off;
	acpi_size pg_sz;

	if (phys > ULONG_MAX) {
		printk(KERN_ERR PREFIX "Cannot map memory that high\n");
		return NULL;
	}

	if (!acpi_permanent_mmap)
		return __acpi_map_table((unsigned long)phys, size);

	mutex_lock(&acpi_ioremap_lock);
	/* Check if there's a suitable mapping already. */
	map = acpi_map_lookup(phys, size);
	if (map) {
		map->track.refcount++;
		goto out;
	}

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return NULL;
	}

	pg_off = round_down(phys, PAGE_SIZE);
	pg_sz = round_up(phys + size, PAGE_SIZE) - pg_off;
	virt = acpi_map(phys, size);
	if (!virt) {
		mutex_unlock(&acpi_ioremap_lock);
		kfree(map);
		return NULL;
	}

	INIT_LIST_HEAD(&map->list);
	map->virt = (void __iomem __force *)((unsigned long)virt & PAGE_MASK);
	map->phys = pg_off;
	map->size = pg_sz;
	map->track.refcount = 1;

	list_add_tail_rcu(&map->list, &acpi_ioremaps);

out:
	mutex_unlock(&acpi_ioremap_lock);
	return map->virt + (phys - map->phys);
}
EXPORT_SYMBOL_GPL(acpi_os_map_iomem);

void *__ref acpi_os_map_memory(acpi_physical_address phys, acpi_size size)
{
	return (void *)acpi_os_map_iomem(phys, size);
}
EXPORT_SYMBOL_GPL(acpi_os_map_memory);
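
/*
 * Illustrative sketch (not part of the original file, compiled out): typical
 * process-context use of the mapping API. A second acpi_os_map_memory() call
 * covering the same range would only bump the refcount of the mapping
 * created by the first call. The physical address is hypothetical.
 */
#if 0
static u8 example_read_table_byte(acpi_physical_address pa)
{
	void *p = acpi_os_map_memory(pa, 1);
	u8 val;

	if (!p)
		return 0;

	val = *(u8 *)p;
	acpi_os_unmap_memory(p, 1);	/* drop the reference; unmap is deferred */
	return val;
}
#endif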

static void acpi_os_map_remove(struct work_struct *work)
{
	struct acpi_ioremap *map = container_of(to_rcu_work(work),
						struct acpi_ioremap,
						track.rwork);

	acpi_unmap(map->phys, map->virt);
	kfree(map);
}

/* Must be called with mutex_lock(&acpi_ioremap_lock) */
static void acpi_os_drop_map_ref(struct acpi_ioremap *map)
{
	if (--map->track.refcount)
		return;

	list_del_rcu(&map->list);

	INIT_RCU_WORK(&map->track.rwork, acpi_os_map_remove);
	queue_rcu_work(system_wq, &map->track.rwork);
}

/**
 * acpi_os_unmap_iomem - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 *
 * Look up the given virtual address range in the list of existing ACPI memory
 * mappings, drop a reference to it and if there are no more active references
 * to it, queue it up for later removal.
 *
 * During early init (when acpi_permanent_mmap has not been set yet) this
 * routine simply calls __acpi_unmap_table() to get the job done. Since
 * __acpi_unmap_table() is an __init function, the __ref annotation is needed
 * to avoid a section mismatch warning.
 */
void __ref acpi_os_unmap_iomem(void __iomem *virt, acpi_size size)
{
	struct acpi_ioremap *map;

	if (!acpi_permanent_mmap) {
		__acpi_unmap_table(virt, size);
		return;
	}

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup_virt(virt, size);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		WARN(true, PREFIX "%s: bad address %p\n", __func__, virt);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_iomem);

/**
 * acpi_os_unmap_memory - Drop a memory mapping reference.
 * @virt: Start of the address range to drop a reference to.
 * @size: Size of the address range to drop a reference to.
 */
void __ref acpi_os_unmap_memory(void *virt, acpi_size size)
{
	acpi_os_unmap_iomem((void __iomem *)virt, size);
}
EXPORT_SYMBOL_GPL(acpi_os_unmap_memory);

void __iomem *acpi_os_map_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return NULL;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return NULL;

	return acpi_os_map_iomem(addr, gas->bit_width / 8);
}
EXPORT_SYMBOL(acpi_os_map_generic_address);

void acpi_os_unmap_generic_address(struct acpi_generic_address *gas)
{
	u64 addr;
	struct acpi_ioremap *map;

	if (gas->space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return;

	/* Handle possible alignment issues */
	memcpy(&addr, &gas->address, sizeof(addr));
	if (!addr || !gas->bit_width)
		return;

	mutex_lock(&acpi_ioremap_lock);

	map = acpi_map_lookup(addr, gas->bit_width / 8);
	if (!map) {
		mutex_unlock(&acpi_ioremap_lock);
		return;
	}
	acpi_os_drop_map_ref(map);

	mutex_unlock(&acpi_ioremap_lock);
}
EXPORT_SYMBOL(acpi_os_unmap_generic_address);

#ifdef ACPI_FUTURE_USAGE
acpi_status
acpi_os_get_physical_address(void *virt, acpi_physical_address *phys)
{
	if (!phys || !virt)
		return AE_BAD_PARAMETER;

	*phys = virt_to_phys(virt);

	return AE_OK;
}
#endif

#ifdef CONFIG_ACPI_REV_OVERRIDE_POSSIBLE
static bool acpi_rev_override;

int __init acpi_rev_override_setup(char *str)
{
	acpi_rev_override = true;
	return 1;
}
__setup("acpi_rev_override", acpi_rev_override_setup);
#else
#define acpi_rev_override	false
#endif

#define ACPI_MAX_OVERRIDE_LEN 100

static char acpi_os_name[ACPI_MAX_OVERRIDE_LEN];

acpi_status
acpi_os_predefined_override(const struct acpi_predefined_names *init_val,
			    acpi_string *new_val)
{
	if (!init_val || !new_val)
		return AE_BAD_PARAMETER;

	*new_val = NULL;

	if (!memcmp(init_val->name, "_OS_", 4) && strlen(acpi_os_name)) {
		printk(KERN_INFO PREFIX "Overriding _OS definition to '%s'\n",
		       acpi_os_name);
		*new_val = acpi_os_name;
	}

	if (!memcmp(init_val->name, "_REV", 4) && acpi_rev_override) {
		printk(KERN_INFO PREFIX "Overriding _REV return value to 5\n");
		*new_val = (char *)5;
	}

	return AE_OK;
}

static irqreturn_t acpi_irq(int irq, void *dev_id)
{
	u32 handled;

	handled = (*acpi_irq_handler) (acpi_irq_context);

	if (handled) {
		acpi_irq_handled++;
		return IRQ_HANDLED;
	}

	acpi_irq_not_handled++;
	return IRQ_NONE;
}

acpi_status
acpi_os_install_interrupt_handler(u32 gsi, acpi_osd_handler handler,
				  void *context)
{
	unsigned int irq;

	acpi_irq_stats_init();

	/*
	 * ACPI interrupts different from the SCI in our copy of the FADT are
	 * not supported.
	 */
	if (gsi != acpi_gbl_FADT.sci_interrupt)
		return AE_BAD_PARAMETER;

	if (acpi_irq_handler)
		return AE_ALREADY_ACQUIRED;

	if (acpi_gsi_to_irq(gsi, &irq) < 0) {
		printk(KERN_ERR PREFIX "SCI (ACPI GSI %d) not registered\n",
		       gsi);
		return AE_OK;
	}

	acpi_irq_handler = handler;
	acpi_irq_context = context;
	if (request_irq(irq, acpi_irq, IRQF_SHARED, "acpi", acpi_irq)) {
		printk(KERN_ERR PREFIX "SCI (IRQ%d) allocation failed\n", irq);
		acpi_irq_handler = NULL;
		return AE_NOT_ACQUIRED;
	}
	acpi_sci_irq = irq;

	return AE_OK;
}

acpi_status acpi_os_remove_interrupt_handler(u32 gsi, acpi_osd_handler handler)
{
	if (gsi != acpi_gbl_FADT.sci_interrupt || !acpi_sci_irq_valid())
		return AE_BAD_PARAMETER;

	free_irq(acpi_sci_irq, acpi_irq);
	acpi_irq_handler = NULL;
	acpi_sci_irq = INVALID_ACPI_IRQ;

	return AE_OK;
}

/*
 * Running in interpreter thread context, safe to sleep
 */

void acpi_os_sleep(u64 ms)
{
	msleep(ms);
}

void acpi_os_stall(u32 us)
{
	while (us) {
		u32 delay = 1000;

		if (delay > us)
			delay = us;
		udelay(delay);
		touch_nmi_watchdog();
		us -= delay;
	}
}

/*
 * Support ACPI 3.0 AML Timer operand. Returns a 64-bit free-running,
 * monotonically increasing timer with 100ns granularity. Do not use
 * ktime_get() to implement this function because this function may get
 * called after timekeeping has been suspended. Note: calling this function
 * after timekeeping has been suspended may lead to unexpected results
 * because when timekeeping is suspended the jiffies counter is not
 * incremented. See also timekeeping_suspend().
 */
u64 acpi_os_get_timer(void)
{
	return (get_jiffies_64() - INITIAL_JIFFIES) *
		(ACPI_100NSEC_PER_SEC / HZ);
}
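
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * measuring an elapsed time with the 100ns-granularity timer above,
 * assuming the ACPICA constant ACPI_100NSEC_PER_MSEC (10000 ticks per
 * millisecond).
 */
#if 0
static u64 example_elapsed_ms(void)
{
	u64 start = acpi_os_get_timer();

	/* ... the interval being measured ... */

	return (acpi_os_get_timer() - start) / ACPI_100NSEC_PER_MSEC;
}
#endif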

acpi_status acpi_os_read_port(acpi_io_address port, u32 *value, u32 width)
{
	u32 dummy;

	if (!value)
		value = &dummy;

	*value = 0;
	if (width <= 8) {
		*(u8 *) value = inb(port);
	} else if (width <= 16) {
		*(u16 *) value = inw(port);
	} else if (width <= 32) {
		*(u32 *) value = inl(port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_read_port);

acpi_status acpi_os_write_port(acpi_io_address port, u32 value, u32 width)
{
	if (width <= 8) {
		outb(value, port);
	} else if (width <= 16) {
		outw(value, port);
	} else if (width <= 32) {
		outl(value, port);
	} else {
		BUG();
	}

	return AE_OK;
}

EXPORT_SYMBOL(acpi_os_write_port);
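
/*
 * Illustrative sketch (not part of the original file, compiled out): an
 * 8-bit read and write-back through the port helpers above. Port 0x80 (the
 * conventional POST code port) is used purely as an example.
 */
#if 0
static void example_port_io(void)
{
	u32 val;

	acpi_os_read_port(0x80, &val, 8);	/* low byte holds inb(0x80) */
	acpi_os_write_port(0x80, val, 8);	/* outb() of the same byte */
}
#endif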

int acpi_os_read_iomem(void __iomem *virt_addr, u64 *value, u32 width)
{
	switch (width) {
	case 8:
		*(u8 *) value = readb(virt_addr);
		break;
	case 16:
		*(u16 *) value = readw(virt_addr);
		break;
	case 32:
		*(u32 *) value = readl(virt_addr);
		break;
	case 64:
		*(u64 *) value = readq(virt_addr);
		break;
	default:
		return -EINVAL;
	}

	return 0;
}

acpi_status
acpi_os_read_memory(acpi_physical_address phys_addr, u64 *value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;
	u64 dummy;
	int error;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	if (!value)
		value = &dummy;

	error = acpi_os_read_iomem(virt_addr, value, width);
	BUG_ON(error);

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_write_memory(acpi_physical_address phys_addr, u64 value, u32 width)
{
	void __iomem *virt_addr;
	unsigned int size = width / 8;
	bool unmap = false;

	rcu_read_lock();
	virt_addr = acpi_map_vaddr_lookup(phys_addr, size);
	if (!virt_addr) {
		rcu_read_unlock();
		virt_addr = acpi_os_ioremap(phys_addr, size);
		if (!virt_addr)
			return AE_BAD_ADDRESS;
		unmap = true;
	}

	switch (width) {
	case 8:
		writeb(value, virt_addr);
		break;
	case 16:
		writew(value, virt_addr);
		break;
	case 32:
		writel(value, virt_addr);
		break;
	case 64:
		writeq(value, virt_addr);
		break;
	default:
		BUG();
	}

	if (unmap)
		iounmap(virt_addr);
	else
		rcu_read_unlock();

	return AE_OK;
}

acpi_status
acpi_os_read_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
			       u64 *value, u32 width)
{
	int result, size;
	u32 value32;

	if (!value)
		return AE_BAD_PARAMETER;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_read(pci_id->segment, pci_id->bus,
			      PCI_DEVFN(pci_id->device, pci_id->function),
			      reg, size, &value32);
	*value = value32;

	return (result ? AE_ERROR : AE_OK);
}

acpi_status
acpi_os_write_pci_configuration(struct acpi_pci_id *pci_id, u32 reg,
				u64 value, u32 width)
{
	int result, size;

	switch (width) {
	case 8:
		size = 1;
		break;
	case 16:
		size = 2;
		break;
	case 32:
		size = 4;
		break;
	default:
		return AE_ERROR;
	}

	result = raw_pci_write(pci_id->segment, pci_id->bus,
			       PCI_DEVFN(pci_id->device, pci_id->function),
			       reg, size, value);

	return (result ? AE_ERROR : AE_OK);
}

static void acpi_os_execute_deferred(struct work_struct *work)
{
	struct acpi_os_dpc *dpc = container_of(work, struct acpi_os_dpc, work);

	dpc->function(dpc->context);
	kfree(dpc);
}

#ifdef CONFIG_ACPI_DEBUGGER
static struct acpi_debugger acpi_debugger;
static bool acpi_debugger_initialized;

int acpi_register_debugger(struct module *owner,
			   const struct acpi_debugger_ops *ops)
{
	int ret = 0;

	mutex_lock(&acpi_debugger.lock);
	if (acpi_debugger.ops) {
		ret = -EBUSY;
		goto err_lock;
	}

	acpi_debugger.owner = owner;
	acpi_debugger.ops = ops;

err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}
EXPORT_SYMBOL(acpi_register_debugger);

void acpi_unregister_debugger(const struct acpi_debugger_ops *ops)
{
	mutex_lock(&acpi_debugger.lock);
	if (ops == acpi_debugger.ops) {
		acpi_debugger.ops = NULL;
		acpi_debugger.owner = NULL;
	}
	mutex_unlock(&acpi_debugger.lock);
}
EXPORT_SYMBOL(acpi_unregister_debugger);

int acpi_debugger_create_thread(acpi_osd_exec_callback function, void *context)
{
	int ret;
	int (*func)(acpi_osd_exec_callback, void *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->create_thread;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(function, context);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_write_log(const char *msg)
{
	ssize_t ret;
	ssize_t (*func)(const char *);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->write_log;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(msg);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

ssize_t acpi_debugger_read_cmd(char *buffer, size_t buffer_length)
{
	ssize_t ret;
	ssize_t (*func)(char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->read_cmd;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(buffer, buffer_length);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_wait_command_ready(void)
{
	int ret;
	int (*func)(bool, char *, size_t);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->wait_command_ready;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func(acpi_gbl_method_executing,
		   acpi_gbl_db_line_buf, ACPI_DB_LINE_BUFFER_SIZE);

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int acpi_debugger_notify_command_complete(void)
{
	int ret;
	int (*func)(void);
	struct module *owner;

	if (!acpi_debugger_initialized)
		return -ENODEV;
	mutex_lock(&acpi_debugger.lock);
	if (!acpi_debugger.ops) {
		ret = -ENODEV;
		goto err_lock;
	}
	if (!try_module_get(acpi_debugger.owner)) {
		ret = -ENODEV;
		goto err_lock;
	}
	func = acpi_debugger.ops->notify_command_complete;
	owner = acpi_debugger.owner;
	mutex_unlock(&acpi_debugger.lock);

	ret = func();

	mutex_lock(&acpi_debugger.lock);
	module_put(owner);
err_lock:
	mutex_unlock(&acpi_debugger.lock);
	return ret;
}

int __init acpi_debugger_init(void)
{
	mutex_init(&acpi_debugger.lock);
	acpi_debugger_initialized = true;
	return 0;
}
#endif

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_execute
 *
 * PARAMETERS:  Type               - Type of the callback
 *              Function           - Function to be executed
 *              Context            - Function parameters
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Depending on type, either queues function for deferred execution or
 *              immediately executes function on a separate thread.
 *
 ******************************************************************************/

acpi_status acpi_os_execute(acpi_execute_type type,
			    acpi_osd_exec_callback function, void *context)
{
	acpi_status status = AE_OK;
	struct acpi_os_dpc *dpc;
	struct workqueue_struct *queue;
	int ret;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
			  "Scheduling function [%p(%p)] for deferred execution.\n",
			  function, context));

	if (type == OSL_DEBUGGER_MAIN_THREAD) {
		ret = acpi_debugger_create_thread(function, context);
		if (ret) {
			pr_err("Call to kthread_create() failed.\n");
			status = AE_ERROR;
		}
		goto out_thread;
	}

	/*
	 * Allocate/initialize DPC structure.  Note that this memory will be
	 * freed by the callee.  The kernel handles the work_struct list in a
	 * way that allows us to also free its memory inside the callee.
	 * Because we may want to schedule several tasks with different
	 * parameters we can't use the approach some kernel code uses of
	 * having a static work_struct.
	 */

	dpc = kzalloc(sizeof(struct acpi_os_dpc), GFP_ATOMIC);
	if (!dpc)
		return AE_NO_MEMORY;

	dpc->function = function;
	dpc->context = context;

	/*
	 * To prevent lockdep from complaining unnecessarily, make sure that
	 * there is a different static lockdep key for each workqueue by using
	 * INIT_WORK() for each of them separately.
	 */
	if (type == OSL_NOTIFY_HANDLER) {
		queue = kacpi_notify_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else if (type == OSL_GPE_HANDLER) {
		queue = kacpid_wq;
		INIT_WORK(&dpc->work, acpi_os_execute_deferred);
	} else {
		pr_err("Unsupported os_execute type %d.\n", type);
		status = AE_ERROR;
	}

	if (ACPI_FAILURE(status))
		goto err_workqueue;

	/*
	 * On some machines, a software-initiated SMI causes corruption unless
	 * the SMI runs on CPU 0.  An SMI can be initiated by any AML, but
	 * typically it's done in GPE-related methods that are run via
	 * workqueues, so we can avoid the known corruption cases by always
	 * queueing on CPU 0.
	 */
	ret = queue_work_on(0, queue, &dpc->work);
	if (!ret) {
		printk(KERN_ERR PREFIX
			  "Call to queue_work() failed.\n");
		status = AE_ERROR;
	}
err_workqueue:
	if (ACPI_FAILURE(status))
		kfree(dpc);
out_thread:
	return status;
}
EXPORT_SYMBOL(acpi_os_execute);
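
/*
 * Illustrative sketch (not part of the original file, compiled out):
 * queueing a callback for deferred execution. With OSL_GPE_HANDLER the
 * callback runs on CPU 0 out of kacpid_wq, per the SMI workaround above.
 * The callback and its context are hypothetical.
 */
#if 0
static void example_deferred_cb(void *context)
{
	pr_info("deferred callback ran with context %p\n", context);
}

static void example_schedule(void)
{
	acpi_status status = acpi_os_execute(OSL_GPE_HANDLER,
					     example_deferred_cb, NULL);

	if (ACPI_FAILURE(status))
		pr_err("failed to queue deferred callback\n");
}
#endif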

void acpi_os_wait_events_complete(void)
{
	/*
	 * Make sure the GPE handler or the fixed event handler is not used
	 * on another CPU after removal.
	 */
	if (acpi_sci_irq_valid())
		synchronize_hardirq(acpi_sci_irq);
	flush_workqueue(kacpid_wq);
	flush_workqueue(kacpi_notify_wq);
}
EXPORT_SYMBOL(acpi_os_wait_events_complete);

struct acpi_hp_work {
	struct work_struct work;
	struct acpi_device *adev;
	u32 src;
};

static void acpi_hotplug_work_fn(struct work_struct *work)
{
	struct acpi_hp_work *hpw = container_of(work, struct acpi_hp_work, work);

	acpi_os_wait_events_complete();
	acpi_device_hotplug(hpw->adev, hpw->src);
	kfree(hpw);
}

acpi_status acpi_hotplug_schedule(struct acpi_device *adev, u32 src)
{
	struct acpi_hp_work *hpw;

	ACPI_DEBUG_PRINT((ACPI_DB_EXEC,
		  "Scheduling hotplug event (%p, %u) for deferred execution.\n",
		  adev, src));

	hpw = kmalloc(sizeof(*hpw), GFP_KERNEL);
	if (!hpw)
		return AE_NO_MEMORY;

	INIT_WORK(&hpw->work, acpi_hotplug_work_fn);
	hpw->adev = adev;
	hpw->src = src;
	/*
	 * We can't run hotplug code in kacpid_wq/kacpid_notify_wq etc., because
	 * the hotplug code may call driver .remove() functions, which may
	 * invoke flush_scheduled_work()/acpi_os_wait_events_complete() to flush
	 * these workqueues.
	 */
	if (!queue_work(kacpi_hotplug_wq, &hpw->work)) {
		kfree(hpw);
		return AE_ERROR;
	}
	return AE_OK;
}

bool acpi_queue_hotplug_work(struct work_struct *work)
{
	return queue_work(kacpi_hotplug_wq, work);
}

acpi_status
acpi_os_create_semaphore(u32 max_units, u32 initial_units, acpi_handle *handle)
{
	struct semaphore *sem = NULL;

	sem = acpi_os_allocate_zeroed(sizeof(struct semaphore));
	if (!sem)
		return AE_NO_MEMORY;

	sema_init(sem, initial_units);

	*handle = (acpi_handle *) sem;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Creating semaphore[%p|%d].\n",
			  *handle, initial_units));

	return AE_OK;
}

/*
 * TODO: A better way to delete semaphores? Linux doesn't have a
 * 'delete_semaphore()' function -- may result in an invalid
 * pointer dereference for non-synchronized consumers.	Should
 * we at least check for blocked threads and signal/cancel them?
 */
acpi_status acpi_os_delete_semaphore(acpi_handle handle)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!sem)
		return AE_BAD_PARAMETER;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Deleting semaphore[%p].\n", handle));

	BUG_ON(!list_empty(&sem->wait_list));
	kfree(sem);
	sem = NULL;

	return AE_OK;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_wait_semaphore(acpi_handle handle, u32 units, u16 timeout)
{
	acpi_status status = AE_OK;
	struct semaphore *sem = (struct semaphore *)handle;
	long jiffies;
	int ret = 0;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Waiting for semaphore[%p|%d|%d]\n",
			  handle, units, timeout));

	if (timeout == ACPI_WAIT_FOREVER)
		jiffies = MAX_SCHEDULE_TIMEOUT;
	else
		jiffies = msecs_to_jiffies(timeout);

	ret = down_timeout(sem, jiffies);
	if (ret)
		status = AE_TIME;

	if (ACPI_FAILURE(status)) {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Failed to acquire semaphore[%p|%d|%d], %s",
				  handle, units, timeout,
				  acpi_format_exception(status)));
	} else {
		ACPI_DEBUG_PRINT((ACPI_DB_MUTEX,
				  "Acquired semaphore[%p|%d|%d]", handle,
				  units, timeout));
	}

	return status;
}

/*
 * TODO: Support for units > 1?
 */
acpi_status acpi_os_signal_semaphore(acpi_handle handle, u32 units)
{
	struct semaphore *sem = (struct semaphore *)handle;

	if (!acpi_os_initialized)
		return AE_OK;

	if (!sem || (units < 1))
		return AE_BAD_PARAMETER;

	if (units > 1)
		return AE_SUPPORT;

	ACPI_DEBUG_PRINT((ACPI_DB_MUTEX, "Signaling semaphore[%p|%d]\n", handle,
			  units));

	up(sem);

	return AE_OK;
}
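
/*
 * Illustrative sketch (not part of the original file, compiled out): the
 * full lifecycle of an OSL semaphore as ACPICA drives it - created with one
 * unit, acquired with a 100 ms timeout, signaled, then deleted.
 */
#if 0
static void example_semaphore_lifecycle(void)
{
	acpi_handle sem;

	if (ACPI_FAILURE(acpi_os_create_semaphore(1, 1, &sem)))
		return;

	if (ACPI_SUCCESS(acpi_os_wait_semaphore(sem, 1, 100)))	/* ms timeout */
		acpi_os_signal_semaphore(sem, 1);

	acpi_os_delete_semaphore(sem);
}
#endif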

acpi_status acpi_os_get_line(char *buffer, u32 buffer_length, u32 *bytes_read)
{
#ifdef ENABLE_DEBUGGER
	if (acpi_in_debugger) {
		u32 chars;

		kdb_read(buffer, buffer_length);

		/* remove the CR kdb includes */
		chars = strlen(buffer) - 1;
		buffer[chars] = '\0';
	}
#else
	int ret;

	ret = acpi_debugger_read_cmd(buffer, buffer_length);
	if (ret < 0)
		return AE_ERROR;
	if (bytes_read)
		*bytes_read = ret;
#endif

	return AE_OK;
}
EXPORT_SYMBOL(acpi_os_get_line);

acpi_status acpi_os_wait_command_ready(void)
{
	int ret;

	ret = acpi_debugger_wait_command_ready();

	return ret < 0 ? AE_ERROR : AE_OK;
}

acpi_status acpi_os_notify_command_complete(void)
{
	int ret;

	ret = acpi_debugger_notify_command_complete();

	return ret < 0 ? AE_ERROR : AE_OK;
}

acpi_status acpi_os_signal(u32 function, void *info)
{
	switch (function) {
	case ACPI_SIGNAL_FATAL:
		printk(KERN_ERR PREFIX "Fatal opcode executed\n");
		break;
	case ACPI_SIGNAL_BREAKPOINT:
		/*
		 * AML Breakpoint
		 * ACPI spec. says to treat it as a NOP unless
		 * you are debugging.  So if/when we integrate
		 * AML debugger into the kernel debugger its
		 * hook will go here.  But until then it is
		 * not useful to print anything on breakpoints.
		 */
		break;
	default:
		break;
	}

	return AE_OK;
}

static int __init acpi_os_name_setup(char *str)
{
	char *p = acpi_os_name;
	int count = ACPI_MAX_OVERRIDE_LEN - 1;

	if (!str || !*str)
		return 0;

	for (; count-- && *str; str++) {
		if (isalnum(*str) || *str == ' ' || *str == ':')
			*p++ = *str;
		else if (*str == '\'' || *str == '"')
			continue;	/* ignore quotes */
		else
			break;
	}
	*p = 0;

	return 1;
}

__setup("acpi_os_name=", acpi_os_name_setup);

/*
 * Disable the auto-serialization of named objects creation methods.
 *
 * This feature is enabled by default.  It marks the AML control methods
 * that contain the opcodes to create named objects as "Serialized".
 */
static int __init acpi_no_auto_serialize_setup(char *str)
{
	acpi_gbl_auto_serialize_methods = FALSE;
	pr_info("ACPI: auto-serialization disabled\n");

	return 1;
}

__setup("acpi_no_auto_serialize", acpi_no_auto_serialize_setup);

/*
 * Check for resource interference between native drivers and ACPI
 * OperationRegions (SystemIO and SystemMemory only).
 * IO ports and memory declared in ACPI might be used by the ACPI subsystem
 * in arbitrary AML code and can interfere with legacy drivers.
 * acpi_enforce_resources= can be set to:
 *
 *   - strict (default) (2)
 *     -> further driver trying to access the resources will not load
 *   - lax              (1)
 *     -> further driver trying to access the resources will load, but you
 *        get a system message that something might go wrong...
 *   - no               (0)
 *     -> ACPI Operation Region resources will not be registered
 */
#define ENFORCE_RESOURCES_STRICT 2
#define ENFORCE_RESOURCES_LAX    1
#define ENFORCE_RESOURCES_NO     0

static unsigned int acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;

static int __init acpi_enforce_resources_setup(char *str)
{
	if (str == NULL || *str == '\0')
		return 0;

	if (!strcmp("strict", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_STRICT;
	else if (!strcmp("lax", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_LAX;
	else if (!strcmp("no", str))
		acpi_enforce_resources = ENFORCE_RESOURCES_NO;

	return 1;
}

__setup("acpi_enforce_resources=", acpi_enforce_resources_setup);

/* Check for resource conflicts between ACPI OperationRegions and native
 * drivers */
int acpi_check_resource_conflict(const struct resource *res)
{
	acpi_adr_space_type space_id;
	acpi_size length;
	u8 warn = 0;
	int clash = 0;

	if (acpi_enforce_resources == ENFORCE_RESOURCES_NO)
		return 0;
	if (!(res->flags & IORESOURCE_IO) && !(res->flags & IORESOURCE_MEM))
		return 0;

	if (res->flags & IORESOURCE_IO)
		space_id = ACPI_ADR_SPACE_SYSTEM_IO;
	else
		space_id = ACPI_ADR_SPACE_SYSTEM_MEMORY;

	length = resource_size(res);
	if (acpi_enforce_resources != ENFORCE_RESOURCES_NO)
		warn = 1;
	clash = acpi_check_address_range(space_id, res->start, length, warn);

	if (clash) {
		if (acpi_enforce_resources != ENFORCE_RESOURCES_NO) {
			if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
				printk(KERN_NOTICE "ACPI: This conflict may"
				       " cause random problems and system"
				       " instability\n");
			printk(KERN_INFO "ACPI: If an ACPI driver is available"
			       " for this device, you should use it instead of"
			       " the native driver\n");
		}
		if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
			return -EBUSY;
	}
	return 0;
}
EXPORT_SYMBOL(acpi_check_resource_conflict);

int acpi_check_region(resource_size_t start, resource_size_t n,
		      const char *name)
{
	struct resource res = {
		.start = start,
		.end = start + n - 1,
		.name = name,
		.flags = IORESOURCE_IO,
	};

	return acpi_check_resource_conflict(&res);
}
EXPORT_SYMBOL(acpi_check_region);
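
/*
 * Illustrative sketch (not part of the original file, compiled out): how a
 * native driver might consult the check above before claiming an I/O range.
 * The port base and length are hypothetical.
 */
#if 0
static int example_claim_ports(void)
{
	if (acpi_check_region(0x295, 2, "example"))
		return -EBUSY;	/* strict mode and an OperationRegion clash */

	if (!request_region(0x295, 2, "example"))
		return -EBUSY;	/* somebody else owns the ports */

	return 0;
}
#endif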

static acpi_status acpi_deactivate_mem_region(acpi_handle handle, u32 level,
					      void *_res, void **return_value)
{
	struct acpi_mem_space_context **mem_ctx;
	union acpi_operand_object *handler_obj;
	union acpi_operand_object *region_obj2;
	union acpi_operand_object *region_obj;
	struct resource *res = _res;
	acpi_status status;

	region_obj = acpi_ns_get_attached_object(handle);
	if (!region_obj)
		return AE_OK;

	handler_obj = region_obj->region.handler;
	if (!handler_obj)
		return AE_OK;

	if (region_obj->region.space_id != ACPI_ADR_SPACE_SYSTEM_MEMORY)
		return AE_OK;

	if (!(region_obj->region.flags & AOPOBJ_SETUP_COMPLETE))
		return AE_OK;

	region_obj2 = acpi_ns_get_secondary_object(region_obj);
	if (!region_obj2)
		return AE_OK;

	mem_ctx = (void *)&region_obj2->extra.region_context;

	if (!(mem_ctx[0]->address >= res->start &&
	      mem_ctx[0]->address < res->end))
		return AE_OK;

	status = handler_obj->address_space.setup(region_obj,
						  ACPI_REGION_DEACTIVATE,
						  NULL, (void **)mem_ctx);
	if (ACPI_SUCCESS(status))
		region_obj->region.flags &= ~(AOPOBJ_SETUP_COMPLETE);

	return status;
}

/**
 * acpi_release_memory - Release any mappings done to a memory region
 * @handle: Handle to namespace node
 * @res: Memory resource
 * @level: A level that terminates the search
 *
 * Walks through @handle and unmaps all SystemMemory Operation Regions that
 * overlap with @res and that have already been activated (mapped).
 *
 * This is a helper that allows drivers to place special requirements on a
 * memory region that may overlap with operation regions, primarily allowing
 * them to safely map the region as non-cached memory.
 *
 * The unmapped Operation Regions will be automatically remapped next time
 * they are called, so the drivers do not need to do anything else.
 */
acpi_status acpi_release_memory(acpi_handle handle, struct resource *res,
				u32 level)
{
	acpi_status status;

	if (!(res->flags & IORESOURCE_MEM))
		return AE_TYPE;

	status = acpi_walk_namespace(ACPI_TYPE_REGION, handle, level,
				     acpi_deactivate_mem_region, NULL,
				     res, NULL);
	if (ACPI_FAILURE(status))
		return status;

	/*
	 * Wait for all of the mappings queued up for removal by
	 * acpi_deactivate_mem_region() to actually go away.
	 */
	synchronize_rcu();
	rcu_barrier();
	flush_scheduled_work();

	return AE_OK;
}
EXPORT_SYMBOL_GPL(acpi_release_memory);
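
/*
 * Illustrative sketch (not part of the original file, compiled out): a
 * driver that wants to remap a BAR with different cache attributes first
 * asks ACPI to tear down overlapping SystemMemory OperationRegion mappings.
 * The device, the BAR index and the walk depth are hypothetical.
 */
#if 0
static void example_release_bar_mappings(struct pci_dev *pdev)
{
	struct resource *res = &pdev->resource[0];

	acpi_release_memory(ACPI_HANDLE(&pdev->dev), res, 1);
	/* now safe to e.g. ioremap_wc(res->start, resource_size(res)) */
}
#endif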

/*
 * Let drivers know whether the resource checks are effective
 */
int acpi_resources_are_enforced(void)
{
	return acpi_enforce_resources == ENFORCE_RESOURCES_STRICT;
}
EXPORT_SYMBOL(acpi_resources_are_enforced);

/*
 * Deallocate the memory for a spinlock.
 */
void acpi_os_delete_lock(acpi_spinlock handle)
{
	ACPI_FREE(handle);
}

/*
 * Acquire a spinlock.
 *
 * handle is a pointer to the spinlock_t.
 */

acpi_cpu_flags acpi_os_acquire_lock(acpi_spinlock lockp)
{
	acpi_cpu_flags flags;

	spin_lock_irqsave(lockp, flags);
	return flags;
}

/*
 * Release a spinlock. See above.
 */

void acpi_os_release_lock(acpi_spinlock lockp, acpi_cpu_flags flags)
{
	spin_unlock_irqrestore(lockp, flags);
}

#ifndef ACPI_USE_LOCAL_CACHE

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_create_cache
 *
 * PARAMETERS:  name      - Ascii name for the cache
 *              size      - Size of each cached object
 *              depth     - Maximum depth of the cache (in objects) <ignored>
 *              cache     - Where the new cache object is returned
 *
 * RETURN:      status
 *
 * DESCRIPTION: Create a cache object
 *
 ******************************************************************************/

acpi_status
acpi_os_create_cache(char *name, u16 size, u16 depth, acpi_cache_t **cache)
{
	*cache = kmem_cache_create(name, size, 0, 0, NULL);
	if (*cache == NULL)
		return AE_ERROR;
	else
		return AE_OK;
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_purge_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache.
 *
 ******************************************************************************/

acpi_status acpi_os_purge_cache(acpi_cache_t *cache)
{
	kmem_cache_shrink(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_delete_cache
 *
 * PARAMETERS:  Cache           - Handle to cache object
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Free all objects within the requested cache and delete the
 *              cache object.
 *
 ******************************************************************************/

acpi_status acpi_os_delete_cache(acpi_cache_t *cache)
{
	kmem_cache_destroy(cache);
	return (AE_OK);
}

/*******************************************************************************
 *
 * FUNCTION:    acpi_os_release_object
 *
 * PARAMETERS:  Cache       - Handle to cache object
 *              Object      - The object to be released
 *
 * RETURN:      None
 *
 * DESCRIPTION: Release an object to the specified cache.  If cache is full,
 *              the object is deleted.
 *
 ******************************************************************************/

acpi_status acpi_os_release_object(acpi_cache_t *cache, void *object)
{
	kmem_cache_free(cache, object);
	return (AE_OK);
}
#endif
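
/*
 * Illustrative sketch (not part of the original file, compiled out): the
 * cache API above maps directly onto kmem_cache; acpi_os_acquire_object()
 * is provided as an inline elsewhere (aclinuxex.h in mainline). The cache
 * name and sizes are hypothetical.
 */
#if 0
static void example_cache_lifecycle(void)
{
	acpi_cache_t *cache;
	void *obj;

	if (ACPI_FAILURE(acpi_os_create_cache("example", 64, 16, &cache)))
		return;

	obj = acpi_os_acquire_object(cache);		/* kmem_cache_zalloc() */
	if (obj)
		acpi_os_release_object(cache, obj);	/* kmem_cache_free() */

	acpi_os_purge_cache(cache);			/* kmem_cache_shrink() */
	acpi_os_delete_cache(cache);			/* kmem_cache_destroy() */
}
#endif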

static int __init acpi_no_static_ssdt_setup(char *s)
{
	acpi_gbl_disable_ssdt_table_install = TRUE;
	pr_info("ACPI: static SSDT installation disabled\n");

	return 0;
}

early_param("acpi_no_static_ssdt", acpi_no_static_ssdt_setup);

static int __init acpi_disable_return_repair(char *s)
{
	printk(KERN_NOTICE PREFIX
	       "ACPI: Predefined validation mechanism disabled\n");
	acpi_gbl_disable_auto_repair = TRUE;

	return 1;
}

__setup("acpica_no_return_repair", acpi_disable_return_repair);

acpi_status __init acpi_os_initialize(void)
{
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1a_event_block);
	acpi_os_map_generic_address(&acpi_gbl_FADT.xpm1b_event_block);

	acpi_gbl_xgpe0_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe1_block_logical_address =
		(unsigned long)acpi_os_map_generic_address(&acpi_gbl_FADT.xgpe1_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER) {
		/*
		 * Use acpi_os_map_generic_address to pre-map the reset
		 * register if it's in system memory.
		 */
		void *rv;

		rv = acpi_os_map_generic_address(&acpi_gbl_FADT.reset_register);
		pr_debug(PREFIX "%s: map reset_reg %s\n", __func__,
			 rv ? "successful" : "failed");
	}
	acpi_os_initialized = true;

	return AE_OK;
}

acpi_status __init acpi_os_initialize1(void)
{
	kacpid_wq = alloc_workqueue("kacpid", 0, 1);
	kacpi_notify_wq = alloc_workqueue("kacpi_notify", 0, 1);
	kacpi_hotplug_wq = alloc_ordered_workqueue("kacpi_hotplug", 0);
	BUG_ON(!kacpid_wq);
	BUG_ON(!kacpi_notify_wq);
	BUG_ON(!kacpi_hotplug_wq);
	acpi_osi_init();
	return AE_OK;
}

acpi_status acpi_os_terminate(void)
{
	if (acpi_irq_handler) {
		acpi_os_remove_interrupt_handler(acpi_gbl_FADT.sci_interrupt,
						 acpi_irq_handler);
	}

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe1_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xgpe0_block);
	acpi_gbl_xgpe0_block_logical_address = 0UL;
	acpi_gbl_xgpe1_block_logical_address = 0UL;

	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1b_event_block);
	acpi_os_unmap_generic_address(&acpi_gbl_FADT.xpm1a_event_block);

	if (acpi_gbl_FADT.flags & ACPI_FADT_RESET_REGISTER)
		acpi_os_unmap_generic_address(&acpi_gbl_FADT.reset_register);

	destroy_workqueue(kacpid_wq);
	destroy_workqueue(kacpi_notify_wq);
	destroy_workqueue(kacpi_hotplug_wq);

	return AE_OK;
}

acpi_status acpi_os_prepare_sleep(u8 sleep_state, u32 pm1a_control,
				  u32 pm1b_control)
{
	int rc = 0;

	if (__acpi_os_prepare_sleep)
		rc = __acpi_os_prepare_sleep(sleep_state,
					     pm1a_control, pm1b_control);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}

void acpi_os_set_prepare_sleep(int (*func)(u8 sleep_state,
			       u32 pm1a_ctrl, u32 pm1b_ctrl))
{
	__acpi_os_prepare_sleep = func;
}

#if (ACPI_REDUCED_HARDWARE)
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	int rc = 0;

	if (__acpi_os_prepare_extended_sleep)
		rc = __acpi_os_prepare_extended_sleep(sleep_state,
						      val_a, val_b);
	if (rc < 0)
		return AE_ERROR;
	else if (rc > 0)
		return AE_CTRL_TERMINATE;

	return AE_OK;
}
#else
acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a,
				  u32 val_b)
{
	return AE_OK;
}
#endif

void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state,
			       u32 val_a, u32 val_b))
{
	__acpi_os_prepare_extended_sleep = func;
}

acpi_status acpi_os_enter_sleep(u8 sleep_state,
				u32 reg_a_value, u32 reg_b_value)
{
	acpi_status status;

	if (acpi_gbl_reduced_hardware)
		status = acpi_os_prepare_extended_sleep(sleep_state,
							reg_a_value,
							reg_b_value);
	else
		status = acpi_os_prepare_sleep(sleep_state,
					       reg_a_value, reg_b_value);
	return status;
}