1 // SPDX-License-Identifier: BSD-3-Clause OR GPL-2.0
2 /******************************************************************************
4 * Module Name: evgpeutil - GPE utilities
6 * Copyright (C) 2000 - 2020, Intel Corp.
8 *****************************************************************************/
10 #include <acpi/acpi.h>
14 #define _COMPONENT ACPI_EVENTS
15 ACPI_MODULE_NAME("evgpeutil")
17 #if (!ACPI_REDUCED_HARDWARE) /* Entire module */
18 /*******************************************************************************
20 * FUNCTION: acpi_ev_walk_gpe_list
22 * PARAMETERS: gpe_walk_callback - Routine called for each GPE block
23 * context - Value passed to callback
27 * DESCRIPTION: Walk the GPE lists.
29 ******************************************************************************/
31 acpi_ev_walk_gpe_list(acpi_gpe_callback gpe_walk_callback, void *context)
33 struct acpi_gpe_block_info *gpe_block;
34 struct acpi_gpe_xrupt_info *gpe_xrupt_info;
35 acpi_status status = AE_OK;
38 ACPI_FUNCTION_TRACE(ev_walk_gpe_list);
40 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
42 /* Walk the interrupt level descriptor list */
44 gpe_xrupt_info = acpi_gbl_gpe_xrupt_list_head;
45 while (gpe_xrupt_info) {
47 /* Walk all Gpe Blocks attached to this interrupt level */
49 gpe_block = gpe_xrupt_info->gpe_block_list_head;
52 /* One callback per GPE block */
55 gpe_walk_callback(gpe_xrupt_info, gpe_block,
57 if (ACPI_FAILURE(status)) {
58 if (status == AE_CTRL_END) { /* Callback abort */
64 gpe_block = gpe_block->next;
67 gpe_xrupt_info = gpe_xrupt_info->next;
71 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
72 return_ACPI_STATUS(status);
75 /*******************************************************************************
77 * FUNCTION: acpi_ev_get_gpe_device
79 * PARAMETERS: GPE_WALK_CALLBACK
83 * DESCRIPTION: Matches the input GPE index (0-current_gpe_count) with a GPE
84 * block device. NULL if the GPE is one of the FADT-defined GPEs.
86 ******************************************************************************/
89 acpi_ev_get_gpe_device(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
90 struct acpi_gpe_block_info *gpe_block, void *context)
92 struct acpi_gpe_device_info *info = context;
94 /* Increment Index by the number of GPEs in this block */
96 info->next_block_base_index += gpe_block->gpe_count;
98 if (info->index < info->next_block_base_index) {
100 * The GPE index is within this block, get the node. Leave the node
101 * NULL for the FADT-defined GPEs
103 if ((gpe_block->node)->type == ACPI_TYPE_DEVICE) {
104 info->gpe_device = gpe_block->node;
107 info->status = AE_OK;
108 return (AE_CTRL_END);
/*******************************************************************************
 *
 * FUNCTION:    acpi_ev_get_gpe_xrupt_block
 *
 * PARAMETERS:  interrupt_number        - Interrupt for a GPE block
 *              gpe_xrupt_block         - Where the block is returned
 *
 * RETURN:      Status
 *
 * DESCRIPTION: Get or Create a GPE interrupt block. There is one interrupt
 *              block per unique interrupt level used for GPEs. Should be
 *              called only when the GPE lists are semaphore locked and not
 *              subject to change.
 *
 ******************************************************************************/
131 acpi_ev_get_gpe_xrupt_block(u32 interrupt_number,
132 struct acpi_gpe_xrupt_info **gpe_xrupt_block)
134 struct acpi_gpe_xrupt_info *next_gpe_xrupt;
135 struct acpi_gpe_xrupt_info *gpe_xrupt;
137 acpi_cpu_flags flags;
139 ACPI_FUNCTION_TRACE(ev_get_gpe_xrupt_block);
141 /* No need for lock since we are not changing any list elements here */
143 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
144 while (next_gpe_xrupt) {
145 if (next_gpe_xrupt->interrupt_number == interrupt_number) {
146 *gpe_xrupt_block = next_gpe_xrupt;
147 return_ACPI_STATUS(AE_OK);
150 next_gpe_xrupt = next_gpe_xrupt->next;
153 /* Not found, must allocate a new xrupt descriptor */
155 gpe_xrupt = ACPI_ALLOCATE_ZEROED(sizeof(struct acpi_gpe_xrupt_info));
157 return_ACPI_STATUS(AE_NO_MEMORY);
160 gpe_xrupt->interrupt_number = interrupt_number;
162 /* Install new interrupt descriptor with spin lock */
164 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
165 if (acpi_gbl_gpe_xrupt_list_head) {
166 next_gpe_xrupt = acpi_gbl_gpe_xrupt_list_head;
167 while (next_gpe_xrupt->next) {
168 next_gpe_xrupt = next_gpe_xrupt->next;
171 next_gpe_xrupt->next = gpe_xrupt;
172 gpe_xrupt->previous = next_gpe_xrupt;
174 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt;
177 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
179 /* Install new interrupt handler if not SCI_INT */
181 if (interrupt_number != acpi_gbl_FADT.sci_interrupt) {
182 status = acpi_os_install_interrupt_handler(interrupt_number,
183 acpi_ev_gpe_xrupt_handler,
185 if (ACPI_FAILURE(status)) {
186 ACPI_EXCEPTION((AE_INFO, status,
187 "Could not install GPE interrupt handler at level 0x%X",
189 return_ACPI_STATUS(status);
193 *gpe_xrupt_block = gpe_xrupt;
194 return_ACPI_STATUS(AE_OK);
197 /*******************************************************************************
199 * FUNCTION: acpi_ev_delete_gpe_xrupt
201 * PARAMETERS: gpe_xrupt - A GPE interrupt info block
205 * DESCRIPTION: Remove and free a gpe_xrupt block. Remove an associated
206 * interrupt handler if not the SCI interrupt.
208 ******************************************************************************/
210 acpi_status acpi_ev_delete_gpe_xrupt(struct acpi_gpe_xrupt_info *gpe_xrupt)
213 acpi_cpu_flags flags;
215 ACPI_FUNCTION_TRACE(ev_delete_gpe_xrupt);
217 /* We never want to remove the SCI interrupt handler */
219 if (gpe_xrupt->interrupt_number == acpi_gbl_FADT.sci_interrupt) {
220 gpe_xrupt->gpe_block_list_head = NULL;
221 return_ACPI_STATUS(AE_OK);
224 /* Disable this interrupt */
227 acpi_os_remove_interrupt_handler(gpe_xrupt->interrupt_number,
228 acpi_ev_gpe_xrupt_handler);
229 if (ACPI_FAILURE(status)) {
230 return_ACPI_STATUS(status);
233 /* Unlink the interrupt block with lock */
235 flags = acpi_os_acquire_lock(acpi_gbl_gpe_lock);
236 if (gpe_xrupt->previous) {
237 gpe_xrupt->previous->next = gpe_xrupt->next;
239 /* No previous, update list head */
241 acpi_gbl_gpe_xrupt_list_head = gpe_xrupt->next;
244 if (gpe_xrupt->next) {
245 gpe_xrupt->next->previous = gpe_xrupt->previous;
247 acpi_os_release_lock(acpi_gbl_gpe_lock, flags);
251 ACPI_FREE(gpe_xrupt);
252 return_ACPI_STATUS(AE_OK);
255 /*******************************************************************************
257 * FUNCTION: acpi_ev_delete_gpe_handlers
259 * PARAMETERS: gpe_xrupt_info - GPE Interrupt info
260 * gpe_block - Gpe Block info
264 * DESCRIPTION: Delete all Handler objects found in the GPE data structs.
265 * Used only prior to termination.
267 ******************************************************************************/
270 acpi_ev_delete_gpe_handlers(struct acpi_gpe_xrupt_info *gpe_xrupt_info,
271 struct acpi_gpe_block_info *gpe_block,
274 struct acpi_gpe_event_info *gpe_event_info;
275 struct acpi_gpe_notify_info *notify;
276 struct acpi_gpe_notify_info *next;
280 ACPI_FUNCTION_TRACE(ev_delete_gpe_handlers);
282 /* Examine each GPE Register within the block */
284 for (i = 0; i < gpe_block->register_count; i++) {
286 /* Now look at the individual GPEs in this byte register */
288 for (j = 0; j < ACPI_GPE_REGISTER_WIDTH; j++) {
289 gpe_event_info = &gpe_block->event_info[((acpi_size)i *
290 ACPI_GPE_REGISTER_WIDTH)
293 if ((ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
294 ACPI_GPE_DISPATCH_HANDLER) ||
295 (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags) ==
296 ACPI_GPE_DISPATCH_RAW_HANDLER)) {
298 /* Delete an installed handler block */
300 ACPI_FREE(gpe_event_info->dispatch.handler);
301 gpe_event_info->dispatch.handler = NULL;
302 gpe_event_info->flags &=
303 ~ACPI_GPE_DISPATCH_MASK;
304 } else if (ACPI_GPE_DISPATCH_TYPE(gpe_event_info->flags)
305 == ACPI_GPE_DISPATCH_NOTIFY) {
307 /* Delete the implicit notification device list */
309 notify = gpe_event_info->dispatch.notify_list;
316 gpe_event_info->dispatch.notify_list = NULL;
317 gpe_event_info->flags &=
318 ~ACPI_GPE_DISPATCH_MASK;
323 return_ACPI_STATUS(AE_OK);
326 #endif /* !ACPI_REDUCED_HARDWARE */