// SPDX-License-Identifier: GPL-2.0-only
/*
 * ACPI AML interfacing support
 *
 * Copyright (C) 2015, Intel Corporation
 * Authors: Lv Zheng <lv.zheng@intel.com>
 */

#define pr_fmt(fmt) "ACPI: AML: " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/sched.h>
#include <linux/kthread.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/circ_buf.h>
#include <linux/acpi.h>
#include "internal.h"

#define ACPI_AML_BUF_ALIGN	(sizeof (acpi_size))
#define ACPI_AML_BUF_SIZE	PAGE_SIZE

#define circ_count(circ) \
	(CIRC_CNT((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_count_to_end(circ) \
	(CIRC_CNT_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space(circ) \
	(CIRC_SPACE((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
#define circ_space_to_end(circ) \
	(CIRC_SPACE_TO_END((circ)->head, (circ)->tail, ACPI_AML_BUF_SIZE))
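
/*
 * Note: the <linux/circ_buf.h> helpers above assume ACPI_AML_BUF_SIZE is a
 * power of two, which PAGE_SIZE always is.  Producers advance 'head' and
 * consumers advance 'tail', both masked with (ACPI_AML_BUF_SIZE - 1), so
 * the indexes never have to wrap explicitly.  Illustrative producer step
 * (sketch only, matching the pattern used by the helpers below):
 *
 *	n = min(len, circ_space_to_end(crc));	// contiguous free bytes
 *	memcpy(&crc->buf[crc->head], src, n);
 *	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
 */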

#define ACPI_AML_OPENED		0x0001
#define ACPI_AML_CLOSED		0x0002
#define ACPI_AML_IN_USER	0x0004 /* user space is writing cmd */
#define ACPI_AML_IN_KERN	0x0008 /* kernel space is reading cmd */
#define ACPI_AML_OUT_USER	0x0010 /* user space is reading log */
#define ACPI_AML_OUT_KERN	0x0020 /* kernel space is writing log */
#define ACPI_AML_USER		(ACPI_AML_IN_USER | ACPI_AML_OUT_USER)
#define ACPI_AML_KERN		(ACPI_AML_IN_KERN | ACPI_AML_OUT_KERN)
#define ACPI_AML_BUSY		(ACPI_AML_USER | ACPI_AML_KERN)
#define ACPI_AML_OPEN		(ACPI_AML_OPENED | ACPI_AML_CLOSED)
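
/*
 * Lifecycle and ownership bits kept in acpi_aml_io.flags: OPENED/CLOSED
 * track whether the debugger interface is usable or being torn down, while
 * the IN_USER/IN_KERN/OUT_USER/OUT_KERN bits act as per-direction busy
 * markers so that at most one user-space and one kernel-space accessor
 * touches a given circular buffer at a time.
 */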

struct acpi_aml_io {
	wait_queue_head_t wait;
	unsigned long flags;
	unsigned long users;
	struct mutex lock;
	struct task_struct *thread;
	char out_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf out_crc;
	char in_buf[ACPI_AML_BUF_SIZE] __aligned(ACPI_AML_BUF_ALIGN);
	struct circ_buf in_crc;
	acpi_osd_exec_callback function;
	void *context;
	unsigned long usages;
};

static struct acpi_aml_io acpi_aml_io;
static bool acpi_aml_initialized;
static struct file *acpi_aml_active_reader;
static struct dentry *acpi_aml_dentry;

static inline bool __acpi_aml_running(void)
{
	return acpi_aml_io.thread ? true : false;
}

static inline bool __acpi_aml_access_ok(unsigned long flag)
{
	/*
	 * If the debugger interface is in the opened state
	 * (OPENED && !CLOSED), the debugger buffers may be accessed from
	 * either user space or kernel space.
	 * In addition, for kernel space, only the debugger thread
	 * (matched by thread ID) is allowed to access them.
	 */
	if (!(acpi_aml_io.flags & ACPI_AML_OPENED) ||
	    (acpi_aml_io.flags & ACPI_AML_CLOSED) ||
	    !__acpi_aml_running())
		return false;
	if ((flag & ACPI_AML_KERN) &&
	    current != acpi_aml_io.thread)
		return false;
	return true;
}

static inline bool __acpi_aml_readable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * No other read is in progress and there is data in the buffer
	 * available for reading.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_count(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_writable(struct circ_buf *circ, unsigned long flag)
{
	/*
	 * No other write is in progress and there is buffer space
	 * available for writing.
	 */
	if (!(acpi_aml_io.flags & flag) && circ_space(circ))
		return true;
	return false;
}

static inline bool __acpi_aml_busy(void)
{
	if (acpi_aml_io.flags & ACPI_AML_BUSY)
		return true;
	return false;
}

static inline bool __acpi_aml_opened(void)
{
	if (acpi_aml_io.flags & ACPI_AML_OPEN)
		return true;
	return false;
}

static inline bool __acpi_aml_used(void)
{
	return acpi_aml_io.usages ? true : false;
}

static inline bool acpi_aml_running(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_running();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_busy(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_busy();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_used(void)
{
	bool ret;

	/*
	 * The usage count is used to avoid races between the starts and
	 * the stops of the debugger thread.
	 */
	mutex_lock(&acpi_aml_io.lock);
	ret = __acpi_aml_used();
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_KERN) ||
	      __acpi_aml_readable(&acpi_aml_io.in_crc, ACPI_AML_IN_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_kern_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_KERN) ||
	      __acpi_aml_writable(&acpi_aml_io.out_crc, ACPI_AML_OUT_KERN);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_readable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_OUT_USER) ||
	      __acpi_aml_readable(&acpi_aml_io.out_crc, ACPI_AML_OUT_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static bool acpi_aml_user_writable(void)
{
	bool ret;

	mutex_lock(&acpi_aml_io.lock);
	ret = !__acpi_aml_access_ok(ACPI_AML_IN_USER) ||
	      __acpi_aml_writable(&acpi_aml_io.in_crc, ACPI_AML_IN_USER);
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_write(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_writable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static int acpi_aml_lock_read(struct circ_buf *circ, unsigned long flag)
{
	int ret = 0;

	mutex_lock(&acpi_aml_io.lock);
	if (!__acpi_aml_access_ok(flag)) {
		ret = -EFAULT;
		goto out;
	}
	if (!__acpi_aml_readable(circ, flag)) {
		ret = -EAGAIN;
		goto out;
	}
	acpi_aml_io.flags |= flag;
out:
	mutex_unlock(&acpi_aml_io.lock);
	return ret;
}

static void acpi_aml_unlock_fifo(unsigned long flag, bool wakeup)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.flags &= ~flag;
	if (wakeup)
		wake_up_interruptible(&acpi_aml_io.wait);
	mutex_unlock(&acpi_aml_io.lock);
}

static int acpi_aml_write_kern(const char *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_OUT_KERN);
	if (ret < 0)
		return ret;
	/* sync tail before inserting logs */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	memcpy(p, buf, n);
	/* sync head after inserting logs */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_OUT_KERN, true);
	return n;
}

static int acpi_aml_readb_kern(void)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_IN_KERN);
	if (ret < 0)
		return ret;
	/* sync head before removing cmds */
	smp_rmb();
	p = &crc->buf[crc->tail];
	ret = (int)*p;
	/* sync tail before inserting cmds */
	smp_mb();
	crc->tail = (crc->tail + 1) & (ACPI_AML_BUF_SIZE - 1);
	acpi_aml_unlock_fifo(ACPI_AML_IN_KERN, true);
	return ret;
}
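
/*
 * The smp_mb()/smp_rmb()/smp_wmb() calls above follow the usual circ_buf
 * producer/consumer pattern: a producer makes the data visible before
 * publishing the new 'head', and a consumer reads 'head' before the data
 * and only then publishes the new 'tail'.  See the circular buffers
 * chapter of the kernel documentation for the canonical description of
 * this pairing.
 */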

/*
 * acpi_aml_write_log() - Capture debugger output
 * @msg: the debugger output
 *
 * This function should be used to implement acpi_os_printf() to filter out
 * the debugger output and store the output into the debugger interface
 * buffer. Return the size of stored logs or errno.
 */
static ssize_t acpi_aml_write_log(const char *msg)
{
	int ret = 0;
	int count = 0, size = 0;

	if (!acpi_aml_initialized)
		return -ENODEV;
	if (msg)
		count = strlen(msg);

	while (count > 0) {
again:
		ret = acpi_aml_write_kern(msg + size, count);
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_writable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0)
			break;
		size += ret;
		count -= ret;
	}
	return size > 0 ? size : ret;
}
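
/*
 * Hook-up note (sketch, not part of this file): the OS services layer is
 * expected to route debugger output here through the registered
 * acpi_debugger_ops, roughly:
 *
 *	acpi_os_printf()/acpi_os_vprintf()
 *		-> acpi_debugger_write_log(buffer)
 *			-> acpi_aml_debugger.write_log() == acpi_aml_write_log()
 *
 * falling back to printk() when the log cannot be stored, e.g. while the
 * debugger interface is not opened.
 */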

/*
 * acpi_aml_read_cmd() - Capture debugger input
 * @msg: the debugger input
 * @count: the size of the debugger input
 *
 * This function should be used to implement acpi_os_get_line() to capture
 * the debugger input commands and store the input commands into the
 * debugger interface buffer. Return the size of stored commands or errno.
 */
static ssize_t acpi_aml_read_cmd(char *msg, size_t count)
{
	int ret = 0;
	int size = 0;

	/*
	 * The debugger thread can only be running after initialization has
	 * finished, so this can only trigger if a bug is introduced.
	 */
	BUG_ON(!acpi_aml_initialized);
	while (count > 0) {
again:
		/*
		 * Check each input byte to find the end of the command.
		 */
		ret = acpi_aml_readb_kern();
		if (ret == -EAGAIN) {
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_kern_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0)
			break;
		*(msg + size) = (char)ret;
		size++;
		count--;
		if (ret == '\n') {
			/*
			 * acpi_os_get_line() requires a zero terminated
			 * command string.
			 */
			*(msg + size - 1) = '\0';
			break;
		}
	}
	return size > 0 ? size : ret;
}
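
/*
 * Hook-up note (sketch, not part of this file): the expected caller chain
 * for command input mirrors the output path, roughly:
 *
 *	acpi_os_get_line()
 *		-> acpi_debugger_read_cmd(buffer, length)
 *			-> acpi_aml_debugger.read_cmd() == acpi_aml_read_cmd()
 *
 * which blocks the debugger thread until user space has written a full,
 * newline-terminated command into in_buf.
 */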

static int acpi_aml_thread(void *unused)
{
	acpi_osd_exec_callback function = NULL;
	void *context;

	mutex_lock(&acpi_aml_io.lock);
	if (acpi_aml_io.function) {
		acpi_aml_io.usages++;
		function = acpi_aml_io.function;
		context = acpi_aml_io.context;
	}
	mutex_unlock(&acpi_aml_io.lock);

	if (function)
		function(context);

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.usages--;
	if (!__acpi_aml_used()) {
		acpi_aml_io.thread = NULL;
		wake_up(&acpi_aml_io.wait);
	}
	mutex_unlock(&acpi_aml_io.lock);

	return 0;
}

/*
 * acpi_aml_create_thread() - Create AML debugger thread
 * @function: the debugger thread callback
 * @context: the context to be passed to the debugger thread
 *
 * This function should be used to implement acpi_os_execute() which is
 * used by the ACPICA debugger to create the debugger thread.
 */
static int acpi_aml_create_thread(acpi_osd_exec_callback function, void *context)
{
	struct task_struct *t;

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.function = function;
	acpi_aml_io.context = context;
	mutex_unlock(&acpi_aml_io.lock);

	t = kthread_create(acpi_aml_thread, NULL, "aml");
	if (IS_ERR(t)) {
		pr_err("Failed to create AML debugger thread.\n");
		return PTR_ERR(t);
	}

	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.thread = t;
	acpi_set_debugger_thread_id((acpi_thread_id)(unsigned long)t);
	wake_up_process(t);
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}
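
/*
 * Hook-up note (sketch, not part of this file): when the ACPICA debugger
 * core asks acpi_os_execute() to spawn its command loop, the OS services
 * layer is expected to forward the request as
 * acpi_debugger_create_thread(function, context), which lands here and
 * hands the callback to the "aml" kthread started above.
 */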

static int acpi_aml_wait_command_ready(bool single_step,
				       char *buffer, size_t length)
{
	acpi_status status;

	if (single_step)
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_EXECUTE_PROMPT);
	else
		acpi_os_printf("\n%1c ", ACPI_DEBUGGER_COMMAND_PROMPT);

	status = acpi_os_get_line(buffer, length, NULL);
	if (ACPI_FAILURE(status))
		return -EINVAL;
	return 0;
}

static int acpi_aml_notify_command_complete(void)
{
	return 0;
}

static int acpi_aml_open(struct inode *inode, struct file *file)
{
	int ret = 0;
	acpi_status status;

	mutex_lock(&acpi_aml_io.lock);
	/*
	 * The debugger interface is being closed; no new users are allowed
	 * during this period.
	 */
	if (acpi_aml_io.flags & ACPI_AML_CLOSED) {
		ret = -EBUSY;
		goto err_lock;
	}
	if ((file->f_flags & O_ACCMODE) != O_WRONLY) {
		/*
		 * Only one reader is allowed to initiate the debugger
		 * thread.
		 */
		if (acpi_aml_active_reader) {
			ret = -EBUSY;
			goto err_lock;
		} else {
			pr_debug("Opening debugger reader.\n");
			acpi_aml_active_reader = file;
		}
	} else {
		/*
		 * No writer is allowed unless the debugger thread is
		 * ready.
		 */
		if (!(acpi_aml_io.flags & ACPI_AML_OPENED)) {
			ret = -ENODEV;
			goto err_lock;
		}
	}
	if (acpi_aml_active_reader == file) {
		pr_debug("Opening debugger interface.\n");
		mutex_unlock(&acpi_aml_io.lock);

		pr_debug("Initializing debugger thread.\n");
		status = acpi_initialize_debugger();
		if (ACPI_FAILURE(status)) {
			pr_err("Failed to initialize debugger.\n");
			ret = -EINVAL;
			goto err_exit;
		}
		pr_debug("Debugger thread initialized.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags |= ACPI_AML_OPENED;
		acpi_aml_io.out_crc.head = acpi_aml_io.out_crc.tail = 0;
		acpi_aml_io.in_crc.head = acpi_aml_io.in_crc.tail = 0;
		pr_debug("Debugger interface opened.\n");
	}
	acpi_aml_io.users++;
err_lock:
	if (ret < 0) {
		if (acpi_aml_active_reader == file)
			acpi_aml_active_reader = NULL;
	}
	mutex_unlock(&acpi_aml_io.lock);
err_exit:
	return ret;
}

static int acpi_aml_release(struct inode *inode, struct file *file)
{
	mutex_lock(&acpi_aml_io.lock);
	acpi_aml_io.users--;
	if (file == acpi_aml_active_reader) {
		pr_debug("Closing debugger reader.\n");
		acpi_aml_active_reader = NULL;

		pr_debug("Closing debugger interface.\n");
		acpi_aml_io.flags |= ACPI_AML_CLOSED;

		/*
		 * Wake up all user space/kernel space blocked
		 * readers/writers.
		 */
		wake_up_interruptible(&acpi_aml_io.wait);
		mutex_unlock(&acpi_aml_io.lock);
		/*
		 * Wait for all user space/kernel space readers/writers to
		 * stop so that the ACPICA command loop of the debugger
		 * thread fails all of its command line reads after this
		 * point.
		 */
		wait_event(acpi_aml_io.wait, !acpi_aml_busy());

		/*
		 * Then try to terminate the debugger thread if it has not
		 * terminated yet.
		 */
		pr_debug("Terminating debugger thread.\n");
		acpi_terminate_debugger();
		wait_event(acpi_aml_io.wait, !acpi_aml_used());
		pr_debug("Debugger thread terminated.\n");

		mutex_lock(&acpi_aml_io.lock);
		acpi_aml_io.flags &= ~ACPI_AML_OPENED;
	}
	if (acpi_aml_io.users == 0) {
		pr_debug("Debugger interface closed.\n");
		acpi_aml_io.flags &= ~ACPI_AML_CLOSED;
	}
	mutex_unlock(&acpi_aml_io.lock);
	return 0;
}

static int acpi_aml_read_user(char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.out_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_read(crc, ACPI_AML_OUT_USER);
	if (ret < 0)
		return ret;
	/* sync head before removing logs */
	smp_rmb();
	p = &crc->buf[crc->tail];
	n = min(len, circ_count_to_end(crc));
	if (copy_to_user(buf, p, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync tail after removing logs */
	smp_mb();
	crc->tail = (crc->tail + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_OUT_USER, ret >= 0);
	return ret;
}

static ssize_t acpi_aml_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_read_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_user_readable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
			break;
		}
	}
	return size > 0 ? size : ret;
}

static int acpi_aml_write_user(const char __user *buf, int len)
{
	int ret;
	struct circ_buf *crc = &acpi_aml_io.in_crc;
	int n;
	char *p;

	ret = acpi_aml_lock_write(crc, ACPI_AML_IN_USER);
	if (ret < 0)
		return ret;
	/* sync tail before inserting cmds */
	smp_mb();
	p = &crc->buf[crc->head];
	n = min(len, circ_space_to_end(crc));
	if (copy_from_user(p, buf, n)) {
		ret = -EFAULT;
		goto out;
	}
	/* sync head after inserting cmds */
	smp_wmb();
	crc->head = (crc->head + n) & (ACPI_AML_BUF_SIZE - 1);
	ret = n;
out:
	acpi_aml_unlock_fifo(ACPI_AML_IN_USER, ret >= 0);
	return ret;
}

static ssize_t acpi_aml_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	int ret = 0;
	int size = 0;

	if (!count)
		return 0;
	if (!access_ok(buf, count))
		return -EFAULT;

	while (count > 0) {
again:
		ret = acpi_aml_write_user(buf + size, count);
		if (ret == -EAGAIN) {
			if (file->f_flags & O_NONBLOCK)
				break;
			ret = wait_event_interruptible(acpi_aml_io.wait,
				acpi_aml_user_writable());
			/*
			 * We need to retry when the condition becomes
			 * true.
			 */
			if (ret == 0)
				goto again;
		}
		if (ret < 0) {
			if (!acpi_aml_running())
				ret = 0;
			break;
		}
		if (ret) {
			size += ret;
			count -= ret;
			*ppos += ret;
		}
	}
	return size > 0 ? size : ret;
}

static __poll_t acpi_aml_poll(struct file *file, poll_table *wait)
{
	__poll_t masks = 0;

	poll_wait(file, &acpi_aml_io.wait, wait);
	if (acpi_aml_user_readable())
		masks |= EPOLLIN | EPOLLRDNORM;
	if (acpi_aml_user_writable())
		masks |= EPOLLOUT | EPOLLWRNORM;

	return masks;
}

static const struct file_operations acpi_aml_operations = {
	.read		= acpi_aml_read,
	.write		= acpi_aml_write,
	.poll		= acpi_aml_poll,
	.open		= acpi_aml_open,
	.release	= acpi_aml_release,
	.llseek		= generic_file_llseek,
};
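
/*
 * User-space usage sketch (assumptions: debugfs is mounted at the usual
 * /sys/kernel/debug and CONFIG_ACPI_DEBUGGER_USER is enabled): the
 * debugger is driven through the "acpi/acpidbg" debugfs node created
 * below, normally via the tools/power/acpi/tools/acpidbg client, which
 * opens the node read-write, polls it, forwards typed commands with
 * write() and echoes the AML interpreter output returned by read().
 */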

static const struct acpi_debugger_ops acpi_aml_debugger = {
	.create_thread		 = acpi_aml_create_thread,
	.read_cmd		 = acpi_aml_read_cmd,
	.write_log		 = acpi_aml_write_log,
	.wait_command_ready	 = acpi_aml_wait_command_ready,
	.notify_command_complete = acpi_aml_notify_command_complete,
};

static int __init acpi_aml_init(void)
{
	int ret;

	if (acpi_disabled)
		return -ENODEV;

	/* Initialize AML IO interface */
	mutex_init(&acpi_aml_io.lock);
	init_waitqueue_head(&acpi_aml_io.wait);
	acpi_aml_io.out_crc.buf = acpi_aml_io.out_buf;
	acpi_aml_io.in_crc.buf = acpi_aml_io.in_buf;

	acpi_aml_dentry = debugfs_create_file("acpidbg",
					      S_IFREG | S_IRUGO | S_IWUSR,
					      acpi_debugfs_dir, NULL,
					      &acpi_aml_operations);

	ret = acpi_register_debugger(THIS_MODULE, &acpi_aml_debugger);
	if (ret) {
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		return ret;
	}

	acpi_aml_initialized = true;
	return 0;
}

static void __exit acpi_aml_exit(void)
{
	if (acpi_aml_initialized) {
		acpi_unregister_debugger(&acpi_aml_debugger);
		debugfs_remove(acpi_aml_dentry);
		acpi_aml_dentry = NULL;
		acpi_aml_initialized = false;
	}
}

module_init(acpi_aml_init);
module_exit(acpi_aml_exit);

MODULE_AUTHOR("Lv Zheng");
MODULE_DESCRIPTION("ACPI debugger userspace IO driver");
MODULE_LICENSE("GPL");