// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020-21 Intel Corporation.
 */
6 #include "iosm_ipc_imem.h"
7 #include "iosm_ipc_task_queue.h"
9 /* Actual tasklet function, will be called whenever tasklet is scheduled.
10 * Calls event handler involves callback for each element in the message queue
12 static void ipc_task_queue_handler(unsigned long data)
14 struct ipc_task_queue *ipc_task = (struct ipc_task_queue *)data;
15 unsigned int q_rpos = ipc_task->q_rpos;
17 /* Loop over the input queue contents. */
18 while (q_rpos != ipc_task->q_wpos) {
19 /* Get the current first queue element. */
20 struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
22 /* Process the input message. */
24 args->response = args->func(args->ipc_imem, args->arg,
25 args->msg, args->size);
27 /* Signal completion for synchronous calls */
29 complete(args->completion);
31 /* Free message if copy was allocated. */
35 /* Set invalid queue element. Technically
36 * spin_lock_irqsave is not required here as
37 * the array element has been processed already
38 * so we can assume that immediately after processing
39 * ipc_task element, queue will not rotate again to
40 * ipc_task same element within such short time.
42 args->completion = NULL;
46 args->is_copy = false;
48 /* calculate the new read ptr and update the volatile read
51 q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
52 ipc_task->q_rpos = q_rpos;
56 /* Free memory alloc and trigger completions left in the queue during dealloc */
57 static void ipc_task_queue_cleanup(struct ipc_task_queue *ipc_task)
59 unsigned int q_rpos = ipc_task->q_rpos;
61 while (q_rpos != ipc_task->q_wpos) {
62 struct ipc_task_queue_args *args = &ipc_task->args[q_rpos];
65 complete(args->completion);
70 q_rpos = (q_rpos + 1) % IPC_THREAD_QUEUE_SIZE;
71 ipc_task->q_rpos = q_rpos;
75 /* Add a message to the queue and trigger the ipc_task. */
77 ipc_task_queue_add_task(struct iosm_imem *ipc_imem,
79 int (*func)(struct iosm_imem *ipc_imem, int arg,
80 void *msg, size_t size),
81 size_t size, bool is_copy, bool wait)
83 struct tasklet_struct *ipc_tasklet = ipc_imem->ipc_task->ipc_tasklet;
84 struct ipc_task_queue *ipc_task = &ipc_imem->ipc_task->ipc_queue;
85 struct completion completion;
86 unsigned int pos, nextpos;
90 init_completion(&completion);
92 /* tasklet send may be called from both interrupt or thread
93 * context, therefore protect queue operation by spinlock
95 spin_lock_irqsave(&ipc_task->q_lock, flags);
97 pos = ipc_task->q_wpos;
98 nextpos = (pos + 1) % IPC_THREAD_QUEUE_SIZE;
100 /* Get next queue position. */
101 if (nextpos != ipc_task->q_rpos) {
102 /* Get the reference to the queue element and save the passed
105 ipc_task->args[pos].arg = arg;
106 ipc_task->args[pos].msg = msg;
107 ipc_task->args[pos].func = func;
108 ipc_task->args[pos].ipc_imem = ipc_imem;
109 ipc_task->args[pos].size = size;
110 ipc_task->args[pos].is_copy = is_copy;
111 ipc_task->args[pos].completion = wait ? &completion : NULL;
112 ipc_task->args[pos].response = -1;
114 /* apply write barrier so that ipc_task->q_rpos elements
115 * are updated before ipc_task->q_wpos is being updated.
119 /* Update the status of the free queue space. */
120 ipc_task->q_wpos = nextpos;
124 spin_unlock_irqrestore(&ipc_task->q_lock, flags);
127 tasklet_schedule(ipc_tasklet);
130 wait_for_completion(&completion);
131 result = ipc_task->args[pos].response;
134 dev_err(ipc_imem->ipc_task->dev, "queue is full");
140 int ipc_task_queue_send_task(struct iosm_imem *imem,
141 int (*func)(struct iosm_imem *ipc_imem, int arg,
142 void *msg, size_t size),
143 int arg, void *msg, size_t size, bool wait)
145 bool is_copy = false;
150 copy = kmemdup(msg, size, GFP_ATOMIC);
157 ret = ipc_task_queue_add_task(imem, arg, copy, func,
158 size, is_copy, wait);
160 dev_err(imem->ipc_task->dev,
161 "add task failed for %ps %d, %p, %zu, %d", func, arg,
162 copy, size, is_copy);
173 int ipc_task_init(struct ipc_task *ipc_task)
175 struct ipc_task_queue *ipc_queue = &ipc_task->ipc_queue;
177 ipc_task->ipc_tasklet = kzalloc(sizeof(*ipc_task->ipc_tasklet),
180 if (!ipc_task->ipc_tasklet)
183 /* Initialize the spinlock needed to protect the message queue of the
186 spin_lock_init(&ipc_queue->q_lock);
188 tasklet_init(ipc_task->ipc_tasklet, ipc_task_queue_handler,
189 (unsigned long)ipc_queue);
193 void ipc_task_deinit(struct ipc_task *ipc_task)
195 tasklet_kill(ipc_task->ipc_tasklet);
197 kfree(ipc_task->ipc_tasklet);
198 /* This will free/complete any outstanding messages,
199 * without calling the actual handler
201 ipc_task_queue_cleanup(&ipc_task->ipc_queue);