// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 */
#include <linux/arm-smccc.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include "optee_private.h"
#include "optee_smc.h"
#define CREATE_TRACE_POINTS
#include "optee_trace.h"

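/*
 * Secure world has a limited number of threads available to serve SMCs.
 * A caller that fails to get a thread joins the queue of waiters below
 * and is woken up when another caller releases its thread, so no CPU
 * spins on OPTEE_SMC_RETURN_ETHREAD_LIMIT.
 */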
struct optee_call_waiter {
	struct list_head list_node;
	struct completion c;
};

static void optee_cq_wait_init(struct optee_call_queue *cq,
			       struct optee_call_waiter *w)
{
	/*
	 * We're preparing to make a call to secure world. In case we can't
	 * allocate a thread in secure world we'll end up waiting in
	 * optee_cq_wait_for_completion().
	 *
	 * Normally if there's no contention in secure world the call will
	 * complete and we can clean up directly with optee_cq_wait_final().
	 */
	mutex_lock(&cq->mutex);

	/*
	 * We add ourselves to the queue, but we don't wait. This
	 * guarantees that we don't lose a completion if secure world
	 * returns busy and another thread just exited and tried to
	 * complete someone.
	 */
	init_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

static void optee_cq_wait_for_completion(struct optee_call_queue *cq,
					 struct optee_call_waiter *w)
{
	wait_for_completion(&w->c);

	mutex_lock(&cq->mutex);

	/* Move to end of list to get out of the way for other waiters */
	list_del(&w->list_node);
	reinit_completion(&w->c);
	list_add_tail(&w->list_node, &cq->waiters);

	mutex_unlock(&cq->mutex);
}

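/* Complete the first waiter in the queue that isn't already completed */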
static void optee_cq_complete_one(struct optee_call_queue *cq)
{
	struct optee_call_waiter *w;

	list_for_each_entry(w, &cq->waiters, list_node) {
		if (!completion_done(&w->c)) {
			complete(&w->c);
			break;
		}
	}
}

static void optee_cq_wait_final(struct optee_call_queue *cq,
				struct optee_call_waiter *w)
{
	/*
	 * We're done with the call to secure world. The thread in secure
	 * world that was used for this call is now available for some
	 * other task to use.
	 */
	mutex_lock(&cq->mutex);

	/* Get out of the list */
	list_del(&w->list_node);

	/* Wake up one eventual waiting task */
	optee_cq_complete_one(cq);

	/*
	 * If we're completed we've got a completion from another task that
	 * was just done with its call to secure world. Since yet another
	 * thread is now available in secure world wake up another eventual
	 * waiting task.
	 */
	if (completion_done(&w->c))
		optee_cq_complete_one(cq);

	mutex_unlock(&cq->mutex);
}

/* Requires the filpstate mutex to be held */
static struct optee_session *find_session(struct optee_context_data *ctxdata,
					  u32 session_id)
{
	struct optee_session *sess;

	list_for_each_entry(sess, &ctxdata->sess_list, list_node)
		if (sess->session_id == session_id)
			return sess;

	return NULL;
}

/**
 * optee_do_call_with_arg() - Do an SMC to OP-TEE in secure world
 * @ctx:	calling context
 * @parg:	physical address of message to pass to secure world
 *
 * Does an SMC to OP-TEE in secure world and handles eventual resulting
 * Remote Procedure Calls (RPC) from OP-TEE.
 *
 * Returns return code from secure world, 0 is OK
 */
u32 optee_do_call_with_arg(struct tee_context *ctx, phys_addr_t parg)
{
	struct optee *optee = tee_get_drvdata(ctx->teedev);
	struct optee_call_waiter w;
	struct optee_rpc_param param = { };
	struct optee_call_ctx call_ctx = { };
	u32 ret;

	param.a0 = OPTEE_SMC_CALL_WITH_ARG;
	reg_pair_from_64(&param.a1, &param.a2, parg);
	/* Initialize waiter */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		trace_optee_invoke_fn_begin(&param);
		optee->invoke_fn(param.a0, param.a1, param.a2, param.a3,
				 param.a4, param.a5, param.a6, param.a7,
				 &res);
		trace_optee_invoke_fn_end(&param, &res);

		if (res.a0 == OPTEE_SMC_RETURN_ETHREAD_LIMIT) {
			/*
			 * Out of threads in secure world, wait for a
			 * thread to become available.
			 */
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		} else if (OPTEE_SMC_RETURN_IS_RPC(res.a0)) {
			cond_resched();
			param.a0 = res.a0;
			param.a1 = res.a1;
			param.a2 = res.a2;
			param.a3 = res.a3;
			optee_handle_rpc(ctx, &param, &call_ctx);
		} else {
			ret = res.a0;
			break;
		}
	}

	optee_rpc_finalize_call(&call_ctx);
	/*
	 * We're done with our thread in secure world, if there are any
	 * thread waiters wake up one.
	 */
	optee_cq_wait_final(&optee->call_queue, &w);

	return ret;
}

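/*
 * Allocate shared memory holding an OPTEE_MSG argument struct with room
 * for @num_params parameters, returning the shm object plus the kernel
 * and physical addresses of the zeroed message buffer.
 */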
static struct tee_shm *get_msg_arg(struct tee_context *ctx, size_t num_params,
				   struct optee_msg_arg **msg_arg,
				   phys_addr_t *msg_parg)
{
	struct tee_shm *shm;
	struct optee_msg_arg *ma;
	int rc;

	shm = tee_shm_alloc(ctx, OPTEE_MSG_GET_ARG_SIZE(num_params),
			    TEE_SHM_MAPPED);
	if (IS_ERR(shm))
		return shm;

	ma = tee_shm_get_va(shm, 0);
	if (IS_ERR(ma)) {
		rc = PTR_ERR(ma);
		goto out;
	}

	rc = tee_shm_get_pa(shm, 0, msg_parg);
	if (rc)
		goto out;

	memset(ma, 0, OPTEE_MSG_GET_ARG_SIZE(num_params));
	ma->num_params = num_params;
	*msg_arg = ma;

	return shm;
out:
	tee_shm_free(shm);
	return ERR_PTR(rc);
}

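/*
 * Message layout for opening a session: params[0] and params[1] are
 * meta parameters carrying the TA UUID and the client login/UUID, the
 * caller's own parameters follow at params[2] and onwards.
 */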
int optee_open_session(struct tee_context *ctx,
		       struct tee_ioctl_open_session_arg *arg,
		       struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	int rc;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess = NULL;

	/* +2 for the meta parameters added below */
	shm = get_msg_arg(ctx, arg->num_params + 2, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_OPEN_SESSION;
	msg_arg->cancel_id = arg->cancel_id;

	/*
	 * Initialize and add the meta parameters needed when opening a
	 * session.
	 */
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	msg_arg->params[1].attr = OPTEE_MSG_ATTR_TYPE_VALUE_INPUT |
				  OPTEE_MSG_ATTR_META;
	memcpy(&msg_arg->params[0].u.value, arg->uuid, sizeof(arg->uuid));
	msg_arg->params[1].u.value.c = arg->clnt_login;

	rc = tee_session_calc_client_uuid((uuid_t *)&msg_arg->params[1].u.value,
					  arg->clnt_login, arg->clnt_uuid);
	if (rc)
		goto out;

	rc = optee_to_msg_param(msg_arg->params + 2, arg->num_params, param);
	if (rc)
		goto out;

	sess = kzalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		rc = -ENOMEM;
		goto out;
	}

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (msg_arg->ret == TEEC_SUCCESS) {
		/* A new session has been created, add it to the list. */
		sess->session_id = msg_arg->session;
		mutex_lock(&ctxdata->mutex);
		list_add(&sess->list_node, &ctxdata->sess_list);
		mutex_unlock(&ctxdata->mutex);
	} else {
		kfree(sess);
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params + 2)) {
		arg->ret = TEEC_ERROR_COMMUNICATION;
		arg->ret_origin = TEEC_ORIGIN_COMMS;
		/* Close session again to avoid leakage */
		optee_close_session(ctx, msg_arg->session);
	} else {
		arg->session = msg_arg->session;
		arg->ret = msg_arg->ret;
		arg->ret_origin = msg_arg->ret_origin;
	}
out:
	tee_shm_free(shm);

	return rc;
}

int optee_close_session(struct tee_context *ctx, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid and remove it from the list */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	if (sess)
		list_del(&sess->list_node);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;
	kfree(sess);

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CLOSE_SESSION;
	msg_arg->session = session;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

int optee_invoke_func(struct tee_context *ctx, struct tee_ioctl_invoke_arg *arg,
		      struct tee_param *param)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;
	int rc;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, arg->session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, arg->num_params, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);
	msg_arg->cmd = OPTEE_MSG_CMD_INVOKE_COMMAND;
	msg_arg->func = arg->func;
	msg_arg->session = arg->session;
	msg_arg->cancel_id = arg->cancel_id;

	rc = optee_to_msg_param(msg_arg->params, arg->num_params, param);
	if (rc)
		goto out;

	if (optee_do_call_with_arg(ctx, msg_parg)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	if (optee_from_msg_param(param, arg->num_params, msg_arg->params)) {
		msg_arg->ret = TEEC_ERROR_COMMUNICATION;
		msg_arg->ret_origin = TEEC_ORIGIN_COMMS;
	}

	arg->ret = msg_arg->ret;
	arg->ret_origin = msg_arg->ret_origin;
out:
	tee_shm_free(shm);
	return rc;
}

int optee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
	struct optee_context_data *ctxdata = ctx->data;
	struct tee_shm *shm;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	struct optee_session *sess;

	/* Check that the session is valid */
	mutex_lock(&ctxdata->mutex);
	sess = find_session(ctxdata, session);
	mutex_unlock(&ctxdata->mutex);
	if (!sess)
		return -EINVAL;

	shm = get_msg_arg(ctx, 0, &msg_arg, &msg_parg);
	if (IS_ERR(shm))
		return PTR_ERR(shm);

	msg_arg->cmd = OPTEE_MSG_CMD_CANCEL;
	msg_arg->session = session;
	msg_arg->cancel_id = cancel_id;
	optee_do_call_with_arg(ctx, msg_parg);

	tee_shm_free(shm);
	return 0;
}

/**
 * optee_enable_shm_cache() - Enables caching of some shared memory allocation
 *			      in OP-TEE
 * @optee:	main service struct
 */
void optee_enable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		struct arm_smccc_res res;

		optee->invoke_fn(OPTEE_SMC_ENABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res);
		if (res.a0 == OPTEE_SMC_RETURN_OK)
			break;
		optee_cq_wait_for_completion(&optee->call_queue, &w);
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

/**
 * optee_disable_shm_cache() - Disables caching of some shared memory
 *			       allocation in OP-TEE
 * @optee:	main service struct
 */
void optee_disable_shm_cache(struct optee *optee)
{
	struct optee_call_waiter w;

	/* We need to retry until secure world isn't busy. */
	optee_cq_wait_init(&optee->call_queue, &w);
	while (true) {
		union {
			struct arm_smccc_res smccc;
			struct optee_smc_disable_shm_cache_result result;
		} res;

		optee->invoke_fn(OPTEE_SMC_DISABLE_SHM_CACHE, 0, 0, 0, 0, 0, 0,
				 0, &res.smccc);
		if (res.result.status == OPTEE_SMC_RETURN_ENOTAVAIL)
			break; /* All shm's freed */
		if (res.result.status == OPTEE_SMC_RETURN_OK) {
			struct tee_shm *shm;

			shm = reg_pair_to_ptr(res.result.shm_upper32,
					      res.result.shm_lower32);
			tee_shm_free(shm);
		} else {
			optee_cq_wait_for_completion(&optee->call_queue, &w);
		}
	}
	optee_cq_wait_final(&optee->call_queue, &w);
}

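/*
 * The rest of this file implements the page list format used with
 * OPTEE_MSG_ATTR_NONCONTIG to describe noncontiguous shared buffers:
 * physical addresses of 4k chunks are packed into 4k pages that are
 * chained together through their last entry.
 */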
#define PAGELIST_ENTRIES_PER_PAGE \
	((OPTEE_MSG_NONCONTIG_PAGE_SIZE / sizeof(u64)) - 1)

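/*
 * One entry per pagelist page is reserved for the link to the next
 * pagelist page. With the usual OPTEE_MSG_NONCONTIG_PAGE_SIZE of 4096
 * bytes that leaves 4096 / sizeof(u64) - 1 = 511 usable entries.
 */
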
/**
 * optee_fill_pages_list() - write list of user pages to given shared
 * buffer.
 *
 * @dst: page-aligned buffer where list of pages will be stored
 * @pages: array of pages that represents shared buffer
 * @num_pages: number of entries in @pages
 * @page_offset: offset of user buffer from page start
 *
 * @dst should be big enough to hold list of user page addresses and
 * links to the next pages of buffer
 */
void optee_fill_pages_list(u64 *dst, struct page **pages, int num_pages,
			   size_t page_offset)
{
	int n = 0;
	phys_addr_t optee_page;
	/*
	 * Refer to OPTEE_MSG_ATTR_NONCONTIG description in optee_msg.h
	 * for details.
	 */
	struct {
		u64 pages_list[PAGELIST_ENTRIES_PER_PAGE];
		u64 next_page_data;
	} *pages_data;

	/*
	 * Currently OP-TEE uses 4k page size and it does not look like
	 * this will change in the future. On the other hand, there are
	 * no known ARM architectures with a page size smaller than 4k.
	 * Thus the following build assert looks redundant. But the code
	 * below heavily relies on this assumption, so it is better to be
	 * safe than sorry.
	 */
	BUILD_BUG_ON(PAGE_SIZE < OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	pages_data = (void *)dst;
	/*
	 * If a Linux page is bigger than 4k, and the user buffer offset
	 * is larger than 4k/8k/12k/etc, this skips the first 4k chunks,
	 * because they carry no data of value to OP-TEE.
	 */
	optee_page = page_to_phys(*pages) +
		round_down(page_offset, OPTEE_MSG_NONCONTIG_PAGE_SIZE);

	while (true) {
		pages_data->pages_list[n++] = optee_page;

		if (n == PAGELIST_ENTRIES_PER_PAGE) {
			pages_data->next_page_data =
				virt_to_phys(pages_data + 1);
			pages_data++;
			n = 0;
		}

		optee_page += OPTEE_MSG_NONCONTIG_PAGE_SIZE;
		if (!(optee_page & ~PAGE_MASK)) {
			if (!--num_pages)
				break;
			pages++;
			optee_page = page_to_phys(*pages);
		}
	}
}

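/*
 * Example: with 4k Linux pages, a three-page buffer at offset 0 makes
 * the loop above store pa(page0), pa(page1) and pa(page2) in the first
 * three entries of dst; a link entry is only written once a pagelist
 * page fills up with all PAGELIST_ENTRIES_PER_PAGE entries.
 */
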
/*
 * The final entry in each pagelist page is a pointer to the next
 * pagelist page.
 */
static size_t get_pages_list_size(size_t num_entries)
{
	int pages = DIV_ROUND_UP(num_entries, PAGELIST_ENTRIES_PER_PAGE);

	return pages * OPTEE_MSG_NONCONTIG_PAGE_SIZE;
}

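/*
 * E.g. num_entries = 1000 with 511 usable entries per pagelist page
 * gives DIV_ROUND_UP(1000, 511) = 2 pages, i.e. 8192 bytes.
 */
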
u64 *optee_allocate_pages_list(size_t num_entries)
{
	return alloc_pages_exact(get_pages_list_size(num_entries), GFP_KERNEL);
}

void optee_free_pages_list(void *list, size_t num_entries)
{
	free_pages_exact(list, get_pages_list_size(num_entries));
}

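/*
 * Only normal, cached memory may be shared with OP-TEE: secure world
 * maps shared buffers as normal memory, and mapping the same physical
 * memory with mismatched attributes would break coherency.
 */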
static bool is_normal_memory(pgprot_t p)
{
#if defined(CONFIG_ARM)
	return (((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEALLOC) ||
		((pgprot_val(p) & L_PTE_MT_MASK) == L_PTE_MT_WRITEBACK));
#elif defined(CONFIG_ARM64)
	return (pgprot_val(p) & PTE_ATTRINDX_MASK) == PTE_ATTRINDX(MT_NORMAL);
#else
#error "Unsupported architecture"
#endif
}

static int __check_mem_type(struct vm_area_struct *vma, unsigned long end)
{
	while (vma && is_normal_memory(vma->vm_page_prot)) {
		if (vma->vm_end >= end)
			return 0;
		vma = vma->vm_next;
	}

	return -EINVAL;
}

static int check_mem_type(unsigned long start, size_t num_pages)
{
	struct mm_struct *mm = current->mm;
	int rc;

	/*
	 * Allow kernel addresses to register with OP-TEE as kernel
	 * pages are configured as normal memory only.
	 */
	if (virt_addr_valid(start))
		return 0;

	mmap_read_lock(mm);
	rc = __check_mem_type(find_vma(mm, start),
			      start + num_pages * PAGE_SIZE);
	mmap_read_unlock(mm);

	return rc;
}

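/*
 * Registration passes OP-TEE a temporary-memory (TMEM) parameter whose
 * buf_ptr points at a noncontiguous page list built above rather than
 * at the buffer itself.
 */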
int optee_shm_register(struct tee_context *ctx, struct tee_shm *shm,
		       struct page **pages, size_t num_pages,
		       unsigned long start)
{
	struct tee_shm *shm_arg = NULL;
	struct optee_msg_arg *msg_arg;
	u64 *pages_list;
	phys_addr_t msg_parg;
	int rc;

	if (!num_pages)
		return -EINVAL;

	rc = check_mem_type(start, num_pages);
	if (rc)
		return rc;

	pages_list = optee_allocate_pages_list(num_pages);
	if (!pages_list)
		return -ENOMEM;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg)) {
		rc = PTR_ERR(shm_arg);
		goto out;
	}

	optee_fill_pages_list(pages_list, pages, num_pages,
			      tee_shm_get_page_offset(shm));

	msg_arg->cmd = OPTEE_MSG_CMD_REGISTER_SHM;
	msg_arg->params->attr = OPTEE_MSG_ATTR_TYPE_TMEM_OUTPUT |
				OPTEE_MSG_ATTR_NONCONTIG;
	msg_arg->params->u.tmem.shm_ref = (unsigned long)shm;
	msg_arg->params->u.tmem.size = tee_shm_get_size(shm);
	/*
	 * In the least bits of msg_arg->params->u.tmem.buf_ptr we
	 * store buffer offset from 4k page, as described in OP-TEE ABI.
	 */
	msg_arg->params->u.tmem.buf_ptr = virt_to_phys(pages_list) |
	  (tee_shm_get_page_offset(shm) & (OPTEE_MSG_NONCONTIG_PAGE_SIZE - 1));

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;

	tee_shm_free(shm_arg);
out:
	optee_free_pages_list(pages_list, num_pages);
	return rc;
}

int optee_shm_unregister(struct tee_context *ctx, struct tee_shm *shm)
{
	struct tee_shm *shm_arg;
	struct optee_msg_arg *msg_arg;
	phys_addr_t msg_parg;
	int rc = 0;

	shm_arg = get_msg_arg(ctx, 1, &msg_arg, &msg_parg);
	if (IS_ERR(shm_arg))
		return PTR_ERR(shm_arg);

	msg_arg->cmd = OPTEE_MSG_CMD_UNREGISTER_SHM;
	msg_arg->params[0].attr = OPTEE_MSG_ATTR_TYPE_RMEM_INPUT;
	msg_arg->params[0].u.rmem.shm_ref = (unsigned long)shm;

	if (optee_do_call_with_arg(ctx, msg_parg) ||
	    msg_arg->ret != TEEC_SUCCESS)
		rc = -EINVAL;
	tee_shm_free(shm_arg);
	return rc;
}

int optee_shm_register_supp(struct tee_context *ctx, struct tee_shm *shm,
			    struct page **pages, size_t num_pages,
			    unsigned long start)
{
	/*
	 * We don't want to register supplicant memory in OP-TEE.
	 * Instead information about it will be passed in RPC code.
	 */
	return check_mem_type(start, num_pages);
}

int optee_shm_unregister_supp(struct tee_context *ctx, struct tee_shm *shm)
{
	return 0;
}