// SPDX-License-Identifier: GPL-2.0-only
/*
 * Arm Firmware Framework for ARMv8-A (FF-A) interface driver
 *
 * The Arm FFA specification[1] describes a software architecture that
 * leverages the virtualization extension to isolate software images
 * provided by an ecosystem of vendors from each other and describes
 * interfaces that standardize communication between the various software
 * images, including communication between images in the Secure world and
 * Normal world. Any hypervisor could use the FFA interfaces to enable
 * communication between the VMs it manages.
 *
 * The hypervisor, a.k.a. the partition manager in FFA terminology, can
 * assign system resources (memory regions, devices, CPU cycles) to the
 * partitions and manage isolation amongst them.
 *
 * [1] https://developer.arm.com/docs/den0077/latest
 *
 * Copyright (C) 2021 ARM Ltd.
 */

#define DRIVER_NAME "ARM FF-A"
#define pr_fmt(fmt) DRIVER_NAME ": " fmt

#include <linux/acpi.h>
#include <linux/arm_ffa.h>
#include <linux/bitfield.h>
#include <linux/cpuhotplug.h>
#include <linux/device.h>
#include <linux/hashtable.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/mutex.h>
#include <linux/of_irq.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/smp.h>
#include <linux/uuid.h>
#include <linux/xarray.h>

#include "common.h"

#define FFA_DRIVER_VERSION	FFA_VERSION_1_1
#define FFA_MIN_VERSION		FFA_VERSION_1_0

#define SENDER_ID_MASK		GENMASK(31, 16)
#define RECEIVER_ID_MASK	GENMASK(15, 0)
#define SENDER_ID(x)		((u16)(FIELD_GET(SENDER_ID_MASK, (x))))
#define RECEIVER_ID(x)		((u16)(FIELD_GET(RECEIVER_ID_MASK, (x))))
#define PACK_TARGET_INFO(s, r)		\
	(FIELD_PREP(SENDER_ID_MASK, (s)) | FIELD_PREP(RECEIVER_ID_MASK, (r)))

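/*
 * Example of the packing above: a sender ID of 0x8001 and a receiver ID of
 * 0x0002 (both hypothetical) pack into the 32-bit target information
 * 0x80010002 - sender in bits [31:16], receiver in bits [15:0].
 */
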
/*
 * Keeping the RX TX buffer size as 4K for now.
 * 64K may be preferred to keep it a minimum of a page in a 64K PAGE_SIZE
 * configuration.
 */
#define RXTX_BUFFER_SIZE	SZ_4K

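/*
 * Note that FFA_RXTX_MAP takes the buffer size as a count of 4K FFA pages
 * (FFA_PAGE_SIZE), not kernel pages: ffa_init() below passes
 * RXTX_BUFFER_SIZE / FFA_PAGE_SIZE == 1 irrespective of the kernel PAGE_SIZE.
 */
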
#define FFA_MAX_NOTIFICATIONS		64

static ffa_fn *invoke_ffa_fn;

static const int ffa_linux_errmap[] = {
	/* better than switch case as long as return value is contiguous */
	0,		/* FFA_RET_SUCCESS */
	-EOPNOTSUPP,	/* FFA_RET_NOT_SUPPORTED */
	-EINVAL,	/* FFA_RET_INVALID_PARAMETERS */
	-ENOMEM,	/* FFA_RET_NO_MEMORY */
	-EBUSY,		/* FFA_RET_BUSY */
	-EINTR,		/* FFA_RET_INTERRUPTED */
	-EACCES,	/* FFA_RET_DENIED */
	-EAGAIN,	/* FFA_RET_RETRY */
	-ECANCELED,	/* FFA_RET_ABORTED */
	-ENODATA,	/* FFA_RET_NO_DATA */
};

static inline int ffa_to_linux_errno(int errno)
{
	int err_idx = -errno;

	if (err_idx >= 0 && err_idx < ARRAY_SIZE(ffa_linux_errmap))
		return ffa_linux_errmap[err_idx];
	return -EINVAL;
}

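/*
 * Worked example: FF-A error codes are small negative integers, e.g.
 * FFA_RET_INVALID_PARAMETERS == -2, so err_idx == 2 indexes the table and
 * ffa_to_linux_errno(-2) yields -EINVAL. Any value outside the table also
 * collapses to -EINVAL as a safe default.
 */
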
struct ffa_pcpu_irq {
	struct ffa_drv_info *info;
};

struct ffa_drv_info {
	u32 version;
	u16 vm_id;
	struct mutex rx_lock; /* lock to protect Rx buffer */
	struct mutex tx_lock; /* lock to protect Tx buffer */
	void *rx_buffer;
	void *tx_buffer;
	bool mem_ops_native;
	bool bitmap_created;
	bool notif_enabled;
	unsigned int sched_recv_irq;
	unsigned int cpuhp_state;
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	struct workqueue_struct *notif_pcpu_wq;
	struct work_struct notif_pcpu_work;
	struct work_struct irq_work;
	struct xarray partition_info;
	unsigned int partition_count;
	DECLARE_HASHTABLE(notifier_hash, ilog2(FFA_MAX_NOTIFICATIONS));
	struct mutex notify_lock; /* lock to protect notifier hashtable */
};

static struct ffa_drv_info *drv_info;

/*
 * The driver must be able to support all the versions from the earliest
 * supported FFA_MIN_VERSION to the latest supported FFA_DRIVER_VERSION.
 * The specification states that if firmware supports an FFA implementation
 * that is incompatible with and at a greater version number than specified
 * by the caller (FFA_DRIVER_VERSION passed as parameter to FFA_VERSION),
 * it must return the NOT_SUPPORTED error code.
 */
static u32 ffa_compatible_version_find(u32 version)
{
	u16 major = FFA_MAJOR_VERSION(version), minor = FFA_MINOR_VERSION(version);
	u16 drv_major = FFA_MAJOR_VERSION(FFA_DRIVER_VERSION);
	u16 drv_minor = FFA_MINOR_VERSION(FFA_DRIVER_VERSION);

	if ((major < drv_major) || (major == drv_major && minor <= drv_minor))
		return version;

	pr_info("Firmware version higher than driver version, downgrading\n");
	return FFA_DRIVER_VERSION;
}

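/*
 * Worked example of the above negotiation with FFA_DRIVER_VERSION == v1.1:
 * firmware reporting v1.0 is returned as is (same major, lower minor) and
 * the driver runs at v1.0, while firmware reporting v1.2 or v2.0 takes the
 * downgrade path and the driver continues at its own v1.1.
 */
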
static int ffa_version_check(u32 *version)
{
	ffa_value_t ver;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_VERSION, .a1 = FFA_DRIVER_VERSION,
		      }, &ver);

	if (ver.a0 == FFA_RET_NOT_SUPPORTED) {
		pr_info("FFA_VERSION returned not supported\n");
		return -EOPNOTSUPP;
	}

	if (ver.a0 < FFA_MIN_VERSION) {
		pr_err("Incompatible v%d.%d! Earliest supported v%d.%d\n",
		       FFA_MAJOR_VERSION(ver.a0), FFA_MINOR_VERSION(ver.a0),
		       FFA_MAJOR_VERSION(FFA_MIN_VERSION),
		       FFA_MINOR_VERSION(FFA_MIN_VERSION));
		return -EINVAL;
	}

	pr_info("Driver version %d.%d\n", FFA_MAJOR_VERSION(FFA_DRIVER_VERSION),
		FFA_MINOR_VERSION(FFA_DRIVER_VERSION));
	pr_info("Firmware version %d.%d found\n", FFA_MAJOR_VERSION(ver.a0),
		FFA_MINOR_VERSION(ver.a0));
	*version = ffa_compatible_version_find(ver.a0);

	return 0;
}

static int ffa_rx_release(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RX_RELEASE,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	/* check for ret.a0 == FFA_RX_RELEASE ? */

	return 0;
}

static int ffa_rxtx_map(phys_addr_t tx_buf, phys_addr_t rx_buf, u32 pg_cnt)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FN_NATIVE(RXTX_MAP),
		      .a1 = tx_buf, .a2 = rx_buf, .a3 = pg_cnt,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_rxtx_unmap(u16 vm_id)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_RXTX_UNMAP, .a1 = PACK_TARGET_INFO(vm_id, 0),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

#define PARTITION_INFO_GET_RETURN_COUNT_ONLY	BIT(0)

/* buffer must be sizeof(struct ffa_partition_info) * num_partitions */
static int
__ffa_partition_info_get(u32 uuid0, u32 uuid1, u32 uuid2, u32 uuid3,
			 struct ffa_partition_info *buffer, int num_partitions)
{
	int idx, count, flags = 0, sz, buf_sz;
	ffa_value_t partition_info;

	if (drv_info->version > FFA_VERSION_1_0 &&
	    (!buffer || !num_partitions)) /* Just get the count for now */
		flags = PARTITION_INFO_GET_RETURN_COUNT_ONLY;

	mutex_lock(&drv_info->rx_lock);
	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_PARTITION_INFO_GET,
		      .a1 = uuid0, .a2 = uuid1, .a3 = uuid2, .a4 = uuid3,
		      .a5 = flags,
		      }, &partition_info);

	if (partition_info.a0 == FFA_ERROR) {
		mutex_unlock(&drv_info->rx_lock);
		return ffa_to_linux_errno((int)partition_info.a2);
	}

	count = partition_info.a2;

	if (drv_info->version > FFA_VERSION_1_0) {
		buf_sz = sz = partition_info.a3;
		if (sz > sizeof(*buffer))
			buf_sz = sizeof(*buffer);
	} else {
		/* FFA_VERSION_1_0 lacks the size in the response */
		buf_sz = sz = 8;
	}

	if (buffer && count <= num_partitions)
		for (idx = 0; idx < count; idx++)
			memcpy(buffer + idx, drv_info->rx_buffer + idx * sz,
			       buf_sz);

	ffa_rx_release();

	mutex_unlock(&drv_info->rx_lock);

	return count;
}

/* buffer is allocated and caller must free the same if returned count > 0 */
static int
ffa_partition_probe(const uuid_t *uuid, struct ffa_partition_info **buffer)
{
	int count;
	u32 uuid0_4[4];
	struct ffa_partition_info *pbuf;

	export_uuid((u8 *)uuid0_4, uuid);
	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
					 uuid0_4[3], NULL, 0);
	if (count <= 0)
		return count;

	pbuf = kcalloc(count, sizeof(*pbuf), GFP_KERNEL);
	if (!pbuf)
		return -ENOMEM;

	count = __ffa_partition_info_get(uuid0_4[0], uuid0_4[1], uuid0_4[2],
					 uuid0_4[3], pbuf, count);
	if (count <= 0)
		kfree(pbuf);
	else
		*buffer = pbuf;

	return count;
}

#define VM_ID_MASK	GENMASK(15, 0)
static int ffa_id_get(u16 *vm_id)
{
	ffa_value_t id;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_ID_GET,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	*vm_id = FIELD_GET(VM_ID_MASK, (id.a2));

	return 0;
}

static int ffa_msg_send_direct_req(u16 src_id, u16 dst_id, bool mode_32bit,
				   struct ffa_send_direct_data *data)
{
	u32 req_id, resp_id, src_dst_ids = PACK_TARGET_INFO(src_id, dst_id);
	ffa_value_t ret;

	if (mode_32bit) {
		req_id = FFA_MSG_SEND_DIRECT_REQ;
		resp_id = FFA_MSG_SEND_DIRECT_RESP;
	} else {
		req_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_REQ);
		resp_id = FFA_FN_NATIVE(MSG_SEND_DIRECT_RESP);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = req_id, .a1 = src_dst_ids, .a2 = 0,
		      .a3 = data->data0, .a4 = data->data1, .a5 = data->data2,
		      .a6 = data->data3, .a7 = data->data4,
		      }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_RUN, .a1 = ret.a1,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == resp_id) {
		data->data0 = ret.a3;
		data->data1 = ret.a4;
		data->data2 = ret.a5;
		data->data3 = ret.a6;
		data->data4 = ret.a7;
		return 0;
	}

	return -EINVAL;
}

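/*
 * Note on the layout above: the five payload words data0..data4 travel in
 * registers w3/x3-w7/x7 of the direct request and come back in the same
 * registers of the direct response, which is why they are copied straight
 * between ffa_send_direct_data and ret.a3-ret.a7. w1 carries the packed
 * sender/receiver IDs and w2 is the flags word, left as zero here for a
 * plain partition message.
 */
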
static int ffa_mem_first_frag(u32 func_id, phys_addr_t buf, u32 buf_sz,
			      u32 frag_len, u32 len, u64 *handle)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func_id, .a1 = len, .a2 = frag_len,
		      .a3 = buf, .a4 = buf_sz,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_SUCCESS) {
		if (handle)
			*handle = PACK_HANDLE(ret.a2, ret.a3);
	} else if (ret.a0 == FFA_MEM_FRAG_RX) {
		if (handle)
			*handle = PACK_HANDLE(ret.a1, ret.a2);
	} else {
		return -EOPNOTSUPP;
	}

	return frag_len;
}

static int ffa_mem_next_frag(u64 handle, u32 frag_len)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_FRAG_TX,
		      .a1 = HANDLE_LOW(handle), .a2 = HANDLE_HIGH(handle),
		      .a3 = frag_len,
		      }, &ret);

	while (ret.a0 == FFA_MEM_OP_PAUSE)
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_MEM_OP_RESUME,
			      .a1 = ret.a1, .a2 = ret.a2,
			      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	if (ret.a0 == FFA_MEM_FRAG_RX)
		return ret.a3;
	else if (ret.a0 == FFA_SUCCESS)
		return 0;

	return -EOPNOTSUPP;
}

static int
ffa_transmit_fragment(u32 func_id, phys_addr_t buf, u32 buf_sz, u32 frag_len,
		      u32 len, u64 *handle, bool first)
{
	if (!first)
		return ffa_mem_next_frag(*handle, frag_len);

	return ffa_mem_first_frag(func_id, buf, buf_sz, frag_len, len, handle);
}

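/*
 * Fragmentation flow in brief: the first fragment is sent by the memory
 * operation itself (e.g. FFA_MEM_SHARE) carrying both the total length and
 * this fragment's length; if the relayer answers FFA_MEM_FRAG_RX, each
 * remaining fragment is pushed with FFA_MEM_FRAG_TX identified by the
 * 64-bit global handle, until FFA_SUCCESS completes the transaction.
 */
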
static u32 ffa_get_num_pages_sg(struct scatterlist *sg)
{
	u32 num_pages = 0;

	do {
		num_pages += sg->length / FFA_PAGE_SIZE;
	} while ((sg = sg_next(sg)));

	return num_pages;
}

static u16 ffa_memory_attributes_get(u32 func_id)
{
	/*
	 * For the memory lend or donate operation, if the receiver is a PE or
	 * a proxy endpoint, the owner/sender must not specify the attributes
	 */
	if (func_id == FFA_FN_NATIVE(MEM_LEND) ||
	    func_id == FFA_MEM_LEND)
		return 0;

	return FFA_MEM_NORMAL | FFA_MEM_WRITE_BACK | FFA_MEM_INNER_SHAREABLE;
}

static int
ffa_setup_and_transmit(u32 func_id, void *buffer, u32 max_fragsize,
		       struct ffa_mem_ops_args *args)
{
	int rc = 0;
	bool first = true;
	u32 composite_offset;
	phys_addr_t addr = 0;
	struct ffa_mem_region *mem_region = buffer;
	struct ffa_composite_mem_region *composite;
	struct ffa_mem_region_addr_range *constituents;
	struct ffa_mem_region_attributes *ep_mem_access;
	u32 idx, frag_len, length, buf_sz = 0, num_entries = sg_nents(args->sg);

	mem_region->tag = args->tag;
	mem_region->flags = args->flags;
	mem_region->sender_id = drv_info->vm_id;
	mem_region->attributes = ffa_memory_attributes_get(func_id);
	ep_mem_access = buffer +
			ffa_mem_desc_offset(buffer, 0, drv_info->version);
	composite_offset = ffa_mem_desc_offset(buffer, args->nattrs,
					       drv_info->version);

	for (idx = 0; idx < args->nattrs; idx++, ep_mem_access++) {
		ep_mem_access->receiver = args->attrs[idx].receiver;
		ep_mem_access->attrs = args->attrs[idx].attrs;
		ep_mem_access->composite_off = composite_offset;
		ep_mem_access->flag = 0;
		ep_mem_access->reserved = 0;
	}
	mem_region->handle = 0;
	mem_region->ep_count = args->nattrs;
	if (drv_info->version <= FFA_VERSION_1_0) {
		mem_region->ep_mem_size = 0;
	} else {
		mem_region->ep_mem_size = sizeof(*ep_mem_access);
		mem_region->ep_mem_offset = sizeof(*mem_region);
		memset(mem_region->reserved, 0, 12);
	}

	composite = buffer + composite_offset;
	composite->total_pg_cnt = ffa_get_num_pages_sg(args->sg);
	composite->addr_range_cnt = num_entries;
	composite->reserved = 0;

	length = composite_offset + CONSTITUENTS_OFFSET(num_entries);
	frag_len = composite_offset + CONSTITUENTS_OFFSET(0);
	if (frag_len > max_fragsize)
		return -ENXIO;

	if (!args->use_txbuf) {
		addr = virt_to_phys(buffer);
		buf_sz = max_fragsize / FFA_PAGE_SIZE;
	}

	constituents = buffer + frag_len;
	idx = 0;
	do {
		if (frag_len == max_fragsize) {
			rc = ffa_transmit_fragment(func_id, addr, buf_sz,
						   frag_len, length,
						   &args->g_handle, first);
			if (rc < 0)
				return -ENXIO;

			first = false;
			idx = 0;
			frag_len = 0;
			constituents = buffer;
		}

		if ((void *)constituents - buffer > max_fragsize) {
			pr_err("Memory Region Fragment > Tx Buffer size\n");
			return -EFAULT;
		}

		constituents->address = sg_phys(args->sg);
		constituents->pg_cnt = args->sg->length / FFA_PAGE_SIZE;
		constituents->reserved = 0;
		constituents++;
		frag_len += sizeof(struct ffa_mem_region_addr_range);
	} while ((args->sg = sg_next(args->sg)));

	return ffa_transmit_fragment(func_id, addr, buf_sz, frag_len,
				     length, &args->g_handle, first);
}

static int ffa_memory_ops(u32 func_id, struct ffa_mem_ops_args *args)
{
	int ret;
	void *buffer;

	if (!args->use_txbuf) {
		buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
		if (!buffer)
			return -ENOMEM;
	} else {
		buffer = drv_info->tx_buffer;
		mutex_lock(&drv_info->tx_lock);
	}

	ret = ffa_setup_and_transmit(func_id, buffer, RXTX_BUFFER_SIZE, args);

	if (args->use_txbuf)
		mutex_unlock(&drv_info->tx_lock);
	else
		free_pages_exact(buffer, RXTX_BUFFER_SIZE);

	return ret < 0 ? ret : 0;
}

static int ffa_memory_reclaim(u64 g_handle, u32 flags)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_MEM_RECLAIM,
		      .a1 = HANDLE_LOW(g_handle), .a2 = HANDLE_HIGH(g_handle),
		      .a3 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_features(u32 func_feat_id, u32 input_props,
			u32 *if_props_1, u32 *if_props_2)
{
	ffa_value_t id;

	if (!ARM_SMCCC_IS_FAST_CALL(func_feat_id) && input_props) {
		pr_err("%s: Invalid Parameters: %x, %x", __func__,
		       func_feat_id, input_props);
		return ffa_to_linux_errno(FFA_RET_INVALID_PARAMETERS);
	}

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_FEATURES, .a1 = func_feat_id, .a2 = input_props,
		      }, &id);

	if (id.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)id.a2);

	if (if_props_1)
		*if_props_1 = id.a2;
	if (if_props_2)
		*if_props_2 = id.a3;

	return 0;
}

static int ffa_notification_bitmap_create(void)
{
	ffa_value_t ret;
	u16 vcpu_count = nr_cpu_ids;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_CREATE,
		      .a1 = drv_info->vm_id, .a2 = vcpu_count,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static int ffa_notification_bitmap_destroy(void)
{
	ffa_value_t ret;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_BITMAP_DESTROY,
		      .a1 = drv_info->vm_id,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

#define NOTIFICATION_LOW_MASK		GENMASK(31, 0)
#define NOTIFICATION_HIGH_MASK		GENMASK(63, 32)
#define NOTIFICATION_BITMAP_HIGH(x)	\
	((u32)(FIELD_GET(NOTIFICATION_HIGH_MASK, (x))))
#define NOTIFICATION_BITMAP_LOW(x)	\
	((u32)(FIELD_GET(NOTIFICATION_LOW_MASK, (x))))
#define PACK_NOTIFICATION_BITMAP(low, high)	\
	(FIELD_PREP(NOTIFICATION_LOW_MASK, (low)) |	\
	 FIELD_PREP(NOTIFICATION_HIGH_MASK, (high)))

#define RECEIVER_VCPU_MASK		GENMASK(31, 16)
#define PACK_NOTIFICATION_GET_RECEIVER_INFO(vcpu_r, r)	\
	(FIELD_PREP(RECEIVER_VCPU_MASK, (vcpu_r)) |	\
	 FIELD_PREP(RECEIVER_ID_MASK, (r)))

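/*
 * Example: a 64-bit bitmap with only notification 33 pending is
 * 0x0000000200000000; NOTIFICATION_BITMAP_LOW() extracts 0x0 and
 * NOTIFICATION_BITMAP_HIGH() 0x2 for the a3/a4 arguments used below, and
 * PACK_NOTIFICATION_BITMAP(0x0, 0x2) reassembles the same value on return.
 */
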
#define NOTIFICATION_INFO_GET_MORE_PEND_MASK	BIT(0)
#define NOTIFICATION_INFO_GET_ID_COUNT		GENMASK(11, 7)
#define ID_LIST_MASK_64				GENMASK(51, 12)
#define ID_LIST_MASK_32				GENMASK(31, 12)
#define MAX_IDS_64				20
#define MAX_IDS_32				10

#define PER_VCPU_NOTIFICATION_FLAG	BIT(0)
#define SECURE_PARTITION_BITMAP		BIT(0)
#define NON_SECURE_VM_BITMAP		BIT(1)
#define SPM_FRAMEWORK_BITMAP		BIT(2)
#define NS_HYP_FRAMEWORK_BITMAP		BIT(3)

static int ffa_notification_bind_common(u16 dst_id, u64 bitmap,
					u32 flags, bool is_bind)
{
	ffa_value_t ret;
	u32 func, src_dst_ids = PACK_TARGET_INFO(dst_id, drv_info->vm_id);

	func = is_bind ? FFA_NOTIFICATION_BIND : FFA_NOTIFICATION_UNBIND;

	invoke_ffa_fn((ffa_value_t){
		      .a0 = func, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

static int ffa_notification_set(u16 src_id, u16 dst_id, u32 flags, u64 bitmap)
{
	ffa_value_t ret;
	u32 src_dst_ids = PACK_TARGET_INFO(dst_id, src_id);

	invoke_ffa_fn((ffa_value_t) {
		      .a0 = FFA_NOTIFICATION_SET, .a1 = src_dst_ids, .a2 = flags,
		      .a3 = NOTIFICATION_BITMAP_LOW(bitmap),
		      .a4 = NOTIFICATION_BITMAP_HIGH(bitmap),
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL;

	return 0;
}

struct ffa_notify_bitmaps {
	u64 sp_map;
	u64 vm_map;
	u64 arch_map;
};

static int ffa_notification_get(u32 flags, struct ffa_notify_bitmaps *notify)
{
	ffa_value_t ret;
	u16 src_id = drv_info->vm_id;
	u16 cpu_id = smp_processor_id();
	u32 rec_vcpu_ids = PACK_NOTIFICATION_GET_RECEIVER_INFO(cpu_id, src_id);

	invoke_ffa_fn((ffa_value_t){
		      .a0 = FFA_NOTIFICATION_GET, .a1 = rec_vcpu_ids, .a2 = flags,
		      }, &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);
	else if (ret.a0 != FFA_SUCCESS)
		return -EINVAL; /* Something else went wrong. */

	notify->sp_map = PACK_NOTIFICATION_BITMAP(ret.a2, ret.a3);
	notify->vm_map = PACK_NOTIFICATION_BITMAP(ret.a4, ret.a5);
	notify->arch_map = PACK_NOTIFICATION_BITMAP(ret.a6, ret.a7);

	return 0;
}

struct ffa_dev_part_info {
	ffa_sched_recv_cb callback;
	void *cb_data;
	rwlock_t rw_lock;
};

static void __do_sched_recv_cb(u16 part_id, u16 vcpu, bool is_per_vcpu)
{
	struct ffa_dev_part_info *partition;
	ffa_sched_recv_cb callback;
	void *cb_data;

	partition = xa_load(&drv_info->partition_info, part_id);
	read_lock(&partition->rw_lock);
	callback = partition->callback;
	cb_data = partition->cb_data;
	read_unlock(&partition->rw_lock);

	if (callback)
		callback(vcpu, is_per_vcpu, cb_data);
}

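/*
 * FFA_NOTIFICATION_INFO_GET packs its response as a sequence of ID lists
 * (per the FF-A v1.1 spec): bits [11:7] of w2 give the number of lists and,
 * from bit 12 upwards, each list contributes a 2-bit field holding its ID
 * count minus one. Every list starts with a 16-bit partition ID; a
 * single-entry list denotes a global notification, while any further IDs
 * are the vCPUs targeted by per-vCPU notifications. The IDs themselves are
 * packed back to back from w3 onwards, hence the u16 view of ret.a3 below.
 */
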
static void ffa_notification_info_get(void)
{
	int idx, list, max_ids, lists_cnt, ids_processed, ids_count[MAX_IDS_64];
	bool is_64b_resp;
	ffa_value_t ret;
	u64 id_list;

	do {
		invoke_ffa_fn((ffa_value_t){
			      .a0 = FFA_FN_NATIVE(NOTIFICATION_INFO_GET),
			      }, &ret);

		if (ret.a0 != FFA_FN_NATIVE(SUCCESS) && ret.a0 != FFA_SUCCESS) {
			if (ret.a2 != FFA_RET_NO_DATA)
				pr_err("Notification Info fetch failed: 0x%lx (0x%lx)\n",
				       ret.a0, ret.a2);
			return;
		}

		is_64b_resp = (ret.a0 == FFA_FN64_SUCCESS);

		ids_processed = 0;
		lists_cnt = FIELD_GET(NOTIFICATION_INFO_GET_ID_COUNT, ret.a2);
		if (is_64b_resp) {
			max_ids = MAX_IDS_64;
			id_list = FIELD_GET(ID_LIST_MASK_64, ret.a2);
		} else {
			max_ids = MAX_IDS_32;
			id_list = FIELD_GET(ID_LIST_MASK_32, ret.a2);
		}

		/* Each 2-bit field encodes the list's ID count minus one */
		for (idx = 0; idx < lists_cnt; idx++, id_list >>= 2)
			ids_count[idx] = (id_list & 0x3) + 1;

		/* Process IDs */
		for (list = 0; list < lists_cnt; list++) {
			u16 vcpu_id, part_id, *packed_id_list = (u16 *)&ret.a3;

			if (ids_processed >= max_ids - 1)
				break;

			part_id = packed_id_list[ids_processed++];

			if (ids_count[list] == 1) { /* Global Notification */
				__do_sched_recv_cb(part_id, 0, false);
				continue;
			}

			/* Per vCPU Notification */
			for (idx = 0; idx < ids_count[list] - 1; idx++) {
				if (ids_processed >= max_ids - 1)
					break;

				vcpu_id = packed_id_list[ids_processed++];

				__do_sched_recv_cb(part_id, vcpu_id, true);
			}
		}
	} while (ret.a2 & NOTIFICATION_INFO_GET_MORE_PEND_MASK);
}

static int ffa_run(struct ffa_device *dev, u16 vcpu)
{
	ffa_value_t ret;
	u32 target = dev->vm_id << 16 | vcpu;

	invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = target, }, &ret);

	while (ret.a0 == FFA_INTERRUPT)
		invoke_ffa_fn((ffa_value_t){ .a0 = FFA_RUN, .a1 = ret.a1, },
			      &ret);

	if (ret.a0 == FFA_ERROR)
		return ffa_to_linux_errno((int)ret.a2);

	return 0;
}

static void ffa_set_up_mem_ops_native_flag(void)
{
	if (!ffa_features(FFA_FN_NATIVE(MEM_LEND), 0, NULL, NULL) ||
	    !ffa_features(FFA_FN_NATIVE(MEM_SHARE), 0, NULL, NULL))
		drv_info->mem_ops_native = true;
}

static u32 ffa_api_version_get(void)
{
	return drv_info->version;
}

static int ffa_partition_info_get(const char *uuid_str,
				  struct ffa_partition_info *buffer)
{
	int count;
	uuid_t uuid;
	struct ffa_partition_info *pbuf;

	if (uuid_parse(uuid_str, &uuid)) {
		pr_err("invalid uuid (%s)\n", uuid_str);
		return -ENODEV;
	}

	count = ffa_partition_probe(&uuid, &pbuf);
	if (count <= 0)
		return -ENOENT;

	memcpy(buffer, pbuf, sizeof(*pbuf) * count);
	kfree(pbuf);

	return 0;
}

static void ffa_mode_32bit_set(struct ffa_device *dev)
{
	dev->mode_32bit = true;
}

static int ffa_sync_send_receive(struct ffa_device *dev,
				 struct ffa_send_direct_data *data)
{
	return ffa_msg_send_direct_req(drv_info->vm_id, dev->vm_id,
				       dev->mode_32bit, data);
}

static int ffa_memory_share(struct ffa_mem_ops_args *args)
{
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_SHARE), args);

	return ffa_memory_ops(FFA_MEM_SHARE, args);
}

static int ffa_memory_lend(struct ffa_mem_ops_args *args)
{
	/*
	 * Note that upon a successful MEM_LEND request the caller
	 * must ensure that the memory region specified is not accessed
	 * until a successful MEM_RECLAIM call has been made.
	 * On systems with a hypervisor present this will be enforced,
	 * however on systems without a hypervisor the responsibility
	 * falls to the calling kernel driver to prevent access.
	 */
	if (drv_info->mem_ops_native)
		return ffa_memory_ops(FFA_FN_NATIVE(MEM_LEND), args);

	return ffa_memory_ops(FFA_MEM_LEND, args);
}

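/*
 * A minimal usage sketch for these memory ops, modelled on the in-kernel
 * users (illustrative only - the receiver ID is hypothetical and error
 * handling is elided):
 *
 *	struct scatterlist sg;
 *	struct ffa_mem_region_attributes mem_attr = {
 *		.receiver = 0x8001,	// hypothetical secure partition ID
 *		.attrs = FFA_MEM_RW,
 *	};
 *	struct ffa_mem_ops_args args = {
 *		.use_txbuf = true,
 *		.attrs = &mem_attr,
 *		.nattrs = 1,
 *		.sg = &sg,
 *	};
 *
 *	sg_init_one(&sg, buffer, FFA_PAGE_SIZE);
 *	ffa_dev->ops->mem_ops->memory_lend(&args);
 *	// on success, args.g_handle holds the handle for memory_reclaim()
 */
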
#define FFA_SECURE_PARTITION_ID_FLAG	BIT(15)

#define ffa_notifications_disabled()	(!drv_info->notif_enabled)

enum notify_type {
	NON_SECURE_VM,
	SECURE_PARTITION,
	FRAMEWORK,
};

struct notifier_cb_info {
	struct hlist_node hnode;
	ffa_notifier_cb cb;
	void *cb_data;
	enum notify_type type;
};

static int ffa_sched_recv_cb_update(u16 part_id, ffa_sched_recv_cb callback,
				    void *cb_data, bool is_registration)
{
	struct ffa_dev_part_info *partition;
	bool cb_valid;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	partition = xa_load(&drv_info->partition_info, part_id);
	write_lock(&partition->rw_lock);

	cb_valid = !!partition->callback;
	if (!(is_registration ^ cb_valid)) {
		write_unlock(&partition->rw_lock);
		return -EINVAL;
	}

	partition->callback = callback;
	partition->cb_data = cb_data;

	write_unlock(&partition->rw_lock);
	return 0;
}

static int ffa_sched_recv_cb_register(struct ffa_device *dev,
				      ffa_sched_recv_cb cb, void *cb_data)
{
	return ffa_sched_recv_cb_update(dev->vm_id, cb, cb_data, true);
}

static int ffa_sched_recv_cb_unregister(struct ffa_device *dev)
{
	return ffa_sched_recv_cb_update(dev->vm_id, NULL, NULL, false);
}

static int ffa_notification_bind(u16 dst_id, u64 bitmap, u32 flags)
{
	return ffa_notification_bind_common(dst_id, bitmap, flags, true);
}

static int ffa_notification_unbind(u16 dst_id, u64 bitmap)
{
	return ffa_notification_bind_common(dst_id, bitmap, 0, false);
}

/* Should be called while the notify_lock is taken */
static struct notifier_cb_info *
notifier_hash_node_get(u16 notify_id, enum notify_type type)
{
	struct notifier_cb_info *node;

	hash_for_each_possible(drv_info->notifier_hash, node, hnode, notify_id)
		if (type == node->type)
			return node;

	return NULL;
}

static int
update_notifier_cb(int notify_id, enum notify_type type, ffa_notifier_cb cb,
		   void *cb_data, bool is_registration)
{
	struct notifier_cb_info *cb_info = NULL;
	bool cb_found;

	cb_info = notifier_hash_node_get(notify_id, type);
	cb_found = !!cb_info;

	if (!(is_registration ^ cb_found))
		return -EINVAL;

	if (is_registration) {
		cb_info = kzalloc(sizeof(*cb_info), GFP_KERNEL);
		if (!cb_info)
			return -ENOMEM;

		cb_info->type = type;
		cb_info->cb = cb;
		cb_info->cb_data = cb_data;

		hash_add(drv_info->notifier_hash, &cb_info->hnode, notify_id);
	} else {
		hash_del(&cb_info->hnode);
		kfree(cb_info);
	}

	return 0;
}

static enum notify_type ffa_notify_type_get(u16 vm_id)
{
	if (vm_id & FFA_SECURE_PARTITION_ID_FLAG)
		return SECURE_PARTITION;
	else
		return NON_SECURE_VM;
}

static int ffa_notify_relinquish(struct ffa_device *dev, int notify_id)
{
	int rc;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	rc = update_notifier_cb(notify_id, type, NULL, NULL, false);
	if (rc) {
		pr_err("Could not unregister notification callback\n");
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = ffa_notification_unbind(dev->vm_id, BIT(notify_id));

	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

static int ffa_notify_request(struct ffa_device *dev, bool is_per_vcpu,
			      ffa_notifier_cb cb, void *cb_data, int notify_id)
{
	int rc;
	u32 flags = 0;
	enum notify_type type = ffa_notify_type_get(dev->vm_id);

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (notify_id >= FFA_MAX_NOTIFICATIONS)
		return -EINVAL;

	mutex_lock(&drv_info->notify_lock);

	if (is_per_vcpu)
		flags = PER_VCPU_NOTIFICATION_FLAG;

	rc = ffa_notification_bind(dev->vm_id, BIT(notify_id), flags);
	if (rc) {
		mutex_unlock(&drv_info->notify_lock);
		return rc;
	}

	rc = update_notifier_cb(notify_id, type, cb, cb_data, true);
	if (rc) {
		pr_err("Failed to register callback for %d - %d\n",
		       notify_id, rc);
		ffa_notification_unbind(dev->vm_id, BIT(notify_id));
	}
	mutex_unlock(&drv_info->notify_lock);

	return rc;
}

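/*
 * Typical client usage of the request/relinquish pair above (illustrative;
 * the callback and notification ID are hypothetical):
 *
 *	static void my_notif_cb(int notify_id, void *cb_data) { ... }
 *
 *	ffa_dev->ops->notifier_ops->notify_request(ffa_dev, false,
 *						   my_notif_cb, data, 1);
 *	...
 *	ffa_dev->ops->notifier_ops->notify_relinquish(ffa_dev, 1);
 */
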
static int ffa_notify_send(struct ffa_device *dev, int notify_id,
			   bool is_per_vcpu, u16 vcpu)
{
	u32 flags = 0;

	if (ffa_notifications_disabled())
		return -EOPNOTSUPP;

	if (is_per_vcpu)
		flags |= (PER_VCPU_NOTIFICATION_FLAG | vcpu << 16);

	return ffa_notification_set(dev->vm_id, drv_info->vm_id, flags,
				    BIT(notify_id));
}

static void handle_notif_callbacks(u64 bitmap, enum notify_type type)
{
	int notify_id;
	struct notifier_cb_info *cb_info = NULL;

	for (notify_id = 0; notify_id <= FFA_MAX_NOTIFICATIONS && bitmap;
	     notify_id++, bitmap >>= 1) {
		if (!(bitmap & 1))
			continue;

		mutex_lock(&drv_info->notify_lock);
		cb_info = notifier_hash_node_get(notify_id, type);
		mutex_unlock(&drv_info->notify_lock);

		if (cb_info && cb_info->cb)
			cb_info->cb(notify_id, cb_info->cb_data);
	}
}

static void notif_pcpu_irq_work_fn(struct work_struct *work)
{
	int rc;
	struct ffa_notify_bitmaps bitmaps;

	rc = ffa_notification_get(SECURE_PARTITION_BITMAP |
				  SPM_FRAMEWORK_BITMAP, &bitmaps);
	if (rc) {
		pr_err("Failed to retrieve notifications with %d!\n", rc);
		return;
	}

	handle_notif_callbacks(bitmaps.vm_map, NON_SECURE_VM);
	handle_notif_callbacks(bitmaps.sp_map, SECURE_PARTITION);
	handle_notif_callbacks(bitmaps.arch_map, FRAMEWORK);
}

static void
ffa_self_notif_handle(u16 vcpu, bool is_per_vcpu, void *cb_data)
{
	struct ffa_drv_info *info = cb_data;

	if (!is_per_vcpu)
		notif_pcpu_irq_work_fn(&info->notif_pcpu_work);
	else
		queue_work_on(vcpu, info->notif_pcpu_wq,
			      &info->notif_pcpu_work);
}

static const struct ffa_info_ops ffa_drv_info_ops = {
	.api_version_get = ffa_api_version_get,
	.partition_info_get = ffa_partition_info_get,
};

static const struct ffa_msg_ops ffa_drv_msg_ops = {
	.mode_32bit_set = ffa_mode_32bit_set,
	.sync_send_receive = ffa_sync_send_receive,
};

static const struct ffa_mem_ops ffa_drv_mem_ops = {
	.memory_reclaim = ffa_memory_reclaim,
	.memory_share = ffa_memory_share,
	.memory_lend = ffa_memory_lend,
};

static const struct ffa_cpu_ops ffa_drv_cpu_ops = {
	.run = ffa_run,
};

static const struct ffa_notifier_ops ffa_drv_notifier_ops = {
	.sched_recv_cb_register = ffa_sched_recv_cb_register,
	.sched_recv_cb_unregister = ffa_sched_recv_cb_unregister,
	.notify_request = ffa_notify_request,
	.notify_relinquish = ffa_notify_relinquish,
	.notify_send = ffa_notify_send,
};

static const struct ffa_ops ffa_drv_ops = {
	.info_ops = &ffa_drv_info_ops,
	.msg_ops = &ffa_drv_msg_ops,
	.mem_ops = &ffa_drv_mem_ops,
	.cpu_ops = &ffa_drv_cpu_ops,
	.notifier_ops = &ffa_drv_notifier_ops,
};

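/*
 * This ops table is handed to every ffa_device registered below; a client
 * driver bound on the FF-A bus reaches it through its device pointer, e.g.
 * (illustrative, with a hypothetical probe function):
 *
 *	static int my_ffa_probe(struct ffa_device *ffa_dev)
 *	{
 *		struct ffa_send_direct_data data = { .data0 = 0x1 };
 *
 *		return ffa_dev->ops->msg_ops->sync_send_receive(ffa_dev,
 *								&data);
 *	}
 */
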
void ffa_device_match_uuid(struct ffa_device *ffa_dev, const uuid_t *uuid)
{
	int count, idx;
	struct ffa_partition_info *pbuf, *tpbuf;

	/*
	 * FF-A v1.1 provides UUID for each partition as part of the discovery
	 * API, the discovered UUID must be populated in the device's UUID and
	 * there is no need to copy the same from the driver table.
	 */
	if (drv_info->version > FFA_VERSION_1_0)
		return;

	count = ffa_partition_probe(uuid, &pbuf);
	if (count <= 0)
		return;

	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++)
		if (tpbuf->id == ffa_dev->vm_id)
			uuid_copy(&ffa_dev->uuid, uuid);
	kfree(pbuf);
}

static void ffa_setup_partitions(void)
{
	int count, idx;
	uuid_t uuid;
	struct ffa_device *ffa_dev;
	struct ffa_dev_part_info *info;
	struct ffa_partition_info *pbuf, *tpbuf;

	count = ffa_partition_probe(&uuid_null, &pbuf);
	if (count <= 0) {
		pr_info("%s: No partitions found, error %d\n", __func__, count);
		return;
	}

	xa_init(&drv_info->partition_info);
	for (idx = 0, tpbuf = pbuf; idx < count; idx++, tpbuf++) {
		import_uuid(&uuid, (u8 *)tpbuf->uuid);

		/*
		 * Note that if the UUID is uuid_null, ffa_device_match()
		 * will be required to find the UUID of this partition ID
		 * with help of ffa_device_match_uuid(). FF-A v1.1 and above
		 * provides the UUID here for each partition as part of the
		 * discovery API and the same is passed on.
		 */
		ffa_dev = ffa_device_register(&uuid, tpbuf->id, &ffa_drv_ops);
		if (!ffa_dev) {
			pr_err("%s: failed to register partition ID 0x%x\n",
			       __func__, tpbuf->id);
			continue;
		}

		if (drv_info->version > FFA_VERSION_1_0 &&
		    !(tpbuf->properties & FFA_PARTITION_AARCH64_EXEC))
			ffa_mode_32bit_set(ffa_dev);

		info = kzalloc(sizeof(*info), GFP_KERNEL);
		if (!info) {
			ffa_device_unregister(ffa_dev);
			continue;
		}
		rwlock_init(&info->rw_lock);
		xa_store(&drv_info->partition_info, tpbuf->id, info, GFP_KERNEL);
	}
	drv_info->partition_count = count;

	kfree(pbuf);

	/* Allocate for the host */
	info = kzalloc(sizeof(*info), GFP_KERNEL);
	if (!info)
		return;
	rwlock_init(&info->rw_lock);
	xa_store(&drv_info->partition_info, drv_info->vm_id, info, GFP_KERNEL);
	drv_info->partition_count++;
}

static void ffa_partitions_cleanup(void)
{
	struct ffa_dev_part_info **info;
	int idx, count = drv_info->partition_count;

	if (!count)
		return;

	info = kcalloc(count, sizeof(*info), GFP_KERNEL);
	if (!info)
		return;

	xa_extract(&drv_info->partition_info, (void **)info, 0, VM_ID_MASK,
		   count, XA_PRESENT);

	for (idx = 0; idx < count; idx++)
		kfree(info[idx]);

	kfree(info);

	drv_info->partition_count = 0;
	xa_destroy(&drv_info->partition_info);
}

/* FFA FEATURE IDs */
#define FFA_FEAT_NOTIFICATION_PENDING_INT	(1)
#define FFA_FEAT_SCHEDULE_RECEIVER_INT		(2)
#define FFA_FEAT_MANAGED_EXIT_INT		(3)

static irqreturn_t irq_handler(int irq, void *irq_data)
{
	struct ffa_pcpu_irq *pcpu = irq_data;
	struct ffa_drv_info *info = pcpu->info;

	queue_work(info->notif_pcpu_wq, &info->irq_work);

	return IRQ_HANDLED;
}

static void ffa_sched_recv_irq_work_fn(struct work_struct *work)
{
	ffa_notification_info_get();
}

static int ffa_sched_recv_irq_map(void)
{
	int ret, irq, sr_intid;

	/* The returned sr_intid is assumed to be an SGI donated to NS world */
	ret = ffa_features(FFA_FEAT_SCHEDULE_RECEIVER_INT, 0, &sr_intid, NULL);
	if (ret < 0) {
		if (ret != -EOPNOTSUPP)
			pr_err("Failed to retrieve scheduler Rx interrupt\n");
		return ret;
	}

	if (acpi_disabled) {
		struct of_phandle_args oirq = {};
		struct device_node *gic;

		/* Only GICv3 supported currently with the device tree */
		gic = of_find_compatible_node(NULL, NULL, "arm,gic-v3");
		if (!gic)
			return -ENXIO;

		oirq.np = gic;
		oirq.args_count = 1;
		oirq.args[0] = sr_intid;
		irq = irq_create_of_mapping(&oirq);
		of_node_put(gic);
#ifdef CONFIG_ACPI
	} else {
		irq = acpi_register_gsi(NULL, sr_intid, ACPI_EDGE_SENSITIVE,
					ACPI_ACTIVE_HIGH);
#endif
	}

	if (irq <= 0) {
		pr_err("Failed to create IRQ mapping!\n");
		return -ENODATA;
	}

	return irq;
}

static void ffa_sched_recv_irq_unmap(void)
{
	if (drv_info->sched_recv_irq) {
		irq_dispose_mapping(drv_info->sched_recv_irq);
		drv_info->sched_recv_irq = 0;
	}
}

static int ffa_cpuhp_pcpu_irq_enable(unsigned int cpu)
{
	enable_percpu_irq(drv_info->sched_recv_irq, IRQ_TYPE_NONE);
	return 0;
}

static int ffa_cpuhp_pcpu_irq_disable(unsigned int cpu)
{
	disable_percpu_irq(drv_info->sched_recv_irq);
	return 0;
}

static void ffa_uninit_pcpu_irq(void)
{
	if (drv_info->cpuhp_state) {
		cpuhp_remove_state(drv_info->cpuhp_state);
		drv_info->cpuhp_state = 0;
	}

	if (drv_info->notif_pcpu_wq) {
		destroy_workqueue(drv_info->notif_pcpu_wq);
		drv_info->notif_pcpu_wq = NULL;
	}

	if (drv_info->sched_recv_irq)
		free_percpu_irq(drv_info->sched_recv_irq, drv_info->irq_pcpu);

	if (drv_info->irq_pcpu) {
		free_percpu(drv_info->irq_pcpu);
		drv_info->irq_pcpu = NULL;
	}
}

static int ffa_init_pcpu_irq(unsigned int irq)
{
	struct ffa_pcpu_irq __percpu *irq_pcpu;
	int ret, cpu;

	irq_pcpu = alloc_percpu(struct ffa_pcpu_irq);
	if (!irq_pcpu)
		return -ENOMEM;

	for_each_present_cpu(cpu)
		per_cpu_ptr(irq_pcpu, cpu)->info = drv_info;

	drv_info->irq_pcpu = irq_pcpu;

	ret = request_percpu_irq(irq, irq_handler, "ARM-FFA", irq_pcpu);
	if (ret) {
		pr_err("Error registering notification IRQ %d: %d\n", irq, ret);
		return ret;
	}

	INIT_WORK(&drv_info->irq_work, ffa_sched_recv_irq_work_fn);
	INIT_WORK(&drv_info->notif_pcpu_work, notif_pcpu_irq_work_fn);
	drv_info->notif_pcpu_wq = create_workqueue("ffa_pcpu_irq_notification");
	if (!drv_info->notif_pcpu_wq)
		return -EINVAL;

	ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "ffa/pcpu-irq:starting",
				ffa_cpuhp_pcpu_irq_enable,
				ffa_cpuhp_pcpu_irq_disable);
	if (ret < 0)
		return ret;

	drv_info->cpuhp_state = ret;
	return 0;
}

static void ffa_notifications_cleanup(void)
{
	ffa_uninit_pcpu_irq();
	ffa_sched_recv_irq_unmap();

	if (drv_info->bitmap_created) {
		ffa_notification_bitmap_destroy();
		drv_info->bitmap_created = false;
	}
	drv_info->notif_enabled = false;
}

static void ffa_notifications_setup(void)
{
	int ret, irq;

	ret = ffa_features(FFA_NOTIFICATION_BITMAP_CREATE, 0, NULL, NULL);
	if (ret) {
		pr_info("Notifications not supported, continuing without them\n");
		return;
	}

	ret = ffa_notification_bitmap_create();
	if (ret) {
		pr_info("Notification bitmap create error %d\n", ret);
		return;
	}
	drv_info->bitmap_created = true;

	irq = ffa_sched_recv_irq_map();
	if (irq <= 0) {
		ret = irq;
		goto cleanup;
	}

	drv_info->sched_recv_irq = irq;

	ret = ffa_init_pcpu_irq(irq);
	if (ret)
		goto cleanup;

	hash_init(drv_info->notifier_hash);
	mutex_init(&drv_info->notify_lock);

	drv_info->notif_enabled = true;
	return;
cleanup:
	pr_info("Notification setup failed %d, not enabled\n", ret);
	ffa_notifications_cleanup();
}

static int __init ffa_init(void)
{
	int ret;

	ret = ffa_transport_init(&invoke_ffa_fn);
	if (ret)
		return ret;

	ret = arm_ffa_bus_init();
	if (ret)
		return ret;

	drv_info = kzalloc(sizeof(*drv_info), GFP_KERNEL);
	if (!drv_info) {
		ret = -ENOMEM;
		goto ffa_bus_exit;
	}

	ret = ffa_version_check(&drv_info->version);
	if (ret)
		goto free_drv_info;

	if (ffa_id_get(&drv_info->vm_id)) {
		pr_err("failed to obtain VM id for self\n");
		ret = -ENODEV;
		goto free_drv_info;
	}

	drv_info->rx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->rx_buffer) {
		ret = -ENOMEM;
		goto free_drv_info;
	}

	drv_info->tx_buffer = alloc_pages_exact(RXTX_BUFFER_SIZE, GFP_KERNEL);
	if (!drv_info->tx_buffer) {
		ret = -ENOMEM;
		goto free_pages;
	}

	ret = ffa_rxtx_map(virt_to_phys(drv_info->tx_buffer),
			   virt_to_phys(drv_info->rx_buffer),
			   RXTX_BUFFER_SIZE / FFA_PAGE_SIZE);
	if (ret) {
		pr_err("failed to register FFA RxTx buffers\n");
		goto free_pages;
	}

	mutex_init(&drv_info->rx_lock);
	mutex_init(&drv_info->tx_lock);

	ffa_set_up_mem_ops_native_flag();

	ffa_notifications_setup();

	ffa_setup_partitions();

	ret = ffa_sched_recv_cb_update(drv_info->vm_id, ffa_self_notif_handle,
				       drv_info, true);
	if (ret)
		pr_info("Failed to register driver sched callback %d\n", ret);

	return 0;

free_pages:
	if (drv_info->tx_buffer)
		free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
free_drv_info:
	kfree(drv_info);
ffa_bus_exit:
	arm_ffa_bus_exit();
	return ret;
}
subsys_initcall(ffa_init);

static void __exit ffa_exit(void)
{
	ffa_notifications_cleanup();
	ffa_partitions_cleanup();
	ffa_rxtx_unmap(drv_info->vm_id);
	free_pages_exact(drv_info->tx_buffer, RXTX_BUFFER_SIZE);
	free_pages_exact(drv_info->rx_buffer, RXTX_BUFFER_SIZE);
	xa_destroy(&drv_info->partition_info);
	kfree(drv_info);
	arm_ffa_bus_exit();
}
module_exit(ffa_exit);

MODULE_ALIAS("arm-ffa");
MODULE_AUTHOR("Sudeep Holla <sudeep.holla@arm.com>");
MODULE_DESCRIPTION("Arm FF-A interface driver");
MODULE_LICENSE("GPL v2");