1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
/*
 * On-the-wire message type codes. The '+' / '-' in the trailing comments
 * marks whether the message carries (srcport, dstport) routing fields and
 * lists any extra payload words.
 */
20 #define VCHIQ_MSG_PADDING 0 /* - */
21 #define VCHIQ_MSG_CONNECT 1 /* - */
22 #define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
23 #define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
24 #define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
25 #define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
26 #define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
27 #define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
28 #define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
29 #define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
30 #define VCHIQ_MSG_PAUSE 10 /* - */
31 #define VCHIQ_MSG_RESUME 11 /* - */
32 #define VCHIQ_MSG_REMOTE_USE 12 /* - */
33 #define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
34 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
/* Port numbers occupy 12 bits; VCHIQ_PORT_FREE is a sentinel above that range. */
36 #define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
37 #define VCHIQ_PORT_FREE 0x1000
/* NOTE(review): 'port' is not parenthesized here - callers must not pass an expression. */
38 #define VCHIQ_PORT_IS_VALID(port) (port < VCHIQ_PORT_FREE)
/*
 * Pack a message id: type in bits 31..24, srcport in 23..12, dstport in 11..0.
 * NOTE(review): the macro arguments are unparenthesized, so only pass simple
 * values, never compound expressions.
 */
39 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
40 ((type<<24) | (srcport<<12) | (dstport<<0))
/* Field extractors for the packed message id (inverse of VCHIQ_MAKE_MSG). */
41 #define VCHIQ_MSG_TYPE(msgid) ((unsigned int)msgid >> 24)
42 #define VCHIQ_MSG_SRCPORT(msgid) \
43 (unsigned short)(((unsigned int)msgid >> 12) & 0xfff)
44 #define VCHIQ_MSG_DSTPORT(msgid) \
45 ((unsigned short)msgid & 0xfff)
47 /* Ensure the fields are wide enough */
48 vchiq_static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
50 vchiq_static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
51 vchiq_static_assert((unsigned int)VCHIQ_PORT_MAX <
52 (unsigned int)VCHIQ_PORT_FREE);
/* Message id used to pad out the unusable tail of a slot. */
54 #define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
/* Flag OR'd into a header's msgid while a received message is still claimed. */
55 #define VCHIQ_MSGID_CLAIMED 0x40000000
57 #define VCHIQ_FOURCC_INVALID 0x00000000
58 #define VCHIQ_FOURCC_IS_LEGAL(fourcc) (fourcc != VCHIQ_FOURCC_INVALID)
/* Sentinel stored in bulk->actual when a bulk transfer was aborted. */
60 #define VCHIQ_BULK_ACTUAL_ABORTED -1
/*
 * Statistics accounting. When VCHIQ_ENABLE_STATS is set these update the
 * per-state / per-service stats counters; otherwise they compile to no-ops.
 */
62 #if VCHIQ_ENABLE_STATS
63 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
64 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
65 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
66 (service->stats. stat += addend)
/* No-op variants for the stats-disabled build. */
68 #define VCHIQ_STATS_INC(state, stat) ((void)0)
69 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
70 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
73 #define HANDLE_STATE_SHIFT 12
/* Translate between slot indices, slot data pointers and slot_info entries. */
75 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
76 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
77 #define SLOT_INDEX_FROM_DATA(state, data) \
78 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
80 #define SLOT_INDEX_FROM_INFO(state, info) \
81 ((unsigned int)(info - state->slot_info))
/* Map a byte position in the transmit stream to a slot-queue index. */
82 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
83 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
84 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
85 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
/* Wrap a bulk-queue position into the fixed-size per-service bulk array. */
87 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
/* Per-service message tracing: a traced service logs at TRACE level. */
89 #define SRVTRACE_LEVEL(srv) \
90 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
91 #define SRVTRACE_ENABLED(srv, lev) \
92 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
/* Argument names for vchiq_close_service_internal() / request_poll(). */
94 #define NO_CLOSE_RECVD 0
97 #define NO_RETRY_POLL 0
/* Payload carried by a VCHIQ_MSG_OPEN message (fourcc, client_id, version). */
100 struct vchiq_open_payload {
/* Payload carried by a VCHIQ_MSG_OPENACK reply. */
107 struct vchiq_openack_payload {
/* Flags controlling queue_message() locking/blocking behaviour. */
112 QMFLAGS_IS_BLOCKING = BIT(0),
113 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
114 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
118 VCHIQ_POLL_TERMINATE,
125 /* we require this for consistency between endpoints */
126 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
127 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
128 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
129 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
130 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
131 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
133 /* Run time control of log level, based on KERN_XXX level. */
134 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
135 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
136 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
/* Protects bulk_waiter lookups during bulk-completion notification. */
138 DEFINE_SPINLOCK(bulk_waiter_spinlock);
/* Protects the per-service and global quota/use counters. */
139 static DEFINE_SPINLOCK(quota_spinlock);
141 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
/* Monotonic counter mixed into newly allocated service handles. */
142 static unsigned int handle_seq;
/* Printable names for service states, callback reasons and connection states. */
144 static const char *const srvstate_names[] = {
157 static const char *const reason_names[] = {
161 "BULK_TRANSMIT_DONE",
163 "BULK_TRANSMIT_ABORTED",
164 "BULK_RECEIVE_ABORTED"
167 static const char *const conn_state_names[] = {
/* Forward declaration - defined later in this file. */
180 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
/* Map a VCHIQ_MSG_* type code to its printable name, for log messages. */
182 static const char *msg_type_str(unsigned int msg_type)
185 case VCHIQ_MSG_PADDING: return "PADDING";
186 case VCHIQ_MSG_CONNECT: return "CONNECT";
187 case VCHIQ_MSG_OPEN: return "OPEN";
188 case VCHIQ_MSG_OPENACK: return "OPENACK";
189 case VCHIQ_MSG_CLOSE: return "CLOSE";
190 case VCHIQ_MSG_DATA: return "DATA";
191 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
192 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
193 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
194 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
195 case VCHIQ_MSG_PAUSE: return "PAUSE";
196 case VCHIQ_MSG_RESUME: return "RESUME";
197 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
198 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
199 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
/* Transition a service to @newstate, logging the old->new state names. */
205 vchiq_set_service_state(struct vchiq_service *service, int newstate)
207 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
208 service->state->id, service->localport,
209 srvstate_names[service->srvstate],
210 srvstate_names[newstate]);
211 service->srvstate = newstate;
/*
 * Look up a service by handle and take a reference on it.
 * Validates that the service is live (not FREE) and that the handle still
 * matches (handles are recycled); kref_get_unless_zero() guards against
 * racing with the final put. On failure the invalid handle is logged.
 */
214 struct vchiq_service *
215 find_service_by_handle(unsigned int handle)
217 struct vchiq_service *service;
220 service = handle_to_service(handle);
221 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
222 service->handle == handle &&
223 kref_get_unless_zero(&service->ref_count)) {
/* Hand the RCU-protected pointer out of the read-side critical section. */
224 service = rcu_pointer_handoff(service);
229 vchiq_log_info(vchiq_core_log_level,
230 "Invalid service handle 0x%x", handle);
/*
 * Look up a live service by local port number and take a reference.
 * Bounds-checks the port against VCHIQ_PORT_MAX before indexing.
 */
234 struct vchiq_service *
235 find_service_by_port(struct vchiq_state *state, int localport)
238 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
239 struct vchiq_service *service;
242 service = rcu_dereference(state->services[localport]);
243 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
244 kref_get_unless_zero(&service->ref_count)) {
245 service = rcu_pointer_handoff(service);
251 vchiq_log_info(vchiq_core_log_level,
252 "Invalid port %d", localport);
/*
 * As find_service_by_handle(), but additionally requires the service to
 * belong to @instance.
 */
256 struct vchiq_service *
257 find_service_for_instance(struct vchiq_instance *instance,
260 struct vchiq_service *service;
263 service = handle_to_service(handle);
264 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
265 service->handle == handle &&
266 service->instance == instance &&
267 kref_get_unless_zero(&service->ref_count)) {
268 service = rcu_pointer_handoff(service);
273 vchiq_log_info(vchiq_core_log_level,
274 "Invalid service handle 0x%x", handle);
/*
 * Variant used during teardown: accepts services in the FREE or CLOSED
 * state (still matching handle and instance) and takes a reference.
 */
278 struct vchiq_service *
279 find_closed_service_for_instance(struct vchiq_instance *instance,
282 struct vchiq_service *service;
285 service = handle_to_service(handle);
287 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
288 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
289 service->handle == handle &&
290 service->instance == instance &&
291 kref_get_unless_zero(&service->ref_count)) {
292 service = rcu_pointer_handoff(service);
297 vchiq_log_info(vchiq_core_log_level,
298 "Invalid service handle 0x%x", handle);
/*
 * Scan the service table from *pidx for the next non-FREE service owned by
 * @instance. Does NOT take a reference - callers must hold rcu_read_lock()
 * (or use next_service_by_instance() below, which takes the reference).
 */
302 struct vchiq_service *
303 __next_service_by_instance(struct vchiq_state *state,
304 struct vchiq_instance *instance,
307 struct vchiq_service *service = NULL;
310 while (idx < state->unused_service) {
311 struct vchiq_service *srv;
313 srv = rcu_dereference(state->services[idx]);
315 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
316 srv->instance == instance) {
/*
 * Referenced-counted wrapper around __next_service_by_instance(): returns
 * the next service for @instance with a reference held, or skips services
 * whose refcount has already dropped to zero.
 */
326 struct vchiq_service *
327 next_service_by_instance(struct vchiq_state *state,
328 struct vchiq_instance *instance,
331 struct vchiq_service *service;
335 service = __next_service_by_instance(state, instance, pidx);
338 if (kref_get_unless_zero(&service->ref_count)) {
339 service = rcu_pointer_handoff(service);
/* Take an additional reference on @service; warns (and bails) on NULL. */
348 lock_service(struct vchiq_service *service)
351 WARN(1, "%s service is NULL\n", __func__);
354 kref_get(&service->ref_count);
/*
 * kref release callback: runs when the last reference is dropped.
 * Clears the service's slot in the state table, invokes the user-supplied
 * userdata destructor (if any), and frees the service after an RCU grace
 * period so concurrent RCU readers stay safe.
 */
357 static void service_release(struct kref *kref)
359 struct vchiq_service *service =
360 container_of(kref, struct vchiq_service, ref_count);
361 struct vchiq_state *state = service->state;
/* Only a FREE service may reach its final put. */
363 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
364 rcu_assign_pointer(state->services[service->localport], NULL);
365 if (service->userdata_term)
366 service->userdata_term(service->base.userdata);
367 kfree_rcu(service, rcu);
/* Drop a reference on @service; frees it via service_release() at zero. */
/* NOTE(review): WARN text differs from lock_service's ("%s:" vs "%s") - harmless inconsistency. */
371 unlock_service(struct vchiq_service *service)
374 WARN(1, "%s: service is NULL\n", __func__);
377 kref_put(&service->ref_count, service_release);
/* Return the client_id recorded for @handle, or 0 if the handle is stale. */
381 vchiq_get_client_id(unsigned int handle)
383 struct vchiq_service *service;
387 service = handle_to_service(handle);
388 id = service ? service->client_id : 0;
/* Return the userdata pointer registered for @handle, or NULL if stale. */
394 vchiq_get_service_userdata(unsigned int handle)
397 struct vchiq_service *service;
400 service = handle_to_service(handle);
401 userdata = service ? service->base.userdata : NULL;
405 EXPORT_SYMBOL(vchiq_get_service_userdata);
/*
 * Mark @service as closing and wake anything that might be blocked on it.
 * @sh_thread is non-zero when called from the slot handler thread, in which
 * case the slot_mutex handshake is skipped while a PAUSE is in flight (the
 * slot handler itself holds that mutex until resume - see comment below).
 * The lock/unlock pairs are pure synchronisation barriers: they wait for
 * any thread currently inside the protected region to leave it.
 */
408 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
410 struct vchiq_state *state = service->state;
411 struct vchiq_service_quota *quota;
413 service->closing = 1;
415 /* Synchronise with other threads. */
416 mutex_lock(&state->recycle_mutex);
417 mutex_unlock(&state->recycle_mutex);
418 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
420 * If we're pausing then the slot_mutex is held until resume
421 * by the slot handler. Therefore don't try to acquire this
422 * mutex if we're the slot handler and in the pause sent state.
423 * We don't need to in this case anyway.
425 mutex_lock(&state->slot_mutex);
426 mutex_unlock(&state->slot_mutex);
429 /* Unblock any sending thread. */
430 quota = &state->service_quotas[service->localport];
431 complete(&quota->quota_event);
/* Public wrapper: mark a service closing from a non-slot-handler context. */
435 mark_service_closing(struct vchiq_service *service)
437 mark_service_closing_internal(service, 0);
/*
 * Invoke the service's user callback for @reason, logging the call.
 * A VCHIQ_ERROR return from the callback is deliberately downgraded to
 * VCHIQ_SUCCESS (with a warning) so one misbehaving client cannot stall
 * core processing; on any non-MESSAGE_AVAILABLE reason the header is
 * released on the callback's behalf.
 */
440 static inline enum vchiq_status
441 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
442 struct vchiq_header *header, void *bulk_userdata)
444 enum vchiq_status status;
446 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
447 service->state->id, service->localport, reason_names[reason],
448 header, bulk_userdata);
449 status = service->base.callback(reason, header, service->handle,
451 if (status == VCHIQ_ERROR) {
452 vchiq_log_warning(vchiq_core_log_level,
453 "%d: ignoring ERROR from callback to service %x",
454 service->state->id, service->handle);
455 status = VCHIQ_SUCCESS;
458 if (reason != VCHIQ_MESSAGE_AVAILABLE)
459 vchiq_release_message(service->handle, header);
/*
 * Transition the connection state, log the old->new names, and notify the
 * platform layer of the change.
 */
465 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
467 enum vchiq_connstate oldstate = state->conn_state;
469 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
470 conn_state_names[oldstate],
471 conn_state_names[newstate]);
472 state->conn_state = newstate;
473 vchiq_platform_conn_state_changed(state, oldstate, newstate);
/* Initialise the wait queue backing a shared remote_event. */
477 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
481 * Don't clear the 'fired' flag because it may already have been set
484 init_waitqueue_head(wq);
488 * All the event waiting routines in VCHIQ used a custom semaphore
489 * implementation that filtered most signals. This achieved a behaviour similar
490 * to the "killable" family of functions. While cleaning up this code all the
491 * routines were switched to the "interruptible" family of functions, as the
492 * former was deemed unjustified and the use of "killable" set all VCHIQ's
493 * threads in D state.
/* Block until the event fires; returns early if interrupted by a signal. */
496 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
501 if (wait_event_interruptible(*wq, event->fired)) {
/* Mark the event fired and wake local waiters (no doorbell to the peer). */
514 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
/* If the event has fired while a waiter is armed, deliver it locally. */
522 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
524 if (event->fired && event->armed)
525 remote_event_signal_local(wq, event);
/* Poll all four local events: sync trigger/release, trigger and recycle. */
529 remote_event_pollall(struct vchiq_state *state)
531 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
532 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
533 remote_event_poll(&state->trigger_event, &state->local->trigger);
534 remote_event_poll(&state->recycle_event, &state->local->recycle);
538 * Round up message sizes so that any space at the end of a slot is always big
539 * enough for a header. This relies on header size being a power of two, which
540 * has been verified earlier by a static assertion.
/* Returns the slot-stream stride for a payload of @size bytes. */
544 calc_stride(size_t size)
546 /* Allow room for the header */
547 size += sizeof(struct vchiq_header);
/* Round up to a multiple of sizeof(struct vchiq_header) (a power of two). */
550 return (size + sizeof(struct vchiq_header) - 1) &
551 ~(sizeof(struct vchiq_header) - 1);
554 /* Called by the slot handler thread */
/*
 * Find a local service able to accept an incoming OPEN for @fourcc:
 * either one in the LISTENING state, or an OPEN one whose remote port is
 * still unassigned. A reference is taken on the returned service.
 */
555 static struct vchiq_service *
556 get_listening_service(struct vchiq_state *state, int fourcc)
560 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
563 for (i = 0; i < state->unused_service; i++) {
564 struct vchiq_service *service;
566 service = rcu_dereference(state->services[i]);
568 service->public_fourcc == fourcc &&
569 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
570 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
571 service->remoteport == VCHIQ_PORT_FREE)) &&
572 kref_get_unless_zero(&service->ref_count)) {
573 service = rcu_pointer_handoff(service);
582 /* Called by the slot handler thread */
/*
 * Find the OPEN local service connected to remote port @port, taking a
 * reference on it.
 */
583 static struct vchiq_service *
584 get_connected_service(struct vchiq_state *state, unsigned int port)
589 for (i = 0; i < state->unused_service; i++) {
590 struct vchiq_service *service =
591 rcu_dereference(state->services[i]);
593 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
594 service->remoteport == port &&
595 kref_get_unless_zero(&service->ref_count)) {
596 service = rcu_pointer_handoff(service);
/*
 * Queue a poll request of @poll_type against @service and kick the slot
 * handler. Both the per-service poll_flags and the global poll_services
 * bitmap are updated with lock-free atomic cmpxchg retry loops so this is
 * safe from any context that may race with the slot handler.
 */
606 request_poll(struct vchiq_state *state, struct vchiq_service *service,
616 value = atomic_read(&service->poll_flags);
617 } while (atomic_cmpxchg(&service->poll_flags, value,
618 value | BIT(poll_type)) != value);
/* Record which 32-service group contains this service. */
620 index = BITSET_WORD(service->localport);
622 value = atomic_read(&state->poll_services[index]);
623 } while (atomic_cmpxchg(&state->poll_services[index],
624 value, value | BIT(service->localport & 0x1f)) != value);
627 state->poll_needed = 1;
630 /* ... and ensure the slot handler runs. */
631 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
635 * Called from queue_message, by the slot handler and application threads,
636 * with slot_mutex held
/*
 * Reserve @space bytes in the transmit stream and return a header pointer
 * into the current slot, or NULL if no space is available (non-blocking
 * path, or the blocking wait was interrupted). If the request does not fit
 * in the current slot, the remainder is padded out and a fresh slot is
 * taken from the slot queue - blocking for one if @is_blocking.
 */
638 static struct vchiq_header *
639 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
641 struct vchiq_shared_state *local = state->local;
642 int tx_pos = state->local_tx_pos;
643 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
645 if (space > slot_space) {
646 struct vchiq_header *header;
647 /* Fill the remaining space with padding */
648 WARN_ON(!state->tx_data);
649 header = (struct vchiq_header *)
650 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
651 header->msgid = VCHIQ_MSGID_PADDING;
652 header->size = slot_space - sizeof(struct vchiq_header);
654 tx_pos += slot_space;
657 /* If necessary, get the next slot. */
658 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
661 /* If there is no free slot... */
663 if (!try_wait_for_completion(&state->slot_available_event)) {
664 /* ...wait for one. */
666 VCHIQ_STATS_INC(state, slot_stalls);
668 /* But first, flush through the last slot. */
669 state->local_tx_pos = tx_pos;
670 local->tx_pos = tx_pos;
/* Ring the peer's doorbell so it starts consuming (and freeing) slots. */
671 remote_event_signal(&state->remote->trigger);
674 (wait_for_completion_interruptible(
675 &state->slot_available_event)))
676 return NULL; /* No space available */
/* Completion fired but the queue shows no slot: rearm and report. */
679 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
680 complete(&state->slot_available_event);
681 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
685 slot_index = local->slot_queue[
686 SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
688 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
691 state->local_tx_pos = tx_pos + space;
693 return (struct vchiq_header *)(state->tx_data +
694 (tx_pos & VCHIQ_SLOT_MASK));
697 /* Called by the recycle thread. */
/*
 * Walk the slots the peer has finished with, unwind the per-service and
 * global quota accounting for every DATA message they contained, and return
 * the slots to the available queue. @service_found is a caller-supplied
 * scratch bitmap (@length bytes) recording which services were seen in the
 * current slot, so each service's slot_use_count is decremented only once
 * per slot.
 *
 * Fix applied (review): several locking/completion calls had been corrupted
 * by an HTML-entity mangling of '&quot...' sequences - 'spin_lock("a_spinlock)'
 * and 'complete("a->quota_event)' are restored to their only syntactically
 * valid forms, '&quota_spinlock' / '&quota->quota_event', matching the
 * intact uses elsewhere in this file (e.g. the data_quota_event path below).
 */
699 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
702 struct vchiq_shared_state *local = state->local;
703 int slot_queue_available;
706 * Find slots which have been freed by the other side, and return them
707 * to the available queue.
709 slot_queue_available = state->slot_queue_available;
712 * Use a memory barrier to ensure that any state that may have been
713 * modified by another thread is not masked by stale prefetched
718 while (slot_queue_available != local->slot_queue_recycle) {
720 int slot_index = local->slot_queue[slot_queue_available &
721 VCHIQ_SLOT_QUEUE_MASK];
722 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
725 slot_queue_available++;
727 * Beware of the address dependency - data is calculated
728 * using an index written by the other side.
732 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
733 state->id, slot_index, data,
734 local->slot_queue_recycle, slot_queue_available);
736 /* Initialise the bitmask for services which have used this slot */
737 memset(service_found, 0, length);
/* Walk every message header packed into this slot. */
741 while (pos < VCHIQ_SLOT_SIZE) {
742 struct vchiq_header *header =
743 (struct vchiq_header *)(data + pos);
744 int msgid = header->msgid;
746 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
747 int port = VCHIQ_MSG_SRCPORT(msgid);
748 struct vchiq_service_quota *quota =
749 &state->service_quotas[port];
752 spin_lock(&quota_spinlock);
753 count = quota->message_use_count;
755 quota->message_use_count = count - 1;
756 spin_unlock(&quota_spinlock);
758 if (count == quota->message_quota) {
760 * Signal the service that it
761 * has dropped below its quota
763 complete(&quota->quota_event);
764 } else if (count == 0) {
765 vchiq_log_error(vchiq_core_log_level,
766 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
768 quota->message_use_count,
769 header, msgid, header->msgid,
771 WARN(1, "invalid message use count\n");
773 if (!BITSET_IS_SET(service_found, port)) {
774 /* Set the found bit for this service */
775 BITSET_SET(service_found, port);
777 spin_lock(&quota_spinlock);
778 count = quota->slot_use_count;
780 quota->slot_use_count =
782 spin_unlock(&quota_spinlock);
786 * Signal the service in case
787 * it has dropped below its quota
789 complete(&quota->quota_event);
791 vchiq_core_log_level,
792 "%d: pfq:%d %x@%pK - slot_use->%d",
794 header->size, header,
798 vchiq_core_log_level,
799 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
801 msgid, header->msgid,
803 WARN(1, "bad slot use count\n");
/* Advance by the padded stride of this message. */
810 pos += calc_stride(header->size);
811 if (pos > VCHIQ_SLOT_SIZE) {
812 vchiq_log_error(vchiq_core_log_level,
813 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
814 pos, header, msgid, header->msgid,
816 WARN(1, "invalid slot position\n");
/* Unwind the global data-quota accounting for this slot. */
823 spin_lock(&quota_spinlock);
824 count = state->data_use_count;
826 state->data_use_count = count - 1;
827 spin_unlock(&quota_spinlock);
828 if (count == state->data_quota)
829 complete(&state->data_quota_event);
833 * Don't allow the slot to be reused until we are no
834 * longer interested in it.
838 state->slot_queue_available = slot_queue_available;
839 complete(&state->slot_available_event);
/*
 * Trivial copy callback used when the caller's payload is a plain buffer:
 * copies @maxsize bytes at @offset from @context into @dest.
 * NOTE(review): arithmetic on the void pointers relies on the GNU C
 * extension treating sizeof(void) as 1 - fine for kernel builds.
 */
844 memcpy_copy_callback(
845 void *context, void *dest,
846 size_t offset, size_t maxsize)
848 memcpy(dest + offset, context + offset, maxsize);
/*
 * Pump @copy_callback repeatedly until @size bytes have been gathered into
 * @dest. A negative callback result is propagated as an error; a zero
 * result ends the copy early; a result larger than the remaining space is
 * treated as an overrun.
 */
854 ssize_t (*copy_callback)(void *context, void *dest,
855 size_t offset, size_t maxsize),
863 ssize_t callback_result;
864 size_t max_bytes = size - pos;
867 copy_callback(context, dest + pos,
870 if (callback_result < 0)
871 return callback_result;
873 if (!callback_result)
876 if (callback_result > max_bytes)
879 pos += callback_result;
885 /* Called by the slot handler and application threads */
/*
 * Queue a message of @type for transmission to the peer.
 * For DATA messages the sender is throttled twice: against the global
 * data quota (data_use_count/data_quota) and against the per-service
 * message and slot quotas, sleeping interruptibly on the relevant
 * completion when a quota is exhausted. The payload is gathered via
 * @copy_callback. @flags (QMFLAGS_*) control whether slot_mutex is
 * taken/released here and whether reserve_space() may block.
 *
 * Fix applied (review): locking/completion calls corrupted by HTML-entity
 * mangling of '&quot...' ('spin_lock("a_spinlock)', '"a->quota_event') are
 * restored to '&quota_spinlock' / '&quota->quota_event', the only forms
 * consistent with the intact quota code elsewhere in this file.
 */
886 static enum vchiq_status
887 queue_message(struct vchiq_state *state, struct vchiq_service *service,
889 ssize_t (*copy_callback)(void *context, void *dest,
890 size_t offset, size_t maxsize),
891 void *context, size_t size, int flags)
893 struct vchiq_shared_state *local;
894 struct vchiq_service_quota *quota = NULL;
895 struct vchiq_header *header;
896 int type = VCHIQ_MSG_TYPE(msgid);
900 local = state->local;
902 stride = calc_stride(size);
904 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
906 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
907 mutex_lock_killable(&state->slot_mutex))
910 if (type == VCHIQ_MSG_DATA) {
914 WARN(1, "%s: service is NULL\n", __func__);
915 mutex_unlock(&state->slot_mutex);
/* DATA messages may not bypass the slot_mutex handling. */
919 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
920 QMFLAGS_NO_MUTEX_UNLOCK));
922 if (service->closing) {
923 /* The service has been closed */
924 mutex_unlock(&state->slot_mutex);
928 quota = &state->service_quotas[service->localport];
930 spin_lock(&quota_spinlock);
933 * Ensure this service doesn't use more than its quota of
936 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
937 state->local_tx_pos + stride - 1);
940 * Ensure data messages don't use more than their quota of
943 while ((tx_end_index != state->previous_data_index) &&
944 (state->data_use_count == state->data_quota)) {
945 VCHIQ_STATS_INC(state, data_stalls);
946 spin_unlock(&quota_spinlock);
947 mutex_unlock(&state->slot_mutex);
/* Sleep until the recycle thread frees some data quota. */
949 if (wait_for_completion_interruptible(
950 &state->data_quota_event))
953 mutex_lock(&state->slot_mutex);
954 spin_lock(&quota_spinlock);
955 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
956 state->local_tx_pos + stride - 1);
957 if ((tx_end_index == state->previous_data_index) ||
958 (state->data_use_count < state->data_quota)) {
959 /* Pass the signal on to other waiters */
960 complete(&state->data_quota_event);
/* Now throttle against this service's own message/slot quotas. */
965 while ((quota->message_use_count == quota->message_quota) ||
966 ((tx_end_index != quota->previous_tx_index) &&
967 (quota->slot_use_count == quota->slot_quota))) {
968 spin_unlock(&quota_spinlock);
969 vchiq_log_trace(vchiq_core_log_level,
970 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
971 state->id, service->localport,
972 msg_type_str(type), size,
973 quota->message_use_count,
974 quota->slot_use_count);
975 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
976 mutex_unlock(&state->slot_mutex);
977 if (wait_for_completion_interruptible(
978 &quota->quota_event))
980 if (service->closing)
982 if (mutex_lock_killable(&state->slot_mutex))
984 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
985 /* The service has been closed */
986 mutex_unlock(&state->slot_mutex);
989 spin_lock(&quota_spinlock);
990 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
991 state->local_tx_pos + stride - 1);
994 spin_unlock(&quota_spinlock);
997 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1001 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1003 * In the event of a failure, return the mutex to the
1006 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1007 mutex_unlock(&state->slot_mutex);
1011 if (type == VCHIQ_MSG_DATA) {
1012 ssize_t callback_result;
1016 vchiq_log_info(vchiq_core_log_level,
1017 "%d: qm %s@%pK,%zx (%d->%d)",
1018 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1019 header, size, VCHIQ_MSG_SRCPORT(msgid),
1020 VCHIQ_MSG_DSTPORT(msgid));
1022 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1023 QMFLAGS_NO_MUTEX_UNLOCK));
/* Gather the payload into the reserved slot space. */
1026 copy_message_data(copy_callback, context,
1027 header->data, size);
1029 if (callback_result < 0) {
1030 mutex_unlock(&state->slot_mutex);
1031 VCHIQ_SERVICE_STATS_INC(service,
1036 if (SRVTRACE_ENABLED(service,
1038 vchiq_log_dump_mem("Sent", 0,
1041 (size_t)callback_result));
/* Charge this message against the service and global quotas. */
1043 spin_lock(&quota_spinlock);
1044 quota->message_use_count++;
1047 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1050 * If this transmission can't fit in the last slot used by any
1051 * service, the data_use_count must be increased.
1053 if (tx_end_index != state->previous_data_index) {
1054 state->previous_data_index = tx_end_index;
1055 state->data_use_count++;
1059 * If this isn't the same slot last used by this service,
1060 * the service's slot_use_count must be increased.
1062 if (tx_end_index != quota->previous_tx_index) {
1063 quota->previous_tx_index = tx_end_index;
1064 slot_use_count = ++quota->slot_use_count;
1069 spin_unlock(&quota_spinlock);
1072 vchiq_log_trace(vchiq_core_log_level,
1073 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1074 state->id, service->localport,
1075 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1076 slot_use_count, header);
1078 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1079 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1081 vchiq_log_info(vchiq_core_log_level,
1082 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1083 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1084 header, size, VCHIQ_MSG_SRCPORT(msgid),
1085 VCHIQ_MSG_DSTPORT(msgid));
1088 * It is assumed for now that this code path
1089 * only happens from calls inside this file.
1091 * External callers are through the vchiq_queue_message
1092 * path which always sets the type to be VCHIQ_MSG_DATA
1094 * At first glance this appears to be correct but
1095 * more review is needed.
1097 copy_message_data(copy_callback, context,
1098 header->data, size);
1100 VCHIQ_STATS_INC(state, ctrl_tx_count);
1103 header->msgid = msgid;
1104 header->size = size;
1109 svc_fourcc = service
1110 ? service->base.fourcc
1111 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1113 vchiq_log_info(SRVTRACE_LEVEL(service),
1114 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1115 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1116 VCHIQ_MSG_TYPE(msgid),
1117 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1118 VCHIQ_MSG_SRCPORT(msgid),
1119 VCHIQ_MSG_DSTPORT(msgid),
1123 /* Make sure the new header is visible to the peer. */
1126 /* Make the new tx_pos visible to the peer. */
1127 local->tx_pos = state->local_tx_pos;
1130 if (service && (type == VCHIQ_MSG_CLOSE))
1131 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1133 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1134 mutex_unlock(&state->slot_mutex);
/* Ring the peer's doorbell so it processes the new message. */
1136 remote_event_signal(&state->remote->trigger);
1138 return VCHIQ_SUCCESS;
1141 /* Called by the slot handler and application threads */
/*
 * Queue a message through the single-entry synchronous channel.
 * Serialised by state->sync_mutex (held across send except for RESUME,
 * and deliberately retained for PAUSE - see the unlock at the end); waits
 * for the peer to release the sync slot before writing the new message,
 * then signals the peer's sync_trigger.
 *
 * Fix applied (review): the copy-failure error path unlocked
 * state->slot_mutex, but this function only ever acquires
 * state->sync_mutex (see the mutex_lock_killable above and the unlock at
 * the end) - unlocking the wrong, unheld mutex. Changed to sync_mutex.
 */
1142 static enum vchiq_status
1143 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1145 ssize_t (*copy_callback)(void *context, void *dest,
1146 size_t offset, size_t maxsize),
1147 void *context, int size, int is_blocking)
1149 struct vchiq_shared_state *local;
1150 struct vchiq_header *header;
1151 ssize_t callback_result;
1153 local = state->local;
1155 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1156 mutex_lock_killable(&state->sync_mutex))
/* Wait for the peer to hand the sync slot back to us. */
1159 remote_event_wait(&state->sync_release_event, &local->sync_release);
1163 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1167 int oldmsgid = header->msgid;
1169 if (oldmsgid != VCHIQ_MSGID_PADDING)
1170 vchiq_log_error(vchiq_core_log_level,
1171 "%d: qms - msgid %x, not PADDING",
1172 state->id, oldmsgid);
1175 vchiq_log_info(vchiq_sync_log_level,
1176 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1177 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1178 header, size, VCHIQ_MSG_SRCPORT(msgid),
1179 VCHIQ_MSG_DSTPORT(msgid));
1182 copy_message_data(copy_callback, context,
1183 header->data, size);
1185 if (callback_result < 0) {
1186 mutex_unlock(&state->sync_mutex);
1187 VCHIQ_SERVICE_STATS_INC(service,
1193 if (SRVTRACE_ENABLED(service,
1195 vchiq_log_dump_mem("Sent", 0,
1198 (size_t)callback_result));
1200 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1201 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1203 VCHIQ_STATS_INC(state, ctrl_tx_count);
1206 header->size = size;
1207 header->msgid = msgid;
1209 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1212 svc_fourcc = service
1213 ? service->base.fourcc
1214 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1216 vchiq_log_trace(vchiq_sync_log_level,
1217 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1218 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1219 VCHIQ_MSG_TYPE(msgid),
1220 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1221 VCHIQ_MSG_SRCPORT(msgid),
1222 VCHIQ_MSG_DSTPORT(msgid),
/* Notify the peer that a sync message is ready. */
1226 remote_event_signal(&state->remote->sync_trigger);
/* For PAUSE the sync_mutex stays held until the pause completes. */
1228 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1229 mutex_unlock(&state->sync_mutex);
1231 return VCHIQ_SUCCESS;
/* Record a claim against a received slot (balanced by release_slot()). */
1235 claim_slot(struct vchiq_slot_info *slot)
/*
 * Drop a claim on a received slot. If @header is non-NULL its CLAIMED flag
 * is cleared first (guarding against double release); when the release
 * count catches up with the use count the slot is appended to the peer's
 * recycle queue and the peer's recycle event is signalled.
 * Serialised by state->recycle_mutex.
 */
1241 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1242 struct vchiq_header *header, struct vchiq_service *service)
1244 mutex_lock(&state->recycle_mutex);
1247 int msgid = header->msgid;
1249 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1250 (service && service->closing)) {
1251 mutex_unlock(&state->recycle_mutex);
1255 /* Rewrite the message header to prevent a double release */
1256 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1259 slot_info->release_count++;
1261 if (slot_info->release_count == slot_info->use_count) {
1262 int slot_queue_recycle;
1263 /* Add to the freed queue */
1266 * A read barrier is necessary here to prevent speculative
1267 * fetches of remote->slot_queue_recycle from overtaking the
1272 slot_queue_recycle = state->remote->slot_queue_recycle;
1273 state->remote->slot_queue[slot_queue_recycle &
1274 VCHIQ_SLOT_QUEUE_MASK] =
1275 SLOT_INDEX_FROM_INFO(state, slot_info);
1276 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1277 vchiq_log_info(vchiq_core_log_level,
1278 "%d: %s %d - recycle->%x", state->id, __func__,
1279 SLOT_INDEX_FROM_INFO(state, slot_info),
1280 state->remote->slot_queue_recycle);
1283 * A write barrier is necessary, but remote_event_signal
1286 remote_event_signal(&state->remote->recycle);
1289 mutex_unlock(&state->recycle_mutex);
/*
 * Translate a completed bulk transfer into the callback reason code:
 * direction (TX/RX) crossed with completed vs aborted (actual == -1).
 */
1292 static inline enum vchiq_reason
1293 get_bulk_reason(struct vchiq_bulk *bulk)
1295 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1296 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1297 return VCHIQ_BULK_TRANSMIT_ABORTED;
1299 return VCHIQ_BULK_TRANSMIT_DONE;
1302 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1303 return VCHIQ_BULK_RECEIVE_ABORTED;
1305 return VCHIQ_BULK_RECEIVE_DONE;
1308 /* Called by the slot handler - don't hold the bulk mutex */
/*
 * Deliver completion notifications for finished bulk transfers on @queue
 * (either service->bulk_tx or ->bulk_rx). Updates stats, then notifies the
 * originator per the bulk's mode: BLOCKING wakes the parked bulk_waiter,
 * CALLBACK invokes the service callback. If a callback asks for a retry,
 * processing stops and a poll is requested so the remainder is retried
 * later from the slot handler.
 */
1309 static enum vchiq_status
1310 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1313 enum vchiq_status status = VCHIQ_SUCCESS;
1315 vchiq_log_trace(vchiq_core_log_level,
1316 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1317 service->state->id, service->localport,
1318 (queue == &service->bulk_tx) ? 't' : 'r',
1319 queue->process, queue->remote_notify, queue->remove);
1321 queue->remote_notify = queue->process;
1323 while (queue->remove != queue->remote_notify) {
1324 struct vchiq_bulk *bulk =
1325 &queue->bulks[BULK_INDEX(queue->remove)];
1328 * Only generate callbacks for non-dummy bulk
1329 * requests, and non-terminated services
1331 if (bulk->data && service->instance) {
1332 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1333 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1334 VCHIQ_SERVICE_STATS_INC(service,
1336 VCHIQ_SERVICE_STATS_ADD(service,
1340 VCHIQ_SERVICE_STATS_INC(service,
1342 VCHIQ_SERVICE_STATS_ADD(service,
1347 VCHIQ_SERVICE_STATS_INC(service,
1348 bulk_aborted_count);
1350 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1351 struct bulk_waiter *waiter;
/* The spinlock guards against the waiter being freed concurrently. */
1353 spin_lock(&bulk_waiter_spinlock);
1354 waiter = bulk->userdata;
1356 waiter->actual = bulk->actual;
1357 complete(&waiter->event);
1359 spin_unlock(&bulk_waiter_spinlock);
1360 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1361 enum vchiq_reason reason =
1362 get_bulk_reason(bulk);
1363 status = make_service_callback(service,
1364 reason, NULL, bulk->userdata);
1365 if (status == VCHIQ_RETRY)
1371 complete(&service->bulk_remove_event);
1374 status = VCHIQ_SUCCESS;
1376 if (status == VCHIQ_RETRY)
1377 request_poll(service->state, service,
1378 (queue == &service->bulk_tx) ?
1379 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1385 poll_services_of_group(struct vchiq_state *state, int group)
1387 u32 flags = atomic_xchg(&state->poll_services[group], 0);
1390 for (i = 0; flags; i++) {
1391 struct vchiq_service *service;
1394 if ((flags & BIT(i)) == 0)
1397 service = find_service_by_port(state, (group << 5) + i);
1403 service_flags = atomic_xchg(&service->poll_flags, 0);
1404 if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1405 vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1406 state->id, service->localport,
1407 service->remoteport);
1410 * Make it look like a client, because
1411 * it must be removed and not left in
1412 * the LISTENING state.
1414 service->public_fourcc = VCHIQ_FOURCC_INVALID;
1416 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
1418 request_poll(state, service, VCHIQ_POLL_REMOVE);
1419 } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1420 vchiq_log_info(vchiq_core_log_level,
1421 "%d: ps - terminate %d<->%d",
1422 state->id, service->localport,
1423 service->remoteport);
1424 if (vchiq_close_service_internal(
1425 service, NO_CLOSE_RECVD) !=
1427 request_poll(state, service,
1428 VCHIQ_POLL_TERMINATE);
1430 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1431 notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1432 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1433 notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1434 unlock_service(service);
1438 /* Called by the slot handler thread */
1440 poll_services(struct vchiq_state *state)
1444 for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1445 poll_services_of_group(state, group);
1448 /* Called with the bulk_mutex held */
1450 abort_outstanding_bulks(struct vchiq_service *service,
1451 struct vchiq_bulk_queue *queue)
1453 int is_tx = (queue == &service->bulk_tx);
1455 vchiq_log_trace(vchiq_core_log_level,
1456 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1457 service->state->id, service->localport, is_tx ? 't' : 'r',
1458 queue->local_insert, queue->remote_insert, queue->process);
1460 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1461 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1463 while ((queue->process != queue->local_insert) ||
1464 (queue->process != queue->remote_insert)) {
1465 struct vchiq_bulk *bulk =
1466 &queue->bulks[BULK_INDEX(queue->process)];
1468 if (queue->process == queue->remote_insert) {
1469 /* fabricate a matching dummy bulk */
1470 bulk->remote_data = NULL;
1471 bulk->remote_size = 0;
1472 queue->remote_insert++;
1475 if (queue->process != queue->local_insert) {
1476 vchiq_complete_bulk(bulk);
1478 vchiq_log_info(SRVTRACE_LEVEL(service),
1479 "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1480 is_tx ? "Send Bulk to" : "Recv Bulk from",
1481 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1482 service->remoteport,
1486 /* fabricate a matching dummy bulk */
1489 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1490 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1492 queue->local_insert++;
1500 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1502 struct vchiq_service *service = NULL;
1504 unsigned int localport, remoteport;
1506 msgid = header->msgid;
1507 size = header->size;
1508 localport = VCHIQ_MSG_DSTPORT(msgid);
1509 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1510 if (size >= sizeof(struct vchiq_open_payload)) {
1511 const struct vchiq_open_payload *payload =
1512 (struct vchiq_open_payload *)header->data;
1513 unsigned int fourcc;
1515 fourcc = payload->fourcc;
1516 vchiq_log_info(vchiq_core_log_level,
1517 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1518 state->id, header, localport,
1519 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1521 service = get_listening_service(state, fourcc);
1524 /* A matching service exists */
1525 short version = payload->version;
1526 short version_min = payload->version_min;
1528 if ((service->version < version_min) ||
1529 (version < service->version_min)) {
1530 /* Version mismatch */
1531 vchiq_loud_error_header();
1532 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1533 "version mismatch - local (%d, min %d)"
1534 " vs. remote (%d, min %d)",
1535 state->id, service->localport,
1536 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1537 service->version, service->version_min,
1538 version, version_min);
1539 vchiq_loud_error_footer();
1540 unlock_service(service);
1544 service->peer_version = version;
1546 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1547 struct vchiq_openack_payload ack_payload = {
1551 if (state->version_common <
1552 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1555 /* Acknowledge the OPEN */
1556 if (service->sync) {
1557 if (queue_message_sync(
1564 memcpy_copy_callback,
1566 sizeof(ack_payload),
1568 goto bail_not_ready;
1570 if (queue_message(state,
1576 memcpy_copy_callback,
1578 sizeof(ack_payload),
1580 goto bail_not_ready;
1583 /* The service is now open */
1584 vchiq_set_service_state(service,
1585 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1586 : VCHIQ_SRVSTATE_OPEN);
1589 /* Success - the message has been dealt with */
1590 unlock_service(service);
1596 /* No available service, or an invalid request - send a CLOSE */
1597 if (queue_message(state, NULL,
1598 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1599 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1600 goto bail_not_ready;
1606 unlock_service(service);
1612 * parse_message() - parses a single message from the rx slot
1613 * @state: vchiq state struct
1614 * @header: message header
1616 * Context: Process context
1619 * * >= 0 - size of the parsed message payload (without header)
1620 * * -EINVAL - fatal error occurred, bail out is required
1623 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1625 struct vchiq_service *service = NULL;
1626 unsigned int localport, remoteport;
1627 int msgid, size, type, ret = -EINVAL;
1629 DEBUG_INITIALISE(state->local)
1631 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1632 msgid = header->msgid;
1633 DEBUG_VALUE(PARSE_MSGID, msgid);
1634 size = header->size;
1635 type = VCHIQ_MSG_TYPE(msgid);
1636 localport = VCHIQ_MSG_DSTPORT(msgid);
1637 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1639 if (type != VCHIQ_MSG_DATA)
1640 VCHIQ_STATS_INC(state, ctrl_rx_count);
1643 case VCHIQ_MSG_OPENACK:
1644 case VCHIQ_MSG_CLOSE:
1645 case VCHIQ_MSG_DATA:
1646 case VCHIQ_MSG_BULK_RX:
1647 case VCHIQ_MSG_BULK_TX:
1648 case VCHIQ_MSG_BULK_RX_DONE:
1649 case VCHIQ_MSG_BULK_TX_DONE:
1650 service = find_service_by_port(state, localport);
1652 ((service->remoteport != remoteport) &&
1653 (service->remoteport != VCHIQ_PORT_FREE))) &&
1655 (type == VCHIQ_MSG_CLOSE)) {
1657 * This could be a CLOSE from a client which
1658 * hadn't yet received the OPENACK - look for
1659 * the connected service
1662 unlock_service(service);
1663 service = get_connected_service(state,
1666 vchiq_log_warning(vchiq_core_log_level,
1667 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1668 state->id, msg_type_str(type),
1669 header, remoteport, localport,
1670 service->localport);
1674 vchiq_log_error(vchiq_core_log_level,
1675 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1676 state->id, msg_type_str(type),
1677 header, remoteport, localport,
1686 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1689 svc_fourcc = service
1690 ? service->base.fourcc
1691 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1692 vchiq_log_info(SRVTRACE_LEVEL(service),
1693 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1694 msg_type_str(type), type,
1695 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1696 remoteport, localport, size);
1698 vchiq_log_dump_mem("Rcvd", 0, header->data,
1702 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1703 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1704 vchiq_log_error(vchiq_core_log_level,
1705 "header %pK (msgid %x) - size %x too big for slot",
1706 header, (unsigned int)msgid,
1707 (unsigned int)size);
1708 WARN(1, "oversized for slot\n");
1712 case VCHIQ_MSG_OPEN:
1713 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1714 if (!parse_open(state, header))
1715 goto bail_not_ready;
1717 case VCHIQ_MSG_OPENACK:
1718 if (size >= sizeof(struct vchiq_openack_payload)) {
1719 const struct vchiq_openack_payload *payload =
1720 (struct vchiq_openack_payload *)
1722 service->peer_version = payload->version;
1724 vchiq_log_info(vchiq_core_log_level,
1725 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1726 state->id, header, size, remoteport, localport,
1727 service->peer_version);
1728 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1729 service->remoteport = remoteport;
1730 vchiq_set_service_state(service,
1731 VCHIQ_SRVSTATE_OPEN);
1732 complete(&service->remove_event);
1734 vchiq_log_error(vchiq_core_log_level,
1735 "OPENACK received in state %s",
1736 srvstate_names[service->srvstate]);
1739 case VCHIQ_MSG_CLOSE:
1740 WARN_ON(size != 0); /* There should be no data */
1742 vchiq_log_info(vchiq_core_log_level,
1743 "%d: prs CLOSE@%pK (%d->%d)",
1744 state->id, header, remoteport, localport);
1746 mark_service_closing_internal(service, 1);
1748 if (vchiq_close_service_internal(service,
1749 CLOSE_RECVD) == VCHIQ_RETRY)
1750 goto bail_not_ready;
1752 vchiq_log_info(vchiq_core_log_level,
1753 "Close Service %c%c%c%c s:%u d:%d",
1754 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1756 service->remoteport);
1758 case VCHIQ_MSG_DATA:
1759 vchiq_log_info(vchiq_core_log_level,
1760 "%d: prs DATA@%pK,%x (%d->%d)",
1761 state->id, header, size, remoteport, localport);
1763 if ((service->remoteport == remoteport) &&
1764 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
1765 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1766 claim_slot(state->rx_info);
1767 DEBUG_TRACE(PARSE_LINE);
1768 if (make_service_callback(service,
1769 VCHIQ_MESSAGE_AVAILABLE, header,
1770 NULL) == VCHIQ_RETRY) {
1771 DEBUG_TRACE(PARSE_LINE);
1772 goto bail_not_ready;
1774 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1775 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1778 VCHIQ_STATS_INC(state, error_count);
1781 case VCHIQ_MSG_CONNECT:
1782 vchiq_log_info(vchiq_core_log_level,
1783 "%d: prs CONNECT@%pK", state->id, header);
1784 state->version_common = ((struct vchiq_slot_zero *)
1785 state->slot_data)->version;
1786 complete(&state->connect);
1788 case VCHIQ_MSG_BULK_RX:
1789 case VCHIQ_MSG_BULK_TX:
1791 * We should never receive a bulk request from the
1792 * other side since we're not setup to perform as the
1797 case VCHIQ_MSG_BULK_RX_DONE:
1798 case VCHIQ_MSG_BULK_TX_DONE:
1799 if ((service->remoteport == remoteport) &&
1800 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1801 struct vchiq_bulk_queue *queue;
1802 struct vchiq_bulk *bulk;
1804 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1805 &service->bulk_rx : &service->bulk_tx;
1807 DEBUG_TRACE(PARSE_LINE);
1808 if (mutex_lock_killable(&service->bulk_mutex)) {
1809 DEBUG_TRACE(PARSE_LINE);
1810 goto bail_not_ready;
1812 if ((int)(queue->remote_insert -
1813 queue->local_insert) >= 0) {
1814 vchiq_log_error(vchiq_core_log_level,
1815 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1816 state->id, msg_type_str(type),
1817 header, remoteport, localport,
1818 queue->remote_insert,
1819 queue->local_insert);
1820 mutex_unlock(&service->bulk_mutex);
1823 if (queue->process != queue->remote_insert) {
1824 pr_err("%s: p %x != ri %x\n",
1827 queue->remote_insert);
1828 mutex_unlock(&service->bulk_mutex);
1829 goto bail_not_ready;
1832 bulk = &queue->bulks[
1833 BULK_INDEX(queue->remote_insert)];
1834 bulk->actual = *(int *)header->data;
1835 queue->remote_insert++;
1837 vchiq_log_info(vchiq_core_log_level,
1838 "%d: prs %s@%pK (%d->%d) %x@%pad",
1839 state->id, msg_type_str(type),
1840 header, remoteport, localport,
1841 bulk->actual, &bulk->data);
1843 vchiq_log_trace(vchiq_core_log_level,
1844 "%d: prs:%d %cx li=%x ri=%x p=%x",
1845 state->id, localport,
1846 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1848 queue->local_insert,
1849 queue->remote_insert, queue->process);
1851 DEBUG_TRACE(PARSE_LINE);
1852 WARN_ON(queue->process == queue->local_insert);
1853 vchiq_complete_bulk(bulk);
1855 mutex_unlock(&service->bulk_mutex);
1856 DEBUG_TRACE(PARSE_LINE);
1857 notify_bulks(service, queue, RETRY_POLL);
1858 DEBUG_TRACE(PARSE_LINE);
1861 case VCHIQ_MSG_PADDING:
1862 vchiq_log_trace(vchiq_core_log_level,
1863 "%d: prs PADDING@%pK,%x",
1864 state->id, header, size);
1866 case VCHIQ_MSG_PAUSE:
1867 /* If initiated, signal the application thread */
1868 vchiq_log_trace(vchiq_core_log_level,
1869 "%d: prs PAUSE@%pK,%x",
1870 state->id, header, size);
1871 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1872 vchiq_log_error(vchiq_core_log_level,
1873 "%d: PAUSE received in state PAUSED",
1877 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1878 /* Send a PAUSE in response */
1879 if (queue_message(state, NULL,
1880 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1881 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1883 goto bail_not_ready;
1885 /* At this point slot_mutex is held */
1886 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1888 case VCHIQ_MSG_RESUME:
1889 vchiq_log_trace(vchiq_core_log_level,
1890 "%d: prs RESUME@%pK,%x",
1891 state->id, header, size);
1892 /* Release the slot mutex */
1893 mutex_unlock(&state->slot_mutex);
1894 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1897 case VCHIQ_MSG_REMOTE_USE:
1898 vchiq_on_remote_use(state);
1900 case VCHIQ_MSG_REMOTE_RELEASE:
1901 vchiq_on_remote_release(state);
1903 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1907 vchiq_log_error(vchiq_core_log_level,
1908 "%d: prs invalid msgid %x@%pK,%x",
1909 state->id, msgid, header, size);
1910 WARN(1, "invalid message\n");
1919 unlock_service(service);
1924 /* Called by the slot handler thread */
1926 parse_rx_slots(struct vchiq_state *state)
1928 struct vchiq_shared_state *remote = state->remote;
1931 DEBUG_INITIALISE(state->local)
1933 tx_pos = remote->tx_pos;
1935 while (state->rx_pos != tx_pos) {
1936 struct vchiq_header *header;
1939 DEBUG_TRACE(PARSE_LINE);
1940 if (!state->rx_data) {
1943 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1944 rx_index = remote->slot_queue[
1945 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1946 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1948 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1951 * Initialise use_count to one, and increment
1952 * release_count at the end of the slot to avoid
1953 * releasing the slot prematurely.
1955 state->rx_info->use_count = 1;
1956 state->rx_info->release_count = 0;
1959 header = (struct vchiq_header *)(state->rx_data +
1960 (state->rx_pos & VCHIQ_SLOT_MASK));
1961 size = parse_message(state, header);
1965 state->rx_pos += calc_stride(size);
1967 DEBUG_TRACE(PARSE_LINE);
1969 * Perform some housekeeping when the end of the slot is
1972 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1973 /* Remove the extra reference count. */
1974 release_slot(state, state->rx_info, NULL, NULL);
1975 state->rx_data = NULL;
1980 /* Called by the slot handler thread */
1982 slot_handler_func(void *v)
1984 struct vchiq_state *state = v;
1985 struct vchiq_shared_state *local = state->local;
1987 DEBUG_INITIALISE(local)
1990 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1991 DEBUG_TRACE(SLOT_HANDLER_LINE);
1992 remote_event_wait(&state->trigger_event, &local->trigger);
1996 DEBUG_TRACE(SLOT_HANDLER_LINE);
1997 if (state->poll_needed) {
1999 state->poll_needed = 0;
2002 * Handle service polling and other rare conditions here
2003 * out of the mainline code
2005 switch (state->conn_state) {
2006 case VCHIQ_CONNSTATE_CONNECTED:
2007 /* Poll the services as requested */
2008 poll_services(state);
2011 case VCHIQ_CONNSTATE_PAUSING:
2012 if (queue_message(state, NULL,
2013 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
2015 QMFLAGS_NO_MUTEX_UNLOCK)
2017 vchiq_set_conn_state(state,
2018 VCHIQ_CONNSTATE_PAUSE_SENT);
2021 state->poll_needed = 1;
2025 case VCHIQ_CONNSTATE_RESUMING:
2026 if (queue_message(state, NULL,
2027 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
2028 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
2030 vchiq_set_conn_state(state,
2031 VCHIQ_CONNSTATE_CONNECTED);
2034 * This should really be impossible,
2035 * since the PAUSE should have flushed
2036 * through outstanding messages.
2038 vchiq_log_error(vchiq_core_log_level,
2039 "Failed to send RESUME message");
2048 DEBUG_TRACE(SLOT_HANDLER_LINE);
2049 parse_rx_slots(state);
2054 /* Called by the recycle thread */
2056 recycle_func(void *v)
2058 struct vchiq_state *state = v;
2059 struct vchiq_shared_state *local = state->local;
2063 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
2065 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
2071 remote_event_wait(&state->recycle_event, &local->recycle);
2073 process_free_queue(state, found, length);
2078 /* Called by the sync thread */
2082 struct vchiq_state *state = v;
2083 struct vchiq_shared_state *local = state->local;
2084 struct vchiq_header *header =
2085 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2086 state->remote->slot_sync);
2089 struct vchiq_service *service;
2092 unsigned int localport, remoteport;
2094 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2098 msgid = header->msgid;
2099 size = header->size;
2100 type = VCHIQ_MSG_TYPE(msgid);
2101 localport = VCHIQ_MSG_DSTPORT(msgid);
2102 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2104 service = find_service_by_port(state, localport);
2107 vchiq_log_error(vchiq_sync_log_level,
2108 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2109 state->id, msg_type_str(type),
2110 header, remoteport, localport, localport);
2111 release_message_sync(state, header);
2115 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2118 svc_fourcc = service
2119 ? service->base.fourcc
2120 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2121 vchiq_log_trace(vchiq_sync_log_level,
2122 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2124 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2125 remoteport, localport, size);
2127 vchiq_log_dump_mem("Rcvd", 0, header->data,
2132 case VCHIQ_MSG_OPENACK:
2133 if (size >= sizeof(struct vchiq_openack_payload)) {
2134 const struct vchiq_openack_payload *payload =
2135 (struct vchiq_openack_payload *)
2137 service->peer_version = payload->version;
2139 vchiq_log_info(vchiq_sync_log_level,
2140 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2141 state->id, header, size, remoteport, localport,
2142 service->peer_version);
2143 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2144 service->remoteport = remoteport;
2145 vchiq_set_service_state(service,
2146 VCHIQ_SRVSTATE_OPENSYNC);
2148 complete(&service->remove_event);
2150 release_message_sync(state, header);
2153 case VCHIQ_MSG_DATA:
2154 vchiq_log_trace(vchiq_sync_log_level,
2155 "%d: sf DATA@%pK,%x (%d->%d)",
2156 state->id, header, size, remoteport, localport);
2158 if ((service->remoteport == remoteport) &&
2159 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2160 if (make_service_callback(service,
2161 VCHIQ_MESSAGE_AVAILABLE, header,
2162 NULL) == VCHIQ_RETRY)
2163 vchiq_log_error(vchiq_sync_log_level,
2164 "synchronous callback to service %d returns VCHIQ_RETRY",
2170 vchiq_log_error(vchiq_sync_log_level,
2171 "%d: sf unexpected msgid %x@%pK,%x",
2172 state->id, msgid, header, size);
2173 release_message_sync(state, header);
2177 unlock_service(service);
2184 init_bulk_queue(struct vchiq_bulk_queue *queue)
2186 queue->local_insert = 0;
2187 queue->remote_insert = 0;
2189 queue->remote_notify = 0;
2194 get_conn_state_name(enum vchiq_connstate conn_state)
2196 return conn_state_names[conn_state];
2199 struct vchiq_slot_zero *
2200 vchiq_init_slots(void *mem_base, int mem_size)
2203 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2204 struct vchiq_slot_zero *slot_zero =
2205 (struct vchiq_slot_zero *)(mem_base + mem_align);
2206 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2207 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2209 /* Ensure there is enough memory to run an absolutely minimum system */
2210 num_slots -= first_data_slot;
2212 if (num_slots < 4) {
2213 vchiq_log_error(vchiq_core_log_level,
2214 "%s - insufficient memory %x bytes",
2215 __func__, mem_size);
2219 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2221 slot_zero->magic = VCHIQ_MAGIC;
2222 slot_zero->version = VCHIQ_VERSION;
2223 slot_zero->version_min = VCHIQ_VERSION_MIN;
2224 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2225 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2226 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2227 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
2229 slot_zero->master.slot_sync = first_data_slot;
2230 slot_zero->master.slot_first = first_data_slot + 1;
2231 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2232 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2233 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2234 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
2240 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2242 struct vchiq_shared_state *local;
2243 struct vchiq_shared_state *remote;
2244 char threadname[16];
2247 if (vchiq_states[0]) {
2248 pr_err("%s: VCHIQ state already initialized\n", __func__);
2252 local = &slot_zero->slave;
2253 remote = &slot_zero->master;
2255 if (local->initialised) {
2256 vchiq_loud_error_header();
2257 if (remote->initialised)
2258 vchiq_loud_error("local state has already been initialised");
2260 vchiq_loud_error("master/slave mismatch two slaves");
2261 vchiq_loud_error_footer();
2265 memset(state, 0, sizeof(struct vchiq_state));
2268 * initialize shared state pointers
2271 state->local = local;
2272 state->remote = remote;
2273 state->slot_data = (struct vchiq_slot *)slot_zero;
2276 * initialize events and mutexes
2279 init_completion(&state->connect);
2280 mutex_init(&state->mutex);
2281 mutex_init(&state->slot_mutex);
2282 mutex_init(&state->recycle_mutex);
2283 mutex_init(&state->sync_mutex);
2284 mutex_init(&state->bulk_transfer_mutex);
2286 init_completion(&state->slot_available_event);
2287 init_completion(&state->slot_remove_event);
2288 init_completion(&state->data_quota_event);
2290 state->slot_queue_available = 0;
2292 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2293 struct vchiq_service_quota *quota = &state->service_quotas[i];
2294 init_completion("a->quota_event);
2297 for (i = local->slot_first; i <= local->slot_last; i++) {
2298 local->slot_queue[state->slot_queue_available] = i;
2299 state->slot_queue_available++;
2300 complete(&state->slot_available_event);
2303 state->default_slot_quota = state->slot_queue_available/2;
2304 state->default_message_quota =
2305 min((unsigned short)(state->default_slot_quota * 256),
2306 (unsigned short)~0);
2308 state->previous_data_index = -1;
2309 state->data_use_count = 0;
2310 state->data_quota = state->slot_queue_available - 1;
2312 remote_event_create(&state->trigger_event, &local->trigger);
2314 remote_event_create(&state->recycle_event, &local->recycle);
2315 local->slot_queue_recycle = state->slot_queue_available;
2316 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2317 remote_event_create(&state->sync_release_event, &local->sync_release);
2319 /* At start-of-day, the slot is empty and available */
2320 ((struct vchiq_header *)
2321 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2322 VCHIQ_MSGID_PADDING;
2323 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2325 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2327 ret = vchiq_platform_init_state(state);
2332 * bring up slot handler thread
2334 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2335 state->slot_handler_thread = kthread_create(&slot_handler_func,
2339 if (IS_ERR(state->slot_handler_thread)) {
2340 vchiq_loud_error_header();
2341 vchiq_loud_error("couldn't create thread %s", threadname);
2342 vchiq_loud_error_footer();
2343 return PTR_ERR(state->slot_handler_thread);
2345 set_user_nice(state->slot_handler_thread, -19);
2347 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2348 state->recycle_thread = kthread_create(&recycle_func,
2351 if (IS_ERR(state->recycle_thread)) {
2352 vchiq_loud_error_header();
2353 vchiq_loud_error("couldn't create thread %s", threadname);
2354 vchiq_loud_error_footer();
2355 ret = PTR_ERR(state->recycle_thread);
2356 goto fail_free_handler_thread;
2358 set_user_nice(state->recycle_thread, -19);
2360 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2361 state->sync_thread = kthread_create(&sync_func,
2364 if (IS_ERR(state->sync_thread)) {
2365 vchiq_loud_error_header();
2366 vchiq_loud_error("couldn't create thread %s", threadname);
2367 vchiq_loud_error_footer();
2368 ret = PTR_ERR(state->sync_thread);
2369 goto fail_free_recycle_thread;
2371 set_user_nice(state->sync_thread, -20);
2373 wake_up_process(state->slot_handler_thread);
2374 wake_up_process(state->recycle_thread);
2375 wake_up_process(state->sync_thread);
2377 vchiq_states[0] = state;
2379 /* Indicate readiness to the other side */
2380 local->initialised = 1;
2384 fail_free_recycle_thread:
2385 kthread_stop(state->recycle_thread);
2386 fail_free_handler_thread:
2387 kthread_stop(state->slot_handler_thread);
2392 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2394 struct vchiq_service *service = find_service_by_handle(handle);
2397 while (service->msg_queue_write == service->msg_queue_read +
2399 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2400 flush_signals(current);
2403 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2404 service->msg_queue_write++;
2405 service->msg_queue[pos] = header;
2407 complete(&service->msg_queue_push);
2409 EXPORT_SYMBOL(vchiq_msg_queue_push);
2411 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2413 struct vchiq_service *service = find_service_by_handle(handle);
2414 struct vchiq_header *header;
2417 if (service->msg_queue_write == service->msg_queue_read)
2420 while (service->msg_queue_write == service->msg_queue_read) {
2421 if (wait_for_completion_interruptible(&service->msg_queue_push))
2422 flush_signals(current);
2425 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2426 service->msg_queue_read++;
2427 header = service->msg_queue[pos];
2429 complete(&service->msg_queue_pop);
2433 EXPORT_SYMBOL(vchiq_msg_hold);
2435 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2437 if (!params->callback || !params->fourcc) {
2438 vchiq_loud_error("Can't add service, invalid params\n");
2445 /* Called from application thread when a client or server service is created. */
2446 struct vchiq_service *
2447 vchiq_add_service_internal(struct vchiq_state *state,
2448 const struct vchiq_service_params_kernel *params,
2449 int srvstate, struct vchiq_instance *instance,
2450 vchiq_userdata_term userdata_term)
2452 struct vchiq_service *service;
2453 struct vchiq_service __rcu **pservice = NULL;
2454 struct vchiq_service_quota *quota;
2458 ret = vchiq_validate_params(params);
2462 service = kmalloc(sizeof(*service), GFP_KERNEL);
2466 service->base.fourcc = params->fourcc;
2467 service->base.callback = params->callback;
2468 service->base.userdata = params->userdata;
2469 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2470 kref_init(&service->ref_count);
2471 service->srvstate = VCHIQ_SRVSTATE_FREE;
2472 service->userdata_term = userdata_term;
2473 service->localport = VCHIQ_PORT_FREE;
2474 service->remoteport = VCHIQ_PORT_FREE;
2476 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2477 VCHIQ_FOURCC_INVALID : params->fourcc;
2478 service->client_id = 0;
2479 service->auto_close = 1;
2481 service->closing = 0;
2483 atomic_set(&service->poll_flags, 0);
2484 service->version = params->version;
2485 service->version_min = params->version_min;
2486 service->state = state;
2487 service->instance = instance;
2488 service->service_use_count = 0;
2489 service->msg_queue_read = 0;
2490 service->msg_queue_write = 0;
2491 init_bulk_queue(&service->bulk_tx);
2492 init_bulk_queue(&service->bulk_rx);
2493 init_completion(&service->remove_event);
2494 init_completion(&service->bulk_remove_event);
2495 init_completion(&service->msg_queue_pop);
2496 init_completion(&service->msg_queue_push);
2497 mutex_init(&service->bulk_mutex);
2498 memset(&service->stats, 0, sizeof(service->stats));
2499 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2502 * Although it is perfectly possible to use a spinlock
2503 * to protect the creation of services, it is overkill as it
2504 * disables interrupts while the array is searched.
2505 * The only danger is of another thread trying to create a
2506 * service - service deletion is safe.
2507 * Therefore it is preferable to use state->mutex which,
2508 * although slower to claim, doesn't block interrupts while
2512 mutex_lock(&state->mutex);
2514 /* Prepare to use a previously unused service */
2515 if (state->unused_service < VCHIQ_MAX_SERVICES)
2516 pservice = &state->services[state->unused_service];
2518 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2519 for (i = 0; i < state->unused_service; i++) {
2520 if (!rcu_access_pointer(state->services[i])) {
2521 pservice = &state->services[i];
2527 for (i = (state->unused_service - 1); i >= 0; i--) {
2528 struct vchiq_service *srv;
2530 srv = rcu_dereference(state->services[i]);
2532 pservice = &state->services[i];
2533 } else if ((srv->public_fourcc == params->fourcc) &&
2534 ((srv->instance != instance) ||
2535 (srv->base.callback != params->callback))) {
2537 * There is another server using this
2538 * fourcc which doesn't match.
2548 service->localport = (pservice - state->services);
2550 handle_seq = VCHIQ_MAX_STATES *
2552 service->handle = handle_seq |
2553 (state->id * VCHIQ_MAX_SERVICES) |
2555 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2556 rcu_assign_pointer(*pservice, service);
2557 if (pservice == &state->services[state->unused_service])
2558 state->unused_service++;
2561 mutex_unlock(&state->mutex);
2568 quota = &state->service_quotas[service->localport];
2569 quota->slot_quota = state->default_slot_quota;
2570 quota->message_quota = state->default_message_quota;
2571 if (quota->slot_use_count == 0)
2572 quota->previous_tx_index =
2573 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2576 /* Bring this service online */
2577 vchiq_set_service_state(service, srvstate);
2579 vchiq_log_info(vchiq_core_msg_log_level,
2580 "%s Service %c%c%c%c SrcPort:%d",
2581 (srvstate == VCHIQ_SRVSTATE_OPENING)
2583 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2584 service->localport);
2586 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * Send an OPEN message for this service and block until the peer ACKs
 * (service reaches OPEN/OPENSYNC) or NAKs the open.
 * NOTE(review): numbered listing with interior lines elided (braces,
 * some queue_message arguments); comments cover visible lines only.
 */
2592 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2594 struct vchiq_open_payload payload = {
2595 service->base.fourcc,
2598 service->version_min
2600 enum vchiq_status status = VCHIQ_SUCCESS;
2602 service->client_id = client_id;
/* Take a use count for the duration of the open attempt */
2603 vchiq_use_service_internal(service);
2604 status = queue_message(service->state,
2606 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2609 memcpy_copy_callback,
2612 QMFLAGS_IS_BLOCKING);
2614 if (status != VCHIQ_SUCCESS)
2617 /* Wait for the ACK/NAK */
2618 if (wait_for_completion_interruptible(&service->remove_event)) {
/* Interrupted by a signal - tell the caller to retry */
2619 status = VCHIQ_RETRY;
2620 vchiq_release_service_internal(service);
2621 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2622 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
/* CLOSEWAIT is an expected rejection - don't log it as an error */
2623 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2624 vchiq_log_error(vchiq_core_log_level,
2625 "%d: osi - srvstate = %s (ref %u)",
2627 srvstate_names[service->srvstate],
2628 kref_read(&service->ref_count));
2629 status = VCHIQ_ERROR;
2630 VCHIQ_SERVICE_STATS_INC(service, error_count);
2631 vchiq_release_service_internal(service);
/*
 * Release every claimed message destined for this service, both the
 * synchronous slot and all normal remote slots, so the slots can be
 * recycled when the service closes.
 * NOTE(review): listing has interior lines elided (braces, loop setup).
 */
2638 release_service_messages(struct vchiq_service *service)
2640 struct vchiq_state *state = service->state;
2641 int slot_last = state->remote->slot_last;
2644 /* Release any claimed messages aimed at this service */
2646 if (service->sync) {
2647 struct vchiq_header *header =
2648 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2649 state->remote->slot_sync);
2650 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2651 release_message_sync(state, header);
/* Walk every remote slot looking for claimed messages for this port */
2656 for (i = state->remote->slot_first; i <= slot_last; i++) {
2657 struct vchiq_slot_info *slot_info =
2658 SLOT_INFO_FROM_INDEX(state, i);
2659 unsigned int pos, end;
/* Fully released slots need no scanning */
2662 if (slot_info->release_count == slot_info->use_count)
2665 data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2666 end = VCHIQ_SLOT_SIZE;
2667 if (data == state->rx_data)
2669 * This buffer is still being read from - stop
2670 * at the current read position
2672 end = state->rx_pos & VCHIQ_SLOT_MASK;
2677 struct vchiq_header *header =
2678 (struct vchiq_header *)(data + pos);
2679 int msgid = header->msgid;
2680 int port = VCHIQ_MSG_DSTPORT(msgid);
2682 if ((port == service->localport) &&
2683 (msgid & VCHIQ_MSGID_CLAIMED)) {
2684 vchiq_log_info(vchiq_core_log_level,
2685 " fsi - hdr %pK", header);
2686 release_slot(state, slot_info, header,
/* Advance by the padded message stride; overrun means corruption */
2689 pos += calc_stride(header->size);
2690 if (pos > VCHIQ_SLOT_SIZE) {
2691 vchiq_log_error(vchiq_core_log_level,
2692 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2694 header->msgid, header->size);
2695 WARN(1, "invalid slot position\n");
/*
 * Abort all outstanding bulk transfers (both directions) under the bulk
 * mutex, then notify both queues. Returns nonzero only if both
 * notify_bulks() calls succeed.
 * NOTE(review): listing elides braces and the early-return bodies.
 */
2702 do_abort_bulks(struct vchiq_service *service)
2704 enum vchiq_status status;
2706 /* Abort any outstanding bulk transfers */
2707 if (mutex_lock_killable(&service->bulk_mutex))
2709 abort_outstanding_bulks(service, &service->bulk_tx);
2710 abort_outstanding_bulks(service, &service->bulk_rx);
2711 mutex_unlock(&service->bulk_mutex);
/* Deliver the aborted-bulk callbacks without retry polling */
2713 status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2714 if (status != VCHIQ_SUCCESS)
2717 status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2718 return (status == VCHIQ_SUCCESS);
/*
 * Finish closing a service: choose the next state (LISTENING for
 * auto-close servers, CLOSEWAIT/CLOSED otherwise), issue the
 * VCHIQ_SERVICE_CLOSED callback, and release all outstanding use
 * counts. On VCHIQ_RETRY from the callback, fall back to 'failstate'.
 * NOTE(review): listing elides interior lines (braces, else arms).
 */
2721 static enum vchiq_status
2722 close_service_complete(struct vchiq_service *service, int failstate)
2724 enum vchiq_status status;
/* A server-side service has a public fourcc; clients do not */
2725 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2728 switch (service->srvstate) {
2729 case VCHIQ_SRVSTATE_OPEN:
2730 case VCHIQ_SRVSTATE_CLOSESENT:
2731 case VCHIQ_SRVSTATE_CLOSERECVD:
2733 if (service->auto_close) {
2734 service->client_id = 0;
2735 service->remoteport = VCHIQ_PORT_FREE;
2736 newstate = VCHIQ_SRVSTATE_LISTENING;
2738 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2741 newstate = VCHIQ_SRVSTATE_CLOSED;
2743 vchiq_set_service_state(service, newstate);
2745 case VCHIQ_SRVSTATE_LISTENING:
2748 vchiq_log_error(vchiq_core_log_level,
2749 "%s(%x) called in state %s", __func__,
2750 service->handle, srvstate_names[service->srvstate]);
2751 WARN(1, "%s in unexpected state\n", __func__);
2755 status = make_service_callback(service,
2756 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2758 if (status != VCHIQ_RETRY) {
2759 int uc = service->service_use_count;
2761 /* Complete the close process */
2762 for (i = 0; i < uc; i++)
2764 * cater for cases where close is forced and the
2765 * client may not close all it's handles
2767 vchiq_release_service_internal(service);
2769 service->client_id = 0;
2770 service->remoteport = VCHIQ_PORT_FREE;
2772 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2773 vchiq_free_service_internal(service);
2774 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2776 service->closing = 0;
2778 complete(&service->remove_event);
/* Callback asked for a retry - park in the caller-supplied failstate */
2781 vchiq_set_service_state(service, failstate);
/*
 * Core close state machine, run only on the slot handler thread.
 * 'close_recvd' distinguishes a close initiated by the peer (CLOSE
 * message received) from one initiated locally. Too order-sensitive
 * to restyle; listing elides interior lines (braces, else arms,
 * queue_message argument lines).
 */
2787 /* Called by the slot handler */
2789 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2791 struct vchiq_state *state = service->state;
2792 enum vchiq_status status = VCHIQ_SUCCESS;
2793 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2795 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2796 service->state->id, service->localport, close_recvd,
2797 srvstate_names[service->srvstate]);
2799 switch (service->srvstate) {
2800 case VCHIQ_SRVSTATE_CLOSED:
2801 case VCHIQ_SRVSTATE_HIDDEN:
2802 case VCHIQ_SRVSTATE_LISTENING:
2803 case VCHIQ_SRVSTATE_CLOSEWAIT:
2805 vchiq_log_error(vchiq_core_log_level,
2806 "%s(1) called in state %s",
2807 __func__, srvstate_names[service->srvstate]);
2808 } else if (is_server) {
2809 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2810 status = VCHIQ_ERROR;
2812 service->client_id = 0;
2813 service->remoteport = VCHIQ_PORT_FREE;
/* A waiting server goes back to listening for new clients */
2814 if (service->srvstate ==
2815 VCHIQ_SRVSTATE_CLOSEWAIT)
2816 vchiq_set_service_state(service,
2817 VCHIQ_SRVSTATE_LISTENING);
2819 complete(&service->remove_event);
2821 vchiq_free_service_internal(service);
2824 case VCHIQ_SRVSTATE_OPENING:
2826 /* The open was rejected - tell the user */
2827 vchiq_set_service_state(service,
2828 VCHIQ_SRVSTATE_CLOSEWAIT);
2829 complete(&service->remove_event);
2831 /* Shutdown mid-open - let the other side know */
2832 status = queue_message(state, service,
2836 VCHIQ_MSG_DSTPORT(service->remoteport)),
2841 case VCHIQ_SRVSTATE_OPENSYNC:
/* Sync services are serialised via the sync mutex during close */
2842 mutex_lock(&state->sync_mutex);
2844 case VCHIQ_SRVSTATE_OPEN:
2846 if (!do_abort_bulks(service))
2847 status = VCHIQ_RETRY;
2850 release_service_messages(service);
2852 if (status == VCHIQ_SUCCESS)
2853 status = queue_message(state, service,
2857 VCHIQ_MSG_DSTPORT(service->remoteport)),
2858 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2860 if (status != VCHIQ_SUCCESS) {
2861 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2862 mutex_unlock(&state->sync_mutex);
2867 /* Change the state while the mutex is still held */
2868 vchiq_set_service_state(service,
2869 VCHIQ_SRVSTATE_CLOSESENT);
2870 mutex_unlock(&state->slot_mutex);
2872 mutex_unlock(&state->sync_mutex);
2876 /* Change the state while the mutex is still held */
2877 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2878 mutex_unlock(&state->slot_mutex);
2880 mutex_unlock(&state->sync_mutex);
2882 status = close_service_complete(service,
2883 VCHIQ_SRVSTATE_CLOSERECVD);
2886 case VCHIQ_SRVSTATE_CLOSESENT:
2888 /* This happens when a process is killed mid-close */
2891 if (!do_abort_bulks(service)) {
2892 status = VCHIQ_RETRY;
2896 if (status == VCHIQ_SUCCESS)
2897 status = close_service_complete(service,
2898 VCHIQ_SRVSTATE_CLOSERECVD);
2901 case VCHIQ_SRVSTATE_CLOSERECVD:
2902 if (!close_recvd && is_server)
2903 /* Force into LISTENING mode */
2904 vchiq_set_service_state(service,
2905 VCHIQ_SRVSTATE_LISTENING);
2906 status = close_service_complete(service,
2907 VCHIQ_SRVSTATE_CLOSERECVD);
2911 vchiq_log_error(vchiq_core_log_level,
2912 "%s(%d) called in state %s", __func__,
2913 close_recvd, srvstate_names[service->srvstate]);
/*
 * Flag a service as closing and queue a VCHIQ_POLL_REMOVE so the slot
 * handler thread performs the actual teardown asynchronously.
 */
2920 /* Called from the application process upon process death */
2922 vchiq_terminate_service_internal(struct vchiq_service *service)
2924 struct vchiq_state *state = service->state;
2926 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2927 state->id, service->localport, service->remoteport);
2929 mark_service_closing(service);
2931 /* Mark the service for removal by the slot handler */
2932 request_poll(state, service, VCHIQ_POLL_REMOVE);
/*
 * Move a service to SRVSTATE_FREE and drop the initial reference.
 * Only legal from the quiescent states listed in the switch; any other
 * state is logged as an error (the listing elides the default arm's
 * braces/return).
 */
2935 /* Called from the slot handler */
2937 vchiq_free_service_internal(struct vchiq_service *service)
2939 struct vchiq_state *state = service->state;
2941 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2942 state->id, service->localport);
2944 switch (service->srvstate) {
2945 case VCHIQ_SRVSTATE_OPENING:
2946 case VCHIQ_SRVSTATE_CLOSED:
2947 case VCHIQ_SRVSTATE_HIDDEN:
2948 case VCHIQ_SRVSTATE_LISTENING:
2949 case VCHIQ_SRVSTATE_CLOSEWAIT:
2952 vchiq_log_error(vchiq_core_log_level,
2953 "%d: fsi - (%d) in state %s",
2954 state->id, service->localport,
2955 srvstate_names[service->srvstate]);
2959 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
/* Wake anyone blocked in close/remove on this service */
2961 complete(&service->remove_event);
2963 /* Release the initial lock */
2964 unlock_service(service);
/*
 * Bring this instance's hidden services to LISTENING, then drive the
 * connection handshake: send CONNECT when disconnected, wait for the
 * peer when connecting. Re-completes state->connect so other waiters
 * also wake. Returns VCHIQ_SUCCESS (RETRY paths elided in listing).
 */
2968 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2970 struct vchiq_service *service;
2973 /* Find all services registered to this client and enable them. */
2975 while ((service = next_service_by_instance(state, instance,
2977 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2978 vchiq_set_service_state(service,
2979 VCHIQ_SRVSTATE_LISTENING);
2980 unlock_service(service);
2983 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2984 if (queue_message(state, NULL,
2985 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2986 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2989 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2992 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2993 if (wait_for_completion_interruptible(&state->connect))
2996 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
/* Re-signal so any other waiter on the connect event also proceeds */
2997 complete(&state->connect);
3000 return VCHIQ_SUCCESS;
/*
 * Remove every service belonging to this instance; the return value of
 * vchiq_remove_service() is deliberately ignored (best-effort teardown).
 */
3004 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
3006 struct vchiq_service *service;
3009 /* Find all services registered to this client and remove them. */
3011 while ((service = next_service_by_instance(state, instance,
3013 (void)vchiq_remove_service(service->handle);
3014 unlock_service(service);
/*
 * Public close entry point. Refuses FREE/LISTENING/HIDDEN services,
 * then either closes inline (when already on the slot handler thread)
 * or requests a TERMINATE poll, and waits on remove_event until the
 * service reaches FREE or LISTENING. VCHIQ_RETRY on signal.
 * NOTE(review): listing elides interior lines (NULL check, loop braces).
 */
3019 vchiq_close_service(unsigned int handle)
3021 /* Unregister the service */
3022 struct vchiq_service *service = find_service_by_handle(handle);
3023 enum vchiq_status status = VCHIQ_SUCCESS;
3028 vchiq_log_info(vchiq_core_log_level,
3029 "%d: close_service:%d",
3030 service->state->id, service->localport);
3032 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3033 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3034 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
3035 unlock_service(service);
3039 mark_service_closing(service);
/* On the slot handler thread we may close synchronously */
3041 if (current == service->state->slot_handler_thread) {
3042 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3043 WARN_ON(status == VCHIQ_RETRY);
3045 /* Mark the service for termination by the slot handler */
3046 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
3050 if (wait_for_completion_interruptible(&service->remove_event)) {
3051 status = VCHIQ_RETRY;
3055 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3056 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3057 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3060 vchiq_log_warning(vchiq_core_log_level,
3061 "%d: close_service:%d - waiting in state %s",
3062 service->state->id, service->localport,
3063 srvstate_names[service->srvstate]);
3066 if ((status == VCHIQ_SUCCESS) &&
3067 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
3068 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
3069 status = VCHIQ_ERROR;
3071 unlock_service(service);
3075 EXPORT_SYMBOL(vchiq_close_service);
/*
 * Like vchiq_close_service() but fully removes the service: a server
 * is disguised as a client (public_fourcc invalidated) so the close
 * path cannot park it in LISTENING. Waits until the state reaches FREE.
 * NOTE(review): listing elides interior lines (NULL check, loop braces).
 */
3078 vchiq_remove_service(unsigned int handle)
3080 /* Unregister the service */
3081 struct vchiq_service *service = find_service_by_handle(handle);
3082 enum vchiq_status status = VCHIQ_SUCCESS;
3087 vchiq_log_info(vchiq_core_log_level,
3088 "%d: remove_service:%d",
3089 service->state->id, service->localport);
3091 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3092 unlock_service(service);
3096 mark_service_closing(service);
3098 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3099 (current == service->state->slot_handler_thread)) {
3101 * Make it look like a client, because it must be removed and
3102 * not left in the LISTENING state.
3104 service->public_fourcc = VCHIQ_FOURCC_INVALID;
3106 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3107 WARN_ON(status == VCHIQ_RETRY);
3109 /* Mark the service for removal by the slot handler */
3110 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3113 if (wait_for_completion_interruptible(&service->remove_event)) {
3114 status = VCHIQ_RETRY;
3118 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3119 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3122 vchiq_log_warning(vchiq_core_log_level,
3123 "%d: remove_service:%d - waiting in state %s",
3124 service->state->id, service->localport,
3125 srvstate_names[service->srvstate]);
3128 if ((status == VCHIQ_SUCCESS) &&
3129 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3130 status = VCHIQ_ERROR;
3132 unlock_service(service);
3138 * This function may be called by kernel threads or user threads.
3139 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3140 * received and the call should be retried after being returned to user
3141 * context.
3142 * When called in blocking mode, the userdata field points to a bulk_waiter
3143 * structure.
/*
 * Queue a bulk transfer in either direction. Reserves a bulk slot
 * (waiting if the per-service queue is full), prepares the DMA data,
 * and sends a BULK_TX/BULK_RX message while holding both the bulk
 * mutex and the slot mutex (the slot mutex also fences against a
 * concurrent close). In BLOCKING mode the caller's bulk_waiter is
 * initialised and waited on; in WAITING mode a previous waiter is
 * resumed. Too order/lock sensitive to restyle; the listing elides
 * interior lines (labels, braces, several statements).
 */
3145 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3146 void *offset, void __user *uoffset,
3147 int size, void *userdata,
3148 enum vchiq_bulk_mode mode,
3149 enum vchiq_bulk_dir dir)
3151 struct vchiq_service *service = find_service_by_handle(handle);
3152 struct vchiq_bulk_queue *queue;
3153 struct vchiq_bulk *bulk;
3154 struct vchiq_state *state;
3155 struct bulk_waiter *bulk_waiter = NULL;
3156 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3157 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3158 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3159 enum vchiq_status status = VCHIQ_ERROR;
3165 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
/* Exactly one of the kernel/user buffer pointers must be supplied */
3168 if (!offset && !uoffset)
3171 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3175 case VCHIQ_BULK_MODE_NOCALLBACK:
3176 case VCHIQ_BULK_MODE_CALLBACK:
3178 case VCHIQ_BULK_MODE_BLOCKING:
3179 bulk_waiter = userdata;
3180 init_completion(&bulk_waiter->event);
3181 bulk_waiter->actual = 0;
3182 bulk_waiter->bulk = NULL;
3184 case VCHIQ_BULK_MODE_WAITING:
3185 bulk_waiter = userdata;
3186 bulk = bulk_waiter->bulk;
3192 state = service->state;
3194 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3195 &service->bulk_tx : &service->bulk_rx;
3197 if (mutex_lock_killable(&service->bulk_mutex)) {
3198 status = VCHIQ_RETRY;
/* Queue full: drop the mutex and wait for a slot to be released */
3202 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3203 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3205 mutex_unlock(&service->bulk_mutex);
3206 if (wait_for_completion_interruptible(
3207 &service->bulk_remove_event)) {
3208 status = VCHIQ_RETRY;
3211 if (mutex_lock_killable(&service->bulk_mutex)) {
3212 status = VCHIQ_RETRY;
3215 } while (queue->local_insert == queue->remove +
3216 VCHIQ_NUM_SERVICE_BULKS);
3219 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3223 bulk->userdata = userdata;
/* Pessimistic default; overwritten when the transfer completes */
3225 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3227 if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3228 goto unlock_error_exit;
3232 vchiq_log_info(vchiq_core_log_level,
3233 "%d: bt (%d->%d) %cx %x@%pad %pK",
3234 state->id, service->localport, service->remoteport, dir_char,
3235 size, &bulk->data, userdata);
3238 * The slot mutex must be held when the service is being closed, so
3239 * claim it here to ensure that isn't happening
3241 if (mutex_lock_killable(&state->slot_mutex)) {
3242 status = VCHIQ_RETRY;
3243 goto cancel_bulk_error_exit;
3246 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3247 goto unlock_both_error_exit;
3249 payload[0] = lower_32_bits(bulk->data);
3250 payload[1] = bulk->size;
3251 status = queue_message(state,
3253 VCHIQ_MAKE_MSG(dir_msgtype,
3255 service->remoteport),
3256 memcpy_copy_callback,
3259 QMFLAGS_IS_BLOCKING |
3260 QMFLAGS_NO_MUTEX_LOCK |
3261 QMFLAGS_NO_MUTEX_UNLOCK);
3262 if (status != VCHIQ_SUCCESS)
3263 goto unlock_both_error_exit;
3265 queue->local_insert++;
3267 mutex_unlock(&state->slot_mutex);
3268 mutex_unlock(&service->bulk_mutex);
3270 vchiq_log_trace(vchiq_core_log_level,
3271 "%d: bt:%d %cx li=%x ri=%x p=%x",
3273 service->localport, dir_char,
3274 queue->local_insert, queue->remote_insert, queue->process);
3277 unlock_service(service);
3279 status = VCHIQ_SUCCESS;
/* Blocking/waiting modes park here until the bulk completes */
3282 bulk_waiter->bulk = bulk;
3283 if (wait_for_completion_interruptible(&bulk_waiter->event))
3284 status = VCHIQ_RETRY;
3285 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3286 status = VCHIQ_ERROR;
3291 unlock_both_error_exit:
3292 mutex_unlock(&state->slot_mutex);
3293 cancel_bulk_error_exit:
/* Undo vchiq_prepare_bulk_data() before bailing out */
3294 vchiq_complete_bulk(bulk);
3296 mutex_unlock(&service->bulk_mutex);
3300 unlock_service(service);
/*
 * Queue a DATA message built via 'copy_callback'. Dispatches to the
 * normal or synchronous path depending on the service state; rejects
 * oversize messages. Listing elides the size<0 guard's condition line
 * and braces.
 */
3305 vchiq_queue_message(unsigned int handle,
3306 ssize_t (*copy_callback)(void *context, void *dest,
3307 size_t offset, size_t maxsize),
3311 struct vchiq_service *service = find_service_by_handle(handle);
3312 enum vchiq_status status = VCHIQ_ERROR;
3317 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3321 VCHIQ_SERVICE_STATS_INC(service, error_count);
3326 if (size > VCHIQ_MAX_MSG_SIZE) {
3327 VCHIQ_SERVICE_STATS_INC(service, error_count);
3331 switch (service->srvstate) {
3332 case VCHIQ_SRVSTATE_OPEN:
3333 status = queue_message(service->state, service,
3334 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3336 service->remoteport),
3337 copy_callback, context, size, 1);
3339 case VCHIQ_SRVSTATE_OPENSYNC:
3340 status = queue_message_sync(service->state, service,
3341 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3343 service->remoteport),
3344 copy_callback, context, size, 1);
3347 status = VCHIQ_ERROR;
3353 unlock_service(service);
/*
 * Kernel-side convenience wrapper: memcpy the supplied buffer into a
 * message, retrying in a loop while vchiq_queue_message() reports
 * VCHIQ_RETRY, since this API must block until the message is queued.
 */
3358 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3360 enum vchiq_status status;
3363 status = vchiq_queue_message(handle, memcpy_copy_callback,
3367 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3368 * implement a retry mechanism since this function is supposed
3369 * to block until queued
3371 if (status != VCHIQ_RETRY)
3379 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Release a message header previously handed to a service callback.
 * A claimed message in a normal remote slot goes through release_slot();
 * a message in the synchronous slot goes through release_message_sync().
 */
3382 vchiq_release_message(unsigned int handle,
3383 struct vchiq_header *header)
3385 struct vchiq_service *service = find_service_by_handle(handle);
3386 struct vchiq_shared_state *remote;
3387 struct vchiq_state *state;
3393 state = service->state;
3394 remote = state->remote;
3396 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3398 if ((slot_index >= remote->slot_first) &&
3399 (slot_index <= remote->slot_last)) {
3400 int msgid = header->msgid;
3402 if (msgid & VCHIQ_MSGID_CLAIMED) {
3403 struct vchiq_slot_info *slot_info =
3404 SLOT_INFO_FROM_INDEX(state, slot_index);
3406 release_slot(state, slot_info, header, service);
3408 } else if (slot_index == remote->slot_sync) {
3409 release_message_sync(state, header);
3412 unlock_service(service);
3414 EXPORT_SYMBOL(vchiq_release_message);
/*
 * Mark the synchronous-slot message as padding and signal the remote's
 * sync_release event so the peer can reuse the sync slot.
 */
3417 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3419 header->msgid = VCHIQ_MSGID_PADDING;
3420 remote_event_signal(&state->remote->sync_release);
/*
 * Report the peer's negotiated protocol version for an open service.
 * Returns VCHIQ_ERROR for an invalid handle or failed service check
 * (listing elides the NULL/peer_version guards and exit label).
 */
3424 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3426 enum vchiq_status status = VCHIQ_ERROR;
3427 struct vchiq_service *service = find_service_by_handle(handle);
3432 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3438 *peer_version = service->peer_version;
3439 status = VCHIQ_SUCCESS;
3443 unlock_service(service);
3446 EXPORT_SYMBOL(vchiq_get_peer_version);
/* Fill in the static VCHIQ configuration limits for the caller. */
3448 void vchiq_get_config(struct vchiq_config *config)
3450 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
/* Bulk transfers are preferred above this payload size */
3451 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3452 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3453 config->max_services = VCHIQ_MAX_SERVICES;
3454 config->version = VCHIQ_VERSION;
3455 config->version_min = VCHIQ_VERSION_MIN;
3459 vchiq_set_service_option(unsigned int handle,
3460 enum vchiq_service_option option, int value)
3462 struct vchiq_service *service = find_service_by_handle(handle);
3463 struct vchiq_service_quota *quota;
3470 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3471 service->auto_close = value;
3475 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3476 quota = &service->state->service_quotas[service->localport];
3478 value = service->state->default_slot_quota;
3479 if ((value >= quota->slot_use_count) &&
3480 (value < (unsigned short)~0)) {
3481 quota->slot_quota = value;
3482 if ((value >= quota->slot_use_count) &&
3483 (quota->message_quota >= quota->message_use_count))
3485 * Signal the service that it may have
3486 * dropped below its quota
3488 complete("a->quota_event);
3493 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3494 quota = &service->state->service_quotas[service->localport];
3496 value = service->state->default_message_quota;
3497 if ((value >= quota->message_use_count) &&
3498 (value < (unsigned short)~0)) {
3499 quota->message_quota = value;
3500 if ((value >= quota->message_use_count) &&
3501 (quota->slot_quota >= quota->slot_use_count))
3503 * Signal the service that it may have
3504 * dropped below its quota
3506 complete("a->quota_event);
3511 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3512 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3513 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3514 service->sync = value;
3519 case VCHIQ_SERVICE_OPTION_TRACE:
3520 service->trace = value;
3527 unlock_service(service);
/*
 * Dump one side's shared state: slot range/positions, slots still in
 * use, and the shared debug counters (named by debug_names). Each
 * scnprintf'd line is emitted via vchiq_dump(); listing elides some
 * array entries, buffers, and error-return lines.
 */
3533 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3534 struct vchiq_shared_state *shared, const char *label)
3536 static const char *const debug_names[] = {
3538 "SLOT_HANDLER_COUNT",
3539 "SLOT_HANDLER_LINE",
3543 "AWAIT_COMPLETION_LINE",
3544 "DEQUEUE_MESSAGE_LINE",
3545 "SERVICE_CALLBACK_LINE",
3546 "MSG_QUEUE_FULL_COUNT",
3547 "COMPLETION_QUEUE_FULL_COUNT"
3554 len = scnprintf(buf, sizeof(buf),
3555 " %s: slots %d-%d tx_pos=%x recycle=%x",
3556 label, shared->slot_first, shared->slot_last,
3557 shared->tx_pos, shared->slot_queue_recycle);
/* len + 1 includes the NUL terminator in the dumped record */
3558 err = vchiq_dump(dump_context, buf, len + 1);
3562 len = scnprintf(buf, sizeof(buf),
3564 err = vchiq_dump(dump_context, buf, len + 1);
3568 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3569 struct vchiq_slot_info slot_info =
3570 *SLOT_INFO_FROM_INDEX(state, i);
3571 if (slot_info.use_count != slot_info.release_count) {
3572 len = scnprintf(buf, sizeof(buf),
3573 " %d: %d/%d", i, slot_info.use_count,
3574 slot_info.release_count);
3575 err = vchiq_dump(dump_context, buf, len + 1);
3581 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3582 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3583 debug_names[i], shared->debug[i], shared->debug[i]);
3584 err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the whole connection state: positions, version, global stats,
 * slot availability, platform state, both shared-state halves, and
 * then every in-use service. Listing elides error checks between the
 * vchiq_dump() calls.
 */
3591 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3598 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3599 conn_state_names[state->conn_state]);
3600 err = vchiq_dump(dump_context, buf, len + 1);
3604 len = scnprintf(buf, sizeof(buf),
3605 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3606 state->local->tx_pos,
3607 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3609 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3610 err = vchiq_dump(dump_context, buf, len + 1);
3614 len = scnprintf(buf, sizeof(buf),
3615 " Version: %d (min %d)",
3616 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3617 err = vchiq_dump(dump_context, buf, len + 1);
3621 if (VCHIQ_ENABLE_STATS) {
3622 len = scnprintf(buf, sizeof(buf),
3623 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3624 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3625 state->stats.error_count);
3626 err = vchiq_dump(dump_context, buf, len + 1);
3631 len = scnprintf(buf, sizeof(buf),
3632 " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3633 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3634 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3635 state->data_quota - state->data_use_count,
3636 state->local->slot_queue_recycle - state->slot_queue_available,
3637 state->stats.slot_stalls, state->stats.data_stalls);
3638 err = vchiq_dump(dump_context, buf, len + 1);
3642 err = vchiq_dump_platform_state(dump_context);
3646 err = vchiq_dump_shared_state(dump_context,
3652 err = vchiq_dump_shared_state(dump_context,
3659 err = vchiq_dump_platform_instances(dump_context);
/* Walk every allocated service slot and dump the live ones */
3663 for (i = 0; i < state->unused_service; i++) {
3664 struct vchiq_service *service = find_service_by_port(state, i);
3667 err = vchiq_dump_service_state(dump_context, service);
3668 unlock_service(service);
/*
 * Dump one service: state and ref count, then (for non-free services)
 * fourcc, remote port/client, quota usage, pending bulks, and — when
 * stats are enabled — control/bulk counters and stall totals.
 * Listing elides error checks and some format-argument lines.
 */
3676 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3681 unsigned int ref_count;
3683 /*Don't include the lock just taken*/
3684 ref_count = kref_read(&service->ref_count) - 1;
3685 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3686 service->localport, srvstate_names[service->srvstate],
3689 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3690 char remoteport[30];
3691 struct vchiq_service_quota *quota =
3692 &service->state->service_quotas[service->localport];
3693 int fourcc = service->base.fourcc;
3694 int tx_pending, rx_pending;
3696 if (service->remoteport != VCHIQ_PORT_FREE) {
3697 int len2 = scnprintf(remoteport, sizeof(remoteport),
3698 "%u", service->remoteport);
/* Servers additionally report which client is connected */
3700 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3701 scnprintf(remoteport + len2,
3702 sizeof(remoteport) - len2,
3703 " (client %x)", service->client_id);
3705 strcpy(remoteport, "n/a");
3708 len += scnprintf(buf + len, sizeof(buf) - len,
3709 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3710 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3712 quota->message_use_count,
3713 quota->message_quota,
3714 quota->slot_use_count,
3717 err = vchiq_dump(dump_context, buf, len + 1);
/* Pending = submitted locally but not yet completed by the peer */
3721 tx_pending = service->bulk_tx.local_insert -
3722 service->bulk_tx.remote_insert;
3724 rx_pending = service->bulk_rx.local_insert -
3725 service->bulk_rx.remote_insert;
3727 len = scnprintf(buf, sizeof(buf),
3728 " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3730 tx_pending ? service->bulk_tx.bulks[
3731 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3733 rx_pending ? service->bulk_rx.bulks[
3734 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3736 if (VCHIQ_ENABLE_STATS) {
3737 err = vchiq_dump(dump_context, buf, len + 1);
3741 len = scnprintf(buf, sizeof(buf),
3742 " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3743 service->stats.ctrl_tx_count,
3744 service->stats.ctrl_tx_bytes,
3745 service->stats.ctrl_rx_count,
3746 service->stats.ctrl_rx_bytes);
3747 err = vchiq_dump(dump_context, buf, len + 1);
3751 len = scnprintf(buf, sizeof(buf),
3752 " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3753 service->stats.bulk_tx_count,
3754 service->stats.bulk_tx_bytes,
3755 service->stats.bulk_rx_count,
3756 service->stats.bulk_rx_bytes);
3757 err = vchiq_dump(dump_context, buf, len + 1);
3761 len = scnprintf(buf, sizeof(buf),
3762 " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3763 service->stats.quota_stalls,
3764 service->stats.slot_stalls,
3765 service->stats.bulk_stalls,
3766 service->stats.bulk_aborted_count,
3767 service->stats.error_count);
3771 err = vchiq_dump(dump_context, buf, len + 1);
3775 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3776 err = vchiq_dump_platform_service_state(dump_context, service);
/* Print the opening banner used to make serious errors stand out in the log. */
3781 vchiq_loud_error_header(void)
3783 vchiq_log_error(vchiq_core_log_level,
3784 "============================================================================");
3785 vchiq_log_error(vchiq_core_log_level,
3786 "============================================================================");
3787 vchiq_log_error(vchiq_core_log_level, "=====");
/* Print the closing banner matching vchiq_loud_error_header(). */
3791 vchiq_loud_error_footer(void)
3793 vchiq_log_error(vchiq_core_log_level, "=====")
/* Send a REMOTE_USE control message; refused while disconnected. */
3800 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3802 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3805 return queue_message(state, NULL,
3806 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/* Send a REMOTE_USE_ACTIVE control message; refused while disconnected. */
3810 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3812 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3815 return queue_message(state, NULL,
3816 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3820 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3823 const u8 *mem = void_mem;
3828 while (num_bytes > 0) {
3831 for (offset = 0; offset < 16; offset++) {
3832 if (offset < num_bytes)
3833 s += scnprintf(s, 4, "%02x ", mem[offset]);
3835 s += scnprintf(s, 4, " ");
3838 for (offset = 0; offset < 16; offset++) {
3839 if (offset < num_bytes) {
3840 u8 ch = mem[offset];
3842 if ((ch < ' ') || (ch > '~'))
3849 if (label && (*label != '\0'))
3850 vchiq_log_trace(VCHIQ_LOG_TRACE,
3851 "%s: %08x: %s", label, addr, line_buf);
3853 vchiq_log_trace(VCHIQ_LOG_TRACE,
3854 "%08x: %s", addr, line_buf);