1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
/* NOTE(review): this extract is line-sampled (the leading original line
 * numbers skip), so some macro continuations and the struct/enum bodies
 * below are missing. Code kept byte-identical. */
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 #define HANDLE_STATE_SHIFT 12
/* Translate between slot indices, slot data pointers and slot_info pointers
 * held in the shared state. */
22 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
23 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
24 #define SLOT_INDEX_FROM_DATA(state, data) \
25 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
/* NOTE(review): the divisor line of SLOT_INDEX_FROM_DATA (presumably
 * VCHIQ_SLOT_SIZE) is missing from this extract — confirm upstream. */
27 #define SLOT_INDEX_FROM_INFO(state, info) \
28 ((unsigned int)(info - state->slot_info))
29 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
30 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
/* Wrap a running bulk counter into the per-service bulk ring
 * (VCHIQ_NUM_SERVICE_BULKS is a power of two — asserted below). */
32 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
/* Per-service trace level: promote to VCHIQ_LOG_TRACE when the service has
 * tracing enabled, otherwise use the global message log level. */
34 #define SRVTRACE_LEVEL(srv) \
35 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
36 #define SRVTRACE_ENABLED(srv, lev) \
37 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
/* Wire payload carried by OPEN messages (fields elided in this extract). */
39 struct vchiq_open_payload {
/* Wire payload carried by OPENACK messages (fields elided in this extract). */
46 struct vchiq_openack_payload {
/* Flag bits for queue_message()'s 'flags' argument. */
51 QMFLAGS_IS_BLOCKING = BIT(0),
52 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
53 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
56 /* we require this for consistency between endpoints */
/* Compile-time checks: header size and ring sizes must be powers of two so
 * the mask-based index arithmetic above is valid. */
57 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
58 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
59 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
60 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
61 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
62 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
64 /* Run time control of log level, based on KERN_XXX level. */
65 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
66 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
67 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
/* bulk_waiter_spinlock is shared with other vchiq files (non-static);
 * quota_spinlock guards the message/slot quota counters in this file. */
69 DEFINE_SPINLOCK(bulk_waiter_spinlock);
70 static DEFINE_SPINLOCK(quota_spinlock);
72 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
/* Monotonic sequence folded into service handles (see HANDLE_STATE_SHIFT). */
73 static unsigned int handle_seq;
/* Printable-name tables indexed by srvstate / reason / conn_state enums.
 * NOTE(review): most entries are missing from this extract. */
75 static const char *const srvstate_names[] = {
88 static const char *const reason_names[] = {
94 "BULK_TRANSMIT_ABORTED",
95 "BULK_RECEIVE_ABORTED"
98 static const char *const conn_state_names[] = {
/* Forward declaration (return type elided in this extract). */
111 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
/* Map a VCHIQ message type code to its printable name for log output.
 * NOTE(review): the switch braces and any default case are missing from
 * this line-sampled extract; code kept byte-identical. */
113 static const char *msg_type_str(unsigned int msg_type)
116 case VCHIQ_MSG_PADDING: return "PADDING";
117 case VCHIQ_MSG_CONNECT: return "CONNECT";
118 case VCHIQ_MSG_OPEN: return "OPEN";
119 case VCHIQ_MSG_OPENACK: return "OPENACK";
120 case VCHIQ_MSG_CLOSE: return "CLOSE";
121 case VCHIQ_MSG_DATA: return "DATA";
122 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
123 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
124 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
125 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
126 case VCHIQ_MSG_PAUSE: return "PAUSE";
127 case VCHIQ_MSG_RESUME: return "RESUME";
128 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
129 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
130 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
/* Record a service state transition: log old->new (via srvstate_names) and
 * store the new state. Return type/braces elided in this extract. */
136 vchiq_set_service_state(struct vchiq_service *service, int newstate)
138 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
139 service->state->id, service->localport,
140 srvstate_names[service->srvstate],
141 srvstate_names[newstate]);
142 service->srvstate = newstate;
/* Look up a live (non-FREE) service by handle and take a reference on it
 * with kref_get_unless_zero(); rcu_pointer_handoff() transfers the pointer
 * out of the RCU read section. NOTE(review): the rcu_read_lock/unlock and
 * return statements are missing from this extract — confirm upstream. */
145 struct vchiq_service *
146 find_service_by_handle(unsigned int handle)
148 struct vchiq_service *service;
151 service = handle_to_service(handle);
152 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
153 service->handle == handle &&
154 kref_get_unless_zero(&service->ref_count)) {
155 service = rcu_pointer_handoff(service);
160 vchiq_log_info(vchiq_core_log_level,
161 "Invalid service handle 0x%x", handle);
/* Look up a live service by local port number (bounds-checked against
 * VCHIQ_PORT_MAX) and take a reference; logs and (presumably) returns NULL
 * on failure — the RCU lock/return lines are elided in this extract. */
165 struct vchiq_service *
166 find_service_by_port(struct vchiq_state *state, int localport)
169 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
170 struct vchiq_service *service;
173 service = rcu_dereference(state->services[localport]);
174 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
175 kref_get_unless_zero(&service->ref_count)) {
176 service = rcu_pointer_handoff(service);
182 vchiq_log_info(vchiq_core_log_level,
183 "Invalid port %d", localport);
/* Like find_service_by_handle(), but additionally requires the service to
 * belong to the given instance. Reference is taken via kref_get_unless_zero.
 * RCU lock/return lines elided in this extract. */
187 struct vchiq_service *
188 find_service_for_instance(struct vchiq_instance *instance,
191 struct vchiq_service *service;
194 service = handle_to_service(handle);
195 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
196 service->handle == handle &&
197 service->instance == instance &&
198 kref_get_unless_zero(&service->ref_count)) {
199 service = rcu_pointer_handoff(service);
204 vchiq_log_info(vchiq_core_log_level,
205 "Invalid service handle 0x%x", handle);
/* Variant of find_service_for_instance() that also accepts services in the
 * FREE or CLOSED state (used while tearing a service down). */
209 struct vchiq_service *
210 find_closed_service_for_instance(struct vchiq_instance *instance,
213 struct vchiq_service *service;
216 service = handle_to_service(handle);
218 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
219 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
220 service->handle == handle &&
221 service->instance == instance &&
222 kref_get_unless_zero(&service->ref_count)) {
223 service = rcu_pointer_handoff(service);
228 vchiq_log_info(vchiq_core_log_level,
229 "Invalid service handle 0x%x", handle);
/* Scan the service table from *pidx (third parameter elided in this extract)
 * for the next non-FREE service belonging to 'instance'. Caller is expected
 * to hold the RCU read lock; no reference is taken here. */
233 struct vchiq_service *
234 __next_service_by_instance(struct vchiq_state *state,
235 struct vchiq_instance *instance,
238 struct vchiq_service *service = NULL;
241 while (idx < state->unused_service) {
242 struct vchiq_service *srv;
244 srv = rcu_dereference(state->services[idx++]);
245 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
246 srv->instance == instance) {
/* Reference-taking wrapper around __next_service_by_instance(): retries the
 * scan (loop construct elided in this extract) until a service reference can
 * be acquired with kref_get_unless_zero(). */
256 struct vchiq_service *
257 next_service_by_instance(struct vchiq_state *state,
258 struct vchiq_instance *instance,
261 struct vchiq_service *service;
265 service = __next_service_by_instance(state, instance, pidx);
268 if (kref_get_unless_zero(&service->ref_count)) {
269 service = rcu_pointer_handoff(service);
/* Take an unconditional reference on a service; warns (and presumably
 * returns early — line elided) when called with a NULL service. */
278 lock_service(struct vchiq_service *service)
281 WARN(1, "%s service is NULL\n", __func__);
284 kref_get(&service->ref_count);
/* kref release callback: runs when the last service reference is dropped.
 * The service must already be FREE; its table slot is cleared, any userdata
 * destructor is invoked, and the memory is reclaimed after an RCU grace
 * period via kfree_rcu(). */
287 static void service_release(struct kref *kref)
289 struct vchiq_service *service =
290 container_of(kref, struct vchiq_service, ref_count);
291 struct vchiq_state *state = service->state;
293 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
294 rcu_assign_pointer(state->services[service->localport], NULL);
295 if (service->userdata_term)
296 service->userdata_term(service->base.userdata);
297 kfree_rcu(service, rcu);
/* Drop a service reference; service_release() frees it on the last put.
 * Warns on a NULL service (early-return line elided in this extract). */
301 unlock_service(struct vchiq_service *service)
304 WARN(1, "%s: service is NULL\n", __func__);
307 kref_put(&service->ref_count, service_release);
/* Return the client_id recorded on the service for 'handle', or 0 if the
 * handle does not resolve. RCU lock/return lines elided in this extract. */
311 vchiq_get_client_id(unsigned int handle)
313 struct vchiq_service *service;
317 service = handle_to_service(handle);
318 id = service ? service->client_id : 0;
/* Exported accessor: return the caller-supplied userdata pointer stored in
 * service->base, or NULL when the handle does not resolve. */
324 vchiq_get_service_userdata(unsigned int handle)
327 struct vchiq_service *service;
330 service = handle_to_service(handle);
331 userdata = service ? service->base.userdata : NULL;
335 EXPORT_SYMBOL(vchiq_get_service_userdata);
/* Flag a service as closing and wake anything blocked on it. The empty
 * lock/unlock pairs are deliberate barriers: they wait out any thread
 * currently holding recycle_mutex or slot_mutex so it observes 'closing'.
 * 'sh_thread' is nonzero when the caller is the slot handler thread. */
338 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
340 struct vchiq_state *state = service->state;
341 struct vchiq_service_quota *service_quota;
343 service->closing = 1;
345 /* Synchronise with other threads. */
346 mutex_lock(&state->recycle_mutex);
347 mutex_unlock(&state->recycle_mutex);
348 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
349 /* If we're pausing then the slot_mutex is held until resume
350 * by the slot handler. Therefore don't try to acquire this
351 * mutex if we're the slot handler and in the pause sent state.
352 * We don't need to in this case anyway. */
353 mutex_lock(&state->slot_mutex);
354 mutex_unlock(&state->slot_mutex);
357 /* Unblock any sending thread. */
358 service_quota = &state->service_quotas[service->localport];
359 complete(&service_quota->quota_event);
/* Convenience wrapper: mark a service closing from a non-slot-handler
 * context (sh_thread = 0). */
363 mark_service_closing(struct vchiq_service *service)
365 mark_service_closing_internal(service, 0);
/* Invoke the service's registered callback for 'reason'. A VCHIQ_ERROR
 * return from the callback is logged and downgraded to VCHIQ_SUCCESS; for
 * reasons other than VCHIQ_MESSAGE_AVAILABLE the header is released here.
 * NOTE(review): some argument/brace lines are elided in this extract. */
368 static inline enum vchiq_status
369 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
370 struct vchiq_header *header, void *bulk_userdata)
372 enum vchiq_status status;
374 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
375 service->state->id, service->localport, reason_names[reason],
376 header, bulk_userdata);
377 status = service->base.callback(reason, header, service->handle,
379 if (status == VCHIQ_ERROR) {
380 vchiq_log_warning(vchiq_core_log_level,
381 "%d: ignoring ERROR from callback to service %x",
382 service->state->id, service->handle);
383 status = VCHIQ_SUCCESS;
386 if (reason != VCHIQ_MESSAGE_AVAILABLE)
387 vchiq_release_message(service->handle, header);
/* Record a connection state transition: log old->new, store the new state
 * and notify the platform layer of the change. */
393 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
395 enum vchiq_connstate oldstate = state->conn_state;
397 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
398 conn_state_names[oldstate],
399 conn_state_names[newstate]);
400 state->conn_state = newstate;
401 vchiq_platform_conn_state_changed(state, oldstate, newstate);
/* Initialise the wait queue backing a remote_event. The 'fired' flag is
 * intentionally left alone because the peer may have set it already. */
405 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
408 /* Don't clear the 'fired' flag because it may already have been set
409 ** by the other side. */
410 init_waitqueue_head(wq);
414 * All the event waiting routines in VCHIQ used a custom semaphore
415 * implementation that filtered most signals. This achieved a behaviour similar
416 * to the "killable" family of functions. While cleaning up this code all the
417 * routines where switched to the "interruptible" family of functions, as the
418 * former was deemed unjustified and the use "killable" set all VCHIQ's
419 * threads in D state.
/* Wait interruptibly until the peer fires the event (see the rationale
 * comment above about interruptible vs. killable waits). NOTE(review): the
 * armed-flag handling and signal-recovery body are missing from this
 * extract — do not infer semantics beyond the visible wait. */
422 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
427 if (wait_event_interruptible(*wq, event->fired)) {
/* Signal a remote_event from the local side (body elided in this extract;
 * callers use it to wake the local waiter on wq). */
440 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
/* If the event has fired while armed, deliver it locally via
 * remote_event_signal_local(). */
448 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
450 if (event->fired && event->armed)
451 remote_event_signal_local(wq, event);
/* Poll all four local remote_events (sync trigger/release, trigger,
 * recycle) so any pending signals are delivered to their waiters. */
455 remote_event_pollall(struct vchiq_state *state)
457 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
458 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
459 remote_event_poll(&state->trigger_event, &state->local->trigger);
460 remote_event_poll(&state->recycle_event, &state->local->recycle);
463 /* Round up message sizes so that any space at the end of a slot is always big
464 ** enough for a header. This relies on header size being a power of two, which
465 ** has been verified earlier by a static assertion. */
/* Returns the payload size plus one header, rounded up to a multiple of
 * sizeof(struct vchiq_header). Return-type line elided in this extract. */
468 calc_stride(size_t size)
470 /* Allow room for the header */
471 size += sizeof(struct vchiq_header);
474 return (size + sizeof(struct vchiq_header) - 1) &
475 ~(sizeof(struct vchiq_header) - 1);
478 /* Called by the slot handler thread */
/* Find a service advertising 'fourcc' that can accept an OPEN: either
 * LISTENING, or OPEN with no remote port yet. Takes a reference on the
 * match. RCU lock/return lines elided in this extract. */
479 static struct vchiq_service *
480 get_listening_service(struct vchiq_state *state, int fourcc)
484 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
487 for (i = 0; i < state->unused_service; i++) {
488 struct vchiq_service *service;
490 service = rcu_dereference(state->services[i]);
492 service->public_fourcc == fourcc &&
493 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
494 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
495 service->remoteport == VCHIQ_PORT_FREE)) &&
496 kref_get_unless_zero(&service->ref_count)) {
497 service = rcu_pointer_handoff(service);
506 /* Called by the slot handler thread */
/* Find the OPEN service connected to remote 'port' and take a reference
 * on it. RCU lock/return lines elided in this extract. */
507 static struct vchiq_service *
508 get_connected_service(struct vchiq_state *state, unsigned int port)
513 for (i = 0; i < state->unused_service; i++) {
514 struct vchiq_service *service =
515 rcu_dereference(state->services[i]);
517 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
518 service->remoteport == port &&
519 kref_get_unless_zero(&service->ref_count)) {
520 service = rcu_pointer_handoff(service);
/* Record a poll request for 'service' (lock-free via cmpxchg loops on the
 * per-service poll_flags and the per-group poll_services bitmaps), then set
 * poll_needed and kick the slot handler. NOTE(review): the do-loop keywords
 * and closing braces are elided in this extract. */
530 request_poll(struct vchiq_state *state, struct vchiq_service *service,
537 value = atomic_read(&service->poll_flags);
538 } while (atomic_cmpxchg(&service->poll_flags, value,
539 value | BIT(poll_type)) != value);
/* Set this service's bit in the 32-services-per-word group bitmap. */
542 value = atomic_read(&state->poll_services[
543 service->localport>>5]);
544 } while (atomic_cmpxchg(
545 &state->poll_services[service->localport>>5],
546 value, value | BIT(service->localport & 0x1f))
550 state->poll_needed = 1;
553 /* ... and ensure the slot handler runs. */
554 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
557 /* Called from queue_message, by the slot handler and application threads,
558 ** with slot_mutex held */
/* Reserve 'space' bytes in the transmit stream, padding out the current
 * slot and (if needed) acquiring the next free slot, blocking only when
 * 'is_blocking' allows it. Returns a header pointer into tx_data, or NULL
 * when no space is available. NOTE(review): several lines (braces, the
 * is_blocking test, slot_index declaration, tx_data assignment) are elided
 * in this extract; code kept byte-identical. */
559 static struct vchiq_header *
560 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
562 struct vchiq_shared_state *local = state->local;
563 int tx_pos = state->local_tx_pos;
564 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
566 if (space > slot_space) {
567 struct vchiq_header *header;
568 /* Fill the remaining space with padding */
569 WARN_ON(!state->tx_data);
570 header = (struct vchiq_header *)
571 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
572 header->msgid = VCHIQ_MSGID_PADDING;
573 header->size = slot_space - sizeof(struct vchiq_header);
575 tx_pos += slot_space;
578 /* If necessary, get the next slot. */
579 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
582 /* If there is no free slot... */
584 if (!try_wait_for_completion(&state->slot_available_event)) {
585 /* ...wait for one. */
587 VCHIQ_STATS_INC(state, slot_stalls);
589 /* But first, flush through the last slot. */
590 state->local_tx_pos = tx_pos;
591 local->tx_pos = tx_pos;
592 remote_event_signal(&state->remote->trigger);
595 (wait_for_completion_interruptible(
596 &state->slot_available_event)))
597 return NULL; /* No space available */
600 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
601 complete(&state->slot_available_event);
602 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
606 slot_index = local->slot_queue[
607 SLOT_QUEUE_INDEX_FROM_POS(tx_pos) &
608 VCHIQ_SLOT_QUEUE_MASK];
610 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
613 state->local_tx_pos = tx_pos + space;
615 return (struct vchiq_header *)(state->tx_data +
616 (tx_pos & VCHIQ_SLOT_MASK));
619 /* Called by the recycle thread. */
/* Walk slots the peer has recycled, decrement the per-service message and
 * slot quota counters for every DATA message they contained, wake quota
 * waiters, then advance slot_queue_available and signal slot availability.
 * NOTE(review): 'spin_lock("a_spinlock);' below is an HTML-unescape
 * artifact of 'spin_lock(&quota_spinlock);' ('&quot' -> '"'); quota_spinlock
 * is declared above in this file. Kept byte-identical. */
621 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
624 struct vchiq_shared_state *local = state->local;
625 int slot_queue_available;
627 /* Find slots which have been freed by the other side, and return them
628 ** to the available queue. */
629 slot_queue_available = state->slot_queue_available;
632 * Use a memory barrier to ensure that any state that may have been
633 * modified by another thread is not masked by stale prefetched
638 while (slot_queue_available != local->slot_queue_recycle) {
640 int slot_index = local->slot_queue[slot_queue_available++ &
641 VCHIQ_SLOT_QUEUE_MASK];
642 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
646 * Beware of the address dependency - data is calculated
647 * using an index written by the other side.
651 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
652 state->id, slot_index, data,
653 local->slot_queue_recycle, slot_queue_available);
655 /* Initialise the bitmask for services which have used this
657 memset(service_found, 0, length);
661 while (pos < VCHIQ_SLOT_SIZE) {
662 struct vchiq_header *header =
663 (struct vchiq_header *)(data + pos);
664 int msgid = header->msgid;
666 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
667 int port = VCHIQ_MSG_SRCPORT(msgid);
668 struct vchiq_service_quota *service_quota =
669 &state->service_quotas[port];
672 spin_lock("a_spinlock);
673 count = service_quota->message_use_count;
675 service_quota->message_use_count =
677 spin_unlock("a_spinlock);
679 if (count == service_quota->message_quota)
680 /* Signal the service that it
681 ** has dropped below its quota
683 complete(&service_quota->quota_event);
684 else if (count == 0) {
685 vchiq_log_error(vchiq_core_log_level,
686 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
688 service_quota->message_use_count,
689 header, msgid, header->msgid,
691 WARN(1, "invalid message use count\n");
693 if (!BITSET_IS_SET(service_found, port)) {
694 /* Set the found bit for this service */
695 BITSET_SET(service_found, port);
697 spin_lock("a_spinlock);
698 count = service_quota->slot_use_count;
700 service_quota->slot_use_count =
702 spin_unlock("a_spinlock);
705 /* Signal the service in case
706 ** it has dropped below its
708 complete(&service_quota->quota_event);
710 vchiq_core_log_level,
711 "%d: pfq:%d %x@%pK - slot_use->%d",
713 header->size, header,
717 vchiq_core_log_level,
718 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
720 msgid, header->msgid,
722 WARN(1, "bad slot use count\n");
729 pos += calc_stride(header->size);
730 if (pos > VCHIQ_SLOT_SIZE) {
731 vchiq_log_error(vchiq_core_log_level,
732 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
733 pos, header, msgid, header->msgid,
735 WARN(1, "invalid slot position\n");
742 spin_lock("a_spinlock);
743 count = state->data_use_count;
745 state->data_use_count =
747 spin_unlock("a_spinlock);
748 if (count == state->data_quota)
749 complete(&state->data_quota_event);
753 * Don't allow the slot to be reused until we are no
754 * longer interested in it.
758 state->slot_queue_available = slot_queue_available;
759 complete(&state->slot_available_event);
/* Default copy callback: plain memcpy from the context buffer at the given
 * offset. Return-type and return-value lines elided in this extract. */
764 memcpy_copy_callback(
765 void *context, void *dest,
766 size_t offset, size_t maxsize)
768 memcpy(dest + offset, context + offset, maxsize);
/* NOTE(review): the function's opening line is missing from this extract —
 * from the visible body this is presumably the copy_message_data() loop:
 * repeatedly invoke copy_callback to fill 'dest', stopping on error (<0),
 * on zero progress, or when a chunk exceeds max_bytes. Confirm upstream. */
774 ssize_t (*copy_callback)(void *context, void *dest,
775 size_t offset, size_t maxsize),
783 ssize_t callback_result;
784 size_t max_bytes = size - pos;
787 copy_callback(context, dest + pos,
790 if (callback_result < 0)
791 return callback_result;
793 if (!callback_result)
796 if (callback_result > max_bytes)
799 pos += callback_result;
805 /* Called by the slot handler and application threads */
/* Queue one message into the transmit slot stream. DATA messages are
 * subject to per-state and per-service quota throttling (waiting on
 * data_quota_event / quota_event); the payload is produced through
 * copy_callback. On success the header is published, tx_pos is made
 * visible to the peer and its trigger event is signalled.
 * QMFLAGS_* control blocking and slot_mutex handling (see enum above).
 * NOTE(review): this extract is line-sampled — returns, braces and some
 * declarations are missing; 'spin_lock("a_spinlock);' is an HTML-unescape
 * artifact of 'spin_lock(&quota_spinlock);'. Code kept byte-identical. */
806 static enum vchiq_status
807 queue_message(struct vchiq_state *state, struct vchiq_service *service,
809 ssize_t (*copy_callback)(void *context, void *dest,
810 size_t offset, size_t maxsize),
811 void *context, size_t size, int flags)
813 struct vchiq_shared_state *local;
814 struct vchiq_service_quota *service_quota = NULL;
815 struct vchiq_header *header;
816 int type = VCHIQ_MSG_TYPE(msgid);
820 local = state->local;
822 stride = calc_stride(size);
824 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
826 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
827 mutex_lock_killable(&state->slot_mutex))
830 if (type == VCHIQ_MSG_DATA) {
834 WARN(1, "%s: service is NULL\n", __func__);
835 mutex_unlock(&state->slot_mutex);
839 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
840 QMFLAGS_NO_MUTEX_UNLOCK));
842 if (service->closing) {
843 /* The service has been closed */
844 mutex_unlock(&state->slot_mutex);
848 service_quota = &state->service_quotas[service->localport];
850 spin_lock("a_spinlock);
852 /* Ensure this service doesn't use more than its quota of
853 ** messages or slots */
854 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
855 state->local_tx_pos + stride - 1);
857 /* Ensure data messages don't use more than their quota of
859 while ((tx_end_index != state->previous_data_index) &&
860 (state->data_use_count == state->data_quota)) {
861 VCHIQ_STATS_INC(state, data_stalls);
862 spin_unlock("a_spinlock);
863 mutex_unlock(&state->slot_mutex);
865 if (wait_for_completion_interruptible(
866 &state->data_quota_event))
869 mutex_lock(&state->slot_mutex);
870 spin_lock("a_spinlock);
871 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
872 state->local_tx_pos + stride - 1);
873 if ((tx_end_index == state->previous_data_index) ||
874 (state->data_use_count < state->data_quota)) {
875 /* Pass the signal on to other waiters */
876 complete(&state->data_quota_event);
881 while ((service_quota->message_use_count ==
882 service_quota->message_quota) ||
883 ((tx_end_index != service_quota->previous_tx_index) &&
884 (service_quota->slot_use_count ==
885 service_quota->slot_quota))) {
886 spin_unlock("a_spinlock);
887 vchiq_log_trace(vchiq_core_log_level,
888 "%d: qm:%d %s,%zx - quota stall "
890 state->id, service->localport,
891 msg_type_str(type), size,
892 service_quota->message_use_count,
893 service_quota->slot_use_count);
894 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
895 mutex_unlock(&state->slot_mutex);
896 if (wait_for_completion_interruptible(
897 &service_quota->quota_event))
899 if (service->closing)
901 if (mutex_lock_killable(&state->slot_mutex))
903 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
904 /* The service has been closed */
905 mutex_unlock(&state->slot_mutex);
908 spin_lock("a_spinlock);
909 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
910 state->local_tx_pos + stride - 1);
913 spin_unlock("a_spinlock);
916 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
920 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
921 /* In the event of a failure, return the mutex to the
923 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
924 mutex_unlock(&state->slot_mutex);
928 if (type == VCHIQ_MSG_DATA) {
929 ssize_t callback_result;
933 vchiq_log_info(vchiq_core_log_level,
934 "%d: qm %s@%pK,%zx (%d->%d)",
935 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
936 header, size, VCHIQ_MSG_SRCPORT(msgid),
937 VCHIQ_MSG_DSTPORT(msgid));
939 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
940 QMFLAGS_NO_MUTEX_UNLOCK));
943 copy_message_data(copy_callback, context,
946 if (callback_result < 0) {
947 mutex_unlock(&state->slot_mutex);
948 VCHIQ_SERVICE_STATS_INC(service,
953 if (SRVTRACE_ENABLED(service,
955 vchiq_log_dump_mem("Sent", 0,
958 (size_t)callback_result));
960 spin_lock("a_spinlock);
961 service_quota->message_use_count++;
964 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
966 /* If this transmission can't fit in the last slot used by any
967 ** service, the data_use_count must be increased. */
968 if (tx_end_index != state->previous_data_index) {
969 state->previous_data_index = tx_end_index;
970 state->data_use_count++;
973 /* If this isn't the same slot last used by this service,
974 ** the service's slot_use_count must be increased. */
975 if (tx_end_index != service_quota->previous_tx_index) {
976 service_quota->previous_tx_index = tx_end_index;
977 slot_use_count = ++service_quota->slot_use_count;
982 spin_unlock("a_spinlock);
985 vchiq_log_trace(vchiq_core_log_level,
986 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
987 state->id, service->localport,
988 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
989 slot_use_count, header);
991 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
992 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
994 vchiq_log_info(vchiq_core_log_level,
995 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
996 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
997 header, size, VCHIQ_MSG_SRCPORT(msgid),
998 VCHIQ_MSG_DSTPORT(msgid));
1000 /* It is assumed for now that this code path
1001 * only happens from calls inside this file.
1003 * External callers are through the vchiq_queue_message
1004 * path which always sets the type to be VCHIQ_MSG_DATA
1006 * At first glance this appears to be correct but
1007 * more review is needed.
1009 copy_message_data(copy_callback, context,
1010 header->data, size);
1012 VCHIQ_STATS_INC(state, ctrl_tx_count);
1015 header->msgid = msgid;
1016 header->size = size;
1021 svc_fourcc = service
1022 ? service->base.fourcc
1023 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1025 vchiq_log_info(SRVTRACE_LEVEL(service),
1026 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1027 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1028 VCHIQ_MSG_TYPE(msgid),
1029 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1030 VCHIQ_MSG_SRCPORT(msgid),
1031 VCHIQ_MSG_DSTPORT(msgid),
1035 /* Make sure the new header is visible to the peer. */
1038 /* Make the new tx_pos visible to the peer. */
1039 local->tx_pos = state->local_tx_pos;
1042 if (service && (type == VCHIQ_MSG_CLOSE))
1043 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1045 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1046 mutex_unlock(&state->slot_mutex);
1048 remote_event_signal(&state->remote->trigger);
1050 return VCHIQ_SUCCESS;
1053 /* Called by the slot handler and application threads */
/* Queue one message into the dedicated synchronous slot: take sync_mutex
 * (except for RESUME), wait for the peer to release the sync slot, copy the
 * payload in, publish the header and fire the peer's sync trigger.
 * NOTE(review): the error path below unlocks slot_mutex even though this
 * function acquires sync_mutex — looks like it should be sync_mutex;
 * confirm against upstream before relying on this path. */
1054 static enum vchiq_status
1055 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1057 ssize_t (*copy_callback)(void *context, void *dest,
1058 size_t offset, size_t maxsize),
1059 void *context, int size, int is_blocking)
1061 struct vchiq_shared_state *local;
1062 struct vchiq_header *header;
1063 ssize_t callback_result;
1065 local = state->local;
1067 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1068 mutex_lock_killable(&state->sync_mutex))
1071 remote_event_wait(&state->sync_release_event, &local->sync_release);
1075 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1079 int oldmsgid = header->msgid;
1081 if (oldmsgid != VCHIQ_MSGID_PADDING)
1082 vchiq_log_error(vchiq_core_log_level,
1083 "%d: qms - msgid %x, not PADDING",
1084 state->id, oldmsgid);
1087 vchiq_log_info(vchiq_sync_log_level,
1088 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1089 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1090 header, size, VCHIQ_MSG_SRCPORT(msgid),
1091 VCHIQ_MSG_DSTPORT(msgid));
1094 copy_message_data(copy_callback, context,
1095 header->data, size);
1097 if (callback_result < 0) {
1098 mutex_unlock(&state->slot_mutex);
1099 VCHIQ_SERVICE_STATS_INC(service,
1105 if (SRVTRACE_ENABLED(service,
1107 vchiq_log_dump_mem("Sent", 0,
1110 (size_t)callback_result));
1112 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1113 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1115 VCHIQ_STATS_INC(state, ctrl_tx_count);
1118 header->size = size;
1119 header->msgid = msgid;
1121 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1124 svc_fourcc = service
1125 ? service->base.fourcc
1126 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1128 vchiq_log_trace(vchiq_sync_log_level,
1129 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1130 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1131 VCHIQ_MSG_TYPE(msgid),
1132 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1133 VCHIQ_MSG_SRCPORT(msgid),
1134 VCHIQ_MSG_DSTPORT(msgid),
1138 remote_event_signal(&state->remote->sync_trigger);
/* sync_mutex stays held across PAUSE so the peer processes it first. */
1140 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1141 mutex_unlock(&state->sync_mutex);
1143 return VCHIQ_SUCCESS;
/* Claim a reference on a slot (body elided in this extract — presumably
 * increments slot->use_count; paired with release_slot() below). */
1147 claim_slot(struct vchiq_slot_info *slot)
/* Release one claim on a receive slot under recycle_mutex. If a message
 * header is supplied, clear its CLAIMED bit (skipping unclaimed headers and
 * closing services to avoid double releases). When release_count catches up
 * with use_count the slot index is appended to the peer's recycle queue and
 * the peer's recycle event is signalled. */
1153 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1154 struct vchiq_header *header, struct vchiq_service *service)
1158 mutex_lock(&state->recycle_mutex);
1161 int msgid = header->msgid;
1163 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1164 (service && service->closing)) {
1165 mutex_unlock(&state->recycle_mutex);
1169 /* Rewrite the message header to prevent a double
1171 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1174 release_count = slot_info->release_count;
1175 slot_info->release_count = ++release_count;
1177 if (release_count == slot_info->use_count) {
1178 int slot_queue_recycle;
1179 /* Add to the freed queue */
1181 /* A read barrier is necessary here to prevent speculative
1182 ** fetches of remote->slot_queue_recycle from overtaking the
1186 slot_queue_recycle = state->remote->slot_queue_recycle;
1187 state->remote->slot_queue[slot_queue_recycle &
1188 VCHIQ_SLOT_QUEUE_MASK] =
1189 SLOT_INDEX_FROM_INFO(state, slot_info);
1190 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1191 vchiq_log_info(vchiq_core_log_level,
1192 "%d: %s %d - recycle->%x", state->id, __func__,
1193 SLOT_INDEX_FROM_INFO(state, slot_info),
1194 state->remote->slot_queue_recycle);
1196 /* A write barrier is necessary, but remote_event_signal
1198 remote_event_signal(&state->remote->recycle);
1201 mutex_unlock(&state->recycle_mutex);
1204 /* Called by the slot handler - don't hold the bulk mutex */
/* Deliver completion notifications for finished bulk transfers on 'queue'
 * (tx or rx): update stats, complete blocking waiters, or invoke the
 * service callback with the appropriate DONE/ABORTED reason. On
 * VCHIQ_RETRY from the callback a poll is re-requested so notification is
 * retried later. NOTE(review): several lines (stat field names, queue
 * advance, retry_poll handling) are elided in this extract. */
1205 static enum vchiq_status
1206 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1209 enum vchiq_status status = VCHIQ_SUCCESS;
1211 vchiq_log_trace(vchiq_core_log_level,
1212 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1213 service->state->id, service->localport,
1214 (queue == &service->bulk_tx) ? 't' : 'r',
1215 queue->process, queue->remote_notify, queue->remove);
1217 queue->remote_notify = queue->process;
1219 if (status == VCHIQ_SUCCESS) {
1220 while (queue->remove != queue->remote_notify) {
1221 struct vchiq_bulk *bulk =
1222 &queue->bulks[BULK_INDEX(queue->remove)];
1224 /* Only generate callbacks for non-dummy bulk
1225 ** requests, and non-terminated services */
1226 if (bulk->data && service->instance) {
1227 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1228 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1229 VCHIQ_SERVICE_STATS_INC(service,
1231 VCHIQ_SERVICE_STATS_ADD(service,
1235 VCHIQ_SERVICE_STATS_INC(service,
1237 VCHIQ_SERVICE_STATS_ADD(service,
1242 VCHIQ_SERVICE_STATS_INC(service,
1243 bulk_aborted_count);
1245 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1246 struct bulk_waiter *waiter;
1248 spin_lock(&bulk_waiter_spinlock);
1249 waiter = bulk->userdata;
1251 waiter->actual = bulk->actual;
1252 complete(&waiter->event);
1254 spin_unlock(&bulk_waiter_spinlock);
1255 } else if (bulk->mode ==
1256 VCHIQ_BULK_MODE_CALLBACK) {
1257 enum vchiq_reason reason = (bulk->dir ==
1258 VCHIQ_BULK_TRANSMIT) ?
1260 VCHIQ_BULK_ACTUAL_ABORTED) ?
1261 VCHIQ_BULK_TRANSMIT_ABORTED :
1262 VCHIQ_BULK_TRANSMIT_DONE) :
1264 VCHIQ_BULK_ACTUAL_ABORTED) ?
1265 VCHIQ_BULK_RECEIVE_ABORTED :
1266 VCHIQ_BULK_RECEIVE_DONE);
1267 status = make_service_callback(service,
1268 reason, NULL, bulk->userdata);
1269 if (status == VCHIQ_RETRY)
1275 complete(&service->bulk_remove_event);
1278 status = VCHIQ_SUCCESS;
1281 if (status == VCHIQ_RETRY)
1282 request_poll(service->state, service,
1283 (queue == &service->bulk_tx) ?
1284 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1289 /* Called by the slot handler thread */
/* Drain the poll_services bitmaps: for every flagged service, atomically
 * consume its poll_flags and act on them — REMOVE/TERMINATE trigger
 * vchiq_close_service_internal() (re-requesting the poll on failure),
 * TX/RXNOTIFY run notify_bulks(). The reference from find_service_by_port()
 * is dropped via unlock_service(). NOTE(review): the return-type line,
 * service_flags assignment and several braces are elided in this extract. */
1291 poll_services(struct vchiq_state *state)
1295 for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1298 flags = atomic_xchg(&state->poll_services[group], 0);
1299 for (i = 0; flags; i++) {
1300 if (flags & BIT(i)) {
1301 struct vchiq_service *service =
1302 find_service_by_port(state,
1310 atomic_xchg(&service->poll_flags, 0);
1312 BIT(VCHIQ_POLL_REMOVE)) {
1313 vchiq_log_info(vchiq_core_log_level,
1314 "%d: ps - remove %d<->%d",
1315 state->id, service->localport,
1316 service->remoteport);
1318 /* Make it look like a client, because
1319 it must be removed and not left in
1320 the LISTENING state. */
1321 service->public_fourcc =
1322 VCHIQ_FOURCC_INVALID;
1324 if (vchiq_close_service_internal(
1325 service, 0/*!close_recvd*/) !=
1327 request_poll(state, service,
1329 } else if (service_flags &
1330 BIT(VCHIQ_POLL_TERMINATE)) {
1331 vchiq_log_info(vchiq_core_log_level,
1332 "%d: ps - terminate %d<->%d",
1333 state->id, service->localport,
1334 service->remoteport);
1335 if (vchiq_close_service_internal(
1336 service, 0/*!close_recvd*/) !=
1338 request_poll(state, service,
1339 VCHIQ_POLL_TERMINATE);
1341 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1342 notify_bulks(service,
1345 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1346 notify_bulks(service,
1349 unlock_service(service);
1355 /* Called with the bulk_mutex held */
/* Abort every bulk transfer still outstanding on 'queue': fabricate dummy
 * remote entries where the peer never matched a request, mark each bulk
 * VCHIQ_BULK_ACTUAL_ABORTED and complete locally-inserted ones via
 * vchiq_complete_bulk(). The insert/process counters are free-running;
 * the WARN_ONs check their invariant ordering. NOTE(review): several lines
 * (queue->process advance, log arguments, braces) are elided here. */
1357 abort_outstanding_bulks(struct vchiq_service *service,
1358 struct vchiq_bulk_queue *queue)
1360 int is_tx = (queue == &service->bulk_tx);
1362 vchiq_log_trace(vchiq_core_log_level,
1363 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1364 service->state->id, service->localport, is_tx ? 't' : 'r',
1365 queue->local_insert, queue->remote_insert, queue->process);
1367 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1368 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1370 while ((queue->process != queue->local_insert) ||
1371 (queue->process != queue->remote_insert)) {
1372 struct vchiq_bulk *bulk =
1373 &queue->bulks[BULK_INDEX(queue->process)];
1375 if (queue->process == queue->remote_insert) {
1376 /* fabricate a matching dummy bulk */
1377 bulk->remote_data = NULL;
1378 bulk->remote_size = 0;
1379 queue->remote_insert++;
1382 if (queue->process != queue->local_insert) {
1383 vchiq_complete_bulk(bulk);
1385 vchiq_log_info(SRVTRACE_LEVEL(service),
1386 "%s %c%c%c%c d:%d ABORTED - tx len:%d, "
1388 is_tx ? "Send Bulk to" : "Recv Bulk from",
1389 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1390 service->remoteport,
1394 /* fabricate a matching dummy bulk */
1397 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1398 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1400 queue->local_insert++;
/*
 * parse_open - handle a received VCHIQ_MSG_OPEN.
 *
 * Validates the open payload, looks up a listening service matching the
 * requested fourcc, negotiates protocol versions, and acknowledges with
 * an OPENACK (via the sync or async queue depending on service->sync),
 * moving the service to OPEN/OPENSYNC.  If no service matches, or the
 * versions are incompatible, a CLOSE is sent back instead.  The
 * bail_not_ready path is taken when a queue_message*() returns
 * VCHIQ_RETRY, so the caller can re-parse this slot later.
 *
 * NOTE(review): several lines (declarations, returns, some call
 * arguments) are missing from this extraction; code reproduced as-is.
 */
1408 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1410 struct vchiq_service *service = NULL;
1412 unsigned int localport, remoteport;
1414 msgid = header->msgid;
1415 size = header->size;
1416 localport = VCHIQ_MSG_DSTPORT(msgid);
1417 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* Only trust the payload if the message is big enough to contain it. */
1418 if (size >= sizeof(struct vchiq_open_payload)) {
1419 const struct vchiq_open_payload *payload =
1420 (struct vchiq_open_payload *)header->data;
1421 unsigned int fourcc;
1423 fourcc = payload->fourcc;
1424 vchiq_log_info(vchiq_core_log_level,
1425 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1426 state->id, header, localport,
1427 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1429 service = get_listening_service(state, fourcc);
1432 /* A matching service exists */
1433 short version = payload->version;
1434 short version_min = payload->version_min;
/* Reject unless the two [min, version] ranges overlap. */
1436 if ((service->version < version_min) ||
1437 (version < service->version_min)) {
1438 /* Version mismatch */
1439 vchiq_loud_error_header();
1440 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1441 "version mismatch - local (%d, min %d)"
1442 " vs. remote (%d, min %d)",
1443 state->id, service->localport,
1444 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1445 service->version, service->version_min,
1446 version, version_min);
1447 vchiq_loud_error_footer();
1448 unlock_service(service);
1452 service->peer_version = version;
1454 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1455 struct vchiq_openack_payload ack_payload = {
/* Older peers do not understand synchronous mode. */
1459 if (state->version_common <
1460 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1463 /* Acknowledge the OPEN */
1464 if (service->sync) {
1465 if (queue_message_sync(
1472 memcpy_copy_callback,
1474 sizeof(ack_payload),
1476 goto bail_not_ready;
1478 if (queue_message(state,
1484 memcpy_copy_callback,
1486 sizeof(ack_payload),
1488 goto bail_not_ready;
1491 /* The service is now open */
1492 vchiq_set_service_state(service,
1493 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1494 : VCHIQ_SRVSTATE_OPEN);
1497 /* Success - the message has been dealt with */
1498 unlock_service(service);
1504 /* No available service, or an invalid request - send a CLOSE */
1505 if (queue_message(state, NULL,
1506 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1507 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1508 goto bail_not_ready;
/* bail_not_ready: drop the service ref before asking to be re-run. */
1514 unlock_service(service);
1519 /* Called by the slot handler thread */
/*
 * parse_rx_slots - consume messages from the remote's transmit slots.
 *
 * Processes headers from state->rx_pos up to the remote's published
 * tx_pos.  Each slot is pinned with an extra use_count while it is being
 * read, released when rx_pos crosses a slot boundary.  Message types are
 * dispatched by VCHIQ_MSG_TYPE(); service-directed types first resolve
 * the destination service (with a fallback search for a CLOSE racing an
 * OPENACK).  Any sub-handler that cannot make progress jumps to
 * bail_not_ready so the whole slot is re-parsed later.
 *
 * NOTE(review): the extraction drops many lines (declarations, breaks,
 * closing braces, some log arguments); code reproduced as-is.
 */
1521 parse_rx_slots(struct vchiq_state *state)
1523 struct vchiq_shared_state *remote = state->remote;
1524 struct vchiq_service *service = NULL;
1527 DEBUG_INITIALISE(state->local)
/* Snapshot the remote's write position; loop until we catch up. */
1529 tx_pos = remote->tx_pos;
1531 while (state->rx_pos != tx_pos) {
1532 struct vchiq_header *header;
1535 unsigned int localport, remoteport;
1537 DEBUG_TRACE(PARSE_LINE);
1538 if (!state->rx_data) {
/* Starting a fresh slot - must be slot-aligned. */
1541 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1542 rx_index = remote->slot_queue[
1543 SLOT_QUEUE_INDEX_FROM_POS(state->rx_pos) &
1544 VCHIQ_SLOT_QUEUE_MASK];
1545 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1547 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1549 /* Initialise use_count to one, and increment
1550 ** release_count at the end of the slot to avoid
1551 ** releasing the slot prematurely. */
1552 state->rx_info->use_count = 1;
1553 state->rx_info->release_count = 0;
1556 header = (struct vchiq_header *)(state->rx_data +
1557 (state->rx_pos & VCHIQ_SLOT_MASK));
1558 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1559 msgid = header->msgid;
1560 DEBUG_VALUE(PARSE_MSGID, msgid);
1561 size = header->size;
1562 type = VCHIQ_MSG_TYPE(msgid);
1563 localport = VCHIQ_MSG_DSTPORT(msgid);
1564 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1566 if (type != VCHIQ_MSG_DATA)
1567 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* Service-directed messages: resolve the destination service. */
1570 case VCHIQ_MSG_OPENACK:
1571 case VCHIQ_MSG_CLOSE:
1572 case VCHIQ_MSG_DATA:
1573 case VCHIQ_MSG_BULK_RX:
1574 case VCHIQ_MSG_BULK_TX:
1575 case VCHIQ_MSG_BULK_RX_DONE:
1576 case VCHIQ_MSG_BULK_TX_DONE:
1577 service = find_service_by_port(state, localport);
1579 ((service->remoteport != remoteport) &&
1580 (service->remoteport != VCHIQ_PORT_FREE))) &&
1582 (type == VCHIQ_MSG_CLOSE)) {
1583 /* This could be a CLOSE from a client which
1584 hadn't yet received the OPENACK - look for
1585 the connected service */
1587 unlock_service(service);
1588 service = get_connected_service(state,
1591 vchiq_log_warning(vchiq_core_log_level,
1592 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1593 state->id, msg_type_str(type),
1594 header, remoteport, localport,
1595 service->localport);
1599 vchiq_log_error(vchiq_core_log_level,
1600 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1601 state->id, msg_type_str(type),
1602 header, remoteport, localport,
1611 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1614 svc_fourcc = service
1615 ? service->base.fourcc
1616 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1617 vchiq_log_info(SRVTRACE_LEVEL(service),
1618 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d "
1620 msg_type_str(type), type,
1621 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1622 remoteport, localport, size);
1624 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Reject a header whose stride would overrun its slot. */
1628 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1629 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1630 vchiq_log_error(vchiq_core_log_level,
1631 "header %pK (msgid %x) - size %x too big for slot",
1632 header, (unsigned int)msgid,
1633 (unsigned int)size);
1634 WARN(1, "oversized for slot\n");
1638 case VCHIQ_MSG_OPEN:
1639 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1640 if (!parse_open(state, header))
1641 goto bail_not_ready;
1643 case VCHIQ_MSG_OPENACK:
1644 if (size >= sizeof(struct vchiq_openack_payload)) {
1645 const struct vchiq_openack_payload *payload =
1646 (struct vchiq_openack_payload *)
1648 service->peer_version = payload->version;
1650 vchiq_log_info(vchiq_core_log_level,
1651 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1652 state->id, header, size, remoteport, localport,
1653 service->peer_version);
1654 if (service->srvstate ==
1655 VCHIQ_SRVSTATE_OPENING) {
1656 service->remoteport = remoteport;
1657 vchiq_set_service_state(service,
1658 VCHIQ_SRVSTATE_OPEN);
/* Wake the opener blocked in vchiq_open_service_internal(). */
1659 complete(&service->remove_event);
1661 vchiq_log_error(vchiq_core_log_level,
1662 "OPENACK received in state %s",
1663 srvstate_names[service->srvstate]);
1665 case VCHIQ_MSG_CLOSE:
1666 WARN_ON(size != 0); /* There should be no data */
1668 vchiq_log_info(vchiq_core_log_level,
1669 "%d: prs CLOSE@%pK (%d->%d)",
1670 state->id, header, remoteport, localport);
1672 mark_service_closing_internal(service, 1);
1674 if (vchiq_close_service_internal(service,
1675 1/*close_recvd*/) == VCHIQ_RETRY)
1676 goto bail_not_ready;
1678 vchiq_log_info(vchiq_core_log_level,
1679 "Close Service %c%c%c%c s:%u d:%d",
1680 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1682 service->remoteport);
1684 case VCHIQ_MSG_DATA:
1685 vchiq_log_info(vchiq_core_log_level,
1686 "%d: prs DATA@%pK,%x (%d->%d)",
1687 state->id, header, size, remoteport, localport);
1689 if ((service->remoteport == remoteport)
1690 && (service->srvstate ==
1691 VCHIQ_SRVSTATE_OPEN)) {
/* Claim the slot so the header survives until the
 * service releases the message. */
1692 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1693 claim_slot(state->rx_info);
1694 DEBUG_TRACE(PARSE_LINE);
1695 if (make_service_callback(service,
1696 VCHIQ_MESSAGE_AVAILABLE, header,
1697 NULL) == VCHIQ_RETRY) {
1698 DEBUG_TRACE(PARSE_LINE);
1699 goto bail_not_ready;
1701 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1702 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1705 VCHIQ_STATS_INC(state, error_count);
1708 case VCHIQ_MSG_CONNECT:
1709 vchiq_log_info(vchiq_core_log_level,
1710 "%d: prs CONNECT@%pK", state->id, header);
1711 state->version_common = ((struct vchiq_slot_zero *)
1712 state->slot_data)->version;
1713 complete(&state->connect);
1715 case VCHIQ_MSG_BULK_RX:
1716 case VCHIQ_MSG_BULK_TX:
1718 * We should never receive a bulk request from the
1719 * other side since we're not setup to perform as the
1724 case VCHIQ_MSG_BULK_RX_DONE:
1725 case VCHIQ_MSG_BULK_TX_DONE:
1726 if ((service->remoteport == remoteport)
1727 && (service->srvstate !=
1728 VCHIQ_SRVSTATE_FREE)) {
1729 struct vchiq_bulk_queue *queue;
1730 struct vchiq_bulk *bulk;
/* RX_DONE completes our receives, TX_DONE our sends. */
1732 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1733 &service->bulk_rx : &service->bulk_tx;
1735 DEBUG_TRACE(PARSE_LINE);
1736 if (mutex_lock_killable(&service->bulk_mutex)) {
1737 DEBUG_TRACE(PARSE_LINE);
1738 goto bail_not_ready;
/* A DONE with no matching outstanding transfer. */
1740 if ((int)(queue->remote_insert -
1741 queue->local_insert) >= 0) {
1742 vchiq_log_error(vchiq_core_log_level,
1743 "%d: prs %s@%pK (%d->%d) "
1744 "unexpected (ri=%d,li=%d)",
1745 state->id, msg_type_str(type),
1746 header, remoteport, localport,
1747 queue->remote_insert,
1748 queue->local_insert);
1749 mutex_unlock(&service->bulk_mutex);
1752 if (queue->process != queue->remote_insert) {
1753 pr_err("%s: p %x != ri %x\n",
1756 queue->remote_insert);
1757 mutex_unlock(&service->bulk_mutex);
1758 goto bail_not_ready;
1761 bulk = &queue->bulks[
1762 BULK_INDEX(queue->remote_insert)];
/* Payload of a *_DONE is the actual transfer length. */
1763 bulk->actual = *(int *)header->data;
1764 queue->remote_insert++;
1766 vchiq_log_info(vchiq_core_log_level,
1767 "%d: prs %s@%pK (%d->%d) %x@%pK",
1768 state->id, msg_type_str(type),
1769 header, remoteport, localport,
1770 bulk->actual, bulk->data);
1772 vchiq_log_trace(vchiq_core_log_level,
1773 "%d: prs:%d %cx li=%x ri=%x p=%x",
1774 state->id, localport,
1775 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1777 queue->local_insert,
1778 queue->remote_insert, queue->process);
1780 DEBUG_TRACE(PARSE_LINE);
1781 WARN_ON(queue->process == queue->local_insert);
1782 vchiq_complete_bulk(bulk);
1784 mutex_unlock(&service->bulk_mutex);
1785 DEBUG_TRACE(PARSE_LINE);
1786 notify_bulks(service, queue, 1/*retry_poll*/);
1787 DEBUG_TRACE(PARSE_LINE);
1790 case VCHIQ_MSG_PADDING:
1791 vchiq_log_trace(vchiq_core_log_level,
1792 "%d: prs PADDING@%pK,%x",
1793 state->id, header, size);
1795 case VCHIQ_MSG_PAUSE:
1796 /* If initiated, signal the application thread */
1797 vchiq_log_trace(vchiq_core_log_level,
1798 "%d: prs PAUSE@%pK,%x",
1799 state->id, header, size);
1800 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1801 vchiq_log_error(vchiq_core_log_level,
1802 "%d: PAUSE received in state PAUSED",
1806 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1807 /* Send a PAUSE in response */
1808 if (queue_message(state, NULL,
1809 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1810 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1812 goto bail_not_ready;
1814 /* At this point slot_mutex is held */
1815 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1817 case VCHIQ_MSG_RESUME:
1818 vchiq_log_trace(vchiq_core_log_level,
1819 "%d: prs RESUME@%pK,%x",
1820 state->id, header, size);
1821 /* Release the slot mutex */
1822 mutex_unlock(&state->slot_mutex);
1823 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1826 case VCHIQ_MSG_REMOTE_USE:
1827 vchiq_on_remote_use(state);
1829 case VCHIQ_MSG_REMOTE_RELEASE:
1830 vchiq_on_remote_release(state);
1832 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1836 vchiq_log_error(vchiq_core_log_level,
1837 "%d: prs invalid msgid %x@%pK,%x",
1838 state->id, msgid, header, size);
1839 WARN(1, "invalid message\n");
1845 unlock_service(service);
/* Advance past this message (headers are stride-aligned). */
1849 state->rx_pos += calc_stride(size);
1851 DEBUG_TRACE(PARSE_LINE);
1852 /* Perform some housekeeping when the end of the slot is
1854 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1855 /* Remove the extra reference count. */
1856 release_slot(state, state->rx_info, NULL, NULL);
1857 state->rx_data = NULL;
/* bail_not_ready: drop any held service ref before returning. */
1863 unlock_service(service);
1866 /* Called by the slot handler thread */
/*
 * slot_handler_func - main loop of the vchiq-slot kernel thread.
 *
 * Blocks on the trigger remote event, then handles any pending service
 * polls / connection-state transitions (PAUSE/RESUME handshakes) before
 * parsing newly arrived rx slots.  If a PAUSE cannot be queued the poll
 * flag is re-set so the attempt is retried on the next wakeup.
 *
 * NOTE(review): extraction is missing lines (loop construct, case
 * terminators, closing braces); code reproduced as-is.
 */
1868 slot_handler_func(void *v)
1870 struct vchiq_state *state = v;
1871 struct vchiq_shared_state *local = state->local;
1873 DEBUG_INITIALISE(local)
1876 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1877 DEBUG_TRACE(SLOT_HANDLER_LINE);
/* Sleep until the peer (or a local poll request) signals us. */
1878 remote_event_wait(&state->trigger_event, &local->trigger);
1882 DEBUG_TRACE(SLOT_HANDLER_LINE);
1883 if (state->poll_needed) {
1885 state->poll_needed = 0;
1887 /* Handle service polling and other rare conditions here
1888 ** out of the mainline code */
1889 switch (state->conn_state) {
1890 case VCHIQ_CONNSTATE_CONNECTED:
1891 /* Poll the services as requested */
1892 poll_services(state);
1895 case VCHIQ_CONNSTATE_PAUSING:
1896 if (queue_message(state, NULL,
1897 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1899 QMFLAGS_NO_MUTEX_UNLOCK)
1901 vchiq_set_conn_state(state,
1902 VCHIQ_CONNSTATE_PAUSE_SENT);
/* Couldn't send the PAUSE - retry on next wakeup. */
1905 state->poll_needed = 1;
1909 case VCHIQ_CONNSTATE_RESUMING:
1910 if (queue_message(state, NULL,
1911 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1912 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1914 vchiq_set_conn_state(state,
1915 VCHIQ_CONNSTATE_CONNECTED);
1917 /* This should really be impossible,
1918 ** since the PAUSE should have flushed
1919 ** through outstanding messages. */
1920 vchiq_log_error(vchiq_core_log_level,
1921 "Failed to send RESUME "
1931 DEBUG_TRACE(SLOT_HANDLER_LINE);
1932 parse_rx_slots(state);
1937 /* Called by the recycle thread */
/*
 * recycle_func - main loop of the vchiq-recy kernel thread.
 *
 * Allocates a scratch bitset sized for VCHIQ_MAX_SERVICES, then loops
 * waiting for the recycle remote event and processing the free queue.
 *
 * NOTE(review): extraction is missing lines (loop construct, allocation
 * failure check); code reproduced as-is.
 */
1939 recycle_func(void *v)
1941 struct vchiq_state *state = v;
1942 struct vchiq_shared_state *local = state->local;
1946 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1948 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1954 remote_event_wait(&state->recycle_event, &local->recycle);
1956 process_free_queue(state, found, length);
1961 /* Called by the sync thread */
/*
 * sync_func - main loop of the vchiq-sync kernel thread.
 *
 * Services the single synchronous message slot: waits for the
 * sync_trigger remote event, decodes the header in the remote's
 * slot_sync slot, and dispatches OPENACK (completing the opener's
 * remove_event) and DATA (delivered via make_service_callback) for
 * synchronous-mode services.  Unknown/invalid messages are released
 * immediately with release_message_sync().
 *
 * NOTE(review): the function's signature line itself is missing from
 * this extraction, along with other lines; code reproduced as-is.
 */
1965 struct vchiq_state *state = v;
1966 struct vchiq_shared_state *local = state->local;
1967 struct vchiq_header *header =
1968 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1969 state->remote->slot_sync);
1972 struct vchiq_service *service;
1975 unsigned int localport, remoteport;
1977 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
1981 msgid = header->msgid;
1982 size = header->size;
1983 type = VCHIQ_MSG_TYPE(msgid);
1984 localport = VCHIQ_MSG_DSTPORT(msgid);
1985 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1987 service = find_service_by_port(state, localport);
1990 vchiq_log_error(vchiq_sync_log_level,
1991 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
1992 state->id, msg_type_str(type),
1993 header, remoteport, localport, localport);
1994 release_message_sync(state, header);
1998 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2001 svc_fourcc = service
2002 ? service->base.fourcc
2003 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2004 vchiq_log_trace(vchiq_sync_log_level,
2005 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2007 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2008 remoteport, localport, size);
2010 vchiq_log_dump_mem("Rcvd", 0, header->data,
2015 case VCHIQ_MSG_OPENACK:
2016 if (size >= sizeof(struct vchiq_openack_payload)) {
2017 const struct vchiq_openack_payload *payload =
2018 (struct vchiq_openack_payload *)
2020 service->peer_version = payload->version;
2022 vchiq_log_info(vchiq_sync_log_level,
2023 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2024 state->id, header, size, remoteport, localport,
2025 service->peer_version);
2026 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2027 service->remoteport = remoteport;
2028 vchiq_set_service_state(service,
2029 VCHIQ_SRVSTATE_OPENSYNC);
/* Wake the thread blocked in vchiq_open_service_internal(). */
2031 complete(&service->remove_event);
2033 release_message_sync(state, header);
2036 case VCHIQ_MSG_DATA:
2037 vchiq_log_trace(vchiq_sync_log_level,
2038 "%d: sf DATA@%pK,%x (%d->%d)",
2039 state->id, header, size, remoteport, localport);
2041 if ((service->remoteport == remoteport) &&
2042 (service->srvstate ==
2043 VCHIQ_SRVSTATE_OPENSYNC)) {
2044 if (make_service_callback(service,
2045 VCHIQ_MESSAGE_AVAILABLE, header,
2046 NULL) == VCHIQ_RETRY)
2047 vchiq_log_error(vchiq_sync_log_level,
2048 "synchronous callback to "
2049 "service %d returns "
2056 vchiq_log_error(vchiq_sync_log_level,
2057 "%d: sf unexpected msgid %x@%pK,%x",
2058 state->id, msgid, header, size);
2059 release_message_sync(state, header);
2063 unlock_service(service);
/*
 * init_bulk_queue - reset a bulk queue's indices to the empty state.
 * NOTE(review): some index resets (e.g. line 2074) are missing from
 * this extraction; code reproduced as-is.
 */
2070 init_bulk_queue(struct vchiq_bulk_queue *queue)
2072 queue->local_insert = 0;
2073 queue->remote_insert = 0;
2075 queue->remote_notify = 0;
/* Map a connection state enum to its human-readable name.
 * NOTE(review): no visible bounds check on conn_state in this
 * extraction - assumes callers pass a valid enumerator. */
2080 get_conn_state_name(enum vchiq_connstate conn_state)
2082 return conn_state_names[conn_state];
/*
 * vchiq_init_slots - lay out the shared slot_zero structure in @mem_base.
 *
 * Aligns the base to VCHIQ_SLOT_SIZE, zeroes and fills in slot_zero
 * (magic, versions, sizes), and splits the remaining data slots evenly
 * between master and slave, reserving the first slot of each half for
 * synchronous messages.  Fails (returns from the error path) if fewer
 * than 4 data slots remain.
 *
 * NOTE(review): extraction is missing lines (the mem_align declaration
 * head, return statements); code reproduced as-is.
 */
2085 struct vchiq_slot_zero *
2086 vchiq_init_slots(void *mem_base, int mem_size)
2089 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2090 struct vchiq_slot_zero *slot_zero =
2091 (struct vchiq_slot_zero *)(mem_base + mem_align);
2092 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2093 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2095 /* Ensure there is enough memory to run an absolutely minimum system */
2096 num_slots -= first_data_slot;
2098 if (num_slots < 4) {
2099 vchiq_log_error(vchiq_core_log_level,
2100 "%s - insufficient memory %x bytes",
2101 __func__, mem_size);
2105 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2107 slot_zero->magic = VCHIQ_MAGIC;
2108 slot_zero->version = VCHIQ_VERSION;
2109 slot_zero->version_min = VCHIQ_VERSION_MIN;
2110 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2111 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2112 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2113 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* First half of the data slots to the master, second to the slave;
 * each side's first slot is its dedicated sync slot. */
2115 slot_zero->master.slot_sync = first_data_slot;
2116 slot_zero->master.slot_first = first_data_slot + 1;
2117 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2118 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2119 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2120 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * vchiq_init_state - initialise the single VCHIQ state for this side.
 *
 * This side acts as the slave: local = slot_zero->slave, remote =
 * slot_zero->master.  Sets up all mutexes/completions, seeds the slot
 * queue and quota defaults, creates the remote events, marks the sync
 * slot empty, runs platform init, then spawns the three service threads
 * (vchiq-slot, vchiq-recy, vchiq-sync) before publishing the state and
 * setting local->initialised for the peer.  Thread creation failures
 * unwind via kthread_stop() of the already-created threads.
 *
 * NOTE(review): extraction drops lines (declarations, return
 * statements, kthread_create arguments); code reproduced as-is.
 */
2126 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2128 struct vchiq_shared_state *local;
2129 struct vchiq_shared_state *remote;
2130 enum vchiq_status status;
2131 char threadname[16];
/* Only one state instance is supported. */
2134 if (vchiq_states[0]) {
2135 pr_err("%s: VCHIQ state already initialized\n", __func__);
2139 local = &slot_zero->slave;
2140 remote = &slot_zero->master;
2142 if (local->initialised) {
2143 vchiq_loud_error_header();
2144 if (remote->initialised)
2145 vchiq_loud_error("local state has already been "
2148 vchiq_loud_error("master/slave mismatch two slaves");
2149 vchiq_loud_error_footer();
2153 memset(state, 0, sizeof(struct vchiq_state));
2156 initialize shared state pointers
2159 state->local = local;
2160 state->remote = remote;
2161 state->slot_data = (struct vchiq_slot *)slot_zero;
2164 initialize events and mutexes
2167 init_completion(&state->connect);
2168 mutex_init(&state->mutex);
2169 mutex_init(&state->slot_mutex);
2170 mutex_init(&state->recycle_mutex);
2171 mutex_init(&state->sync_mutex);
2172 mutex_init(&state->bulk_transfer_mutex);
2174 init_completion(&state->slot_available_event);
2175 init_completion(&state->slot_remove_event);
2176 init_completion(&state->data_quota_event);
2178 state->slot_queue_available = 0;
2180 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2181 struct vchiq_service_quota *service_quota =
2182 &state->service_quotas[i];
2183 init_completion(&service_quota->quota_event);
/* Seed the local slot queue with this side's data slots. */
2186 for (i = local->slot_first; i <= local->slot_last; i++) {
2187 local->slot_queue[state->slot_queue_available++] = i;
2188 complete(&state->slot_available_event);
2191 state->default_slot_quota = state->slot_queue_available/2;
2192 state->default_message_quota =
2193 min((unsigned short)(state->default_slot_quota * 256),
2194 (unsigned short)~0);
2196 state->previous_data_index = -1;
2197 state->data_use_count = 0;
2198 state->data_quota = state->slot_queue_available - 1;
2200 remote_event_create(&state->trigger_event, &local->trigger);
2202 remote_event_create(&state->recycle_event, &local->recycle);
2203 local->slot_queue_recycle = state->slot_queue_available;
2204 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2205 remote_event_create(&state->sync_release_event, &local->sync_release);
2207 /* At start-of-day, the slot is empty and available */
2208 ((struct vchiq_header *)
2209 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2210 VCHIQ_MSGID_PADDING;
2211 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2213 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2215 status = vchiq_platform_init_state(state);
2216 if (status != VCHIQ_SUCCESS)
2220 bring up slot handler thread
2222 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2223 state->slot_handler_thread = kthread_create(&slot_handler_func,
2227 if (IS_ERR(state->slot_handler_thread)) {
2228 vchiq_loud_error_header();
2229 vchiq_loud_error("couldn't create thread %s", threadname);
2230 vchiq_loud_error_footer();
2233 set_user_nice(state->slot_handler_thread, -19);
2235 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2236 state->recycle_thread = kthread_create(&recycle_func,
2239 if (IS_ERR(state->recycle_thread)) {
2240 vchiq_loud_error_header();
2241 vchiq_loud_error("couldn't create thread %s", threadname);
2242 vchiq_loud_error_footer();
2243 goto fail_free_handler_thread;
2245 set_user_nice(state->recycle_thread, -19);
2247 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2248 state->sync_thread = kthread_create(&sync_func,
2251 if (IS_ERR(state->sync_thread)) {
2252 vchiq_loud_error_header();
2253 vchiq_loud_error("couldn't create thread %s", threadname);
2254 vchiq_loud_error_footer();
2255 goto fail_free_recycle_thread;
2257 set_user_nice(state->sync_thread, -20);
/* Only start the threads once everything they need exists. */
2259 wake_up_process(state->slot_handler_thread);
2260 wake_up_process(state->recycle_thread);
2261 wake_up_process(state->sync_thread);
2263 vchiq_states[0] = state;
2265 /* Indicate readiness to the other side */
2266 local->initialised = 1;
/* Error unwind: stop threads in reverse creation order. */
2270 fail_free_recycle_thread:
2271 kthread_stop(state->recycle_thread);
2272 fail_free_handler_thread:
2273 kthread_stop(state->slot_handler_thread);
/*
 * vchiq_msg_queue_push - append @header to a service's message ring.
 *
 * Blocks (interruptibly, flushing signals and retrying) while the ring
 * is full, then stores the header at the write index (masked by
 * VCHIQ_MAX_SLOTS - 1) and signals msg_queue_push for readers in
 * vchiq_msg_hold().
 *
 * NOTE(review): extraction is missing lines (the full-ring bound
 * expression tail, closing braces); code reproduced as-is.
 */
2278 void vchiq_msg_queue_push(unsigned handle, struct vchiq_header *header)
2280 struct vchiq_service *service = find_service_by_handle(handle);
2283 while (service->msg_queue_write == service->msg_queue_read +
2285 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2286 flush_signals(current);
2289 pos = service->msg_queue_write++ & (VCHIQ_MAX_SLOTS - 1);
2290 service->msg_queue[pos] = header;
2292 complete(&service->msg_queue_push);
/*
 * vchiq_msg_hold - pop the next message header from a service's ring.
 *
 * Returns early (NULL path, line not visible here) when the ring is
 * empty on first check; otherwise waits (interruptibly, flushing
 * signals) until a header is available, takes it from the read index,
 * and signals msg_queue_pop so a full-ring pusher can proceed.
 *
 * NOTE(review): extraction is missing the empty-queue return and final
 * return statements; code reproduced as-is.
 */
2296 struct vchiq_header *vchiq_msg_hold(unsigned handle)
2298 struct vchiq_service *service = find_service_by_handle(handle);
2299 struct vchiq_header *header;
2302 if (service->msg_queue_write == service->msg_queue_read)
2305 while (service->msg_queue_write == service->msg_queue_read) {
2306 if (wait_for_completion_interruptible(&service->msg_queue_push))
2307 flush_signals(current);
2310 pos = service->msg_queue_read++ & (VCHIQ_MAX_SLOTS - 1);
2311 header = service->msg_queue[pos];
2313 complete(&service->msg_queue_pop);
/* Reject service params lacking a callback or fourcc.
 * NOTE(review): the return statements are missing from this
 * extraction; code reproduced as-is. */
2319 static int vchiq_validate_params(const struct vchiq_service_params *params)
2321 if (!params->callback || !params->fourcc) {
2322 vchiq_loud_error("Can't add service, invalid params\n");
2329 /* Called from application thread when a client or server service is created. */
/*
 * vchiq_add_service_internal - allocate and register a new service.
 *
 * Validates @params, allocates and fully initialises a vchiq_service
 * (refcount 1, FREE state, both bulk queues, completions, stats), then
 * under state->mutex picks a service slot: clients (OPENING) take the
 * first free entry; servers additionally check that no other server
 * already claims the same public fourcc with a different instance or
 * callback.  On success the service is published with
 * rcu_assign_pointer(), given a handle combining a sequence counter,
 * state id and local port, its quotas are seeded from the state
 * defaults, and it is moved to @srvstate.  The returned service keeps
 * its initial reference; the caller must not unlock it.
 *
 * NOTE(review): extraction drops lines (declarations, early returns,
 * the failure paths, some braces); code reproduced as-is.
 */
2330 struct vchiq_service *
2331 vchiq_add_service_internal(struct vchiq_state *state,
2332 const struct vchiq_service_params *params,
2333 int srvstate, struct vchiq_instance *instance,
2334 vchiq_userdata_term userdata_term)
2336 struct vchiq_service *service;
2337 struct vchiq_service __rcu **pservice = NULL;
2338 struct vchiq_service_quota *service_quota;
2342 ret = vchiq_validate_params(params);
2346 service = kmalloc(sizeof(*service), GFP_KERNEL);
2350 service->base.fourcc = params->fourcc;
2351 service->base.callback = params->callback;
2352 service->base.userdata = params->userdata;
2353 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2354 kref_init(&service->ref_count);
2355 service->srvstate = VCHIQ_SRVSTATE_FREE;
2356 service->userdata_term = userdata_term;
2357 service->localport = VCHIQ_PORT_FREE;
2358 service->remoteport = VCHIQ_PORT_FREE;
/* Clients hide their fourcc; servers advertise it for matching. */
2360 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2361 VCHIQ_FOURCC_INVALID : params->fourcc;
2362 service->client_id = 0;
2363 service->auto_close = 1;
2365 service->closing = 0;
2367 atomic_set(&service->poll_flags, 0);
2368 service->version = params->version;
2369 service->version_min = params->version_min;
2370 service->state = state;
2371 service->instance = instance;
2372 service->service_use_count = 0;
2373 service->msg_queue_read = 0;
2374 service->msg_queue_write = 0;
2375 init_bulk_queue(&service->bulk_tx);
2376 init_bulk_queue(&service->bulk_rx);
2377 init_completion(&service->remove_event);
2378 init_completion(&service->bulk_remove_event);
2379 init_completion(&service->msg_queue_pop);
2380 init_completion(&service->msg_queue_push);
2381 mutex_init(&service->bulk_mutex);
2382 memset(&service->stats, 0, sizeof(service->stats));
2383 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2385 /* Although it is perfectly possible to use a spinlock
2386 ** to protect the creation of services, it is overkill as it
2387 ** disables interrupts while the array is searched.
2388 ** The only danger is of another thread trying to create a
2389 ** service - service deletion is safe.
2390 ** Therefore it is preferable to use state->mutex which,
2391 ** although slower to claim, doesn't block interrupts while
2395 mutex_lock(&state->mutex);
2397 /* Prepare to use a previously unused service */
2398 if (state->unused_service < VCHIQ_MAX_SERVICES)
2399 pservice = &state->services[state->unused_service];
2401 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2402 for (i = 0; i < state->unused_service; i++) {
2403 if (!rcu_access_pointer(state->services[i])) {
2404 pservice = &state->services[i];
/* Server path: scan backwards for a free slot, rejecting a
 * duplicate fourcc owned by a different instance/callback. */
2410 for (i = (state->unused_service - 1); i >= 0; i--) {
2411 struct vchiq_service *srv;
2413 srv = rcu_dereference(state->services[i]);
2415 pservice = &state->services[i];
2416 else if ((srv->public_fourcc == params->fourcc)
2417 && ((srv->instance != instance) ||
2418 (srv->base.callback !=
2419 params->callback))) {
2420 /* There is another server using this
2421 ** fourcc which doesn't match. */
2430 service->localport = (pservice - state->services);
2432 handle_seq = VCHIQ_MAX_STATES *
2434 service->handle = handle_seq |
2435 (state->id * VCHIQ_MAX_SERVICES) |
2437 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2438 rcu_assign_pointer(*pservice, service);
2439 if (pservice == &state->services[state->unused_service])
2440 state->unused_service++;
2443 mutex_unlock(&state->mutex);
2450 service_quota = &state->service_quotas[service->localport];
2451 service_quota->slot_quota = state->default_slot_quota;
2452 service_quota->message_quota = state->default_message_quota;
2453 if (service_quota->slot_use_count == 0)
2454 service_quota->previous_tx_index =
2455 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2458 /* Bring this service online */
2459 vchiq_set_service_state(service, srvstate);
2461 vchiq_log_info(vchiq_core_msg_log_level,
2462 "%s Service %c%c%c%c SrcPort:%d",
2463 (srvstate == VCHIQ_SRVSTATE_OPENING)
2465 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2466 service->localport);
2468 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * vchiq_open_service_internal - send an OPEN and wait for the verdict.
 *
 * Queues a blocking VCHIQ_MSG_OPEN carrying the service's fourcc and
 * version range, then waits interruptibly on remove_event, which the
 * rx path completes on OPENACK (OPEN/OPENSYNC) or close.  Interruption
 * yields VCHIQ_RETRY; an end state other than OPEN/OPENSYNC yields
 * VCHIQ_ERROR.  Both failure paths release the use count taken by
 * vchiq_use_service_internal().
 *
 * NOTE(review): extraction drops lines (payload initialisers,
 * queue_message arguments, return); code reproduced as-is.
 */
2474 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2476 struct vchiq_open_payload payload = {
2477 service->base.fourcc,
2480 service->version_min
2482 enum vchiq_status status = VCHIQ_SUCCESS;
2484 service->client_id = client_id;
2485 vchiq_use_service_internal(service);
2486 status = queue_message(service->state,
2488 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2491 memcpy_copy_callback,
2494 QMFLAGS_IS_BLOCKING);
2495 if (status == VCHIQ_SUCCESS) {
2496 /* Wait for the ACK/NAK */
2497 if (wait_for_completion_interruptible(&service->remove_event)) {
2498 status = VCHIQ_RETRY;
2499 vchiq_release_service_internal(service);
2500 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2501 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
/* CLOSEWAIT is a quiet rejection; anything else is logged. */
2502 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2503 vchiq_log_error(vchiq_core_log_level,
2504 "%d: osi - srvstate = %s (ref %u)",
2506 srvstate_names[service->srvstate],
2507 kref_read(&service->ref_count));
2508 status = VCHIQ_ERROR;
2509 VCHIQ_SERVICE_STATS_INC(service, error_count);
2510 vchiq_release_service_internal(service);
/*
 * release_service_messages - free any messages still claimed by a
 * service that is being torn down.
 *
 * Releases a pending sync-slot message addressed to this service, then
 * scans every remote data slot that still has unreleased users; within
 * each such slot it walks the headers (stopping at rx_pos for the slot
 * currently being parsed) and releases those claimed for this
 * service's local port.  A stride walking past the slot end indicates
 * corruption and trips a WARN.
 *
 * NOTE(review): extraction drops lines (declarations, loop header at
 * line ~2549, closing braces); code reproduced as-is.
 */
2517 release_service_messages(struct vchiq_service *service)
2519 struct vchiq_state *state = service->state;
2520 int slot_last = state->remote->slot_last;
2523 /* Release any claimed messages aimed at this service */
2525 if (service->sync) {
2526 struct vchiq_header *header =
2527 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2528 state->remote->slot_sync);
2529 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2530 release_message_sync(state, header);
2535 for (i = state->remote->slot_first; i <= slot_last; i++) {
2536 struct vchiq_slot_info *slot_info =
2537 SLOT_INFO_FROM_INDEX(state, i);
/* Skip slots with no outstanding claims. */
2538 if (slot_info->release_count != slot_info->use_count) {
2540 (char *)SLOT_DATA_FROM_INDEX(state, i);
2541 unsigned int pos, end;
2543 end = VCHIQ_SLOT_SIZE;
2544 if (data == state->rx_data)
2545 /* This buffer is still being read from - stop
2546 ** at the current read position */
2547 end = state->rx_pos & VCHIQ_SLOT_MASK;
2552 struct vchiq_header *header =
2553 (struct vchiq_header *)(data + pos);
2554 int msgid = header->msgid;
2555 int port = VCHIQ_MSG_DSTPORT(msgid);
2557 if ((port == service->localport) &&
2558 (msgid & VCHIQ_MSGID_CLAIMED)) {
2559 vchiq_log_info(vchiq_core_log_level,
2560 " fsi - hdr %pK", header);
2561 release_slot(state, slot_info, header,
2564 pos += calc_stride(header->size);
2565 if (pos > VCHIQ_SLOT_SIZE) {
2566 vchiq_log_error(vchiq_core_log_level,
2567 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2569 header->msgid, header->size);
2570 WARN(1, "invalid slot position\n");
/*
 * do_abort_bulks - abort and notify all outstanding bulks on a service.
 *
 * Takes bulk_mutex (killably; the failure return is not visible in this
 * extraction), aborts both directions with abort_outstanding_bulks(),
 * drops the mutex, then runs notify_bulks() for tx and (if tx
 * succeeded) rx without retry-polling.  Returns true only when both
 * notifications report VCHIQ_SUCCESS.
 */
2578 do_abort_bulks(struct vchiq_service *service)
2580 enum vchiq_status status;
2582 /* Abort any outstanding bulk transfers */
2583 if (mutex_lock_killable(&service->bulk_mutex))
2585 abort_outstanding_bulks(service, &service->bulk_tx);
2586 abort_outstanding_bulks(service, &service->bulk_rx);
2587 mutex_unlock(&service->bulk_mutex);
2589 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2590 if (status == VCHIQ_SUCCESS)
2591 status = notify_bulks(service, &service->bulk_rx,
2593 return (status == VCHIQ_SUCCESS);
/*
 * close_service_complete - finish closing a service and pick its next
 * state.
 *
 * From OPEN/CLOSESENT/CLOSERECVD a server with auto_close returns to
 * LISTENING (clearing client_id/remoteport); otherwise the service
 * becomes CLOSEWAIT (server) or CLOSED (client).  The SERVICE_CLOSED
 * callback is then delivered; unless it asks for VCHIQ_RETRY, all
 * remaining use counts are released, a CLOSED service is freed, and
 * waiters on remove_event are woken.  On VCHIQ_RETRY the service is
 * parked in @failstate so the close can be re-attempted.
 *
 * NOTE(review): extraction drops lines (declarations, some braces,
 * return statements); code reproduced as-is.
 */
2596 static enum vchiq_status
2597 close_service_complete(struct vchiq_service *service, int failstate)
2599 enum vchiq_status status;
2600 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2603 switch (service->srvstate) {
2604 case VCHIQ_SRVSTATE_OPEN:
2605 case VCHIQ_SRVSTATE_CLOSESENT:
2606 case VCHIQ_SRVSTATE_CLOSERECVD:
2608 if (service->auto_close) {
2609 service->client_id = 0;
2610 service->remoteport = VCHIQ_PORT_FREE;
2611 newstate = VCHIQ_SRVSTATE_LISTENING;
2613 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2615 newstate = VCHIQ_SRVSTATE_CLOSED;
2616 vchiq_set_service_state(service, newstate);
2618 case VCHIQ_SRVSTATE_LISTENING:
2621 vchiq_log_error(vchiq_core_log_level,
2622 "%s(%x) called in state %s", __func__,
2623 service->handle, srvstate_names[service->srvstate]);
2624 WARN(1, "%s in unexpected state\n", __func__);
2628 status = make_service_callback(service,
2629 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2631 if (status != VCHIQ_RETRY) {
2632 int uc = service->service_use_count;
2634 /* Complete the close process */
2635 for (i = 0; i < uc; i++)
2636 /* cater for cases where close is forced and the
2637 ** client may not close all it's handles */
2638 vchiq_release_service_internal(service);
2640 service->client_id = 0;
2641 service->remoteport = VCHIQ_PORT_FREE;
2643 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED)
2644 vchiq_free_service_internal(service);
2645 else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2647 service->closing = 0;
2649 complete(&service->remove_event);
/* Callback deferred the close - park in the fail state. */
2652 vchiq_set_service_state(service, failstate);
2657 /* Called by the slot handler */
/*
 * Drive the close state machine for a service.  @close_recvd indicates
 * whether the remote end has already sent a CLOSE message.  Returns
 * VCHIQ_SUCCESS, VCHIQ_RETRY (caller must re-poll), or VCHIQ_ERROR.
 * NOTE(review): several brace/break/return lines are elided in this
 * listing, so the exact fall-through structure must be confirmed
 * against the full file.
 */
2659 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2661 struct vchiq_state *state = service->state;
2662 enum vchiq_status status = VCHIQ_SUCCESS;
2663 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2665 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2666 service->state->id, service->localport, close_recvd,
2667 srvstate_names[service->srvstate]);
2669 switch (service->srvstate) {
2670 case VCHIQ_SRVSTATE_CLOSED:
2671 case VCHIQ_SRVSTATE_HIDDEN:
2672 case VCHIQ_SRVSTATE_LISTENING:
2673 case VCHIQ_SRVSTATE_CLOSEWAIT:
/* A CLOSE received in these states is unexpected — log it. */
2675 vchiq_log_error(vchiq_core_log_level,
2678 __func__, srvstate_names[service->srvstate]);
2679 else if (is_server) {
2680 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2681 status = VCHIQ_ERROR;
2683 service->client_id = 0;
2684 service->remoteport = VCHIQ_PORT_FREE;
2685 if (service->srvstate ==
2686 VCHIQ_SRVSTATE_CLOSEWAIT)
2687 vchiq_set_service_state(service,
2688 VCHIQ_SRVSTATE_LISTENING);
2690 complete(&service->remove_event);
/* Non-server path: the service can be freed outright. */
2692 vchiq_free_service_internal(service);
2694 case VCHIQ_SRVSTATE_OPENING:
2696 /* The open was rejected - tell the user */
2697 vchiq_set_service_state(service,
2698 VCHIQ_SRVSTATE_CLOSEWAIT);
2699 complete(&service->remove_event);
2701 /* Shutdown mid-open - let the other side know */
2702 status = queue_message(state, service,
2706 VCHIQ_MSG_DSTPORT(service->remoteport)),
2711 case VCHIQ_SRVSTATE_OPENSYNC:
/* Sync services must also hold the sync mutex during close. */
2712 mutex_lock(&state->sync_mutex);
2714 case VCHIQ_SRVSTATE_OPEN:
2716 if (!do_abort_bulks(service))
2717 status = VCHIQ_RETRY;
2720 release_service_messages(service);
/* Send CLOSE to the peer, keeping the slot mutex held on success. */
2722 if (status == VCHIQ_SUCCESS)
2723 status = queue_message(state, service,
2727 VCHIQ_MSG_DSTPORT(service->remoteport)),
2728 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2730 if (status == VCHIQ_SUCCESS) {
2732 /* Change the state while the mutex is
2734 vchiq_set_service_state(service,
2735 VCHIQ_SRVSTATE_CLOSESENT);
2736 mutex_unlock(&state->slot_mutex);
2738 mutex_unlock(&state->sync_mutex);
2741 } else if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC) {
2742 mutex_unlock(&state->sync_mutex);
2747 /* Change the state while the mutex is still held */
2748 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2749 mutex_unlock(&state->slot_mutex);
2751 mutex_unlock(&state->sync_mutex);
2753 status = close_service_complete(service,
2754 VCHIQ_SRVSTATE_CLOSERECVD);
2757 case VCHIQ_SRVSTATE_CLOSESENT:
2759 /* This happens when a process is killed mid-close */
2762 if (!do_abort_bulks(service)) {
2763 status = VCHIQ_RETRY;
2767 if (status == VCHIQ_SUCCESS)
2768 status = close_service_complete(service,
2769 VCHIQ_SRVSTATE_CLOSERECVD);
2772 case VCHIQ_SRVSTATE_CLOSERECVD:
2773 if (!close_recvd && is_server)
2774 /* Force into LISTENING mode */
2775 vchiq_set_service_state(service,
2776 VCHIQ_SRVSTATE_LISTENING);
2777 status = close_service_complete(service,
2778 VCHIQ_SRVSTATE_CLOSERECVD);
2782 vchiq_log_error(vchiq_core_log_level,
2783 "%s(%d) called in state %s", __func__,
2784 close_recvd, srvstate_names[service->srvstate]);
2791 /* Called from the application process upon process death */
/*
 * Mark a service as closing and ask the slot handler thread to remove
 * it via a VCHIQ_POLL_REMOVE poll request.  Does not block.
 */
2793 vchiq_terminate_service_internal(struct vchiq_service *service)
2795 struct vchiq_state *state = service->state;
2797 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2798 state->id, service->localport, service->remoteport);
2800 mark_service_closing(service);
2802 /* Mark the service for removal by the slot handler */
2803 request_poll(state, service, VCHIQ_POLL_REMOVE);
2806 /* Called from the slot handler */
/*
 * Move a service to VCHIQ_SRVSTATE_FREE, wake anyone waiting on
 * remove_event, and drop the initial service reference.  Only valid
 * from the quiescent states listed in the switch; other states are
 * logged as errors.
 */
2808 vchiq_free_service_internal(struct vchiq_service *service)
2810 struct vchiq_state *state = service->state;
2812 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2813 state->id, service->localport);
2815 switch (service->srvstate) {
2816 case VCHIQ_SRVSTATE_OPENING:
2817 case VCHIQ_SRVSTATE_CLOSED:
2818 case VCHIQ_SRVSTATE_HIDDEN:
2819 case VCHIQ_SRVSTATE_LISTENING:
2820 case VCHIQ_SRVSTATE_CLOSEWAIT:
2823 vchiq_log_error(vchiq_core_log_level,
2824 "%d: fsi - (%d) in state %s",
2825 state->id, service->localport,
2826 srvstate_names[service->srvstate]);
2830 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2832 complete(&service->remove_event);
2834 /* Release the initial lock */
2835 unlock_service(service);
/*
 * Connect this instance to the remote end: move all of the instance's
 * HIDDEN services to LISTENING, send a CONNECT message if the link is
 * still disconnected, and wait for the connection to complete.
 * Returns VCHIQ_SUCCESS, or (per the elided branches) presumably
 * VCHIQ_RETRY on interruption — confirm against the full file.
 */
2839 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2841 struct vchiq_service *service;
2844 /* Find all services registered to this client and enable them. */
2846 while ((service = next_service_by_instance(state, instance,
2848 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2849 vchiq_set_service_state(service,
2850 VCHIQ_SRVSTATE_LISTENING);
2851 unlock_service(service);
2854 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2855 if (queue_message(state, NULL,
2856 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2857 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2860 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2863 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2864 if (wait_for_completion_interruptible(&state->connect))
2867 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
/* Re-signal so other waiters on state->connect also wake. */
2868 complete(&state->connect);
2871 return VCHIQ_SUCCESS;
/*
 * Shut down an instance by removing every service it registered.
 * Remove failures are deliberately ignored (best-effort teardown).
 */
2875 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2877 struct vchiq_service *service;
2880 /* Find all services registered to this client and remove them. */
2882 while ((service = next_service_by_instance(state, instance,
2884 (void)vchiq_remove_service(service->handle);
2885 unlock_service(service);
2888 return VCHIQ_SUCCESS;
/*
 * Public entry: close the service identified by @handle.  If called
 * from the slot handler thread the close runs inline; otherwise a
 * TERMINATE poll is requested and we wait (interruptibly) on
 * remove_event until the service reaches a settled state.  Returns
 * VCHIQ_RETRY if interrupted by a signal, VCHIQ_ERROR if the service
 * ends up in an unexpected state.
 */
2892 vchiq_close_service(unsigned int handle)
2894 /* Unregister the service */
2895 struct vchiq_service *service = find_service_by_handle(handle);
2896 enum vchiq_status status = VCHIQ_SUCCESS;
2901 vchiq_log_info(vchiq_core_log_level,
2902 "%d: close_service:%d",
2903 service->state->id, service->localport);
/* Already free/idle — nothing to close. */
2905 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2906 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2907 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2908 unlock_service(service);
2912 mark_service_closing(service);
2914 if (current == service->state->slot_handler_thread) {
2915 status = vchiq_close_service_internal(service,
2917 WARN_ON(status == VCHIQ_RETRY);
2919 /* Mark the service for termination by the slot handler */
2920 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2924 if (wait_for_completion_interruptible(&service->remove_event)) {
2925 status = VCHIQ_RETRY;
/* Loop (elided) until the service leaves the transient close states. */
2929 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2930 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2931 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2934 vchiq_log_warning(vchiq_core_log_level,
2935 "%d: close_service:%d - waiting in state %s",
2936 service->state->id, service->localport,
2937 srvstate_names[service->srvstate]);
2940 if ((status == VCHIQ_SUCCESS) &&
2941 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2942 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2943 status = VCHIQ_ERROR;
2945 unlock_service(service);
/*
 * Public entry: remove the service identified by @handle.  Like
 * vchiq_close_service() but a removed server must not remain in
 * LISTENING, so its public_fourcc is cleared to make it behave like a
 * client before closing.  Returns VCHIQ_RETRY if interrupted,
 * VCHIQ_ERROR if the service does not end up FREE.
 */
2951 vchiq_remove_service(unsigned int handle)
2953 /* Unregister the service */
2954 struct vchiq_service *service = find_service_by_handle(handle);
2955 enum vchiq_status status = VCHIQ_SUCCESS;
2960 vchiq_log_info(vchiq_core_log_level,
2961 "%d: remove_service:%d",
2962 service->state->id, service->localport);
2964 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
2965 unlock_service(service);
2969 mark_service_closing(service);
2971 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
2972 (current == service->state->slot_handler_thread)) {
2973 /* Make it look like a client, because it must be removed and
2974 not left in the LISTENING state. */
2975 service->public_fourcc = VCHIQ_FOURCC_INVALID;
2977 status = vchiq_close_service_internal(service,
2979 WARN_ON(status == VCHIQ_RETRY);
2981 /* Mark the service for removal by the slot handler */
2982 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
2985 if (wait_for_completion_interruptible(&service->remove_event)) {
2986 status = VCHIQ_RETRY;
/* Loop (elided) until the service leaves the transient close states. */
2990 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2991 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2994 vchiq_log_warning(vchiq_core_log_level,
2995 "%d: remove_service:%d - waiting in state %s",
2996 service->state->id, service->localport,
2997 srvstate_names[service->srvstate]);
3000 if ((status == VCHIQ_SUCCESS) &&
3001 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3002 status = VCHIQ_ERROR;
3004 unlock_service(service);
3009 /* This function may be called by kernel threads or user threads.
3010  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3011  * received and the call should be retried after being returned to user
3013  * When called in blocking mode, the userdata field points to a bulk_waiter
3016  */
/*
 * Queue a bulk transmit or receive on an OPEN service.  Reserves a
 * slot in the per-direction bulk queue (waiting, interruptibly, if the
 * queue is full), prepares the DMA data, and sends a BULK_TX/BULK_RX
 * message to the peer under both the bulk mutex and the state slot
 * mutex.  In VCHIQ_BULK_MODE_BLOCKING the caller-supplied bulk_waiter
 * is waited on until the transfer completes or is aborted.
 * NOTE(review): error-label bodies and some cleanup lines are elided in
 * this listing — the goto targets below must be read against the full
 * file.
 */
3016 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3017 void *offset, int size, void *userdata,
3018 enum vchiq_bulk_mode mode,
3019 enum vchiq_bulk_dir dir)
3021 struct vchiq_service *service = find_service_by_handle(handle);
3022 struct vchiq_bulk_queue *queue;
3023 struct vchiq_bulk *bulk;
3024 struct vchiq_state *state;
3025 struct bulk_waiter *bulk_waiter = NULL;
3026 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3027 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3028 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3029 enum vchiq_status status = VCHIQ_ERROR;
/* Validate the service and the user buffer before touching queues. */
3032 if (!service || service->srvstate != VCHIQ_SRVSTATE_OPEN ||
3033 !offset || vchiq_check_service(service) != VCHIQ_SUCCESS)
3037 case VCHIQ_BULK_MODE_NOCALLBACK:
3038 case VCHIQ_BULK_MODE_CALLBACK:
3040 case VCHIQ_BULK_MODE_BLOCKING:
3041 bulk_waiter = userdata;
3042 init_completion(&bulk_waiter->event);
3043 bulk_waiter->actual = 0;
3044 bulk_waiter->bulk = NULL;
3046 case VCHIQ_BULK_MODE_WAITING:
3047 bulk_waiter = userdata;
3048 bulk = bulk_waiter->bulk;
3054 state = service->state;
3056 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3057 &service->bulk_tx : &service->bulk_rx;
3059 if (mutex_lock_killable(&service->bulk_mutex)) {
3060 status = VCHIQ_RETRY;
/* Queue full: drop the mutex and wait for a slot to be released. */
3064 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3065 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3067 mutex_unlock(&service->bulk_mutex);
3068 if (wait_for_completion_interruptible(
3069 &service->bulk_remove_event)) {
3070 status = VCHIQ_RETRY;
3073 if (mutex_lock_killable(&service->bulk_mutex)) {
3074 status = VCHIQ_RETRY;
3077 } while (queue->local_insert == queue->remove +
3078 VCHIQ_NUM_SERVICE_BULKS);
3081 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3085 bulk->userdata = userdata;
/* Pre-mark as aborted; cleared when the transfer actually completes. */
3087 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3089 if (vchiq_prepare_bulk_data(bulk, offset, size, dir) != VCHIQ_SUCCESS)
3090 goto unlock_error_exit;
3094 vchiq_log_info(vchiq_core_log_level,
3095 "%d: bt (%d->%d) %cx %x@%pK %pK",
3096 state->id, service->localport, service->remoteport, dir_char,
3097 size, bulk->data, userdata);
3099 /* The slot mutex must be held when the service is being closed, so
3100 claim it here to ensure that isn't happening */
3101 if (mutex_lock_killable(&state->slot_mutex)) {
3102 status = VCHIQ_RETRY;
3103 goto cancel_bulk_error_exit;
3106 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3107 goto unlock_both_error_exit;
3109 payload[0] = (int)(long)bulk->data;
3110 payload[1] = bulk->size;
3111 status = queue_message(state,
3113 VCHIQ_MAKE_MSG(dir_msgtype,
3115 service->remoteport),
3116 memcpy_copy_callback,
3119 QMFLAGS_IS_BLOCKING |
3120 QMFLAGS_NO_MUTEX_LOCK |
3121 QMFLAGS_NO_MUTEX_UNLOCK);
3122 if (status != VCHIQ_SUCCESS)
3123 goto unlock_both_error_exit;
3125 queue->local_insert++;
3127 mutex_unlock(&state->slot_mutex);
3128 mutex_unlock(&service->bulk_mutex);
3130 vchiq_log_trace(vchiq_core_log_level,
3131 "%d: bt:%d %cx li=%x ri=%x p=%x",
3133 service->localport, dir_char,
3134 queue->local_insert, queue->remote_insert, queue->process);
3137 unlock_service(service);
3139 status = VCHIQ_SUCCESS;
/* Blocking/waiting modes: sleep until the bulk completes or aborts. */
3142 bulk_waiter->bulk = bulk;
3143 if (wait_for_completion_interruptible(&bulk_waiter->event))
3144 status = VCHIQ_RETRY;
3145 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3146 status = VCHIQ_ERROR;
3151 unlock_both_error_exit:
3152 mutex_unlock(&state->slot_mutex);
3153 cancel_bulk_error_exit:
3154 vchiq_complete_bulk(bulk);
3156 mutex_unlock(&service->bulk_mutex);
3160 unlock_service(service);
/*
 * Queue a DATA message on a service, using @copy_callback to gather
 * the payload into the slot.  Uses the synchronous path for OPENSYNC
 * services and the normal queue for OPEN ones; any other state yields
 * VCHIQ_ERROR.  Messages larger than VCHIQ_MAX_MSG_SIZE are rejected.
 */
3165 vchiq_queue_message(unsigned int handle,
3166 ssize_t (*copy_callback)(void *context, void *dest,
3167 size_t offset, size_t maxsize),
3171 struct vchiq_service *service = find_service_by_handle(handle);
3172 enum vchiq_status status = VCHIQ_ERROR;
3175 (vchiq_check_service(service) != VCHIQ_SUCCESS))
3179 VCHIQ_SERVICE_STATS_INC(service, error_count);
3184 if (size > VCHIQ_MAX_MSG_SIZE) {
3185 VCHIQ_SERVICE_STATS_INC(service, error_count);
3189 switch (service->srvstate) {
3190 case VCHIQ_SRVSTATE_OPEN:
3191 status = queue_message(service->state, service,
3192 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3194 service->remoteport),
3195 copy_callback, context, size, 1);
3197 case VCHIQ_SRVSTATE_OPENSYNC:
3198 status = queue_message_sync(service->state, service,
3199 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3201 service->remoteport),
3202 copy_callback, context, size, 1);
3205 status = VCHIQ_ERROR;
3211 unlock_service(service);
/*
 * Kernel-facing wrapper around vchiq_queue_message() that copies @data
 * via memcpy_copy_callback and retries (loop body partly elided) until
 * the message is queued, since this API must block rather than return
 * VCHIQ_RETRY.
 */
3216 int vchiq_queue_kernel_message(unsigned handle, void *data, unsigned size)
3218 enum vchiq_status status;
3221 status = vchiq_queue_message(handle, memcpy_copy_callback,
3225 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3226 * implement a retry mechanism since this function is supposed
3227 * to block until queued
3229 if (status != VCHIQ_RETRY)
3237 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Return a received message header to the remote end.  If the header
 * lives in one of the remote's ordinary slots and was claimed, release
 * the slot; if it is in the sync slot, signal the sync release event
 * instead.
 */
3240 vchiq_release_message(unsigned int handle,
3241 struct vchiq_header *header)
3243 struct vchiq_service *service = find_service_by_handle(handle);
3244 struct vchiq_shared_state *remote;
3245 struct vchiq_state *state;
3251 state = service->state;
3252 remote = state->remote;
3254 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3256 if ((slot_index >= remote->slot_first) &&
3257 (slot_index <= remote->slot_last)) {
3258 int msgid = header->msgid;
3260 if (msgid & VCHIQ_MSGID_CLAIMED) {
3261 struct vchiq_slot_info *slot_info =
3262 SLOT_INFO_FROM_INDEX(state, slot_index);
3264 release_slot(state, slot_info, header, service);
3266 } else if (slot_index == remote->slot_sync)
3267 release_message_sync(state, header);
3269 unlock_service(service);
/*
 * Release a message from the synchronous slot: mark the header as
 * padding and signal the remote's sync_release event so it can reuse
 * the slot.
 */
3273 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3275 header->msgid = VCHIQ_MSGID_PADDING;
3276 remote_event_signal(&state->remote->sync_release);
/*
 * Report the peer's negotiated protocol version for a service via
 * *peer_version.  Returns VCHIQ_ERROR when the service/handle checks
 * (partly elided here) fail, VCHIQ_SUCCESS otherwise.
 */
3280 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3282 enum vchiq_status status = VCHIQ_ERROR;
3283 struct vchiq_service *service = find_service_by_handle(handle);
3286 (vchiq_check_service(service) != VCHIQ_SUCCESS) ||
3289 *peer_version = service->peer_version;
3290 status = VCHIQ_SUCCESS;
3294 unlock_service(service);
/* Fill @config with the compile-time VCHIQ limits and version numbers. */
3298 void vchiq_get_config(struct vchiq_config *config)
3300 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3301 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3302 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3303 config->max_services = VCHIQ_MAX_SERVICES;
3304 config->version = VCHIQ_VERSION;
3305 config->version_min = VCHIQ_VERSION_MIN;
/*
 * Set a per-service option.  Quota options clamp/replace the slot or
 * message quota (falling back to the state default when value is 0 —
 * the condition line is elided; confirm against the full file) and
 * wake the service if it has just dropped below quota.  SYNCHRONOUS
 * may only be changed while the service is HIDDEN or LISTENING.
 */
3309 vchiq_set_service_option(unsigned int handle,
3310 enum vchiq_service_option option, int value)
3312 struct vchiq_service *service = find_service_by_handle(handle);
3313 enum vchiq_status status = VCHIQ_ERROR;
3317 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3318 service->auto_close = value;
3319 status = VCHIQ_SUCCESS;
3322 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA: {
3323 struct vchiq_service_quota *service_quota =
3324 &service->state->service_quotas[
3325 service->localport];
3327 value = service->state->default_slot_quota;
3328 if ((value >= service_quota->slot_use_count) &&
3329 (value < (unsigned short)~0)) {
3330 service_quota->slot_quota = value;
3331 if ((value >= service_quota->slot_use_count) &&
3332 (service_quota->message_quota >=
3333 service_quota->message_use_count)) {
3334 /* Signal the service that it may have
3335 ** dropped below its quota */
3336 complete(&service_quota->quota_event);
3338 status = VCHIQ_SUCCESS;
3342 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA: {
3343 struct vchiq_service_quota *service_quota =
3344 &service->state->service_quotas[
3345 service->localport];
3347 value = service->state->default_message_quota;
3348 if ((value >= service_quota->message_use_count) &&
3349 (value < (unsigned short)~0)) {
3350 service_quota->message_quota = value;
3352 service_quota->message_use_count) &&
3353 (service_quota->slot_quota >=
3354 service_quota->slot_use_count))
3355 /* Signal the service that it may have
3356 ** dropped below its quota */
3357 complete(&service_quota->quota_event);
3358 status = VCHIQ_SUCCESS;
3362 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3363 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3364 (service->srvstate ==
3365 VCHIQ_SRVSTATE_LISTENING)) {
3366 service->sync = value;
3367 status = VCHIQ_SUCCESS;
3371 case VCHIQ_SERVICE_OPTION_TRACE:
3372 service->trace = value;
3373 status = VCHIQ_SUCCESS;
3379 unlock_service(service);
/*
 * Dump one side (@shared, labelled @label) of the shared state: slot
 * range and positions, any slots with outstanding use counts, and the
 * remote debug counters named in debug_names.
 */
3386 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3387 struct vchiq_shared_state *shared, const char *label)
3389 static const char *const debug_names[] = {
3391 "SLOT_HANDLER_COUNT",
3392 "SLOT_HANDLER_LINE",
3396 "AWAIT_COMPLETION_LINE",
3397 "DEQUEUE_MESSAGE_LINE",
3398 "SERVICE_CALLBACK_LINE",
3399 "MSG_QUEUE_FULL_COUNT",
3400 "COMPLETION_QUEUE_FULL_COUNT"
3407 len = scnprintf(buf, sizeof(buf),
3408 " %s: slots %d-%d tx_pos=%x recycle=%x",
3409 label, shared->slot_first, shared->slot_last,
3410 shared->tx_pos, shared->slot_queue_recycle);
3411 err = vchiq_dump(dump_context, buf, len + 1);
3415 len = scnprintf(buf, sizeof(buf),
3417 err = vchiq_dump(dump_context, buf, len + 1);
/* Report only slots whose use and release counts disagree. */
3421 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3422 struct vchiq_slot_info slot_info =
3423 *SLOT_INFO_FROM_INDEX(state, i);
3424 if (slot_info.use_count != slot_info.release_count) {
3425 len = scnprintf(buf, sizeof(buf),
3426 " %d: %d/%d", i, slot_info.use_count,
3427 slot_info.release_count);
3428 err = vchiq_dump(dump_context, buf, len + 1);
/* Entry 0 is DEBUG_ENTRIES itself, so start at 1. */
3434 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3435 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3436 debug_names[i], shared->debug[i], shared->debug[i]);
3437 err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the whole connection state: connection status, tx/rx positions,
 * version, aggregate stats, slot accounting, both shared-state halves,
 * platform info, and every allocated service.  Each vchiq_dump() error
 * is propagated (error-return lines are elided in this listing).
 */
3444 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3451 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3452 conn_state_names[state->conn_state]);
3453 err = vchiq_dump(dump_context, buf, len + 1);
3457 len = scnprintf(buf, sizeof(buf),
3458 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3459 state->local->tx_pos,
3460 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3462 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3463 err = vchiq_dump(dump_context, buf, len + 1);
3467 len = scnprintf(buf, sizeof(buf),
3468 " Version: %d (min %d)",
3469 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3470 err = vchiq_dump(dump_context, buf, len + 1);
3474 if (VCHIQ_ENABLE_STATS) {
3475 len = scnprintf(buf, sizeof(buf),
3476 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, "
3478 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3479 state->stats.error_count);
3480 err = vchiq_dump(dump_context, buf, len + 1);
3485 len = scnprintf(buf, sizeof(buf),
3486 " Slots: %d available (%d data), %d recyclable, %d stalls "
3488 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3489 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3490 state->data_quota - state->data_use_count,
3491 state->local->slot_queue_recycle - state->slot_queue_available,
3492 state->stats.slot_stalls, state->stats.data_stalls);
3493 err = vchiq_dump(dump_context, buf, len + 1);
3497 err = vchiq_dump_platform_state(dump_context);
3501 err = vchiq_dump_shared_state(dump_context,
3507 err = vchiq_dump_shared_state(dump_context,
3514 err = vchiq_dump_platform_instances(dump_context);
/* Walk every possibly-used service port and dump each live service. */
3518 for (i = 0; i < state->unused_service; i++) {
3519 struct vchiq_service *service = find_service_by_port(state, i);
3522 err = vchiq_dump_service_state(dump_context, service);
3523 unlock_service(service);
/*
 * Dump one service: name/state/refcount line, then (for non-FREE
 * services) fourcc, remote port, quota usage, pending bulks, and —
 * when stats are enabled — control/bulk counters and stall/error
 * counts, finishing with the platform-specific service state.
 */
3531 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3536 unsigned int ref_count;
3538 /* Don't include the lock just taken */
3539 ref_count = kref_read(&service->ref_count) - 1;
3540 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3541 service->localport, srvstate_names[service->srvstate],
3544 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3545 char remoteport[30];
3546 struct vchiq_service_quota *service_quota =
3547 &service->state->service_quotas[service->localport];
3548 int fourcc = service->base.fourcc;
3549 int tx_pending, rx_pending;
3551 if (service->remoteport != VCHIQ_PORT_FREE) {
3552 int len2 = scnprintf(remoteport, sizeof(remoteport),
3553 "%u", service->remoteport);
/* Servers append the client id after the port number. */
3555 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3556 scnprintf(remoteport + len2,
3557 sizeof(remoteport) - len2,
3558 " (client %x)", service->client_id);
3560 strcpy(remoteport, "n/a");
3562 len += scnprintf(buf + len, sizeof(buf) - len,
3563 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3564 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3566 service_quota->message_use_count,
3567 service_quota->message_quota,
3568 service_quota->slot_use_count,
3569 service_quota->slot_quota);
3571 err = vchiq_dump(dump_context, buf, len + 1);
/* Pending counts are the gap between local and remote inserts. */
3575 tx_pending = service->bulk_tx.local_insert -
3576 service->bulk_tx.remote_insert;
3578 rx_pending = service->bulk_rx.local_insert -
3579 service->bulk_rx.remote_insert;
3581 len = scnprintf(buf, sizeof(buf),
3582 " Bulk: tx_pending=%d (size %d),"
3583 " rx_pending=%d (size %d)",
3585 tx_pending ? service->bulk_tx.bulks[
3586 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3588 rx_pending ? service->bulk_rx.bulks[
3589 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3591 if (VCHIQ_ENABLE_STATS) {
3592 err = vchiq_dump(dump_context, buf, len + 1);
3596 len = scnprintf(buf, sizeof(buf),
3597 " Ctrl: tx_count=%d, tx_bytes=%llu, "
3598 "rx_count=%d, rx_bytes=%llu",
3599 service->stats.ctrl_tx_count,
3600 service->stats.ctrl_tx_bytes,
3601 service->stats.ctrl_rx_count,
3602 service->stats.ctrl_rx_bytes);
3603 err = vchiq_dump(dump_context, buf, len + 1);
3607 len = scnprintf(buf, sizeof(buf),
3608 " Bulk: tx_count=%d, tx_bytes=%llu, "
3609 "rx_count=%d, rx_bytes=%llu",
3610 service->stats.bulk_tx_count,
3611 service->stats.bulk_tx_bytes,
3612 service->stats.bulk_rx_count,
3613 service->stats.bulk_rx_bytes);
3614 err = vchiq_dump(dump_context, buf, len + 1);
3618 len = scnprintf(buf, sizeof(buf),
3619 " %d quota stalls, %d slot stalls, "
3620 "%d bulk stalls, %d aborted, %d errors",
3621 service->stats.quota_stalls,
3622 service->stats.slot_stalls,
3623 service->stats.bulk_stalls,
3624 service->stats.bulk_aborted_count,
3625 service->stats.error_count);
3629 err = vchiq_dump(dump_context, buf, len + 1);
3633 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3634 err = vchiq_dump_platform_service_state(dump_context, service);
/* Print the top banner used to make serious errors stand out in the log. */
3639 vchiq_loud_error_header(void)
3641 vchiq_log_error(vchiq_core_log_level,
3642 "============================================================"
3643 "================");
3644 vchiq_log_error(vchiq_core_log_level,
3645 "============================================================"
3646 "================");
3647 vchiq_log_error(vchiq_core_log_level, "=====");
/* Print the closing banner matching vchiq_loud_error_header(). */
3651 vchiq_loud_error_footer(void)
3653 vchiq_log_error(vchiq_core_log_level, "=====");
3654 vchiq_log_error(vchiq_core_log_level,
3655 "============================================================"
3656 "================");
3657 vchiq_log_error(vchiq_core_log_level,
3658 "============================================================"
3659 "================");
/*
 * Send a REMOTE_USE control message if the link is up.  Returns
 * VCHIQ_RETRY while still disconnected, otherwise the queue_message()
 * result.
 */
3662 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3664 enum vchiq_status status = VCHIQ_RETRY;
3666 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3667 status = queue_message(state, NULL,
3668 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/*
 * Send a REMOTE_USE_ACTIVE control message if the link is up.  Returns
 * VCHIQ_RETRY while still disconnected, otherwise the queue_message()
 * result.
 */
3673 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3675 enum vchiq_status status = VCHIQ_RETRY;
3677 if (state->conn_state != VCHIQ_CONNSTATE_DISCONNECTED)
3678 status = queue_message(state, NULL,
3679 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3684 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3687 const u8 *mem = void_mem;
3692 while (num_bytes > 0) {
3695 for (offset = 0; offset < 16; offset++) {
3696 if (offset < num_bytes)
3697 s += scnprintf(s, 4, "%02x ", mem[offset]);
3699 s += scnprintf(s, 4, " ");
3702 for (offset = 0; offset < 16; offset++) {
3703 if (offset < num_bytes) {
3704 u8 ch = mem[offset];
3706 if ((ch < ' ') || (ch > '~'))
3713 if (label && (*label != '\0'))
3714 vchiq_log_trace(VCHIQ_LOG_TRACE,
3715 "%s: %08x: %s", label, addr, line_buf);
3717 vchiq_log_trace(VCHIQ_LOG_TRACE,
3718 "%08x: %s", addr, line_buf);