1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 #define HANDLE_STATE_SHIFT 12
22 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
23 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
24 #define SLOT_INDEX_FROM_DATA(state, data) \
25 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
27 #define SLOT_INDEX_FROM_INFO(state, info) \
28 ((unsigned int)(info - state->slot_info))
29 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
30 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
31 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
32 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
34 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
36 #define SRVTRACE_LEVEL(srv) \
37 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
38 #define SRVTRACE_ENABLED(srv, lev) \
39 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
41 struct vchiq_open_payload {
48 struct vchiq_openack_payload {
53 QMFLAGS_IS_BLOCKING = BIT(0),
54 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
55 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
58 /* we require this for consistency between endpoints */
59 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
60 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
61 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
62 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
63 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
64 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
66 /* Run time control of log level, based on KERN_XXX level. */
67 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
68 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
69 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
71 DEFINE_SPINLOCK(bulk_waiter_spinlock);
72 static DEFINE_SPINLOCK(quota_spinlock);
74 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
75 static unsigned int handle_seq;
77 static const char *const srvstate_names[] = {
90 static const char *const reason_names[] = {
96 "BULK_TRANSMIT_ABORTED",
97 "BULK_RECEIVE_ABORTED"
100 static const char *const conn_state_names[] = {
113 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
115 static const char *msg_type_str(unsigned int msg_type)
118 case VCHIQ_MSG_PADDING: return "PADDING";
119 case VCHIQ_MSG_CONNECT: return "CONNECT";
120 case VCHIQ_MSG_OPEN: return "OPEN";
121 case VCHIQ_MSG_OPENACK: return "OPENACK";
122 case VCHIQ_MSG_CLOSE: return "CLOSE";
123 case VCHIQ_MSG_DATA: return "DATA";
124 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
125 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
126 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
127 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
128 case VCHIQ_MSG_PAUSE: return "PAUSE";
129 case VCHIQ_MSG_RESUME: return "RESUME";
130 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
131 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
132 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
138 vchiq_set_service_state(struct vchiq_service *service, int newstate)
140 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
141 service->state->id, service->localport,
142 srvstate_names[service->srvstate],
143 srvstate_names[newstate]);
144 service->srvstate = newstate;
147 struct vchiq_service *
148 find_service_by_handle(unsigned int handle)
150 struct vchiq_service *service;
153 service = handle_to_service(handle);
154 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
155 service->handle == handle &&
156 kref_get_unless_zero(&service->ref_count)) {
157 service = rcu_pointer_handoff(service);
162 vchiq_log_info(vchiq_core_log_level,
163 "Invalid service handle 0x%x", handle);
167 struct vchiq_service *
168 find_service_by_port(struct vchiq_state *state, int localport)
171 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
172 struct vchiq_service *service;
175 service = rcu_dereference(state->services[localport]);
176 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
177 kref_get_unless_zero(&service->ref_count)) {
178 service = rcu_pointer_handoff(service);
184 vchiq_log_info(vchiq_core_log_level,
185 "Invalid port %d", localport);
189 struct vchiq_service *
190 find_service_for_instance(struct vchiq_instance *instance,
193 struct vchiq_service *service;
196 service = handle_to_service(handle);
197 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
198 service->handle == handle &&
199 service->instance == instance &&
200 kref_get_unless_zero(&service->ref_count)) {
201 service = rcu_pointer_handoff(service);
206 vchiq_log_info(vchiq_core_log_level,
207 "Invalid service handle 0x%x", handle);
211 struct vchiq_service *
212 find_closed_service_for_instance(struct vchiq_instance *instance,
215 struct vchiq_service *service;
218 service = handle_to_service(handle);
220 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
221 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
222 service->handle == handle &&
223 service->instance == instance &&
224 kref_get_unless_zero(&service->ref_count)) {
225 service = rcu_pointer_handoff(service);
230 vchiq_log_info(vchiq_core_log_level,
231 "Invalid service handle 0x%x", handle);
235 struct vchiq_service *
236 __next_service_by_instance(struct vchiq_state *state,
237 struct vchiq_instance *instance,
240 struct vchiq_service *service = NULL;
243 while (idx < state->unused_service) {
244 struct vchiq_service *srv;
246 srv = rcu_dereference(state->services[idx]);
248 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
249 srv->instance == instance) {
259 struct vchiq_service *
260 next_service_by_instance(struct vchiq_state *state,
261 struct vchiq_instance *instance,
264 struct vchiq_service *service;
268 service = __next_service_by_instance(state, instance, pidx);
271 if (kref_get_unless_zero(&service->ref_count)) {
272 service = rcu_pointer_handoff(service);
281 lock_service(struct vchiq_service *service)
284 WARN(1, "%s service is NULL\n", __func__);
287 kref_get(&service->ref_count);
290 static void service_release(struct kref *kref)
292 struct vchiq_service *service =
293 container_of(kref, struct vchiq_service, ref_count);
294 struct vchiq_state *state = service->state;
296 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
297 rcu_assign_pointer(state->services[service->localport], NULL);
298 if (service->userdata_term)
299 service->userdata_term(service->base.userdata);
300 kfree_rcu(service, rcu);
304 unlock_service(struct vchiq_service *service)
307 WARN(1, "%s: service is NULL\n", __func__);
310 kref_put(&service->ref_count, service_release);
314 vchiq_get_client_id(unsigned int handle)
316 struct vchiq_service *service;
320 service = handle_to_service(handle);
321 id = service ? service->client_id : 0;
327 vchiq_get_service_userdata(unsigned int handle)
330 struct vchiq_service *service;
333 service = handle_to_service(handle);
334 userdata = service ? service->base.userdata : NULL;
338 EXPORT_SYMBOL(vchiq_get_service_userdata);
341 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
343 struct vchiq_state *state = service->state;
344 struct vchiq_service_quota *quota;
346 service->closing = 1;
348 /* Synchronise with other threads. */
349 mutex_lock(&state->recycle_mutex);
350 mutex_unlock(&state->recycle_mutex);
351 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
353 * If we're pausing then the slot_mutex is held until resume
354 * by the slot handler. Therefore don't try to acquire this
355 * mutex if we're the slot handler and in the pause sent state.
356 * We don't need to in this case anyway.
358 mutex_lock(&state->slot_mutex);
359 mutex_unlock(&state->slot_mutex);
362 /* Unblock any sending thread. */
363 quota = &state->service_quotas[service->localport];
364 complete("a->quota_event);
/* Non-slot-handler entry point for marking a service as closing. */
static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}
373 static inline enum vchiq_status
374 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
375 struct vchiq_header *header, void *bulk_userdata)
377 enum vchiq_status status;
379 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
380 service->state->id, service->localport, reason_names[reason],
381 header, bulk_userdata);
382 status = service->base.callback(reason, header, service->handle,
384 if (status == VCHIQ_ERROR) {
385 vchiq_log_warning(vchiq_core_log_level,
386 "%d: ignoring ERROR from callback to service %x",
387 service->state->id, service->handle);
388 status = VCHIQ_SUCCESS;
391 if (reason != VCHIQ_MESSAGE_AVAILABLE)
392 vchiq_release_message(service->handle, header);
398 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
400 enum vchiq_connstate oldstate = state->conn_state;
402 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
403 conn_state_names[oldstate],
404 conn_state_names[newstate]);
405 state->conn_state = newstate;
406 vchiq_platform_conn_state_changed(state, oldstate, newstate);
410 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
414 * Don't clear the 'fired' flag because it may already have been set
417 init_waitqueue_head(wq);
421 * All the event waiting routines in VCHIQ used a custom semaphore
422 * implementation that filtered most signals. This achieved a behaviour similar
423 * to the "killable" family of functions. While cleaning up this code all the
424 * routines where switched to the "interruptible" family of functions, as the
425 * former was deemed unjustified and the use "killable" set all VCHIQ's
426 * threads in D state.
429 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
434 if (wait_event_interruptible(*wq, event->fired)) {
447 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
455 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
457 if (event->fired && event->armed)
458 remote_event_signal_local(wq, event);
462 remote_event_pollall(struct vchiq_state *state)
464 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
465 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
466 remote_event_poll(&state->trigger_event, &state->local->trigger);
467 remote_event_poll(&state->recycle_event, &state->local->recycle);
471 * Round up message sizes so that any space at the end of a slot is always big
472 * enough for a header. This relies on header size being a power of two, which
473 * has been verified earlier by a static assertion.
477 calc_stride(size_t size)
479 /* Allow room for the header */
480 size += sizeof(struct vchiq_header);
483 return (size + sizeof(struct vchiq_header) - 1) &
484 ~(sizeof(struct vchiq_header) - 1);
487 /* Called by the slot handler thread */
488 static struct vchiq_service *
489 get_listening_service(struct vchiq_state *state, int fourcc)
493 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
496 for (i = 0; i < state->unused_service; i++) {
497 struct vchiq_service *service;
499 service = rcu_dereference(state->services[i]);
501 service->public_fourcc == fourcc &&
502 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
503 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
504 service->remoteport == VCHIQ_PORT_FREE)) &&
505 kref_get_unless_zero(&service->ref_count)) {
506 service = rcu_pointer_handoff(service);
515 /* Called by the slot handler thread */
516 static struct vchiq_service *
517 get_connected_service(struct vchiq_state *state, unsigned int port)
522 for (i = 0; i < state->unused_service; i++) {
523 struct vchiq_service *service =
524 rcu_dereference(state->services[i]);
526 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
527 service->remoteport == port &&
528 kref_get_unless_zero(&service->ref_count)) {
529 service = rcu_pointer_handoff(service);
539 request_poll(struct vchiq_state *state, struct vchiq_service *service,
549 value = atomic_read(&service->poll_flags);
550 } while (atomic_cmpxchg(&service->poll_flags, value,
551 value | BIT(poll_type)) != value);
553 index = BITSET_WORD(service->localport);
555 value = atomic_read(&state->poll_services[index]);
556 } while (atomic_cmpxchg(&state->poll_services[index],
557 value, value | BIT(service->localport & 0x1f)) != value);
560 state->poll_needed = 1;
563 /* ... and ensure the slot handler runs. */
564 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
568 * Called from queue_message, by the slot handler and application threads,
569 * with slot_mutex held
571 static struct vchiq_header *
572 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
574 struct vchiq_shared_state *local = state->local;
575 int tx_pos = state->local_tx_pos;
576 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
578 if (space > slot_space) {
579 struct vchiq_header *header;
580 /* Fill the remaining space with padding */
581 WARN_ON(!state->tx_data);
582 header = (struct vchiq_header *)
583 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
584 header->msgid = VCHIQ_MSGID_PADDING;
585 header->size = slot_space - sizeof(struct vchiq_header);
587 tx_pos += slot_space;
590 /* If necessary, get the next slot. */
591 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
594 /* If there is no free slot... */
596 if (!try_wait_for_completion(&state->slot_available_event)) {
597 /* ...wait for one. */
599 VCHIQ_STATS_INC(state, slot_stalls);
601 /* But first, flush through the last slot. */
602 state->local_tx_pos = tx_pos;
603 local->tx_pos = tx_pos;
604 remote_event_signal(&state->remote->trigger);
607 (wait_for_completion_interruptible(
608 &state->slot_available_event)))
609 return NULL; /* No space available */
612 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
613 complete(&state->slot_available_event);
614 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
618 slot_index = local->slot_queue[
619 SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
621 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
624 state->local_tx_pos = tx_pos + space;
626 return (struct vchiq_header *)(state->tx_data +
627 (tx_pos & VCHIQ_SLOT_MASK));
630 /* Called by the recycle thread. */
632 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
635 struct vchiq_shared_state *local = state->local;
636 int slot_queue_available;
639 * Find slots which have been freed by the other side, and return them
640 * to the available queue.
642 slot_queue_available = state->slot_queue_available;
645 * Use a memory barrier to ensure that any state that may have been
646 * modified by another thread is not masked by stale prefetched
651 while (slot_queue_available != local->slot_queue_recycle) {
653 int slot_index = local->slot_queue[slot_queue_available &
654 VCHIQ_SLOT_QUEUE_MASK];
655 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
658 slot_queue_available++;
660 * Beware of the address dependency - data is calculated
661 * using an index written by the other side.
665 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
666 state->id, slot_index, data,
667 local->slot_queue_recycle, slot_queue_available);
669 /* Initialise the bitmask for services which have used this slot */
670 memset(service_found, 0, length);
674 while (pos < VCHIQ_SLOT_SIZE) {
675 struct vchiq_header *header =
676 (struct vchiq_header *)(data + pos);
677 int msgid = header->msgid;
679 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
680 int port = VCHIQ_MSG_SRCPORT(msgid);
681 struct vchiq_service_quota *quota =
682 &state->service_quotas[port];
685 spin_lock("a_spinlock);
686 count = quota->message_use_count;
688 quota->message_use_count =
690 spin_unlock("a_spinlock);
692 if (count == quota->message_quota) {
694 * Signal the service that it
695 * has dropped below its quota
697 complete("a->quota_event);
698 } else if (count == 0) {
699 vchiq_log_error(vchiq_core_log_level,
700 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
702 quota->message_use_count,
703 header, msgid, header->msgid,
705 WARN(1, "invalid message use count\n");
707 if (!BITSET_IS_SET(service_found, port)) {
708 /* Set the found bit for this service */
709 BITSET_SET(service_found, port);
711 spin_lock("a_spinlock);
712 count = quota->slot_use_count;
714 quota->slot_use_count =
716 spin_unlock("a_spinlock);
720 * Signal the service in case
721 * it has dropped below its quota
723 complete("a->quota_event);
725 vchiq_core_log_level,
726 "%d: pfq:%d %x@%pK - slot_use->%d",
728 header->size, header,
732 vchiq_core_log_level,
733 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
735 msgid, header->msgid,
737 WARN(1, "bad slot use count\n");
744 pos += calc_stride(header->size);
745 if (pos > VCHIQ_SLOT_SIZE) {
746 vchiq_log_error(vchiq_core_log_level,
747 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
748 pos, header, msgid, header->msgid,
750 WARN(1, "invalid slot position\n");
757 spin_lock("a_spinlock);
758 count = state->data_use_count;
760 state->data_use_count =
762 spin_unlock("a_spinlock);
763 if (count == state->data_quota)
764 complete(&state->data_quota_event);
768 * Don't allow the slot to be reused until we are no
769 * longer interested in it.
773 state->slot_queue_available = slot_queue_available;
774 complete(&state->slot_available_event);
/*
 * Trivial copy callback for copy_message_data(): the context is the
 * source buffer.  Always copies 'maxsize' bytes and reports it copied.
 */
static ssize_t
memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
{
	memcpy(dest + offset, context + offset, maxsize);
	return maxsize;
}
/*
 * Fill 'dest' with 'size' bytes supplied incrementally by
 * 'copy_callback'.  The callback may deliver fewer bytes per call than
 * asked for; it must make progress, and must not over-deliver.
 * Returns 'size' on success, the callback's negative error code if it
 * failed, or -EIO if it stalled (returned 0) or over-delivered.
 */
static ssize_t
copy_message_data(ssize_t (*copy_callback)(void *context, void *dest,
					   size_t offset, size_t maxsize),
		  void *context,
		  void *dest,
		  size_t size)
{
	size_t pos = 0;

	while (pos < size) {
		ssize_t callback_result;
		size_t max_bytes = size - pos;

		callback_result =
			copy_callback(context, dest + pos,
				      pos, max_bytes);

		if (callback_result < 0)
			return callback_result;

		if (!callback_result)
			return -EIO;

		if (callback_result > max_bytes)
			return -EIO;

		pos += callback_result;
	}

	return size;
}
820 /* Called by the slot handler and application threads */
821 static enum vchiq_status
822 queue_message(struct vchiq_state *state, struct vchiq_service *service,
824 ssize_t (*copy_callback)(void *context, void *dest,
825 size_t offset, size_t maxsize),
826 void *context, size_t size, int flags)
828 struct vchiq_shared_state *local;
829 struct vchiq_service_quota *quota = NULL;
830 struct vchiq_header *header;
831 int type = VCHIQ_MSG_TYPE(msgid);
835 local = state->local;
837 stride = calc_stride(size);
839 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
841 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
842 mutex_lock_killable(&state->slot_mutex))
845 if (type == VCHIQ_MSG_DATA) {
849 WARN(1, "%s: service is NULL\n", __func__);
850 mutex_unlock(&state->slot_mutex);
854 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
855 QMFLAGS_NO_MUTEX_UNLOCK));
857 if (service->closing) {
858 /* The service has been closed */
859 mutex_unlock(&state->slot_mutex);
863 quota = &state->service_quotas[service->localport];
865 spin_lock("a_spinlock);
868 * Ensure this service doesn't use more than its quota of
871 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
872 state->local_tx_pos + stride - 1);
875 * Ensure data messages don't use more than their quota of
878 while ((tx_end_index != state->previous_data_index) &&
879 (state->data_use_count == state->data_quota)) {
880 VCHIQ_STATS_INC(state, data_stalls);
881 spin_unlock("a_spinlock);
882 mutex_unlock(&state->slot_mutex);
884 if (wait_for_completion_interruptible(
885 &state->data_quota_event))
888 mutex_lock(&state->slot_mutex);
889 spin_lock("a_spinlock);
890 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
891 state->local_tx_pos + stride - 1);
892 if ((tx_end_index == state->previous_data_index) ||
893 (state->data_use_count < state->data_quota)) {
894 /* Pass the signal on to other waiters */
895 complete(&state->data_quota_event);
900 while ((quota->message_use_count == quota->message_quota) ||
901 ((tx_end_index != quota->previous_tx_index) &&
902 (quota->slot_use_count ==
903 quota->slot_quota))) {
904 spin_unlock("a_spinlock);
905 vchiq_log_trace(vchiq_core_log_level,
906 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
907 state->id, service->localport,
908 msg_type_str(type), size,
909 quota->message_use_count,
910 quota->slot_use_count);
911 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
912 mutex_unlock(&state->slot_mutex);
913 if (wait_for_completion_interruptible(
914 "a->quota_event))
916 if (service->closing)
918 if (mutex_lock_killable(&state->slot_mutex))
920 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
921 /* The service has been closed */
922 mutex_unlock(&state->slot_mutex);
925 spin_lock("a_spinlock);
926 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
927 state->local_tx_pos + stride - 1);
930 spin_unlock("a_spinlock);
933 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
937 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
939 * In the event of a failure, return the mutex to the
942 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
943 mutex_unlock(&state->slot_mutex);
947 if (type == VCHIQ_MSG_DATA) {
948 ssize_t callback_result;
952 vchiq_log_info(vchiq_core_log_level,
953 "%d: qm %s@%pK,%zx (%d->%d)",
954 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
955 header, size, VCHIQ_MSG_SRCPORT(msgid),
956 VCHIQ_MSG_DSTPORT(msgid));
958 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
959 QMFLAGS_NO_MUTEX_UNLOCK));
962 copy_message_data(copy_callback, context,
965 if (callback_result < 0) {
966 mutex_unlock(&state->slot_mutex);
967 VCHIQ_SERVICE_STATS_INC(service,
972 if (SRVTRACE_ENABLED(service,
974 vchiq_log_dump_mem("Sent", 0,
977 (size_t)callback_result));
979 spin_lock("a_spinlock);
980 quota->message_use_count++;
983 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
986 * If this transmission can't fit in the last slot used by any
987 * service, the data_use_count must be increased.
989 if (tx_end_index != state->previous_data_index) {
990 state->previous_data_index = tx_end_index;
991 state->data_use_count++;
995 * If this isn't the same slot last used by this service,
996 * the service's slot_use_count must be increased.
998 if (tx_end_index != quota->previous_tx_index) {
999 quota->previous_tx_index = tx_end_index;
1000 slot_use_count = ++quota->slot_use_count;
1005 spin_unlock("a_spinlock);
1008 vchiq_log_trace(vchiq_core_log_level,
1009 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1010 state->id, service->localport,
1011 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1012 slot_use_count, header);
1014 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1015 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1017 vchiq_log_info(vchiq_core_log_level,
1018 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1019 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1020 header, size, VCHIQ_MSG_SRCPORT(msgid),
1021 VCHIQ_MSG_DSTPORT(msgid));
1024 * It is assumed for now that this code path
1025 * only happens from calls inside this file.
1027 * External callers are through the vchiq_queue_message
1028 * path which always sets the type to be VCHIQ_MSG_DATA
1030 * At first glance this appears to be correct but
1031 * more review is needed.
1033 copy_message_data(copy_callback, context,
1034 header->data, size);
1036 VCHIQ_STATS_INC(state, ctrl_tx_count);
1039 header->msgid = msgid;
1040 header->size = size;
1045 svc_fourcc = service
1046 ? service->base.fourcc
1047 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1049 vchiq_log_info(SRVTRACE_LEVEL(service),
1050 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1051 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1052 VCHIQ_MSG_TYPE(msgid),
1053 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1054 VCHIQ_MSG_SRCPORT(msgid),
1055 VCHIQ_MSG_DSTPORT(msgid),
1059 /* Make sure the new header is visible to the peer. */
1062 /* Make the new tx_pos visible to the peer. */
1063 local->tx_pos = state->local_tx_pos;
1066 if (service && (type == VCHIQ_MSG_CLOSE))
1067 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1069 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1070 mutex_unlock(&state->slot_mutex);
1072 remote_event_signal(&state->remote->trigger);
1074 return VCHIQ_SUCCESS;
1077 /* Called by the slot handler and application threads */
1078 static enum vchiq_status
1079 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1081 ssize_t (*copy_callback)(void *context, void *dest,
1082 size_t offset, size_t maxsize),
1083 void *context, int size, int is_blocking)
1085 struct vchiq_shared_state *local;
1086 struct vchiq_header *header;
1087 ssize_t callback_result;
1089 local = state->local;
1091 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1092 mutex_lock_killable(&state->sync_mutex))
1095 remote_event_wait(&state->sync_release_event, &local->sync_release);
1099 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1103 int oldmsgid = header->msgid;
1105 if (oldmsgid != VCHIQ_MSGID_PADDING)
1106 vchiq_log_error(vchiq_core_log_level,
1107 "%d: qms - msgid %x, not PADDING",
1108 state->id, oldmsgid);
1111 vchiq_log_info(vchiq_sync_log_level,
1112 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1113 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1114 header, size, VCHIQ_MSG_SRCPORT(msgid),
1115 VCHIQ_MSG_DSTPORT(msgid));
1118 copy_message_data(copy_callback, context,
1119 header->data, size);
1121 if (callback_result < 0) {
1122 mutex_unlock(&state->slot_mutex);
1123 VCHIQ_SERVICE_STATS_INC(service,
1129 if (SRVTRACE_ENABLED(service,
1131 vchiq_log_dump_mem("Sent", 0,
1134 (size_t)callback_result));
1136 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1137 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1139 VCHIQ_STATS_INC(state, ctrl_tx_count);
1142 header->size = size;
1143 header->msgid = msgid;
1145 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1148 svc_fourcc = service
1149 ? service->base.fourcc
1150 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1152 vchiq_log_trace(vchiq_sync_log_level,
1153 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1154 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1155 VCHIQ_MSG_TYPE(msgid),
1156 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1157 VCHIQ_MSG_SRCPORT(msgid),
1158 VCHIQ_MSG_DSTPORT(msgid),
1162 remote_event_signal(&state->remote->sync_trigger);
1164 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1165 mutex_unlock(&state->sync_mutex);
1167 return VCHIQ_SUCCESS;
1171 claim_slot(struct vchiq_slot_info *slot)
1177 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1178 struct vchiq_header *header, struct vchiq_service *service)
1180 mutex_lock(&state->recycle_mutex);
1183 int msgid = header->msgid;
1185 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1186 (service && service->closing)) {
1187 mutex_unlock(&state->recycle_mutex);
1191 /* Rewrite the message header to prevent a double release */
1192 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1195 slot_info->release_count++;
1197 if (slot_info->release_count == slot_info->use_count) {
1198 int slot_queue_recycle;
1199 /* Add to the freed queue */
1202 * A read barrier is necessary here to prevent speculative
1203 * fetches of remote->slot_queue_recycle from overtaking the
1208 slot_queue_recycle = state->remote->slot_queue_recycle;
1209 state->remote->slot_queue[slot_queue_recycle &
1210 VCHIQ_SLOT_QUEUE_MASK] =
1211 SLOT_INDEX_FROM_INFO(state, slot_info);
1212 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1213 vchiq_log_info(vchiq_core_log_level,
1214 "%d: %s %d - recycle->%x", state->id, __func__,
1215 SLOT_INDEX_FROM_INFO(state, slot_info),
1216 state->remote->slot_queue_recycle);
1219 * A write barrier is necessary, but remote_event_signal
1222 remote_event_signal(&state->remote->recycle);
1225 mutex_unlock(&state->recycle_mutex);
1228 static inline enum vchiq_reason
1229 get_bulk_reason(struct vchiq_bulk *bulk)
1231 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1232 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1233 return VCHIQ_BULK_TRANSMIT_ABORTED;
1235 return VCHIQ_BULK_TRANSMIT_DONE;
1238 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1239 return VCHIQ_BULK_RECEIVE_ABORTED;
1241 return VCHIQ_BULK_RECEIVE_DONE;
1244 /* Called by the slot handler - don't hold the bulk mutex */
1245 static enum vchiq_status
1246 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1249 enum vchiq_status status = VCHIQ_SUCCESS;
1251 vchiq_log_trace(vchiq_core_log_level,
1252 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1253 service->state->id, service->localport,
1254 (queue == &service->bulk_tx) ? 't' : 'r',
1255 queue->process, queue->remote_notify, queue->remove);
1257 queue->remote_notify = queue->process;
1259 while (queue->remove != queue->remote_notify) {
1260 struct vchiq_bulk *bulk =
1261 &queue->bulks[BULK_INDEX(queue->remove)];
1264 * Only generate callbacks for non-dummy bulk
1265 * requests, and non-terminated services
1267 if (bulk->data && service->instance) {
1268 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1269 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1270 VCHIQ_SERVICE_STATS_INC(service,
1272 VCHIQ_SERVICE_STATS_ADD(service,
1276 VCHIQ_SERVICE_STATS_INC(service,
1278 VCHIQ_SERVICE_STATS_ADD(service,
1283 VCHIQ_SERVICE_STATS_INC(service,
1284 bulk_aborted_count);
1286 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1287 struct bulk_waiter *waiter;
1289 spin_lock(&bulk_waiter_spinlock);
1290 waiter = bulk->userdata;
1292 waiter->actual = bulk->actual;
1293 complete(&waiter->event);
1295 spin_unlock(&bulk_waiter_spinlock);
1296 } else if (bulk->mode ==
1297 VCHIQ_BULK_MODE_CALLBACK) {
1298 enum vchiq_reason reason =
1299 get_bulk_reason(bulk);
1300 status = make_service_callback(service,
1301 reason, NULL, bulk->userdata);
1302 if (status == VCHIQ_RETRY)
1308 complete(&service->bulk_remove_event);
1311 status = VCHIQ_SUCCESS;
1313 if (status == VCHIQ_RETRY)
1314 request_poll(service->state, service,
1315 (queue == &service->bulk_tx) ?
1316 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
1321 /* Called by the slot handler thread */
1323 poll_services(struct vchiq_state *state)
1327 for (group = 0; group < BITSET_SIZE(state->unused_service); group++) {
1330 flags = atomic_xchg(&state->poll_services[group], 0);
1331 for (i = 0; flags; i++) {
1332 if (flags & BIT(i)) {
1333 struct vchiq_service *service =
1334 find_service_by_port(state,
1342 atomic_xchg(&service->poll_flags, 0);
1344 BIT(VCHIQ_POLL_REMOVE)) {
1345 vchiq_log_info(vchiq_core_log_level,
1346 "%d: ps - remove %d<->%d",
1347 state->id, service->localport,
1348 service->remoteport);
1351 * Make it look like a client, because
1352 * it must be removed and not left in
1353 * the LISTENING state.
1355 service->public_fourcc =
1356 VCHIQ_FOURCC_INVALID;
1358 if (vchiq_close_service_internal(
1359 service, 0/*!close_recvd*/) !=
1361 request_poll(state, service,
1363 } else if (service_flags &
1364 BIT(VCHIQ_POLL_TERMINATE)) {
1365 vchiq_log_info(vchiq_core_log_level,
1366 "%d: ps - terminate %d<->%d",
1367 state->id, service->localport,
1368 service->remoteport);
1369 if (vchiq_close_service_internal(
1370 service, 0/*!close_recvd*/) !=
1372 request_poll(state, service,
1373 VCHIQ_POLL_TERMINATE);
1375 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1376 notify_bulks(service,
1379 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1380 notify_bulks(service,
1383 unlock_service(service);
1389 /* Called with the bulk_mutex held */
/*
 * abort_outstanding_bulks() - drain a service's bulk queue, marking each
 * pending transfer as aborted.
 *
 * Advances queue->process until it catches up with both local_insert and
 * remote_insert: a dummy remote entry is fabricated when the remote side
 * never supplied one, and any locally submitted bulk is completed with
 * actual = VCHIQ_BULK_ACTUAL_ABORTED.
 *
 * NOTE(review): some original lines are elided from this excerpt.
 */
1391 abort_outstanding_bulks(struct vchiq_service *service,
1392 struct vchiq_bulk_queue *queue)
1394 int is_tx = (queue == &service->bulk_tx);
1396 vchiq_log_trace(vchiq_core_log_level,
1397 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1398 service->state->id, service->localport, is_tx ? 't' : 'r',
1399 queue->local_insert, queue->remote_insert, queue->process);
/* Both insert indices must be at or ahead of the process index. */
1401 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1402 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1404 while ((queue->process != queue->local_insert) ||
1405 (queue->process != queue->remote_insert)) {
1406 struct vchiq_bulk *bulk =
1407 &queue->bulks[BULK_INDEX(queue->process)];
1409 if (queue->process == queue->remote_insert) {
1410 /* fabricate a matching dummy bulk */
1411 bulk->remote_data = NULL;
1412 bulk->remote_size = 0;
1413 queue->remote_insert++;
1416 if (queue->process != queue->local_insert) {
1417 vchiq_complete_bulk(bulk);
1419 vchiq_log_info(SRVTRACE_LEVEL(service),
1420 "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1421 is_tx ? "Send Bulk to" : "Recv Bulk from",
1422 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1423 service->remoteport,
1427 /* fabricate a matching dummy bulk */
1430 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1431 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1433 queue->local_insert++;
/*
 * parse_open() - handle a received OPEN message.
 *
 * Validates the payload size, looks up a listening service for the
 * requested fourcc, negotiates protocol versions, acknowledges with
 * OPENACK (via the sync slot or the normal queue depending on
 * service->sync), and moves the service to OPEN/OPENSYNC.  If no
 * service matches, or the request is invalid, a CLOSE is sent back.
 * The bail_not_ready path defers the message so it can be retried.
 *
 * NOTE(review): several original lines (declarations, braces, return
 * statements) are elided from this excerpt.
 */
1441 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1443 struct vchiq_service *service = NULL;
1445 unsigned int localport, remoteport;
1447 msgid = header->msgid;
1448 size = header->size;
1449 localport = VCHIQ_MSG_DSTPORT(msgid);
1450 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* Only trust the payload if the message is big enough to hold it. */
1451 if (size >= sizeof(struct vchiq_open_payload)) {
1452 const struct vchiq_open_payload *payload =
1453 (struct vchiq_open_payload *)header->data;
1454 unsigned int fourcc;
1456 fourcc = payload->fourcc;
1457 vchiq_log_info(vchiq_core_log_level,
1458 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1459 state->id, header, localport,
1460 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1462 service = get_listening_service(state, fourcc);
1465 /* A matching service exists */
1466 short version = payload->version;
1467 short version_min = payload->version_min;
/* Reject the open if the two version ranges do not overlap. */
1469 if ((service->version < version_min) ||
1470 (version < service->version_min)) {
1471 /* Version mismatch */
1472 vchiq_loud_error_header();
1473 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1474 "version mismatch - local (%d, min %d)"
1475 " vs. remote (%d, min %d)",
1476 state->id, service->localport,
1477 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1478 service->version, service->version_min,
1479 version, version_min);
1480 vchiq_loud_error_footer();
1481 unlock_service(service);
1485 service->peer_version = version;
1487 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1488 struct vchiq_openack_payload ack_payload = {
/*
 * NOTE(review): the body of this version_common check is elided;
 * presumably it disables synchronous mode for old peers — confirm
 * against the full source.
 */
1492 if (state->version_common <
1493 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1496 /* Acknowledge the OPEN */
1497 if (service->sync) {
1498 if (queue_message_sync(
1505 memcpy_copy_callback,
1507 sizeof(ack_payload),
1509 goto bail_not_ready;
1511 if (queue_message(state,
1517 memcpy_copy_callback,
1519 sizeof(ack_payload),
1521 goto bail_not_ready;
1524 /* The service is now open */
1525 vchiq_set_service_state(service,
1526 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1527 : VCHIQ_SRVSTATE_OPEN);
1530 /* Success - the message has been dealt with */
1531 unlock_service(service);
1537 /* No available service, or an invalid request - send a CLOSE */
1538 if (queue_message(state, NULL,
1539 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1540 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1541 goto bail_not_ready;
1547 unlock_service(service);
1552 /* Called by the slot handler thread */
/*
 * parse_rx_slots() - consume messages from the remote side's transmit
 * slots, dispatching each by message type.
 *
 * Walks state->rx_pos up to the remote tx_pos, mapping in a fresh slot
 * when a slot boundary is crossed, and handling OPEN/OPENACK/CLOSE/
 * DATA/CONNECT/BULK_*_DONE/PADDING/PAUSE/RESUME/REMOTE_* messages.
 * The bail_not_ready paths leave rx_pos unchanged so the message is
 * reprocessed on the next pass.
 *
 * NOTE(review): this excerpt elides many original lines (braces,
 * "break" statements, some conditions); comments below are limited to
 * what the visible lines establish.
 */
1554 parse_rx_slots(struct vchiq_state *state)
1556 struct vchiq_shared_state *remote = state->remote;
1557 struct vchiq_service *service = NULL;
1560 DEBUG_INITIALISE(state->local)
1562 tx_pos = remote->tx_pos;
1564 while (state->rx_pos != tx_pos) {
1565 struct vchiq_header *header;
1568 unsigned int localport, remoteport;
1570 DEBUG_TRACE(PARSE_LINE);
/* No slot currently mapped - look up the next one in the queue. */
1571 if (!state->rx_data) {
1574 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1575 rx_index = remote->slot_queue[
1576 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1577 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1579 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1582 * Initialise use_count to one, and increment
1583 * release_count at the end of the slot to avoid
1584 * releasing the slot prematurely.
1586 state->rx_info->use_count = 1;
1587 state->rx_info->release_count = 0;
1590 header = (struct vchiq_header *)(state->rx_data +
1591 (state->rx_pos & VCHIQ_SLOT_MASK));
1592 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1593 msgid = header->msgid;
1594 DEBUG_VALUE(PARSE_MSGID, msgid);
1595 size = header->size;
1596 type = VCHIQ_MSG_TYPE(msgid);
1597 localport = VCHIQ_MSG_DSTPORT(msgid);
1598 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1600 if (type != VCHIQ_MSG_DATA)
1601 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* These message types are addressed to a specific local service. */
1604 case VCHIQ_MSG_OPENACK:
1605 case VCHIQ_MSG_CLOSE:
1606 case VCHIQ_MSG_DATA:
1607 case VCHIQ_MSG_BULK_RX:
1608 case VCHIQ_MSG_BULK_TX:
1609 case VCHIQ_MSG_BULK_RX_DONE:
1610 case VCHIQ_MSG_BULK_TX_DONE:
1611 service = find_service_by_port(state, localport);
1613 ((service->remoteport != remoteport) &&
1614 (service->remoteport != VCHIQ_PORT_FREE))) &&
1616 (type == VCHIQ_MSG_CLOSE)) {
1618 * This could be a CLOSE from a client which
1619 * hadn't yet received the OPENACK - look for
1620 * the connected service
1623 unlock_service(service);
1624 service = get_connected_service(state,
1627 vchiq_log_warning(vchiq_core_log_level,
1628 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1629 state->id, msg_type_str(type),
1630 header, remoteport, localport,
1631 service->localport);
1635 vchiq_log_error(vchiq_core_log_level,
1636 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1637 state->id, msg_type_str(type),
1638 header, remoteport, localport,
/* Optional per-service tracing of the received message. */
1647 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1650 svc_fourcc = service
1651 ? service->base.fourcc
1652 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1653 vchiq_log_info(SRVTRACE_LEVEL(service),
1654 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1655 msg_type_str(type), type,
1656 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1657 remoteport, localport, size);
1659 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Sanity check: a message must never straddle a slot boundary. */
1663 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1664 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1665 vchiq_log_error(vchiq_core_log_level,
1666 "header %pK (msgid %x) - size %x too big for slot",
1667 header, (unsigned int)msgid,
1668 (unsigned int)size);
1669 WARN(1, "oversized for slot\n");
1673 case VCHIQ_MSG_OPEN:
1674 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1675 if (!parse_open(state, header))
1676 goto bail_not_ready;
1678 case VCHIQ_MSG_OPENACK:
1679 if (size >= sizeof(struct vchiq_openack_payload)) {
1680 const struct vchiq_openack_payload *payload =
1681 (struct vchiq_openack_payload *)
1683 service->peer_version = payload->version;
1685 vchiq_log_info(vchiq_core_log_level,
1686 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1687 state->id, header, size, remoteport, localport,
1688 service->peer_version);
1689 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1690 service->remoteport = remoteport;
1691 vchiq_set_service_state(service,
1692 VCHIQ_SRVSTATE_OPEN);
1693 complete(&service->remove_event);
1695 vchiq_log_error(vchiq_core_log_level,
1696 "OPENACK received in state %s",
1697 srvstate_names[service->srvstate]);
1700 case VCHIQ_MSG_CLOSE:
1701 WARN_ON(size != 0); /* There should be no data */
1703 vchiq_log_info(vchiq_core_log_level,
1704 "%d: prs CLOSE@%pK (%d->%d)",
1705 state->id, header, remoteport, localport);
1707 mark_service_closing_internal(service, 1);
1709 if (vchiq_close_service_internal(service,
1710 1/*close_recvd*/) == VCHIQ_RETRY)
1711 goto bail_not_ready;
1713 vchiq_log_info(vchiq_core_log_level,
1714 "Close Service %c%c%c%c s:%u d:%d",
1715 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1717 service->remoteport);
1719 case VCHIQ_MSG_DATA:
1720 vchiq_log_info(vchiq_core_log_level,
1721 "%d: prs DATA@%pK,%x (%d->%d)",
1722 state->id, header, size, remoteport, localport);
1724 if ((service->remoteport == remoteport) &&
1725 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
/*
 * Claim the slot so it is not recycled while the service
 * callback still holds the message header.
 */
1726 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1727 claim_slot(state->rx_info);
1728 DEBUG_TRACE(PARSE_LINE);
1729 if (make_service_callback(service,
1730 VCHIQ_MESSAGE_AVAILABLE, header,
1731 NULL) == VCHIQ_RETRY) {
1732 DEBUG_TRACE(PARSE_LINE);
1733 goto bail_not_ready;
1735 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1736 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1739 VCHIQ_STATS_INC(state, error_count);
1742 case VCHIQ_MSG_CONNECT:
1743 vchiq_log_info(vchiq_core_log_level,
1744 "%d: prs CONNECT@%pK", state->id, header);
1745 state->version_common = ((struct vchiq_slot_zero *)
1746 state->slot_data)->version;
1747 complete(&state->connect);
1749 case VCHIQ_MSG_BULK_RX:
1750 case VCHIQ_MSG_BULK_TX:
1752 * We should never receive a bulk request from the
1753 * other side since we're not setup to perform as the
1758 case VCHIQ_MSG_BULK_RX_DONE:
1759 case VCHIQ_MSG_BULK_TX_DONE:
1760 if ((service->remoteport == remoteport) &&
1761 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1762 struct vchiq_bulk_queue *queue;
1763 struct vchiq_bulk *bulk;
1765 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1766 &service->bulk_rx : &service->bulk_tx;
1768 DEBUG_TRACE(PARSE_LINE);
1769 if (mutex_lock_killable(&service->bulk_mutex)) {
1770 DEBUG_TRACE(PARSE_LINE);
1771 goto bail_not_ready;
/* A DONE must correspond to a locally inserted bulk. */
1773 if ((int)(queue->remote_insert -
1774 queue->local_insert) >= 0) {
1775 vchiq_log_error(vchiq_core_log_level,
1776 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1777 state->id, msg_type_str(type),
1778 header, remoteport, localport,
1779 queue->remote_insert,
1780 queue->local_insert);
1781 mutex_unlock(&service->bulk_mutex);
1784 if (queue->process != queue->remote_insert) {
1785 pr_err("%s: p %x != ri %x\n",
1788 queue->remote_insert);
1789 mutex_unlock(&service->bulk_mutex);
1790 goto bail_not_ready;
1793 bulk = &queue->bulks[
1794 BULK_INDEX(queue->remote_insert)];
/* The message payload carries the actual transfer length. */
1795 bulk->actual = *(int *)header->data;
1796 queue->remote_insert++;
1798 vchiq_log_info(vchiq_core_log_level,
1799 "%d: prs %s@%pK (%d->%d) %x@%pad",
1800 state->id, msg_type_str(type),
1801 header, remoteport, localport,
1802 bulk->actual, &bulk->data);
1804 vchiq_log_trace(vchiq_core_log_level,
1805 "%d: prs:%d %cx li=%x ri=%x p=%x",
1806 state->id, localport,
1807 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1809 queue->local_insert,
1810 queue->remote_insert, queue->process);
1812 DEBUG_TRACE(PARSE_LINE);
1813 WARN_ON(queue->process == queue->local_insert);
1814 vchiq_complete_bulk(bulk);
1816 mutex_unlock(&service->bulk_mutex);
1817 DEBUG_TRACE(PARSE_LINE);
1818 notify_bulks(service, queue, 1/*retry_poll*/);
1819 DEBUG_TRACE(PARSE_LINE);
1822 case VCHIQ_MSG_PADDING:
1823 vchiq_log_trace(vchiq_core_log_level,
1824 "%d: prs PADDING@%pK,%x",
1825 state->id, header, size);
1827 case VCHIQ_MSG_PAUSE:
1828 /* If initiated, signal the application thread */
1829 vchiq_log_trace(vchiq_core_log_level,
1830 "%d: prs PAUSE@%pK,%x",
1831 state->id, header, size);
1832 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1833 vchiq_log_error(vchiq_core_log_level,
1834 "%d: PAUSE received in state PAUSED",
1838 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1839 /* Send a PAUSE in response */
1840 if (queue_message(state, NULL,
1841 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1842 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1844 goto bail_not_ready;
1846 /* At this point slot_mutex is held */
1847 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1849 case VCHIQ_MSG_RESUME:
1850 vchiq_log_trace(vchiq_core_log_level,
1851 "%d: prs RESUME@%pK,%x",
1852 state->id, header, size);
1853 /* Release the slot mutex */
1854 mutex_unlock(&state->slot_mutex);
1855 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1858 case VCHIQ_MSG_REMOTE_USE:
1859 vchiq_on_remote_use(state);
1861 case VCHIQ_MSG_REMOTE_RELEASE:
1862 vchiq_on_remote_release(state);
1864 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1868 vchiq_log_error(vchiq_core_log_level,
1869 "%d: prs invalid msgid %x@%pK,%x",
1870 state->id, msgid, header, size);
1871 WARN(1, "invalid message\n");
1877 unlock_service(service);
/* Advance past this message to the next stride-aligned header. */
1881 state->rx_pos += calc_stride(size);
1883 DEBUG_TRACE(PARSE_LINE);
1885 * Perform some housekeeping when the end of the slot is
1888 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1889 /* Remove the extra reference count. */
1890 release_slot(state, state->rx_info, NULL, NULL);
1891 state->rx_data = NULL;
1897 unlock_service(service);
1900 /* Called by the slot handler thread */
/*
 * slot_handler_func() - main loop of the slot handler kthread.
 *
 * Waits on the remote trigger event, services any deferred poll work
 * (per-connection-state: poll services, send PAUSE/RESUME), then parses
 * the received slots.  A failed PAUSE send re-arms poll_needed so the
 * attempt is retried.
 *
 * NOTE(review): loop constructs and some braces are elided in this
 * excerpt.
 */
1902 slot_handler_func(void *v)
1904 struct vchiq_state *state = v;
1905 struct vchiq_shared_state *local = state->local;
1907 DEBUG_INITIALISE(local)
1910 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1911 DEBUG_TRACE(SLOT_HANDLER_LINE);
1912 remote_event_wait(&state->trigger_event, &local->trigger);
1916 DEBUG_TRACE(SLOT_HANDLER_LINE);
1917 if (state->poll_needed) {
1919 state->poll_needed = 0;
1922 * Handle service polling and other rare conditions here
1923 * out of the mainline code
1925 switch (state->conn_state) {
1926 case VCHIQ_CONNSTATE_CONNECTED:
1927 /* Poll the services as requested */
1928 poll_services(state);
1931 case VCHIQ_CONNSTATE_PAUSING:
1932 if (queue_message(state, NULL,
1933 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1935 QMFLAGS_NO_MUTEX_UNLOCK)
1937 vchiq_set_conn_state(state,
1938 VCHIQ_CONNSTATE_PAUSE_SENT);
/* Retry the pause on the next iteration. */
1941 state->poll_needed = 1;
1945 case VCHIQ_CONNSTATE_RESUMING:
1946 if (queue_message(state, NULL,
1947 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1948 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1950 vchiq_set_conn_state(state,
1951 VCHIQ_CONNSTATE_CONNECTED);
1954 * This should really be impossible,
1955 * since the PAUSE should have flushed
1956 * through outstanding messages.
1958 vchiq_log_error(vchiq_core_log_level,
1959 "Failed to send RESUME message");
1968 DEBUG_TRACE(SLOT_HANDLER_LINE);
1969 parse_rx_slots(state);
1974 /* Called by the recycle thread */
/*
 * recycle_func() - main loop of the slot-recycle kthread.
 *
 * Allocates a scratch bitset sized for VCHIQ_MAX_SERVICES, then
 * repeatedly waits for the remote recycle event and processes the free
 * queue.  NOTE(review): the loop construct and kmalloc_array's flags/
 * error handling are elided from this excerpt.
 */
1976 recycle_func(void *v)
1978 struct vchiq_state *state = v;
1979 struct vchiq_shared_state *local = state->local;
1983 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1985 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1991 remote_event_wait(&state->recycle_event, &local->recycle);
1993 process_free_queue(state, found, length);
1998 /* Called by the sync thread */
/*
 * Main loop of the sync kthread (name sync_func per the
 * kthread_create(&sync_func, ...) call later in this file).
 *
 * Waits on the sync trigger event, then handles the single message in
 * the remote sync slot: OPENACK completes an in-progress synchronous
 * open; DATA is delivered via the service callback for services in
 * OPENSYNC state.  Unexpected message types are released immediately.
 *
 * NOTE(review): the function signature line and loop/brace lines are
 * elided from this excerpt.
 */
2002 struct vchiq_state *state = v;
2003 struct vchiq_shared_state *local = state->local;
2004 struct vchiq_header *header =
2005 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2006 state->remote->slot_sync);
2009 struct vchiq_service *service;
2012 unsigned int localport, remoteport;
2014 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2018 msgid = header->msgid;
2019 size = header->size;
2020 type = VCHIQ_MSG_TYPE(msgid);
2021 localport = VCHIQ_MSG_DSTPORT(msgid);
2022 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2024 service = find_service_by_port(state, localport);
2027 vchiq_log_error(vchiq_sync_log_level,
2028 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2029 state->id, msg_type_str(type),
2030 header, remoteport, localport, localport);
2031 release_message_sync(state, header);
2035 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2038 svc_fourcc = service
2039 ? service->base.fourcc
2040 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2041 vchiq_log_trace(vchiq_sync_log_level,
2042 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2044 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2045 remoteport, localport, size);
2047 vchiq_log_dump_mem("Rcvd", 0, header->data,
2052 case VCHIQ_MSG_OPENACK:
2053 if (size >= sizeof(struct vchiq_openack_payload)) {
2054 const struct vchiq_openack_payload *payload =
2055 (struct vchiq_openack_payload *)
2057 service->peer_version = payload->version;
2059 vchiq_log_info(vchiq_sync_log_level,
2060 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2061 state->id, header, size, remoteport, localport,
2062 service->peer_version);
2063 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2064 service->remoteport = remoteport;
2065 vchiq_set_service_state(service,
2066 VCHIQ_SRVSTATE_OPENSYNC);
2068 complete(&service->remove_event);
2070 release_message_sync(state, header);
2073 case VCHIQ_MSG_DATA:
2074 vchiq_log_trace(vchiq_sync_log_level,
2075 "%d: sf DATA@%pK,%x (%d->%d)",
2076 state->id, header, size, remoteport, localport);
2078 if ((service->remoteport == remoteport) &&
2079 (service->srvstate ==
2080 VCHIQ_SRVSTATE_OPENSYNC)) {
2081 if (make_service_callback(service,
2082 VCHIQ_MESSAGE_AVAILABLE, header,
2083 NULL) == VCHIQ_RETRY)
2084 vchiq_log_error(vchiq_sync_log_level,
2085 "synchronous callback to service %d returns VCHIQ_RETRY",
2091 vchiq_log_error(vchiq_sync_log_level,
2092 "%d: sf unexpected msgid %x@%pK,%x",
2093 state->id, msgid, header, size);
2094 release_message_sync(state, header);
2098 unlock_service(service);
/*
 * init_bulk_queue() - reset a bulk queue's indices to the empty state.
 * NOTE(review): at least one field reset line (original line 2109) is
 * elided from this excerpt.
 */
2105 init_bulk_queue(struct vchiq_bulk_queue *queue)
2107 queue->local_insert = 0;
2108 queue->remote_insert = 0;
2110 queue->remote_notify = 0;
/* Map a connection-state enum value to its printable name. */
2115 get_conn_state_name(enum vchiq_connstate conn_state)
2117 return conn_state_names[conn_state];
/*
 * vchiq_init_slots() - lay out the shared slot memory.
 *
 * Aligns the base to VCHIQ_SLOT_SIZE, places slot_zero at the aligned
 * base, checks that at least four data slots remain after the
 * VCHIQ_SLOT_ZERO_SLOTS header slots, then fills in the magic/version/
 * geometry fields and splits the data slots evenly between the master
 * and slave sides (each with one sync slot followed by its data range).
 *
 * NOTE(review): the mem_align declaration line and some return
 * statements are elided from this excerpt.
 */
2120 struct vchiq_slot_zero *
2121 vchiq_init_slots(void *mem_base, int mem_size)
2124 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2125 struct vchiq_slot_zero *slot_zero =
2126 (struct vchiq_slot_zero *)(mem_base + mem_align);
2127 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2128 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2130 /* Ensure there is enough memory to run an absolutely minimum system */
2131 num_slots -= first_data_slot;
2133 if (num_slots < 4) {
2134 vchiq_log_error(vchiq_core_log_level,
2135 "%s - insufficient memory %x bytes",
2136 __func__, mem_size);
2140 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2142 slot_zero->magic = VCHIQ_MAGIC;
2143 slot_zero->version = VCHIQ_VERSION;
2144 slot_zero->version_min = VCHIQ_VERSION_MIN;
2145 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2146 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2147 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2148 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* Split the data slots: first half to the master, second to the slave. */
2150 slot_zero->master.slot_sync = first_data_slot;
2151 slot_zero->master.slot_first = first_data_slot + 1;
2152 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2153 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2154 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2155 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * vchiq_init_state() - initialise a vchiq_state from shared slot memory
 * and start its worker threads.
 *
 * This side acts as the slave (local = &slot_zero->slave).  Sets up
 * completions, mutexes, per-service quotas, the local slot queue and
 * remote events, runs platform init, then creates and wakes the
 * slot-handler, recycle and sync kthreads before publishing readiness
 * via local->initialised.
 *
 * NOTE(review): this excerpt elides several lines (return statements,
 * some kthread_create arguments, loop braces).
 */
2161 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2163 struct vchiq_shared_state *local;
2164 struct vchiq_shared_state *remote;
2165 char threadname[16];
/* Only a single global state is supported. */
2168 if (vchiq_states[0]) {
2169 pr_err("%s: VCHIQ state already initialized\n", __func__);
2173 local = &slot_zero->slave;
2174 remote = &slot_zero->master;
2176 if (local->initialised) {
2177 vchiq_loud_error_header();
2178 if (remote->initialised)
2179 vchiq_loud_error("local state has already been initialised");
2181 vchiq_loud_error("master/slave mismatch two slaves");
2182 vchiq_loud_error_footer();
2186 memset(state, 0, sizeof(struct vchiq_state));
2189 * initialize shared state pointers
2192 state->local = local;
2193 state->remote = remote;
2194 state->slot_data = (struct vchiq_slot *)slot_zero;
2197 * initialize events and mutexes
2200 init_completion(&state->connect);
2201 mutex_init(&state->mutex);
2202 mutex_init(&state->slot_mutex);
2203 mutex_init(&state->recycle_mutex);
2204 mutex_init(&state->sync_mutex);
2205 mutex_init(&state->bulk_transfer_mutex);
2207 init_completion(&state->slot_available_event);
2208 init_completion(&state->slot_remove_event);
2209 init_completion(&state->data_quota_event);
2211 state->slot_queue_available = 0;
2213 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2214 struct vchiq_service_quota *quota =
2215 &state->service_quotas[i];
/*
 * NOTE(review): '"a->quota_event' below looks like an HTML-entity
 * mangling of '&quota->quota_event' (&quot; -> ") - the source
 * encoding should be repaired.
 */
2216 init_completion("a->quota_event);
/* Seed the local slot queue with every slot this side owns. */
2219 for (i = local->slot_first; i <= local->slot_last; i++) {
2220 local->slot_queue[state->slot_queue_available] = i;
2221 state->slot_queue_available++;
2222 complete(&state->slot_available_event);
2225 state->default_slot_quota = state->slot_queue_available/2;
2226 state->default_message_quota =
2227 min((unsigned short)(state->default_slot_quota * 256),
2228 (unsigned short)~0);
2230 state->previous_data_index = -1;
2231 state->data_use_count = 0;
2232 state->data_quota = state->slot_queue_available - 1;
2234 remote_event_create(&state->trigger_event, &local->trigger);
2236 remote_event_create(&state->recycle_event, &local->recycle);
2237 local->slot_queue_recycle = state->slot_queue_available;
2238 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2239 remote_event_create(&state->sync_release_event, &local->sync_release);
2241 /* At start-of-day, the slot is empty and available */
2242 ((struct vchiq_header *)
2243 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2244 VCHIQ_MSGID_PADDING;
2245 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2247 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2249 ret = vchiq_platform_init_state(state);
2254 * bring up slot handler thread
2256 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2257 state->slot_handler_thread = kthread_create(&slot_handler_func,
2261 if (IS_ERR(state->slot_handler_thread)) {
2262 vchiq_loud_error_header();
2263 vchiq_loud_error("couldn't create thread %s", threadname);
2264 vchiq_loud_error_footer();
2265 return PTR_ERR(state->slot_handler_thread);
2267 set_user_nice(state->slot_handler_thread, -19);
2269 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2270 state->recycle_thread = kthread_create(&recycle_func,
2273 if (IS_ERR(state->recycle_thread)) {
2274 vchiq_loud_error_header();
2275 vchiq_loud_error("couldn't create thread %s", threadname);
2276 vchiq_loud_error_footer();
2277 ret = PTR_ERR(state->recycle_thread);
2278 goto fail_free_handler_thread;
2280 set_user_nice(state->recycle_thread, -19);
2282 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2283 state->sync_thread = kthread_create(&sync_func,
2286 if (IS_ERR(state->sync_thread)) {
2287 vchiq_loud_error_header();
2288 vchiq_loud_error("couldn't create thread %s", threadname);
2289 vchiq_loud_error_footer();
2290 ret = PTR_ERR(state->sync_thread);
2291 goto fail_free_recycle_thread;
2293 set_user_nice(state->sync_thread, -20);
2295 wake_up_process(state->slot_handler_thread);
2296 wake_up_process(state->recycle_thread);
2297 wake_up_process(state->sync_thread);
2299 vchiq_states[0] = state;
2301 /* Indicate readiness to the other side */
2302 local->initialised = 1;
/* Error unwinding: stop threads in reverse creation order. */
2306 fail_free_recycle_thread:
2307 kthread_stop(state->recycle_thread);
2308 fail_free_handler_thread:
2309 kthread_stop(state->slot_handler_thread);
/*
 * vchiq_msg_queue_push() - append a held message header to a service's
 * message ring buffer, blocking (interruptibly, with signals flushed)
 * while the ring is full, then signalling msg_queue_push to wake a
 * reader.  NOTE(review): the full-ring bound expression and some braces
 * are elided from this excerpt.
 */
2314 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2316 struct vchiq_service *service = find_service_by_handle(handle);
2319 while (service->msg_queue_write == service->msg_queue_read +
2321 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2322 flush_signals(current);
/* Ring index: write counter masked to the buffer size (power of two). */
2325 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2326 service->msg_queue_write++;
2327 service->msg_queue[pos] = header;
2329 complete(&service->msg_queue_push);
2331 EXPORT_SYMBOL(vchiq_msg_queue_push);
/*
 * vchiq_msg_hold() - take the next message header from a service's
 * message ring buffer, waiting (interruptibly, with signals flushed)
 * until one is available, then signalling msg_queue_pop to wake a
 * blocked writer.  NOTE(review): the early-return body for the empty
 * check at original line 2339 is elided - presumably it returns NULL;
 * confirm against the full source.
 */
2333 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2335 struct vchiq_service *service = find_service_by_handle(handle);
2336 struct vchiq_header *header;
2339 if (service->msg_queue_write == service->msg_queue_read)
2342 while (service->msg_queue_write == service->msg_queue_read) {
2343 if (wait_for_completion_interruptible(&service->msg_queue_push))
2344 flush_signals(current);
/* Ring index: read counter masked to the buffer size (power of two). */
2347 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2348 service->msg_queue_read++;
2349 header = service->msg_queue[pos];
2351 complete(&service->msg_queue_pop);
2355 EXPORT_SYMBOL(vchiq_msg_hold);
/*
 * vchiq_validate_params() - reject service parameters that lack a
 * callback or fourcc.  NOTE(review): the return statements are elided
 * from this excerpt.
 */
2357 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2359 if (!params->callback || !params->fourcc) {
2360 vchiq_loud_error("Can't add service, invalid params\n");
2367 /* Called from application thread when a client or server service is created. */
/*
 * vchiq_add_service_internal() - allocate and register a new service.
 *
 * Validates the parameters, allocates and fully initialises a
 * vchiq_service, then - under state->mutex - chooses a free slot in
 * state->services (clients take the first free slot; servers scan from
 * the top, also rejecting a conflicting server with the same fourcc),
 * assigns a handle, sets quotas and brings the service online.  The
 * returned service keeps a ref_count of 1.
 *
 * NOTE(review): several original lines (error returns, some braces and
 * the handle_seq masking) are elided from this excerpt.
 */
2368 struct vchiq_service *
2369 vchiq_add_service_internal(struct vchiq_state *state,
2370 const struct vchiq_service_params_kernel *params,
2371 int srvstate, struct vchiq_instance *instance,
2372 vchiq_userdata_term userdata_term)
2374 struct vchiq_service *service;
2375 struct vchiq_service __rcu **pservice = NULL;
2376 struct vchiq_service_quota *quota;
2380 ret = vchiq_validate_params(params);
2384 service = kmalloc(sizeof(*service), GFP_KERNEL);
2388 service->base.fourcc = params->fourcc;
2389 service->base.callback = params->callback;
2390 service->base.userdata = params->userdata;
2391 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2392 kref_init(&service->ref_count);
2393 service->srvstate = VCHIQ_SRVSTATE_FREE;
2394 service->userdata_term = userdata_term;
2395 service->localport = VCHIQ_PORT_FREE;
2396 service->remoteport = VCHIQ_PORT_FREE;
/* Clients keep their fourcc private; servers advertise it. */
2398 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2399 VCHIQ_FOURCC_INVALID : params->fourcc;
2400 service->client_id = 0;
2401 service->auto_close = 1;
2403 service->closing = 0;
2405 atomic_set(&service->poll_flags, 0);
2406 service->version = params->version;
2407 service->version_min = params->version_min;
2408 service->state = state;
2409 service->instance = instance;
2410 service->service_use_count = 0;
2411 service->msg_queue_read = 0;
2412 service->msg_queue_write = 0;
2413 init_bulk_queue(&service->bulk_tx);
2414 init_bulk_queue(&service->bulk_rx);
2415 init_completion(&service->remove_event);
2416 init_completion(&service->bulk_remove_event);
2417 init_completion(&service->msg_queue_pop);
2418 init_completion(&service->msg_queue_push);
2419 mutex_init(&service->bulk_mutex);
2420 memset(&service->stats, 0, sizeof(service->stats));
2421 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2424 * Although it is perfectly possible to use a spinlock
2425 * to protect the creation of services, it is overkill as it
2426 * disables interrupts while the array is searched.
2427 * The only danger is of another thread trying to create a
2428 * service - service deletion is safe.
2429 * Therefore it is preferable to use state->mutex which,
2430 * although slower to claim, doesn't block interrupts while
2434 mutex_lock(&state->mutex);
2436 /* Prepare to use a previously unused service */
2437 if (state->unused_service < VCHIQ_MAX_SERVICES)
2438 pservice = &state->services[state->unused_service];
/* Clients take the lowest free slot... */
2440 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2441 for (i = 0; i < state->unused_service; i++) {
2442 if (!rcu_access_pointer(state->services[i])) {
2443 pservice = &state->services[i];
/* ...servers scan downward and check for fourcc conflicts. */
2449 for (i = (state->unused_service - 1); i >= 0; i--) {
2450 struct vchiq_service *srv;
2452 srv = rcu_dereference(state->services[i]);
2454 pservice = &state->services[i];
2455 } else if ((srv->public_fourcc == params->fourcc) &&
2456 ((srv->instance != instance) ||
2457 (srv->base.callback != params->callback))) {
2459 * There is another server using this
2460 * fourcc which doesn't match.
2470 service->localport = (pservice - state->services);
/* Handles embed a rolling sequence, the state id and the port. */
2472 handle_seq = VCHIQ_MAX_STATES *
2474 service->handle = handle_seq |
2475 (state->id * VCHIQ_MAX_SERVICES) |
2477 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2478 rcu_assign_pointer(*pservice, service);
2479 if (pservice == &state->services[state->unused_service])
2480 state->unused_service++;
2483 mutex_unlock(&state->mutex);
2490 quota = &state->service_quotas[service->localport];
2491 quota->slot_quota = state->default_slot_quota;
2492 quota->message_quota = state->default_message_quota;
2493 if (quota->slot_use_count == 0)
2494 quota->previous_tx_index =
2495 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2498 /* Bring this service online */
2499 vchiq_set_service_state(service, srvstate)
2501 vchiq_log_info(vchiq_core_msg_log_level,
2502 "%s Service %c%c%c%c SrcPort:%d",
2503 (srvstate == VCHIQ_SRVSTATE_OPENING)
2505 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2506 service->localport);
2508 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * vchiq_open_service_internal() - send an OPEN for a client service and
 * wait for the peer's ACK/NAK.
 *
 * Queues a blocking OPEN message carrying the fourcc and version range,
 * then waits on remove_event.  An interrupted wait yields VCHIQ_RETRY;
 * a wake-up that leaves the service in neither OPEN nor OPENSYNC state
 * is treated as an error (logged unless the state is CLOSEWAIT).  Both
 * failure paths drop the use count taken here.
 *
 * NOTE(review): some queue_message arguments and return statements are
 * elided from this excerpt.
 */
2514 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2516 struct vchiq_open_payload payload = {
2517 service->base.fourcc,
2520 service->version_min
2522 enum vchiq_status status = VCHIQ_SUCCESS;
2524 service->client_id = client_id;
2525 vchiq_use_service_internal(service);
2526 status = queue_message(service->state,
2528 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2531 memcpy_copy_callback,
2534 QMFLAGS_IS_BLOCKING);
2536 if (status != VCHIQ_SUCCESS)
2539 /* Wait for the ACK/NAK */
2540 if (wait_for_completion_interruptible(&service->remove_event)) {
2541 status = VCHIQ_RETRY;
2542 vchiq_release_service_internal(service);
2543 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2544 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
2545 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2546 vchiq_log_error(vchiq_core_log_level,
2547 "%d: osi - srvstate = %s (ref %u)",
2549 srvstate_names[service->srvstate],
2550 kref_read(&service->ref_count));
2551 status = VCHIQ_ERROR;
2552 VCHIQ_SERVICE_STATS_INC(service, error_count);
2553 vchiq_release_service_internal(service);
/*
 * release_service_messages() - release every message still claimed by a
 * service.
 *
 * For a sync service, releases the sync slot's message if it is
 * addressed to this service.  Then scans each remote slot that has
 * unreleased messages, walking its headers and releasing those that
 * target this service's local port and carry VCHIQ_MSGID_CLAIMED.
 * A slot currently being parsed is only scanned up to the read
 * position.
 *
 * NOTE(review): some loop headers and braces are elided from this
 * excerpt.
 */
2560 release_service_messages(struct vchiq_service *service)
2562 struct vchiq_state *state = service->state;
2563 int slot_last = state->remote->slot_last;
2566 /* Release any claimed messages aimed at this service */
2568 if (service->sync) {
2569 struct vchiq_header *header =
2570 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2571 state->remote->slot_sync);
2572 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2573 release_message_sync(state, header);
2578 for (i = state->remote->slot_first; i <= slot_last; i++) {
2579 struct vchiq_slot_info *slot_info =
2580 SLOT_INFO_FROM_INDEX(state, i);
/* Only slots with outstanding (unreleased) messages need scanning. */
2581 if (slot_info->release_count != slot_info->use_count) {
2583 (char *)SLOT_DATA_FROM_INDEX(state, i);
2584 unsigned int pos, end;
2586 end = VCHIQ_SLOT_SIZE;
2587 if (data == state->rx_data)
2589 * This buffer is still being read from - stop
2590 * at the current read position
2592 end = state->rx_pos & VCHIQ_SLOT_MASK;
2597 struct vchiq_header *header =
2598 (struct vchiq_header *)(data + pos);
2599 int msgid = header->msgid;
2600 int port = VCHIQ_MSG_DSTPORT(msgid);
2602 if ((port == service->localport) &&
2603 (msgid & VCHIQ_MSGID_CLAIMED)) {
2604 vchiq_log_info(vchiq_core_log_level,
2605 " fsi - hdr %pK", header);
2606 release_slot(state, slot_info, header,
/* Stride to the next header; past-the-end means corruption. */
2609 pos += calc_stride(header->size);
2610 if (pos > VCHIQ_SLOT_SIZE) {
2611 vchiq_log_error(vchiq_core_log_level,
2612 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2614 header->msgid, header->size);
2615 WARN(1, "invalid slot position\n");
/*
 * do_abort_bulks() - abort both bulk queues of a service and notify.
 *
 * Takes bulk_mutex (killably - on interruption the elided branch at
 * original line 2629 presumably reports failure), aborts outstanding
 * TX and RX bulks, then notifies completion for both directions.
 * Returns true only if both notifications succeed.
 */
2623 do_abort_bulks(struct vchiq_service *service)
2625 enum vchiq_status status;
2627 /* Abort any outstanding bulk transfers */
2628 if (mutex_lock_killable(&service->bulk_mutex))
2630 abort_outstanding_bulks(service, &service->bulk_tx);
2631 abort_outstanding_bulks(service, &service->bulk_rx);
2632 mutex_unlock(&service->bulk_mutex);
2634 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2635 if (status != VCHIQ_SUCCESS)
2638 status = notify_bulks(service, &service->bulk_rx, 0/*!retry_poll*/);
2639 return (status == VCHIQ_SUCCESS);
/*
 * close_service_complete() - finish closing a service and deliver the
 * SERVICE_CLOSED callback.
 *
 * Picks the post-close state (auto-closing servers return to LISTENING;
 * otherwise CLOSEWAIT; clients go to CLOSED), fires the callback, and
 * on success releases all outstanding use counts and either frees the
 * service (CLOSED) or signals remove_event.  If the callback returns
 * VCHIQ_RETRY, the service is moved to the caller-supplied failstate
 * so the close can be retried.
 *
 * NOTE(review): some braces and the return statement are elided from
 * this excerpt.
 */
2642 static enum vchiq_status
2643 close_service_complete(struct vchiq_service *service, int failstate)
2645 enum vchiq_status status;
2646 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2649 switch (service->srvstate) {
2650 case VCHIQ_SRVSTATE_OPEN:
2651 case VCHIQ_SRVSTATE_CLOSESENT:
2652 case VCHIQ_SRVSTATE_CLOSERECVD:
2654 if (service->auto_close) {
2655 service->client_id = 0;
2656 service->remoteport = VCHIQ_PORT_FREE;
2657 newstate = VCHIQ_SRVSTATE_LISTENING;
2659 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2662 newstate = VCHIQ_SRVSTATE_CLOSED;
2664 vchiq_set_service_state(service, newstate);
2666 case VCHIQ_SRVSTATE_LISTENING:
2669 vchiq_log_error(vchiq_core_log_level,
2670 "%s(%x) called in state %s", __func__,
2671 service->handle, srvstate_names[service->srvstate]);
2672 WARN(1, "%s in unexpected state\n", __func__);
2676 status = make_service_callback(service,
2677 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2679 if (status != VCHIQ_RETRY) {
2680 int uc = service->service_use_count;
2682 /* Complete the close process */
2683 for (i = 0; i < uc; i++)
2685 * cater for cases where close is forced and the
2686 * client may not close all it's handles
2688 vchiq_release_service_internal(service);
2690 service->client_id = 0;
2691 service->remoteport = VCHIQ_PORT_FREE;
2693 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2694 vchiq_free_service_internal(service);
2695 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2697 service->closing = 0;
2699 complete(&service->remove_event);
/* Callback asked for a retry - park in the failure state. */
2702 vchiq_set_service_state(service, failstate);
/*
 * Drive the service close state machine from the slot handler thread.
 * @close_recvd distinguishes a close initiated by the remote (a CLOSE
 * message was received) from a locally initiated close. Depending on the
 * current srvstate this aborts bulks, releases queued messages, sends a
 * CLOSE message to the peer, and/or completes the close via
 * close_service_complete(). Returns VCHIQ_RETRY when the operation must
 * be re-attempted (e.g. queue_message could not proceed).
 * NOTE(review): elided listing — break statements, else-branches and some
 * queue_message() argument lines are missing between the numbered lines.
 */
2708 /* Called by the slot handler */
2710 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2712 	struct vchiq_state *state = service->state;
2713 	enum vchiq_status status = VCHIQ_SUCCESS;
2714 	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2716 	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2717 		service->state->id, service->localport, close_recvd,
2718 		srvstate_names[service->srvstate]);
2720 	switch (service->srvstate) {
2721 	case VCHIQ_SRVSTATE_CLOSED:
2722 	case VCHIQ_SRVSTATE_HIDDEN:
2723 	case VCHIQ_SRVSTATE_LISTENING:
2724 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2726 			vchiq_log_error(vchiq_core_log_level,
2727 				"%s(1) called in state %s",
2728 				__func__, srvstate_names[service->srvstate]);
2729 		} else if (is_server) {
2730 			if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2731 				status = VCHIQ_ERROR;
2733 				service->client_id = 0;
2734 				service->remoteport = VCHIQ_PORT_FREE;
2735 				if (service->srvstate ==
2736 				    VCHIQ_SRVSTATE_CLOSEWAIT)
2737 					vchiq_set_service_state(service,
2738 						VCHIQ_SRVSTATE_LISTENING);
2740 			complete(&service->remove_event);
2742 			vchiq_free_service_internal(service);
2745 	case VCHIQ_SRVSTATE_OPENING:
2747 			/* The open was rejected - tell the user */
2748 			vchiq_set_service_state(service,
2749 				VCHIQ_SRVSTATE_CLOSEWAIT);
2750 			complete(&service->remove_event);
2752 			/* Shutdown mid-open - let the other side know */
2753 			status = queue_message(state, service,
2757 					VCHIQ_MSG_DSTPORT(service->remoteport)),
2762 	case VCHIQ_SRVSTATE_OPENSYNC:
		/* sync services serialise on sync_mutex while closing */
2763 		mutex_lock(&state->sync_mutex);
2765 	case VCHIQ_SRVSTATE_OPEN:
2767 			if (!do_abort_bulks(service))
2768 				status = VCHIQ_RETRY;
2771 		release_service_messages(service);
2773 		if (status == VCHIQ_SUCCESS)
2774 			status = queue_message(state, service,
2778 					VCHIQ_MSG_DSTPORT(service->remoteport)),
2779 				NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2781 		if (status != VCHIQ_SUCCESS) {
2782 			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2783 				mutex_unlock(&state->sync_mutex);
2788 			/* Change the state while the mutex is still held */
2789 			vchiq_set_service_state(service,
2790 				VCHIQ_SRVSTATE_CLOSESENT);
2791 			mutex_unlock(&state->slot_mutex);
2793 				mutex_unlock(&state->sync_mutex);
2797 		/* Change the state while the mutex is still held */
2798 		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2799 		mutex_unlock(&state->slot_mutex);
2801 			mutex_unlock(&state->sync_mutex);
2803 		status = close_service_complete(service,
2804 			VCHIQ_SRVSTATE_CLOSERECVD);
2807 	case VCHIQ_SRVSTATE_CLOSESENT:
2809 		/* This happens when a process is killed mid-close */
2812 			if (!do_abort_bulks(service)) {
2813 				status = VCHIQ_RETRY;
2817 		if (status == VCHIQ_SUCCESS)
2818 			status = close_service_complete(service,
2819 				VCHIQ_SRVSTATE_CLOSERECVD);
2822 	case VCHIQ_SRVSTATE_CLOSERECVD:
2823 		if (!close_recvd && is_server)
2824 			/* Force into LISTENING mode */
2825 			vchiq_set_service_state(service,
2826 				VCHIQ_SRVSTATE_LISTENING);
2827 		status = close_service_complete(service,
2828 			VCHIQ_SRVSTATE_CLOSERECVD);
2832 		vchiq_log_error(vchiq_core_log_level,
2833 			"%s(%d) called in state %s", __func__,
2834 			close_recvd, srvstate_names[service->srvstate]);
/*
 * Terminate a service on behalf of a dying application process: mark it
 * as closing and hand the actual removal off to the slot handler thread
 * via a VCHIQ_POLL_REMOVE poll request.
 */
2841 /* Called from the application process upon process death */
2843 vchiq_terminate_service_internal(struct vchiq_service *service)
2845 	struct vchiq_state *state = service->state;
2847 	vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2848 		state->id, service->localport, service->remoteport);
2850 	mark_service_closing(service);
2852 	/* Mark the service for removal by the slot handler */
2853 	request_poll(state, service, VCHIQ_POLL_REMOVE);
/*
 * Free a service slot: only legal from the quiescent states listed in the
 * switch (anything else logs an error). Moves the service to
 * VCHIQ_SRVSTATE_FREE, wakes any thread blocked on remove_event, and
 * drops the initial service reference taken at creation.
 * NOTE(review): elided listing — the error-path early return after the
 * vchiq_log_error() is not visible here.
 */
2856 /* Called from the slot handler */
2858 vchiq_free_service_internal(struct vchiq_service *service)
2860 	struct vchiq_state *state = service->state;
2862 	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2863 		state->id, service->localport);
2865 	switch (service->srvstate) {
2866 	case VCHIQ_SRVSTATE_OPENING:
2867 	case VCHIQ_SRVSTATE_CLOSED:
2868 	case VCHIQ_SRVSTATE_HIDDEN:
2869 	case VCHIQ_SRVSTATE_LISTENING:
2870 	case VCHIQ_SRVSTATE_CLOSEWAIT:
2873 		vchiq_log_error(vchiq_core_log_level,
2874 			"%d: fsi - (%d) in state %s",
2875 			state->id, service->localport,
2876 			srvstate_names[service->srvstate]);
2880 	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2882 	complete(&service->remove_event);
2884 	/* Release the initial lock */
2885 	unlock_service(service);
/*
 * Connect an instance to the remote: promote all of its HIDDEN services
 * to LISTENING, then (if the link is still DISCONNECTED) send the
 * CONNECT message and wait for the remote's reply on state->connect.
 * Returns VCHIQ_SUCCESS once CONNECTED; the interrupted-wait and
 * queue_message-retry early returns are on lines elided from this listing.
 */
2889 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2891 	struct vchiq_service *service;
2894 	/* Find all services registered to this client and enable them. */
2896 	while ((service = next_service_by_instance(state, instance,
2898 		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2899 			vchiq_set_service_state(service,
2900 				VCHIQ_SRVSTATE_LISTENING);
2901 		unlock_service(service);
2904 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2905 		if (queue_message(state, NULL,
2906 			VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2907 			0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2910 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2913 	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2914 		if (wait_for_completion_interruptible(&state->connect))
2917 		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		/* re-complete so any other waiter also proceeds */
2918 		complete(&state->connect);
2921 	return VCHIQ_SUCCESS;
/*
 * Shut down an instance: iterate over every service registered to it and
 * remove each one (return value of vchiq_remove_service deliberately
 * ignored — best effort during shutdown). Always returns VCHIQ_SUCCESS.
 */
2925 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2927 	struct vchiq_service *service;
2930 	/* Find all services registered to this client and enable them. */
2932 	while ((service = next_service_by_instance(state, instance,
2934 		(void)vchiq_remove_service(service->handle);
2935 		unlock_service(service);
2938 	return VCHIQ_SUCCESS;
/*
 * Public API: close a service by handle. Rejects services that are FREE,
 * LISTENING or HIDDEN. On the slot handler thread the close runs inline;
 * otherwise a VCHIQ_POLL_TERMINATE poll is queued and this thread waits
 * (interruptibly) on remove_event until the service reaches FREE or
 * LISTENING. Returns VCHIQ_RETRY if interrupted by a signal, VCHIQ_ERROR
 * if the service ends in an unexpected state.
 * NOTE(review): elided listing — loop braces and early returns between
 * the numbered lines are missing here.
 */
2942 vchiq_close_service(unsigned int handle)
2944 	/* Unregister the service */
2945 	struct vchiq_service *service = find_service_by_handle(handle);
2946 	enum vchiq_status status = VCHIQ_SUCCESS;
2951 	vchiq_log_info(vchiq_core_log_level,
2952 		"%d: close_service:%d",
2953 		service->state->id, service->localport);
2955 	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2956 	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2957 	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2958 		unlock_service(service);
2962 	mark_service_closing(service);
2964 	if (current == service->state->slot_handler_thread) {
2965 		status = vchiq_close_service_internal(service,
		/* inline close on the slot handler must never ask to retry */
2967 		WARN_ON(status == VCHIQ_RETRY);
2969 		/* Mark the service for termination by the slot handler */
2970 		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2974 		if (wait_for_completion_interruptible(&service->remove_event)) {
2975 			status = VCHIQ_RETRY;
2979 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2980 		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2981 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2984 		vchiq_log_warning(vchiq_core_log_level,
2985 			"%d: close_service:%d - waiting in state %s",
2986 			service->state->id, service->localport,
2987 			srvstate_names[service->srvstate]);
2990 	if ((status == VCHIQ_SUCCESS) &&
2991 	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2992 	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2993 		status = VCHIQ_ERROR;
2995 	unlock_service(service);
2999 EXPORT_SYMBOL(vchiq_close_service);
/*
 * Public API: remove a service by handle. Unlike vchiq_close_service()
 * this accepts HIDDEN services and forces server-side services to behave
 * like clients (public_fourcc invalidated) so they cannot fall back to
 * LISTENING. Otherwise the same inline-vs-poll split and remove_event
 * wait as vchiq_close_service(); success requires ending in FREE.
 * NOTE(review): elided listing — waiting-loop braces and early returns
 * between the numbered lines are not shown.
 */
3002 vchiq_remove_service(unsigned int handle)
3004 	/* Unregister the service */
3005 	struct vchiq_service *service = find_service_by_handle(handle);
3006 	enum vchiq_status status = VCHIQ_SUCCESS;
3011 	vchiq_log_info(vchiq_core_log_level,
3012 		"%d: remove_service:%d",
3013 		service->state->id, service->localport);
3015 	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3016 		unlock_service(service);
3020 	mark_service_closing(service);
3022 	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3023 	    (current == service->state->slot_handler_thread)) {
3025 		 * Make it look like a client, because it must be removed and
3026 		 * not left in the LISTENING state.
3028 		service->public_fourcc = VCHIQ_FOURCC_INVALID;
3030 		status = vchiq_close_service_internal(service,
3032 		WARN_ON(status == VCHIQ_RETRY);
3034 		/* Mark the service for removal by the slot handler */
3035 		request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3038 		if (wait_for_completion_interruptible(&service->remove_event)) {
3039 			status = VCHIQ_RETRY;
3043 		if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3044 		    (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3047 		vchiq_log_warning(vchiq_core_log_level,
3048 			"%d: remove_service:%d - waiting in state %s",
3049 			service->state->id, service->localport,
3050 			srvstate_names[service->srvstate]);
3053 	if ((status == VCHIQ_SUCCESS) &&
3054 	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
3055 		status = VCHIQ_ERROR;
3057 	unlock_service(service);
/*
 * Queue a bulk transmit (dir == VCHIQ_BULK_TRANSMIT) or receive on an
 * OPEN service. Exactly one of @offset (kernel pointer) / @uoffset
 * (userspace pointer) identifies the buffer. @mode selects callback,
 * no-callback, blocking (userdata is a struct bulk_waiter) or waiting
 * (re-wait on a bulk queued by an earlier interrupted call). Locking
 * order is bulk_mutex then slot_mutex; both are dropped on every exit
 * path. Returns VCHIQ_SUCCESS / VCHIQ_RETRY (signal) / VCHIQ_ERROR.
 * NOTE(review): elided listing — the do{}while stall loop braces, the
 * waiting-mode shortcut jump, several goto labels and some argument
 * lines are missing between the numbered lines.
 */
3063  * This function may be called by kernel threads or user threads.
3064  * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3065  * received and the call should be retried after being returned to user
3067  * When called in blocking mode, the userdata field points to a bulk_waiter
3070 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3071 	void *offset, void __user *uoffset,
3072 	int size, void *userdata,
3073 	enum vchiq_bulk_mode mode,
3074 	enum vchiq_bulk_dir dir)
3076 	struct vchiq_service *service = find_service_by_handle(handle);
3077 	struct vchiq_bulk_queue *queue;
3078 	struct vchiq_bulk *bulk;
3079 	struct vchiq_state *state;
3080 	struct bulk_waiter *bulk_waiter = NULL;
3081 	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3082 	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3083 		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3084 	enum vchiq_status status = VCHIQ_ERROR;
3090 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3093 	if (!offset && !uoffset)
3096 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3100 	case VCHIQ_BULK_MODE_NOCALLBACK:
3101 	case VCHIQ_BULK_MODE_CALLBACK:
3103 	case VCHIQ_BULK_MODE_BLOCKING:
3104 		bulk_waiter = userdata;
3105 		init_completion(&bulk_waiter->event);
3106 		bulk_waiter->actual = 0;
3107 		bulk_waiter->bulk = NULL;
3109 	case VCHIQ_BULK_MODE_WAITING:
3110 		bulk_waiter = userdata;
3111 		bulk = bulk_waiter->bulk;
3117 	state = service->state;
3119 	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3120 		&service->bulk_tx : &service->bulk_rx;
3122 	if (mutex_lock_killable(&service->bulk_mutex)) {
3123 		status = VCHIQ_RETRY;
	/* Bulk queue full: drop the mutex and wait for a slot to free up */
3127 	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3128 		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3130 			mutex_unlock(&service->bulk_mutex);
3131 			if (wait_for_completion_interruptible(
3132 						&service->bulk_remove_event)) {
3133 				status = VCHIQ_RETRY;
3136 			if (mutex_lock_killable(&service->bulk_mutex)) {
3137 				status = VCHIQ_RETRY;
3140 		} while (queue->local_insert == queue->remove +
3141 				VCHIQ_NUM_SERVICE_BULKS);
3144 	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3148 	bulk->userdata = userdata;
	/* assume aborted until the transfer actually completes */
3150 	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3152 	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3153 		goto unlock_error_exit;
3157 	vchiq_log_info(vchiq_core_log_level,
3158 		"%d: bt (%d->%d) %cx %x@%pad %pK",
3159 		state->id, service->localport, service->remoteport, dir_char,
3160 		size, &bulk->data, userdata);
3163 	 * The slot mutex must be held when the service is being closed, so
3164 	 * claim it here to ensure that isn't happening
3166 	if (mutex_lock_killable(&state->slot_mutex)) {
3167 		status = VCHIQ_RETRY;
3168 		goto cancel_bulk_error_exit;
	/* re-check: the service may have been closed while we blocked */
3171 	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3172 		goto unlock_both_error_exit;
3174 	payload[0] = lower_32_bits(bulk->data);
3175 	payload[1] = bulk->size;
3176 	status = queue_message(state,
3178 			VCHIQ_MAKE_MSG(dir_msgtype,
3180 				service->remoteport),
3181 			memcpy_copy_callback,
3184 			QMFLAGS_IS_BLOCKING |
3185 			QMFLAGS_NO_MUTEX_LOCK |
3186 			QMFLAGS_NO_MUTEX_UNLOCK);
3187 	if (status != VCHIQ_SUCCESS)
3188 		goto unlock_both_error_exit;
3190 	queue->local_insert++;
3192 	mutex_unlock(&state->slot_mutex);
3193 	mutex_unlock(&service->bulk_mutex);
3195 	vchiq_log_trace(vchiq_core_log_level,
3196 		"%d: bt:%d %cx li=%x ri=%x p=%x",
3198 		service->localport, dir_char,
3199 		queue->local_insert, queue->remote_insert, queue->process);
3202 	unlock_service(service);
3204 	status = VCHIQ_SUCCESS;
	/* blocking/waiting modes: sleep until the bulk completes or aborts */
3207 		bulk_waiter->bulk = bulk;
3208 		if (wait_for_completion_interruptible(&bulk_waiter->event))
3209 			status = VCHIQ_RETRY;
3210 		else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3211 			status = VCHIQ_ERROR;
3216 unlock_both_error_exit:
3217 	mutex_unlock(&state->slot_mutex);
3218 cancel_bulk_error_exit:
3219 	vchiq_complete_bulk(bulk);
3221 	mutex_unlock(&service->bulk_mutex);
3225 	unlock_service(service);
/*
 * Queue a data message on a service. @copy_callback is invoked to copy
 * up to @size bytes of payload directly into the slot. Messages larger
 * than VCHIQ_MAX_MSG_SIZE are rejected (error_count bumped). Routing
 * depends on srvstate: OPEN uses the normal slot path, OPENSYNC the
 * synchronous path; anything else yields VCHIQ_ERROR.
 * NOTE(review): elided listing — the early-exit goto, break statements
 * and one VCHIQ_MAKE_MSG argument line per call are not shown.
 */
3230 vchiq_queue_message(unsigned int handle,
3231 	ssize_t (*copy_callback)(void *context, void *dest,
3232 		size_t offset, size_t maxsize),
3236 	struct vchiq_service *service = find_service_by_handle(handle);
3237 	enum vchiq_status status = VCHIQ_ERROR;
3242 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3246 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3251 	if (size > VCHIQ_MAX_MSG_SIZE) {
3252 		VCHIQ_SERVICE_STATS_INC(service, error_count);
3256 	switch (service->srvstate) {
3257 	case VCHIQ_SRVSTATE_OPEN:
3258 		status = queue_message(service->state, service,
3259 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3261 					service->remoteport),
3262 				copy_callback, context, size, 1);
3264 	case VCHIQ_SRVSTATE_OPENSYNC:
3265 		status = queue_message_sync(service->state, service,
3266 				VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3268 					service->remoteport),
3269 				copy_callback, context, size, 1);
3272 		status = VCHIQ_ERROR;
3278 	unlock_service(service);
/*
 * Blocking wrapper around vchiq_queue_message() for in-kernel callers:
 * copies @size bytes from @data via memcpy_copy_callback, and retries in
 * a loop for as long as the queue reports VCHIQ_RETRY, so the call only
 * returns once the message has actually been queued (or hard-failed).
 * NOTE(review): elided listing — the surrounding retry loop and sleep
 * between attempts are on lines not shown here.
 */
3283 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3285 	enum vchiq_status status;
3288 		status = vchiq_queue_message(handle, memcpy_copy_callback,
3292 		 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3293 		 * implement a retry mechanism since this function is supposed
3294 		 * to block until queued
3296 		if (status != VCHIQ_RETRY)
3304 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Release a received message header back to the remote. If the header
 * lives in one of the remote's ordinary slots and was CLAIMED, the claim
 * is dropped via release_slot(); if it lives in the remote's sync slot it
 * is released through release_message_sync() instead.
 */
3307 vchiq_release_message(unsigned int handle,
3308 	struct vchiq_header *header)
3310 	struct vchiq_service *service = find_service_by_handle(handle);
3311 	struct vchiq_shared_state *remote;
3312 	struct vchiq_state *state;
3318 	state = service->state;
3319 	remote = state->remote;
3321 	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3323 	if ((slot_index >= remote->slot_first) &&
3324 	    (slot_index <= remote->slot_last)) {
3325 		int msgid = header->msgid;
3327 		if (msgid & VCHIQ_MSGID_CLAIMED) {
3328 			struct vchiq_slot_info *slot_info =
3329 				SLOT_INFO_FROM_INDEX(state, slot_index);
3331 			release_slot(state, slot_info, header, service);
3333 	} else if (slot_index == remote->slot_sync) {
3334 		release_message_sync(state, header);
3337 	unlock_service(service);
3339 EXPORT_SYMBOL(vchiq_release_message);
/*
 * Release a message that arrived in the synchronous slot: mark the header
 * as padding (so it is skipped on the next pass), then signal the remote's
 * sync_release event so it may reuse the sync slot.
 */
3342 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3344 	header->msgid = VCHIQ_MSGID_PADDING;
3345 	remote_event_signal(&state->remote->sync_release);
/*
 * Report the protocol version negotiated with the peer for @handle.
 * On success *peer_version is filled in and VCHIQ_SUCCESS is returned;
 * an invalid handle or failed service check yields VCHIQ_ERROR.
 * NOTE(review): elided listing — the NULL-peer_version guard between the
 * numbered lines is not visible here.
 */
3349 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3351 	enum vchiq_status status = VCHIQ_ERROR;
3352 	struct vchiq_service *service = find_service_by_handle(handle);
3357 	if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3363 	*peer_version = service->peer_version;
3364 	status = VCHIQ_SUCCESS;
3368 	unlock_service(service);
3371 EXPORT_SYMBOL(vchiq_get_peer_version);
3373 void vchiq_get_config(struct vchiq_config *config)
3375 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3376 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3377 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3378 config->max_services = VCHIQ_MAX_SERVICES;
3379 config->version = VCHIQ_VERSION;
3380 config->version_min = VCHIQ_VERSION_MIN;
3384 vchiq_set_service_option(unsigned int handle,
3385 enum vchiq_service_option option, int value)
3387 struct vchiq_service *service = find_service_by_handle(handle);
3388 struct vchiq_service_quota *quota;
3395 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3396 service->auto_close = value;
3400 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3401 quota = &service->state->service_quotas[service->localport];
3403 value = service->state->default_slot_quota;
3404 if ((value >= quota->slot_use_count) &&
3405 (value < (unsigned short)~0)) {
3406 quota->slot_quota = value;
3407 if ((value >= quota->slot_use_count) &&
3408 (quota->message_quota >= quota->message_use_count))
3410 * Signal the service that it may have
3411 * dropped below its quota
3413 complete("a->quota_event);
3418 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3419 quota = &service->state->service_quotas[service->localport];
3421 value = service->state->default_message_quota;
3422 if ((value >= quota->message_use_count) &&
3423 (value < (unsigned short)~0)) {
3424 quota->message_quota = value;
3425 if ((value >= quota->message_use_count) &&
3426 (quota->slot_quota >= quota->slot_use_count))
3428 * Signal the service that it may have
3429 * dropped below its quota
3431 complete("a->quota_event);
3436 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3437 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3438 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3439 service->sync = value;
3444 case VCHIQ_SERVICE_OPTION_TRACE:
3445 service->trace = value;
3452 unlock_service(service);
/*
 * Dump one side's shared state (@shared, labelled @label) into the dump
 * context: slot range and positions, per-slot use/release imbalances, and
 * the shared DEBUG counter array (named via debug_names, indexed from 1
 * up to debug[DEBUG_ENTRIES]). Each scnprintf/vchiq_dump pair emits one
 * line; len + 1 includes the terminating NUL.
 * NOTE(review): elided listing — err checks after each vchiq_dump() and
 * some debug_names entries are missing between the numbered lines.
 */
3458 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3459 	struct vchiq_shared_state *shared, const char *label)
3461 	static const char *const debug_names[] = {
3463 		"SLOT_HANDLER_COUNT",
3464 		"SLOT_HANDLER_LINE",
3468 		"AWAIT_COMPLETION_LINE",
3469 		"DEQUEUE_MESSAGE_LINE",
3470 		"SERVICE_CALLBACK_LINE",
3471 		"MSG_QUEUE_FULL_COUNT",
3472 		"COMPLETION_QUEUE_FULL_COUNT"
3479 	len = scnprintf(buf, sizeof(buf),
3480 		"  %s: slots %d-%d tx_pos=%x recycle=%x",
3481 		label, shared->slot_first, shared->slot_last,
3482 		shared->tx_pos, shared->slot_queue_recycle);
3483 	err = vchiq_dump(dump_context, buf, len + 1);
3487 	len = scnprintf(buf, sizeof(buf),
3489 	err = vchiq_dump(dump_context, buf, len + 1);
	/* report only slots whose claims have not all been released */
3493 	for (i = shared->slot_first; i <= shared->slot_last; i++) {
3494 		struct vchiq_slot_info slot_info =
3495 			*SLOT_INFO_FROM_INDEX(state, i);
3496 		if (slot_info.use_count != slot_info.release_count) {
3497 			len = scnprintf(buf, sizeof(buf),
3498 				"    %d: %d/%d", i, slot_info.use_count,
3499 				slot_info.release_count);
3500 			err = vchiq_dump(dump_context, buf, len + 1);
3506 	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3507 		len = scnprintf(buf, sizeof(buf), "  DEBUG: %s = %d(%x)",
3508 			debug_names[i], shared->debug[i], shared->debug[i]);
3509 		err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the full state of one VCHIQ connection: connection state, tx/rx
 * positions, version window, optional global stats, slot accounting,
 * platform state, both shared-state halves, platform instances, and then
 * every allocated service via vchiq_dump_service_state(). Returns the
 * first error from vchiq_dump(), 0 on success.
 * NOTE(review): elided listing — the err-propagation returns after each
 * dump call and the shared-state argument lines are missing here.
 */
3516 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3523 	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3524 		conn_state_names[state->conn_state]);
3525 	err = vchiq_dump(dump_context, buf, len + 1);
3529 	len = scnprintf(buf, sizeof(buf),
3530 		"  tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3531 		state->local->tx_pos,
3532 		state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3534 		state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3535 	err = vchiq_dump(dump_context, buf, len + 1);
3539 	len = scnprintf(buf, sizeof(buf),
3540 		"  Version: %d (min %d)",
3541 		VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3542 	err = vchiq_dump(dump_context, buf, len + 1);
3546 	if (VCHIQ_ENABLE_STATS) {
3547 		len = scnprintf(buf, sizeof(buf),
3548 			"  Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3549 			state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3550 			state->stats.error_count);
3551 		err = vchiq_dump(dump_context, buf, len + 1);
3556 	len = scnprintf(buf, sizeof(buf),
3557 		"  Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3558 		((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3559 			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3560 		state->data_quota - state->data_use_count,
3561 		state->local->slot_queue_recycle - state->slot_queue_available,
3562 		state->stats.slot_stalls, state->stats.data_stalls);
3563 	err = vchiq_dump(dump_context, buf, len + 1);
3567 	err = vchiq_dump_platform_state(dump_context);
3571 	err = vchiq_dump_shared_state(dump_context,
3577 	err = vchiq_dump_shared_state(dump_context,
3584 	err = vchiq_dump_platform_instances(dump_context);
3588 	for (i = 0; i < state->unused_service; i++) {
3589 		struct vchiq_service *service = find_service_by_port(state, i);
3592 			err = vchiq_dump_service_state(dump_context, service);
3593 			unlock_service(service);
/*
 * Dump one service's state: port, srvstate and reference count; and, for
 * non-FREE services, the fourcc, remote port (with client id for
 * servers), quota usage, pending bulk counts/sizes, and (when stats are
 * compiled in) the control/bulk traffic and stall counters. Finishes
 * with the platform-specific per-service dump. Returns the first error
 * from vchiq_dump(), 0 on success.
 * NOTE(review): elided listing — err checks, else-branches and some
 * scnprintf argument lines are missing between the numbered lines.
 */
3601 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3606 	unsigned int ref_count;
3608 	/*Don't include the lock just taken*/
3609 	ref_count = kref_read(&service->ref_count) - 1;
3610 	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3611 		service->localport, srvstate_names[service->srvstate],
3614 	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3615 		char remoteport[30];
3616 		struct vchiq_service_quota *quota =
3617 			&service->state->service_quotas[service->localport];
3618 		int fourcc = service->base.fourcc;
3619 		int tx_pending, rx_pending;
3621 		if (service->remoteport != VCHIQ_PORT_FREE) {
3622 			int len2 = scnprintf(remoteport, sizeof(remoteport),
3623 				"%u", service->remoteport);
			/* servers also show which client is attached */
3625 			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3626 				scnprintf(remoteport + len2,
3627 					sizeof(remoteport) - len2,
3628 					" (client %x)", service->client_id);
3630 			strcpy(remoteport, "n/a");
3633 		len += scnprintf(buf + len, sizeof(buf) - len,
3634 			" '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3635 			VCHIQ_FOURCC_AS_4CHARS(fourcc),
3637 			quota->message_use_count,
3638 			quota->message_quota,
3639 			quota->slot_use_count,
3642 		err = vchiq_dump(dump_context, buf, len + 1);
3646 		tx_pending = service->bulk_tx.local_insert -
3647 			service->bulk_tx.remote_insert;
3649 		rx_pending = service->bulk_rx.local_insert -
3650 			service->bulk_rx.remote_insert;
3652 		len = scnprintf(buf, sizeof(buf),
3653 			"  Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3655 			tx_pending ? service->bulk_tx.bulks[
3656 			BULK_INDEX(service->bulk_tx.remove)].size : 0,
3658 			rx_pending ? service->bulk_rx.bulks[
3659 			BULK_INDEX(service->bulk_rx.remove)].size : 0);
3661 		if (VCHIQ_ENABLE_STATS) {
3662 			err = vchiq_dump(dump_context, buf, len + 1);
3666 			len = scnprintf(buf, sizeof(buf),
3667 				"  Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3668 				service->stats.ctrl_tx_count,
3669 				service->stats.ctrl_tx_bytes,
3670 				service->stats.ctrl_rx_count,
3671 				service->stats.ctrl_rx_bytes);
3672 			err = vchiq_dump(dump_context, buf, len + 1);
3676 			len = scnprintf(buf, sizeof(buf),
3677 				"  Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3678 				service->stats.bulk_tx_count,
3679 				service->stats.bulk_tx_bytes,
3680 				service->stats.bulk_rx_count,
3681 				service->stats.bulk_rx_bytes);
3682 			err = vchiq_dump(dump_context, buf, len + 1);
3686 			len = scnprintf(buf, sizeof(buf),
3687 				"  %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3688 				service->stats.quota_stalls,
3689 				service->stats.slot_stalls,
3690 				service->stats.bulk_stalls,
3691 				service->stats.bulk_aborted_count,
3692 				service->stats.error_count);
3696 	err = vchiq_dump(dump_context, buf, len + 1);
3700 	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3701 		err = vchiq_dump_platform_service_state(dump_context, service);
/*
 * Emit the opening banner (two full-width rules and an opening "=====")
 * that brackets a prominent error report; paired with
 * vchiq_loud_error_footer().
 */
3706 vchiq_loud_error_header(void)
3708 	vchiq_log_error(vchiq_core_log_level,
3709 		"============================================================================");
3710 	vchiq_log_error(vchiq_core_log_level,
3711 		"============================================================================");
3712 	vchiq_log_error(vchiq_core_log_level, "=====");
/*
 * Emit the closing banner (a closing "=====" and two full-width rules)
 * matching vchiq_loud_error_header().
 */
3716 vchiq_loud_error_footer(void)
3718 	vchiq_log_error(vchiq_core_log_level, "=====");
3719 	vchiq_log_error(vchiq_core_log_level,
3720 		"============================================================================");
3721 	vchiq_log_error(vchiq_core_log_level,
3722 		"============================================================================");
/*
 * Send a REMOTE_USE control message to the peer; when the connection is
 * still DISCONNECTED the send is skipped (the elided branch presumably
 * returns an error — not visible in this listing). Otherwise returns the
 * queue_message() result.
 */
3725 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3727 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3730 	return queue_message(state, NULL,
3731 			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/*
 * Send a REMOTE_USE_ACTIVE control message to the peer; skipped while
 * DISCONNECTED (the elided branch presumably returns an error — not
 * visible in this listing). Otherwise returns the queue_message() result.
 */
3735 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3737 	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3740 	return queue_message(state, NULL,
3741 			VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3745 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3748 const u8 *mem = void_mem;
3753 while (num_bytes > 0) {
3756 for (offset = 0; offset < 16; offset++) {
3757 if (offset < num_bytes)
3758 s += scnprintf(s, 4, "%02x ", mem[offset]);
3760 s += scnprintf(s, 4, " ");
3763 for (offset = 0; offset < 16; offset++) {
3764 if (offset < num_bytes) {
3765 u8 ch = mem[offset];
3767 if ((ch < ' ') || (ch > '~'))
3774 if (label && (*label != '\0'))
3775 vchiq_log_trace(VCHIQ_LOG_TRACE,
3776 "%s: %08x: %s", label, addr, line_buf);
3778 vchiq_log_trace(VCHIQ_LOG_TRACE,
3779 "%08x: %s", addr, line_buf);