1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
/* Stack size for the slot handler kthread. */
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
/* Bit position of the state index encoded in a service handle. */
20 #define HANDLE_STATE_SHIFT 12
/* Map a slot index to its bookkeeping info / its data storage. */
22 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
23 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
/*
 * Map a pointer into slot storage back to the index of the slot that
 * contains it.  NOTE(review): the divisor line was truncated in this
 * extract; restored as VCHIQ_SLOT_SIZE to match the inverse of
 * SLOT_DATA_FROM_INDEX and the companion SLOT_QUEUE_INDEX_FROM_POS.
 */
#define SLOT_INDEX_FROM_DATA(state, data) \
	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
	VCHIQ_SLOT_SIZE)
27 #define SLOT_INDEX_FROM_INFO(state, info) \
28 ((unsigned int)(info - state->slot_info))
/* Convert a byte position in the tx stream to a slot-queue index. */
29 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
30 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
31 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
32 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
/* Wrap a bulk-transfer position into the per-service bulk ring. */
34 #define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))
/* Per-service trace override: a traced service logs at TRACE level. */
36 #define SRVTRACE_LEVEL(srv) \
37 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
38 #define SRVTRACE_ENABLED(srv, lev) \
39 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
/* Wire payload of an OPEN message (fields elided in this extract). */
41 struct vchiq_open_payload {
/* Wire payload of an OPENACK message (fields elided in this extract). */
48 struct vchiq_openack_payload {
/* queue_message() behaviour flags. */
53 QMFLAGS_IS_BLOCKING = BIT(0),
54 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
55 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
58 /* we require this for consistency between endpoints */
59 vchiq_static_assert(sizeof(struct vchiq_header) == 8);
60 vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
61 vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
62 vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
63 vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
64 vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
66 /* Run time control of log level, based on KERN_XXX level. */
67 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
68 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
69 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
/* Non-static: also taken by code outside this file (see notify_bulks). */
71 DEFINE_SPINLOCK(bulk_waiter_spinlock);
/* Protects the per-service and global quota accounting in this file. */
72 static DEFINE_SPINLOCK(quota_spinlock);
74 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
/* NOTE(review): presumably folded into new service handles — confirm. */
75 static unsigned int handle_seq;
/* Printable names for srvstate values (table truncated in extract). */
77 static const char *const srvstate_names[] = {
/* Printable names for enum vchiq_reason (table truncated in extract). */
90 static const char *const reason_names[] = {
96 "BULK_TRANSMIT_ABORTED",
97 "BULK_RECEIVE_ABORTED"
/* Printable names for enum vchiq_connstate (table truncated in extract). */
100 static const char *const conn_state_names[] = {
/* Forward declaration; definition appears later in this file. */
113 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
115 static const char *msg_type_str(unsigned int msg_type)
118 case VCHIQ_MSG_PADDING: return "PADDING";
119 case VCHIQ_MSG_CONNECT: return "CONNECT";
120 case VCHIQ_MSG_OPEN: return "OPEN";
121 case VCHIQ_MSG_OPENACK: return "OPENACK";
122 case VCHIQ_MSG_CLOSE: return "CLOSE";
123 case VCHIQ_MSG_DATA: return "DATA";
124 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
125 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
126 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
127 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
128 case VCHIQ_MSG_PAUSE: return "PAUSE";
129 case VCHIQ_MSG_RESUME: return "RESUME";
130 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
131 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
132 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
138 vchiq_set_service_state(struct vchiq_service *service, int newstate)
140 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
141 service->state->id, service->localport,
142 srvstate_names[service->srvstate],
143 srvstate_names[newstate]);
144 service->srvstate = newstate;
/*
 * find_service_by_handle() - look up a service by its public handle and
 * take a reference on it via kref_get_unless_zero().
 * NOTE(review): the rcu_read_lock()/unlock() calls and return statements
 * appear to be missing from this extract; compare against upstream
 * vchiq_core.c before relying on the visible control flow.
 */
147 struct vchiq_service *
148 find_service_by_handle(unsigned int handle)
150 struct vchiq_service *service;
153 service = handle_to_service(handle);
/* Reject freed services and stale handles. */
154 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
155 service->handle == handle &&
156 kref_get_unless_zero(&service->ref_count)) {
/* We now hold a reference, so the pointer may leave the RCU section. */
157 service = rcu_pointer_handoff(service);
162 vchiq_log_info(vchiq_core_log_level,
163 "Invalid service handle 0x%x", handle);
/*
 * find_service_by_port() - look up a local service by port number and
 * take a reference on it.
 * NOTE(review): rcu_read_lock()/unlock() and returns missing from extract.
 */
167 struct vchiq_service *
168 find_service_by_port(struct vchiq_state *state, int localport)
/* The unsigned cast rejects negative ports as well as too-large ones. */
171 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
172 struct vchiq_service *service;
175 service = rcu_dereference(state->services[localport]);
176 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
177 kref_get_unless_zero(&service->ref_count)) {
178 service = rcu_pointer_handoff(service);
184 vchiq_log_info(vchiq_core_log_level,
185 "Invalid port %d", localport);
/*
 * find_service_for_instance() - as find_service_by_handle(), but also
 * require the service to belong to @instance.
 * NOTE(review): RCU lock/unlock and returns missing from this extract.
 */
189 struct vchiq_service *
190 find_service_for_instance(struct vchiq_instance *instance,
193 struct vchiq_service *service;
196 service = handle_to_service(handle);
197 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
198 service->handle == handle &&
199 service->instance == instance &&
200 kref_get_unless_zero(&service->ref_count)) {
201 service = rcu_pointer_handoff(service);
206 vchiq_log_info(vchiq_core_log_level,
207 "Invalid service handle 0x%x", handle);
/*
 * find_closed_service_for_instance() - like find_service_for_instance()
 * but only accepts services in the FREE or CLOSED state, so a client
 * can reap a service it has already closed.
 * NOTE(review): RCU lock/unlock and returns missing from this extract.
 */
211 struct vchiq_service *
212 find_closed_service_for_instance(struct vchiq_instance *instance,
215 struct vchiq_service *service;
218 service = handle_to_service(handle);
220 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
221 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
222 service->handle == handle &&
223 service->instance == instance &&
224 kref_get_unless_zero(&service->ref_count)) {
225 service = rcu_pointer_handoff(service);
230 vchiq_log_info(vchiq_core_log_level,
231 "Invalid service handle 0x%x", handle);
/*
 * __next_service_by_instance() - scan forward for the next in-use
 * service owned by @instance.  Uses rcu_dereference(), so the caller is
 * presumably inside an RCU read-side critical section; no reference is
 * taken here.
 * NOTE(review): index initialisation, loop tail and return statement
 * are missing from this extract.
 */
235 struct vchiq_service *
236 __next_service_by_instance(struct vchiq_state *state,
237 struct vchiq_instance *instance,
240 struct vchiq_service *service = NULL;
243 while (idx < state->unused_service) {
244 struct vchiq_service *srv;
246 srv = rcu_dereference(state->services[idx]);
248 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
249 srv->instance == instance) {
/*
 * next_service_by_instance() - reference-taking wrapper around
 * __next_service_by_instance().
 * NOTE(review): rcu_read_lock()/unlock() and return lines are missing
 * from this extract.
 */
259 struct vchiq_service *
260 next_service_by_instance(struct vchiq_state *state,
261 struct vchiq_instance *instance,
264 struct vchiq_service *service;
268 service = __next_service_by_instance(state, instance, pidx);
271 if (kref_get_unless_zero(&service->ref_count)) {
272 service = rcu_pointer_handoff(service);
/*
 * lock_service() - take an unconditional reference on @service.
 * A NULL service is a caller bug: warn (and, in elided lines, bail out).
 */
281 lock_service(struct vchiq_service *service)
284 WARN(1, "%s service is NULL\n", __func__);
287 kref_get(&service->ref_count);
/*
 * service_release() - kref release callback invoked when the last
 * reference to a service is dropped.  Clears the RCU-visible slot,
 * runs the user's destructor, then frees after a grace period.
 */
290 static void service_release(struct kref *kref)
292 struct vchiq_service *service =
293 container_of(kref, struct vchiq_service, ref_count);
294 struct vchiq_state *state = service->state;
/* Only FREE services may be released; anything else is refcount misuse. */
296 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
297 rcu_assign_pointer(state->services[service->localport], NULL);
298 if (service->userdata_term)
299 service->userdata_term(service->base.userdata);
300 kfree_rcu(service, rcu);
/*
 * unlock_service() - drop a reference taken by lock_service() or the
 * find_* helpers; frees via service_release() when it reaches zero.
 */
304 unlock_service(struct vchiq_service *service)
307 WARN(1, "%s: service is NULL\n", __func__);
310 kref_put(&service->ref_count, service_release);
/*
 * vchiq_get_client_id() - return the client_id recorded on the service
 * identified by @handle, or 0 if the handle does not resolve.
 * NOTE(review): surrounding locking/return lines missing from extract.
 */
314 vchiq_get_client_id(unsigned int handle)
316 struct vchiq_service *service;
320 service = handle_to_service(handle)
321 id = service ? service->client_id : 0;
/*
 * vchiq_get_service_userdata() - return the userdata pointer registered
 * with the service, or NULL if @handle does not resolve.  Exported for
 * use by other modules.
 */
327 vchiq_get_service_userdata(unsigned int handle)
330 struct vchiq_service *service;
333 service = handle_to_service(handle);
334 userdata = service ? service->base.userdata : NULL;
338 EXPORT_SYMBOL(vchiq_get_service_userdata);
/*
 * mark_service_closing_internal() - flag @service as closing and wake
 * any thread blocked waiting for quota on it.
 * @sh_thread: non-zero when called from the slot handler thread.
 *
 * NOTE(review): the final complete() call was HTML-entity-mangled in
 * this extract ("a-> was &quota->); restored below.
 */
341 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
343 struct vchiq_state *state = service->state;
344 struct vchiq_service_quota *quota;
346 service->closing = 1;
/* Empty lock/unlock pairs wait out threads already inside the mutexes. */
348 /* Synchronise with other threads. */
349 mutex_lock(&state->recycle_mutex);
350 mutex_unlock(&state->recycle_mutex);
351 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
353 * If we're pausing then the slot_mutex is held until resume
354 * by the slot handler. Therefore don't try to acquire this
355 * mutex if we're the slot handler and in the pause sent state.
356 * We don't need to in this case anyway.
358 mutex_lock(&state->slot_mutex);
359 mutex_unlock(&state->slot_mutex);
362 /* Unblock any sending thread. */
363 quota = &state->service_quotas[service->localport];
364 complete(&quota->quota_event);
/* Public wrapper: mark closing from a non-slot-handler context. */
368 mark_service_closing(struct vchiq_service *service)
370 mark_service_closing_internal(service, 0);
/*
 * make_service_callback() - invoke the service's registered callback.
 * A VCHIQ_ERROR return is logged and converted to VCHIQ_SUCCESS so one
 * misbehaving callback cannot wedge the state machine.
 * NOTE(review): lines around the release-message tail appear truncated
 * in this extract.
 */
373 static inline enum vchiq_status
374 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
375 struct vchiq_header *header, void *bulk_userdata)
377 enum vchiq_status status;
379 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
380 service->state->id, service->localport, reason_names[reason],
381 header, bulk_userdata);
382 status = service->base.callback(reason, header, service->handle,
384 if (status == VCHIQ_ERROR) {
385 vchiq_log_warning(vchiq_core_log_level,
386 "%d: ignoring ERROR from callback to service %x",
387 service->state->id, service->handle);
388 status = VCHIQ_SUCCESS;
391 if (reason != VCHIQ_MESSAGE_AVAILABLE)
392 vchiq_release_message(service->handle, header);
/*
 * vchiq_set_conn_state() - update the connection state, log the
 * transition, and notify the platform layer of the change.
 */
398 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
400 enum vchiq_connstate oldstate = state->conn_state;
402 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
403 conn_state_names[oldstate],
404 conn_state_names[newstate]);
405 state->conn_state = newstate;
406 vchiq_platform_conn_state_changed(state, oldstate, newstate);
/* (Re)initialise the waitqueue backing a remote_event. */
410 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
414 * Don't clear the 'fired' flag because it may already have been set
417 init_waitqueue_head(wq);
421 * All the event waiting routines in VCHIQ used a custom semaphore
422 * implementation that filtered most signals. This achieved a behaviour similar
423 * to the "killable" family of functions. While cleaning up this code all the
424 * routines were switched to the "interruptible" family of functions, as the
425 * former was deemed unjustified, and using "killable" put all VCHIQ's
426 * threads in D state.
/*
 * remote_event_wait() - sleep until @event fires; the (elided) body
 * handles the interrupted-by-signal case.
 */
429 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
434 if (wait_event_interruptible(*wq, event->fired)) {
/* Wake a local waiter for @event (body elided in this extract). */
447 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
/* Re-deliver an event that fired while a waiter was armed. */
455 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
457 if (event->fired && event->armed)
458 remote_event_signal_local(wq, event);
/* Poll every local remote_event in the shared state. */
462 remote_event_pollall(struct vchiq_state *state)
464 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
465 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
466 remote_event_poll(&state->trigger_event, &state->local->trigger);
467 remote_event_poll(&state->recycle_event, &state->local->recycle);
471 * Round up message sizes so that any space at the end of a slot is always big
472 * enough for a header. This relies on header size being a power of two, which
473 * has been verified earlier by a static assertion.
477 calc_stride(size_t size)
479 /* Allow room for the header */
480 size += sizeof(struct vchiq_header);
483 return (size + sizeof(struct vchiq_header) - 1) &
484 ~(sizeof(struct vchiq_header) - 1);
487 /* Called by the slot handler thread */
/*
 * get_listening_service() - find a service that can accept an OPEN for
 * @fourcc: either LISTENING, or OPEN with no remote port bound yet.
 * Takes a reference on the returned service.
 * NOTE(review): RCU lock/unlock and returns missing from this extract.
 */
488 static struct vchiq_service *
489 get_listening_service(struct vchiq_state *state, int fourcc)
493 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
496 for (i = 0; i < state->unused_service; i++) {
497 struct vchiq_service *service;
499 service = rcu_dereference(state->services[i]);
501 service->public_fourcc == fourcc &&
502 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
503 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
504 service->remoteport == VCHIQ_PORT_FREE)) &&
505 kref_get_unless_zero(&service->ref_count)) {
506 service = rcu_pointer_handoff(service);
515 /* Called by the slot handler thread */
/*
 * get_connected_service() - find the OPEN service whose remote end is
 * @port, taking a reference on it.
 * NOTE(review): RCU lock/unlock and returns missing from this extract.
 */
516 static struct vchiq_service *
517 get_connected_service(struct vchiq_state *state, unsigned int port)
522 for (i = 0; i < state->unused_service; i++) {
523 struct vchiq_service *service =
524 rcu_dereference(state->services[i]);
526 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
527 service->remoteport == port &&
528 kref_get_unless_zero(&service->ref_count)) {
529 service = rcu_pointer_handoff(service);
/*
 * request_poll() - lock-free request for the slot handler to service
 * @poll_type on @service, then kick the slot handler.
 * The cmpxchg retry loops set one bit in the per-service poll_flags and
 * in the state-wide poll_services bitset without taking a lock.
 */
539 request_poll(struct vchiq_state *state, struct vchiq_service *service,
549 value = atomic_read(&service->poll_flags);
550 } while (atomic_cmpxchg(&service->poll_flags, value,
551 value | BIT(poll_type)) != value);
/* 32 services per poll_services word; select the word, then the bit. */
553 index = BITSET_WORD(service->localport);
555 value = atomic_read(&state->poll_services[index]);
556 } while (atomic_cmpxchg(&state->poll_services[index],
557 value, value | BIT(service->localport & 0x1f)) != value);
560 state->poll_needed = 1;
563 /* ... and ensure the slot handler runs. */
564 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
568 * Called from queue_message, by the slot handler and application threads,
569 * with slot_mutex held
/*
 * reserve_space() - return a header pointer for @space bytes in the
 * local tx stream, padding out the remainder of the current slot and
 * acquiring a fresh slot when needed.  Returns NULL when no slot is
 * available (or the wait for one is interrupted).
 * NOTE(review): several lines (is_blocking handling, slot accounting)
 * are missing from this extract.
 */
571 static struct vchiq_header *
572 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
574 struct vchiq_shared_state *local = state->local;
575 int tx_pos = state->local_tx_pos;
576 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
578 if (space > slot_space) {
579 struct vchiq_header *header;
580 /* Fill the remaining space with padding */
581 WARN_ON(!state->tx_data);
582 header = (struct vchiq_header *)
583 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
584 header->msgid = VCHIQ_MSGID_PADDING;
585 header->size = slot_space - sizeof(struct vchiq_header);
587 tx_pos += slot_space;
590 /* If necessary, get the next slot. */
591 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
594 /* If there is no free slot... */
596 if (!try_wait_for_completion(&state->slot_available_event)) {
597 /* ...wait for one. */
599 VCHIQ_STATS_INC(state, slot_stalls);
601 /* But first, flush through the last slot. */
602 state->local_tx_pos = tx_pos;
603 local->tx_pos = tx_pos;
604 remote_event_signal(&state->remote->trigger);
607 (wait_for_completion_interruptible(
608 &state->slot_available_event)))
609 return NULL; /* No space available */
/* A completion with nothing in the queue indicates a bug elsewhere. */
612 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
613 complete(&state->slot_available_event);
614 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
618 slot_index = local->slot_queue[
619 SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
621 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
624 state->local_tx_pos = tx_pos + space;
626 return (struct vchiq_header *)(state->tx_data +
627 (tx_pos & VCHIQ_SLOT_MASK));
630 /* Called by the recycle thread. */
/*
 * process_free_queue() - walk slots the peer has recycled, credit the
 * message/slot/data quotas they were consuming, and return the slots to
 * the available queue.
 *
 * NOTE(review): every "a_spinlock / "a->quota_event token in this
 * extract was HTML-entity mangling of &quota_spinlock /
 * &quota->quota_event (the "&quot" prefix was eaten); restored below.
 * Other lines (barriers, loop framing) are still missing.
 */
632 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
635 struct vchiq_shared_state *local = state->local;
636 int slot_queue_available;
639 * Find slots which have been freed by the other side, and return them
640 * to the available queue.
642 slot_queue_available = state->slot_queue_available;
645 * Use a memory barrier to ensure that any state that may have been
646 * modified by another thread is not masked by stale prefetched
651 while (slot_queue_available != local->slot_queue_recycle) {
653 int slot_index = local->slot_queue[slot_queue_available &
654 VCHIQ_SLOT_QUEUE_MASK];
655 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
658 slot_queue_available++;
660 * Beware of the address dependency - data is calculated
661 * using an index written by the other side.
665 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
666 state->id, slot_index, data,
667 local->slot_queue_recycle, slot_queue_available);
669 /* Initialise the bitmask for services which have used this slot */
670 memset(service_found, 0, length);
674 while (pos < VCHIQ_SLOT_SIZE) {
675 struct vchiq_header *header =
676 (struct vchiq_header *)(data + pos);
677 int msgid = header->msgid;
679 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
680 int port = VCHIQ_MSG_SRCPORT(msgid);
681 struct vchiq_service_quota *quota =
682 &state->service_quotas[port];
685 spin_lock(&quota_spinlock);
686 count = quota->message_use_count;
688 quota->message_use_count =
690 spin_unlock(&quota_spinlock);
692 if (count == quota->message_quota) {
694 * Signal the service that it
695 * has dropped below its quota
697 complete(&quota->quota_event);
698 } else if (count == 0) {
699 vchiq_log_error(vchiq_core_log_level,
700 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
702 quota->message_use_count,
703 header, msgid, header->msgid,
705 WARN(1, "invalid message use count\n");
/* Charge each service's slot quota at most once per slot. */
707 if (!BITSET_IS_SET(service_found, port)) {
708 /* Set the found bit for this service */
709 BITSET_SET(service_found, port);
711 spin_lock(&quota_spinlock);
712 count = quota->slot_use_count;
714 quota->slot_use_count =
716 spin_unlock(&quota_spinlock);
720 * Signal the service in case
721 * it has dropped below its quota
723 complete(&quota->quota_event);
725 vchiq_core_log_level,
726 "%d: pfq:%d %x@%pK - slot_use->%d",
728 header->size, header,
732 vchiq_core_log_level,
733 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
735 msgid, header->msgid,
737 WARN(1, "bad slot use count\n");
744 pos += calc_stride(header->size);
745 if (pos > VCHIQ_SLOT_SIZE) {
746 vchiq_log_error(vchiq_core_log_level,
747 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
748 pos, header, msgid, header->msgid,
750 WARN(1, "invalid slot position\n");
757 spin_lock(&quota_spinlock);
758 count = state->data_use_count;
760 state->data_use_count =
762 spin_unlock(&quota_spinlock);
763 if (count == state->data_quota)
764 complete(&state->data_quota_event);
768 * Don't allow the slot to be reused until we are no
769 * longer interested in it.
773 state->slot_queue_available = slot_queue_available;
774 complete(&state->slot_available_event);
/*
 * memcpy_copy_callback() - default copy_callback for queue_message():
 * copies @maxsize bytes from @context+@offset into @dest+@offset and
 * reports how many bytes were produced.
 * (The "return maxsize;" line was truncated in this extract; restored —
 * copy_message_data() relies on the byte count to advance.)
 */
static ssize_t
memcpy_copy_callback(void *context, void *dest,
		     size_t offset, size_t maxsize)
{
	memcpy((char *)dest + offset, (const char *)context + offset, maxsize);
	return (ssize_t)maxsize;
}
/*
 * copy_message_data() - repeatedly invoke @copy_callback to fill a
 * message body, stopping on error (negative result), end of data
 * (zero), or overrun (result larger than the bytes remaining).
 * NOTE(review): the function header and loop framing are missing from
 * this extract.
 */
789 ssize_t (*copy_callback)(void *context, void *dest,
790 size_t offset, size_t maxsize),
798 ssize_t callback_result;
799 size_t max_bytes = size - pos;
802 copy_callback(context, dest + pos,
805 if (callback_result < 0)
806 return callback_result;
808 if (!callback_result)
811 if (callback_result > max_bytes)
814 pos += callback_result;
820 /* Called by the slot handler and application threads */
/*
 * queue_message() - reserve tx space and emit one message, enforcing
 * per-service and global data quotas for VCHIQ_MSG_DATA.
 *
 * NOTE(review): every "a_spinlock / "a->quota_event token in this
 * extract was HTML-entity mangling of &quota_spinlock /
 * &quota->quota_event; restored below.  Some framing lines are still
 * missing from the extract.
 */
821 static enum vchiq_status
822 queue_message(struct vchiq_state *state, struct vchiq_service *service,
824 ssize_t (*copy_callback)(void *context, void *dest,
825 size_t offset, size_t maxsize),
826 void *context, size_t size, int flags)
828 struct vchiq_shared_state *local;
829 struct vchiq_service_quota *quota = NULL;
830 struct vchiq_header *header;
831 int type = VCHIQ_MSG_TYPE(msgid);
835 local = state->local;
837 stride = calc_stride(size);
839 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
841 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
842 mutex_lock_killable(&state->slot_mutex))
845 if (type == VCHIQ_MSG_DATA) {
849 WARN(1, "%s: service is NULL\n", __func__);
850 mutex_unlock(&state->slot_mutex);
854 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
855 QMFLAGS_NO_MUTEX_UNLOCK));
857 if (service->closing) {
858 /* The service has been closed */
859 mutex_unlock(&state->slot_mutex);
863 quota = &state->service_quotas[service->localport];
865 spin_lock(&quota_spinlock);
868 * Ensure this service doesn't use more than its quota of
871 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
872 state->local_tx_pos + stride - 1);
875 * Ensure data messages don't use more than their quota of
878 while ((tx_end_index != state->previous_data_index) &&
879 (state->data_use_count == state->data_quota)) {
880 VCHIQ_STATS_INC(state, data_stalls);
881 spin_unlock(&quota_spinlock);
882 mutex_unlock(&state->slot_mutex);
884 if (wait_for_completion_interruptible(
885 &state->data_quota_event))
888 mutex_lock(&state->slot_mutex);
889 spin_lock(&quota_spinlock);
890 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
891 state->local_tx_pos + stride - 1);
892 if ((tx_end_index == state->previous_data_index) ||
893 (state->data_use_count < state->data_quota)) {
894 /* Pass the signal on to other waiters */
895 complete(&state->data_quota_event);
900 while ((quota->message_use_count == quota->message_quota) ||
901 ((tx_end_index != quota->previous_tx_index) &&
902 (quota->slot_use_count ==
903 quota->slot_quota))) {
904 spin_unlock(&quota_spinlock);
905 vchiq_log_trace(vchiq_core_log_level,
906 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
907 state->id, service->localport,
908 msg_type_str(type), size,
909 quota->message_use_count,
910 quota->slot_use_count);
911 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
912 mutex_unlock(&state->slot_mutex);
913 if (wait_for_completion_interruptible(
914 &quota->quota_event))
916 if (service->closing)
918 if (mutex_lock_killable(&state->slot_mutex))
920 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
921 /* The service has been closed */
922 mutex_unlock(&state->slot_mutex);
925 spin_lock(&quota_spinlock);
926 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
927 state->local_tx_pos + stride - 1);
930 spin_unlock(&quota_spinlock);
933 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
937 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
939 * In the event of a failure, return the mutex to the
942 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
943 mutex_unlock(&state->slot_mutex);
947 if (type == VCHIQ_MSG_DATA) {
948 ssize_t callback_result;
952 vchiq_log_info(vchiq_core_log_level,
953 "%d: qm %s@%pK,%zx (%d->%d)",
954 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
955 header, size, VCHIQ_MSG_SRCPORT(msgid),
956 VCHIQ_MSG_DSTPORT(msgid));
958 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
959 QMFLAGS_NO_MUTEX_UNLOCK));
962 copy_message_data(copy_callback, context,
965 if (callback_result < 0) {
966 mutex_unlock(&state->slot_mutex);
967 VCHIQ_SERVICE_STATS_INC(service,
972 if (SRVTRACE_ENABLED(service,
974 vchiq_log_dump_mem("Sent", 0,
977 (size_t)callback_result));
979 spin_lock(&quota_spinlock);
980 quota->message_use_count++;
983 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
986 * If this transmission can't fit in the last slot used by any
987 * service, the data_use_count must be increased.
989 if (tx_end_index != state->previous_data_index) {
990 state->previous_data_index = tx_end_index;
991 state->data_use_count++;
995 * If this isn't the same slot last used by this service,
996 * the service's slot_use_count must be increased.
998 if (tx_end_index != quota->previous_tx_index) {
999 quota->previous_tx_index = tx_end_index;
1000 slot_use_count = ++quota->slot_use_count;
1005 spin_unlock(&quota_spinlock);
1008 vchiq_log_trace(vchiq_core_log_level,
1009 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1010 state->id, service->localport,
1011 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1012 slot_use_count, header);
1014 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1015 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1017 vchiq_log_info(vchiq_core_log_level,
1018 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1019 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1020 header, size, VCHIQ_MSG_SRCPORT(msgid),
1021 VCHIQ_MSG_DSTPORT(msgid));
1024 * It is assumed for now that this code path
1025 * only happens from calls inside this file.
1027 * External callers are through the vchiq_queue_message
1028 * path which always sets the type to be VCHIQ_MSG_DATA
1030 * At first glance this appears to be correct but
1031 * more review is needed.
1033 copy_message_data(copy_callback, context,
1034 header->data, size);
1036 VCHIQ_STATS_INC(state, ctrl_tx_count);
1039 header->msgid = msgid;
1040 header->size = size;
1045 svc_fourcc = service
1046 ? service->base.fourcc
1047 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1049 vchiq_log_info(SRVTRACE_LEVEL(service),
1050 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1051 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1052 VCHIQ_MSG_TYPE(msgid),
1053 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1054 VCHIQ_MSG_SRCPORT(msgid),
1055 VCHIQ_MSG_DSTPORT(msgid),
1059 /* Make sure the new header is visible to the peer. */
1062 /* Make the new tx_pos visible to the peer. */
1063 local->tx_pos = state->local_tx_pos;
1066 if (service && (type == VCHIQ_MSG_CLOSE))
1067 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1069 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1070 mutex_unlock(&state->slot_mutex);
1072 remote_event_signal(&state->remote->trigger);
1074 return VCHIQ_SUCCESS;
1077 /* Called by the slot handler and application threads */
/*
 * queue_message_sync() - send one message over the synchronous (single
 * outstanding message) channel, serialised by sync_mutex.
 */
1078 static enum vchiq_status
1079 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1081 ssize_t (*copy_callback)(void *context, void *dest,
1082 size_t offset, size_t maxsize),
1083 void *context, int size, int is_blocking)
1085 struct vchiq_shared_state *local;
1086 struct vchiq_header *header;
1087 ssize_t callback_result;
1089 local = state->local;
1091 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1092 mutex_lock_killable(&state->sync_mutex))
1095 remote_event_wait(&state->sync_release_event, &local->sync_release);
1099 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1103 int oldmsgid = header->msgid;
1105 if (oldmsgid != VCHIQ_MSGID_PADDING)
1106 vchiq_log_error(vchiq_core_log_level,
1107 "%d: qms - msgid %x, not PADDING",
1108 state->id, oldmsgid);
1111 vchiq_log_info(vchiq_sync_log_level,
1112 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1113 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1114 header, size, VCHIQ_MSG_SRCPORT(msgid),
1115 VCHIQ_MSG_DSTPORT(msgid));
1118 copy_message_data(copy_callback, context,
1119 header->data, size);
1121 if (callback_result < 0) {
/*
 * BUG FIX: this error path previously unlocked slot_mutex, but
 * this function acquires sync_mutex (above) — release the mutex
 * actually held, otherwise sync_mutex is leaked and slot_mutex
 * is unlocked while not owned.
 */
1122 mutex_unlock(&state->sync_mutex);
1123 VCHIQ_SERVICE_STATS_INC(service,
1129 if (SRVTRACE_ENABLED(service,
1131 vchiq_log_dump_mem("Sent", 0,
1134 (size_t)callback_result));
1136 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1137 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1139 VCHIQ_STATS_INC(state, ctrl_tx_count);
1142 header->size = size;
1143 header->msgid = msgid;
1145 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1148 svc_fourcc = service
1149 ? service->base.fourcc
1150 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1152 vchiq_log_trace(vchiq_sync_log_level,
1153 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1154 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1155 VCHIQ_MSG_TYPE(msgid),
1156 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1157 VCHIQ_MSG_SRCPORT(msgid),
1158 VCHIQ_MSG_DSTPORT(msgid),
1162 remote_event_signal(&state->remote->sync_trigger);
/* PAUSE keeps sync_mutex held until the matching resume. */
1164 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1165 mutex_unlock(&state->sync_mutex);
1167 return VCHIQ_SUCCESS;
/* claim_slot() - bump a slot's use count (body elided in this extract). */
1171 claim_slot(struct vchiq_slot_info *slot)
/*
 * release_slot() - account one message release against its slot; once
 * every claimed message in the slot has been released, hand the slot to
 * the remote's recycle queue and signal its recycle event.
 * Serialised by recycle_mutex.
 */
1177 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1178 struct vchiq_header *header, struct vchiq_service *service)
1180 mutex_lock(&state->recycle_mutex);
1183 int msgid = header->msgid;
/* Nothing to do for unclaimed messages or closing services. */
1185 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1186 (service && service->closing)) {
1187 mutex_unlock(&state->recycle_mutex);
1191 /* Rewrite the message header to prevent a double release */
1192 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1195 slot_info->release_count++;
1197 if (slot_info->release_count == slot_info->use_count) {
1198 int slot_queue_recycle;
1199 /* Add to the freed queue */
1202 * A read barrier is necessary here to prevent speculative
1203 * fetches of remote->slot_queue_recycle from overtaking the
1208 slot_queue_recycle = state->remote->slot_queue_recycle;
1209 state->remote->slot_queue[slot_queue_recycle &
1210 VCHIQ_SLOT_QUEUE_MASK] =
1211 SLOT_INDEX_FROM_INFO(state, slot_info);
1212 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1213 vchiq_log_info(vchiq_core_log_level,
1214 "%d: %s %d - recycle->%x", state->id, __func__,
1215 SLOT_INDEX_FROM_INFO(state, slot_info),
1216 state->remote->slot_queue_recycle);
1219 * A write barrier is necessary, but remote_event_signal
1222 remote_event_signal(&state->remote->recycle);
1225 mutex_unlock(&state->recycle_mutex);
1228 static inline enum vchiq_reason
1229 get_bulk_reason(struct vchiq_bulk *bulk)
1231 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1232 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1233 return VCHIQ_BULK_TRANSMIT_ABORTED;
1235 return VCHIQ_BULK_TRANSMIT_DONE;
1238 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1239 return VCHIQ_BULK_RECEIVE_ABORTED;
1241 return VCHIQ_BULK_RECEIVE_DONE;
1244 /* Called by the slot handler - don't hold the bulk mutex */
/*
 * notify_bulks() - deliver completion notifications for bulk transfers
 * processed since the previous notify.  Blocking waiters are completed
 * directly; callback-mode bulks go through make_service_callback(), and
 * a VCHIQ_RETRY from the callback re-arms a poll for later delivery.
 * NOTE(review): loop tail and return lines missing from this extract.
 */
1245 static enum vchiq_status
1246 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1249 enum vchiq_status status = VCHIQ_SUCCESS;
1251 vchiq_log_trace(vchiq_core_log_level,
1252 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1253 service->state->id, service->localport,
1254 (queue == &service->bulk_tx) ? 't' : 'r',
1255 queue->process, queue->remote_notify, queue->remove);
1257 queue->remote_notify = queue->process;
1259 while (queue->remove != queue->remote_notify) {
1260 struct vchiq_bulk *bulk =
1261 &queue->bulks[BULK_INDEX(queue->remove)];
1264 * Only generate callbacks for non-dummy bulk
1265 * requests, and non-terminated services
1267 if (bulk->data && service->instance) {
1268 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1269 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1270 VCHIQ_SERVICE_STATS_INC(service,
1272 VCHIQ_SERVICE_STATS_ADD(service,
1276 VCHIQ_SERVICE_STATS_INC(service,
1278 VCHIQ_SERVICE_STATS_ADD(service,
1283 VCHIQ_SERVICE_STATS_INC(service,
1284 bulk_aborted_count);
1286 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1287 struct bulk_waiter *waiter;
1289 spin_lock(&bulk_waiter_spinlock);
1290 waiter = bulk->userdata;
1292 waiter->actual = bulk->actual;
1293 complete(&waiter->event);
1295 spin_unlock(&bulk_waiter_spinlock);
1296 } else if (bulk->mode ==
1297 VCHIQ_BULK_MODE_CALLBACK) {
1298 enum vchiq_reason reason =
1299 get_bulk_reason(bulk);
1300 status = make_service_callback(service,
1301 reason, NULL, bulk->userdata);
1302 if (status == VCHIQ_RETRY)
1308 complete(&service->bulk_remove_event);
1311 status = VCHIQ_SUCCESS;
1313 if (status == VCHIQ_RETRY)
1314 request_poll(service->state, service,
1315 (queue == &service->bulk_tx) ?
1316 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
/*
 * poll_services_of_group() - atomically consume the poll bits for one
 * group of 32 services and act on each flagged service: REMOVE /
 * TERMINATE close the service (re-arming the poll on VCHIQ_RETRY),
 * TXNOTIFY / RXNOTIFY deliver bulk completions.
 * NOTE(review): several lines (flag bookkeeping, service_flags read)
 * are missing from this extract.
 */
1322 poll_services_of_group(struct vchiq_state *state, int group)
1324 u32 flags = atomic_xchg(&state->poll_services[group], 0);
1327 for (i = 0; flags; i++) {
1328 if (flags & BIT(i)) {
1329 struct vchiq_service *service =
1330 find_service_by_port(state,
1338 atomic_xchg(&service->poll_flags, 0);
1340 BIT(VCHIQ_POLL_REMOVE)) {
1341 vchiq_log_info(vchiq_core_log_level,
1342 "%d: ps - remove %d<->%d",
1343 state->id, service->localport,
1344 service->remoteport);
1347 * Make it look like a client, because
1348 * it must be removed and not left in
1349 * the LISTENING state.
1351 service->public_fourcc =
1352 VCHIQ_FOURCC_INVALID;
1354 if (vchiq_close_service_internal(
1355 service, 0/*!close_recvd*/) !=
1357 request_poll(state, service,
1359 } else if (service_flags &
1360 BIT(VCHIQ_POLL_TERMINATE)) {
1361 vchiq_log_info(vchiq_core_log_level,
1362 "%d: ps - terminate %d<->%d",
1363 state->id, service->localport,
1364 service->remoteport);
1365 if (vchiq_close_service_internal(
1366 service, 0/*!close_recvd*/) !=
1368 request_poll(state, service,
1369 VCHIQ_POLL_TERMINATE);
1371 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1372 notify_bulks(service,
1375 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1376 notify_bulks(service,
/* Drop the reference taken by find_service_by_port(). */
1379 unlock_service(service);
1384 /* Called by the slot handler thread */
/* Drain pending poll requests for every group of 32 services. */
1386 poll_services(struct vchiq_state *state)
1390 for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1391 poll_services_of_group(state, group);
1394 /* Called with the bulk_mutex held */
/*
 * abort_outstanding_bulks() - complete (as ABORTED) every bulk still in
 * flight on @queue, fabricating dummy remote/local halves so the two
 * insert indices catch up with the process index.
 * NOTE(review): loop tail and some fabricated-bulk lines are missing
 * from this extract.
 */
1396 abort_outstanding_bulks(struct vchiq_service *service,
1397 struct vchiq_bulk_queue *queue)
1399 int is_tx = (queue == &service->bulk_tx);
1401 vchiq_log_trace(vchiq_core_log_level,
1402 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1403 service->state->id, service->localport, is_tx ? 't' : 'r',
1404 queue->local_insert, queue->remote_insert, queue->process);
/* Indices are free-running; differences must be non-negative. */
1406 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1407 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1409 while ((queue->process != queue->local_insert) ||
1410 (queue->process != queue->remote_insert)) {
1411 struct vchiq_bulk *bulk =
1412 &queue->bulks[BULK_INDEX(queue->process)];
1414 if (queue->process == queue->remote_insert) {
1415 /* fabricate a matching dummy bulk */
1416 bulk->remote_data = NULL;
1417 bulk->remote_size = 0;
1418 queue->remote_insert++;
1421 if (queue->process != queue->local_insert) {
1422 vchiq_complete_bulk(bulk);
1424 vchiq_log_info(SRVTRACE_LEVEL(service),
1425 "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1426 is_tx ? "Send Bulk to" : "Recv Bulk from",
1427 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1428 service->remoteport,
1432 /* fabricate a matching dummy bulk */
1435 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1436 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1438 queue->local_insert++;
/*
 * Handle an incoming OPEN request from the remote side.
 *
 * Validates the payload, finds a listening service for the requested
 * fourcc, checks version compatibility, and replies with an OPENACK
 * (synchronous or queued, depending on service->sync).  Any failure to
 * find/accept the service results in a CLOSE reply.  Returns via
 * bail_not_ready when a queue operation reports VCHIQ_RETRY so the
 * caller (parse_rx_slots) can re-process the message later.
 */
1446 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1448 struct vchiq_service *service = NULL;
1450 unsigned int localport, remoteport;
1452 msgid = header->msgid;
1453 size = header->size;
1454 localport = VCHIQ_MSG_DSTPORT(msgid);
1455 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* Payload must at least contain fourcc + version fields. */
1456 if (size >= sizeof(struct vchiq_open_payload)) {
1457 const struct vchiq_open_payload *payload =
1458 (struct vchiq_open_payload *)header->data;
1459 unsigned int fourcc;
1461 fourcc = payload->fourcc;
1462 vchiq_log_info(vchiq_core_log_level,
1463 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1464 state->id, header, localport,
1465 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1467 service = get_listening_service(state, fourcc);
1470 /* A matching service exists */
1471 short version = payload->version;
1472 short version_min = payload->version_min;
/* Both sides must overlap: our version >= their minimum and
 * their version >= our minimum. */
1474 if ((service->version < version_min) ||
1475 (version < service->version_min)) {
1476 /* Version mismatch */
1477 vchiq_loud_error_header();
1478 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1479 "version mismatch - local (%d, min %d)"
1480 " vs. remote (%d, min %d)",
1481 state->id, service->localport,
1482 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1483 service->version, service->version_min,
1484 version, version_min);
1485 vchiq_loud_error_footer();
1486 unlock_service(service);
1490 service->peer_version = version;
1492 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1493 struct vchiq_openack_payload ack_payload = {
/* Older peers predate synchronous mode support. */
1497 if (state->version_common <
1498 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1501 /* Acknowledge the OPEN */
1502 if (service->sync) {
1503 if (queue_message_sync(
1510 memcpy_copy_callback,
1512 sizeof(ack_payload),
1514 goto bail_not_ready;
1516 if (queue_message(state,
1522 memcpy_copy_callback,
1524 sizeof(ack_payload),
1526 goto bail_not_ready;
1529 /* The service is now open */
1530 vchiq_set_service_state(service,
1531 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1532 : VCHIQ_SRVSTATE_OPEN);
1535 /* Success - the message has been dealt with */
1536 unlock_service(service);
1542 /* No available service, or an invalid request - send a CLOSE */
1543 if (queue_message(state, NULL,
1544 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1545 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1546 goto bail_not_ready;
1552 unlock_service(service);
1557 /* Called by the slot handler thread */
/*
 * Main receive-side parser: consume messages from the remote's slot queue
 * from state->rx_pos up to the remote's published tx_pos, dispatching on
 * the message type.  Maps a new slot when rx_pos crosses a slot boundary,
 * pre-claiming it (use_count = 1) so it cannot be recycled mid-parse.
 * Any handler that cannot make progress jumps to bail_not_ready so the
 * same message is retried on the next slot-handler wakeup.
 * NOTE(review): this extract elides lines (switch header, some break/goto
 * statements); comments annotate only the visible statements.
 */
1559 parse_rx_slots(struct vchiq_state *state)
1561 struct vchiq_shared_state *remote = state->remote;
1562 struct vchiq_service *service = NULL;
1565 DEBUG_INITIALISE(state->local)
/* Snapshot of the remote's write position; we parse up to here. */
1567 tx_pos = remote->tx_pos;
1569 while (state->rx_pos != tx_pos) {
1570 struct vchiq_header *header;
1573 unsigned int localport, remoteport;
1575 DEBUG_TRACE(PARSE_LINE);
/* Crossing into a new slot: map it and pin it for the parse. */
1576 if (!state->rx_data) {
1579 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1580 rx_index = remote->slot_queue[
1581 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1582 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1584 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1587 * Initialise use_count to one, and increment
1588 * release_count at the end of the slot to avoid
1589 * releasing the slot prematurely.
1591 state->rx_info->use_count = 1;
1592 state->rx_info->release_count = 0;
/* Decode the header at the current position within the slot. */
1595 header = (struct vchiq_header *)(state->rx_data +
1596 (state->rx_pos & VCHIQ_SLOT_MASK));
1597 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1598 msgid = header->msgid;
1599 DEBUG_VALUE(PARSE_MSGID, msgid);
1600 size = header->size;
1601 type = VCHIQ_MSG_TYPE(msgid);
1602 localport = VCHIQ_MSG_DSTPORT(msgid);
1603 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1605 if (type != VCHIQ_MSG_DATA)
1606 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* These message types address a specific local service; resolve it
 * up front so each case below can use 'service' directly. */
1609 case VCHIQ_MSG_OPENACK:
1610 case VCHIQ_MSG_CLOSE:
1611 case VCHIQ_MSG_DATA:
1612 case VCHIQ_MSG_BULK_RX:
1613 case VCHIQ_MSG_BULK_TX:
1614 case VCHIQ_MSG_BULK_RX_DONE:
1615 case VCHIQ_MSG_BULK_TX_DONE:
1616 service = find_service_by_port(state, localport);
1618 ((service->remoteport != remoteport) &&
1619 (service->remoteport != VCHIQ_PORT_FREE))) &&
1621 (type == VCHIQ_MSG_CLOSE)) {
1623 * This could be a CLOSE from a client which
1624 * hadn't yet received the OPENACK - look for
1625 * the connected service
1628 unlock_service(service);
1629 service = get_connected_service(state,
1632 vchiq_log_warning(vchiq_core_log_level,
1633 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1634 state->id, msg_type_str(type),
1635 header, remoteport, localport,
1636 service->localport);
1640 vchiq_log_error(vchiq_core_log_level,
1641 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1642 state->id, msg_type_str(type),
1643 header, remoteport, localport,
/* Optional per-service message tracing/dump. */
1652 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1655 svc_fourcc = service
1656 ? service->base.fourcc
1657 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1658 vchiq_log_info(SRVTRACE_LEVEL(service),
1659 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1660 msg_type_str(type), type,
1661 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1662 remoteport, localport, size);
1664 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Sanity: a message must never straddle a slot boundary. */
1668 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1669 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1670 vchiq_log_error(vchiq_core_log_level,
1671 "header %pK (msgid %x) - size %x too big for slot",
1672 header, (unsigned int)msgid,
1673 (unsigned int)size);
1674 WARN(1, "oversized for slot\n");
1678 case VCHIQ_MSG_OPEN:
1679 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1680 if (!parse_open(state, header))
1681 goto bail_not_ready;
1683 case VCHIQ_MSG_OPENACK:
1684 if (size >= sizeof(struct vchiq_openack_payload)) {
1685 const struct vchiq_openack_payload *payload =
1686 (struct vchiq_openack_payload *)
1688 service->peer_version = payload->version;
1690 vchiq_log_info(vchiq_core_log_level,
1691 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1692 state->id, header, size, remoteport, localport,
1693 service->peer_version);
1694 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1695 service->remoteport = remoteport;
1696 vchiq_set_service_state(service,
1697 VCHIQ_SRVSTATE_OPEN);
/* Wake the opener blocked in vchiq_open_service_internal(). */
1698 complete(&service->remove_event);
1700 vchiq_log_error(vchiq_core_log_level,
1701 "OPENACK received in state %s",
1702 srvstate_names[service->srvstate]);
1705 case VCHIQ_MSG_CLOSE:
1706 WARN_ON(size != 0); /* There should be no data */
1708 vchiq_log_info(vchiq_core_log_level,
1709 "%d: prs CLOSE@%pK (%d->%d)",
1710 state->id, header, remoteport, localport);
1712 mark_service_closing_internal(service, 1);
1714 if (vchiq_close_service_internal(service,
1715 1/*close_recvd*/) == VCHIQ_RETRY)
1716 goto bail_not_ready;
1718 vchiq_log_info(vchiq_core_log_level,
1719 "Close Service %c%c%c%c s:%u d:%d",
1720 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1722 service->remoteport);
1724 case VCHIQ_MSG_DATA:
1725 vchiq_log_info(vchiq_core_log_level,
1726 "%d: prs DATA@%pK,%x (%d->%d)",
1727 state->id, header, size, remoteport, localport);
1729 if ((service->remoteport == remoteport) &&
1730 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
/* Mark claimed and pin the slot: the service callback may
 * hold on to the header until vchiq_release_message(). */
1731 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1732 claim_slot(state->rx_info);
1733 DEBUG_TRACE(PARSE_LINE);
1734 if (make_service_callback(service,
1735 VCHIQ_MESSAGE_AVAILABLE, header,
1736 NULL) == VCHIQ_RETRY) {
1737 DEBUG_TRACE(PARSE_LINE);
1738 goto bail_not_ready;
1740 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1741 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1744 VCHIQ_STATS_INC(state, error_count);
1747 case VCHIQ_MSG_CONNECT:
1748 vchiq_log_info(vchiq_core_log_level,
1749 "%d: prs CONNECT@%pK", state->id, header);
/* Negotiated common version lives in slot zero. */
1750 state->version_common = ((struct vchiq_slot_zero *)
1751 state->slot_data)->version;
1752 complete(&state->connect);
1754 case VCHIQ_MSG_BULK_RX:
1755 case VCHIQ_MSG_BULK_TX:
1757 * We should never receive a bulk request from the
1758 * other side since we're not setup to perform as the
1763 case VCHIQ_MSG_BULK_RX_DONE:
1764 case VCHIQ_MSG_BULK_TX_DONE:
1765 if ((service->remoteport == remoteport) &&
1766 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1767 struct vchiq_bulk_queue *queue;
1768 struct vchiq_bulk *bulk;
1770 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1771 &service->bulk_rx : &service->bulk_tx;
1773 DEBUG_TRACE(PARSE_LINE);
1774 if (mutex_lock_killable(&service->bulk_mutex)) {
1775 DEBUG_TRACE(PARSE_LINE);
1776 goto bail_not_ready;
/* A DONE must correspond to a bulk we issued:
 * remote_insert must still trail local_insert. */
1778 if ((int)(queue->remote_insert -
1779 queue->local_insert) >= 0) {
1780 vchiq_log_error(vchiq_core_log_level,
1781 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1782 state->id, msg_type_str(type),
1783 header, remoteport, localport,
1784 queue->remote_insert,
1785 queue->local_insert);
1786 mutex_unlock(&service->bulk_mutex);
1789 if (queue->process != queue->remote_insert) {
1790 pr_err("%s: p %x != ri %x\n",
1793 queue->remote_insert);
1794 mutex_unlock(&service->bulk_mutex);
1795 goto bail_not_ready;
/* The remote reports the actual byte count in the payload. */
1798 bulk = &queue->bulks[
1799 BULK_INDEX(queue->remote_insert)];
1800 bulk->actual = *(int *)header->data;
1801 queue->remote_insert++;
1803 vchiq_log_info(vchiq_core_log_level,
1804 "%d: prs %s@%pK (%d->%d) %x@%pad",
1805 state->id, msg_type_str(type),
1806 header, remoteport, localport,
1807 bulk->actual, &bulk->data);
1809 vchiq_log_trace(vchiq_core_log_level,
1810 "%d: prs:%d %cx li=%x ri=%x p=%x",
1811 state->id, localport,
1812 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1814 queue->local_insert,
1815 queue->remote_insert, queue->process);
1817 DEBUG_TRACE(PARSE_LINE);
1818 WARN_ON(queue->process == queue->local_insert);
1819 vchiq_complete_bulk(bulk);
1821 mutex_unlock(&service->bulk_mutex);
1822 DEBUG_TRACE(PARSE_LINE);
1823 notify_bulks(service, queue, 1/*retry_poll*/);
1824 DEBUG_TRACE(PARSE_LINE);
1827 case VCHIQ_MSG_PADDING:
1828 vchiq_log_trace(vchiq_core_log_level,
1829 "%d: prs PADDING@%pK,%x",
1830 state->id, header, size);
1832 case VCHIQ_MSG_PAUSE:
1833 /* If initiated, signal the application thread */
1834 vchiq_log_trace(vchiq_core_log_level,
1835 "%d: prs PAUSE@%pK,%x",
1836 state->id, header, size);
1837 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1838 vchiq_log_error(vchiq_core_log_level,
1839 "%d: PAUSE received in state PAUSED",
1843 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1844 /* Send a PAUSE in response */
1845 if (queue_message(state, NULL,
1846 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1847 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1849 goto bail_not_ready;
1851 /* At this point slot_mutex is held */
1852 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1854 case VCHIQ_MSG_RESUME:
1855 vchiq_log_trace(vchiq_core_log_level,
1856 "%d: prs RESUME@%pK,%x",
1857 state->id, header, size);
1858 /* Release the slot mutex */
1859 mutex_unlock(&state->slot_mutex);
1860 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1863 case VCHIQ_MSG_REMOTE_USE:
1864 vchiq_on_remote_use(state);
1866 case VCHIQ_MSG_REMOTE_RELEASE:
1867 vchiq_on_remote_release(state);
1869 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1873 vchiq_log_error(vchiq_core_log_level,
1874 "%d: prs invalid msgid %x@%pK,%x",
1875 state->id, msgid, header, size);
1876 WARN(1, "invalid message\n");
1882 unlock_service(service);
/* Advance past this message (headers are stride-aligned). */
1886 state->rx_pos += calc_stride(size);
1888 DEBUG_TRACE(PARSE_LINE);
1890 * Perform some housekeeping when the end of the slot is
1893 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1894 /* Remove the extra reference count. */
1895 release_slot(state, state->rx_info, NULL, NULL);
1896 state->rx_data = NULL;
/* bail path: drop the service ref before returning for retry. */
1902 unlock_service(service);
1905 /* Called by the slot handler thread */
/*
 * Kernel-thread body for the per-state slot handler.  Loops waiting on
 * trigger_event, then handles any requested service polls and rare
 * connection-state transitions (PAUSING/RESUMING) before parsing newly
 * arrived receive slots.
 */
1907 slot_handler_func(void *v)
1909 struct vchiq_state *state = v;
1910 struct vchiq_shared_state *local = state->local;
1912 DEBUG_INITIALISE(local)
1915 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1916 DEBUG_TRACE(SLOT_HANDLER_LINE);
/* Sleep until the remote (or a local request_poll) signals us. */
1917 remote_event_wait(&state->trigger_event, &local->trigger);
1921 DEBUG_TRACE(SLOT_HANDLER_LINE);
1922 if (state->poll_needed) {
1924 state->poll_needed = 0;
1927 * Handle service polling and other rare conditions here
1928 * out of the mainline code
1930 switch (state->conn_state) {
1931 case VCHIQ_CONNSTATE_CONNECTED:
1932 /* Poll the services as requested */
1933 poll_services(state);
1936 case VCHIQ_CONNSTATE_PAUSING:
1937 if (queue_message(state, NULL,
1938 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1940 QMFLAGS_NO_MUTEX_UNLOCK)
1942 vchiq_set_conn_state(state,
1943 VCHIQ_CONNSTATE_PAUSE_SENT);
/* Couldn't send yet - leave poll_needed set to retry. */
1946 state->poll_needed = 1;
1950 case VCHIQ_CONNSTATE_RESUMING:
1951 if (queue_message(state, NULL,
1952 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1953 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1955 vchiq_set_conn_state(state,
1956 VCHIQ_CONNSTATE_CONNECTED);
1959 * This should really be impossible,
1960 * since the PAUSE should have flushed
1961 * through outstanding messages.
1963 vchiq_log_error(vchiq_core_log_level,
1964 "Failed to send RESUME message");
1973 DEBUG_TRACE(SLOT_HANDLER_LINE);
1974 parse_rx_slots(state);
1979 /* Called by the recycle thread */
/*
 * Kernel-thread body for slot recycling: waits on recycle_event and then
 * processes the free queue.  'found' is a scratch bitset (one bit per
 * possible service) allocated once and reused across iterations.
 */
1981 recycle_func(void *v)
1983 struct vchiq_state *state = v;
1984 struct vchiq_shared_state *local = state->local;
1988 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
1990 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
1996 remote_event_wait(&state->recycle_event, &local->recycle);
1998 process_free_queue(state, found, length);
2003 /* Called by the sync thread */
/*
 * Kernel-thread body for the synchronous message channel.  A single
 * dedicated slot (remote->slot_sync) carries one message at a time;
 * the thread waits on sync_trigger_event, decodes the header in place,
 * and dispatches OPENACK/DATA to the addressed service.  Unlike the
 * main channel, messages are released with release_message_sync().
 */
2007 struct vchiq_state *state = v;
2008 struct vchiq_shared_state *local = state->local;
2009 struct vchiq_header *header =
2010 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2011 state->remote->slot_sync);
2014 struct vchiq_service *service;
2017 unsigned int localport, remoteport;
2019 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2023 msgid = header->msgid;
2024 size = header->size;
2025 type = VCHIQ_MSG_TYPE(msgid);
2026 localport = VCHIQ_MSG_DSTPORT(msgid);
2027 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2029 service = find_service_by_port(state, localport);
/* Unknown/closed destination: log, release the slot, and carry on. */
2032 vchiq_log_error(vchiq_sync_log_level,
2033 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2034 state->id, msg_type_str(type),
2035 header, remoteport, localport, localport);
2036 release_message_sync(state, header);
2040 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2043 svc_fourcc = service
2044 ? service->base.fourcc
2045 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2046 vchiq_log_trace(vchiq_sync_log_level,
2047 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2049 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2050 remoteport, localport, size);
2052 vchiq_log_dump_mem("Rcvd", 0, header->data,
2057 case VCHIQ_MSG_OPENACK:
2058 if (size >= sizeof(struct vchiq_openack_payload)) {
2059 const struct vchiq_openack_payload *payload =
2060 (struct vchiq_openack_payload *)
2062 service->peer_version = payload->version;
2064 vchiq_log_info(vchiq_sync_log_level,
2065 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2066 state->id, header, size, remoteport, localport,
2067 service->peer_version);
2068 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2069 service->remoteport = remoteport;
2070 vchiq_set_service_state(service,
2071 VCHIQ_SRVSTATE_OPENSYNC);
/* Wake the blocked opener. */
2073 complete(&service->remove_event);
2075 release_message_sync(state, header);
2078 case VCHIQ_MSG_DATA:
2079 vchiq_log_trace(vchiq_sync_log_level,
2080 "%d: sf DATA@%pK,%x (%d->%d)",
2081 state->id, header, size, remoteport, localport);
2083 if ((service->remoteport == remoteport) &&
2084 (service->srvstate ==
2085 VCHIQ_SRVSTATE_OPENSYNC)) {
2086 if (make_service_callback(service,
2087 VCHIQ_MESSAGE_AVAILABLE, header,
2088 NULL) == VCHIQ_RETRY)
2089 vchiq_log_error(vchiq_sync_log_level,
2090 "synchronous callback to service %d returns VCHIQ_RETRY",
2096 vchiq_log_error(vchiq_sync_log_level,
2097 "%d: sf unexpected msgid %x@%pK,%x",
2098 state->id, msgid, header, size);
2099 release_message_sync(state, header);
2103 unlock_service(service);
/* Reset a bulk queue's free-running indices to the empty state. */
2110 init_bulk_queue(struct vchiq_bulk_queue *queue)
2112 queue->local_insert = 0;
2113 queue->remote_insert = 0;
2115 queue->remote_notify = 0;
/* Map a connection-state enum to its printable name (no bounds check;
 * callers must pass a valid enum vchiq_connstate value). */
2120 get_conn_state_name(enum vchiq_connstate conn_state)
2122 return conn_state_names[conn_state];
2125 struct vchiq_slot_zero *
/*
 * Carve the shared-memory region into VCHIQ slots and initialise slot
 * zero, which holds the protocol header and the master/slave shared
 * state.  The base is aligned up to VCHIQ_SLOT_SIZE; the remaining data
 * slots are split evenly between the two sides, each side getting one
 * dedicated sync slot followed by its slot range.  Returns NULL (via the
 * elided error path) when fewer than 4 usable data slots remain.
 */
2126 vchiq_init_slots(void *mem_base, int mem_size)
2129 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2130 struct vchiq_slot_zero *slot_zero =
2131 (struct vchiq_slot_zero *)(mem_base + mem_align);
2132 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2133 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2135 /* Ensure there is enough memory to run an absolutely minimum system */
2136 num_slots -= first_data_slot;
2138 if (num_slots < 4) {
2139 vchiq_log_error(vchiq_core_log_level,
2140 "%s - insufficient memory %x bytes",
2141 __func__, mem_size);
2145 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2147 slot_zero->magic = VCHIQ_MAGIC;
2148 slot_zero->version = VCHIQ_VERSION;
2149 slot_zero->version_min = VCHIQ_VERSION_MIN;
2150 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2151 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2152 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2153 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* First half of the data slots to the master, second to the slave;
 * each side's first slot is its synchronous-channel slot. */
2155 slot_zero->master.slot_sync = first_data_slot;
2156 slot_zero->master.slot_first = first_data_slot + 1;
2157 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2158 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2159 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2160 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * Initialise a vchiq_state over an already-initialised slot-zero region
 * (this side acts as the slave; the VideoCore is the master).  Sets up
 * completions, mutexes, per-service quotas, the local slot queue and
 * remote events, then starts the three worker kthreads (slot handler,
 * recycler, sync) and finally publishes readiness via local->initialised.
 *
 * Error handling unwinds already-started threads via the fail_* labels.
 * NOTE(review): this extract elides lines; only visible statements were
 * modified.
 *
 * Fix: line 2221 read 'init_completion("a->quota_event);' - an invalid
 * token sequence produced by an HTML-entity mis-decode of '&quot' inside
 * '&quota->quota_event'.  Restored to the intended
 * 'init_completion(&quota->quota_event);', matching the 'quota' local
 * declared immediately above.
 */
2166 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2168 struct vchiq_shared_state *local;
2169 struct vchiq_shared_state *remote;
2170 char threadname[16];
/* Only a single state instance is supported. */
2173 if (vchiq_states[0]) {
2174 pr_err("%s: VCHIQ state already initialized\n", __func__);
2178 local = &slot_zero->slave;
2179 remote = &slot_zero->master;
2181 if (local->initialised) {
2182 vchiq_loud_error_header();
2183 if (remote->initialised)
2184 vchiq_loud_error("local state has already been initialised");
2186 vchiq_loud_error("master/slave mismatch two slaves");
2187 vchiq_loud_error_footer();
2191 memset(state, 0, sizeof(struct vchiq_state));
2194 * initialize shared state pointers
2197 state->local = local;
2198 state->remote = remote;
2199 state->slot_data = (struct vchiq_slot *)slot_zero;
2202 * initialize events and mutexes
2205 init_completion(&state->connect);
2206 mutex_init(&state->mutex);
2207 mutex_init(&state->slot_mutex);
2208 mutex_init(&state->recycle_mutex);
2209 mutex_init(&state->sync_mutex);
2210 mutex_init(&state->bulk_transfer_mutex);
2212 init_completion(&state->slot_available_event);
2213 init_completion(&state->slot_remove_event);
2214 init_completion(&state->data_quota_event);
2216 state->slot_queue_available = 0;
2218 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2219 struct vchiq_service_quota *quota =
2220 &state->service_quotas[i];
2221 init_completion(&quota->quota_event);
2224 for (i = local->slot_first; i <= local->slot_last; i++) {
2225 local->slot_queue[state->slot_queue_available] = i;
2226 state->slot_queue_available++;
2227 complete(&state->slot_available_event);
/* Default quotas derived from the number of local slots. */
2230 state->default_slot_quota = state->slot_queue_available/2;
2231 state->default_message_quota =
2232 min((unsigned short)(state->default_slot_quota * 256),
2233 (unsigned short)~0);
2235 state->previous_data_index = -1;
2236 state->data_use_count = 0;
2237 state->data_quota = state->slot_queue_available - 1;
2239 remote_event_create(&state->trigger_event, &local->trigger);
2241 remote_event_create(&state->recycle_event, &local->recycle);
2242 local->slot_queue_recycle = state->slot_queue_available;
2243 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2244 remote_event_create(&state->sync_release_event, &local->sync_release);
2246 /* At start-of-day, the slot is empty and available */
2247 ((struct vchiq_header *)
2248 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2249 VCHIQ_MSGID_PADDING;
2250 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2252 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2254 ret = vchiq_platform_init_state(state);
2259 * bring up slot handler thread
2261 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2262 state->slot_handler_thread = kthread_create(&slot_handler_func,
2266 if (IS_ERR(state->slot_handler_thread)) {
2267 vchiq_loud_error_header();
2268 vchiq_loud_error("couldn't create thread %s", threadname);
2269 vchiq_loud_error_footer();
2270 return PTR_ERR(state->slot_handler_thread);
2272 set_user_nice(state->slot_handler_thread, -19);
2274 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2275 state->recycle_thread = kthread_create(&recycle_func,
2278 if (IS_ERR(state->recycle_thread)) {
2279 vchiq_loud_error_header();
2280 vchiq_loud_error("couldn't create thread %s", threadname);
2281 vchiq_loud_error_footer();
2282 ret = PTR_ERR(state->recycle_thread);
2283 goto fail_free_handler_thread;
2285 set_user_nice(state->recycle_thread, -19);
2287 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2288 state->sync_thread = kthread_create(&sync_func,
2291 if (IS_ERR(state->sync_thread)) {
2292 vchiq_loud_error_header();
2293 vchiq_loud_error("couldn't create thread %s", threadname);
2294 vchiq_loud_error_footer();
2295 ret = PTR_ERR(state->sync_thread);
2296 goto fail_free_recycle_thread;
2298 set_user_nice(state->sync_thread, -20);
2300 wake_up_process(state->slot_handler_thread);
2301 wake_up_process(state->recycle_thread);
2302 wake_up_process(state->sync_thread);
2304 vchiq_states[0] = state;
2306 /* Indicate readiness to the other side */
2307 local->initialised = 1;
2311 fail_free_recycle_thread:
2312 kthread_stop(state->recycle_thread);
2313 fail_free_handler_thread:
2314 kthread_stop(state->slot_handler_thread);
/*
 * Append a message header to a service's ring of held messages.  Blocks
 * (interruptibly; signals are flushed and the wait retried) while the
 * ring is full, then stores the header and signals msg_queue_push for a
 * reader blocked in vchiq_msg_hold().  Ring capacity is VCHIQ_MAX_SLOTS;
 * read/write indices are free-running and masked on access.
 */
2319 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2321 struct vchiq_service *service = find_service_by_handle(handle);
2324 while (service->msg_queue_write == service->msg_queue_read +
2326 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2327 flush_signals(current);
2330 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2331 service->msg_queue_write++;
2332 service->msg_queue[pos] = header;
2334 complete(&service->msg_queue_push);
2336 EXPORT_SYMBOL(vchiq_msg_queue_push);
/*
 * Take the oldest held message off a service's message ring.  The first
 * empty-check appears to allow an early non-blocking return (the elided
 * branch presumably returns NULL - TODO confirm against full source);
 * otherwise the caller blocks (interruptibly, flushing signals) until
 * vchiq_msg_queue_push() supplies one.  Signals msg_queue_pop so a full
 * writer can make progress.
 */
2338 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2340 struct vchiq_service *service = find_service_by_handle(handle);
2341 struct vchiq_header *header;
2344 if (service->msg_queue_write == service->msg_queue_read)
2347 while (service->msg_queue_write == service->msg_queue_read) {
2348 if (wait_for_completion_interruptible(&service->msg_queue_push))
2349 flush_signals(current);
2352 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2353 service->msg_queue_read++;
2354 header = service->msg_queue[pos];
2356 complete(&service->msg_queue_pop);
2360 EXPORT_SYMBOL(vchiq_msg_hold);
/* Reject service parameters missing a callback or fourcc; the error
 * return value is on an elided line. */
2362 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2364 if (!params->callback || !params->fourcc) {
2365 vchiq_loud_error("Can't add service, invalid params\n");
2372 /* Called from application thread when a client or server service is created. */
/*
 * Allocate and register a new service on @state.  Validates @params,
 * initialises all service fields (ports unassigned, ref_count 1), then,
 * under state->mutex, picks a free slot in state->services[]: clients
 * (OPENING) reuse the lowest free entry, servers scan from the top and
 * also reject a duplicate public fourcc registered by a different
 * instance/callback.  The service handle encodes a sequence counter plus
 * state and port indices.  Returns the service with one reference held
 * (not unlocked), or NULL on failure (elided paths).
 */
2373 struct vchiq_service *
2374 vchiq_add_service_internal(struct vchiq_state *state,
2375 const struct vchiq_service_params_kernel *params,
2376 int srvstate, struct vchiq_instance *instance,
2377 vchiq_userdata_term userdata_term)
2379 struct vchiq_service *service;
2380 struct vchiq_service __rcu **pservice = NULL;
2381 struct vchiq_service_quota *quota;
2385 ret = vchiq_validate_params(params);
2389 service = kmalloc(sizeof(*service), GFP_KERNEL);
2393 service->base.fourcc = params->fourcc;
2394 service->base.callback = params->callback;
2395 service->base.userdata = params->userdata;
2396 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2397 kref_init(&service->ref_count);
2398 service->srvstate = VCHIQ_SRVSTATE_FREE;
2399 service->userdata_term = userdata_term;
2400 service->localport = VCHIQ_PORT_FREE;
2401 service->remoteport = VCHIQ_PORT_FREE;
/* Clients keep their fourcc private; servers advertise it. */
2403 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2404 VCHIQ_FOURCC_INVALID : params->fourcc;
2405 service->client_id = 0;
2406 service->auto_close = 1;
2408 service->closing = 0;
2410 atomic_set(&service->poll_flags, 0);
2411 service->version = params->version;
2412 service->version_min = params->version_min;
2413 service->state = state;
2414 service->instance = instance;
2415 service->service_use_count = 0;
2416 service->msg_queue_read = 0;
2417 service->msg_queue_write = 0;
2418 init_bulk_queue(&service->bulk_tx);
2419 init_bulk_queue(&service->bulk_rx);
2420 init_completion(&service->remove_event);
2421 init_completion(&service->bulk_remove_event);
2422 init_completion(&service->msg_queue_pop);
2423 init_completion(&service->msg_queue_push);
2424 mutex_init(&service->bulk_mutex);
2425 memset(&service->stats, 0, sizeof(service->stats));
2426 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2429 * Although it is perfectly possible to use a spinlock
2430 * to protect the creation of services, it is overkill as it
2431 * disables interrupts while the array is searched.
2432 * The only danger is of another thread trying to create a
2433 * service - service deletion is safe.
2434 * Therefore it is preferable to use state->mutex which,
2435 * although slower to claim, doesn't block interrupts while
2439 mutex_lock(&state->mutex);
2441 /* Prepare to use a previously unused service */
2442 if (state->unused_service < VCHIQ_MAX_SERVICES)
2443 pservice = &state->services[state->unused_service];
2445 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2446 for (i = 0; i < state->unused_service; i++) {
2447 if (!rcu_access_pointer(state->services[i])) {
2448 pservice = &state->services[i];
/* Server path: scan downward, remembering a free entry while
 * checking for a conflicting registration of the same fourcc. */
2454 for (i = (state->unused_service - 1); i >= 0; i--) {
2455 struct vchiq_service *srv;
2457 srv = rcu_dereference(state->services[i]);
2459 pservice = &state->services[i];
2460 } else if ((srv->public_fourcc == params->fourcc) &&
2461 ((srv->instance != instance) ||
2462 (srv->base.callback != params->callback))) {
2464 * There is another server using this
2465 * fourcc which doesn't match.
2475 service->localport = (pservice - state->services);
2477 handle_seq = VCHIQ_MAX_STATES *
2479 service->handle = handle_seq |
2480 (state->id * VCHIQ_MAX_SERVICES) |
2482 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2483 rcu_assign_pointer(*pservice, service);
2484 if (pservice == &state->services[state->unused_service])
2485 state->unused_service++;
2488 mutex_unlock(&state->mutex);
2495 quota = &state->service_quotas[service->localport];
2496 quota->slot_quota = state->default_slot_quota;
2497 quota->message_quota = state->default_message_quota;
2498 if (quota->slot_use_count == 0)
2499 quota->previous_tx_index =
2500 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2503 /* Bring this service online */
2504 vchiq_set_service_state(service, srvstate);
2506 vchiq_log_info(vchiq_core_msg_log_level,
2507 "%s Service %c%c%c%c SrcPort:%d",
2508 (srvstate == VCHIQ_SRVSTATE_OPENING)
2510 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2511 service->localport);
2513 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * Client-side open: send an OPEN message carrying our fourcc and version
 * range, then block on remove_event until the remote answers (OPENACK
 * flips the state to OPEN/OPENSYNC, CLOSE leaves it elsewhere).  Returns
 * VCHIQ_RETRY if interrupted, VCHIQ_ERROR if the open was refused; the
 * use-count taken up front is released on every failure path.
 */
2519 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2521 struct vchiq_open_payload payload = {
2522 service->base.fourcc,
2525 service->version_min
2527 enum vchiq_status status = VCHIQ_SUCCESS;
2529 service->client_id = client_id;
2530 vchiq_use_service_internal(service);
2531 status = queue_message(service->state,
2533 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2536 memcpy_copy_callback,
2539 QMFLAGS_IS_BLOCKING);
2541 if (status != VCHIQ_SUCCESS)
2544 /* Wait for the ACK/NAK */
2545 if (wait_for_completion_interruptible(&service->remove_event)) {
2546 status = VCHIQ_RETRY;
2547 vchiq_release_service_internal(service);
2548 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2549 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
/* CLOSEWAIT is the expected refusal state; anything else is logged. */
2550 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2551 vchiq_log_error(vchiq_core_log_level,
2552 "%d: osi - srvstate = %s (ref %u)",
2554 srvstate_names[service->srvstate],
2555 kref_read(&service->ref_count));
2556 status = VCHIQ_ERROR;
2557 VCHIQ_SERVICE_STATS_INC(service, error_count);
2558 vchiq_release_service_internal(service);
/*
 * Release every message still claimed on behalf of a dying service: the
 * sync slot (if this is a sync service and the held message targets it),
 * then a scan of all remote data slots with outstanding references,
 * walking the headers in each and releasing those addressed to this
 * service's local port and marked VCHIQ_MSGID_CLAIMED.  A slot currently
 * being parsed is only scanned up to the live read position.
 */
2565 release_service_messages(struct vchiq_service *service)
2567 struct vchiq_state *state = service->state;
2568 int slot_last = state->remote->slot_last;
2571 /* Release any claimed messages aimed at this service */
2573 if (service->sync) {
2574 struct vchiq_header *header =
2575 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2576 state->remote->slot_sync);
2577 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2578 release_message_sync(state, header);
2583 for (i = state->remote->slot_first; i <= slot_last; i++) {
2584 struct vchiq_slot_info *slot_info =
2585 SLOT_INFO_FROM_INDEX(state, i);
/* Only slots with unreleased claims need scanning. */
2586 if (slot_info->release_count != slot_info->use_count) {
2588 (char *)SLOT_DATA_FROM_INDEX(state, i);
2589 unsigned int pos, end;
2591 end = VCHIQ_SLOT_SIZE;
2592 if (data == state->rx_data)
2594 * This buffer is still being read from - stop
2595 * at the current read position
2597 end = state->rx_pos & VCHIQ_SLOT_MASK;
2602 struct vchiq_header *header =
2603 (struct vchiq_header *)(data + pos);
2604 int msgid = header->msgid;
2605 int port = VCHIQ_MSG_DSTPORT(msgid);
2607 if ((port == service->localport) &&
2608 (msgid & VCHIQ_MSGID_CLAIMED)) {
2609 vchiq_log_info(vchiq_core_log_level,
2610 " fsi - hdr %pK", header);
2611 release_slot(state, slot_info, header,
2614 pos += calc_stride(header->size);
/* A stride past the slot end means corrupt headers. */
2615 if (pos > VCHIQ_SLOT_SIZE) {
2616 vchiq_log_error(vchiq_core_log_level,
2617 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2619 header->msgid, header->size);
2620 WARN(1, "invalid slot position\n");
/*
 * Abort and then notify both bulk queues (tx and rx) of a service.
 * Takes bulk_mutex around the aborts (returning early - elided line -
 * if the lock is killed), then notifies without retry-polling.  Returns
 * nonzero (true) only when both notifications succeed.
 */
2628 do_abort_bulks(struct vchiq_service *service)
2630 enum vchiq_status status;
2632 /* Abort any outstanding bulk transfers */
2633 if (mutex_lock_killable(&service->bulk_mutex))
2635 abort_outstanding_bulks(service, &service->bulk_tx);
2636 abort_outstanding_bulks(service, &service->bulk_rx);
2637 mutex_unlock(&service->bulk_mutex);
2639 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2640 if (status != VCHIQ_SUCCESS)
2643 status = notify_bulks(service, &service->bulk_rx, 0/*!retry_poll*/);
2644 return (status == VCHIQ_SUCCESS);
2647 static enum vchiq_status
/*
 * Final stage of closing a service.  Moves the service to its post-close
 * state (servers with auto_close return to LISTENING, others to
 * CLOSEWAIT/CLOSED), delivers the VCHIQ_SERVICE_CLOSED callback, and -
 * unless the callback asked for a retry - releases any leftover use
 * counts, frees a CLOSED service, and wakes waiters on remove_event.
 * On VCHIQ_RETRY from the callback the service is parked in @failstate
 * so the close can be re-attempted.
 */
2648 close_service_complete(struct vchiq_service *service, int failstate)
2650 enum vchiq_status status;
2651 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2654 switch (service->srvstate) {
2655 case VCHIQ_SRVSTATE_OPEN:
2656 case VCHIQ_SRVSTATE_CLOSESENT:
2657 case VCHIQ_SRVSTATE_CLOSERECVD:
2659 if (service->auto_close) {
2660 service->client_id = 0;
2661 service->remoteport = VCHIQ_PORT_FREE;
2662 newstate = VCHIQ_SRVSTATE_LISTENING;
2664 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2667 newstate = VCHIQ_SRVSTATE_CLOSED;
2669 vchiq_set_service_state(service, newstate);
2671 case VCHIQ_SRVSTATE_LISTENING:
2674 vchiq_log_error(vchiq_core_log_level,
2675 "%s(%x) called in state %s", __func__,
2676 service->handle, srvstate_names[service->srvstate]);
2677 WARN(1, "%s in unexpected state\n", __func__);
2681 status = make_service_callback(service,
2682 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2684 if (status != VCHIQ_RETRY) {
2685 int uc = service->service_use_count;
2687 /* Complete the close process */
2688 for (i = 0; i < uc; i++)
2690 * cater for cases where close is forced and the
2691 * client may not close all it's handles
2693 vchiq_release_service_internal(service);
2695 service->client_id = 0;
2696 service->remoteport = VCHIQ_PORT_FREE;
2698 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2699 vchiq_free_service_internal(service);
2700 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2702 service->closing = 0;
2704 complete(&service->remove_event);
/* Callback asked for retry: park in the caller-supplied state. */
2707 vchiq_set_service_state(service, failstate);
2713 /* Called by the slot handler */
/*
 * Drive the close state machine for @service. @close_recvd is non-zero
 * when the remote end has already sent a CLOSE for this service.
 * Returns VCHIQ_SUCCESS, VCHIQ_ERROR, or VCHIQ_RETRY (caller must repeat).
 * NOTE(review): interior lines are elided in this listing; lock pairing
 * (slot_mutex/sync_mutex) should be verified against the full source.
 */
2715 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2717 struct vchiq_state *state = service->state;
2718 enum vchiq_status status = VCHIQ_SUCCESS;
2719 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2721 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2722 service->state->id, service->localport, close_recvd,
2723 srvstate_names[service->srvstate]);
2725 switch (service->srvstate) {
2726 case VCHIQ_SRVSTATE_CLOSED:
2727 case VCHIQ_SRVSTATE_HIDDEN:
2728 case VCHIQ_SRVSTATE_LISTENING:
2729 case VCHIQ_SRVSTATE_CLOSEWAIT:
/* These states are already "closed enough"; a remote CLOSE here is an
 * error, while a local close just tidies the service up. */
2731 vchiq_log_error(vchiq_core_log_level,
2732 "%s(1) called in state %s",
2733 __func__, srvstate_names[service->srvstate]);
2734 } else if (is_server) {
2735 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2736 status = VCHIQ_ERROR;
2738 service->client_id = 0;
2739 service->remoteport = VCHIQ_PORT_FREE;
2740 if (service->srvstate ==
2741 VCHIQ_SRVSTATE_CLOSEWAIT)
2742 vchiq_set_service_state(service,
2743 VCHIQ_SRVSTATE_LISTENING);
2745 complete(&service->remove_event);
2747 vchiq_free_service_internal(service);
2750 case VCHIQ_SRVSTATE_OPENING:
2752 /* The open was rejected - tell the user */
2753 vchiq_set_service_state(service,
2754 VCHIQ_SRVSTATE_CLOSEWAIT);
2755 complete(&service->remove_event);
2757 /* Shutdown mid-open - let the other side know */
2758 status = queue_message(state, service,
2762 VCHIQ_MSG_DSTPORT(service->remoteport)),
2767 case VCHIQ_SRVSTATE_OPENSYNC:
/* Sync services serialise on sync_mutex for the whole close. */
2768 mutex_lock(&state->sync_mutex);
2770 case VCHIQ_SRVSTATE_OPEN:
2772 if (!do_abort_bulks(service))
2773 status = VCHIQ_RETRY;
2776 release_service_messages(service);
2778 if (status == VCHIQ_SUCCESS)
2779 status = queue_message(state, service,
2783 VCHIQ_MSG_DSTPORT(service->remoteport)),
2784 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2786 if (status != VCHIQ_SUCCESS) {
2787 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2788 mutex_unlock(&state->sync_mutex);
2793 /* Change the state while the mutex is still held */
2794 vchiq_set_service_state(service,
2795 VCHIQ_SRVSTATE_CLOSESENT);
2796 mutex_unlock(&state->slot_mutex);
2798 mutex_unlock(&state->sync_mutex);
2802 /* Change the state while the mutex is still held */
2803 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2804 mutex_unlock(&state->slot_mutex);
2806 mutex_unlock(&state->sync_mutex);
2808 status = close_service_complete(service,
2809 VCHIQ_SRVSTATE_CLOSERECVD);
2812 case VCHIQ_SRVSTATE_CLOSESENT:
2814 /* This happens when a process is killed mid-close */
2817 if (!do_abort_bulks(service)) {
2818 status = VCHIQ_RETRY;
2822 if (status == VCHIQ_SUCCESS)
2823 status = close_service_complete(service,
2824 VCHIQ_SRVSTATE_CLOSERECVD);
2827 case VCHIQ_SRVSTATE_CLOSERECVD:
2828 if (!close_recvd && is_server)
2829 /* Force into LISTENING mode */
2830 vchiq_set_service_state(service,
2831 VCHIQ_SRVSTATE_LISTENING);
2832 status = close_service_complete(service,
2833 VCHIQ_SRVSTATE_CLOSERECVD);
/* Unexpected state: log the call and fall out with current status. */
2837 vchiq_log_error(vchiq_core_log_level,
2838 "%s(%d) called in state %s", __func__,
2839 close_recvd, srvstate_names[service->srvstate]);
2846 /* Called from the application process upon process death */
/*
 * Mark @service as closing and hand the actual teardown to the slot
 * handler thread via a VCHIQ_POLL_REMOVE poll request; does not block.
 */
2848 vchiq_terminate_service_internal(struct vchiq_service *service)
2850 struct vchiq_state *state = service->state;
2852 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2853 state->id, service->localport, service->remoteport);
2855 mark_service_closing(service);
2857 /* Mark the service for removal by the slot handler */
2858 request_poll(state, service, VCHIQ_POLL_REMOVE);
2861 /* Called from the slot handler */
/*
 * Move @service to the FREE state, wake anyone waiting on remove_event,
 * and drop the initial service reference. Only legal from the quiescent
 * states listed in the switch; anything else logs an error.
 */
2863 vchiq_free_service_internal(struct vchiq_service *service)
2865 struct vchiq_state *state = service->state;
2867 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2868 state->id, service->localport);
2870 switch (service->srvstate) {
2871 case VCHIQ_SRVSTATE_OPENING:
2872 case VCHIQ_SRVSTATE_CLOSED:
2873 case VCHIQ_SRVSTATE_HIDDEN:
2874 case VCHIQ_SRVSTATE_LISTENING:
2875 case VCHIQ_SRVSTATE_CLOSEWAIT:
2878 vchiq_log_error(vchiq_core_log_level,
2879 "%d: fsi - (%d) in state %s",
2880 state->id, service->localport,
2881 srvstate_names[service->srvstate]);
2885 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2887 complete(&service->remove_event);
2889 /* Release the initial lock */
2890 unlock_service(service);
/*
 * Connect @instance to the remote: move its HIDDEN services to LISTENING,
 * send a CONNECT message if still disconnected, then wait (interruptibly)
 * for the remote's connect completion before marking the state CONNECTED.
 * Returns VCHIQ_SUCCESS; elided lines presumably return VCHIQ_RETRY on
 * queue_message retry or interrupted wait — TODO confirm.
 */
2894 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2896 struct vchiq_service *service;
2899 /* Find all services registered to this client and enable them. */
2901 while ((service = next_service_by_instance(state, instance,
2903 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2904 vchiq_set_service_state(service,
2905 VCHIQ_SRVSTATE_LISTENING);
2906 unlock_service(service);
2909 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
/* CONNECT is a blocking queue_message; VCHIQ_RETRY propagates out. */
2910 if (queue_message(state, NULL,
2911 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2912 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2915 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2918 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2919 if (wait_for_completion_interruptible(&state->connect))
2922 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
/* Re-signal so other waiters on state->connect also proceed. */
2923 complete(&state->connect);
2926 return VCHIQ_SUCCESS;
/*
 * Shut down @instance: remove every service registered to it, ignoring
 * individual removal failures, then report VCHIQ_SUCCESS.
 */
2930 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2932 struct vchiq_service *service;
2935 /* Find all services registered to this client and enable them. */
2937 while ((service = next_service_by_instance(state, instance,
/* Best-effort: the return value of vchiq_remove_service is discarded. */
2939 (void)vchiq_remove_service(service->handle);
2940 unlock_service(service);
2943 return VCHIQ_SUCCESS;
/*
 * Public close entry point for a service handle. If called from the slot
 * handler thread the close runs inline; otherwise a TERMINATE poll is
 * requested and the caller waits on remove_event until the service leaves
 * the closing states. Returns VCHIQ_SUCCESS, VCHIQ_ERROR, or VCHIQ_RETRY
 * if the wait was interrupted by a signal.
 */
2947 vchiq_close_service(unsigned int handle)
2949 /* Unregister the service */
2950 struct vchiq_service *service = find_service_by_handle(handle);
2951 enum vchiq_status status = VCHIQ_SUCCESS;
2956 vchiq_log_info(vchiq_core_log_level,
2957 "%d: close_service:%d",
2958 service->state->id, service->localport);
/* Already free/idle: nothing to close. */
2960 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2961 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2962 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2963 unlock_service(service);
2967 mark_service_closing(service);
2969 if (current == service->state->slot_handler_thread) {
2970 status = vchiq_close_service_internal(service,
2972 WARN_ON(status == VCHIQ_RETRY);
2974 /* Mark the service for termination by the slot handler */
2975 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
/* Wait (looping, per the warning below) until the close completes. */
2979 if (wait_for_completion_interruptible(&service->remove_event)) {
2980 status = VCHIQ_RETRY;
2984 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2985 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2986 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
2989 vchiq_log_warning(vchiq_core_log_level,
2990 "%d: close_service:%d - waiting in state %s",
2991 service->state->id, service->localport,
2992 srvstate_names[service->srvstate]);
2995 if ((status == VCHIQ_SUCCESS) &&
2996 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
2997 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
2998 status = VCHIQ_ERROR;
3000 unlock_service(service);
3004 EXPORT_SYMBOL(vchiq_close_service);
/*
 * Remove (fully unregister) a service. Unlike vchiq_close_service(), a
 * server service is forced to look like a client (public_fourcc cleared)
 * so it cannot linger in LISTENING. Returns VCHIQ_SUCCESS, VCHIQ_ERROR,
 * or VCHIQ_RETRY if the wait was interrupted.
 */
3007 vchiq_remove_service(unsigned int handle)
3009 /* Unregister the service */
3010 struct vchiq_service *service = find_service_by_handle(handle);
3011 enum vchiq_status status = VCHIQ_SUCCESS;
3016 vchiq_log_info(vchiq_core_log_level,
3017 "%d: remove_service:%d",
3018 service->state->id, service->localport);
3020 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3021 unlock_service(service);
3025 mark_service_closing(service);
3027 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3028 (current == service->state->slot_handler_thread)) {
3030 * Make it look like a client, because it must be removed and
3031 * not left in the LISTENING state.
3033 service->public_fourcc = VCHIQ_FOURCC_INVALID;
3035 status = vchiq_close_service_internal(service,
3037 WARN_ON(status == VCHIQ_RETRY);
3039 /* Mark the service for removal by the slot handler */
3040 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
/* Wait until the slot handler finishes removal; retry on signal. */
3043 if (wait_for_completion_interruptible(&service->remove_event)) {
3044 status = VCHIQ_RETRY;
3048 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3049 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3052 vchiq_log_warning(vchiq_core_log_level,
3053 "%d: remove_service:%d - waiting in state %s",
3054 service->state->id, service->localport,
3055 srvstate_names[service->srvstate]);
/* Success requires the service to have actually reached FREE. */
3058 if ((status == VCHIQ_SUCCESS) &&
3059 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3060 status = VCHIQ_ERROR;
3062 unlock_service(service);
3068 * This function may be called by kernel threads or user threads.
3069 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3070 * received and the call should be retried after being returned to user
3072 * When called in blocking mode, the userdata field points to a bulk_waiter
/*
 * Queue a bulk transmit or receive on @handle. Exactly one of @offset
 * (kernel pointer) / @uoffset (user pointer) must be non-NULL. @mode
 * selects callback, no-callback, blocking, or waiting semantics; @dir
 * selects TX vs RX. Returns VCHIQ_SUCCESS, VCHIQ_ERROR, or VCHIQ_RETRY.
 * NOTE(review): several lock/early-exit lines are elided in this listing;
 * the error-exit label ordering at the bottom relies on them.
 */
3075 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3076 void *offset, void __user *uoffset,
3077 int size, void *userdata,
3078 enum vchiq_bulk_mode mode,
3079 enum vchiq_bulk_dir dir)
3081 struct vchiq_service *service = find_service_by_handle(handle);
3082 struct vchiq_bulk_queue *queue;
3083 struct vchiq_bulk *bulk;
3084 struct vchiq_state *state;
3085 struct bulk_waiter *bulk_waiter = NULL;
3086 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3087 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3088 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3089 enum vchiq_status status = VCHIQ_ERROR;
3095 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3098 if (!offset && !uoffset)
3101 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
/* BLOCKING initialises a fresh waiter; WAITING resumes a previous one. */
3105 case VCHIQ_BULK_MODE_NOCALLBACK:
3106 case VCHIQ_BULK_MODE_CALLBACK:
3108 case VCHIQ_BULK_MODE_BLOCKING:
3109 bulk_waiter = userdata;
3110 init_completion(&bulk_waiter->event);
3111 bulk_waiter->actual = 0;
3112 bulk_waiter->bulk = NULL;
3114 case VCHIQ_BULK_MODE_WAITING:
3115 bulk_waiter = userdata;
3116 bulk = bulk_waiter->bulk;
3122 state = service->state;
3124 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3125 &service->bulk_tx : &service->bulk_rx;
3127 if (mutex_lock_killable(&service->bulk_mutex)) {
3128 status = VCHIQ_RETRY;
/* Queue full: drop the mutex, wait for a slot, and re-acquire. */
3132 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3133 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3135 mutex_unlock(&service->bulk_mutex);
3136 if (wait_for_completion_interruptible(
3137 &service->bulk_remove_event)) {
3138 status = VCHIQ_RETRY;
3141 if (mutex_lock_killable(&service->bulk_mutex)) {
3142 status = VCHIQ_RETRY;
3145 } while (queue->local_insert == queue->remove +
3146 VCHIQ_NUM_SERVICE_BULKS);
3149 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3153 bulk->userdata = userdata;
/* Pre-set ABORTED; overwritten by the real byte count on completion. */
3155 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3157 if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3158 goto unlock_error_exit;
3162 vchiq_log_info(vchiq_core_log_level,
3163 "%d: bt (%d->%d) %cx %x@%pad %pK",
3164 state->id, service->localport, service->remoteport, dir_char,
3165 size, &bulk->data, userdata);
3168 * The slot mutex must be held when the service is being closed, so
3169 * claim it here to ensure that isn't happening
3171 if (mutex_lock_killable(&state->slot_mutex)) {
3172 status = VCHIQ_RETRY;
3173 goto cancel_bulk_error_exit;
3176 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3177 goto unlock_both_error_exit;
3179 payload[0] = lower_32_bits(bulk->data);
3180 payload[1] = bulk->size;
/* queue_message manages both mutexes itself (NO_MUTEX_LOCK/UNLOCK). */
3181 status = queue_message(state,
3183 VCHIQ_MAKE_MSG(dir_msgtype,
3185 service->remoteport),
3186 memcpy_copy_callback,
3189 QMFLAGS_IS_BLOCKING |
3190 QMFLAGS_NO_MUTEX_LOCK |
3191 QMFLAGS_NO_MUTEX_UNLOCK);
3192 if (status != VCHIQ_SUCCESS)
3193 goto unlock_both_error_exit;
3195 queue->local_insert++;
3197 mutex_unlock(&state->slot_mutex);
3198 mutex_unlock(&service->bulk_mutex);
3200 vchiq_log_trace(vchiq_core_log_level,
3201 "%d: bt:%d %cx li=%x ri=%x p=%x",
3203 service->localport, dir_char,
3204 queue->local_insert, queue->remote_insert, queue->process);
3207 unlock_service(service);
3209 status = VCHIQ_SUCCESS;
/* Blocking/waiting path: sleep until the bulk completes or a signal. */
3212 bulk_waiter->bulk = bulk;
3213 if (wait_for_completion_interruptible(&bulk_waiter->event))
3214 status = VCHIQ_RETRY;
3215 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3216 status = VCHIQ_ERROR;
3221 unlock_both_error_exit:
3222 mutex_unlock(&state->slot_mutex);
3223 cancel_bulk_error_exit:
3224 vchiq_complete_bulk(bulk);
3226 mutex_unlock(&service->bulk_mutex);
3230 unlock_service(service);
/*
 * Queue a DATA message on @handle, pulling the payload through
 * @copy_callback(context, dest, offset, maxsize). Dispatches to
 * queue_message() for OPEN services and queue_message_sync() for
 * OPENSYNC ones; any other state yields VCHIQ_ERROR. Messages larger
 * than VCHIQ_MAX_MSG_SIZE are rejected and counted as errors.
 */
3235 vchiq_queue_message(unsigned int handle,
3236 ssize_t (*copy_callback)(void *context, void *dest,
3237 size_t offset, size_t maxsize),
3241 struct vchiq_service *service = find_service_by_handle(handle);
3242 enum vchiq_status status = VCHIQ_ERROR;
3247 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3251 VCHIQ_SERVICE_STATS_INC(service, error_count);
3256 if (size > VCHIQ_MAX_MSG_SIZE) {
3257 VCHIQ_SERVICE_STATS_INC(service, error_count);
3261 switch (service->srvstate) {
3262 case VCHIQ_SRVSTATE_OPEN:
3263 status = queue_message(service->state, service,
3264 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3266 service->remoteport),
3267 copy_callback, context, size, 1);
3269 case VCHIQ_SRVSTATE_OPENSYNC:
3270 status = queue_message_sync(service->state, service,
3271 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3273 service->remoteport),
3274 copy_callback, context, size, 1);
3277 status = VCHIQ_ERROR;
3283 unlock_service(service);
/*
 * Blocking kernel-side message send: wraps vchiq_queue_message() with
 * memcpy_copy_callback and loops while it returns VCHIQ_RETRY, so the
 * call does not surface signal-interruption to the caller.
 */
3288 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3290 enum vchiq_status status;
3293 status = vchiq_queue_message(handle, memcpy_copy_callback,
3297 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3298 * implement a retry mechanism since this function is supposed
3299 * to block until queued
3301 if (status != VCHIQ_RETRY)
3309 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Return ownership of a received @header to the remote. Normal messages
 * release their claim on the containing slot; messages in the remote's
 * sync slot go through release_message_sync() instead.
 */
3312 vchiq_release_message(unsigned int handle,
3313 struct vchiq_header *header)
3315 struct vchiq_service *service = find_service_by_handle(handle);
3316 struct vchiq_shared_state *remote;
3317 struct vchiq_state *state;
3323 state = service->state;
3324 remote = state->remote;
/* Work out which slot the header lives in from its address. */
3326 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3328 if ((slot_index >= remote->slot_first) &&
3329 (slot_index <= remote->slot_last)) {
3330 int msgid = header->msgid;
3332 if (msgid & VCHIQ_MSGID_CLAIMED) {
3333 struct vchiq_slot_info *slot_info =
3334 SLOT_INFO_FROM_INDEX(state, slot_index);
3336 release_slot(state, slot_info, header, service);
3338 } else if (slot_index == remote->slot_sync) {
3339 release_message_sync(state, header);
3342 unlock_service(service);
3344 EXPORT_SYMBOL(vchiq_release_message);
/*
 * Release a message from the synchronous slot: mark the header as
 * padding and signal the remote's sync_release event.
 */
3347 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3349 header->msgid = VCHIQ_MSGID_PADDING;
3350 remote_event_signal(&state->remote->sync_release);
/*
 * Report the protocol version negotiated with the remote for @handle via
 * *@peer_version. Returns VCHIQ_SUCCESS, or VCHIQ_ERROR when the handle
 * or service check fails.
 */
3354 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3356 enum vchiq_status status = VCHIQ_ERROR;
3357 struct vchiq_service *service = find_service_by_handle(handle);
3362 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3368 *peer_version = service->peer_version;
3369 status = VCHIQ_SUCCESS;
3373 unlock_service(service);
3376 EXPORT_SYMBOL(vchiq_get_peer_version);
/*
 * Fill in @config with the compile-time VCHIQ limits and version range.
 * Note bulk_threshold is intentionally the same as max_msg_size.
 */
3378 void vchiq_get_config(struct vchiq_config *config)
3380 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3381 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3382 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3383 config->max_services = VCHIQ_MAX_SERVICES;
3384 config->version = VCHIQ_VERSION;
3385 config->version_min = VCHIQ_VERSION_MIN;
/*
 * Set a per-service option on @handle: autoclose flag, slot/message
 * quotas (0 restores the state default; raising a quota above current
 * use wakes any quota waiter), synchronous mode (only while HIDDEN or
 * LISTENING), or message tracing.
 *
 * Fix: both quota paths previously read `complete("a->quota_event);` —
 * an entity-mangled form of `complete(&quota->quota_event);` (the
 * leading `&quot` of `&quota` was decoded to `"`). Passing a string
 * literal to complete() is meaningless; restore the completion pointer.
 */
3389 vchiq_set_service_option(unsigned int handle,
3390 enum vchiq_service_option option, int value)
3392 struct vchiq_service *service = find_service_by_handle(handle);
3393 struct vchiq_service_quota *quota;
3400 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3401 service->auto_close = value;
3405 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3406 quota = &service->state->service_quotas[service->localport];
3408 value = service->state->default_slot_quota;
3409 if ((value >= quota->slot_use_count) &&
3410 (value < (unsigned short)~0)) {
3411 quota->slot_quota = value;
3412 if ((value >= quota->slot_use_count) &&
3413 (quota->message_quota >= quota->message_use_count))
3415 * Signal the service that it may have
3416 * dropped below its quota
3418 complete(&quota->quota_event);
3423 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3424 quota = &service->state->service_quotas[service->localport];
3426 value = service->state->default_message_quota;
3427 if ((value >= quota->message_use_count) &&
3428 (value < (unsigned short)~0)) {
3429 quota->message_quota = value;
3430 if ((value >= quota->message_use_count) &&
3431 (quota->slot_quota >= quota->slot_use_count))
3433 * Signal the service that it may have
3434 * dropped below its quota
3436 complete(&quota->quota_event);
3441 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3442 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3443 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3444 service->sync = value;
3449 case VCHIQ_SERVICE_OPTION_TRACE:
3450 service->trace = value;
3457 unlock_service(service);
/*
 * Dump one side's shared state (@shared, labelled @label) into the dump
 * context: slot range and positions, per-slot use/release imbalances,
 * and the DEBUG counter array. Each scnprintf'd line is passed to
 * vchiq_dump() including its NUL terminator (len + 1).
 */
3463 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3464 struct vchiq_shared_state *shared, const char *label)
3466 static const char *const debug_names[] = {
3468 "SLOT_HANDLER_COUNT",
3469 "SLOT_HANDLER_LINE",
3473 "AWAIT_COMPLETION_LINE",
3474 "DEQUEUE_MESSAGE_LINE",
3475 "SERVICE_CALLBACK_LINE",
3476 "MSG_QUEUE_FULL_COUNT",
3477 "COMPLETION_QUEUE_FULL_COUNT"
3484 len = scnprintf(buf, sizeof(buf),
3485 " %s: slots %d-%d tx_pos=%x recycle=%x",
3486 label, shared->slot_first, shared->slot_last,
3487 shared->tx_pos, shared->slot_queue_recycle);
3488 err = vchiq_dump(dump_context, buf, len + 1);
3492 len = scnprintf(buf, sizeof(buf),
3494 err = vchiq_dump(dump_context, buf, len + 1);
/* Only slots with unbalanced use/release counts are listed. */
3498 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3499 struct vchiq_slot_info slot_info =
3500 *SLOT_INFO_FROM_INDEX(state, i);
3501 if (slot_info.use_count != slot_info.release_count) {
3502 len = scnprintf(buf, sizeof(buf),
3503 " %d: %d/%d", i, slot_info.use_count,
3504 slot_info.release_count);
3505 err = vchiq_dump(dump_context, buf, len + 1);
/* Index 0 is DEBUG_ENTRIES itself, hence the loop starts at 1. */
3511 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3512 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3513 debug_names[i], shared->debug[i], shared->debug[i]);
3514 err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the whole VCHIQ state: connection state, tx/rx positions, version,
 * optional global stats, slot accounting, both shared-state halves,
 * platform info, and every in-use service. Returns the first vchiq_dump
 * error encountered (propagation lines are elided in this listing).
 */
3521 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3528 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3529 conn_state_names[state->conn_state]);
3530 err = vchiq_dump(dump_context, buf, len + 1);
3534 len = scnprintf(buf, sizeof(buf),
3535 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3536 state->local->tx_pos,
3537 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3539 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3540 err = vchiq_dump(dump_context, buf, len + 1);
3544 len = scnprintf(buf, sizeof(buf),
3545 " Version: %d (min %d)",
3546 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3547 err = vchiq_dump(dump_context, buf, len + 1);
3551 if (VCHIQ_ENABLE_STATS) {
3552 len = scnprintf(buf, sizeof(buf),
3553 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3554 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3555 state->stats.error_count);
3556 err = vchiq_dump(dump_context, buf, len + 1);
3561 len = scnprintf(buf, sizeof(buf),
3562 " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3563 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3564 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3565 state->data_quota - state->data_use_count,
3566 state->local->slot_queue_recycle - state->slot_queue_available,
3567 state->stats.slot_stalls, state->stats.data_stalls);
3568 err = vchiq_dump(dump_context, buf, len + 1);
3572 err = vchiq_dump_platform_state(dump_context);
/* Dump local then remote shared-state halves (arguments elided). */
3576 err = vchiq_dump_shared_state(dump_context,
3582 err = vchiq_dump_shared_state(dump_context,
3589 err = vchiq_dump_platform_instances(dump_context);
3593 for (i = 0; i < state->unused_service; i++) {
3594 struct vchiq_service *service = find_service_by_port(state, i);
3597 err = vchiq_dump_service_state(dump_context, service);
3598 unlock_service(service);
/*
 * Dump one service's state: port/state/refcount header line, then — for
 * non-FREE services — fourcc, remote port (with client id for servers),
 * quota usage, pending bulk counts, and (when stats are enabled) control
 * and bulk traffic counters plus stall/abort/error totals.
 */
3606 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3611 unsigned int ref_count;
3613 /*Don't include the lock just taken*/
3614 ref_count = kref_read(&service->ref_count) - 1;
3615 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3616 service->localport, srvstate_names[service->srvstate],
3619 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3620 char remoteport[30];
3621 struct vchiq_service_quota *quota =
3622 &service->state->service_quotas[service->localport];
3623 int fourcc = service->base.fourcc;
3624 int tx_pending, rx_pending;
3626 if (service->remoteport != VCHIQ_PORT_FREE) {
3627 int len2 = scnprintf(remoteport, sizeof(remoteport),
3628 "%u", service->remoteport);
/* Servers append the connected client's id to the remote port. */
3630 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3631 scnprintf(remoteport + len2,
3632 sizeof(remoteport) - len2,
3633 " (client %x)", service->client_id);
3635 strcpy(remoteport, "n/a");
3638 len += scnprintf(buf + len, sizeof(buf) - len,
3639 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3640 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3642 quota->message_use_count,
3643 quota->message_quota,
3644 quota->slot_use_count,
3647 err = vchiq_dump(dump_context, buf, len + 1);
/* Pending counts are insert-cursor differences per direction. */
3651 tx_pending = service->bulk_tx.local_insert -
3652 service->bulk_tx.remote_insert;
3654 rx_pending = service->bulk_rx.local_insert -
3655 service->bulk_rx.remote_insert;
3657 len = scnprintf(buf, sizeof(buf),
3658 " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3660 tx_pending ? service->bulk_tx.bulks[
3661 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3663 rx_pending ? service->bulk_rx.bulks[
3664 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3666 if (VCHIQ_ENABLE_STATS) {
3667 err = vchiq_dump(dump_context, buf, len + 1);
3671 len = scnprintf(buf, sizeof(buf),
3672 " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3673 service->stats.ctrl_tx_count,
3674 service->stats.ctrl_tx_bytes,
3675 service->stats.ctrl_rx_count,
3676 service->stats.ctrl_rx_bytes);
3677 err = vchiq_dump(dump_context, buf, len + 1);
3681 len = scnprintf(buf, sizeof(buf),
3682 " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3683 service->stats.bulk_tx_count,
3684 service->stats.bulk_tx_bytes,
3685 service->stats.bulk_rx_count,
3686 service->stats.bulk_rx_bytes);
3687 err = vchiq_dump(dump_context, buf, len + 1);
3691 len = scnprintf(buf, sizeof(buf),
3692 " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3693 service->stats.quota_stalls,
3694 service->stats.slot_stalls,
3695 service->stats.bulk_stalls,
3696 service->stats.bulk_aborted_count,
3697 service->stats.error_count);
3701 err = vchiq_dump(dump_context, buf, len + 1);
3705 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3706 err = vchiq_dump_platform_service_state(dump_context, service);
/* Emit the opening banner (two rules and a "=====" lead-in) that frames
 * a loud error report; paired with vchiq_loud_error_footer(). */
3711 vchiq_loud_error_header(void)
3713 vchiq_log_error(vchiq_core_log_level,
3714 "============================================================================");
3715 vchiq_log_error(vchiq_core_log_level,
3716 "============================================================================");
3717 vchiq_log_error(vchiq_core_log_level, "=====");
/* Emit the closing banner (a "=====" trailer and two rules) that ends a
 * loud error report started by vchiq_loud_error_header(). */
3721 vchiq_loud_error_footer(void)
3723 vchiq_log_error(vchiq_core_log_level, "=====");
3724 vchiq_log_error(vchiq_core_log_level,
3725 "============================================================================");
3726 vchiq_log_error(vchiq_core_log_level,
3727 "============================================================================");
/*
 * Send a REMOTE_USE control message to the peer; the disconnected-state
 * early exit's return value is elided in this listing.
 */
3730 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3732 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3735 return queue_message(state, NULL,
3736 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/*
 * Send a REMOTE_USE_ACTIVE control message to the peer; mirrors
 * vchiq_send_remote_use() including the disconnected-state early exit.
 */
3740 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3742 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3745 return queue_message(state, NULL,
3746 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3750 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3753 const u8 *mem = void_mem;
3758 while (num_bytes > 0) {
3761 for (offset = 0; offset < 16; offset++) {
3762 if (offset < num_bytes)
3763 s += scnprintf(s, 4, "%02x ", mem[offset]);
3765 s += scnprintf(s, 4, " ");
3768 for (offset = 0; offset < 16; offset++) {
3769 if (offset < num_bytes) {
3770 u8 ch = mem[offset];
3772 if ((ch < ' ') || (ch > '~'))
3779 if (label && (*label != '\0'))
3780 vchiq_log_trace(VCHIQ_LOG_TRACE,
3781 "%s: %08x: %s", label, addr, line_buf);
3783 vchiq_log_trace(VCHIQ_LOG_TRACE,
3784 "%08x: %s", addr, line_buf);