1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
/* Stack size for the slot handler kernel thread. */
#define VCHIQ_SLOT_HANDLER_STACK 8192

/* Bits of a handle above this shift encode state/sequence information. */
#define HANDLE_STATE_SHIFT 12

/* Translate between slot indices and the slot_info/slot_data arrays. */
#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
#define SLOT_INDEX_FROM_DATA(state, data) \
(((unsigned int)((char *)data - (char *)state->slot_data)) / \
#define SLOT_INDEX_FROM_INFO(state, info) \
((unsigned int)(info - state->slot_info))
/* Convert a byte position in the tx/rx stream to a slot-queue index. */
#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)

/* Wrap a bulk position onto the fixed-size per-service bulk ring. */
#define BULK_INDEX(x) (x & (VCHIQ_NUM_SERVICE_BULKS - 1))

/*
 * Per-service tracing: a service with its trace flag set logs at TRACE
 * level regardless of the global message log level.
 */
#define SRVTRACE_LEVEL(srv) \
(((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
#define SRVTRACE_ENABLED(srv, lev) \
(((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))

/* Passed to vchiq_close_service_internal() when no CLOSE was received. */
#define NO_CLOSE_RECVD 0

struct vchiq_open_payload {
struct vchiq_openack_payload {
	/* Flags controlling queue_message() blocking and slot_mutex handling. */
	QMFLAGS_IS_BLOCKING = BIT(0),
	QMFLAGS_NO_MUTEX_LOCK = BIT(1),
	QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
/* we require this for consistency between endpoints */
vchiq_static_assert(sizeof(struct vchiq_header) == 8);
vchiq_static_assert(IS_POW2(sizeof(struct vchiq_header)));
vchiq_static_assert(IS_POW2(VCHIQ_NUM_CURRENT_BULKS));
vchiq_static_assert(IS_POW2(VCHIQ_NUM_SERVICE_BULKS));
vchiq_static_assert(IS_POW2(VCHIQ_MAX_SERVICES));
vchiq_static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);

/* Run time control of log level, based on KERN_XXX level. */
int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;

/* Protects bulk_waiter access in notify_bulks(); non-static, shared. */
DEFINE_SPINLOCK(bulk_waiter_spinlock);
/* Protects per-service and global message/slot quota accounting. */
static DEFINE_SPINLOCK(quota_spinlock);

struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
/* Sequence counter folded into newly allocated service handles. */
static unsigned int handle_seq;
/* Printable names, indexed by VCHIQ_SRVSTATE_* (see srvstate_names[...] uses). */
static const char *const srvstate_names[] = {
/* Printable names, indexed by enum vchiq_reason (see reason_names[reason]). */
static const char *const reason_names[] = {
	"BULK_TRANSMIT_ABORTED",
	"BULK_RECEIVE_ABORTED"
/* Printable names, indexed by enum vchiq_connstate. */
static const char *const conn_state_names[] = {
/* Forward declaration; definition appears later in this file. */
release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
/* Return the printable name of a VCHIQ_MSG_* message type, for logging. */
static const char *msg_type_str(unsigned int msg_type)
	case VCHIQ_MSG_PADDING: return "PADDING";
	case VCHIQ_MSG_CONNECT: return "CONNECT";
	case VCHIQ_MSG_OPEN: return "OPEN";
	case VCHIQ_MSG_OPENACK: return "OPENACK";
	case VCHIQ_MSG_CLOSE: return "CLOSE";
	case VCHIQ_MSG_DATA: return "DATA";
	case VCHIQ_MSG_BULK_RX: return "BULK_RX";
	case VCHIQ_MSG_BULK_TX: return "BULK_TX";
	case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
	case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
	case VCHIQ_MSG_PAUSE: return "PAUSE";
	case VCHIQ_MSG_RESUME: return "RESUME";
	case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
	case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
	case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
/* Move @service to @newstate (VCHIQ_SRVSTATE_*), logging the transition. */
vchiq_set_service_state(struct vchiq_service *service, int newstate)
	vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
		       service->state->id, service->localport,
		       srvstate_names[service->srvstate],
		       srvstate_names[newstate]);
	service->srvstate = newstate;
/*
 * Look up a live service by handle and take a reference on it.
 * Accepts only non-FREE services whose stored handle still matches;
 * kref_get_unless_zero() fails if the service is mid-teardown.
 */
struct vchiq_service *
find_service_by_handle(unsigned int handle)
	struct vchiq_service *service;

	service = handle_to_service(handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    kref_get_unless_zero(&service->ref_count)) {
		/* Hand the RCU-protected pointer to the caller (ref held). */
		service = rcu_pointer_handoff(service);
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
/*
 * Look up a live service by local port number and take a reference on it.
 * Ports above VCHIQ_PORT_MAX are rejected up front.
 */
struct vchiq_service *
find_service_by_port(struct vchiq_state *state, int localport)
	if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
		struct vchiq_service *service;

		service = rcu_dereference(state->services[localport]);
		if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid port %d", localport);
/*
 * As find_service_by_handle(), but additionally require that the service
 * belongs to @instance.
 */
struct vchiq_service *
find_service_for_instance(struct vchiq_instance *instance,
	struct vchiq_service *service;

	service = handle_to_service(handle);
	if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
/*
 * As find_service_for_instance(), but accept services in the FREE or
 * CLOSED states as well (used when tearing down already-closed services).
 */
struct vchiq_service *
find_closed_service_for_instance(struct vchiq_instance *instance,
	struct vchiq_service *service;

	service = handle_to_service(handle);
	    (service->srvstate == VCHIQ_SRVSTATE_FREE ||
	     service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
	    service->handle == handle &&
	    service->instance == instance &&
	    kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
	vchiq_log_info(vchiq_core_log_level,
		       "Invalid service handle 0x%x", handle);
/*
 * Scan the service table from the caller-supplied index for the next
 * non-FREE service owned by @instance.  No reference is taken here;
 * callers must do so (see next_service_by_instance()).
 */
struct vchiq_service *
__next_service_by_instance(struct vchiq_state *state,
			   struct vchiq_instance *instance,
	struct vchiq_service *service = NULL;

	while (idx < state->unused_service) {
		struct vchiq_service *srv;

		srv = rcu_dereference(state->services[idx]);
		if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
		    srv->instance == instance) {
/*
 * Reference-taking wrapper around __next_service_by_instance(): returns
 * the next service of @instance with a reference held.
 */
struct vchiq_service *
next_service_by_instance(struct vchiq_state *state,
			 struct vchiq_instance *instance,
	struct vchiq_service *service;

	service = __next_service_by_instance(state, instance, pidx);

	if (kref_get_unless_zero(&service->ref_count)) {
		service = rcu_pointer_handoff(service);
/* Take an additional reference on @service; a NULL service is a caller bug. */
lock_service(struct vchiq_service *service)
	WARN(1, "%s service is NULL\n", __func__);
	kref_get(&service->ref_count);
/*
 * kref release callback: runs when the last reference to a service drops.
 * Unpublishes the RCU table slot, invokes the user's termination callback,
 * and frees the service after an RCU grace period.
 */
static void service_release(struct kref *kref)
	struct vchiq_service *service =
		container_of(kref, struct vchiq_service, ref_count);
	struct vchiq_state *state = service->state;

	/* Only services already in the FREE state may be released. */
	WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
	rcu_assign_pointer(state->services[service->localport], NULL);
	if (service->userdata_term)
		service->userdata_term(service->base.userdata);
	kfree_rcu(service, rcu);
/* Drop a reference on @service, freeing it via service_release() at zero. */
unlock_service(struct vchiq_service *service)
	WARN(1, "%s: service is NULL\n", __func__);
	kref_put(&service->ref_count, service_release);
/* Return the client_id of the service behind @handle, or 0 if not found. */
vchiq_get_client_id(unsigned int handle)
	struct vchiq_service *service;

	service = handle_to_service(handle);
	id = service ? service->client_id : 0;
/* Return the userdata of the service behind @handle, or NULL if not found. */
vchiq_get_service_userdata(unsigned int handle)
	struct vchiq_service *service;

	service = handle_to_service(handle);
	userdata = service ? service->base.userdata : NULL;
EXPORT_SYMBOL(vchiq_get_service_userdata);
344 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
346 struct vchiq_state *state = service->state;
347 struct vchiq_service_quota *quota;
349 service->closing = 1;
351 /* Synchronise with other threads. */
352 mutex_lock(&state->recycle_mutex);
353 mutex_unlock(&state->recycle_mutex);
354 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
356 * If we're pausing then the slot_mutex is held until resume
357 * by the slot handler. Therefore don't try to acquire this
358 * mutex if we're the slot handler and in the pause sent state.
359 * We don't need to in this case anyway.
361 mutex_lock(&state->slot_mutex);
362 mutex_unlock(&state->slot_mutex);
365 /* Unblock any sending thread. */
366 quota = &state->service_quotas[service->localport];
367 complete("a->quota_event);
/* Mark a service closing from a non-slot-handler context (sh_thread = 0). */
mark_service_closing(struct vchiq_service *service)
	mark_service_closing_internal(service, 0);
/*
 * Invoke the service's user callback for @reason.  A VCHIQ_ERROR return is
 * logged and downgraded to VCHIQ_SUCCESS; any message not delivered as
 * MESSAGE_AVAILABLE is released here on the service's behalf.
 */
static inline enum vchiq_status
make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
		      struct vchiq_header *header, void *bulk_userdata)
	enum vchiq_status status;

	vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
			service->state->id, service->localport, reason_names[reason],
			header, bulk_userdata);
	status = service->base.callback(reason, header, service->handle,
	if (status == VCHIQ_ERROR) {
		vchiq_log_warning(vchiq_core_log_level,
				  "%d: ignoring ERROR from callback to service %x",
				  service->state->id, service->handle);
		status = VCHIQ_SUCCESS;

	if (reason != VCHIQ_MESSAGE_AVAILABLE)
		vchiq_release_message(service->handle, header);
/*
 * Move the connection to @newstate, logging the transition and notifying
 * the platform layer of the change.
 */
vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
	enum vchiq_connstate oldstate = state->conn_state;

	vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
		       conn_state_names[oldstate],
		       conn_state_names[newstate]);
	state->conn_state = newstate;
	vchiq_platform_conn_state_changed(state, oldstate, newstate);
/* Initialise the wait queue backing a remote event. */
remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
	/*
	 * Don't clear the 'fired' flag because it may already have been set
	 */
	init_waitqueue_head(wq);
/*
 * All the event waiting routines in VCHIQ used a custom semaphore
 * implementation that filtered most signals. This achieved a behaviour similar
 * to the "killable" family of functions. While cleaning up this code all the
 * routines were switched to the "interruptible" family of functions, as the
 * former was deemed unjustified and the use "killable" set all VCHIQ's
 * threads in D state.
 */
remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
	/* Interruptible wait: a signal aborts the wait (non-zero return). */
	if (wait_event_interruptible(*wq, event->fired)) {
/* Wake a local waiter on @wq for @event (body not visible in this chunk). */
remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
/* Re-deliver an event that has fired while a local waiter is armed. */
remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
	if (event->fired && event->armed)
		remote_event_signal_local(wq, event);
/* Poll all four local events so pending signals are delivered. */
remote_event_pollall(struct vchiq_state *state)
	remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
	remote_event_poll(&state->sync_release_event, &state->local->sync_release);
	remote_event_poll(&state->trigger_event, &state->local->trigger);
	remote_event_poll(&state->recycle_event, &state->local->recycle);
/*
 * Round up message sizes so that any space at the end of a slot is always big
 * enough for a header. This relies on header size being a power of two, which
 * has been verified earlier by a static assertion.
 */
calc_stride(size_t size)
	/* Allow room for the header */
	size += sizeof(struct vchiq_header);

	/* Round up to the next multiple of the (power-of-two) header size. */
	return (size + sizeof(struct vchiq_header) - 1) &
	       ~(sizeof(struct vchiq_header) - 1);
/* Called by the slot handler thread */
/*
 * Find a service listening for @fourcc (LISTENING, or OPEN with a free
 * remote port) and return it with a reference held.
 */
static struct vchiq_service *
get_listening_service(struct vchiq_state *state, int fourcc)
	WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);

	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;

		service = rcu_dereference(state->services[i]);
		    service->public_fourcc == fourcc &&
		    (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
		     (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		      service->remoteport == VCHIQ_PORT_FREE)) &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
/* Called by the slot handler thread */
/*
 * Find the OPEN service connected to remote @port and return it with a
 * reference held.
 */
static struct vchiq_service *
get_connected_service(struct vchiq_state *state, unsigned int port)
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service =
			rcu_dereference(state->services[i]);

		if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
		    service->remoteport == port &&
		    kref_get_unless_zero(&service->ref_count)) {
			service = rcu_pointer_handoff(service);
/*
 * Record a poll request for @service (poll_type is a VCHIQ_POLL_* bit) and
 * kick the slot handler so it services the request.  The per-service and
 * per-state bitmasks are updated lock-free with cmpxchg loops.
 */
request_poll(struct vchiq_state *state, struct vchiq_service *service,
		value = atomic_read(&service->poll_flags);
	} while (atomic_cmpxchg(&service->poll_flags, value,
				value | BIT(poll_type)) != value);

	/* Each 32-bit word of poll_services covers 32 local ports. */
	index = BITSET_WORD(service->localport);
		value = atomic_read(&state->poll_services[index]);
	} while (atomic_cmpxchg(&state->poll_services[index],
				value, value | BIT(service->localport & 0x1f)) != value);

	state->poll_needed = 1;

	/* ... and ensure the slot handler runs. */
	remote_event_signal_local(&state->trigger_event, &state->local->trigger);
/*
 * Called from queue_message, by the slot handler and application threads,
 * with slot_mutex held
 */
/*
 * Reserve @space bytes in the tx stream, padding out the current slot and
 * acquiring a new one if needed.  Returns a pointer to the reserved header,
 * or NULL if no slot became available (interrupted wait / non-blocking).
 */
static struct vchiq_header *
reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
	struct vchiq_shared_state *local = state->local;
	int tx_pos = state->local_tx_pos;
	int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);

	if (space > slot_space) {
		struct vchiq_header *header;
		/* Fill the remaining space with padding */
		WARN_ON(!state->tx_data);
		header = (struct vchiq_header *)
			(state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
		header->msgid = VCHIQ_MSGID_PADDING;
		header->size = slot_space - sizeof(struct vchiq_header);

		tx_pos += slot_space;

	/* If necessary, get the next slot. */
	if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
		/* If there is no free slot... */

		if (!try_wait_for_completion(&state->slot_available_event)) {
			/* ...wait for one. */

			VCHIQ_STATS_INC(state, slot_stalls);

			/* But first, flush through the last slot. */
			state->local_tx_pos = tx_pos;
			local->tx_pos = tx_pos;
			remote_event_signal(&state->remote->trigger);

			    (wait_for_completion_interruptible(
					&state->slot_available_event)))
				return NULL; /* No space available */

		if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
			complete(&state->slot_available_event);
			pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);

		slot_index = local->slot_queue[
			SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
			(char *)SLOT_DATA_FROM_INDEX(state, slot_index);

	state->local_tx_pos = tx_pos + space;

	return (struct vchiq_header *)(state->tx_data +
		(tx_pos & VCHIQ_SLOT_MASK));
633 /* Called by the recycle thread. */
635 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
638 struct vchiq_shared_state *local = state->local;
639 int slot_queue_available;
642 * Find slots which have been freed by the other side, and return them
643 * to the available queue.
645 slot_queue_available = state->slot_queue_available;
648 * Use a memory barrier to ensure that any state that may have been
649 * modified by another thread is not masked by stale prefetched
654 while (slot_queue_available != local->slot_queue_recycle) {
656 int slot_index = local->slot_queue[slot_queue_available &
657 VCHIQ_SLOT_QUEUE_MASK];
658 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
661 slot_queue_available++;
663 * Beware of the address dependency - data is calculated
664 * using an index written by the other side.
668 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
669 state->id, slot_index, data,
670 local->slot_queue_recycle, slot_queue_available);
672 /* Initialise the bitmask for services which have used this slot */
673 memset(service_found, 0, length);
677 while (pos < VCHIQ_SLOT_SIZE) {
678 struct vchiq_header *header =
679 (struct vchiq_header *)(data + pos);
680 int msgid = header->msgid;
682 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
683 int port = VCHIQ_MSG_SRCPORT(msgid);
684 struct vchiq_service_quota *quota =
685 &state->service_quotas[port];
688 spin_lock("a_spinlock);
689 count = quota->message_use_count;
691 quota->message_use_count = count - 1;
692 spin_unlock("a_spinlock);
694 if (count == quota->message_quota) {
696 * Signal the service that it
697 * has dropped below its quota
699 complete("a->quota_event);
700 } else if (count == 0) {
701 vchiq_log_error(vchiq_core_log_level,
702 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
704 quota->message_use_count,
705 header, msgid, header->msgid,
707 WARN(1, "invalid message use count\n");
709 if (!BITSET_IS_SET(service_found, port)) {
710 /* Set the found bit for this service */
711 BITSET_SET(service_found, port);
713 spin_lock("a_spinlock);
714 count = quota->slot_use_count;
716 quota->slot_use_count =
718 spin_unlock("a_spinlock);
722 * Signal the service in case
723 * it has dropped below its quota
725 complete("a->quota_event);
727 vchiq_core_log_level,
728 "%d: pfq:%d %x@%pK - slot_use->%d",
730 header->size, header,
734 vchiq_core_log_level,
735 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
737 msgid, header->msgid,
739 WARN(1, "bad slot use count\n");
746 pos += calc_stride(header->size);
747 if (pos > VCHIQ_SLOT_SIZE) {
748 vchiq_log_error(vchiq_core_log_level,
749 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
750 pos, header, msgid, header->msgid,
752 WARN(1, "invalid slot position\n");
759 spin_lock("a_spinlock);
760 count = state->data_use_count;
762 state->data_use_count = count - 1;
763 spin_unlock("a_spinlock);
764 if (count == state->data_quota)
765 complete(&state->data_quota_event);
769 * Don't allow the slot to be reused until we are no
770 * longer interested in it.
774 state->slot_queue_available = slot_queue_available;
775 complete(&state->slot_available_event);
/* Default copy callback: plain memcpy from @context into the message body. */
memcpy_copy_callback(
	void *context, void *dest,
	size_t offset, size_t maxsize)
	memcpy(dest + offset, context + offset, maxsize);
	/*
	 * Copy a message payload in chunks via @copy_callback.  The callback
	 * returns the number of bytes written per call; 0 ends the copy, a
	 * negative value is propagated as the error, and a result larger
	 * than the remaining space is rejected.
	 */
	ssize_t (*copy_callback)(void *context, void *dest,
				 size_t offset, size_t maxsize),
	ssize_t callback_result;
	size_t max_bytes = size - pos;

		copy_callback(context, dest + pos,

	if (callback_result < 0)
		return callback_result;

	if (!callback_result)

	if (callback_result > max_bytes)

	pos += callback_result;
821 /* Called by the slot handler and application threads */
822 static enum vchiq_status
823 queue_message(struct vchiq_state *state, struct vchiq_service *service,
825 ssize_t (*copy_callback)(void *context, void *dest,
826 size_t offset, size_t maxsize),
827 void *context, size_t size, int flags)
829 struct vchiq_shared_state *local;
830 struct vchiq_service_quota *quota = NULL;
831 struct vchiq_header *header;
832 int type = VCHIQ_MSG_TYPE(msgid);
836 local = state->local;
838 stride = calc_stride(size);
840 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
842 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
843 mutex_lock_killable(&state->slot_mutex))
846 if (type == VCHIQ_MSG_DATA) {
850 WARN(1, "%s: service is NULL\n", __func__);
851 mutex_unlock(&state->slot_mutex);
855 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
856 QMFLAGS_NO_MUTEX_UNLOCK));
858 if (service->closing) {
859 /* The service has been closed */
860 mutex_unlock(&state->slot_mutex);
864 quota = &state->service_quotas[service->localport];
866 spin_lock("a_spinlock);
869 * Ensure this service doesn't use more than its quota of
872 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
873 state->local_tx_pos + stride - 1);
876 * Ensure data messages don't use more than their quota of
879 while ((tx_end_index != state->previous_data_index) &&
880 (state->data_use_count == state->data_quota)) {
881 VCHIQ_STATS_INC(state, data_stalls);
882 spin_unlock("a_spinlock);
883 mutex_unlock(&state->slot_mutex);
885 if (wait_for_completion_interruptible(
886 &state->data_quota_event))
889 mutex_lock(&state->slot_mutex);
890 spin_lock("a_spinlock);
891 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
892 state->local_tx_pos + stride - 1);
893 if ((tx_end_index == state->previous_data_index) ||
894 (state->data_use_count < state->data_quota)) {
895 /* Pass the signal on to other waiters */
896 complete(&state->data_quota_event);
901 while ((quota->message_use_count == quota->message_quota) ||
902 ((tx_end_index != quota->previous_tx_index) &&
903 (quota->slot_use_count == quota->slot_quota))) {
904 spin_unlock("a_spinlock);
905 vchiq_log_trace(vchiq_core_log_level,
906 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
907 state->id, service->localport,
908 msg_type_str(type), size,
909 quota->message_use_count,
910 quota->slot_use_count);
911 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
912 mutex_unlock(&state->slot_mutex);
913 if (wait_for_completion_interruptible(
914 "a->quota_event))
916 if (service->closing)
918 if (mutex_lock_killable(&state->slot_mutex))
920 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
921 /* The service has been closed */
922 mutex_unlock(&state->slot_mutex);
925 spin_lock("a_spinlock);
926 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
927 state->local_tx_pos + stride - 1);
930 spin_unlock("a_spinlock);
933 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
937 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
939 * In the event of a failure, return the mutex to the
942 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
943 mutex_unlock(&state->slot_mutex);
947 if (type == VCHIQ_MSG_DATA) {
948 ssize_t callback_result;
952 vchiq_log_info(vchiq_core_log_level,
953 "%d: qm %s@%pK,%zx (%d->%d)",
954 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
955 header, size, VCHIQ_MSG_SRCPORT(msgid),
956 VCHIQ_MSG_DSTPORT(msgid));
958 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
959 QMFLAGS_NO_MUTEX_UNLOCK));
962 copy_message_data(copy_callback, context,
965 if (callback_result < 0) {
966 mutex_unlock(&state->slot_mutex);
967 VCHIQ_SERVICE_STATS_INC(service,
972 if (SRVTRACE_ENABLED(service,
974 vchiq_log_dump_mem("Sent", 0,
977 (size_t)callback_result));
979 spin_lock("a_spinlock);
980 quota->message_use_count++;
983 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
986 * If this transmission can't fit in the last slot used by any
987 * service, the data_use_count must be increased.
989 if (tx_end_index != state->previous_data_index) {
990 state->previous_data_index = tx_end_index;
991 state->data_use_count++;
995 * If this isn't the same slot last used by this service,
996 * the service's slot_use_count must be increased.
998 if (tx_end_index != quota->previous_tx_index) {
999 quota->previous_tx_index = tx_end_index;
1000 slot_use_count = ++quota->slot_use_count;
1005 spin_unlock("a_spinlock);
1008 vchiq_log_trace(vchiq_core_log_level,
1009 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1010 state->id, service->localport,
1011 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1012 slot_use_count, header);
1014 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1015 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1017 vchiq_log_info(vchiq_core_log_level,
1018 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1019 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1020 header, size, VCHIQ_MSG_SRCPORT(msgid),
1021 VCHIQ_MSG_DSTPORT(msgid));
1024 * It is assumed for now that this code path
1025 * only happens from calls inside this file.
1027 * External callers are through the vchiq_queue_message
1028 * path which always sets the type to be VCHIQ_MSG_DATA
1030 * At first glance this appears to be correct but
1031 * more review is needed.
1033 copy_message_data(copy_callback, context,
1034 header->data, size);
1036 VCHIQ_STATS_INC(state, ctrl_tx_count);
1039 header->msgid = msgid;
1040 header->size = size;
1045 svc_fourcc = service
1046 ? service->base.fourcc
1047 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1049 vchiq_log_info(SRVTRACE_LEVEL(service),
1050 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1051 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1052 VCHIQ_MSG_TYPE(msgid),
1053 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1054 VCHIQ_MSG_SRCPORT(msgid),
1055 VCHIQ_MSG_DSTPORT(msgid),
1059 /* Make sure the new header is visible to the peer. */
1062 /* Make the new tx_pos visible to the peer. */
1063 local->tx_pos = state->local_tx_pos;
1066 if (service && (type == VCHIQ_MSG_CLOSE))
1067 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1069 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1070 mutex_unlock(&state->slot_mutex);
1072 remote_event_signal(&state->remote->trigger);
1074 return VCHIQ_SUCCESS;
1077 /* Called by the slot handler and application threads */
1078 static enum vchiq_status
1079 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1081 ssize_t (*copy_callback)(void *context, void *dest,
1082 size_t offset, size_t maxsize),
1083 void *context, int size, int is_blocking)
1085 struct vchiq_shared_state *local;
1086 struct vchiq_header *header;
1087 ssize_t callback_result;
1089 local = state->local;
1091 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1092 mutex_lock_killable(&state->sync_mutex))
1095 remote_event_wait(&state->sync_release_event, &local->sync_release);
1099 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1103 int oldmsgid = header->msgid;
1105 if (oldmsgid != VCHIQ_MSGID_PADDING)
1106 vchiq_log_error(vchiq_core_log_level,
1107 "%d: qms - msgid %x, not PADDING",
1108 state->id, oldmsgid);
1111 vchiq_log_info(vchiq_sync_log_level,
1112 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1113 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1114 header, size, VCHIQ_MSG_SRCPORT(msgid),
1115 VCHIQ_MSG_DSTPORT(msgid));
1118 copy_message_data(copy_callback, context,
1119 header->data, size);
1121 if (callback_result < 0) {
1122 mutex_unlock(&state->slot_mutex);
1123 VCHIQ_SERVICE_STATS_INC(service,
1129 if (SRVTRACE_ENABLED(service,
1131 vchiq_log_dump_mem("Sent", 0,
1134 (size_t)callback_result));
1136 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1137 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1139 VCHIQ_STATS_INC(state, ctrl_tx_count);
1142 header->size = size;
1143 header->msgid = msgid;
1145 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1148 svc_fourcc = service
1149 ? service->base.fourcc
1150 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1152 vchiq_log_trace(vchiq_sync_log_level,
1153 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1154 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1155 VCHIQ_MSG_TYPE(msgid),
1156 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1157 VCHIQ_MSG_SRCPORT(msgid),
1158 VCHIQ_MSG_DSTPORT(msgid),
1162 remote_event_signal(&state->remote->sync_trigger);
1164 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1165 mutex_unlock(&state->sync_mutex);
1167 return VCHIQ_SUCCESS;
/* Record a use of @slot (body not visible in this chunk). */
claim_slot(struct vchiq_slot_info *slot)
/*
 * Release one claim on the slot holding @header.  When the release count
 * catches up with the use count, the slot is appended to the remote's
 * recycle queue and the peer's recycle event is signalled.
 */
release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
	     struct vchiq_header *header, struct vchiq_service *service)
	mutex_lock(&state->recycle_mutex);

		int msgid = header->msgid;

		if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
		    (service && service->closing)) {
			mutex_unlock(&state->recycle_mutex);

		/* Rewrite the message header to prevent a double release */
		header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;

	slot_info->release_count++;

	if (slot_info->release_count == slot_info->use_count) {
		int slot_queue_recycle;
		/* Add to the freed queue */

		/*
		 * A read barrier is necessary here to prevent speculative
		 * fetches of remote->slot_queue_recycle from overtaking the
		 */
		slot_queue_recycle = state->remote->slot_queue_recycle;
		state->remote->slot_queue[slot_queue_recycle &
			VCHIQ_SLOT_QUEUE_MASK] =
			SLOT_INDEX_FROM_INFO(state, slot_info);
		state->remote->slot_queue_recycle = slot_queue_recycle + 1;
		vchiq_log_info(vchiq_core_log_level,
			       "%d: %s %d - recycle->%x", state->id, __func__,
			       SLOT_INDEX_FROM_INFO(state, slot_info),
			       state->remote->slot_queue_recycle);

		/*
		 * A write barrier is necessary, but remote_event_signal
		 */
		remote_event_signal(&state->remote->recycle);

	mutex_unlock(&state->recycle_mutex);
/*
 * Map a completed bulk transfer to the callback reason reported to the
 * service: direction (tx/rx) crossed with done/aborted.
 */
static inline enum vchiq_reason
get_bulk_reason(struct vchiq_bulk *bulk)
	if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
		if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
			return VCHIQ_BULK_TRANSMIT_ABORTED;

		return VCHIQ_BULK_TRANSMIT_DONE;

	if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
		return VCHIQ_BULK_RECEIVE_ABORTED;

	return VCHIQ_BULK_RECEIVE_DONE;
/* Called by the slot handler - don't hold the bulk mutex */
/*
 * Deliver completion notifications for bulk transfers on @queue, either by
 * waking a blocking waiter or by invoking the service callback.  A RETRY
 * from the callback re-schedules notification via request_poll().
 */
static enum vchiq_status
notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
	enum vchiq_status status = VCHIQ_SUCCESS;

	vchiq_log_trace(vchiq_core_log_level,
			"%d: nb:%d %cx - p=%x rn=%x r=%x",
			service->state->id, service->localport,
			(queue == &service->bulk_tx) ? 't' : 'r',
			queue->process, queue->remote_notify, queue->remove);

	queue->remote_notify = queue->process;

	while (queue->remove != queue->remote_notify) {
		struct vchiq_bulk *bulk =
			&queue->bulks[BULK_INDEX(queue->remove)];

		/*
		 * Only generate callbacks for non-dummy bulk
		 * requests, and non-terminated services
		 */
		if (bulk->data && service->instance) {
			if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
				if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
					VCHIQ_SERVICE_STATS_INC(service,
					VCHIQ_SERVICE_STATS_ADD(service,
					VCHIQ_SERVICE_STATS_INC(service,
					VCHIQ_SERVICE_STATS_ADD(service,
				VCHIQ_SERVICE_STATS_INC(service,
							bulk_aborted_count);

			if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
				struct bulk_waiter *waiter;

				spin_lock(&bulk_waiter_spinlock);
				waiter = bulk->userdata;
					waiter->actual = bulk->actual;
					complete(&waiter->event);
				spin_unlock(&bulk_waiter_spinlock);
			} else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
				enum vchiq_reason reason =
					get_bulk_reason(bulk);
				status = make_service_callback(service,
							       reason, NULL, bulk->userdata);
				if (status == VCHIQ_RETRY)

		/* Wake anyone waiting for queue space. */
		complete(&service->bulk_remove_event);

		status = VCHIQ_SUCCESS;

	if (status == VCHIQ_RETRY)
		request_poll(service->state, service,
			     (queue == &service->bulk_tx) ?
			     VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
/*
 * Handle pending poll requests for the 32 local ports covered by word
 * @group of state->poll_services: remove/terminate services and deliver
 * bulk notifications as requested.
 */
poll_services_of_group(struct vchiq_state *state, int group)
	/* Atomically claim and clear this word of pending flags. */
	u32 flags = atomic_xchg(&state->poll_services[group], 0);

	for (i = 0; flags; i++) {
		struct vchiq_service *service;

		if ((flags & BIT(i)) == 0)

		service = find_service_by_port(state, (group << 5) + i);

		service_flags = atomic_xchg(&service->poll_flags, 0);
		if ((service_flags & BIT(VCHIQ_POLL_REMOVE)) == 0)

			vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
				       state->id, service->localport,
				       service->remoteport);

			/*
			 * Make it look like a client, because
			 * it must be removed and not left in
			 * the LISTENING state.
			 */
			service->public_fourcc = VCHIQ_FOURCC_INVALID;

			if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
				request_poll(state, service, VCHIQ_POLL_REMOVE);
		} else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
			vchiq_log_info(vchiq_core_log_level,
				       "%d: ps - terminate %d<->%d",
				       state->id, service->localport,
				       service->remoteport);
			if (vchiq_close_service_internal(
				service, NO_CLOSE_RECVD) !=
				request_poll(state, service,
					     VCHIQ_POLL_TERMINATE);

		if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
			notify_bulks(service, &service->bulk_tx,
		if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
			notify_bulks(service, &service->bulk_rx,
		unlock_service(service);
/* Called by the slot handler thread */
/* Process poll requests for every group of local ports. */
poll_services(struct vchiq_state *state)
	for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
		poll_services_of_group(state, group);
/* Called with the bulk_mutex held */
/*
 * Abort every bulk transfer still outstanding on @queue, fabricating dummy
 * entries where the two sides' insert positions disagree so that the queue
 * indices line up again.
 */
abort_outstanding_bulks(struct vchiq_service *service,
			struct vchiq_bulk_queue *queue)
	int is_tx = (queue == &service->bulk_tx);

	vchiq_log_trace(vchiq_core_log_level,
			"%d: aob:%d %cx - li=%x ri=%x p=%x",
			service->state->id, service->localport, is_tx ? 't' : 'r',
			queue->local_insert, queue->remote_insert, queue->process);

	/* process must never be ahead of either insert index. */
	WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
	WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));

	while ((queue->process != queue->local_insert) ||
	       (queue->process != queue->remote_insert)) {
		struct vchiq_bulk *bulk =
			&queue->bulks[BULK_INDEX(queue->process)];

		if (queue->process == queue->remote_insert) {
			/* fabricate a matching dummy bulk */
			bulk->remote_data = NULL;
			bulk->remote_size = 0;
			queue->remote_insert++;

		if (queue->process != queue->local_insert) {
			vchiq_complete_bulk(bulk);

			vchiq_log_info(SRVTRACE_LEVEL(service),
				       "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
				       is_tx ? "Send Bulk to" : "Recv Bulk from",
				       VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
				       service->remoteport,

			/* fabricate a matching dummy bulk */
			bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
			bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
			queue->local_insert++;
/*
 * Handle an incoming OPEN message: find a matching listening service,
 * check protocol version compatibility, and acknowledge with OPENACK
 * (synchronous or normal variant depending on service->sync).
 * If no service matches or the request is invalid, reply with CLOSE.
 * Returns non-zero on success; bail_not_ready is taken when a reply
 * must be retried later (presumably re-parsed on the next pass —
 * NOTE(review): the bail path's tail is elided here, confirm).
 */
1440 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1442 struct vchiq_service *service = NULL;
1444 unsigned int localport, remoteport;
1446 msgid = header->msgid;
1447 size = header->size;
1448 localport = VCHIQ_MSG_DSTPORT(msgid);
1449 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* An OPEN must carry at least a vchiq_open_payload. */
1450 if (size >= sizeof(struct vchiq_open_payload)) {
1451 const struct vchiq_open_payload *payload =
1452 (struct vchiq_open_payload *)header->data;
1453 unsigned int fourcc;
1455 fourcc = payload->fourcc;
1456 vchiq_log_info(vchiq_core_log_level,
1457 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1458 state->id, header, localport,
1459 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1461 service = get_listening_service(state, fourcc);
1464 /* A matching service exists */
1465 short version = payload->version;
1466 short version_min = payload->version_min;
/* Both sides must accept some common protocol version. */
1468 if ((service->version < version_min) ||
1469 (version < service->version_min)) {
1470 /* Version mismatch */
1471 vchiq_loud_error_header();
1472 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1473 "version mismatch - local (%d, min %d)"
1474 " vs. remote (%d, min %d)",
1475 state->id, service->localport,
1476 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1477 service->version, service->version_min,
1478 version, version_min);
1479 vchiq_loud_error_footer();
1480 unlock_service(service);
1484 service->peer_version = version;
1486 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1487 struct vchiq_openack_payload ack_payload = {
/* Sync mode requires the peer to speak a new-enough protocol. */
1491 if (state->version_common <
1492 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1495 /* Acknowledge the OPEN */
1496 if (service->sync) {
1497 if (queue_message_sync(
1504 memcpy_copy_callback,
1506 sizeof(ack_payload),
1508 goto bail_not_ready;
1510 if (queue_message(state,
1516 memcpy_copy_callback,
1518 sizeof(ack_payload),
1520 goto bail_not_ready;
1523 /* The service is now open */
1524 vchiq_set_service_state(service,
1525 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1526 : VCHIQ_SRVSTATE_OPEN);
1529 /* Success - the message has been dealt with */
1530 unlock_service(service);
1536 /* No available service, or an invalid request - send a CLOSE */
1537 if (queue_message(state, NULL,
1538 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1539 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1540 goto bail_not_ready;
1546 unlock_service(service);
1552 * parse_message() - parses a single message from the rx slot
1553 * @state: vchiq state struct
1554 * @header: message header
1556 * Context: Process context
1559 * * >= 0 - size of the parsed message payload (without header)
1560 * * -EINVAL - fatal error occurred, bail out is required
1563 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1565 struct vchiq_service *service = NULL;
1566 unsigned int localport, remoteport;
1567 int msgid, size, type, ret = -EINVAL;
1569 DEBUG_INITIALISE(state->local)
1571 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1572 msgid = header->msgid;
1573 DEBUG_VALUE(PARSE_MSGID, msgid);
1574 size = header->size;
1575 type = VCHIQ_MSG_TYPE(msgid);
1576 localport = VCHIQ_MSG_DSTPORT(msgid);
1577 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* DATA messages are counted per-service further down, not here. */
1579 if (type != VCHIQ_MSG_DATA)
1580 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* Service-addressed message types: resolve the destination port first. */
1583 case VCHIQ_MSG_OPENACK:
1584 case VCHIQ_MSG_CLOSE:
1585 case VCHIQ_MSG_DATA:
1586 case VCHIQ_MSG_BULK_RX:
1587 case VCHIQ_MSG_BULK_TX:
1588 case VCHIQ_MSG_BULK_RX_DONE:
1589 case VCHIQ_MSG_BULK_TX_DONE:
1590 service = find_service_by_port(state, localport);
1592 ((service->remoteport != remoteport) &&
1593 (service->remoteport != VCHIQ_PORT_FREE))) &&
1595 (type == VCHIQ_MSG_CLOSE)) {
1597 * This could be a CLOSE from a client which
1598 * hadn't yet received the OPENACK - look for
1599 * the connected service
1602 unlock_service(service);
1603 service = get_connected_service(state,
1606 vchiq_log_warning(vchiq_core_log_level,
1607 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1608 state->id, msg_type_str(type),
1609 header, remoteport, localport,
1610 service->localport);
1614 vchiq_log_error(vchiq_core_log_level,
1615 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1616 state->id, msg_type_str(type),
1617 header, remoteport, localport,
1626 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1629 svc_fourcc = service
1630 ? service->base.fourcc
1631 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1632 vchiq_log_info(SRVTRACE_LEVEL(service),
1633 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1634 msg_type_str(type), type,
1635 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1636 remoteport, localport, size);
1638 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Sanity check: the message must fit inside its slot. */
1642 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1643 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1644 vchiq_log_error(vchiq_core_log_level,
1645 "header %pK (msgid %x) - size %x too big for slot",
1646 header, (unsigned int)msgid,
1647 (unsigned int)size);
1648 WARN(1, "oversized for slot\n");
1652 case VCHIQ_MSG_OPEN:
1653 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1654 if (!parse_open(state, header))
1655 goto bail_not_ready;
1657 case VCHIQ_MSG_OPENACK:
1658 if (size >= sizeof(struct vchiq_openack_payload)) {
1659 const struct vchiq_openack_payload *payload =
1660 (struct vchiq_openack_payload *)
1662 service->peer_version = payload->version;
1664 vchiq_log_info(vchiq_core_log_level,
1665 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1666 state->id, header, size, remoteport, localport,
1667 service->peer_version);
1668 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1669 service->remoteport = remoteport;
1670 vchiq_set_service_state(service,
1671 VCHIQ_SRVSTATE_OPEN);
/* Wake the thread blocked in vchiq_open_service_internal(). */
1672 complete(&service->remove_event);
1674 vchiq_log_error(vchiq_core_log_level,
1675 "OPENACK received in state %s",
1676 srvstate_names[service->srvstate]);
1679 case VCHIQ_MSG_CLOSE:
1680 WARN_ON(size != 0); /* There should be no data */
1682 vchiq_log_info(vchiq_core_log_level,
1683 "%d: prs CLOSE@%pK (%d->%d)",
1684 state->id, header, remoteport, localport);
1686 mark_service_closing_internal(service, 1);
/* VCHIQ_RETRY => re-parse this CLOSE later rather than losing it. */
1688 if (vchiq_close_service_internal(service,
1689 CLOSE_RECVD) == VCHIQ_RETRY)
1690 goto bail_not_ready;
1692 vchiq_log_info(vchiq_core_log_level,
1693 "Close Service %c%c%c%c s:%u d:%d",
1694 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1696 service->remoteport);
1698 case VCHIQ_MSG_DATA:
1699 vchiq_log_info(vchiq_core_log_level,
1700 "%d: prs DATA@%pK,%x (%d->%d)",
1701 state->id, header, size, remoteport, localport);
1703 if ((service->remoteport == remoteport) &&
1704 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
/* Claim the slot so it isn't recycled while the client holds it. */
1705 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1706 claim_slot(state->rx_info);
1707 DEBUG_TRACE(PARSE_LINE);
1708 if (make_service_callback(service,
1709 VCHIQ_MESSAGE_AVAILABLE, header,
1710 NULL) == VCHIQ_RETRY) {
1711 DEBUG_TRACE(PARSE_LINE);
1712 goto bail_not_ready;
1714 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1715 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1718 VCHIQ_STATS_INC(state, error_count);
1721 case VCHIQ_MSG_CONNECT:
1722 vchiq_log_info(vchiq_core_log_level,
1723 "%d: prs CONNECT@%pK", state->id, header);
/* Adopt the protocol version negotiated in slot zero. */
1724 state->version_common = ((struct vchiq_slot_zero *)
1725 state->slot_data)->version;
1726 complete(&state->connect);
1728 case VCHIQ_MSG_BULK_RX:
1729 case VCHIQ_MSG_BULK_TX:
1731 * We should never receive a bulk request from the
1732 * other side since we're not setup to perform as the
1737 case VCHIQ_MSG_BULK_RX_DONE:
1738 case VCHIQ_MSG_BULK_TX_DONE:
1739 if ((service->remoteport == remoteport) &&
1740 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1741 struct vchiq_bulk_queue *queue;
1742 struct vchiq_bulk *bulk;
1744 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1745 &service->bulk_rx : &service->bulk_tx;
1747 DEBUG_TRACE(PARSE_LINE);
1748 if (mutex_lock_killable(&service->bulk_mutex)) {
1749 DEBUG_TRACE(PARSE_LINE);
1750 goto bail_not_ready;
/* A DONE without a matching local insert is a protocol violation. */
1752 if ((int)(queue->remote_insert -
1753 queue->local_insert) >= 0) {
1754 vchiq_log_error(vchiq_core_log_level,
1755 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1756 state->id, msg_type_str(type),
1757 header, remoteport, localport,
1758 queue->remote_insert,
1759 queue->local_insert);
1760 mutex_unlock(&service->bulk_mutex);
1763 if (queue->process != queue->remote_insert) {
1764 pr_err("%s: p %x != ri %x\n",
1767 queue->remote_insert);
1768 mutex_unlock(&service->bulk_mutex);
1769 goto bail_not_ready;
/* The payload carries the number of bytes actually transferred. */
1772 bulk = &queue->bulks[
1773 BULK_INDEX(queue->remote_insert)];
1774 bulk->actual = *(int *)header->data;
1775 queue->remote_insert++;
1777 vchiq_log_info(vchiq_core_log_level,
1778 "%d: prs %s@%pK (%d->%d) %x@%pad",
1779 state->id, msg_type_str(type),
1780 header, remoteport, localport,
1781 bulk->actual, &bulk->data);
1783 vchiq_log_trace(vchiq_core_log_level,
1784 "%d: prs:%d %cx li=%x ri=%x p=%x",
1785 state->id, localport,
1786 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1788 queue->local_insert,
1789 queue->remote_insert, queue->process);
1791 DEBUG_TRACE(PARSE_LINE);
1792 WARN_ON(queue->process == queue->local_insert);
1793 vchiq_complete_bulk(bulk);
1795 mutex_unlock(&service->bulk_mutex);
1796 DEBUG_TRACE(PARSE_LINE);
1797 notify_bulks(service, queue, 1/*retry_poll*/);
1798 DEBUG_TRACE(PARSE_LINE);
1801 case VCHIQ_MSG_PADDING:
1802 vchiq_log_trace(vchiq_core_log_level,
1803 "%d: prs PADDING@%pK,%x",
1804 state->id, header, size);
1806 case VCHIQ_MSG_PAUSE:
1807 /* If initiated, signal the application thread */
1808 vchiq_log_trace(vchiq_core_log_level,
1809 "%d: prs PAUSE@%pK,%x",
1810 state->id, header, size);
1811 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1812 vchiq_log_error(vchiq_core_log_level,
1813 "%d: PAUSE received in state PAUSED",
1817 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1818 /* Send a PAUSE in response */
1819 if (queue_message(state, NULL,
1820 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1821 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1823 goto bail_not_ready;
1825 /* At this point slot_mutex is held */
1826 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1828 case VCHIQ_MSG_RESUME:
1829 vchiq_log_trace(vchiq_core_log_level,
1830 "%d: prs RESUME@%pK,%x",
1831 state->id, header, size);
1832 /* Release the slot mutex */
1833 mutex_unlock(&state->slot_mutex);
1834 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1837 case VCHIQ_MSG_REMOTE_USE:
1838 vchiq_on_remote_use(state);
1840 case VCHIQ_MSG_REMOTE_RELEASE:
1841 vchiq_on_remote_release(state);
1843 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1847 vchiq_log_error(vchiq_core_log_level,
1848 "%d: prs invalid msgid %x@%pK,%x",
1849 state->id, msgid, header, size);
1850 WARN(1, "invalid message\n");
1859 unlock_service(service);
1864 /* Called by the slot handler thread */
/*
 * Consume every message the remote has published (remote->tx_pos)
 * that we have not yet parsed (state->rx_pos), mapping in each new
 * slot as the read position crosses a slot boundary.
 */
1866 parse_rx_slots(struct vchiq_state *state)
1868 struct vchiq_shared_state *remote = state->remote;
1871 DEBUG_INITIALISE(state->local)
1873 tx_pos = remote->tx_pos;
1875 while (state->rx_pos != tx_pos) {
1876 struct vchiq_header *header;
1879 DEBUG_TRACE(PARSE_LINE);
/* Starting a fresh slot: look up its data and info entries. */
1880 if (!state->rx_data) {
1883 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1884 rx_index = remote->slot_queue[
1885 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1886 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1888 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1891 * Initialise use_count to one, and increment
1892 * release_count at the end of the slot to avoid
1893 * releasing the slot prematurely.
1895 state->rx_info->use_count = 1;
1896 state->rx_info->release_count = 0;
1899 header = (struct vchiq_header *)(state->rx_data +
1900 (state->rx_pos & VCHIQ_SLOT_MASK));
1901 size = parse_message(state, header);
/* Advance past header + payload, padded to the message stride. */
1905 state->rx_pos += calc_stride(size);
1907 DEBUG_TRACE(PARSE_LINE);
1909 * Perform some housekeeping when the end of the slot is
1912 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1913 /* Remove the extra reference count. */
1914 release_slot(state, state->rx_info, NULL, NULL);
1915 state->rx_data = NULL;
1920 /* Called by the slot handler thread */
/*
 * Main loop of the slot handler kthread: wait for the remote trigger
 * event, handle any deferred poll work / pause-resume transitions,
 * then parse all newly-arrived rx slots.
 */
1922 slot_handler_func(void *v)
1924 struct vchiq_state *state = v;
1925 struct vchiq_shared_state *local = state->local;
1927 DEBUG_INITIALISE(local)
1930 DEBUG_COUNT(SLOT_HANDLER_COUNT);
1931 DEBUG_TRACE(SLOT_HANDLER_LINE);
/* Block until the peer signals new work via the shared trigger. */
1932 remote_event_wait(&state->trigger_event, &local->trigger);
1936 DEBUG_TRACE(SLOT_HANDLER_LINE);
1937 if (state->poll_needed) {
1939 state->poll_needed = 0;
1942 * Handle service polling and other rare conditions here
1943 * out of the mainline code
1945 switch (state->conn_state) {
1946 case VCHIQ_CONNSTATE_CONNECTED:
1947 /* Poll the services as requested */
1948 poll_services(state);
1951 case VCHIQ_CONNSTATE_PAUSING:
1952 if (queue_message(state, NULL,
1953 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1955 QMFLAGS_NO_MUTEX_UNLOCK)
1957 vchiq_set_conn_state(state,
1958 VCHIQ_CONNSTATE_PAUSE_SENT);
/* PAUSE couldn't be sent yet - retry on the next wakeup. */
1961 state->poll_needed = 1;
1965 case VCHIQ_CONNSTATE_RESUMING:
1966 if (queue_message(state, NULL,
1967 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
1968 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
1970 vchiq_set_conn_state(state,
1971 VCHIQ_CONNSTATE_CONNECTED);
1974 * This should really be impossible,
1975 * since the PAUSE should have flushed
1976 * through outstanding messages.
1978 vchiq_log_error(vchiq_core_log_level,
1979 "Failed to send RESUME message");
1988 DEBUG_TRACE(SLOT_HANDLER_LINE);
1989 parse_rx_slots(state);
1994 /* Called by the recycle thread */
/*
 * Recycle kthread: waits on the shared recycle event and then returns
 * fully-released slots to the free queue. The scratch "found" bitset
 * (one bit per service) is allocated once and reused on every pass.
 */
1996 recycle_func(void *v)
1998 struct vchiq_state *state = v;
1999 struct vchiq_shared_state *local = state->local;
2003 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
2005 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
2011 remote_event_wait(&state->recycle_event, &local->recycle);
2013 process_free_queue(state, found, length);
2018 /* Called by the sync thread */
/*
 * Sync-mode kthread: services the single dedicated synchronous slot.
 * Each wakeup handles exactly one message (OPENACK for a sync service,
 * or DATA delivered straight to the service callback); anything else
 * is logged and released.
 */
2022 struct vchiq_state *state = v;
2023 struct vchiq_shared_state *local = state->local;
/* The sync slot lives at a fixed index published by the remote side. */
2024 struct vchiq_header *header =
2025 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2026 state->remote->slot_sync);
2029 struct vchiq_service *service;
2032 unsigned int localport, remoteport;
2034 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2038 msgid = header->msgid;
2039 size = header->size;
2040 type = VCHIQ_MSG_TYPE(msgid);
2041 localport = VCHIQ_MSG_DSTPORT(msgid);
2042 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2044 service = find_service_by_port(state, localport);
/* Unknown or closed destination: log, release the slot, move on. */
2047 vchiq_log_error(vchiq_sync_log_level,
2048 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2049 state->id, msg_type_str(type),
2050 header, remoteport, localport, localport);
2051 release_message_sync(state, header);
2055 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2058 svc_fourcc = service
2059 ? service->base.fourcc
2060 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2061 vchiq_log_trace(vchiq_sync_log_level,
2062 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2064 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2065 remoteport, localport, size);
2067 vchiq_log_dump_mem("Rcvd", 0, header->data,
2072 case VCHIQ_MSG_OPENACK:
2073 if (size >= sizeof(struct vchiq_openack_payload)) {
2074 const struct vchiq_openack_payload *payload =
2075 (struct vchiq_openack_payload *)
2077 service->peer_version = payload->version;
2079 vchiq_log_info(vchiq_sync_log_level,
2080 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2081 state->id, header, size, remoteport, localport,
2082 service->peer_version);
2083 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2084 service->remoteport = remoteport;
2085 vchiq_set_service_state(service,
2086 VCHIQ_SRVSTATE_OPENSYNC);
/* Wake the opener blocked in vchiq_open_service_internal(). */
2088 complete(&service->remove_event);
2090 release_message_sync(state, header);
2093 case VCHIQ_MSG_DATA:
2094 vchiq_log_trace(vchiq_sync_log_level,
2095 "%d: sf DATA@%pK,%x (%d->%d)",
2096 state->id, header, size, remoteport, localport);
2098 if ((service->remoteport == remoteport) &&
2099 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2100 if (make_service_callback(service,
2101 VCHIQ_MESSAGE_AVAILABLE, header,
2102 NULL) == VCHIQ_RETRY)
2103 vchiq_log_error(vchiq_sync_log_level,
2104 "synchronous callback to service %d returns VCHIQ_RETRY",
2110 vchiq_log_error(vchiq_sync_log_level,
2111 "%d: sf unexpected msgid %x@%pK,%x",
2112 state->id, msgid, header, size);
2113 release_message_sync(state, header);
2117 unlock_service(service);
/* Reset a bulk queue's free-running indices to their initial state. */
2124 init_bulk_queue(struct vchiq_bulk_queue *queue)
2126 queue->local_insert = 0;
2127 queue->remote_insert = 0;
2129 queue->remote_notify = 0;
/* Map a connection state enum to its human-readable name for logging. */
2134 get_conn_state_name(enum vchiq_connstate conn_state)
2136 return conn_state_names[conn_state];
/*
 * Lay out the shared slot memory: align the base to a slot boundary,
 * place slot zero (the control block) there, and split the remaining
 * data slots evenly between the master and slave sides, with one sync
 * slot reserved for each. Returns the initialised slot-zero pointer,
 * or fails if fewer than four data slots fit.
 */
2139 struct vchiq_slot_zero *
2140 vchiq_init_slots(void *mem_base, int mem_size)
2143 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2144 struct vchiq_slot_zero *slot_zero =
2145 (struct vchiq_slot_zero *)(mem_base + mem_align);
2146 int num_slots = (mem_size - mem_align)/VCHIQ_SLOT_SIZE;
2147 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2149 /* Ensure there is enough memory to run an absolutely minimum system */
2150 num_slots -= first_data_slot;
2152 if (num_slots < 4) {
2153 vchiq_log_error(vchiq_core_log_level,
2154 "%s - insufficient memory %x bytes",
2155 __func__, mem_size);
2159 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2161 slot_zero->magic = VCHIQ_MAGIC;
2162 slot_zero->version = VCHIQ_VERSION;
2163 slot_zero->version_min = VCHIQ_VERSION_MIN;
2164 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2165 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2166 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2167 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* First half of the data slots goes to the master, second to the slave. */
2169 slot_zero->master.slot_sync = first_data_slot;
2170 slot_zero->master.slot_first = first_data_slot + 1;
2171 slot_zero->master.slot_last = first_data_slot + (num_slots/2) - 1;
2172 slot_zero->slave.slot_sync = first_data_slot + (num_slots/2);
2173 slot_zero->slave.slot_first = first_data_slot + (num_slots/2) + 1;
2174 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * Initialise the (single) local VCHIQ state from a prepared slot-zero
 * block: wire up shared-state pointers, events, mutexes and quotas,
 * then start the slot-handler, recycle and sync kthreads. On success
 * the shared "initialised" flag is raised to signal the remote side.
 */
2180 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2182 struct vchiq_shared_state *local;
2183 struct vchiq_shared_state *remote;
2184 char threadname[16];
/* Only one state is supported; refuse double initialisation. */
2187 if (vchiq_states[0]) {
2188 pr_err("%s: VCHIQ state already initialized\n", __func__);
2192 local = &slot_zero->slave;
2193 remote = &slot_zero->master;
2195 if (local->initialised) {
2196 vchiq_loud_error_header();
2197 if (remote->initialised)
2198 vchiq_loud_error("local state has already been initialised");
2200 vchiq_loud_error("master/slave mismatch two slaves");
2201 vchiq_loud_error_footer();
2205 memset(state, 0, sizeof(struct vchiq_state));
2208 * initialize shared state pointers
2211 state->local = local;
2212 state->remote = remote;
2213 state->slot_data = (struct vchiq_slot *)slot_zero;
2216 * initialize events and mutexes
2219 init_completion(&state->connect);
2220 mutex_init(&state->mutex);
2221 mutex_init(&state->slot_mutex);
2222 mutex_init(&state->recycle_mutex);
2223 mutex_init(&state->sync_mutex);
2224 mutex_init(&state->bulk_transfer_mutex);
2226 init_completion(&state->slot_available_event);
2227 init_completion(&state->slot_remove_event);
2228 init_completion(&state->data_quota_event);
2230 state->slot_queue_available = 0;
2232 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2233 struct vchiq_service_quota *quota = &state->service_quotas[i];
/*
 * Fixed: this argument had been corrupted to the string literal
 * "a->quota_event ("&quot" mis-decoded to '"'); init_completion()
 * takes the address of the per-service quota completion.
 */
2234 init_completion(&quota->quota_event);
2237 for (i = local->slot_first; i <= local->slot_last; i++) {
2238 local->slot_queue[state->slot_queue_available] = i;
2239 state->slot_queue_available++;
2240 complete(&state->slot_available_event);
2243 state->default_slot_quota = state->slot_queue_available/2;
2244 state->default_message_quota =
2245 min((unsigned short)(state->default_slot_quota * 256),
2246 (unsigned short)~0);
2248 state->previous_data_index = -1;
2249 state->data_use_count = 0;
2250 state->data_quota = state->slot_queue_available - 1;
2252 remote_event_create(&state->trigger_event, &local->trigger);
2254 remote_event_create(&state->recycle_event, &local->recycle);
2255 local->slot_queue_recycle = state->slot_queue_available;
2256 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2257 remote_event_create(&state->sync_release_event, &local->sync_release);
2259 /* At start-of-day, the slot is empty and available */
2260 ((struct vchiq_header *)
2261 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2262 VCHIQ_MSGID_PADDING;
2263 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2265 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2267 ret = vchiq_platform_init_state(state);
2272 * bring up slot handler thread
2274 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2275 state->slot_handler_thread = kthread_create(&slot_handler_func,
2279 if (IS_ERR(state->slot_handler_thread)) {
2280 vchiq_loud_error_header();
2281 vchiq_loud_error("couldn't create thread %s", threadname);
2282 vchiq_loud_error_footer();
2283 return PTR_ERR(state->slot_handler_thread);
2285 set_user_nice(state->slot_handler_thread, -19);
2287 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2288 state->recycle_thread = kthread_create(&recycle_func,
2291 if (IS_ERR(state->recycle_thread)) {
2292 vchiq_loud_error_header();
2293 vchiq_loud_error("couldn't create thread %s", threadname);
2294 vchiq_loud_error_footer();
2295 ret = PTR_ERR(state->recycle_thread);
2296 goto fail_free_handler_thread;
2298 set_user_nice(state->recycle_thread, -19);
2300 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2301 state->sync_thread = kthread_create(&sync_func,
2304 if (IS_ERR(state->sync_thread)) {
2305 vchiq_loud_error_header();
2306 vchiq_loud_error("couldn't create thread %s", threadname);
2307 vchiq_loud_error_footer();
2308 ret = PTR_ERR(state->sync_thread);
2309 goto fail_free_recycle_thread;
2311 set_user_nice(state->sync_thread, -20);
/* Only start the threads after all three were created successfully. */
2313 wake_up_process(state->slot_handler_thread);
2314 wake_up_process(state->recycle_thread);
2315 wake_up_process(state->sync_thread);
2317 vchiq_states[0] = state;
2319 /* Indicate readiness to the other side */
2320 local->initialised = 1;
2324 fail_free_recycle_thread:
2325 kthread_stop(state->recycle_thread);
2326 fail_free_handler_thread:
2327 kthread_stop(state->slot_handler_thread);
/*
 * Append a received message header to the service's ring of held
 * messages, blocking (interruptibly, signals flushed) while the ring
 * is full. Paired with vchiq_msg_hold() via the push/pop completions.
 */
2332 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2334 struct vchiq_service *service = find_service_by_handle(handle);
/* Ring full when write has lapped read by the ring capacity. */
2337 while (service->msg_queue_write == service->msg_queue_read +
2339 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2340 flush_signals(current);
/* Indices are free-running; mask to get the ring position. */
2343 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2344 service->msg_queue_write++;
2345 service->msg_queue[pos] = header;
2347 complete(&service->msg_queue_push);
2349 EXPORT_SYMBOL(vchiq_msg_queue_push);
/*
 * Take the oldest held message off the service's message ring.
 * Returns NULL immediately if the ring is empty at entry; otherwise
 * waits (interruptibly) until a message is available, then signals
 * msg_queue_pop to unblock any producer stuck in vchiq_msg_queue_push().
 */
2351 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2353 struct vchiq_service *service = find_service_by_handle(handle);
2354 struct vchiq_header *header;
/* Empty ring: nothing to hold. */
2357 if (service->msg_queue_write == service->msg_queue_read)
2360 while (service->msg_queue_write == service->msg_queue_read) {
2361 if (wait_for_completion_interruptible(&service->msg_queue_push))
2362 flush_signals(current);
2365 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2366 service->msg_queue_read++;
2367 header = service->msg_queue[pos];
2369 complete(&service->msg_queue_pop);
2373 EXPORT_SYMBOL(vchiq_msg_hold);
/* Reject service creation parameters missing a callback or fourcc. */
2375 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2377 if (!params->callback || !params->fourcc) {
2378 vchiq_loud_error("Can't add service, invalid params\n");
2385 /* Called from application thread when a client or server service is created. */
/*
 * Allocate and register a new service in the state's service table.
 * Clients (OPENING) take the first free table entry; servers scan for
 * fourcc clashes with other servers before claiming a slot. The new
 * service is returned locked with a ref_count of 1.
 */
2386 struct vchiq_service *
2387 vchiq_add_service_internal(struct vchiq_state *state,
2388 const struct vchiq_service_params_kernel *params,
2389 int srvstate, struct vchiq_instance *instance,
2390 vchiq_userdata_term userdata_term)
2392 struct vchiq_service *service;
2393 struct vchiq_service __rcu **pservice = NULL;
2394 struct vchiq_service_quota *quota;
2398 ret = vchiq_validate_params(params);
2402 service = kmalloc(sizeof(*service), GFP_KERNEL);
2406 service->base.fourcc = params->fourcc;
2407 service->base.callback = params->callback;
2408 service->base.userdata = params->userdata;
2409 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2410 kref_init(&service->ref_count);
2411 service->srvstate = VCHIQ_SRVSTATE_FREE;
2412 service->userdata_term = userdata_term;
2413 service->localport = VCHIQ_PORT_FREE;
2414 service->remoteport = VCHIQ_PORT_FREE;
/* Clients are not discoverable by fourcc; servers are. */
2416 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2417 VCHIQ_FOURCC_INVALID : params->fourcc;
2418 service->client_id = 0;
2419 service->auto_close = 1;
2421 service->closing = 0;
2423 atomic_set(&service->poll_flags, 0);
2424 service->version = params->version;
2425 service->version_min = params->version_min;
2426 service->state = state;
2427 service->instance = instance;
2428 service->service_use_count = 0;
2429 service->msg_queue_read = 0;
2430 service->msg_queue_write = 0;
2431 init_bulk_queue(&service->bulk_tx);
2432 init_bulk_queue(&service->bulk_rx);
2433 init_completion(&service->remove_event);
2434 init_completion(&service->bulk_remove_event);
2435 init_completion(&service->msg_queue_pop);
2436 init_completion(&service->msg_queue_push);
2437 mutex_init(&service->bulk_mutex);
2438 memset(&service->stats, 0, sizeof(service->stats));
2439 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2442 * Although it is perfectly possible to use a spinlock
2443 * to protect the creation of services, it is overkill as it
2444 * disables interrupts while the array is searched.
2445 * The only danger is of another thread trying to create a
2446 * service - service deletion is safe.
2447 * Therefore it is preferable to use state->mutex which,
2448 * although slower to claim, doesn't block interrupts while
2452 mutex_lock(&state->mutex);
2454 /* Prepare to use a previously unused service */
2455 if (state->unused_service < VCHIQ_MAX_SERVICES)
2456 pservice = &state->services[state->unused_service];
2458 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
/* Client: take the first free entry in the table. */
2459 for (i = 0; i < state->unused_service; i++) {
2460 if (!rcu_access_pointer(state->services[i])) {
2461 pservice = &state->services[i];
/* Server: scan for a free entry while rejecting fourcc clashes. */
2467 for (i = (state->unused_service - 1); i >= 0; i--) {
2468 struct vchiq_service *srv;
2470 srv = rcu_dereference(state->services[i]);
2472 pservice = &state->services[i];
2473 } else if ((srv->public_fourcc == params->fourcc) &&
2474 ((srv->instance != instance) ||
2475 (srv->base.callback != params->callback))) {
2477 * There is another server using this
2478 * fourcc which doesn't match.
/* The local port is simply the index of the claimed table entry. */
2488 service->localport = (pservice - state->services);
2490 handle_seq = VCHIQ_MAX_STATES *
2492 service->handle = handle_seq |
2493 (state->id * VCHIQ_MAX_SERVICES) |
2495 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2496 rcu_assign_pointer(*pservice, service);
2497 if (pservice == &state->services[state->unused_service])
2498 state->unused_service++;
2501 mutex_unlock(&state->mutex);
2508 quota = &state->service_quotas[service->localport];
2509 quota->slot_quota = state->default_slot_quota;
2510 quota->message_quota = state->default_message_quota;
2511 if (quota->slot_use_count == 0)
2512 quota->previous_tx_index =
2513 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2516 /* Bring this service online */
2517 vchiq_set_service_state(service, srvstate);
2519 vchiq_log_info(vchiq_core_msg_log_level,
2520 "%s Service %c%c%c%c SrcPort:%d",
2521 (srvstate == VCHIQ_SRVSTATE_OPENING)
2523 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2524 service->localport);
2526 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * Send an OPEN request for a client service and block until the peer
 * answers with OPENACK (-> OPEN/OPENSYNC) or CLOSE. Returns
 * VCHIQ_RETRY if the wait was interrupted, VCHIQ_ERROR if the service
 * did not end up open.
 */
2532 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2534 struct vchiq_open_payload payload = {
2535 service->base.fourcc,
2538 service->version_min
2540 enum vchiq_status status = VCHIQ_SUCCESS;
2542 service->client_id = client_id;
2543 vchiq_use_service_internal(service);
2544 status = queue_message(service->state,
2546 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2549 memcpy_copy_callback,
2552 QMFLAGS_IS_BLOCKING);
2554 if (status != VCHIQ_SUCCESS)
2557 /* Wait for the ACK/NAK */
2558 if (wait_for_completion_interruptible(&service->remove_event)) {
2559 status = VCHIQ_RETRY;
2560 vchiq_release_service_internal(service);
2561 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2562 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
/* CLOSEWAIT is the expected "peer refused" outcome - don't log it. */
2563 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2564 vchiq_log_error(vchiq_core_log_level,
2565 "%d: osi - srvstate = %s (ref %u)",
2567 srvstate_names[service->srvstate],
2568 kref_read(&service->ref_count));
2569 status = VCHIQ_ERROR;
2570 VCHIQ_SERVICE_STATS_INC(service, error_count);
2571 vchiq_release_service_internal(service);
/*
 * Release every message still claimed on behalf of @service: the sync
 * slot (for sync services) and any CLAIMED headers addressed to the
 * service's local port in the remote's data slots. Called during
 * service teardown so slots can be recycled.
 */
2578 release_service_messages(struct vchiq_service *service)
2580 struct vchiq_state *state = service->state;
2581 int slot_last = state->remote->slot_last;
2584 /* Release any claimed messages aimed at this service */
2586 if (service->sync) {
2587 struct vchiq_header *header =
2588 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2589 state->remote->slot_sync);
2590 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2591 release_message_sync(state, header);
2596 for (i = state->remote->slot_first; i <= slot_last; i++) {
2597 struct vchiq_slot_info *slot_info =
2598 SLOT_INFO_FROM_INDEX(state, i);
2599 unsigned int pos, end;
/* Fully released slots have nothing left to scan. */
2602 if (slot_info->release_count == slot_info->use_count)
2605 data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2606 end = VCHIQ_SLOT_SIZE;
2607 if (data == state->rx_data)
2609 * This buffer is still being read from - stop
2610 * at the current read position
2612 end = state->rx_pos & VCHIQ_SLOT_MASK;
/* Walk the headers in this slot up to "end". */
2617 struct vchiq_header *header =
2618 (struct vchiq_header *)(data + pos);
2619 int msgid = header->msgid;
2620 int port = VCHIQ_MSG_DSTPORT(msgid);
2622 if ((port == service->localport) &&
2623 (msgid & VCHIQ_MSGID_CLAIMED)) {
2624 vchiq_log_info(vchiq_core_log_level,
2625 " fsi - hdr %pK", header);
2626 release_slot(state, slot_info, header,
2629 pos += calc_stride(header->size);
2630 if (pos > VCHIQ_SLOT_SIZE) {
2631 vchiq_log_error(vchiq_core_log_level,
2632 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2634 header->msgid, header->size);
2635 WARN(1, "invalid slot position\n");
/*
 * Abort all outstanding bulk transfers (both directions) on a closing
 * service and deliver the completion notifications. Returns non-zero
 * (true) only if both notify passes succeeded.
 */
2642 do_abort_bulks(struct vchiq_service *service)
2644 enum vchiq_status status;
2646 /* Abort any outstanding bulk transfers */
2647 if (mutex_lock_killable(&service->bulk_mutex))
2649 abort_outstanding_bulks(service, &service->bulk_tx);
2650 abort_outstanding_bulks(service, &service->bulk_rx);
2651 mutex_unlock(&service->bulk_mutex);
2653 status = notify_bulks(service, &service->bulk_tx, 0/*!retry_poll*/);
2654 if (status != VCHIQ_SUCCESS)
2657 status = notify_bulks(service, &service->bulk_rx, 0/*!retry_poll*/);
2658 return (status == VCHIQ_SUCCESS);
/*
 * Final stage of closing a service: pick the post-close state (servers
 * with auto_close return to LISTENING, other servers to CLOSEWAIT,
 * clients to CLOSED), deliver the VCHIQ_SERVICE_CLOSED callback, and
 * release any remaining use counts. On VCHIQ_RETRY from the callback
 * the service is parked in @failstate so the close can be re-driven.
 */
2661 static enum vchiq_status
2662 close_service_complete(struct vchiq_service *service, int failstate)
2664 enum vchiq_status status;
2665 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2668 switch (service->srvstate) {
2669 case VCHIQ_SRVSTATE_OPEN:
2670 case VCHIQ_SRVSTATE_CLOSESENT:
2671 case VCHIQ_SRVSTATE_CLOSERECVD:
2673 if (service->auto_close) {
2674 service->client_id = 0;
2675 service->remoteport = VCHIQ_PORT_FREE;
2676 newstate = VCHIQ_SRVSTATE_LISTENING;
2678 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2681 newstate = VCHIQ_SRVSTATE_CLOSED;
2683 vchiq_set_service_state(service, newstate);
2685 case VCHIQ_SRVSTATE_LISTENING:
2688 vchiq_log_error(vchiq_core_log_level,
2689 "%s(%x) called in state %s", __func__,
2690 service->handle, srvstate_names[service->srvstate]);
2691 WARN(1, "%s in unexpected state\n", __func__);
2695 status = make_service_callback(service,
2696 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2698 if (status != VCHIQ_RETRY) {
2699 int uc = service->service_use_count;
2701 /* Complete the close process */
2702 for (i = 0; i < uc; i++)
2704 * cater for cases where close is forced and the
2705 * client may not close all it's handles
2707 vchiq_release_service_internal(service);
2709 service->client_id = 0;
2710 service->remoteport = VCHIQ_PORT_FREE;
2712 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2713 vchiq_free_service_internal(service);
2714 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2716 service->closing = 0;
2718 complete(&service->remove_event);
/* Callback asked for a retry: remember where to resume from. */
2721 vchiq_set_service_state(service, failstate);
2727 /* Called by the slot handler */
/*
 * Drive the service close state machine.
 * @close_recvd: nonzero if a CLOSE message was received from the remote end
 *               (NO_CLOSE_RECVD when the close is locally initiated).
 * May return VCHIQ_RETRY if a CLOSE could not be queued or bulks could not
 * be aborted, in which case the slot handler re-polls.
 */
2729 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2731 struct vchiq_state *state = service->state;
2732 enum vchiq_status status = VCHIQ_SUCCESS;
2733 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2735 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2736 service->state->id, service->localport, close_recvd,
2737 srvstate_names[service->srvstate]);
2739 switch (service->srvstate) {
2740 case VCHIQ_SRVSTATE_CLOSED:
2741 case VCHIQ_SRVSTATE_HIDDEN:
2742 case VCHIQ_SRVSTATE_LISTENING:
2743 case VCHIQ_SRVSTATE_CLOSEWAIT:
/* A CLOSE in these states is unexpected from the remote side */
2745 vchiq_log_error(vchiq_core_log_level,
2746 "%s(1) called in state %s",
2747 __func__, srvstate_names[service->srvstate]);
2748 } else if (is_server) {
2749 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2750 status = VCHIQ_ERROR;
2752 service->client_id = 0;
2753 service->remoteport = VCHIQ_PORT_FREE;
2754 if (service->srvstate ==
2755 VCHIQ_SRVSTATE_CLOSEWAIT)
2756 vchiq_set_service_state(service,
2757 VCHIQ_SRVSTATE_LISTENING);
2759 complete(&service->remove_event);
2761 vchiq_free_service_internal(service);
2764 case VCHIQ_SRVSTATE_OPENING:
2766 /* The open was rejected - tell the user */
2767 vchiq_set_service_state(service,
2768 VCHIQ_SRVSTATE_CLOSEWAIT);
2769 complete(&service->remove_event);
2771 /* Shutdown mid-open - let the other side know */
2772 status = queue_message(state, service,
2776 VCHIQ_MSG_DSTPORT(service->remoteport)),
2781 case VCHIQ_SRVSTATE_OPENSYNC:
/* Sync services serialize close against the sync slot */
2782 mutex_lock(&state->sync_mutex);
2784 case VCHIQ_SRVSTATE_OPEN:
2786 if (!do_abort_bulks(service))
2787 status = VCHIQ_RETRY;
2790 release_service_messages(service);
2792 if (status == VCHIQ_SUCCESS)
2793 status = queue_message(state, service,
2797 VCHIQ_MSG_DSTPORT(service->remoteport)),
/* Keep the slot mutex held so the state change below is atomic */
2798 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2800 if (status != VCHIQ_SUCCESS) {
2801 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2802 mutex_unlock(&state->sync_mutex);
2807 /* Change the state while the mutex is still held */
2808 vchiq_set_service_state(service,
2809 VCHIQ_SRVSTATE_CLOSESENT);
2810 mutex_unlock(&state->slot_mutex);
2812 mutex_unlock(&state->sync_mutex);
2816 /* Change the state while the mutex is still held */
2817 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2818 mutex_unlock(&state->slot_mutex);
2820 mutex_unlock(&state->sync_mutex);
2822 status = close_service_complete(service,
2823 VCHIQ_SRVSTATE_CLOSERECVD);
2826 case VCHIQ_SRVSTATE_CLOSESENT:
2828 /* This happens when a process is killed mid-close */
2831 if (!do_abort_bulks(service)) {
2832 status = VCHIQ_RETRY;
2836 if (status == VCHIQ_SUCCESS)
2837 status = close_service_complete(service,
2838 VCHIQ_SRVSTATE_CLOSERECVD);
2841 case VCHIQ_SRVSTATE_CLOSERECVD:
2842 if (!close_recvd && is_server)
2843 /* Force into LISTENING mode */
2844 vchiq_set_service_state(service,
2845 VCHIQ_SRVSTATE_LISTENING);
2846 status = close_service_complete(service,
2847 VCHIQ_SRVSTATE_CLOSERECVD);
2851 vchiq_log_error(vchiq_core_log_level,
2852 "%s(%d) called in state %s", __func__,
2853 close_recvd, srvstate_names[service->srvstate]);
2860 /* Called from the application process upon process death */
/*
 * Flag the service as closing and hand the actual teardown off to the
 * slot handler thread via a REMOVE poll — nothing is freed here.
 */
2862 vchiq_terminate_service_internal(struct vchiq_service *service)
2864 struct vchiq_state *state = service->state;
2866 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2867 state->id, service->localport, service->remoteport);
2869 mark_service_closing(service);
2871 /* Mark the service for removal by the slot handler */
2872 request_poll(state, service, VCHIQ_POLL_REMOVE);
2875 /* Called from the slot handler */
/*
 * Move a service to the FREE state and drop the initial reference taken
 * at creation.  Only legal from the quiescent states listed below; any
 * other state is logged as an error.
 */
2877 vchiq_free_service_internal(struct vchiq_service *service)
2879 struct vchiq_state *state = service->state;
2881 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2882 state->id, service->localport);
2884 switch (service->srvstate) {
2885 case VCHIQ_SRVSTATE_OPENING:
2886 case VCHIQ_SRVSTATE_CLOSED:
2887 case VCHIQ_SRVSTATE_HIDDEN:
2888 case VCHIQ_SRVSTATE_LISTENING:
2889 case VCHIQ_SRVSTATE_CLOSEWAIT:
2892 vchiq_log_error(vchiq_core_log_level,
2893 "%d: fsi - (%d) in state %s",
2894 state->id, service->localport,
2895 srvstate_names[service->srvstate]);
2899 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
/* Wake anyone blocked in vchiq_close/remove_service */
2901 complete(&service->remove_event);
2903 /* Release the initial lock */
2904 unlock_service(service);
/*
 * Connect this instance to the remote end: expose its HIDDEN services as
 * LISTENING, send a CONNECT message if the link is still disconnected,
 * then wait for the peer's CONNECT before declaring the state CONNECTED.
 * Returns VCHIQ_RETRY if the message could not be queued or the wait was
 * interrupted by a signal.
 */
2908 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2910 struct vchiq_service *service;
2913 /* Find all services registered to this client and enable them. */
2915 while ((service = next_service_by_instance(state, instance,
2917 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2918 vchiq_set_service_state(service,
2919 VCHIQ_SRVSTATE_LISTENING);
2920 unlock_service(service);
2923 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2924 if (queue_message(state, NULL,
2925 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2926 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
2929 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
2932 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
2933 if (wait_for_completion_interruptible(&state->connect))
2936 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
/* Re-complete so any other waiter on state->connect also wakes */
2937 complete(&state->connect);
2940 return VCHIQ_SUCCESS;
/*
 * Tear down an instance: walk every service registered to it and remove
 * each one.  The return value of vchiq_remove_service is deliberately
 * ignored — shutdown is best-effort per service.
 */
2944 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2946 struct vchiq_service *service;
2949 /* Find all services registered to this client and remove them. */
2951 while ((service = next_service_by_instance(state, instance,
2953 (void)vchiq_remove_service(service->handle);
2954 unlock_service(service);
/*
 * Public API: close a service by handle.  If called from the slot handler
 * thread the close runs inline; otherwise the service is marked for
 * termination and this thread blocks on remove_event until the close
 * completes.  Returns VCHIQ_RETRY if interrupted by a signal.
 */
2959 vchiq_close_service(unsigned int handle)
2961 /* Unregister the service */
2962 struct vchiq_service *service = find_service_by_handle(handle);
2963 enum vchiq_status status = VCHIQ_SUCCESS;
2968 vchiq_log_info(vchiq_core_log_level,
2969 "%d: close_service:%d",
2970 service->state->id, service->localport);
/* Nothing to close in these states */
2972 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2973 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2974 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
2975 unlock_service(service);
2979 mark_service_closing(service);
2981 if (current == service->state->slot_handler_thread) {
2982 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
2983 WARN_ON(status == VCHIQ_RETRY);
2985 /* Mark the service for termination by the slot handler */
2986 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
2990 if (wait_for_completion_interruptible(&service->remove_event)) {
2991 status = VCHIQ_RETRY;
2995 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
2996 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
2997 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3000 vchiq_log_warning(vchiq_core_log_level,
3001 "%d: close_service:%d - waiting in state %s",
3002 service->state->id, service->localport,
3003 srvstate_names[service->srvstate]);
3006 if ((status == VCHIQ_SUCCESS) &&
3007 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
3008 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
3009 status = VCHIQ_ERROR;
3011 unlock_service(service);
3015 EXPORT_SYMBOL(vchiq_close_service);
/*
 * Public API: remove a service entirely (unlike close, a server must not
 * be left LISTENING).  HIDDEN services and calls from the slot handler
 * thread are handled inline; otherwise the slot handler does the removal
 * and this thread waits on remove_event.
 */
3018 vchiq_remove_service(unsigned int handle)
3020 /* Unregister the service */
3021 struct vchiq_service *service = find_service_by_handle(handle);
3022 enum vchiq_status status = VCHIQ_SUCCESS;
3027 vchiq_log_info(vchiq_core_log_level,
3028 "%d: remove_service:%d",
3029 service->state->id, service->localport);
3031 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3032 unlock_service(service);
3036 mark_service_closing(service);
3038 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3039 (current == service->state->slot_handler_thread)) {
3041 * Make it look like a client, because it must be removed and
3042 * not left in the LISTENING state.
3044 service->public_fourcc = VCHIQ_FOURCC_INVALID;
3046 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3047 WARN_ON(status == VCHIQ_RETRY);
3049 /* Mark the service for removal by the slot handler */
3050 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3053 if (wait_for_completion_interruptible(&service->remove_event)) {
3054 status = VCHIQ_RETRY;
3058 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3059 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3062 vchiq_log_warning(vchiq_core_log_level,
3063 "%d: remove_service:%d - waiting in state %s",
3064 service->state->id, service->localport,
3065 srvstate_names[service->srvstate]);
3068 if ((status == VCHIQ_SUCCESS) &&
3069 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3070 status = VCHIQ_ERROR;
3072 unlock_service(service);
/*
 * Queue a bulk transmit/receive on a service.
 * @offset/@uoffset: kernel or user buffer (exactly one must be non-NULL).
 * @mode: CALLBACK, NOCALLBACK, BLOCKING (userdata is a struct bulk_waiter)
 *        or WAITING (resume a previously queued blocking transfer).
 * Lock order here is bulk_mutex then slot_mutex; the goto error labels
 * unwind in reverse order.  Returns VCHIQ_RETRY on signal interruption.
 */
3078 * This function may be called by kernel threads or user threads.
3079 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3080 * received and the call should be retried after being returned to user
3082 * When called in blocking mode, the userdata field points to a bulk_waiter
3085 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3086 void *offset, void __user *uoffset,
3087 int size, void *userdata,
3088 enum vchiq_bulk_mode mode,
3089 enum vchiq_bulk_dir dir)
3091 struct vchiq_service *service = find_service_by_handle(handle);
3092 struct vchiq_bulk_queue *queue;
3093 struct vchiq_bulk *bulk;
3094 struct vchiq_state *state;
3095 struct bulk_waiter *bulk_waiter = NULL;
3096 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3097 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3098 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3099 enum vchiq_status status = VCHIQ_ERROR;
3105 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3108 if (!offset && !uoffset)
3111 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3115 case VCHIQ_BULK_MODE_NOCALLBACK:
3116 case VCHIQ_BULK_MODE_CALLBACK:
3118 case VCHIQ_BULK_MODE_BLOCKING:
3119 bulk_waiter = userdata;
3120 init_completion(&bulk_waiter->event);
3121 bulk_waiter->actual = 0;
3122 bulk_waiter->bulk = NULL;
3124 case VCHIQ_BULK_MODE_WAITING:
/* Resuming a prior blocking transfer: reuse its bulk slot */
3125 bulk_waiter = userdata;
3126 bulk = bulk_waiter->bulk;
3132 state = service->state;
3134 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3135 &service->bulk_tx : &service->bulk_rx;
3137 if (mutex_lock_killable(&service->bulk_mutex)) {
3138 status = VCHIQ_RETRY;
/* Queue full: drop the mutex and wait for a bulk to be removed */
3142 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3143 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3145 mutex_unlock(&service->bulk_mutex);
3146 if (wait_for_completion_interruptible(
3147 &service->bulk_remove_event)) {
3148 status = VCHIQ_RETRY;
3151 if (mutex_lock_killable(&service->bulk_mutex)) {
3152 status = VCHIQ_RETRY;
3155 } while (queue->local_insert == queue->remove +
3156 VCHIQ_NUM_SERVICE_BULKS);
3159 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3163 bulk->userdata = userdata;
/* Pessimistic default; overwritten when the remote completes the bulk */
3165 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3167 if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3168 goto unlock_error_exit;
3172 vchiq_log_info(vchiq_core_log_level,
3173 "%d: bt (%d->%d) %cx %x@%pad %pK",
3174 state->id, service->localport, service->remoteport, dir_char,
3175 size, &bulk->data, userdata);
3178 * The slot mutex must be held when the service is being closed, so
3179 * claim it here to ensure that isn't happening
3181 if (mutex_lock_killable(&state->slot_mutex)) {
3182 status = VCHIQ_RETRY;
3183 goto cancel_bulk_error_exit;
/* Re-check: the service may have closed while we took the mutexes */
3186 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3187 goto unlock_both_error_exit;
3189 payload[0] = lower_32_bits(bulk->data);
3190 payload[1] = bulk->size;
3191 status = queue_message(state,
3193 VCHIQ_MAKE_MSG(dir_msgtype,
3195 service->remoteport),
3196 memcpy_copy_callback,
3199 QMFLAGS_IS_BLOCKING |
3200 QMFLAGS_NO_MUTEX_LOCK |
3201 QMFLAGS_NO_MUTEX_UNLOCK);
3202 if (status != VCHIQ_SUCCESS)
3203 goto unlock_both_error_exit;
3205 queue->local_insert++;
3207 mutex_unlock(&state->slot_mutex);
3208 mutex_unlock(&service->bulk_mutex);
3210 vchiq_log_trace(vchiq_core_log_level,
3211 "%d: bt:%d %cx li=%x ri=%x p=%x",
3213 service->localport, dir_char,
3214 queue->local_insert, queue->remote_insert, queue->process);
3217 unlock_service(service);
3219 status = VCHIQ_SUCCESS;
/* Blocking modes: sleep until the bulk completes or a signal arrives */
3222 bulk_waiter->bulk = bulk;
3223 if (wait_for_completion_interruptible(&bulk_waiter->event))
3224 status = VCHIQ_RETRY;
3225 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3226 status = VCHIQ_ERROR;
3231 unlock_both_error_exit:
3232 mutex_unlock(&state->slot_mutex);
3233 cancel_bulk_error_exit:
3234 vchiq_complete_bulk(bulk);
3236 mutex_unlock(&service->bulk_mutex);
3240 unlock_service(service);
/*
 * Queue a data message on a service, pulling the payload through
 * @copy_callback.  OPEN services go through the normal slot queue;
 * OPENSYNC services use the synchronous slot.  Any other state is an
 * error.  Size is bounded by VCHIQ_MAX_MSG_SIZE.
 */
3245 vchiq_queue_message(unsigned int handle,
3246 ssize_t (*copy_callback)(void *context, void *dest,
3247 size_t offset, size_t maxsize),
3251 struct vchiq_service *service = find_service_by_handle(handle);
3252 enum vchiq_status status = VCHIQ_ERROR;
3257 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3261 VCHIQ_SERVICE_STATS_INC(service, error_count);
3266 if (size > VCHIQ_MAX_MSG_SIZE) {
3267 VCHIQ_SERVICE_STATS_INC(service, error_count);
3271 switch (service->srvstate) {
3272 case VCHIQ_SRVSTATE_OPEN:
3273 status = queue_message(service->state, service,
3274 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3276 service->remoteport),
3277 copy_callback, context, size, 1);
3279 case VCHIQ_SRVSTATE_OPENSYNC:
3280 status = queue_message_sync(service->state, service,
3281 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3283 service->remoteport),
3284 copy_callback, context, size, 1);
3287 status = VCHIQ_ERROR;
3293 unlock_service(service);
/*
 * Blocking convenience wrapper around vchiq_queue_message for kernel
 * buffers: retries for as long as the queue reports VCHIQ_RETRY.
 */
3298 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3300 enum vchiq_status status;
3303 status = vchiq_queue_message(handle, memcpy_copy_callback,
3307 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3308 * implement a retry mechanism since this function is supposed
3309 * to block until queued
3311 if (status != VCHIQ_RETRY)
3319 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Return a received message's slot space to the remote end.  Claimed
 * messages in the normal slot range go through release_slot(); messages
 * in the synchronous slot go through release_message_sync().
 */
3322 vchiq_release_message(unsigned int handle,
3323 struct vchiq_header *header)
3325 struct vchiq_service *service = find_service_by_handle(handle);
3326 struct vchiq_shared_state *remote;
3327 struct vchiq_state *state;
3333 state = service->state;
3334 remote = state->remote;
3336 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3338 if ((slot_index >= remote->slot_first) &&
3339 (slot_index <= remote->slot_last)) {
3340 int msgid = header->msgid;
3342 if (msgid & VCHIQ_MSGID_CLAIMED) {
3343 struct vchiq_slot_info *slot_info =
3344 SLOT_INFO_FROM_INDEX(state, slot_index);
3346 release_slot(state, slot_info, header, service);
3348 } else if (slot_index == remote->slot_sync) {
3349 release_message_sync(state, header);
3352 unlock_service(service);
3354 EXPORT_SYMBOL(vchiq_release_message);
/*
 * Mark the sync-slot message as padding (consumed) and signal the remote
 * side that its synchronous slot is free again.
 */
3357 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3359 header->msgid = VCHIQ_MSGID_PADDING;
3360 remote_event_signal(&state->remote->sync_release);
/*
 * Report the protocol version negotiated with the remote end of a
 * service into *peer_version.  Fails if the handle is invalid or the
 * service check fails.
 */
3364 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3366 enum vchiq_status status = VCHIQ_ERROR;
3367 struct vchiq_service *service = find_service_by_handle(handle);
3372 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3378 *peer_version = service->peer_version;
3379 status = VCHIQ_SUCCESS;
3383 unlock_service(service);
3386 EXPORT_SYMBOL(vchiq_get_peer_version);
/* Fill *config with this build's compile-time VCHIQ limits and version. */
3388 void vchiq_get_config(struct vchiq_config *config)
3390 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3391 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3392 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3393 config->max_services = VCHIQ_MAX_SERVICES;
3394 config->version = VCHIQ_VERSION;
3395 config->version_min = VCHIQ_VERSION_MIN;
/*
 * Apply a runtime option to a service identified by handle.
 * AUTOCLOSE and TRACE set flags directly; SLOT_QUOTA and MESSAGE_QUOTA
 * update the per-service quota (0 means "use the state's default") and
 * wake the quota_event waiter if the service is now back under both
 * quotas; SYNCHRONOUS is only honoured before the service is opened
 * (HIDDEN/LISTENING).
 *
 * Fix: complete("a->quota_event) was HTML-entity corruption of
 * complete(&quota->quota_event) — "&quot;" rendered as a double quote.
 * Restored in both quota case arms; `quota` is the
 * vchiq_service_quota pointer set up just above each call.
 */
3399 vchiq_set_service_option(unsigned int handle,
3400 enum vchiq_service_option option, int value)
3402 struct vchiq_service *service = find_service_by_handle(handle);
3403 struct vchiq_service_quota *quota;
3410 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3411 service->auto_close = value;
3415 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3416 quota = &service->state->service_quotas[service->localport];
3418 value = service->state->default_slot_quota;
/* Only accept a quota that covers current use and fits a u16 */
3419 if ((value >= quota->slot_use_count) &&
3420 (value < (unsigned short)~0)) {
3421 quota->slot_quota = value;
3422 if ((value >= quota->slot_use_count) &&
3423 (quota->message_quota >= quota->message_use_count))
3425 * Signal the service that it may have
3426 * dropped below its quota
3428 complete(&quota->quota_event);
3433 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3434 quota = &service->state->service_quotas[service->localport];
3436 value = service->state->default_message_quota;
3437 if ((value >= quota->message_use_count) &&
3438 (value < (unsigned short)~0)) {
3439 quota->message_quota = value;
3440 if ((value >= quota->message_use_count) &&
3441 (quota->slot_quota >= quota->slot_use_count))
3443 * Signal the service that it may have
3444 * dropped below its quota
3446 complete(&quota->quota_event);
3451 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3452 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3453 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3454 service->sync = value;
3459 case VCHIQ_SERVICE_OPTION_TRACE:
3460 service->trace = value;
3467 unlock_service(service);
/*
 * Dump one side's shared slot state (slot range, positions, per-slot
 * use/release counts, and the firmware debug counters) to the dump
 * context.  Returns the first vchiq_dump() error, if any.
 * NOTE(review): debug_names[] indexing starts the loop at 1, skipping
 * the DEBUG_ENTRIES count slot itself.
 */
3473 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3474 struct vchiq_shared_state *shared, const char *label)
3476 static const char *const debug_names[] = {
3478 "SLOT_HANDLER_COUNT",
3479 "SLOT_HANDLER_LINE",
3483 "AWAIT_COMPLETION_LINE",
3484 "DEQUEUE_MESSAGE_LINE",
3485 "SERVICE_CALLBACK_LINE",
3486 "MSG_QUEUE_FULL_COUNT",
3487 "COMPLETION_QUEUE_FULL_COUNT"
3494 len = scnprintf(buf, sizeof(buf),
3495 " %s: slots %d-%d tx_pos=%x recycle=%x",
3496 label, shared->slot_first, shared->slot_last,
3497 shared->tx_pos, shared->slot_queue_recycle);
3498 err = vchiq_dump(dump_context, buf, len + 1);
3502 len = scnprintf(buf, sizeof(buf),
3504 err = vchiq_dump(dump_context, buf, len + 1);
/* Only report slots that are still in use (use != release) */
3508 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3509 struct vchiq_slot_info slot_info =
3510 *SLOT_INFO_FROM_INDEX(state, i);
3511 if (slot_info.use_count != slot_info.release_count) {
3512 len = scnprintf(buf, sizeof(buf),
3513 " %d: %d/%d", i, slot_info.use_count,
3514 slot_info.release_count);
3515 err = vchiq_dump(dump_context, buf, len + 1);
3521 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3522 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3523 debug_names[i], shared->debug[i], shared->debug[i]);
3524 err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the whole connection state: connection status, tx/rx positions,
 * version, aggregate stats, slot accounting, both shared-state sides,
 * platform state, and finally every allocated service.  Stops and
 * returns on the first dump error.
 */
3531 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3538 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3539 conn_state_names[state->conn_state]);
3540 err = vchiq_dump(dump_context, buf, len + 1);
3544 len = scnprintf(buf, sizeof(buf),
3545 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3546 state->local->tx_pos,
3547 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3549 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3550 err = vchiq_dump(dump_context, buf, len + 1);
3554 len = scnprintf(buf, sizeof(buf),
3555 " Version: %d (min %d)",
3556 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3557 err = vchiq_dump(dump_context, buf, len + 1);
3561 if (VCHIQ_ENABLE_STATS) {
3562 len = scnprintf(buf, sizeof(buf),
3563 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3564 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3565 state->stats.error_count);
3566 err = vchiq_dump(dump_context, buf, len + 1);
3571 len = scnprintf(buf, sizeof(buf),
3572 " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3573 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3574 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3575 state->data_quota - state->data_use_count,
3576 state->local->slot_queue_recycle - state->slot_queue_available,
3577 state->stats.slot_stalls, state->stats.data_stalls);
3578 err = vchiq_dump(dump_context, buf, len + 1);
3582 err = vchiq_dump_platform_state(dump_context);
/* Dump both directions of the shared state (local then remote) */
3586 err = vchiq_dump_shared_state(dump_context,
3592 err = vchiq_dump_shared_state(dump_context,
3599 err = vchiq_dump_platform_instances(dump_context);
3603 for (i = 0; i < state->unused_service; i++) {
3604 struct vchiq_service *service = find_service_by_port(state, i);
3607 err = vchiq_dump_service_state(dump_context, service);
3608 unlock_service(service);
/*
 * Dump one service's state: name/fourcc, remote port, quota usage,
 * pending bulk counts, and (when stats are enabled) control/bulk traffic
 * counters and stall/error totals.  Returns the first dump error.
 */
3616 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3621 unsigned int ref_count;
3623 /*Don't include the lock just taken*/
3624 ref_count = kref_read(&service->ref_count) - 1;
3625 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3626 service->localport, srvstate_names[service->srvstate],
3629 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3630 char remoteport[30];
3631 struct vchiq_service_quota *quota =
3632 &service->state->service_quotas[service->localport];
3633 int fourcc = service->base.fourcc;
3634 int tx_pending, rx_pending;
3636 if (service->remoteport != VCHIQ_PORT_FREE) {
3637 int len2 = scnprintf(remoteport, sizeof(remoteport),
3638 "%u", service->remoteport);
/* Servers also show which client is attached */
3640 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3641 scnprintf(remoteport + len2,
3642 sizeof(remoteport) - len2,
3643 " (client %x)", service->client_id);
3645 strcpy(remoteport, "n/a");
3648 len += scnprintf(buf + len, sizeof(buf) - len,
3649 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3650 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3652 quota->message_use_count,
3653 quota->message_quota,
3654 quota->slot_use_count,
3657 err = vchiq_dump(dump_context, buf, len + 1);
/* Pending = locally queued minus remotely consumed */
3661 tx_pending = service->bulk_tx.local_insert -
3662 service->bulk_tx.remote_insert;
3664 rx_pending = service->bulk_rx.local_insert -
3665 service->bulk_rx.remote_insert;
3667 len = scnprintf(buf, sizeof(buf),
3668 " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3670 tx_pending ? service->bulk_tx.bulks[
3671 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3673 rx_pending ? service->bulk_rx.bulks[
3674 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3676 if (VCHIQ_ENABLE_STATS) {
3677 err = vchiq_dump(dump_context, buf, len + 1);
3681 len = scnprintf(buf, sizeof(buf),
3682 " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3683 service->stats.ctrl_tx_count,
3684 service->stats.ctrl_tx_bytes,
3685 service->stats.ctrl_rx_count,
3686 service->stats.ctrl_rx_bytes);
3687 err = vchiq_dump(dump_context, buf, len + 1);
3691 len = scnprintf(buf, sizeof(buf),
3692 " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3693 service->stats.bulk_tx_count,
3694 service->stats.bulk_tx_bytes,
3695 service->stats.bulk_rx_count,
3696 service->stats.bulk_rx_bytes);
3697 err = vchiq_dump(dump_context, buf, len + 1);
3701 len = scnprintf(buf, sizeof(buf),
3702 " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3703 service->stats.quota_stalls,
3704 service->stats.slot_stalls,
3705 service->stats.bulk_stalls,
3706 service->stats.bulk_aborted_count,
3707 service->stats.error_count);
3711 err = vchiq_dump(dump_context, buf, len + 1);
3715 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3716 err = vchiq_dump_platform_service_state(dump_context, service);
/* Print the opening banner that frames a high-visibility error report. */
3721 vchiq_loud_error_header(void)
3723 vchiq_log_error(vchiq_core_log_level,
3724 "============================================================================");
3725 vchiq_log_error(vchiq_core_log_level,
3726 "============================================================================");
3727 vchiq_log_error(vchiq_core_log_level, "=====");
/* Print the closing banner matching vchiq_loud_error_header(). */
3731 vchiq_loud_error_footer(void)
3733 vchiq_log_error(vchiq_core_log_level, "=====");
3734 vchiq_log_error(vchiq_core_log_level,
3735 "============================================================================");
3736 vchiq_log_error(vchiq_core_log_level,
3737 "============================================================================");
/*
 * Send a REMOTE_USE message to the peer; refused (error path — the
 * listing drops the return line) while still disconnected.
 */
3740 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3742 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3745 return queue_message(state, NULL,
3746 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/*
 * Send a REMOTE_USE_ACTIVE message to the peer; like
 * vchiq_send_remote_use(), requires the link to be connected.
 */
3750 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3752 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3755 return queue_message(state, NULL,
3756 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3760 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3763 const u8 *mem = void_mem;
3768 while (num_bytes > 0) {
3771 for (offset = 0; offset < 16; offset++) {
3772 if (offset < num_bytes)
3773 s += scnprintf(s, 4, "%02x ", mem[offset]);
3775 s += scnprintf(s, 4, " ");
3778 for (offset = 0; offset < 16; offset++) {
3779 if (offset < num_bytes) {
3780 u8 ch = mem[offset];
3782 if ((ch < ' ') || (ch > '~'))
3789 if (label && (*label != '\0'))
3790 vchiq_log_trace(VCHIQ_LOG_TRACE,
3791 "%s: %08x: %s", label, addr, line_buf);
3793 vchiq_log_trace(VCHIQ_LOG_TRACE,
3794 "%08x: %s", addr, line_buf);