1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 #define VCHIQ_MSG_PADDING 0 /* - */
21 #define VCHIQ_MSG_CONNECT 1 /* - */
22 #define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
23 #define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
24 #define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
25 #define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
26 #define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
27 #define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
28 #define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
29 #define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
30 #define VCHIQ_MSG_PAUSE 10 /* - */
31 #define VCHIQ_MSG_RESUME 11 /* - */
32 #define VCHIQ_MSG_REMOTE_USE 12 /* - */
33 #define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
34 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
38 #define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
39 #define VCHIQ_PORT_FREE 0x1000
40 #define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
41 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
42 (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
43 #define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
44 #define VCHIQ_MSG_SRCPORT(msgid) \
45 (unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
46 #define VCHIQ_MSG_DSTPORT(msgid) \
47 ((unsigned short)(msgid) & 0xfff)
49 /* Ensure the fields are wide enough */
50 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
52 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
53 static_assert((unsigned int)VCHIQ_PORT_MAX <
54 (unsigned int)VCHIQ_PORT_FREE);
56 #define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
57 #define VCHIQ_MSGID_CLAIMED 0x40000000
59 #define VCHIQ_FOURCC_INVALID 0x00000000
60 #define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID)
62 #define VCHIQ_BULK_ACTUAL_ABORTED -1
64 #if VCHIQ_ENABLE_STATS
65 #define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
66 #define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
67 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
68 (service->stats. stat += addend)
70 #define VCHIQ_STATS_INC(state, stat) ((void)0)
71 #define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
72 #define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
75 #define HANDLE_STATE_SHIFT 12
77 #define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
78 #define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
79 #define SLOT_INDEX_FROM_DATA(state, data) \
80 (((unsigned int)((char *)data - (char *)state->slot_data)) / \
82 #define SLOT_INDEX_FROM_INFO(state, info) \
83 ((unsigned int)(info - state->slot_info))
84 #define SLOT_QUEUE_INDEX_FROM_POS(pos) \
85 ((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
86 #define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
87 (SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)
89 #define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
91 #define SRVTRACE_LEVEL(srv) \
92 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
93 #define SRVTRACE_ENABLED(srv, lev) \
94 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
96 #define NO_CLOSE_RECVD 0
99 #define NO_RETRY_POLL 0
102 struct vchiq_open_payload {
109 struct vchiq_openack_payload {
114 QMFLAGS_IS_BLOCKING = BIT(0),
115 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
116 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
120 VCHIQ_POLL_TERMINATE,
127 /* we require this for consistency between endpoints */
128 static_assert(sizeof(struct vchiq_header) == 8);
129 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
131 static inline void check_sizes(void)
133 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
134 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
135 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
136 BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
137 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
138 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
139 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
142 /* Run time control of log level, based on KERN_XXX level. */
143 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
144 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
145 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
147 DEFINE_SPINLOCK(bulk_waiter_spinlock);
148 static DEFINE_SPINLOCK(quota_spinlock);
150 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
151 static unsigned int handle_seq;
153 static const char *const srvstate_names[] = {
166 static const char *const reason_names[] = {
170 "BULK_TRANSMIT_DONE",
172 "BULK_TRANSMIT_ABORTED",
173 "BULK_RECEIVE_ABORTED"
176 static const char *const conn_state_names[] = {
189 release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
191 static const char *msg_type_str(unsigned int msg_type)
194 case VCHIQ_MSG_PADDING: return "PADDING";
195 case VCHIQ_MSG_CONNECT: return "CONNECT";
196 case VCHIQ_MSG_OPEN: return "OPEN";
197 case VCHIQ_MSG_OPENACK: return "OPENACK";
198 case VCHIQ_MSG_CLOSE: return "CLOSE";
199 case VCHIQ_MSG_DATA: return "DATA";
200 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
201 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
202 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
203 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
204 case VCHIQ_MSG_PAUSE: return "PAUSE";
205 case VCHIQ_MSG_RESUME: return "RESUME";
206 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
207 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
208 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
214 vchiq_set_service_state(struct vchiq_service *service, int newstate)
216 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
217 service->state->id, service->localport,
218 srvstate_names[service->srvstate],
219 srvstate_names[newstate]);
220 service->srvstate = newstate;
223 struct vchiq_service *
224 find_service_by_handle(unsigned int handle)
226 struct vchiq_service *service;
229 service = handle_to_service(handle);
230 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
231 service->handle == handle &&
232 kref_get_unless_zero(&service->ref_count)) {
233 service = rcu_pointer_handoff(service);
238 vchiq_log_info(vchiq_core_log_level,
239 "Invalid service handle 0x%x", handle);
243 struct vchiq_service *
244 find_service_by_port(struct vchiq_state *state, int localport)
247 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
248 struct vchiq_service *service;
251 service = rcu_dereference(state->services[localport]);
252 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
253 kref_get_unless_zero(&service->ref_count)) {
254 service = rcu_pointer_handoff(service);
260 vchiq_log_info(vchiq_core_log_level,
261 "Invalid port %d", localport);
265 struct vchiq_service *
266 find_service_for_instance(struct vchiq_instance *instance,
269 struct vchiq_service *service;
272 service = handle_to_service(handle);
273 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
274 service->handle == handle &&
275 service->instance == instance &&
276 kref_get_unless_zero(&service->ref_count)) {
277 service = rcu_pointer_handoff(service);
282 vchiq_log_info(vchiq_core_log_level,
283 "Invalid service handle 0x%x", handle);
287 struct vchiq_service *
288 find_closed_service_for_instance(struct vchiq_instance *instance,
291 struct vchiq_service *service;
294 service = handle_to_service(handle);
296 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
297 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
298 service->handle == handle &&
299 service->instance == instance &&
300 kref_get_unless_zero(&service->ref_count)) {
301 service = rcu_pointer_handoff(service);
306 vchiq_log_info(vchiq_core_log_level,
307 "Invalid service handle 0x%x", handle);
311 struct vchiq_service *
312 __next_service_by_instance(struct vchiq_state *state,
313 struct vchiq_instance *instance,
316 struct vchiq_service *service = NULL;
319 while (idx < state->unused_service) {
320 struct vchiq_service *srv;
322 srv = rcu_dereference(state->services[idx]);
324 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
325 srv->instance == instance) {
335 struct vchiq_service *
336 next_service_by_instance(struct vchiq_state *state,
337 struct vchiq_instance *instance,
340 struct vchiq_service *service;
344 service = __next_service_by_instance(state, instance, pidx);
347 if (kref_get_unless_zero(&service->ref_count)) {
348 service = rcu_pointer_handoff(service);
357 lock_service(struct vchiq_service *service)
360 WARN(1, "%s service is NULL\n", __func__);
363 kref_get(&service->ref_count);
366 static void service_release(struct kref *kref)
368 struct vchiq_service *service =
369 container_of(kref, struct vchiq_service, ref_count);
370 struct vchiq_state *state = service->state;
372 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
373 rcu_assign_pointer(state->services[service->localport], NULL);
374 if (service->userdata_term)
375 service->userdata_term(service->base.userdata);
376 kfree_rcu(service, rcu);
380 unlock_service(struct vchiq_service *service)
383 WARN(1, "%s: service is NULL\n", __func__);
386 kref_put(&service->ref_count, service_release);
390 vchiq_get_client_id(unsigned int handle)
392 struct vchiq_service *service;
396 service = handle_to_service(handle);
397 id = service ? service->client_id : 0;
403 vchiq_get_service_userdata(unsigned int handle)
406 struct vchiq_service *service;
409 service = handle_to_service(handle);
410 userdata = service ? service->base.userdata : NULL;
414 EXPORT_SYMBOL(vchiq_get_service_userdata);
417 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
419 struct vchiq_state *state = service->state;
420 struct vchiq_service_quota *quota;
422 service->closing = 1;
424 /* Synchronise with other threads. */
425 mutex_lock(&state->recycle_mutex);
426 mutex_unlock(&state->recycle_mutex);
427 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
429 * If we're pausing then the slot_mutex is held until resume
430 * by the slot handler. Therefore don't try to acquire this
431 * mutex if we're the slot handler and in the pause sent state.
432 * We don't need to in this case anyway.
434 mutex_lock(&state->slot_mutex);
435 mutex_unlock(&state->slot_mutex);
438 /* Unblock any sending thread. */
439 quota = &state->service_quotas[service->localport];
440 complete("a->quota_event);
/* Non-slot-handler entry point for marking a service as closing. */
static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}
449 static inline enum vchiq_status
450 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
451 struct vchiq_header *header, void *bulk_userdata)
453 enum vchiq_status status;
455 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
456 service->state->id, service->localport, reason_names[reason],
457 header, bulk_userdata);
458 status = service->base.callback(reason, header, service->handle,
460 if (status == VCHIQ_ERROR) {
461 vchiq_log_warning(vchiq_core_log_level,
462 "%d: ignoring ERROR from callback to service %x",
463 service->state->id, service->handle);
464 status = VCHIQ_SUCCESS;
467 if (reason != VCHIQ_MESSAGE_AVAILABLE)
468 vchiq_release_message(service->handle, header);
474 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
476 enum vchiq_connstate oldstate = state->conn_state;
478 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
479 conn_state_names[oldstate],
480 conn_state_names[newstate]);
481 state->conn_state = newstate;
482 vchiq_platform_conn_state_changed(state, oldstate, newstate);
486 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
490 * Don't clear the 'fired' flag because it may already have been set
493 init_waitqueue_head(wq);
497 * All the event waiting routines in VCHIQ used a custom semaphore
498 * implementation that filtered most signals. This achieved a behaviour similar
499 * to the "killable" family of functions. While cleaning up this code all the
500 routines were switched to the "interruptible" family of functions, as the
501 * former was deemed unjustified and the use "killable" set all VCHIQ's
502 * threads in D state.
505 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
510 if (wait_event_interruptible(*wq, event->fired)) {
523 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
531 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
533 if (event->fired && event->armed)
534 remote_event_signal_local(wq, event);
538 remote_event_pollall(struct vchiq_state *state)
540 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
541 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
542 remote_event_poll(&state->trigger_event, &state->local->trigger);
543 remote_event_poll(&state->recycle_event, &state->local->recycle);
547 * Round up message sizes so that any space at the end of a slot is always big
548 * enough for a header. This relies on header size being a power of two, which
549 * has been verified earlier by a static assertion.
553 calc_stride(size_t size)
555 /* Allow room for the header */
556 size += sizeof(struct vchiq_header);
559 return (size + sizeof(struct vchiq_header) - 1) &
560 ~(sizeof(struct vchiq_header) - 1);
563 /* Called by the slot handler thread */
564 static struct vchiq_service *
565 get_listening_service(struct vchiq_state *state, int fourcc)
569 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
572 for (i = 0; i < state->unused_service; i++) {
573 struct vchiq_service *service;
575 service = rcu_dereference(state->services[i]);
577 service->public_fourcc == fourcc &&
578 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
579 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
580 service->remoteport == VCHIQ_PORT_FREE)) &&
581 kref_get_unless_zero(&service->ref_count)) {
582 service = rcu_pointer_handoff(service);
591 /* Called by the slot handler thread */
592 static struct vchiq_service *
593 get_connected_service(struct vchiq_state *state, unsigned int port)
598 for (i = 0; i < state->unused_service; i++) {
599 struct vchiq_service *service =
600 rcu_dereference(state->services[i]);
602 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
603 service->remoteport == port &&
604 kref_get_unless_zero(&service->ref_count)) {
605 service = rcu_pointer_handoff(service);
615 request_poll(struct vchiq_state *state, struct vchiq_service *service,
625 value = atomic_read(&service->poll_flags);
626 } while (atomic_cmpxchg(&service->poll_flags, value,
627 value | BIT(poll_type)) != value);
629 index = BITSET_WORD(service->localport);
631 value = atomic_read(&state->poll_services[index]);
632 } while (atomic_cmpxchg(&state->poll_services[index],
633 value, value | BIT(service->localport & 0x1f)) != value);
636 state->poll_needed = 1;
639 /* ... and ensure the slot handler runs. */
640 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
644 * Called from queue_message, by the slot handler and application threads,
645 * with slot_mutex held
647 static struct vchiq_header *
648 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
650 struct vchiq_shared_state *local = state->local;
651 int tx_pos = state->local_tx_pos;
652 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
654 if (space > slot_space) {
655 struct vchiq_header *header;
656 /* Fill the remaining space with padding */
657 WARN_ON(!state->tx_data);
658 header = (struct vchiq_header *)
659 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
660 header->msgid = VCHIQ_MSGID_PADDING;
661 header->size = slot_space - sizeof(struct vchiq_header);
663 tx_pos += slot_space;
666 /* If necessary, get the next slot. */
667 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
670 /* If there is no free slot... */
672 if (!try_wait_for_completion(&state->slot_available_event)) {
673 /* ...wait for one. */
675 VCHIQ_STATS_INC(state, slot_stalls);
677 /* But first, flush through the last slot. */
678 state->local_tx_pos = tx_pos;
679 local->tx_pos = tx_pos;
680 remote_event_signal(&state->remote->trigger);
683 (wait_for_completion_interruptible(
684 &state->slot_available_event)))
685 return NULL; /* No space available */
688 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
689 complete(&state->slot_available_event);
690 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
694 slot_index = local->slot_queue[
695 SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
697 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
700 state->local_tx_pos = tx_pos + space;
702 return (struct vchiq_header *)(state->tx_data +
703 (tx_pos & VCHIQ_SLOT_MASK));
706 /* Called by the recycle thread. */
708 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
711 struct vchiq_shared_state *local = state->local;
712 int slot_queue_available;
715 * Find slots which have been freed by the other side, and return them
716 * to the available queue.
718 slot_queue_available = state->slot_queue_available;
721 * Use a memory barrier to ensure that any state that may have been
722 * modified by another thread is not masked by stale prefetched
727 while (slot_queue_available != local->slot_queue_recycle) {
729 int slot_index = local->slot_queue[slot_queue_available &
730 VCHIQ_SLOT_QUEUE_MASK];
731 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
734 slot_queue_available++;
736 * Beware of the address dependency - data is calculated
737 * using an index written by the other side.
741 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
742 state->id, slot_index, data,
743 local->slot_queue_recycle, slot_queue_available);
745 /* Initialise the bitmask for services which have used this slot */
746 memset(service_found, 0, length);
750 while (pos < VCHIQ_SLOT_SIZE) {
751 struct vchiq_header *header =
752 (struct vchiq_header *)(data + pos);
753 int msgid = header->msgid;
755 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
756 int port = VCHIQ_MSG_SRCPORT(msgid);
757 struct vchiq_service_quota *quota =
758 &state->service_quotas[port];
761 spin_lock("a_spinlock);
762 count = quota->message_use_count;
764 quota->message_use_count = count - 1;
765 spin_unlock("a_spinlock);
767 if (count == quota->message_quota) {
769 * Signal the service that it
770 * has dropped below its quota
772 complete("a->quota_event);
773 } else if (count == 0) {
774 vchiq_log_error(vchiq_core_log_level,
775 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
777 quota->message_use_count,
778 header, msgid, header->msgid,
780 WARN(1, "invalid message use count\n");
782 if (!BITSET_IS_SET(service_found, port)) {
783 /* Set the found bit for this service */
784 BITSET_SET(service_found, port);
786 spin_lock("a_spinlock);
787 count = quota->slot_use_count;
789 quota->slot_use_count =
791 spin_unlock("a_spinlock);
795 * Signal the service in case
796 * it has dropped below its quota
798 complete("a->quota_event);
800 vchiq_core_log_level,
801 "%d: pfq:%d %x@%pK - slot_use->%d",
803 header->size, header,
807 vchiq_core_log_level,
808 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
810 msgid, header->msgid,
812 WARN(1, "bad slot use count\n");
819 pos += calc_stride(header->size);
820 if (pos > VCHIQ_SLOT_SIZE) {
821 vchiq_log_error(vchiq_core_log_level,
822 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
823 pos, header, msgid, header->msgid,
825 WARN(1, "invalid slot position\n");
832 spin_lock("a_spinlock);
833 count = state->data_use_count;
835 state->data_use_count = count - 1;
836 spin_unlock("a_spinlock);
837 if (count == state->data_quota)
838 complete(&state->data_quota_event);
842 * Don't allow the slot to be reused until we are no
843 * longer interested in it.
847 state->slot_queue_available = slot_queue_available;
848 complete(&state->slot_available_event);
/*
 * Default copy callback for contiguous buffers: copies @maxsize bytes
 * at @offset from the source context into the destination and reports
 * how many bytes were copied.
 */
static ssize_t
memcpy_copy_callback(void *context, void *dest, size_t offset, size_t maxsize)
{
	memcpy(dest + offset, context + offset, maxsize);
	return maxsize;
}
/*
 * Drive @copy_callback until @size bytes have been gathered into @dest.
 * Returns @size on success, the callback's negative error on failure,
 * or -EIO if the callback stalls (returns 0) or overruns its budget.
 */
static ssize_t
copy_message_data(ssize_t (*copy_callback)(void *context, void *dest,
					   size_t offset, size_t maxsize),
		  void *context,
		  void *dest,
		  size_t size)
{
	size_t pos = 0;

	while (pos < size) {
		ssize_t callback_result;
		size_t max_bytes = size - pos;

		callback_result =
			copy_callback(context, dest + pos,
				      pos, max_bytes);

		if (callback_result < 0)
			return callback_result;

		if (!callback_result)
			return -EIO;

		if (callback_result > max_bytes)
			return -EIO;

		pos += callback_result;
	}

	return size;
}
894 /* Called by the slot handler and application threads */
895 static enum vchiq_status
896 queue_message(struct vchiq_state *state, struct vchiq_service *service,
898 ssize_t (*copy_callback)(void *context, void *dest,
899 size_t offset, size_t maxsize),
900 void *context, size_t size, int flags)
902 struct vchiq_shared_state *local;
903 struct vchiq_service_quota *quota = NULL;
904 struct vchiq_header *header;
905 int type = VCHIQ_MSG_TYPE(msgid);
909 local = state->local;
911 stride = calc_stride(size);
913 WARN_ON(!(stride <= VCHIQ_SLOT_SIZE));
915 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
916 mutex_lock_killable(&state->slot_mutex))
919 if (type == VCHIQ_MSG_DATA) {
923 WARN(1, "%s: service is NULL\n", __func__);
924 mutex_unlock(&state->slot_mutex);
928 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
929 QMFLAGS_NO_MUTEX_UNLOCK));
931 if (service->closing) {
932 /* The service has been closed */
933 mutex_unlock(&state->slot_mutex);
937 quota = &state->service_quotas[service->localport];
939 spin_lock("a_spinlock);
942 * Ensure this service doesn't use more than its quota of
945 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
946 state->local_tx_pos + stride - 1);
949 * Ensure data messages don't use more than their quota of
952 while ((tx_end_index != state->previous_data_index) &&
953 (state->data_use_count == state->data_quota)) {
954 VCHIQ_STATS_INC(state, data_stalls);
955 spin_unlock("a_spinlock);
956 mutex_unlock(&state->slot_mutex);
958 if (wait_for_completion_interruptible(
959 &state->data_quota_event))
962 mutex_lock(&state->slot_mutex);
963 spin_lock("a_spinlock);
964 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
965 state->local_tx_pos + stride - 1);
966 if ((tx_end_index == state->previous_data_index) ||
967 (state->data_use_count < state->data_quota)) {
968 /* Pass the signal on to other waiters */
969 complete(&state->data_quota_event);
974 while ((quota->message_use_count == quota->message_quota) ||
975 ((tx_end_index != quota->previous_tx_index) &&
976 (quota->slot_use_count == quota->slot_quota))) {
977 spin_unlock("a_spinlock);
978 vchiq_log_trace(vchiq_core_log_level,
979 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
980 state->id, service->localport,
981 msg_type_str(type), size,
982 quota->message_use_count,
983 quota->slot_use_count);
984 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
985 mutex_unlock(&state->slot_mutex);
986 if (wait_for_completion_interruptible(
987 "a->quota_event))
989 if (service->closing)
991 if (mutex_lock_killable(&state->slot_mutex))
993 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
994 /* The service has been closed */
995 mutex_unlock(&state->slot_mutex);
998 spin_lock("a_spinlock);
999 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
1000 state->local_tx_pos + stride - 1);
1003 spin_unlock("a_spinlock);
1006 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1010 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1012 * In the event of a failure, return the mutex to the
1015 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1016 mutex_unlock(&state->slot_mutex);
1020 if (type == VCHIQ_MSG_DATA) {
1021 ssize_t callback_result;
1025 vchiq_log_info(vchiq_core_log_level,
1026 "%d: qm %s@%pK,%zx (%d->%d)",
1027 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1028 header, size, VCHIQ_MSG_SRCPORT(msgid),
1029 VCHIQ_MSG_DSTPORT(msgid));
1031 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1032 QMFLAGS_NO_MUTEX_UNLOCK));
1035 copy_message_data(copy_callback, context,
1036 header->data, size);
1038 if (callback_result < 0) {
1039 mutex_unlock(&state->slot_mutex);
1040 VCHIQ_SERVICE_STATS_INC(service,
1045 if (SRVTRACE_ENABLED(service,
1047 vchiq_log_dump_mem("Sent", 0,
1050 (size_t)callback_result));
1052 spin_lock("a_spinlock);
1053 quota->message_use_count++;
1056 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1059 * If this transmission can't fit in the last slot used by any
1060 * service, the data_use_count must be increased.
1062 if (tx_end_index != state->previous_data_index) {
1063 state->previous_data_index = tx_end_index;
1064 state->data_use_count++;
1068 * If this isn't the same slot last used by this service,
1069 * the service's slot_use_count must be increased.
1071 if (tx_end_index != quota->previous_tx_index) {
1072 quota->previous_tx_index = tx_end_index;
1073 slot_use_count = ++quota->slot_use_count;
1078 spin_unlock("a_spinlock);
1081 vchiq_log_trace(vchiq_core_log_level,
1082 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1083 state->id, service->localport,
1084 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1085 slot_use_count, header);
1087 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1088 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1090 vchiq_log_info(vchiq_core_log_level,
1091 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1092 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1093 header, size, VCHIQ_MSG_SRCPORT(msgid),
1094 VCHIQ_MSG_DSTPORT(msgid));
1097 * It is assumed for now that this code path
1098 * only happens from calls inside this file.
1100 * External callers are through the vchiq_queue_message
1101 * path which always sets the type to be VCHIQ_MSG_DATA
1103 * At first glance this appears to be correct but
1104 * more review is needed.
1106 copy_message_data(copy_callback, context,
1107 header->data, size);
1109 VCHIQ_STATS_INC(state, ctrl_tx_count);
1112 header->msgid = msgid;
1113 header->size = size;
1118 svc_fourcc = service
1119 ? service->base.fourcc
1120 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1122 vchiq_log_info(SRVTRACE_LEVEL(service),
1123 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1124 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1125 VCHIQ_MSG_TYPE(msgid),
1126 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1127 VCHIQ_MSG_SRCPORT(msgid),
1128 VCHIQ_MSG_DSTPORT(msgid),
1132 /* Make sure the new header is visible to the peer. */
1135 /* Make the new tx_pos visible to the peer. */
1136 local->tx_pos = state->local_tx_pos;
1139 if (service && (type == VCHIQ_MSG_CLOSE))
1140 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1142 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1143 mutex_unlock(&state->slot_mutex);
1145 remote_event_signal(&state->remote->trigger);
1147 return VCHIQ_SUCCESS;
1150 /* Called by the slot handler and application threads */
1151 static enum vchiq_status
1152 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1154 ssize_t (*copy_callback)(void *context, void *dest,
1155 size_t offset, size_t maxsize),
1156 void *context, int size, int is_blocking)
1158 struct vchiq_shared_state *local;
1159 struct vchiq_header *header;
1160 ssize_t callback_result;
1162 local = state->local;
1164 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1165 mutex_lock_killable(&state->sync_mutex))
1168 remote_event_wait(&state->sync_release_event, &local->sync_release);
1172 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1176 int oldmsgid = header->msgid;
1178 if (oldmsgid != VCHIQ_MSGID_PADDING)
1179 vchiq_log_error(vchiq_core_log_level,
1180 "%d: qms - msgid %x, not PADDING",
1181 state->id, oldmsgid);
1184 vchiq_log_info(vchiq_sync_log_level,
1185 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1186 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1187 header, size, VCHIQ_MSG_SRCPORT(msgid),
1188 VCHIQ_MSG_DSTPORT(msgid));
1191 copy_message_data(copy_callback, context,
1192 header->data, size);
1194 if (callback_result < 0) {
1195 mutex_unlock(&state->slot_mutex);
1196 VCHIQ_SERVICE_STATS_INC(service,
1202 if (SRVTRACE_ENABLED(service,
1204 vchiq_log_dump_mem("Sent", 0,
1207 (size_t)callback_result));
1209 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1210 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1212 VCHIQ_STATS_INC(state, ctrl_tx_count);
1215 header->size = size;
1216 header->msgid = msgid;
1218 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1221 svc_fourcc = service
1222 ? service->base.fourcc
1223 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1225 vchiq_log_trace(vchiq_sync_log_level,
1226 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1227 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1228 VCHIQ_MSG_TYPE(msgid),
1229 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1230 VCHIQ_MSG_SRCPORT(msgid),
1231 VCHIQ_MSG_DSTPORT(msgid),
1235 remote_event_signal(&state->remote->sync_trigger);
1237 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1238 mutex_unlock(&state->sync_mutex);
1240 return VCHIQ_SUCCESS;
1244 claim_slot(struct vchiq_slot_info *slot)
1250 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1251 struct vchiq_header *header, struct vchiq_service *service)
1253 mutex_lock(&state->recycle_mutex);
1256 int msgid = header->msgid;
1258 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1259 (service && service->closing)) {
1260 mutex_unlock(&state->recycle_mutex);
1264 /* Rewrite the message header to prevent a double release */
1265 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1268 slot_info->release_count++;
1270 if (slot_info->release_count == slot_info->use_count) {
1271 int slot_queue_recycle;
1272 /* Add to the freed queue */
1275 * A read barrier is necessary here to prevent speculative
1276 * fetches of remote->slot_queue_recycle from overtaking the
1281 slot_queue_recycle = state->remote->slot_queue_recycle;
1282 state->remote->slot_queue[slot_queue_recycle &
1283 VCHIQ_SLOT_QUEUE_MASK] =
1284 SLOT_INDEX_FROM_INFO(state, slot_info);
1285 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1286 vchiq_log_info(vchiq_core_log_level,
1287 "%d: %s %d - recycle->%x", state->id, __func__,
1288 SLOT_INDEX_FROM_INFO(state, slot_info),
1289 state->remote->slot_queue_recycle);
1292 * A write barrier is necessary, but remote_event_signal
1295 remote_event_signal(&state->remote->recycle);
1298 mutex_unlock(&state->recycle_mutex);
1301 static inline enum vchiq_reason
1302 get_bulk_reason(struct vchiq_bulk *bulk)
1304 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1305 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1306 return VCHIQ_BULK_TRANSMIT_ABORTED;
1308 return VCHIQ_BULK_TRANSMIT_DONE;
1311 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1312 return VCHIQ_BULK_RECEIVE_ABORTED;
1314 return VCHIQ_BULK_RECEIVE_DONE;
1317 /* Called by the slot handler - don't hold the bulk mutex */
/*
 * notify_bulks() - deliver completion notifications for a bulk queue.
 *
 * Walks queue entries from ->remove up to a snapshot of ->process, waking
 * BLOCKING-mode waiters via their completion or invoking the service
 * callback for CALLBACK mode.  On VCHIQ_RETRY from a callback, a poll is
 * requested so notification is re-attempted by the slot handler later.
 * NOTE(review): this listing is line-sampled; some statements between the
 * visible lines are not shown.
 */
1318 static enum vchiq_status
1319 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1322 enum vchiq_status status = VCHIQ_SUCCESS;
1324 vchiq_log_trace(vchiq_core_log_level,
1325 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1326 service->state->id, service->localport,
1327 (queue == &service->bulk_tx) ? 't' : 'r',
1328 queue->process, queue->remote_notify, queue->remove);
/* Snapshot the processed position; notify everything up to it. */
1330 queue->remote_notify = queue->process;
1332 while (queue->remove != queue->remote_notify) {
1333 struct vchiq_bulk *bulk =
1334 &queue->bulks[BULK_INDEX(queue->remove)];
1337 * Only generate callbacks for non-dummy bulk
1338 * requests, and non-terminated services
/* NULL ->data presumably marks a fabricated dummy entry - see
 * abort_outstanding_bulks(); verify against full source. */
1340 if (bulk->data && service->instance) {
1341 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1342 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
/* Per-direction transfer statistics. */
1343 VCHIQ_SERVICE_STATS_INC(service,
1345 VCHIQ_SERVICE_STATS_ADD(service,
1349 VCHIQ_SERVICE_STATS_INC(service,
1351 VCHIQ_SERVICE_STATS_ADD(service,
1356 VCHIQ_SERVICE_STATS_INC(service,
1357 bulk_aborted_count);
1359 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1360 struct bulk_waiter *waiter;
/* Wake the thread blocked on this bulk transfer. */
1362 spin_lock(&bulk_waiter_spinlock);
1363 waiter = bulk->userdata;
1365 waiter->actual = bulk->actual;
1366 complete(&waiter->event);
1368 spin_unlock(&bulk_waiter_spinlock);
1369 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1370 enum vchiq_reason reason =
1371 get_bulk_reason(bulk);
1372 status = make_service_callback(service,
1373 reason, NULL, bulk->userdata);
1374 if (status == VCHIQ_RETRY)
/* Let a waiting bulk remover make progress. */
1380 complete(&service->bulk_remove_event);
1383 status = VCHIQ_SUCCESS;
/* Notification deferred - ask the slot handler to poll this queue. */
1385 if (status == VCHIQ_RETRY)
1386 request_poll(service->state, service,
1387 (queue == &service->bulk_tx) ?
1388 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
/*
 * poll_services_of_group() - service the poll requests of one 32-service
 * group.  Atomically claims the group's pending bitmask, then for each set
 * bit looks up the service and acts on its own atomically-claimed
 * poll_flags: REMOVE/TERMINATE close the service (re-requesting the poll on
 * failure), TXNOTIFY/RXNOTIFY run bulk notification.
 * NOTE(review): line-sampled listing; `break`/`continue` and some guards
 * between the visible lines are not shown.
 */
1394 poll_services_of_group(struct vchiq_state *state, int group)
/* Claim-and-clear so concurrent requesters can set new bits meanwhile. */
1396 u32 flags = atomic_xchg(&state->poll_services[group], 0);
1399 for (i = 0; flags; i++) {
1400 struct vchiq_service *service;
1403 if ((flags & BIT(i)) == 0)
/* 32 services per group: local port = group * 32 + bit index. */
1406 service = find_service_by_port(state, (group << 5) + i);
1412 service_flags = atomic_xchg(&service->poll_flags, 0);
1413 if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1414 vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1415 state->id, service->localport,
1416 service->remoteport);
1419 * Make it look like a client, because
1420 * it must be removed and not left in
1421 * the LISTENING state.
1423 service->public_fourcc = VCHIQ_FOURCC_INVALID;
/* Close failed (e.g. retry needed) - re-arm the poll request. */
1425 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
1427 request_poll(state, service, VCHIQ_POLL_REMOVE);
1428 } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1429 vchiq_log_info(vchiq_core_log_level,
1430 "%d: ps - terminate %d<->%d",
1431 state->id, service->localport,
1432 service->remoteport);
1433 if (vchiq_close_service_internal(
1434 service, NO_CLOSE_RECVD) !=
1436 request_poll(state, service,
1437 VCHIQ_POLL_TERMINATE);
1439 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1440 notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1441 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1442 notify_bulks(service, &service->bulk_rx, RETRY_POLL);
/* Drop the reference taken by find_service_by_port(). */
1443 unlock_service(service);
1447 /* Called by the slot handler thread */
/* Iterate every service group and process its pending poll requests. */
1449 poll_services(struct vchiq_state *state)
1453 for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1454 poll_services_of_group(state, group);
1457 /* Called with the bulk_mutex held */
/*
 * abort_outstanding_bulks() - mark every in-flight bulk on a queue as
 * aborted.  For entries the remote side never matched, a dummy remote half
 * is fabricated; for entries the local side never issued, a dummy local
 * half is fabricated with ->actual = ABORTED.  Locally-issued entries are
 * completed via vchiq_complete_bulk().
 * NOTE(review): line-sampled listing; statements between the visible lines
 * (including the loop's process-advance) are not shown.
 */
1459 abort_outstanding_bulks(struct vchiq_service *service,
1460 struct vchiq_bulk_queue *queue)
1462 int is_tx = (queue == &service->bulk_tx);
1464 vchiq_log_trace(vchiq_core_log_level,
1465 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1466 service->state->id, service->localport, is_tx ? 't' : 'r',
1467 queue->local_insert, queue->remote_insert, queue->process);
/* Both insert cursors must be at or ahead of the process cursor
 * (indices are free-running; compare via signed difference). */
1469 WARN_ON(!((int)(queue->local_insert - queue->process) >= 0));
1470 WARN_ON(!((int)(queue->remote_insert - queue->process) >= 0));
1472 while ((queue->process != queue->local_insert) ||
1473 (queue->process != queue->remote_insert)) {
1474 struct vchiq_bulk *bulk =
1475 &queue->bulks[BULK_INDEX(queue->process)];
1477 if (queue->process == queue->remote_insert) {
1478 /* fabricate a matching dummy bulk */
1479 bulk->remote_data = NULL;
1480 bulk->remote_size = 0;
1481 queue->remote_insert++;
1484 if (queue->process != queue->local_insert) {
1485 vchiq_complete_bulk(bulk);
1487 vchiq_log_info(SRVTRACE_LEVEL(service),
1488 "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1489 is_tx ? "Send Bulk to" : "Recv Bulk from",
1490 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1491 service->remoteport,
1495 /* fabricate a matching dummy bulk */
1498 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1499 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1501 queue->local_insert++;
/*
 * parse_open() - handle an incoming OPEN request from the remote side.
 *
 * Looks up a LISTENING service matching the payload fourcc, validates the
 * version range, records the peer version, queues an OPENACK (sync or
 * async) and moves the service to OPEN/OPENSYNC.  If no service matches or
 * the request is invalid, a CLOSE is sent back to the requesting port.
 * Jumps to bail_not_ready (label not visible in this sampled listing) when
 * a reply cannot be queued yet.
 */
1509 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1511 struct vchiq_service *service = NULL;
1513 unsigned int localport, remoteport;
1515 msgid = header->msgid;
1516 size = header->size;
1517 localport = VCHIQ_MSG_DSTPORT(msgid);
1518 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* An OPEN must at least carry the fourcc/version payload. */
1519 if (size >= sizeof(struct vchiq_open_payload)) {
1520 const struct vchiq_open_payload *payload =
1521 (struct vchiq_open_payload *)header->data;
1522 unsigned int fourcc;
1524 fourcc = payload->fourcc;
1525 vchiq_log_info(vchiq_core_log_level,
1526 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1527 state->id, header, localport,
1528 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1530 service = get_listening_service(state, fourcc);
1533 /* A matching service exists */
1534 short version = payload->version;
1535 short version_min = payload->version_min;
/* Reject unless the two [min, version] ranges overlap. */
1537 if ((service->version < version_min) ||
1538 (version < service->version_min)) {
1539 /* Version mismatch */
1540 vchiq_loud_error_header();
1541 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1542 "version mismatch - local (%d, min %d)"
1543 " vs. remote (%d, min %d)",
1544 state->id, service->localport,
1545 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1546 service->version, service->version_min,
1547 version, version_min);
1548 vchiq_loud_error_footer();
1549 unlock_service(service);
1553 service->peer_version = version;
1555 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1556 struct vchiq_openack_payload ack_payload = {
/* Pre-sync-mode peers cannot take a synchronous ACK. */
1560 if (state->version_common <
1561 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1564 /* Acknowledge the OPEN */
1565 if (service->sync) {
1566 if (queue_message_sync(
1573 memcpy_copy_callback,
1575 sizeof(ack_payload),
1577 goto bail_not_ready;
1579 if (queue_message(state,
1585 memcpy_copy_callback,
1587 sizeof(ack_payload),
1589 goto bail_not_ready;
1592 /* The service is now open */
1593 vchiq_set_service_state(service,
1594 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1595 : VCHIQ_SRVSTATE_OPEN);
1598 /* Success - the message has been dealt with */
1599 unlock_service(service);
1605 /* No available service, or an invalid request - send a CLOSE */
1606 if (queue_message(state, NULL,
1607 VCHIQ_MAKE_MSG(VCHIQ_MSG_CLOSE, 0, VCHIQ_MSG_SRCPORT(msgid)),
1608 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1609 goto bail_not_ready;
1615 unlock_service(service);
1621 * parse_message() - parses a single message from the rx slot
1622 * @state: vchiq state struct
1623 * @header: message header
1625 * Context: Process context
1628 * * >= 0 - size of the parsed message payload (without header)
1629 * * -EINVAL - fatal error occurred, bail out is required
/*
 * Dispatches one received message by type.  Service-addressed types first
 * resolve the destination service; connection-level types (CONNECT, PAUSE,
 * RESUME, REMOTE_*) act on the whole state.  Jumps to bail_not_ready when
 * processing must be retried later (label/epilogue not visible in this
 * line-sampled listing).
 */
1632 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1634 struct vchiq_service *service = NULL;
1635 unsigned int localport, remoteport;
1636 int msgid, size, type, ret = -EINVAL;
1638 DEBUG_INITIALISE(state->local)
1640 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1641 msgid = header->msgid;
1642 DEBUG_VALUE(PARSE_MSGID, msgid);
1643 size = header->size;
1644 type = VCHIQ_MSG_TYPE(msgid);
1645 localport = VCHIQ_MSG_DSTPORT(msgid);
1646 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1648 if (type != VCHIQ_MSG_DATA)
1649 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* Service-addressed message types: look up the local destination port. */
1652 case VCHIQ_MSG_OPENACK:
1653 case VCHIQ_MSG_CLOSE:
1654 case VCHIQ_MSG_DATA:
1655 case VCHIQ_MSG_BULK_RX:
1656 case VCHIQ_MSG_BULK_TX:
1657 case VCHIQ_MSG_BULK_RX_DONE:
1658 case VCHIQ_MSG_BULK_TX_DONE:
1659 service = find_service_by_port(state, localport);
1661 ((service->remoteport != remoteport) &&
1662 (service->remoteport != VCHIQ_PORT_FREE))) &&
1664 (type == VCHIQ_MSG_CLOSE)) {
1666 * This could be a CLOSE from a client which
1667 * hadn't yet received the OPENACK - look for
1668 * the connected service
1671 unlock_service(service);
1672 service = get_connected_service(state,
1675 vchiq_log_warning(vchiq_core_log_level,
1676 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1677 state->id, msg_type_str(type),
1678 header, remoteport, localport,
1679 service->localport);
1683 vchiq_log_error(vchiq_core_log_level,
1684 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1685 state->id, msg_type_str(type),
1686 header, remoteport, localport,
1695 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1698 svc_fourcc = service
1699 ? service->base.fourcc
1700 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1701 vchiq_log_info(SRVTRACE_LEVEL(service),
1702 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1703 msg_type_str(type), type,
1704 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1705 remoteport, localport, size);
1707 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Sanity check: the full message stride must fit inside its slot. */
1711 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1712 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1713 vchiq_log_error(vchiq_core_log_level,
1714 "header %pK (msgid %x) - size %x too big for slot",
1715 header, (unsigned int)msgid,
1716 (unsigned int)size);
1717 WARN(1, "oversized for slot\n");
1721 case VCHIQ_MSG_OPEN:
/* OPEN requests always target port 0. */
1722 WARN_ON(!(VCHIQ_MSG_DSTPORT(msgid) == 0));
1723 if (!parse_open(state, header))
1724 goto bail_not_ready;
1726 case VCHIQ_MSG_OPENACK:
1727 if (size >= sizeof(struct vchiq_openack_payload)) {
1728 const struct vchiq_openack_payload *payload =
1729 (struct vchiq_openack_payload *)
1731 service->peer_version = payload->version;
1733 vchiq_log_info(vchiq_core_log_level,
1734 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1735 state->id, header, size, remoteport, localport,
1736 service->peer_version);
1737 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1738 service->remoteport = remoteport;
1739 vchiq_set_service_state(service,
1740 VCHIQ_SRVSTATE_OPEN);
/* Unblock the opener waiting in vchiq_open_service_internal(). */
1741 complete(&service->remove_event);
1743 vchiq_log_error(vchiq_core_log_level,
1744 "OPENACK received in state %s",
1745 srvstate_names[service->srvstate]);
1748 case VCHIQ_MSG_CLOSE:
1749 WARN_ON(size != 0); /* There should be no data */
1751 vchiq_log_info(vchiq_core_log_level,
1752 "%d: prs CLOSE@%pK (%d->%d)",
1753 state->id, header, remoteport, localport);
1755 mark_service_closing_internal(service, 1);
1757 if (vchiq_close_service_internal(service,
1758 CLOSE_RECVD) == VCHIQ_RETRY)
1759 goto bail_not_ready;
1761 vchiq_log_info(vchiq_core_log_level,
1762 "Close Service %c%c%c%c s:%u d:%d",
1763 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1765 service->remoteport);
1767 case VCHIQ_MSG_DATA:
1768 vchiq_log_info(vchiq_core_log_level,
1769 "%d: prs DATA@%pK,%x (%d->%d)",
1770 state->id, header, size, remoteport, localport);
1772 if ((service->remoteport == remoteport) &&
1773 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
/* Claim the slot so it isn't recycled while the service
 * still holds this message. */
1774 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1775 claim_slot(state->rx_info);
1776 DEBUG_TRACE(PARSE_LINE);
1777 if (make_service_callback(service,
1778 VCHIQ_MESSAGE_AVAILABLE, header,
1779 NULL) == VCHIQ_RETRY) {
1780 DEBUG_TRACE(PARSE_LINE);
1781 goto bail_not_ready;
1783 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1784 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1787 VCHIQ_STATS_INC(state, error_count);
1790 case VCHIQ_MSG_CONNECT:
1791 vchiq_log_info(vchiq_core_log_level,
1792 "%d: prs CONNECT@%pK", state->id, header);
/* Negotiated common version lives in slot zero. */
1793 state->version_common = ((struct vchiq_slot_zero *)
1794 state->slot_data)->version;
1795 complete(&state->connect);
1797 case VCHIQ_MSG_BULK_RX:
1798 case VCHIQ_MSG_BULK_TX:
1800 * We should never receive a bulk request from the
1801 * other side since we're not setup to perform as the
1806 case VCHIQ_MSG_BULK_RX_DONE:
1807 case VCHIQ_MSG_BULK_TX_DONE:
1808 if ((service->remoteport == remoteport) &&
1809 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1810 struct vchiq_bulk_queue *queue;
1811 struct vchiq_bulk *bulk;
1813 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1814 &service->bulk_rx : &service->bulk_tx;
1816 DEBUG_TRACE(PARSE_LINE);
1817 if (mutex_lock_killable(&service->bulk_mutex)) {
1818 DEBUG_TRACE(PARSE_LINE);
1819 goto bail_not_ready;
/* A DONE with remote_insert already caught up to
 * local_insert means the remote acknowledged a bulk we
 * never issued. */
1821 if ((int)(queue->remote_insert -
1822 queue->local_insert) >= 0) {
1823 vchiq_log_error(vchiq_core_log_level,
1824 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1825 state->id, msg_type_str(type),
1826 header, remoteport, localport,
1827 queue->remote_insert,
1828 queue->local_insert);
1829 mutex_unlock(&service->bulk_mutex);
1832 if (queue->process != queue->remote_insert) {
1833 pr_err("%s: p %x != ri %x\n",
1836 queue->remote_insert);
1837 mutex_unlock(&service->bulk_mutex);
1838 goto bail_not_ready;
1841 bulk = &queue->bulks[
1842 BULK_INDEX(queue->remote_insert)];
/* Message payload is the actual transferred byte count. */
1843 bulk->actual = *(int *)header->data;
1844 queue->remote_insert++;
1846 vchiq_log_info(vchiq_core_log_level,
1847 "%d: prs %s@%pK (%d->%d) %x@%pad",
1848 state->id, msg_type_str(type),
1849 header, remoteport, localport,
1850 bulk->actual, &bulk->data);
1852 vchiq_log_trace(vchiq_core_log_level,
1853 "%d: prs:%d %cx li=%x ri=%x p=%x",
1854 state->id, localport,
1855 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1857 queue->local_insert,
1858 queue->remote_insert, queue->process);
1860 DEBUG_TRACE(PARSE_LINE);
1861 WARN_ON(queue->process == queue->local_insert);
1862 vchiq_complete_bulk(bulk);
1864 mutex_unlock(&service->bulk_mutex);
1865 DEBUG_TRACE(PARSE_LINE);
1866 notify_bulks(service, queue, RETRY_POLL);
1867 DEBUG_TRACE(PARSE_LINE);
1870 case VCHIQ_MSG_PADDING:
1871 vchiq_log_trace(vchiq_core_log_level,
1872 "%d: prs PADDING@%pK,%x",
1873 state->id, header, size);
1875 case VCHIQ_MSG_PAUSE:
1876 /* If initiated, signal the application thread */
1877 vchiq_log_trace(vchiq_core_log_level,
1878 "%d: prs PAUSE@%pK,%x",
1879 state->id, header, size);
1880 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1881 vchiq_log_error(vchiq_core_log_level,
1882 "%d: PAUSE received in state PAUSED",
1886 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1887 /* Send a PAUSE in response */
1888 if (queue_message(state, NULL,
1889 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
1890 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1892 goto bail_not_ready;
1894 /* At this point slot_mutex is held */
1895 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1897 case VCHIQ_MSG_RESUME:
1898 vchiq_log_trace(vchiq_core_log_level,
1899 "%d: prs RESUME@%pK,%x",
1900 state->id, header, size);
1901 /* Release the slot mutex */
1902 mutex_unlock(&state->slot_mutex);
1903 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1906 case VCHIQ_MSG_REMOTE_USE:
1907 vchiq_on_remote_use(state);
1909 case VCHIQ_MSG_REMOTE_RELEASE:
1910 vchiq_on_remote_release(state);
1912 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1916 vchiq_log_error(vchiq_core_log_level,
1917 "%d: prs invalid msgid %x@%pK,%x",
1918 state->id, msgid, header, size);
1919 WARN(1, "invalid message\n");
/* Drop the reference taken by the service lookup above. */
1928 unlock_service(service);
1933 /* Called by the slot handler thread */
/*
 * parse_rx_slots() - consume received messages up to the remote tx cursor.
 * Maps each new rx slot as it is reached, parses messages one by one via
 * parse_message(), and performs slot release housekeeping when the read
 * position crosses a slot boundary.
 */
1935 parse_rx_slots(struct vchiq_state *state)
1937 struct vchiq_shared_state *remote = state->remote;
1940 DEBUG_INITIALISE(state->local)
/* Snapshot of the remote writer's position; loop until we catch up. */
1942 tx_pos = remote->tx_pos;
1944 while (state->rx_pos != tx_pos) {
1945 struct vchiq_header *header;
1948 DEBUG_TRACE(PARSE_LINE);
/* Entering a fresh slot - resolve its data and info pointers. */
1949 if (!state->rx_data) {
1952 WARN_ON(!((state->rx_pos & VCHIQ_SLOT_MASK) == 0));
1953 rx_index = remote->slot_queue[
1954 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1955 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1957 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1960 * Initialise use_count to one, and increment
1961 * release_count at the end of the slot to avoid
1962 * releasing the slot prematurely.
1964 state->rx_info->use_count = 1;
1965 state->rx_info->release_count = 0;
1968 header = (struct vchiq_header *)(state->rx_data +
1969 (state->rx_pos & VCHIQ_SLOT_MASK));
1970 size = parse_message(state, header);
1974 state->rx_pos += calc_stride(size);
1976 DEBUG_TRACE(PARSE_LINE);
1978 * Perform some housekeeping when the end of the slot is
1981 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1982 /* Remove the extra reference count. */
1983 release_slot(state, state->rx_info, NULL, NULL);
1984 state->rx_data = NULL;
1989 /* Called by the slot handler thread */
/*
 * slot_handler_func() - main loop of the vchiq slot handler kthread.
 * Waits on the trigger event, handles deferred work (service polls and
 * PAUSE/RESUME state transitions) when poll_needed is set, then parses any
 * newly received rx slots.
 * NOTE(review): line-sampled listing; the outer loop construct is not
 * visible here.
 */
1991 slot_handler_func(void *v)
1993 struct vchiq_state *state = v;
1994 struct vchiq_shared_state *local = state->local;
1996 DEBUG_INITIALISE(local)
1999 DEBUG_COUNT(SLOT_HANDLER_COUNT);
2000 DEBUG_TRACE(SLOT_HANDLER_LINE);
/* Sleep until the remote (or a local poll request) signals us. */
2001 remote_event_wait(&state->trigger_event, &local->trigger);
2005 DEBUG_TRACE(SLOT_HANDLER_LINE);
2006 if (state->poll_needed) {
2008 state->poll_needed = 0;
2011 * Handle service polling and other rare conditions here
2012 * out of the mainline code
2014 switch (state->conn_state) {
2015 case VCHIQ_CONNSTATE_CONNECTED:
2016 /* Poll the services as requested */
2017 poll_services(state);
2020 case VCHIQ_CONNSTATE_PAUSING:
2021 if (queue_message(state, NULL,
2022 VCHIQ_MAKE_MSG(VCHIQ_MSG_PAUSE, 0, 0),
2024 QMFLAGS_NO_MUTEX_UNLOCK)
2026 vchiq_set_conn_state(state,
2027 VCHIQ_CONNSTATE_PAUSE_SENT);
/* Queueing failed - retry the pause on the next wakeup. */
2030 state->poll_needed = 1;
2034 case VCHIQ_CONNSTATE_RESUMING:
2035 if (queue_message(state, NULL,
2036 VCHIQ_MAKE_MSG(VCHIQ_MSG_RESUME, 0, 0),
2037 NULL, NULL, 0, QMFLAGS_NO_MUTEX_LOCK)
2039 vchiq_set_conn_state(state,
2040 VCHIQ_CONNSTATE_CONNECTED);
2043 * This should really be impossible,
2044 * since the PAUSE should have flushed
2045 * through outstanding messages.
2047 vchiq_log_error(vchiq_core_log_level,
2048 "Failed to send RESUME message");
2057 DEBUG_TRACE(SLOT_HANDLER_LINE);
2058 parse_rx_slots(state);
2063 /* Called by the recycle thread */
/*
 * recycle_func() - main loop of the slot-recycling kthread.  Allocates a
 * scratch bitset sized for VCHIQ_MAX_SERVICES, then repeatedly waits on the
 * recycle event and processes the free queue.
 */
2065 recycle_func(void *v)
2067 struct vchiq_state *state = v;
2068 struct vchiq_shared_state *local = state->local;
2072 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
/* kmalloc_array guards the count*size multiplication against overflow. */
2074 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
2080 remote_event_wait(&state->recycle_event, &local->recycle);
2082 process_free_queue(state, found, length);
2087 /* Called by the sync thread */
/*
 * sync_func() - main loop of the synchronous-message kthread (the function
 * signature line is not visible in this sampled listing; the symbol name is
 * taken from the kthread_create() call in vchiq_init_state()).  Waits on
 * the sync trigger, reads the single sync slot, and dispatches OPENACK and
 * DATA messages for synchronous services.
 */
2091 struct vchiq_state *state = v;
2092 struct vchiq_shared_state *local = state->local;
/* The sync channel uses one dedicated slot, fixed at init time. */
2093 struct vchiq_header *header =
2094 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2095 state->remote->slot_sync);
2098 struct vchiq_service *service;
2101 unsigned int localport, remoteport;
2103 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2107 msgid = header->msgid;
2108 size = header->size;
2109 type = VCHIQ_MSG_TYPE(msgid);
2110 localport = VCHIQ_MSG_DSTPORT(msgid);
2111 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2113 service = find_service_by_port(state, localport);
2116 vchiq_log_error(vchiq_sync_log_level,
2117 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2118 state->id, msg_type_str(type),
2119 header, remoteport, localport, localport);
/* No valid destination - release the sync slot and wait again. */
2120 release_message_sync(state, header);
2124 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2127 svc_fourcc = service
2128 ? service->base.fourcc
2129 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2130 vchiq_log_trace(vchiq_sync_log_level,
2131 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2133 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2134 remoteport, localport, size);
2136 vchiq_log_dump_mem("Rcvd", 0, header->data,
2141 case VCHIQ_MSG_OPENACK:
2142 if (size >= sizeof(struct vchiq_openack_payload)) {
2143 const struct vchiq_openack_payload *payload =
2144 (struct vchiq_openack_payload *)
2146 service->peer_version = payload->version;
2148 vchiq_log_info(vchiq_sync_log_level,
2149 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2150 state->id, header, size, remoteport, localport,
2151 service->peer_version);
2152 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2153 service->remoteport = remoteport;
2154 vchiq_set_service_state(service,
2155 VCHIQ_SRVSTATE_OPENSYNC);
/* Wake the thread blocked in vchiq_open_service_internal(). */
2157 complete(&service->remove_event);
2159 release_message_sync(state, header);
2162 case VCHIQ_MSG_DATA:
2163 vchiq_log_trace(vchiq_sync_log_level,
2164 "%d: sf DATA@%pK,%x (%d->%d)",
2165 state->id, header, size, remoteport, localport);
2167 if ((service->remoteport == remoteport) &&
2168 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2169 if (make_service_callback(service,
2170 VCHIQ_MESSAGE_AVAILABLE, header,
2171 NULL) == VCHIQ_RETRY)
/* Sync path cannot defer work, so RETRY is only logged. */
2172 vchiq_log_error(vchiq_sync_log_level,
2173 "synchronous callback to service %d returns VCHIQ_RETRY",
2179 vchiq_log_error(vchiq_sync_log_level,
2180 "%d: sf unexpected msgid %x@%pK,%x",
2181 state->id, msgid, header, size);
2182 release_message_sync(state, header);
2186 unlock_service(service);
/* Reset a bulk queue's cursors to empty.  NOTE(review): line-sampled
 * listing - the resets of the remaining cursors (presumably ->process and
 * ->remove) are not visible here. */
2193 init_bulk_queue(struct vchiq_bulk_queue *queue)
2195 queue->local_insert = 0;
2196 queue->remote_insert = 0;
2198 queue->remote_notify = 0;
/* Return the human-readable name for a connection state (table lookup;
 * assumes conn_state is a valid enum index). */
2203 get_conn_state_name(enum vchiq_connstate conn_state)
2205 return conn_state_names[conn_state];
/*
 * vchiq_init_slots() - lay out the shared slot area inside @mem_base.
 *
 * Aligns slot zero to a slot boundary, verifies there is enough room for a
 * minimal system, initialises the slot-zero header (magic, versions,
 * sizes), and splits the remaining data slots evenly between the master
 * and slave sides, each with one dedicated sync slot.  Returns the
 * initialised slot-zero pointer (NULL return on failure presumed; the
 * return statements are not visible in this sampled listing).
 */
2208 struct vchiq_slot_zero *
2209 vchiq_init_slots(void *mem_base, int mem_size)
/* Bytes needed to round mem_base up to the next slot boundary. */
2212 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2213 struct vchiq_slot_zero *slot_zero =
2214 (struct vchiq_slot_zero *)(mem_base + mem_align);
2215 int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2216 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2220 /* Ensure there is enough memory to run an absolutely minimum system */
2221 num_slots -= first_data_slot;
2223 if (num_slots < 4) {
2224 vchiq_log_error(vchiq_core_log_level,
2225 "%s - insufficient memory %x bytes",
2226 __func__, mem_size);
2230 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2232 slot_zero->magic = VCHIQ_MAGIC;
2233 slot_zero->version = VCHIQ_VERSION;
2234 slot_zero->version_min = VCHIQ_VERSION_MIN;
2235 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2236 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2237 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2238 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* First half of the data slots to the master, second half to the slave;
 * each side's first slot is its sync slot. */
2240 slot_zero->master.slot_sync = first_data_slot;
2241 slot_zero->master.slot_first = first_data_slot + 1;
2242 slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2243 slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2244 slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2245 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * vchiq_init_state() - initialise a vchiq state as the slave side.
 *
 * Wires the state to the shared slot-zero area, initialises all mutexes,
 * completions, quotas and remote events, then creates and starts the three
 * worker kthreads (slot handler, recycler, sync).  On success registers
 * the state in vchiq_states[0] and sets local->initialised to signal
 * readiness to the remote side.  On thread-creation failure, already
 * created threads are stopped via the goto-cleanup ladder at the end.
 */
2251 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2253 struct vchiq_shared_state *local;
2254 struct vchiq_shared_state *remote;
2255 char threadname[16];
/* Only a single state instance is supported. */
2258 if (vchiq_states[0]) {
2259 pr_err("%s: VCHIQ state already initialized\n", __func__);
2263 local = &slot_zero->slave;
2264 remote = &slot_zero->master;
2266 if (local->initialised) {
2267 vchiq_loud_error_header();
2268 if (remote->initialised)
2269 vchiq_loud_error("local state has already been initialised");
2271 vchiq_loud_error("master/slave mismatch two slaves");
2272 vchiq_loud_error_footer();
2276 memset(state, 0, sizeof(struct vchiq_state));
2279 * initialize shared state pointers
2282 state->local = local;
2283 state->remote = remote;
2284 state->slot_data = (struct vchiq_slot *)slot_zero;
2287 * initialize events and mutexes
2290 init_completion(&state->connect);
2291 mutex_init(&state->mutex);
2292 mutex_init(&state->slot_mutex);
2293 mutex_init(&state->recycle_mutex);
2294 mutex_init(&state->sync_mutex);
2295 mutex_init(&state->bulk_transfer_mutex);
2297 init_completion(&state->slot_available_event);
2298 init_completion(&state->slot_remove_event);
2299 init_completion(&state->data_quota_event);
2301 state->slot_queue_available = 0;
2303 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2304 struct vchiq_service_quota *quota = &state->service_quotas[i];
/* NOTE(review): '"a->quota_event' below looks like a mis-encoded
 * '&quota->quota_event' - verify against the upstream source. */
2305 init_completion("a->quota_event);
/* Publish every slot owned by this side as available. */
2308 for (i = local->slot_first; i <= local->slot_last; i++) {
2309 local->slot_queue[state->slot_queue_available] = i;
2310 state->slot_queue_available++;
2311 complete(&state->slot_available_event);
2314 state->default_slot_quota = state->slot_queue_available / 2;
2315 state->default_message_quota =
2316 min((unsigned short)(state->default_slot_quota * 256),
2317 (unsigned short)~0);
2319 state->previous_data_index = -1;
2320 state->data_use_count = 0;
2321 state->data_quota = state->slot_queue_available - 1;
2323 remote_event_create(&state->trigger_event, &local->trigger);
2325 remote_event_create(&state->recycle_event, &local->recycle);
2326 local->slot_queue_recycle = state->slot_queue_available;
2327 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2328 remote_event_create(&state->sync_release_event, &local->sync_release);
2330 /* At start-of-day, the slot is empty and available */
2331 ((struct vchiq_header *)
2332 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2333 VCHIQ_MSGID_PADDING;
2334 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2336 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2338 ret = vchiq_platform_init_state(state);
2343 * bring up slot handler thread
2345 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2346 state->slot_handler_thread = kthread_create(&slot_handler_func,
2350 if (IS_ERR(state->slot_handler_thread)) {
2351 vchiq_loud_error_header();
2352 vchiq_loud_error("couldn't create thread %s", threadname);
2353 vchiq_loud_error_footer();
2354 return PTR_ERR(state->slot_handler_thread);
2356 set_user_nice(state->slot_handler_thread, -19);
2358 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2359 state->recycle_thread = kthread_create(&recycle_func,
2362 if (IS_ERR(state->recycle_thread)) {
2363 vchiq_loud_error_header();
2364 vchiq_loud_error("couldn't create thread %s", threadname);
2365 vchiq_loud_error_footer();
2366 ret = PTR_ERR(state->recycle_thread);
2367 goto fail_free_handler_thread;
2369 set_user_nice(state->recycle_thread, -19);
2371 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2372 state->sync_thread = kthread_create(&sync_func,
2375 if (IS_ERR(state->sync_thread)) {
2376 vchiq_loud_error_header();
2377 vchiq_loud_error("couldn't create thread %s", threadname);
2378 vchiq_loud_error_footer();
2379 ret = PTR_ERR(state->sync_thread);
2380 goto fail_free_recycle_thread;
2382 set_user_nice(state->sync_thread, -20);
/* All three created successfully - start them now. */
2384 wake_up_process(state->slot_handler_thread);
2385 wake_up_process(state->recycle_thread);
2386 wake_up_process(state->sync_thread);
2388 vchiq_states[0] = state;
2390 /* Indicate readiness to the other side */
2391 local->initialised = 1;
/* Error cleanup: stop threads in reverse creation order. */
2395 fail_free_recycle_thread:
2396 kthread_stop(state->recycle_thread);
2397 fail_free_handler_thread:
2398 kthread_stop(state->slot_handler_thread);
/*
 * vchiq_msg_queue_push() - append a message header to a service's message
 * queue, blocking (interruptibly, re-waiting on signals) while the
 * fixed-size ring is full.  Signals msg_queue_push for any reader waiting
 * in vchiq_msg_hold().
 */
2403 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2405 struct vchiq_service *service = find_service_by_handle(handle);
/* Ring full: wait for a consumer to pop an entry. */
2408 while (service->msg_queue_write == service->msg_queue_read +
2410 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2411 flush_signals(current);
/* Indices are free-running; mask to get the ring position. */
2414 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2415 service->msg_queue_write++;
2416 service->msg_queue[pos] = header;
2418 complete(&service->msg_queue_push);
/*
 * vchiq_msg_hold() - take the next message header from a service's message
 * queue.  Returns immediately (NULL presumed - the return statement is not
 * visible in this sampled listing) if the queue is empty on the fast path;
 * otherwise waits interruptibly for a producer, then pops one entry and
 * signals msg_queue_pop for any blocked pusher.
 */
2422 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2424 struct vchiq_service *service = find_service_by_handle(handle);
2425 struct vchiq_header *header;
/* Fast path: nothing queued. */
2428 if (service->msg_queue_write == service->msg_queue_read)
2431 while (service->msg_queue_write == service->msg_queue_read) {
2432 if (wait_for_completion_interruptible(&service->msg_queue_push))
2433 flush_signals(current);
2436 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2437 service->msg_queue_read++;
2438 header = service->msg_queue[pos];
2440 complete(&service->msg_queue_pop);
/* Reject service parameters that lack a callback or fourcc; the error and
 * success return statements are not visible in this sampled listing. */
2446 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2448 if (!params->callback || !params->fourcc) {
2449 vchiq_loud_error("Can't add service, invalid params\n");
2456 /* Called from application thread when a client or server service is created. */
/*
 * vchiq_add_service_internal() - allocate and register a new service.
 *
 * Validates @params, allocates and fully initialises a service structure
 * (queues, completions, quotas, refcount of 1), then, under state->mutex,
 * finds a slot in the services[] array: clients (OPENING) reuse the first
 * free slot, servers scan backwards rejecting a duplicate public fourcc
 * with a different instance/callback.  The service handle encodes a
 * rotating sequence, the state id and the local port.  The new service is
 * published with rcu_assign_pointer() and returned holding its initial
 * reference.
 */
2457 struct vchiq_service *
2458 vchiq_add_service_internal(struct vchiq_state *state,
2459 const struct vchiq_service_params_kernel *params,
2460 int srvstate, struct vchiq_instance *instance,
2461 vchiq_userdata_term userdata_term)
2463 struct vchiq_service *service;
2464 struct vchiq_service __rcu **pservice = NULL;
2465 struct vchiq_service_quota *quota;
2469 ret = vchiq_validate_params(params);
2473 service = kmalloc(sizeof(*service), GFP_KERNEL);
2477 service->base.fourcc = params->fourcc;
2478 service->base.callback = params->callback;
2479 service->base.userdata = params->userdata;
2480 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2481 kref_init(&service->ref_count);
2482 service->srvstate = VCHIQ_SRVSTATE_FREE;
2483 service->userdata_term = userdata_term;
2484 service->localport = VCHIQ_PORT_FREE;
2485 service->remoteport = VCHIQ_PORT_FREE;
/* Clients are not publicly discoverable by fourcc. */
2487 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2488 VCHIQ_FOURCC_INVALID : params->fourcc;
2489 service->client_id = 0;
2490 service->auto_close = 1;
2492 service->closing = 0;
2494 atomic_set(&service->poll_flags, 0);
2495 service->version = params->version;
2496 service->version_min = params->version_min;
2497 service->state = state;
2498 service->instance = instance;
2499 service->service_use_count = 0;
2500 service->msg_queue_read = 0;
2501 service->msg_queue_write = 0;
2502 init_bulk_queue(&service->bulk_tx);
2503 init_bulk_queue(&service->bulk_rx);
2504 init_completion(&service->remove_event);
2505 init_completion(&service->bulk_remove_event);
2506 init_completion(&service->msg_queue_pop);
2507 init_completion(&service->msg_queue_push);
2508 mutex_init(&service->bulk_mutex);
2509 memset(&service->stats, 0, sizeof(service->stats));
2510 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2513 * Although it is perfectly possible to use a spinlock
2514 * to protect the creation of services, it is overkill as it
2515 * disables interrupts while the array is searched.
2516 * The only danger is of another thread trying to create a
2517 * service - service deletion is safe.
2518 * Therefore it is preferable to use state->mutex which,
2519 * although slower to claim, doesn't block interrupts while
2523 mutex_lock(&state->mutex);
2525 /* Prepare to use a previously unused service */
2526 if (state->unused_service < VCHIQ_MAX_SERVICES)
2527 pservice = &state->services[state->unused_service];
2529 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2530 for (i = 0; i < state->unused_service; i++) {
2531 if (!rcu_access_pointer(state->services[i])) {
2532 pservice = &state->services[i];
/* Server path: scan backwards, checking for fourcc conflicts. */
2538 for (i = (state->unused_service - 1); i >= 0; i--) {
2539 struct vchiq_service *srv;
2541 srv = rcu_dereference(state->services[i]);
2543 pservice = &state->services[i];
2544 } else if ((srv->public_fourcc == params->fourcc) &&
2545 ((srv->instance != instance) ||
2546 (srv->base.callback != params->callback))) {
2548 * There is another server using this
2549 * fourcc which doesn't match.
2559 service->localport = (pservice - state->services);
/* Handle = rotating sequence | state id component | local port. */
2561 handle_seq = VCHIQ_MAX_STATES *
2563 service->handle = handle_seq |
2564 (state->id * VCHIQ_MAX_SERVICES) |
2566 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
/* Publish the service for RCU readers before dropping the mutex. */
2567 rcu_assign_pointer(*pservice, service);
2568 if (pservice == &state->services[state->unused_service])
2569 state->unused_service++;
2572 mutex_unlock(&state->mutex);
2579 quota = &state->service_quotas[service->localport];
2580 quota->slot_quota = state->default_slot_quota;
2581 quota->message_quota = state->default_message_quota;
2582 if (quota->slot_use_count == 0)
2583 quota->previous_tx_index =
2584 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2587 /* Bring this service online */
2588 vchiq_set_service_state(service, srvstate);
2590 vchiq_log_info(vchiq_core_msg_log_level,
2591 "%s Service %c%c%c%c SrcPort:%d",
2592 (srvstate == VCHIQ_SRVSTATE_OPENING)
2594 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2595 service->localport);
2597 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * vchiq_open_service_internal() - send an OPEN request for @service and
 * wait for the remote's ACK/NAK.  Queues an OPEN message carrying the
 * fourcc and version range, then blocks on remove_event until parse_open's
 * reply handling (or sync_func) completes it.  Returns VCHIQ_RETRY on
 * signal, VCHIQ_ERROR if the service did not reach OPEN/OPENSYNC.
 */
2603 vchiq_open_service_internal(struct vchiq_service *service, int client_id)
2605 struct vchiq_open_payload payload = {
2606 service->base.fourcc,
2609 service->version_min
2611 enum vchiq_status status = VCHIQ_SUCCESS;
2613 service->client_id = client_id;
2614 vchiq_use_service_internal(service);
2615 status = queue_message(service->state,
2617 VCHIQ_MAKE_MSG(VCHIQ_MSG_OPEN,
2620 memcpy_copy_callback,
2623 QMFLAGS_IS_BLOCKING);
2625 if (status != VCHIQ_SUCCESS)
2628 /* Wait for the ACK/NAK */
2629 if (wait_for_completion_interruptible(&service->remove_event)) {
/* Interrupted by a signal - caller may retry. */
2630 status = VCHIQ_RETRY;
2631 vchiq_release_service_internal(service);
2632 } else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
2633 (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
/* CLOSEWAIT is an expected refusal; anything else is logged. */
2634 if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
2635 vchiq_log_error(vchiq_core_log_level,
2636 "%d: osi - srvstate = %s (ref %u)",
2638 srvstate_names[service->srvstate],
2639 kref_read(&service->ref_count));
2640 status = VCHIQ_ERROR;
2641 VCHIQ_SERVICE_STATS_INC(service, error_count);
2642 vchiq_release_service_internal(service);
/*
 * Walk the remote slots and release every message still claimed by
 * @service (used while closing it down).
 * NOTE(review): extract is missing interior lines; comments hedged.
 */
2649 release_service_messages(struct vchiq_service *service)
2651 struct vchiq_state *state = service->state;
2652 int slot_last = state->remote->slot_last;
2655 /* Release any claimed messages aimed at this service */
/* Synchronous services use the dedicated sync slot instead. */
2657 if (service->sync) {
2658 struct vchiq_header *header =
2659 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2660 state->remote->slot_sync);
2661 if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
2662 release_message_sync(state, header);
2667 for (i = state->remote->slot_first; i <= slot_last; i++) {
2668 struct vchiq_slot_info *slot_info =
2669 SLOT_INFO_FROM_INDEX(state, i);
2670 unsigned int pos, end;
/* Fully released slot: nothing left to do here (branch body not shown). */
2673 if (slot_info->release_count == slot_info->use_count)
2676 data = (char *)SLOT_DATA_FROM_INDEX(state, i);
2677 end = VCHIQ_SLOT_SIZE;
2678 if (data == state->rx_data)
2680 * This buffer is still being read from - stop
2681 * at the current read position
2683 end = state->rx_pos & VCHIQ_SLOT_MASK;
/* Scan headers in the slot, releasing claimed ones for this port. */
2688 struct vchiq_header *header =
2689 (struct vchiq_header *)(data + pos);
2690 int msgid = header->msgid;
2691 int port = VCHIQ_MSG_DSTPORT(msgid);
2693 if ((port == service->localport) &&
2694 (msgid & VCHIQ_MSGID_CLAIMED)) {
2695 vchiq_log_info(vchiq_core_log_level,
2696 " fsi - hdr %pK", header);
2697 release_slot(state, slot_info, header,
2700 pos += calc_stride(header->size);
/* A stride past the slot end means corrupt header data. */
2701 if (pos > VCHIQ_SLOT_SIZE) {
2702 vchiq_log_error(vchiq_core_log_level,
2703 "fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
2705 header->msgid, header->size);
2706 WARN(1, "invalid slot position\n");
/*
 * Abort all outstanding bulk transfers on @service and notify both
 * queues. Returns nonzero only when both notifications succeed (the
 * return for a failed mutex_lock_killable is not visible in this extract).
 */
2713 do_abort_bulks(struct vchiq_service *service)
2715 enum vchiq_status status;
2717 /* Abort any outstanding bulk transfers */
2718 if (mutex_lock_killable(&service->bulk_mutex))
2720 abort_outstanding_bulks(service, &service->bulk_tx);
2721 abort_outstanding_bulks(service, &service->bulk_rx);
2722 mutex_unlock(&service->bulk_mutex);
/* Notify outside the bulk mutex; NO_RETRY_POLL semantics — confirm. */
2724 status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
2725 if (status != VCHIQ_SUCCESS)
2728 status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
2729 return (status == VCHIQ_SUCCESS);
/*
 * Final stage of closing a service: pick the next state, deliver the
 * VCHIQ_SERVICE_CLOSED callback, drop outstanding use counts and wake
 * waiters. On VCHIQ_RETRY from the callback, fall back to @failstate.
 * NOTE(review): extract is missing interior lines; comments hedged.
 */
2732 static enum vchiq_status
2733 close_service_complete(struct vchiq_service *service, int failstate)
2735 enum vchiq_status status;
2736 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2739 switch (service->srvstate) {
2740 case VCHIQ_SRVSTATE_OPEN:
2741 case VCHIQ_SRVSTATE_CLOSESENT:
2742 case VCHIQ_SRVSTATE_CLOSERECVD:
/* Auto-close servers return to LISTENING; others park in CLOSEWAIT. */
2744 if (service->auto_close) {
2745 service->client_id = 0;
2746 service->remoteport = VCHIQ_PORT_FREE;
2747 newstate = VCHIQ_SRVSTATE_LISTENING;
2749 newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
2752 newstate = VCHIQ_SRVSTATE_CLOSED;
2754 vchiq_set_service_state(service, newstate);
2756 case VCHIQ_SRVSTATE_LISTENING:
2759 vchiq_log_error(vchiq_core_log_level,
2760 "%s(%x) called in state %s", __func__,
2761 service->handle, srvstate_names[service->srvstate]);
2762 WARN(1, "%s in unexpected state\n", __func__);
2766 status = make_service_callback(service,
2767 VCHIQ_SERVICE_CLOSED, NULL, NULL);
2769 if (status != VCHIQ_RETRY) {
2770 int uc = service->service_use_count;
2772 /* Complete the close process */
2773 for (i = 0; i < uc; i++)
2775 * cater for cases where close is forced and the
2776 * client may not close all it's handles
2778 vchiq_release_service_internal(service);
2780 service->client_id = 0;
2781 service->remoteport = VCHIQ_PORT_FREE;
2783 if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
2784 vchiq_free_service_internal(service);
2785 } else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
2787 service->closing = 0;
2789 complete(&service->remove_event);
/* Callback asked for a retry: record the fallback state instead. */
2792 vchiq_set_service_state(service, failstate);
2798 /* Called by the slot handler */
/*
 * State-machine driver for closing a service, either locally initiated
 * (@close_recvd == 0) or in response to a peer CLOSE (@close_recvd != 0).
 * NOTE(review): extract is missing interior lines; comments hedged.
 */
2800 vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
2802 struct vchiq_state *state = service->state;
2803 enum vchiq_status status = VCHIQ_SUCCESS;
2804 int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
2806 vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
2807 service->state->id, service->localport, close_recvd,
2808 srvstate_names[service->srvstate]);
2810 switch (service->srvstate) {
/* Already closed/idle states. */
2811 case VCHIQ_SRVSTATE_CLOSED:
2812 case VCHIQ_SRVSTATE_HIDDEN:
2813 case VCHIQ_SRVSTATE_LISTENING:
2814 case VCHIQ_SRVSTATE_CLOSEWAIT:
2816 vchiq_log_error(vchiq_core_log_level,
2817 "%s(1) called in state %s",
2818 __func__, srvstate_names[service->srvstate]);
2819 } else if (is_server) {
2820 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
2821 status = VCHIQ_ERROR;
2823 service->client_id = 0;
2824 service->remoteport = VCHIQ_PORT_FREE;
2825 if (service->srvstate ==
2826 VCHIQ_SRVSTATE_CLOSEWAIT)
2827 vchiq_set_service_state(service,
2828 VCHIQ_SRVSTATE_LISTENING);
2830 complete(&service->remove_event);
2832 vchiq_free_service_internal(service);
2835 case VCHIQ_SRVSTATE_OPENING:
2837 /* The open was rejected - tell the user */
2838 vchiq_set_service_state(service,
2839 VCHIQ_SRVSTATE_CLOSEWAIT);
2840 complete(&service->remove_event);
2842 /* Shutdown mid-open - let the other side know */
2843 status = queue_message(state, service,
2847 VCHIQ_MSG_DSTPORT(service->remoteport)),
2852 case VCHIQ_SRVSTATE_OPENSYNC:
/* Sync services serialize the close under sync_mutex. */
2853 mutex_lock(&state->sync_mutex);
2855 case VCHIQ_SRVSTATE_OPEN:
2857 if (!do_abort_bulks(service))
2858 status = VCHIQ_RETRY;
2861 release_service_messages(service);
2863 if (status == VCHIQ_SUCCESS)
2864 status = queue_message(state, service,
2868 VCHIQ_MSG_DSTPORT(service->remoteport)),
2869 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);
2871 if (status != VCHIQ_SUCCESS) {
2872 if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
2873 mutex_unlock(&state->sync_mutex);
2878 /* Change the state while the mutex is still held */
2879 vchiq_set_service_state(service,
2880 VCHIQ_SRVSTATE_CLOSESENT);
2881 mutex_unlock(&state->slot_mutex);
2883 mutex_unlock(&state->sync_mutex);
2887 /* Change the state while the mutex is still held */
2888 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
2889 mutex_unlock(&state->slot_mutex);
2891 mutex_unlock(&state->sync_mutex);
2893 status = close_service_complete(service,
2894 VCHIQ_SRVSTATE_CLOSERECVD);
2897 case VCHIQ_SRVSTATE_CLOSESENT:
2899 /* This happens when a process is killed mid-close */
2902 if (!do_abort_bulks(service)) {
2903 status = VCHIQ_RETRY;
2907 if (status == VCHIQ_SUCCESS)
2908 status = close_service_complete(service,
2909 VCHIQ_SRVSTATE_CLOSERECVD);
2912 case VCHIQ_SRVSTATE_CLOSERECVD:
2913 if (!close_recvd && is_server)
2914 /* Force into LISTENING mode */
2915 vchiq_set_service_state(service,
2916 VCHIQ_SRVSTATE_LISTENING);
2917 status = close_service_complete(service,
2918 VCHIQ_SRVSTATE_CLOSERECVD);
2922 vchiq_log_error(vchiq_core_log_level,
2923 "%s(%d) called in state %s", __func__,
2924 close_recvd, srvstate_names[service->srvstate]);
2931 /* Called from the application process upon process death */
/*
 * Mark @service as closing and hand the actual removal off to the slot
 * handler thread via a VCHIQ_POLL_REMOVE request.
 */
2933 vchiq_terminate_service_internal(struct vchiq_service *service)
2935 struct vchiq_state *state = service->state;
2937 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2938 state->id, service->localport, service->remoteport);
2940 mark_service_closing(service);
2942 /* Mark the service for removal by the slot handler */
2943 request_poll(state, service, VCHIQ_POLL_REMOVE);
2946 /* Called from the slot handler */
/*
 * Final teardown: move @service to FREE, wake any remove_event waiters
 * and drop the initial reference. States outside the listed set are
 * logged as errors (the non-error path lines are not visible here).
 */
2948 vchiq_free_service_internal(struct vchiq_service *service)
2950 struct vchiq_state *state = service->state;
2952 vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
2953 state->id, service->localport);
2955 switch (service->srvstate) {
2956 case VCHIQ_SRVSTATE_OPENING:
2957 case VCHIQ_SRVSTATE_CLOSED:
2958 case VCHIQ_SRVSTATE_HIDDEN:
2959 case VCHIQ_SRVSTATE_LISTENING:
2960 case VCHIQ_SRVSTATE_CLOSEWAIT:
2963 vchiq_log_error(vchiq_core_log_level,
2964 "%d: fsi - (%d) in state %s",
2965 state->id, service->localport,
2966 srvstate_names[service->srvstate]);
2970 vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);
2972 complete(&service->remove_event);
2974 /* Release the initial lock */
2975 unlock_service(service);
/*
 * Enable all HIDDEN services of @instance, then drive the connection
 * handshake: send CONNECT once, wait for the peer, and mark CONNECTED.
 * NOTE(review): the return statements inside the retry branches are not
 * visible in this extract — presumably VCHIQ_RETRY.
 */
2979 vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
2981 struct vchiq_service *service;
2984 /* Find all services registered to this client and enable them. */
2986 while ((service = next_service_by_instance(state, instance,
2988 if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
2989 vchiq_set_service_state(service,
2990 VCHIQ_SRVSTATE_LISTENING);
2991 unlock_service(service);
2994 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
2995 if (queue_message(state, NULL,
2996 VCHIQ_MAKE_MSG(VCHIQ_MSG_CONNECT, 0, 0), NULL, NULL,
2997 0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)
3000 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);
3003 if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
3004 if (wait_for_completion_interruptible(&state->connect))
3007 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
/* Re-complete so other waiters on state->connect also proceed. */
3008 complete(&state->connect);
3011 return VCHIQ_SUCCESS;
/*
 * Remove every service registered by @instance; vchiq_remove_service's
 * status is deliberately ignored (best-effort shutdown).
 */
3015 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
3017 struct vchiq_service *service;
3020 /* Find all services registered to this client and remove them. */
3022 while ((service = next_service_by_instance(state, instance,
3024 (void)vchiq_remove_service(service->handle);
3025 unlock_service(service);
/*
 * Public API: close the service identified by @handle. Performs the
 * close inline when called on the slot handler thread, otherwise asks
 * the slot handler to do it and waits on remove_event.
 * NOTE(review): extract is missing interior lines (loop structure,
 * returns); comments hedged.
 */
3030 vchiq_close_service(unsigned int handle)
3032 /* Unregister the service */
3033 struct vchiq_service *service = find_service_by_handle(handle);
3034 enum vchiq_status status = VCHIQ_SUCCESS;
3039 vchiq_log_info(vchiq_core_log_level,
3040 "%d: close_service:%d",
3041 service->state->id, service->localport);
/* Nothing to close in these states. */
3043 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3044 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3045 (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
3046 unlock_service(service);
3050 mark_service_closing(service);
3052 if (current == service->state->slot_handler_thread) {
3053 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3054 WARN_ON(status == VCHIQ_RETRY);
3056 /* Mark the service for termination by the slot handler */
3057 request_poll(service->state, service, VCHIQ_POLL_TERMINATE);
3061 if (wait_for_completion_interruptible(&service->remove_event)) {
3062 status = VCHIQ_RETRY;
3066 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3067 (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
3068 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3071 vchiq_log_warning(vchiq_core_log_level,
3072 "%d: close_service:%d - waiting in state %s",
3073 service->state->id, service->localport,
3074 srvstate_names[service->srvstate]);
3077 if ((status == VCHIQ_SUCCESS) &&
3078 (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
3079 (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
3080 status = VCHIQ_ERROR;
3082 unlock_service(service);
3086 EXPORT_SYMBOL(vchiq_close_service);
/*
 * Public API: remove the service identified by @handle. Unlike close,
 * a server is forced to look like a client (public_fourcc invalidated)
 * so it cannot linger in LISTENING.
 * NOTE(review): extract is missing interior lines; comments hedged.
 */
3089 vchiq_remove_service(unsigned int handle)
3091 /* Unregister the service */
3092 struct vchiq_service *service = find_service_by_handle(handle);
3093 enum vchiq_status status = VCHIQ_SUCCESS;
3098 vchiq_log_info(vchiq_core_log_level,
3099 "%d: remove_service:%d",
3100 service->state->id, service->localport);
3102 if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
3103 unlock_service(service);
3107 mark_service_closing(service);
3109 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3110 (current == service->state->slot_handler_thread)) {
3112 * Make it look like a client, because it must be removed and
3113 * not left in the LISTENING state.
3115 service->public_fourcc = VCHIQ_FOURCC_INVALID;
3117 status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
3118 WARN_ON(status == VCHIQ_RETRY);
3120 /* Mark the service for removal by the slot handler */
3121 request_poll(service->state, service, VCHIQ_POLL_REMOVE);
3124 if (wait_for_completion_interruptible(&service->remove_event)) {
3125 status = VCHIQ_RETRY;
3129 if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
3130 (service->srvstate == VCHIQ_SRVSTATE_OPEN))
3133 vchiq_log_warning(vchiq_core_log_level,
3134 "%d: remove_service:%d - waiting in state %s",
3135 service->state->id, service->localport,
3136 srvstate_names[service->srvstate]);
3139 if ((status == VCHIQ_SUCCESS) &&
3140 (service->srvstate != VCHIQ_SRVSTATE_FREE))
3141 status = VCHIQ_ERROR;
3143 unlock_service(service);
3149 * This function may be called by kernel threads or user threads.
3150 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
3151 * received and the call should be retried after being returned to user
3153 * When called in blocking mode, the userdata field points to a bulk_waiter
/*
 * Queue a bulk transmit or receive on @handle. Exactly one of @offset
 * (kernel pointer) and @uoffset (user pointer) is used for the data.
 * NOTE(review): this extract is missing interior lines (labels, some
 * branches); comments are hedged accordingly.
 */
3156 enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
3157 void *offset, void __user *uoffset,
3158 int size, void *userdata,
3159 enum vchiq_bulk_mode mode,
3160 enum vchiq_bulk_dir dir)
3162 struct vchiq_service *service = find_service_by_handle(handle);
3163 struct vchiq_bulk_queue *queue;
3164 struct vchiq_bulk *bulk;
3165 struct vchiq_state *state;
3166 struct bulk_waiter *bulk_waiter = NULL;
3167 const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
3168 const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
3169 VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
3170 enum vchiq_status status = VCHIQ_ERROR;
3176 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3179 if (!offset && !uoffset)
3182 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
/* Blocking/waiting modes reuse the caller-supplied bulk_waiter. */
3186 case VCHIQ_BULK_MODE_NOCALLBACK:
3187 case VCHIQ_BULK_MODE_CALLBACK:
3189 case VCHIQ_BULK_MODE_BLOCKING:
3190 bulk_waiter = userdata;
3191 init_completion(&bulk_waiter->event);
3192 bulk_waiter->actual = 0;
3193 bulk_waiter->bulk = NULL;
3195 case VCHIQ_BULK_MODE_WAITING:
3196 bulk_waiter = userdata;
3197 bulk = bulk_waiter->bulk;
3203 state = service->state;
3205 queue = (dir == VCHIQ_BULK_TRANSMIT) ?
3206 &service->bulk_tx : &service->bulk_rx;
3208 if (mutex_lock_killable(&service->bulk_mutex)) {
3209 status = VCHIQ_RETRY;
/* Queue full: drop the mutex, wait for a slot, retake the mutex. */
3213 if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
3214 VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
3216 mutex_unlock(&service->bulk_mutex);
3217 if (wait_for_completion_interruptible(
3218 &service->bulk_remove_event)) {
3219 status = VCHIQ_RETRY;
3222 if (mutex_lock_killable(&service->bulk_mutex)) {
3223 status = VCHIQ_RETRY;
3226 } while (queue->local_insert == queue->remove +
3227 VCHIQ_NUM_SERVICE_BULKS);
3230 bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];
3234 bulk->userdata = userdata;
3236 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
3238 if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
3239 goto unlock_error_exit;
3243 vchiq_log_info(vchiq_core_log_level,
3244 "%d: bt (%d->%d) %cx %x@%pad %pK",
3245 state->id, service->localport, service->remoteport, dir_char,
3246 size, &bulk->data, userdata);
3249 * The slot mutex must be held when the service is being closed, so
3250 * claim it here to ensure that isn't happening
3252 if (mutex_lock_killable(&state->slot_mutex)) {
3253 status = VCHIQ_RETRY;
3254 goto cancel_bulk_error_exit;
3257 if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
3258 goto unlock_both_error_exit;
3260 payload[0] = lower_32_bits(bulk->data);
3261 payload[1] = bulk->size;
3262 status = queue_message(state,
3264 VCHIQ_MAKE_MSG(dir_msgtype,
3266 service->remoteport),
3267 memcpy_copy_callback,
3270 QMFLAGS_IS_BLOCKING |
3271 QMFLAGS_NO_MUTEX_LOCK |
3272 QMFLAGS_NO_MUTEX_UNLOCK);
3273 if (status != VCHIQ_SUCCESS)
3274 goto unlock_both_error_exit;
3276 queue->local_insert++;
3278 mutex_unlock(&state->slot_mutex);
3279 mutex_unlock(&service->bulk_mutex);
3281 vchiq_log_trace(vchiq_core_log_level,
3282 "%d: bt:%d %cx li=%x ri=%x p=%x",
3284 service->localport, dir_char,
3285 queue->local_insert, queue->remote_insert, queue->process);
3288 unlock_service(service);
3290 status = VCHIQ_SUCCESS;
/* Blocking mode: wait for completion; ABORTED actual means failure. */
3293 bulk_waiter->bulk = bulk;
3294 if (wait_for_completion_interruptible(&bulk_waiter->event))
3295 status = VCHIQ_RETRY;
3296 else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
3297 status = VCHIQ_ERROR;
/* Error unwind: release mutexes and cancel the prepared bulk. */
3302 unlock_both_error_exit:
3303 mutex_unlock(&state->slot_mutex);
3304 cancel_bulk_error_exit:
3305 vchiq_complete_bulk(bulk);
3307 mutex_unlock(&service->bulk_mutex);
3311 unlock_service(service);
/*
 * Queue a data message on @handle, using @copy_callback to gather the
 * payload. Synchronous services take the queue_message_sync path.
 * NOTE(review): extract is missing interior lines (size validation
 * branch bodies); comments hedged.
 */
3316 vchiq_queue_message(unsigned int handle,
3317 ssize_t (*copy_callback)(void *context, void *dest,
3318 size_t offset, size_t maxsize),
3322 struct vchiq_service *service = find_service_by_handle(handle);
3323 enum vchiq_status status = VCHIQ_ERROR;
3328 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3332 VCHIQ_SERVICE_STATS_INC(service, error_count);
/* Reject messages larger than a slot can carry. */
3337 if (size > VCHIQ_MAX_MSG_SIZE) {
3338 VCHIQ_SERVICE_STATS_INC(service, error_count);
3342 switch (service->srvstate) {
3343 case VCHIQ_SRVSTATE_OPEN:
3344 status = queue_message(service->state, service,
3345 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3347 service->remoteport),
3348 copy_callback, context, size, 1);
3350 case VCHIQ_SRVSTATE_OPENSYNC:
3351 status = queue_message_sync(service->state, service,
3352 VCHIQ_MAKE_MSG(VCHIQ_MSG_DATA,
3354 service->remoteport),
3355 copy_callback, context, size, 1);
3358 status = VCHIQ_ERROR;
3364 unlock_service(service);
/*
 * Kernel-side convenience wrapper around vchiq_queue_message() that
 * copies @data via memcpy_copy_callback and loops on VCHIQ_RETRY so the
 * call blocks until the message is queued.
 * NOTE(review): the enclosing retry loop lines are not visible in this
 * extract.
 */
3369 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3371 enum vchiq_status status;
3374 status = vchiq_queue_message(handle, memcpy_copy_callback,
3378 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3379 * implement a retry mechanism since this function is supposed
3380 * to block until queued
3382 if (status != VCHIQ_RETRY)
3390 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Return a received message @header to the remote side. Claimed
 * messages in ordinary slots go through release_slot(); the dedicated
 * sync slot uses release_message_sync().
 */
3393 vchiq_release_message(unsigned int handle,
3394 struct vchiq_header *header)
3396 struct vchiq_service *service = find_service_by_handle(handle);
3397 struct vchiq_shared_state *remote;
3398 struct vchiq_state *state;
3404 state = service->state;
3405 remote = state->remote;
3407 slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);
3409 if ((slot_index >= remote->slot_first) &&
3410 (slot_index <= remote->slot_last)) {
3411 int msgid = header->msgid;
3413 if (msgid & VCHIQ_MSGID_CLAIMED) {
3414 struct vchiq_slot_info *slot_info =
3415 SLOT_INFO_FROM_INDEX(state, slot_index);
3417 release_slot(state, slot_info, header, service);
3419 } else if (slot_index == remote->slot_sync) {
3420 release_message_sync(state, header);
3423 unlock_service(service);
3425 EXPORT_SYMBOL(vchiq_release_message);
/*
 * Release a message from the sync slot: mark the header as padding and
 * signal the remote's sync_release event so it can reuse the slot.
 */
3428 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3430 header->msgid = VCHIQ_MSGID_PADDING;
3431 remote_event_signal(&state->remote->sync_release);
/*
 * Report the peer's negotiated version for @handle through
 * @peer_version. Returns VCHIQ_SUCCESS on success, VCHIQ_ERROR when
 * the service/argument checks fail.
 */
3435 vchiq_get_peer_version(unsigned int handle, short *peer_version)
3437 enum vchiq_status status = VCHIQ_ERROR;
3438 struct vchiq_service *service = find_service_by_handle(handle);
3443 if (vchiq_check_service(service) != VCHIQ_SUCCESS)
3449 *peer_version = service->peer_version;
3450 status = VCHIQ_SUCCESS;
3454 unlock_service(service);
3457 EXPORT_SYMBOL(vchiq_get_peer_version);
3459 void vchiq_get_config(struct vchiq_config *config)
3461 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3462 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3463 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3464 config->max_services = VCHIQ_MAX_SERVICES;
3465 config->version = VCHIQ_VERSION;
3466 config->version_min = VCHIQ_VERSION_MIN;
/*
 * Set a per-service option on @handle (autoclose, slot/message quota,
 * synchronous mode, tracing).
 *
 * FIX(review): the two completion calls below had been corrupted by
 * HTML-entity mangling — "&quota->quota_event" had its "&quot" decoded
 * to a double quote, yielding the nonsensical complete("a->quota_event).
 * Restored the correct complete(&quota->quota_event) at both sites.
 * NOTE(review): this extract is missing interior lines; all other code
 * is left byte-identical.
 */
3470 vchiq_set_service_option(unsigned int handle,
3471 enum vchiq_service_option option, int value)
3473 struct vchiq_service *service = find_service_by_handle(handle);
3474 struct vchiq_service_quota *quota;
3481 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3482 service->auto_close = value;
3486 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3487 quota = &service->state->service_quotas[service->localport];
3489 value = service->state->default_slot_quota;
3490 if ((value >= quota->slot_use_count) &&
3491 (value < (unsigned short)~0)) {
3492 quota->slot_quota = value;
3493 if ((value >= quota->slot_use_count) &&
3494 (quota->message_quota >= quota->message_use_count))
3496 * Signal the service that it may have
3497 * dropped below its quota
3499 complete(&quota->quota_event);
3504 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3505 quota = &service->state->service_quotas[service->localport];
3507 value = service->state->default_message_quota;
3508 if ((value >= quota->message_use_count) &&
3509 (value < (unsigned short)~0)) {
3510 quota->message_quota = value;
3511 if ((value >= quota->message_use_count) &&
3512 (quota->slot_quota >= quota->slot_use_count))
3514 * Signal the service that it may have
3515 * dropped below its quota
3517 complete(&quota->quota_event);
/* Sync mode may only change while the service is not open. */
3522 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3523 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3524 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3525 service->sync = value;
3530 case VCHIQ_SERVICE_OPTION_TRACE:
3531 service->trace = value;
3538 unlock_service(service);
/*
 * Dump one side's shared state (@shared, labelled @label): slot range,
 * positions, in-use slot counts and the DEBUG_* counter array.
 * NOTE(review): extract is missing interior lines (error checks after
 * each vchiq_dump); comments hedged.
 */
3544 vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
3545 struct vchiq_shared_state *shared, const char *label)
3547 static const char *const debug_names[] = {
3549 "SLOT_HANDLER_COUNT",
3550 "SLOT_HANDLER_LINE",
3554 "AWAIT_COMPLETION_LINE",
3555 "DEQUEUE_MESSAGE_LINE",
3556 "SERVICE_CALLBACK_LINE",
3557 "MSG_QUEUE_FULL_COUNT",
3558 "COMPLETION_QUEUE_FULL_COUNT"
3565 len = scnprintf(buf, sizeof(buf),
3566 " %s: slots %d-%d tx_pos=%x recycle=%x",
3567 label, shared->slot_first, shared->slot_last,
3568 shared->tx_pos, shared->slot_queue_recycle);
/* len + 1 includes the NUL terminator in the dump. */
3569 err = vchiq_dump(dump_context, buf, len + 1);
3573 len = scnprintf(buf, sizeof(buf),
3575 err = vchiq_dump(dump_context, buf, len + 1);
/* Only slots with outstanding (unreleased) uses are listed. */
3579 for (i = shared->slot_first; i <= shared->slot_last; i++) {
3580 struct vchiq_slot_info slot_info =
3581 *SLOT_INFO_FROM_INDEX(state, i);
3582 if (slot_info.use_count != slot_info.release_count) {
3583 len = scnprintf(buf, sizeof(buf),
3584 " %d: %d/%d", i, slot_info.use_count,
3585 slot_info.release_count);
3586 err = vchiq_dump(dump_context, buf, len + 1);
/* Index 0 is DEBUG_ENTRIES itself, so start at 1. */
3592 for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
3593 len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
3594 debug_names[i], shared->debug[i], shared->debug[i]);
3595 err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the whole connection state: positions, version, stats, slot
 * accounting, both shared-state halves, platform info and each service.
 * NOTE(review): extract is missing interior lines (error checks,
 * arguments to the shared-state dumps); comments hedged.
 */
3602 int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
3609 len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
3610 conn_state_names[state->conn_state]);
3611 err = vchiq_dump(dump_context, buf, len + 1);
3615 len = scnprintf(buf, sizeof(buf),
3616 " tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
3617 state->local->tx_pos,
3618 state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
3620 state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
3621 err = vchiq_dump(dump_context, buf, len + 1);
3625 len = scnprintf(buf, sizeof(buf),
3626 " Version: %d (min %d)",
3627 VCHIQ_VERSION, VCHIQ_VERSION_MIN);
3628 err = vchiq_dump(dump_context, buf, len + 1);
3632 if (VCHIQ_ENABLE_STATS) {
3633 len = scnprintf(buf, sizeof(buf),
3634 " Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
3635 state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
3636 state->stats.error_count);
3637 err = vchiq_dump(dump_context, buf, len + 1);
3642 len = scnprintf(buf, sizeof(buf),
3643 " Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
3644 ((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
3645 state->local_tx_pos) / VCHIQ_SLOT_SIZE,
3646 state->data_quota - state->data_use_count,
3647 state->local->slot_queue_recycle - state->slot_queue_available,
3648 state->stats.slot_stalls, state->stats.data_stalls);
3649 err = vchiq_dump(dump_context, buf, len + 1);
3653 err = vchiq_dump_platform_state(dump_context);
/* Presumably one call per side (local/remote) — arguments not shown. */
3657 err = vchiq_dump_shared_state(dump_context,
3663 err = vchiq_dump_shared_state(dump_context,
3670 err = vchiq_dump_platform_instances(dump_context);
3674 for (i = 0; i < state->unused_service; i++) {
3675 struct vchiq_service *service = find_service_by_port(state, i);
3678 err = vchiq_dump_service_state(dump_context, service);
3679 unlock_service(service);
/*
 * Dump one service: state, fourcc, remote port, quota usage, pending
 * bulks and (when stats are enabled) traffic counters.
 * NOTE(review): extract is missing interior lines (error checks, some
 * format arguments); comments hedged.
 */
3687 int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
3692 unsigned int ref_count;
3694 /*Don't include the lock just taken*/
3695 ref_count = kref_read(&service->ref_count) - 1;
3696 len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
3697 service->localport, srvstate_names[service->srvstate],
3700 if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
3701 char remoteport[30];
3702 struct vchiq_service_quota *quota =
3703 &service->state->service_quotas[service->localport];
3704 int fourcc = service->base.fourcc;
3705 int tx_pending, rx_pending;
3707 if (service->remoteport != VCHIQ_PORT_FREE) {
3708 int len2 = scnprintf(remoteport, sizeof(remoteport),
3709 "%u", service->remoteport);
/* Servers also show which client is attached. */
3711 if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
3712 scnprintf(remoteport + len2,
3713 sizeof(remoteport) - len2,
3714 " (client %x)", service->client_id);
3716 strcpy(remoteport, "n/a");
3719 len += scnprintf(buf + len, sizeof(buf) - len,
3720 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
3721 VCHIQ_FOURCC_AS_4CHARS(fourcc),
3723 quota->message_use_count,
3724 quota->message_quota,
3725 quota->slot_use_count,
3728 err = vchiq_dump(dump_context, buf, len + 1);
/* Pending = queued locally but not yet consumed by the remote. */
3732 tx_pending = service->bulk_tx.local_insert -
3733 service->bulk_tx.remote_insert;
3735 rx_pending = service->bulk_rx.local_insert -
3736 service->bulk_rx.remote_insert;
3738 len = scnprintf(buf, sizeof(buf),
3739 " Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
3741 tx_pending ? service->bulk_tx.bulks[
3742 BULK_INDEX(service->bulk_tx.remove)].size : 0,
3744 rx_pending ? service->bulk_rx.bulks[
3745 BULK_INDEX(service->bulk_rx.remove)].size : 0);
3747 if (VCHIQ_ENABLE_STATS) {
3748 err = vchiq_dump(dump_context, buf, len + 1);
3752 len = scnprintf(buf, sizeof(buf),
3753 " Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3754 service->stats.ctrl_tx_count,
3755 service->stats.ctrl_tx_bytes,
3756 service->stats.ctrl_rx_count,
3757 service->stats.ctrl_rx_bytes);
3758 err = vchiq_dump(dump_context, buf, len + 1);
3762 len = scnprintf(buf, sizeof(buf),
3763 " Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
3764 service->stats.bulk_tx_count,
3765 service->stats.bulk_tx_bytes,
3766 service->stats.bulk_rx_count,
3767 service->stats.bulk_rx_bytes);
3768 err = vchiq_dump(dump_context, buf, len + 1);
3772 len = scnprintf(buf, sizeof(buf),
3773 " %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
3774 service->stats.quota_stalls,
3775 service->stats.slot_stalls,
3776 service->stats.bulk_stalls,
3777 service->stats.bulk_aborted_count,
3778 service->stats.error_count);
3782 err = vchiq_dump(dump_context, buf, len + 1);
3786 if (service->srvstate != VCHIQ_SRVSTATE_FREE)
3787 err = vchiq_dump_platform_service_state(dump_context, service);
/* Print the opening banner that brackets a loud (high-visibility) error. */
3792 vchiq_loud_error_header(void)
3794 vchiq_log_error(vchiq_core_log_level,
3795 "============================================================================")
3796 vchiq_log_error(vchiq_core_log_level,
3797 "============================================================================");
3798 vchiq_log_error(vchiq_core_log_level, "=====");
/* Print the closing banner that brackets a loud (high-visibility) error. */
3802 vchiq_loud_error_footer(void)
3804 vchiq_log_error(vchiq_core_log_level, "=====");
3805 vchiq_log_error(vchiq_core_log_level,
3806 "============================================================================");
3807 vchiq_log_error(vchiq_core_log_level,
3808 "============================================================================");
/*
 * Send a REMOTE_USE message to the peer. Bails early when disconnected
 * (the early-return value line is not visible in this extract).
 */
3811 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3813 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3816 return queue_message(state, NULL,
3817 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE, 0, 0),
/*
 * Send a REMOTE_USE_ACTIVE message to the peer. Bails early when
 * disconnected (the early-return value line is not visible here).
 */
3821 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3823 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3826 return queue_message(state, NULL,
3827 VCHIQ_MAKE_MSG(VCHIQ_MSG_REMOTE_USE_ACTIVE, 0, 0),
3831 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3834 const u8 *mem = void_mem;
3839 while (num_bytes > 0) {
3842 for (offset = 0; offset < 16; offset++) {
3843 if (offset < num_bytes)
3844 s += scnprintf(s, 4, "%02x ", mem[offset]);
3846 s += scnprintf(s, 4, " ");
3849 for (offset = 0; offset < 16; offset++) {
3850 if (offset < num_bytes) {
3851 u8 ch = mem[offset];
3853 if ((ch < ' ') || (ch > '~'))
3860 if (label && (*label != '\0'))
3861 vchiq_log_trace(VCHIQ_LOG_TRACE,
3862 "%s: %08x: %s", label, addr, line_buf);
3864 vchiq_log_trace(VCHIQ_LOG_TRACE,
3865 "%08x: %s", addr, line_buf);