1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /* Copyright (c) 2010-2012 Broadcom. All rights reserved. */
4 #include <linux/types.h>
5 #include <linux/completion.h>
6 #include <linux/mutex.h>
7 #include <linux/bitops.h>
8 #include <linux/kthread.h>
9 #include <linux/wait.h>
10 #include <linux/delay.h>
11 #include <linux/slab.h>
12 #include <linux/kref.h>
13 #include <linux/rcupdate.h>
14 #include <linux/sched/signal.h>
16 #include "vchiq_core.h"
18 #define VCHIQ_SLOT_HANDLER_STACK 8192
20 #define VCHIQ_MSG_PADDING 0 /* - */
21 #define VCHIQ_MSG_CONNECT 1 /* - */
22 #define VCHIQ_MSG_OPEN 2 /* + (srcport, -), fourcc, client_id */
23 #define VCHIQ_MSG_OPENACK 3 /* + (srcport, dstport) */
24 #define VCHIQ_MSG_CLOSE 4 /* + (srcport, dstport) */
25 #define VCHIQ_MSG_DATA 5 /* + (srcport, dstport) */
26 #define VCHIQ_MSG_BULK_RX 6 /* + (srcport, dstport), data, size */
27 #define VCHIQ_MSG_BULK_TX 7 /* + (srcport, dstport), data, size */
28 #define VCHIQ_MSG_BULK_RX_DONE 8 /* + (srcport, dstport), actual */
29 #define VCHIQ_MSG_BULK_TX_DONE 9 /* + (srcport, dstport), actual */
30 #define VCHIQ_MSG_PAUSE 10 /* - */
31 #define VCHIQ_MSG_RESUME 11 /* - */
32 #define VCHIQ_MSG_REMOTE_USE 12 /* - */
33 #define VCHIQ_MSG_REMOTE_RELEASE 13 /* - */
34 #define VCHIQ_MSG_REMOTE_USE_ACTIVE 14 /* - */
38 #define VCHIQ_PORT_MAX (VCHIQ_MAX_SERVICES - 1)
39 #define VCHIQ_PORT_FREE 0x1000
40 #define VCHIQ_PORT_IS_VALID(port) ((port) < VCHIQ_PORT_FREE)
41 #define VCHIQ_MAKE_MSG(type, srcport, dstport) \
42 (((type) << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
43 #define VCHIQ_MSG_TYPE(msgid) ((unsigned int)(msgid) >> TYPE_SHIFT)
44 #define VCHIQ_MSG_SRCPORT(msgid) \
45 (unsigned short)(((unsigned int)(msgid) >> 12) & 0xfff)
46 #define VCHIQ_MSG_DSTPORT(msgid) \
47 ((unsigned short)(msgid) & 0xfff)
49 #define MAKE_CONNECT (VCHIQ_MSG_CONNECT << TYPE_SHIFT)
50 #define MAKE_OPEN(srcport) \
51 ((VCHIQ_MSG_OPEN << TYPE_SHIFT) | ((srcport) << 12))
52 #define MAKE_OPENACK(srcport, dstport) \
53 ((VCHIQ_MSG_OPENACK << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
54 #define MAKE_CLOSE(srcport, dstport) \
55 ((VCHIQ_MSG_CLOSE << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
56 #define MAKE_DATA(srcport, dstport) \
57 ((VCHIQ_MSG_DATA << TYPE_SHIFT) | ((srcport) << 12) | ((dstport) << 0))
58 #define MAKE_PAUSE (VCHIQ_MSG_PAUSE << TYPE_SHIFT)
59 #define MAKE_RESUME (VCHIQ_MSG_RESUME << TYPE_SHIFT)
60 #define MAKE_REMOTE_USE (VCHIQ_MSG_REMOTE_USE << TYPE_SHIFT)
61 #define MAKE_REMOTE_USE_ACTIVE (VCHIQ_MSG_REMOTE_USE_ACTIVE << TYPE_SHIFT)
63 /* Ensure the fields are wide enough */
64 static_assert(VCHIQ_MSG_SRCPORT(VCHIQ_MAKE_MSG(0, 0, VCHIQ_PORT_MAX))
66 static_assert(VCHIQ_MSG_TYPE(VCHIQ_MAKE_MSG(0, VCHIQ_PORT_MAX, 0)) == 0);
67 static_assert((unsigned int)VCHIQ_PORT_MAX <
68 (unsigned int)VCHIQ_PORT_FREE);
70 #define VCHIQ_MSGID_PADDING VCHIQ_MAKE_MSG(VCHIQ_MSG_PADDING, 0, 0)
71 #define VCHIQ_MSGID_CLAIMED 0x40000000
73 #define VCHIQ_FOURCC_INVALID 0x00000000
74 #define VCHIQ_FOURCC_IS_LEGAL(fourcc) ((fourcc) != VCHIQ_FOURCC_INVALID)
76 #define VCHIQ_BULK_ACTUAL_ABORTED -1
/*
 * Statistics accounting macros.  When stats are disabled they compile to
 * nothing so call sites need no #ifdefs.  The '#else'/'#endif' pair was
 * missing in the corrupted text and is restored here.
 */
#if VCHIQ_ENABLE_STATS
#define VCHIQ_STATS_INC(state, stat) (state->stats. stat++)
#define VCHIQ_SERVICE_STATS_INC(service, stat) (service->stats. stat++)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) \
	(service->stats. stat += addend)
#else
#define VCHIQ_STATS_INC(state, stat) ((void)0)
#define VCHIQ_SERVICE_STATS_INC(service, stat) ((void)0)
#define VCHIQ_SERVICE_STATS_ADD(service, stat, addend) ((void)0)
#endif
89 #define HANDLE_STATE_SHIFT 12
/* Translate between slot indices, slot info records and slot data. */
#define SLOT_INFO_FROM_INDEX(state, index) (state->slot_info + (index))
#define SLOT_DATA_FROM_INDEX(state, index) (state->slot_data + (index))
#define SLOT_INDEX_FROM_DATA(state, data) \
	(((unsigned int)((char *)data - (char *)state->slot_data)) / \
	VCHIQ_SLOT_SIZE)
#define SLOT_INDEX_FROM_INFO(state, info) \
	((unsigned int)(info - state->slot_info))
#define SLOT_QUEUE_INDEX_FROM_POS(pos) \
	((int)((unsigned int)(pos) / VCHIQ_SLOT_SIZE))
#define SLOT_QUEUE_INDEX_FROM_POS_MASKED(pos) \
	(SLOT_QUEUE_INDEX_FROM_POS(pos) & VCHIQ_SLOT_QUEUE_MASK)

#define BULK_INDEX(x) ((x) & (VCHIQ_NUM_SERVICE_BULKS - 1))
105 #define SRVTRACE_LEVEL(srv) \
106 (((srv) && (srv)->trace) ? VCHIQ_LOG_TRACE : vchiq_core_msg_log_level)
107 #define SRVTRACE_ENABLED(srv, lev) \
108 (((srv) && (srv)->trace) || (vchiq_core_msg_log_level >= (lev)))
/* Arguments for close/poll helpers — the RETRY_POLL line was dropped. */
#define NO_CLOSE_RECVD	0
#define CLOSE_RECVD	1

#define NO_RETRY_POLL	0
#define RETRY_POLL	1
116 struct vchiq_open_payload {
123 struct vchiq_openack_payload {
128 QMFLAGS_IS_BLOCKING = BIT(0),
129 QMFLAGS_NO_MUTEX_LOCK = BIT(1),
130 QMFLAGS_NO_MUTEX_UNLOCK = BIT(2)
134 VCHIQ_POLL_TERMINATE,
141 /* we require this for consistency between endpoints */
142 static_assert(sizeof(struct vchiq_header) == 8);
143 static_assert(VCHIQ_VERSION >= VCHIQ_VERSION_MIN);
145 static inline void check_sizes(void)
147 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_SLOT_SIZE);
148 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS);
149 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SLOTS_PER_SIDE);
150 BUILD_BUG_ON_NOT_POWER_OF_2(sizeof(struct vchiq_header));
151 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_CURRENT_BULKS);
152 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_NUM_SERVICE_BULKS);
153 BUILD_BUG_ON_NOT_POWER_OF_2(VCHIQ_MAX_SERVICES);
156 /* Run time control of log level, based on KERN_XXX level. */
157 int vchiq_core_log_level = VCHIQ_LOG_DEFAULT;
158 int vchiq_core_msg_log_level = VCHIQ_LOG_DEFAULT;
159 int vchiq_sync_log_level = VCHIQ_LOG_DEFAULT;
161 DEFINE_SPINLOCK(bulk_waiter_spinlock);
162 static DEFINE_SPINLOCK(quota_spinlock);
164 struct vchiq_state *vchiq_states[VCHIQ_MAX_STATES];
165 static unsigned int handle_seq;
/*
 * Human-readable names for log messages, indexed by the corresponding
 * enums.  NOTE(review): entries reconstructed from upstream vchiq_core.c —
 * they must stay in step with the enum definitions in vchiq_core.h.
 */
static const char *const srvstate_names[] = {
	"FREE",
	"HIDDEN",
	"LISTENING",
	"OPENING",
	"OPEN",
	"OPENSYNC",
	"CLOSESENT",
	"CLOSERECVD",
	"CLOSEWAIT",
	"CLOSED"
};

static const char *const reason_names[] = {
	"SERVICE_OPENED",
	"SERVICE_CLOSED",
	"MESSAGE_AVAILABLE",
	"BULK_TRANSMIT_DONE",
	"BULK_RECEIVE_DONE",
	"BULK_TRANSMIT_ABORTED",
	"BULK_RECEIVE_ABORTED"
};

static const char *const conn_state_names[] = {
	"DISCONNECTED",
	"CONNECTING",
	"CONNECTED",
	"PAUSING",
	"PAUSE_SENT",
	"PAUSED",
	"RESUMING",
	"PAUSE_TIMEOUT",
	"RESUME_TIMEOUT"
};

static void
release_message_sync(struct vchiq_state *state, struct vchiq_header *header);
205 static const char *msg_type_str(unsigned int msg_type)
208 case VCHIQ_MSG_PADDING: return "PADDING";
209 case VCHIQ_MSG_CONNECT: return "CONNECT";
210 case VCHIQ_MSG_OPEN: return "OPEN";
211 case VCHIQ_MSG_OPENACK: return "OPENACK";
212 case VCHIQ_MSG_CLOSE: return "CLOSE";
213 case VCHIQ_MSG_DATA: return "DATA";
214 case VCHIQ_MSG_BULK_RX: return "BULK_RX";
215 case VCHIQ_MSG_BULK_TX: return "BULK_TX";
216 case VCHIQ_MSG_BULK_RX_DONE: return "BULK_RX_DONE";
217 case VCHIQ_MSG_BULK_TX_DONE: return "BULK_TX_DONE";
218 case VCHIQ_MSG_PAUSE: return "PAUSE";
219 case VCHIQ_MSG_RESUME: return "RESUME";
220 case VCHIQ_MSG_REMOTE_USE: return "REMOTE_USE";
221 case VCHIQ_MSG_REMOTE_RELEASE: return "REMOTE_RELEASE";
222 case VCHIQ_MSG_REMOTE_USE_ACTIVE: return "REMOTE_USE_ACTIVE";
228 vchiq_set_service_state(struct vchiq_service *service, int newstate)
230 vchiq_log_info(vchiq_core_log_level, "%d: srv:%d %s->%s",
231 service->state->id, service->localport,
232 srvstate_names[service->srvstate],
233 srvstate_names[newstate]);
234 service->srvstate = newstate;
237 struct vchiq_service *
238 find_service_by_handle(unsigned int handle)
240 struct vchiq_service *service;
243 service = handle_to_service(handle);
244 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
245 service->handle == handle &&
246 kref_get_unless_zero(&service->ref_count)) {
247 service = rcu_pointer_handoff(service);
252 vchiq_log_info(vchiq_core_log_level,
253 "Invalid service handle 0x%x", handle);
257 struct vchiq_service *
258 find_service_by_port(struct vchiq_state *state, int localport)
261 if ((unsigned int)localport <= VCHIQ_PORT_MAX) {
262 struct vchiq_service *service;
265 service = rcu_dereference(state->services[localport]);
266 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
267 kref_get_unless_zero(&service->ref_count)) {
268 service = rcu_pointer_handoff(service);
274 vchiq_log_info(vchiq_core_log_level,
275 "Invalid port %d", localport);
279 struct vchiq_service *
280 find_service_for_instance(struct vchiq_instance *instance,
283 struct vchiq_service *service;
286 service = handle_to_service(handle);
287 if (service && service->srvstate != VCHIQ_SRVSTATE_FREE &&
288 service->handle == handle &&
289 service->instance == instance &&
290 kref_get_unless_zero(&service->ref_count)) {
291 service = rcu_pointer_handoff(service);
296 vchiq_log_info(vchiq_core_log_level,
297 "Invalid service handle 0x%x", handle);
301 struct vchiq_service *
302 find_closed_service_for_instance(struct vchiq_instance *instance,
305 struct vchiq_service *service;
308 service = handle_to_service(handle);
310 (service->srvstate == VCHIQ_SRVSTATE_FREE ||
311 service->srvstate == VCHIQ_SRVSTATE_CLOSED) &&
312 service->handle == handle &&
313 service->instance == instance &&
314 kref_get_unless_zero(&service->ref_count)) {
315 service = rcu_pointer_handoff(service);
320 vchiq_log_info(vchiq_core_log_level,
321 "Invalid service handle 0x%x", handle);
325 struct vchiq_service *
326 __next_service_by_instance(struct vchiq_state *state,
327 struct vchiq_instance *instance,
330 struct vchiq_service *service = NULL;
333 while (idx < state->unused_service) {
334 struct vchiq_service *srv;
336 srv = rcu_dereference(state->services[idx]);
338 if (srv && srv->srvstate != VCHIQ_SRVSTATE_FREE &&
339 srv->instance == instance) {
349 struct vchiq_service *
350 next_service_by_instance(struct vchiq_state *state,
351 struct vchiq_instance *instance,
354 struct vchiq_service *service;
358 service = __next_service_by_instance(state, instance, pidx);
361 if (kref_get_unless_zero(&service->ref_count)) {
362 service = rcu_pointer_handoff(service);
371 vchiq_service_get(struct vchiq_service *service)
374 WARN(1, "%s service is NULL\n", __func__);
377 kref_get(&service->ref_count);
380 static void service_release(struct kref *kref)
382 struct vchiq_service *service =
383 container_of(kref, struct vchiq_service, ref_count);
384 struct vchiq_state *state = service->state;
386 WARN_ON(service->srvstate != VCHIQ_SRVSTATE_FREE);
387 rcu_assign_pointer(state->services[service->localport], NULL);
388 if (service->userdata_term)
389 service->userdata_term(service->base.userdata);
390 kfree_rcu(service, rcu);
394 vchiq_service_put(struct vchiq_service *service)
397 WARN(1, "%s: service is NULL\n", __func__);
400 kref_put(&service->ref_count, service_release);
404 vchiq_get_client_id(unsigned int handle)
406 struct vchiq_service *service;
410 service = handle_to_service(handle);
411 id = service ? service->client_id : 0;
417 vchiq_get_service_userdata(unsigned int handle)
420 struct vchiq_service *service;
423 service = handle_to_service(handle);
424 userdata = service ? service->base.userdata : NULL;
428 EXPORT_SYMBOL(vchiq_get_service_userdata);
431 mark_service_closing_internal(struct vchiq_service *service, int sh_thread)
433 struct vchiq_state *state = service->state;
434 struct vchiq_service_quota *quota;
436 service->closing = 1;
438 /* Synchronise with other threads. */
439 mutex_lock(&state->recycle_mutex);
440 mutex_unlock(&state->recycle_mutex);
441 if (!sh_thread || (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT)) {
443 * If we're pausing then the slot_mutex is held until resume
444 * by the slot handler. Therefore don't try to acquire this
445 * mutex if we're the slot handler and in the pause sent state.
446 * We don't need to in this case anyway.
448 mutex_lock(&state->slot_mutex);
449 mutex_unlock(&state->slot_mutex);
452 /* Unblock any sending thread. */
453 quota = &state->service_quotas[service->localport];
454 complete("a->quota_event);
/* Non-slot-handler wrapper for mark_service_closing_internal(). */
static void
mark_service_closing(struct vchiq_service *service)
{
	mark_service_closing_internal(service, 0);
}
463 static inline enum vchiq_status
464 make_service_callback(struct vchiq_service *service, enum vchiq_reason reason,
465 struct vchiq_header *header, void *bulk_userdata)
467 enum vchiq_status status;
469 vchiq_log_trace(vchiq_core_log_level, "%d: callback:%d (%s, %pK, %pK)",
470 service->state->id, service->localport, reason_names[reason],
471 header, bulk_userdata);
472 status = service->base.callback(reason, header, service->handle,
474 if (status == VCHIQ_ERROR) {
475 vchiq_log_warning(vchiq_core_log_level,
476 "%d: ignoring ERROR from callback to service %x",
477 service->state->id, service->handle);
478 status = VCHIQ_SUCCESS;
481 if (reason != VCHIQ_MESSAGE_AVAILABLE)
482 vchiq_release_message(service->handle, header);
488 vchiq_set_conn_state(struct vchiq_state *state, enum vchiq_connstate newstate)
490 enum vchiq_connstate oldstate = state->conn_state;
492 vchiq_log_info(vchiq_core_log_level, "%d: %s->%s", state->id,
493 conn_state_names[oldstate],
494 conn_state_names[newstate]);
495 state->conn_state = newstate;
496 vchiq_platform_conn_state_changed(state, oldstate, newstate);
500 remote_event_create(wait_queue_head_t *wq, struct remote_event *event)
504 * Don't clear the 'fired' flag because it may already have been set
507 init_waitqueue_head(wq);
/*
 * All the event waiting routines in VCHIQ used a custom semaphore
 * implementation that filtered most signals. This achieved behaviour similar
 * to the "killable" family of functions. While cleaning up this code, all
 * the routines were switched to the "interruptible" family of functions, as
 * the former was deemed unjustified and the use of "killable" would have put
 * all of VCHIQ's threads in the D state.
 */
519 remote_event_wait(wait_queue_head_t *wq, struct remote_event *event)
524 if (wait_event_interruptible(*wq, event->fired)) {
537 remote_event_signal_local(wait_queue_head_t *wq, struct remote_event *event)
545 remote_event_poll(wait_queue_head_t *wq, struct remote_event *event)
547 if (event->fired && event->armed)
548 remote_event_signal_local(wq, event);
552 remote_event_pollall(struct vchiq_state *state)
554 remote_event_poll(&state->sync_trigger_event, &state->local->sync_trigger);
555 remote_event_poll(&state->sync_release_event, &state->local->sync_release);
556 remote_event_poll(&state->trigger_event, &state->local->trigger);
557 remote_event_poll(&state->recycle_event, &state->local->recycle);
561 * Round up message sizes so that any space at the end of a slot is always big
562 * enough for a header. This relies on header size being a power of two, which
563 * has been verified earlier by a static assertion.
567 calc_stride(size_t size)
569 /* Allow room for the header */
570 size += sizeof(struct vchiq_header);
573 return (size + sizeof(struct vchiq_header) - 1) &
574 ~(sizeof(struct vchiq_header) - 1);
577 /* Called by the slot handler thread */
578 static struct vchiq_service *
579 get_listening_service(struct vchiq_state *state, int fourcc)
583 WARN_ON(fourcc == VCHIQ_FOURCC_INVALID);
586 for (i = 0; i < state->unused_service; i++) {
587 struct vchiq_service *service;
589 service = rcu_dereference(state->services[i]);
591 service->public_fourcc == fourcc &&
592 (service->srvstate == VCHIQ_SRVSTATE_LISTENING ||
593 (service->srvstate == VCHIQ_SRVSTATE_OPEN &&
594 service->remoteport == VCHIQ_PORT_FREE)) &&
595 kref_get_unless_zero(&service->ref_count)) {
596 service = rcu_pointer_handoff(service);
605 /* Called by the slot handler thread */
606 static struct vchiq_service *
607 get_connected_service(struct vchiq_state *state, unsigned int port)
612 for (i = 0; i < state->unused_service; i++) {
613 struct vchiq_service *service =
614 rcu_dereference(state->services[i]);
616 if (service && service->srvstate == VCHIQ_SRVSTATE_OPEN &&
617 service->remoteport == port &&
618 kref_get_unless_zero(&service->ref_count)) {
619 service = rcu_pointer_handoff(service);
629 request_poll(struct vchiq_state *state, struct vchiq_service *service,
639 value = atomic_read(&service->poll_flags);
640 } while (atomic_cmpxchg(&service->poll_flags, value,
641 value | BIT(poll_type)) != value);
643 index = BITSET_WORD(service->localport);
645 value = atomic_read(&state->poll_services[index]);
646 } while (atomic_cmpxchg(&state->poll_services[index],
647 value, value | BIT(service->localport & 0x1f)) != value);
650 state->poll_needed = 1;
653 /* ... and ensure the slot handler runs. */
654 remote_event_signal_local(&state->trigger_event, &state->local->trigger);
658 * Called from queue_message, by the slot handler and application threads,
659 * with slot_mutex held
661 static struct vchiq_header *
662 reserve_space(struct vchiq_state *state, size_t space, int is_blocking)
664 struct vchiq_shared_state *local = state->local;
665 int tx_pos = state->local_tx_pos;
666 int slot_space = VCHIQ_SLOT_SIZE - (tx_pos & VCHIQ_SLOT_MASK);
668 if (space > slot_space) {
669 struct vchiq_header *header;
670 /* Fill the remaining space with padding */
671 WARN_ON(!state->tx_data);
672 header = (struct vchiq_header *)
673 (state->tx_data + (tx_pos & VCHIQ_SLOT_MASK));
674 header->msgid = VCHIQ_MSGID_PADDING;
675 header->size = slot_space - sizeof(struct vchiq_header);
677 tx_pos += slot_space;
680 /* If necessary, get the next slot. */
681 if ((tx_pos & VCHIQ_SLOT_MASK) == 0) {
684 /* If there is no free slot... */
686 if (!try_wait_for_completion(&state->slot_available_event)) {
687 /* ...wait for one. */
689 VCHIQ_STATS_INC(state, slot_stalls);
691 /* But first, flush through the last slot. */
692 state->local_tx_pos = tx_pos;
693 local->tx_pos = tx_pos;
694 remote_event_signal(&state->remote->trigger);
697 (wait_for_completion_interruptible(
698 &state->slot_available_event)))
699 return NULL; /* No space available */
702 if (tx_pos == (state->slot_queue_available * VCHIQ_SLOT_SIZE)) {
703 complete(&state->slot_available_event);
704 pr_warn("%s: invalid tx_pos: %d\n", __func__, tx_pos);
708 slot_index = local->slot_queue[
709 SLOT_QUEUE_INDEX_FROM_POS_MASKED(tx_pos)];
711 (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
714 state->local_tx_pos = tx_pos + space;
716 return (struct vchiq_header *)(state->tx_data +
717 (tx_pos & VCHIQ_SLOT_MASK));
721 process_free_data_message(struct vchiq_state *state, BITSET_T *service_found,
722 struct vchiq_header *header)
724 int msgid = header->msgid;
725 int port = VCHIQ_MSG_SRCPORT(msgid);
726 struct vchiq_service_quota *quota = &state->service_quotas[port];
729 spin_lock("a_spinlock);
730 count = quota->message_use_count;
732 quota->message_use_count = count - 1;
733 spin_unlock("a_spinlock);
735 if (count == quota->message_quota) {
737 * Signal the service that it
738 * has dropped below its quota
740 complete("a->quota_event);
741 } else if (count == 0) {
742 vchiq_log_error(vchiq_core_log_level,
743 "service %d message_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
745 quota->message_use_count,
746 header, msgid, header->msgid,
748 WARN(1, "invalid message use count\n");
750 if (!BITSET_IS_SET(service_found, port)) {
751 /* Set the found bit for this service */
752 BITSET_SET(service_found, port);
754 spin_lock("a_spinlock);
755 count = quota->slot_use_count;
757 quota->slot_use_count = count - 1;
758 spin_unlock("a_spinlock);
762 * Signal the service in case
763 * it has dropped below its quota
765 complete("a->quota_event);
766 vchiq_log_trace(vchiq_core_log_level,
767 "%d: pfq:%d %x@%pK - slot_use->%d",
769 header->size, header,
772 vchiq_log_error(vchiq_core_log_level,
773 "service %d slot_use_count=%d (header %pK, msgid %x, header->msgid %x, header->size %x)",
775 msgid, header->msgid,
777 WARN(1, "bad slot use count\n");
782 /* Called by the recycle thread. */
784 process_free_queue(struct vchiq_state *state, BITSET_T *service_found,
787 struct vchiq_shared_state *local = state->local;
788 int slot_queue_available;
791 * Find slots which have been freed by the other side, and return them
792 * to the available queue.
794 slot_queue_available = state->slot_queue_available;
797 * Use a memory barrier to ensure that any state that may have been
798 * modified by another thread is not masked by stale prefetched
803 while (slot_queue_available != local->slot_queue_recycle) {
805 int slot_index = local->slot_queue[slot_queue_available &
806 VCHIQ_SLOT_QUEUE_MASK];
807 char *data = (char *)SLOT_DATA_FROM_INDEX(state, slot_index);
810 slot_queue_available++;
812 * Beware of the address dependency - data is calculated
813 * using an index written by the other side.
817 vchiq_log_trace(vchiq_core_log_level, "%d: pfq %d=%pK %x %x",
818 state->id, slot_index, data,
819 local->slot_queue_recycle, slot_queue_available);
821 /* Initialise the bitmask for services which have used this slot */
822 memset(service_found, 0, length);
826 while (pos < VCHIQ_SLOT_SIZE) {
827 struct vchiq_header *header =
828 (struct vchiq_header *)(data + pos);
829 int msgid = header->msgid;
831 if (VCHIQ_MSG_TYPE(msgid) == VCHIQ_MSG_DATA) {
832 process_free_data_message(state, service_found,
837 pos += calc_stride(header->size);
838 if (pos > VCHIQ_SLOT_SIZE) {
839 vchiq_log_error(vchiq_core_log_level,
840 "pfq - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
841 pos, header, msgid, header->msgid,
843 WARN(1, "invalid slot position\n");
850 spin_lock("a_spinlock);
851 count = state->data_use_count;
853 state->data_use_count = count - 1;
854 spin_unlock("a_spinlock);
855 if (count == state->data_quota)
856 complete(&state->data_quota_event);
860 * Don't allow the slot to be reused until we are no
861 * longer interested in it.
865 state->slot_queue_available = slot_queue_available;
866 complete(&state->slot_available_event);
871 memcpy_copy_callback(
872 void *context, void *dest,
873 size_t offset, size_t maxsize)
875 memcpy(dest + offset, context + offset, maxsize);
881 ssize_t (*copy_callback)(void *context, void *dest,
882 size_t offset, size_t maxsize),
890 ssize_t callback_result;
891 size_t max_bytes = size - pos;
894 copy_callback(context, dest + pos,
897 if (callback_result < 0)
898 return callback_result;
900 if (!callback_result)
903 if (callback_result > max_bytes)
906 pos += callback_result;
912 /* Called by the slot handler and application threads */
913 static enum vchiq_status
914 queue_message(struct vchiq_state *state, struct vchiq_service *service,
916 ssize_t (*copy_callback)(void *context, void *dest,
917 size_t offset, size_t maxsize),
918 void *context, size_t size, int flags)
920 struct vchiq_shared_state *local;
921 struct vchiq_service_quota *quota = NULL;
922 struct vchiq_header *header;
923 int type = VCHIQ_MSG_TYPE(msgid);
927 local = state->local;
929 stride = calc_stride(size);
931 WARN_ON(stride > VCHIQ_SLOT_SIZE);
933 if (!(flags & QMFLAGS_NO_MUTEX_LOCK) &&
934 mutex_lock_killable(&state->slot_mutex))
937 if (type == VCHIQ_MSG_DATA) {
941 WARN(1, "%s: service is NULL\n", __func__);
942 mutex_unlock(&state->slot_mutex);
946 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
947 QMFLAGS_NO_MUTEX_UNLOCK));
949 if (service->closing) {
950 /* The service has been closed */
951 mutex_unlock(&state->slot_mutex);
955 quota = &state->service_quotas[service->localport];
957 spin_lock("a_spinlock);
960 * Ensure this service doesn't use more than its quota of
963 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
964 state->local_tx_pos + stride - 1);
967 * Ensure data messages don't use more than their quota of
970 while ((tx_end_index != state->previous_data_index) &&
971 (state->data_use_count == state->data_quota)) {
972 VCHIQ_STATS_INC(state, data_stalls);
973 spin_unlock("a_spinlock);
974 mutex_unlock(&state->slot_mutex);
976 if (wait_for_completion_interruptible(
977 &state->data_quota_event))
980 mutex_lock(&state->slot_mutex);
981 spin_lock("a_spinlock);
982 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
983 state->local_tx_pos + stride - 1);
984 if ((tx_end_index == state->previous_data_index) ||
985 (state->data_use_count < state->data_quota)) {
986 /* Pass the signal on to other waiters */
987 complete(&state->data_quota_event);
992 while ((quota->message_use_count == quota->message_quota) ||
993 ((tx_end_index != quota->previous_tx_index) &&
994 (quota->slot_use_count == quota->slot_quota))) {
995 spin_unlock("a_spinlock);
996 vchiq_log_trace(vchiq_core_log_level,
997 "%d: qm:%d %s,%zx - quota stall (msg %d, slot %d)",
998 state->id, service->localport,
999 msg_type_str(type), size,
1000 quota->message_use_count,
1001 quota->slot_use_count);
1002 VCHIQ_SERVICE_STATS_INC(service, quota_stalls);
1003 mutex_unlock(&state->slot_mutex);
1004 if (wait_for_completion_interruptible(
1005 "a->quota_event))
1007 if (service->closing)
1009 if (mutex_lock_killable(&state->slot_mutex))
1011 if (service->srvstate != VCHIQ_SRVSTATE_OPEN) {
1012 /* The service has been closed */
1013 mutex_unlock(&state->slot_mutex);
1016 spin_lock("a_spinlock);
1017 tx_end_index = SLOT_QUEUE_INDEX_FROM_POS(
1018 state->local_tx_pos + stride - 1);
1021 spin_unlock("a_spinlock);
1024 header = reserve_space(state, stride, flags & QMFLAGS_IS_BLOCKING);
1028 VCHIQ_SERVICE_STATS_INC(service, slot_stalls);
1030 * In the event of a failure, return the mutex to the
1033 if (!(flags & QMFLAGS_NO_MUTEX_LOCK))
1034 mutex_unlock(&state->slot_mutex);
1038 if (type == VCHIQ_MSG_DATA) {
1039 ssize_t callback_result;
1043 vchiq_log_info(vchiq_core_log_level,
1044 "%d: qm %s@%pK,%zx (%d->%d)",
1045 state->id, msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1046 header, size, VCHIQ_MSG_SRCPORT(msgid),
1047 VCHIQ_MSG_DSTPORT(msgid));
1049 WARN_ON(flags & (QMFLAGS_NO_MUTEX_LOCK |
1050 QMFLAGS_NO_MUTEX_UNLOCK));
1053 copy_message_data(copy_callback, context,
1054 header->data, size);
1056 if (callback_result < 0) {
1057 mutex_unlock(&state->slot_mutex);
1058 VCHIQ_SERVICE_STATS_INC(service,
1063 if (SRVTRACE_ENABLED(service,
1065 vchiq_log_dump_mem("Sent", 0,
1068 (size_t)callback_result));
1070 spin_lock("a_spinlock);
1071 quota->message_use_count++;
1074 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos - 1);
1077 * If this transmission can't fit in the last slot used by any
1078 * service, the data_use_count must be increased.
1080 if (tx_end_index != state->previous_data_index) {
1081 state->previous_data_index = tx_end_index;
1082 state->data_use_count++;
1086 * If this isn't the same slot last used by this service,
1087 * the service's slot_use_count must be increased.
1089 if (tx_end_index != quota->previous_tx_index) {
1090 quota->previous_tx_index = tx_end_index;
1091 slot_use_count = ++quota->slot_use_count;
1096 spin_unlock("a_spinlock);
1099 vchiq_log_trace(vchiq_core_log_level,
1100 "%d: qm:%d %s,%zx - slot_use->%d (hdr %p)",
1101 state->id, service->localport,
1102 msg_type_str(VCHIQ_MSG_TYPE(msgid)), size,
1103 slot_use_count, header);
1105 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1106 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1108 vchiq_log_info(vchiq_core_log_level,
1109 "%d: qm %s@%pK,%zx (%d->%d)", state->id,
1110 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1111 header, size, VCHIQ_MSG_SRCPORT(msgid),
1112 VCHIQ_MSG_DSTPORT(msgid));
1115 * It is assumed for now that this code path
1116 * only happens from calls inside this file.
1118 * External callers are through the vchiq_queue_message
1119 * path which always sets the type to be VCHIQ_MSG_DATA
1121 * At first glance this appears to be correct but
1122 * more review is needed.
1124 copy_message_data(copy_callback, context,
1125 header->data, size);
1127 VCHIQ_STATS_INC(state, ctrl_tx_count);
1130 header->msgid = msgid;
1131 header->size = size;
1136 svc_fourcc = service
1137 ? service->base.fourcc
1138 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1140 vchiq_log_info(SRVTRACE_LEVEL(service),
1141 "Sent Msg %s(%u) to %c%c%c%c s:%u d:%d len:%zu",
1142 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1143 VCHIQ_MSG_TYPE(msgid),
1144 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1145 VCHIQ_MSG_SRCPORT(msgid),
1146 VCHIQ_MSG_DSTPORT(msgid),
1150 /* Make sure the new header is visible to the peer. */
1153 /* Make the new tx_pos visible to the peer. */
1154 local->tx_pos = state->local_tx_pos;
1157 if (service && (type == VCHIQ_MSG_CLOSE))
1158 vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSESENT);
1160 if (!(flags & QMFLAGS_NO_MUTEX_UNLOCK))
1161 mutex_unlock(&state->slot_mutex);
1163 remote_event_signal(&state->remote->trigger);
1165 return VCHIQ_SUCCESS;
1168 /* Called by the slot handler and application threads */
1169 static enum vchiq_status
1170 queue_message_sync(struct vchiq_state *state, struct vchiq_service *service,
1172 ssize_t (*copy_callback)(void *context, void *dest,
1173 size_t offset, size_t maxsize),
1174 void *context, int size, int is_blocking)
1176 struct vchiq_shared_state *local;
1177 struct vchiq_header *header;
1178 ssize_t callback_result;
1180 local = state->local;
1182 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_RESUME &&
1183 mutex_lock_killable(&state->sync_mutex))
1186 remote_event_wait(&state->sync_release_event, &local->sync_release);
1190 header = (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
1194 int oldmsgid = header->msgid;
1196 if (oldmsgid != VCHIQ_MSGID_PADDING)
1197 vchiq_log_error(vchiq_core_log_level,
1198 "%d: qms - msgid %x, not PADDING",
1199 state->id, oldmsgid);
1202 vchiq_log_info(vchiq_sync_log_level,
1203 "%d: qms %s@%pK,%x (%d->%d)", state->id,
1204 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1205 header, size, VCHIQ_MSG_SRCPORT(msgid),
1206 VCHIQ_MSG_DSTPORT(msgid));
1209 copy_message_data(copy_callback, context,
1210 header->data, size);
1212 if (callback_result < 0) {
1213 mutex_unlock(&state->slot_mutex);
1214 VCHIQ_SERVICE_STATS_INC(service,
1220 if (SRVTRACE_ENABLED(service,
1222 vchiq_log_dump_mem("Sent", 0,
1225 (size_t)callback_result));
1227 VCHIQ_SERVICE_STATS_INC(service, ctrl_tx_count);
1228 VCHIQ_SERVICE_STATS_ADD(service, ctrl_tx_bytes, size);
1230 VCHIQ_STATS_INC(state, ctrl_tx_count);
1233 header->size = size;
1234 header->msgid = msgid;
1236 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
1239 svc_fourcc = service
1240 ? service->base.fourcc
1241 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1243 vchiq_log_trace(vchiq_sync_log_level,
1244 "Sent Sync Msg %s(%u) to %c%c%c%c s:%u d:%d len:%d",
1245 msg_type_str(VCHIQ_MSG_TYPE(msgid)),
1246 VCHIQ_MSG_TYPE(msgid),
1247 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1248 VCHIQ_MSG_SRCPORT(msgid),
1249 VCHIQ_MSG_DSTPORT(msgid),
1253 remote_event_signal(&state->remote->sync_trigger);
1255 if (VCHIQ_MSG_TYPE(msgid) != VCHIQ_MSG_PAUSE)
1256 mutex_unlock(&state->sync_mutex);
1258 return VCHIQ_SUCCESS;
1262 claim_slot(struct vchiq_slot_info *slot)
1268 release_slot(struct vchiq_state *state, struct vchiq_slot_info *slot_info,
1269 struct vchiq_header *header, struct vchiq_service *service)
1271 mutex_lock(&state->recycle_mutex);
1274 int msgid = header->msgid;
1276 if (((msgid & VCHIQ_MSGID_CLAIMED) == 0) ||
1277 (service && service->closing)) {
1278 mutex_unlock(&state->recycle_mutex);
1282 /* Rewrite the message header to prevent a double release */
1283 header->msgid = msgid & ~VCHIQ_MSGID_CLAIMED;
1286 slot_info->release_count++;
1288 if (slot_info->release_count == slot_info->use_count) {
1289 int slot_queue_recycle;
1290 /* Add to the freed queue */
1293 * A read barrier is necessary here to prevent speculative
1294 * fetches of remote->slot_queue_recycle from overtaking the
1299 slot_queue_recycle = state->remote->slot_queue_recycle;
1300 state->remote->slot_queue[slot_queue_recycle &
1301 VCHIQ_SLOT_QUEUE_MASK] =
1302 SLOT_INDEX_FROM_INFO(state, slot_info);
1303 state->remote->slot_queue_recycle = slot_queue_recycle + 1;
1304 vchiq_log_info(vchiq_core_log_level,
1305 "%d: %s %d - recycle->%x", state->id, __func__,
1306 SLOT_INDEX_FROM_INFO(state, slot_info),
1307 state->remote->slot_queue_recycle);
1310 * A write barrier is necessary, but remote_event_signal
1313 remote_event_signal(&state->remote->recycle);
1316 mutex_unlock(&state->recycle_mutex);
/*
 * Map a completed bulk transfer to the callback reason delivered to the
 * service: {TRANSMIT,RECEIVE} x {DONE,ABORTED}, chosen from bulk->dir
 * and whether bulk->actual records VCHIQ_BULK_ACTUAL_ABORTED.
 */
1319 static inline enum vchiq_reason
1320 get_bulk_reason(struct vchiq_bulk *bulk)
1322 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1323 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1324 return VCHIQ_BULK_TRANSMIT_ABORTED;
1326 return VCHIQ_BULK_TRANSMIT_DONE;
1329 if (bulk->actual == VCHIQ_BULK_ACTUAL_ABORTED)
1330 return VCHIQ_BULK_RECEIVE_ABORTED;
1332 return VCHIQ_BULK_RECEIVE_DONE;
1335 /* Called by the slot handler - don't hold the bulk mutex */
/*
 * Notify the service of each bulk transfer completed between
 * queue->remove and queue->process.  Blocking-mode waiters are completed
 * under bulk_waiter_spinlock; callback-mode transfers go through
 * make_service_callback().  If a callback returns VCHIQ_RETRY, a poll is
 * requested so notification is retried later.
 * NOTE(review): several lines (stats field names, loop tail, remove
 * increment) are elided in this extract.
 */
1336 static enum vchiq_status
1337 notify_bulks(struct vchiq_service *service, struct vchiq_bulk_queue *queue,
1340 enum vchiq_status status = VCHIQ_SUCCESS;
1342 vchiq_log_trace(vchiq_core_log_level,
1343 "%d: nb:%d %cx - p=%x rn=%x r=%x",
1344 service->state->id, service->localport,
1345 (queue == &service->bulk_tx) ? 't' : 'r',
1346 queue->process, queue->remote_notify, queue->remove);
1348 queue->remote_notify = queue->process;
1350 while (queue->remove != queue->remote_notify) {
1351 struct vchiq_bulk *bulk =
1352 &queue->bulks[BULK_INDEX(queue->remove)];
1355 * Only generate callbacks for non-dummy bulk
1356 * requests, and non-terminated services
1358 if (bulk->data && service->instance) {
1359 if (bulk->actual != VCHIQ_BULK_ACTUAL_ABORTED) {
1360 if (bulk->dir == VCHIQ_BULK_TRANSMIT) {
1361 VCHIQ_SERVICE_STATS_INC(service,
1363 VCHIQ_SERVICE_STATS_ADD(service,
1367 VCHIQ_SERVICE_STATS_INC(service,
1369 VCHIQ_SERVICE_STATS_ADD(service,
1374 VCHIQ_SERVICE_STATS_INC(service,
1375 bulk_aborted_count);
/* Blocking mode: wake the thread sleeping in the bulk waiter. */
1377 if (bulk->mode == VCHIQ_BULK_MODE_BLOCKING) {
1378 struct bulk_waiter *waiter;
1380 spin_lock(&bulk_waiter_spinlock);
1381 waiter = bulk->userdata;
1383 waiter->actual = bulk->actual;
1384 complete(&waiter->event);
1386 spin_unlock(&bulk_waiter_spinlock);
1387 } else if (bulk->mode == VCHIQ_BULK_MODE_CALLBACK) {
1388 enum vchiq_reason reason =
1389 get_bulk_reason(bulk);
1390 status = make_service_callback(service,
1391 reason, NULL, bulk->userdata);
1392 if (status == VCHIQ_RETRY)
1398 complete(&service->bulk_remove_event);
1401 status = VCHIQ_SUCCESS;
/* Could not notify everything now - ask the slot handler to retry. */
1403 if (status == VCHIQ_RETRY)
1404 request_poll(service->state, service,
1405 (queue == &service->bulk_tx) ?
1406 VCHIQ_POLL_TXNOTIFY : VCHIQ_POLL_RXNOTIFY);
/*
 * Atomically consume the poll bitmask for one group of 32 services and
 * act on each flagged service: REMOVE/TERMINATE close the service (the
 * poll is re-requested if the close returns VCHIQ_RETRY), TXNOTIFY and
 * RXNOTIFY re-run bulk notification.  The service reference taken by
 * find_service_by_port() is dropped at the end of each iteration.
 */
1412 poll_services_of_group(struct vchiq_state *state, int group)
1414 u32 flags = atomic_xchg(&state->poll_services[group], 0);
1417 for (i = 0; flags; i++) {
1418 struct vchiq_service *service;
1421 if ((flags & BIT(i)) == 0)
/* (group << 5) + i recovers the localport from the bit position. */
1424 service = find_service_by_port(state, (group << 5) + i);
1430 service_flags = atomic_xchg(&service->poll_flags, 0);
1431 if (service_flags & BIT(VCHIQ_POLL_REMOVE)) {
1432 vchiq_log_info(vchiq_core_log_level, "%d: ps - remove %d<->%d",
1433 state->id, service->localport,
1434 service->remoteport);
1437 * Make it look like a client, because
1438 * it must be removed and not left in
1439 * the LISTENING state.
1441 service->public_fourcc = VCHIQ_FOURCC_INVALID;
1443 if (vchiq_close_service_internal(service, NO_CLOSE_RECVD) !=
1445 request_poll(state, service, VCHIQ_POLL_REMOVE);
1446 } else if (service_flags & BIT(VCHIQ_POLL_TERMINATE)) {
1447 vchiq_log_info(vchiq_core_log_level,
1448 "%d: ps - terminate %d<->%d",
1449 state->id, service->localport,
1450 service->remoteport);
1451 if (vchiq_close_service_internal(
1452 service, NO_CLOSE_RECVD) !=
1454 request_poll(state, service,
1455 VCHIQ_POLL_TERMINATE);
1457 if (service_flags & BIT(VCHIQ_POLL_TXNOTIFY))
1458 notify_bulks(service, &service->bulk_tx, RETRY_POLL);
1459 if (service_flags & BIT(VCHIQ_POLL_RXNOTIFY))
1460 notify_bulks(service, &service->bulk_rx, RETRY_POLL);
1461 vchiq_service_put(service);
1465 /* Called by the slot handler thread */
/* Walk every 32-service poll group and process its pending poll flags. */
1467 poll_services(struct vchiq_state *state)
1471 for (group = 0; group < BITSET_SIZE(state->unused_service); group++)
1472 poll_services_of_group(state, group)
1475 /* Called with the bulk_mutex held */
/*
 * Drain a service's bulk queue during close: every transfer between
 * queue->process and the local/remote insert points is completed as
 * ABORTED.  Missing remote or local halves are fabricated as dummy
 * entries so the queue indices re-converge.
 * NOTE(review): the loop tail (queue->process increment) is elided in
 * this extract.
 */
1477 abort_outstanding_bulks(struct vchiq_service *service,
1478 struct vchiq_bulk_queue *queue)
1480 int is_tx = (queue == &service->bulk_tx);
1482 vchiq_log_trace(vchiq_core_log_level,
1483 "%d: aob:%d %cx - li=%x ri=%x p=%x",
1484 service->state->id, service->localport, is_tx ? 't' : 'r',
1485 queue->local_insert, queue->remote_insert, queue->process);
/* Insert points must never lag behind the process cursor. */
1487 WARN_ON((int)(queue->local_insert - queue->process) < 0);
1488 WARN_ON((int)(queue->remote_insert - queue->process) < 0);
1490 while ((queue->process != queue->local_insert) ||
1491 (queue->process != queue->remote_insert)) {
1492 struct vchiq_bulk *bulk =
1493 &queue->bulks[BULK_INDEX(queue->process)];
1495 if (queue->process == queue->remote_insert) {
1496 /* fabricate a matching dummy bulk */
1497 bulk->remote_data = NULL;
1498 bulk->remote_size = 0;
1499 queue->remote_insert++;
1502 if (queue->process != queue->local_insert) {
1503 vchiq_complete_bulk(bulk);
1505 vchiq_log_info(SRVTRACE_LEVEL(service),
1506 "%s %c%c%c%c d:%d ABORTED - tx len:%d, rx len:%d",
1507 is_tx ? "Send Bulk to" : "Recv Bulk from",
1508 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1509 service->remoteport,
1513 /* fabricate a matching dummy bulk */
1516 bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;
1517 bulk->dir = is_tx ? VCHIQ_BULK_TRANSMIT :
1519 queue->local_insert++;
/*
 * Handle an incoming OPEN message: locate a listening service for the
 * requested fourcc, check version compatibility, send an OPENACK (sync
 * or async depending on the service), and move the service to OPEN /
 * OPENSYNC.  On failure or no match, a CLOSE is sent back.  The
 * bail_not_ready path signals the caller that the message could not be
 * consumed yet.
 * NOTE(review): several lines (return statements, payload fields) are
 * elided in this extract.
 */
1527 parse_open(struct vchiq_state *state, struct vchiq_header *header)
1529 const struct vchiq_open_payload *payload;
1530 struct vchiq_service *service = NULL;
1532 unsigned int localport, remoteport, fourcc;
1533 short version, version_min;
1535 msgid = header->msgid;
1536 size = header->size;
1537 localport = VCHIQ_MSG_DSTPORT(msgid);
1538 remoteport = VCHIQ_MSG_SRCPORT(msgid);
/* An OPEN must at least carry a vchiq_open_payload. */
1539 if (size < sizeof(struct vchiq_open_payload))
1542 payload = (struct vchiq_open_payload *)header->data;
1543 fourcc = payload->fourcc;
1544 vchiq_log_info(vchiq_core_log_level,
1545 "%d: prs OPEN@%pK (%d->'%c%c%c%c')",
1546 state->id, header, localport,
1547 VCHIQ_FOURCC_AS_4CHARS(fourcc));
1549 service = get_listening_service(state, fourcc);
1553 /* A matching service exists */
1554 version = payload->version;
1555 version_min = payload->version_min;
/* Both sides must overlap in their supported version ranges. */
1557 if ((service->version < version_min) ||
1558 (version < service->version_min)) {
1559 /* Version mismatch */
1560 vchiq_loud_error_header();
1561 vchiq_loud_error("%d: service %d (%c%c%c%c) "
1562 "version mismatch - local (%d, min %d)"
1563 " vs. remote (%d, min %d)",
1564 state->id, service->localport,
1565 VCHIQ_FOURCC_AS_4CHARS(fourcc),
1566 service->version, service->version_min,
1567 version, version_min);
1568 vchiq_loud_error_footer();
1569 vchiq_service_put(service);
1573 service->peer_version = version;
1575 if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
1576 struct vchiq_openack_payload ack_payload = {
1579 int openack_id = MAKE_OPENACK(service->localport, remoteport);
1581 if (state->version_common <
1582 VCHIQ_VERSION_SYNCHRONOUS_MODE)
1585 /* Acknowledge the OPEN */
1586 if (service->sync) {
1587 if (queue_message_sync(state, NULL, openack_id,
1588 memcpy_copy_callback,
1590 sizeof(ack_payload),
1592 goto bail_not_ready;
1594 if (queue_message(state, NULL, openack_id,
1595 memcpy_copy_callback,
1597 sizeof(ack_payload),
1599 goto bail_not_ready;
1602 /* The service is now open */
1603 vchiq_set_service_state(service,
1604 service->sync ? VCHIQ_SRVSTATE_OPENSYNC
1605 : VCHIQ_SRVSTATE_OPEN);
1608 /* Success - the message has been dealt with */
1609 vchiq_service_put(service);
1613 /* No available service, or an invalid request - send a CLOSE */
1614 if (queue_message(state, NULL, MAKE_CLOSE(0, VCHIQ_MSG_SRCPORT(msgid)),
1615 NULL, NULL, 0, 0) == VCHIQ_RETRY)
1616 goto bail_not_ready;
1622 vchiq_service_put(service);
1628 * parse_message() - parses a single message from the rx slot
1629 * @state: vchiq state struct
1630 * @header: message header
1632 * Context: Process context
1635 * * >= 0 - size of the parsed message payload (without header)
1636 * * -EINVAL - fatal error occurred, bail out is required
/*
 * NOTE(review): this extract elides many lines (break statements,
 * closing braces, some arguments); added comments only describe what
 * the visible statements demonstrate.
 */
1639 parse_message(struct vchiq_state *state, struct vchiq_header *header)
1641 struct vchiq_service *service = NULL;
1642 unsigned int localport, remoteport;
1643 int msgid, size, type, ret = -EINVAL;
1645 DEBUG_INITIALISE(state->local)
1647 DEBUG_VALUE(PARSE_HEADER, (int)(long)header);
1648 msgid = header->msgid;
1649 DEBUG_VALUE(PARSE_MSGID, msgid);
1650 size = header->size;
1651 type = VCHIQ_MSG_TYPE(msgid);
1652 localport = VCHIQ_MSG_DSTPORT(msgid);
1653 remoteport = VCHIQ_MSG_SRCPORT(msgid);
1655 if (type != VCHIQ_MSG_DATA)
1656 VCHIQ_STATS_INC(state, ctrl_rx_count);
/* Service-addressed message types: resolve the destination service. */
1659 case VCHIQ_MSG_OPENACK:
1660 case VCHIQ_MSG_CLOSE:
1661 case VCHIQ_MSG_DATA:
1662 case VCHIQ_MSG_BULK_RX:
1663 case VCHIQ_MSG_BULK_TX:
1664 case VCHIQ_MSG_BULK_RX_DONE:
1665 case VCHIQ_MSG_BULK_TX_DONE:
1666 service = find_service_by_port(state, localport);
1668 ((service->remoteport != remoteport) &&
1669 (service->remoteport != VCHIQ_PORT_FREE)) &&
1671 (type == VCHIQ_MSG_CLOSE)) {
1673 * This could be a CLOSE from a client which
1674 * hadn't yet received the OPENACK - look for
1675 * the connected service
1678 vchiq_service_put(service);
1679 service = get_connected_service(state,
1682 vchiq_log_warning(vchiq_core_log_level,
1683 "%d: prs %s@%pK (%d->%d) - found connected service %d",
1684 state->id, msg_type_str(type),
1685 header, remoteport, localport,
1686 service->localport);
1690 vchiq_log_error(vchiq_core_log_level,
1691 "%d: prs %s@%pK (%d->%d) - invalid/closed service %d",
1692 state->id, msg_type_str(type),
1693 header, remoteport, localport,
1702 if (SRVTRACE_ENABLED(service, VCHIQ_LOG_INFO)) {
1705 svc_fourcc = service
1706 ? service->base.fourcc
1707 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
1708 vchiq_log_info(SRVTRACE_LEVEL(service),
1709 "Rcvd Msg %s(%u) from %c%c%c%c s:%d d:%d len:%d",
1710 msg_type_str(type), type,
1711 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
1712 remoteport, localport, size);
1714 vchiq_log_dump_mem("Rcvd", 0, header->data,
/* Reject a message whose stride would overrun its slot. */
1718 if (((unsigned long)header & VCHIQ_SLOT_MASK) +
1719 calc_stride(size) > VCHIQ_SLOT_SIZE) {
1720 vchiq_log_error(vchiq_core_log_level,
1721 "header %pK (msgid %x) - size %x too big for slot",
1722 header, (unsigned int)msgid,
1723 (unsigned int)size);
1724 WARN(1, "oversized for slot\n");
1728 case VCHIQ_MSG_OPEN:
1729 WARN_ON(VCHIQ_MSG_DSTPORT(msgid));
1730 if (!parse_open(state, header))
1731 goto bail_not_ready;
1733 case VCHIQ_MSG_OPENACK:
1734 if (size >= sizeof(struct vchiq_openack_payload)) {
1735 const struct vchiq_openack_payload *payload =
1736 (struct vchiq_openack_payload *)
1738 service->peer_version = payload->version;
1740 vchiq_log_info(vchiq_core_log_level,
1741 "%d: prs OPENACK@%pK,%x (%d->%d) v:%d",
1742 state->id, header, size, remoteport, localport,
1743 service->peer_version);
1744 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
1745 service->remoteport = remoteport;
1746 vchiq_set_service_state(service,
1747 VCHIQ_SRVSTATE_OPEN);
1748 complete(&service->remove_event);
1750 vchiq_log_error(vchiq_core_log_level,
1751 "OPENACK received in state %s",
1752 srvstate_names[service->srvstate]);
1755 case VCHIQ_MSG_CLOSE:
1756 WARN_ON(size); /* There should be no data */
1758 vchiq_log_info(vchiq_core_log_level,
1759 "%d: prs CLOSE@%pK (%d->%d)",
1760 state->id, header, remoteport, localport);
1762 mark_service_closing_internal(service, 1);
1764 if (vchiq_close_service_internal(service,
1765 CLOSE_RECVD) == VCHIQ_RETRY)
1766 goto bail_not_ready;
1768 vchiq_log_info(vchiq_core_log_level,
1769 "Close Service %c%c%c%c s:%u d:%d",
1770 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1772 service->remoteport);
1774 case VCHIQ_MSG_DATA:
1775 vchiq_log_info(vchiq_core_log_level,
1776 "%d: prs DATA@%pK,%x (%d->%d)",
1777 state->id, header, size, remoteport, localport);
1779 if ((service->remoteport == remoteport) &&
1780 (service->srvstate == VCHIQ_SRVSTATE_OPEN)) {
/* Claim the slot so the header stays valid until released. */
1781 header->msgid = msgid | VCHIQ_MSGID_CLAIMED;
1782 claim_slot(state->rx_info);
1783 DEBUG_TRACE(PARSE_LINE);
1784 if (make_service_callback(service,
1785 VCHIQ_MESSAGE_AVAILABLE, header,
1786 NULL) == VCHIQ_RETRY) {
1787 DEBUG_TRACE(PARSE_LINE);
1788 goto bail_not_ready;
1790 VCHIQ_SERVICE_STATS_INC(service, ctrl_rx_count);
1791 VCHIQ_SERVICE_STATS_ADD(service, ctrl_rx_bytes,
1794 VCHIQ_STATS_INC(state, error_count);
1797 case VCHIQ_MSG_CONNECT:
1798 vchiq_log_info(vchiq_core_log_level,
1799 "%d: prs CONNECT@%pK", state->id, header);
1800 state->version_common = ((struct vchiq_slot_zero *)
1801 state->slot_data)->version;
1802 complete(&state->connect);
1804 case VCHIQ_MSG_BULK_RX:
1805 case VCHIQ_MSG_BULK_TX:
1807 * We should never receive a bulk request from the
1808 * other side since we're not setup to perform as the
1813 case VCHIQ_MSG_BULK_RX_DONE:
1814 case VCHIQ_MSG_BULK_TX_DONE:
1815 if ((service->remoteport == remoteport) &&
1816 (service->srvstate != VCHIQ_SRVSTATE_FREE)) {
1817 struct vchiq_bulk_queue *queue;
1818 struct vchiq_bulk *bulk;
1820 queue = (type == VCHIQ_MSG_BULK_RX_DONE) ?
1821 &service->bulk_rx : &service->bulk_tx;
1823 DEBUG_TRACE(PARSE_LINE);
1824 if (mutex_lock_killable(&service->bulk_mutex)) {
1825 DEBUG_TRACE(PARSE_LINE);
1826 goto bail_not_ready;
/* A DONE with no outstanding local transfer is a protocol error. */
1828 if ((int)(queue->remote_insert -
1829 queue->local_insert) >= 0) {
1830 vchiq_log_error(vchiq_core_log_level,
1831 "%d: prs %s@%pK (%d->%d) unexpected (ri=%d,li=%d)",
1832 state->id, msg_type_str(type),
1833 header, remoteport, localport,
1834 queue->remote_insert,
1835 queue->local_insert);
1836 mutex_unlock(&service->bulk_mutex);
1839 if (queue->process != queue->remote_insert) {
1840 pr_err("%s: p %x != ri %x\n",
1843 queue->remote_insert);
1844 mutex_unlock(&service->bulk_mutex);
1845 goto bail_not_ready;
1848 bulk = &queue->bulks[
1849 BULK_INDEX(queue->remote_insert)];
/* The payload carries the actual byte count of the transfer. */
1850 bulk->actual = *(int *)header->data;
1851 queue->remote_insert++;
1853 vchiq_log_info(vchiq_core_log_level,
1854 "%d: prs %s@%pK (%d->%d) %x@%pad",
1855 state->id, msg_type_str(type),
1856 header, remoteport, localport,
1857 bulk->actual, &bulk->data);
1859 vchiq_log_trace(vchiq_core_log_level,
1860 "%d: prs:%d %cx li=%x ri=%x p=%x",
1861 state->id, localport,
1862 (type == VCHIQ_MSG_BULK_RX_DONE) ?
1864 queue->local_insert,
1865 queue->remote_insert, queue->process);
1867 DEBUG_TRACE(PARSE_LINE);
1868 WARN_ON(queue->process == queue->local_insert);
1869 vchiq_complete_bulk(bulk);
1871 mutex_unlock(&service->bulk_mutex);
1872 DEBUG_TRACE(PARSE_LINE);
1873 notify_bulks(service, queue, RETRY_POLL);
1874 DEBUG_TRACE(PARSE_LINE);
1877 case VCHIQ_MSG_PADDING:
1878 vchiq_log_trace(vchiq_core_log_level,
1879 "%d: prs PADDING@%pK,%x",
1880 state->id, header, size);
1882 case VCHIQ_MSG_PAUSE:
1883 /* If initiated, signal the application thread */
1884 vchiq_log_trace(vchiq_core_log_level,
1885 "%d: prs PAUSE@%pK,%x",
1886 state->id, header, size);
1887 if (state->conn_state == VCHIQ_CONNSTATE_PAUSED) {
1888 vchiq_log_error(vchiq_core_log_level,
1889 "%d: PAUSE received in state PAUSED",
1893 if (state->conn_state != VCHIQ_CONNSTATE_PAUSE_SENT) {
1894 /* Send a PAUSE in response */
1895 if (queue_message(state, NULL, MAKE_PAUSE,
1896 NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK)
1898 goto bail_not_ready;
1900 /* At this point slot_mutex is held */
1901 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSED);
1903 case VCHIQ_MSG_RESUME:
1904 vchiq_log_trace(vchiq_core_log_level,
1905 "%d: prs RESUME@%pK,%x",
1906 state->id, header, size);
1907 /* Release the slot mutex */
1908 mutex_unlock(&state->slot_mutex);
1909 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
1912 case VCHIQ_MSG_REMOTE_USE:
1913 vchiq_on_remote_use(state);
1915 case VCHIQ_MSG_REMOTE_RELEASE:
1916 vchiq_on_remote_release(state);
1918 case VCHIQ_MSG_REMOTE_USE_ACTIVE:
1922 vchiq_log_error(vchiq_core_log_level,
1923 "%d: prs invalid msgid %x@%pK,%x",
1924 state->id, msgid, header, size);
1925 WARN(1, "invalid message\n");
1934 vchiq_service_put(service);
1939 /* Called by the slot handler thread */
/*
 * Consume messages from the remote's tx position up to the last
 * published tx_pos.  A new rx slot gets a sentinel use_count of 1 which
 * is released when the end of the slot is reached, preventing premature
 * recycling while messages in it are still claimed.
 */
1941 parse_rx_slots(struct vchiq_state *state)
1943 struct vchiq_shared_state *remote = state->remote;
1946 DEBUG_INITIALISE(state->local)
1948 tx_pos = remote->tx_pos;
1950 while (state->rx_pos != tx_pos) {
1951 struct vchiq_header *header;
1954 DEBUG_TRACE(PARSE_LINE);
/* Entering a fresh slot: map the next slot index from the queue. */
1955 if (!state->rx_data) {
1958 WARN_ON(state->rx_pos & VCHIQ_SLOT_MASK);
1959 rx_index = remote->slot_queue[
1960 SLOT_QUEUE_INDEX_FROM_POS_MASKED(state->rx_pos)];
1961 state->rx_data = (char *)SLOT_DATA_FROM_INDEX(state,
1963 state->rx_info = SLOT_INFO_FROM_INDEX(state, rx_index);
1966 * Initialise use_count to one, and increment
1967 * release_count at the end of the slot to avoid
1968 * releasing the slot prematurely.
1970 state->rx_info->use_count = 1;
1971 state->rx_info->release_count = 0;
1974 header = (struct vchiq_header *)(state->rx_data +
1975 (state->rx_pos & VCHIQ_SLOT_MASK));
1976 size = parse_message(state, header);
1980 state->rx_pos += calc_stride(size);
1982 DEBUG_TRACE(PARSE_LINE);
1984 * Perform some housekeeping when the end of the slot is
1987 if ((state->rx_pos & VCHIQ_SLOT_MASK) == 0) {
1988 /* Remove the extra reference count. */
1989 release_slot(state, state->rx_info, NULL, NULL);
1990 state->rx_data = NULL;
1996 * handle_poll() - handle service polling and other rare conditions
1997 * @state: vchiq state struct
1999 * Context: Process context
2002 * * 0 - poll handled successful
2003 * * -EAGAIN - retry later
2006 handle_poll(struct vchiq_state *state)
2008 switch (state->conn_state) {
2009 case VCHIQ_CONNSTATE_CONNECTED:
2010 /* Poll the services as requested */
2011 poll_services(state);
/* PAUSING: try to send the PAUSE message; on success move to PAUSE_SENT. */
2014 case VCHIQ_CONNSTATE_PAUSING:
2015 if (queue_message(state, NULL, MAKE_PAUSE, NULL, NULL, 0,
2016 QMFLAGS_NO_MUTEX_UNLOCK) != VCHIQ_RETRY) {
2017 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_PAUSE_SENT);
2024 case VCHIQ_CONNSTATE_RESUMING:
2025 if (queue_message(state, NULL, MAKE_RESUME, NULL, NULL, 0,
2026 QMFLAGS_NO_MUTEX_LOCK) != VCHIQ_RETRY) {
2027 vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
2030 * This should really be impossible,
2031 * since the PAUSE should have flushed
2032 * through outstanding messages.
2034 vchiq_log_error(vchiq_core_log_level,
2035 "Failed to send RESUME message");
2045 /* Called by the slot handler thread */
/*
 * Main loop of the slot handler kthread: sleep on the local trigger
 * event, service any pending poll requests (re-arming poll_needed when
 * handle_poll() asks for a retry), then parse newly arrived rx slots.
 */
2047 slot_handler_func(void *v)
2049 struct vchiq_state *state = v;
2050 struct vchiq_shared_state *local = state->local;
2052 DEBUG_INITIALISE(local)
2055 DEBUG_COUNT(SLOT_HANDLER_COUNT);
2056 DEBUG_TRACE(SLOT_HANDLER_LINE);
2057 remote_event_wait(&state->trigger_event, &local->trigger);
2061 DEBUG_TRACE(SLOT_HANDLER_LINE);
2062 if (state->poll_needed) {
2063 state->poll_needed = 0;
2066 * Handle service polling and other rare conditions here
2067 * out of the mainline code
2069 if (handle_poll(state) == -EAGAIN)
2070 state->poll_needed = 1;
2073 DEBUG_TRACE(SLOT_HANDLER_LINE);
2074 parse_rx_slots(state);
2079 /* Called by the recycle thread */
/*
 * Recycle kthread: allocate a scratch bitset of services ("found"),
 * then repeatedly wait for the local recycle event and process the
 * freed-slot queue.  NOTE(review): the allocation-failure path and loop
 * structure are elided in this extract.
 */
2081 recycle_func(void *v)
2083 struct vchiq_state *state = v;
2084 struct vchiq_shared_state *local = state->local;
2088 length = sizeof(*found) * BITSET_SIZE(VCHIQ_MAX_SERVICES);
2090 found = kmalloc_array(BITSET_SIZE(VCHIQ_MAX_SERVICES), sizeof(*found),
2096 remote_event_wait(&state->recycle_event, &local->recycle);
2098 process_free_queue(state, found, length);
2103 /* Called by the sync thread */
/*
 * Synchronous-message kthread: waits on the sync trigger event and
 * dispatches the single message held in the remote's sync slot
 * (OPENACK and DATA are the only expected types; anything else is
 * logged and released).  NOTE(review): the function's name/signature
 * line is elided in this extract - presumably sync_func(void *v).
 */
2107 struct vchiq_state *state = v;
2108 struct vchiq_shared_state *local = state->local;
2109 struct vchiq_header *header =
2110 (struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
2111 state->remote->slot_sync);
2114 struct vchiq_service *service;
2117 unsigned int localport, remoteport;
2119 remote_event_wait(&state->sync_trigger_event, &local->sync_trigger);
2123 msgid = header->msgid;
2124 size = header->size;
2125 type = VCHIQ_MSG_TYPE(msgid);
2126 localport = VCHIQ_MSG_DSTPORT(msgid);
2127 remoteport = VCHIQ_MSG_SRCPORT(msgid);
2129 service = find_service_by_port(state, localport);
2132 vchiq_log_error(vchiq_sync_log_level,
2133 "%d: sf %s@%pK (%d->%d) - invalid/closed service %d",
2134 state->id, msg_type_str(type),
2135 header, remoteport, localport, localport);
2136 release_message_sync(state, header);
2140 if (vchiq_sync_log_level >= VCHIQ_LOG_TRACE) {
2143 svc_fourcc = service
2144 ? service->base.fourcc
2145 : VCHIQ_MAKE_FOURCC('?', '?', '?', '?');
2146 vchiq_log_trace(vchiq_sync_log_level,
2147 "Rcvd Msg %s from %c%c%c%c s:%d d:%d len:%d",
2149 VCHIQ_FOURCC_AS_4CHARS(svc_fourcc),
2150 remoteport, localport, size);
2152 vchiq_log_dump_mem("Rcvd", 0, header->data,
2157 case VCHIQ_MSG_OPENACK:
2158 if (size >= sizeof(struct vchiq_openack_payload)) {
2159 const struct vchiq_openack_payload *payload =
2160 (struct vchiq_openack_payload *)
2162 service->peer_version = payload->version;
2164 vchiq_log_info(vchiq_sync_log_level,
2165 "%d: sf OPENACK@%pK,%x (%d->%d) v:%d",
2166 state->id, header, size, remoteport, localport,
2167 service->peer_version);
2168 if (service->srvstate == VCHIQ_SRVSTATE_OPENING) {
2169 service->remoteport = remoteport;
2170 vchiq_set_service_state(service,
2171 VCHIQ_SRVSTATE_OPENSYNC);
2173 complete(&service->remove_event);
2175 release_message_sync(state, header);
2178 case VCHIQ_MSG_DATA:
2179 vchiq_log_trace(vchiq_sync_log_level,
2180 "%d: sf DATA@%pK,%x (%d->%d)",
2181 state->id, header, size, remoteport, localport);
2183 if ((service->remoteport == remoteport) &&
2184 (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)) {
2185 if (make_service_callback(service,
2186 VCHIQ_MESSAGE_AVAILABLE, header,
2187 NULL) == VCHIQ_RETRY)
2188 vchiq_log_error(vchiq_sync_log_level,
2189 "synchronous callback to service %d returns VCHIQ_RETRY",
2195 vchiq_log_error(vchiq_sync_log_level,
2196 "%d: sf unexpected msgid %x@%pK,%x",
2197 state->id, msgid, header, size);
2198 release_message_sync(state, header);
2202 vchiq_service_put(service);
/* Reset all cursors of a bulk queue to the empty state. */
2209 init_bulk_queue(struct vchiq_bulk_queue *queue)
2211 queue->local_insert = 0;
2212 queue->remote_insert = 0;
2214 queue->remote_notify = 0;
/* Return the human-readable name of a connection state for logging. */
2219 get_conn_state_name(enum vchiq_connstate conn_state)
2221 return conn_state_names[conn_state];
/*
 * Lay out the shared slot memory: align mem_base up to a slot boundary,
 * place slot_zero there, and split the remaining data slots evenly
 * between master and slave (one sync slot each, then a run of normal
 * slots).  Returns the initialised slot_zero; a minimum of 4 data slots
 * is required.  NOTE(review): the NULL-return and final return lines
 * are elided in this extract.
 */
2224 struct vchiq_slot_zero *
2225 vchiq_init_slots(void *mem_base, int mem_size)
2228 (int)((VCHIQ_SLOT_SIZE - (long)mem_base) & VCHIQ_SLOT_MASK);
2229 struct vchiq_slot_zero *slot_zero =
2230 (struct vchiq_slot_zero *)(mem_base + mem_align);
2231 int num_slots = (mem_size - mem_align) / VCHIQ_SLOT_SIZE;
2232 int first_data_slot = VCHIQ_SLOT_ZERO_SLOTS;
2236 /* Ensure there is enough memory to run an absolutely minimum system */
2237 num_slots -= first_data_slot;
2239 if (num_slots < 4) {
2240 vchiq_log_error(vchiq_core_log_level,
2241 "%s - insufficient memory %x bytes",
2242 __func__, mem_size);
2246 memset(slot_zero, 0, sizeof(struct vchiq_slot_zero));
2248 slot_zero->magic = VCHIQ_MAGIC;
2249 slot_zero->version = VCHIQ_VERSION;
2250 slot_zero->version_min = VCHIQ_VERSION_MIN;
2251 slot_zero->slot_zero_size = sizeof(struct vchiq_slot_zero);
2252 slot_zero->slot_size = VCHIQ_SLOT_SIZE;
2253 slot_zero->max_slots = VCHIQ_MAX_SLOTS;
2254 slot_zero->max_slots_per_side = VCHIQ_MAX_SLOTS_PER_SIDE;
/* First half of the data slots to the master, second half to the slave. */
2256 slot_zero->master.slot_sync = first_data_slot;
2257 slot_zero->master.slot_first = first_data_slot + 1;
2258 slot_zero->master.slot_last = first_data_slot + (num_slots / 2) - 1;
2259 slot_zero->slave.slot_sync = first_data_slot + (num_slots / 2);
2260 slot_zero->slave.slot_first = first_data_slot + (num_slots / 2) + 1;
2261 slot_zero->slave.slot_last = first_data_slot + num_slots - 1;
/*
 * One-time initialisation of the (single) vchiq state: bind the slave
 * shared-state half, set up mutexes/completions/quotas/remote events,
 * mark the sync slot empty, run platform init, then spawn the slot
 * handler, recycle and sync kthreads before publishing readiness via
 * local->initialised.  On thread-creation failure, previously created
 * threads are stopped via the goto-cleanup labels.
 * NOTE(review): several lines (error returns, kthread_create arguments,
 * final return) are elided in this extract.
 */
2267 vchiq_init_state(struct vchiq_state *state, struct vchiq_slot_zero *slot_zero)
2269 struct vchiq_shared_state *local;
2270 struct vchiq_shared_state *remote;
2271 char threadname[16];
/* Only a single global state is supported. */
2274 if (vchiq_states[0]) {
2275 pr_err("%s: VCHIQ state already initialized\n", __func__);
2279 local = &slot_zero->slave;
2280 remote = &slot_zero->master;
2282 if (local->initialised) {
2283 vchiq_loud_error_header();
2284 if (remote->initialised)
2285 vchiq_loud_error("local state has already been initialised");
2287 vchiq_loud_error("master/slave mismatch two slaves");
2288 vchiq_loud_error_footer();
2292 memset(state, 0, sizeof(struct vchiq_state));
2295 * initialize shared state pointers
2298 state->local = local;
2299 state->remote = remote;
2300 state->slot_data = (struct vchiq_slot *)slot_zero;
2303 * initialize events and mutexes
2306 init_completion(&state->connect);
2307 mutex_init(&state->mutex);
2308 mutex_init(&state->slot_mutex);
2309 mutex_init(&state->recycle_mutex);
2310 mutex_init(&state->sync_mutex);
2311 mutex_init(&state->bulk_transfer_mutex);
2313 init_completion(&state->slot_available_event);
2314 init_completion(&state->slot_remove_event);
2315 init_completion(&state->data_quota_event);
2317 state->slot_queue_available = 0;
2319 for (i = 0; i < VCHIQ_MAX_SERVICES; i++) {
2320 struct vchiq_service_quota *quota = &state->service_quotas[i];
/*
 * NOTE(review): the line below appears HTML-entity mangled in this
 * extract ("&quot" decoded to '"'); the original is most likely
 * init_completion(&quota->quota_event); - confirm against upstream.
 */
2321 init_completion("a->quota_event);
2324 for (i = local->slot_first; i <= local->slot_last; i++) {
2325 local->slot_queue[state->slot_queue_available] = i;
2326 state->slot_queue_available++;
2327 complete(&state->slot_available_event);
2330 state->default_slot_quota = state->slot_queue_available / 2;
2331 state->default_message_quota =
2332 min((unsigned short)(state->default_slot_quota * 256),
2333 (unsigned short)~0);
2335 state->previous_data_index = -1;
2336 state->data_use_count = 0;
2337 state->data_quota = state->slot_queue_available - 1;
2339 remote_event_create(&state->trigger_event, &local->trigger);
2341 remote_event_create(&state->recycle_event, &local->recycle);
2342 local->slot_queue_recycle = state->slot_queue_available;
2343 remote_event_create(&state->sync_trigger_event, &local->sync_trigger);
2344 remote_event_create(&state->sync_release_event, &local->sync_release);
2346 /* At start-of-day, the slot is empty and available */
2347 ((struct vchiq_header *)
2348 SLOT_DATA_FROM_INDEX(state, local->slot_sync))->msgid =
2349 VCHIQ_MSGID_PADDING;
2350 remote_event_signal_local(&state->sync_release_event, &local->sync_release);
2352 local->debug[DEBUG_ENTRIES] = DEBUG_MAX;
2354 ret = vchiq_platform_init_state(state);
2359 * bring up slot handler thread
2361 snprintf(threadname, sizeof(threadname), "vchiq-slot/%d", state->id);
2362 state->slot_handler_thread = kthread_create(&slot_handler_func,
2366 if (IS_ERR(state->slot_handler_thread)) {
2367 vchiq_loud_error_header();
2368 vchiq_loud_error("couldn't create thread %s", threadname);
2369 vchiq_loud_error_footer();
2370 return PTR_ERR(state->slot_handler_thread);
2372 set_user_nice(state->slot_handler_thread, -19);
2374 snprintf(threadname, sizeof(threadname), "vchiq-recy/%d", state->id);
2375 state->recycle_thread = kthread_create(&recycle_func,
2378 if (IS_ERR(state->recycle_thread)) {
2379 vchiq_loud_error_header();
2380 vchiq_loud_error("couldn't create thread %s", threadname);
2381 vchiq_loud_error_footer();
2382 ret = PTR_ERR(state->recycle_thread);
2383 goto fail_free_handler_thread;
2385 set_user_nice(state->recycle_thread, -19);
2387 snprintf(threadname, sizeof(threadname), "vchiq-sync/%d", state->id);
2388 state->sync_thread = kthread_create(&sync_func,
2391 if (IS_ERR(state->sync_thread)) {
2392 vchiq_loud_error_header();
2393 vchiq_loud_error("couldn't create thread %s", threadname);
2394 vchiq_loud_error_footer();
2395 ret = PTR_ERR(state->sync_thread);
2396 goto fail_free_recycle_thread;
2398 set_user_nice(state->sync_thread, -20);
2400 wake_up_process(state->slot_handler_thread);
2401 wake_up_process(state->recycle_thread);
2402 wake_up_process(state->sync_thread);
2404 vchiq_states[0] = state;
2406 /* Indicate readiness to the other side */
2407 local->initialised = 1;
2411 fail_free_recycle_thread:
2412 kthread_stop(state->recycle_thread);
2413 fail_free_handler_thread:
2414 kthread_stop(state->slot_handler_thread);
/*
 * Push a held message header onto a service's private message queue,
 * blocking (interruptibly, with signals flushed) while the fixed-size
 * ring is full, then signal msg_queue_push for readers.  The ring index
 * is the write cursor masked by VCHIQ_MAX_SLOTS - 1.
 * NOTE(review): the full-queue bound expression and the service-put are
 * elided in this extract.
 */
2419 void vchiq_msg_queue_push(unsigned int handle, struct vchiq_header *header)
2421 struct vchiq_service *service = find_service_by_handle(handle);
2424 while (service->msg_queue_write == service->msg_queue_read +
2426 if (wait_for_completion_interruptible(&service->msg_queue_pop))
2427 flush_signals(current);
2430 pos = service->msg_queue_write & (VCHIQ_MAX_SLOTS - 1);
2431 service->msg_queue_write++;
2432 service->msg_queue[pos] = header;
2434 complete(&service->msg_queue_push);
2436 EXPORT_SYMBOL(vchiq_msg_queue_push);
/*
 * Pop the next message header from a service's private message queue.
 * Returns immediately (presumably NULL - the return line is elided)
 * when the queue is empty on entry; otherwise waits interruptibly for a
 * message, advances the read cursor, and signals msg_queue_pop so a
 * blocked pusher can proceed.
 */
2438 struct vchiq_header *vchiq_msg_hold(unsigned int handle)
2440 struct vchiq_service *service = find_service_by_handle(handle);
2441 struct vchiq_header *header;
2444 if (service->msg_queue_write == service->msg_queue_read)
2447 while (service->msg_queue_write == service->msg_queue_read) {
2448 if (wait_for_completion_interruptible(&service->msg_queue_push))
2449 flush_signals(current);
2452 pos = service->msg_queue_read & (VCHIQ_MAX_SLOTS - 1);
2453 service->msg_queue_read++;
2454 header = service->msg_queue[pos];
2456 complete(&service->msg_queue_pop);
2460 EXPORT_SYMBOL(vchiq_msg_hold);
/*
 * Validate service creation parameters: a callback and a non-zero
 * fourcc are mandatory.  NOTE(review): the error/success return lines
 * are elided in this extract.
 */
2462 static int vchiq_validate_params(const struct vchiq_service_params_kernel *params)
2464 if (!params->callback || !params->fourcc) {
2465 vchiq_loud_error("Can't add service, invalid params\n");
2472 /* Called from application thread when a client or server service is created. */
/*
 * Allocate and register a new service in the state's service table.
 * After validating params and populating the struct (refcount 1, FREE
 * state, port/handle assigned under state->mutex), a slot in
 * state->services[] is chosen: clients (OPENING) take the first free
 * entry, servers scan from the top and reject a duplicate public fourcc
 * registered by a different instance/callback.  Quotas are initialised
 * from the state defaults and the service is moved to @srvstate.
 * Returns the service with its initial reference still held.
 * NOTE(review): NULL-failure paths, the kref helpers, and some closing
 * braces are elided in this extract.
 */
2473 struct vchiq_service *
2474 vchiq_add_service_internal(struct vchiq_state *state,
2475 const struct vchiq_service_params_kernel *params,
2476 int srvstate, struct vchiq_instance *instance,
2477 vchiq_userdata_term userdata_term)
2479 struct vchiq_service *service;
2480 struct vchiq_service __rcu **pservice = NULL;
2481 struct vchiq_service_quota *quota;
2485 ret = vchiq_validate_params(params);
2489 service = kmalloc(sizeof(*service), GFP_KERNEL);
2493 service->base.fourcc = params->fourcc;
2494 service->base.callback = params->callback;
2495 service->base.userdata = params->userdata;
2496 service->handle = VCHIQ_SERVICE_HANDLE_INVALID;
2497 kref_init(&service->ref_count);
2498 service->srvstate = VCHIQ_SRVSTATE_FREE;
2499 service->userdata_term = userdata_term;
2500 service->localport = VCHIQ_PORT_FREE;
2501 service->remoteport = VCHIQ_PORT_FREE;
/* Clients hide their fourcc; servers expose it for matching OPENs. */
2503 service->public_fourcc = (srvstate == VCHIQ_SRVSTATE_OPENING) ?
2504 VCHIQ_FOURCC_INVALID : params->fourcc;
2505 service->client_id = 0;
2506 service->auto_close = 1;
2508 service->closing = 0;
2510 atomic_set(&service->poll_flags, 0);
2511 service->version = params->version;
2512 service->version_min = params->version_min;
2513 service->state = state;
2514 service->instance = instance;
2515 service->service_use_count = 0;
2516 service->msg_queue_read = 0;
2517 service->msg_queue_write = 0;
2518 init_bulk_queue(&service->bulk_tx);
2519 init_bulk_queue(&service->bulk_rx);
2520 init_completion(&service->remove_event);
2521 init_completion(&service->bulk_remove_event);
2522 init_completion(&service->msg_queue_pop);
2523 init_completion(&service->msg_queue_push);
2524 mutex_init(&service->bulk_mutex);
2525 memset(&service->stats, 0, sizeof(service->stats));
2526 memset(&service->msg_queue, 0, sizeof(service->msg_queue));
2529 * Although it is perfectly possible to use a spinlock
2530 * to protect the creation of services, it is overkill as it
2531 * disables interrupts while the array is searched.
2532 * The only danger is of another thread trying to create a
2533 * service - service deletion is safe.
2534 * Therefore it is preferable to use state->mutex which,
2535 * although slower to claim, doesn't block interrupts while
2539 mutex_lock(&state->mutex);
2541 /* Prepare to use a previously unused service */
2542 if (state->unused_service < VCHIQ_MAX_SERVICES)
2543 pservice = &state->services[state->unused_service];
2545 if (srvstate == VCHIQ_SRVSTATE_OPENING) {
2546 for (i = 0; i < state->unused_service; i++) {
2547 if (!rcu_access_pointer(state->services[i])) {
2548 pservice = &state->services[i];
2554 for (i = (state->unused_service - 1); i >= 0; i--) {
2555 struct vchiq_service *srv;
2557 srv = rcu_dereference(state->services[i]);
2559 pservice = &state->services[i];
2560 } else if ((srv->public_fourcc == params->fourcc) &&
2561 ((srv->instance != instance) ||
2562 (srv->base.callback != params->callback))) {
2564 * There is another server using this
2565 * fourcc which doesn't match.
2575 service->localport = (pservice - state->services);
/* Handle encodes a rolling sequence, the state id and the port. */
2577 handle_seq = VCHIQ_MAX_STATES *
2579 service->handle = handle_seq |
2580 (state->id * VCHIQ_MAX_SERVICES) |
2582 handle_seq += VCHIQ_MAX_STATES * VCHIQ_MAX_SERVICES;
2583 rcu_assign_pointer(*pservice, service);
2584 if (pservice == &state->services[state->unused_service])
2585 state->unused_service++;
2588 mutex_unlock(&state->mutex);
2595 quota = &state->service_quotas[service->localport];
2596 quota->slot_quota = state->default_slot_quota;
2597 quota->message_quota = state->default_message_quota;
2598 if (quota->slot_use_count == 0)
2599 quota->previous_tx_index =
2600 SLOT_QUEUE_INDEX_FROM_POS(state->local_tx_pos)
2603 /* Bring this service online */
2604 vchiq_set_service_state(service, srvstate);
2606 vchiq_log_info(vchiq_core_msg_log_level,
2607 "%s Service %c%c%c%c SrcPort:%d",
2608 (srvstate == VCHIQ_SRVSTATE_OPENING)
2610 VCHIQ_FOURCC_AS_4CHARS(params->fourcc),
2611 service->localport);
2613 /* Don't unlock the service - leave it with a ref_count of 1. */
/*
 * Ask the remote side to open a connection for @service (client side).
 * Sends a VCHIQ_MSG_OPEN carrying the fourcc/version payload, then blocks
 * on service->remove_event until the peer answers with OPENACK or CLOSE.
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY (signal received) or VCHIQ_ERROR.
 * NOTE(review): this extract is missing some original lines (payload
 * fields, braces, early-out path); code tokens kept byte-identical.
 */
vchiq_open_service_internal(struct vchiq_service *service, int client_id)
	/* Payload advertises our fourcc and the minimum version we accept */
	struct vchiq_open_payload payload = {
		service->base.fourcc,
		service->version_min
	enum vchiq_status status = VCHIQ_SUCCESS;

	service->client_id = client_id;
	/* Take a use count so the service cannot vanish mid-open */
	vchiq_use_service_internal(service);
	status = queue_message(service->state,
			       NULL, MAKE_OPEN(service->localport),
			       memcpy_copy_callback,
			       QMFLAGS_IS_BLOCKING);

	if (status != VCHIQ_SUCCESS)

	/* Wait for the ACK/NAK */
	if (wait_for_completion_interruptible(&service->remove_event)) {
		/* Interrupted by a signal - caller retries from user context */
		status = VCHIQ_RETRY;
		vchiq_release_service_internal(service);
	} else if ((service->srvstate != VCHIQ_SRVSTATE_OPEN) &&
		   (service->srvstate != VCHIQ_SRVSTATE_OPENSYNC)) {
		/* CLOSEWAIT means the peer simply rejected the open - don't log */
		if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT)
			vchiq_log_error(vchiq_core_log_level,
					"%d: osi - srvstate = %s (ref %u)",
					srvstate_names[service->srvstate],
					kref_read(&service->ref_count));
		status = VCHIQ_ERROR;
		VCHIQ_SERVICE_STATS_INC(service, error_count);
		vchiq_release_service_internal(service);
/*
 * Release every still-claimed message in the remote slots that is
 * addressed to @service->localport, so those slots can be recycled
 * once the service closes.
 * NOTE(review): some original lines (loop/brace lines) are missing from
 * this extract; code tokens kept byte-identical.
 */
release_service_messages(struct vchiq_service *service)
	struct vchiq_state *state = service->state;
	int slot_last = state->remote->slot_last;

	/* Release any claimed messages aimed at this service */

	if (service->sync) {
		/* Synchronous services use the single dedicated sync slot */
		struct vchiq_header *header =
			(struct vchiq_header *)SLOT_DATA_FROM_INDEX(state,
						state->remote->slot_sync);
		if (VCHIQ_MSG_DSTPORT(header->msgid) == service->localport)
			release_message_sync(state, header);

	/* Scan every remote slot for claimed messages */
	for (i = state->remote->slot_first; i <= slot_last; i++) {
		struct vchiq_slot_info *slot_info =
			SLOT_INFO_FROM_INDEX(state, i);
		unsigned int pos, end;

		/* Fully released slot - nothing claimed here */
		if (slot_info->release_count == slot_info->use_count)

		data = (char *)SLOT_DATA_FROM_INDEX(state, i);
		end = VCHIQ_SLOT_SIZE;
		if (data == state->rx_data)
			/*
			 * This buffer is still being read from - stop
			 * at the current read position
			 */
			end = state->rx_pos & VCHIQ_SLOT_MASK;

		/* Walk the headers packed inside the slot up to 'end' */
			struct vchiq_header *header =
				(struct vchiq_header *)(data + pos);
			int msgid = header->msgid;
			int port = VCHIQ_MSG_DSTPORT(msgid);

			if ((port == service->localport) &&
			    (msgid & VCHIQ_MSGID_CLAIMED)) {
				vchiq_log_info(vchiq_core_log_level,
					       " fsi - hdr %pK", header);
				release_slot(state, slot_info, header,
			pos += calc_stride(header->size);
			/* A stride past the slot end means corrupt contents */
			if (pos > VCHIQ_SLOT_SIZE) {
				vchiq_log_error(vchiq_core_log_level,
						"fsi - pos %x: header %pK, msgid %x, header->msgid %x, header->size %x",
						header->msgid, header->size);
				WARN(1, "invalid slot position\n");
/*
 * Abort all outstanding bulk transfers on @service in both directions
 * and deliver the abort notifications.  Returns nonzero when both
 * notify passes succeed; 0 tells the caller to retry.
 * NOTE(review): the early-return body after mutex_lock_killable() is
 * missing from this extract; code tokens kept byte-identical.
 */
do_abort_bulks(struct vchiq_service *service)
	enum vchiq_status status;

	/* Abort any outstanding bulk transfers */
	if (mutex_lock_killable(&service->bulk_mutex))
	abort_outstanding_bulks(service, &service->bulk_tx);
	abort_outstanding_bulks(service, &service->bulk_rx);
	mutex_unlock(&service->bulk_mutex);

	/* Deliver the aborted-bulk callbacks without requesting a retry poll */
	status = notify_bulks(service, &service->bulk_tx, NO_RETRY_POLL);
	if (status != VCHIQ_SUCCESS)

	status = notify_bulks(service, &service->bulk_rx, NO_RETRY_POLL);
	return (status == VCHIQ_SUCCESS);
/*
 * Final stage of closing a service: choose the post-close state, tell
 * the owner via VCHIQ_SERVICE_CLOSED, release outstanding use counts
 * and either free the service or wake waiters.  On VCHIQ_RETRY from the
 * callback the service is parked in @failstate so the close can be
 * re-attempted.
 * NOTE(review): some original lines (else branches, default label,
 * braces) are missing from this extract; code tokens kept byte-identical.
 */
static enum vchiq_status
close_service_complete(struct vchiq_service *service, int failstate)
	enum vchiq_status status;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);

	/* Pick the state the service should land in after the close */
	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPEN:
	case VCHIQ_SRVSTATE_CLOSESENT:
	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (service->auto_close) {
			/* Auto-closing servers return to listening */
			service->client_id = 0;
			service->remoteport = VCHIQ_PORT_FREE;
			newstate = VCHIQ_SRVSTATE_LISTENING;
			newstate = VCHIQ_SRVSTATE_CLOSEWAIT;
			newstate = VCHIQ_SRVSTATE_CLOSED;
		vchiq_set_service_state(service, newstate);
	case VCHIQ_SRVSTATE_LISTENING:
		vchiq_log_error(vchiq_core_log_level,
				"%s(%x) called in state %s", __func__,
				service->handle, srvstate_names[service->srvstate]);
		WARN(1, "%s in unexpected state\n", __func__);

	/* Tell the owner the service has closed */
	status = make_service_callback(service,
				       VCHIQ_SERVICE_CLOSED, NULL, NULL);

	if (status != VCHIQ_RETRY) {
		int uc = service->service_use_count;

		/* Complete the close process */
		for (i = 0; i < uc; i++)
			/*
			 * cater for cases where close is forced and the
			 * client may not close all its handles
			 */
			vchiq_release_service_internal(service);

		service->client_id = 0;
		service->remoteport = VCHIQ_PORT_FREE;

		if (service->srvstate == VCHIQ_SRVSTATE_CLOSED) {
			vchiq_free_service_internal(service);
		} else if (service->srvstate != VCHIQ_SRVSTATE_CLOSEWAIT) {
			service->closing = 0;

			complete(&service->remove_event);
		/* Callback asked for a retry - park in the fail state */
		vchiq_set_service_state(service, failstate);
/* Called by the slot handler */
/*
 * Drive the service close state machine.  @close_recvd is nonzero when
 * the peer sent us a CLOSE (we must reply/complete), zero when the close
 * originates locally (we must send CLOSE).  Returns VCHIQ_SUCCESS,
 * VCHIQ_RETRY or VCHIQ_ERROR.
 * NOTE(review): a number of original lines (if/else and brace lines,
 * break statements) are missing from this extract; code tokens kept
 * byte-identical.
 */
vchiq_close_service_internal(struct vchiq_service *service, int close_recvd)
	struct vchiq_state *state = service->state;
	enum vchiq_status status = VCHIQ_SUCCESS;
	int is_server = (service->public_fourcc != VCHIQ_FOURCC_INVALID);
	int close_id = MAKE_CLOSE(service->localport,
				  VCHIQ_MSG_DSTPORT(service->remoteport));

	vchiq_log_info(vchiq_core_log_level, "%d: csi:%d,%d (%s)",
		       service->state->id, service->localport, close_recvd,
		       srvstate_names[service->srvstate]);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		/* Nothing in flight - a received CLOSE here is unexpected */
		vchiq_log_error(vchiq_core_log_level,
				"%s(1) called in state %s",
				__func__, srvstate_names[service->srvstate]);
	} else if (is_server) {
		if (service->srvstate == VCHIQ_SRVSTATE_LISTENING) {
			status = VCHIQ_ERROR;
			/* Reset the connection state, back to listening */
			service->client_id = 0;
			service->remoteport = VCHIQ_PORT_FREE;
			if (service->srvstate ==
			    VCHIQ_SRVSTATE_CLOSEWAIT)
				vchiq_set_service_state(service,
							VCHIQ_SRVSTATE_LISTENING);
		complete(&service->remove_event);
		vchiq_free_service_internal(service);

	case VCHIQ_SRVSTATE_OPENING:
		/* The open was rejected - tell the user */
		vchiq_set_service_state(service,
					VCHIQ_SRVSTATE_CLOSEWAIT);
		complete(&service->remove_event);
		/* Shutdown mid-open - let the other side know */
		status = queue_message(state, service, close_id,

	case VCHIQ_SRVSTATE_OPENSYNC:
		mutex_lock(&state->sync_mutex);
	case VCHIQ_SRVSTATE_OPEN:
		/* Locally-initiated close: abort bulks first */
		if (!do_abort_bulks(service))
			status = VCHIQ_RETRY;

		release_service_messages(service);

		if (status == VCHIQ_SUCCESS)
			status = queue_message(state, service, close_id,
					       NULL, NULL, 0, QMFLAGS_NO_MUTEX_UNLOCK);

		if (status != VCHIQ_SUCCESS) {
			if (service->srvstate == VCHIQ_SRVSTATE_OPENSYNC)
				mutex_unlock(&state->sync_mutex);

		/* Change the state while the mutex is still held */
		vchiq_set_service_state(service,
					VCHIQ_SRVSTATE_CLOSESENT);
		mutex_unlock(&state->slot_mutex);
		mutex_unlock(&state->sync_mutex);

		/* Change the state while the mutex is still held */
		vchiq_set_service_state(service, VCHIQ_SRVSTATE_CLOSERECVD);
		mutex_unlock(&state->slot_mutex);
		mutex_unlock(&state->sync_mutex);

		status = close_service_complete(service,
						VCHIQ_SRVSTATE_CLOSERECVD);

	case VCHIQ_SRVSTATE_CLOSESENT:
		/* This happens when a process is killed mid-close */
		if (!do_abort_bulks(service)) {
			status = VCHIQ_RETRY;

		if (status == VCHIQ_SUCCESS)
			status = close_service_complete(service,
							VCHIQ_SRVSTATE_CLOSERECVD);

	case VCHIQ_SRVSTATE_CLOSERECVD:
		if (!close_recvd && is_server)
			/* Force into LISTENING mode */
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_LISTENING);
		status = close_service_complete(service,
						VCHIQ_SRVSTATE_CLOSERECVD);

		vchiq_log_error(vchiq_core_log_level,
				"%s(%d) called in state %s", __func__,
				close_recvd, srvstate_names[service->srvstate]);
2938 /* Called from the application process upon process death */
2940 vchiq_terminate_service_internal(struct vchiq_service *service)
2942 struct vchiq_state *state = service->state;
2944 vchiq_log_info(vchiq_core_log_level, "%d: tsi - (%d<->%d)",
2945 state->id, service->localport, service->remoteport);
2947 mark_service_closing(service);
2949 /* Mark the service for removal by the slot handler */
2950 request_poll(state, service, VCHIQ_POLL_REMOVE);
/* Called from the slot handler */
/*
 * Move a quiescent service to SRVSTATE_FREE, wake any remove waiters and
 * drop the initial reference taken at creation.
 * NOTE(review): the break/default lines of the switch are missing from
 * this extract; code tokens kept byte-identical.
 */
vchiq_free_service_internal(struct vchiq_service *service)
	struct vchiq_state *state = service->state;

	vchiq_log_info(vchiq_core_log_level, "%d: fsi - (%d)",
		       state->id, service->localport);

	/* Only quiescent states may be freed */
	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPENING:
	case VCHIQ_SRVSTATE_CLOSED:
	case VCHIQ_SRVSTATE_HIDDEN:
	case VCHIQ_SRVSTATE_LISTENING:
	case VCHIQ_SRVSTATE_CLOSEWAIT:
		vchiq_log_error(vchiq_core_log_level,
				"%d: fsi - (%d) in state %s",
				state->id, service->localport,
				srvstate_names[service->srvstate]);

	vchiq_set_service_state(service, VCHIQ_SRVSTATE_FREE);

	/* Wake anyone waiting for the service to be removed */
	complete(&service->remove_event);

	/* Release the initial lock */
	vchiq_service_put(service);
/*
 * Connect @instance to the remote: expose its hidden services, send a
 * CONNECT message on first use, then wait for the peer's CONNECT.
 * Returns VCHIQ_SUCCESS, or (per the visible retry path) earlier on
 * retry/interrupt - the early-return lines are missing from this extract.
 */
vchiq_connect_internal(struct vchiq_state *state, struct vchiq_instance *instance)
	struct vchiq_service *service;

	/* Find all services registered to this client and enable them. */
	while ((service = next_service_by_instance(state, instance,
		if (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)
			vchiq_set_service_state(service,
						VCHIQ_SRVSTATE_LISTENING);
		vchiq_service_put(service);

	if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED) {
		/* First connect attempt - send CONNECT to the peer */
		if (queue_message(state, NULL, MAKE_CONNECT, NULL, NULL,
				  0, QMFLAGS_IS_BLOCKING) == VCHIQ_RETRY)

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTING);

	if (state->conn_state == VCHIQ_CONNSTATE_CONNECTING) {
		if (wait_for_completion_interruptible(&state->connect))

		vchiq_set_conn_state(state, VCHIQ_CONNSTATE_CONNECTED);
		/* Re-signal so any other waiters also wake */
		complete(&state->connect);

	return VCHIQ_SUCCESS;
3021 vchiq_shutdown_internal(struct vchiq_state *state, struct vchiq_instance *instance)
3023 struct vchiq_service *service;
3026 /* Find all services registered to this client and remove them. */
3028 while ((service = next_service_by_instance(state, instance,
3030 (void)vchiq_remove_service(service->handle);
3031 vchiq_service_put(service);
/*
 * Public API: close the service identified by @handle.  If called from
 * the slot handler thread the close runs synchronously; otherwise the
 * service is polled for termination and we wait on remove_event.
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY (interrupted) or VCHIQ_ERROR.
 * NOTE(review): several original lines (null check, loop/else braces)
 * are missing from this extract; code tokens kept byte-identical.
 */
vchiq_close_service(unsigned int handle)
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_SUCCESS;

	vchiq_log_info(vchiq_core_log_level,
		       "%d: close_service:%d",
		       service->state->id, service->localport);

	/* Already closed/never opened - nothing to do */
	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
	    (service->srvstate == VCHIQ_SRVSTATE_HIDDEN)) {
		vchiq_service_put(service);

	mark_service_closing(service);

	if (current == service->state->slot_handler_thread) {
		/* Safe to run the close directly on the slot handler thread */
		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
		WARN_ON(status == VCHIQ_RETRY);
		/* Mark the service for termination by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_TERMINATE);

	/* Wait for the slot handler to finish the close */
	if (wait_for_completion_interruptible(&service->remove_event)) {
		status = VCHIQ_RETRY;

	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
	    (service->srvstate == VCHIQ_SRVSTATE_LISTENING) ||
	    (service->srvstate == VCHIQ_SRVSTATE_OPEN))

	vchiq_log_warning(vchiq_core_log_level,
			  "%d: close_service:%d - waiting in state %s",
			  service->state->id, service->localport,
			  srvstate_names[service->srvstate]);

	if ((status == VCHIQ_SUCCESS) &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE) &&
	    (service->srvstate != VCHIQ_SRVSTATE_LISTENING))
		status = VCHIQ_ERROR;

	vchiq_service_put(service);

EXPORT_SYMBOL(vchiq_close_service);
/*
 * Public API: remove (rather than merely close) the service identified by
 * @handle.  A server service is demoted to client-like behaviour so it
 * ends up FREE instead of LISTENING.  Returns VCHIQ_SUCCESS, VCHIQ_RETRY
 * (interrupted) or VCHIQ_ERROR.
 * NOTE(review): several original lines (null check, loop/else braces)
 * are missing from this extract; code tokens kept byte-identical.
 */
vchiq_remove_service(unsigned int handle)
	/* Unregister the service */
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_SUCCESS;

	vchiq_log_info(vchiq_core_log_level,
		       "%d: remove_service:%d",
		       service->state->id, service->localport);

	if (service->srvstate == VCHIQ_SRVSTATE_FREE) {
		vchiq_service_put(service);

	mark_service_closing(service);

	if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
	    (current == service->state->slot_handler_thread)) {
		/*
		 * Make it look like a client, because it must be removed and
		 * not left in the LISTENING state.
		 */
		service->public_fourcc = VCHIQ_FOURCC_INVALID;

		status = vchiq_close_service_internal(service, NO_CLOSE_RECVD);
		WARN_ON(status == VCHIQ_RETRY);
		/* Mark the service for removal by the slot handler */
		request_poll(service->state, service, VCHIQ_POLL_REMOVE);

	/* Wait for the slot handler to finish the removal */
	if (wait_for_completion_interruptible(&service->remove_event)) {
		status = VCHIQ_RETRY;

	if ((service->srvstate == VCHIQ_SRVSTATE_FREE) ||
	    (service->srvstate == VCHIQ_SRVSTATE_OPEN))

	vchiq_log_warning(vchiq_core_log_level,
			  "%d: remove_service:%d - waiting in state %s",
			  service->state->id, service->localport,
			  srvstate_names[service->srvstate]);

	if ((status == VCHIQ_SUCCESS) &&
	    (service->srvstate != VCHIQ_SRVSTATE_FREE))
		status = VCHIQ_ERROR;

	vchiq_service_put(service);
/*
 * vchiq_bulk_transfer - queue a bulk transmit or receive on a service.
 *
 * This function may be called by kernel threads or user threads.
 * User threads may receive VCHIQ_RETRY to indicate that a signal has been
 * received and the call should be retried after being returned to user
 * context.
 * When called in blocking mode, the userdata field points to a bulk_waiter
 * structure used to wait for the transfer to complete.
 *
 * NOTE(review): some original lines (error labels, do/switch braces,
 * payload declaration) are missing from this extract; code tokens kept
 * byte-identical.
 */
enum vchiq_status vchiq_bulk_transfer(unsigned int handle,
				      void *offset, void __user *uoffset,
				      int size, void *userdata,
				      enum vchiq_bulk_mode mode,
				      enum vchiq_bulk_dir dir)
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_bulk_queue *queue;
	struct vchiq_bulk *bulk;
	struct vchiq_state *state;
	struct bulk_waiter *bulk_waiter = NULL;
	const char dir_char = (dir == VCHIQ_BULK_TRANSMIT) ? 't' : 'r';
	const int dir_msgtype = (dir == VCHIQ_BULK_TRANSMIT) ?
		VCHIQ_MSG_BULK_TX : VCHIQ_MSG_BULK_RX;
	enum vchiq_status status = VCHIQ_ERROR;

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)

	/* Exactly one of the kernel/user buffer pointers must be supplied */
	if (!offset && !uoffset)

	if (vchiq_check_service(service) != VCHIQ_SUCCESS)

	case VCHIQ_BULK_MODE_NOCALLBACK:
	case VCHIQ_BULK_MODE_CALLBACK:
	case VCHIQ_BULK_MODE_BLOCKING:
		/* Blocking mode: userdata is the caller's bulk_waiter */
		bulk_waiter = userdata;
		init_completion(&bulk_waiter->event);
		bulk_waiter->actual = 0;
		bulk_waiter->bulk = NULL;
	case VCHIQ_BULK_MODE_WAITING:
		/* Resume waiting on a previously-queued bulk */
		bulk_waiter = userdata;
		bulk = bulk_waiter->bulk;

	state = service->state;

	queue = (dir == VCHIQ_BULK_TRANSMIT) ?
		&service->bulk_tx : &service->bulk_rx;

	if (mutex_lock_killable(&service->bulk_mutex)) {
		status = VCHIQ_RETRY;

	/* Queue full - block until the remote side drains an entry */
	if (queue->local_insert == queue->remove + VCHIQ_NUM_SERVICE_BULKS) {
		VCHIQ_SERVICE_STATS_INC(service, bulk_stalls);
			mutex_unlock(&service->bulk_mutex);
			if (wait_for_completion_interruptible(
						&service->bulk_remove_event)) {
				status = VCHIQ_RETRY;
			if (mutex_lock_killable(&service->bulk_mutex)) {
				status = VCHIQ_RETRY;
		} while (queue->local_insert == queue->remove +
				VCHIQ_NUM_SERVICE_BULKS);

	bulk = &queue->bulks[BULK_INDEX(queue->local_insert)];

	bulk->userdata = userdata;
	/* Assume aborted until the remote reports an actual size */
	bulk->actual = VCHIQ_BULK_ACTUAL_ABORTED;

	if (vchiq_prepare_bulk_data(bulk, offset, uoffset, size, dir))
		goto unlock_error_exit;

	vchiq_log_info(vchiq_core_log_level,
		       "%d: bt (%d->%d) %cx %x@%pad %pK",
		       state->id, service->localport, service->remoteport, dir_char,
		       size, &bulk->data, userdata);

	/*
	 * The slot mutex must be held when the service is being closed, so
	 * claim it here to ensure that isn't happening
	 */
	if (mutex_lock_killable(&state->slot_mutex)) {
		status = VCHIQ_RETRY;
		goto cancel_bulk_error_exit;

	if (service->srvstate != VCHIQ_SRVSTATE_OPEN)
		goto unlock_both_error_exit;

	/* Tell the peer where the data lives and how big it is */
	payload[0] = lower_32_bits(bulk->data);
	payload[1] = bulk->size;
	status = queue_message(state,
			       VCHIQ_MAKE_MSG(dir_msgtype,
					      service->remoteport),
			       memcpy_copy_callback,
			       QMFLAGS_IS_BLOCKING |
			       QMFLAGS_NO_MUTEX_LOCK |
			       QMFLAGS_NO_MUTEX_UNLOCK);
	if (status != VCHIQ_SUCCESS)
		goto unlock_both_error_exit;

	queue->local_insert++;

	mutex_unlock(&state->slot_mutex);
	mutex_unlock(&service->bulk_mutex);

	vchiq_log_trace(vchiq_core_log_level,
			"%d: bt:%d %cx li=%x ri=%x p=%x",
			service->localport, dir_char,
			queue->local_insert, queue->remote_insert, queue->process);

	vchiq_service_put(service);

	status = VCHIQ_SUCCESS;

	/* Blocking/waiting modes: sleep until the transfer completes */
	bulk_waiter->bulk = bulk;
	if (wait_for_completion_interruptible(&bulk_waiter->event))
		status = VCHIQ_RETRY;
	else if (bulk_waiter->actual == VCHIQ_BULK_ACTUAL_ABORTED)
		status = VCHIQ_ERROR;

unlock_both_error_exit:
	mutex_unlock(&state->slot_mutex);
cancel_bulk_error_exit:
	vchiq_complete_bulk(bulk);

	mutex_unlock(&service->bulk_mutex);

	vchiq_service_put(service);
/*
 * Queue a data message on an open service, using @copy_callback to pull
 * the payload.  Synchronous services go through queue_message_sync().
 * Returns VCHIQ_SUCCESS, VCHIQ_RETRY or VCHIQ_ERROR.
 * NOTE(review): some original lines (remaining parameters, null check,
 * goto targets, break statements) are missing from this extract; code
 * tokens kept byte-identical.
 */
vchiq_queue_message(unsigned int handle,
		    ssize_t (*copy_callback)(void *context, void *dest,
					     size_t offset, size_t maxsize),
	struct vchiq_service *service = find_service_by_handle(handle);
	enum vchiq_status status = VCHIQ_ERROR;

	if (vchiq_check_service(service) != VCHIQ_SUCCESS)

		VCHIQ_SERVICE_STATS_INC(service, error_count);

	/* Reject messages larger than a slot can carry */
	if (size > VCHIQ_MAX_MSG_SIZE) {
		VCHIQ_SERVICE_STATS_INC(service, error_count);

	data_id = MAKE_DATA(service->localport, service->remoteport);

	switch (service->srvstate) {
	case VCHIQ_SRVSTATE_OPEN:
		status = queue_message(service->state, service, data_id,
				       copy_callback, context, size, 1);
	case VCHIQ_SRVSTATE_OPENSYNC:
		status = queue_message_sync(service->state, service, data_id,
					    copy_callback, context, size, 1);
		status = VCHIQ_ERROR;

	vchiq_service_put(service);
3372 int vchiq_queue_kernel_message(unsigned int handle, void *data, unsigned int size)
3374 enum vchiq_status status;
3377 status = vchiq_queue_message(handle, memcpy_copy_callback,
3381 * vchiq_queue_message() may return VCHIQ_RETRY, so we need to
3382 * implement a retry mechanism since this function is supposed
3383 * to block until queued
3385 if (status != VCHIQ_RETRY)
3393 EXPORT_SYMBOL(vchiq_queue_kernel_message);
/*
 * Return a message header delivered to the client back to the remote
 * slot it came from, releasing the slot claim (or the sync slot) so it
 * can be recycled.
 * NOTE(review): the null check and some brace lines are missing from
 * this extract; code tokens kept byte-identical.
 */
vchiq_release_message(unsigned int handle,
		      struct vchiq_header *header)
	struct vchiq_service *service = find_service_by_handle(handle);
	struct vchiq_shared_state *remote;
	struct vchiq_state *state;

	state = service->state;
	remote = state->remote;

	slot_index = SLOT_INDEX_FROM_DATA(state, (void *)header);

	if ((slot_index >= remote->slot_first) &&
	    (slot_index <= remote->slot_last)) {
		int msgid = header->msgid;

		if (msgid & VCHIQ_MSGID_CLAIMED) {
			struct vchiq_slot_info *slot_info =
				SLOT_INFO_FROM_INDEX(state, slot_index);

			release_slot(state, slot_info, header, service);
	} else if (slot_index == remote->slot_sync) {
		/* Message lives in the dedicated synchronous slot */
		release_message_sync(state, header);

	vchiq_service_put(service);
EXPORT_SYMBOL(vchiq_release_message);
3431 release_message_sync(struct vchiq_state *state, struct vchiq_header *header)
3433 header->msgid = VCHIQ_MSGID_PADDING;
3434 remote_event_signal(&state->remote->sync_release);
/*
 * Report the protocol version negotiated with the peer for @handle into
 * *@peer_version.  Returns VCHIQ_SUCCESS or VCHIQ_ERROR.
 * NOTE(review): the null checks and exit label are missing from this
 * extract; code tokens kept byte-identical.
 */
vchiq_get_peer_version(unsigned int handle, short *peer_version)
	enum vchiq_status status = VCHIQ_ERROR;
	struct vchiq_service *service = find_service_by_handle(handle);

	if (vchiq_check_service(service) != VCHIQ_SUCCESS)

	*peer_version = service->peer_version;
	status = VCHIQ_SUCCESS;

	vchiq_service_put(service);
EXPORT_SYMBOL(vchiq_get_peer_version);
3462 void vchiq_get_config(struct vchiq_config *config)
3464 config->max_msg_size = VCHIQ_MAX_MSG_SIZE;
3465 config->bulk_threshold = VCHIQ_MAX_MSG_SIZE;
3466 config->max_outstanding_bulks = VCHIQ_NUM_SERVICE_BULKS;
3467 config->max_services = VCHIQ_MAX_SERVICES;
3468 config->version = VCHIQ_VERSION;
3469 config->version_min = VCHIQ_VERSION_MIN;
3473 vchiq_set_service_option(unsigned int handle,
3474 enum vchiq_service_option option, int value)
3476 struct vchiq_service *service = find_service_by_handle(handle);
3477 struct vchiq_service_quota *quota;
3484 case VCHIQ_SERVICE_OPTION_AUTOCLOSE:
3485 service->auto_close = value;
3489 case VCHIQ_SERVICE_OPTION_SLOT_QUOTA:
3490 quota = &service->state->service_quotas[service->localport];
3492 value = service->state->default_slot_quota;
3493 if ((value >= quota->slot_use_count) &&
3494 (value < (unsigned short)~0)) {
3495 quota->slot_quota = value;
3496 if ((value >= quota->slot_use_count) &&
3497 (quota->message_quota >= quota->message_use_count))
3499 * Signal the service that it may have
3500 * dropped below its quota
3502 complete("a->quota_event);
3507 case VCHIQ_SERVICE_OPTION_MESSAGE_QUOTA:
3508 quota = &service->state->service_quotas[service->localport];
3510 value = service->state->default_message_quota;
3511 if ((value >= quota->message_use_count) &&
3512 (value < (unsigned short)~0)) {
3513 quota->message_quota = value;
3514 if ((value >= quota->message_use_count) &&
3515 (quota->slot_quota >= quota->slot_use_count))
3517 * Signal the service that it may have
3518 * dropped below its quota
3520 complete("a->quota_event);
3525 case VCHIQ_SERVICE_OPTION_SYNCHRONOUS:
3526 if ((service->srvstate == VCHIQ_SRVSTATE_HIDDEN) ||
3527 (service->srvstate == VCHIQ_SRVSTATE_LISTENING)) {
3528 service->sync = value;
3533 case VCHIQ_SERVICE_OPTION_TRACE:
3534 service->trace = value;
3541 vchiq_service_put(service);
/*
 * Dump one side's shared state (slot range, positions, per-slot use
 * counts and the debug counter array) via vchiq_dump().
 * NOTE(review): some original lines (entries, buffers, error checks,
 * brace lines) are missing from this extract; code tokens kept
 * byte-identical.
 */
vchiq_dump_shared_state(void *dump_context, struct vchiq_state *state,
			struct vchiq_shared_state *shared, const char *label)
	/* Names indexed to match the shared->debug[] counter slots */
	static const char *const debug_names[] = {
		"SLOT_HANDLER_COUNT",
		"SLOT_HANDLER_LINE",
		"AWAIT_COMPLETION_LINE",
		"DEQUEUE_MESSAGE_LINE",
		"SERVICE_CALLBACK_LINE",
		"MSG_QUEUE_FULL_COUNT",
		"COMPLETION_QUEUE_FULL_COUNT"

	len = scnprintf(buf, sizeof(buf),
			" %s: slots %d-%d tx_pos=%x recycle=%x",
			label, shared->slot_first, shared->slot_last,
			shared->tx_pos, shared->slot_queue_recycle);
	err = vchiq_dump(dump_context, buf, len + 1);

	len = scnprintf(buf, sizeof(buf),
	err = vchiq_dump(dump_context, buf, len + 1);

	/* Report only slots that still have unreleased messages */
	for (i = shared->slot_first; i <= shared->slot_last; i++) {
		struct vchiq_slot_info slot_info =
			*SLOT_INFO_FROM_INDEX(state, i);
		if (slot_info.use_count != slot_info.release_count) {
			len = scnprintf(buf, sizeof(buf),
					" %d: %d/%d", i, slot_info.use_count,
					slot_info.release_count);
			err = vchiq_dump(dump_context, buf, len + 1);

	/* Entry 0 is DEBUG_ENTRIES itself, so start at 1 */
	for (i = 1; i < shared->debug[DEBUG_ENTRIES]; i++) {
		len = scnprintf(buf, sizeof(buf), " DEBUG: %s = %d(%x)",
				debug_names[i], shared->debug[i], shared->debug[i]);
		err = vchiq_dump(dump_context, buf, len + 1);
/*
 * Dump the whole connection state: connection status, tx/rx positions,
 * version, aggregate stats, slot availability, both shared-state sides,
 * platform info, and every in-use service.  Returns 0 or the first
 * vchiq_dump() error.
 * NOTE(review): some original lines (locals, error-return checks,
 * shared-state arguments) are missing from this extract; code tokens
 * kept byte-identical.
 */
int vchiq_dump_state(void *dump_context, struct vchiq_state *state)
	len = scnprintf(buf, sizeof(buf), "State %d: %s", state->id,
			conn_state_names[state->conn_state]);
	err = vchiq_dump(dump_context, buf, len + 1);

	len = scnprintf(buf, sizeof(buf),
			" tx_pos=%x(@%pK), rx_pos=%x(@%pK)",
			state->local->tx_pos,
			state->tx_data + (state->local_tx_pos & VCHIQ_SLOT_MASK),
			state->rx_data + (state->rx_pos & VCHIQ_SLOT_MASK));
	err = vchiq_dump(dump_context, buf, len + 1);

	len = scnprintf(buf, sizeof(buf),
			" Version: %d (min %d)",
			VCHIQ_VERSION, VCHIQ_VERSION_MIN);
	err = vchiq_dump(dump_context, buf, len + 1);

	if (VCHIQ_ENABLE_STATS) {
		len = scnprintf(buf, sizeof(buf),
				" Stats: ctrl_tx_count=%d, ctrl_rx_count=%d, error_count=%d",
				state->stats.ctrl_tx_count, state->stats.ctrl_rx_count,
				state->stats.error_count);
		err = vchiq_dump(dump_context, buf, len + 1);

	len = scnprintf(buf, sizeof(buf),
			" Slots: %d available (%d data), %d recyclable, %d stalls (%d data)",
			((state->slot_queue_available * VCHIQ_SLOT_SIZE) -
			state->local_tx_pos) / VCHIQ_SLOT_SIZE,
			state->data_quota - state->data_use_count,
			state->local->slot_queue_recycle - state->slot_queue_available,
			state->stats.slot_stalls, state->stats.data_stalls);
	err = vchiq_dump(dump_context, buf, len + 1);

	err = vchiq_dump_platform_state(dump_context);

	err = vchiq_dump_shared_state(dump_context,

	err = vchiq_dump_shared_state(dump_context,

	err = vchiq_dump_platform_instances(dump_context);

	/* Dump every service slot that has ever been used */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service = find_service_by_port(state, i);

			err = vchiq_dump_service_state(dump_context, service);
			vchiq_service_put(service);
/*
 * Dump one service's state: name/fourcc, remote port, quotas, pending
 * bulk counts and (when stats are enabled) the per-service counters.
 * Returns 0 or the first vchiq_dump() error.
 * NOTE(review): some original lines (locals, error-return checks,
 * brace lines) are missing from this extract; code tokens kept
 * byte-identical.
 */
int vchiq_dump_service_state(void *dump_context, struct vchiq_service *service)
	unsigned int ref_count;

	/* Don't include the lock just taken */
	ref_count = kref_read(&service->ref_count) - 1;
	len = scnprintf(buf, sizeof(buf), "Service %u: %s (ref %u)",
			service->localport, srvstate_names[service->srvstate],

	if (service->srvstate != VCHIQ_SRVSTATE_FREE) {
		char remoteport[30];
		struct vchiq_service_quota *quota =
			&service->state->service_quotas[service->localport];
		int fourcc = service->base.fourcc;
		int tx_pending, rx_pending;

		if (service->remoteport != VCHIQ_PORT_FREE) {
			int len2 = scnprintf(remoteport, sizeof(remoteport),
					     "%u", service->remoteport);

			/* Servers additionally show which client is attached */
			if (service->public_fourcc != VCHIQ_FOURCC_INVALID)
				scnprintf(remoteport + len2,
					  sizeof(remoteport) - len2,
					  " (client %x)", service->client_id);
			strcpy(remoteport, "n/a");

		len += scnprintf(buf + len, sizeof(buf) - len,
				 " '%c%c%c%c' remote %s (msg use %d/%d, slot use %d/%d)",
				 VCHIQ_FOURCC_AS_4CHARS(fourcc),
				 quota->message_use_count,
				 quota->message_quota,
				 quota->slot_use_count,

		err = vchiq_dump(dump_context, buf, len + 1);

		/* Bulks queued locally but not yet consumed by the remote */
		tx_pending = service->bulk_tx.local_insert -
			service->bulk_tx.remote_insert;

		rx_pending = service->bulk_rx.local_insert -
			service->bulk_rx.remote_insert;

		len = scnprintf(buf, sizeof(buf),
				" Bulk: tx_pending=%d (size %d), rx_pending=%d (size %d)",
				tx_pending ? service->bulk_tx.bulks[
				BULK_INDEX(service->bulk_tx.remove)].size : 0,
				rx_pending ? service->bulk_rx.bulks[
				BULK_INDEX(service->bulk_rx.remove)].size : 0);

		if (VCHIQ_ENABLE_STATS) {
			err = vchiq_dump(dump_context, buf, len + 1);

			len = scnprintf(buf, sizeof(buf),
					" Ctrl: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.ctrl_tx_count,
					service->stats.ctrl_tx_bytes,
					service->stats.ctrl_rx_count,
					service->stats.ctrl_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);

			len = scnprintf(buf, sizeof(buf),
					" Bulk: tx_count=%d, tx_bytes=%llu, rx_count=%d, rx_bytes=%llu",
					service->stats.bulk_tx_count,
					service->stats.bulk_tx_bytes,
					service->stats.bulk_rx_count,
					service->stats.bulk_rx_bytes);
			err = vchiq_dump(dump_context, buf, len + 1);

			len = scnprintf(buf, sizeof(buf),
					" %d quota stalls, %d slot stalls, %d bulk stalls, %d aborted, %d errors",
					service->stats.quota_stalls,
					service->stats.slot_stalls,
					service->stats.bulk_stalls,
					service->stats.bulk_aborted_count,
					service->stats.error_count);

	err = vchiq_dump(dump_context, buf, len + 1);

	if (service->srvstate != VCHIQ_SRVSTATE_FREE)
		err = vchiq_dump_platform_service_state(dump_context, service);
3795 vchiq_loud_error_header(void)
3797 vchiq_log_error(vchiq_core_log_level,
3798 "============================================================================");
3799 vchiq_log_error(vchiq_core_log_level,
3800 "============================================================================");
3801 vchiq_log_error(vchiq_core_log_level, "=====");
3805 vchiq_loud_error_footer(void)
3807 vchiq_log_error(vchiq_core_log_level, "=====");
3808 vchiq_log_error(vchiq_core_log_level,
3809 "============================================================================");
3810 vchiq_log_error(vchiq_core_log_level,
3811 "============================================================================");
3814 enum vchiq_status vchiq_send_remote_use(struct vchiq_state *state)
3816 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3819 return queue_message(state, NULL, MAKE_REMOTE_USE, NULL, NULL, 0, 0);
3822 enum vchiq_status vchiq_send_remote_use_active(struct vchiq_state *state)
3824 if (state->conn_state == VCHIQ_CONNSTATE_DISCONNECTED)
3827 return queue_message(state, NULL, MAKE_REMOTE_USE_ACTIVE,
3831 void vchiq_log_dump_mem(const char *label, u32 addr, const void *void_mem,
3834 const u8 *mem = void_mem;
3839 while (num_bytes > 0) {
3842 for (offset = 0; offset < 16; offset++) {
3843 if (offset < num_bytes)
3844 s += scnprintf(s, 4, "%02x ", mem[offset]);
3846 s += scnprintf(s, 4, " ");
3849 for (offset = 0; offset < 16; offset++) {
3850 if (offset < num_bytes) {
3851 u8 ch = mem[offset];
3853 if ((ch < ' ') || (ch > '~'))
3860 if (label && (*label != '\0'))
3861 vchiq_log_trace(VCHIQ_LOG_TRACE,
3862 "%s: %08x: %s", label, addr, line_buf);
3864 vchiq_log_trace(VCHIQ_LOG_TRACE,
3865 "%08x: %s", addr, line_buf);