1 // SPDX-License-Identifier: GPL-2.0
3 * Broadcom BM2835 V4L2 driver
5 * Copyright © 2013 Raspberry Pi (Trading) Ltd.
7 * Authors: Vincent Sanders @ Collabora
8 * Dave Stevenson @ Broadcom
9 * (now dave.stevenson@raspberrypi.org)
10 * Simon Mellor @ Broadcom
11 * Luke Diamand @ Broadcom
13 * V4L2 driver MMAL vchiq interface code
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
28 #include "mmal-common.h"
29 #include "mmal-vchiq.h"
32 #include "interface/vchiq_arm/vchiq_if.h"
33 #include "interface/vchi/vchi.h"
36 * maximum number of components supported.
37 * This matches the maximum permitted by default on the VPU
39 #define VCHIQ_MMAL_MAX_COMPONENTS 64
42 * Timeout for synchronous msg responses in seconds.
43 * Helpful to increase this if stopping in the VPU debugger.
45 #define SYNC_MSG_TIMEOUT 3
47 /*#define FULL_MSG_DUMP 1*/
/* Human-readable names for mmal_msg_type, indexed by message type value;
 * used only by the DBG_DUMP_MSG debug macro.
 */
static const char *const msg_type_names[] = {
	"UNKNOWN",
	"QUIT",
	"SERVICE_CLOSED",
	"GET_VERSION",
	"COMPONENT_CREATE",
	"COMPONENT_DESTROY",
	"COMPONENT_ENABLE",
	"COMPONENT_DISABLE",
	"PORT_INFO_GET",
	"PORT_INFO_SET",
	"PORT_ACTION",
	"BUFFER_FROM_HOST",
	"BUFFER_TO_HOST",
	"GET_STATS",
	"PORT_PARAMETER_SET",
	"PORT_PARAMETER_GET",
	"EVENT_TO_HOST",
	"GET_CORE_STATS_FOR_PORT",
	"OPAQUE_ALLOCATOR",
	"CONSUME_MEM",
	"LMK",
	"OPAQUE_ALLOCATOR_DESC",
	"DRM_GET_LCD_HANDLE",
	"DRM_GET_TIME",
	"BUFFER_FROM_HOST_ZEROLEN",
	"PORT_FLUSH",
	"HOST_LOG",
};
81 static const char *const port_action_type_names[] = {
/* Debug helper: dump an MMAL message.
 * FULL_MSG_DUMP: print type plus full hex dump of header and payload.
 * DEBUG only: print type/length line.
 * otherwise: compiles away to nothing.
 */
#if defined(FULL_MSG_DUMP)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	do {								\
		pr_debug(TITLE" type:%s(%d) length:%d\n",		\
			 msg_type_names[(MSG)->h.type],			\
			 (MSG)->h.type, (MSG_LEN));			\
		print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET,	\
			       16, 4, (MSG),				\
			       sizeof(struct mmal_msg_header), 1);	\
		print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET,	\
			       16, 4,					\
			       ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
			       (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
	} while (0)
#else
#if defined(DEBUG)
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)				\
	pr_debug(TITLE" type:%s(%d) length:%d\n",			\
		 msg_type_names[(MSG)->h.type],				\
		 (MSG)->h.type, (MSG_LEN))
#else
#define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
#endif
#endif
118 struct vchiq_mmal_instance;
120 /* normal message context */
121 struct mmal_msg_context {
122 struct vchiq_mmal_instance *instance;
124 /* Index in the context_map idr so that we can find the
125 * mmal_msg_context again when servicing the VCHI reply.
131 /* work struct for buffer_cb callback */
132 struct work_struct work;
133 /* work struct for deferred callback */
134 struct work_struct buffer_to_host_work;
136 struct vchiq_mmal_instance *instance;
138 struct vchiq_mmal_port *port;
139 /* actual buffer used to store bulk reply */
140 struct mmal_buffer *buffer;
141 /* amount of buffer used */
142 unsigned long buffer_used;
143 /* MMAL buffer flags */
145 /* Presentation and Decode timestamps */
149 int status; /* context status */
151 } bulk; /* bulk data */
154 /* message handle to release */
155 struct vchiq_header *msg_handle;
156 /* pointer to received message */
157 struct mmal_msg *msg;
158 /* received message length */
160 /* completion upon reply */
161 struct completion cmplt;
162 } sync; /* synchronous response */
167 struct vchiq_mmal_instance {
168 unsigned service_handle;
170 /* ensure serialised access to service */
171 struct mutex vchiq_mutex;
173 /* vmalloc page to receive scratch bulk xfers into */
176 struct idr context_map;
177 /* protect accesses to context_map */
178 struct mutex context_map_lock;
180 struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
182 /* ordered workqueue to process all bulk operations */
183 struct workqueue_struct *bulk_wq;
186 static struct mmal_msg_context *
187 get_msg_context(struct vchiq_mmal_instance *instance)
189 struct mmal_msg_context *msg_context;
192 /* todo: should this be allocated from a pool to avoid kzalloc */
193 msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
196 return ERR_PTR(-ENOMEM);
198 /* Create an ID that will be passed along with our message so
199 * that when we service the VCHI reply, we can look up what
200 * message is being replied to.
202 mutex_lock(&instance->context_map_lock);
203 handle = idr_alloc(&instance->context_map, msg_context,
205 mutex_unlock(&instance->context_map_lock);
209 return ERR_PTR(handle);
212 msg_context->instance = instance;
213 msg_context->handle = handle;
218 static struct mmal_msg_context *
219 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
221 return idr_find(&instance->context_map, handle);
225 release_msg_context(struct mmal_msg_context *msg_context)
227 struct vchiq_mmal_instance *instance = msg_context->instance;
229 mutex_lock(&instance->context_map_lock);
230 idr_remove(&instance->context_map, msg_context->handle);
231 mutex_unlock(&instance->context_map_lock);
235 /* deals with receipt of event to host message */
236 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
237 struct mmal_msg *msg, u32 msg_len)
239 pr_debug("unhandled event\n");
240 pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
241 msg->u.event_to_host.client_component,
242 msg->u.event_to_host.port_type,
243 msg->u.event_to_host.port_num,
244 msg->u.event_to_host.cmd, msg->u.event_to_host.length);
247 /* workqueue scheduled callback
249 * we do this because it is important we do not call any other vchiq
250 * sync calls from witin the message delivery thread
252 static void buffer_work_cb(struct work_struct *work)
254 struct mmal_msg_context *msg_context =
255 container_of(work, struct mmal_msg_context, u.bulk.work);
256 struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
259 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
260 __func__, msg_context);
264 buffer->length = msg_context->u.bulk.buffer_used;
265 buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
266 buffer->dts = msg_context->u.bulk.dts;
267 buffer->pts = msg_context->u.bulk.pts;
269 atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
271 msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
272 msg_context->u.bulk.port,
273 msg_context->u.bulk.status,
274 msg_context->u.bulk.buffer);
277 /* workqueue scheduled callback to handle receiving buffers
279 * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
280 * If we block in the service_callback context then we can't process the
281 * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
282 * vchi_bulk_queue_receive() call to complete.
284 static void buffer_to_host_work_cb(struct work_struct *work)
286 struct mmal_msg_context *msg_context =
287 container_of(work, struct mmal_msg_context,
288 u.bulk.buffer_to_host_work);
289 struct vchiq_mmal_instance *instance = msg_context->instance;
290 unsigned long len = msg_context->u.bulk.buffer_used;
294 /* Dummy receive to ensure the buffers remain in order */
296 /* queue the bulk submission */
297 vchi_service_use(instance->service_handle);
298 ret = vchi_bulk_queue_receive(instance->service_handle,
299 msg_context->u.bulk.buffer->buffer,
300 /* Actual receive needs to be a multiple
304 VCHIQ_BULK_MODE_CALLBACK,
307 vchi_service_release(instance->service_handle);
310 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
311 __func__, msg_context, ret);
314 /* enqueue a bulk receive for a given message context */
315 static int bulk_receive(struct vchiq_mmal_instance *instance,
316 struct mmal_msg *msg,
317 struct mmal_msg_context *msg_context)
319 unsigned long rd_len;
321 rd_len = msg->u.buffer_from_host.buffer_header.length;
323 if (!msg_context->u.bulk.buffer) {
324 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
326 /* todo: this is a serious error, we should never have
327 * committed a buffer_to_host operation to the mmal
328 * port without the buffer to back it up (underflow
329 * handling) and there is no obvious way to deal with
330 * this - how is the mmal servie going to react when
331 * we fail to do the xfer and reschedule a buffer when
332 * it arrives? perhaps a starved flag to indicate a
333 * waiting bulk receive?
339 /* ensure we do not overrun the available buffer */
340 if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
341 rd_len = msg_context->u.bulk.buffer->buffer_size;
342 pr_warn("short read as not enough receive buffer space\n");
343 /* todo: is this the correct response, what happens to
344 * the rest of the message data?
349 msg_context->u.bulk.buffer_used = rd_len;
350 msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
351 msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
353 queue_work(msg_context->instance->bulk_wq,
354 &msg_context->u.bulk.buffer_to_host_work);
359 /* data in message, memcpy from packet into output buffer */
360 static int inline_receive(struct vchiq_mmal_instance *instance,
361 struct mmal_msg *msg,
362 struct mmal_msg_context *msg_context)
364 memcpy(msg_context->u.bulk.buffer->buffer,
365 msg->u.buffer_from_host.short_data,
366 msg->u.buffer_from_host.payload_in_message);
368 msg_context->u.bulk.buffer_used =
369 msg->u.buffer_from_host.payload_in_message;
374 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
376 buffer_from_host(struct vchiq_mmal_instance *instance,
377 struct vchiq_mmal_port *port, struct mmal_buffer *buf)
379 struct mmal_msg_context *msg_context;
386 pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);
389 if (!buf->msg_context) {
390 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
394 msg_context = buf->msg_context;
396 /* store bulk message context for when data arrives */
397 msg_context->u.bulk.instance = instance;
398 msg_context->u.bulk.port = port;
399 msg_context->u.bulk.buffer = buf;
400 msg_context->u.bulk.buffer_used = 0;
402 /* initialise work structure ready to schedule callback */
403 INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
404 INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
405 buffer_to_host_work_cb);
407 atomic_inc(&port->buffers_with_vpu);
409 /* prep the buffer from host message */
410 memset(&m, 0xbc, sizeof(m)); /* just to make debug clearer */
412 m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
413 m.h.magic = MMAL_MAGIC;
414 m.h.context = msg_context->handle;
417 /* drvbuf is our private data passed back */
418 m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
419 m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
420 m.u.buffer_from_host.drvbuf.port_handle = port->handle;
421 m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
424 m.u.buffer_from_host.buffer_header.cmd = 0;
425 m.u.buffer_from_host.buffer_header.data =
426 (u32)(unsigned long)buf->buffer;
427 m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
428 m.u.buffer_from_host.buffer_header.length = 0; /* nothing used yet */
429 m.u.buffer_from_host.buffer_header.offset = 0; /* no offset */
430 m.u.buffer_from_host.buffer_header.flags = 0; /* no flags */
431 m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
432 m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
434 /* clear buffer type sepecific data */
435 memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
436 sizeof(m.u.buffer_from_host.buffer_header_type_specific));
438 /* no payload in message */
439 m.u.buffer_from_host.payload_in_message = 0;
441 vchi_service_use(instance->service_handle);
443 ret = vchi_queue_kernel_message(instance->service_handle,
445 sizeof(struct mmal_msg_header) +
446 sizeof(m.u.buffer_from_host));
448 vchi_service_release(instance->service_handle);
453 /* deals with receipt of buffer to host message */
454 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
455 struct mmal_msg *msg, u32 msg_len)
457 struct mmal_msg_context *msg_context;
460 pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
461 __func__, instance, msg, msg_len);
463 if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
464 handle = msg->u.buffer_from_host.drvbuf.client_context;
465 msg_context = lookup_msg_context(instance, handle);
468 pr_err("drvbuf.client_context(%u) is invalid\n",
473 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
477 msg_context->u.bulk.mmal_flags =
478 msg->u.buffer_from_host.buffer_header.flags;
480 if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
481 /* message reception had an error */
482 pr_warn("error %d in reply\n", msg->h.status);
484 msg_context->u.bulk.status = msg->h.status;
486 } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
488 if (msg->u.buffer_from_host.buffer_header.flags &
489 MMAL_BUFFER_HEADER_FLAG_EOS) {
490 msg_context->u.bulk.status =
491 bulk_receive(instance, msg, msg_context);
492 if (msg_context->u.bulk.status == 0)
493 return; /* successful bulk submission, bulk
494 * completion will trigger callback
497 /* do callback with empty buffer - not EOS though */
498 msg_context->u.bulk.status = 0;
499 msg_context->u.bulk.buffer_used = 0;
501 } else if (msg->u.buffer_from_host.payload_in_message == 0) {
502 /* data is not in message, queue a bulk receive */
503 msg_context->u.bulk.status =
504 bulk_receive(instance, msg, msg_context);
505 if (msg_context->u.bulk.status == 0)
506 return; /* successful bulk submission, bulk
507 * completion will trigger callback
510 /* failed to submit buffer, this will end badly */
511 pr_err("error %d on bulk submission\n",
512 msg_context->u.bulk.status);
514 } else if (msg->u.buffer_from_host.payload_in_message <=
515 MMAL_VC_SHORT_DATA) {
516 /* data payload within message */
517 msg_context->u.bulk.status = inline_receive(instance, msg,
520 pr_err("message with invalid short payload\n");
523 msg_context->u.bulk.status = -EINVAL;
524 msg_context->u.bulk.buffer_used =
525 msg->u.buffer_from_host.payload_in_message;
528 /* schedule the port callback */
529 schedule_work(&msg_context->u.bulk.work);
532 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
533 struct mmal_msg_context *msg_context)
535 msg_context->u.bulk.status = 0;
537 /* schedule the port callback */
538 schedule_work(&msg_context->u.bulk.work);
541 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
542 struct mmal_msg_context *msg_context)
544 pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
546 msg_context->u.bulk.status = -EINTR;
548 schedule_work(&msg_context->u.bulk.work);
551 /* incoming event service callback */
552 static enum vchiq_status service_callback(enum vchiq_reason reason,
553 struct vchiq_header *header,
554 unsigned handle, void *bulk_ctx)
556 struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(handle);
558 struct mmal_msg *msg;
559 struct mmal_msg_context *msg_context;
562 pr_err("Message callback passed NULL instance\n");
563 return VCHIQ_SUCCESS;
567 case VCHIQ_MESSAGE_AVAILABLE:
568 msg = (void *)header->data;
569 msg_len = header->size;
571 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
573 /* handling is different for buffer messages */
574 switch (msg->h.type) {
575 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
576 vchiq_release_message(handle, header);
579 case MMAL_MSG_TYPE_EVENT_TO_HOST:
580 event_to_host_cb(instance, msg, msg_len);
581 vchiq_release_message(handle, header);
585 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
586 buffer_to_host_cb(instance, msg, msg_len);
587 vchiq_release_message(handle, header);
591 /* messages dependent on header context to complete */
592 if (!msg->h.context) {
593 pr_err("received message context was null!\n");
594 vchiq_release_message(handle, header);
598 msg_context = lookup_msg_context(instance,
601 pr_err("received invalid message context %u!\n",
603 vchiq_release_message(handle, header);
607 /* fill in context values */
608 msg_context->u.sync.msg_handle = header;
609 msg_context->u.sync.msg = msg;
610 msg_context->u.sync.msg_len = msg_len;
612 /* todo: should this check (completion_done()
613 * == 1) for no one waiting? or do we need a
614 * flag to tell us the completion has been
615 * interrupted so we can free the message and
616 * its context. This probably also solves the
617 * message arriving after interruption todo
621 /* complete message so caller knows it happened */
622 complete(&msg_context->u.sync.cmplt);
628 case VCHIQ_BULK_RECEIVE_DONE:
629 bulk_receive_cb(instance, bulk_ctx);
632 case VCHIQ_BULK_RECEIVE_ABORTED:
633 bulk_abort_cb(instance, bulk_ctx);
636 case VCHIQ_SERVICE_CLOSED:
637 /* TODO: consider if this requires action if received when
638 * driver is not explicitly closing the service
643 pr_err("Received unhandled message reason %d\n", reason);
647 return VCHIQ_SUCCESS;
650 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
651 struct mmal_msg *msg,
652 unsigned int payload_len,
653 struct mmal_msg **msg_out,
654 struct vchiq_header **msg_handle)
656 struct mmal_msg_context *msg_context;
658 unsigned long timeout;
660 /* payload size must not cause message to exceed max size */
662 (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
663 pr_err("payload length %d exceeds max:%d\n", payload_len,
664 (int)(MMAL_MSG_MAX_SIZE -
665 sizeof(struct mmal_msg_header)));
669 msg_context = get_msg_context(instance);
670 if (IS_ERR(msg_context))
671 return PTR_ERR(msg_context);
673 init_completion(&msg_context->u.sync.cmplt);
675 msg->h.magic = MMAL_MAGIC;
676 msg->h.context = msg_context->handle;
679 DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
682 vchi_service_use(instance->service_handle);
684 ret = vchi_queue_kernel_message(instance->service_handle,
686 sizeof(struct mmal_msg_header) +
689 vchi_service_release(instance->service_handle);
692 pr_err("error %d queuing message\n", ret);
693 release_msg_context(msg_context);
697 timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
698 SYNC_MSG_TIMEOUT * HZ);
700 pr_err("timed out waiting for sync completion\n");
702 /* todo: what happens if the message arrives after aborting */
703 release_msg_context(msg_context);
707 *msg_out = msg_context->u.sync.msg;
708 *msg_handle = msg_context->u.sync.msg_handle;
709 release_msg_context(msg_context);
714 static void dump_port_info(struct vchiq_mmal_port *port)
716 pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
718 pr_debug("buffer minimum num:%d size:%d align:%d\n",
719 port->minimum_buffer.num,
720 port->minimum_buffer.size, port->minimum_buffer.alignment);
722 pr_debug("buffer recommended num:%d size:%d align:%d\n",
723 port->recommended_buffer.num,
724 port->recommended_buffer.size,
725 port->recommended_buffer.alignment);
727 pr_debug("buffer current values num:%d size:%d align:%d\n",
728 port->current_buffer.num,
729 port->current_buffer.size, port->current_buffer.alignment);
731 pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
733 port->format.encoding, port->format.encoding_variant);
735 pr_debug(" bitrate:%d flags:0x%x\n",
736 port->format.bitrate, port->format.flags);
738 if (port->format.type == MMAL_ES_TYPE_VIDEO) {
740 ("es video format: width:%d height:%d colourspace:0x%x\n",
741 port->es.video.width, port->es.video.height,
742 port->es.video.color_space);
744 pr_debug(" : crop xywh %d,%d,%d,%d\n",
745 port->es.video.crop.x,
746 port->es.video.crop.y,
747 port->es.video.crop.width, port->es.video.crop.height);
748 pr_debug(" : framerate %d/%d aspect %d/%d\n",
749 port->es.video.frame_rate.num,
750 port->es.video.frame_rate.den,
751 port->es.video.par.num, port->es.video.par.den);
755 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
757 /* todo do readonly fields need setting at all? */
758 p->type = port->type;
759 p->index = port->index;
761 p->is_enabled = port->enabled;
762 p->buffer_num_min = port->minimum_buffer.num;
763 p->buffer_size_min = port->minimum_buffer.size;
764 p->buffer_alignment_min = port->minimum_buffer.alignment;
765 p->buffer_num_recommended = port->recommended_buffer.num;
766 p->buffer_size_recommended = port->recommended_buffer.size;
768 /* only three writable fields in a port */
769 p->buffer_num = port->current_buffer.num;
770 p->buffer_size = port->current_buffer.size;
771 p->userdata = (u32)(unsigned long)port;
774 static int port_info_set(struct vchiq_mmal_instance *instance,
775 struct vchiq_mmal_port *port)
779 struct mmal_msg *rmsg;
780 struct vchiq_header *rmsg_handle;
782 pr_debug("setting port info port %p\n", port);
785 dump_port_info(port);
787 m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
789 m.u.port_info_set.component_handle = port->component->handle;
790 m.u.port_info_set.port_type = port->type;
791 m.u.port_info_set.port_index = port->index;
793 port_to_mmal_msg(port, &m.u.port_info_set.port);
795 /* elementary stream format setup */
796 m.u.port_info_set.format.type = port->format.type;
797 m.u.port_info_set.format.encoding = port->format.encoding;
798 m.u.port_info_set.format.encoding_variant =
799 port->format.encoding_variant;
800 m.u.port_info_set.format.bitrate = port->format.bitrate;
801 m.u.port_info_set.format.flags = port->format.flags;
803 memcpy(&m.u.port_info_set.es, &port->es,
804 sizeof(union mmal_es_specific_format));
806 m.u.port_info_set.format.extradata_size = port->format.extradata_size;
807 memcpy(&m.u.port_info_set.extradata, port->format.extradata,
808 port->format.extradata_size);
810 ret = send_synchronous_mmal_msg(instance, &m,
811 sizeof(m.u.port_info_set),
812 &rmsg, &rmsg_handle);
816 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
817 /* got an unexpected message type in reply */
822 /* return operation status */
823 ret = -rmsg->u.port_info_get_reply.status;
825 pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
826 port->component->handle, port->handle);
829 vchiq_release_message(instance->service_handle, rmsg_handle);
834 /* use port info get message to retrieve port information */
835 static int port_info_get(struct vchiq_mmal_instance *instance,
836 struct vchiq_mmal_port *port)
840 struct mmal_msg *rmsg;
841 struct vchiq_header *rmsg_handle;
844 m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
845 m.u.port_info_get.component_handle = port->component->handle;
846 m.u.port_info_get.port_type = port->type;
847 m.u.port_info_get.index = port->index;
849 ret = send_synchronous_mmal_msg(instance, &m,
850 sizeof(m.u.port_info_get),
851 &rmsg, &rmsg_handle);
855 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
856 /* got an unexpected message type in reply */
861 /* return operation status */
862 ret = -rmsg->u.port_info_get_reply.status;
863 if (ret != MMAL_MSG_STATUS_SUCCESS)
866 if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
871 /* copy the values out of the message */
872 port->handle = rmsg->u.port_info_get_reply.port_handle;
874 /* port type and index cached to use on port info set because
875 * it does not use a port handle
877 port->type = rmsg->u.port_info_get_reply.port_type;
878 port->index = rmsg->u.port_info_get_reply.port_index;
880 port->minimum_buffer.num =
881 rmsg->u.port_info_get_reply.port.buffer_num_min;
882 port->minimum_buffer.size =
883 rmsg->u.port_info_get_reply.port.buffer_size_min;
884 port->minimum_buffer.alignment =
885 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
887 port->recommended_buffer.alignment =
888 rmsg->u.port_info_get_reply.port.buffer_alignment_min;
889 port->recommended_buffer.num =
890 rmsg->u.port_info_get_reply.port.buffer_num_recommended;
892 port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
893 port->current_buffer.size =
894 rmsg->u.port_info_get_reply.port.buffer_size;
897 port->format.type = rmsg->u.port_info_get_reply.format.type;
898 port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
899 port->format.encoding_variant =
900 rmsg->u.port_info_get_reply.format.encoding_variant;
901 port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
902 port->format.flags = rmsg->u.port_info_get_reply.format.flags;
904 /* elementary stream format */
906 &rmsg->u.port_info_get_reply.es,
907 sizeof(union mmal_es_specific_format));
908 port->format.es = &port->es;
910 port->format.extradata_size =
911 rmsg->u.port_info_get_reply.format.extradata_size;
912 memcpy(port->format.extradata,
913 rmsg->u.port_info_get_reply.extradata,
914 port->format.extradata_size);
916 pr_debug("received port info\n");
917 dump_port_info(port);
921 pr_debug("%s:result:%d component:0x%x port:%d\n",
922 __func__, ret, port->component->handle, port->handle);
924 vchiq_release_message(instance->service_handle, rmsg_handle);
929 /* create comonent on vc */
930 static int create_component(struct vchiq_mmal_instance *instance,
931 struct vchiq_mmal_component *component,
936 struct mmal_msg *rmsg;
937 struct vchiq_header *rmsg_handle;
939 /* build component create message */
940 m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
941 m.u.component_create.client_component = component->client_component;
942 strncpy(m.u.component_create.name, name,
943 sizeof(m.u.component_create.name));
945 ret = send_synchronous_mmal_msg(instance, &m,
946 sizeof(m.u.component_create),
947 &rmsg, &rmsg_handle);
951 if (rmsg->h.type != m.h.type) {
952 /* got an unexpected message type in reply */
957 ret = -rmsg->u.component_create_reply.status;
958 if (ret != MMAL_MSG_STATUS_SUCCESS)
961 /* a valid component response received */
962 component->handle = rmsg->u.component_create_reply.component_handle;
963 component->inputs = rmsg->u.component_create_reply.input_num;
964 component->outputs = rmsg->u.component_create_reply.output_num;
965 component->clocks = rmsg->u.component_create_reply.clock_num;
967 pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
969 component->inputs, component->outputs, component->clocks);
972 vchiq_release_message(instance->service_handle, rmsg_handle);
977 /* destroys a component on vc */
978 static int destroy_component(struct vchiq_mmal_instance *instance,
979 struct vchiq_mmal_component *component)
983 struct mmal_msg *rmsg;
984 struct vchiq_header *rmsg_handle;
986 m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
987 m.u.component_destroy.component_handle = component->handle;
989 ret = send_synchronous_mmal_msg(instance, &m,
990 sizeof(m.u.component_destroy),
991 &rmsg, &rmsg_handle);
995 if (rmsg->h.type != m.h.type) {
996 /* got an unexpected message type in reply */
1001 ret = -rmsg->u.component_destroy_reply.status;
1005 vchiq_release_message(instance->service_handle, rmsg_handle);
1010 /* enable a component on vc */
1011 static int enable_component(struct vchiq_mmal_instance *instance,
1012 struct vchiq_mmal_component *component)
1016 struct mmal_msg *rmsg;
1017 struct vchiq_header *rmsg_handle;
1019 m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1020 m.u.component_enable.component_handle = component->handle;
1022 ret = send_synchronous_mmal_msg(instance, &m,
1023 sizeof(m.u.component_enable),
1024 &rmsg, &rmsg_handle);
1028 if (rmsg->h.type != m.h.type) {
1029 /* got an unexpected message type in reply */
1034 ret = -rmsg->u.component_enable_reply.status;
1037 vchiq_release_message(instance->service_handle, rmsg_handle);
1042 /* disable a component on vc */
1043 static int disable_component(struct vchiq_mmal_instance *instance,
1044 struct vchiq_mmal_component *component)
1048 struct mmal_msg *rmsg;
1049 struct vchiq_header *rmsg_handle;
1051 m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1052 m.u.component_disable.component_handle = component->handle;
1054 ret = send_synchronous_mmal_msg(instance, &m,
1055 sizeof(m.u.component_disable),
1056 &rmsg, &rmsg_handle);
1060 if (rmsg->h.type != m.h.type) {
1061 /* got an unexpected message type in reply */
1066 ret = -rmsg->u.component_disable_reply.status;
1070 vchiq_release_message(instance->service_handle, rmsg_handle);
1075 /* get version of mmal implementation */
1076 static int get_version(struct vchiq_mmal_instance *instance,
1077 u32 *major_out, u32 *minor_out)
1081 struct mmal_msg *rmsg;
1082 struct vchiq_header *rmsg_handle;
1084 m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1086 ret = send_synchronous_mmal_msg(instance, &m,
1087 sizeof(m.u.version),
1088 &rmsg, &rmsg_handle);
1092 if (rmsg->h.type != m.h.type) {
1093 /* got an unexpected message type in reply */
1098 *major_out = rmsg->u.version.major;
1099 *minor_out = rmsg->u.version.minor;
1102 vchiq_release_message(instance->service_handle, rmsg_handle);
1107 /* do a port action with a port as a parameter */
1108 static int port_action_port(struct vchiq_mmal_instance *instance,
1109 struct vchiq_mmal_port *port,
1110 enum mmal_msg_port_action_type action_type)
1114 struct mmal_msg *rmsg;
1115 struct vchiq_header *rmsg_handle;
1117 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1118 m.u.port_action_port.component_handle = port->component->handle;
1119 m.u.port_action_port.port_handle = port->handle;
1120 m.u.port_action_port.action = action_type;
1122 port_to_mmal_msg(port, &m.u.port_action_port.port);
1124 ret = send_synchronous_mmal_msg(instance, &m,
1125 sizeof(m.u.port_action_port),
1126 &rmsg, &rmsg_handle);
1130 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1131 /* got an unexpected message type in reply */
1136 ret = -rmsg->u.port_action_reply.status;
1138 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1140 ret, port->component->handle, port->handle,
1141 port_action_type_names[action_type], action_type);
1144 vchiq_release_message(instance->service_handle, rmsg_handle);
1149 /* do a port action with handles as parameters */
1150 static int port_action_handle(struct vchiq_mmal_instance *instance,
1151 struct vchiq_mmal_port *port,
1152 enum mmal_msg_port_action_type action_type,
1153 u32 connect_component_handle,
1154 u32 connect_port_handle)
1158 struct mmal_msg *rmsg;
1159 struct vchiq_header *rmsg_handle;
1161 m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1163 m.u.port_action_handle.component_handle = port->component->handle;
1164 m.u.port_action_handle.port_handle = port->handle;
1165 m.u.port_action_handle.action = action_type;
1167 m.u.port_action_handle.connect_component_handle =
1168 connect_component_handle;
1169 m.u.port_action_handle.connect_port_handle = connect_port_handle;
1171 ret = send_synchronous_mmal_msg(instance, &m,
1172 sizeof(m.u.port_action_handle),
1173 &rmsg, &rmsg_handle);
1177 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1178 /* got an unexpected message type in reply */
1183 ret = -rmsg->u.port_action_reply.status;
1185 pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1187 ret, port->component->handle, port->handle,
1188 port_action_type_names[action_type],
1189 action_type, connect_component_handle, connect_port_handle);
1192 vchiq_release_message(instance->service_handle, rmsg_handle);
1197 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1198 struct vchiq_mmal_port *port,
1199 u32 parameter_id, void *value, u32 value_size)
1203 struct mmal_msg *rmsg;
1204 struct vchiq_header *rmsg_handle;
1206 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1208 m.u.port_parameter_set.component_handle = port->component->handle;
1209 m.u.port_parameter_set.port_handle = port->handle;
1210 m.u.port_parameter_set.id = parameter_id;
1211 m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1212 memcpy(&m.u.port_parameter_set.value, value, value_size);
1214 ret = send_synchronous_mmal_msg(instance, &m,
1215 (4 * sizeof(u32)) + value_size,
1216 &rmsg, &rmsg_handle);
1220 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1221 /* got an unexpected message type in reply */
1226 ret = -rmsg->u.port_parameter_set_reply.status;
1228 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1230 ret, port->component->handle, port->handle, parameter_id);
1233 vchiq_release_message(instance->service_handle, rmsg_handle);
/*
 * port_parameter_get() - read an MMAL parameter from a port.
 *
 * Sends a synchronous PORT_PARAMETER_GET request sized for the caller's
 * buffer, copies back as much of the returned value as fits, and always
 * reports the true parameter size via *value_size (so a caller can
 * detect truncation and retry with a larger buffer).
 *
 * NOTE(review): this extract elides some lines (declarations, braces,
 * the else-branch structure); comments describe only the visible code.
 */
1238 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1239 struct vchiq_mmal_port *port,
1240 u32 parameter_id, void *value, u32 *value_size)
1244 struct mmal_msg *rmsg;
1245 struct vchiq_header *rmsg_handle;
1247 m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1249 m.u.port_parameter_get.component_handle = port->component->handle;
1250 m.u.port_parameter_get.port_handle = port->handle;
1251 m.u.port_parameter_get.id = parameter_id;
/* Advertise capacity: id+size header words (2 * u32) + caller buffer. */
1252 m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1254 ret = send_synchronous_mmal_msg(instance, &m,
1256 mmal_msg_port_parameter_get),
1257 &rmsg, &rmsg_handle);
1261 if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1262 /* got an unexpected message type in reply */
1263 pr_err("Incorrect reply type %d\n", rmsg->h.type);
1268 ret = rmsg->u.port_parameter_get_reply.status;
1270 /* port_parameter_get_reply.size includes the header,
1271 * whilst *value_size doesn't.
1273 rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
/* Error or reply larger than caller's buffer: truncated copy path. */
1275 if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1276 /* Copy only as much as we have space for
1277 * but report true size of parameter
1279 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
/* (else branch, elided above) full copy of the returned value */
1282 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1283 rmsg->u.port_parameter_get_reply.size);
1285 /* Always report the size of the returned parameter to the caller */
1286 *value_size = rmsg->u.port_parameter_get_reply.size;
1288 pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1289 ret, port->component->handle, port->handle, parameter_id);
1292 vchiq_release_message(instance->service_handle, rmsg_handle);
/*
 * port_disable() - send a DISABLE port action, then return any buffers
 * still queued on the port to their owner via the port's buffer_cb
 * (marked empty: zero length/flags, unknown timestamps), and finally
 * refresh the cached port info from the VPU.
 *
 * NOTE(review): this extract elides some lines (ret declaration, list
 * removal, braces); comments describe only the visible code.
 */
1297 /* disables a port and drains buffers from it */
1298 static int port_disable(struct vchiq_mmal_instance *instance,
1299 struct vchiq_mmal_port *port)
1302 struct list_head *q, *buf_head;
1303 unsigned long flags = 0;
1310 ret = port_action_port(instance, port,
1311 MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1314 * Drain all queued buffers on port. This should only
1315 * apply to buffers that have been queued before the port
1316 * has been enabled. If the port has been enabled and buffers
1317 * passed, then the buffers should have been removed from this
1318 * list, and we should get the relevant callbacks via VCHIQ
1319 * to release the buffers.
/* Buffer list is shared with interrupt-context callbacks: irqsave lock. */
1321 spin_lock_irqsave(&port->slock, flags)
1323 list_for_each_safe(buf_head, q, &port->buffers) {
1324 struct mmal_buffer *mmalbuf;
1326 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1329 if (port->buffer_cb) {
/* Hand the buffer back as empty/unused. */
1330 mmalbuf->length = 0;
1331 mmalbuf->mmal_flags = 0;
1332 mmalbuf->dts = MMAL_TIME_UNKNOWN;
1333 mmalbuf->pts = MMAL_TIME_UNKNOWN;
1334 port->buffer_cb(instance,
1339 spin_unlock_irqrestore(&port->slock, flags);
/* Re-read port state now that it has been disabled. */
1341 ret = port_info_get(instance, port);
/*
 * port_enable() - send an ENABLE port action, then (if a buffer
 * callback is registered) submit any buffers already queued on the
 * port to the videocore, and refresh the cached port info.
 *
 * NOTE(review): this extract elides some lines (ret/hdr_count updates,
 * early-exit checks, braces); comments describe only the visible code.
 */
1348 static int port_enable(struct vchiq_mmal_instance *instance,
1349 struct vchiq_mmal_port *port)
1351 unsigned int hdr_count;
1352 struct list_head *q, *buf_head;
1358 ret = port_action_port(instance, port,
1359 MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
/* Only push buffers when a callback exists to receive them back. */
1365 if (port->buffer_cb) {
1366 /* send buffer headers to videocore */
1368 list_for_each_safe(buf_head, q, &port->buffers) {
1369 struct mmal_buffer *mmalbuf;
1371 mmalbuf = list_entry(buf_head, struct mmal_buffer,
1373 ret = buffer_from_host(instance, port, mmalbuf);
/* Stop once we have submitted the port's current buffer count. */
1379 if (hdr_count > port->current_buffer.num)
1384 ret = port_info_get(instance, port);
1390 /* ------------------------------------------------------------------
 * Public interface
1392 *------------------------------------------------------------------
 */
/*
 * vchiq_mmal_port_set_format() - push the port's locally-held format to
 * the VPU, then read back what was actually applied (the VPU may adjust
 * the requested format). Serialised on the instance mutex; returns an
 * error if the lock is interrupted.
 *
 * NOTE(review): some lines are elided in this extract (return paths,
 * braces); comments describe only the visible code.
 */
1395 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1396 struct vchiq_mmal_port *port)
1400 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1403 ret = port_info_set(instance, port);
1405 goto release_unlock;
1407 /* read what has actually been set */
1408 ret = port_info_get(instance, port);
1411 mutex_unlock(&instance->vchiq_mutex);
1415 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
/*
 * vchiq_mmal_port_parameter_set() - public, mutex-serialised wrapper
 * around port_parameter_set(). Fails early (without setting) if taking
 * the instance mutex is interrupted by a signal.
 */
1417 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1418 struct vchiq_mmal_port *port,
1419 u32 parameter, void *value, u32 value_size)
1423 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1426 ret = port_parameter_set(instance, port, parameter, value, value_size);
1428 mutex_unlock(&instance->vchiq_mutex);
1432 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
/*
 * vchiq_mmal_port_parameter_get() - public, mutex-serialised wrapper
 * around port_parameter_get(). On return *value_size holds the true
 * parameter size even if the copy was truncated.
 */
1434 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1435 struct vchiq_mmal_port *port,
1436 u32 parameter, void *value, u32 *value_size)
1440 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1443 ret = port_parameter_get(instance, port, parameter, value, value_size);
1445 mutex_unlock(&instance->vchiq_mutex);
1449 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
/*
 * vchiq_mmal_port_enable() - public entry point: record the buffer
 * callback on the port and enable it under the instance mutex.
 * Enabling an already-enabled port is a no-op.
 *
 * NOTE(review): some lines are elided (return statements, braces);
 * comments describe only the visible code.
 */
1453 * enables a port and queues buffers for satisfying callbacks if we
1454 * provide a callback handler
1456 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1457 struct vchiq_mmal_port *port,
1458 vchiq_mmal_buffer_cb buffer_cb)
1462 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1465 /* already enabled - noop */
1466 if (port->enabled) {
/* Remember where completed buffers should be delivered. */
1471 port->buffer_cb = buffer_cb;
1473 ret = port_enable(instance, port);
1476 mutex_unlock(&instance->vchiq_mutex);
1480 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
/*
 * vchiq_mmal_port_disable() - public entry point: disable the port
 * under the instance mutex. A port that is not enabled is handled as
 * a no-op (unlocks and returns without calling port_disable()).
 */
1482 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1483 struct vchiq_mmal_port *port)
1487 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1490 if (!port->enabled) {
1491 mutex_unlock(&instance->vchiq_mutex);
1495 ret = port_disable(instance, port);
1497 mutex_unlock(&instance->vchiq_mutex);
1501 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
/*
 * vchiq_mmal_port_connect_tunnel() - tunnel two ports together so the
 * VPU moves buffers between them without host involvement.
 *
 * Sequence (under the instance mutex):
 *  1. if src is already connected, disable it and send a DISCONNECT
 *     action for the old peer, clearing src->connected;
 *  2. (with dst == NULL, elided guard) just tear down — "not making
 *     new connection";
 *  3. otherwise copy src's video format/crop/frame-rate onto dst, push
 *     it with port_info_set() and read back the applied format;
 *  4. send a CONNECT action and record src->connected = dst.
 *
 * NOTE(review): several lines are elided in this extract (ret checks,
 * NULL-dst guard, braces); comments describe only the visible code.
 */
1503 /* ports will be connected in a tunneled manner so data buffers
1504 * are not handled by client.
1506 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1507 struct vchiq_mmal_port *src,
1508 struct vchiq_mmal_port *dst)
1512 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1515 /* disconnect ports if connected */
1516 if (src->connected) {
1517 ret = port_disable(instance, src);
1519 pr_err("failed disabling src port(%d)\n", ret);
1520 goto release_unlock;
1523 /* do not need to disable the destination port as they
1524 * are connected and it is done automatically
/* Tell the VPU to break the existing tunnel to the old peer. */
1527 ret = port_action_handle(instance, src,
1528 MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1529 src->connected->component->handle,
1530 src->connected->handle);
1532 pr_err("failed disconnecting src port\n");
1533 goto release_unlock;
1535 src->connected->enabled = 0;
1536 src->connected = NULL;
1540 /* do not make new connection */
1542 pr_debug("not making new connection\n");
1543 goto release_unlock;
1546 /* copy src port format to dst */
1547 dst->format.encoding = src->format.encoding;
1548 dst->es.video.width = src->es.video.width;
1549 dst->es.video.height = src->es.video.height;
1550 dst->es.video.crop.x = src->es.video.crop.x;
1551 dst->es.video.crop.y = src->es.video.crop.y;
1552 dst->es.video.crop.width = src->es.video.crop.width;
1553 dst->es.video.crop.height = src->es.video.crop.height;
1554 dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1555 dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1557 /* set new format */
1558 ret = port_info_set(instance, dst);
1560 pr_debug("setting port info failed\n");
1561 goto release_unlock;
1564 /* read what has actually been set */
1565 ret = port_info_get(instance, dst);
1567 pr_debug("read back port info failed\n");
1568 goto release_unlock;
1571 /* connect two ports together */
1572 ret = port_action_handle(instance, src,
1573 MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1574 dst->component->handle, dst->handle);
1576 pr_debug("connecting port %d:%d to %d:%d failed\n",
1577 src->component->handle, src->handle,
1578 dst->component->handle, dst->handle);
1579 goto release_unlock;
/* Remember the tunnel so a later call can disconnect it. */
1581 src->connected = dst;
1585 mutex_unlock(&instance->vchiq_mutex);
1589 EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
/*
 * vchiq_mmal_submit_buffer() - hand a buffer to the VPU for the given
 * port. If buffer_from_host() reports -EINVAL the port is disabled, so
 * the buffer is queued on port->buffers (under the port spinlock) to be
 * submitted later when the port is enabled.
 *
 * NOTE(review): the final return is elided in this extract.
 */
1591 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1592 struct vchiq_mmal_port *port,
1593 struct mmal_buffer *buffer)
1595 unsigned long flags = 0;
1598 ret = buffer_from_host(instance, port, buffer);
1599 if (ret == -EINVAL) {
1600 /* Port is disabled. Queue for when it is enabled. */
1601 spin_lock_irqsave(&port->slock, flags);
1602 list_add_tail(&buffer->list, &port->buffers);
1603 spin_unlock_irqrestore(&port->slock, flags);
1608 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
/*
 * mmal_vchi_buffer_init() - allocate a message context for a buffer and
 * attach it via buf->msg_context. Propagates the error code if
 * get_msg_context() fails. Paired with mmal_vchi_buffer_cleanup().
 */
1610 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1611 struct mmal_buffer *buf)
1613 struct mmal_msg_context *msg_context = get_msg_context(instance);
1615 if (IS_ERR(msg_context))
1616 return (PTR_ERR(msg_context));
1618 buf->msg_context = msg_context;
1621 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
/*
 * mmal_vchi_buffer_cleanup() - release the message context attached by
 * mmal_vchi_buffer_init() and clear the pointer to prevent reuse of the
 * freed context.
 */
1623 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1625 struct mmal_msg_context *msg_context = buf->msg_context;
1628 release_msg_context(msg_context);
/* Clear the dangling pointer after release. */
1629 buf->msg_context = NULL;
1633 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
/*
 * vchiq_mmal_component_init() - create a VPU component and gather info
 * for all of its ports.
 *
 * Under the instance mutex:
 *  1. claim a free slot in instance->component[] (up to
 *     VCHIQ_MMAL_MAX_COMPONENTS); fail with -EINVAL if none free;
 *  2. use the slot index as the client_component handle;
 *  3. create the component on the VPU (create_component());
 *  4. initialise and query the control port, then every input, output
 *     and clock port (per-port spinlock + buffer list + port_info_get);
 *  5. on success return the component via *component_out; on failure
 *     destroy the component and release the slot.
 *
 * NOTE(review): several lines are elided (ret checks, braces, error
 * labels); comments describe only the visible code.
 */
1635 /* Initialise a mmal component and its ports
1638 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1640 struct vchiq_mmal_component **component_out)
1643 int idx; /* port index */
1644 struct vchiq_mmal_component *component = NULL;
1646 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* Find a free component slot and mark it in use. */
1649 for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
1650 if (!instance->component[idx].in_use) {
1651 component = &instance->component[idx];
1652 component->in_use = 1;
1658 ret = -EINVAL; /* todo is this correct error? */
1662 /* We need a handle to reference back to our component structure.
1663 * Use the array index in instance->component rather than rolling
1666 component->client_component = idx;
1668 ret = create_component(instance, component, name);
1670 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1675 /* ports info needs gathering */
1676 component->control.type = MMAL_PORT_TYPE_CONTROL;
1677 component->control.index = 0;
1678 component->control.component = component;
1679 spin_lock_init(&component->control.slock);
1680 INIT_LIST_HEAD(&component->control.buffers);
1681 ret = port_info_get(instance, &component->control);
1683 goto release_component;
/* Input ports: counts were reported by the VPU at creation. */
1685 for (idx = 0; idx < component->inputs; idx++) {
1686 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1687 component->input[idx].index = idx;
1688 component->input[idx].component = component;
1689 spin_lock_init(&component->input[idx].slock);
1690 INIT_LIST_HEAD(&component->input[idx].buffers);
1691 ret = port_info_get(instance, &component->input[idx]);
1693 goto release_component;
/* Output ports. */
1696 for (idx = 0; idx < component->outputs; idx++) {
1697 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1698 component->output[idx].index = idx;
1699 component->output[idx].component = component;
1700 spin_lock_init(&component->output[idx].slock);
1701 INIT_LIST_HEAD(&component->output[idx].buffers);
1702 ret = port_info_get(instance, &component->output[idx]);
1704 goto release_component;
/* Clock ports. */
1707 for (idx = 0; idx < component->clocks; idx++) {
1708 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1709 component->clock[idx].index = idx;
1710 component->clock[idx].component = component;
1711 spin_lock_init(&component->clock[idx].slock);
1712 INIT_LIST_HEAD(&component->clock[idx].buffers);
1713 ret = port_info_get(instance, &component->clock[idx]);
1715 goto release_component;
1718 *component_out = component;
1720 mutex_unlock(&instance->vchiq_mutex);
/* Error path: tear down the VPU component and free the slot. */
1725 destroy_component(instance, component);
1728 component->in_use = 0;
1729 mutex_unlock(&instance->vchiq_mutex);
1733 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
/*
 * vchiq_mmal_component_finalise() - disable (if enabled) and destroy a
 * component on the VPU, then release its slot (in_use = 0). Serialised
 * on the instance mutex.
 */
1736 * cause a mmal component to be destroyed
1738 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1739 struct vchiq_mmal_component *component)
1743 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1746 if (component->enabled)
1747 ret = disable_component(instance, component);
1749 ret = destroy_component(instance, component);
/* Return the component[] slot to the free pool. */
1751 component->in_use = 0;
1753 mutex_unlock(&instance->vchiq_mutex);
1757 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
/*
 * vchiq_mmal_component_enable() - enable a component on the VPU under
 * the instance mutex. Already-enabled components are a no-op (unlock
 * and return). On success the enabled flag is set.
 */
1760 * cause a mmal component to be enabled
1762 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1763 struct vchiq_mmal_component *component)
1767 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1770 if (component->enabled) {
1771 mutex_unlock(&instance->vchiq_mutex);
1775 ret = enable_component(instance, component);
1777 component->enabled = true;
1779 mutex_unlock(&instance->vchiq_mutex);
1783 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
/*
 * vchiq_mmal_component_disable() - disable a component on the VPU under
 * the instance mutex; no-op if not enabled. Clears the enabled flag.
 * (The comment on the next visible line says "enabled" — it appears to
 * be a copy/paste of the enable function's banner.)
 */
1786 * cause a mmal component to be enabled
1788 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1789 struct vchiq_mmal_component *component)
1793 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1796 if (!component->enabled) {
1797 mutex_unlock(&instance->vchiq_mutex);
1801 ret = disable_component(instance, component);
1803 component->enabled = 0;
1805 mutex_unlock(&instance->vchiq_mutex);
1809 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
/*
 * vchiq_mmal_version() - query the MMAL VC firmware version
 * (major/minor) from the VPU, serialised on the instance mutex.
 */
1811 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1812 u32 *major_out, u32 *minor_out)
1816 if (mutex_lock_interruptible(&instance->vchiq_mutex))
1819 ret = get_version(instance, major_out, minor_out);
1821 mutex_unlock(&instance->vchiq_mutex);
1825 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
/*
 * vchiq_mmal_finalise() - tear down an instance created by
 * vchiq_mmal_init(): close the VCHI service (taking a use-count first),
 * flush and destroy the bulk workqueue, free the bulk scratch buffer
 * and destroy the message-context idr.
 *
 * NOTE(review): some lines are elided (kfree of the instance, return);
 * comments describe only the visible code.
 */
1827 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1834 if (mutex_lock_interruptible(&instance->vchiq_mutex))
/* Take a service use-count so the close is valid. */
1837 vchi_service_use(instance->service_handle);
1839 status = vchi_service_close(instance->service_handle);
1841 pr_err("mmal-vchiq: VCHIQ close failed\n");
1843 mutex_unlock(&instance->vchiq_mutex);
/* Ensure all queued bulk work has completed before destroying. */
1845 flush_workqueue(instance->bulk_wq);
1846 destroy_workqueue(instance->bulk_wq);
1848 vfree(instance->bulk_scratch);
1850 idr_destroy(&instance->context_map);
1856 EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
/*
 * vchiq_mmal_init() - create and connect an MMAL VCHIQ instance.
 *
 * Initialises VCHI, connects, allocates the instance (mutex, scratch
 * page, idr-based message-context map, ordered bulk workqueue), opens
 * the "MMAL" VCHI service with service_callback as the receive handler,
 * releases the initial service use-count and hands the instance back
 * via *out_instance. Compile-time BUILD_BUG_ONs pin the wire-format
 * structure sizes since they are (de)serialised directly from memory.
 *
 * NOTE(review): some lines are elided (error returns, kfree on the
 * failure path); comments describe only the visible code.
 */
1858 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1861 struct vchiq_mmal_instance *instance;
1862 static struct vchiq_instance *vchiq_instance;
1863 struct vchiq_service_params params = {
1864 .version = VC_MMAL_VER,
1865 .version_min = VC_MMAL_MIN_VER,
1866 .fourcc = VC_MMAL_SERVER_NAME,
1867 .callback = service_callback,
1871 /* compile time checks to ensure structure size as they are
1872 * directly (de)serialised from memory.
1875 /* ensure the header structure has packed to the correct size */
1876 BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1878 /* ensure message structure does not exceed maximum length */
1879 BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1881 /* mmal port struct is correct size */
1882 BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1884 /* create a vchi instance */
1885 status = vchi_initialise(&vchiq_instance);
1887 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1892 status = vchi_connect(vchiq_instance);
1894 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1898 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1903 mutex_init(&instance->vchiq_mutex);
/* Single page of scratch space for bulk transfers. */
1905 instance->bulk_scratch = vmalloc(PAGE_SIZE);
1907 mutex_init(&instance->context_map_lock);
/* idr base 1 so handle 0 can never be a valid context id. */
1908 idr_init_base(&instance->context_map, 1);
1910 params.userdata = instance;
/* Ordered workqueue: bulk completions must be processed in order. */
1912 instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1914 if (!instance->bulk_wq)
1917 status = vchi_service_open(vchiq_instance, &params,
1918 &instance->service_handle);
1920 pr_err("Failed to open VCHI service connection (status=%d)\n",
1922 goto err_close_services;
/* Drop the use-count taken implicitly by vchi_service_open(). */
1925 vchi_service_release(instance->service_handle);
1927 *out_instance = instance;
/* Error unwind: close service, destroy workqueue, free scratch. */
1932 vchi_service_close(instance->service_handle);
1933 destroy_workqueue(instance->bulk_wq);
1935 vfree(instance->bulk_scratch);
1939 EXPORT_SYMBOL_GPL(vchiq_mmal_init);
/* Module metadata for the BCM2835 MMAL VCHIQ interface driver. */
1941 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1942 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1943 MODULE_LICENSE("GPL");