staging: vchi: Unify struct shim_service and struct vchi_service_handle
[linux-2.6-microblaze.git] drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BCM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *              (now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
27
28 #include "mmal-common.h"
29 #include "mmal-vchiq.h"
30 #include "mmal-msg.h"
31
32 #define USE_VCHIQ_ARM
33 #include "interface/vchi/vchi.h"
34
35 /*
36  * maximum number of components supported.
37  * This matches the maximum permitted by default on the VPU
38  */
39 #define VCHIQ_MMAL_MAX_COMPONENTS 64
40
41 /*
42  * Timeout for synchronous msg responses in seconds.
43  * Helpful to increase this if stopping in the VPU debugger.
44  */
45 #define SYNC_MSG_TIMEOUT       3
46
47 /*#define FULL_MSG_DUMP 1*/
48
49 #ifdef DEBUG
50 static const char *const msg_type_names[] = {
51         "UNKNOWN",
52         "QUIT",
53         "SERVICE_CLOSED",
54         "GET_VERSION",
55         "COMPONENT_CREATE",
56         "COMPONENT_DESTROY",
57         "COMPONENT_ENABLE",
58         "COMPONENT_DISABLE",
59         "PORT_INFO_GET",
60         "PORT_INFO_SET",
61         "PORT_ACTION",
62         "BUFFER_FROM_HOST",
63         "BUFFER_TO_HOST",
64         "GET_STATS",
65         "PORT_PARAMETER_SET",
66         "PORT_PARAMETER_GET",
67         "EVENT_TO_HOST",
68         "GET_CORE_STATS_FOR_PORT",
69         "OPAQUE_ALLOCATOR",
70         "CONSUME_MEM",
71         "LMK",
72         "OPAQUE_ALLOCATOR_DESC",
73         "DRM_GET_LHS32",
74         "DRM_GET_TIME",
75         "BUFFER_FROM_HOST_ZEROLEN",
76         "PORT_FLUSH",
77         "HOST_LOG",
78 };
79 #endif
80
81 static const char *const port_action_type_names[] = {
82         "UNKNOWN",
83         "ENABLE",
84         "DISABLE",
85         "FLUSH",
86         "CONNECT",
87         "DISCONNECT",
88         "SET_REQUIREMENTS",
89 };
90
91 #if defined(DEBUG)
92 #if defined(FULL_MSG_DUMP)
93 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
94         do {                                                            \
95                 pr_debug(TITLE" type:%s(%d) length:%d\n",               \
96                          msg_type_names[(MSG)->h.type],                 \
97                          (MSG)->h.type, (MSG_LEN));                     \
98                 print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
99                                16, 4, (MSG),                            \
100                                sizeof(struct mmal_msg_header), 1);      \
101                 print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
102                                16, 4,                                   \
103                                ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
104                                (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
105         } while (0)
106 #else
107 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
108         {                                                               \
109                 pr_debug(TITLE" type:%s(%d) length:%d\n",               \
110                          msg_type_names[(MSG)->h.type],                 \
111                          (MSG)->h.type, (MSG_LEN));                     \
112         }
113 #endif
114 #else
115 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
116 #endif
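/*
 * Informal usage note: DBG_DUMP_MSG() is called as, e.g.,
 * DBG_DUMP_MSG(msg, msg_len, "<<< reply message"). With DEBUG it prints the
 * message type and length; with FULL_MSG_DUMP it additionally hex-dumps the
 * header and payload; without DEBUG it expands to nothing.
 */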
117
118 struct vchiq_mmal_instance;
119
120 /* normal message context */
121 struct mmal_msg_context {
122         struct vchiq_mmal_instance *instance;
123
124         /* Index in the context_map idr so that we can find the
125          * mmal_msg_context again when servicing the VCHI reply.
126          */
127         int handle;
128
129         union {
130                 struct {
131                         /* work struct for buffer_cb callback */
132                         struct work_struct work;
133                         /* work struct for deferred callback */
134                         struct work_struct buffer_to_host_work;
135                         /* mmal instance */
136                         struct vchiq_mmal_instance *instance;
137                         /* mmal port */
138                         struct vchiq_mmal_port *port;
139                         /* actual buffer used to store bulk reply */
140                         struct mmal_buffer *buffer;
141                         /* amount of buffer used */
142                         unsigned long buffer_used;
143                         /* MMAL buffer flags */
144                         u32 mmal_flags;
145                         /* Presentation and Decode timestamps */
146                         s64 pts;
147                         s64 dts;
148
149                         int status;     /* context status */
150
151                 } bulk;         /* bulk data */
152
153                 struct {
154                         /* message handle to release */
155                         struct vchi_held_msg msg_handle;
156                         /* pointer to received message */
157                         struct mmal_msg *msg;
158                         /* received message length */
159                         u32 msg_len;
160                         /* completion upon reply */
161                         struct completion cmplt;
162                 } sync;         /* synchronous response */
163         } u;
164
165 };
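/*
 * Roughly: the "bulk" arm of the union above carries buffer traffic state
 * (buffer_from_host()/buffer_to_host_cb() and the workqueue callbacks),
 * while the "sync" arm is used by send_synchronous_mmal_msg() to hold the
 * reply message and wake the waiter through the completion.
 */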
166
167 struct vchiq_mmal_instance {
168         struct vchi_service *service;
169
170         /* ensure serialised access to service */
171         struct mutex vchiq_mutex;
172
173         /* vmalloc page to receive scratch bulk xfers into */
174         void *bulk_scratch;
175
176         struct idr context_map;
177         /* protect accesses to context_map */
178         struct mutex context_map_lock;
179
180         struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
181
182         /* ordered workqueue to process all bulk operations */
183         struct workqueue_struct *bulk_wq;
184 };
185
186 static struct mmal_msg_context *
187 get_msg_context(struct vchiq_mmal_instance *instance)
188 {
189         struct mmal_msg_context *msg_context;
190         int handle;
191
192         /* todo: should this be allocated from a pool to avoid kzalloc */
193         msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
194
195         if (!msg_context)
196                 return ERR_PTR(-ENOMEM);
197
198         /* Create an ID that will be passed along with our message so
199          * that when we service the VCHI reply, we can look up what
200          * message is being replied to.
201          */
202         mutex_lock(&instance->context_map_lock);
203         handle = idr_alloc(&instance->context_map, msg_context,
204                            0, 0, GFP_KERNEL);
205         mutex_unlock(&instance->context_map_lock);
206
207         if (handle < 0) {
208                 kfree(msg_context);
209                 return ERR_PTR(handle);
210         }
211
212         msg_context->instance = instance;
213         msg_context->handle = handle;
214
215         return msg_context;
216 }
217
218 static struct mmal_msg_context *
219 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
220 {
221         return idr_find(&instance->context_map, handle);
222 }
223
224 static void
225 release_msg_context(struct mmal_msg_context *msg_context)
226 {
227         struct vchiq_mmal_instance *instance = msg_context->instance;
228
229         mutex_lock(&instance->context_map_lock);
230         idr_remove(&instance->context_map, msg_context->handle);
231         mutex_unlock(&instance->context_map_lock);
232         kfree(msg_context);
233 }
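/*
 * Informal sketch of a context's lifetime, as used by
 * send_synchronous_mmal_msg():
 *
 *   ctx = get_msg_context(instance);   // kzalloc + idr_alloc -> ctx->handle
 *   msg->h.context = ctx->handle;      // handle travels with the message
 *   ...                                // reply arrives in service_callback(),
 *                                      // lookup_msg_context(instance, handle)
 *   release_msg_context(ctx);          // idr_remove + kfree
 */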
234
235 /* deals with receipt of event to host message */
236 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
237                              struct mmal_msg *msg, u32 msg_len)
238 {
239         pr_debug("unhandled event\n");
240         pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
241                  msg->u.event_to_host.client_component,
242                  msg->u.event_to_host.port_type,
243                  msg->u.event_to_host.port_num,
244                  msg->u.event_to_host.cmd, msg->u.event_to_host.length);
245 }
246
247 /* workqueue scheduled callback
248  *
249  * we do this because it is important we do not call any other vchiq
 250  * sync calls from within the message delivery thread
251  */
252 static void buffer_work_cb(struct work_struct *work)
253 {
254         struct mmal_msg_context *msg_context =
255                 container_of(work, struct mmal_msg_context, u.bulk.work);
256         struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
257
258         if (!buffer) {
259                 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
260                        __func__, msg_context);
261                 return;
262         }
263
264         buffer->length = msg_context->u.bulk.buffer_used;
265         buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
266         buffer->dts = msg_context->u.bulk.dts;
267         buffer->pts = msg_context->u.bulk.pts;
268
269         atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
270
271         msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
272                                             msg_context->u.bulk.port,
273                                             msg_context->u.bulk.status,
274                                             msg_context->u.bulk.buffer);
275 }
276
277 /* workqueue scheduled callback to handle receiving buffers
278  *
279  * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
280  * If we block in the service_callback context then we can't process the
281  * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
282  * vchi_bulk_queue_receive() call to complete.
283  */
284 static void buffer_to_host_work_cb(struct work_struct *work)
285 {
286         struct mmal_msg_context *msg_context =
287                 container_of(work, struct mmal_msg_context,
288                              u.bulk.buffer_to_host_work);
289         struct vchiq_mmal_instance *instance = msg_context->instance;
290         unsigned long len = msg_context->u.bulk.buffer_used;
291         int ret;
292
293         if (!len)
294                 /* Dummy receive to ensure the buffers remain in order */
295                 len = 8;
296         /* queue the bulk submission */
297         vchi_service_use(instance->service);
298         ret = vchi_bulk_queue_receive(instance->service,
299                                       msg_context->u.bulk.buffer->buffer,
300                                       /* Actual receive needs to be a multiple
301                                        * of 4 bytes
302                                        */
303                                       (len + 3) & ~3,
304                                       VCHI_FLAGS_CALLBACK_WHEN_OP_COMPLETE |
305                                       VCHI_FLAGS_BLOCK_UNTIL_QUEUED,
306                                       msg_context);
307
308         vchi_service_release(instance->service);
309
310         if (ret != 0)
311                 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
312                        __func__, msg_context, ret);
313 }
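/*
 * Note on the length rounding above: "(len + 3) & ~3" rounds the receive up
 * to a multiple of four bytes (e.g. a 13-byte payload is queued as a 16-byte
 * bulk receive); the real length is still reported via u.bulk.buffer_used.
 */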
314
315 /* enqueue a bulk receive for a given message context */
316 static int bulk_receive(struct vchiq_mmal_instance *instance,
317                         struct mmal_msg *msg,
318                         struct mmal_msg_context *msg_context)
319 {
320         unsigned long rd_len;
321
322         rd_len = msg->u.buffer_from_host.buffer_header.length;
323
324         if (!msg_context->u.bulk.buffer) {
325                 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
326
327                 /* todo: this is a serious error, we should never have
328                  * committed a buffer_to_host operation to the mmal
329                  * port without the buffer to back it up (underflow
330                  * handling) and there is no obvious way to deal with
 331  * this - how is the mmal service going to react when
332                  * we fail to do the xfer and reschedule a buffer when
333                  * it arrives? perhaps a starved flag to indicate a
334                  * waiting bulk receive?
335                  */
336
337                 return -EINVAL;
338         }
339
340         /* ensure we do not overrun the available buffer */
341         if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
342                 rd_len = msg_context->u.bulk.buffer->buffer_size;
343                 pr_warn("short read as not enough receive buffer space\n");
344                 /* todo: is this the correct response, what happens to
345                  * the rest of the message data?
346                  */
347         }
348
349         /* store length */
350         msg_context->u.bulk.buffer_used = rd_len;
351         msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
352         msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
353
354         queue_work(msg_context->instance->bulk_wq,
355                    &msg_context->u.bulk.buffer_to_host_work);
356
357         return 0;
358 }
359
360 /* data in message, memcpy from packet into output buffer */
361 static int inline_receive(struct vchiq_mmal_instance *instance,
362                           struct mmal_msg *msg,
363                           struct mmal_msg_context *msg_context)
364 {
365         memcpy(msg_context->u.bulk.buffer->buffer,
366                msg->u.buffer_from_host.short_data,
367                msg->u.buffer_from_host.payload_in_message);
368
369         msg_context->u.bulk.buffer_used =
370             msg->u.buffer_from_host.payload_in_message;
371
372         return 0;
373 }
374
375 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
376 static int
377 buffer_from_host(struct vchiq_mmal_instance *instance,
378                  struct vchiq_mmal_port *port, struct mmal_buffer *buf)
379 {
380         struct mmal_msg_context *msg_context;
381         struct mmal_msg m;
382         int ret;
383
384         if (!port->enabled)
385                 return -EINVAL;
386
387         pr_debug("instance:%p buffer:%p\n", instance->service, buf);
388
389         /* get context */
390         if (!buf->msg_context) {
391                 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
392                        buf);
393                 return -EINVAL;
394         }
395         msg_context = buf->msg_context;
396
397         /* store bulk message context for when data arrives */
398         msg_context->u.bulk.instance = instance;
399         msg_context->u.bulk.port = port;
400         msg_context->u.bulk.buffer = buf;
401         msg_context->u.bulk.buffer_used = 0;
402
403         /* initialise work structure ready to schedule callback */
404         INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
405         INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
406                   buffer_to_host_work_cb);
407
408         atomic_inc(&port->buffers_with_vpu);
409
410         /* prep the buffer from host message */
411         memset(&m, 0xbc, sizeof(m));    /* just to make debug clearer */
412
413         m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
414         m.h.magic = MMAL_MAGIC;
415         m.h.context = msg_context->handle;
416         m.h.status = 0;
417
418         /* drvbuf is our private data passed back */
419         m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
420         m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
421         m.u.buffer_from_host.drvbuf.port_handle = port->handle;
422         m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
423
424         /* buffer header */
425         m.u.buffer_from_host.buffer_header.cmd = 0;
426         m.u.buffer_from_host.buffer_header.data =
427                 (u32)(unsigned long)buf->buffer;
428         m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
429         m.u.buffer_from_host.buffer_header.length = 0;  /* nothing used yet */
430         m.u.buffer_from_host.buffer_header.offset = 0;  /* no offset */
431         m.u.buffer_from_host.buffer_header.flags = 0;   /* no flags */
432         m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
433         m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
434
 435         /* clear buffer type specific data */
436         memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
437                sizeof(m.u.buffer_from_host.buffer_header_type_specific));
438
439         /* no payload in message */
440         m.u.buffer_from_host.payload_in_message = 0;
441
442         vchi_service_use(instance->service);
443
444         ret = vchi_queue_kernel_message(instance->service,
445                                         &m,
446                                         sizeof(struct mmal_msg_header) +
447                                         sizeof(m.u.buffer_from_host));
448
449         vchi_service_release(instance->service);
450
451         return ret;
452 }
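/*
 * Roughly: this only hands the empty buffer to the VPU. The data comes back
 * later as a MMAL_MSG_TYPE_BUFFER_TO_HOST message handled by
 * buffer_to_host_cb() below, matched to this buffer through
 * drvbuf.client_context (the msg_context handle set above).
 */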
453
454 /* deals with receipt of buffer to host message */
455 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
456                               struct mmal_msg *msg, u32 msg_len)
457 {
458         struct mmal_msg_context *msg_context;
459         u32 handle;
460
461         pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
462                  __func__, instance, msg, msg_len);
463
464         if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
465                 handle = msg->u.buffer_from_host.drvbuf.client_context;
466                 msg_context = lookup_msg_context(instance, handle);
467
468                 if (!msg_context) {
469                         pr_err("drvbuf.client_context(%u) is invalid\n",
470                                handle);
471                         return;
472                 }
473         } else {
474                 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
475                 return;
476         }
477
478         msg_context->u.bulk.mmal_flags =
479                                 msg->u.buffer_from_host.buffer_header.flags;
480
481         if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
482                 /* message reception had an error */
483                 pr_warn("error %d in reply\n", msg->h.status);
484
485                 msg_context->u.bulk.status = msg->h.status;
486
487         } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
488                 /* empty buffer */
489                 if (msg->u.buffer_from_host.buffer_header.flags &
490                     MMAL_BUFFER_HEADER_FLAG_EOS) {
491                         msg_context->u.bulk.status =
492                             bulk_receive(instance, msg, msg_context);
493                         if (msg_context->u.bulk.status == 0)
494                                 return; /* successful bulk submission, bulk
495                                          * completion will trigger callback
496                                          */
497                 } else {
498                         /* do callback with empty buffer - not EOS though */
499                         msg_context->u.bulk.status = 0;
500                         msg_context->u.bulk.buffer_used = 0;
501                 }
502         } else if (msg->u.buffer_from_host.payload_in_message == 0) {
503                 /* data is not in message, queue a bulk receive */
504                 msg_context->u.bulk.status =
505                     bulk_receive(instance, msg, msg_context);
506                 if (msg_context->u.bulk.status == 0)
507                         return; /* successful bulk submission, bulk
508                                  * completion will trigger callback
509                                  */
510
511                 /* failed to submit buffer, this will end badly */
512                 pr_err("error %d on bulk submission\n",
513                        msg_context->u.bulk.status);
514
515         } else if (msg->u.buffer_from_host.payload_in_message <=
516                    MMAL_VC_SHORT_DATA) {
517                 /* data payload within message */
518                 msg_context->u.bulk.status = inline_receive(instance, msg,
519                                                             msg_context);
520         } else {
521                 pr_err("message with invalid short payload\n");
522
523                 /* signal error */
524                 msg_context->u.bulk.status = -EINVAL;
525                 msg_context->u.bulk.buffer_used =
526                     msg->u.buffer_from_host.payload_in_message;
527         }
528
529         /* schedule the port callback */
530         schedule_work(&msg_context->u.bulk.work);
531 }
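/*
 * Informal summary of the cases above: a non-success header status is passed
 * straight to the port callback; a zero-length EOS buffer, and buffers whose
 * payload was not embedded in the message (payload_in_message == 0), are
 * fetched with bulk_receive(); payloads of up to MMAL_VC_SHORT_DATA bytes are
 * copied by inline_receive(); anything else is flagged -EINVAL. In every
 * non-bulk case the port callback is scheduled via u.bulk.work.
 */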
532
533 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
534                             struct mmal_msg_context *msg_context)
535 {
536         msg_context->u.bulk.status = 0;
537
538         /* schedule the port callback */
539         schedule_work(&msg_context->u.bulk.work);
540 }
541
542 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
543                           struct mmal_msg_context *msg_context)
544 {
545         pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
546
547         msg_context->u.bulk.status = -EINTR;
548
549         schedule_work(&msg_context->u.bulk.work);
550 }
551
552 /* incoming event service callback */
553 static void service_callback(void *param,
554                              const enum vchi_callback_reason reason,
555                              void *bulk_ctx)
556 {
557         struct vchiq_mmal_instance *instance = param;
558         int status;
559         u32 msg_len;
560         struct mmal_msg *msg;
561         struct vchi_held_msg msg_handle;
562         struct mmal_msg_context *msg_context;
563
564         if (!instance) {
565                 pr_err("Message callback passed NULL instance\n");
566                 return;
567         }
568
569         switch (reason) {
570         case VCHI_CALLBACK_MSG_AVAILABLE:
571                 status = vchi_msg_hold(instance->service, (void **)&msg,
572                                        &msg_len, VCHI_FLAGS_NONE, &msg_handle);
573                 if (status) {
574                         pr_err("Unable to dequeue a message (%d)\n", status);
575                         break;
576                 }
577
578                 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
579
580                 /* handling is different for buffer messages */
581                 switch (msg->h.type) {
582                 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
583                         vchi_held_msg_release(&msg_handle);
584                         break;
585
586                 case MMAL_MSG_TYPE_EVENT_TO_HOST:
587                         event_to_host_cb(instance, msg, msg_len);
588                         vchi_held_msg_release(&msg_handle);
589
590                         break;
591
592                 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
593                         buffer_to_host_cb(instance, msg, msg_len);
594                         vchi_held_msg_release(&msg_handle);
595                         break;
596
597                 default:
598                         /* messages dependent on header context to complete */
599                         if (!msg->h.context) {
600                                 pr_err("received message context was null!\n");
601                                 vchi_held_msg_release(&msg_handle);
602                                 break;
603                         }
604
605                         msg_context = lookup_msg_context(instance,
606                                                          msg->h.context);
607                         if (!msg_context) {
608                                 pr_err("received invalid message context %u!\n",
609                                        msg->h.context);
610                                 vchi_held_msg_release(&msg_handle);
611                                 break;
612                         }
613
614                         /* fill in context values */
615                         msg_context->u.sync.msg_handle = msg_handle;
616                         msg_context->u.sync.msg = msg;
617                         msg_context->u.sync.msg_len = msg_len;
618
619                         /* todo: should this check (completion_done()
620                          * == 1) for no one waiting? or do we need a
621                          * flag to tell us the completion has been
622                          * interrupted so we can free the message and
623                          * its context. This probably also solves the
624                          * message arriving after interruption todo
625                          * below
626                          */
627
628                         /* complete message so caller knows it happened */
629                         complete(&msg_context->u.sync.cmplt);
630                         break;
631                 }
632
633                 break;
634
635         case VCHI_CALLBACK_BULK_RECEIVED:
636                 bulk_receive_cb(instance, bulk_ctx);
637                 break;
638
639         case VCHI_CALLBACK_BULK_RECEIVE_ABORTED:
640                 bulk_abort_cb(instance, bulk_ctx);
641                 break;
642
643         case VCHI_CALLBACK_SERVICE_CLOSED:
644                 /* TODO: consider if this requires action if received when
645                  * driver is not explicitly closing the service
646                  */
647                 break;
648
649         default:
650                 pr_err("Received unhandled message reason %d\n", reason);
651                 break;
652         }
653 }
654
655 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
656                                      struct mmal_msg *msg,
657                                      unsigned int payload_len,
658                                      struct mmal_msg **msg_out,
659                                      struct vchi_held_msg *msg_handle_out)
660 {
661         struct mmal_msg_context *msg_context;
662         int ret;
663         unsigned long timeout;
664
665         /* payload size must not cause message to exceed max size */
666         if (payload_len >
667             (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
668                 pr_err("payload length %d exceeds max:%d\n", payload_len,
669                        (int)(MMAL_MSG_MAX_SIZE -
670                             sizeof(struct mmal_msg_header)));
671                 return -EINVAL;
672         }
673
674         msg_context = get_msg_context(instance);
675         if (IS_ERR(msg_context))
676                 return PTR_ERR(msg_context);
677
678         init_completion(&msg_context->u.sync.cmplt);
679
680         msg->h.magic = MMAL_MAGIC;
681         msg->h.context = msg_context->handle;
682         msg->h.status = 0;
683
684         DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
685                      ">>> sync message");
686
687         vchi_service_use(instance->service);
688
689         ret = vchi_queue_kernel_message(instance->service,
690                                         msg,
691                                         sizeof(struct mmal_msg_header) +
692                                         payload_len);
693
694         vchi_service_release(instance->service);
695
696         if (ret) {
697                 pr_err("error %d queuing message\n", ret);
698                 release_msg_context(msg_context);
699                 return ret;
700         }
701
702         timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
703                                               SYNC_MSG_TIMEOUT * HZ);
704         if (timeout == 0) {
705                 pr_err("timed out waiting for sync completion\n");
706                 ret = -ETIME;
707                 /* todo: what happens if the message arrives after aborting */
708                 release_msg_context(msg_context);
709                 return ret;
710         }
711
712         *msg_out = msg_context->u.sync.msg;
713         *msg_handle_out = msg_context->u.sync.msg_handle;
714         release_msg_context(msg_context);
715
716         return 0;
717 }
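/*
 * Informal sketch of the caller pattern used throughout this file (see e.g.
 * enable_component() below); <payload> stands for the relevant union member:
 *
 *   m.h.type = MMAL_MSG_TYPE_...;
 *   ... fill in m.u.<payload> ...
 *   ret = send_synchronous_mmal_msg(instance, &m, sizeof(m.u.<payload>),
 *                                   &rmsg, &rmsg_handle);
 *   if (ret)
 *           return ret;
 *   if (rmsg->h.type != m.h.type)
 *           ret = -EINVAL;
 *   else
 *           ret = -rmsg->u.<payload>_reply.status;
 *   vchi_held_msg_release(&rmsg_handle);
 *   return ret;
 */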
718
719 static void dump_port_info(struct vchiq_mmal_port *port)
720 {
721         pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
722
723         pr_debug("buffer minimum num:%d size:%d align:%d\n",
724                  port->minimum_buffer.num,
725                  port->minimum_buffer.size, port->minimum_buffer.alignment);
726
727         pr_debug("buffer recommended num:%d size:%d align:%d\n",
728                  port->recommended_buffer.num,
729                  port->recommended_buffer.size,
730                  port->recommended_buffer.alignment);
731
732         pr_debug("buffer current values num:%d size:%d align:%d\n",
733                  port->current_buffer.num,
734                  port->current_buffer.size, port->current_buffer.alignment);
735
736         pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
737                  port->format.type,
738                  port->format.encoding, port->format.encoding_variant);
739
740         pr_debug("                  bitrate:%d flags:0x%x\n",
741                  port->format.bitrate, port->format.flags);
742
743         if (port->format.type == MMAL_ES_TYPE_VIDEO) {
744                 pr_debug
745                     ("es video format: width:%d height:%d colourspace:0x%x\n",
746                      port->es.video.width, port->es.video.height,
747                      port->es.video.color_space);
748
749                 pr_debug("               : crop xywh %d,%d,%d,%d\n",
750                          port->es.video.crop.x,
751                          port->es.video.crop.y,
752                          port->es.video.crop.width, port->es.video.crop.height);
753                 pr_debug("               : framerate %d/%d  aspect %d/%d\n",
754                          port->es.video.frame_rate.num,
755                          port->es.video.frame_rate.den,
756                          port->es.video.par.num, port->es.video.par.den);
757         }
758 }
759
760 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
761 {
 762         /* todo: do read-only fields need setting at all? */
763         p->type = port->type;
764         p->index = port->index;
765         p->index_all = 0;
766         p->is_enabled = port->enabled;
767         p->buffer_num_min = port->minimum_buffer.num;
768         p->buffer_size_min = port->minimum_buffer.size;
769         p->buffer_alignment_min = port->minimum_buffer.alignment;
770         p->buffer_num_recommended = port->recommended_buffer.num;
771         p->buffer_size_recommended = port->recommended_buffer.size;
772
773         /* only three writable fields in a port */
774         p->buffer_num = port->current_buffer.num;
775         p->buffer_size = port->current_buffer.size;
776         p->userdata = (u32)(unsigned long)port;
777 }
778
779 static int port_info_set(struct vchiq_mmal_instance *instance,
780                          struct vchiq_mmal_port *port)
781 {
782         int ret;
783         struct mmal_msg m;
784         struct mmal_msg *rmsg;
785         struct vchi_held_msg rmsg_handle;
786
787         pr_debug("setting port info port %p\n", port);
788         if (!port)
789                 return -1;
790         dump_port_info(port);
791
792         m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
793
794         m.u.port_info_set.component_handle = port->component->handle;
795         m.u.port_info_set.port_type = port->type;
796         m.u.port_info_set.port_index = port->index;
797
798         port_to_mmal_msg(port, &m.u.port_info_set.port);
799
800         /* elementary stream format setup */
801         m.u.port_info_set.format.type = port->format.type;
802         m.u.port_info_set.format.encoding = port->format.encoding;
803         m.u.port_info_set.format.encoding_variant =
804             port->format.encoding_variant;
805         m.u.port_info_set.format.bitrate = port->format.bitrate;
806         m.u.port_info_set.format.flags = port->format.flags;
807
808         memcpy(&m.u.port_info_set.es, &port->es,
809                sizeof(union mmal_es_specific_format));
810
811         m.u.port_info_set.format.extradata_size = port->format.extradata_size;
812         memcpy(&m.u.port_info_set.extradata, port->format.extradata,
813                port->format.extradata_size);
814
815         ret = send_synchronous_mmal_msg(instance, &m,
816                                         sizeof(m.u.port_info_set),
817                                         &rmsg, &rmsg_handle);
818         if (ret)
819                 return ret;
820
821         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
822                 /* got an unexpected message type in reply */
823                 ret = -EINVAL;
824                 goto release_msg;
825         }
826
827         /* return operation status */
828         ret = -rmsg->u.port_info_get_reply.status;
829
830         pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
831                  port->component->handle, port->handle);
832
833 release_msg:
834         vchi_held_msg_release(&rmsg_handle);
835
836         return ret;
837 }
838
839 /* use port info get message to retrieve port information */
840 static int port_info_get(struct vchiq_mmal_instance *instance,
841                          struct vchiq_mmal_port *port)
842 {
843         int ret;
844         struct mmal_msg m;
845         struct mmal_msg *rmsg;
846         struct vchi_held_msg rmsg_handle;
847
848         /* port info time */
849         m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
850         m.u.port_info_get.component_handle = port->component->handle;
851         m.u.port_info_get.port_type = port->type;
852         m.u.port_info_get.index = port->index;
853
854         ret = send_synchronous_mmal_msg(instance, &m,
855                                         sizeof(m.u.port_info_get),
856                                         &rmsg, &rmsg_handle);
857         if (ret)
858                 return ret;
859
860         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
861                 /* got an unexpected message type in reply */
862                 ret = -EINVAL;
863                 goto release_msg;
864         }
865
866         /* return operation status */
867         ret = -rmsg->u.port_info_get_reply.status;
868         if (ret != MMAL_MSG_STATUS_SUCCESS)
869                 goto release_msg;
870
871         if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
872                 port->enabled = 0;
873         else
874                 port->enabled = 1;
875
876         /* copy the values out of the message */
877         port->handle = rmsg->u.port_info_get_reply.port_handle;
878
879         /* port type and index cached to use on port info set because
880          * it does not use a port handle
881          */
882         port->type = rmsg->u.port_info_get_reply.port_type;
883         port->index = rmsg->u.port_info_get_reply.port_index;
884
885         port->minimum_buffer.num =
886             rmsg->u.port_info_get_reply.port.buffer_num_min;
887         port->minimum_buffer.size =
888             rmsg->u.port_info_get_reply.port.buffer_size_min;
889         port->minimum_buffer.alignment =
890             rmsg->u.port_info_get_reply.port.buffer_alignment_min;
891
892         port->recommended_buffer.alignment =
893             rmsg->u.port_info_get_reply.port.buffer_alignment_min;
894         port->recommended_buffer.num =
895             rmsg->u.port_info_get_reply.port.buffer_num_recommended;
896
897         port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
898         port->current_buffer.size =
899             rmsg->u.port_info_get_reply.port.buffer_size;
900
901         /* stream format */
902         port->format.type = rmsg->u.port_info_get_reply.format.type;
903         port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
904         port->format.encoding_variant =
905             rmsg->u.port_info_get_reply.format.encoding_variant;
906         port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
907         port->format.flags = rmsg->u.port_info_get_reply.format.flags;
908
909         /* elementary stream format */
910         memcpy(&port->es,
911                &rmsg->u.port_info_get_reply.es,
912                sizeof(union mmal_es_specific_format));
913         port->format.es = &port->es;
914
915         port->format.extradata_size =
916             rmsg->u.port_info_get_reply.format.extradata_size;
917         memcpy(port->format.extradata,
918                rmsg->u.port_info_get_reply.extradata,
919                port->format.extradata_size);
920
921         pr_debug("received port info\n");
922         dump_port_info(port);
923
924 release_msg:
925
926         pr_debug("%s:result:%d component:0x%x port:%d\n",
927                  __func__, ret, port->component->handle, port->handle);
928
929         vchi_held_msg_release(&rmsg_handle);
930
931         return ret;
932 }
933
 934 /* create component on vc */
935 static int create_component(struct vchiq_mmal_instance *instance,
936                             struct vchiq_mmal_component *component,
937                             const char *name)
938 {
939         int ret;
940         struct mmal_msg m;
941         struct mmal_msg *rmsg;
942         struct vchi_held_msg rmsg_handle;
943
944         /* build component create message */
945         m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
946         m.u.component_create.client_component = component->client_component;
947         strncpy(m.u.component_create.name, name,
948                 sizeof(m.u.component_create.name));
949
950         ret = send_synchronous_mmal_msg(instance, &m,
951                                         sizeof(m.u.component_create),
952                                         &rmsg, &rmsg_handle);
953         if (ret)
954                 return ret;
955
956         if (rmsg->h.type != m.h.type) {
957                 /* got an unexpected message type in reply */
958                 ret = -EINVAL;
959                 goto release_msg;
960         }
961
962         ret = -rmsg->u.component_create_reply.status;
963         if (ret != MMAL_MSG_STATUS_SUCCESS)
964                 goto release_msg;
965
966         /* a valid component response received */
967         component->handle = rmsg->u.component_create_reply.component_handle;
968         component->inputs = rmsg->u.component_create_reply.input_num;
969         component->outputs = rmsg->u.component_create_reply.output_num;
970         component->clocks = rmsg->u.component_create_reply.clock_num;
971
972         pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
973                  component->handle,
974                  component->inputs, component->outputs, component->clocks);
975
976 release_msg:
977         vchi_held_msg_release(&rmsg_handle);
978
979         return ret;
980 }
981
982 /* destroys a component on vc */
983 static int destroy_component(struct vchiq_mmal_instance *instance,
984                              struct vchiq_mmal_component *component)
985 {
986         int ret;
987         struct mmal_msg m;
988         struct mmal_msg *rmsg;
989         struct vchi_held_msg rmsg_handle;
990
991         m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
992         m.u.component_destroy.component_handle = component->handle;
993
994         ret = send_synchronous_mmal_msg(instance, &m,
995                                         sizeof(m.u.component_destroy),
996                                         &rmsg, &rmsg_handle);
997         if (ret)
998                 return ret;
999
1000         if (rmsg->h.type != m.h.type) {
1001                 /* got an unexpected message type in reply */
1002                 ret = -EINVAL;
1003                 goto release_msg;
1004         }
1005
1006         ret = -rmsg->u.component_destroy_reply.status;
1007
1008 release_msg:
1009
1010         vchi_held_msg_release(&rmsg_handle);
1011
1012         return ret;
1013 }
1014
1015 /* enable a component on vc */
1016 static int enable_component(struct vchiq_mmal_instance *instance,
1017                             struct vchiq_mmal_component *component)
1018 {
1019         int ret;
1020         struct mmal_msg m;
1021         struct mmal_msg *rmsg;
1022         struct vchi_held_msg rmsg_handle;
1023
1024         m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1025         m.u.component_enable.component_handle = component->handle;
1026
1027         ret = send_synchronous_mmal_msg(instance, &m,
1028                                         sizeof(m.u.component_enable),
1029                                         &rmsg, &rmsg_handle);
1030         if (ret)
1031                 return ret;
1032
1033         if (rmsg->h.type != m.h.type) {
1034                 /* got an unexpected message type in reply */
1035                 ret = -EINVAL;
1036                 goto release_msg;
1037         }
1038
1039         ret = -rmsg->u.component_enable_reply.status;
1040
1041 release_msg:
1042         vchi_held_msg_release(&rmsg_handle);
1043
1044         return ret;
1045 }
1046
1047 /* disable a component on vc */
1048 static int disable_component(struct vchiq_mmal_instance *instance,
1049                              struct vchiq_mmal_component *component)
1050 {
1051         int ret;
1052         struct mmal_msg m;
1053         struct mmal_msg *rmsg;
1054         struct vchi_held_msg rmsg_handle;
1055
1056         m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1057         m.u.component_disable.component_handle = component->handle;
1058
1059         ret = send_synchronous_mmal_msg(instance, &m,
1060                                         sizeof(m.u.component_disable),
1061                                         &rmsg, &rmsg_handle);
1062         if (ret)
1063                 return ret;
1064
1065         if (rmsg->h.type != m.h.type) {
1066                 /* got an unexpected message type in reply */
1067                 ret = -EINVAL;
1068                 goto release_msg;
1069         }
1070
1071         ret = -rmsg->u.component_disable_reply.status;
1072
1073 release_msg:
1074
1075         vchi_held_msg_release(&rmsg_handle);
1076
1077         return ret;
1078 }
1079
1080 /* get version of mmal implementation */
1081 static int get_version(struct vchiq_mmal_instance *instance,
1082                        u32 *major_out, u32 *minor_out)
1083 {
1084         int ret;
1085         struct mmal_msg m;
1086         struct mmal_msg *rmsg;
1087         struct vchi_held_msg rmsg_handle;
1088
1089         m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1090
1091         ret = send_synchronous_mmal_msg(instance, &m,
1092                                         sizeof(m.u.version),
1093                                         &rmsg, &rmsg_handle);
1094         if (ret)
1095                 return ret;
1096
1097         if (rmsg->h.type != m.h.type) {
1098                 /* got an unexpected message type in reply */
1099                 ret = -EINVAL;
1100                 goto release_msg;
1101         }
1102
1103         *major_out = rmsg->u.version.major;
1104         *minor_out = rmsg->u.version.minor;
1105
1106 release_msg:
1107         vchi_held_msg_release(&rmsg_handle);
1108
1109         return ret;
1110 }
1111
1112 /* do a port action with a port as a parameter */
1113 static int port_action_port(struct vchiq_mmal_instance *instance,
1114                             struct vchiq_mmal_port *port,
1115                             enum mmal_msg_port_action_type action_type)
1116 {
1117         int ret;
1118         struct mmal_msg m;
1119         struct mmal_msg *rmsg;
1120         struct vchi_held_msg rmsg_handle;
1121
1122         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1123         m.u.port_action_port.component_handle = port->component->handle;
1124         m.u.port_action_port.port_handle = port->handle;
1125         m.u.port_action_port.action = action_type;
1126
1127         port_to_mmal_msg(port, &m.u.port_action_port.port);
1128
1129         ret = send_synchronous_mmal_msg(instance, &m,
1130                                         sizeof(m.u.port_action_port),
1131                                         &rmsg, &rmsg_handle);
1132         if (ret)
1133                 return ret;
1134
1135         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1136                 /* got an unexpected message type in reply */
1137                 ret = -EINVAL;
1138                 goto release_msg;
1139         }
1140
1141         ret = -rmsg->u.port_action_reply.status;
1142
1143         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1144                  __func__,
1145                  ret, port->component->handle, port->handle,
1146                  port_action_type_names[action_type], action_type);
1147
1148 release_msg:
1149         vchi_held_msg_release(&rmsg_handle);
1150
1151         return ret;
1152 }
1153
1154 /* do a port action with handles as parameters */
1155 static int port_action_handle(struct vchiq_mmal_instance *instance,
1156                               struct vchiq_mmal_port *port,
1157                               enum mmal_msg_port_action_type action_type,
1158                               u32 connect_component_handle,
1159                               u32 connect_port_handle)
1160 {
1161         int ret;
1162         struct mmal_msg m;
1163         struct mmal_msg *rmsg;
1164         struct vchi_held_msg rmsg_handle;
1165
1166         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1167
1168         m.u.port_action_handle.component_handle = port->component->handle;
1169         m.u.port_action_handle.port_handle = port->handle;
1170         m.u.port_action_handle.action = action_type;
1171
1172         m.u.port_action_handle.connect_component_handle =
1173             connect_component_handle;
1174         m.u.port_action_handle.connect_port_handle = connect_port_handle;
1175
1176         ret = send_synchronous_mmal_msg(instance, &m,
1177                                         sizeof(m.u.port_action_handle),
1178                                         &rmsg, &rmsg_handle);
1179         if (ret)
1180                 return ret;
1181
1182         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1183                 /* got an unexpected message type in reply */
1184                 ret = -EINVAL;
1185                 goto release_msg;
1186         }
1187
1188         ret = -rmsg->u.port_action_reply.status;
1189
1190         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1191                  __func__,
1192                  ret, port->component->handle, port->handle,
1193                  port_action_type_names[action_type],
1194                  action_type, connect_component_handle, connect_port_handle);
1195
1196 release_msg:
1197         vchi_held_msg_release(&rmsg_handle);
1198
1199         return ret;
1200 }
1201
1202 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1203                               struct vchiq_mmal_port *port,
1204                               u32 parameter_id, void *value, u32 value_size)
1205 {
1206         int ret;
1207         struct mmal_msg m;
1208         struct mmal_msg *rmsg;
1209         struct vchi_held_msg rmsg_handle;
1210
1211         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1212
1213         m.u.port_parameter_set.component_handle = port->component->handle;
1214         m.u.port_parameter_set.port_handle = port->handle;
1215         m.u.port_parameter_set.id = parameter_id;
1216         m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1217         memcpy(&m.u.port_parameter_set.value, value, value_size);
1218
1219         ret = send_synchronous_mmal_msg(instance, &m,
1220                                         (4 * sizeof(u32)) + value_size,
1221                                         &rmsg, &rmsg_handle);
1222         if (ret)
1223                 return ret;
1224
1225         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1226                 /* got an unexpected message type in reply */
1227                 ret = -EINVAL;
1228                 goto release_msg;
1229         }
1230
1231         ret = -rmsg->u.port_parameter_set_reply.status;
1232
1233         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1234                  __func__,
1235                  ret, port->component->handle, port->handle, parameter_id);
1236
1237 release_msg:
1238         vchi_held_msg_release(&rmsg_handle);
1239
1240         return ret;
1241 }
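/*
 * Size arithmetic above, informally: .size covers the two u32 header words
 * (apparently id and size) plus the value (2 * sizeof(u32) + value_size),
 * while the payload handed to send_synchronous_mmal_msg() also covers the
 * component and port handles (4 * sizeof(u32) + value_size). For a single
 * u32 value that is 12 and 20 bytes respectively.
 */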
1242
1243 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1244                               struct vchiq_mmal_port *port,
1245                               u32 parameter_id, void *value, u32 *value_size)
1246 {
1247         int ret;
1248         struct mmal_msg m;
1249         struct mmal_msg *rmsg;
1250         struct vchi_held_msg rmsg_handle;
1251
1252         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1253
1254         m.u.port_parameter_get.component_handle = port->component->handle;
1255         m.u.port_parameter_get.port_handle = port->handle;
1256         m.u.port_parameter_get.id = parameter_id;
1257         m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1258
1259         ret = send_synchronous_mmal_msg(instance, &m,
1260                                         sizeof(struct
1261                                                mmal_msg_port_parameter_get),
1262                                         &rmsg, &rmsg_handle);
1263         if (ret)
1264                 return ret;
1265
1266         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1267                 /* got an unexpected message type in reply */
1268                 pr_err("Incorrect reply type %d\n", rmsg->h.type);
1269                 ret = -EINVAL;
1270                 goto release_msg;
1271         }
1272
1273         ret = rmsg->u.port_parameter_get_reply.status;
1274
1275         /* port_parameter_get_reply.size includes the header,
1276          * whilst *value_size doesn't.
1277          */
1278         rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1279
1280         if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1281                 /* Copy only as much as we have space for
1282                  * but report true size of parameter
1283                  */
1284                 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1285                        *value_size);
1286         } else {
1287                 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1288                        rmsg->u.port_parameter_get_reply.size);
1289         }
1290         /* Always report the size of the returned parameter to the caller */
1291         *value_size = rmsg->u.port_parameter_get_reply.size;
1292
1293         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1294                  ret, port->component->handle, port->handle, parameter_id);
1295
1296 release_msg:
1297         vchi_held_msg_release(&rmsg_handle);
1298
1299         return ret;
1300 }
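/*
 * Informal example of the size handling above: if the VPU reports a reply
 * size of 12 for a u32 parameter, the 2 * sizeof(u32) header is subtracted
 * leaving 4, which is copied out (capped at the caller's buffer size) and
 * then reported back through *value_size.
 */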
1301
1302 /* disables a port and drains buffers from it */
1303 static int port_disable(struct vchiq_mmal_instance *instance,
1304                         struct vchiq_mmal_port *port)
1305 {
1306         int ret;
1307         struct list_head *q, *buf_head;
1308         unsigned long flags = 0;
1309
1310         if (!port->enabled)
1311                 return 0;
1312
1313         port->enabled = 0;
1314
1315         ret = port_action_port(instance, port,
1316                                MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1317         if (ret == 0) {
1318                 /*
1319                  * Drain all queued buffers on port. This should only
1320                  * apply to buffers that have been queued before the port
1321                  * has been enabled. If the port has been enabled and buffers
1322                  * passed, then the buffers should have been removed from this
1323                  * list, and we should get the relevant callbacks via VCHIQ
1324                  * to release the buffers.
1325                  */
1326                 spin_lock_irqsave(&port->slock, flags);
1327
1328                 list_for_each_safe(buf_head, q, &port->buffers) {
1329                         struct mmal_buffer *mmalbuf;
1330
1331                         mmalbuf = list_entry(buf_head, struct mmal_buffer,
1332                                              list);
1333                         list_del(buf_head);
1334                         if (port->buffer_cb) {
1335                                 mmalbuf->length = 0;
1336                                 mmalbuf->mmal_flags = 0;
1337                                 mmalbuf->dts = MMAL_TIME_UNKNOWN;
1338                                 mmalbuf->pts = MMAL_TIME_UNKNOWN;
1339                                 port->buffer_cb(instance,
1340                                                 port, 0, mmalbuf);
1341                         }
1342                 }
1343
1344                 spin_unlock_irqrestore(&port->slock, flags);
1345
1346                 ret = port_info_get(instance, port);
1347         }
1348
1349         return ret;
1350 }
1351
1352 /* enable a port */
1353 static int port_enable(struct vchiq_mmal_instance *instance,
1354                        struct vchiq_mmal_port *port)
1355 {
1356         unsigned int hdr_count;
1357         struct list_head *q, *buf_head;
1358         int ret;
1359
1360         if (port->enabled)
1361                 return 0;
1362
1363         ret = port_action_port(instance, port,
1364                                MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1365         if (ret)
1366                 goto done;
1367
1368         port->enabled = 1;
1369
1370         if (port->buffer_cb) {
1371                 /* send buffer headers to videocore */
1372                 hdr_count = 1;
1373                 list_for_each_safe(buf_head, q, &port->buffers) {
1374                         struct mmal_buffer *mmalbuf;
1375
1376                         mmalbuf = list_entry(buf_head, struct mmal_buffer,
1377                                              list);
1378                         ret = buffer_from_host(instance, port, mmalbuf);
1379                         if (ret)
1380                                 goto done;
1381
1382                         list_del(buf_head);
1383                         hdr_count++;
1384                         if (hdr_count > port->current_buffer.num)
1385                                 break;
1386                 }
1387         }
1388
1389         ret = port_info_get(instance, port);
1390
1391 done:
1392         return ret;
1393 }
1394
1395 /* ------------------------------------------------------------------
1396  * Exported API
1397  *------------------------------------------------------------------
1398  */
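/*
 * Locking note (roughly): each exported entry point below takes
 * instance->vchiq_mutex around the internal helpers above, so the helpers
 * themselves assume the mutex is already held and never take it.
 */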
1399
1400 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1401                                struct vchiq_mmal_port *port)
1402 {
1403         int ret;
1404
1405         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1406                 return -EINTR;
1407
1408         ret = port_info_set(instance, port);
1409         if (ret)
1410                 goto release_unlock;
1411
1412         /* read what has actually been set */
1413         ret = port_info_get(instance, port);
1414
1415 release_unlock:
1416         mutex_unlock(&instance->vchiq_mutex);
1417
1418         return ret;
1419 }
1420 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
1421
1422 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1423                                   struct vchiq_mmal_port *port,
1424                                   u32 parameter, void *value, u32 value_size)
1425 {
1426         int ret;
1427
1428         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1429                 return -EINTR;
1430
1431         ret = port_parameter_set(instance, port, parameter, value, value_size);
1432
1433         mutex_unlock(&instance->vchiq_mutex);
1434
1435         return ret;
1436 }
1437 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1438
1439 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1440                                   struct vchiq_mmal_port *port,
1441                                   u32 parameter, void *value, u32 *value_size)
1442 {
1443         int ret;
1444
1445         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1446                 return -EINTR;
1447
1448         ret = port_parameter_get(instance, port, parameter, value, value_size);
1449
1450         mutex_unlock(&instance->vchiq_mutex);
1451
1452         return ret;
1453 }
1454 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
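/*
 * Illustrative sketch of the two parameter accessors above (not part of
 * the driver).  The parameter id and the u32 payload are placeholders;
 * real ids live in mmal-parameters.h and each id defines its own payload
 * layout.  Note that the get variant takes a pointer to the value size
 * and updates it with the size actually returned.
 *
 *	u32 enable = 1;
 *	u32 readback, readback_size = sizeof(readback);
 *
 *	ret = vchiq_mmal_port_parameter_set(instance, port, param_id,
 *					    &enable, sizeof(enable));
 *	if (!ret)
 *		ret = vchiq_mmal_port_parameter_get(instance, port, param_id,
 *						    &readback, &readback_size);
 */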
1455
1456 /* enable a port
1457  *
1458  * Enables the port and, if a callback handler is provided, queues any
1459  * pending buffers to the VPU so that buffer callbacks can be satisfied.
1460  */
1461 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1462                            struct vchiq_mmal_port *port,
1463                            vchiq_mmal_buffer_cb buffer_cb)
1464 {
1465         int ret;
1466
1467         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1468                 return -EINTR;
1469
1470         /* already enabled - noop */
1471         if (port->enabled) {
1472                 ret = 0;
1473                 goto unlock;
1474         }
1475
1476         port->buffer_cb = buffer_cb;
1477
1478         ret = port_enable(instance, port);
1479
1480 unlock:
1481         mutex_unlock(&instance->vchiq_mutex);
1482
1483         return ret;
1484 }
1485 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
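/*
 * Illustrative sketch (not part of the driver): enabling an output port
 * with a buffer callback.  The callback shape mirrors how buffer_cb is
 * invoked above (instance, port, status, buffer); the names here are
 * placeholders.
 *
 *	static void my_buffer_cb(struct vchiq_mmal_instance *instance,
 *				 struct vchiq_mmal_port *port,
 *				 int status, struct mmal_buffer *buf)
 *	{
 *		/- hand the filled buffer back to the V4L2 layer -/
 *	}
 *
 *	ret = vchiq_mmal_port_enable(instance, port, my_buffer_cb);
 */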
1486
1487 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1488                             struct vchiq_mmal_port *port)
1489 {
1490         int ret;
1491
1492         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1493                 return -EINTR;
1494
1495         if (!port->enabled) {
1496                 mutex_unlock(&instance->vchiq_mutex);
1497                 return 0;
1498         }
1499
1500         ret = port_disable(instance, port);
1501
1502         mutex_unlock(&instance->vchiq_mutex);
1503
1504         return ret;
1505 }
1506 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1507
1508 /* Ports will be connected in a tunneled manner, so data buffers
1509  * are not handled by the client.
1510  */
1511 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1512                                    struct vchiq_mmal_port *src,
1513                                    struct vchiq_mmal_port *dst)
1514 {
1515         int ret;
1516
1517         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1518                 return -EINTR;
1519
1520         /* disconnect ports if connected */
1521         if (src->connected) {
1522                 ret = port_disable(instance, src);
1523                 if (ret) {
1524                         pr_err("failed disabling src port(%d)\n", ret);
1525                         goto release_unlock;
1526                 }
1527
1528                 /* There is no need to disable the destination port: the
1529                  * ports are connected, so it is disabled automatically.
1530                  */
1531
1532                 ret = port_action_handle(instance, src,
1533                                          MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1534                                          src->connected->component->handle,
1535                                          src->connected->handle);
1536                 if (ret < 0) {
1537                         pr_err("failed disconnecting src port\n");
1538                         goto release_unlock;
1539                 }
1540                 src->connected->enabled = 0;
1541                 src->connected = NULL;
1542         }
1543
1544         if (!dst) {
1545                 /* do not make new connection */
1546                 ret = 0;
1547                 pr_debug("not making new connection\n");
1548                 goto release_unlock;
1549         }
1550
1551         /* copy src port format to dst */
1552         dst->format.encoding = src->format.encoding;
1553         dst->es.video.width = src->es.video.width;
1554         dst->es.video.height = src->es.video.height;
1555         dst->es.video.crop.x = src->es.video.crop.x;
1556         dst->es.video.crop.y = src->es.video.crop.y;
1557         dst->es.video.crop.width = src->es.video.crop.width;
1558         dst->es.video.crop.height = src->es.video.crop.height;
1559         dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1560         dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1561
1562         /* set new format */
1563         ret = port_info_set(instance, dst);
1564         if (ret) {
1565                 pr_debug("setting port info failed\n");
1566                 goto release_unlock;
1567         }
1568
1569         /* read what has actually been set */
1570         ret = port_info_get(instance, dst);
1571         if (ret) {
1572                 pr_debug("read back port info failed\n");
1573                 goto release_unlock;
1574         }
1575
1576         /* connect two ports together */
1577         ret = port_action_handle(instance, src,
1578                                  MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1579                                  dst->component->handle, dst->handle);
1580         if (ret < 0) {
1581                 pr_debug("connecting port %d:%d to %d:%d failed\n",
1582                          src->component->handle, src->handle,
1583                          dst->component->handle, dst->handle);
1584                 goto release_unlock;
1585         }
1586         src->connected = dst;
1587
1588 release_unlock:
1589
1590         mutex_unlock(&instance->vchiq_mutex);
1591
1592         return ret;
1593 }
1594 EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
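/*
 * Illustrative sketch (not part of the driver): tunnelling a source
 * output port into a sink input port, e.g. a capture component feeding
 * an encoder.  "camera" and "encoder" are placeholder components.
 *
 *	ret = vchiq_mmal_port_connect_tunnel(instance,
 *					     &camera->output[0],
 *					     &encoder->input[0]);
 *
 * Passing a NULL destination tears down an existing connection without
 * creating a new one, as the function body above shows.
 */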
1595
1596 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1597                              struct vchiq_mmal_port *port,
1598                              struct mmal_buffer *buffer)
1599 {
1600         unsigned long flags = 0;
1601         int ret;
1602
1603         ret = buffer_from_host(instance, port, buffer);
1604         if (ret == -EINVAL) {
1605                 /* Port is disabled. Queue for when it is enabled. */
1606                 spin_lock_irqsave(&port->slock, flags);
1607                 list_add_tail(&buffer->list, &port->buffers);
1608                 spin_unlock_irqrestore(&port->slock, flags);
1609         }
1610
1611         return 0;
1612 }
1613 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1614
1615 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1616                           struct mmal_buffer *buf)
1617 {
1618         struct mmal_msg_context *msg_context = get_msg_context(instance);
1619
1620         if (IS_ERR(msg_context))
1621                 return PTR_ERR(msg_context);
1622
1623         buf->msg_context = msg_context;
1624         return 0;
1625 }
1626 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1627
1628 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1629 {
1630         struct mmal_msg_context *msg_context = buf->msg_context;
1631
1632         if (msg_context)
1633                 release_msg_context(msg_context);
1634         buf->msg_context = NULL;
1635
1636         return 0;
1637 }
1638 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
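/*
 * Illustrative sketch (not part of the driver): pairing of the buffer
 * helpers above.  Each mmal_buffer is given a message context once, may
 * then be submitted (and resubmitted from the buffer callback) any number
 * of times, and finally has its context released.  Error handling is
 * abbreviated.
 *
 *	ret = mmal_vchi_buffer_init(instance, buf);
 *	if (!ret)
 *		ret = vchiq_mmal_submit_buffer(instance, port, buf);
 *	...
 *	mmal_vchi_buffer_cleanup(buf);
 */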
1639
1640 /*
1641  * Initialise a MMAL component and gather information on its ports.
1642  */
1643 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1644                               const char *name,
1645                               struct vchiq_mmal_component **component_out)
1646 {
1647         int ret;
1648         int idx;                /* port index */
1649         struct vchiq_mmal_component *component = NULL;
1650
1651         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1652                 return -EINTR;
1653
1654         for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
1655                 if (!instance->component[idx].in_use) {
1656                         component = &instance->component[idx];
1657                         component->in_use = 1;
1658                         break;
1659                 }
1660         }
1661
1662         if (!component) {
1663                 ret = -EINVAL;  /* todo is this correct error? */
1664                 goto unlock;
1665         }
1666
1667         /* We need a handle to reference back to our component structure.
1668          * Use the array index in instance->component rather than rolling
1669          * another IDR.
1670          */
1671         component->client_component = idx;
1672
1673         ret = create_component(instance, component, name);
1674         if (ret < 0) {
1675                 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1676                        __func__, ret);
1677                 goto unlock;
1678         }
1679
1680         /* ports info needs gathering */
1681         component->control.type = MMAL_PORT_TYPE_CONTROL;
1682         component->control.index = 0;
1683         component->control.component = component;
1684         spin_lock_init(&component->control.slock);
1685         INIT_LIST_HEAD(&component->control.buffers);
1686         ret = port_info_get(instance, &component->control);
1687         if (ret < 0)
1688                 goto release_component;
1689
1690         for (idx = 0; idx < component->inputs; idx++) {
1691                 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1692                 component->input[idx].index = idx;
1693                 component->input[idx].component = component;
1694                 spin_lock_init(&component->input[idx].slock);
1695                 INIT_LIST_HEAD(&component->input[idx].buffers);
1696                 ret = port_info_get(instance, &component->input[idx]);
1697                 if (ret < 0)
1698                         goto release_component;
1699         }
1700
1701         for (idx = 0; idx < component->outputs; idx++) {
1702                 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1703                 component->output[idx].index = idx;
1704                 component->output[idx].component = component;
1705                 spin_lock_init(&component->output[idx].slock);
1706                 INIT_LIST_HEAD(&component->output[idx].buffers);
1707                 ret = port_info_get(instance, &component->output[idx]);
1708                 if (ret < 0)
1709                         goto release_component;
1710         }
1711
1712         for (idx = 0; idx < component->clocks; idx++) {
1713                 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1714                 component->clock[idx].index = idx;
1715                 component->clock[idx].component = component;
1716                 spin_lock_init(&component->clock[idx].slock);
1717                 INIT_LIST_HEAD(&component->clock[idx].buffers);
1718                 ret = port_info_get(instance, &component->clock[idx]);
1719                 if (ret < 0)
1720                         goto release_component;
1721         }
1722
1723         *component_out = component;
1724
1725         mutex_unlock(&instance->vchiq_mutex);
1726
1727         return 0;
1728
1729 release_component:
1730         destroy_component(instance, component);
1731 unlock:
1732         if (component)
1733                 component->in_use = 0;
1734         mutex_unlock(&instance->vchiq_mutex);
1735
1736         return ret;
1737 }
1738 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
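/*
 * Illustrative sketch (not part of the driver): creating a component by
 * name and inspecting the port counts that port_info_get() filled in.
 * "ril.camera" is a placeholder; the VPU component name is supplied by
 * the caller.
 *
 *	struct vchiq_mmal_component *camera;
 *
 *	ret = vchiq_mmal_component_init(instance, "ril.camera", &camera);
 *	if (!ret)
 *		pr_debug("camera: %d input(s), %d output(s)\n",
 *			 camera->inputs, camera->outputs);
 */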
1739
1740 /*
1741  * cause a mmal component to be destroyed
1742  */
1743 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1744                                   struct vchiq_mmal_component *component)
1745 {
1746         int ret;
1747
1748         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1749                 return -EINTR;
1750
1751         if (component->enabled)
1752                 disable_component(instance, component);
1753
1754         ret = destroy_component(instance, component);
1755
1756         component->in_use = 0;
1757
1758         mutex_unlock(&instance->vchiq_mutex);
1759
1760         return ret;
1761 }
1762 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1763
1764 /*
1765  * cause a mmal component to be enabled
1766  */
1767 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1768                                 struct vchiq_mmal_component *component)
1769 {
1770         int ret;
1771
1772         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1773                 return -EINTR;
1774
1775         if (component->enabled) {
1776                 mutex_unlock(&instance->vchiq_mutex);
1777                 return 0;
1778         }
1779
1780         ret = enable_component(instance, component);
1781         if (ret == 0)
1782                 component->enabled = true;
1783
1784         mutex_unlock(&instance->vchiq_mutex);
1785
1786         return ret;
1787 }
1788 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
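/*
 * Illustrative sketch (not part of the driver): a typical caller would
 * enable the component after configuring its ports and before enabling
 * individual ports or submitting buffers.  "camera" is a placeholder
 * component handle.
 *
 *	ret = vchiq_mmal_component_enable(instance, camera);
 *	if (ret)
 *		pr_err("failed to enable component: %d\n", ret);
 */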
1789
1790 /*
1791  * cause a mmal component to be disabled
1792  */
1793 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1794                                  struct vchiq_mmal_component *component)
1795 {
1796         int ret;
1797
1798         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1799                 return -EINTR;
1800
1801         if (!component->enabled) {
1802                 mutex_unlock(&instance->vchiq_mutex);
1803                 return 0;
1804         }
1805
1806         ret = disable_component(instance, component);
1807         if (ret == 0)
1808                 component->enabled = 0;
1809
1810         mutex_unlock(&instance->vchiq_mutex);
1811
1812         return ret;
1813 }
1814 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1815
1816 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1817                        u32 *major_out, u32 *minor_out)
1818 {
1819         int ret;
1820
1821         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1822                 return -EINTR;
1823
1824         ret = get_version(instance, major_out, minor_out);
1825
1826         mutex_unlock(&instance->vchiq_mutex);
1827
1828         return ret;
1829 }
1830 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
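/*
 * Illustrative sketch (not part of the driver): querying the MMAL
 * protocol version reported by the VPU.
 *
 *	u32 major, minor;
 *
 *	if (!vchiq_mmal_version(instance, &major, &minor))
 *		pr_info("VC MMAL version %u.%u\n", major, minor);
 */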
1831
1832 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1833 {
1834         int status = 0;
1835
1836         if (!instance)
1837                 return -EINVAL;
1838
1839         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1840                 return -EINTR;
1841
1842         vchi_service_use(instance->service);
1843
1844         status = vchi_service_close(instance->service);
1845         if (status != 0)
1846                 pr_err("mmal-vchiq: VCHIQ close failed\n");
1847
1848         mutex_unlock(&instance->vchiq_mutex);
1849
1850         flush_workqueue(instance->bulk_wq);
1851         destroy_workqueue(instance->bulk_wq);
1852
1853         vfree(instance->bulk_scratch);
1854
1855         idr_destroy(&instance->context_map);
1856
1857         kfree(instance);
1858
1859         return status;
1860 }
1861 EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1862
1863 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1864 {
1865         int status;
1866         struct vchiq_mmal_instance *instance;
1867         static struct vchiq_instance *vchiq_instance;
1868         struct service_creation params = {
1869                 .version                = VCHI_VERSION_EX(VC_MMAL_VER, VC_MMAL_MIN_VER),
1870                 .service_id             = VC_MMAL_SERVER_NAME,
1871                 .callback               = service_callback,
1872                 .callback_param         = NULL,
1873         };
1874
1875         /* compile time checks to ensure structure sizes are as expected,
1876          * as these structures are (de)serialised directly from memory.
1877          */
1878
1879         /* ensure the header structure has packed to the correct size */
1880         BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1881
1882         /* ensure message structure does not exceed maximum length */
1883         BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1884
1885         /* mmal port struct is correct size */
1886         BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1887
1888         /* create a vchi instance */
1889         status = vchi_initialise(&vchiq_instance);
1890         if (status) {
1891                 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1892                        status);
1893                 return -EIO;
1894         }
1895
1896         status = vchi_connect(vchiq_instance);
1897         if (status) {
1898                 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1899                 return -EIO;
1900         }
1901
1902         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1903
1904         if (!instance)
1905                 return -ENOMEM;
1906
1907         mutex_init(&instance->vchiq_mutex);
1908
1909         instance->bulk_scratch = vmalloc(PAGE_SIZE);
1910
1911         mutex_init(&instance->context_map_lock);
1912         idr_init_base(&instance->context_map, 1);
1913
1914         params.callback_param = instance;
1915
1916         instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1917                                                     WQ_MEM_RECLAIM);
1918         if (!instance->bulk_wq)
1919                 goto err_free;
1920
1921         status = vchi_service_open(vchiq_instance, &params, &instance->service);
1922         if (status) {
1923                 pr_err("Failed to open VCHI service connection (status=%d)\n",
1924                        status);
1925                 goto err_close_services;
1926         }
1927
1928         vchi_service_release(instance->service);
1929
1930         *out_instance = instance;
1931
1932         return 0;
1933
1934 err_close_services:
1935         vchi_service_close(instance->service);
1936         destroy_workqueue(instance->bulk_wq);
1937 err_free:
1938         vfree(instance->bulk_scratch);
1939         kfree(instance);
1940         return -ENODEV;
1941 }
1942 EXPORT_SYMBOL_GPL(vchiq_mmal_init);
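/*
 * Illustrative end-to-end sketch (not part of the driver) showing the
 * intended pairing of the exported init/finalise calls above.  Error
 * handling is elided and the component name is a placeholder.
 *
 *	struct vchiq_mmal_instance *instance;
 *	struct vchiq_mmal_component *comp;
 *
 *	if (vchiq_mmal_init(&instance))
 *		return -ENODEV;
 *
 *	if (!vchiq_mmal_component_init(instance, "ril.camera", &comp)) {
 *		vchiq_mmal_component_enable(instance, comp);
 *		...
 *		vchiq_mmal_component_disable(instance, comp);
 *		vchiq_mmal_component_finalise(instance, comp);
 *	}
 *
 *	vchiq_mmal_finalise(instance);
 */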
1943
1944 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1945 MODULE_AUTHOR("Dave Stevenson <dave.stevenson@raspberrypi.org>");
1946 MODULE_LICENSE("GPL");