drivers/staging/vc04_services/vchiq-mmal/mmal-vchiq.c
1 // SPDX-License-Identifier: GPL-2.0
2 /*
3  * Broadcom BCM2835 V4L2 driver
4  *
5  * Copyright © 2013 Raspberry Pi (Trading) Ltd.
6  *
7  * Authors: Vincent Sanders @ Collabora
8  *          Dave Stevenson @ Broadcom
9  *              (now dave.stevenson@raspberrypi.org)
10  *          Simon Mellor @ Broadcom
11  *          Luke Diamand @ Broadcom
12  *
13  * V4L2 driver MMAL vchiq interface code
14  */
15
16 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
17
18 #include <linux/errno.h>
19 #include <linux/kernel.h>
20 #include <linux/module.h>
21 #include <linux/mutex.h>
22 #include <linux/mm.h>
23 #include <linux/slab.h>
24 #include <linux/completion.h>
25 #include <linux/vmalloc.h>
26 #include <media/videobuf2-vmalloc.h>
27
28 #include "mmal-common.h"
29 #include "mmal-vchiq.h"
30 #include "mmal-msg.h"
31
32 #include "interface/vchiq_arm/vchiq_if.h"
33 #include "interface/vchi/vchi.h"
34
35 /*
36  * maximum number of components supported.
37  * This matches the maximum permitted by default on the VPU
38  */
39 #define VCHIQ_MMAL_MAX_COMPONENTS 64
40
41 /*
42  * Timeout for synchronous msg responses in seconds.
43  * Helpful to increase this if stopping in the VPU debugger.
44  */
45 #define SYNC_MSG_TIMEOUT       3
46
47 /*#define FULL_MSG_DUMP 1*/
48
49 #ifdef DEBUG
50 static const char *const msg_type_names[] = {
51         "UNKNOWN",
52         "QUIT",
53         "SERVICE_CLOSED",
54         "GET_VERSION",
55         "COMPONENT_CREATE",
56         "COMPONENT_DESTROY",
57         "COMPONENT_ENABLE",
58         "COMPONENT_DISABLE",
59         "PORT_INFO_GET",
60         "PORT_INFO_SET",
61         "PORT_ACTION",
62         "BUFFER_FROM_HOST",
63         "BUFFER_TO_HOST",
64         "GET_STATS",
65         "PORT_PARAMETER_SET",
66         "PORT_PARAMETER_GET",
67         "EVENT_TO_HOST",
68         "GET_CORE_STATS_FOR_PORT",
69         "OPAQUE_ALLOCATOR",
70         "CONSUME_MEM",
71         "LMK",
72         "OPAQUE_ALLOCATOR_DESC",
73         "DRM_GET_LHS32",
74         "DRM_GET_TIME",
75         "BUFFER_FROM_HOST_ZEROLEN",
76         "PORT_FLUSH",
77         "HOST_LOG",
78 };
79 #endif
80
81 static const char *const port_action_type_names[] = {
82         "UNKNOWN",
83         "ENABLE",
84         "DISABLE",
85         "FLUSH",
86         "CONNECT",
87         "DISCONNECT",
88         "SET_REQUIREMENTS",
89 };
90
91 #if defined(DEBUG)
92 #if defined(FULL_MSG_DUMP)
93 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
94         do {                                                            \
95                 pr_debug(TITLE" type:%s(%d) length:%d\n",               \
96                          msg_type_names[(MSG)->h.type],                 \
97                          (MSG)->h.type, (MSG_LEN));                     \
98                 print_hex_dump(KERN_DEBUG, "<<h: ", DUMP_PREFIX_OFFSET, \
99                                16, 4, (MSG),                            \
100                                sizeof(struct mmal_msg_header), 1);      \
101                 print_hex_dump(KERN_DEBUG, "<<p: ", DUMP_PREFIX_OFFSET, \
102                                16, 4,                                   \
103                                ((u8 *)(MSG)) + sizeof(struct mmal_msg_header),\
104                                (MSG_LEN) - sizeof(struct mmal_msg_header), 1); \
105         } while (0)
106 #else
107 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)                               \
108         {                                                               \
109                 pr_debug(TITLE" type:%s(%d) length:%d\n",               \
110                          msg_type_names[(MSG)->h.type],                 \
111                          (MSG)->h.type, (MSG_LEN));                     \
112         }
113 #endif
114 #else
115 #define DBG_DUMP_MSG(MSG, MSG_LEN, TITLE)
116 #endif
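/*
 * Note: DBG_DUMP_MSG() is used below to trace MMAL messages.  It compiles
 * away entirely unless DEBUG is defined, and only hex-dumps the full
 * message contents when FULL_MSG_DUMP is also defined.
 */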
117
118 struct vchiq_mmal_instance;
119
120 /* normal message context */
121 struct mmal_msg_context {
122         struct vchiq_mmal_instance *instance;
123
124         /* Index in the context_map idr so that we can find the
125          * mmal_msg_context again when servicing the VCHI reply.
126          */
127         int handle;
128
129         union {
130                 struct {
131                         /* work struct for buffer_cb callback */
132                         struct work_struct work;
133                         /* work struct for deferred callback */
134                         struct work_struct buffer_to_host_work;
135                         /* mmal instance */
136                         struct vchiq_mmal_instance *instance;
137                         /* mmal port */
138                         struct vchiq_mmal_port *port;
139                         /* actual buffer used to store bulk reply */
140                         struct mmal_buffer *buffer;
141                         /* amount of buffer used */
142                         unsigned long buffer_used;
143                         /* MMAL buffer flags */
144                         u32 mmal_flags;
145                         /* Presentation and Decode timestamps */
146                         s64 pts;
147                         s64 dts;
148
149                         int status;     /* context status */
150
151                 } bulk;         /* bulk data */
152
153                 struct {
154                         /* message handle to release */
155                         struct vchiq_header *msg_handle;
156                         /* pointer to received message */
157                         struct mmal_msg *msg;
158                         /* received message length */
159                         u32 msg_len;
160                         /* completion upon reply */
161                         struct completion cmplt;
162                 } sync;         /* synchronous response */
163         } u;
164
165 };
166
167 struct vchiq_mmal_instance {
168         unsigned service_handle;
169
170         /* ensure serialised access to service */
171         struct mutex vchiq_mutex;
172
173         /* vmalloc page to receive scratch bulk xfers into */
174         void *bulk_scratch;
175
176         struct idr context_map;
177         /* protect accesses to context_map */
178         struct mutex context_map_lock;
179
180         struct vchiq_mmal_component component[VCHIQ_MMAL_MAX_COMPONENTS];
181
182         /* ordered workqueue to process all bulk operations */
183         struct workqueue_struct *bulk_wq;
184 };
185
186 static struct mmal_msg_context *
187 get_msg_context(struct vchiq_mmal_instance *instance)
188 {
189         struct mmal_msg_context *msg_context;
190         int handle;
191
192         /* todo: should this be allocated from a pool to avoid kzalloc */
193         msg_context = kzalloc(sizeof(*msg_context), GFP_KERNEL);
194
195         if (!msg_context)
196                 return ERR_PTR(-ENOMEM);
197
198         /* Create an ID that will be passed along with our message so
199          * that when we service the VCHI reply, we can look up what
200          * message is being replied to.
201          */
202         mutex_lock(&instance->context_map_lock);
203         handle = idr_alloc(&instance->context_map, msg_context,
204                            0, 0, GFP_KERNEL);
205         mutex_unlock(&instance->context_map_lock);
206
207         if (handle < 0) {
208                 kfree(msg_context);
209                 return ERR_PTR(handle);
210         }
211
212         msg_context->instance = instance;
213         msg_context->handle = handle;
214
215         return msg_context;
216 }
217
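/*
 * Look up a message context previously allocated by get_msg_context().
 * Lookups are performed from the VCHIQ service callback without taking
 * context_map_lock; insertion and removal of entries are serialised by
 * that mutex in get_msg_context() and release_msg_context().
 */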
218 static struct mmal_msg_context *
219 lookup_msg_context(struct vchiq_mmal_instance *instance, int handle)
220 {
221         return idr_find(&instance->context_map, handle);
222 }
223
224 static void
225 release_msg_context(struct mmal_msg_context *msg_context)
226 {
227         struct vchiq_mmal_instance *instance = msg_context->instance;
228
229         mutex_lock(&instance->context_map_lock);
230         idr_remove(&instance->context_map, msg_context->handle);
231         mutex_unlock(&instance->context_map_lock);
232         kfree(msg_context);
233 }
234
235 /* deals with receipt of event to host message */
236 static void event_to_host_cb(struct vchiq_mmal_instance *instance,
237                              struct mmal_msg *msg, u32 msg_len)
238 {
239         pr_debug("unhandled event\n");
240         pr_debug("component:%u port type:%d num:%d cmd:0x%x length:%d\n",
241                  msg->u.event_to_host.client_component,
242                  msg->u.event_to_host.port_type,
243                  msg->u.event_to_host.port_num,
244                  msg->u.event_to_host.cmd, msg->u.event_to_host.length);
245 }
246
247 /* workqueue scheduled callback
248  *
249  * we do this because it is important we do not call any other vchiq
250  * sync calls from within the message delivery thread
251  */
252 static void buffer_work_cb(struct work_struct *work)
253 {
254         struct mmal_msg_context *msg_context =
255                 container_of(work, struct mmal_msg_context, u.bulk.work);
256         struct mmal_buffer *buffer = msg_context->u.bulk.buffer;
257
258         if (!buffer) {
259                 pr_err("%s: ctx: %p, No mmal buffer to pass details\n",
260                        __func__, msg_context);
261                 return;
262         }
263
264         buffer->length = msg_context->u.bulk.buffer_used;
265         buffer->mmal_flags = msg_context->u.bulk.mmal_flags;
266         buffer->dts = msg_context->u.bulk.dts;
267         buffer->pts = msg_context->u.bulk.pts;
268
269         atomic_dec(&msg_context->u.bulk.port->buffers_with_vpu);
270
271         msg_context->u.bulk.port->buffer_cb(msg_context->u.bulk.instance,
272                                             msg_context->u.bulk.port,
273                                             msg_context->u.bulk.status,
274                                             msg_context->u.bulk.buffer);
275 }
276
277 /* workqueue scheduled callback to handle receiving buffers
278  *
279  * VCHI will allow up to 4 bulk receives to be scheduled before blocking.
280  * If we block in the service_callback context then we can't process the
281  * VCHI_CALLBACK_BULK_RECEIVED message that would otherwise allow the blocked
282  * vchi_bulk_queue_receive() call to complete.
283  */
284 static void buffer_to_host_work_cb(struct work_struct *work)
285 {
286         struct mmal_msg_context *msg_context =
287                 container_of(work, struct mmal_msg_context,
288                              u.bulk.buffer_to_host_work);
289         struct vchiq_mmal_instance *instance = msg_context->instance;
290         unsigned long len = msg_context->u.bulk.buffer_used;
291         int ret;
292
293         if (!len)
294                 /* Dummy receive to ensure the buffers remain in order */
295                 len = 8;
296         /* queue the bulk submission */
297         vchi_service_use(instance->service_handle);
298         ret = vchi_bulk_queue_receive(instance->service_handle,
299                                       msg_context->u.bulk.buffer->buffer,
300                                       /* Actual receive needs to be a multiple
301                                        * of 4 bytes
302                                        */
303                                       (len + 3) & ~3,
304                                       VCHIQ_BULK_MODE_CALLBACK,
305                                       msg_context);
306
307         vchi_service_release(instance->service_handle);
308
309         if (ret != 0)
310                 pr_err("%s: ctx: %p, vchi_bulk_queue_receive failed %d\n",
311                        __func__, msg_context, ret);
312 }
313
314 /* enqueue a bulk receive for a given message context */
315 static int bulk_receive(struct vchiq_mmal_instance *instance,
316                         struct mmal_msg *msg,
317                         struct mmal_msg_context *msg_context)
318 {
319         unsigned long rd_len;
320
321         rd_len = msg->u.buffer_from_host.buffer_header.length;
322
323         if (!msg_context->u.bulk.buffer) {
324                 pr_err("bulk.buffer not configured - error in buffer_from_host\n");
325
326                 /* todo: this is a serious error, we should never have
327                  * committed a buffer_to_host operation to the mmal
328                  * port without the buffer to back it up (underflow
329                  * handling) and there is no obvious way to deal with
330                  * this - how is the mmal service going to react when
331                  * we fail to do the xfer and reschedule a buffer when
332                  * it arrives? perhaps a starved flag to indicate a
333                  * waiting bulk receive?
334                  */
335
336                 return -EINVAL;
337         }
338
339         /* ensure we do not overrun the available buffer */
340         if (rd_len > msg_context->u.bulk.buffer->buffer_size) {
341                 rd_len = msg_context->u.bulk.buffer->buffer_size;
342                 pr_warn("short read as not enough receive buffer space\n");
343                 /* todo: is this the correct response, what happens to
344                  * the rest of the message data?
345                  */
346         }
347
348         /* store length */
349         msg_context->u.bulk.buffer_used = rd_len;
350         msg_context->u.bulk.dts = msg->u.buffer_from_host.buffer_header.dts;
351         msg_context->u.bulk.pts = msg->u.buffer_from_host.buffer_header.pts;
352
353         queue_work(msg_context->instance->bulk_wq,
354                    &msg_context->u.bulk.buffer_to_host_work);
355
356         return 0;
357 }
358
359 /* data in message, memcpy from packet into output buffer */
360 static int inline_receive(struct vchiq_mmal_instance *instance,
361                           struct mmal_msg *msg,
362                           struct mmal_msg_context *msg_context)
363 {
364         memcpy(msg_context->u.bulk.buffer->buffer,
365                msg->u.buffer_from_host.short_data,
366                msg->u.buffer_from_host.payload_in_message);
367
368         msg_context->u.bulk.buffer_used =
369             msg->u.buffer_from_host.payload_in_message;
370
371         return 0;
372 }
373
374 /* queue the buffer availability with MMAL_MSG_TYPE_BUFFER_FROM_HOST */
375 static int
376 buffer_from_host(struct vchiq_mmal_instance *instance,
377                  struct vchiq_mmal_port *port, struct mmal_buffer *buf)
378 {
379         struct mmal_msg_context *msg_context;
380         struct mmal_msg m;
381         int ret;
382
383         if (!port->enabled)
384                 return -EINVAL;
385
386         pr_debug("instance:%u buffer:%p\n", instance->service_handle, buf);
387
388         /* get context */
389         if (!buf->msg_context) {
390                 pr_err("%s: msg_context not allocated, buf %p\n", __func__,
391                        buf);
392                 return -EINVAL;
393         }
394         msg_context = buf->msg_context;
395
396         /* store bulk message context for when data arrives */
397         msg_context->u.bulk.instance = instance;
398         msg_context->u.bulk.port = port;
399         msg_context->u.bulk.buffer = buf;
400         msg_context->u.bulk.buffer_used = 0;
401
402         /* initialise work structure ready to schedule callback */
403         INIT_WORK(&msg_context->u.bulk.work, buffer_work_cb);
404         INIT_WORK(&msg_context->u.bulk.buffer_to_host_work,
405                   buffer_to_host_work_cb);
406
407         atomic_inc(&port->buffers_with_vpu);
408
409         /* prep the buffer from host message */
410         memset(&m, 0xbc, sizeof(m));    /* just to make debug clearer */
411
412         m.h.type = MMAL_MSG_TYPE_BUFFER_FROM_HOST;
413         m.h.magic = MMAL_MAGIC;
414         m.h.context = msg_context->handle;
415         m.h.status = 0;
416
417         /* drvbuf is our private data passed back */
418         m.u.buffer_from_host.drvbuf.magic = MMAL_MAGIC;
419         m.u.buffer_from_host.drvbuf.component_handle = port->component->handle;
420         m.u.buffer_from_host.drvbuf.port_handle = port->handle;
421         m.u.buffer_from_host.drvbuf.client_context = msg_context->handle;
422
423         /* buffer header */
424         m.u.buffer_from_host.buffer_header.cmd = 0;
425         m.u.buffer_from_host.buffer_header.data =
426                 (u32)(unsigned long)buf->buffer;
427         m.u.buffer_from_host.buffer_header.alloc_size = buf->buffer_size;
428         m.u.buffer_from_host.buffer_header.length = 0;  /* nothing used yet */
429         m.u.buffer_from_host.buffer_header.offset = 0;  /* no offset */
430         m.u.buffer_from_host.buffer_header.flags = 0;   /* no flags */
431         m.u.buffer_from_host.buffer_header.pts = MMAL_TIME_UNKNOWN;
432         m.u.buffer_from_host.buffer_header.dts = MMAL_TIME_UNKNOWN;
433
434         /* clear buffer type specific data */
435         memset(&m.u.buffer_from_host.buffer_header_type_specific, 0,
436                sizeof(m.u.buffer_from_host.buffer_header_type_specific));
437
438         /* no payload in message */
439         m.u.buffer_from_host.payload_in_message = 0;
440
441         vchi_service_use(instance->service_handle);
442
443         ret = vchi_queue_kernel_message(instance->service_handle,
444                                         &m,
445                                         sizeof(struct mmal_msg_header) +
446                                         sizeof(m.u.buffer_from_host));
447
448         vchi_service_release(instance->service_handle);
449
450         return ret;
451 }
452
453 /* deals with receipt of buffer to host message */
454 static void buffer_to_host_cb(struct vchiq_mmal_instance *instance,
455                               struct mmal_msg *msg, u32 msg_len)
456 {
457         struct mmal_msg_context *msg_context;
458         u32 handle;
459
460         pr_debug("%s: instance:%p msg:%p msg_len:%d\n",
461                  __func__, instance, msg, msg_len);
462
463         if (msg->u.buffer_from_host.drvbuf.magic == MMAL_MAGIC) {
464                 handle = msg->u.buffer_from_host.drvbuf.client_context;
465                 msg_context = lookup_msg_context(instance, handle);
466
467                 if (!msg_context) {
468                         pr_err("drvbuf.client_context(%u) is invalid\n",
469                                handle);
470                         return;
471                 }
472         } else {
473                 pr_err("MMAL_MSG_TYPE_BUFFER_TO_HOST with bad magic\n");
474                 return;
475         }
476
477         msg_context->u.bulk.mmal_flags =
478                                 msg->u.buffer_from_host.buffer_header.flags;
479
480         if (msg->h.status != MMAL_MSG_STATUS_SUCCESS) {
481                 /* message reception had an error */
482                 pr_warn("error %d in reply\n", msg->h.status);
483
484                 msg_context->u.bulk.status = msg->h.status;
485
486         } else if (msg->u.buffer_from_host.buffer_header.length == 0) {
487                 /* empty buffer */
488                 if (msg->u.buffer_from_host.buffer_header.flags &
489                     MMAL_BUFFER_HEADER_FLAG_EOS) {
490                         msg_context->u.bulk.status =
491                             bulk_receive(instance, msg, msg_context);
492                         if (msg_context->u.bulk.status == 0)
493                                 return; /* successful bulk submission, bulk
494                                          * completion will trigger callback
495                                          */
496                 } else {
497                         /* do callback with empty buffer - not EOS though */
498                         msg_context->u.bulk.status = 0;
499                         msg_context->u.bulk.buffer_used = 0;
500                 }
501         } else if (msg->u.buffer_from_host.payload_in_message == 0) {
502                 /* data is not in message, queue a bulk receive */
503                 msg_context->u.bulk.status =
504                     bulk_receive(instance, msg, msg_context);
505                 if (msg_context->u.bulk.status == 0)
506                         return; /* successful bulk submission, bulk
507                                  * completion will trigger callback
508                                  */
509
510                 /* failed to submit buffer, this will end badly */
511                 pr_err("error %d on bulk submission\n",
512                        msg_context->u.bulk.status);
513
514         } else if (msg->u.buffer_from_host.payload_in_message <=
515                    MMAL_VC_SHORT_DATA) {
516                 /* data payload within message */
517                 msg_context->u.bulk.status = inline_receive(instance, msg,
518                                                             msg_context);
519         } else {
520                 pr_err("message with invalid short payload\n");
521
522                 /* signal error */
523                 msg_context->u.bulk.status = -EINVAL;
524                 msg_context->u.bulk.buffer_used =
525                     msg->u.buffer_from_host.payload_in_message;
526         }
527
528         /* schedule the port callback */
529         schedule_work(&msg_context->u.bulk.work);
530 }
531
532 static void bulk_receive_cb(struct vchiq_mmal_instance *instance,
533                             struct mmal_msg_context *msg_context)
534 {
535         msg_context->u.bulk.status = 0;
536
537         /* schedule the port callback */
538         schedule_work(&msg_context->u.bulk.work);
539 }
540
541 static void bulk_abort_cb(struct vchiq_mmal_instance *instance,
542                           struct mmal_msg_context *msg_context)
543 {
544         pr_err("%s: bulk ABORTED msg_context:%p\n", __func__, msg_context);
545
546         msg_context->u.bulk.status = -EINTR;
547
548         schedule_work(&msg_context->u.bulk.work);
549 }
550
551 /* incoming event service callback */
552 static enum vchiq_status service_callback(enum vchiq_reason reason,
553                                           struct vchiq_header *header,
554                                           unsigned handle, void *bulk_ctx)
555 {
556         struct vchiq_mmal_instance *instance = vchiq_get_service_userdata(handle);
557         u32 msg_len;
558         struct mmal_msg *msg;
559         struct mmal_msg_context *msg_context;
560
561         if (!instance) {
562                 pr_err("Message callback passed NULL instance\n");
563                 return VCHIQ_SUCCESS;
564         }
565
566         switch (reason) {
567         case VCHIQ_MESSAGE_AVAILABLE:
568                 msg = (void *)header->data;
569                 msg_len = header->size;
570
571                 DBG_DUMP_MSG(msg, msg_len, "<<< reply message");
572
573                 /* handling is different for buffer messages */
574                 switch (msg->h.type) {
575                 case MMAL_MSG_TYPE_BUFFER_FROM_HOST:
576                         vchiq_release_message(handle, header);
577                         break;
578
579                 case MMAL_MSG_TYPE_EVENT_TO_HOST:
580                         event_to_host_cb(instance, msg, msg_len);
581                         vchiq_release_message(handle, header);
582
583                         break;
584
585                 case MMAL_MSG_TYPE_BUFFER_TO_HOST:
586                         buffer_to_host_cb(instance, msg, msg_len);
587                         vchiq_release_message(handle, header);
588                         break;
589
590                 default:
591                         /* messages dependent on header context to complete */
592                         if (!msg->h.context) {
593                                 pr_err("received message context was null!\n");
594                                 vchiq_release_message(handle, header);
595                                 break;
596                         }
597
598                         msg_context = lookup_msg_context(instance,
599                                                          msg->h.context);
600                         if (!msg_context) {
601                                 pr_err("received invalid message context %u!\n",
602                                        msg->h.context);
603                                 vchiq_release_message(handle, header);
604                                 break;
605                         }
606
607                         /* fill in context values */
608                         msg_context->u.sync.msg_handle = header;
609                         msg_context->u.sync.msg = msg;
610                         msg_context->u.sync.msg_len = msg_len;
611
612                         /* todo: should this check (completion_done()
613                          * == 1) for no one waiting? or do we need a
614                          * flag to tell us the completion has been
615                          * interrupted so we can free the message and
616                          * its context. This probably also solves the
617                          * message arriving after interruption todo
618                          * below
619                          */
620
621                         /* complete message so caller knows it happened */
622                         complete(&msg_context->u.sync.cmplt);
623                         break;
624                 }
625
626                 break;
627
628         case VCHIQ_BULK_RECEIVE_DONE:
629                 bulk_receive_cb(instance, bulk_ctx);
630                 break;
631
632         case VCHIQ_BULK_RECEIVE_ABORTED:
633                 bulk_abort_cb(instance, bulk_ctx);
634                 break;
635
636         case VCHIQ_SERVICE_CLOSED:
637                 /* TODO: consider if this requires action if received when
638                  * driver is not explicitly closing the service
639                  */
640                 break;
641
642         default:
643                 pr_err("Received unhandled message reason %d\n", reason);
644                 break;
645         }
646
647         return VCHIQ_SUCCESS;
648 }
649
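/*
 * Send a message and wait for its reply.
 *
 * A message context is allocated and its idr handle stamped into the
 * message header so that service_callback() can pair up the reply; the
 * message is then queued and the caller sleeps for up to SYNC_MSG_TIMEOUT
 * seconds on the completion.  On success the reply message and its vchiq
 * header are handed back, and the caller must release the header with
 * vchiq_release_message() once done with the reply.
 */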
650 static int send_synchronous_mmal_msg(struct vchiq_mmal_instance *instance,
651                                      struct mmal_msg *msg,
652                                      unsigned int payload_len,
653                                      struct mmal_msg **msg_out,
654                                      struct vchiq_header **msg_handle)
655 {
656         struct mmal_msg_context *msg_context;
657         int ret;
658         unsigned long timeout;
659
660         /* payload size must not cause message to exceed max size */
661         if (payload_len >
662             (MMAL_MSG_MAX_SIZE - sizeof(struct mmal_msg_header))) {
663                 pr_err("payload length %d exceeds max:%d\n", payload_len,
664                        (int)(MMAL_MSG_MAX_SIZE -
665                             sizeof(struct mmal_msg_header)));
666                 return -EINVAL;
667         }
668
669         msg_context = get_msg_context(instance);
670         if (IS_ERR(msg_context))
671                 return PTR_ERR(msg_context);
672
673         init_completion(&msg_context->u.sync.cmplt);
674
675         msg->h.magic = MMAL_MAGIC;
676         msg->h.context = msg_context->handle;
677         msg->h.status = 0;
678
679         DBG_DUMP_MSG(msg, (sizeof(struct mmal_msg_header) + payload_len),
680                      ">>> sync message");
681
682         vchi_service_use(instance->service_handle);
683
684         ret = vchi_queue_kernel_message(instance->service_handle,
685                                         msg,
686                                         sizeof(struct mmal_msg_header) +
687                                         payload_len);
688
689         vchi_service_release(instance->service_handle);
690
691         if (ret) {
692                 pr_err("error %d queuing message\n", ret);
693                 release_msg_context(msg_context);
694                 return ret;
695         }
696
697         timeout = wait_for_completion_timeout(&msg_context->u.sync.cmplt,
698                                               SYNC_MSG_TIMEOUT * HZ);
699         if (timeout == 0) {
700                 pr_err("timed out waiting for sync completion\n");
701                 ret = -ETIME;
702                 /* todo: what happens if the message arrives after aborting */
703                 release_msg_context(msg_context);
704                 return ret;
705         }
706
707         *msg_out = msg_context->u.sync.msg;
708         *msg_handle = msg_context->u.sync.msg_handle;
709         release_msg_context(msg_context);
710
711         return 0;
712 }
713
714 static void dump_port_info(struct vchiq_mmal_port *port)
715 {
716         pr_debug("port handle:0x%x enabled:%d\n", port->handle, port->enabled);
717
718         pr_debug("buffer minimum num:%d size:%d align:%d\n",
719                  port->minimum_buffer.num,
720                  port->minimum_buffer.size, port->minimum_buffer.alignment);
721
722         pr_debug("buffer recommended num:%d size:%d align:%d\n",
723                  port->recommended_buffer.num,
724                  port->recommended_buffer.size,
725                  port->recommended_buffer.alignment);
726
727         pr_debug("buffer current values num:%d size:%d align:%d\n",
728                  port->current_buffer.num,
729                  port->current_buffer.size, port->current_buffer.alignment);
730
731         pr_debug("elementary stream: type:%d encoding:0x%x variant:0x%x\n",
732                  port->format.type,
733                  port->format.encoding, port->format.encoding_variant);
734
735         pr_debug("                  bitrate:%d flags:0x%x\n",
736                  port->format.bitrate, port->format.flags);
737
738         if (port->format.type == MMAL_ES_TYPE_VIDEO) {
739                 pr_debug
740                     ("es video format: width:%d height:%d colourspace:0x%x\n",
741                      port->es.video.width, port->es.video.height,
742                      port->es.video.color_space);
743
744                 pr_debug("               : crop xywh %d,%d,%d,%d\n",
745                          port->es.video.crop.x,
746                          port->es.video.crop.y,
747                          port->es.video.crop.width, port->es.video.crop.height);
748                 pr_debug("               : framerate %d/%d  aspect %d/%d\n",
749                          port->es.video.frame_rate.num,
750                          port->es.video.frame_rate.den,
751                          port->es.video.par.num, port->es.video.par.den);
752         }
753 }
754
755 static void port_to_mmal_msg(struct vchiq_mmal_port *port, struct mmal_port *p)
756 {
757         /* todo: do readonly fields need setting at all? */
758         p->type = port->type;
759         p->index = port->index;
760         p->index_all = 0;
761         p->is_enabled = port->enabled;
762         p->buffer_num_min = port->minimum_buffer.num;
763         p->buffer_size_min = port->minimum_buffer.size;
764         p->buffer_alignment_min = port->minimum_buffer.alignment;
765         p->buffer_num_recommended = port->recommended_buffer.num;
766         p->buffer_size_recommended = port->recommended_buffer.size;
767
768         /* only three writable fields in a port */
769         p->buffer_num = port->current_buffer.num;
770         p->buffer_size = port->current_buffer.size;
771         p->userdata = (u32)(unsigned long)port;
772 }
773
774 static int port_info_set(struct vchiq_mmal_instance *instance,
775                          struct vchiq_mmal_port *port)
776 {
777         int ret;
778         struct mmal_msg m;
779         struct mmal_msg *rmsg;
780         struct vchiq_header *rmsg_handle;
781
782         pr_debug("setting port info port %p\n", port);
783         if (!port)
784                 return -1;
785         dump_port_info(port);
786
787         m.h.type = MMAL_MSG_TYPE_PORT_INFO_SET;
788
789         m.u.port_info_set.component_handle = port->component->handle;
790         m.u.port_info_set.port_type = port->type;
791         m.u.port_info_set.port_index = port->index;
792
793         port_to_mmal_msg(port, &m.u.port_info_set.port);
794
795         /* elementary stream format setup */
796         m.u.port_info_set.format.type = port->format.type;
797         m.u.port_info_set.format.encoding = port->format.encoding;
798         m.u.port_info_set.format.encoding_variant =
799             port->format.encoding_variant;
800         m.u.port_info_set.format.bitrate = port->format.bitrate;
801         m.u.port_info_set.format.flags = port->format.flags;
802
803         memcpy(&m.u.port_info_set.es, &port->es,
804                sizeof(union mmal_es_specific_format));
805
806         m.u.port_info_set.format.extradata_size = port->format.extradata_size;
807         memcpy(&m.u.port_info_set.extradata, port->format.extradata,
808                port->format.extradata_size);
809
810         ret = send_synchronous_mmal_msg(instance, &m,
811                                         sizeof(m.u.port_info_set),
812                                         &rmsg, &rmsg_handle);
813         if (ret)
814                 return ret;
815
816         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_SET) {
817                 /* got an unexpected message type in reply */
818                 ret = -EINVAL;
819                 goto release_msg;
820         }
821
822         /* return operation status */
823         ret = -rmsg->u.port_info_get_reply.status;
824
825         pr_debug("%s:result:%d component:0x%x port:%d\n", __func__, ret,
826                  port->component->handle, port->handle);
827
828 release_msg:
829         vchiq_release_message(instance->service_handle, rmsg_handle);
830
831         return ret;
832 }
833
834 /* use port info get message to retrieve port information */
835 static int port_info_get(struct vchiq_mmal_instance *instance,
836                          struct vchiq_mmal_port *port)
837 {
838         int ret;
839         struct mmal_msg m;
840         struct mmal_msg *rmsg;
841         struct vchiq_header *rmsg_handle;
842
843         /* build the port info get message */
844         m.h.type = MMAL_MSG_TYPE_PORT_INFO_GET;
845         m.u.port_info_get.component_handle = port->component->handle;
846         m.u.port_info_get.port_type = port->type;
847         m.u.port_info_get.index = port->index;
848
849         ret = send_synchronous_mmal_msg(instance, &m,
850                                         sizeof(m.u.port_info_get),
851                                         &rmsg, &rmsg_handle);
852         if (ret)
853                 return ret;
854
855         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_INFO_GET) {
856                 /* got an unexpected message type in reply */
857                 ret = -EINVAL;
858                 goto release_msg;
859         }
860
861         /* return operation status */
862         ret = -rmsg->u.port_info_get_reply.status;
863         if (ret != MMAL_MSG_STATUS_SUCCESS)
864                 goto release_msg;
865
866         if (rmsg->u.port_info_get_reply.port.is_enabled == 0)
867                 port->enabled = 0;
868         else
869                 port->enabled = 1;
870
871         /* copy the values out of the message */
872         port->handle = rmsg->u.port_info_get_reply.port_handle;
873
874         /* port type and index cached to use on port info set because
875          * it does not use a port handle
876          */
877         port->type = rmsg->u.port_info_get_reply.port_type;
878         port->index = rmsg->u.port_info_get_reply.port_index;
879
880         port->minimum_buffer.num =
881             rmsg->u.port_info_get_reply.port.buffer_num_min;
882         port->minimum_buffer.size =
883             rmsg->u.port_info_get_reply.port.buffer_size_min;
884         port->minimum_buffer.alignment =
885             rmsg->u.port_info_get_reply.port.buffer_alignment_min;
886
887         port->recommended_buffer.alignment =
888             rmsg->u.port_info_get_reply.port.buffer_alignment_min;
889         port->recommended_buffer.num =
890             rmsg->u.port_info_get_reply.port.buffer_num_recommended;
891
892         port->current_buffer.num = rmsg->u.port_info_get_reply.port.buffer_num;
893         port->current_buffer.size =
894             rmsg->u.port_info_get_reply.port.buffer_size;
895
896         /* stream format */
897         port->format.type = rmsg->u.port_info_get_reply.format.type;
898         port->format.encoding = rmsg->u.port_info_get_reply.format.encoding;
899         port->format.encoding_variant =
900             rmsg->u.port_info_get_reply.format.encoding_variant;
901         port->format.bitrate = rmsg->u.port_info_get_reply.format.bitrate;
902         port->format.flags = rmsg->u.port_info_get_reply.format.flags;
903
904         /* elementary stream format */
905         memcpy(&port->es,
906                &rmsg->u.port_info_get_reply.es,
907                sizeof(union mmal_es_specific_format));
908         port->format.es = &port->es;
909
910         port->format.extradata_size =
911             rmsg->u.port_info_get_reply.format.extradata_size;
912         memcpy(port->format.extradata,
913                rmsg->u.port_info_get_reply.extradata,
914                port->format.extradata_size);
915
916         pr_debug("received port info\n");
917         dump_port_info(port);
918
919 release_msg:
920
921         pr_debug("%s:result:%d component:0x%x port:%d\n",
922                  __func__, ret, port->component->handle, port->handle);
923
924         vchiq_release_message(instance->service_handle, rmsg_handle);
925
926         return ret;
927 }
928
929 /* create component on vc */
930 static int create_component(struct vchiq_mmal_instance *instance,
931                             struct vchiq_mmal_component *component,
932                             const char *name)
933 {
934         int ret;
935         struct mmal_msg m;
936         struct mmal_msg *rmsg;
937         struct vchiq_header *rmsg_handle;
938
939         /* build component create message */
940         m.h.type = MMAL_MSG_TYPE_COMPONENT_CREATE;
941         m.u.component_create.client_component = component->client_component;
942         strncpy(m.u.component_create.name, name,
943                 sizeof(m.u.component_create.name));
944
945         ret = send_synchronous_mmal_msg(instance, &m,
946                                         sizeof(m.u.component_create),
947                                         &rmsg, &rmsg_handle);
948         if (ret)
949                 return ret;
950
951         if (rmsg->h.type != m.h.type) {
952                 /* got an unexpected message type in reply */
953                 ret = -EINVAL;
954                 goto release_msg;
955         }
956
957         ret = -rmsg->u.component_create_reply.status;
958         if (ret != MMAL_MSG_STATUS_SUCCESS)
959                 goto release_msg;
960
961         /* a valid component response received */
962         component->handle = rmsg->u.component_create_reply.component_handle;
963         component->inputs = rmsg->u.component_create_reply.input_num;
964         component->outputs = rmsg->u.component_create_reply.output_num;
965         component->clocks = rmsg->u.component_create_reply.clock_num;
966
967         pr_debug("Component handle:0x%x in:%d out:%d clock:%d\n",
968                  component->handle,
969                  component->inputs, component->outputs, component->clocks);
970
971 release_msg:
972         vchiq_release_message(instance->service_handle, rmsg_handle);
973
974         return ret;
975 }
976
977 /* destroys a component on vc */
978 static int destroy_component(struct vchiq_mmal_instance *instance,
979                              struct vchiq_mmal_component *component)
980 {
981         int ret;
982         struct mmal_msg m;
983         struct mmal_msg *rmsg;
984         struct vchiq_header *rmsg_handle;
985
986         m.h.type = MMAL_MSG_TYPE_COMPONENT_DESTROY;
987         m.u.component_destroy.component_handle = component->handle;
988
989         ret = send_synchronous_mmal_msg(instance, &m,
990                                         sizeof(m.u.component_destroy),
991                                         &rmsg, &rmsg_handle);
992         if (ret)
993                 return ret;
994
995         if (rmsg->h.type != m.h.type) {
996                 /* got an unexpected message type in reply */
997                 ret = -EINVAL;
998                 goto release_msg;
999         }
1000
1001         ret = -rmsg->u.component_destroy_reply.status;
1002
1003 release_msg:
1004
1005         vchiq_release_message(instance->service_handle, rmsg_handle);
1006
1007         return ret;
1008 }
1009
1010 /* enable a component on vc */
1011 static int enable_component(struct vchiq_mmal_instance *instance,
1012                             struct vchiq_mmal_component *component)
1013 {
1014         int ret;
1015         struct mmal_msg m;
1016         struct mmal_msg *rmsg;
1017         struct vchiq_header *rmsg_handle;
1018
1019         m.h.type = MMAL_MSG_TYPE_COMPONENT_ENABLE;
1020         m.u.component_enable.component_handle = component->handle;
1021
1022         ret = send_synchronous_mmal_msg(instance, &m,
1023                                         sizeof(m.u.component_enable),
1024                                         &rmsg, &rmsg_handle);
1025         if (ret)
1026                 return ret;
1027
1028         if (rmsg->h.type != m.h.type) {
1029                 /* got an unexpected message type in reply */
1030                 ret = -EINVAL;
1031                 goto release_msg;
1032         }
1033
1034         ret = -rmsg->u.component_enable_reply.status;
1035
1036 release_msg:
1037         vchiq_release_message(instance->service_handle, rmsg_handle);
1038
1039         return ret;
1040 }
1041
1042 /* disable a component on vc */
1043 static int disable_component(struct vchiq_mmal_instance *instance,
1044                              struct vchiq_mmal_component *component)
1045 {
1046         int ret;
1047         struct mmal_msg m;
1048         struct mmal_msg *rmsg;
1049         struct vchiq_header *rmsg_handle;
1050
1051         m.h.type = MMAL_MSG_TYPE_COMPONENT_DISABLE;
1052         m.u.component_disable.component_handle = component->handle;
1053
1054         ret = send_synchronous_mmal_msg(instance, &m,
1055                                         sizeof(m.u.component_disable),
1056                                         &rmsg, &rmsg_handle);
1057         if (ret)
1058                 return ret;
1059
1060         if (rmsg->h.type != m.h.type) {
1061                 /* got an unexpected message type in reply */
1062                 ret = -EINVAL;
1063                 goto release_msg;
1064         }
1065
1066         ret = -rmsg->u.component_disable_reply.status;
1067
1068 release_msg:
1069
1070         vchiq_release_message(instance->service_handle, rmsg_handle);
1071
1072         return ret;
1073 }
1074
1075 /* get version of mmal implementation */
1076 static int get_version(struct vchiq_mmal_instance *instance,
1077                        u32 *major_out, u32 *minor_out)
1078 {
1079         int ret;
1080         struct mmal_msg m;
1081         struct mmal_msg *rmsg;
1082         struct vchiq_header *rmsg_handle;
1083
1084         m.h.type = MMAL_MSG_TYPE_GET_VERSION;
1085
1086         ret = send_synchronous_mmal_msg(instance, &m,
1087                                         sizeof(m.u.version),
1088                                         &rmsg, &rmsg_handle);
1089         if (ret)
1090                 return ret;
1091
1092         if (rmsg->h.type != m.h.type) {
1093                 /* got an unexpected message type in reply */
1094                 ret = -EINVAL;
1095                 goto release_msg;
1096         }
1097
1098         *major_out = rmsg->u.version.major;
1099         *minor_out = rmsg->u.version.minor;
1100
1101 release_msg:
1102         vchiq_release_message(instance->service_handle, rmsg_handle);
1103
1104         return ret;
1105 }
1106
1107 /* do a port action with a port as a parameter */
1108 static int port_action_port(struct vchiq_mmal_instance *instance,
1109                             struct vchiq_mmal_port *port,
1110                             enum mmal_msg_port_action_type action_type)
1111 {
1112         int ret;
1113         struct mmal_msg m;
1114         struct mmal_msg *rmsg;
1115         struct vchiq_header *rmsg_handle;
1116
1117         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1118         m.u.port_action_port.component_handle = port->component->handle;
1119         m.u.port_action_port.port_handle = port->handle;
1120         m.u.port_action_port.action = action_type;
1121
1122         port_to_mmal_msg(port, &m.u.port_action_port.port);
1123
1124         ret = send_synchronous_mmal_msg(instance, &m,
1125                                         sizeof(m.u.port_action_port),
1126                                         &rmsg, &rmsg_handle);
1127         if (ret)
1128                 return ret;
1129
1130         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1131                 /* got an unexpected message type in reply */
1132                 ret = -EINVAL;
1133                 goto release_msg;
1134         }
1135
1136         ret = -rmsg->u.port_action_reply.status;
1137
1138         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d)\n",
1139                  __func__,
1140                  ret, port->component->handle, port->handle,
1141                  port_action_type_names[action_type], action_type);
1142
1143 release_msg:
1144         vchiq_release_message(instance->service_handle, rmsg_handle);
1145
1146         return ret;
1147 }
1148
1149 /* do a port action with handles as parameters */
1150 static int port_action_handle(struct vchiq_mmal_instance *instance,
1151                               struct vchiq_mmal_port *port,
1152                               enum mmal_msg_port_action_type action_type,
1153                               u32 connect_component_handle,
1154                               u32 connect_port_handle)
1155 {
1156         int ret;
1157         struct mmal_msg m;
1158         struct mmal_msg *rmsg;
1159         struct vchiq_header *rmsg_handle;
1160
1161         m.h.type = MMAL_MSG_TYPE_PORT_ACTION;
1162
1163         m.u.port_action_handle.component_handle = port->component->handle;
1164         m.u.port_action_handle.port_handle = port->handle;
1165         m.u.port_action_handle.action = action_type;
1166
1167         m.u.port_action_handle.connect_component_handle =
1168             connect_component_handle;
1169         m.u.port_action_handle.connect_port_handle = connect_port_handle;
1170
1171         ret = send_synchronous_mmal_msg(instance, &m,
1172                                         sizeof(m.u.port_action_handle),
1173                                         &rmsg, &rmsg_handle);
1174         if (ret)
1175                 return ret;
1176
1177         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_ACTION) {
1178                 /* got an unexpected message type in reply */
1179                 ret = -EINVAL;
1180                 goto release_msg;
1181         }
1182
1183         ret = -rmsg->u.port_action_reply.status;
1184
1185         pr_debug("%s:result:%d component:0x%x port:%d action:%s(%d) connect component:0x%x connect port:%d\n",
1186                  __func__,
1187                  ret, port->component->handle, port->handle,
1188                  port_action_type_names[action_type],
1189                  action_type, connect_component_handle, connect_port_handle);
1190
1191 release_msg:
1192         vchiq_release_message(instance->service_handle, rmsg_handle);
1193
1194         return ret;
1195 }
1196
1197 static int port_parameter_set(struct vchiq_mmal_instance *instance,
1198                               struct vchiq_mmal_port *port,
1199                               u32 parameter_id, void *value, u32 value_size)
1200 {
1201         int ret;
1202         struct mmal_msg m;
1203         struct mmal_msg *rmsg;
1204         struct vchiq_header *rmsg_handle;
1205
1206         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_SET;
1207
1208         m.u.port_parameter_set.component_handle = port->component->handle;
1209         m.u.port_parameter_set.port_handle = port->handle;
1210         m.u.port_parameter_set.id = parameter_id;
1211         m.u.port_parameter_set.size = (2 * sizeof(u32)) + value_size;
1212         memcpy(&m.u.port_parameter_set.value, value, value_size);
1213
1214         ret = send_synchronous_mmal_msg(instance, &m,
1215                                         (4 * sizeof(u32)) + value_size,
1216                                         &rmsg, &rmsg_handle);
1217         if (ret)
1218                 return ret;
1219
1220         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_SET) {
1221                 /* got an unexpected message type in reply */
1222                 ret = -EINVAL;
1223                 goto release_msg;
1224         }
1225
1226         ret = -rmsg->u.port_parameter_set_reply.status;
1227
1228         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n",
1229                  __func__,
1230                  ret, port->component->handle, port->handle, parameter_id);
1231
1232 release_msg:
1233         vchiq_release_message(instance->service_handle, rmsg_handle);
1234
1235         return ret;
1236 }
1237
1238 static int port_parameter_get(struct vchiq_mmal_instance *instance,
1239                               struct vchiq_mmal_port *port,
1240                               u32 parameter_id, void *value, u32 *value_size)
1241 {
1242         int ret;
1243         struct mmal_msg m;
1244         struct mmal_msg *rmsg;
1245         struct vchiq_header *rmsg_handle;
1246
1247         m.h.type = MMAL_MSG_TYPE_PORT_PARAMETER_GET;
1248
1249         m.u.port_parameter_get.component_handle = port->component->handle;
1250         m.u.port_parameter_get.port_handle = port->handle;
1251         m.u.port_parameter_get.id = parameter_id;
1252         m.u.port_parameter_get.size = (2 * sizeof(u32)) + *value_size;
1253
1254         ret = send_synchronous_mmal_msg(instance, &m,
1255                                         sizeof(struct
1256                                                mmal_msg_port_parameter_get),
1257                                         &rmsg, &rmsg_handle);
1258         if (ret)
1259                 return ret;
1260
1261         if (rmsg->h.type != MMAL_MSG_TYPE_PORT_PARAMETER_GET) {
1262                 /* got an unexpected message type in reply */
1263                 pr_err("Incorrect reply type %d\n", rmsg->h.type);
1264                 ret = -EINVAL;
1265                 goto release_msg;
1266         }
1267
1268         ret = rmsg->u.port_parameter_get_reply.status;
1269
1270         /* port_parameter_get_reply.size includes the header,
1271          * whilst *value_size doesn't.
1272          */
1273         rmsg->u.port_parameter_get_reply.size -= (2 * sizeof(u32));
1274
1275         if (ret || rmsg->u.port_parameter_get_reply.size > *value_size) {
1276                 /* Copy only as much as we have space for
1277                  * but report true size of parameter
1278                  */
1279                 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1280                        *value_size);
1281         } else {
1282                 memcpy(value, &rmsg->u.port_parameter_get_reply.value,
1283                        rmsg->u.port_parameter_get_reply.size);
1284         }
1285         /* Always report the size of the returned parameter to the caller */
1286         *value_size = rmsg->u.port_parameter_get_reply.size;
1287
1288         pr_debug("%s:result:%d component:0x%x port:%d parameter:%d\n", __func__,
1289                  ret, port->component->handle, port->handle, parameter_id);
1290
1291 release_msg:
1292         vchiq_release_message(instance->service_handle, rmsg_handle);
1293
1294         return ret;
1295 }
1296
1297 /* disables a port and drains buffers from it */
1298 static int port_disable(struct vchiq_mmal_instance *instance,
1299                         struct vchiq_mmal_port *port)
1300 {
1301         int ret;
1302         struct list_head *q, *buf_head;
1303         unsigned long flags = 0;
1304
1305         if (!port->enabled)
1306                 return 0;
1307
1308         port->enabled = 0;
1309
1310         ret = port_action_port(instance, port,
1311                                MMAL_MSG_PORT_ACTION_TYPE_DISABLE);
1312         if (ret == 0) {
1313                 /*
1314                  * Drain all queued buffers on port. This should only
1315                  * apply to buffers that have been queued before the port
1316                  * has been enabled. If the port has been enabled and buffers
1317                  * passed, then the buffers should have been removed from this
1318                  * list, and we should get the relevant callbacks via VCHIQ
1319                  * to release the buffers.
1320                  */
1321                 spin_lock_irqsave(&port->slock, flags);
1322
1323                 list_for_each_safe(buf_head, q, &port->buffers) {
1324                         struct mmal_buffer *mmalbuf;
1325
1326                         mmalbuf = list_entry(buf_head, struct mmal_buffer,
1327                                              list);
1328                         list_del(buf_head);
1329                         if (port->buffer_cb) {
1330                                 mmalbuf->length = 0;
1331                                 mmalbuf->mmal_flags = 0;
1332                                 mmalbuf->dts = MMAL_TIME_UNKNOWN;
1333                                 mmalbuf->pts = MMAL_TIME_UNKNOWN;
1334                                 port->buffer_cb(instance,
1335                                                 port, 0, mmalbuf);
1336                         }
1337                 }
1338
1339                 spin_unlock_irqrestore(&port->slock, flags);
1340
1341                 ret = port_info_get(instance, port);
1342         }
1343
1344         return ret;
1345 }
1346
1347 /* enable a port */
1348 static int port_enable(struct vchiq_mmal_instance *instance,
1349                        struct vchiq_mmal_port *port)
1350 {
1351         unsigned int hdr_count;
1352         struct list_head *q, *buf_head;
1353         int ret;
1354
1355         if (port->enabled)
1356                 return 0;
1357
1358         ret = port_action_port(instance, port,
1359                                MMAL_MSG_PORT_ACTION_TYPE_ENABLE);
1360         if (ret)
1361                 goto done;
1362
1363         port->enabled = 1;
1364
1365         if (port->buffer_cb) {
1366                 /* send buffer headers to videocore */
1367                 hdr_count = 1;
1368                 list_for_each_safe(buf_head, q, &port->buffers) {
1369                         struct mmal_buffer *mmalbuf;
1370
1371                         mmalbuf = list_entry(buf_head, struct mmal_buffer,
1372                                              list);
1373                         ret = buffer_from_host(instance, port, mmalbuf);
1374                         if (ret)
1375                                 goto done;
1376
1377                         list_del(buf_head);
1378                         hdr_count++;
1379                         if (hdr_count > port->current_buffer.num)
1380                                 break;
1381                 }
1382         }
1383
1384         ret = port_info_get(instance, port);
1385
1386 done:
1387         return ret;
1388 }
1389
1390 /* ------------------------------------------------------------------
1391  * Exported API
1392  *------------------------------------------------------------------
1393  */
1394
1395 int vchiq_mmal_port_set_format(struct vchiq_mmal_instance *instance,
1396                                struct vchiq_mmal_port *port)
1397 {
1398         int ret;
1399
1400         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1401                 return -EINTR;
1402
1403         ret = port_info_set(instance, port);
1404         if (ret)
1405                 goto release_unlock;
1406
1407         /* read what has actually been set */
1408         ret = port_info_get(instance, port);
1409
1410 release_unlock:
1411         mutex_unlock(&instance->vchiq_mutex);
1412
1413         return ret;
1414 }
1415 EXPORT_SYMBOL_GPL(vchiq_mmal_port_set_format);
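/*
 * Illustrative sketch only: populating a port format before committing it
 * with vchiq_mmal_port_set_format(). MMAL_ENCODING_I420 is assumed to come
 * from mmal-encodings.h and the 1080p geometry is arbitrary; neither is
 * required by the API.
 *
 *	port->format.encoding = MMAL_ENCODING_I420;
 *	port->es.video.width = 1920;
 *	port->es.video.height = 1080;
 *	port->es.video.crop.x = 0;
 *	port->es.video.crop.y = 0;
 *	port->es.video.crop.width = 1920;
 *	port->es.video.crop.height = 1080;
 *
 *	ret = vchiq_mmal_port_set_format(instance, port);
 */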
1416
1417 int vchiq_mmal_port_parameter_set(struct vchiq_mmal_instance *instance,
1418                                   struct vchiq_mmal_port *port,
1419                                   u32 parameter, void *value, u32 value_size)
1420 {
1421         int ret;
1422
1423         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1424                 return -EINTR;
1425
1426         ret = port_parameter_set(instance, port, parameter, value, value_size);
1427
1428         mutex_unlock(&instance->vchiq_mutex);
1429
1430         return ret;
1431 }
1432 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_set);
1433
1434 int vchiq_mmal_port_parameter_get(struct vchiq_mmal_instance *instance,
1435                                   struct vchiq_mmal_port *port,
1436                                   u32 parameter, void *value, u32 *value_size)
1437 {
1438         int ret;
1439
1440         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1441                 return -EINTR;
1442
1443         ret = port_parameter_get(instance, port, parameter, value, value_size);
1444
1445         mutex_unlock(&instance->vchiq_mutex);
1446
1447         return ret;
1448 }
1449 EXPORT_SYMBOL_GPL(vchiq_mmal_port_parameter_get);
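/*
 * Illustrative sketch only: a parameter set/get round trip on a port.
 * MMAL_PARAMETER_ZERO_COPY is assumed to be available from
 * mmal-parameters.h; any u32 parameter id with a u32 payload would do.
 *
 *	u32 enable = 1;
 *	u32 readback = 0;
 *	u32 size = sizeof(readback);
 *
 *	ret = vchiq_mmal_port_parameter_set(instance, port,
 *					    MMAL_PARAMETER_ZERO_COPY,
 *					    &enable, sizeof(enable));
 *	if (!ret)
 *		ret = vchiq_mmal_port_parameter_get(instance, port,
 *						    MMAL_PARAMETER_ZERO_COPY,
 *						    &readback, &size);
 */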
1450
1451 /* enable a port
1452  *
1453  * Enables a port and, if a callback handler is provided, queues any
1454  * pending buffers so their completions are delivered via that callback.
1455  */
1456 int vchiq_mmal_port_enable(struct vchiq_mmal_instance *instance,
1457                            struct vchiq_mmal_port *port,
1458                            vchiq_mmal_buffer_cb buffer_cb)
1459 {
1460         int ret;
1461
1462         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1463                 return -EINTR;
1464
1465         /* already enabled - noop */
1466         if (port->enabled) {
1467                 ret = 0;
1468                 goto unlock;
1469         }
1470
1471         port->buffer_cb = buffer_cb;
1472
1473         ret = port_enable(instance, port);
1474
1475 unlock:
1476         mutex_unlock(&instance->vchiq_mutex);
1477
1478         return ret;
1479 }
1480 EXPORT_SYMBOL_GPL(vchiq_mmal_port_enable);
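/*
 * Illustrative sketch only: enabling an output port with a buffer callback
 * and disabling it again. "my_buffer_cb" is a hypothetical handler whose
 * prototype is assumed to match the buffer_cb invocation in port_disable()
 * above (instance, port, status, buffer).
 *
 *	static void my_buffer_cb(struct vchiq_mmal_instance *instance,
 *				 struct vchiq_mmal_port *port, int status,
 *				 struct mmal_buffer *buf)
 *	{
 *		... hand the completed buffer back to the capture queue ...
 *	}
 *
 *	ret = vchiq_mmal_port_enable(instance, out_port, my_buffer_cb);
 *	...
 *	ret = vchiq_mmal_port_disable(instance, out_port);
 */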
1481
1482 int vchiq_mmal_port_disable(struct vchiq_mmal_instance *instance,
1483                             struct vchiq_mmal_port *port)
1484 {
1485         int ret;
1486
1487         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1488                 return -EINTR;
1489
1490         if (!port->enabled) {
1491                 mutex_unlock(&instance->vchiq_mutex);
1492                 return 0;
1493         }
1494
1495         ret = port_disable(instance, port);
1496
1497         mutex_unlock(&instance->vchiq_mutex);
1498
1499         return ret;
1500 }
1501 EXPORT_SYMBOL_GPL(vchiq_mmal_port_disable);
1502
1503 /* Ports are connected in tunnelled mode, so data buffers flow directly
1504  * between components on the VPU and are not handled by the client.
1505  */
1506 int vchiq_mmal_port_connect_tunnel(struct vchiq_mmal_instance *instance,
1507                                    struct vchiq_mmal_port *src,
1508                                    struct vchiq_mmal_port *dst)
1509 {
1510         int ret;
1511
1512         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1513                 return -EINTR;
1514
1515         /* disconnect ports if connected */
1516         if (src->connected) {
1517                 ret = port_disable(instance, src);
1518                 if (ret) {
1519                         pr_err("failed disabling src port (%d)\n", ret);
1520                         goto release_unlock;
1521                 }
1522
1523                 /* No need to disable the destination port explicitly;
1524                  * disabling the connected source port does so automatically.
1525                  */
1526
1527                 ret = port_action_handle(instance, src,
1528                                          MMAL_MSG_PORT_ACTION_TYPE_DISCONNECT,
1529                                          src->connected->component->handle,
1530                                          src->connected->handle);
1531                 if (ret < 0) {
1532                         pr_err("failed disconnecting src port\n");
1533                         goto release_unlock;
1534                 }
1535                 src->connected->enabled = 0;
1536                 src->connected = NULL;
1537         }
1538
1539         if (!dst) {
1540                 /* do not make new connection */
1541                 ret = 0;
1542                 pr_debug("not making new connection\n");
1543                 goto release_unlock;
1544         }
1545
1546         /* copy src port format to dst */
1547         dst->format.encoding = src->format.encoding;
1548         dst->es.video.width = src->es.video.width;
1549         dst->es.video.height = src->es.video.height;
1550         dst->es.video.crop.x = src->es.video.crop.x;
1551         dst->es.video.crop.y = src->es.video.crop.y;
1552         dst->es.video.crop.width = src->es.video.crop.width;
1553         dst->es.video.crop.height = src->es.video.crop.height;
1554         dst->es.video.frame_rate.num = src->es.video.frame_rate.num;
1555         dst->es.video.frame_rate.den = src->es.video.frame_rate.den;
1556
1557         /* set new format */
1558         ret = port_info_set(instance, dst);
1559         if (ret) {
1560                 pr_debug("setting port info failed\n");
1561                 goto release_unlock;
1562         }
1563
1564         /* read what has actually been set */
1565         ret = port_info_get(instance, dst);
1566         if (ret) {
1567                 pr_debug("read back port info failed\n");
1568                 goto release_unlock;
1569         }
1570
1571         /* connect two ports together */
1572         ret = port_action_handle(instance, src,
1573                                  MMAL_MSG_PORT_ACTION_TYPE_CONNECT,
1574                                  dst->component->handle, dst->handle);
1575         if (ret < 0) {
1576                 pr_debug("connecting port %d:%d to %d:%d failed\n",
1577                          src->component->handle, src->handle,
1578                          dst->component->handle, dst->handle);
1579                 goto release_unlock;
1580         }
1581         src->connected = dst;
1582
1583 release_unlock:
1584
1585         mutex_unlock(&instance->vchiq_mutex);
1586
1587         return ret;
1588 }
1589 EXPORT_SYMBOL_GPL(vchiq_mmal_port_connect_tunnel);
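/*
 * Illustrative sketch only: tunnelling one component's output into another
 * component's input, then breaking the tunnel. "cam" and "enc" are
 * hypothetical components assumed to have been created with
 * vchiq_mmal_component_init().
 *
 *	ret = vchiq_mmal_port_connect_tunnel(instance, &cam->output[0],
 *					     &enc->input[0]);
 *	...
 *	(a NULL destination only disconnects an existing tunnel)
 *	ret = vchiq_mmal_port_connect_tunnel(instance, &cam->output[0], NULL);
 */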
1590
1591 int vchiq_mmal_submit_buffer(struct vchiq_mmal_instance *instance,
1592                              struct vchiq_mmal_port *port,
1593                              struct mmal_buffer *buffer)
1594 {
1595         unsigned long flags = 0;
1596         int ret;
1597
1598         ret = buffer_from_host(instance, port, buffer);
1599         if (ret == -EINVAL) {
1600                 /* Port is disabled. Queue for when it is enabled. */
1601                 spin_lock_irqsave(&port->slock, flags);
1602                 list_add_tail(&buffer->list, &port->buffers);
1603                 spin_unlock_irqrestore(&port->slock, flags);
1604         }
1605
1606         return 0;
1607 }
1608 EXPORT_SYMBOL_GPL(vchiq_mmal_submit_buffer);
1609
1610 int mmal_vchi_buffer_init(struct vchiq_mmal_instance *instance,
1611                           struct mmal_buffer *buf)
1612 {
1613         struct mmal_msg_context *msg_context = get_msg_context(instance);
1614
1615         if (IS_ERR(msg_context))
1616                 return PTR_ERR(msg_context);
1617
1618         buf->msg_context = msg_context;
1619         return 0;
1620 }
1621 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_init);
1622
1623 int mmal_vchi_buffer_cleanup(struct mmal_buffer *buf)
1624 {
1625         struct mmal_msg_context *msg_context = buf->msg_context;
1626
1627         if (msg_context)
1628                 release_msg_context(msg_context);
1629         buf->msg_context = NULL;
1630
1631         return 0;
1632 }
1633 EXPORT_SYMBOL_GPL(mmal_vchi_buffer_cleanup);
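/*
 * Illustrative sketch only: the expected life cycle of a host buffer. "buf"
 * wraps host-visible memory owned by the caller; the msg_context allocated
 * by mmal_vchi_buffer_init() must be released with mmal_vchi_buffer_cleanup()
 * once the buffer will no longer be submitted.
 *
 *	ret = mmal_vchi_buffer_init(instance, buf);
 *	if (ret)
 *		return ret;
 *
 *	ret = vchiq_mmal_submit_buffer(instance, port, buf);
 *	... the buffer is returned via the port's buffer callback ...
 *
 *	mmal_vchi_buffer_cleanup(buf);
 */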
1634
1635 /*
1636  * Initialise a mmal component and its ports.
1637  */
1638 int vchiq_mmal_component_init(struct vchiq_mmal_instance *instance,
1639                               const char *name,
1640                               struct vchiq_mmal_component **component_out)
1641 {
1642         int ret;
1643         int idx;                /* port index */
1644         struct vchiq_mmal_component *component = NULL;
1645
1646         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1647                 return -EINTR;
1648
1649         for (idx = 0; idx < VCHIQ_MMAL_MAX_COMPONENTS; idx++) {
1650                 if (!instance->component[idx].in_use) {
1651                         component = &instance->component[idx];
1652                         component->in_use = 1;
1653                         break;
1654                 }
1655         }
1656
1657         if (!component) {
1658                 ret = -EINVAL;  /* todo is this correct error? */
1659                 goto unlock;
1660         }
1661
1662         /* We need a handle to reference back to our component structure.
1663          * Use the array index in instance->component rather than rolling
1664          * another IDR.
1665          */
1666         component->client_component = idx;
1667
1668         ret = create_component(instance, component, name);
1669         if (ret < 0) {
1670                 pr_err("%s: failed to create component %d (Not enough GPU mem?)\n",
1671                        __func__, ret);
1672                 goto unlock;
1673         }
1674
1675         /* ports info needs gathering */
1676         component->control.type = MMAL_PORT_TYPE_CONTROL;
1677         component->control.index = 0;
1678         component->control.component = component;
1679         spin_lock_init(&component->control.slock);
1680         INIT_LIST_HEAD(&component->control.buffers);
1681         ret = port_info_get(instance, &component->control);
1682         if (ret < 0)
1683                 goto release_component;
1684
1685         for (idx = 0; idx < component->inputs; idx++) {
1686                 component->input[idx].type = MMAL_PORT_TYPE_INPUT;
1687                 component->input[idx].index = idx;
1688                 component->input[idx].component = component;
1689                 spin_lock_init(&component->input[idx].slock);
1690                 INIT_LIST_HEAD(&component->input[idx].buffers);
1691                 ret = port_info_get(instance, &component->input[idx]);
1692                 if (ret < 0)
1693                         goto release_component;
1694         }
1695
1696         for (idx = 0; idx < component->outputs; idx++) {
1697                 component->output[idx].type = MMAL_PORT_TYPE_OUTPUT;
1698                 component->output[idx].index = idx;
1699                 component->output[idx].component = component;
1700                 spin_lock_init(&component->output[idx].slock);
1701                 INIT_LIST_HEAD(&component->output[idx].buffers);
1702                 ret = port_info_get(instance, &component->output[idx]);
1703                 if (ret < 0)
1704                         goto release_component;
1705         }
1706
1707         for (idx = 0; idx < component->clocks; idx++) {
1708                 component->clock[idx].type = MMAL_PORT_TYPE_CLOCK;
1709                 component->clock[idx].index = idx;
1710                 component->clock[idx].component = component;
1711                 spin_lock_init(&component->clock[idx].slock);
1712                 INIT_LIST_HEAD(&component->clock[idx].buffers);
1713                 ret = port_info_get(instance, &component->clock[idx]);
1714                 if (ret < 0)
1715                         goto release_component;
1716         }
1717
1718         *component_out = component;
1719
1720         mutex_unlock(&instance->vchiq_mutex);
1721
1722         return 0;
1723
1724 release_component:
1725         destroy_component(instance, component);
1726 unlock:
1727         if (component)
1728                 component->in_use = 0;
1729         mutex_unlock(&instance->vchiq_mutex);
1730
1731         return ret;
1732 }
1733 EXPORT_SYMBOL_GPL(vchiq_mmal_component_init);
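/*
 * Illustrative sketch only: creating a component by name and enabling it.
 * The name "ril.camera" is an example of the component names understood by
 * the VPU firmware (an assumption here, not defined by this file).
 *
 *	struct vchiq_mmal_component *cam;
 *
 *	ret = vchiq_mmal_component_init(instance, "ril.camera", &cam);
 *	if (ret)
 *		return ret;
 *
 *	... configure cam->control / cam->output[] as required ...
 *
 *	ret = vchiq_mmal_component_enable(instance, cam);
 */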
1734
1735 /*
1736  * cause a mmal component to be destroyed
1737  */
1738 int vchiq_mmal_component_finalise(struct vchiq_mmal_instance *instance,
1739                                   struct vchiq_mmal_component *component)
1740 {
1741         int ret;
1742
1743         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1744                 return -EINTR;
1745
1746         if (component->enabled)
1747                 ret = disable_component(instance, component);
1748
1749         ret = destroy_component(instance, component);
1750
1751         component->in_use = 0;
1752
1753         mutex_unlock(&instance->vchiq_mutex);
1754
1755         return ret;
1756 }
1757 EXPORT_SYMBOL_GPL(vchiq_mmal_component_finalise);
1758
1759 /*
1760  * cause a mmal component to be enabled
1761  */
1762 int vchiq_mmal_component_enable(struct vchiq_mmal_instance *instance,
1763                                 struct vchiq_mmal_component *component)
1764 {
1765         int ret;
1766
1767         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1768                 return -EINTR;
1769
1770         if (component->enabled) {
1771                 mutex_unlock(&instance->vchiq_mutex);
1772                 return 0;
1773         }
1774
1775         ret = enable_component(instance, component);
1776         if (ret == 0)
1777                 component->enabled = true;
1778
1779         mutex_unlock(&instance->vchiq_mutex);
1780
1781         return ret;
1782 }
1783 EXPORT_SYMBOL_GPL(vchiq_mmal_component_enable);
1784
1785 /*
1786  * cause a mmal component to be disabled
1787  */
1788 int vchiq_mmal_component_disable(struct vchiq_mmal_instance *instance,
1789                                  struct vchiq_mmal_component *component)
1790 {
1791         int ret;
1792
1793         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1794                 return -EINTR;
1795
1796         if (!component->enabled) {
1797                 mutex_unlock(&instance->vchiq_mutex);
1798                 return 0;
1799         }
1800
1801         ret = disable_component(instance, component);
1802         if (ret == 0)
1803                 component->enabled = false;
1804
1805         mutex_unlock(&instance->vchiq_mutex);
1806
1807         return ret;
1808 }
1809 EXPORT_SYMBOL_GPL(vchiq_mmal_component_disable);
1810
1811 int vchiq_mmal_version(struct vchiq_mmal_instance *instance,
1812                        u32 *major_out, u32 *minor_out)
1813 {
1814         int ret;
1815
1816         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1817                 return -EINTR;
1818
1819         ret = get_version(instance, major_out, minor_out);
1820
1821         mutex_unlock(&instance->vchiq_mutex);
1822
1823         return ret;
1824 }
1825 EXPORT_SYMBOL_GPL(vchiq_mmal_version);
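/*
 * Illustrative sketch only: querying the MMAL protocol version reported by
 * the VPU.
 *
 *	u32 major = 0, minor = 0;
 *
 *	ret = vchiq_mmal_version(instance, &major, &minor);
 *	if (!ret)
 *		pr_debug("mmal protocol version %u.%u\n", major, minor);
 */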
1826
1827 int vchiq_mmal_finalise(struct vchiq_mmal_instance *instance)
1828 {
1829         int status = 0;
1830
1831         if (!instance)
1832                 return -EINVAL;
1833
1834         if (mutex_lock_interruptible(&instance->vchiq_mutex))
1835                 return -EINTR;
1836
1837         vchi_service_use(instance->service_handle);
1838
1839         status = vchi_service_close(instance->service_handle);
1840         if (status != 0)
1841                 pr_err("mmal-vchiq: VCHIQ close failed\n");
1842
1843         mutex_unlock(&instance->vchiq_mutex);
1844
1845         flush_workqueue(instance->bulk_wq);
1846         destroy_workqueue(instance->bulk_wq);
1847
1848         vfree(instance->bulk_scratch);
1849
1850         idr_destroy(&instance->context_map);
1851
1852         kfree(instance);
1853
1854         return status;
1855 }
1856 EXPORT_SYMBOL_GPL(vchiq_mmal_finalise);
1857
1858 int vchiq_mmal_init(struct vchiq_mmal_instance **out_instance)
1859 {
1860         int status;
1861         struct vchiq_mmal_instance *instance;
1862         static struct vchiq_instance *vchiq_instance;
1863         struct vchiq_service_params params = {
1864                 .version                = VC_MMAL_VER,
1865                 .version_min            = VC_MMAL_MIN_VER,
1866                 .fourcc                 = VC_MMAL_SERVER_NAME,
1867                 .callback               = service_callback,
1868                 .userdata               = NULL,
1869         };
1870
1871         /* Compile-time checks to ensure structure sizes are correct, as
1872          * the structures are (de)serialised directly from memory.
1873          */
1874
1875         /* ensure the header structure has packed to the correct size */
1876         BUILD_BUG_ON(sizeof(struct mmal_msg_header) != 24);
1877
1878         /* ensure message structure does not exceed maximum length */
1879         BUILD_BUG_ON(sizeof(struct mmal_msg) > MMAL_MSG_MAX_SIZE);
1880
1881         /* mmal port struct is correct size */
1882         BUILD_BUG_ON(sizeof(struct mmal_port) != 64);
1883
1884         /* create a vchi instance */
1885         status = vchi_initialise(&vchiq_instance);
1886         if (status) {
1887                 pr_err("Failed to initialise VCHI instance (status=%d)\n",
1888                        status);
1889                 return -EIO;
1890         }
1891
1892         status = vchi_connect(vchiq_instance);
1893         if (status) {
1894                 pr_err("Failed to connect VCHI instance (status=%d)\n", status);
1895                 return -EIO;
1896         }
1897
1898         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
1899
1900         if (!instance)
1901                 return -ENOMEM;
1902
1903         mutex_init(&instance->vchiq_mutex);
1904
1905         instance->bulk_scratch = vmalloc(PAGE_SIZE);
1906
1907         mutex_init(&instance->context_map_lock);
1908         idr_init_base(&instance->context_map, 1);
1909
1910         params.userdata = instance;
1911
1912         instance->bulk_wq = alloc_ordered_workqueue("mmal-vchiq",
1913                                                     WQ_MEM_RECLAIM);
1914         if (!instance->bulk_wq)
1915                 goto err_free;
1916
1917         status = vchi_service_open(vchiq_instance, &params,
1918                                    &instance->service_handle);
1919         if (status) {
1920                 pr_err("Failed to open VCHI service connection (status=%d)\n",
1921                        status);
1922                 goto err_close_services;
1923         }
1924
1925         vchi_service_release(instance->service_handle);
1926
1927         *out_instance = instance;
1928
1929         return 0;
1930
1931 err_close_services:
1932         vchi_service_close(instance->service_handle);
1933         destroy_workqueue(instance->bulk_wq);
1934 err_free:
1935         vfree(instance->bulk_scratch);
1936         kfree(instance);
1937         return -ENODEV;
1938 }
1939 EXPORT_SYMBOL_GPL(vchiq_mmal_init);
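/*
 * Illustrative sketch only: the overall instance life cycle as seen by a
 * client such as the bcm2835 V4L2 camera driver (error handling trimmed).
 *
 *	struct vchiq_mmal_instance *instance;
 *	struct vchiq_mmal_component *component;
 *
 *	ret = vchiq_mmal_init(&instance);
 *	if (ret)
 *		return ret;
 *
 *	ret = vchiq_mmal_component_init(instance, name, &component);
 *	...
 *	vchiq_mmal_component_finalise(instance, component);
 *	vchiq_mmal_finalise(instance);
 */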
1940
1941 MODULE_DESCRIPTION("BCM2835 MMAL VCHIQ interface");
1942 MODULE_AUTHOR("Dave Stevenson, <dave.stevenson@raspberrypi.org>");
1943 MODULE_LICENSE("GPL");