Merge branches 'clk-range', 'clk-uniphier', 'clk-apple' and 'clk-qcom' into clk-next
[linux-2.6-microblaze.git] / drivers / staging / vc04_services / interface / vchiq_arm / vchiq_arm.c
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
4  * Copyright (c) 2010-2012 Broadcom. All rights reserved.
5  */
6
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
13 #include <linux/fs.h>
14 #include <linux/device.h>
15 #include <linux/mm.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
21 #include <linux/of.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
29 #include <linux/io.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
32
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
39
#define DEVICE_NAME "vchiq"

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

/* Indices into vchiq_slot_zero->platform_data[] (see vchiq_platform_init) */
#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX  1

/* Doorbell register offsets: BELL0 is read by the ARM IRQ handler,
 * BELL2 is written to interrupt the VPU (see remote_event_signal).
 */
#define BELL0   0x00
#define BELL2   0x08

/* BELL0 status bit indicating the doorbell was actually rung */
#define ARM_DS_ACTIVE   BIT(2)

/* Override the default prefix, which would be vchiq_arm (from the filename) */
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX DEVICE_NAME "."

#define KEEPALIVE_VER 1
#define KEEPALIVE_VER_MIN KEEPALIVE_VER
60
/* Run time control of log level, based on KERN_XXX level. */
int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
int vchiq_susp_log_level = VCHIQ_LOG_ERROR;

DEFINE_SPINLOCK(msg_queue_spinlock);
/* The single global VCHIQ connection state. */
struct vchiq_state g_state;

/* Child platform devices for the camera and audio services.
 * NOTE(review): their registration is not visible in this chunk.
 */
static struct platform_device *bcm2835_camera;
static struct platform_device *bcm2835_audio;
70
/* Per-SoC constants attached to the platform device as drvdata. */
struct vchiq_drvdata {
	/* VPU L2 cache line size; governs pagelist fragment alignment */
	const unsigned int cache_line_size;
	/* handle used for rpi_firmware_property() calls */
	struct rpi_firmware *fw;
};

static struct vchiq_drvdata bcm2835_drvdata = {
	.cache_line_size = 32,
};

static struct vchiq_drvdata bcm2836_drvdata = {
	.cache_line_size = 64,
};
83
/* ARM-side bookkeeping (keepalive and suspend/resume) for a vchiq_state. */
struct vchiq_arm_state {
	/* Keepalive-related data.
	 * NOTE(review): the counters are consumed by the keepalive thread,
	 * whose implementation is not visible in this chunk - confirm
	 * their exact semantics against the rest of the file.
	 */
	struct task_struct *ka_thread;
	struct completion ka_evt;
	atomic_t ka_use_count;
	atomic_t ka_use_ack_count;
	atomic_t ka_release_count;

	/* Guards the suspend/resume-related fields below. */
	rwlock_t susp_res_lock;

	/* Back-pointer to the owning vchiq_state. */
	struct vchiq_state *state;

	/*
	 * Global use count for videocore.
	 * This is equal to the sum of the use counts for all services.  When
	 * this hits zero the videocore suspend procedure will be initiated.
	 */
	int videocore_use_count;

	/*
	 * Use count to track requests from videocore peer.
	 * This use count is not associated with a service, so needs to be
	 * tracked separately with the state.
	 */
	int peer_use_count;

	/*
	 * Flag to indicate that the first vchiq connect has made it through.
	 * This means that both sides should be fully ready, and we should
	 * be able to suspend after this point.
	 */
	int first_connect;
};
117
/* Platform-private state hung off vchiq_state->platform_state. */
struct vchiq_2835_state {
	/* Non-zero once vchiq_platform_init_state() has initialised this */
	int inited;
	struct vchiq_arm_state arm_state;
};
122
/*
 * Driver-side bookkeeping for one in-flight pagelist.  Lives at the tail
 * of the same coherent allocation as the pagelist itself - see
 * create_pagelist().
 */
struct vchiq_pagelist_info {
	struct pagelist *pagelist;	/* the pagelist proper */
	size_t pagelist_buffer_size;	/* size of the whole coherent buffer */
	dma_addr_t dma_addr;		/* bus address of that buffer */
	enum dma_data_direction dma_dir; /* TO_DEVICE (write) or FROM_DEVICE (read) */
	unsigned int num_pages;
	unsigned int pages_need_release; /* set when pages came from pin_user_pages_fast() */
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped; /* set while the scatterlist is DMA-mapped */
};
134
static void __iomem *g_regs;	/* doorbell register block (BELL0/BELL2) */
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect.  That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;		/* always 2 * g_cache_line_size */
static char *g_fragments_base;			/* base of the fragment pool */
static char *g_free_fragments;			/* head of the in-band free list */
static struct semaphore g_free_fragments_sema;	/* counts free fragment slots */
static struct device *g_dev;			/* device used for DMA API calls */

/* Serialises access to the g_free_fragments list (semaphore used as mutex). */
static DEFINE_SEMAPHORE(g_free_fragments_mutex);

/* Implemented later in this file. */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
			     unsigned int size, enum vchiq_bulk_dir dir);
158
/*
 * IRQ handler for the VPU->ARM doorbell.  Reading BELL0 also clears the
 * interrupt; if ARM_DS_ACTIVE is set the doorbell really was rung, so
 * poll all remote events.  Returns IRQ_NONE otherwise so a shared line
 * is handled correctly.
 */
static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	if (status & ARM_DS_ACTIVE) {  /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}
176
/*
 * Undo everything create_pagelist() set up: unmap the scatterlist if it
 * was DMA-mapped, unpin any user pages that were pinned, and free the
 * single coherent buffer holding the pagelist and its bookkeeping.
 */
static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	/* Frees the pagelist, pages[], scatterlist and pagelistinfo itself */
	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}
191
192 /* There is a potential problem with partial cache lines (pages?)
193  * at the ends of the block when reading. If the CPU accessed anything in
194  * the same line (page?) then it may have pulled old data into the cache,
195  * obscuring the new data underneath. We can solve this by transferring the
196  * partial cache lines separately, and allowing the ARM to copy into the
197  * cached area.
198  */
199
/*
 * Build the pagelist and DMA bookkeeping for one bulk transfer.
 * Exactly one of @buf (kernel/vmalloc memory) and @ubuf (user memory)
 * is used.  The addrs[] array, the page-pointer array, the scatterlist
 * and the vchiq_pagelist_info all live in a single dma_alloc_coherent()
 * allocation so that one free releases everything.
 *
 * Returns the bookkeeping struct, or NULL on any failure (size
 * overflow, allocation, page pinning, DMA mapping, or an interrupted
 * wait for a fragment slot).
 */
static struct vchiq_pagelist_info *
create_pagelist(char *buf, char __user *ubuf,
		size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	if (buf)
		offset = (uintptr_t)buf & (PAGE_SIZE - 1);
	else
		offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	/* Reject page counts whose combined bookkeeping would overflow size_t */
	if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0]) +
			(num_pages * sizeof(struct scatterlist))) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	/* Carve the single allocation into its four regions */
	addrs		= pagelist->addrs;
	pages		= (struct page **)(addrs + num_pages);
	scatterlist	= (struct scatterlist *)(pages + num_pages);
	pagelistinfo	= (struct vchiq_pagelist_info *)
			  (scatterlist + num_pages);

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir =  (type == PAGELIST_WRITE) ?
				  DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (buf) {
		/* Kernel buffer: look up the backing pages directly */
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((buf +
						 (actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		/* User buffer: pin the pages for the duration of the DMA */
		actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
						   type == PAGELIST_READ, pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		 /* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 *  is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				(((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	    ((pagelist->offset + pagelist->length) &
	    (g_cache_line_size - 1)))) {
		char *fragments;

		/* May sleep until a fragment slot is free */
		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		WARN_ON(!fragments);
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		/* Encode the slot index into the type; free_pagelist()
		 * decodes it to find the fragment buffer again.
		 */
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
387
/*
 * Release a pagelist set up by create_pagelist() after the bulk
 * transfer completes.  @actual is the number of bytes actually
 * transferred; the fragment copy-back is skipped when it is negative.
 */
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		/* Recover the fragment slot create_pagelist() encoded
		 * into pagelist->type.
		 */
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);

		/* Copy the leading partial cache line into the first page */
		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		/* Copy the trailing partial cache line into the last page */
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				(PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		/* Return the fragment slot to the free list */
		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}
457
/*
 * Platform probe: allocate the shared slot memory and the fragment
 * pool, initialise the core state, wire up the doorbell IRQ, and tell
 * the VideoCore firmware where the slots live.  Returns 0 on success
 * or a negative errno.
 */
int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));

	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	/* Tell the VPU where the fragment pool lives (after the slots) */
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	/* Thread the fragment slots into a singly-linked free list */
	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);

	err = vchiq_init_state(state, vchiq_slot_zero);
	if (err)
		return err;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	/* NOTE(review): irq == 0 would be returned as "success" here;
	 * platform_get_irq() is documented not to return 0 - confirm.
	 */
	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
	/* The firmware is expected to clear channelbase on success */
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}
550
551 static void
552 vchiq_arm_init_state(struct vchiq_state *state,
553                      struct vchiq_arm_state *arm_state)
554 {
555         if (arm_state) {
556                 rwlock_init(&arm_state->susp_res_lock);
557
558                 init_completion(&arm_state->ka_evt);
559                 atomic_set(&arm_state->ka_use_count, 0);
560                 atomic_set(&arm_state->ka_use_ack_count, 0);
561                 atomic_set(&arm_state->ka_release_count, 0);
562
563                 arm_state->state = state;
564                 arm_state->first_connect = 0;
565         }
566 }
567
568 int
569 vchiq_platform_init_state(struct vchiq_state *state)
570 {
571         struct vchiq_2835_state *platform_state;
572
573         state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
574         if (!state->platform_state)
575                 return -ENOMEM;
576
577         platform_state = (struct vchiq_2835_state *)state->platform_state;
578
579         platform_state->inited = 1;
580         vchiq_arm_init_state(state, &platform_state->arm_state);
581
582         return 0;
583 }
584
585 struct vchiq_arm_state*
586 vchiq_platform_get_arm_state(struct vchiq_state *state)
587 {
588         struct vchiq_2835_state *platform_state;
589
590         platform_state   = (struct vchiq_2835_state *)state->platform_state;
591
592         WARN_ON_ONCE(!platform_state->inited);
593
594         return &platform_state->arm_state;
595 }
596
/*
 * Signal a remote (VPU-side) event and, if the peer has armed the
 * event, ring the ARM->VPU doorbell (BELL2).
 */
void
remote_event_signal(struct remote_event *event)
{
	/* Make preceding writes visible before 'fired' is set */
	wmb();

	event->fired = 1;

	dsb(sy);         /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}
609
610 int
611 vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
612                         void __user *uoffset, int size, int dir)
613 {
614         struct vchiq_pagelist_info *pagelistinfo;
615
616         pagelistinfo = create_pagelist(offset, uoffset, size,
617                                        (dir == VCHIQ_BULK_RECEIVE)
618                                        ? PAGELIST_READ
619                                        : PAGELIST_WRITE);
620
621         if (!pagelistinfo)
622                 return -ENOMEM;
623
624         bulk->data = pagelistinfo->dma_addr;
625
626         /*
627          * Store the pagelistinfo address in remote_data,
628          * which isn't used by the slave.
629          */
630         bulk->remote_data = pagelistinfo;
631
632         return 0;
633 }
634
635 void
636 vchiq_complete_bulk(struct vchiq_bulk *bulk)
637 {
638         if (bulk && bulk->remote_data && bulk->actual)
639                 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
640                               bulk->actual);
641 }
642
/* Emit a one-line platform description into the dump context. */
int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len = snprintf(buf, sizeof(buf), "%s",
			   "  Platform: 2835 (VC master)");

	/* +1 so the terminating NUL is included in the dump */
	return vchiq_dump(dump_context, buf, len + 1);
}
651
652 #define VCHIQ_INIT_RETRIES 10
653 int vchiq_initialise(struct vchiq_instance **instance_out)
654 {
655         struct vchiq_state *state;
656         struct vchiq_instance *instance = NULL;
657         int i, ret;
658
659         /*
660          * VideoCore may not be ready due to boot up timing.
661          * It may never be ready if kernel and firmware are mismatched,so don't
662          * block forever.
663          */
664         for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
665                 state = vchiq_get_state();
666                 if (state)
667                         break;
668                 usleep_range(500, 600);
669         }
670         if (i == VCHIQ_INIT_RETRIES) {
671                 vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
672                 ret = -ENOTCONN;
673                 goto failed;
674         } else if (i > 0) {
675                 vchiq_log_warning(vchiq_core_log_level,
676                                   "%s: videocore initialized after %d retries\n", __func__, i);
677         }
678
679         instance = kzalloc(sizeof(*instance), GFP_KERNEL);
680         if (!instance) {
681                 vchiq_log_error(vchiq_core_log_level,
682                                 "%s: error allocating vchiq instance\n", __func__);
683                 ret = -ENOMEM;
684                 goto failed;
685         }
686
687         instance->connected = 0;
688         instance->state = state;
689         mutex_init(&instance->bulk_waiter_list_mutex);
690         INIT_LIST_HEAD(&instance->bulk_waiter_list);
691
692         *instance_out = instance;
693
694         ret = 0;
695
696 failed:
697         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
698
699         return ret;
700 }
701 EXPORT_SYMBOL(vchiq_initialise);
702
703 void free_bulk_waiter(struct vchiq_instance *instance)
704 {
705         struct bulk_waiter_node *waiter, *next;
706
707         list_for_each_entry_safe(waiter, next,
708                                  &instance->bulk_waiter_list, list) {
709                 list_del(&waiter->list);
710                 vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
711                                waiter, waiter->pid);
712                 kfree(waiter);
713         }
714 }
715
716 enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
717 {
718         enum vchiq_status status = VCHIQ_SUCCESS;
719         struct vchiq_state *state = instance->state;
720
721         if (mutex_lock_killable(&state->mutex))
722                 return VCHIQ_RETRY;
723
724         /* Remove all services */
725         vchiq_shutdown_internal(state, instance);
726
727         mutex_unlock(&state->mutex);
728
729         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
730
731         free_bulk_waiter(instance);
732         kfree(instance);
733
734         return status;
735 }
736 EXPORT_SYMBOL(vchiq_shutdown);
737
738 static int vchiq_is_connected(struct vchiq_instance *instance)
739 {
740         return instance->connected;
741 }
742
743 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
744 {
745         enum vchiq_status status;
746         struct vchiq_state *state = instance->state;
747
748         if (mutex_lock_killable(&state->mutex)) {
749                 vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
750                 status = VCHIQ_RETRY;
751                 goto failed;
752         }
753         status = vchiq_connect_internal(state, instance);
754
755         if (status == VCHIQ_SUCCESS)
756                 instance->connected = 1;
757
758         mutex_unlock(&state->mutex);
759
760 failed:
761         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
762
763         return status;
764 }
765 EXPORT_SYMBOL(vchiq_connect);
766
767 static enum vchiq_status
768 vchiq_add_service(struct vchiq_instance *instance,
769                   const struct vchiq_service_params_kernel *params,
770                   unsigned int *phandle)
771 {
772         enum vchiq_status status;
773         struct vchiq_state *state = instance->state;
774         struct vchiq_service *service = NULL;
775         int srvstate;
776
777         *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
778
779         srvstate = vchiq_is_connected(instance)
780                 ? VCHIQ_SRVSTATE_LISTENING
781                 : VCHIQ_SRVSTATE_HIDDEN;
782
783         service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
784
785         if (service) {
786                 *phandle = service->handle;
787                 status = VCHIQ_SUCCESS;
788         } else {
789                 status = VCHIQ_ERROR;
790         }
791
792         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
793
794         return status;
795 }
796
797 enum vchiq_status
798 vchiq_open_service(struct vchiq_instance *instance,
799                    const struct vchiq_service_params_kernel *params,
800                    unsigned int *phandle)
801 {
802         enum vchiq_status   status = VCHIQ_ERROR;
803         struct vchiq_state   *state = instance->state;
804         struct vchiq_service *service = NULL;
805
806         *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
807
808         if (!vchiq_is_connected(instance))
809                 goto failed;
810
811         service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
812
813         if (service) {
814                 *phandle = service->handle;
815                 status = vchiq_open_service_internal(service, current->pid);
816                 if (status != VCHIQ_SUCCESS) {
817                         vchiq_remove_service(service->handle);
818                         *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
819                 }
820         }
821
822 failed:
823         vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
824
825         return status;
826 }
827 EXPORT_SYMBOL(vchiq_open_service);
828
829 enum vchiq_status
830 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
831                     void *userdata, enum vchiq_bulk_mode mode)
832 {
833         enum vchiq_status status;
834
835         while (1) {
836                 switch (mode) {
837                 case VCHIQ_BULK_MODE_NOCALLBACK:
838                 case VCHIQ_BULK_MODE_CALLBACK:
839                         status = vchiq_bulk_transfer(handle,
840                                                      (void *)data, NULL,
841                                                      size, userdata, mode,
842                                                      VCHIQ_BULK_TRANSMIT);
843                         break;
844                 case VCHIQ_BULK_MODE_BLOCKING:
845                         status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
846                                                               VCHIQ_BULK_TRANSMIT);
847                         break;
848                 default:
849                         return VCHIQ_ERROR;
850                 }
851
852                 /*
853                  * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
854                  * to implement a retry mechanism since this function is
855                  * supposed to block until queued
856                  */
857                 if (status != VCHIQ_RETRY)
858                         break;
859
860                 msleep(1);
861         }
862
863         return status;
864 }
865 EXPORT_SYMBOL(vchiq_bulk_transmit);
866
867 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
868                                      unsigned int size, void *userdata,
869                                      enum vchiq_bulk_mode mode)
870 {
871         enum vchiq_status status;
872
873         while (1) {
874                 switch (mode) {
875                 case VCHIQ_BULK_MODE_NOCALLBACK:
876                 case VCHIQ_BULK_MODE_CALLBACK:
877                         status = vchiq_bulk_transfer(handle, data, NULL,
878                                                      size, userdata,
879                                                      mode, VCHIQ_BULK_RECEIVE);
880                         break;
881                 case VCHIQ_BULK_MODE_BLOCKING:
882                         status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
883                                                               VCHIQ_BULK_RECEIVE);
884                         break;
885                 default:
886                         return VCHIQ_ERROR;
887                 }
888
889                 /*
890                  * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
891                  * to implement a retry mechanism since this function is
892                  * supposed to block until queued
893                  */
894                 if (status != VCHIQ_RETRY)
895                         break;
896
897                 msleep(1);
898         }
899
900         return status;
901 }
902 EXPORT_SYMBOL(vchiq_bulk_receive);
903
/*
 * Perform one attempt of a blocking-mode bulk transfer on behalf of the
 * calling user thread.
 *
 * A struct bulk_waiter_node is kept per calling pid on the instance's
 * bulk_waiter_list, so a transfer interrupted by a non-fatal signal can
 * be resumed by a retry from the same thread.  Returns VCHIQ_ERROR for
 * a bad handle or allocation failure, otherwise the status from
 * vchiq_bulk_transfer() (VCHIQ_RETRY is passed back to the caller).
 */
static enum vchiq_status
vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
			     enum vchiq_bulk_dir dir)
{
	struct vchiq_instance *instance;
	struct vchiq_service *service;
	enum vchiq_status status;
	struct bulk_waiter_node *waiter = NULL;
	bool found = false;

	service = find_service_by_handle(handle);
	if (!service)
		return VCHIQ_ERROR;

	instance = service->instance;

	/* Only the owning instance is needed; drop the service ref early. */
	vchiq_service_put(service);

	/* Look for a waiter left behind by an earlier interrupted attempt. */
	mutex_lock(&instance->bulk_waiter_list_mutex);
	list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
		if (waiter->pid == current->pid) {
			list_del(&waiter->list);
			found = true;
			break;
		}
	}
	mutex_unlock(&instance->bulk_waiter_list_mutex);

	if (found) {
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* This thread has an outstanding bulk transfer. */
			/* FIXME: why compare a dma address to a pointer? */
			if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
				/*
				 * This is not a retry of the previous one.
				 * Cancel the signal when the transfer completes.
				 */
				spin_lock(&bulk_waiter_spinlock);
				bulk->userdata = NULL;
				spin_unlock(&bulk_waiter_spinlock);
			}
		}
	} else {
		/* First attempt from this pid - allocate a fresh waiter. */
		waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
		if (!waiter) {
			vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
			return VCHIQ_ERROR;
		}
	}

	status = vchiq_bulk_transfer(handle, data, NULL, size,
				     &waiter->bulk_waiter,
				     VCHIQ_BULK_MODE_BLOCKING, dir);
	if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
		/* Transfer finished, failed, or will never be resumed. */
		struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;

		if (bulk) {
			/* Cancel the signal when the transfer completes. */
			spin_lock(&bulk_waiter_spinlock);
			bulk->userdata = NULL;
			spin_unlock(&bulk_waiter_spinlock);
		}
		kfree(waiter);
	} else {
		/*
		 * Interrupted by a non-fatal signal while still queued -
		 * save the waiter so a retry from this pid can resume it.
		 */
		waiter->pid = current->pid;
		mutex_lock(&instance->bulk_waiter_list_mutex);
		list_add(&waiter->list, &instance->bulk_waiter_list);
		mutex_unlock(&instance->bulk_waiter_list_mutex);
		vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
			       current->pid);
	}

	return status;
}
980
/*
 * Append a completion record to the instance's completion ring, for
 * later collection by the client (AWAIT_COMPLETION path).
 *
 * Sleeps interruptibly while the ring is full.  Returns VCHIQ_RETRY if
 * the wait is interrupted by a signal, otherwise VCHIQ_SUCCESS
 * (including when the instance is closing and the record is dropped).
 */
static enum vchiq_status
add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
	       struct vchiq_header *header, struct user_service *user_service,
	       void *bulk_userdata)
{
	struct vchiq_completion_data_kernel *completion;
	int insert;

	DEBUG_INITIALISE(g_state.local);

	/* insert/remove are free-running; full when apart by the ring size */
	insert = instance->completion_insert;
	while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
		/* Out of space - wait for the client */
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
		vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
		DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
		if (wait_for_completion_interruptible(&instance->remove_event)) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
			return VCHIQ_RETRY;
		} else if (instance->closing) {
			vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
			return VCHIQ_SUCCESS;
		}
		DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	}

	/* Masked index - relies on MAX_COMPLETIONS being a power of 2 */
	completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];

	completion->header = header;
	completion->reason = reason;
	/* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
	completion->service_userdata = user_service->service;
	completion->bulk_userdata = bulk_userdata;

	if (reason == VCHIQ_SERVICE_CLOSED) {
		/*
		 * Take an extra reference, to be held until
		 * this CLOSED notification is delivered.
		 */
		vchiq_service_get(user_service->service);
		if (instance->use_close_delivered)
			user_service->close_pending = 1;
	}

	/*
	 * A write barrier is needed here to ensure that the entire completion
	 * record is written out before the insert point.
	 */
	wmb();

	if (reason == VCHIQ_MESSAGE_AVAILABLE)
		user_service->message_available_pos = insert;

	insert++;
	instance->completion_insert = insert;

	/* Wake any client thread waiting in AWAIT_COMPLETION */
	complete(&instance->insert_event);

	return VCHIQ_SUCCESS;
}
1041
/*
 * Core-to-user callback installed for services created from user space.
 *
 * Routes each event either into the service's message queue (VCHI
 * services) or into the owning instance's completion ring via
 * add_completion().  Returns VCHIQ_RETRY if interrupted by a signal
 * while waiting for queue space, VCHIQ_ERROR if the instance closes
 * mid-wait, VCHIQ_SUCCESS otherwise.
 */
enum vchiq_status
service_callback(enum vchiq_reason reason, struct vchiq_header *header,
		 unsigned int handle, void *bulk_userdata)
{
	/*
	 * How do we ensure the callback goes to the right client?
	 * The service_user data points to a user_service record
	 * containing the original callback and the user state structure, which
	 * contains a circular buffer for completion records.
	 */
	struct user_service *user_service;
	struct vchiq_service *service;
	struct vchiq_instance *instance;
	bool skip_completion = false;

	DEBUG_INITIALISE(g_state.local);

	DEBUG_TRACE(SERVICE_CALLBACK_LINE);

	rcu_read_lock();
	service = handle_to_service(handle);
	if (WARN_ON(!service)) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	user_service = (struct user_service *)service->base.userdata;
	instance = user_service->instance;

	/* Events for a closing instance are silently dropped. */
	if (!instance || instance->closing) {
		rcu_read_unlock();
		return VCHIQ_SUCCESS;
	}

	/*
	 * As hopping around different synchronization mechanism,
	 * taking an extra reference results in simpler implementation.
	 */
	vchiq_service_get(service);
	rcu_read_unlock();

	vchiq_log_trace(vchiq_arm_log_level,
			"%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
			__func__, (unsigned long)user_service, service->localport,
			user_service->userdata, reason, (unsigned long)header,
			(unsigned long)instance, (unsigned long)bulk_userdata);

	if (header && user_service->is_vchi) {
		spin_lock(&msg_queue_spinlock);
		/* msg_insert/msg_remove are free-running ring indices. */
		while (user_service->msg_insert ==
			(user_service->msg_remove + MSG_QUEUE_SIZE)) {
			spin_unlock(&msg_queue_spinlock);
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
			vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
			/*
			 * If there is no MESSAGE_AVAILABLE in the completion
			 * queue, add one
			 */
			if ((user_service->message_available_pos -
				instance->completion_remove) < 0) {
				enum vchiq_status status;

				vchiq_log_info(vchiq_arm_log_level,
					       "Inserting extra MESSAGE_AVAILABLE");
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				status = add_completion(instance, reason, NULL, user_service,
							bulk_userdata);
				if (status != VCHIQ_SUCCESS) {
					DEBUG_TRACE(SERVICE_CALLBACK_LINE);
					vchiq_service_put(service);
					return status;
				}
			}

			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			/* Wait for the client to remove a message. */
			if (wait_for_completion_interruptible(&user_service->remove_event)) {
				vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_RETRY;
			} else if (instance->closing) {
				vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
				DEBUG_TRACE(SERVICE_CALLBACK_LINE);
				vchiq_service_put(service);
				return VCHIQ_ERROR;
			}
			DEBUG_TRACE(SERVICE_CALLBACK_LINE);
			spin_lock(&msg_queue_spinlock);
		}

		user_service->msg_queue[user_service->msg_insert &
			(MSG_QUEUE_SIZE - 1)] = header;
		user_service->msg_insert++;

		/*
		 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
		 * there is a MESSAGE_AVAILABLE in the completion queue then
		 * bypass the completion queue.
		 */
		if (((user_service->message_available_pos -
			instance->completion_remove) >= 0) ||
			user_service->dequeue_pending) {
			user_service->dequeue_pending = 0;
			skip_completion = true;
		}

		spin_unlock(&msg_queue_spinlock);
		complete(&user_service->insert_event);

		/* Ownership of the header moved to msg_queue above. */
		header = NULL;
	}
	DEBUG_TRACE(SERVICE_CALLBACK_LINE);
	vchiq_service_put(service);

	if (skip_completion)
		return VCHIQ_SUCCESS;

	return add_completion(instance, reason, header, user_service,
		bulk_userdata);
}
1163
/*
 * Copy one chunk of dump text into the user buffer described by
 * @dump_context (a struct dump_context).
 *
 * Honours context->offset by discarding leading bytes already consumed
 * by the reader, and stops silently once context->space is exhausted.
 * Returns 0 on success (including truncation), -EFAULT if the copy to
 * user space fails.
 */
int vchiq_dump(void *dump_context, const char *str, int len)
{
	struct dump_context *context = (struct dump_context *)dump_context;
	int copy_bytes;

	/* Output buffer already full - drop the rest. */
	if (context->actual >= context->space)
		return 0;

	if (context->offset > 0) {
		/* Skip bytes the reader has already seen. */
		int skip_bytes = min_t(int, len, context->offset);

		str += skip_bytes;
		len -= skip_bytes;
		context->offset -= skip_bytes;
		if (context->offset > 0)
			return 0;
	}
	copy_bytes = min_t(int, len, context->space - context->actual);
	if (copy_bytes == 0)
		return 0;
	if (copy_to_user(context->buf + context->actual, str,
			 copy_bytes))
		return -EFAULT;
	context->actual += copy_bytes;
	len -= copy_bytes;

	/*
	 * If the terminating NUL is included in the length, then it
	 * marks the end of a line and should be replaced with a
	 * carriage return.
	 */
	if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
		char cr = '\n';

		if (copy_to_user(context->buf + context->actual - 1,
				 &cr, 1))
			return -EFAULT;
	}
	return 0;
}
1204
/*
 * Dump a summary line for every instance that owns at least one service
 * registered through service_callback (i.e. created via this driver).
 * Returns 0 on success or the error from vchiq_dump().
 */
int vchiq_dump_platform_instances(void *dump_context)
{
	struct vchiq_state *state = vchiq_get_state();
	char buf[80];
	int len;
	int i;

	/*
	 * There is no list of instances, so instead scan all services,
	 * marking those that have been dumped.
	 */

	/* First pass: clear the per-instance "dumped" marks. */
	rcu_read_lock();
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;

		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback)
			continue;

		instance = service->instance;
		if (instance)
			instance->mark = 0;
	}
	rcu_read_unlock();

	/* Second pass: dump each not-yet-marked instance once. */
	for (i = 0; i < state->unused_service; i++) {
		struct vchiq_service *service;
		struct vchiq_instance *instance;
		int err;

		rcu_read_lock();
		service = rcu_dereference(state->services[i]);
		if (!service || service->base.callback != service_callback) {
			rcu_read_unlock();
			continue;
		}

		instance = service->instance;
		if (!instance || instance->mark) {
			rcu_read_unlock();
			continue;
		}
		rcu_read_unlock();

		/*
		 * NOTE(review): instance is dereferenced below after
		 * rcu_read_unlock() with no reference held - this looks
		 * racy against instance teardown; confirm the lifetime
		 * guarantees before relying on it.
		 */
		len = snprintf(buf, sizeof(buf),
			       "Instance %pK: pid %d,%s completions %d/%d",
			       instance, instance->pid,
			       instance->connected ? " connected, " :
			       "",
			       instance->completion_insert -
			       instance->completion_remove,
			       MAX_COMPLETIONS);
		err = vchiq_dump(dump_context, buf, len + 1);
		if (err)
			return err;
		instance->mark = 1;
	}
	return 0;
}
1266
1267 int vchiq_dump_platform_service_state(void *dump_context,
1268                                       struct vchiq_service *service)
1269 {
1270         struct user_service *user_service =
1271                         (struct user_service *)service->base.userdata;
1272         char buf[80];
1273         int len;
1274
1275         len = scnprintf(buf, sizeof(buf), "  instance %pK", service->instance);
1276
1277         if ((service->base.callback == service_callback) && user_service->is_vchi) {
1278                 len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1279                                  user_service->msg_insert - user_service->msg_remove,
1280                                  MSG_QUEUE_SIZE);
1281
1282                 if (user_service->dequeue_pending)
1283                         len += scnprintf(buf + len, sizeof(buf) - len,
1284                                 " (dequeue pending)");
1285         }
1286
1287         return vchiq_dump(dump_context, buf, len + 1);
1288 }
1289
1290 struct vchiq_state *
1291 vchiq_get_state(void)
1292 {
1293         if (!g_state.remote)
1294                 pr_err("%s: g_state.remote == NULL\n", __func__);
1295         else if (g_state.remote->initialised != 1)
1296                 pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1297                           __func__, g_state.remote->initialised);
1298
1299         return (g_state.remote &&
1300                 (g_state.remote->initialised == 1)) ? &g_state : NULL;
1301 }
1302
1303 /*
1304  * Autosuspend related functionality
1305  */
1306
1307 static enum vchiq_status
1308 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
1309                                struct vchiq_header *header,
1310                                unsigned int service_user, void *bulk_user)
1311 {
1312         vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
1313         return 0;
1314 }
1315
/*
 * Kernel thread that forwards keepalive use/release requests from the
 * VPU into the local use-counting code.
 *
 * Connects a dedicated "KEEP" service, then loops forever waiting on
 * arm_state->ka_evt and replaying the accumulated counts through
 * vchiq_use_service()/vchiq_release_service().
 */
static int
vchiq_keepalive_thread_func(void *v)
{
	struct vchiq_state *state = (struct vchiq_state *)v;
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);

	enum vchiq_status status;
	struct vchiq_instance *instance;
	unsigned int ka_handle;
	int ret;

	struct vchiq_service_params_kernel params = {
		.fourcc      = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
		.callback    = vchiq_keepalive_vchiq_callback,
		.version     = KEEPALIVE_VER,
		.version_min = KEEPALIVE_VER_MIN
	};

	ret = vchiq_initialise(&instance);
	if (ret) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
				ret);
		goto exit;
	}

	status = vchiq_connect(instance);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
				status);
		goto shutdown;
	}

	status = vchiq_add_service(instance, &params, &ka_handle);
	if (status != VCHIQ_SUCCESS) {
		vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
				status);
		goto shutdown;
	}

	/* Main loop - only ever left via thread termination. */
	while (1) {
		long rc = 0, uc = 0;

		if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
			vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
			/* Discard the signal and keep servicing events. */
			flush_signals(current);
			continue;
		}

		/*
		 * read and clear counters.  Do release_count then use_count to
		 * prevent getting more releases than uses
		 */
		rc = atomic_xchg(&arm_state->ka_release_count, 0);
		uc = atomic_xchg(&arm_state->ka_use_count, 0);

		/*
		 * Call use/release service the requisite number of times.
		 * Process use before release so use counts don't go negative
		 */
		while (uc--) {
			atomic_inc(&arm_state->ka_use_ack_count);
			status = vchiq_use_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_use_service error %d", __func__, status);
			}
		}
		while (rc--) {
			status = vchiq_release_service(ka_handle);
			if (status != VCHIQ_SUCCESS) {
				vchiq_log_error(vchiq_susp_log_level,
						"%s vchiq_release_service error %d", __func__,
						status);
			}
		}
	}

shutdown:
	vchiq_shutdown(instance);
exit:
	return 0;
}
1398
1399 int
1400 vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
1401                    enum USE_TYPE_E use_type)
1402 {
1403         struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1404         int ret = 0;
1405         char entity[16];
1406         int *entity_uc;
1407         int local_uc;
1408
1409         if (!arm_state) {
1410                 ret = -EINVAL;
1411                 goto out;
1412         }
1413
1414         if (use_type == USE_TYPE_VCHIQ) {
1415                 sprintf(entity, "VCHIQ:   ");
1416                 entity_uc = &arm_state->peer_use_count;
1417         } else if (service) {
1418                 sprintf(entity, "%c%c%c%c:%03d",
1419                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1420                         service->client_id);
1421                 entity_uc = &service->service_use_count;
1422         } else {
1423                 vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
1424                 ret = -EINVAL;
1425                 goto out;
1426         }
1427
1428         write_lock_bh(&arm_state->susp_res_lock);
1429         local_uc = ++arm_state->videocore_use_count;
1430         ++(*entity_uc);
1431
1432         vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1433                         *entity_uc, local_uc);
1434
1435         write_unlock_bh(&arm_state->susp_res_lock);
1436
1437         if (!ret) {
1438                 enum vchiq_status status = VCHIQ_SUCCESS;
1439                 long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);
1440
1441                 while (ack_cnt && (status == VCHIQ_SUCCESS)) {
1442                         /* Send the use notify to videocore */
1443                         status = vchiq_send_remote_use_active(state);
1444                         if (status == VCHIQ_SUCCESS)
1445                                 ack_cnt--;
1446                         else
1447                                 atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
1448                 }
1449         }
1450
1451 out:
1452         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1453         return ret;
1454 }
1455
1456 int
1457 vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
1458 {
1459         struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1460         int ret = 0;
1461         char entity[16];
1462         int *entity_uc;
1463
1464         if (!arm_state) {
1465                 ret = -EINVAL;
1466                 goto out;
1467         }
1468
1469         if (service) {
1470                 sprintf(entity, "%c%c%c%c:%03d",
1471                         VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
1472                         service->client_id);
1473                 entity_uc = &service->service_use_count;
1474         } else {
1475                 sprintf(entity, "PEER:   ");
1476                 entity_uc = &arm_state->peer_use_count;
1477         }
1478
1479         write_lock_bh(&arm_state->susp_res_lock);
1480         if (!arm_state->videocore_use_count || !(*entity_uc)) {
1481                 /* Don't use BUG_ON - don't allow user thread to crash kernel */
1482                 WARN_ON(!arm_state->videocore_use_count);
1483                 WARN_ON(!(*entity_uc));
1484                 ret = -EINVAL;
1485                 goto unlock;
1486         }
1487         --arm_state->videocore_use_count;
1488         --(*entity_uc);
1489
1490         vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
1491                         *entity_uc, arm_state->videocore_use_count);
1492
1493 unlock:
1494         write_unlock_bh(&arm_state->susp_res_lock);
1495
1496 out:
1497         vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1498         return ret;
1499 }
1500
1501 void
1502 vchiq_on_remote_use(struct vchiq_state *state)
1503 {
1504         struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1505
1506         atomic_inc(&arm_state->ka_use_count);
1507         complete(&arm_state->ka_evt);
1508 }
1509
1510 void
1511 vchiq_on_remote_release(struct vchiq_state *state)
1512 {
1513         struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1514
1515         atomic_inc(&arm_state->ka_release_count);
1516         complete(&arm_state->ka_evt);
1517 }
1518
/* Take a use count on @service's own behalf. */
int
vchiq_use_service_internal(struct vchiq_service *service)
{
	return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
}
1524
/* Drop a use count on @service's own behalf. */
int
vchiq_release_service_internal(struct vchiq_service *service)
{
	return vchiq_release_internal(service->state, service);
}
1530
/* Return the debugfs node embedded in @instance. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
{
	return &instance->debugfs_node;
}
1536
1537 int
1538 vchiq_instance_get_use_count(struct vchiq_instance *instance)
1539 {
1540         struct vchiq_service *service;
1541         int use_count = 0, i;
1542
1543         i = 0;
1544         rcu_read_lock();
1545         while ((service = __next_service_by_instance(instance->state,
1546                                                      instance, &i)))
1547                 use_count += service->service_use_count;
1548         rcu_read_unlock();
1549         return use_count;
1550 }
1551
/* Return the pid recorded on @instance. */
int
vchiq_instance_get_pid(struct vchiq_instance *instance)
{
	return instance->pid;
}
1557
/* Return @instance's trace flag. */
int
vchiq_instance_get_trace(struct vchiq_instance *instance)
{
	return instance->trace;
}
1563
1564 void
1565 vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
1566 {
1567         struct vchiq_service *service;
1568         int i;
1569
1570         i = 0;
1571         rcu_read_lock();
1572         while ((service = __next_service_by_instance(instance->state,
1573                                                      instance, &i)))
1574                 service->trace = trace;
1575         rcu_read_unlock();
1576         instance->trace = (trace != 0);
1577 }
1578
1579 enum vchiq_status
1580 vchiq_use_service(unsigned int handle)
1581 {
1582         enum vchiq_status ret = VCHIQ_ERROR;
1583         struct vchiq_service *service = find_service_by_handle(handle);
1584
1585         if (service) {
1586                 ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1587                 vchiq_service_put(service);
1588         }
1589         return ret;
1590 }
1591 EXPORT_SYMBOL(vchiq_use_service);
1592
1593 enum vchiq_status
1594 vchiq_release_service(unsigned int handle)
1595 {
1596         enum vchiq_status ret = VCHIQ_ERROR;
1597         struct vchiq_service *service = find_service_by_handle(handle);
1598
1599         if (service) {
1600                 ret = vchiq_release_internal(service->state, service);
1601                 vchiq_service_put(service);
1602         }
1603         return ret;
1604 }
1605 EXPORT_SYMBOL(vchiq_release_service);
1606
/* Per-service snapshot gathered by vchiq_dump_service_use_state(). */
struct service_data_struct {
	int fourcc;	/* service fourcc identifier */
	int clientid;	/* client id of the service */
	int use_count;	/* service_use_count at snapshot time */
};
1612
/*
 * Log (at warning level) the use counts of every service on @state,
 * plus the peer and overall VideoCore use counts.  Service data is
 * snapshotted under susp_res_lock/RCU first so the logging itself
 * happens without any lock held.
 */
void
vchiq_dump_service_use_state(struct vchiq_state *state)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	int i, found = 0;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";

	int peer_count;
	int vc_use_count;
	int active_services;

	if (!arm_state)
		return;

	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
				     GFP_KERNEL);
	if (!service_data)
		return;

	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
		only_nonzero = 1;

	/* Snapshot per-service counts; skip free or (optionally) idle ones. */
	rcu_read_lock();
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);

		if (!service_ptr)
			continue;

		if (only_nonzero && !service_ptr->service_use_count)
			continue;

		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
			continue;

		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		found++;
		if (found >= MAX_SERVICES)
			break;
	}
	rcu_read_unlock();

	read_unlock_bh(&arm_state->susp_res_lock);

	if (only_nonzero)
		vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
				  active_services, found);

	/* Locks dropped - now log the snapshot. */
	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	}
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
			  vc_use_count);

	kfree(service_data);
}
1686
1687 enum vchiq_status
1688 vchiq_check_service(struct vchiq_service *service)
1689 {
1690         struct vchiq_arm_state *arm_state;
1691         enum vchiq_status ret = VCHIQ_ERROR;
1692
1693         if (!service || !service->state)
1694                 goto out;
1695
1696         arm_state = vchiq_platform_get_arm_state(service->state);
1697
1698         read_lock_bh(&arm_state->susp_res_lock);
1699         if (service->service_use_count)
1700                 ret = VCHIQ_SUCCESS;
1701         read_unlock_bh(&arm_state->susp_res_lock);
1702
1703         if (ret == VCHIQ_ERROR) {
1704                 vchiq_log_error(vchiq_susp_log_level,
1705                                 "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1706                                 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
1707                                 service->service_use_count, arm_state->videocore_use_count);
1708                 vchiq_dump_service_use_state(service->state);
1709         }
1710 out:
1711         return ret;
1712 }
1713
/*
 * Connection-state-change hook: on the first transition into the
 * CONNECTED state, spawn the keepalive kernel thread for this state.
 *
 * The first_connect flag is tested and set under the susp_res_lock
 * write lock so the thread is created at most once, even if this
 * callback races with itself.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
{
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	/* Only act once the state has actually reached CONNECTED. */
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
		return;

	write_lock_bh(&arm_state->susp_res_lock);
	if (arm_state->first_connect) {
		/* Keepalive thread already created on a prior connect. */
		write_unlock_bh(&arm_state->susp_res_lock);
		return;
	}

	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
		 state->id);
	/* Create stopped, then wake explicitly once setup succeeded. */
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
					      (void *)state,
					      threadname);
	if (IS_ERR(arm_state->ka_thread)) {
		/*
		 * NOTE(review): first_connect stays set here, so no retry
		 * will occur on a later connect — confirm this is intended.
		 */
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
				threadname);
	} else {
		wake_up_process(arm_state->ka_thread);
	}
}
1747
/* Device-tree match table; .data selects the per-SoC driver data. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
	{},
};
MODULE_DEVICE_TABLE(of, vchiq_of_match);
1754
1755 static struct platform_device *
1756 vchiq_register_child(struct platform_device *pdev, const char *name)
1757 {
1758         struct platform_device_info pdevinfo;
1759         struct platform_device *child;
1760
1761         memset(&pdevinfo, 0, sizeof(pdevinfo));
1762
1763         pdevinfo.parent = &pdev->dev;
1764         pdevinfo.name = name;
1765         pdevinfo.id = PLATFORM_DEVID_NONE;
1766         pdevinfo.dma_mask = DMA_BIT_MASK(32);
1767
1768         child = platform_device_register_full(&pdevinfo);
1769         if (IS_ERR(child)) {
1770                 dev_warn(&pdev->dev, "%s not registered\n", name);
1771                 child = NULL;
1772         }
1773
1774         return child;
1775 }
1776
1777 static int vchiq_probe(struct platform_device *pdev)
1778 {
1779         struct device_node *fw_node;
1780         const struct of_device_id *of_id;
1781         struct vchiq_drvdata *drvdata;
1782         int err;
1783
1784         of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
1785         drvdata = (struct vchiq_drvdata *)of_id->data;
1786         if (!drvdata)
1787                 return -EINVAL;
1788
1789         fw_node = of_find_compatible_node(NULL, NULL,
1790                                           "raspberrypi,bcm2835-firmware");
1791         if (!fw_node) {
1792                 dev_err(&pdev->dev, "Missing firmware node\n");
1793                 return -ENOENT;
1794         }
1795
1796         drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
1797         of_node_put(fw_node);
1798         if (!drvdata->fw)
1799                 return -EPROBE_DEFER;
1800
1801         platform_set_drvdata(pdev, drvdata);
1802
1803         err = vchiq_platform_init(pdev, &g_state);
1804         if (err)
1805                 goto failed_platform_init;
1806
1807         vchiq_debugfs_init();
1808
1809         vchiq_log_info(vchiq_arm_log_level,
1810                        "vchiq: platform initialised - version %d (min %d)",
1811                        VCHIQ_VERSION, VCHIQ_VERSION_MIN);
1812
1813         /*
1814          * Simply exit on error since the function handles cleanup in
1815          * cases of failure.
1816          */
1817         err = vchiq_register_chrdev(&pdev->dev);
1818         if (err) {
1819                 vchiq_log_warning(vchiq_arm_log_level,
1820                                   "Failed to initialize vchiq cdev");
1821                 goto error_exit;
1822         }
1823
1824         bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
1825         bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
1826
1827         return 0;
1828
1829 failed_platform_init:
1830         vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
1831 error_exit:
1832         return err;
1833 }
1834
/*
 * Remove: tear down in reverse of probe — unregister the child devices
 * first, then debugfs, then the character device.
 */
static int vchiq_remove(struct platform_device *pdev)
{
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();

	return 0;
}
1844
/* Platform driver binding probe/remove to the DT match table above. */
static struct platform_driver vchiq_driver = {
	.driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	},
	.probe = vchiq_probe,
	.remove = vchiq_remove,
};
1853
1854 static int __init vchiq_driver_init(void)
1855 {
1856         int ret;
1857
1858         ret = platform_driver_register(&vchiq_driver);
1859         if (ret)
1860                 pr_err("Failed to register vchiq driver\n");
1861
1862         return ret;
1863 }
1864 module_init(vchiq_driver_init);
1865
/* Module exit: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
{
	platform_driver_unregister(&vchiq_driver);
}
module_exit(vchiq_driver_exit);
1871
/* Module metadata (license must match the SPDX header at the top). */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");