// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (c) 2014 Raspberry Pi (Trading) Ltd. All rights reserved.
 * Copyright (c) 2010-2012 Broadcom. All rights reserved.
 */
7 #include <linux/kernel.h>
8 #include <linux/module.h>
9 #include <linux/sched/signal.h>
10 #include <linux/types.h>
11 #include <linux/errno.h>
12 #include <linux/cdev.h>
14 #include <linux/device.h>
16 #include <linux/highmem.h>
17 #include <linux/pagemap.h>
18 #include <linux/bug.h>
19 #include <linux/completion.h>
20 #include <linux/list.h>
22 #include <linux/platform_device.h>
23 #include <linux/compat.h>
24 #include <linux/dma-mapping.h>
25 #include <linux/rcupdate.h>
26 #include <linux/delay.h>
27 #include <linux/slab.h>
28 #include <linux/interrupt.h>
30 #include <linux/uaccess.h>
31 #include <soc/bcm2835/raspberrypi-firmware.h>
33 #include "vchiq_core.h"
34 #include "vchiq_ioctl.h"
35 #include "vchiq_arm.h"
36 #include "vchiq_debugfs.h"
37 #include "vchiq_connected.h"
38 #include "vchiq_pagelist.h"
40 #define DEVICE_NAME "vchiq"
42 #define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)
44 #define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)
46 #define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
47 #define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1
52 #define ARM_DS_ACTIVE BIT(2)
54 /* Override the default prefix, which would be vchiq_arm (from the filename) */
55 #undef MODULE_PARAM_PREFIX
56 #define MODULE_PARAM_PREFIX DEVICE_NAME "."
58 #define KEEPALIVE_VER 1
59 #define KEEPALIVE_VER_MIN KEEPALIVE_VER
61 /* Run time control of log level, based on KERN_XXX level. */
62 int vchiq_arm_log_level = VCHIQ_LOG_DEFAULT;
63 int vchiq_susp_log_level = VCHIQ_LOG_ERROR;
65 DEFINE_SPINLOCK(msg_queue_spinlock);
66 struct vchiq_state g_state;
68 static struct platform_device *bcm2835_camera;
69 static struct platform_device *bcm2835_audio;
/*
 * Per-SoC data: the L2 cache line size the VPU firmware expects (which
 * determines pagelist alignment) and a handle to the firmware interface.
 */
struct vchiq_drvdata {
	const unsigned int cache_line_size;
	struct rpi_firmware *fw;
};
76 static struct vchiq_drvdata bcm2835_drvdata = {
77 .cache_line_size = 32,
80 static struct vchiq_drvdata bcm2836_drvdata = {
81 .cache_line_size = 64,
84 struct vchiq_arm_state {
85 /* Keepalive-related data */
86 struct task_struct *ka_thread;
87 struct completion ka_evt;
88 atomic_t ka_use_count;
89 atomic_t ka_use_ack_count;
90 atomic_t ka_release_count;
92 rwlock_t susp_res_lock;
94 struct vchiq_state *state;
97 * Global use count for videocore.
98 * This is equal to the sum of the use counts for all services. When
99 * this hits zero the videocore suspend procedure will be initiated.
101 int videocore_use_count;
104 * Use count to track requests from videocore peer.
105 * This use count is not associated with a service, so needs to be
106 * tracked separately with the state.
111 * Flag to indicate that the first vchiq connect has made it through.
112 * This means that both sides should be fully ready, and we should
113 * be able to suspend after this point.
118 struct vchiq_2835_state {
120 struct vchiq_arm_state arm_state;
123 struct vchiq_pagelist_info {
124 struct pagelist *pagelist;
125 size_t pagelist_buffer_size;
127 enum dma_data_direction dma_dir;
128 unsigned int num_pages;
129 unsigned int pages_need_release;
131 struct scatterlist *scatterlist;
132 unsigned int scatterlist_mapped;
135 static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect. That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32 as the default.
 */
146 static unsigned int g_cache_line_size = 32;
147 static unsigned int g_fragments_size;
148 static char *g_fragments_base;
149 static char *g_free_fragments;
150 static struct semaphore g_free_fragments_sema;
151 static struct device *g_dev;
153 static DEFINE_SEMAPHORE(g_free_fragments_mutex);
155 static enum vchiq_status
156 vchiq_blocking_bulk_transfer(unsigned int handle, void *data,
157 unsigned int size, enum vchiq_bulk_dir dir);
160 vchiq_doorbell_irq(int irq, void *dev_id)
162 struct vchiq_state *state = dev_id;
163 irqreturn_t ret = IRQ_NONE;
166 /* Read (and clear) the doorbell */
167 status = readl(g_regs + BELL0);
169 if (status & ARM_DS_ACTIVE) { /* Was the doorbell rung? */
170 remote_event_pollall(state);
178 cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
180 if (pagelistinfo->scatterlist_mapped) {
181 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
182 pagelistinfo->num_pages, pagelistinfo->dma_dir);
185 if (pagelistinfo->pages_need_release)
186 unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);
188 dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
189 pagelistinfo->pagelist, pagelistinfo->dma_addr);
/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * fragment buffers.
 */
200 static struct vchiq_pagelist_info *
201 create_pagelist(char *buf, char __user *ubuf,
202 size_t count, unsigned short type)
204 struct pagelist *pagelist;
205 struct vchiq_pagelist_info *pagelistinfo;
208 unsigned int num_pages, offset, i, k;
210 size_t pagelist_size;
211 struct scatterlist *scatterlist, *sg;
215 if (count >= INT_MAX - PAGE_SIZE)
219 offset = (uintptr_t)buf & (PAGE_SIZE - 1);
221 offset = (uintptr_t)ubuf & (PAGE_SIZE - 1);
222 num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);
224 if ((size_t)num_pages > (SIZE_MAX - sizeof(struct pagelist) -
225 sizeof(struct vchiq_pagelist_info)) /
226 (sizeof(u32) + sizeof(pages[0]) +
227 sizeof(struct scatterlist)))
230 pagelist_size = sizeof(struct pagelist) +
231 (num_pages * sizeof(u32)) +
232 (num_pages * sizeof(pages[0]) +
233 (num_pages * sizeof(struct scatterlist))) +
234 sizeof(struct vchiq_pagelist_info);
236 /* Allocate enough storage to hold the page pointers and the page
239 pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
242 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);
247 addrs = pagelist->addrs;
248 pages = (struct page **)(addrs + num_pages);
249 scatterlist = (struct scatterlist *)(pages + num_pages);
250 pagelistinfo = (struct vchiq_pagelist_info *)
251 (scatterlist + num_pages);
253 pagelist->length = count;
254 pagelist->type = type;
255 pagelist->offset = offset;
257 /* Populate the fields of the pagelistinfo structure */
258 pagelistinfo->pagelist = pagelist;
259 pagelistinfo->pagelist_buffer_size = pagelist_size;
260 pagelistinfo->dma_addr = dma_addr;
261 pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
262 DMA_TO_DEVICE : DMA_FROM_DEVICE;
263 pagelistinfo->num_pages = num_pages;
264 pagelistinfo->pages_need_release = 0;
265 pagelistinfo->pages = pages;
266 pagelistinfo->scatterlist = scatterlist;
267 pagelistinfo->scatterlist_mapped = 0;
270 unsigned long length = count;
271 unsigned int off = offset;
273 for (actual_pages = 0; actual_pages < num_pages;
276 vmalloc_to_page((buf +
277 (actual_pages * PAGE_SIZE)));
278 size_t bytes = PAGE_SIZE - off;
281 cleanup_pagelistinfo(pagelistinfo);
287 pages[actual_pages] = pg;
291 /* do not try and release vmalloc pages */
293 actual_pages = pin_user_pages_fast((unsigned long)ubuf & PAGE_MASK, num_pages,
294 type == PAGELIST_READ, pages);
296 if (actual_pages != num_pages) {
297 vchiq_log_info(vchiq_arm_log_level,
298 "%s - only %d/%d pages locked",
299 __func__, actual_pages, num_pages);
301 /* This is probably due to the process being killed */
302 if (actual_pages > 0)
303 unpin_user_pages(pages, actual_pages);
304 cleanup_pagelistinfo(pagelistinfo);
307 /* release user pages */
308 pagelistinfo->pages_need_release = 1;
312 * Initialize the scatterlist so that the magic cookie
313 * is filled if debugging is enabled
315 sg_init_table(scatterlist, num_pages);
316 /* Now set the pages for each scatterlist */
317 for (i = 0; i < num_pages; i++) {
318 unsigned int len = PAGE_SIZE - offset;
322 sg_set_page(scatterlist + i, pages[i], len, offset);
327 dma_buffers = dma_map_sg(g_dev,
330 pagelistinfo->dma_dir);
332 if (dma_buffers == 0) {
333 cleanup_pagelistinfo(pagelistinfo);
337 pagelistinfo->scatterlist_mapped = 1;
339 /* Combine adjacent blocks for performance */
341 for_each_sg(scatterlist, sg, dma_buffers, i) {
342 u32 len = sg_dma_len(sg);
343 u32 addr = sg_dma_address(sg);
345 /* Note: addrs is the address + page_count - 1
346 * The firmware expects blocks after the first to be page-
347 * aligned and a multiple of the page size
350 WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
351 WARN_ON(i && (addr & ~PAGE_MASK));
353 ((addrs[k - 1] & PAGE_MASK) +
354 (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
355 == (addr & PAGE_MASK))
356 addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
358 addrs[k++] = (addr & PAGE_MASK) |
359 (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
362 /* Partial cache lines (fragments) require special measures */
363 if ((type == PAGELIST_READ) &&
364 ((pagelist->offset & (g_cache_line_size - 1)) ||
365 ((pagelist->offset + pagelist->length) &
366 (g_cache_line_size - 1)))) {
369 if (down_interruptible(&g_free_fragments_sema)) {
370 cleanup_pagelistinfo(pagelistinfo);
374 WARN_ON(!g_free_fragments);
376 down(&g_free_fragments_mutex);
377 fragments = g_free_fragments;
379 g_free_fragments = *(char **)g_free_fragments;
380 up(&g_free_fragments_mutex);
381 pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
382 (fragments - g_fragments_base) / g_fragments_size;
389 free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
392 struct pagelist *pagelist = pagelistinfo->pagelist;
393 struct page **pages = pagelistinfo->pages;
394 unsigned int num_pages = pagelistinfo->num_pages;
396 vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
397 __func__, pagelistinfo->pagelist, actual);
400 * NOTE: dma_unmap_sg must be called before the
401 * cpu can touch any of the data/pages.
403 dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
404 pagelistinfo->num_pages, pagelistinfo->dma_dir);
405 pagelistinfo->scatterlist_mapped = 0;
407 /* Deal with any partial cache lines (fragments) */
408 if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
409 char *fragments = g_fragments_base +
410 (pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
412 int head_bytes, tail_bytes;
414 head_bytes = (g_cache_line_size - pagelist->offset) &
415 (g_cache_line_size - 1);
416 tail_bytes = (pagelist->offset + actual) &
417 (g_cache_line_size - 1);
419 if ((actual >= 0) && (head_bytes != 0)) {
420 if (head_bytes > actual)
423 memcpy((char *)kmap(pages[0]) +
429 if ((actual >= 0) && (head_bytes < actual) &&
431 memcpy((char *)kmap(pages[num_pages - 1]) +
432 ((pagelist->offset + actual) &
433 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
434 fragments + g_cache_line_size,
436 kunmap(pages[num_pages - 1]);
439 down(&g_free_fragments_mutex);
440 *(char **)fragments = g_free_fragments;
441 g_free_fragments = fragments;
442 up(&g_free_fragments_mutex);
443 up(&g_free_fragments_sema);
446 /* Need to mark all the pages dirty. */
447 if (pagelist->type != PAGELIST_WRITE &&
448 pagelistinfo->pages_need_release) {
451 for (i = 0; i < num_pages; i++)
452 set_page_dirty(pages[i]);
455 cleanup_pagelistinfo(pagelistinfo);
458 int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
460 struct device *dev = &pdev->dev;
461 struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
462 struct rpi_firmware *fw = drvdata->fw;
463 struct vchiq_slot_zero *vchiq_slot_zero;
465 dma_addr_t slot_phys;
467 int slot_mem_size, frag_mem_size;
471 * VCHI messages between the CPU and firmware use
472 * 32-bit bus addresses.
474 err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
479 g_cache_line_size = drvdata->cache_line_size;
480 g_fragments_size = 2 * g_cache_line_size;
482 /* Allocate space for the channels in coherent memory */
483 slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
484 frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);
486 slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
487 &slot_phys, GFP_KERNEL);
489 dev_err(dev, "could not allocate DMA memory\n");
493 WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);
495 vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
496 if (!vchiq_slot_zero)
499 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
500 (int)slot_phys + slot_mem_size;
501 vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
504 g_fragments_base = (char *)slot_mem + slot_mem_size;
506 g_free_fragments = g_fragments_base;
507 for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
508 *(char **)&g_fragments_base[i * g_fragments_size] =
509 &g_fragments_base[(i + 1) * g_fragments_size];
511 *(char **)&g_fragments_base[i * g_fragments_size] = NULL;
512 sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
514 err = vchiq_init_state(state, vchiq_slot_zero);
518 g_regs = devm_platform_ioremap_resource(pdev, 0);
520 return PTR_ERR(g_regs);
522 irq = platform_get_irq(pdev, 0);
526 err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
527 "VCHIQ doorbell", state);
529 dev_err(dev, "failed to register irq=%d\n", irq);
533 /* Send the base address of the slots to VideoCore */
534 channelbase = slot_phys;
535 err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
536 &channelbase, sizeof(channelbase));
537 if (err || channelbase) {
538 dev_err(dev, "failed to set channelbase\n");
539 return err ? : -ENXIO;
543 vchiq_log_info(vchiq_arm_log_level, "vchiq_init - done (slots %pK, phys %pad)",
544 vchiq_slot_zero, &slot_phys);
546 vchiq_call_connected_callbacks();
552 vchiq_arm_init_state(struct vchiq_state *state,
553 struct vchiq_arm_state *arm_state)
556 rwlock_init(&arm_state->susp_res_lock);
558 init_completion(&arm_state->ka_evt);
559 atomic_set(&arm_state->ka_use_count, 0);
560 atomic_set(&arm_state->ka_use_ack_count, 0);
561 atomic_set(&arm_state->ka_release_count, 0);
563 arm_state->state = state;
564 arm_state->first_connect = 0;
569 vchiq_platform_init_state(struct vchiq_state *state)
571 struct vchiq_2835_state *platform_state;
573 state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
574 if (!state->platform_state)
577 platform_state = (struct vchiq_2835_state *)state->platform_state;
579 platform_state->inited = 1;
580 vchiq_arm_init_state(state, &platform_state->arm_state);
585 struct vchiq_arm_state*
586 vchiq_platform_get_arm_state(struct vchiq_state *state)
588 struct vchiq_2835_state *platform_state;
590 platform_state = (struct vchiq_2835_state *)state->platform_state;
592 WARN_ON_ONCE(!platform_state->inited);
594 return &platform_state->arm_state;
598 remote_event_signal(struct remote_event *event)
604 dsb(sy); /* data barrier operation */
607 writel(0, g_regs + BELL2); /* trigger vc interrupt */
611 vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset,
612 void __user *uoffset, int size, int dir)
614 struct vchiq_pagelist_info *pagelistinfo;
616 pagelistinfo = create_pagelist(offset, uoffset, size,
617 (dir == VCHIQ_BULK_RECEIVE)
624 bulk->data = pagelistinfo->dma_addr;
627 * Store the pagelistinfo address in remote_data,
628 * which isn't used by the slave.
630 bulk->remote_data = pagelistinfo;
636 vchiq_complete_bulk(struct vchiq_bulk *bulk)
638 if (bulk && bulk->remote_data && bulk->actual)
639 free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
/* Emit a one-line platform identification into the dump buffer. */
int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf), "  Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, buf, len + 1);
}
652 #define VCHIQ_INIT_RETRIES 10
653 int vchiq_initialise(struct vchiq_instance **instance_out)
655 struct vchiq_state *state;
656 struct vchiq_instance *instance = NULL;
660 * VideoCore may not be ready due to boot up timing.
661 * It may never be ready if kernel and firmware are mismatched,so don't
664 for (i = 0; i < VCHIQ_INIT_RETRIES; i++) {
665 state = vchiq_get_state();
668 usleep_range(500, 600);
670 if (i == VCHIQ_INIT_RETRIES) {
671 vchiq_log_error(vchiq_core_log_level, "%s: videocore not initialized\n", __func__);
675 vchiq_log_warning(vchiq_core_log_level,
676 "%s: videocore initialized after %d retries\n", __func__, i);
679 instance = kzalloc(sizeof(*instance), GFP_KERNEL);
681 vchiq_log_error(vchiq_core_log_level,
682 "%s: error allocating vchiq instance\n", __func__);
687 instance->connected = 0;
688 instance->state = state;
689 mutex_init(&instance->bulk_waiter_list_mutex);
690 INIT_LIST_HEAD(&instance->bulk_waiter_list);
692 *instance_out = instance;
697 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, ret);
701 EXPORT_SYMBOL(vchiq_initialise);
703 void free_bulk_waiter(struct vchiq_instance *instance)
705 struct bulk_waiter_node *waiter, *next;
707 list_for_each_entry_safe(waiter, next,
708 &instance->bulk_waiter_list, list) {
709 list_del(&waiter->list);
710 vchiq_log_info(vchiq_arm_log_level, "bulk_waiter - cleaned up %pK for pid %d",
711 waiter, waiter->pid);
716 enum vchiq_status vchiq_shutdown(struct vchiq_instance *instance)
718 enum vchiq_status status = VCHIQ_SUCCESS;
719 struct vchiq_state *state = instance->state;
721 if (mutex_lock_killable(&state->mutex))
724 /* Remove all services */
725 vchiq_shutdown_internal(state, instance);
727 mutex_unlock(&state->mutex);
729 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
731 free_bulk_waiter(instance);
736 EXPORT_SYMBOL(vchiq_shutdown);
738 static int vchiq_is_connected(struct vchiq_instance *instance)
740 return instance->connected;
743 enum vchiq_status vchiq_connect(struct vchiq_instance *instance)
745 enum vchiq_status status;
746 struct vchiq_state *state = instance->state;
748 if (mutex_lock_killable(&state->mutex)) {
749 vchiq_log_trace(vchiq_core_log_level, "%s: call to mutex_lock failed", __func__);
750 status = VCHIQ_RETRY;
753 status = vchiq_connect_internal(state, instance);
755 if (status == VCHIQ_SUCCESS)
756 instance->connected = 1;
758 mutex_unlock(&state->mutex);
761 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
765 EXPORT_SYMBOL(vchiq_connect);
767 static enum vchiq_status
768 vchiq_add_service(struct vchiq_instance *instance,
769 const struct vchiq_service_params_kernel *params,
770 unsigned int *phandle)
772 enum vchiq_status status;
773 struct vchiq_state *state = instance->state;
774 struct vchiq_service *service = NULL;
777 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
779 srvstate = vchiq_is_connected(instance)
780 ? VCHIQ_SRVSTATE_LISTENING
781 : VCHIQ_SRVSTATE_HIDDEN;
783 service = vchiq_add_service_internal(state, params, srvstate, instance, NULL);
786 *phandle = service->handle;
787 status = VCHIQ_SUCCESS;
789 status = VCHIQ_ERROR;
792 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
798 vchiq_open_service(struct vchiq_instance *instance,
799 const struct vchiq_service_params_kernel *params,
800 unsigned int *phandle)
802 enum vchiq_status status = VCHIQ_ERROR;
803 struct vchiq_state *state = instance->state;
804 struct vchiq_service *service = NULL;
806 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
808 if (!vchiq_is_connected(instance))
811 service = vchiq_add_service_internal(state, params, VCHIQ_SRVSTATE_OPENING, instance, NULL);
814 *phandle = service->handle;
815 status = vchiq_open_service_internal(service, current->pid);
816 if (status != VCHIQ_SUCCESS) {
817 vchiq_remove_service(service->handle);
818 *phandle = VCHIQ_SERVICE_HANDLE_INVALID;
823 vchiq_log_trace(vchiq_core_log_level, "%s(%p): returning %d", __func__, instance, status);
827 EXPORT_SYMBOL(vchiq_open_service);
830 vchiq_bulk_transmit(unsigned int handle, const void *data, unsigned int size,
831 void *userdata, enum vchiq_bulk_mode mode)
833 enum vchiq_status status;
837 case VCHIQ_BULK_MODE_NOCALLBACK:
838 case VCHIQ_BULK_MODE_CALLBACK:
839 status = vchiq_bulk_transfer(handle,
841 size, userdata, mode,
842 VCHIQ_BULK_TRANSMIT);
844 case VCHIQ_BULK_MODE_BLOCKING:
845 status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
846 VCHIQ_BULK_TRANSMIT);
853 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
854 * to implement a retry mechanism since this function is
855 * supposed to block until queued
857 if (status != VCHIQ_RETRY)
865 EXPORT_SYMBOL(vchiq_bulk_transmit);
867 enum vchiq_status vchiq_bulk_receive(unsigned int handle, void *data,
868 unsigned int size, void *userdata,
869 enum vchiq_bulk_mode mode)
871 enum vchiq_status status;
875 case VCHIQ_BULK_MODE_NOCALLBACK:
876 case VCHIQ_BULK_MODE_CALLBACK:
877 status = vchiq_bulk_transfer(handle, data, NULL,
879 mode, VCHIQ_BULK_RECEIVE);
881 case VCHIQ_BULK_MODE_BLOCKING:
882 status = vchiq_blocking_bulk_transfer(handle, (void *)data, size,
890 * vchiq_*_bulk_transfer() may return VCHIQ_RETRY, so we need
891 * to implement a retry mechanism since this function is
892 * supposed to block until queued
894 if (status != VCHIQ_RETRY)
902 EXPORT_SYMBOL(vchiq_bulk_receive);
904 static enum vchiq_status
905 vchiq_blocking_bulk_transfer(unsigned int handle, void *data, unsigned int size,
906 enum vchiq_bulk_dir dir)
908 struct vchiq_instance *instance;
909 struct vchiq_service *service;
910 enum vchiq_status status;
911 struct bulk_waiter_node *waiter = NULL;
914 service = find_service_by_handle(handle);
918 instance = service->instance;
920 vchiq_service_put(service);
922 mutex_lock(&instance->bulk_waiter_list_mutex);
923 list_for_each_entry(waiter, &instance->bulk_waiter_list, list) {
924 if (waiter->pid == current->pid) {
925 list_del(&waiter->list);
930 mutex_unlock(&instance->bulk_waiter_list_mutex);
933 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
936 /* This thread has an outstanding bulk transfer. */
937 /* FIXME: why compare a dma address to a pointer? */
938 if ((bulk->data != (dma_addr_t)(uintptr_t)data) || (bulk->size != size)) {
940 * This is not a retry of the previous one.
941 * Cancel the signal when the transfer completes.
943 spin_lock(&bulk_waiter_spinlock);
944 bulk->userdata = NULL;
945 spin_unlock(&bulk_waiter_spinlock);
949 waiter = kzalloc(sizeof(*waiter), GFP_KERNEL);
951 vchiq_log_error(vchiq_core_log_level, "%s - out of memory", __func__);
956 status = vchiq_bulk_transfer(handle, data, NULL, size,
957 &waiter->bulk_waiter,
958 VCHIQ_BULK_MODE_BLOCKING, dir);
959 if ((status != VCHIQ_RETRY) || fatal_signal_pending(current) || !waiter->bulk_waiter.bulk) {
960 struct vchiq_bulk *bulk = waiter->bulk_waiter.bulk;
963 /* Cancel the signal when the transfer completes. */
964 spin_lock(&bulk_waiter_spinlock);
965 bulk->userdata = NULL;
966 spin_unlock(&bulk_waiter_spinlock);
970 waiter->pid = current->pid;
971 mutex_lock(&instance->bulk_waiter_list_mutex);
972 list_add(&waiter->list, &instance->bulk_waiter_list);
973 mutex_unlock(&instance->bulk_waiter_list_mutex);
974 vchiq_log_info(vchiq_arm_log_level, "saved bulk_waiter %pK for pid %d", waiter,
981 static enum vchiq_status
982 add_completion(struct vchiq_instance *instance, enum vchiq_reason reason,
983 struct vchiq_header *header, struct user_service *user_service,
986 struct vchiq_completion_data_kernel *completion;
989 DEBUG_INITIALISE(g_state.local);
991 insert = instance->completion_insert;
992 while ((insert - instance->completion_remove) >= MAX_COMPLETIONS) {
993 /* Out of space - wait for the client */
994 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
995 vchiq_log_trace(vchiq_arm_log_level, "%s - completion queue full", __func__);
996 DEBUG_COUNT(COMPLETION_QUEUE_FULL_COUNT);
997 if (wait_for_completion_interruptible(&instance->remove_event)) {
998 vchiq_log_info(vchiq_arm_log_level, "service_callback interrupted");
1000 } else if (instance->closing) {
1001 vchiq_log_info(vchiq_arm_log_level, "service_callback closing");
1002 return VCHIQ_SUCCESS;
1004 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1007 completion = &instance->completions[insert & (MAX_COMPLETIONS - 1)];
1009 completion->header = header;
1010 completion->reason = reason;
1011 /* N.B. service_userdata is updated while processing AWAIT_COMPLETION */
1012 completion->service_userdata = user_service->service;
1013 completion->bulk_userdata = bulk_userdata;
1015 if (reason == VCHIQ_SERVICE_CLOSED) {
1017 * Take an extra reference, to be held until
1018 * this CLOSED notification is delivered.
1020 vchiq_service_get(user_service->service);
1021 if (instance->use_close_delivered)
1022 user_service->close_pending = 1;
1026 * A write barrier is needed here to ensure that the entire completion
1027 * record is written out before the insert point.
1031 if (reason == VCHIQ_MESSAGE_AVAILABLE)
1032 user_service->message_available_pos = insert;
1035 instance->completion_insert = insert;
1037 complete(&instance->insert_event);
1039 return VCHIQ_SUCCESS;
1043 service_callback(enum vchiq_reason reason, struct vchiq_header *header,
1044 unsigned int handle, void *bulk_userdata)
1047 * How do we ensure the callback goes to the right client?
1048 * The service_user data points to a user_service record
1049 * containing the original callback and the user state structure, which
1050 * contains a circular buffer for completion records.
1052 struct user_service *user_service;
1053 struct vchiq_service *service;
1054 struct vchiq_instance *instance;
1055 bool skip_completion = false;
1057 DEBUG_INITIALISE(g_state.local);
1059 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1062 service = handle_to_service(handle);
1063 if (WARN_ON(!service)) {
1065 return VCHIQ_SUCCESS;
1068 user_service = (struct user_service *)service->base.userdata;
1069 instance = user_service->instance;
1071 if (!instance || instance->closing) {
1073 return VCHIQ_SUCCESS;
1077 * As hopping around different synchronization mechanism,
1078 * taking an extra reference results in simpler implementation.
1080 vchiq_service_get(service);
1083 vchiq_log_trace(vchiq_arm_log_level,
1084 "%s - service %lx(%d,%p), reason %d, header %lx, instance %lx, bulk_userdata %lx",
1085 __func__, (unsigned long)user_service, service->localport,
1086 user_service->userdata, reason, (unsigned long)header,
1087 (unsigned long)instance, (unsigned long)bulk_userdata);
1089 if (header && user_service->is_vchi) {
1090 spin_lock(&msg_queue_spinlock);
1091 while (user_service->msg_insert ==
1092 (user_service->msg_remove + MSG_QUEUE_SIZE)) {
1093 spin_unlock(&msg_queue_spinlock);
1094 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1095 DEBUG_COUNT(MSG_QUEUE_FULL_COUNT);
1096 vchiq_log_trace(vchiq_arm_log_level, "%s - msg queue full", __func__);
1098 * If there is no MESSAGE_AVAILABLE in the completion
1101 if ((user_service->message_available_pos -
1102 instance->completion_remove) < 0) {
1103 enum vchiq_status status;
1105 vchiq_log_info(vchiq_arm_log_level,
1106 "Inserting extra MESSAGE_AVAILABLE");
1107 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1108 status = add_completion(instance, reason, NULL, user_service,
1110 if (status != VCHIQ_SUCCESS) {
1111 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1112 vchiq_service_put(service);
1117 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1118 if (wait_for_completion_interruptible(&user_service->remove_event)) {
1119 vchiq_log_info(vchiq_arm_log_level, "%s interrupted", __func__);
1120 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1121 vchiq_service_put(service);
1123 } else if (instance->closing) {
1124 vchiq_log_info(vchiq_arm_log_level, "%s closing", __func__);
1125 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1126 vchiq_service_put(service);
1129 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1130 spin_lock(&msg_queue_spinlock);
1133 user_service->msg_queue[user_service->msg_insert &
1134 (MSG_QUEUE_SIZE - 1)] = header;
1135 user_service->msg_insert++;
1138 * If there is a thread waiting in DEQUEUE_MESSAGE, or if
1139 * there is a MESSAGE_AVAILABLE in the completion queue then
1140 * bypass the completion queue.
1142 if (((user_service->message_available_pos -
1143 instance->completion_remove) >= 0) ||
1144 user_service->dequeue_pending) {
1145 user_service->dequeue_pending = 0;
1146 skip_completion = true;
1149 spin_unlock(&msg_queue_spinlock);
1150 complete(&user_service->insert_event);
1154 DEBUG_TRACE(SERVICE_CALLBACK_LINE);
1155 vchiq_service_put(service);
1157 if (skip_completion)
1158 return VCHIQ_SUCCESS;
1160 return add_completion(instance, reason, header, user_service,
1164 int vchiq_dump(void *dump_context, const char *str, int len)
1166 struct dump_context *context = (struct dump_context *)dump_context;
1169 if (context->actual >= context->space)
1172 if (context->offset > 0) {
1173 int skip_bytes = min_t(int, len, context->offset);
1177 context->offset -= skip_bytes;
1178 if (context->offset > 0)
1181 copy_bytes = min_t(int, len, context->space - context->actual);
1182 if (copy_bytes == 0)
1184 if (copy_to_user(context->buf + context->actual, str,
1187 context->actual += copy_bytes;
1191 * If the terminating NUL is included in the length, then it
1192 * marks the end of a line and should be replaced with a
1195 if ((len == 0) && (str[copy_bytes - 1] == '\0')) {
1198 if (copy_to_user(context->buf + context->actual - 1,
1205 int vchiq_dump_platform_instances(void *dump_context)
1207 struct vchiq_state *state = vchiq_get_state();
1213 * There is no list of instances, so instead scan all services,
1214 * marking those that have been dumped.
1218 for (i = 0; i < state->unused_service; i++) {
1219 struct vchiq_service *service;
1220 struct vchiq_instance *instance;
1222 service = rcu_dereference(state->services[i]);
1223 if (!service || service->base.callback != service_callback)
1226 instance = service->instance;
1232 for (i = 0; i < state->unused_service; i++) {
1233 struct vchiq_service *service;
1234 struct vchiq_instance *instance;
1238 service = rcu_dereference(state->services[i]);
1239 if (!service || service->base.callback != service_callback) {
1244 instance = service->instance;
1245 if (!instance || instance->mark) {
1251 len = snprintf(buf, sizeof(buf),
1252 "Instance %pK: pid %d,%s completions %d/%d",
1253 instance, instance->pid,
1254 instance->connected ? " connected, " :
1256 instance->completion_insert -
1257 instance->completion_remove,
1259 err = vchiq_dump(dump_context, buf, len + 1);
1267 int vchiq_dump_platform_service_state(void *dump_context,
1268 struct vchiq_service *service)
1270 struct user_service *user_service =
1271 (struct user_service *)service->base.userdata;
1275 len = scnprintf(buf, sizeof(buf), " instance %pK", service->instance);
1277 if ((service->base.callback == service_callback) && user_service->is_vchi) {
1278 len += scnprintf(buf + len, sizeof(buf) - len, ", %d/%d messages",
1279 user_service->msg_insert - user_service->msg_remove,
1282 if (user_service->dequeue_pending)
1283 len += scnprintf(buf + len, sizeof(buf) - len,
1284 " (dequeue pending)");
1287 return vchiq_dump(dump_context, buf, len + 1);
1290 struct vchiq_state *
1291 vchiq_get_state(void)
1293 if (!g_state.remote)
1294 pr_err("%s: g_state.remote == NULL\n", __func__);
1295 else if (g_state.remote->initialised != 1)
1296 pr_notice("%s: g_state.remote->initialised != 1 (%d)\n",
1297 __func__, g_state.remote->initialised);
1299 return (g_state.remote &&
1300 (g_state.remote->initialised == 1)) ? &g_state : NULL;
/*
 * Autosuspend related functionality
 */
1307 static enum vchiq_status
1308 vchiq_keepalive_vchiq_callback(enum vchiq_reason reason,
1309 struct vchiq_header *header,
1310 unsigned int service_user, void *bulk_user)
1312 vchiq_log_error(vchiq_susp_log_level, "%s callback reason %d", __func__, reason);
1317 vchiq_keepalive_thread_func(void *v)
1319 struct vchiq_state *state = (struct vchiq_state *)v;
1320 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1322 enum vchiq_status status;
1323 struct vchiq_instance *instance;
1324 unsigned int ka_handle;
1327 struct vchiq_service_params_kernel params = {
1328 .fourcc = VCHIQ_MAKE_FOURCC('K', 'E', 'E', 'P'),
1329 .callback = vchiq_keepalive_vchiq_callback,
1330 .version = KEEPALIVE_VER,
1331 .version_min = KEEPALIVE_VER_MIN
1334 ret = vchiq_initialise(&instance);
1336 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_initialise failed %d", __func__,
1341 status = vchiq_connect(instance);
1342 if (status != VCHIQ_SUCCESS) {
1343 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_connect failed %d", __func__,
1348 status = vchiq_add_service(instance, ¶ms, &ka_handle);
1349 if (status != VCHIQ_SUCCESS) {
1350 vchiq_log_error(vchiq_susp_log_level, "%s vchiq_open_service failed %d", __func__,
1356 long rc = 0, uc = 0;
1358 if (wait_for_completion_interruptible(&arm_state->ka_evt)) {
1359 vchiq_log_error(vchiq_susp_log_level, "%s interrupted", __func__);
1360 flush_signals(current);
1365 * read and clear counters. Do release_count then use_count to
1366 * prevent getting more releases than uses
1368 rc = atomic_xchg(&arm_state->ka_release_count, 0);
1369 uc = atomic_xchg(&arm_state->ka_use_count, 0);
1372 * Call use/release service the requisite number of times.
1373 * Process use before release so use counts don't go negative
1376 atomic_inc(&arm_state->ka_use_ack_count);
1377 status = vchiq_use_service(ka_handle);
1378 if (status != VCHIQ_SUCCESS) {
1379 vchiq_log_error(vchiq_susp_log_level,
1380 "%s vchiq_use_service error %d", __func__, status);
1384 status = vchiq_release_service(ka_handle);
1385 if (status != VCHIQ_SUCCESS) {
1386 vchiq_log_error(vchiq_susp_log_level,
1387 "%s vchiq_release_service error %d", __func__,
1394 vchiq_shutdown(instance);
/*
 * Core of every "use" (keep VideoCore active) request.  Attributes the
 * reference either to the whole VCHIQ connection (USE_TYPE_VCHIQ) or to
 * @service, bumps the global videocore_use_count under susp_res_lock,
 * and flushes any accumulated keepalive use-acks to the remote side.
 */
vchiq_use_internal(struct vchiq_state *state, struct vchiq_service *service,
		   enum USE_TYPE_E use_type)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	/* Build a short label for trace output: "VCHIQ: " or "fourcc:client" */
	if (use_type == USE_TYPE_VCHIQ) {
		sprintf(entity, "VCHIQ: ");
		entity_uc = &arm_state->peer_use_count;
	} else if (service) {
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
		vchiq_log_error(vchiq_susp_log_level, "%s null service ptr", __func__);
	/* Both counters are updated atomically w.r.t. readers of the lock */
	write_lock_bh(&arm_state->susp_res_lock);
	local_uc = ++arm_state->videocore_use_count;
	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, local_uc);
	write_unlock_bh(&arm_state->susp_res_lock);
	enum vchiq_status status = VCHIQ_SUCCESS;
	long ack_cnt = atomic_xchg(&arm_state->ka_use_ack_count, 0);

	while (ack_cnt && (status == VCHIQ_SUCCESS)) {
		/* Send the use notify to videocore */
		status = vchiq_send_remote_use_active(state);
		if (status == VCHIQ_SUCCESS)
	/* On failure, return the unsent acks so they are retried later */
	atomic_add(ack_cnt, &arm_state->ka_use_ack_count);
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
/*
 * Counterpart of vchiq_use_internal(): drops one reference from the
 * per-entity counter (service, or "PEER" for the connection) and from
 * the global videocore_use_count, under susp_res_lock.  Underflow is
 * reported with WARN_ON rather than BUG_ON so a misbehaving user
 * thread cannot crash the kernel.
 */
vchiq_release_internal(struct vchiq_state *state, struct vchiq_service *service)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
		/* Label for trace output, as in vchiq_use_internal() */
		sprintf(entity, "%c%c%c%c:%03d",
			VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc),
			service->client_id);
		entity_uc = &service->service_use_count;
		sprintf(entity, "PEER: ");
		entity_uc = &arm_state->peer_use_count;
	write_lock_bh(&arm_state->susp_res_lock);
	if (!arm_state->videocore_use_count || !(*entity_uc)) {
		/* Don't use BUG_ON - don't allow user thread to crash kernel */
		WARN_ON(!arm_state->videocore_use_count);
		WARN_ON(!(*entity_uc));
	--arm_state->videocore_use_count;
	vchiq_log_trace(vchiq_susp_log_level, "%s %s count %d, state count %d", __func__, entity,
			*entity_uc, arm_state->videocore_use_count);
	write_unlock_bh(&arm_state->susp_res_lock);
	vchiq_log_trace(vchiq_susp_log_level, "%s exit %d", __func__, ret);
1502 vchiq_on_remote_use(struct vchiq_state *state)
1504 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1506 atomic_inc(&arm_state->ka_use_count);
1507 complete(&arm_state->ka_evt);
1511 vchiq_on_remote_release(struct vchiq_state *state)
1513 struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
1515 atomic_inc(&arm_state->ka_release_count);
1516 complete(&arm_state->ka_evt);
1520 vchiq_use_service_internal(struct vchiq_service *service)
1522 return vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1526 vchiq_release_service_internal(struct vchiq_service *service)
1528 return vchiq_release_internal(service->state, service);
/* Accessor for vchiq_debugfs: the per-instance debugfs bookkeeping. */
struct vchiq_debugfs_node *
vchiq_instance_get_debugfs_node(struct vchiq_instance *instance)
	return &instance->debugfs_node;
/*
 * Sum the service_use_count of every service belonging to @instance,
 * walking them via __next_service_by_instance().
 */
vchiq_instance_get_use_count(struct vchiq_instance *instance)
	struct vchiq_service *service;
	int use_count = 0, i;

	while ((service = __next_service_by_instance(instance->state,
		use_count += service->service_use_count;
/* PID recorded for the process that owns this instance. */
vchiq_instance_get_pid(struct vchiq_instance *instance)
	return instance->pid;
/* Current per-instance trace flag (set via vchiq_instance_set_trace). */
vchiq_instance_get_trace(struct vchiq_instance *instance)
	return instance->trace;
/*
 * Set the trace flag on @instance and propagate it to each of the
 * instance's services via __next_service_by_instance().
 */
vchiq_instance_set_trace(struct vchiq_instance *instance, int trace)
	struct vchiq_service *service;
	while ((service = __next_service_by_instance(instance->state,
		service->trace = trace;
	/* Normalise to 0/1 for the instance-wide flag */
	instance->trace = (trace != 0);
1580 vchiq_use_service(unsigned int handle)
1582 enum vchiq_status ret = VCHIQ_ERROR;
1583 struct vchiq_service *service = find_service_by_handle(handle);
1586 ret = vchiq_use_internal(service->state, service, USE_TYPE_SERVICE);
1587 vchiq_service_put(service);
1591 EXPORT_SYMBOL(vchiq_use_service);
1594 vchiq_release_service(unsigned int handle)
1596 enum vchiq_status ret = VCHIQ_ERROR;
1597 struct vchiq_service *service = find_service_by_handle(handle);
1600 ret = vchiq_release_internal(service->state, service);
1601 vchiq_service_put(service);
1605 EXPORT_SYMBOL(vchiq_release_service);
/*
 * Snapshot of one service's identity and use-count, captured under
 * susp_res_lock so the warnings in vchiq_dump_service_use_state() can
 * be emitted after the lock is dropped.
 */
struct service_data_struct {
/*
 * Log the use-count of every active service plus the peer and overall
 * VideoCore counts.  Service data is snapshotted into a heap array
 * under susp_res_lock, then logged after the lock is released.
 */
vchiq_dump_service_use_state(struct vchiq_state *state)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	struct service_data_struct *service_data;
	/*
	 * If there's more than 64 services, only dump ones with
	 * non-zero counts
	 */
	int only_nonzero = 0;
	static const char *nz = "<-- preventing suspend";
	int active_services;
	service_data = kmalloc_array(MAX_SERVICES, sizeof(*service_data),
	read_lock_bh(&arm_state->susp_res_lock);
	peer_count = arm_state->peer_use_count;
	vc_use_count = arm_state->videocore_use_count;
	active_services = state->unused_service;
	if (active_services > MAX_SERVICES)
	/* Snapshot under the lock; warnings are printed after unlock */
	for (i = 0; i < active_services; i++) {
		struct vchiq_service *service_ptr =
			rcu_dereference(state->services[i]);
		if (only_nonzero && !service_ptr->service_use_count)
		if (service_ptr->srvstate == VCHIQ_SRVSTATE_FREE)
		service_data[found].fourcc = service_ptr->base.fourcc;
		service_data[found].clientid = service_ptr->client_id;
		service_data[found].use_count = service_ptr->service_use_count;
		if (found >= MAX_SERVICES)
	read_unlock_bh(&arm_state->susp_res_lock);
	vchiq_log_warning(vchiq_susp_log_level, "Too many active services (%d). Only dumping up to first %d services with non-zero use-count",
			  active_services, found);
	for (i = 0; i < found; i++) {
		vchiq_log_warning(vchiq_susp_log_level, "----- %c%c%c%c:%d service count %d %s",
				  VCHIQ_FOURCC_AS_4CHARS(service_data[i].fourcc),
				  service_data[i].clientid, service_data[i].use_count,
				  service_data[i].use_count ? nz : "");
	vchiq_log_warning(vchiq_susp_log_level, "----- VCHIQ use count %d", peer_count);
	vchiq_log_warning(vchiq_susp_log_level, "--- Overall vchiq instance use count %d",
	kfree(service_data);
1688 vchiq_check_service(struct vchiq_service *service)
1690 struct vchiq_arm_state *arm_state;
1691 enum vchiq_status ret = VCHIQ_ERROR;
1693 if (!service || !service->state)
1696 arm_state = vchiq_platform_get_arm_state(service->state);
1698 read_lock_bh(&arm_state->susp_res_lock);
1699 if (service->service_use_count)
1700 ret = VCHIQ_SUCCESS;
1701 read_unlock_bh(&arm_state->susp_res_lock);
1703 if (ret == VCHIQ_ERROR) {
1704 vchiq_log_error(vchiq_susp_log_level,
1705 "%s ERROR - %c%c%c%c:%d service count %d, state count %d", __func__,
1706 VCHIQ_FOURCC_AS_4CHARS(service->base.fourcc), service->client_id,
1707 service->service_use_count, arm_state->videocore_use_count);
1708 vchiq_dump_service_use_state(service->state);
/*
 * Connection-state hook.  On the first transition to CONNECTED it
 * spawns the keepalive thread ("vchiq-keep/<id>"); the first_connect
 * flag, guarded by susp_res_lock, ensures this happens only once.
 */
void vchiq_platform_conn_state_changed(struct vchiq_state *state,
				       enum vchiq_connstate oldstate,
				       enum vchiq_connstate newstate)
	struct vchiq_arm_state *arm_state = vchiq_platform_get_arm_state(state);
	char threadname[16];

	vchiq_log_info(vchiq_susp_log_level, "%d: %s->%s", state->id,
		       get_conn_state_name(oldstate), get_conn_state_name(newstate));
	if (state->conn_state != VCHIQ_CONNSTATE_CONNECTED)
	write_lock_bh(&arm_state->susp_res_lock);
	/* Already connected once before: nothing more to do */
	if (arm_state->first_connect) {
		write_unlock_bh(&arm_state->susp_res_lock);
	arm_state->first_connect = 1;
	write_unlock_bh(&arm_state->susp_res_lock);
	snprintf(threadname, sizeof(threadname), "vchiq-keep/%d",
	arm_state->ka_thread = kthread_create(&vchiq_keepalive_thread_func,
	if (IS_ERR(arm_state->ka_thread)) {
		vchiq_log_error(vchiq_susp_log_level,
				"vchiq: FATAL: couldn't create thread %s",
	wake_up_process(arm_state->ka_thread);
/* Devicetree match table; .data selects the per-SoC cache line size. */
static const struct of_device_id vchiq_of_match[] = {
	{ .compatible = "brcm,bcm2835-vchiq", .data = &bcm2835_drvdata },
	{ .compatible = "brcm,bcm2836-vchiq", .data = &bcm2836_drvdata },
MODULE_DEVICE_TABLE(of, vchiq_of_match);
/*
 * Register a child platform device (camera/audio) under the vchiq
 * device with a 32-bit DMA mask.  A registration failure is only
 * warned about; the parent driver keeps going.
 */
static struct platform_device *
vchiq_register_child(struct platform_device *pdev, const char *name)
	struct platform_device_info pdevinfo;
	struct platform_device *child;

	memset(&pdevinfo, 0, sizeof(pdevinfo));

	pdevinfo.parent = &pdev->dev;
	pdevinfo.name = name;
	pdevinfo.id = PLATFORM_DEVID_NONE;
	pdevinfo.dma_mask = DMA_BIT_MASK(32);

	child = platform_device_register_full(&pdevinfo);
	if (IS_ERR(child)) {
		dev_warn(&pdev->dev, "%s not registered\n", name);
/*
 * Platform probe: look up per-SoC drvdata from the devicetree match,
 * acquire the Raspberry Pi firmware handle (deferring until it is
 * available), initialise the vchiq platform layer and debugfs, create
 * the character device, and register the camera/audio child devices.
 */
static int vchiq_probe(struct platform_device *pdev)
	struct device_node *fw_node;
	const struct of_device_id *of_id;
	struct vchiq_drvdata *drvdata;
	of_id = of_match_node(vchiq_of_match, pdev->dev.of_node);
	drvdata = (struct vchiq_drvdata *)of_id->data;
	fw_node = of_find_compatible_node(NULL, NULL,
					  "raspberrypi,bcm2835-firmware");
		dev_err(&pdev->dev, "Missing firmware node\n");
	drvdata->fw = devm_rpi_firmware_get(&pdev->dev, fw_node);
	of_node_put(fw_node);
	/* Firmware driver not ready yet: retry this probe later */
	return -EPROBE_DEFER;
	platform_set_drvdata(pdev, drvdata);
	err = vchiq_platform_init(pdev, &g_state);
		goto failed_platform_init;
	vchiq_debugfs_init();
	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq: platform initialised - version %d (min %d)",
		       VCHIQ_VERSION, VCHIQ_VERSION_MIN);
	/*
	 * Simply exit on error since the function handles cleanup in
	 * cases of failure.
	 */
	err = vchiq_register_chrdev(&pdev->dev);
		vchiq_log_warning(vchiq_arm_log_level,
				  "Failed to initialize vchiq cdev");
	bcm2835_camera = vchiq_register_child(pdev, "bcm2835-camera");
	bcm2835_audio = vchiq_register_child(pdev, "bcm2835_audio");
failed_platform_init:
	vchiq_log_warning(vchiq_arm_log_level, "could not initialize vchiq platform");
/* Undo vchiq_probe: remove child devices, debugfs entries and cdev. */
static int vchiq_remove(struct platform_device *pdev)
	platform_device_unregister(bcm2835_audio);
	platform_device_unregister(bcm2835_camera);
	vchiq_debugfs_deinit();
	vchiq_deregister_chrdev();
/* Platform driver glue binding the DT match table to probe/remove. */
static struct platform_driver vchiq_driver = {
		.name = "bcm2835_vchiq",
		.of_match_table = vchiq_of_match,
	.probe = vchiq_probe,
	.remove = vchiq_remove,
/* Module entry point: register the platform driver, logging failure. */
static int __init vchiq_driver_init(void)
	ret = platform_driver_register(&vchiq_driver);
		pr_err("Failed to register vchiq driver\n");
module_init(vchiq_driver_init);
/* Module exit point: unregister the platform driver. */
static void __exit vchiq_driver_exit(void)
	platform_driver_unregister(&vchiq_driver);
module_exit(vchiq_driver_exit);
/* Module metadata; dual licence matches the SPDX header at file top. */
MODULE_LICENSE("Dual BSD/GPL");
MODULE_DESCRIPTION("Videocore VCHIQ driver");
MODULE_AUTHOR("Broadcom Corporation");