// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/* Copyright (c) 2010-2012 Broadcom. All rights reserved. */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <linux/interrupt.h>
#include <linux/pagemap.h>
#include <linux/dma-mapping.h>
#include <linux/io.h>
#include <linux/platform_device.h>
#include <linux/uaccess.h>
#include <linux/mm.h>
#include <soc/bcm2835/raspberrypi-firmware.h>

#define TOTAL_SLOTS (VCHIQ_SLOT_ZERO_SLOTS + 2 * 32)

#include "vchiq_arm.h"
#include "vchiq_connected.h"
#include "vchiq_pagelist.h"

#define MAX_FRAGMENTS (VCHIQ_NUM_CURRENT_BULKS * 2)

#define VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX 0
#define VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX 1

#define BELL0	0x00
#define BELL2	0x08

struct vchiq_2835_state {
	int inited;
	struct vchiq_arm_state arm_state;
};

struct vchiq_pagelist_info {
	struct pagelist *pagelist;
	size_t pagelist_buffer_size;
	dma_addr_t dma_addr;
	enum dma_data_direction dma_dir;
	unsigned int num_pages;
	unsigned int pages_need_release;
	struct page **pages;
	struct scatterlist *scatterlist;
	unsigned int scatterlist_mapped;
};

static void __iomem *g_regs;
/* This value is the size of the L2 cache lines as understood by the
 * VPU firmware, which determines the required alignment of the
 * offsets/sizes in pagelists.
 *
 * Modern VPU firmware looks for a DT "cache-line-size" property in
 * the VCHIQ node and will overwrite it with the actual L2 cache size,
 * which the kernel must then respect. That property was rejected
 * upstream, so we have to use the VPU firmware's compatibility value
 * of 32.
 */
static unsigned int g_cache_line_size = 32;
static unsigned int g_fragments_size;
static char *g_fragments_base;
static char *g_free_fragments;
static struct semaphore g_free_fragments_sema;
static struct device *g_dev;

static DEFINE_SEMAPHORE(g_free_fragments_mutex);

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id);

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type);

static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual);

int vchiq_platform_init(struct platform_device *pdev, struct vchiq_state *state)
{
	struct device *dev = &pdev->dev;
	struct vchiq_drvdata *drvdata = platform_get_drvdata(pdev);
	struct rpi_firmware *fw = drvdata->fw;
	struct vchiq_slot_zero *vchiq_slot_zero;
	void *slot_mem;
	dma_addr_t slot_phys;
	u32 channelbase;
	int slot_mem_size, frag_mem_size;
	int err, irq, i;

	/*
	 * VCHI messages between the CPU and firmware use
	 * 32-bit bus addresses.
	 */
	err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
	if (err < 0)
		return err;

	g_cache_line_size = drvdata->cache_line_size;
	g_fragments_size = 2 * g_cache_line_size;
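	/*
	 * Each fragment buffer is two cache lines: one for a partial line at
	 * the head of a bulk read and one for a partial line at the tail
	 * (both are consumed in free_pagelist() below).
	 */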

	/* Allocate space for the channels in coherent memory */
	slot_mem_size = PAGE_ALIGN(TOTAL_SLOTS * VCHIQ_SLOT_SIZE);
	frag_mem_size = PAGE_ALIGN(g_fragments_size * MAX_FRAGMENTS);

	slot_mem = dmam_alloc_coherent(dev, slot_mem_size + frag_mem_size,
				       &slot_phys, GFP_KERNEL);
	if (!slot_mem) {
		dev_err(dev, "could not allocate DMA memory\n");
		return -ENOMEM;
	}

	WARN_ON(((unsigned long)slot_mem & (PAGE_SIZE - 1)) != 0);

	vchiq_slot_zero = vchiq_init_slots(slot_mem, slot_mem_size);
	if (!vchiq_slot_zero)
		return -EINVAL;

	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_OFFSET_IDX] =
		(int)slot_phys + slot_mem_size;
	vchiq_slot_zero->platform_data[VCHIQ_PLATFORM_FRAGMENTS_COUNT_IDX] =
		MAX_FRAGMENTS;

	g_fragments_base = (char *)slot_mem + slot_mem_size;

	g_free_fragments = g_fragments_base;
	for (i = 0; i < (MAX_FRAGMENTS - 1); i++) {
		*(char **)&g_fragments_base[i * g_fragments_size] =
			&g_fragments_base[(i + 1) * g_fragments_size];
	}
	*(char **)&g_fragments_base[i * g_fragments_size] = NULL;
	sema_init(&g_free_fragments_sema, MAX_FRAGMENTS);
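
	/*
	 * The free list is threaded through the fragments themselves: the
	 * first sizeof(char *) bytes of each free fragment hold a pointer to
	 * the next free fragment, and g_free_fragments_sema counts how many
	 * remain available.
	 */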

	if (vchiq_init_state(state, vchiq_slot_zero) != VCHIQ_SUCCESS)
		return -EINVAL;

	g_regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(g_regs))
		return PTR_ERR(g_regs);

	irq = platform_get_irq(pdev, 0);
	if (irq <= 0)
		return irq;

	err = devm_request_irq(dev, irq, vchiq_doorbell_irq, IRQF_IRQPOLL,
			       "VCHIQ doorbell", state);
	if (err) {
		dev_err(dev, "failed to register irq=%d\n", irq);
		return err;
	}

	/* Send the base address of the slots to VideoCore */
	channelbase = slot_phys;
	err = rpi_firmware_property(fw, RPI_FIRMWARE_VCHIQ_INIT,
				    &channelbase, sizeof(channelbase));
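	/*
	 * On success the firmware is expected to write zero back into
	 * channelbase; a non-zero value left here means the VPU rejected the
	 * slot base address even though the mailbox call itself succeeded.
	 */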
	if (err || channelbase) {
		dev_err(dev, "failed to set channelbase\n");
		return err ? : -ENXIO;
	}

	g_dev = dev;
	vchiq_log_info(vchiq_arm_log_level,
		       "vchiq_init - done (slots %pK, phys %pad)",
		       vchiq_slot_zero, &slot_phys);

	vchiq_call_connected_callbacks();

	return 0;
}

enum vchiq_status
vchiq_platform_init_state(struct vchiq_state *state)
{
	enum vchiq_status status = VCHIQ_SUCCESS;
	struct vchiq_2835_state *platform_state;

	state->platform_state = kzalloc(sizeof(*platform_state), GFP_KERNEL);
	if (!state->platform_state)
		return VCHIQ_ERROR;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	platform_state->inited = 1;
	status = vchiq_arm_init_state(state, &platform_state->arm_state);

	if (status != VCHIQ_SUCCESS)
		platform_state->inited = 0;

	return status;
}

struct vchiq_arm_state *
vchiq_platform_get_arm_state(struct vchiq_state *state)
{
	struct vchiq_2835_state *platform_state;

	platform_state = (struct vchiq_2835_state *)state->platform_state;

	WARN_ON_ONCE(!platform_state->inited);

	return &platform_state->arm_state;
}

void
remote_event_signal(struct remote_event *event)
{
	wmb();

	event->fired = 1;

	dsb(sy); /* data barrier operation */

	if (event->armed)
		writel(0, g_regs + BELL2); /* trigger vc interrupt */
}

enum vchiq_status
vchiq_prepare_bulk_data(struct vchiq_bulk *bulk, void *offset, int size,
			int dir)
{
	struct vchiq_pagelist_info *pagelistinfo;

	pagelistinfo = create_pagelist((char __user *)offset, size,
				       (dir == VCHIQ_BULK_RECEIVE)
				       ? PAGELIST_READ
				       : PAGELIST_WRITE);
	if (!pagelistinfo)
		return VCHIQ_ERROR;

	bulk->data = (void *)(unsigned long)pagelistinfo->dma_addr;

	/*
	 * Store the pagelistinfo address in remote_data,
	 * which isn't used by the slave.
	 */
	bulk->remote_data = pagelistinfo;

	return VCHIQ_SUCCESS;
}

void
vchiq_complete_bulk(struct vchiq_bulk *bulk)
{
	if (bulk && bulk->remote_data && bulk->actual)
		free_pagelist((struct vchiq_pagelist_info *)bulk->remote_data,
			      bulk->actual);
}

int vchiq_dump_platform_state(void *dump_context)
{
	char buf[80];
	int len;

	len = snprintf(buf, sizeof(buf),
		       " Platform: 2835 (VC master)");
	return vchiq_dump(dump_context, buf, len + 1);
}

static irqreturn_t
vchiq_doorbell_irq(int irq, void *dev_id)
{
	struct vchiq_state *state = dev_id;
	irqreturn_t ret = IRQ_NONE;
	unsigned int status;

	/* Read (and clear) the doorbell */
	status = readl(g_regs + BELL0);

	if (status & 0x4) { /* Was the doorbell rung? */
		remote_event_pollall(state);
		ret = IRQ_HANDLED;
	}

	return ret;
}
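
/*
 * Undo whatever create_pagelist() managed to set up. Each teardown step is
 * gated by the corresponding flag or pointer in the pagelistinfo, so this
 * can be called safely from any point in the setup path.
 */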
static void
cleanup_pagelistinfo(struct vchiq_pagelist_info *pagelistinfo)
{
	if (pagelistinfo->scatterlist_mapped) {
		dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
			     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	}

	if (pagelistinfo->pages_need_release)
		unpin_user_pages(pagelistinfo->pages, pagelistinfo->num_pages);

	dma_free_coherent(g_dev, pagelistinfo->pagelist_buffer_size,
			  pagelistinfo->pagelist, pagelistinfo->dma_addr);
}

/* There is a potential problem with partial cache lines (pages?)
 * at the ends of the block when reading. If the CPU accessed anything in
 * the same line (page?) then it may have pulled old data into the cache,
 * obscuring the new data underneath. We can solve this by transferring the
 * partial cache lines separately, and allowing the ARM to copy into the
 * cached area.
 */

static struct vchiq_pagelist_info *
create_pagelist(char __user *buf, size_t count, unsigned short type)
{
	struct pagelist *pagelist;
	struct vchiq_pagelist_info *pagelistinfo;
	struct page **pages;
	u32 *addrs;
	unsigned int num_pages, offset, i, k;
	int actual_pages;
	size_t pagelist_size;
	struct scatterlist *scatterlist, *sg;
	int dma_buffers;
	dma_addr_t dma_addr;

	if (count >= INT_MAX - PAGE_SIZE)
		return NULL;

	offset = ((unsigned int)(unsigned long)buf & (PAGE_SIZE - 1));
	num_pages = DIV_ROUND_UP(count + offset, PAGE_SIZE);

	if (num_pages > (SIZE_MAX - sizeof(struct pagelist) -
			 sizeof(struct vchiq_pagelist_info)) /
			(sizeof(u32) + sizeof(pages[0]) +
			 sizeof(struct scatterlist)))
		return NULL;

	pagelist_size = sizeof(struct pagelist) +
			(num_pages * sizeof(u32)) +
			(num_pages * sizeof(pages[0])) +
			(num_pages * sizeof(struct scatterlist)) +
			sizeof(struct vchiq_pagelist_info);

	/* Allocate enough storage to hold the page pointers and the page
	 * list
	 */
	pagelist = dma_alloc_coherent(g_dev, pagelist_size, &dma_addr,
				      GFP_KERNEL);

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK", __func__, pagelist);

	if (!pagelist)
		return NULL;

	addrs = pagelist->addrs;
	pages = (struct page **)(addrs + num_pages);
	scatterlist = (struct scatterlist *)(pages + num_pages);
	pagelistinfo = (struct vchiq_pagelist_info *)
		       (scatterlist + num_pages);
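
	/*
	 * The single coherent buffer is carved up in order:
	 * [pagelist header + addrs[num_pages]] [pages[num_pages]]
	 * [scatterlist[num_pages]] [pagelistinfo]. Only the leading
	 * pagelist/addrs portion is consumed by the VPU; the rest is
	 * host-side bookkeeping.
	 */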

	pagelist->length = count;
	pagelist->type = type;
	pagelist->offset = offset;

	/* Populate the fields of the pagelistinfo structure */
	pagelistinfo->pagelist = pagelist;
	pagelistinfo->pagelist_buffer_size = pagelist_size;
	pagelistinfo->dma_addr = dma_addr;
	pagelistinfo->dma_dir = (type == PAGELIST_WRITE) ?
				 DMA_TO_DEVICE : DMA_FROM_DEVICE;
	pagelistinfo->num_pages = num_pages;
	pagelistinfo->pages_need_release = 0;
	pagelistinfo->pages = pages;
	pagelistinfo->scatterlist = scatterlist;
	pagelistinfo->scatterlist_mapped = 0;

	if (is_vmalloc_addr((void __force *)buf)) {
		unsigned long length = count;
		unsigned int off = offset;

		for (actual_pages = 0; actual_pages < num_pages;
		     actual_pages++) {
			struct page *pg =
				vmalloc_to_page((void __force *)(buf +
					(actual_pages * PAGE_SIZE)));
			size_t bytes = PAGE_SIZE - off;

			if (!pg) {
				cleanup_pagelistinfo(pagelistinfo);
				return NULL;
			}

			if (bytes > length)
				bytes = length;
			pages[actual_pages] = pg;
			length -= bytes;
			off = 0;
		}
		/* do not try and release vmalloc pages */
	} else {
		actual_pages = pin_user_pages_fast(
					(unsigned long)buf & PAGE_MASK,
					num_pages,
					type == PAGELIST_READ,
					pages);

		if (actual_pages != num_pages) {
			vchiq_log_info(vchiq_arm_log_level,
				       "%s - only %d/%d pages locked",
				       __func__, actual_pages, num_pages);

			/* This is probably due to the process being killed */
			if (actual_pages > 0)
				unpin_user_pages(pages, actual_pages);
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}
		/* release user pages */
		pagelistinfo->pages_need_release = 1;
	}

	/*
	 * Initialize the scatterlist so that the magic cookie
	 * is filled if debugging is enabled
	 */
	sg_init_table(scatterlist, num_pages);
	/* Now set the pages for each scatterlist */
	for (i = 0; i < num_pages; i++) {
		unsigned int len = PAGE_SIZE - offset;

		if (len > count)
			len = count;
		sg_set_page(scatterlist + i, pages[i], len, offset);
		offset = 0;
		count -= len;
	}

	dma_buffers = dma_map_sg(g_dev,
				 scatterlist,
				 num_pages,
				 pagelistinfo->dma_dir);

	if (dma_buffers == 0) {
		cleanup_pagelistinfo(pagelistinfo);
		return NULL;
	}

	pagelistinfo->scatterlist_mapped = 1;

	/* Combine adjacent blocks for performance */
	k = 0;
	for_each_sg(scatterlist, sg, dma_buffers, i) {
		u32 len = sg_dma_len(sg);
		u32 addr = sg_dma_address(sg);

		/* Note: addrs is the address + page_count - 1
		 * The firmware expects blocks after the first to be page-
		 * aligned and a multiple of the page size
		 */
		WARN_ON(len == 0);
		WARN_ON(i && (i != (dma_buffers - 1)) && (len & ~PAGE_MASK));
		WARN_ON(i && (addr & ~PAGE_MASK));
		if (k > 0 &&
		    ((addrs[k - 1] & PAGE_MASK) +
		     (((addrs[k - 1] & ~PAGE_MASK) + 1) << PAGE_SHIFT))
		    == (addr & PAGE_MASK))
			addrs[k - 1] += ((len + PAGE_SIZE - 1) >> PAGE_SHIFT);
		else
			addrs[k++] = (addr & PAGE_MASK) |
				     (((len + PAGE_SIZE - 1) >> PAGE_SHIFT) - 1);
	}
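
	/*
	 * Worked example of the packed addrs[] encoding (assuming 4 KiB
	 * pages): a run of three contiguous pages at bus address 0x12345000
	 * is stored as 0x12345002, i.e. (addr & PAGE_MASK) | (pages - 1).
	 */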

	/* Partial cache lines (fragments) require special measures */
	if ((type == PAGELIST_READ) &&
	    ((pagelist->offset & (g_cache_line_size - 1)) ||
	     ((pagelist->offset + pagelist->length) &
	      (g_cache_line_size - 1)))) {
		char *fragments;

		if (down_interruptible(&g_free_fragments_sema)) {
			cleanup_pagelistinfo(pagelistinfo);
			return NULL;
		}

		WARN_ON(!g_free_fragments);

		down(&g_free_fragments_mutex);
		fragments = g_free_fragments;
		g_free_fragments = *(char **)g_free_fragments;
		up(&g_free_fragments_mutex);
		pagelist->type = PAGELIST_READ_WITH_FRAGMENTS +
			(fragments - g_fragments_base) / g_fragments_size;
	}

	return pagelistinfo;
}
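
/*
 * Note on fragment ownership: create_pagelist() encodes the index of the
 * claimed fragment pair into pagelist->type (PAGELIST_READ_WITH_FRAGMENTS +
 * index); free_pagelist() below uses that index to find the fragment, copy
 * the partial head/tail cache lines back into the user pages, and return
 * the fragment to the free list.
 */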
static void
free_pagelist(struct vchiq_pagelist_info *pagelistinfo,
	      int actual)
{
	struct pagelist *pagelist = pagelistinfo->pagelist;
	struct page **pages = pagelistinfo->pages;
	unsigned int num_pages = pagelistinfo->num_pages;

	vchiq_log_trace(vchiq_arm_log_level, "%s - %pK, %d",
			__func__, pagelistinfo->pagelist, actual);

	/*
	 * NOTE: dma_unmap_sg must be called before the
	 * cpu can touch any of the data/pages.
	 */
	dma_unmap_sg(g_dev, pagelistinfo->scatterlist,
		     pagelistinfo->num_pages, pagelistinfo->dma_dir);
	pagelistinfo->scatterlist_mapped = 0;

	/* Deal with any partial cache lines (fragments) */
	if (pagelist->type >= PAGELIST_READ_WITH_FRAGMENTS) {
		char *fragments = g_fragments_base +
			(pagelist->type - PAGELIST_READ_WITH_FRAGMENTS) *
			g_fragments_size;
		int head_bytes, tail_bytes;

		head_bytes = (g_cache_line_size - pagelist->offset) &
			(g_cache_line_size - 1);
		tail_bytes = (pagelist->offset + actual) &
			(g_cache_line_size - 1);
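
		/*
		 * Example with a 32-byte cache line: offset 10 and
		 * actual 100 give head_bytes = (32 - 10) & 31 = 22 and
		 * tail_bytes = (10 + 100) & 31 = 14.
		 */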

		if ((actual >= 0) && (head_bytes != 0)) {
			if (head_bytes > actual)
				head_bytes = actual;

			memcpy((char *)kmap(pages[0]) +
				pagelist->offset,
				fragments,
				head_bytes);
			kunmap(pages[0]);
		}
		if ((actual >= 0) && (head_bytes < actual) &&
		    (tail_bytes != 0)) {
			memcpy((char *)kmap(pages[num_pages - 1]) +
				((pagelist->offset + actual) &
				 (PAGE_SIZE - 1) & ~(g_cache_line_size - 1)),
				fragments + g_cache_line_size,
				tail_bytes);
			kunmap(pages[num_pages - 1]);
		}

		down(&g_free_fragments_mutex);
		*(char **)fragments = g_free_fragments;
		g_free_fragments = fragments;
		up(&g_free_fragments_mutex);
		up(&g_free_fragments_sema);
	}

	/* Need to mark all the pages dirty. */
	if (pagelist->type != PAGELIST_WRITE &&
	    pagelistinfo->pages_need_release) {
		unsigned int i;

		for (i = 0; i < num_pages; i++)
			set_page_dirty(pages[i]);
	}

	cleanup_pagelistinfo(pagelistinfo);
}