/*
 * VRAM manager for OMAP
 *
 * Copyright (C) 2009 Nokia Corporation
 * Author: Tomi Valkeinen <tomi.valkeinen@nokia.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, write to the Free Software Foundation, Inc.,
 * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/list.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/memblock.h>
#include <linux/completion.h>
#include <linux/debugfs.h>
#include <linux/jiffies.h>
#include <linux/module.h>

#include <asm/setup.h>

#include <plat/vram.h>
#include <plat/dma.h>
#ifdef DEBUG
#define DBG(format, ...) pr_debug("VRAM: " format, ## __VA_ARGS__)
#else
#define DBG(format, ...)
#endif
/* postponed regions are used to temporarily store region information at boot
 * time when we cannot yet allocate the region list */
#define MAX_POSTPONED_REGIONS 10

static bool vram_initialized;
static int postponed_cnt;
static struct {
	unsigned long paddr;
	size_t size;
} postponed_regions[MAX_POSTPONED_REGIONS];
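/*
 * Regions registered through omap_vram_add_region() before
 * omap_vram_init() runs cannot be kzalloc'd yet; they are parked in
 * postponed_regions[] and replayed by omap_vram_init() once the slab
 * allocator is available.
 */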
struct vram_alloc {
	struct list_head list;
	unsigned long paddr;
	unsigned pages;
};

struct vram_region {
	struct list_head list;
	struct list_head alloc_list;
	unsigned long paddr;
	unsigned pages;
};
static DEFINE_MUTEX(region_mutex);
static LIST_HEAD(region_list);
static struct vram_region *omap_vram_create_region(unsigned long paddr,
		unsigned pages)
{
	struct vram_region *rm;

	rm = kzalloc(sizeof(*rm), GFP_KERNEL);

	if (rm) {
		INIT_LIST_HEAD(&rm->alloc_list);
		rm->paddr = paddr;
		rm->pages = pages;
	}

	return rm;
}
static void omap_vram_free_region(struct vram_region *vr)
{
	list_del(&vr->list);
	kfree(vr);
}
static struct vram_alloc *omap_vram_create_allocation(struct vram_region *vr,
		unsigned long paddr, unsigned pages)
{
	struct vram_alloc *va;
	struct vram_alloc *new;

	new = kzalloc(sizeof(*va), GFP_KERNEL);
	if (!new)
		return NULL;

	new->paddr = paddr;
	new->pages = pages;

	/* keep the allocation list sorted by physical address */
	list_for_each_entry(va, &vr->alloc_list, list) {
		if (va->paddr > new->paddr)
			break;
	}

	list_add_tail(&new->list, &va->list);

	return new;
}
static void omap_vram_free_allocation(struct vram_alloc *va)
{
	list_del(&va->list);
	kfree(va);
}
int omap_vram_add_region(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	unsigned pages;

	if (vram_initialized) {
		DBG("adding region paddr %08lx size %zu\n",
				paddr, size);

		size &= PAGE_MASK;
		pages = size >> PAGE_SHIFT;

		rm = omap_vram_create_region(paddr, pages);
		if (rm == NULL)
			return -ENOMEM;

		list_add(&rm->list, &region_list);
	} else {
		/* too early to allocate; postpone until omap_vram_init() */
		if (postponed_cnt == MAX_POSTPONED_REGIONS)
			return -ENOMEM;

		postponed_regions[postponed_cnt].paddr = paddr;
		postponed_regions[postponed_cnt].size = size;

		++postponed_cnt;
	}

	return 0;
}
int omap_vram_free(unsigned long paddr, size_t size)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	unsigned start, end;

	DBG("free mem paddr %08lx size %zu\n", paddr, size);

	size = PAGE_ALIGN(size);

	mutex_lock(&region_mutex);

	list_for_each_entry(rm, &region_list, list) {
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			start = alloc->paddr;
			end = alloc->paddr + (alloc->pages << PAGE_SHIFT);

			if (start >= paddr && end < paddr + size)
				goto found;
		}
	}

	mutex_unlock(&region_mutex);
	return -EINVAL;

found:
	omap_vram_free_allocation(alloc);

	mutex_unlock(&region_mutex);
	return 0;
}
EXPORT_SYMBOL(omap_vram_free);
static int _omap_vram_reserve(unsigned long paddr, unsigned pages)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;
	size_t size;

	size = pages << PAGE_SHIFT;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		/* the region must fully contain the requested range */
		start = rm->paddr;
		end = start + (rm->pages << PAGE_SHIFT) - 1;
		if (start > paddr || end < paddr + size - 1)
			continue;

		DBG("block ok, checking allocs\n");

		/* walk the sorted allocations, checking each gap */
		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr - 1;

			if (start <= paddr && end >= paddr + size - 1)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		/* finally check the gap after the last allocation */
		end = rm->paddr + (rm->pages << PAGE_SHIFT) - 1;

		if (!(start <= paddr && end >= paddr + size - 1))
			continue;

found:
		DBG("found area start %lx, end %lx\n", start, end);

		if (omap_vram_create_allocation(rm, paddr, pages) == NULL)
			return -ENOMEM;

		return 0;
	}

	return -ENOMEM;
}
int omap_vram_reserve(unsigned long paddr, size_t size)
{
	unsigned pages;
	int r;

	DBG("reserve mem paddr %08lx size %zu\n", paddr, size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_reserve(paddr, pages);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_reserve);
static void _omap_vram_dma_cb(int lch, u16 ch_status, void *data)
{
	struct completion *compl = data;
	complete(compl);
}
static int _omap_vram_clear(u32 paddr, unsigned pages)
{
	struct completion compl;
	unsigned elem_count;
	unsigned frame_count;
	int r;
	int lch;

	init_completion(&compl);

	r = omap_request_dma(OMAP_DMA_NO_DEVICE, "VRAM DMA",
			_omap_vram_dma_cb,
			&compl, &lch);
	if (r) {
		pr_err("VRAM: request_dma failed for memory clear\n");
		return -EBUSY;
	}

	/* clear the area with 32-bit constant-fill DMA writes */
	elem_count = pages * PAGE_SIZE / 4;
	frame_count = 1;

	omap_set_dma_transfer_params(lch, OMAP_DMA_DATA_TYPE_S32,
			elem_count, frame_count,
			OMAP_DMA_SYNC_ELEMENT,
			0, 0);

	omap_set_dma_dest_params(lch, 0, OMAP_DMA_AMODE_POST_INC,
			paddr, 0, 0);

	omap_set_dma_color_mode(lch, OMAP_DMA_CONSTANT_FILL, 0x000000);

	omap_start_dma(lch);

	if (wait_for_completion_timeout(&compl, msecs_to_jiffies(1000)) == 0) {
		omap_stop_dma(lch);
		pr_err("VRAM: dma timeout while clearing memory\n");
		r = -EIO;
		goto err;
	}

	r = 0;
err:
	omap_free_dma(lch);

	return r;
}
static int _omap_vram_alloc(unsigned pages, unsigned long *paddr)
{
	struct vram_region *rm;
	struct vram_alloc *alloc;

	list_for_each_entry(rm, &region_list, list) {
		unsigned long start, end;

		DBG("checking region %lx %d\n", rm->paddr, rm->pages);

		/* look for a large enough gap between allocations */
		start = rm->paddr;

		list_for_each_entry(alloc, &rm->alloc_list, list) {
			end = alloc->paddr;

			if (end - start >= pages << PAGE_SHIFT)
				goto found;

			start = alloc->paddr + (alloc->pages << PAGE_SHIFT);
		}

		end = rm->paddr + (rm->pages << PAGE_SHIFT);

found:
		if (end - start < pages << PAGE_SHIFT)
			continue;

		DBG("found %lx, end %lx\n", start, end);

		alloc = omap_vram_create_allocation(rm, start, pages);
		if (alloc == NULL)
			return -ENOMEM;

		*paddr = start;

		_omap_vram_clear(start, pages);

		return 0;
	}

	return -ENOMEM;
}
int omap_vram_alloc(size_t size, unsigned long *paddr)
{
	unsigned pages;
	int r;

	BUG_ON(!size);

	DBG("alloc mem size %zu\n", size);

	size = PAGE_ALIGN(size);
	pages = size >> PAGE_SHIFT;

	mutex_lock(&region_mutex);

	r = _omap_vram_alloc(pages, paddr);

	mutex_unlock(&region_mutex);

	return r;
}
EXPORT_SYMBOL(omap_vram_alloc);
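/*
 * Usage sketch (illustrative; not part of the original file): a driver
 * needing a physically contiguous buffer from the VRAM pool could do:
 *
 *	unsigned long paddr;
 *
 *	if (omap_vram_alloc(SZ_1M, &paddr) == 0) {
 *		... use the buffer at physical address paddr ...
 *		omap_vram_free(paddr, SZ_1M);
 *	}
 *
 * The buffer is cleared by DMA before omap_vram_alloc() returns.
 */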
void omap_vram_get_info(unsigned long *vram,
		unsigned long *free_vram,
		unsigned long *largest_free_block)
{
	struct vram_region *vr;
	struct vram_alloc *va;

	*vram = 0;
	*free_vram = 0;
	*largest_free_block = 0;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		unsigned free;
		unsigned long pa;

		pa = vr->paddr;
		*vram += vr->pages << PAGE_SHIFT;

		/* count the gaps between the sorted allocations */
		list_for_each_entry(va, &vr->alloc_list, list) {
			free = va->paddr - pa;
			*free_vram += free;
			if (free > *largest_free_block)
				*largest_free_block = free;
			pa = va->paddr + (va->pages << PAGE_SHIFT);
		}

		/* and the tail gap after the last allocation */
		free = vr->paddr + (vr->pages << PAGE_SHIFT) - pa;
		*free_vram += free;
		if (free > *largest_free_block)
			*largest_free_block = free;
	}

	mutex_unlock(&region_mutex);
}
EXPORT_SYMBOL(omap_vram_get_info);
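/*
 * Usage sketch (illustrative; not part of the original file): a caller
 * such as omapfb could report the pool state with:
 *
 *	unsigned long total, free, largest;
 *
 *	omap_vram_get_info(&total, &free, &largest);
 *	pr_info("VRAM: %lu total, %lu free, %lu largest block\n",
 *			total, free, largest);
 */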
#if defined(CONFIG_DEBUG_FS)
static int vram_debug_show(struct seq_file *s, void *unused)
{
	struct vram_region *vr;
	struct vram_alloc *va;
	unsigned size;

	mutex_lock(&region_mutex);

	list_for_each_entry(vr, &region_list, list) {
		size = vr->pages << PAGE_SHIFT;
		seq_printf(s, "%08lx-%08lx (%u bytes)\n",
				vr->paddr, vr->paddr + size - 1,
				size);

		list_for_each_entry(va, &vr->alloc_list, list) {
			size = va->pages << PAGE_SHIFT;
			seq_printf(s, "    %08lx-%08lx (%u bytes)\n",
					va->paddr, va->paddr + size - 1,
					size);
		}
	}

	mutex_unlock(&region_mutex);

	return 0;
}
static int vram_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, vram_debug_show, inode->i_private);
}
static const struct file_operations vram_debug_fops = {
	.open		= vram_debug_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};
static int __init omap_vram_create_debugfs(void)
{
	struct dentry *d;

	d = debugfs_create_file("vram", S_IRUGO, NULL,
			NULL, &vram_debug_fops);
	if (IS_ERR(d))
		return PTR_ERR(d);

	return 0;
}
#endif
static __init int omap_vram_init(void)
{
	int i;

	vram_initialized = 1;

	/* replay regions registered before the allocator was ready */
	for (i = 0; i < postponed_cnt; i++)
		omap_vram_add_region(postponed_regions[i].paddr,
				postponed_regions[i].size);

#ifdef CONFIG_DEBUG_FS
	if (omap_vram_create_debugfs())
		pr_err("VRAM: Failed to create debugfs file\n");
#endif

	return 0;
}

arch_initcall(omap_vram_init);
/* boottime vram alloc stuff */

/* set from board file */
static u32 omap_vram_sdram_start __initdata;
static u32 omap_vram_sdram_size __initdata;

/* set from kernel cmdline */
static u32 omap_vram_def_sdram_size __initdata;
static u32 omap_vram_def_sdram_start __initdata;
static int __init omap_vram_early_vram(char *p)
{
	omap_vram_def_sdram_size = memparse(p, &p);
	if (*p == ',')
		omap_vram_def_sdram_start = simple_strtoul(p + 1, &p, 16);
	return 0;
}
early_param("vram", omap_vram_early_vram);
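/*
 * Command line format (derived from the parser above): a size accepted
 * by memparse(), so "K"/"M" suffixes work, optionally followed by a
 * physical start address in hex, e.g. "vram=10M" or
 * "vram=10M,0x88000000" (addresses illustrative).
 */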
/*
 * Called from map_io. We need to call this early enough so that we
 * can reserve the fixed SDRAM regions before the VM gets hold of them.
 */
void __init omap_vram_reserve_sdram_memblock(void)
{
	u32 paddr;
	u32 size = 0;

	/* cmdline arg overrides the board file definition */
	if (omap_vram_def_sdram_size) {
		size = omap_vram_def_sdram_size;
		paddr = omap_vram_def_sdram_start;
	}

	if (!size) {
		size = omap_vram_sdram_size;
		paddr = omap_vram_sdram_start;
	}

#ifdef CONFIG_OMAP2_VRAM_SIZE
	if (!size) {
		size = CONFIG_OMAP2_VRAM_SIZE * 1024 * 1024;
		paddr = 0;
	}
#endif

	if (!size)
		return;

	size = ALIGN(size, SZ_2M);

	if (paddr) {
		/* fixed address: validate and reserve it explicitly */
		if (paddr & ~PAGE_MASK) {
			pr_err("VRAM start address 0x%08x not page aligned\n",
					paddr);
			return;
		}

		if (!memblock_is_region_memory(paddr, size)) {
			pr_err("Illegal SDRAM region 0x%08x..0x%08x for VRAM\n",
					paddr, paddr + size - 1);
			return;
		}

		if (memblock_is_region_reserved(paddr, size)) {
			pr_err("FB: failed to reserve VRAM - busy\n");
			return;
		}

		if (memblock_reserve(paddr, size) < 0) {
			pr_err("FB: failed to reserve VRAM - no memory\n");
			return;
		}
	} else {
		paddr = memblock_alloc(size, SZ_2M);
	}

	/* take the area out of the kernel's view of memory entirely */
	memblock_free(paddr, size);
	memblock_remove(paddr, size);

	omap_vram_add_region(paddr, size);

	pr_info("Reserving %u bytes SDRAM for VRAM\n", size);
}
void __init omap_vram_set_sdram_vram(u32 size, u32 start)
{
	omap_vram_sdram_start = start;
	omap_vram_sdram_size = size;
}
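/*
 * Usage sketch (illustrative; not part of the original file): a board
 * file would typically pin down the VRAM area from its machine init,
 * before omap_vram_reserve_sdram_memblock() runs from map_io:
 *
 *	omap_vram_set_sdram_vram(4 * SZ_1M, 0);
 *
 * Passing 0 as the start address lets the reservation code
 * memblock_alloc() a suitable 2 MB aligned area; the 4 MB figure is
 * illustrative.
 */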