// SPDX-License-Identifier: GPL-2.0
/*
 *  linux/arch/parisc/mm/init.c
 *
 *  Copyright (C) 1995	Linus Torvalds
 *  Copyright 1999 SuSE GmbH
 *    changed by Philipp Rumpf
 *  Copyright 1999 Philipp Rumpf (prumpf@tux.org)
 *  Copyright 2004 Randolph Chung (tausq@debian.org)
 *  Copyright 2006-2007 Helge Deller (deller@gmx.de)
 */

#include <linux/module.h>
#include <linux/mm.h>
#include <linux/memblock.h>
#include <linux/gfp.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/swap.h>
#include <linux/unistd.h>
#include <linux/nodemask.h>	/* for node_online_map */
#include <linux/pagemap.h>	/* for release_pages */
#include <linux/compat.h>

#include <asm/pgalloc.h>
#include <asm/pdc_chassis.h>
#include <asm/mmzone.h>
#include <asm/sections.h>
#include <asm/msgbuf.h>
#include <asm/sparsemem.h>

extern int  data_start;
extern void parisc_kernel_start(void);	/* Kernel entry point in head.S */

#if CONFIG_PGTABLE_LEVELS == 3
pmd_t pmd0[PTRS_PER_PMD] __section(".data..vm0.pmd") __attribute__ ((aligned(PAGE_SIZE)));
#endif

pgd_t swapper_pg_dir[PTRS_PER_PGD] __section(".data..vm0.pgd") __attribute__ ((aligned(PAGE_SIZE)));
pte_t pg0[PT_INITIAL * PTRS_PER_PTE] __section(".data..vm0.pte") __attribute__ ((aligned(PAGE_SIZE)));

static struct resource data_resource = {
	.name	= "Kernel data",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource code_resource = {
	.name	= "Kernel code",
	.flags	= IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
};

static struct resource pdcdata_resource = {
	.name	= "PDC data (Page Zero)",
	.flags	= IORESOURCE_BUSY | IORESOURCE_MEM,
};

static struct resource sysram_resources[MAX_PHYSMEM_RANGES] __ro_after_init;

/* The following array is initialized from the firmware specific
 * information retrieved in kernel/inventory.c.
 */
physmem_range_t pmem_ranges[MAX_PHYSMEM_RANGES] __initdata;
int npmem_ranges __initdata;

#ifdef CONFIG_64BIT
#define MAX_MEM		(1UL << MAX_PHYSMEM_BITS)
#else /* !CONFIG_64BIT */
#define MAX_MEM		(3584U*1024U*1024U)
#endif /* !CONFIG_64BIT */

static unsigned long mem_limit __read_mostly = MAX_MEM;
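
/*
 * Parse the boot command line for a "mem=" argument.  This runs before the
 * regular __setup() handlers, so it scans boot_command_line directly and
 * lowers mem_limit when a smaller limit was requested.
 */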
static void __init mem_limit_func(void)
{
	char *cp, *end;
	unsigned long limit;

	/* We need this before __setup() functions are called */
	limit = MAX_MEM;
	for (cp = boot_command_line; *cp; ) {
		if (memcmp(cp, "mem=", 4) == 0) {
			cp += 4;
			limit = memparse(cp, &end);
			cp = end;
		} else {
			/* skip to the next option on the command line */
			while (*cp != ' ' && *cp)
				++cp;
			while (*cp == ' ')
				++cp;
		}
	}

	if (limit < mem_limit)
		mem_limit = limit;
}

#define MAX_GAP (0x40000000UL >> PAGE_SHIFT)
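
/*
 * setup_bootmem() sorts and sanity-checks the physical memory ranges
 * reported by firmware, applies any "mem=" limit, registers the ranges
 * with memblock and the resource tree, and reserves the firmware area,
 * the kernel image and (if present) the initrd.
 */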
static void __init setup_bootmem(void)
{
	unsigned long mem_max;
#ifndef CONFIG_SPARSEMEM
	physmem_range_t pmem_holes[MAX_PHYSMEM_RANGES - 1];
	int npmem_holes;
#endif
	int i, sysram_resource_count;

	disable_sr_hashing(); /* Turn off space register hashing */

	/*
	 * Sort the ranges. Since the number of ranges is typically
	 * small, and performance is not an issue here, just do
	 * a simple insertion sort.
	 */
	for (i = 1; i < npmem_ranges; i++) {
		int j;

		for (j = i; j > 0; j--) {
			physmem_range_t tmp;

			if (pmem_ranges[j-1].start_pfn <
			    pmem_ranges[j].start_pfn) {
				break;
			}
			tmp = pmem_ranges[j-1];
			pmem_ranges[j-1] = pmem_ranges[j];
			pmem_ranges[j] = tmp;
		}
	}

#ifndef CONFIG_SPARSEMEM
	/*
	 * Throw out ranges that are too far apart (controlled by
	 * MAX_GAP).
	 */
	for (i = 1; i < npmem_ranges; i++) {
		if (pmem_ranges[i].start_pfn -
			(pmem_ranges[i-1].start_pfn +
			 pmem_ranges[i-1].pages) > MAX_GAP) {
			npmem_ranges = i;
			printk("Large gap in memory detected (%ld pages). "
			       "Consider turning on CONFIG_SPARSEMEM\n",
			       pmem_ranges[i].start_pfn -
			       (pmem_ranges[i-1].start_pfn +
				pmem_ranges[i-1].pages));
			break;
		}
	}
#endif

	/* Print the memory ranges */
	pr_info("Memory Ranges:\n");

	for (i = 0; i < npmem_ranges; i++) {
		struct resource *res = &sysram_resources[i];
		unsigned long start;
		unsigned long size;

		size = (pmem_ranges[i].pages << PAGE_SHIFT);
		start = (pmem_ranges[i].start_pfn << PAGE_SHIFT);
		pr_info("%2d) Start 0x%016lx End 0x%016lx Size %6ld MB\n",
			i, start, start + (size - 1), size >> 20);

		/* request memory resource */
		res->name = "System RAM";
		res->start = start;
		res->end = start + size - 1;
		res->flags = IORESOURCE_SYSTEM_RAM | IORESOURCE_BUSY;
		request_resource(&iomem_resource, res);
	}

	sysram_resource_count = npmem_ranges;

	/*
	 * For 32 bit kernels we limit the amount of memory we can
	 * support, in order to preserve enough kernel address space
	 * for other purposes. For 64 bit kernels we don't normally
	 * limit the memory, but this mechanism can be used to
	 * artificially limit the amount of memory (and it is written
	 * to work with multiple memory ranges).
	 */
	mem_limit_func();	/* check for "mem=" argument */

	mem_max = 0;
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long rsize;

		rsize = pmem_ranges[i].pages << PAGE_SHIFT;
		if ((mem_max + rsize) > mem_limit) {
			printk(KERN_WARNING "Memory truncated to %ld MB\n", mem_limit >> 20);
			if (mem_max == mem_limit)
				npmem_ranges = i;
			else {
				pmem_ranges[i].pages = (mem_limit >> PAGE_SHIFT)
						       - (mem_max >> PAGE_SHIFT);
				npmem_ranges = i + 1;
				mem_max = mem_limit;
			}
			break;
		}
		mem_max += rsize;
	}

	printk(KERN_INFO "Total Memory: %ld MB\n",mem_max >> 20);

#ifndef CONFIG_SPARSEMEM
	/* Merge the ranges, keeping track of the holes */
	{
		unsigned long end_pfn;
		unsigned long hole_pages;

		npmem_holes = 0;
		end_pfn = pmem_ranges[0].start_pfn + pmem_ranges[0].pages;
		for (i = 1; i < npmem_ranges; i++) {

			hole_pages = pmem_ranges[i].start_pfn - end_pfn;
			if (hole_pages) {
				pmem_holes[npmem_holes].start_pfn = end_pfn;
				pmem_holes[npmem_holes++].pages = hole_pages;
				end_pfn += hole_pages;
			}
			end_pfn += pmem_ranges[i].pages;
		}

		pmem_ranges[0].pages = end_pfn - pmem_ranges[0].start_pfn;
		npmem_ranges = 1;
	}
#endif

	/*
	 * Initialize and free the full range of memory in each range.
	 */
	for (i = 0; i < npmem_ranges; i++) {
		unsigned long start_pfn;
		unsigned long npages;
		unsigned long start;
		unsigned long size;

		start_pfn = pmem_ranges[i].start_pfn;
		npages = pmem_ranges[i].pages;

		start = start_pfn << PAGE_SHIFT;
		size = npages << PAGE_SHIFT;

		/* add system RAM memblock */
		memblock_add(start, size);

		if ((start_pfn + npages) > max_pfn)
			max_pfn = start_pfn + npages;
	}

	/*
	 * We can't use memblock top-down allocations because we only
	 * created the initial mapping up to KERNEL_INITIAL_SIZE in
	 * the assembly bootup code.
	 */
	memblock_set_bottom_up(true);

	/* IOMMU is always used to access "high mem" on those boxes
	 * that can support enough mem that a PCI device couldn't
	 * directly DMA to any physical addresses.
	 * ISA DMA support will need to revisit this.
	 */
	max_low_pfn = max_pfn;

	/* reserve PAGE0 pdc memory, kernel text/data/bss & bootmap */

#define PDC_CONSOLE_IO_IODC_SIZE 32768

	memblock_reserve(0UL, (unsigned long)(PAGE0->mem_free +
				PDC_CONSOLE_IO_IODC_SIZE));
	memblock_reserve(__pa(KERNEL_BINARY_TEXT_START),
			(unsigned long)(_end - KERNEL_BINARY_TEXT_START));

#ifndef CONFIG_SPARSEMEM
	/* reserve the holes */
	for (i = 0; i < npmem_holes; i++) {
		memblock_reserve((pmem_holes[i].start_pfn << PAGE_SHIFT),
				 (pmem_holes[i].pages << PAGE_SHIFT));
	}
#endif

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_start && initrd_end) {
		printk(KERN_INFO "initrd: %08lx-%08lx\n", initrd_start, initrd_end);
		if (__pa(initrd_start) < mem_max) {
			unsigned long initrd_reserve;

			if (__pa(initrd_end) > mem_max) {
				initrd_reserve = mem_max - __pa(initrd_start);
			} else {
				initrd_reserve = initrd_end - initrd_start;
			}
			initrd_below_start_ok = 1;
			printk(KERN_INFO "initrd: reserving %08lx-%08lx (mem_max %08lx)\n", __pa(initrd_start), __pa(initrd_start) + initrd_reserve, mem_max);

			memblock_reserve(__pa(initrd_start), initrd_reserve);
		}
	}
#endif

	data_resource.start = virt_to_phys(&data_start);
	data_resource.end = virt_to_phys(_end) - 1;
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(&data_start)-1;

	/* We don't know which region the kernel will be in, so try
	 * all of them.
	 */
	for (i = 0; i < sysram_resource_count; i++) {
		struct resource *res = &sysram_resources[i];
		request_resource(res, &code_resource);
		request_resource(res, &data_resource);
	}
	request_resource(&sysram_resources[0], &pdcdata_resource);

	/* Initialize Page Deallocation Table (PDT) and check for bad memory. */
	pdc_pdt_init();

	memblock_allow_resize();
}

static bool kernel_set_to_readonly;
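
/*
 * Create kernel page table entries mapping the virtual range starting at
 * start_vaddr onto the physical range starting at start_paddr.  When
 * "force" is set the given pgprot is used as-is; otherwise kernel text and
 * rodata get executable/read-only protections once kernel_set_to_readonly
 * is true.
 */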
static void __init map_pages(unsigned long start_vaddr,
			     unsigned long start_paddr, unsigned long size,
			     pgprot_t pgprot, int force)
{
	pmd_t *pmd;
	pte_t *pg_table;
	unsigned long end_paddr;
	unsigned long start_pmd;
	unsigned long start_pte;
	unsigned long tmp1;
	unsigned long tmp2;
	unsigned long address;
	unsigned long vaddr;
	unsigned long ro_start;
	unsigned long ro_end;
	unsigned long kernel_start, kernel_end;

	ro_start = __pa((unsigned long)_text);
	ro_end   = __pa((unsigned long)&data_start);
	kernel_start = __pa((unsigned long)&__init_begin);
	kernel_end   = __pa((unsigned long)&_end);

	end_paddr = start_paddr + size;

	/* for 2-level configuration PTRS_PER_PMD is 0 so start_pmd will be 0 */
	start_pmd = ((start_vaddr >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
	start_pte = ((start_vaddr >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));

	address = start_paddr;
	vaddr = start_vaddr;

	while (address < end_paddr) {
		pgd_t *pgd = pgd_offset_k(vaddr);
		p4d_t *p4d = p4d_offset(pgd, vaddr);
		pud_t *pud = pud_offset(p4d, vaddr);

#if CONFIG_PGTABLE_LEVELS == 3
		if (pud_none(*pud)) {
			pmd = memblock_alloc(PAGE_SIZE << PMD_TABLE_ORDER,
					     PAGE_SIZE << PMD_TABLE_ORDER);
			if (!pmd)
				panic("pmd allocation failed.\n");
			pud_populate(NULL, pud, pmd);
		}
#endif

		pmd = pmd_offset(pud, vaddr);
		for (tmp1 = start_pmd; tmp1 < PTRS_PER_PMD; tmp1++, pmd++) {
			if (pmd_none(*pmd)) {
				pg_table = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
				if (!pg_table)
					panic("page table allocation failed\n");
				pmd_populate_kernel(NULL, pmd, pg_table);
			}

			pg_table = pte_offset_kernel(pmd, vaddr);
			for (tmp2 = start_pte; tmp2 < PTRS_PER_PTE; tmp2++, pg_table++) {
				pte_t pte;
				pgprot_t prot;
				bool huge = false;

				if (force) {
					prot = pgprot;
				} else if (address < kernel_start || address >= kernel_end) {
					/* outside kernel memory */
					prot = PAGE_KERNEL;
				} else if (!kernel_set_to_readonly) {
					/* still initializing, allow writing to RO memory */
					prot = PAGE_KERNEL_RWX;
					huge = true;
				} else if (address >= ro_start) {
					/* Code (ro) and Data areas */
					prot = (address < ro_end) ?
						PAGE_KERNEL_EXEC : PAGE_KERNEL;
					huge = true;
				} else {
					prot = PAGE_KERNEL;
				}

				pte = __mk_pte(address, prot);
				if (huge)
					pte = pte_mkhuge(pte);

				if (address >= end_paddr)
					break;

				set_pte(pg_table, pte);

				address += PAGE_SIZE;
				vaddr += PAGE_SIZE;
			}
			start_pte = 0;

			if (address >= end_paddr)
				break;
		}
		start_pmd = 0;
	}
}
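
/*
 * Temporarily map the kernel text read-write-execute (enable_read_write=1)
 * or restore the default kernel text protections (enable_read_write=0).
 */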
void __init set_kernel_text_rw(int enable_read_write)
{
	unsigned long start = (unsigned long) __init_begin;
	unsigned long end   = (unsigned long) &data_start;

	map_pages(start, __pa(start), end-start,
		  PAGE_KERNEL_RWX, enable_read_write ? 1:0);

	/* force the kernel to see the new page table entries */
	flush_cache_all();
	flush_tlb_all();
}

void __ref free_initmem(void)
{
	unsigned long init_begin = (unsigned long)__init_begin;
	unsigned long init_end = (unsigned long)__init_end;
	unsigned long kernel_end = (unsigned long)&_end;

	/* Remap kernel text and data, but do not touch init section yet. */
	kernel_set_to_readonly = true;
	map_pages(init_end, __pa(init_end), kernel_end - init_end,
		  PAGE_KERNEL, 0);

	/* The init text pages are marked R-X.  We have to
	 * flush the icache and mark them RW-
	 *
	 * This is tricky, because map_pages is in the init section.
	 * Do a dummy remap of the data section first (the data
	 * section is already PAGE_KERNEL) to pull in the TLB entries
	 * for map_pages itself. */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL_RWX, 1);
	/* now remap at PAGE_KERNEL since the TLB is pre-primed to execute
	 * the map_pages call above */
	map_pages(init_begin, __pa(init_begin), init_end - init_begin,
		  PAGE_KERNEL, 1);

	/* force the kernel to see the new TLB entries */
	__flush_tlb_range(0, init_begin, kernel_end);

	/* finally dump all the instructions which were cached, since the
	 * pages are no longer executable */
	flush_icache_range(init_begin, init_end);

	free_initmem_default(POISON_FREE_INITMEM);

	/* set up a new led state on systems shipped with an LED State panel */
	pdc_chassis_send_status(PDC_CHASSIS_DIRECT_BCOMPLETE);
}

#ifdef CONFIG_STRICT_KERNEL_RWX
void mark_rodata_ro(void)
{
	/* rodata memory was already mapped with KERNEL_RO access rights by
	   pagetable_init() and map_pages(). No need to do additional stuff here */
	unsigned long roai_size = __end_ro_after_init - __start_ro_after_init;

	pr_info("Write protected read-only-after-init data: %luk\n", roai_size >> 10);
}
#endif

/*
 * Just an arbitrary offset to serve as a "hole" between mapping areas
 * (between top of physical memory and a potential pcxl dma mapping
 * area, and below the vmalloc mapping area).
 *
 * The current 32K value just means that there will be a 32K "hole"
 * between mapping areas. That means that any out-of-bounds memory
 * accesses will hopefully be caught. The vmalloc() routines leave
 * a hole of 4kB between each vmalloced area for the same reason.
 */

/* Leave room for gateway page expansion */
#if KERNEL_MAP_START < GATEWAY_PAGE_SIZE
#error KERNEL_MAP_START is in gateway reserved region
#endif
#define MAP_START (KERNEL_MAP_START)

#define VM_MAP_OFFSET  (32*1024)
#define SET_MAP_OFFSET(x) ((void *)(((unsigned long)(x) + VM_MAP_OFFSET) \
				     & ~(VM_MAP_OFFSET-1)))
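
/*
 * SET_MAP_OFFSET() advances an address to the next VM_MAP_OFFSET (32 KB)
 * boundary above it, creating the "hole" described above between two
 * adjacent mapping areas.
 */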

void *parisc_vmalloc_start __ro_after_init;
EXPORT_SYMBOL(parisc_vmalloc_start);

unsigned long pcxl_dma_start __ro_after_init;
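
/*
 * mem_init() runs build-time sanity checks on the IPC and page table
 * structure sizes, sets up high_memory and max_mapnr, and carves out the
 * pcxl DMA area below the start of the vmalloc region when needed.
 */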
void __init mem_init(void)
{
	/* Do sanity checks on IPC (compat) structures */
	BUILD_BUG_ON(sizeof(struct ipc64_perm) != 48);
#ifndef CONFIG_64BIT
	BUILD_BUG_ON(sizeof(struct semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct shmid64_ds) != 104);
#endif
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(sizeof(struct compat_ipc64_perm) != sizeof(struct ipc64_perm));
	BUILD_BUG_ON(sizeof(struct compat_semid64_ds) != 80);
	BUILD_BUG_ON(sizeof(struct compat_msqid64_ds) != 104);
	BUILD_BUG_ON(sizeof(struct compat_shmid64_ds) != 104);
#endif

	/* Do sanity checks on page table constants */
	BUILD_BUG_ON(PTE_ENTRY_SIZE != sizeof(pte_t));
	BUILD_BUG_ON(PMD_ENTRY_SIZE != sizeof(pmd_t));
	BUILD_BUG_ON(PGD_ENTRY_SIZE != sizeof(pgd_t));
	BUILD_BUG_ON(PAGE_SHIFT + BITS_PER_PTE + BITS_PER_PMD + BITS_PER_PGD
			> BITS_PER_LONG);
#if CONFIG_PGTABLE_LEVELS == 3
	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PMD);
#else
	BUILD_BUG_ON(PT_INITIAL > PTRS_PER_PGD);
#endif

	high_memory = __va((max_pfn << PAGE_SHIFT));
	set_max_mapnr(max_low_pfn);

	if (boot_cpu_data.cpu_type == pcxl2 || boot_cpu_data.cpu_type == pcxl) {
		pcxl_dma_start = (unsigned long)SET_MAP_OFFSET(MAP_START);
		parisc_vmalloc_start = SET_MAP_OFFSET(pcxl_dma_start
						      + PCXL_DMA_MAP_SIZE);
	} else {
		parisc_vmalloc_start = SET_MAP_OFFSET(MAP_START);
	}

#if 0
	/*
	 * Do not expose the virtual kernel memory layout to userspace.
	 * But keep code for debugging purposes.
	 */
	printk("virtual kernel memory layout:\n"
	       "    vmalloc : 0x%px - 0x%px   (%4ld MB)\n"
	       "    fixmap  : 0x%px - 0x%px   (%4ld kB)\n"
	       "    memory  : 0x%px - 0x%px   (%4ld MB)\n"
	       "      .init : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .data : 0x%px - 0x%px   (%4ld kB)\n"
	       "      .text : 0x%px - 0x%px   (%4ld kB)\n",

	       (void*)VMALLOC_START, (void*)VMALLOC_END,
	       (VMALLOC_END - VMALLOC_START) >> 20,

	       (void *)FIXMAP_START, (void *)(FIXMAP_START + FIXMAP_SIZE),
	       (unsigned long)(FIXMAP_SIZE / 1024),

	       __va(0), high_memory,
	       ((unsigned long)high_memory - (unsigned long)__va(0)) >> 20,

	       __init_begin, __init_end,
	       ((unsigned long)__init_end - (unsigned long)__init_begin) >> 10,

	       _etext, _edata,
	       ((unsigned long)_edata - (unsigned long)_etext) >> 10,

	       _text, _etext,
	       ((unsigned long)_etext - (unsigned long)_text) >> 10);
#endif
}

unsigned long *empty_zero_page __ro_after_init;
EXPORT_SYMBOL(empty_zero_page);

/*
 * pagetable_init() sets up the page tables
 *
 * Note that gateway_init() places the Linux gateway page at page 0.
 * Since gateway pages cannot be dereferenced this has the desirable
 * side effect of trapping those pesky NULL-reference errors in the
 * kernel.
 */
static void __init pagetable_init(void)
{
	int range;

	/* Map each physical memory range to its kernel vaddr */

	for (range = 0; range < npmem_ranges; range++) {
		unsigned long start_paddr;
		unsigned long end_paddr;
		unsigned long size;

		start_paddr = pmem_ranges[range].start_pfn << PAGE_SHIFT;
		size = pmem_ranges[range].pages << PAGE_SHIFT;
		end_paddr = start_paddr + size;

		map_pages((unsigned long)__va(start_paddr), start_paddr,
			  size, PAGE_KERNEL, 0);
	}

#ifdef CONFIG_BLK_DEV_INITRD
	if (initrd_end && initrd_end > mem_limit) {
		printk(KERN_INFO "initrd: mapping %08lx-%08lx\n", initrd_start, initrd_end);
		map_pages(initrd_start, __pa(initrd_start),
			  initrd_end - initrd_start, PAGE_KERNEL, 0);
	}
#endif

	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("zero page allocation failed.\n");
}

static void __init gateway_init(void)
{
	unsigned long linux_gateway_page_addr;
	/* FIXME: This is 'const' in order to trick the compiler
	   into not treating it as DP-relative data. */
	extern void * const linux_gateway_page;

	linux_gateway_page_addr = LINUX_GATEWAY_ADDR & PAGE_MASK;

	/*
	 * Setup Linux Gateway page.
	 *
	 * The Linux gateway page will reside in kernel space (on virtual
	 * page 0), so it doesn't need to be aliased into user space.
	 */
	map_pages(linux_gateway_page_addr, __pa(&linux_gateway_page),
		  PAGE_SIZE, PAGE_GATEWAY, 1);
}

static void __init parisc_bootmem_free(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };

	max_zone_pfn[0] = memblock_end_of_DRAM();

	free_area_init(max_zone_pfn);
}
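
/*
 * paging_init() performs the one-time MM bring-up: boot memory setup,
 * kernel page tables and the gateway page, followed by cache/TLB
 * initialization and zone setup.
 */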
void __init paging_init(void)
{
	setup_bootmem();
	pagetable_init();
	gateway_init();
	flush_cache_all_local(); /* start with known state */
	flush_tlb_all_local(NULL);

	sparse_init();
	parisc_bootmem_free();
}

#ifdef CONFIG_PA20

/*
 * Currently, all PA20 chips have 18 bit protection IDs, which is the
 * limiting factor (space ids are 32 bits).
 */

#define NR_SPACE_IDS 262144

#else

/*
 * Currently we have a one-to-one relationship between space IDs and
 * protection IDs. Older parisc chips (PCXS, PCXT, PCXL, PCXL2) only
 * support 15 bit protection IDs, so that is the limiting factor.
 * PCXT' has 18 bit protection IDs, but only 16 bit spaceids, so it's
 * probably not worth the effort for a special case here.
 */

#define NR_SPACE_IDS 32768

#endif  /* !CONFIG_PA20 */

#define RECYCLE_THRESHOLD (NR_SPACE_IDS / 2)
#define SID_ARRAY_SIZE (NR_SPACE_IDS / (8 * sizeof(long)))

static unsigned long space_id[SID_ARRAY_SIZE] = { 1 }; /* disallow space 0 */
static unsigned long dirty_space_id[SID_ARRAY_SIZE];
static unsigned long space_id_index;
static unsigned long free_space_ids = NR_SPACE_IDS - 1;
static unsigned long dirty_space_ids = 0;

static DEFINE_SPINLOCK(sid_lock);
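
/*
 * Space IDs are handed out from the space_id bitmap.  free_sid() only marks
 * an ID dirty; recycle_sids() returns dirty IDs to the free pool once the
 * whole TLB has been flushed (see flush_tlb_all() below).
 */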
unsigned long alloc_sid(void)
{
	unsigned long index;

	spin_lock(&sid_lock);

	if (free_space_ids == 0) {
		if (dirty_space_ids != 0) {
			spin_unlock(&sid_lock);
			flush_tlb_all(); /* flush_tlb_all() calls recycle_sids() */
			spin_lock(&sid_lock);
		}
		BUG_ON(free_space_ids == 0);
	}

	free_space_ids--;

	index = find_next_zero_bit(space_id, NR_SPACE_IDS, space_id_index);
	space_id[BIT_WORD(index)] |= BIT_MASK(index);
	space_id_index = index;

	spin_unlock(&sid_lock);

	return index << SPACEID_SHIFT;
}

void free_sid(unsigned long spaceid)
{
	unsigned long index = spaceid >> SPACEID_SHIFT;
	unsigned long *dirty_space_offset, mask;

	dirty_space_offset = &dirty_space_id[BIT_WORD(index)];
	mask = BIT_MASK(index);

	spin_lock(&sid_lock);

	BUG_ON(*dirty_space_offset & mask); /* attempt to free space id twice */

	*dirty_space_offset |= mask;
	dirty_space_ids++;

	spin_unlock(&sid_lock);
}

#ifdef CONFIG_SMP
static void get_dirty_sids(unsigned long *ndirtyptr,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	*ndirtyptr = dirty_space_ids;
	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			dirty_array[i] = dirty_space_id[i];
			dirty_space_id[i] = 0;
		}
		dirty_space_ids = 0;
	}
}

static void recycle_sids(unsigned long ndirty,unsigned long *dirty_array)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	for (i = 0; i < SID_ARRAY_SIZE; i++) {
		space_id[i] ^= dirty_array[i];
	}

	free_space_ids += ndirty;
	space_id_index = 0;
}

#else /* CONFIG_SMP */

static void recycle_sids(void)
{
	int i;

	/* NOTE: sid_lock must be held upon entry */

	if (dirty_space_ids != 0) {
		for (i = 0; i < SID_ARRAY_SIZE; i++) {
			space_id[i] ^= dirty_space_id[i];
			dirty_space_id[i] = 0;
		}

		free_space_ids += dirty_space_ids;
		dirty_space_ids = 0;
		space_id_index = 0;
	}
}
#endif

/*
 * flush_tlb_all() calls recycle_sids(), since whenever the entire tlb is
 * purged, we can safely reuse the space ids that were released but
 * not flushed from the tlb.
 */

#ifdef CONFIG_SMP

static unsigned long recycle_ndirty;
static unsigned long recycle_dirty_array[SID_ARRAY_SIZE];
static unsigned int recycle_inuse;

void flush_tlb_all(void)
{
	int do_recycle;

	__inc_irq_stat(irq_tlb_count);

	do_recycle = 0;
	spin_lock(&sid_lock);
	if (dirty_space_ids > RECYCLE_THRESHOLD) {
		BUG_ON(recycle_inuse); /* FIXME: Use a semaphore/wait queue here */
		get_dirty_sids(&recycle_ndirty,recycle_dirty_array);
		recycle_inuse++;
		do_recycle++;
	}
	spin_unlock(&sid_lock);
	on_each_cpu(flush_tlb_all_local, NULL, 1);
	if (do_recycle) {
		spin_lock(&sid_lock);
		recycle_sids(recycle_ndirty,recycle_dirty_array);
		recycle_inuse = 0;
		spin_unlock(&sid_lock);
	}
}

#else

void flush_tlb_all(void)
{
	__inc_irq_stat(irq_tlb_count);
	spin_lock(&sid_lock);
	flush_tlb_all_local(NULL);
	recycle_sids();
	spin_unlock(&sid_lock);
}
#endif