/*
 * Copyright (C) 1995 Linus Torvalds
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 *   This program is free software; you can redistribute it and/or
 *   modify it under the terms of the GNU General Public License
 *   as published by the Free Software Foundation, version 2.
 *
 *   This program is distributed in the hope that it will be useful, but
 *   WITHOUT ANY WARRANTY; without even the implied warranty of
 *   MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 *   NON INFRINGEMENT.  See the GNU General Public License for
 *   more details.
 */
#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/hugetlb.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/highmem.h>
#include <linux/pagemap.h>
#include <linux/poison.h>
#include <linux/bootmem.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/efi.h>
#include <linux/memory_hotplug.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/homecache.h>
#include <hv/hypervisor.h>
#include <arch/chip.h>
/*
 * We could set FORCE_MAX_ZONEORDER to "(HPAGE_SHIFT - PAGE_SHIFT + 1)"
 * in the Tile Kconfig, but this generates configure warnings.
 * Do it here and force people to get it right to compile this file.
 * The problem is that with 4KB small pages and 16MB huge pages,
 * the default value doesn't allow us to group enough small pages
 * together to make up a huge page.
 */

#if CONFIG_FORCE_MAX_ZONEORDER < HPAGE_SHIFT - PAGE_SHIFT + 1
# error "Change FORCE_MAX_ZONEORDER in arch/tile/Kconfig to match page size"
#endif
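
/*
 * For example, with 4KB small pages (PAGE_SHIFT == 12) and 16MB huge
 * pages (HPAGE_SHIFT == 24), the test above requires a zone order of at
 * least 13, while the generic default of 11 only lets the buddy
 * allocator group 1024 small pages (4MB), not enough for one huge page.
 */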
#define clear_pgd(pmdptr) (*(pmdptr) = hv_pte(0))

unsigned long VMALLOC_RESERVE = CONFIG_VMALLOC_RESERVE;
EXPORT_SYMBOL(VMALLOC_RESERVE);

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
/* Create an L2 page table */
static pte_t * __init alloc_pte(void)
{
        return __alloc_bootmem(L2_KERNEL_PGTABLE_SIZE, HV_PAGE_TABLE_ALIGN, 0);
}
/*
 * L2 page tables per controller.  We allocate these all at once from
 * the bootmem allocator and store them here.  This saves on kernel L2
 * page table memory, compared to allocating a full 64K page per L2
 * page table, and also means that in cases where we use huge pages,
 * we are guaranteed to later be able to shatter those huge pages and
 * switch to using these page tables instead, without requiring
 * further allocation.  Each l2_ptes[] entry points to the first page
 * table for the first hugepage-size piece of memory on the
 * controller; other page tables are just indexed directly, i.e. the
 * L2 page tables are contiguous in memory for each controller.
 */
static pte_t *l2_ptes[MAX_NUMNODES];
static int num_l2_ptes[MAX_NUMNODES];
static void init_prealloc_ptes(int node, int pages)
{
        BUG_ON(pages & (HV_L2_ENTRIES-1));
        if (pages) {
                num_l2_ptes[node] = pages;
                l2_ptes[node] = __alloc_bootmem(pages * sizeof(pte_t),
                                                HV_PAGE_TABLE_ALIGN, 0);
        }
}
pte_t *get_prealloc_pte(unsigned long pfn)
{
        int node = pfn_to_nid(pfn);
        pfn &= ~(-1UL << (NR_PA_HIGHBIT_SHIFT - PAGE_SHIFT));
        BUG_ON(node >= MAX_NUMNODES);
        BUG_ON(pfn >= num_l2_ptes[node]);
        return &l2_ptes[node][pfn];
}
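
/*
 * Note: the mask applied to "pfn" above clears the PA bits at and above
 * NR_PA_HIGHBIT_SHIFT, which identify the memory controller, leaving
 * just the page's offset within its controller; since each controller's
 * L2 page tables are preallocated contiguously (see l2_ptes[] above),
 * that offset indexes the array directly.
 */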
/*
 * What caching do we expect pages from the heap to have when
 * they are allocated during bootup?  (Once we've installed the
 * "real" swapper_pg_dir.)
 */
static int initial_heap_home(void)
{
#if CHIP_HAS_CBOX_HOME_MAP()
        if (hash_default)
                return PAGE_HOME_HASH;
#endif
        return smp_processor_id();
}
/*
 * Place a pointer to an L2 page table in a middle page
 * directory entry.
 */
static void __init assign_pte(pmd_t *pmd, pte_t *page_table)
{
        phys_addr_t pa = __pa(page_table);
        unsigned long l2_ptfn = pa >> HV_LOG2_PAGE_TABLE_ALIGN;
        pte_t pteval = hv_pte_set_ptfn(__pgprot(_PAGE_TABLE), l2_ptfn);
        BUG_ON((pa & (HV_PAGE_TABLE_ALIGN-1)) != 0);
        pteval = pte_set_home(pteval, initial_heap_home());
        *(pte_t *)pmd = pteval;
        if (page_table != (pte_t *)pmd_page_vaddr(*pmd))
                BUG();
}
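
/*
 * Note: the "ptfn" installed above is expressed in HV_PAGE_TABLE_ALIGN
 * units rather than PAGE_SIZE units, which is why the physical address
 * is shifted by HV_LOG2_PAGE_TABLE_ALIGN instead of PAGE_SHIFT.
 */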
#ifdef __tilegx__

#if HV_L1_SIZE != HV_L2_SIZE
# error Rework assumption that L1 and L2 page tables are same size.
#endif

/* Since pmd_t arrays and pte_t arrays are the same size, just use casts. */
static inline pmd_t *alloc_pmd(void)
{
        return (pmd_t *)alloc_pte();
}

static inline void assign_pmd(pud_t *pud, pmd_t *pmd)
{
        assign_pte((pmd_t *)pud, (pte_t *)pmd);
}

#endif /* __tilegx__ */
/* Replace the given pmd with a full PTE table. */
void __init shatter_pmd(pmd_t *pmd)
{
        pte_t *pte = get_prealloc_pte(pte_pfn(*(pte_t *)pmd));
        assign_pte(pmd, pte);
}
#ifdef CONFIG_HIGHMEM
/*
 * This function initializes a certain range of kernel virtual memory
 * with new bootmem page tables, everywhere page tables are missing in
 * the given range.
 *
 * NOTE: The pagetables are allocated contiguous on the physical space
 * so we can cache the place of the first one and move around without
 * checking the pgd every time.
 */
static void __init page_table_range_init(unsigned long start,
                                         unsigned long end, pgd_t *pgd_base)
{
        pgd_t *pgd;
        int pgd_idx;
        unsigned long vaddr;

        vaddr = start;
        pgd_idx = pgd_index(vaddr);
        pgd = pgd_base + pgd_idx;

        for ( ; (pgd_idx < PTRS_PER_PGD) && (vaddr != end); pgd++, pgd_idx++) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd, vaddr), vaddr);
                if (pmd_none(*pmd))
                        assign_pte(pmd, alloc_pte());
                vaddr += PMD_SIZE;
        }
}
#endif /* CONFIG_HIGHMEM */
#if CHIP_HAS_CBOX_HOME_MAP()
static int __initdata ktext_hash = 1;   /* .text pages */
static int __initdata kdata_hash = 1;   /* .data and .bss pages */
int __write_once hash_default = 1;      /* kernel allocator pages */
EXPORT_SYMBOL(hash_default);
int __write_once kstack_hash = 1;       /* if no homecaching, use h4h */
#endif /* CHIP_HAS_CBOX_HOME_MAP */
/*
 * CPUs to use for striping the pages of kernel data.  If hash-for-home
 * is available, this is only relevant if kcache_hash sets up the
 * .data and .bss to be page-homed, and we don't want the default mode
 * of using the full set of kernel cpus for the striping.
 */
static __initdata struct cpumask kdata_mask;
static __initdata int kdata_arg_seen;

int __write_once kdata_huge;    /* if no homecaching, small pages */
/* Combine a generic pgprot_t with cache home to get a cache-aware pgprot. */
static pgprot_t __init construct_pgprot(pgprot_t prot, int home)
{
        prot = pte_set_home(prot, home);
#if CHIP_HAS_CBOX_HOME_MAP()
        if (home == PAGE_HOME_IMMUTABLE) {
                if (hash_default)
                        prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_HASH_L3);
                else
                        prot = hv_pte_set_mode(prot, HV_PTE_MODE_CACHE_NO_L3);
        }
#endif
        return prot;
}
/*
 * For a given kernel data VA, how should it be cached?
 * We return the complete pgprot_t with caching bits set.
 */
static pgprot_t __init init_pgprot(ulong address)
{
        int cpu;
        unsigned long page;
        enum { CODE_DELTA = MEM_SV_INTRPT - PAGE_OFFSET };
#if CHIP_HAS_CBOX_HOME_MAP()
        /* For kdata=huge, everything is just hash-for-home. */
        if (kdata_huge)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
        /* We map the aliased pages of permanent text inaccessible. */
        if (address < (ulong) _sinittext - CODE_DELTA)
                return PAGE_NONE;
        /*
         * We map read-only data non-coherent for performance.  We could
         * use neighborhood caching on TILE64, but it's not clear it's a win.
         */
        if ((address >= (ulong) __start_rodata &&
             address < (ulong) __end_rodata) ||
            address == (ulong) empty_zero_page) {
                return construct_pgprot(PAGE_KERNEL_RO, PAGE_HOME_IMMUTABLE);
        }
        /* As a performance optimization, keep the boot init stack here. */
        if (address >= (ulong)&init_thread_union &&
            address < (ulong)&init_thread_union + THREAD_SIZE)
                return construct_pgprot(PAGE_KERNEL, smp_processor_id());
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
        /* Force the atomic_locks[] array page to be hash-for-home. */
        if (address == (ulong) atomic_locks)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
        /*
         * Everything else that isn't data or bss is heap, so mark it
         * with the initial heap home (hash-for-home, or this cpu).  This
         * includes any addresses after the loaded image and any address before
         * _einitdata, since we already captured the case of text before
         * _sinittext, and __pa(einittext) is approximately __pa(sinitdata).
         *
         * All the LOWMEM pages that we mark this way will get their
         * struct page homecache properly marked later, in set_page_homes().
         * The HIGHMEM pages we leave with a default zero for their
         * homes, but with a zero free_time we don't have to actually
         * do a flush action the first time we use them, either.
         */
        if (address >= (ulong) _end || address < (ulong) _einitdata)
                return construct_pgprot(PAGE_KERNEL, initial_heap_home());
#if CHIP_HAS_CBOX_HOME_MAP()
        /* Use hash-for-home if requested for data/bss. */
        if (kdata_hash)
                return construct_pgprot(PAGE_KERNEL, PAGE_HOME_HASH);
#endif
        /*
         * Make the w1data homed like heap to start with, to avoid
         * making it part of the page-striped data area when we're just
         * going to convert it to read-only soon anyway.
         */
        if (address >= (ulong)__w1data_begin && address < (ulong)__w1data_end)
                return construct_pgprot(PAGE_KERNEL, initial_heap_home());
        /*
         * Otherwise we just hand out consecutive cpus.  To avoid
         * requiring this function to hold state, we just walk forward from
         * _sdata by PAGE_SIZE, skipping the readonly and init data, to reach
         * the requested address, while walking cpu home around kdata_mask.
         * This is typically no more than a dozen or so iterations.
         */
        page = (((ulong)__w1data_end) + PAGE_SIZE - 1) & PAGE_MASK;
        BUG_ON(address < page || address >= (ulong)_end);
        cpu = cpumask_first(&kdata_mask);
        for (; page < address; page += PAGE_SIZE) {
                if (page >= (ulong)&init_thread_union &&
                    page < (ulong)&init_thread_union + THREAD_SIZE)
                        continue;
                if (page == (ulong)empty_zero_page)
                        continue;
#if !ATOMIC_LOCKS_FOUND_VIA_TABLE()
                if (page == (ulong)atomic_locks)
                        continue;
#endif
                cpu = cpumask_next(cpu, &kdata_mask);
                if (cpu == NR_CPUS)
                        cpu = cpumask_first(&kdata_mask);
        }
        return construct_pgprot(PAGE_KERNEL, cpu);
}
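
/*
 * Example of the striping walk above: if kdata_mask spans cpus 0-3,
 * successive data/bss pages after __w1data_end are homed on cpus
 * 0, 1, 2, 3, 0, ... in that order, and the boot stack, the zero page,
 * and (where present) the atomic_locks page are skipped so they do not
 * consume a slot in the rotation.
 */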
/*
 * This function sets up how we cache the kernel text.  If we have
 * hash-for-home support, normally that is used instead (see the
 * kcache_hash boot flag for more information).  But if we end up
 * using a page-based caching technique, this option sets up the
 * details of that.  In addition, the "ktext=nocache" option may
 * always be used to disable local caching of text pages, if desired.
 */

static int __initdata ktext_arg_seen;
static int __initdata ktext_small;
static int __initdata ktext_local;
static int __initdata ktext_all;
static int __initdata ktext_nondataplane;
static int __initdata ktext_nocache;
static struct cpumask __initdata ktext_mask;
static int __init setup_ktext(char *str)
{
        if (str == NULL)
                return -EINVAL;
        ktext_arg_seen = 1;

        /* If you have a leading "nocache", turn off ktext caching */
        if (strncmp(str, "nocache", 7) == 0) {
                ktext_nocache = 1;
                pr_info("ktext: disabling local caching of kernel text\n");
                str += strlen("nocache");
                if (*str == ',')
                        ++str;
        }

        /* Default setting on Tile64: use a huge page */
        if (strcmp(str, "huge") == 0)
                pr_info("ktext: using one huge locally cached page\n");
        /* Pay TLB cost but get no cache benefit: cache small pages locally */
        else if (strcmp(str, "local") == 0) {
                ktext_small = 1;
                ktext_local = 1;
                pr_info("ktext: using small pages with local caching\n");
        }
        /* Neighborhood cache ktext pages on all cpus. */
        else if (strcmp(str, "all") == 0) {
                ktext_small = 1;
                ktext_all = 1;
                pr_info("ktext: using maximal caching neighborhood\n");
        }
        /* Neighborhood ktext pages on specified mask */
        else if (cpulist_parse(str, &ktext_mask) == 0) {
                char buf[NR_CPUS * 5];
                cpulist_scnprintf(buf, sizeof(buf), &ktext_mask);
                if (cpumask_weight(&ktext_mask) > 1) {
                        ktext_small = 1;
                        pr_info("ktext: using caching neighborhood %s "
                                "with small pages\n", buf);
                } else {
                        pr_info("ktext: caching on cpu %s with one huge page\n",
                                buf);
                }
        }

        return 0;
}

early_param("ktext", setup_ktext);
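
/*
 * Example boot arguments handled above: "ktext=local" uses small pages
 * cached locally on each tile, "ktext=all" spreads the text across all
 * cpus, and a cpu list such as "ktext=0-3" homes the text pages on just
 * those cpus; a leading "nocache" additionally disables local caching
 * of the text pages.
 */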
static inline pgprot_t ktext_set_nocache(pgprot_t prot)
{
        if (!ktext_nocache)
                prot = hv_pte_set_nc(prot);
#if CHIP_HAS_NC_AND_NOALLOC_BITS()
        else
                prot = hv_pte_set_no_alloc_l2(prot);
#endif
        return prot;
}
#ifndef __tilegx__
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
        return pmd_offset(pud_offset(&pgtables[pgd_index(va)], va), va);
}
#else
static pmd_t *__init get_pmd(pgd_t pgtables[], unsigned long va)
{
        pud_t *pud = pud_offset(&pgtables[pgd_index(va)], va);
        if (pud_none(*pud))
                assign_pmd(pud, alloc_pmd());
        return pmd_offset(pud, va);
}
#endif
/* Temporary page table we use for staging. */
static pgd_t pgtables[PTRS_PER_PGD]
 __attribute__((aligned(HV_PAGE_TABLE_ALIGN)));
/*
 * This maps the physical memory to kernel virtual address space, a total
 * of max_low_pfn pages, by creating page tables starting from address
 * PAGE_OFFSET.
 *
 * This routine transitions us from using a set of compiled-in large
 * pages to using some more precise caching, including removing access
 * to code pages mapped at PAGE_OFFSET (executed only at MEM_SV_START),
 * marking read-only data as locally cacheable, striping the remaining
 * .data and .bss across all the available tiles, and removing access
 * to pages above the top of RAM (thus ensuring a page fault from a bad
 * virtual address rather than a hypervisor shoot down for accessing
 * memory outside the assigned limits).
 */
static void __init kernel_physical_mapping_init(pgd_t *pgd_base)
{
        unsigned long address, pfn;
        pmd_t *pmd;
        pte_t *pte;
        int pte_ofs;
        const struct cpumask *my_cpu_mask = cpumask_of(smp_processor_id());
        struct cpumask kstripe_mask;
        int rc, i;
#if CHIP_HAS_CBOX_HOME_MAP()
        if (ktext_arg_seen && ktext_hash) {
                pr_warning("warning: \"ktext\" boot argument ignored"
                           " if \"kcache_hash\" sets up text hash-for-home\n");
                ktext_small = 0;
        }

        if (kdata_arg_seen && kdata_hash) {
                pr_warning("warning: \"kdata\" boot argument ignored"
                           " if \"kcache_hash\" sets up data hash-for-home\n");
        }

        if (kdata_huge && !hash_default) {
                pr_warning("warning: disabling \"kdata=huge\"; requires"
                           " kcache_hash=all or =allbutstack\n");
                kdata_huge = 0;
        }
#endif
        /*
         * Set up a mask for cpus to use for kernel striping.
         * This is normally all cpus, but minus dataplane cpus if any.
         * If the dataplane covers the whole chip, we stripe over
         * the whole chip too.
         */
        cpumask_copy(&kstripe_mask, cpu_possible_mask);
        if (!kdata_arg_seen)
                kdata_mask = kstripe_mask;
        /* Allocate and fill in L2 page tables */
        for (i = 0; i < MAX_NUMNODES; ++i) {
#ifdef CONFIG_HIGHMEM
                unsigned long end_pfn = node_lowmem_end_pfn[i];
#else
                unsigned long end_pfn = node_end_pfn[i];
#endif
                unsigned long end_huge_pfn = 0;

                /* Pre-shatter the last huge page to allow per-cpu pages. */
                if (kdata_huge)
                        end_huge_pfn = end_pfn - (HPAGE_SIZE >> PAGE_SHIFT);

                pfn = node_start_pfn[i];

                /* Allocate enough memory to hold L2 page tables for node. */
                init_prealloc_ptes(i, end_pfn - pfn);

                address = (unsigned long) pfn_to_kaddr(pfn);
                while (pfn < end_pfn) {
                        BUG_ON(address & (HPAGE_SIZE-1));
                        pmd = get_pmd(pgtables, address);
                        pte = get_prealloc_pte(pfn);
                        if (pfn < end_huge_pfn) {
                                pgprot_t prot = init_pgprot(address);
                                *(pte_t *)pmd = pte_mkhuge(pfn_pte(pfn, prot));
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                                     pfn++, pte_ofs++, address += PAGE_SIZE)
                                        pte[pte_ofs] = pfn_pte(pfn, prot);
                        } else {
                                if (kdata_huge)
                                        printk(KERN_DEBUG "pre-shattered huge"
                                               " page at %#lx\n", address);
                                for (pte_ofs = 0; pte_ofs < PTRS_PER_PTE;
                                     pfn++, pte_ofs++, address += PAGE_SIZE) {
                                        pgprot_t prot = init_pgprot(address);
                                        pte[pte_ofs] = pfn_pte(pfn, prot);
                                }
                                assign_pte(pmd, pte);
                        }
                }
        }
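
        /*
         * Note: each pass through the while loop above covers one
         * huge-page-aligned, huge-page-sized chunk of the node (the
         * BUG_ON checks the alignment) and fills in all PTRS_PER_PTE
         * small-page PTEs for it, whether the chunk ends up mapped by
         * a huge-page pmd or by the shattered L2 table.
         */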
        /*
         * Set or check ktext_map now that we have cpu_possible_mask
         * and kstripe_mask to work with.
         */
        if (ktext_all)
                cpumask_copy(&ktext_mask, cpu_possible_mask);
        else if (ktext_nondataplane)
                ktext_mask = kstripe_mask;
        else if (!cpumask_empty(&ktext_mask)) {
                /* Sanity-check any mask that was requested */
                struct cpumask bad;
                cpumask_andnot(&bad, &ktext_mask, cpu_possible_mask);
                cpumask_and(&ktext_mask, &ktext_mask, cpu_possible_mask);
                if (!cpumask_empty(&bad)) {
                        char buf[NR_CPUS * 5];
                        cpulist_scnprintf(buf, sizeof(buf), &bad);
                        pr_info("ktext: not using unavailable cpus %s\n", buf);
                }
                if (cpumask_empty(&ktext_mask)) {
                        pr_warning("ktext: no valid cpus; caching on %d.\n",
                                   smp_processor_id());
                        cpumask_copy(&ktext_mask,
                                     cpumask_of(smp_processor_id()));
                }
        }
        address = MEM_SV_INTRPT;
        pmd = get_pmd(pgtables, address);
        if (ktext_small) {
                /* Allocate an L2 PTE for the kernel text */
                int cpu = 0;
                pgprot_t prot = construct_pgprot(PAGE_KERNEL_EXEC,
                                                 PAGE_HOME_IMMUTABLE);

                if (ktext_local) {
                        if (ktext_nocache)
                                prot = hv_pte_set_mode(prot,
                                                       HV_PTE_MODE_UNCACHED);
                        else
                                prot = hv_pte_set_mode(prot,
                                                       HV_PTE_MODE_CACHE_NO_L3);
                } else {
                        /* Neighborhood cache on the cpus in ktext_mask. */
                        prot = hv_pte_set_mode(prot,
                                               HV_PTE_MODE_CACHE_TILE_L3);
                        cpu = cpumask_first(&ktext_mask);
                        prot = ktext_set_nocache(prot);
                }

                BUG_ON(address != (unsigned long)_stext);
                pfn = 0;  /* code starts at PA 0 */
                pte = alloc_pte();
                for (pte_ofs = 0; address < (unsigned long)_einittext;
                     pfn++, pte_ofs++, address += PAGE_SIZE) {
                        if (!ktext_local) {
                                prot = set_remote_cache_cpu(prot, cpu);
                                cpu = cpumask_next(cpu, &ktext_mask);
                                if (cpu == NR_CPUS)
                                        cpu = cpumask_first(&ktext_mask);
                        }
                        pte[pte_ofs] = pfn_pte(pfn, prot);
                }
                assign_pte(pmd, pte);
        } else {
                pte_t pteval = pfn_pte(0, PAGE_KERNEL_EXEC);
                pteval = pte_mkhuge(pteval);
#if CHIP_HAS_CBOX_HOME_MAP()
                if (ktext_hash) {
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_HASH_L3);
                        pteval = ktext_set_nocache(pteval);
                } else
#endif /* CHIP_HAS_CBOX_HOME_MAP() */
                if (cpumask_weight(&ktext_mask) == 1) {
                        pteval = set_remote_cache_cpu(pteval,
                                        cpumask_first(&ktext_mask));
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_TILE_L3);
                        pteval = ktext_set_nocache(pteval);
                } else if (ktext_nocache)
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_UNCACHED);
                else
                        pteval = hv_pte_set_mode(pteval,
                                                 HV_PTE_MODE_CACHE_NO_L3);
                *(pte_t *)pmd = pteval;
        }
        /* Set swapper_pgprot here so it is flushed to memory right away. */
        swapper_pgprot = init_pgprot((unsigned long)swapper_pg_dir);
        /*
         * Since we may be changing the caching of the stack and page
         * table itself, we invoke an assembly helper to do the
         * following steps:
         *
         *  - flush the cache so we start with an empty slate
         *  - install pgtables[] as the real page table
         *  - flush the TLB so the new page table takes effect
         */
        rc = flush_and_install_context(__pa(pgtables),
                                       init_pgprot((unsigned long)pgtables),
                                       __get_cpu_var(current_asid),
                                       cpumask_bits(my_cpu_mask));
        BUG_ON(rc != 0);
        /* Copy the page table back to the normal swapper_pg_dir. */
        memcpy(pgd_base, pgtables, sizeof(pgtables));
        __install_page_table(pgd_base, __get_cpu_var(current_asid),
                             swapper_pgprot);
        /*
         * We just read swapper_pgprot and thus brought it into the cache,
         * with its new home & caching mode.  When we start the other CPUs,
         * they're going to reference swapper_pgprot via their initial fake
         * VA-is-PA mappings, which cache everything locally.  At that
         * time, if it's in our cache with a conflicting home, the
         * simulator's coherence checker will complain.  So, flush it out
         * of our cache; we're not going to ever use it again anyway.
         */
        __insn_finv(&swapper_pgprot);
}
/*
 * devmem_is_allowed() checks to see if /dev/mem access to a certain address
 * is valid.  The argument is a physical page number.
 *
 * On Tile, the only valid things for which we can just hand out unchecked
 * PTEs are the kernel code and data.  Anything else might change its
 * homing with time, and we wouldn't know to adjust the /dev/mem PTEs.
 * Note that init_thread_union is released to heap soon after boot,
 * so we include it in the init data.
 *
 * For TILE-Gx, we might want to consider allowing access to PA
 * regions corresponding to PCI space, etc.
 */
int devmem_is_allowed(unsigned long pagenr)
{
        return pagenr < kaddr_to_pfn(_end) &&
                !(pagenr >= kaddr_to_pfn(&init_thread_union) ||
                  pagenr < kaddr_to_pfn(_einitdata)) &&
                !(pagenr >= kaddr_to_pfn(_sinittext) ||
                  pagenr <= kaddr_to_pfn(_einittext-1));
}
#ifdef CONFIG_HIGHMEM
static void __init permanent_kmaps_init(pgd_t *pgd_base)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;
        unsigned long vaddr;

        vaddr = PKMAP_BASE;
        page_table_range_init(vaddr, vaddr + PAGE_SIZE*LAST_PKMAP, pgd_base);

        pgd = swapper_pg_dir + pgd_index(vaddr);
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        pte = pte_offset_kernel(pmd, vaddr);
        pkmap_page_table = pte;
}
#endif /* CONFIG_HIGHMEM */
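
/*
 * After permanent_kmaps_init(), pkmap_page_table points at the L2 page
 * table backing the PKMAP_BASE window, which the generic kmap() code
 * then uses to install its highmem mappings.
 */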
static void __init init_free_pfn_range(unsigned long start, unsigned long end)
{
        unsigned long pfn;
        struct page *page = pfn_to_page(start);

        for (pfn = start; pfn < end; ) {
                /* Optimize by freeing pages in large batches */
                int order = __ffs(pfn);
                int count, i;
                struct page *p;

                if (order >= MAX_ORDER)
                        order = MAX_ORDER-1;
                count = 1 << order;
                while (pfn + count > end) {
                        count >>= 1;
                        --order;
                }
                for (p = page, i = 0; i < count; ++i, ++p) {
                        __ClearPageReserved(p);
                        /*
                         * Hacky direct set to avoid unnecessary
                         * lock take/release for EVERY page here.
                         */
                        p->_count.counter = 0;
                        p->_mapcount.counter = -1;
                }
                init_page_count(page);
                __free_pages(page, order);
                totalram_pages += count;

                page += count;
                pfn += count;
        }
}
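
/*
 * Worked example of the batching above: freeing pfns 0x1400..0x1600,
 * __ffs(0x1400) suggests order 10 (1024 pages), which would run past
 * the end of the range, so it is reduced to order 9 (512 pages); a
 * single order-9 free then covers the whole range and the loop
 * advances pfn by 512 to 0x1600.
 */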
static void __init set_non_bootmem_pages_init(void)
{
        struct zone *z;
        for_each_zone(z) {
                unsigned long start, end;
                int nid = z->zone_pgdat->node_id;
                int idx = zone_idx(z);

                start = z->zone_start_pfn;
                if (start == 0)
                        continue;  /* bootmem */
                end = start + z->spanned_pages;
                if (idx == ZONE_NORMAL) {
                        BUG_ON(start != node_start_pfn[nid]);
                        start = node_free_pfn[nid];
                }
#ifdef CONFIG_HIGHMEM
                if (idx == ZONE_HIGHMEM)
                        totalhigh_pages += z->spanned_pages;
#endif
                if (kdata_huge) {
                        unsigned long percpu_pfn = node_percpu_pfn[nid];
                        if (start < percpu_pfn && end > percpu_pfn)
                                end = percpu_pfn;
                }
#ifdef CONFIG_PCI
                if (start <= pci_reserve_start_pfn &&
                    end > pci_reserve_start_pfn) {
                        if (end > pci_reserve_end_pfn)
                                init_free_pfn_range(pci_reserve_end_pfn, end);
                        end = pci_reserve_start_pfn;
                }
#endif
                init_free_pfn_range(start, end);
        }
}
/*
 * paging_init() sets up the page tables - note that all of lowmem is
 * already mapped by head.S.
 */
void __init paging_init(void)
{
#ifdef CONFIG_HIGHMEM
        unsigned long vaddr, end;
#endif
#ifdef __tilegx__
        pud_t *pud;
#endif
        pgd_t *pgd_base = swapper_pg_dir;

        kernel_physical_mapping_init(pgd_base);

#ifdef CONFIG_HIGHMEM
        /*
         * Fixed mappings, only the page table structure has to be
         * created - mappings will be set by set_fixmap():
         */
        vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
        end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
        page_table_range_init(vaddr, end, pgd_base);
        permanent_kmaps_init(pgd_base);
#endif

#ifdef __tilegx__
        /*
         * Since GX allocates just one pmd_t array worth of vmalloc space,
         * we go ahead and allocate it statically here, then share it
         * globally.  As a result we don't have to worry about any task
         * changing init_mm once we get up and running, and there's no
         * need for e.g. vmalloc_sync_all().
         */
        BUILD_BUG_ON(pgd_index(VMALLOC_START) != pgd_index(VMALLOC_END));
        pud = pud_offset(pgd_base + pgd_index(VMALLOC_START), VMALLOC_START);
        assign_pmd(pud, alloc_pmd());
#endif
}
/*
 * Walk the kernel page tables and derive the page_home() from
 * the PTEs, so that set_pte() can properly validate the caching
 * of all PTEs it sees.
 */
void __init set_page_homes(void)
{
}
static void __init set_max_mapnr_init(void)
{
#ifdef CONFIG_FLATMEM
        max_mapnr = max_low_pfn;
#endif
}
void __init mem_init(void)
{
        int codesize, datasize, initsize;
        int i;
#ifndef __tilegx__
        unsigned long last;
#endif

#ifdef CONFIG_FLATMEM
        BUG_ON(!mem_map);
#endif

#ifdef CONFIG_HIGHMEM
        /* check that fixmap and pkmap do not overlap */
        if (PKMAP_ADDR(LAST_PKMAP-1) >= FIXADDR_START) {
                pr_err("fixmap and kmap areas overlap"
                       " - this will crash\n");
                pr_err("pkstart: %lxh pkend: %lxh fixstart %lxh\n",
                       PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP-1),
                       FIXADDR_START);
                BUG();
        }
#endif

        set_max_mapnr_init();

        /* this will put all bootmem onto the freelists */
        totalram_pages += free_all_bootmem();

        /* count all remaining LOWMEM and give all HIGHMEM to page allocator */
        set_non_bootmem_pages_init();

        codesize =  (unsigned long)&_etext - (unsigned long)&_text;
        datasize =  (unsigned long)&_end - (unsigned long)&_sdata;
        initsize =  (unsigned long)&_einittext - (unsigned long)&_sinittext;
        initsize += (unsigned long)&_einitdata - (unsigned long)&_sinitdata;

        pr_info("Memory: %luk/%luk available (%dk kernel code, %dk data, %dk init, %ldk highmem)\n",
                (unsigned long) nr_free_pages() << (PAGE_SHIFT-10),
                num_physpages << (PAGE_SHIFT-10),
                codesize >> 10,
                datasize >> 10,
                initsize >> 10,
                (unsigned long) (totalhigh_pages << (PAGE_SHIFT-10)));
        /*
         * In debug mode, dump some interesting memory mappings.
         */
#ifdef CONFIG_HIGHMEM
        printk(KERN_DEBUG "  KMAP    %#lx - %#lx\n",
               FIXADDR_START, FIXADDR_TOP + PAGE_SIZE - 1);
        printk(KERN_DEBUG "  PKMAP   %#lx - %#lx\n",
               PKMAP_BASE, PKMAP_ADDR(LAST_PKMAP) - 1);
#endif
#ifdef CONFIG_HUGEVMAP
        printk(KERN_DEBUG "  HUGEMAP %#lx - %#lx\n",
               HUGE_VMAP_BASE, HUGE_VMAP_END - 1);
#endif
        printk(KERN_DEBUG "  VMALLOC %#lx - %#lx\n",
               _VMALLOC_START, _VMALLOC_END - 1);
#ifdef __tilegx__
        for (i = MAX_NUMNODES-1; i >= 0; --i) {
                struct pglist_data *node = &node_data[i];
                if (node->node_present_pages) {
                        unsigned long start = (unsigned long)
                                pfn_to_kaddr(node->node_start_pfn);
                        unsigned long end = start +
                                (node->node_present_pages << PAGE_SHIFT);
                        printk(KERN_DEBUG "  MEM%d    %#lx - %#lx\n",
                               i, start, end - 1);
                }
        }
#else
        last = (unsigned long) high_memory;
        for (i = MAX_NUMNODES-1; i >= 0; --i) {
                if ((unsigned long)vbase_map[i] != -1UL) {
                        printk(KERN_DEBUG "  LOWMEM%d %#lx - %#lx\n",
                               i, (unsigned long) (vbase_map[i]),
                               (unsigned long) (last-1));
                        last = (unsigned long) (vbase_map[i]);
                }
        }
#endif
        /*
         * Convert from using one lock for all atomic operations to
         * one per cpu.
         */
        __init_atomic_per_cpu();
}
/*
 * this is for the non-NUMA, single node SMP system case.
 * Specifically, in the case of x86, we will always add
 * memory to the highmem for now.
 */
#ifndef CONFIG_NEED_MULTIPLE_NODES
int arch_add_memory(u64 start, u64 size)
{
        struct pglist_data *pgdata = &contig_page_data;
        struct zone *zone = pgdata->node_zones + MAX_NR_ZONES-1;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;

        return __add_pages(zone, start_pfn, nr_pages);
}
int remove_memory(u64 start, u64 size)
{
        return -EINVAL;
}
#endif
struct kmem_cache *pgd_cache;

void __init pgtable_cache_init(void)
{
        pgd_cache = kmem_cache_create("pgd",
                                      PTRS_PER_PGD*sizeof(pgd_t),
                                      PTRS_PER_PGD*sizeof(pgd_t),
                                      0,
                                      NULL);
        if (!pgd_cache)
                panic("pgtable_cache_init(): Cannot create pgd cache");
}
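
/*
 * Note: the second and third arguments to kmem_cache_create() above are
 * the object size and alignment respectively, so each pgd ends up
 * allocated on a boundary equal to its own size.
 */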
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
/*
 * The __w1data area holds data that is only written during initialization,
 * and is read-only and thus freely cacheable thereafter.  Fix the page
 * table entries that cover that region accordingly.
 */
static void mark_w1data_ro(void)
{
        /* Loop over page table entries */
        unsigned long addr = (unsigned long)__w1data_begin;
        BUG_ON((addr & (PAGE_SIZE-1)) != 0);
        for (; addr <= (unsigned long)__w1data_end - 1; addr += PAGE_SIZE) {
                unsigned long pfn = kaddr_to_pfn((void *)addr);
                pte_t *ptep = virt_to_pte(NULL, addr);
                BUG_ON(pte_huge(*ptep));   /* not relevant for kdata_huge */
                set_pte_at(&init_mm, addr, ptep, pfn_pte(pfn, PAGE_KERNEL_RO));
        }
}
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
static long __write_once initfree;
#else
static long __write_once initfree = 1;
#endif
/* Select whether to free (1) or mark unusable (0) the __init pages. */
static int __init set_initfree(char *str)
{
        long val;
        if (strict_strtol(str, 0, &val) == 0) {
                initfree = val;
                pr_info("initfree: %s free init pages\n",
                        initfree ? "will" : "won't");
        }
        return 1;
}
__setup("initfree=", set_initfree);
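
/*
 * Example: booting with "initfree=0" leaves the __init pages unusable
 * (free_init_pages() below just clears their PTEs), while the default
 * "initfree=1" poisons and returns them to the page allocator.
 */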
static void free_init_pages(char *what, unsigned long begin, unsigned long end)
{
        unsigned long addr = (unsigned long) begin;

        if (kdata_huge && !initfree) {
                pr_warning("Warning: ignoring initfree=0:"
                           " incompatible with kdata=huge\n");
                initfree = 1;
        }
        end = (end + PAGE_SIZE - 1) & PAGE_MASK;
        local_flush_tlb_pages(NULL, begin, PAGE_SIZE, end - begin);
        for (addr = begin; addr < end; addr += PAGE_SIZE) {
                /*
                 * Note we just reset the home here directly in the
                 * page table.  We know this is safe because our caller
                 * just flushed the caches on all the other cpus,
                 * and they won't be touching any of these pages.
                 */
                int pfn = kaddr_to_pfn((void *)addr);
                struct page *page = pfn_to_page(pfn);
                pte_t *ptep = virt_to_pte(NULL, addr);
                if (!initfree) {
                        /*
                         * If debugging page accesses then do not free
                         * this memory but mark them not present - any
                         * buggy init-section access will create a
                         * kernel page fault:
                         */
                        pte_clear(&init_mm, addr, ptep);
                        continue;
                }
                __ClearPageReserved(page);
                init_page_count(page);
                if (pte_huge(*ptep))
                        BUG_ON(!kdata_huge);
                else
                        set_pte_at(&init_mm, addr, ptep,
                                   pfn_pte(pfn, PAGE_KERNEL));
                memset((void *)addr, POISON_FREE_INITMEM, PAGE_SIZE);
                free_page(addr);
                totalram_pages++;
        }
        pr_info("Freeing %s: %ldk freed\n", what, (end - begin) >> 10);
}
void free_initmem(void)
{
        const unsigned long text_delta = MEM_SV_INTRPT - PAGE_OFFSET;

        /*
         * Evict the dirty initdata on the boot cpu, evict the w1data
         * wherever it's homed, and evict all the init code everywhere.
         * We are guaranteed that no one will touch the init pages any
         * more, and although other cpus may be touching the w1data,
         * we only actually change the caching on tile64, which won't
         * be keeping local copies in the other tiles' caches anyway.
         */
        homecache_evict(&cpu_cacheable_map);

        /* Free the data pages that we won't use again after init. */
        free_init_pages("unused kernel data",
                        (unsigned long)_sinitdata,
                        (unsigned long)_einitdata);

        /*
         * Free the pages mapped from 0xc0000000 that correspond to code
         * pages from MEM_SV_INTRPT that we won't use again after init.
         */
        free_init_pages("unused kernel text",
                        (unsigned long)_sinittext - text_delta,
                        (unsigned long)_einittext - text_delta);
#if !CHIP_HAS_COHERENT_LOCAL_CACHE()
        /*
         * Upgrade the .w1data section to globally cached.
         * We don't do this on tilepro, since the cache architecture
         * pretty much makes it irrelevant, and in any case we end
         * up having racing issues with other tiles that may touch
         * the data after we flush the cache but before we update
         * the PTEs and flush the TLBs, causing sharer shootdowns
         * later.  Even though this is to clean data, it seems like
         * an unnecessary complication.
         */
        mark_w1data_ro();
#endif

        /* Do a global TLB flush so everyone sees the changes. */
        flush_tlb_all();
}