x86, mm: Don't clear page table if range is ram

arch/x86/mm/init_64.c
/*
 *  linux/arch/x86_64/mm/init.c
 *
 *  Copyright (C) 1995  Linus Torvalds
 *  Copyright (C) 2000  Pavel Machek <pavel@ucw.cz>
 *  Copyright (C) 2002,2003 Andi Kleen <ak@suse.de>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/init.h>
#include <linux/initrd.h>
#include <linux/pagemap.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/proc_fs.h>
#include <linux/pci.h>
#include <linux/pfn.h>
#include <linux/poison.h>
#include <linux/dma-mapping.h>
#include <linux/module.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/nmi.h>
#include <linux/gfp.h>

#include <asm/processor.h>
#include <asm/bios_ebda.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/apic.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/smp.h>
#include <asm/sections.h>
#include <asm/kdebug.h>
#include <asm/numa.h>
#include <asm/cacheflush.h>
#include <asm/init.h>
#include <asm/uv/uv.h>
#include <asm/setup.h>

static int __init parse_direct_gbpages_off(char *arg)
{
        direct_gbpages = 0;
        return 0;
}
early_param("nogbpages", parse_direct_gbpages_off);

static int __init parse_direct_gbpages_on(char *arg)
{
        direct_gbpages = 1;
        return 0;
}
early_param("gbpages", parse_direct_gbpages_on);

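/*
 * Editorial note (added, not in the original source): "gbpages" and
 * "nogbpages" are early boot parameters. Booting with e.g. "nogbpages"
 * forces the kernel direct mapping to use 2M pages even when the CPU
 * advertises 1G page support.
 */
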
/*
 * NOTE: pagetable_init() allocates all the fixmap pagetables contiguously
 * in physical space, so we can cache the location of the first one and
 * move around without checking the pgd every time.
 */

pteval_t __supported_pte_mask __read_mostly = ~_PAGE_IOMAP;
EXPORT_SYMBOL_GPL(__supported_pte_mask);

int force_personality32;

/*
 * noexec32=on|off
 * Control the non-executable heap for 32-bit processes.
 * To control the stack too, use noexec=off.
 *
 * on   PROT_READ does not imply PROT_EXEC for 32-bit processes (default)
 * off  PROT_READ implies PROT_EXEC
 */
static int __init nonx32_setup(char *str)
{
        if (!strcmp(str, "on"))
                force_personality32 &= ~READ_IMPLIES_EXEC;
        else if (!strcmp(str, "off"))
                force_personality32 |= READ_IMPLIES_EXEC;
        return 1;
}
__setup("noexec32=", nonx32_setup);

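/*
 * Editorial note (added, not in the original source): for example,
 * booting with "noexec32=off" makes PROT_READ imply PROT_EXEC for
 * 32-bit processes, matching old binaries that expect readable
 * mappings to also be executable.
 */
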
/*
 * When memory is added or removed, make sure that every process mm has
 * suitable PGD entries in its local PGD-level page.
 */
void sync_global_pgds(unsigned long start, unsigned long end)
{
        unsigned long address;

        for (address = start; address <= end; address += PGDIR_SIZE) {
                const pgd_t *pgd_ref = pgd_offset_k(address);
                struct page *page;

                if (pgd_none(*pgd_ref))
                        continue;

                spin_lock(&pgd_lock);
                list_for_each_entry(page, &pgd_list, lru) {
                        pgd_t *pgd;
                        spinlock_t *pgt_lock;

                        pgd = (pgd_t *)page_address(page) + pgd_index(address);
                        /* the pgt_lock is only needed for Xen */
                        pgt_lock = &pgd_page_get_mm(page)->page_table_lock;
                        spin_lock(pgt_lock);

                        if (pgd_none(*pgd))
                                set_pgd(pgd, *pgd_ref);
                        else
                                BUG_ON(pgd_page_vaddr(*pgd)
                                       != pgd_page_vaddr(*pgd_ref));

                        spin_unlock(pgt_lock);
                }
                spin_unlock(&pgd_lock);
        }
}

/*
 * NOTE: This function is marked __ref because it calls the __init function
 * alloc_bootmem_pages(). Doing so is safe ONLY while after_bootmem == 0.
 */
static __ref void *spp_getpage(void)
{
        void *ptr;

        if (after_bootmem)
                ptr = (void *) get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
        else
                ptr = alloc_bootmem_pages(PAGE_SIZE);

        if (!ptr || ((unsigned long)ptr & ~PAGE_MASK)) {
                panic("set_pte_phys: cannot allocate page data %s\n",
                        after_bootmem ? "after bootmem" : "");
        }

        pr_debug("spp_getpage %p\n", ptr);

        return ptr;
}

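/*
 * Editorial note (added, not in the original source): the fill_pud(),
 * fill_pmd() and fill_pte() helpers below each walk one level of the
 * kernel page tables, allocating a fresh lower-level table through
 * spp_getpage() when the entry is empty, and return a pointer to the
 * entry for @vaddr at that level.
 */
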
static pud_t *fill_pud(pgd_t *pgd, unsigned long vaddr)
{
        if (pgd_none(*pgd)) {
                pud_t *pud = (pud_t *)spp_getpage();
                pgd_populate(&init_mm, pgd, pud);
                if (pud != pud_offset(pgd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #00! %p <-> %p\n",
                               pud, pud_offset(pgd, 0));
        }
        return pud_offset(pgd, vaddr);
}

static pmd_t *fill_pmd(pud_t *pud, unsigned long vaddr)
{
        if (pud_none(*pud)) {
                pmd_t *pmd = (pmd_t *) spp_getpage();
                pud_populate(&init_mm, pud, pmd);
                if (pmd != pmd_offset(pud, 0))
                        printk(KERN_ERR "PAGETABLE BUG #01! %p <-> %p\n",
                               pmd, pmd_offset(pud, 0));
        }
        return pmd_offset(pud, vaddr);
}

static pte_t *fill_pte(pmd_t *pmd, unsigned long vaddr)
{
        if (pmd_none(*pmd)) {
                pte_t *pte = (pte_t *) spp_getpage();
                pmd_populate_kernel(&init_mm, pmd, pte);
                if (pte != pte_offset_kernel(pmd, 0))
                        printk(KERN_ERR "PAGETABLE BUG #02!\n");
        }
        return pte_offset_kernel(pmd, vaddr);
}

void set_pte_vaddr_pud(pud_t *pud_page, unsigned long vaddr, pte_t new_pte)
{
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pud = pud_page + pud_index(vaddr);
        pmd = fill_pmd(pud, vaddr);
        pte = fill_pte(pmd, vaddr);

        set_pte(pte, new_pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

void set_pte_vaddr(unsigned long vaddr, pte_t pteval)
{
        pgd_t *pgd;
        pud_t *pud_page;

        pr_debug("set_pte_vaddr %lx to %lx\n", vaddr, native_pte_val(pteval));

        pgd = pgd_offset_k(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_ERR
                        "PGD FIXMAP MISSING, it should be set up in head.S!\n");
                return;
        }
        pud_page = (pud_t *)pgd_page_vaddr(*pgd);
        set_pte_vaddr_pud(pud_page, vaddr, pteval);
}

pmd_t * __init populate_extra_pmd(unsigned long vaddr)
{
        pgd_t *pgd;
        pud_t *pud;

        pgd = pgd_offset_k(vaddr);
        pud = fill_pud(pgd, vaddr);
        return fill_pmd(pud, vaddr);
}

pte_t * __init populate_extra_pte(unsigned long vaddr)
{
        pmd_t *pmd;

        pmd = populate_extra_pmd(vaddr);
        return fill_pte(pmd, vaddr);
}

/*
 * Create large page table mappings for a range of physical addresses.
 */
static void __init __init_extra_mapping(unsigned long phys, unsigned long size,
                                                pgprot_t prot)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        BUG_ON((phys & ~PMD_MASK) || (size & ~PMD_MASK));
        for (; size; phys += PMD_SIZE, size -= PMD_SIZE) {
                pgd = pgd_offset_k((unsigned long)__va(phys));
                if (pgd_none(*pgd)) {
                        pud = (pud_t *) spp_getpage();
                        set_pgd(pgd, __pgd(__pa(pud) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pud = pud_offset(pgd, (unsigned long)__va(phys));
                if (pud_none(*pud)) {
                        pmd = (pmd_t *) spp_getpage();
                        set_pud(pud, __pud(__pa(pmd) | _KERNPG_TABLE |
                                                _PAGE_USER));
                }
                pmd = pmd_offset(pud, phys);
                BUG_ON(!pmd_none(*pmd));
                set_pmd(pmd, __pmd(phys | pgprot_val(prot)));
        }
}

void __init init_extra_mapping_wb(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE);
}

void __init init_extra_mapping_uc(unsigned long phys, unsigned long size)
{
        __init_extra_mapping(phys, size, PAGE_KERNEL_LARGE_NOCACHE);
}

/*
 * The head.S code sets up the kernel high mapping:
 *
 *   from __START_KERNEL_map to __START_KERNEL_map + size (== _end-_text)
 *
 * phys_addr holds the negative offset to the kernel, which is added
 * to the compile time generated pmds. This results in invalid pmds up
 * to the point where we hit the physaddr 0 mapping.
 *
 * We limit the mappings to the region from _text to _brk_end.  _brk_end
 * is rounded up to the 2MB boundary. This catches the invalid pmds as
 * well, as they are located before _text:
 */
void __init cleanup_highmap(void)
{
        unsigned long vaddr = __START_KERNEL_map;
        unsigned long vaddr_end = __START_KERNEL_map + (max_pfn_mapped << PAGE_SHIFT);
        unsigned long end = roundup((unsigned long)_brk_end, PMD_SIZE) - 1;
        pmd_t *pmd = level2_kernel_pgt;

        for (; vaddr + PMD_SIZE - 1 < vaddr_end; pmd++, vaddr += PMD_SIZE) {
                if (pmd_none(*pmd))
                        continue;
                if (vaddr < (unsigned long) _text || vaddr > end)
                        set_pmd(pmd, __pmd(0));
        }
}

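/*
 * Editorial note (added, not in the original source): before bootmem is
 * up, alloc_low_page() hands out pages from the early pagetable buffer
 * [pgt_buf_end, pgt_buf_top) and maps them with early_memremap();
 * afterwards it simply takes a zeroed GFP_ATOMIC page. Either way,
 * *phys receives the physical address of the page.
 */
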
static __ref void *alloc_low_page(unsigned long *phys)
{
        unsigned long pfn = pgt_buf_end++;
        void *adr;

        if (after_bootmem) {
                adr = (void *)get_zeroed_page(GFP_ATOMIC | __GFP_NOTRACK);
                *phys = __pa(adr);

                return adr;
        }

        if (pfn >= pgt_buf_top)
                panic("alloc_low_page: ran out of memory");

        adr = early_memremap(pfn * PAGE_SIZE, PAGE_SIZE);
        clear_page(adr);
        *phys  = pfn * PAGE_SIZE;
        return adr;
}

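/*
 * Editorial note (added, not in the original source): map_low_page() and
 * unmap_low_page() give temporary kernel mappings of pagetable pages
 * while bootmem is not yet available; once after_bootmem is set they
 * degenerate to the identity (the page is already mapped) and a no-op,
 * respectively.
 */
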
static __ref void *map_low_page(void *virt)
{
        void *adr;
        unsigned long phys, left;

        if (after_bootmem)
                return virt;

        phys = __pa(virt);
        left = phys & (PAGE_SIZE - 1);
        adr = early_memremap(phys & PAGE_MASK, PAGE_SIZE);
        adr = (void *)(((unsigned long)adr) | left);

        return adr;
}

static __ref void unmap_low_page(void *adr)
{
        if (after_bootmem)
                return;

        early_iounmap((void *)((unsigned long)adr & PAGE_MASK), PAGE_SIZE);
}

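/*
 * Editorial note (added, not in the original source): phys_pte_init()
 * fills the PTE entries of one pagetable page for the physical range
 * [addr, end). Entries past 'end' are cleared only when the backing
 * range is not RAM (neither E820_RAM nor E820_RESERVED_KERN) -- the
 * point of this commit: a hole inside a RAM-backed range must not have
 * its page table entries zapped. Returns the last address mapped.
 */
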
static unsigned long __meminit
phys_pte_init(pte_t *pte_page, unsigned long addr, unsigned long end,
              pgprot_t prot)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;
        int i;

        pte_t *pte = pte_page + pte_index(addr);

        for (i = pte_index(addr); i < PTRS_PER_PTE; i++, addr = next, pte++) {
                next = (addr & PAGE_MASK) + PAGE_SIZE;
                if (addr >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(addr & PAGE_MASK, next, E820_RAM) &&
                            !e820_any_mapped(addr & PAGE_MASK, next, E820_RESERVED_KERN))
                                set_pte(pte, __pte(0));
                        continue;
                }

                /*
                 * We will re-use the existing mapping.
                 * Xen for example has some special requirements, like mapping
                 * pagetable pages as RO. So assume that whoever pre-set up
                 * these mappings knew what they were doing.
                 */
                if (pte_val(*pte)) {
                        if (!after_bootmem)
                                pages++;
                        continue;
                }

                if (0)
                        printk("   pte=%p addr=%lx pte=%016lx\n",
                               pte, addr, pfn_pte(addr >> PAGE_SHIFT, PAGE_KERNEL).pte);
                pages++;
                set_pte(pte, pfn_pte(addr >> PAGE_SHIFT, prot));
                last_map_addr = (addr & PAGE_MASK) + PAGE_SIZE;
        }

        update_page_count(PG_LEVEL_4K, pages);

        return last_map_addr;
}

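/*
 * Editorial note (added, not in the original source): phys_pmd_init()
 * works like phys_pte_init() one level up. When 2M pages are allowed
 * (PG_LEVEL_2M set in page_size_mask) it installs PSE mappings
 * directly; otherwise it allocates or walks a PTE page. Splitting an
 * existing large page re-uses its protection bits, per the Intel TLB
 * application note cited in the code below.
 */
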
static unsigned long __meminit
phys_pmd_init(pmd_t *pmd_page, unsigned long address, unsigned long end,
              unsigned long page_size_mask, pgprot_t prot)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;

        int i = pmd_index(address);

        for (; i < PTRS_PER_PMD; i++, address = next) {
                unsigned long pte_phys;
                pmd_t *pmd = pmd_page + pmd_index(address);
                pte_t *pte;
                pgprot_t new_prot = prot;

                next = (address & PMD_MASK) + PMD_SIZE;
                if (address >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(address & PMD_MASK, next, E820_RAM) &&
                            !e820_any_mapped(address & PMD_MASK, next, E820_RESERVED_KERN))
                                set_pmd(pmd, __pmd(0));
                        continue;
                }

                if (pmd_val(*pmd)) {
                        if (!pmd_large(*pmd)) {
                                spin_lock(&init_mm.page_table_lock);
                                pte = map_low_page((pte_t *)pmd_page_vaddr(*pmd));
                                last_map_addr = phys_pte_init(pte, address,
                                                                end, prot);
                                unmap_low_page(pte);
                                spin_unlock(&init_mm.page_table_lock);
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_2M mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the large page mapping but
                         * use the same existing protection bits, except for
                         * the large-page bit, so that we don't violate Intel's
                         * TLB Application note (317080) which says that, while
                         * changing the page sizes, new and old translations
                         * should not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_2M)) {
                                if (!after_bootmem)
                                        pages++;
                                last_map_addr = next;
                                continue;
                        }
                        new_prot = pte_pgprot(pte_clrhuge(*(pte_t *)pmd));
                }

                if (page_size_mask & (1<<PG_LEVEL_2M)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pmd,
                                pfn_pte((address & PMD_MASK) >> PAGE_SHIFT,
                                        __pgprot(pgprot_val(prot) | _PAGE_PSE)));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = next;
                        continue;
                }

                pte = alloc_low_page(&pte_phys);
                last_map_addr = phys_pte_init(pte, address, end, new_prot);
                unmap_low_page(pte);

                spin_lock(&init_mm.page_table_lock);
                pmd_populate_kernel(&init_mm, pmd, __va(pte_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        update_page_count(PG_LEVEL_2M, pages);
        return last_map_addr;
}

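/*
 * Editorial note (added, not in the original source): the same pattern
 * one more level up: phys_pud_init() installs 1G pages when
 * PG_LEVEL_1G is allowed, and otherwise recurses into phys_pmd_init()
 * with a freshly allocated (or already existing) PMD page.
 */
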
static unsigned long __meminit
phys_pud_init(pud_t *pud_page, unsigned long addr, unsigned long end,
                         unsigned long page_size_mask)
{
        unsigned long pages = 0, next;
        unsigned long last_map_addr = end;
        int i = pud_index(addr);

        for (; i < PTRS_PER_PUD; i++, addr = next) {
                unsigned long pmd_phys;
                pud_t *pud = pud_page + pud_index(addr);
                pmd_t *pmd;
                pgprot_t prot = PAGE_KERNEL;

                next = (addr & PUD_MASK) + PUD_SIZE;
                if (addr >= end) {
                        if (!after_bootmem &&
                            !e820_any_mapped(addr & PUD_MASK, next, E820_RAM) &&
                            !e820_any_mapped(addr & PUD_MASK, next, E820_RESERVED_KERN))
                                set_pud(pud, __pud(0));
                        continue;
                }

                if (pud_val(*pud)) {
                        if (!pud_large(*pud)) {
                                pmd = map_low_page(pmd_offset(pud, 0));
                                last_map_addr = phys_pmd_init(pmd, addr, end,
                                                         page_size_mask, prot);
                                unmap_low_page(pmd);
                                __flush_tlb_all();
                                continue;
                        }
                        /*
                         * If we are OK with a PG_LEVEL_1G mapping, then we
                         * will use the existing mapping.
                         *
                         * Otherwise, we will split the gbpage mapping but use
                         * the same existing protection bits, except for the
                         * large-page bit, so that we don't violate Intel's
                         * TLB Application note (317080) which says that, while
                         * changing the page sizes, new and old translations
                         * should not differ with respect to page frame and
                         * attributes.
                         */
                        if (page_size_mask & (1 << PG_LEVEL_1G)) {
                                if (!after_bootmem)
                                        pages++;
                                last_map_addr = next;
                                continue;
                        }
                        prot = pte_pgprot(pte_clrhuge(*(pte_t *)pud));
                }

                if (page_size_mask & (1<<PG_LEVEL_1G)) {
                        pages++;
                        spin_lock(&init_mm.page_table_lock);
                        set_pte((pte_t *)pud,
                                pfn_pte((addr & PUD_MASK) >> PAGE_SHIFT,
                                        PAGE_KERNEL_LARGE));
                        spin_unlock(&init_mm.page_table_lock);
                        last_map_addr = next;
                        continue;
                }

                pmd = alloc_low_page(&pmd_phys);
                last_map_addr = phys_pmd_init(pmd, addr, end, page_size_mask,
                                              prot);
                unmap_low_page(pmd);

                spin_lock(&init_mm.page_table_lock);
                pud_populate(&init_mm, pud, __va(pmd_phys));
                spin_unlock(&init_mm.page_table_lock);
        }
        __flush_tlb_all();

        update_page_count(PG_LEVEL_1G, pages);

        return last_map_addr;
}

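/*
 * Editorial note (added, not in the original source): the top-level walk
 * for the direct mapping. 'start' and 'end' arrive as physical
 * addresses, are converted to virtual for the pgd walk, and converted
 * back with __pa() for the level helpers. sync_global_pgds() is only
 * needed when a new pgd entry was actually installed.
 */
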
unsigned long __meminit
kernel_physical_mapping_init(unsigned long start,
                             unsigned long end,
                             unsigned long page_size_mask)
{
        bool pgd_changed = false;
        unsigned long next, last_map_addr = end;
        unsigned long addr;

        start = (unsigned long)__va(start);
        end = (unsigned long)__va(end);
        addr = start;

        for (; start < end; start = next) {
                pgd_t *pgd = pgd_offset_k(start);
                unsigned long pud_phys;
                pud_t *pud;

                next = (start + PGDIR_SIZE) & PGDIR_MASK;
                if (next > end)
                        next = end;

                if (pgd_val(*pgd)) {
                        pud = map_low_page((pud_t *)pgd_page_vaddr(*pgd));
                        last_map_addr = phys_pud_init(pud, __pa(start),
                                                 __pa(end), page_size_mask);
                        unmap_low_page(pud);
                        continue;
                }

                pud = alloc_low_page(&pud_phys);
                last_map_addr = phys_pud_init(pud, __pa(start), __pa(next),
                                                 page_size_mask);
                unmap_low_page(pud);

                spin_lock(&init_mm.page_table_lock);
                pgd_populate(&init_mm, pgd, __va(pud_phys));
                spin_unlock(&init_mm.page_table_lock);
                pgd_changed = true;
        }

        if (pgd_changed)
                sync_global_pgds(addr, end);

        __flush_tlb_all();

        return last_map_addr;
}

#ifndef CONFIG_NUMA
void __init initmem_init(void)
{
        memblock_set_node(0, (phys_addr_t)ULLONG_MAX, 0);
}
#endif

void __init paging_init(void)
{
        sparse_memory_present_with_active_regions(MAX_NUMNODES);
        sparse_init();

        /*
         * Clear the default state for node 0.
         * Note: don't use nodes_clear() here; when NUMA support is not
         * compiled in it really clears the state, and a later
         * node_set_state() will not set it back.
         */
        node_clear_state(0, N_NORMAL_MEMORY);

        zone_sizes_init();
}

/*
 * Memory hotplug specific functions
 */
#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * After memory hotplug, the variables max_pfn, max_low_pfn and high_memory
 * need updating.
 */
static void update_end_of_memory_vars(u64 start, u64 size)
{
        unsigned long end_pfn = PFN_UP(start + size);

        if (end_pfn > max_pfn) {
                max_pfn = end_pfn;
                max_low_pfn = end_pfn;
                high_memory = (void *)__va(max_pfn * PAGE_SIZE - 1) + 1;
        }
}

/*
 * Memory is always added to the NORMAL zone. This means you will never get
 * additional DMA/DMA32 memory.
 */
int arch_add_memory(int nid, u64 start, u64 size)
{
        struct pglist_data *pgdat = NODE_DATA(nid);
        struct zone *zone = pgdat->node_zones + ZONE_NORMAL;
        unsigned long start_pfn = start >> PAGE_SHIFT;
        unsigned long nr_pages = size >> PAGE_SHIFT;
        int ret;

        init_memory_mapping(start, start + size);

        ret = __add_pages(nid, zone, start_pfn, nr_pages);
        WARN_ON_ONCE(ret);

        /* update max_pfn, max_low_pfn and high_memory */
        update_end_of_memory_vars(start, size);

        return ret;
}
EXPORT_SYMBOL_GPL(arch_add_memory);

#endif /* CONFIG_MEMORY_HOTPLUG */

static struct kcore_list kcore_vsyscall;

void __init mem_init(void)
{
        long codesize, reservedpages, datasize, initsize;
        unsigned long absent_pages;

        pci_iommu_alloc();

        /* clear_bss() has already cleared empty_zero_page */

        reservedpages = 0;

        /* this will put all low memory onto the freelists */
#ifdef CONFIG_NUMA
        totalram_pages = numa_free_all_bootmem();
#else
        totalram_pages = free_all_bootmem();
#endif

        absent_pages = absent_pages_in_range(0, max_pfn);
        reservedpages = max_pfn - totalram_pages - absent_pages;
        after_bootmem = 1;

        codesize =  (unsigned long) &_etext - (unsigned long) &_text;
        datasize =  (unsigned long) &_edata - (unsigned long) &_etext;
        initsize =  (unsigned long) &__init_end - (unsigned long) &__init_begin;

        /* Register memory areas for /proc/kcore */
        kclist_add(&kcore_vsyscall, (void *)VSYSCALL_START,
                         VSYSCALL_END - VSYSCALL_START, KCORE_OTHER);

        printk(KERN_INFO "Memory: %luk/%luk available (%ldk kernel code, "
                         "%ldk absent, %ldk reserved, %ldk data, %ldk init)\n",
                nr_free_pages() << (PAGE_SHIFT-10),
                max_pfn << (PAGE_SHIFT-10),
                codesize >> 10,
                absent_pages << (PAGE_SHIFT-10),
                reservedpages << (PAGE_SHIFT-10),
                datasize >> 10,
                initsize >> 10);
}

#ifdef CONFIG_DEBUG_RODATA
const int rodata_test_data = 0xC3;
EXPORT_SYMBOL_GPL(rodata_test_data);

int kernel_set_to_readonly;

void set_kernel_text_rw(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read write\n",
                 start, end);

        /*
         * Make the kernel identity mapping for text RW. The kernel text
         * mapping will always be RO. Refer to the comment in
         * static_protections() in pageattr.c.
         */
        set_memory_rw(start, (end - start) >> PAGE_SHIFT);
}

void set_kernel_text_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long end = PFN_ALIGN(__stop___ex_table);

        if (!kernel_set_to_readonly)
                return;

        pr_debug("Set kernel text: %lx - %lx for read only\n",
                 start, end);

        /*
         * Set the kernel identity mapping for text RO.
         */
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);
}

void mark_rodata_ro(void)
{
        unsigned long start = PFN_ALIGN(_text);
        unsigned long rodata_start =
                ((unsigned long)__start_rodata + PAGE_SIZE - 1) & PAGE_MASK;
        unsigned long end = (unsigned long) &__end_rodata_hpage_align;
        unsigned long text_end = PAGE_ALIGN((unsigned long) &__stop___ex_table);
        unsigned long rodata_end = PAGE_ALIGN((unsigned long) &__end_rodata);
        unsigned long data_start = (unsigned long) &_sdata;

        printk(KERN_INFO "Write protecting the kernel read-only data: %luk\n",
               (end - start) >> 10);
        set_memory_ro(start, (end - start) >> PAGE_SHIFT);

        kernel_set_to_readonly = 1;

        /*
         * The rodata section (but not the kernel text!) should also be
         * non-executable.
         */
        set_memory_nx(rodata_start, (end - rodata_start) >> PAGE_SHIFT);

        rodata_test();

#ifdef CONFIG_CPA_DEBUG
        printk(KERN_INFO "Testing CPA: undo %lx-%lx\n", start, end);
        set_memory_rw(start, (end-start) >> PAGE_SHIFT);

        printk(KERN_INFO "Testing CPA: again\n");
        set_memory_ro(start, (end-start) >> PAGE_SHIFT);
#endif

        free_init_pages("unused kernel memory",
                        (unsigned long) page_address(virt_to_page(text_end)),
                        (unsigned long)
                                 page_address(virt_to_page(rodata_start)));
        free_init_pages("unused kernel memory",
                        (unsigned long) page_address(virt_to_page(rodata_end)),
                        (unsigned long) page_address(virt_to_page(data_start)));
}

#endif

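/*
 * Editorial note (added, not in the original source): kern_addr_valid()
 * first checks that the address is canonical (the sign-extension bits
 * are all 0 or all 1), then walks the kernel page tables and reports
 * whether a valid pfn backs the address, handling 2M (pmd_large)
 * mappings explicitly.
 */
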
int kern_addr_valid(unsigned long addr)
{
        unsigned long above = ((long)addr) >> __VIRTUAL_MASK_SHIFT;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (above != 0 && above != -1UL)
                return 0;

        pgd = pgd_offset_k(addr);
        if (pgd_none(*pgd))
                return 0;

        pud = pud_offset(pgd, addr);
        if (pud_none(*pud))
                return 0;

        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return 0;

        if (pmd_large(*pmd))
                return pfn_valid(pmd_pfn(*pmd));

        pte = pte_offset_kernel(pmd, addr);
        if (pte_none(*pte))
                return 0;

        return pfn_valid(pte_pfn(*pte));
}

/*
 * A pseudo VMA to allow ptrace access to the vsyscall page. This now only
 * covers the 64-bit vsyscall page; 32-bit has a real VMA and does not need
 * special handling anymore:
 */
static struct vm_area_struct gate_vma = {
        .vm_start       = VSYSCALL_START,
        .vm_end         = VSYSCALL_START + (VSYSCALL_MAPPED_PAGES * PAGE_SIZE),
        .vm_page_prot   = PAGE_READONLY_EXEC,
        .vm_flags       = VM_READ | VM_EXEC
};

struct vm_area_struct *get_gate_vma(struct mm_struct *mm)
{
#ifdef CONFIG_IA32_EMULATION
        if (!mm || mm->context.ia32_compat)
                return NULL;
#endif
        return &gate_vma;
}

int in_gate_area(struct mm_struct *mm, unsigned long addr)
{
        struct vm_area_struct *vma = get_gate_vma(mm);

        if (!vma)
                return 0;

        return (addr >= vma->vm_start) && (addr < vma->vm_end);
}

/*
 * Use this when you have no reliable mm, typically from interrupt
 * context. It is less reliable than using a task's mm and may give
 * false positives.
 */
int in_gate_area_no_mm(unsigned long addr)
{
        return (addr >= VSYSCALL_START) && (addr < VSYSCALL_END);
}

const char *arch_vma_name(struct vm_area_struct *vma)
{
        if (vma->vm_mm && vma->vm_start == (long)vma->vm_mm->context.vdso)
                return "[vdso]";
        if (vma == &gate_vma)
                return "[vsyscall]";
        return NULL;
}

#ifdef CONFIG_X86_UV
unsigned long memory_block_size_bytes(void)
{
        if (is_uv_system()) {
                printk(KERN_INFO "UV: memory block size 2GB\n");
                return 2UL * 1024 * 1024 * 1024;
        }
        return MIN_MEMORY_BLOCK_SIZE;
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
/*
 * Initialise the sparsemem vmemmap using huge-pages at the PMD level.
 */
static long __meminitdata addr_start, addr_end;
static void __meminitdata *p_start, *p_end;
static int __meminitdata node_start;
int __meminit
vmemmap_populate(struct page *start_page, unsigned long size, int node)
{
        unsigned long addr = (unsigned long)start_page;
        unsigned long end = (unsigned long)(start_page + size);
        unsigned long next;
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        for (; addr < end; addr = next) {
                void *p = NULL;

                pgd = vmemmap_pgd_populate(addr, node);
                if (!pgd)
                        return -ENOMEM;

                pud = vmemmap_pud_populate(pgd, addr, node);
                if (!pud)
                        return -ENOMEM;

                if (!cpu_has_pse) {
                        next = (addr + PAGE_SIZE) & PAGE_MASK;
                        pmd = vmemmap_pmd_populate(pud, addr, node);

                        if (!pmd)
                                return -ENOMEM;

                        p = vmemmap_pte_populate(pmd, addr, node);

                        if (!p)
                                return -ENOMEM;

                        addr_end = addr + PAGE_SIZE;
                        p_end = p + PAGE_SIZE;
                } else {
                        next = pmd_addr_end(addr, end);

                        pmd = pmd_offset(pud, addr);
                        if (pmd_none(*pmd)) {
                                pte_t entry;

                                p = vmemmap_alloc_block_buf(PMD_SIZE, node);
                                if (!p)
                                        return -ENOMEM;

                                entry = pfn_pte(__pa(p) >> PAGE_SHIFT,
                                                PAGE_KERNEL_LARGE);
                                set_pmd(pmd, __pmd(pte_val(entry)));

                                /* check to see if we have contiguous blocks */
                                if (p_end != p || node_start != node) {
                                        if (p_start)
                                                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                                                       addr_start, addr_end-1, p_start, p_end-1, node_start);
                                        addr_start = addr;
                                        node_start = node;
                                        p_start = p;
                                }

                                addr_end = addr + PMD_SIZE;
                                p_end = p + PMD_SIZE;
                        } else
                                vmemmap_verify((pte_t *)pmd, node, addr, next);
                }

        }
        sync_global_pgds((unsigned long)start_page, end);
        return 0;
}

void __meminit vmemmap_populate_print_last(void)
{
        if (p_start) {
                printk(KERN_DEBUG " [%lx-%lx] PMD -> [%p-%p] on node %d\n",
                        addr_start, addr_end-1, p_start, p_end-1, node_start);
                p_start = NULL;
                p_end = NULL;
                node_start = 0;
        }
}
#endif