// SPDX-License-Identifier: GPL-2.0
/*
 * High memory handling common code and variables.
 *
 * (C) 1999 Andrea Arcangeli, SuSE GmbH, andrea@suse.de
 *          Gerhard Wichert, Siemens AG, Gerhard.Wichert@pdb.siemens.de
 *
 * Redesigned the x86 32-bit VM architecture to deal with
 * 64-bit physical space. With current x86 CPUs this
 * means up to 64 Gigabytes physical RAM.
 *
 * Rewrote high memory support to move the page cache into
 * high memory. Implemented permanent (schedulable) kmaps
 * based on Linus' idea.
 *
 * Copyright (C) 1999 Ingo Molnar <mingo@redhat.com>
 */
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/swap.h>
#include <linux/bio.h>
#include <linux/pagemap.h>
#include <linux/mempool.h>
#include <linux/blkdev.h>
#include <linux/init.h>
#include <linux/hash.h>
#include <linux/highmem.h>
#include <linux/kgdb.h>
#include <asm/tlbflush.h>
#include <linux/vmalloc.h>
/*
 * Virtual_count is not a pure "count".
 *  0 means that it is not mapped, and has not been mapped
 *    since a TLB flush - it is usable.
 *  1 means that there are no users, but it has been mapped
 *    since the last TLB flush - so we can't use it.
 *  n means that there are (n-1) current users of it.
 */
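/*
 * Illustrative lifetime of one pkmap slot (not part of the original file),
 * following the rules above:
 *
 *	kmap_high(page)				pkmap_count[nr] == 2  (mapped, 1 user)
 *	kmap_high(page)				pkmap_count[nr] == 3  (mapped, 2 users)
 *	kunmap_high(page) twice			pkmap_count[nr] == 1  (mapped, unused)
 *	flush_all_zero_pkmaps()			pkmap_count[nr] == 0  (usable again)
 */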
#ifdef CONFIG_HIGHMEM

/*
 * Architecture with aliasing data cache may define the following family of
 * helper functions in its asm/highmem.h to control cache color of virtual
 * addresses where physical memory pages are mapped by kmap.
 */
#ifndef get_pkmap_color

/*
 * Determine color of virtual address where the page should be mapped.
 */
static inline unsigned int get_pkmap_color(struct page *page)
{
	return 0;
}
#define get_pkmap_color get_pkmap_color

/*
 * Get next index for mapping inside PKMAP region for page with given color.
 */
static inline unsigned int get_next_pkmap_nr(unsigned int color)
{
	static unsigned int last_pkmap_nr;

	last_pkmap_nr = (last_pkmap_nr + 1) & LAST_PKMAP_MASK;
	return last_pkmap_nr;
}

/*
 * Determine if page index inside PKMAP region (pkmap_nr) of given color
 * has wrapped around PKMAP region end. When this happens an attempt to
 * flush all unused PKMAP slots is made.
 */
static inline int no_more_pkmaps(unsigned int pkmap_nr, unsigned int color)
{
	return pkmap_nr == 0;
}

/*
 * Get the number of PKMAP entries of the given color. If no free slot is
 * found after checking that many entries, kmap will sleep waiting for
 * someone to call kunmap and free a PKMAP slot.
 */
static inline int get_pkmap_entries_count(unsigned int color)
{
	return LAST_PKMAP;
}

/*
 * Get head of a wait queue for PKMAP entries of the given color.
 * Wait queues for different mapping colors should be independent to avoid
 * unnecessary wakeups caused by freeing of slots of other colors.
 */
static inline wait_queue_head_t *get_pkmap_wait_queue_head(unsigned int color)
{
	static DECLARE_WAIT_QUEUE_HEAD(pkmap_map_wait);

	return &pkmap_map_wait;
}
#endif
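/*
 * Illustrative sketch (hypothetical, not from this file): an architecture
 * with an aliasing data cache could override the defaults above in its
 * asm/highmem.h, deriving the color from the page's physical address so
 * that the chosen PKMAP slot is cache-compatible with other aliases:
 *
 *	static inline unsigned int get_pkmap_color(struct page *page)
 *	{
 *		return page_to_pfn(page) & 1;	(two colors, for example)
 *	}
 *	#define get_pkmap_color get_pkmap_color
 *
 * The matching get_next_pkmap_nr()/no_more_pkmaps() overrides would then
 * have to keep odd and even PKMAP indices apart.
 */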
atomic_long_t _totalhigh_pages __read_mostly;
EXPORT_SYMBOL(_totalhigh_pages);

unsigned int __nr_free_highpages(void)
{
	struct zone *zone;
	unsigned int pages = 0;

	for_each_populated_zone(zone) {
		if (is_highmem(zone))
			pages += zone_page_state(zone, NR_FREE_PAGES);
	}

	return pages;
}
static int pkmap_count[LAST_PKMAP];
static __cacheline_aligned_in_smp DEFINE_SPINLOCK(kmap_lock);

pte_t *pkmap_page_table;
/*
 * Most architectures have no use for kmap_high_get(), so let's abstract
 * the disabling of IRQ out of the locking in that case to save on a
 * potential useless overhead.
 */
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
#define lock_kmap()		spin_lock_irq(&kmap_lock)
#define unlock_kmap()		spin_unlock_irq(&kmap_lock)
#define lock_kmap_any(flags)	spin_lock_irqsave(&kmap_lock, flags)
#define unlock_kmap_any(flags)	spin_unlock_irqrestore(&kmap_lock, flags)
#else
#define lock_kmap()		spin_lock(&kmap_lock)
#define unlock_kmap()		spin_unlock(&kmap_lock)
#define lock_kmap_any(flags)	\
		do { spin_lock(&kmap_lock); (void)(flags); } while (0)
#define unlock_kmap_any(flags)	\
		do { spin_unlock(&kmap_lock); (void)(flags); } while (0)
#endif
struct page *__kmap_to_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (addr >= PKMAP_ADDR(0) && addr < PKMAP_ADDR(LAST_PKMAP)) {
		int i = PKMAP_NR(addr);

		return pte_page(pkmap_page_table[i]);
	}

	return virt_to_page(addr);
}
EXPORT_SYMBOL(__kmap_to_page);
static void flush_all_zero_pkmaps(void)
{
	int i;
	int need_flush = 0;

	flush_cache_kmaps();

	for (i = 0; i < LAST_PKMAP; i++) {
		struct page *page;

		/*
		 * zero means we don't have anything to do,
		 * >1 means that it is still in use. Only
		 * a count of 1 means that it is free but
		 * needs to be unmapped
		 */
		if (pkmap_count[i] != 1)
			continue;
		pkmap_count[i] = 0;

		/* sanity check */
		BUG_ON(pte_none(pkmap_page_table[i]));

		/*
		 * Don't need an atomic fetch-and-clear op here;
		 * no-one has the page mapped, and cannot get at
		 * its virtual address (and hence PTE) without first
		 * getting the kmap_lock (which is held here).
		 * So no dangers, even with speculative execution.
		 */
		page = pte_page(pkmap_page_table[i]);
		pte_clear(&init_mm, PKMAP_ADDR(i), &pkmap_page_table[i]);
		set_page_address(page, NULL);
		need_flush = 1;
	}
	if (need_flush)
		flush_tlb_kernel_range(PKMAP_ADDR(0), PKMAP_ADDR(LAST_PKMAP));
}
void __kmap_flush_unused(void)
{
	lock_kmap();
	flush_all_zero_pkmaps();
	unlock_kmap();
}
static inline unsigned long map_new_virtual(struct page *page)
{
	unsigned long vaddr;
	int count;
	unsigned int last_pkmap_nr;
	unsigned int color = get_pkmap_color(page);

start:
	count = get_pkmap_entries_count(color);
	/* Find an empty entry */
	for (;;) {
		last_pkmap_nr = get_next_pkmap_nr(color);
		if (no_more_pkmaps(last_pkmap_nr, color)) {
			flush_all_zero_pkmaps();
			count = get_pkmap_entries_count(color);
		}
		if (!pkmap_count[last_pkmap_nr])
			break;	/* Found a usable entry */
		if (--count)
			continue;

		/*
		 * Sleep for somebody else to unmap their entries
		 */
		{
			DECLARE_WAITQUEUE(wait, current);
			wait_queue_head_t *pkmap_map_wait =
				get_pkmap_wait_queue_head(color);

			__set_current_state(TASK_UNINTERRUPTIBLE);
			add_wait_queue(pkmap_map_wait, &wait);
			unlock_kmap();
			schedule();
			remove_wait_queue(pkmap_map_wait, &wait);
			lock_kmap();

			/* Somebody else might have mapped it while we slept */
			if (page_address(page))
				return (unsigned long)page_address(page);

			/* Re-start */
			goto start;
		}
	}
	vaddr = PKMAP_ADDR(last_pkmap_nr);
	set_pte_at(&init_mm, vaddr,
		   &(pkmap_page_table[last_pkmap_nr]), mk_pte(page, kmap_prot));

	pkmap_count[last_pkmap_nr] = 1;
	set_page_address(page, (void *)vaddr);

	return vaddr;
}
/**
 * kmap_high - map a highmem page into memory
 * @page: &struct page to map
 *
 * Returns the page's virtual memory address.
 *
 * We cannot call this from interrupts, as it may block.
 */
void *kmap_high(struct page *page)
{
	unsigned long vaddr;

	/*
	 * For highmem pages, we can't trust "virtual" until
	 * after we have the lock.
	 */
	lock_kmap();
	vaddr = (unsigned long)page_address(page);
	if (!vaddr)
		vaddr = map_new_virtual(page);
	pkmap_count[PKMAP_NR(vaddr)]++;
	BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 2);
	unlock_kmap();
	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_high);
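/*
 * Illustrative usage (not part of the original file): callers normally go
 * through kmap()/kunmap() from <linux/highmem.h>, which only fall back to
 * kmap_high()/kunmap_high() for highmem pages:
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, PAGE_SIZE);
 *	kunmap(page);
 *
 * Since kmap() may sleep waiting for a free pkmap slot, this pattern is
 * only valid in process context ('buf' above is a hypothetical source
 * buffer).
 */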
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
/**
 * kmap_high_get - pin a highmem page into memory
 * @page: &struct page to pin
 *
 * Returns the page's current virtual memory address, or NULL if no mapping
 * exists. If and only if a non-NULL address is returned then a
 * matching call to kunmap_high() is necessary.
 *
 * This can be called from any context.
 */
void *kmap_high_get(struct page *page)
{
	unsigned long vaddr, flags;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	if (vaddr) {
		BUG_ON(pkmap_count[PKMAP_NR(vaddr)] < 1);
		pkmap_count[PKMAP_NR(vaddr)]++;
	}
	unlock_kmap_any(flags);
	return (void *)vaddr;
}
#endif
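/*
 * Illustrative sketch (hypothetical, not from this file): code that must
 * not sleep, e.g. cache maintenance on an architecture that defines
 * ARCH_NEEDS_KMAP_HIGH_GET, can probe for an existing kmap:
 *
 *	void *vaddr = kmap_high_get(page);
 *	if (vaddr) {
 *		flush_arch_dcache_range(vaddr, PAGE_SIZE);
 *		kunmap_high(page);
 *	}
 *
 * flush_arch_dcache_range() is a made-up helper; the point is that a
 * non-NULL return pins the mapping and requires a matching kunmap_high().
 */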
/**
 * kunmap_high - unmap a highmem page
 * @page: &struct page to unmap
 *
 * If ARCH_NEEDS_KMAP_HIGH_GET is not defined then this may be called
 * only from user context.
 */
void kunmap_high(struct page *page)
{
	unsigned long vaddr;
	unsigned long nr;
	unsigned long flags;
	int need_wakeup;
	unsigned int color = get_pkmap_color(page);
	wait_queue_head_t *pkmap_map_wait;

	lock_kmap_any(flags);
	vaddr = (unsigned long)page_address(page);
	BUG_ON(!vaddr);
	nr = PKMAP_NR(vaddr);

	/*
	 * A count must never go down to zero
	 * without a TLB flush!
	 */
	need_wakeup = 0;
	switch (--pkmap_count[nr]) {
	case 0:
		BUG();
	case 1:
		/*
		 * Avoid an unnecessary wake_up() function call.
		 * The common case is pkmap_count[] == 1, but
		 * no waiters.
		 * The tasks queued in the wait-queue are guarded
		 * by both the lock in the wait-queue-head and by
		 * the kmap_lock. As the kmap_lock is held here,
		 * no need for the wait-queue-head's lock. Simply
		 * test if the queue is empty.
		 */
		pkmap_map_wait = get_pkmap_wait_queue_head(color);
		need_wakeup = waitqueue_active(pkmap_map_wait);
	}
	unlock_kmap_any(flags);

	/* do wake-up, if needed, race-free outside of the spin lock */
	if (need_wakeup)
		wake_up(pkmap_map_wait);
}
EXPORT_SYMBOL(kunmap_high);
#endif /* CONFIG_HIGHMEM */

#ifdef CONFIG_KMAP_LOCAL

#include <asm/kmap_size.h>

static DEFINE_PER_CPU(int, __kmap_local_idx);
/*
 * With DEBUG_HIGHMEM the stack depth is doubled and every second
 * slot is unused which acts as a guard page
 */
#ifdef CONFIG_DEBUG_HIGHMEM
# define KM_INCR	2
#else
# define KM_INCR	1
#endif

static inline int kmap_local_idx_push(void)
{
	int idx = __this_cpu_add_return(__kmap_local_idx, KM_INCR) - 1;

	WARN_ON_ONCE(in_irq() && !irqs_disabled());
	BUG_ON(idx >= KM_MAX_IDX);
	return idx;
}

static inline int kmap_local_idx(void)
{
	return __this_cpu_read(__kmap_local_idx) - 1;
}

static inline void kmap_local_idx_pop(void)
{
	int idx = __this_cpu_sub_return(__kmap_local_idx, KM_INCR);

	BUG_ON(idx < 0);
}
#ifndef arch_kmap_local_post_map
# define arch_kmap_local_post_map(vaddr, pteval)	do { } while (0)
#endif

#ifndef arch_kmap_local_pre_unmap
# define arch_kmap_local_pre_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_post_unmap
# define arch_kmap_local_post_unmap(vaddr)		do { } while (0)
#endif

#ifndef arch_kmap_local_map_idx
#define arch_kmap_local_map_idx(idx, pfn)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_unmap_idx
#define arch_kmap_local_unmap_idx(idx, vaddr)	kmap_local_calc_idx(idx)
#endif

#ifndef arch_kmap_local_high_get
static inline void *arch_kmap_local_high_get(struct page *page)
{
	return NULL;
}
#endif
/* Unmap a local mapping which was obtained by kmap_high_get() */
static inline void kmap_high_unmap_local(unsigned long vaddr)
{
#ifdef ARCH_NEEDS_KMAP_HIGH_GET
	if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP))
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
#endif
}
static inline int kmap_local_calc_idx(int idx)
{
	return idx + KM_MAX_IDX * smp_processor_id();
}
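/*
 * Illustrative arithmetic (KM_MAX_IDX comes from <asm/kmap_size.h>; 16 is
 * assumed here only for the example): a nesting depth of 3 on CPU 2 maps
 * to slot 3 + 16 * 2 = 35, i.e. the fixmap address
 * __fix_to_virt(FIX_KMAP_BEGIN + 35).
 */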
static pte_t *__kmap_pte;

static pte_t *kmap_get_pte(void)
{
	if (!__kmap_pte)
		__kmap_pte = virt_to_kpte(__fix_to_virt(FIX_KMAP_BEGIN));
	return __kmap_pte;
}
void *__kmap_local_pfn_prot(unsigned long pfn, pgprot_t prot)
{
	pte_t pteval, *kmap_pte = kmap_get_pte();
	unsigned long vaddr;
	int idx;

	idx = arch_kmap_local_map_idx(kmap_local_idx_push(), pfn);
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	pteval = pfn_pte(pfn, prot);
	set_pte_at(&init_mm, vaddr, kmap_pte - idx, pteval);
	arch_kmap_local_post_map(vaddr, pteval);

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(__kmap_local_pfn_prot);
void *__kmap_local_page_prot(struct page *page, pgprot_t prot)
{
	void *kmap;

	if (!PageHighMem(page))
		return page_address(page);

	/* Try kmap_high_get() if architecture has it enabled */
	kmap = arch_kmap_local_high_get(page);
	if (kmap)
		return kmap;

	return __kmap_local_pfn_prot(page_to_pfn(page), prot);
}
EXPORT_SYMBOL(__kmap_local_page_prot);
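/*
 * Illustrative usage (not part of the original file): the public wrappers
 * kmap_local_page()/kunmap_local() in <linux/highmem.h> end up here for
 * highmem pages:
 *
 *	void *vaddr = kmap_local_page(page);
 *	memcpy(vaddr, src, PAGE_SIZE);
 *	kunmap_local(vaddr);
 *
 * The mapping is local to this context, nests strictly (unmap in reverse
 * order of map) and never sleeps, so it is usable where kmap() is not
 * ('src' above is a hypothetical source buffer).
 */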
void kunmap_local_indexed(void *vaddr)
{
	unsigned long addr = (unsigned long) vaddr & PAGE_MASK;
	pte_t *kmap_pte = kmap_get_pte();
	int idx;

	if (addr < __fix_to_virt(FIX_KMAP_END) ||
	    addr > __fix_to_virt(FIX_KMAP_BEGIN)) {
		WARN_ON_ONCE(addr < PAGE_OFFSET);

		/* Handle mappings which were obtained by kmap_high_get() */
		kmap_high_unmap_local(addr);
		return;
	}

	idx = arch_kmap_local_unmap_idx(kmap_local_idx(), addr);
	WARN_ON_ONCE(addr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	arch_kmap_local_pre_unmap(addr);
	pte_clear(&init_mm, addr, kmap_pte - idx);
	arch_kmap_local_post_unmap(addr);
	kmap_local_idx_pop();
}
EXPORT_SYMBOL(kunmap_local_indexed);

#endif /* CONFIG_KMAP_LOCAL */
#if defined(HASHED_PAGE_VIRTUAL)

#define PA_HASH_ORDER	7

/*
 * Describes one page->virtual association
 */
struct page_address_map {
	struct page *page;
	void *virtual;
	struct list_head list;
};

static struct page_address_map page_address_maps[LAST_PKMAP];

/*
 * Hash table bucket
 */
static struct page_address_slot {
	struct list_head lh;			/* List of page_address_maps */
	spinlock_t lock;			/* Protect this bucket's list */
} ____cacheline_aligned_in_smp page_address_htable[1<<PA_HASH_ORDER];

static struct page_address_slot *page_slot(const struct page *page)
{
	return &page_address_htable[hash_ptr(page, PA_HASH_ORDER)];
}
/**
 * page_address - get the mapped virtual address of a page
 * @page: &struct page to get the virtual address of
 *
 * Returns the page's virtual address.
 */
void *page_address(const struct page *page)
{
	unsigned long flags;
	void *ret;
	struct page_address_slot *pas;

	if (!PageHighMem(page))
		return lowmem_page_address(page);

	pas = page_slot(page);
	ret = NULL;
	spin_lock_irqsave(&pas->lock, flags);
	if (!list_empty(&pas->lh)) {
		struct page_address_map *pam;

		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				ret = pam->virtual;
				goto done;
			}
		}
	}
done:
	spin_unlock_irqrestore(&pas->lock, flags);
	return ret;
}
EXPORT_SYMBOL(page_address);
/**
 * set_page_address - set a page's virtual address
 * @page: &struct page to set
 * @virtual: virtual address to use
 */
void set_page_address(struct page *page, void *virtual)
{
	unsigned long flags;
	struct page_address_slot *pas;
	struct page_address_map *pam;

	BUG_ON(!PageHighMem(page));

	pas = page_slot(page);
	if (virtual) {		/* Add */
		pam = &page_address_maps[PKMAP_NR((unsigned long)virtual)];
		pam->page = page;
		pam->virtual = virtual;

		spin_lock_irqsave(&pas->lock, flags);
		list_add_tail(&pam->list, &pas->lh);
		spin_unlock_irqrestore(&pas->lock, flags);
	} else {		/* Remove */
		spin_lock_irqsave(&pas->lock, flags);
		list_for_each_entry(pam, &pas->lh, list) {
			if (pam->page == page) {
				list_del(&pam->list);
				break;
			}
		}
		spin_unlock_irqrestore(&pas->lock, flags);
	}
}
void __init page_address_init(void)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(page_address_htable); i++) {
		INIT_LIST_HEAD(&page_address_htable[i].lh);
		spin_lock_init(&page_address_htable[i].lock);
	}
}

#endif	/* defined(HASHED_PAGE_VIRTUAL) */