diff --git a/arch/arc/include/asm/pgalloc.h b/arch/arc/include/asm/pgalloc.h
index a32ca3104ced..8ab1af3da6e7 100644
--- a/arch/arc/include/asm/pgalloc.h
+++ b/arch/arc/include/asm/pgalloc.h
@@ -35,26 +35,27 @@
 static inline void pmd_populate_kernel(struct mm_struct *mm,
 				       pmd_t *pmd, pte_t *pte)
 {
-	pmd_set(pmd, pte);
+	/*
+	 * The cast to long below is OK in 32-bit PAE40 regime with long long pte
+	 * Despite "wider" pte, the pte table needs to be in non-PAE low memory
+	 * as all higher levels can only hold long pointers.
+	 *
+	 * The cast itself is needed given simplistic definition of set_pmd()
+	 */
+	set_pmd(pmd, __pmd((unsigned long)pte));
 }
 
-static inline void
-pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t ptep)
-{
-	pmd_set(pmd, (pte_t *) ptep);
-}
-
-static inline int __get_order_pgd(void)
+static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, pgtable_t pte_page)
 {
-	return get_order(PTRS_PER_PGD * sizeof(pgd_t));
+	set_pmd(pmd, __pmd((unsigned long)page_address(pte_page)));
 }
 
 static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 {
-	int num, num2;
-	pgd_t *ret = (pgd_t *) __get_free_pages(GFP_KERNEL, __get_order_pgd());
+	pgd_t *ret = (pgd_t *) __get_free_page(GFP_KERNEL);
 
 	if (ret) {
+		int num, num2;
 		num = USER_PTRS_PER_PGD + USER_KERNEL_GUTTER / PGDIR_SIZE;
 		memzero(ret, num * sizeof(pgd_t));
 
@@ -70,61 +71,43 @@ static inline pgd_t *pgd_alloc(struct mm_struct *mm)
 
 static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
 {
-	free_pages((unsigned long)pgd, __get_order_pgd());
-}
-
-
-/*
- * With software-only page-tables, addr-split for traversal is tweakable and
- * that directly governs how big tables would be at each level.
- * Further, the MMU page size is configurable.
- * Thus we need to programatically assert the size constraint
- * All of this is const math, allowing gcc to do constant folding/propagation.
- */
-
-static inline int __get_order_pte(void)
-{
-	return get_order(PTRS_PER_PTE * sizeof(pte_t));
+	free_page((unsigned long)pgd);
 }
 
 static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm)
 {
 	pte_t *pte;
 
-	pte = (pte_t *) __get_free_pages(GFP_KERNEL | __GFP_ZERO,
-					 __get_order_pte());
+	pte = (pte_t *) __get_free_page(GFP_KERNEL | __GFP_ZERO);
 
 	return pte;
 }
 
-static inline pgtable_t
-pte_alloc_one(struct mm_struct *mm)
+static inline pgtable_t pte_alloc_one(struct mm_struct *mm)
 {
-	pgtable_t pte_pg;
 	struct page *page;
 
-	pte_pg = (pgtable_t)__get_free_pages(GFP_KERNEL, __get_order_pte());
-	if (!pte_pg)
-		return 0;
-	memzero((void *)pte_pg, PTRS_PER_PTE * sizeof(pte_t));
-	page = virt_to_page(pte_pg);
+	page = (pgtable_t)alloc_page(GFP_KERNEL | __GFP_ZERO | __GFP_ACCOUNT);
+	if (!page)
+		return NULL;
+
 	if (!pgtable_pte_page_ctor(page)) {
 		__free_page(page);
-		return 0;
+		return NULL;
 	}
 
-	return pte_pg;
+	return page;
 }
 
 static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
 {
-	free_pages((unsigned long)pte, __get_order_pte());	/* takes phy addr */
+	free_page((unsigned long)pte);
 }
 
-static inline void pte_free(struct mm_struct *mm, pgtable_t ptep)
+static inline void pte_free(struct mm_struct *mm, pgtable_t pte_page)
 {
-	pgtable_pte_page_dtor(virt_to_page(ptep));
-	free_pages((unsigned long)ptep, __get_order_pte());
+	pgtable_pte_page_dtor(pte_page);
+	__free_page(pte_page);
 }
 
 #define __pte_free_tlb(tlb, pte, addr)	pte_free((tlb)->mm, pte)
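
A note on why dropping __get_order_pgd()/__get_order_pte() is safe: on a 32-bit
non-PAE configuration each table works out to at most one page, so the computed
order is always 0 and __get_free_pages(gfp, order)/free_pages() collapse to
__get_free_page(gfp)/free_page(), as this patch does. The stand-alone user-space
sketch below redoes that arithmetic. The configuration values (8 KiB pages via
PAGE_SHIFT 13, a two-level split with PGDIR_SHIFT 21, 4-byte table entries) and
the local get_order() stand-in are illustrative assumptions, not taken from the
patch itself.

#include <stdio.h>

#define PAGE_SHIFT	13	/* assumed: 8 KiB MMU pages */
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PGDIR_SHIFT	21	/* assumed two-level address split */
#define PTRS_PER_PTE	(1UL << (PGDIR_SHIFT - PAGE_SHIFT))	/* 256 */
#define PTRS_PER_PGD	(1UL << (32 - PGDIR_SHIFT))		/* 2048 */
#define ENTRY_SIZE	4UL	/* assumed 4-byte pgd_t/pte_t: 32-bit, non-PAE */

/*
 * User-space stand-in for the kernel's get_order(): smallest order such
 * that 2^order pages cover 'size' bytes (for size > 0).
 */
static int get_order(unsigned long size)
{
	unsigned long pages = (size + PAGE_SIZE - 1) / PAGE_SIZE;
	int order = 0;

	while ((1UL << order) < pages)
		order++;
	return order;
}

int main(void)
{
	/* 2048 entries * 4 bytes = 8192 bytes: exactly one page -> order 0 */
	printf("pgd table: %lu bytes -> order %d\n",
	       PTRS_PER_PGD * ENTRY_SIZE,
	       get_order(PTRS_PER_PGD * ENTRY_SIZE));

	/* 256 entries * 4 bytes = 1024 bytes: well under a page -> order 0 */
	printf("pte table: %lu bytes -> order %d\n",
	       PTRS_PER_PTE * ENTRY_SIZE,
	       get_order(PTRS_PER_PTE * ENTRY_SIZE));

	return 0;
}

Under these assumed values both orders come out 0: the pgd table (2048 entries
of 4 bytes) exactly fills one 8 KiB page and the pte table (256 entries of
4 bytes) uses an eighth of one. That single-page invariant is what the new
__get_free_page()/alloc_page() calls in pgd_alloc() and pte_alloc_one() rely on.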