/*
* Preload a translation in the hash table
*/
-void hash_preload(struct mm_struct *mm, unsigned long ea,
- bool is_exec, unsigned long trap)
+void hash_preload(struct mm_struct *mm, unsigned long ea)
{
pmd_t *pmd;
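The rest of hash_preload() sits outside this hunk; only the signature drops the unused is_exec and trap arguments. A minimal sketch of how the book3s32 implementation presumably reads afterwards (the Hash check, the page-table walk and add_hash_page() are assumed from the surrounding file, not shown in the hunk):

/* Sketch only: body assumed from the surrounding book3s32 file, not from the hunk */
void hash_preload(struct mm_struct *mm, unsigned long ea)
{
	pmd_t *pmd;

	if (!Hash)		/* no hash table set up on this MMU */
		return;
	pmd = pmd_offset(pud_offset(pgd_offset(mm, ea), ea), ea);
	if (!pmd_none(*pmd))
		add_hash_page(mm, ea, pmd_val(*pmd));
}

The next hunk adjusts the caller, update_mmu_cache(), accordingly.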
/*
* We don't need to worry about _PAGE_PRESENT here because we are
* called with either mm->page_table_lock held or ptl lock held
*/
- unsigned long trap;
- bool is_exec;
/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
if (!pte_young(*ptep) || address >= TASK_SIZE)
return;
- /*
- * We try to figure out if we are coming from an instruction
- * access fault and pass that down to __hash_page so we avoid
- * double-faulting on execution of fresh text. We have to test
- * for regs NULL since init will get here first thing at boot.
- *
- * We also avoid filling the hash if not coming from a fault.
- */
+ /* We have to test for regs NULL since init will get here first thing at boot */
+ if (!current->thread.regs)
+ return;
- trap = current->thread.regs ? TRAP(current->thread.regs) : 0UL;
- switch (trap) {
- case 0x300:
- is_exec = false;
- break;
- case 0x400:
- is_exec = true;
- break;
- default:
+ /* We also avoid filling the hash if not coming from a fault */
+ if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
return;
- }
- hash_preload(vma->vm_mm, address, is_exec, trap);
+ hash_preload(vma->vm_mm, address);
}
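Taken together, update_mmu_cache() ends up as sketched below. The function head and any surrounding hash-MMU ifdefs are assumed from context; only the body lines visible in the hunk are certain:

/* Sketch of update_mmu_cache() after the hunk above applies (assumed context) */
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
		      pte_t *ptep)
{
	/*
	 * We don't need to worry about _PAGE_PRESENT here because we are
	 * called with either mm->page_table_lock held or ptl lock held
	 */

	/* We only want HPTEs for linux PTEs that have _PAGE_ACCESSED set */
	if (!pte_young(*ptep) || address >= TASK_SIZE)
		return;

	/* We have to test for regs NULL since init will get here first thing at boot */
	if (!current->thread.regs)
		return;

	/* We also avoid filling the hash if not coming from a fault */
	if (TRAP(current->thread.regs) != 0x300 && TRAP(current->thread.regs) != 0x400)
		return;

	hash_preload(vma->vm_mm, address);
}

The trap/is_exec bookkeeping disappears entirely: 0x300 (data storage) and 0x400 (instruction storage) are still the only traps for which the hash is preloaded, but the distinction between the two no longer has to be passed down.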
#ifdef CONFIG_PPC32
-void hash_preload(struct mm_struct *mm, unsigned long ea,
- bool is_exec, unsigned long trap);
+void hash_preload(struct mm_struct *mm, unsigned long ea);
extern void mapin_ram(void);
extern void setbat(int index, unsigned long virt, phys_addr_t phys,
unsigned int size, pgprot_t prot);
map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
if (ktext)
- hash_preload(&init_mm, v, false, 0x300);
+ hash_preload(&init_mm, v);
#endif
v += PAGE_SIZE;
p += PAGE_SIZE;
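Finally, the mapping loop this last hunk lives in reads roughly as follows; the function head, the ktext test and the loop bounds are assumed from the surrounding __mapin_ram_chunk(), only the hunk lines are certain:

/* Sketch, assuming the surrounding __mapin_ram_chunk() context */
static void __init __mapin_ram_chunk(unsigned long offset, unsigned long top)
{
	unsigned long v, s;
	phys_addr_t p;
	int ktext;

	s = offset;
	v = PAGE_OFFSET + s;
	p = memstart_addr + s;
	for (; s < top; s += PAGE_SIZE) {
		ktext = ((char *)v >= _stext && (char *)v < etext);
		map_kernel_page(v, p, ktext ? PAGE_KERNEL_TEXT : PAGE_KERNEL);
#ifdef CONFIG_PPC_BOOK3S_32
		if (ktext)
			hash_preload(&init_mm, v);	/* preload kernel text into the hash table */
#endif
		v += PAGE_SIZE;
		p += PAGE_SIZE;
	}
}

Kernel text pages are still preloaded into the hash table on BOOK3S_32, just without having to fake an is_exec/trap pair for a call that does not come from a fault.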