Merge tag 'efi-next-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
author Linus Torvalds <torvalds@linux-foundation.org>
Sun, 21 Feb 2021 03:09:26 +0000 (19:09 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Sun, 21 Feb 2021 03:09:26 +0000 (19:09 -0800)
Pull EFI updates from Ard Biesheuvel via Borislav Petkov:
 "A few cleanups left and right, some of which were part of a initrd
  measured boot series that needs some more work, and so only the
  cleanup patches have been included for this release"

* tag 'efi-next-for-v5.12' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  efi/arm64: Update debug prints to reflect other entropy sources
  efi: x86: clean up previous struct mm switching
  efi: x86: move mixed mode stack PA variable out of 'efi_scratch'
  efi/libstub: move TPM related prototypes into efistub.h
  efi/libstub: fix prototype of efi_tcg2_protocol::get_event_log()
  efi/libstub: whitespace cleanup
  efi: ia64: move IA64-only declarations to new asm/efi.h header

arch/ia64/kernel/time.c
arch/ia64/mm/init.c
arch/x86/platform/efi/efi_64.c

diff --combined arch/ia64/kernel/time.c
@@@ -26,6 -26,7 +26,7 @@@
  #include <linux/sched/cputime.h>
  
  #include <asm/delay.h>
+ #include <asm/efi.h>
  #include <asm/hw_irq.h>
  #include <asm/ptrace.h>
  #include <asm/sal.h>
@@@ -171,34 -172,29 +172,34 @@@ void vtime_account_hardirq(struct task_
  static irqreturn_t
  timer_interrupt (int irq, void *dev_id)
  {
 -      unsigned long cur_itm, new_itm, ticks;
 +      unsigned long new_itm;
  
        if (cpu_is_offline(smp_processor_id())) {
                return IRQ_HANDLED;
        }
  
        new_itm = local_cpu_data->itm_next;
 -      cur_itm = ia64_get_itc();
  
 -      if (!time_after(cur_itm, new_itm)) {
 +      if (!time_after(ia64_get_itc(), new_itm))
                printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
 -                     cur_itm, new_itm);
 -              ticks = 1;
 -      } else {
 -              ticks = DIV_ROUND_UP(cur_itm - new_itm,
 -                                   local_cpu_data->itm_delta);
 -              new_itm += ticks * local_cpu_data->itm_delta;
 -      }
 +                     ia64_get_itc(), new_itm);
 +
 +      while (1) {
 +              new_itm += local_cpu_data->itm_delta;
 +
 +              legacy_timer_tick(smp_processor_id() == time_keeper_id);
 +
 +              local_cpu_data->itm_next = new_itm;
  
 -      if (smp_processor_id() != time_keeper_id)
 -              ticks = 0;
 +              if (time_after(new_itm, ia64_get_itc()))
 +                      break;
  
 -      legacy_timer_tick(ticks);
 +              /*
 +               * Allow IPIs to interrupt the timer loop.
 +               */
 +              local_irq_enable();
 +              local_irq_disable();
 +      }
  
        do {
                /*
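
A note on the timer_interrupt() change above: instead of computing how many
ticks elapsed and handing that count to legacy_timer_tick() once, the new code
accounts one tick per loop iteration, advancing itm_next by itm_delta and
re-reading the ITC until the next deadline lies in the future again (briefly
re-enabling interrupts between iterations so pending IPIs can run). The
standalone C sketch below only models that catch-up pattern; now_counter(),
TICK_PERIOD, one_tick() and timer_catch_up() are illustrative stand-ins, not
kernel interfaces, and the real code uses time_after() so counter wraparound
is handled correctly.

  #include <stdio.h>

  static unsigned long itc;          /* free-running counter, models ia64_get_itc() */
  #define TICK_PERIOD 100UL          /* models local_cpu_data->itm_delta */

  static unsigned long now_counter(void) { return itc; }
  static void one_tick(void) { printf("tick\n"); }   /* models one timer tick */

  /* Catch-up loop: account one tick at a time until the next deadline is in
   * the future again, instead of computing a tick count up front. */
  static unsigned long timer_catch_up(unsigned long next_deadline)
  {
          do {
                  next_deadline += TICK_PERIOD;
                  one_tick();
          } while (next_deadline <= now_counter());
          return next_deadline;      /* becomes the new itm_next */
  }

  int main(void)
  {
          unsigned long next = 0;

          itc = 350;                 /* pretend several periods elapsed since 'next' */
          next = timer_catch_up(next);
          printf("new deadline: %lu\n", next);
          return 0;
  }

With the values above the model prints one "tick" line per elapsed period
(four of them) and then a new deadline of 400.
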
diff --combined arch/ia64/mm/init.c
@@@ -27,6 -27,7 +27,7 @@@
  #include <linux/swiotlb.h>
  
  #include <asm/dma.h>
+ #include <asm/efi.h>
  #include <asm/io.h>
  #include <asm/numa.h>
  #include <asm/patch.h>
@@@ -536,7 -537,7 +537,7 @@@ virtual_memmap_init(u64 start, u64 end
  
        if (map_start < map_end)
                memmap_init_zone((unsigned long)(map_end - map_start),
 -                               args->nid, args->zone, page_to_pfn(map_start),
 +                               args->nid, args->zone, page_to_pfn(map_start), page_to_pfn(map_end),
                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        return 0;
  }
@@@ -546,7 -547,7 +547,7 @@@ memmap_init (unsigned long size, int ni
             unsigned long start_pfn)
  {
        if (!vmem_map) {
 -              memmap_init_zone(size, nid, zone, start_pfn,
 +              memmap_init_zone(size, nid, zone, start_pfn, start_pfn + size,
                                 MEMINIT_EARLY, NULL, MIGRATE_MOVABLE);
        } else {
                struct page *start;
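
Both init.c hunks adapt ia64 to a memmap_init_zone() that now takes the end of
the PFN range as an explicit argument; the callers simply pass the end they
already know, page_to_pfn(map_end) or start_pfn + size. The sketch below is a
minimal, self-contained model of passing an exclusive end PFN alongside the
start (as the start_pfn + size call suggests); model_memmap_init(), fake_page
and fake_memmap are made-up names, not the kernel's.

  #include <stdio.h>

  struct fake_page { int initialised; };          /* stand-in for struct page */
  static struct fake_page fake_memmap[16];

  /* Model of an init helper taking both ends of the PFN range explicitly. */
  static void model_memmap_init(unsigned long start_pfn, unsigned long end_pfn)
  {
          unsigned long pfn;

          for (pfn = start_pfn; pfn < end_pfn; pfn++)
                  fake_memmap[pfn].initialised = 1;
  }

  int main(void)
  {
          unsigned long start_pfn = 4, size = 8;

          /* Caller passes the exclusive end, mirroring start_pfn + size above. */
          model_memmap_init(start_pfn, start_pfn + size);

          printf("pfn %lu initialised: %d\n", start_pfn,
                 fake_memmap[start_pfn].initialised);
          return 0;
  }
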
diff --combined arch/x86/platform/efi/efi_64.c
   * 0xffff_ffff_0000_0000 and limit EFI VA mapping space to 64G.
   */
  static u64 efi_va = EFI_VA_START;
- struct efi_scratch efi_scratch;
- EXPORT_SYMBOL_GPL(efi_mm);
+ static struct mm_struct *efi_prev_mm;
  
  /*
   * We need our own copy of the higher levels of the page tables
@@@ -115,12 -112,31 +112,12 @@@ void efi_sync_low_kernel_mappings(void
        pud_t *pud_k, *pud_efi;
        pgd_t *efi_pgd = efi_mm.pgd;
  
 -      /*
 -       * We can share all PGD entries apart from the one entry that
 -       * covers the EFI runtime mapping space.
 -       *
 -       * Make sure the EFI runtime region mappings are guaranteed to
 -       * only span a single PGD entry and that the entry also maps
 -       * other important kernel regions.
 -       */
 -      MAYBE_BUILD_BUG_ON(pgd_index(EFI_VA_END) != pgd_index(MODULES_END));
 -      MAYBE_BUILD_BUG_ON((EFI_VA_START & PGDIR_MASK) !=
 -                      (EFI_VA_END & PGDIR_MASK));
 -
        pgd_efi = efi_pgd + pgd_index(PAGE_OFFSET);
        pgd_k = pgd_offset_k(PAGE_OFFSET);
  
        num_entries = pgd_index(EFI_VA_END) - pgd_index(PAGE_OFFSET);
        memcpy(pgd_efi, pgd_k, sizeof(pgd_t) * num_entries);
  
 -      /*
 -       * As with PGDs, we share all P4D entries apart from the one entry
 -       * that covers the EFI runtime mapping space.
 -       */
 -      BUILD_BUG_ON(p4d_index(EFI_VA_END) != p4d_index(MODULES_END));
 -      BUILD_BUG_ON((EFI_VA_START & P4D_MASK) != (EFI_VA_END & P4D_MASK));
 -
        pgd_efi = efi_pgd + pgd_index(EFI_VA_END);
        pgd_k = pgd_offset_k(EFI_VA_END);
        p4d_efi = p4d_offset(pgd_efi, 0);
@@@ -237,7 -253,7 +234,7 @@@ int __init efi_setup_page_tables(unsign
                return 1;
        }
  
-       efi_scratch.phys_stack = page_to_phys(page + 1); /* stack grows down */
+       efi_mixed_mode_stack_pa = page_to_phys(page + 1); /* stack grows down */
  
        npages = (_etext - _text) >> PAGE_SHIFT;
        text = __pa(_text);
@@@ -462,11 -478,17 +459,17 @@@ void __init efi_dump_pagetable(void
   * cannot change under us.
   * It should be ensured that there are no concurrent calls to this function.
   */
- void efi_switch_mm(struct mm_struct *mm)
+ void efi_enter_mm(void)
+ {
+       efi_prev_mm = current->active_mm;
+       current->active_mm = &efi_mm;
+       switch_mm(efi_prev_mm, &efi_mm, NULL);
+ }
+ void efi_leave_mm(void)
  {
-       efi_scratch.prev_mm = current->active_mm;
-       current->active_mm = mm;
-       switch_mm(efi_scratch.prev_mm, mm, NULL);
+       current->active_mm = efi_prev_mm;
+       switch_mm(&efi_mm, efi_prev_mm, NULL);
  }
  
  static DEFINE_SPINLOCK(efi_runtime_lock);
@@@ -530,12 -552,12 +533,12 @@@ efi_thunk_set_virtual_address_map(unsig
        efi_sync_low_kernel_mappings();
        local_irq_save(flags);
  
-       efi_switch_mm(&efi_mm);
+       efi_enter_mm();
  
        status = __efi_thunk(set_virtual_address_map, memory_map_size,
                             descriptor_size, descriptor_version, virtual_map);
  
-       efi_switch_mm(efi_scratch.prev_mm);
+       efi_leave_mm();
        local_irq_restore(flags);
  
        return status;
@@@ -829,7 -851,7 +832,7 @@@ efi_set_virtual_address_map(unsigned lo
                                                         descriptor_size,
                                                         descriptor_version,
                                                         virtual_map);
-       efi_switch_mm(&efi_mm);
+       efi_enter_mm();
  
        kernel_fpu_begin();
  
        /* grab the virtually remapped EFI runtime services table pointer */
        efi.runtime = READ_ONCE(systab->runtime);
  
-       efi_switch_mm(efi_scratch.prev_mm);
+       efi_leave_mm();
  
        return status;
  }
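
The efi_64.c hunks replace the exported struct efi_scratch plus efi_switch_mm()
pairing with a file-local efi_prev_mm pointer and the efi_enter_mm() /
efi_leave_mm() helpers, so callers such as efi_thunk_set_virtual_address_map()
no longer have to remember which mm to restore. The standalone sketch below
only models that save/switch/restore pairing; the string-tag "mm" values and
the model_* names are illustrative, while the real helpers act on struct
mm_struct via switch_mm() exactly as shown in the diff.

  #include <stdio.h>

  static const char *current_mm = "task_mm";  /* models current->active_mm */
  static const char *prev_mm;                 /* models the file-local efi_prev_mm */

  static void model_efi_enter_mm(void)
  {
          prev_mm = current_mm;               /* remember the caller's mm */
          current_mm = "efi_mm";              /* switch to the EFI mapping */
  }

  static void model_efi_leave_mm(void)
  {
          current_mm = prev_mm;               /* restore whatever was active before */
  }

  int main(void)
  {
          printf("before: %s\n", current_mm);

          model_efi_enter_mm();
          printf("during EFI runtime call: %s\n", current_mm);
          /* ... firmware call would happen here ... */
          model_efi_leave_mm();

          printf("after: %s\n", current_mm);
          return 0;
  }

Hiding the previously-active mm behind the helpers is what makes the old
efi_scratch.prev_mm field, and the struct efi_scratch definition removed near
the top of the file, unnecessary.
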