Merge branch 'topic/paca' into next
author Michael Ellerman <mpe@ellerman.id.au>
Fri, 30 Mar 2018 13:11:24 +0000 (00:11 +1100)
committer Michael Ellerman <mpe@ellerman.id.au>
Fri, 30 Mar 2018 22:09:36 +0000 (09:09 +1100)
Bring in yet another series that touches KVM code, and might need to
be merged into the kvm-ppc branch to resolve conflicts.

This required some changes in pnv_power9_force_smt4_catch/release()
due to the paca array becoming an array of pointers.

24 files changed:
1  2 
arch/powerpc/include/asm/book3s/64/hash.h
arch/powerpc/include/asm/paca.h
arch/powerpc/include/asm/setup.h
arch/powerpc/kernel/asm-offsets.c
arch/powerpc/kernel/paca.c
arch/powerpc/kernel/prom.c
arch/powerpc/kernel/setup-common.c
arch/powerpc/kernel/setup_64.c
arch/powerpc/kernel/sysfs.c
arch/powerpc/kvm/book3s_hv.c
arch/powerpc/kvm/book3s_hv_rmhandlers.S
arch/powerpc/mm/hash_utils_64.c
arch/powerpc/mm/mem.c
arch/powerpc/mm/numa.c
arch/powerpc/mm/pgtable-book3s64.c
arch/powerpc/mm/pgtable-radix.c
arch/powerpc/mm/tlb-radix.c
arch/powerpc/platforms/powernv/idle.c
arch/powerpc/platforms/powernv/setup.c
arch/powerpc/platforms/pseries/hotplug-cpu.c
arch/powerpc/platforms/pseries/lpar.c
arch/powerpc/platforms/pseries/setup.c
arch/powerpc/platforms/pseries/smp.c
arch/powerpc/xmon/xmon.c

Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
Simple merge
@@@ -2256,10 -2240,9 +2257,10 @@@ static void kvmppc_start_thread(struct 
                vcpu->arch.thread_cpu = cpu;
                cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
        }
-       tpaca = &paca[cpu];
+       tpaca = paca_ptrs[cpu];
        tpaca->kvm_hstate.kvm_vcpu = vcpu;
        tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
 +      tpaca->kvm_hstate.fake_suspend = 0;
        /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
        smp_wmb();
        tpaca->kvm_hstate.kvm_vcore = vc;
Simple merge
Simple merge
Simple merge
@@@ -155,15 -155,15 +155,15 @@@ void mmu_cleanup_all(void
  }
  
  #ifdef CONFIG_MEMORY_HOTPLUG
- int __meminit create_section_mapping(unsigned long start, unsigned long end)
 -int create_section_mapping(unsigned long start, unsigned long end, int nid)
++int __meminit create_section_mapping(unsigned long start, unsigned long end, int nid)
  {
        if (radix_enabled())
-               return radix__create_section_mapping(start, end);
+               return radix__create_section_mapping(start, end, nid);
  
-       return hash__create_section_mapping(start, end);
+       return hash__create_section_mapping(start, end, nid);
  }
  
 -int remove_section_mapping(unsigned long start, unsigned long end)
 +int __meminit remove_section_mapping(unsigned long start, unsigned long end)
  {
        if (radix_enabled())
                return radix__remove_section_mapping(start, end);
@@@ -687,30 -729,6 +750,30 @@@ static void free_pmd_table(pmd_t *pmd_s
        pud_clear(pud);
  }
  
-       create_physical_mapping(params->aligned_start, params->start);
-       create_physical_mapping(params->end, params->aligned_end);
 +struct change_mapping_params {
 +      pte_t *pte;
 +      unsigned long start;
 +      unsigned long end;
 +      unsigned long aligned_start;
 +      unsigned long aligned_end;
 +};
 +
 +static int __meminit stop_machine_change_mapping(void *data)
 +{
 +      struct change_mapping_params *params =
 +                      (struct change_mapping_params *)data;
 +
 +      if (!data)
 +              return -1;
 +
 +      spin_unlock(&init_mm.page_table_lock);
 +      pte_clear(&init_mm, params->aligned_start, params->pte);
++      create_physical_mapping(params->aligned_start, params->start, -1);
++      create_physical_mapping(params->end, params->aligned_end, -1);
 +      spin_lock(&init_mm.page_table_lock);
 +      return 0;
 +}
 +
  static void remove_pte_table(pte_t *pte_start, unsigned long addr,
                             unsigned long end)
  {
@@@ -863,12 -853,12 +926,12 @@@ static void __meminit remove_pagetable(
        radix__flush_tlb_kernel_range(start, end);
  }
  
- int __meminit radix__create_section_mapping(unsigned long start, unsigned long end)
 -int __ref radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
++int __meminit radix__create_section_mapping(unsigned long start, unsigned long end, int nid)
  {
-       return create_physical_mapping(start, end);
+       return create_physical_mapping(start, end, nid);
  }
  
 -int radix__remove_section_mapping(unsigned long start, unsigned long end)
 +int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
  {
        remove_pagetable(start, end);
        return 0;
Simple merge
@@@ -388,86 -387,6 +388,82 @@@ void power9_idle(void
        power9_idle_type(pnv_default_stop_val, pnv_default_stop_mask);
  }
  
-       struct paca_struct *tpaca;
 +#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 +/*
 + * This is used in working around bugs in thread reconfiguration
 + * on POWER9 (at least up to Nimbus DD2.2) relating to transactional
 + * memory and the way that XER[SO] is checkpointed.
 + * This function forces the core into SMT4 in order by asking
 + * all other threads not to stop, and sending a message to any
 + * that are in a stop state.
 + * Must be called with preemption disabled.
 + *
 + * DO NOT call this unless cpu_has_feature(CPU_FTR_P9_TM_XER_SO_BUG) is
 + * true; otherwise this function will hang the system, due to the
 + * optimization in power9_idle_stop.
 + */
 +void pnv_power9_force_smt4_catch(void)
 +{
 +      int cpu, cpu0, thr;
-       tpaca = &paca[cpu0];
 +      int awake_threads = 1;          /* this thread is awake */
 +      int poke_threads = 0;
 +      int need_awake = threads_per_core;
 +
 +      cpu = smp_processor_id();
 +      cpu0 = cpu & ~(threads_per_core - 1);
-                       atomic_inc(&tpaca[thr].dont_stop);
 +      for (thr = 0; thr < threads_per_core; ++thr) {
 +              if (cpu != cpu0 + thr)
-               if (!tpaca[thr].requested_psscr)
++                      atomic_inc(&paca_ptrs[cpu0+thr]->dont_stop);
 +      }
 +      /* order setting dont_stop vs testing requested_psscr */
 +      mb();
 +      for (thr = 0; thr < threads_per_core; ++thr) {
-                                          tpaca[thr].hw_cpu_id);
++              if (!paca_ptrs[cpu0+thr]->requested_psscr)
 +                      ++awake_threads;
 +              else
 +                      poke_threads |= (1 << thr);
 +      }
 +
 +      /* If at least 3 threads are awake, the core is in SMT4 already */
 +      if (awake_threads < need_awake) {
 +              /* We have to wake some threads; we'll use msgsnd */
 +              for (thr = 0; thr < threads_per_core; ++thr) {
 +                      if (poke_threads & (1 << thr)) {
 +                              ppc_msgsnd_sync();
 +                              ppc_msgsnd(PPC_DBELL_MSGTYPE, 0,
-                                   !tpaca[thr].requested_psscr) {
++                                         paca_ptrs[cpu0+thr]->hw_cpu_id);
 +                      }
 +              }
 +              /* now spin until at least 3 threads are awake */
 +              do {
 +                      for (thr = 0; thr < threads_per_core; ++thr) {
 +                              if ((poke_threads & (1 << thr)) &&
-       struct paca_struct *tpaca;
++                                  !paca_ptrs[cpu0+thr]->requested_psscr) {
 +                                      ++awake_threads;
 +                                      poke_threads &= ~(1 << thr);
 +                              }
 +                      }
 +              } while (awake_threads < need_awake);
 +      }
 +}
 +EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_catch);
 +
 +void pnv_power9_force_smt4_release(void)
 +{
 +      int cpu, cpu0, thr;
-       tpaca = &paca[cpu0];
 +
 +      cpu = smp_processor_id();
 +      cpu0 = cpu & ~(threads_per_core - 1);
-                       atomic_dec(&tpaca[thr].dont_stop);
 +
 +      /* clear all the dont_stop flags */
 +      for (thr = 0; thr < threads_per_core; ++thr) {
 +              if (cpu != cpu0 + thr)
++                      atomic_dec(&paca_ptrs[cpu0+thr]->dont_stop);
 +      }
 +}
 +EXPORT_SYMBOL_GPL(pnv_power9_force_smt4_release);
 +#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
 +
  #ifdef CONFIG_HOTPLUG_CPU
  static void pnv_program_cpu_hotplug_lpcr(unsigned int cpu, u64 lpcr_val)
  {
Simple merge
Simple merge
Simple merge