Merge tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git...
author Linus Torvalds <torvalds@linux-foundation.org>
Mon, 20 Apr 2015 17:19:03 +0000 (10:19 -0700)
committer Linus Torvalds <torvalds@linux-foundation.org>
Mon, 20 Apr 2015 17:19:03 +0000 (10:19 -0700)
Pull final removal of deprecated cpus_* cpumask functions from Rusty Russell:
 "This is the final removal (after several years!) of the obsolete
  cpus_* functions, prompted by their mis-use in staging.

  With these functions removed, all cpu functions should only iterate to
  nr_cpu_ids, so we finally only allocate that many bits when cpumasks
  are allocated offstack"

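The theme across these patches is a mechanical API switch: the removed
cpus_* helpers took a cpumask_t by value and scanned all NR_CPUS bits,
while the cpumask_* replacements take a pointer and stop at nr_cpu_ids,
which is what allows offstack masks to be allocated with only
nr_cpumask_bits.  A minimal sketch of the conversion, assuming a kernel
context with <linux/cpumask.h>; the function and variable names below are
illustrative, not taken from any patch in this pull:

	static void cpumask_api_example(int cpu)
	{
		cpumask_t mask = CPU_MASK_NONE;

		/* Removed style (mask passed by value, iterates to NR_CPUS):
		 *   cpu_set(cpu, mask);
		 *   if (cpu_isset(cpu, mask))
		 *           cpu_clear(cpu, mask);
		 */

		/* Current style (mask passed by pointer, iterates to nr_cpu_ids) */
		cpumask_set_cpu(cpu, &mask);
		if (cpumask_test_cpu(cpu, &mask))
			cpumask_clear_cpu(cpu, &mask);
	}
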
* tag 'cpumask-next-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/rusty/linux: (25 commits)
  cpumask: remove __first_cpu / __next_cpu
  cpumask: resurrect CPU_MASK_CPU0
  linux/cpumask.h: add typechecking to cpumask_test_cpu
  cpumask: only allocate nr_cpumask_bits.
  Fix weird uses of num_online_cpus().
  cpumask: remove deprecated functions.
  mips: fix obsolete cpumask_of_cpu usage.
  x86: fix more deprecated cpu function usage.
  ia64: remove deprecated cpus_ usage.
  powerpc: fix deprecated CPU_MASK_CPU0 usage.
  CPU_MASK_ALL/CPU_MASK_NONE: remove from deprecated region.
  staging/lustre/o2iblnd: Don't use cpus_weight
  staging/lustre/libcfs: replace deprecated cpus_ calls with cpumask_
  staging/lustre/ptlrpc: Do not use deprecated cpus_* functions
  blackfin: fix up obsolete cpu function usage.
  parisc: fix up obsolete cpu function usage.
  tile: fix up obsolete cpu function usage.
  arm64: fix up obsolete cpu function usage.
  mips: fix up obsolete cpu function usage.
  x86: fix up obsolete cpu function usage.
  ...

13 files changed:
arch/arm64/kernel/smp.c
arch/mips/kernel/process.c
arch/mips/kernel/smp-cps.c
arch/mips/kernel/smp.c
arch/mips/kernel/traps.c
arch/powerpc/include/asm/cputhreads.h
arch/sparc/kernel/time_32.c
arch/tile/kernel/setup.c
drivers/irqchip/irq-gic-v3.c
drivers/irqchip/irq-mips-gic.c
drivers/net/ethernet/tile/tilegx.c
lib/Kconfig
lib/cpumask.c

diff --combined arch/arm64/kernel/smp.c
@@@ -151,7 -151,6 +151,7 @@@ asmlinkage void secondary_start_kernel(
         */
        cpu_set_reserved_ttbr0();
        flush_tlb_all();
 +      cpu_set_default_tcr_t0sz();
  
        preempt_disable();
        trace_hardirqs_off();
@@@ -310,7 -309,7 +310,7 @@@ void cpu_die(void
  void __init smp_cpus_done(unsigned int max_cpus)
  {
        pr_info("SMP: Total of %d processors activated.\n", num_online_cpus());
 -      apply_alternatives_all();
 +      do_post_cpus_up_work();
  }
  
  void __init smp_prepare_boot_cpu(void)
@@@ -636,7 -635,7 +636,7 @@@ void smp_send_stop(void
                cpumask_t mask;
  
                cpumask_copy(&mask, cpu_online_mask);
-               cpu_clear(smp_processor_id(), mask);
+               cpumask_clear_cpu(smp_processor_id(), &mask);
  
                smp_cross_call(&mask, IPI_CPU_STOP);
        }
@@@ -49,7 -49,7 +49,7 @@@
  void arch_cpu_idle_dead(void)
  {
        /* What the heck is this check doing ? */
-       if (!cpu_isset(smp_processor_id(), cpu_callin_map))
+       if (!cpumask_test_cpu(smp_processor_id(), &cpu_callin_map))
                play_dead();
  }
  #endif
@@@ -107,11 -107,8 +107,11 @@@ int arch_dup_task_struct(struct task_st
        return 0;
  }
  
 +/*
 + * Copy architecture-specific thread state
 + */
  int copy_thread(unsigned long clone_flags, unsigned long usp,
 -      unsigned long arg, struct task_struct *p)
 +      unsigned long kthread_arg, struct task_struct *p)
  {
        struct thread_info *ti = task_thread_info(p);
        struct pt_regs *childregs, *regs = current_pt_regs();
        childksp = (unsigned long) childregs;
        p->thread.cp0_status = read_c0_status() & ~(ST0_CU2|ST0_CU1);
        if (unlikely(p->flags & PF_KTHREAD)) {
 +              /* kernel thread */
                unsigned long status = p->thread.cp0_status;
                memset(childregs, 0, sizeof(struct pt_regs));
                ti->addr_limit = KERNEL_DS;
                p->thread.reg16 = usp; /* fn */
 -              p->thread.reg17 = arg;
 +              p->thread.reg17 = kthread_arg;
                p->thread.reg29 = childksp;
                p->thread.reg31 = (unsigned long) ret_from_kernel_thread;
  #if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
                childregs->cp0_status = status;
                return 0;
        }
 +
 +      /* user thread */
        *childregs = *regs;
        childregs->regs[7] = 0; /* Clear error flag */
        childregs->regs[2] = 0; /* Child gets zero as return value */
@@@ -88,12 -88,6 +88,12 @@@ static void __init cps_smp_setup(void
  
        /* Make core 0 coherent with everything */
        write_gcr_cl_coherence(0xff);
 +
 +#ifdef CONFIG_MIPS_MT_FPAFF
 +      /* If we have an FPU, enroll ourselves in the FPU-full mask */
 +      if (cpu_has_fpu)
 +              cpu_set(0, mt_fpu_cpumask);
 +#endif /* CONFIG_MIPS_MT_FPAFF */
  }
  
  static void __init cps_prepare_cpus(unsigned int max_cpus)
@@@ -290,7 -284,7 +290,7 @@@ static void cps_smp_finish(void
  #ifdef CONFIG_MIPS_MT_FPAFF
        /* If we have an FPU, enroll ourselves in the FPU-full mask */
        if (cpu_has_fpu)
-               cpu_set(smp_processor_id(), mt_fpu_cpumask);
+               cpumask_set_cpu(smp_processor_id(), &mt_fpu_cpumask);
  #endif /* CONFIG_MIPS_MT_FPAFF */
  
        local_irq_enable();
@@@ -313,7 -307,7 +313,7 @@@ static int cps_cpu_disable(void
        atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
        smp_mb__after_atomic();
        set_cpu_online(cpu, false);
-       cpu_clear(cpu, cpu_callin_map);
+       cpumask_clear_cpu(cpu, &cpu_callin_map);
  
        return 0;
  }
diff --combined arch/mips/kernel/smp.c
@@@ -75,30 -75,30 +75,30 @@@ static inline void set_cpu_sibling_map(
  {
        int i;
  
-       cpu_set(cpu, cpu_sibling_setup_map);
+       cpumask_set_cpu(cpu, &cpu_sibling_setup_map);
  
        if (smp_num_siblings > 1) {
-               for_each_cpu_mask(i, cpu_sibling_setup_map) {
+               for_each_cpu(i, &cpu_sibling_setup_map) {
                        if (cpu_data[cpu].package == cpu_data[i].package &&
                                    cpu_data[cpu].core == cpu_data[i].core) {
-                               cpu_set(i, cpu_sibling_map[cpu]);
-                               cpu_set(cpu, cpu_sibling_map[i]);
+                               cpumask_set_cpu(i, &cpu_sibling_map[cpu]);
+                               cpumask_set_cpu(cpu, &cpu_sibling_map[i]);
                        }
                }
        } else
-               cpu_set(cpu, cpu_sibling_map[cpu]);
+               cpumask_set_cpu(cpu, &cpu_sibling_map[cpu]);
  }
  
  static inline void set_cpu_core_map(int cpu)
  {
        int i;
  
-       cpu_set(cpu, cpu_core_setup_map);
+       cpumask_set_cpu(cpu, &cpu_core_setup_map);
  
-       for_each_cpu_mask(i, cpu_core_setup_map) {
+       for_each_cpu(i, &cpu_core_setup_map) {
                if (cpu_data[cpu].package == cpu_data[i].package) {
-                       cpu_set(i, cpu_core_map[cpu]);
-                       cpu_set(cpu, cpu_core_map[i]);
+                       cpumask_set_cpu(i, &cpu_core_map[cpu]);
+                       cpumask_set_cpu(cpu, &cpu_core_map[i]);
                }
        }
  }
@@@ -138,7 -138,7 +138,7 @@@ asmlinkage void start_secondary(void
        cpu = smp_processor_id();
        cpu_data[cpu].udelay_val = loops_per_jiffy;
  
-       cpu_set(cpu, cpu_coherent_mask);
+       cpumask_set_cpu(cpu, &cpu_coherent_mask);
        notify_cpu_starting(cpu);
  
        set_cpu_online(cpu, true);
        set_cpu_sibling_map(cpu);
        set_cpu_core_map(cpu);
  
-       cpu_set(cpu, cpu_callin_map);
+       cpumask_set_cpu(cpu, &cpu_callin_map);
  
        synchronise_count_slave(cpu);
  
@@@ -176,8 -176,10 +176,8 @@@ static void stop_this_cpu(void *dummy
         * Remove this CPU:
         */
        set_cpu_online(smp_processor_id(), false);
 -      for (;;) {
 -              if (cpu_wait)
 -                      (*cpu_wait)();          /* Wait if available. */
 -      }
 +      local_irq_disable();
 +      while (1);
  }
  
  void smp_send_stop(void)
@@@ -208,7 -210,7 +208,7 @@@ void smp_prepare_boot_cpu(void
  {
        set_cpu_possible(0, true);
        set_cpu_online(0, true);
-       cpu_set(0, cpu_callin_map);
+       cpumask_set_cpu(0, &cpu_callin_map);
  }
  
  int __cpu_up(unsigned int cpu, struct task_struct *tidle)
        /*
         * Trust is futile.  We should really have timeouts ...
         */
-       while (!cpu_isset(cpu, cpu_callin_map))
+       while (!cpumask_test_cpu(cpu, &cpu_callin_map))
                udelay(100);
  
        synchronise_count_master(cpu);
diff --combined arch/mips/kernel/traps.c
@@@ -12,7 -12,6 +12,7 @@@
   * Copyright (C) 2000, 2001, 2012 MIPS Technologies, Inc.  All rights reserved.
   * Copyright (C) 2014, Imagination Technologies Ltd.
   */
 +#include <linux/bitops.h>
  #include <linux/bug.h>
  #include <linux/compiler.h>
  #include <linux/context_tracking.h>
@@@ -700,60 -699,29 +700,60 @@@ asmlinkage void do_ov(struct pt_regs *r
        exception_exit(prev_state);
  }
  
 -int process_fpemu_return(int sig, void __user *fault_addr)
 +int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
  {
 -      if (sig == SIGSEGV || sig == SIGBUS) {
 -              struct siginfo si = {0};
 +      struct siginfo si = { 0 };
 +
 +      switch (sig) {
 +      case 0:
 +              return 0;
 +
 +      case SIGFPE:
                si.si_addr = fault_addr;
                si.si_signo = sig;
 -              if (sig == SIGSEGV) {
 -                      down_read(&current->mm->mmap_sem);
 -                      if (find_vma(current->mm, (unsigned long)fault_addr))
 -                              si.si_code = SEGV_ACCERR;
 -                      else
 -                              si.si_code = SEGV_MAPERR;
 -                      up_read(&current->mm->mmap_sem);
 -              } else {
 -                      si.si_code = BUS_ADRERR;
 -              }
 +              /*
 +               * Inexact can happen together with Overflow or Underflow.
 +               * Respect the mask to deliver the correct exception.
 +               */
 +              fcr31 &= (fcr31 & FPU_CSR_ALL_E) <<
 +                       (ffs(FPU_CSR_ALL_X) - ffs(FPU_CSR_ALL_E));
 +              if (fcr31 & FPU_CSR_INV_X)
 +                      si.si_code = FPE_FLTINV;
 +              else if (fcr31 & FPU_CSR_DIV_X)
 +                      si.si_code = FPE_FLTDIV;
 +              else if (fcr31 & FPU_CSR_OVF_X)
 +                      si.si_code = FPE_FLTOVF;
 +              else if (fcr31 & FPU_CSR_UDF_X)
 +                      si.si_code = FPE_FLTUND;
 +              else if (fcr31 & FPU_CSR_INE_X)
 +                      si.si_code = FPE_FLTRES;
 +              else
 +                      si.si_code = __SI_FAULT;
 +              force_sig_info(sig, &si, current);
 +              return 1;
 +
 +      case SIGBUS:
 +              si.si_addr = fault_addr;
 +              si.si_signo = sig;
 +              si.si_code = BUS_ADRERR;
                force_sig_info(sig, &si, current);
                return 1;
 -      } else if (sig) {
 +
 +      case SIGSEGV:
 +              si.si_addr = fault_addr;
 +              si.si_signo = sig;
 +              down_read(&current->mm->mmap_sem);
 +              if (find_vma(current->mm, (unsigned long)fault_addr))
 +                      si.si_code = SEGV_ACCERR;
 +              else
 +                      si.si_code = SEGV_MAPERR;
 +              up_read(&current->mm->mmap_sem);
 +              force_sig_info(sig, &si, current);
 +              return 1;
 +
 +      default:
                force_sig(sig, current);
                return 1;
 -      } else {
 -              return 0;
        }
  }
  
@@@ -761,8 -729,7 +761,8 @@@ static int simulate_fp(struct pt_regs *
                       unsigned long old_epc, unsigned long old_ra)
  {
        union mips_instruction inst = { .word = opcode };
 -      void __user *fault_addr = NULL;
 +      void __user *fault_addr;
 +      unsigned long fcr31;
        int sig;
  
        /* If it's obviously not an FP instruction, skip it */
        /* Run the emulator */
        sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
                                       &fault_addr);
 +      fcr31 = current->thread.fpu.fcr31;
  
 -      /* If something went wrong, signal */
 -      process_fpemu_return(sig, fault_addr);
 +      /*
 +       * We can't allow the emulated instruction to leave any of
 +       * the cause bits set in $fcr31.
 +       */
 +      current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
  
        /* Restore the hardware register state */
        own_fpu(1);
  
 +      /* Send a signal if required.  */
 +      process_fpemu_return(sig, fault_addr, fcr31);
 +
        return 0;
  }
  
  asmlinkage void do_fpe(struct pt_regs *regs, unsigned long fcr31)
  {
        enum ctx_state prev_state;
 -      siginfo_t info = {0};
 +      void __user *fault_addr;
 +      int sig;
  
        prev_state = exception_enter();
        if (notify_die(DIE_FP, "FP exception", regs, 0, regs_to_trapnr(regs),
                       SIGFPE) == NOTIFY_STOP)
                goto out;
 +
 +      /* Clear FCSR.Cause before enabling interrupts */
 +      write_32bit_cp1_register(CP1_STATUS, fcr31 & ~FPU_CSR_ALL_X);
 +      local_irq_enable();
 +
        die_if_kernel("FP exception in kernel code", regs);
  
        if (fcr31 & FPU_CSR_UNI_X) {
 -              int sig;
 -              void __user *fault_addr = NULL;
 -
                /*
                 * Unimplemented operation exception.  If we've got the full
                 * software emulator on-board, let's use it...
                /* Run the emulator */
                sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 1,
                                               &fault_addr);
 +              fcr31 = current->thread.fpu.fcr31;
  
                /*
                 * We can't allow the emulated instruction to leave any of
 -               * the cause bit set in $fcr31.
 +               * the cause bits set in $fcr31.
                 */
                current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
  
                /* Restore the hardware register state */
                own_fpu(1);     /* Using the FPU again.  */
 +      } else {
 +              sig = SIGFPE;
 +              fault_addr = (void __user *) regs->cp0_epc;
 +      }
  
 -              /* If something went wrong, signal */
 -              process_fpemu_return(sig, fault_addr);
 -
 -              goto out;
 -      } else if (fcr31 & FPU_CSR_INV_X)
 -              info.si_code = FPE_FLTINV;
 -      else if (fcr31 & FPU_CSR_DIV_X)
 -              info.si_code = FPE_FLTDIV;
 -      else if (fcr31 & FPU_CSR_OVF_X)
 -              info.si_code = FPE_FLTOVF;
 -      else if (fcr31 & FPU_CSR_UDF_X)
 -              info.si_code = FPE_FLTUND;
 -      else if (fcr31 & FPU_CSR_INE_X)
 -              info.si_code = FPE_FLTRES;
 -      else
 -              info.si_code = __SI_FAULT;
 -      info.si_signo = SIGFPE;
 -      info.si_errno = 0;
 -      info.si_addr = (void __user *) regs->cp0_epc;
 -      force_sig_info(SIGFPE, &info, current);
 +      /* Send a signal if required.  */
 +      process_fpemu_return(sig, fault_addr, fcr31);
  
  out:
        exception_exit(prev_state);
@@@ -909,9 -879,9 +909,9 @@@ void do_trap_or_bp(struct pt_regs *regs
                break;
        case BRK_MEMU:
                /*
 -               * Address errors may be deliberately induced by the FPU
 -               * emulator to retake control of the CPU after executing the
 -               * instruction in the delay slot of an emulated branch.
 +               * This breakpoint code is used by the FPU emulator to retake
 +               * control of the CPU after executing the instruction from the
 +               * delay slot of an emulated branch.
                 *
                 * Terminate if exception was recognized as a delay slot return
                 * otherwise handle as normal.
  
  asmlinkage void do_bp(struct pt_regs *regs)
  {
 +      unsigned long epc = msk_isa16_mode(exception_epc(regs));
        unsigned int opcode, bcode;
        enum ctx_state prev_state;
 -      unsigned long epc;
 -      u16 instr[2];
        mm_segment_t seg;
  
        seg = get_fs();
  
        prev_state = exception_enter();
        if (get_isa16_mode(regs->cp0_epc)) {
 -              /* Calculate EPC. */
 -              epc = exception_epc(regs);
 -              if (cpu_has_mmips) {
 -                      if ((__get_user(instr[0], (u16 __user *)msk_isa16_mode(epc)) ||
 -                          (__get_user(instr[1], (u16 __user *)msk_isa16_mode(epc + 2)))))
 -                              goto out_sigsegv;
 -                      opcode = (instr[0] << 16) | instr[1];
 -              } else {
 +              u16 instr[2];
 +
 +              if (__get_user(instr[0], (u16 __user *)epc))
 +                      goto out_sigsegv;
 +
 +              if (!cpu_has_mmips) {
                        /* MIPS16e mode */
 -                      if (__get_user(instr[0],
 -                                     (u16 __user *)msk_isa16_mode(epc)))
 +                      bcode = (instr[0] >> 5) & 0x3f;
 +              } else if (mm_insn_16bit(instr[0])) {
 +                      /* 16-bit microMIPS BREAK */
 +                      bcode = instr[0] & 0xf;
 +              } else {
 +                      /* 32-bit microMIPS BREAK */
 +                      if (__get_user(instr[1], (u16 __user *)(epc + 2)))
                                goto out_sigsegv;
 -                      bcode = (instr[0] >> 6) & 0x3f;
 -                      do_trap_or_bp(regs, bcode, "Break");
 -                      goto out;
 +                      opcode = (instr[0] << 16) | instr[1];
 +                      bcode = (opcode >> 6) & ((1 << 20) - 1);
                }
        } else {
 -              if (__get_user(opcode,
 -                             (unsigned int __user *) exception_epc(regs)))
 +              if (__get_user(opcode, (unsigned int __user *)epc))
                        goto out_sigsegv;
 +              bcode = (opcode >> 6) & ((1 << 20) - 1);
        }
  
        /*
         * Gas is bug-compatible, but not always, grrr...
         * We handle both cases with a simple heuristics.  --macro
         */
 -      bcode = ((opcode >> 6) & ((1 << 20) - 1));
        if (bcode >= (1 << 10))
 -              bcode >>= 10;
 +              bcode = ((bcode & ((1 << 10) - 1)) << 10) | (bcode >> 10);
  
        /*
         * notify the kprobe handlers, if instruction is likely to
@@@ -1063,24 -1033,22 +1063,24 @@@ asmlinkage void do_ri(struct pt_regs *r
         * as quickly as possible.
         */
        if (mipsr2_emulation && cpu_has_mips_r6 &&
 -          likely(user_mode(regs))) {
 -              if (likely(get_user(opcode, epc) >= 0)) {
 -                      status = mipsr2_decoder(regs, opcode);
 -                      switch (status) {
 -                      case 0:
 -                      case SIGEMT:
 -                              task_thread_info(current)->r2_emul_return = 1;
 -                              return;
 -                      case SIGILL:
 -                              goto no_r2_instr;
 -                      default:
 -                              process_fpemu_return(status,
 -                                                   &current->thread.cp0_baduaddr);
 -                              task_thread_info(current)->r2_emul_return = 1;
 -                              return;
 -                      }
 +          likely(user_mode(regs)) &&
 +          likely(get_user(opcode, epc) >= 0)) {
 +              unsigned long fcr31 = 0;
 +
 +              status = mipsr2_decoder(regs, opcode, &fcr31);
 +              switch (status) {
 +              case 0:
 +              case SIGEMT:
 +                      task_thread_info(current)->r2_emul_return = 1;
 +                      return;
 +              case SIGILL:
 +                      goto no_r2_instr;
 +              default:
 +                      process_fpemu_return(status,
 +                                           &current->thread.cp0_baduaddr,
 +                                           fcr31);
 +                      task_thread_info(current)->r2_emul_return = 1;
 +                      return;
                }
        }
  
@@@ -1153,13 -1121,13 +1153,13 @@@ static void mt_ase_fp_affinity(void
                 * restricted the allowed set to exclude any CPUs with FPUs,
                 * we'll skip the procedure.
                 */
-               if (cpus_intersects(current->cpus_allowed, mt_fpu_cpumask)) {
+               if (cpumask_intersects(&current->cpus_allowed, &mt_fpu_cpumask)) {
                        cpumask_t tmask;
  
                        current->thread.user_cpus_allowed
                                = current->cpus_allowed;
-                       cpus_and(tmask, current->cpus_allowed,
-                               mt_fpu_cpumask);
+                       cpumask_and(&tmask, &current->cpus_allowed,
+                                   &mt_fpu_cpumask);
                        set_cpus_allowed_ptr(current, &tmask);
                        set_thread_flag(TIF_FPUBOUND);
                }
@@@ -1325,13 -1293,10 +1325,13 @@@ asmlinkage void do_cpu(struct pt_regs *
        enum ctx_state prev_state;
        unsigned int __user *epc;
        unsigned long old_epc, old31;
 +      void __user *fault_addr;
        unsigned int opcode;
 +      unsigned long fcr31;
        unsigned int cpid;
        int status, err;
        unsigned long __maybe_unused flags;
 +      int sig;
  
        prev_state = exception_enter();
        cpid = (regs->cp0_cause >> CAUSEB_CE) & 3;
                status = -1;
  
                if (unlikely(compute_return_epc(regs) < 0))
 -                      goto out;
 +                      break;
  
                if (get_isa16_mode(regs->cp0_epc)) {
                        unsigned short mmop[2] = { 0 };
                        force_sig(status, current);
                }
  
 -              goto out;
 +              break;
  
        case 3:
                /*
 -               * Old (MIPS I and MIPS II) processors will set this code
 -               * for COP1X opcode instructions that replaced the original
 -               * COP3 space.  We don't limit COP1 space instructions in
 -               * the emulator according to the CPU ISA, so we want to
 -               * treat COP1X instructions consistently regardless of which
 -               * code the CPU chose.  Therefore we redirect this trap to
 -               * the FP emulator too.
 -               *
 -               * Then some newer FPU-less processors use this code
 -               * erroneously too, so they are covered by this choice
 -               * as well.
 +               * The COP3 opcode space and consequently the CP0.Status.CU3
 +               * bit and the CP0.Cause.CE=3 encoding have been removed as
 +               * of the MIPS III ISA.  From the MIPS IV and MIPS32r2 ISAs
 +               * up the space has been reused for COP1X instructions, that
 +               * are enabled by the CP0.Status.CU1 bit and consequently
 +               * use the CP0.Cause.CE=1 encoding for Coprocessor Unusable
 +               * exceptions.  Some FPU-less processors that implement one
 +               * of these ISAs however use this code erroneously for COP1X
 +               * instructions.  Therefore we redirect this trap to the FP
 +               * emulator too.
                 */
 -              if (raw_cpu_has_fpu)
 +              if (raw_cpu_has_fpu || !cpu_has_mips_4_5_64_r2_r6) {
 +                      force_sig(SIGILL, current);
                        break;
 +              }
                /* Fall through.  */
  
        case 1:
                err = enable_restore_fp_context(0);
  
 -              if (!raw_cpu_has_fpu || err) {
 -                      int sig;
 -                      void __user *fault_addr = NULL;
 -                      sig = fpu_emulator_cop1Handler(regs,
 -                                                     &current->thread.fpu,
 -                                                     0, &fault_addr);
 -                      if (!process_fpemu_return(sig, fault_addr) && !err)
 -                              mt_ase_fp_affinity();
 -              }
 +              if (raw_cpu_has_fpu && !err)
 +                      break;
  
 -              goto out;
 +              sig = fpu_emulator_cop1Handler(regs, &current->thread.fpu, 0,
 +                                             &fault_addr);
 +              fcr31 = current->thread.fpu.fcr31;
 +
 +              /*
 +               * We can't allow the emulated instruction to leave
 +               * any of the cause bits set in $fcr31.
 +               */
 +              current->thread.fpu.fcr31 &= ~FPU_CSR_ALL_X;
 +
 +              /* Send a signal if required.  */
 +              if (!process_fpemu_return(sig, fault_addr, fcr31) && !err)
 +                      mt_ase_fp_affinity();
 +
 +              break;
  
        case 2:
                raw_notifier_call_chain(&cu2_chain, CU2_EXCEPTION, regs);
 -              goto out;
 +              break;
        }
  
 -      force_sig(SIGILL, current);
 -
 -out:
        exception_exit(prev_state);
  }
  
 -asmlinkage void do_msa_fpe(struct pt_regs *regs)
 +asmlinkage void do_msa_fpe(struct pt_regs *regs, unsigned int msacsr)
  {
        enum ctx_state prev_state;
  
        prev_state = exception_enter();
 +      if (notify_die(DIE_MSAFP, "MSA FP exception", regs, 0,
 +                     regs_to_trapnr(regs), SIGFPE) == NOTIFY_STOP)
 +              goto out;
 +
 +      /* Clear MSACSR.Cause before enabling interrupts */
 +      write_msa_csr(msacsr & ~MSA_CSR_CAUSEF);
 +      local_irq_enable();
 +
        die_if_kernel("do_msa_fpe invoked from kernel context!", regs);
        force_sig(SIGFPE, current);
 +out:
        exception_exit(prev_state);
  }
  
@@@ -2018,12 -1969,6 +2018,12 @@@ int cp0_compare_irq_shift
  int cp0_perfcount_irq;
  EXPORT_SYMBOL_GPL(cp0_perfcount_irq);
  
 +/*
 + * Fast debug channel IRQ or -1 if not present
 + */
 +int cp0_fdc_irq;
 +EXPORT_SYMBOL_GPL(cp0_fdc_irq);
 +
  static int noulri;
  
  static int __init ulri_disable(char *s)
@@@ -2105,21 -2050,17 +2105,21 @@@ void per_cpu_trap_init(bool is_boot_cpu
         *
         *  o read IntCtl.IPTI to determine the timer interrupt
         *  o read IntCtl.IPPCI to determine the performance counter interrupt
 +       *  o read IntCtl.IPFDC to determine the fast debug channel interrupt
         */
        if (cpu_has_mips_r2_r6) {
                cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
                cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
                cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
 -              if (cp0_perfcount_irq == cp0_compare_irq)
 -                      cp0_perfcount_irq = -1;
 +              cp0_fdc_irq = (read_c0_intctl() >> INTCTLB_IPFDC) & 7;
 +              if (!cp0_fdc_irq)
 +                      cp0_fdc_irq = -1;
 +
        } else {
                cp0_compare_irq = CP0_LEGACY_COMPARE_IRQ;
                cp0_compare_irq_shift = CP0_LEGACY_PERFCNT_IRQ;
                cp0_perfcount_irq = -1;
 +              cp0_fdc_irq = -1;
        }
  
        if (!cpu_data[cpu].asid_cache)
@@@ -25,7 -25,7 +25,7 @@@ extern cpumask_t threads_core_mask
  #define threads_per_core      1
  #define threads_per_subcore   1
  #define threads_shift         0
- #define threads_core_mask     (CPU_MASK_CPU0)
+ #define threads_core_mask     (*get_cpu_mask(0))
  #endif
  
  /* cpu_thread_mask_to_cores - Return a cpumask of one per cores
@@@ -55,7 -55,7 +55,7 @@@ static inline cpumask_t cpu_thread_mask
  
  static inline int cpu_nr_cores(void)
  {
 -      return NR_CPUS >> threads_shift;
 +      return nr_cpu_ids >> threads_shift;
  }
  
  static inline cpumask_t cpu_online_cores_map(void)
@@@ -181,20 -181,24 +181,20 @@@ static struct clocksource timer_cs = 
        .rating = 100,
        .read   = timer_cs_read,
        .mask   = CLOCKSOURCE_MASK(64),
 -      .shift  = 2,
        .flags  = CLOCK_SOURCE_IS_CONTINUOUS,
  };
  
  static __init int setup_timer_cs(void)
  {
        timer_cs_enabled = 1;
 -      timer_cs.mult = clocksource_hz2mult(sparc_config.clock_rate,
 -                                          timer_cs.shift);
 -
 -      return clocksource_register(&timer_cs);
 +      return clocksource_register_hz(&timer_cs, sparc_config.clock_rate);
  }
  
  #ifdef CONFIG_SMP
  static void percpu_ce_setup(enum clock_event_mode mode,
                        struct clock_event_device *evt)
  {
-       int cpu = __first_cpu(evt->cpumask);
+       int cpu = cpumask_first(evt->cpumask);
  
        switch (mode) {
                case CLOCK_EVT_MODE_PERIODIC:
  static int percpu_ce_set_next_event(unsigned long delta,
                                    struct clock_event_device *evt)
  {
-       int cpu = __first_cpu(evt->cpumask);
+       int cpu = cpumask_first(evt->cpumask);
        unsigned int next = (unsigned int)delta;
  
        sparc_config.load_profile_irq(cpu, next);
diff --combined arch/tile/kernel/setup.c
@@@ -32,7 -32,6 +32,7 @@@
  #include <linux/hugetlb.h>
  #include <linux/start_kernel.h>
  #include <linux/screen_info.h>
 +#include <linux/tick.h>
  #include <asm/setup.h>
  #include <asm/sections.h>
  #include <asm/cacheflush.h>
@@@ -774,7 -773,7 +774,7 @@@ static void __init zone_sizes_init(void
                 * though, there'll be no lowmem, so we just alloc_bootmem
                 * the memmap.  There will be no percpu memory either.
                 */
-               if (i != 0 && cpu_isset(i, isolnodes)) {
+               if (i != 0 && cpumask_test_cpu(i, &isolnodes)) {
                        node_memmap_pfn[i] =
                                alloc_bootmem_pfn(0, memmap_size, 0);
                        BUG_ON(node_percpu[i] != 0);
@@@ -1391,28 -1390,6 +1391,28 @@@ static int __init dataplane(char *str
  
  early_param("dataplane", dataplane);
  
 +#ifdef CONFIG_NO_HZ_FULL
 +/* Warn if hypervisor shared cpus are marked as nohz_full. */
 +static int __init check_nohz_full_cpus(void)
 +{
 +      struct cpumask shared;
 +      int cpu;
 +
 +      if (hv_inquire_tiles(HV_INQ_TILES_SHARED,
 +                           (HV_VirtAddr) shared.bits, sizeof(shared)) < 0) {
 +              pr_warn("WARNING: No support for inquiring hv shared tiles\n");
 +              return 0;
 +      }
 +      for_each_cpu(cpu, &shared) {
 +              if (tick_nohz_full_cpu(cpu))
 +                      pr_warn("WARNING: nohz_full cpu %d receives hypervisor interrupts!\n",
 +                             cpu);
 +      }
 +      return 0;
 +}
 +arch_initcall(check_nohz_full_cpus);
 +#endif
 +
  #ifdef CONFIG_CMDLINE_BOOL
  static char __initdata builtin_cmdline[COMMAND_LINE_SIZE] = CONFIG_CMDLINE;
  #endif
@@@ -195,19 -195,6 +195,19 @@@ static void gic_enable_redist(bool enab
  /*
   * Routines to disable, enable, EOI and route interrupts
   */
 +static int gic_peek_irq(struct irq_data *d, u32 offset)
 +{
 +      u32 mask = 1 << (gic_irq(d) % 32);
 +      void __iomem *base;
 +
 +      if (gic_irq_in_rdist(d))
 +              base = gic_data_rdist_sgi_base();
 +      else
 +              base = gic_data.dist_base;
 +
 +      return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
 +}
 +
  static void gic_poke_irq(struct irq_data *d, u32 offset)
  {
        u32 mask = 1 << (gic_irq(d) % 32);
@@@ -236,61 -223,6 +236,61 @@@ static void gic_unmask_irq(struct irq_d
        gic_poke_irq(d, GICD_ISENABLER);
  }
  
 +static int gic_irq_set_irqchip_state(struct irq_data *d,
 +                                   enum irqchip_irq_state which, bool val)
 +{
 +      u32 reg;
 +
 +      if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
 +              return -EINVAL;
 +
 +      switch (which) {
 +      case IRQCHIP_STATE_PENDING:
 +              reg = val ? GICD_ISPENDR : GICD_ICPENDR;
 +              break;
 +
 +      case IRQCHIP_STATE_ACTIVE:
 +              reg = val ? GICD_ISACTIVER : GICD_ICACTIVER;
 +              break;
 +
 +      case IRQCHIP_STATE_MASKED:
 +              reg = val ? GICD_ICENABLER : GICD_ISENABLER;
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      gic_poke_irq(d, reg);
 +      return 0;
 +}
 +
 +static int gic_irq_get_irqchip_state(struct irq_data *d,
 +                                   enum irqchip_irq_state which, bool *val)
 +{
 +      if (d->hwirq >= gic_data.irq_nr) /* PPI/SPI only */
 +              return -EINVAL;
 +
 +      switch (which) {
 +      case IRQCHIP_STATE_PENDING:
 +              *val = gic_peek_irq(d, GICD_ISPENDR);
 +              break;
 +
 +      case IRQCHIP_STATE_ACTIVE:
 +              *val = gic_peek_irq(d, GICD_ISACTIVER);
 +              break;
 +
 +      case IRQCHIP_STATE_MASKED:
 +              *val = !gic_peek_irq(d, GICD_ISENABLER);
 +              break;
 +
 +      default:
 +              return -EINVAL;
 +      }
 +
 +      return 0;
 +}
 +
  static void gic_eoi_irq(struct irq_data *d)
  {
        gic_write_eoir(gic_irq(d));
@@@ -486,6 -418,19 +486,6 @@@ static void gic_cpu_init(void
  }
  
  #ifdef CONFIG_SMP
 -static int gic_peek_irq(struct irq_data *d, u32 offset)
 -{
 -      u32 mask = 1 << (gic_irq(d) % 32);
 -      void __iomem *base;
 -
 -      if (gic_irq_in_rdist(d))
 -              base = gic_data_rdist_sgi_base();
 -      else
 -              base = gic_data.dist_base;
 -
 -      return !!(readl_relaxed(base + offset + (gic_irq(d) / 32) * 4) & mask);
 -}
 -
  static int gic_secondary_init(struct notifier_block *nfb,
                              unsigned long action, void *hcpu)
  {
@@@ -521,7 -466,7 +521,7 @@@ static u16 gic_compute_target_list(int 
                tlist |= 1 << (mpidr & 0xf);
  
                cpu = cpumask_next(cpu, mask);
 -              if (cpu == nr_cpu_ids)
 +              if (cpu >= nr_cpu_ids)
                        goto out;
  
                mpidr = cpu_logical_map(cpu);
@@@ -567,7 -512,7 +567,7 @@@ static void gic_raise_softirq(const str
         */
        smp_wmb();
  
-       for_each_cpu_mask(cpu, *mask) {
+       for_each_cpu(cpu, mask) {
                u64 cluster_id = cpu_logical_map(cpu) & ~0xffUL;
                u16 tlist;
  
@@@ -656,8 -601,6 +656,8 @@@ static struct irq_chip gic_chip = 
        .irq_eoi                = gic_eoi_irq,
        .irq_set_type           = gic_set_type,
        .irq_set_affinity       = gic_set_affinity,
 +      .irq_get_irqchip_state  = gic_irq_get_irqchip_state,
 +      .irq_set_irqchip_state  = gic_irq_set_irqchip_state,
  };
  
  #define GIC_ID_NR             (1U << gic_data.rdists.id_bits)
@@@ -166,27 -166,6 +166,27 @@@ cycle_t gic_read_compare(void
  
        return (((cycle_t) hi) << 32) + lo;
  }
 +
 +void gic_start_count(void)
 +{
 +      u32 gicconfig;
 +
 +      /* Start the counter */
 +      gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
 +      gicconfig &= ~(1 << GIC_SH_CONFIG_COUNTSTOP_SHF);
 +      gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
 +}
 +
 +void gic_stop_count(void)
 +{
 +      u32 gicconfig;
 +
 +      /* Stop the counter */
 +      gicconfig = gic_read(GIC_REG(SHARED, GIC_SH_CONFIG));
 +      gicconfig |= 1 << GIC_SH_CONFIG_COUNTSTOP_SHF;
 +      gic_write(GIC_REG(SHARED, GIC_SH_CONFIG), gicconfig);
 +}
 +
  #endif
  
  static bool gic_local_irq_is_routable(int intr)
@@@ -239,7 -218,7 +239,7 @@@ int gic_get_c0_compare_int(void
  int gic_get_c0_perfcount_int(void)
  {
        if (!gic_local_irq_is_routable(GIC_LOCAL_INT_PERFCTR)) {
 -              /* Is the erformance counter shared with the timer? */
 +              /* Is the performance counter shared with the timer? */
                if (cp0_perfcount_irq < 0)
                        return -1;
                return MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
                                  GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_PERFCTR));
  }
  
 +int gic_get_c0_fdc_int(void)
 +{
 +      if (!gic_local_irq_is_routable(GIC_LOCAL_INT_FDC)) {
 +              /* Is the FDC IRQ even present? */
 +              if (cp0_fdc_irq < 0)
 +                      return -1;
 +              return MIPS_CPU_IRQ_BASE + cp0_fdc_irq;
 +      }
 +
 +      /*
 +       * Some cores claim the FDC is routable but it doesn't actually seem to
 +       * be connected.
 +       */
 +      switch (current_cpu_type()) {
 +      case CPU_INTERAPTIV:
 +      case CPU_PROAPTIV:
 +              return -1;
 +      }
 +
 +      return irq_create_mapping(gic_irq_domain,
 +                                GIC_LOCAL_TO_HWIRQ(GIC_LOCAL_INT_FDC));
 +}
 +
  static void gic_handle_shared_int(void)
  {
        unsigned int i, intr, virq;
@@@ -389,19 -345,19 +389,19 @@@ static int gic_set_affinity(struct irq_
        int             i;
  
        cpumask_and(&tmp, cpumask, cpu_online_mask);
-       if (cpus_empty(tmp))
+       if (cpumask_empty(&tmp))
                return -EINVAL;
  
        /* Assumption : cpumask refers to a single CPU */
        spin_lock_irqsave(&gic_lock, flags);
  
        /* Re-route this IRQ */
-       gic_map_to_vpe(irq, first_cpu(tmp));
+       gic_map_to_vpe(irq, cpumask_first(&tmp));
  
        /* Update the pcpu_masks */
        for (i = 0; i < NR_CPUS; i++)
                clear_bit(irq, pcpu_masks[i].pcpu_mask);
-       set_bit(irq, pcpu_masks[first_cpu(tmp)].pcpu_mask);
+       set_bit(irq, pcpu_masks[cpumask_first(&tmp)].pcpu_mask);
  
        cpumask_copy(d->affinity, cpumask);
        spin_unlock_irqrestore(&gic_lock, flags);
@@@ -636,20 -592,15 +636,20 @@@ static int gic_local_irq_domain_map(str
         * of the MIPS kernel code does not use the percpu IRQ API for
         * the CP0 timer and performance counter interrupts.
         */
 -      if (intr != GIC_LOCAL_INT_TIMER && intr != GIC_LOCAL_INT_PERFCTR) {
 +      switch (intr) {
 +      case GIC_LOCAL_INT_TIMER:
 +      case GIC_LOCAL_INT_PERFCTR:
 +      case GIC_LOCAL_INT_FDC:
 +              irq_set_chip_and_handler(virq,
 +                                       &gic_all_vpes_local_irq_controller,
 +                                       handle_percpu_irq);
 +              break;
 +      default:
                irq_set_chip_and_handler(virq,
                                         &gic_local_irq_controller,
                                         handle_percpu_devid_irq);
                irq_set_percpu_devid(virq);
 -      } else {
 -              irq_set_chip_and_handler(virq,
 -                                       &gic_all_vpes_local_irq_controller,
 -                                       handle_percpu_irq);
 +              break;
        }
  
        spin_lock_irqsave(&gic_lock, flags);
@@@ -838,8 -838,7 +838,8 @@@ static int ptp_mpipe_adjtime(struct ptp
        return ret;
  }
  
 -static int ptp_mpipe_gettime(struct ptp_clock_info *ptp, struct timespec *ts)
 +static int ptp_mpipe_gettime(struct ptp_clock_info *ptp,
 +                           struct timespec64 *ts)
  {
        int ret = 0;
        struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
  }
  
  static int ptp_mpipe_settime(struct ptp_clock_info *ptp,
 -                           const struct timespec *ts)
 +                           const struct timespec64 *ts)
  {
        int ret = 0;
        struct mpipe_data *md = container_of(ptp, struct mpipe_data, caps);
@@@ -877,8 -876,8 +877,8 @@@ static struct ptp_clock_info ptp_mpipe_
        .pps            = 0,
        .adjfreq        = ptp_mpipe_adjfreq,
        .adjtime        = ptp_mpipe_adjtime,
 -      .gettime        = ptp_mpipe_gettime,
 -      .settime        = ptp_mpipe_settime,
 +      .gettime64      = ptp_mpipe_gettime,
 +      .settime64      = ptp_mpipe_settime,
        .enable         = ptp_mpipe_enable,
  };
  
@@@ -1123,7 -1122,7 +1123,7 @@@ static int alloc_percpu_mpipe_resources
                        addr + i * sizeof(struct tile_net_comps);
  
        /* If this is a network cpu, create an iqueue. */
-       if (cpu_isset(cpu, network_cpus_map)) {
+       if (cpumask_test_cpu(cpu, &network_cpus_map)) {
                order = get_order(NOTIF_RING_SIZE);
                page = homecache_alloc_pages(GFP_KERNEL, order, cpu);
                if (page == NULL) {
@@@ -1299,7 -1298,7 +1299,7 @@@ static int tile_net_init_mpipe(struct n
        int first_ring, ring;
        int instance = mpipe_instance(dev);
        struct mpipe_data *md = &mpipe_data[instance];
-       int network_cpus_count = cpus_weight(network_cpus_map);
+       int network_cpus_count = cpumask_weight(&network_cpus_map);
  
        if (!hash_default) {
                netdev_err(dev, "Networking requires hash_default!\n");
diff --combined lib/Kconfig
@@@ -18,8 -18,9 +18,8 @@@ config HAVE_ARCH_BITREVERS
        default n
        depends on BITREVERSE
        help
 -        This option provides an config for the architecture which have instruction
 -        can do bitreverse operation, we use the hardware instruction if the architecture
 -        have this capability.
 +        This option enables the use of hardware bit-reversal instructions on
 +        architectures which support such operations.
  
  config RATIONAL
        bool
@@@ -396,10 -397,6 +396,6 @@@ config CPUMASK_OFFSTAC
          them on the stack.  This is a bit more expensive, but avoids
          stack overflow.
  
- config DISABLE_OBSOLETE_CPUMASK_FUNCTIONS
-        bool "Disable obsolete cpumask functions" if DEBUG_PER_CPU_MAPS
-        depends on BROKEN
  config CPU_RMAP
        bool
        depends on SMP
diff --combined lib/cpumask.c
@@@ -5,27 -5,6 +5,6 @@@
  #include <linux/export.h>
  #include <linux/bootmem.h>
  
- int __first_cpu(const cpumask_t *srcp)
- {
-       return min_t(int, NR_CPUS, find_first_bit(srcp->bits, NR_CPUS));
- }
- EXPORT_SYMBOL(__first_cpu);
- int __next_cpu(int n, const cpumask_t *srcp)
- {
-       return min_t(int, NR_CPUS, find_next_bit(srcp->bits, NR_CPUS, n+1));
- }
- EXPORT_SYMBOL(__next_cpu);
- #if NR_CPUS > 64
- int __next_cpu_nr(int n, const cpumask_t *srcp)
- {
-       return min_t(int, nr_cpu_ids,
-                               find_next_bit(srcp->bits, nr_cpu_ids, n+1));
- }
- EXPORT_SYMBOL(__next_cpu_nr);
- #endif
  /**
   * cpumask_next_and - get the next cpu in *src1p & *src2p
   * @n: the cpu prior to the place to search (ie. return will be > @n)
  int cpumask_next_and(int n, const struct cpumask *src1p,
                     const struct cpumask *src2p)
  {
 -      while ((n = cpumask_next(n, src1p)) < nr_cpu_ids)
 -              if (cpumask_test_cpu(n, src2p))
 -                      break;
 -      return n;
 +      struct cpumask tmp;
 +
 +      if (cpumask_and(&tmp, src1p, src2p))
 +              return cpumask_next(n, &tmp);
 +      return nr_cpu_ids;
  }
  EXPORT_SYMBOL(cpumask_next_and);
  
@@@ -90,13 -68,6 +69,6 @@@ bool alloc_cpumask_var_node(cpumask_var
                dump_stack();
        }
  #endif
-       /* FIXME: Bandaid to save us from old primitives which go to NR_CPUS. */
-       if (*mask) {
-               unsigned char *ptr = (unsigned char *)cpumask_bits(*mask);
-               unsigned int tail;
-               tail = BITS_TO_LONGS(NR_CPUS - nr_cpumask_bits) * sizeof(long);
-               memset(ptr + cpumask_size() - tail, 0, tail);
-       }
  
        return *mask != NULL;
  }