Merge branch 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Feb 2017 19:46:00 +0000 (11:46 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Feb 2017 19:46:00 +0000 (11:46 -0800)
Pull x86 fixes from Ingo Molnar:
 "Two documentation updates, plus a debugging annotation fix"

* 'x86-urgent-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  x86/crash: Update the stale comment in reserve_crashkernel()
  x86/irq, trace: Add __irq_entry annotation to x86's platform IRQ handlers
  Documentation, x86, resctrl: Recommend locking for resctrlfs

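For context on the __irq_entry annotation change listed above: __irq_entry
(defined in include/linux/interrupt.h) is a section attribute that places a
handler's text in .irqentry.text, which is how tracing code such as the
function-graph/irqsoff tracers recognizes hard-IRQ entry points. A minimal
sketch of the pattern follows, with a hypothetical handler name — this is
illustrative, not code from the merge itself:

    /* __irq_entry expands to roughly __section(".irqentry.text"), so the
     * handler's code lands between __irqentry_text_start and
     * __irqentry_text_end, letting tracers treat it as an IRQ entry point.
     */
    __visible void __irq_entry smp_example_interrupt(struct pt_regs *regs)
    {
            entering_irq();         /* irq_enter() + IRQ accounting */
            /* ... handle the vector, as the handlers in this diff do ... */
            exiting_irq();          /* irq_exit(), may run softirqs */
    }

The patch in this merge applies exactly this annotation to the existing x86
platform handlers (spurious, error, deferred-error, and thermal interrupts)
without changing their bodies.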
arch/x86/kernel/apic/apic.c
arch/x86/kernel/cpu/mcheck/mce_amd.c
arch/x86/kernel/cpu/mcheck/therm_throt.c
arch/x86/kernel/setup.c

diff --combined arch/x86/kernel/apic/apic.c
@@@ -529,19 -529,18 +529,19 @@@ static void lapic_timer_broadcast(cons
   * The local apic timer can be used for any function which is CPU local.
   */
  static struct clock_event_device lapic_clockevent = {
 -      .name                   = "lapic",
 -      .features               = CLOCK_EVT_FEAT_PERIODIC |
 -                                CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
 -                                | CLOCK_EVT_FEAT_DUMMY,
 -      .shift                  = 32,
 -      .set_state_shutdown     = lapic_timer_shutdown,
 -      .set_state_periodic     = lapic_timer_set_periodic,
 -      .set_state_oneshot      = lapic_timer_set_oneshot,
 -      .set_next_event         = lapic_next_event,
 -      .broadcast              = lapic_timer_broadcast,
 -      .rating                 = 100,
 -      .irq                    = -1,
 +      .name                           = "lapic",
 +      .features                       = CLOCK_EVT_FEAT_PERIODIC |
 +                                        CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
 +                                        | CLOCK_EVT_FEAT_DUMMY,
 +      .shift                          = 32,
 +      .set_state_shutdown             = lapic_timer_shutdown,
 +      .set_state_periodic             = lapic_timer_set_periodic,
 +      .set_state_oneshot              = lapic_timer_set_oneshot,
 +      .set_state_oneshot_stopped      = lapic_timer_shutdown,
 +      .set_next_event                 = lapic_next_event,
 +      .broadcast                      = lapic_timer_broadcast,
 +      .rating                         = 100,
 +      .irq                            = -1,
  };
  static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
  
@@@ -1246,7 -1245,7 +1246,7 @@@ static void lapic_setup_esr(void
  /**
   * setup_local_APIC - setup the local APIC
   *
 - * Used to setup local APIC while initializing BSP or bringin up APs.
 + * Used to setup local APIC while initializing BSP or bringing up APs.
   * Always called with preemption disabled.
   */
  void setup_local_APIC(void)
@@@ -1865,14 -1864,14 +1865,14 @@@ static void __smp_spurious_interrupt(u
                "should never happen.\n", vector, smp_processor_id());
  }
  
- __visible void smp_spurious_interrupt(struct pt_regs *regs)
+ __visible void __irq_entry smp_spurious_interrupt(struct pt_regs *regs)
  {
        entering_irq();
        __smp_spurious_interrupt(~regs->orig_ax);
        exiting_irq();
  }
  
- __visible void smp_trace_spurious_interrupt(struct pt_regs *regs)
+ __visible void __irq_entry smp_trace_spurious_interrupt(struct pt_regs *regs)
  {
        u8 vector = ~regs->orig_ax;
  
@@@ -1923,14 -1922,14 +1923,14 @@@ static void __smp_error_interrupt(struc
  
  }
  
- __visible void smp_error_interrupt(struct pt_regs *regs)
+ __visible void __irq_entry smp_error_interrupt(struct pt_regs *regs)
  {
        entering_irq();
        __smp_error_interrupt(regs);
        exiting_irq();
  }
  
- __visible void smp_trace_error_interrupt(struct pt_regs *regs)
+ __visible void __irq_entry smp_trace_error_interrupt(struct pt_regs *regs)
  {
        entering_irq();
        trace_error_apic_entry(ERROR_APIC_VECTOR);
@@@ -2029,8 -2028,8 +2029,8 @@@ void disconnect_bsp_APIC(int virt_wire_
  /*
   * The number of allocated logical CPU IDs. Since logical CPU IDs are allocated
   * contiguously, it equals to current allocated max logical CPU ID plus 1.
 - * All allocated CPU ID should be in [0, nr_logical_cpuidi), so the maximum of
 - * nr_logical_cpuids is nr_cpu_ids.
 + * All allocated CPU IDs should be in the [0, nr_logical_cpuids) range,
 + * so the maximum of nr_logical_cpuids is nr_cpu_ids.
   *
   * NOTE: Reserve 0 for BSP.
   */
@@@ -2095,7 -2094,7 +2095,7 @@@ int __generic_processor_info(int apicid
         * Since fixing handling of boot_cpu_physical_apicid requires
         * another discussion and tests on each platform, we leave it
         * for now and here we use read_apic_id() directly in this
 -       * function, generic_processor_info().
 +       * function, __generic_processor_info().
         */
        if (disabled_cpu_apicid != BAD_APICID &&
            disabled_cpu_apicid != read_apic_id() &&
diff --combined arch/x86/kernel/cpu/mcheck/mce_amd.c
@@@ -192,7 -192,6 +192,7 @@@ static void get_smca_bank_info(unsigne
  
                        smca_banks[bank].hwid = s_hwid;
                        smca_banks[bank].id = instance_id;
 +                      smca_banks[bank].sysfs_id = s_hwid->count++;
                        break;
                }
        }
@@@ -778,8 -777,7 +778,8 @@@ __log_error(unsigned int bank, bool def
        mce_setup(&m);
  
        m.status = status;
 -      m.bank = bank;
 +      m.bank   = bank;
 +      m.tsc    = rdtsc();
  
        if (threshold_err)
                m.misc = misc;
@@@ -816,14 -814,14 +816,14 @@@ static inline void __smp_deferred_error
        deferred_error_int_vector();
  }
  
- asmlinkage __visible void smp_deferred_error_interrupt(void)
+ asmlinkage __visible void __irq_entry smp_deferred_error_interrupt(void)
  {
        entering_irq();
        __smp_deferred_error_interrupt();
        exiting_ack_irq();
  }
  
- asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
+ asmlinkage __visible void __irq_entry smp_trace_deferred_error_interrupt(void)
  {
        entering_irq();
        trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
@@@ -1066,12 -1064,9 +1066,12 @@@ static const char *get_name(unsigned in
                return NULL;
        }
  
 +      if (smca_banks[bank].hwid->count == 1)
 +              return smca_get_name(bank_type);
 +
        snprintf(buf_mcatype, MAX_MCATYPE_NAME_LEN,
                 "%s_%x", smca_get_name(bank_type),
 -                        smca_banks[bank].id);
 +                        smca_banks[bank].sysfs_id);
        return buf_mcatype;
  }
  
diff --combined arch/x86/kernel/cpu/mcheck/therm_throt.c
@@@ -6,7 -6,7 +6,7 @@@
   *
   * Maintains a counter in /sys that keeps track of the number of thermal
   * events, such that the user knows how bad the thermal problem might be
 - * (since the logging to syslog and mcelog is rate limited).
 + * (since the logging to syslog is rate limited).
   *
   * Author: Dmitriy Zavin (dmitriyz@google.com)
   *
@@@ -141,8 -141,13 +141,8 @@@ static struct attribute_group thermal_a
   * IRQ has been acknowledged.
   *
   * It will take care of rate limiting and printing messages to the syslog.
 - *
 - * Returns: 0 : Event should NOT be further logged, i.e. still in
 - *              "timeout" from previous log message.
 - *          1 : Event should be logged further, and a message has been
 - *              printed to the syslog.
   */
 -static int therm_throt_process(bool new_event, int event, int level)
 +static void therm_throt_process(bool new_event, int event, int level)
  {
        struct _thermal_state *state;
        unsigned int this_cpu = smp_processor_id();
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->core_power_limit;
                else
 -                       return 0;
 +                      return;
        } else if (level == PACKAGE_LEVEL) {
                if (event == THERMAL_THROTTLING_EVENT)
                        state = &pstate->package_throttle;
                else if (event == POWER_LIMIT_EVENT)
                        state = &pstate->package_power_limit;
                else
 -                      return 0;
 +                      return;
        } else
 -              return 0;
 +              return;
  
        old_event = state->new_event;
        state->new_event = new_event;
  
        if (time_before64(now, state->next_check) &&
                        state->count != state->last_count)
 -              return 0;
 +              return;
  
        state->next_check = now + CHECK_INTERVAL;
        state->last_count = state->count;
                                this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package",
                                state->count);
 -              return 1;
 +              return;
        }
        if (old_event) {
                if (event == THERMAL_THROTTLING_EVENT)
                        pr_info("CPU%d: %s temperature/speed normal\n", this_cpu,
                                level == CORE_LEVEL ? "Core" : "Package");
 -              return 1;
 +              return;
        }
 -
 -      return 0;
  }
  
  static int thresh_event_valid(int level, int event)
@@@ -358,9 -365,10 +358,9 @@@ static void intel_thermal_interrupt(voi
        /* Check for violation of core thermal thresholds*/
        notify_thresholds(msr_val);
  
 -      if (therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
 -                              THERMAL_THROTTLING_EVENT,
 -                              CORE_LEVEL) != 0)
 -              mce_log_therm_throt_event(msr_val);
 +      therm_throt_process(msr_val & THERM_STATUS_PROCHOT,
 +                          THERMAL_THROTTLING_EVENT,
 +                          CORE_LEVEL);
  
        if (this_cpu_has(X86_FEATURE_PLN) && int_pln_enable)
                therm_throt_process(msr_val & THERM_STATUS_POWER_LIMIT,
@@@ -396,14 -404,16 +396,16 @@@ static inline void __smp_thermal_interr
        smp_thermal_vector();
  }
  
- asmlinkage __visible void smp_thermal_interrupt(struct pt_regs *regs)
+ asmlinkage __visible void __irq_entry
+ smp_thermal_interrupt(struct pt_regs *regs)
  {
        entering_irq();
        __smp_thermal_interrupt();
        exiting_ack_irq();
  }
  
- asmlinkage __visible void smp_trace_thermal_interrupt(struct pt_regs *regs)
+ asmlinkage __visible void __irq_entry
+ smp_trace_thermal_interrupt(struct pt_regs *regs)
  {
        entering_irq();
        trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
diff --combined arch/x86/kernel/setup.c
@@@ -575,7 -575,9 +575,9 @@@ static void __init reserve_crashkernel(
        /* 0 means: find the address automatically */
        if (crash_base <= 0) {
                /*
-                *  kexec want bzImage is below CRASH_KERNEL_ADDR_MAX
+                * Set CRASH_ADDR_LOW_MAX upper bound for crash memory,
+                * as old kexec-tools loads bzImage below that, unless
+                * "crashkernel=size[KMG],high" is specified.
                 */
                crash_base = memblock_find_in_range(CRASH_ALIGN,
                                                    high ? CRASH_ADDR_HIGH_MAX
@@@ -1176,20 -1178,6 +1178,20 @@@ void __init setup_arch(char **cmdline_p
        /* Allocate bigger log buffer */
        setup_log_buf(1);
  
 +      if (efi_enabled(EFI_BOOT)) {
 +              switch (boot_params.secure_boot) {
 +              case efi_secureboot_mode_disabled:
 +                      pr_info("Secure boot disabled\n");
 +                      break;
 +              case efi_secureboot_mode_enabled:
 +                      pr_info("Secure boot enabled\n");
 +                      break;
 +              default:
 +                      pr_info("Secure boot could not be determined\n");
 +                      break;
 +              }
 +      }
 +
        reserve_initrd();
  
        acpi_table_upgrade();