cpufreq: intel_pstate: hybrid: Avoid exposing two global attributes
drivers/cpufreq/intel_pstate.c
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * intel_pstate.c: Native P state management for Intel processors
4  *
5  * (C) Copyright 2012 Intel Corporation
6  * Author: Dirk Brandewie <dirk.j.brandewie@intel.com>
7  */
8
9 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
10
11 #include <linux/kernel.h>
12 #include <linux/kernel_stat.h>
13 #include <linux/module.h>
14 #include <linux/ktime.h>
15 #include <linux/hrtimer.h>
16 #include <linux/tick.h>
17 #include <linux/slab.h>
18 #include <linux/sched/cpufreq.h>
19 #include <linux/list.h>
20 #include <linux/cpu.h>
21 #include <linux/cpufreq.h>
22 #include <linux/sysfs.h>
23 #include <linux/types.h>
24 #include <linux/fs.h>
25 #include <linux/acpi.h>
26 #include <linux/vmalloc.h>
27 #include <linux/pm_qos.h>
28 #include <trace/events/power.h>
29
30 #include <asm/div64.h>
31 #include <asm/msr.h>
32 #include <asm/cpu_device_id.h>
33 #include <asm/cpufeature.h>
34 #include <asm/intel-family.h>
35
36 #define INTEL_PSTATE_SAMPLING_INTERVAL  (10 * NSEC_PER_MSEC)
37
38 #define INTEL_CPUFREQ_TRANSITION_LATENCY        20000
39 #define INTEL_CPUFREQ_TRANSITION_DELAY_HWP      5000
40 #define INTEL_CPUFREQ_TRANSITION_DELAY          500
41
42 #ifdef CONFIG_ACPI
43 #include <acpi/processor.h>
44 #include <acpi/cppc_acpi.h>
45 #endif
46
47 #define FRAC_BITS 8
48 #define int_tofp(X) ((int64_t)(X) << FRAC_BITS)
49 #define fp_toint(X) ((X) >> FRAC_BITS)
50
51 #define ONE_EIGHTH_FP ((int64_t)1 << (FRAC_BITS - 3))
52
53 #define EXT_BITS 6
54 #define EXT_FRAC_BITS (EXT_BITS + FRAC_BITS)
55 #define fp_ext_toint(X) ((X) >> EXT_FRAC_BITS)
56 #define int_ext_tofp(X) ((int64_t)(X) << EXT_FRAC_BITS)
57
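/*
 * Helpers for the fixed-point arithmetic used throughout this driver:
 * with FRAC_BITS == 8, the value 1.0 is represented as 256, so for example
 * int_tofp(3) == 768 and div_fp(1, 4) == 64 (i.e. 0.25).  The "ext" variants
 * below use EXT_FRAC_BITS == 14 fraction bits for extra precision in 64-bit
 * intermediate results.
 */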
58 static inline int32_t mul_fp(int32_t x, int32_t y)
59 {
60         return ((int64_t)x * (int64_t)y) >> FRAC_BITS;
61 }
62
63 static inline int32_t div_fp(s64 x, s64 y)
64 {
65         return div64_s64((int64_t)x << FRAC_BITS, y);
66 }
67
68 static inline int ceiling_fp(int32_t x)
69 {
70         int mask, ret;
71
72         ret = fp_toint(x);
73         mask = (1 << FRAC_BITS) - 1;
74         if (x & mask)
75                 ret += 1;
76         return ret;
77 }
78
79 static inline u64 mul_ext_fp(u64 x, u64 y)
80 {
81         return (x * y) >> EXT_FRAC_BITS;
82 }
83
84 static inline u64 div_ext_fp(u64 x, u64 y)
85 {
86         return div64_u64(x << EXT_FRAC_BITS, y);
87 }
88
89 /**
90  * struct sample -      Store performance sample
91  * @core_avg_perf:      Ratio of APERF/MPERF which is the actual average
92  *                      performance during last sample period
93  * @busy_scaled:        Scaled busy value which is used to calculate the
94  *                      next P-state.  This can differ from core_avg_perf
95  *                      to account for CPU idle periods
96  * @aperf:              Difference of actual performance frequency clock count
97  *                      read from APERF MSR between last and current sample
98  * @mperf:              Difference of maximum performance frequency clock count
99  *                      read from MPERF MSR between last and current sample
100  * @tsc:                Difference of time stamp counter between last and
101  *                      current sample
102  * @time:               Current time from scheduler
103  *
104  * This structure is used in the cpudata structure to store performance sample
105  * data for choosing next P State.
106  */
107 struct sample {
108         int32_t core_avg_perf;
109         int32_t busy_scaled;
110         u64 aperf;
111         u64 mperf;
112         u64 tsc;
113         u64 time;
114 };
115
116 /**
117  * struct pstate_data - Store P state data
118  * @current_pstate:     Current requested P state
119  * @min_pstate:         Min P state possible for this platform
120  * @max_pstate:         Max P state possible for this platform
121  * @max_pstate_physical: Physical maximum P-state for the processor;
122  *                      this can be higher than @max_pstate, which may be
123  *                      limited by the platform's thermal design power
124  * @scaling:            Scaling factor to convert a P-state to frequency
125  *                      in cpufreq units
126  * @turbo_pstate:       Max Turbo P state possible for this platform
127  * @max_freq:           @max_pstate frequency in cpufreq units
128  * @turbo_freq:         @turbo_pstate frequency in cpufreq units
129  *
130  * Stores the per cpu model P state limits and current P state.
131  */
132 struct pstate_data {
133         int     current_pstate;
134         int     min_pstate;
135         int     max_pstate;
136         int     max_pstate_physical;
137         int     scaling;
138         int     turbo_pstate;
139         unsigned int max_freq;
140         unsigned int turbo_freq;
141 };
142
143 /**
144  * struct vid_data -    Stores voltage information data
145  * @min:                VID data for this platform corresponding to
146  *                      the lowest P state
147  * @max:                VID data corresponding to the highest P State.
148  * @turbo:              VID data for turbo P state
149  * @ratio:              Ratio of (vid max - vid min) /
150  *                      (max P state - Min P State)
151  *
152  * Stores the voltage data for DVFS (Dynamic Voltage and Frequency Scaling).
153  * This data is used on Atom platforms, where in addition to the target
154  * P-state, the voltage data needs to be specified to select the next P-state.
155  */
156 struct vid_data {
157         int min;
158         int max;
159         int turbo;
160         int32_t ratio;
161 };
162
163 /**
164  * struct global_params - Global parameters, mostly tunable via sysfs.
165  * @no_turbo:           Whether or not to use turbo P-states.
166  * @turbo_disabled:     Whether or not turbo P-states are available at all,
167  *                      based on the MSR_IA32_MISC_ENABLE value and whether or
168  *                      not the maximum reported turbo P-state is different from
169  *                      the maximum reported non-turbo one.
170  * @turbo_disabled_mf:  The @turbo_disabled value reflected by cpuinfo.max_freq.
171  * @min_perf_pct:       Minimum capacity limit in percent of the maximum turbo
172  *                      P-state capacity.
173  * @max_perf_pct:       Maximum capacity limit in percent of the maximum turbo
174  *                      P-state capacity.
175  */
176 struct global_params {
177         bool no_turbo;
178         bool turbo_disabled;
179         bool turbo_disabled_mf;
180         int max_perf_pct;
181         int min_perf_pct;
182 };
183
184 /**
185  * struct cpudata -     Per CPU instance data storage
186  * @cpu:                CPU number for this instance data
187  * @policy:             CPUFreq policy value
188  * @update_util:        CPUFreq utility callback information
189  * @update_util_set:    CPUFreq utility callback is set
190  * @iowait_boost:       iowait-related boost fraction
191  * @last_update:        Time of the last update.
192  * @pstate:             Stores P state limits for this CPU
193  * @vid:                Stores VID limits for this CPU
194  * @last_sample_time:   Last Sample time
195  * @aperf_mperf_shift:  APERF vs MPERF counting frequency difference
196  * @prev_aperf:         Last APERF value read from APERF MSR
197  * @prev_mperf:         Last MPERF value read from MPERF MSR
198  * @prev_tsc:           Last timestamp counter (TSC) value
199  * @prev_cummulative_iowait: IO Wait time difference from last and
200  *                      current sample
201  * @sample:             Storage for the last sample data
202  * @min_perf_ratio:     Minimum capacity in terms of PERF or HWP ratios
203  * @max_perf_ratio:     Maximum capacity in terms of PERF or HWP ratios
204  * @acpi_perf_data:     Stores ACPI perf information read from _PSS
205  * @valid_pss_table:    Set to true for valid ACPI _PSS entries found
206  * @epp_powersave:      Last saved HWP energy-performance preference
207  *                      (EPP) or energy-performance bias (EPB) from
208  *                      when the policy was switched to performance
209  * @epp_policy:         Last saved policy used to set EPP/EPB
210  * @epp_default:        Power-on default HWP energy-performance
211  *                      preference/bias
212  * @epp_cached:         Cached HWP energy-performance preference value
213  * @hwp_req_cached:     Cached value of the last HWP Request MSR
214  * @hwp_cap_cached:     Cached value of the last HWP Capabilities MSR
215  * @last_io_update:     Last time when IO wake flag was set
216  * @sched_flags:        Store scheduler flags for possible cross CPU update
217  * @hwp_boost_min:      Last HWP boosted min performance
218  * @suspended:          Whether or not the driver has been suspended.
219  *
220  * This structure stores per CPU instance data for all CPUs.
221  */
222 struct cpudata {
223         int cpu;
224
225         unsigned int policy;
226         struct update_util_data update_util;
227         bool   update_util_set;
228
229         struct pstate_data pstate;
230         struct vid_data vid;
231
232         u64     last_update;
233         u64     last_sample_time;
234         u64     aperf_mperf_shift;
235         u64     prev_aperf;
236         u64     prev_mperf;
237         u64     prev_tsc;
238         u64     prev_cummulative_iowait;
239         struct sample sample;
240         int32_t min_perf_ratio;
241         int32_t max_perf_ratio;
242 #ifdef CONFIG_ACPI
243         struct acpi_processor_performance acpi_perf_data;
244         bool valid_pss_table;
245 #endif
246         unsigned int iowait_boost;
247         s16 epp_powersave;
248         s16 epp_policy;
249         s16 epp_default;
250         s16 epp_cached;
251         u64 hwp_req_cached;
252         u64 hwp_cap_cached;
253         u64 last_io_update;
254         unsigned int sched_flags;
255         u32 hwp_boost_min;
256         bool suspended;
257 };
258
259 static struct cpudata **all_cpu_data;
260
261 /**
262  * struct pstate_funcs - Per CPU model specific callbacks
263  * @get_max:            Callback to get maximum non turbo effective P state
264  * @get_max_physical:   Callback to get maximum non turbo physical P state
265  * @get_min:            Callback to get minimum P state
266  * @get_turbo:          Callback to get turbo P state
267  * @get_scaling:        Callback to get frequency scaling factor
268  * @get_aperf_mperf_shift: Callback to get the APERF vs MPERF frequency difference
269  * @get_val:            Callback to convert P state to actual MSR write value
270  * @get_vid:            Callback to get VID data for Atom platforms
271  *
272  * Core and Atom CPU models have different ways to get P-state limits. This
273  * structure is used to store those callbacks.
274  */
275 struct pstate_funcs {
276         int (*get_max)(void);
277         int (*get_max_physical)(void);
278         int (*get_min)(void);
279         int (*get_turbo)(void);
280         int (*get_scaling)(void);
281         int (*get_aperf_mperf_shift)(void);
282         u64 (*get_val)(struct cpudata*, int pstate);
283         void (*get_vid)(struct cpudata *);
284 };
285
286 static struct pstate_funcs pstate_funcs __read_mostly;
287
288 static int hwp_active __read_mostly;
289 static int hwp_mode_bdw __read_mostly;
290 static bool per_cpu_limits __read_mostly;
291 static bool hwp_boost __read_mostly;
292
293 static struct cpufreq_driver *intel_pstate_driver __read_mostly;
294
295 #ifdef CONFIG_ACPI
296 static bool acpi_ppc;
297 #endif
298
299 static struct global_params global;
300
301 static DEFINE_MUTEX(intel_pstate_driver_lock);
302 static DEFINE_MUTEX(intel_pstate_limits_lock);
303
304 #ifdef CONFIG_ACPI
305
306 static bool intel_pstate_acpi_pm_profile_server(void)
307 {
308         if (acpi_gbl_FADT.preferred_profile == PM_ENTERPRISE_SERVER ||
309             acpi_gbl_FADT.preferred_profile == PM_PERFORMANCE_SERVER)
310                 return true;
311
312         return false;
313 }
314
315 static bool intel_pstate_get_ppc_enable_status(void)
316 {
317         if (intel_pstate_acpi_pm_profile_server())
318                 return true;
319
320         return acpi_ppc;
321 }
322
323 #ifdef CONFIG_ACPI_CPPC_LIB
324
325 /* The work item is needed to avoid CPU hotplug locking issues */
326 static void intel_pstate_sched_itmt_work_fn(struct work_struct *work)
327 {
328         sched_set_itmt_support();
329 }
330
331 static DECLARE_WORK(sched_itmt_work, intel_pstate_sched_itmt_work_fn);
332
333 static void intel_pstate_set_itmt_prio(int cpu)
334 {
335         struct cppc_perf_caps cppc_perf;
336         static u32 max_highest_perf = 0, min_highest_perf = U32_MAX;
337         int ret;
338
339         ret = cppc_get_perf_caps(cpu, &cppc_perf);
340         if (ret)
341                 return;
342
343         /*
344          * The priorities can be set regardless of whether or not
345          * sched_set_itmt_support(true) has been called and it is valid to
346          * update them at any time after it has been called.
347          */
348         sched_set_itmt_core_prio(cppc_perf.highest_perf, cpu);
349
350         if (max_highest_perf <= min_highest_perf) {
351                 if (cppc_perf.highest_perf > max_highest_perf)
352                         max_highest_perf = cppc_perf.highest_perf;
353
354                 if (cppc_perf.highest_perf < min_highest_perf)
355                         min_highest_perf = cppc_perf.highest_perf;
356
357                 if (max_highest_perf > min_highest_perf) {
358                         /*
359                          * This code can be run during CPU online under the
360                          * CPU hotplug locks, so sched_set_itmt_support()
361                          * cannot be called from here.  Queue up a work item
362                          * to invoke it.
363                          */
364                         schedule_work(&sched_itmt_work);
365                 }
366         }
367 }
368
369 static int intel_pstate_get_cppc_guranteed(int cpu)
370 {
371         struct cppc_perf_caps cppc_perf;
372         int ret;
373
374         ret = cppc_get_perf_caps(cpu, &cppc_perf);
375         if (ret)
376                 return ret;
377
378         if (cppc_perf.guaranteed_perf)
379                 return cppc_perf.guaranteed_perf;
380
381         return cppc_perf.nominal_perf;
382 }
383
384 #else /* CONFIG_ACPI_CPPC_LIB */
385 static void intel_pstate_set_itmt_prio(int cpu)
386 {
387 }
388 #endif /* CONFIG_ACPI_CPPC_LIB */
389
390 static void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
391 {
392         struct cpudata *cpu;
393         int ret;
394         int i;
395
396         if (hwp_active) {
397                 intel_pstate_set_itmt_prio(policy->cpu);
398                 return;
399         }
400
401         if (!intel_pstate_get_ppc_enable_status())
402                 return;
403
404         cpu = all_cpu_data[policy->cpu];
405
406         ret = acpi_processor_register_performance(&cpu->acpi_perf_data,
407                                                   policy->cpu);
408         if (ret)
409                 return;
410
411         /*
412          * Check if the control value in _PSS is for PERF_CTL MSR, which should
413          * guarantee that the states returned by it map to the states in our
414          * list directly.
415          */
416         if (cpu->acpi_perf_data.control_register.space_id !=
417                                                 ACPI_ADR_SPACE_FIXED_HARDWARE)
418                 goto err;
419
420         /*
421          * If _PSS contains only one entry, simply ignore it and continue
422          * as usual without taking _PSS into account.
423          */
424         if (cpu->acpi_perf_data.state_count < 2)
425                 goto err;
426
427         pr_debug("CPU%u - ACPI _PSS perf data\n", policy->cpu);
428         for (i = 0; i < cpu->acpi_perf_data.state_count; i++) {
429                 pr_debug("     %cP%d: %u MHz, %u mW, 0x%x\n",
430                          (i == cpu->acpi_perf_data.state ? '*' : ' '), i,
431                          (u32) cpu->acpi_perf_data.states[i].core_frequency,
432                          (u32) cpu->acpi_perf_data.states[i].power,
433                          (u32) cpu->acpi_perf_data.states[i].control);
434         }
435
436         /*
437          * The _PSS table doesn't cover the whole turbo frequency range;
438          * it only goes to +1 MHz above the max non-turbo frequency, with
439          * the control value corresponding to the max turbo ratio.  When
440          * cpufreq sets the policy, it will use that value as the max
441          * frequency, which reduces performance, because this driver uses
442          * the real max turbo frequency as the max frequency.  So correct
443          * this entry in the _PSS table to the real max turbo frequency,
444          * depending on the turbo state.  The value also needs to be
445          * converted to MHz, because _PSS frequencies are in MHz.
446          */
447         if (!global.turbo_disabled)
448                 cpu->acpi_perf_data.states[0].core_frequency =
449                                         policy->cpuinfo.max_freq / 1000;
450         cpu->valid_pss_table = true;
451         pr_debug("_PPC limits will be enforced\n");
452
453         return;
454
455  err:
456         cpu->valid_pss_table = false;
457         acpi_processor_unregister_performance(policy->cpu);
458 }
459
460 static void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
461 {
462         struct cpudata *cpu;
463
464         cpu = all_cpu_data[policy->cpu];
465         if (!cpu->valid_pss_table)
466                 return;
467
468         acpi_processor_unregister_performance(policy->cpu);
469 }
470 #else /* CONFIG_ACPI */
471 static inline void intel_pstate_init_acpi_perf_limits(struct cpufreq_policy *policy)
472 {
473 }
474
475 static inline void intel_pstate_exit_perf_limits(struct cpufreq_policy *policy)
476 {
477 }
478
479 static inline bool intel_pstate_acpi_pm_profile_server(void)
480 {
481         return false;
482 }
483 #endif /* CONFIG_ACPI */
484
485 #ifndef CONFIG_ACPI_CPPC_LIB
486 static int intel_pstate_get_cppc_guranteed(int cpu)
487 {
488         return -ENOTSUPP;
489 }
490 #endif /* CONFIG_ACPI_CPPC_LIB */
491
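/*
 * Turbo is treated as unavailable when either MSR_IA32_MISC_ENABLE has the
 * turbo-disable bit set (typically a BIOS setting) or the reported maximum
 * non-turbo P-state is equal to the turbo P-state.
 */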
492 static inline void update_turbo_state(void)
493 {
494         u64 misc_en;
495         struct cpudata *cpu;
496
497         cpu = all_cpu_data[0];
498         rdmsrl(MSR_IA32_MISC_ENABLE, misc_en);
499         global.turbo_disabled =
500                 (misc_en & MSR_IA32_MISC_ENABLE_TURBO_DISABLE ||
501                  cpu->pstate.max_pstate == cpu->pstate.turbo_pstate);
502 }
503
504 static int min_perf_pct_min(void)
505 {
506         struct cpudata *cpu = all_cpu_data[0];
507         int turbo_pstate = cpu->pstate.turbo_pstate;
508
509         return turbo_pstate ?
510                 (cpu->pstate.min_pstate * 100 / turbo_pstate) : 0;
511 }
512
513 static s16 intel_pstate_get_epb(struct cpudata *cpu_data)
514 {
515         u64 epb;
516         int ret;
517
518         if (!boot_cpu_has(X86_FEATURE_EPB))
519                 return -ENXIO;
520
521         ret = rdmsrl_on_cpu(cpu_data->cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
522         if (ret)
523                 return (s16)ret;
524
525         return (s16)(epb & 0x0f);
526 }
527
528 static s16 intel_pstate_get_epp(struct cpudata *cpu_data, u64 hwp_req_data)
529 {
530         s16 epp;
531
532         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
533                 /*
534                  * When hwp_req_data is 0, the caller didn't read
535                  * MSR_HWP_REQUEST, so read it here to get the EPP.
536                  */
537                 if (!hwp_req_data) {
538                         epp = rdmsrl_on_cpu(cpu_data->cpu, MSR_HWP_REQUEST,
539                                             &hwp_req_data);
540                         if (epp)
541                                 return epp;
542                 }
543                 epp = (hwp_req_data >> 24) & 0xff;
544         } else {
545                 /* When there is no EPP present, HWP uses EPB settings */
546                 epp = intel_pstate_get_epb(cpu_data);
547         }
548
549         return epp;
550 }
551
552 static int intel_pstate_set_epb(int cpu, s16 pref)
553 {
554         u64 epb;
555         int ret;
556
557         if (!boot_cpu_has(X86_FEATURE_EPB))
558                 return -ENXIO;
559
560         ret = rdmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, &epb);
561         if (ret)
562                 return ret;
563
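        /* The EPB value lives in the low 4 bits of MSR_IA32_ENERGY_PERF_BIAS. */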
564         epb = (epb & ~0x0f) | pref;
565         wrmsrl_on_cpu(cpu, MSR_IA32_ENERGY_PERF_BIAS, epb);
566
567         return 0;
568 }
569
570 /*
571  * EPP/EPB display strings corresponding to EPP index in the
572  * energy_perf_strings[]
573  *      index           String
574  *-------------------------------------
575  *      0               default
576  *      1               performance
577  *      2               balance_performance
578  *      3               balance_power
579  *      4               power
580  */
581 static const char * const energy_perf_strings[] = {
582         "default",
583         "performance",
584         "balance_performance",
585         "balance_power",
586         "power",
587         NULL
588 };
589 static const unsigned int epp_values[] = {
590         HWP_EPP_PERFORMANCE,
591         HWP_EPP_BALANCE_PERFORMANCE,
592         HWP_EPP_BALANCE_POWERSAVE,
593         HWP_EPP_POWERSAVE
594 };
595
596 static int intel_pstate_get_energy_pref_index(struct cpudata *cpu_data, int *raw_epp)
597 {
598         s16 epp;
599         int index = -EINVAL;
600
601         *raw_epp = 0;
602         epp = intel_pstate_get_epp(cpu_data, 0);
603         if (epp < 0)
604                 return epp;
605
606         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
607                 if (epp == HWP_EPP_PERFORMANCE)
608                         return 1;
609                 if (epp == HWP_EPP_BALANCE_PERFORMANCE)
610                         return 2;
611                 if (epp == HWP_EPP_BALANCE_POWERSAVE)
612                         return 3;
613                 if (epp == HWP_EPP_POWERSAVE)
614                         return 4;
615                 *raw_epp = epp;
616                 return 0;
617         } else if (boot_cpu_has(X86_FEATURE_EPB)) {
618                 /*
619                  * Range:
620                  *      0x00-0x03       :       Performance
621                  *      0x04-0x07       :       Balance performance
622                  *      0x08-0x0B       :       Balance power
623                  *      0x0C-0x0F       :       Power
624                  * The EPB is a 4-bit value, but our ranges restrict the
625                  * values that can be set, so effectively only the top
626                  * two bits are used here.
627                  */
628                 index = (epp >> 2) + 1;
629         }
630
631         return index;
632 }
633
634 static int intel_pstate_set_epp(struct cpudata *cpu, u32 epp)
635 {
636         int ret;
637
638         /*
639          * Use the cached HWP Request MSR value, because in the active mode the
640          * register itself may be updated by intel_pstate_hwp_boost_up() or
641          * intel_pstate_hwp_boost_down() at any time.
642          */
643         u64 value = READ_ONCE(cpu->hwp_req_cached);
644
645         value &= ~GENMASK_ULL(31, 24);
646         value |= (u64)epp << 24;
647         /*
648          * The only other updater of hwp_req_cached in the active mode,
649          * intel_pstate_hwp_set(), is called under the same lock as this
650          * function, so it cannot run in parallel with the update below.
651          */
652         WRITE_ONCE(cpu->hwp_req_cached, value);
653         ret = wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
654         if (!ret)
655                 cpu->epp_cached = epp;
656
657         return ret;
658 }
659
660 static int intel_pstate_set_energy_pref_index(struct cpudata *cpu_data,
661                                               int pref_index, bool use_raw,
662                                               u32 raw_epp)
663 {
664         int epp = -EINVAL;
665         int ret;
666
667         if (!pref_index)
668                 epp = cpu_data->epp_default;
669
670         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
671                 if (use_raw)
672                         epp = raw_epp;
673                 else if (epp == -EINVAL)
674                         epp = epp_values[pref_index - 1];
675
676                 /*
677                  * To avoid confusion, refuse to set EPP to any values different
678                  * from 0 (performance) if the current policy is "performance",
679                  * because those values would be overridden.
680                  */
681                 if (epp > 0 && cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
682                         return -EBUSY;
683
684                 ret = intel_pstate_set_epp(cpu_data, epp);
685         } else {
686                 if (epp == -EINVAL)
687                         epp = (pref_index - 1) << 2;
688                 ret = intel_pstate_set_epb(cpu_data->cpu, epp);
689         }
690
691         return ret;
692 }
693
694 static ssize_t show_energy_performance_available_preferences(
695                                 struct cpufreq_policy *policy, char *buf)
696 {
697         int i = 0;
698         int ret = 0;
699
700         while (energy_perf_strings[i] != NULL)
701                 ret += sprintf(&buf[ret], "%s ", energy_perf_strings[i++]);
702
703         ret += sprintf(&buf[ret], "\n");
704
705         return ret;
706 }
707
708 cpufreq_freq_attr_ro(energy_performance_available_preferences);
709
710 static struct cpufreq_driver intel_pstate;
711
712 static ssize_t store_energy_performance_preference(
713                 struct cpufreq_policy *policy, const char *buf, size_t count)
714 {
715         struct cpudata *cpu = all_cpu_data[policy->cpu];
716         char str_preference[21];
717         bool raw = false;
718         ssize_t ret;
719         u32 epp = 0;
720
721         ret = sscanf(buf, "%20s", str_preference);
722         if (ret != 1)
723                 return -EINVAL;
724
725         ret = match_string(energy_perf_strings, -1, str_preference);
726         if (ret < 0) {
727                 if (!boot_cpu_has(X86_FEATURE_HWP_EPP))
728                         return ret;
729
730                 ret = kstrtouint(buf, 10, &epp);
731                 if (ret)
732                         return ret;
733
734                 if (epp > 255)
735                         return -EINVAL;
736
737                 raw = true;
738         }
739
740         /*
741          * This function runs with the policy R/W semaphore held, which
742          * guarantees that the driver pointer will not change while it is
743          * running.
744          */
745         if (!intel_pstate_driver)
746                 return -EAGAIN;
747
748         mutex_lock(&intel_pstate_limits_lock);
749
750         if (intel_pstate_driver == &intel_pstate) {
751                 ret = intel_pstate_set_energy_pref_index(cpu, ret, raw, epp);
752         } else {
753                 /*
754                  * In the passive mode the governor needs to be stopped on the
755                  * target CPU before the EPP update and restarted after it,
756                  * which is super-heavy-weight, so make sure it is worth doing
757                  * upfront.
758                  */
759                 if (!raw)
760                         epp = ret ? epp_values[ret - 1] : cpu->epp_default;
761
762                 if (cpu->epp_cached != epp) {
763                         int err;
764
765                         cpufreq_stop_governor(policy);
766                         ret = intel_pstate_set_epp(cpu, epp);
767                         err = cpufreq_start_governor(policy);
768                         if (!ret)
769                                 ret = err;
770                 }
771         }
772
773         mutex_unlock(&intel_pstate_limits_lock);
774
775         return ret ?: count;
776 }
777
778 static ssize_t show_energy_performance_preference(
779                                 struct cpufreq_policy *policy, char *buf)
780 {
781         struct cpudata *cpu_data = all_cpu_data[policy->cpu];
782         int preference, raw_epp;
783
784         preference = intel_pstate_get_energy_pref_index(cpu_data, &raw_epp);
785         if (preference < 0)
786                 return preference;
787
788         if (raw_epp)
789                 return  sprintf(buf, "%d\n", raw_epp);
790         else
791                 return  sprintf(buf, "%s\n", energy_perf_strings[preference]);
792 }
793
794 cpufreq_freq_attr_rw(energy_performance_preference);
795
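/*
 * The "base_frequency" attribute reports the guaranteed performance level,
 * preferring the CPPC guaranteed ratio and falling back to the value from
 * MSR_HWP_CAPABILITIES, scaled to cpufreq units via the per-CPU scaling factor.
 */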
796 static ssize_t show_base_frequency(struct cpufreq_policy *policy, char *buf)
797 {
798         struct cpudata *cpu;
799         u64 cap;
800         int ratio;
801
802         ratio = intel_pstate_get_cppc_guranteed(policy->cpu);
803         if (ratio <= 0) {
804                 rdmsrl_on_cpu(policy->cpu, MSR_HWP_CAPABILITIES, &cap);
805                 ratio = HWP_GUARANTEED_PERF(cap);
806         }
807
808         cpu = all_cpu_data[policy->cpu];
809
810         return sprintf(buf, "%d\n", ratio * cpu->pstate.scaling);
811 }
812
813 cpufreq_freq_attr_ro(base_frequency);
814
815 static struct freq_attr *hwp_cpufreq_attrs[] = {
816         &energy_performance_preference,
817         &energy_performance_available_preferences,
818         &base_frequency,
819         NULL,
820 };
821
822 static void __intel_pstate_get_hwp_cap(struct cpudata *cpu)
823 {
824         u64 cap;
825
826         rdmsrl_on_cpu(cpu->cpu, MSR_HWP_CAPABILITIES, &cap);
827         WRITE_ONCE(cpu->hwp_cap_cached, cap);
828         cpu->pstate.max_pstate = HWP_GUARANTEED_PERF(cap);
829         cpu->pstate.turbo_pstate = HWP_HIGHEST_PERF(cap);
830 }
831
832 static void intel_pstate_get_hwp_cap(struct cpudata *cpu)
833 {
834         __intel_pstate_get_hwp_cap(cpu);
835         cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
836         cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
837 }
838
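/*
 * Program MSR_HWP_REQUEST for the given CPU: the min/max performance fields
 * are taken from the policy limits, and the EPP/EPB is only rewritten when
 * the policy type has changed since the last update.
 */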
839 static void intel_pstate_hwp_set(unsigned int cpu)
840 {
841         struct cpudata *cpu_data = all_cpu_data[cpu];
842         int max, min;
843         u64 value;
844         s16 epp;
845
846         max = cpu_data->max_perf_ratio;
847         min = cpu_data->min_perf_ratio;
848
849         if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE)
850                 min = max;
851
852         rdmsrl_on_cpu(cpu, MSR_HWP_REQUEST, &value);
853
854         value &= ~HWP_MIN_PERF(~0L);
855         value |= HWP_MIN_PERF(min);
856
857         value &= ~HWP_MAX_PERF(~0L);
858         value |= HWP_MAX_PERF(max);
859
860         if (cpu_data->epp_policy == cpu_data->policy)
861                 goto skip_epp;
862
863         cpu_data->epp_policy = cpu_data->policy;
864
865         if (cpu_data->policy == CPUFREQ_POLICY_PERFORMANCE) {
866                 epp = intel_pstate_get_epp(cpu_data, value);
867                 cpu_data->epp_powersave = epp;
868                 /* If the EPP read failed, don't try to write it */
869                 if (epp < 0)
870                         goto skip_epp;
871
872                 epp = 0;
873         } else {
874                 /* Skip setting EPP when the saved value is invalid */
875                 if (cpu_data->epp_powersave < 0)
876                         goto skip_epp;
877
878                 /*
879                  * No need to restore EPP when it is not zero.  This
880                  * means one of the following:
881                  *  - the policy is not changed
882                  *  - the user has changed it manually
883                  *  - there was an error reading the EPB
884                  */
885                 epp = intel_pstate_get_epp(cpu_data, value);
886                 if (epp)
887                         goto skip_epp;
888
889                 epp = cpu_data->epp_powersave;
890         }
891         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
892                 value &= ~GENMASK_ULL(31, 24);
893                 value |= (u64)epp << 24;
894         } else {
895                 intel_pstate_set_epb(cpu, epp);
896         }
897 skip_epp:
898         WRITE_ONCE(cpu_data->hwp_req_cached, value);
899         wrmsrl_on_cpu(cpu, MSR_HWP_REQUEST, value);
900 }
901
902 static void intel_pstate_hwp_offline(struct cpudata *cpu)
903 {
904         u64 value = READ_ONCE(cpu->hwp_req_cached);
905         int min_perf;
906
907         if (boot_cpu_has(X86_FEATURE_HWP_EPP)) {
908                 /*
909                  * In case the EPP has been set to "performance" by the
910                  * active mode "performance" scaling algorithm, replace that
911                  * temporary value with the cached EPP one.
912                  */
913                 value &= ~GENMASK_ULL(31, 24);
914                 value |= HWP_ENERGY_PERF_PREFERENCE(cpu->epp_cached);
915                 WRITE_ONCE(cpu->hwp_req_cached, value);
916         }
917
918         value &= ~GENMASK_ULL(31, 0);
919         min_perf = HWP_LOWEST_PERF(READ_ONCE(cpu->hwp_cap_cached));
920
921         /* Set hwp_max = hwp_min */
922         value |= HWP_MAX_PERF(min_perf);
923         value |= HWP_MIN_PERF(min_perf);
924
925         /* Set EPP to min */
926         if (boot_cpu_has(X86_FEATURE_HWP_EPP))
927                 value |= HWP_ENERGY_PERF_PREFERENCE(HWP_EPP_POWERSAVE);
928
929         wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
930 }
931
932 #define POWER_CTL_EE_ENABLE     1
933 #define POWER_CTL_EE_DISABLE    2
934
935 static int power_ctl_ee_state;
936
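/*
 * The EE bit in MSR_IA32_POWER_CTL disables the energy-efficiency
 * optimization when set, so "enable" here means clearing that bit.
 */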
937 static void set_power_ctl_ee_state(bool input)
938 {
939         u64 power_ctl;
940
941         mutex_lock(&intel_pstate_driver_lock);
942         rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
943         if (input) {
944                 power_ctl &= ~BIT(MSR_IA32_POWER_CTL_BIT_EE);
945                 power_ctl_ee_state = POWER_CTL_EE_ENABLE;
946         } else {
947                 power_ctl |= BIT(MSR_IA32_POWER_CTL_BIT_EE);
948                 power_ctl_ee_state = POWER_CTL_EE_DISABLE;
949         }
950         wrmsrl(MSR_IA32_POWER_CTL, power_ctl);
951         mutex_unlock(&intel_pstate_driver_lock);
952 }
953
954 static void intel_pstate_hwp_enable(struct cpudata *cpudata);
955
956 static void intel_pstate_hwp_reenable(struct cpudata *cpu)
957 {
958         intel_pstate_hwp_enable(cpu);
959         wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, READ_ONCE(cpu->hwp_req_cached));
960 }
961
962 static int intel_pstate_suspend(struct cpufreq_policy *policy)
963 {
964         struct cpudata *cpu = all_cpu_data[policy->cpu];
965
966         pr_debug("CPU %d suspending\n", cpu->cpu);
967
968         cpu->suspended = true;
969
970         return 0;
971 }
972
973 static int intel_pstate_resume(struct cpufreq_policy *policy)
974 {
975         struct cpudata *cpu = all_cpu_data[policy->cpu];
976
977         pr_debug("CPU %d resuming\n", cpu->cpu);
978
979         /* Only restore the EE setting if it was changed from the system default */
980         if (power_ctl_ee_state == POWER_CTL_EE_ENABLE)
981                 set_power_ctl_ee_state(true);
982         else if (power_ctl_ee_state == POWER_CTL_EE_DISABLE)
983                 set_power_ctl_ee_state(false);
984
985         if (cpu->suspended && hwp_active) {
986                 mutex_lock(&intel_pstate_limits_lock);
987
988                 /* Re-enable HWP, because "online" has not done that. */
989                 intel_pstate_hwp_reenable(cpu);
990
991                 mutex_unlock(&intel_pstate_limits_lock);
992         }
993
994         cpu->suspended = false;
995
996         return 0;
997 }
998
999 static void intel_pstate_update_policies(void)
1000 {
1001         int cpu;
1002
1003         for_each_possible_cpu(cpu)
1004                 cpufreq_update_policy(cpu);
1005 }
1006
1007 static void intel_pstate_update_max_freq(unsigned int cpu)
1008 {
1009         struct cpufreq_policy *policy = cpufreq_cpu_acquire(cpu);
1010         struct cpudata *cpudata;
1011
1012         if (!policy)
1013                 return;
1014
1015         cpudata = all_cpu_data[cpu];
1016         policy->cpuinfo.max_freq = global.turbo_disabled_mf ?
1017                         cpudata->pstate.max_freq : cpudata->pstate.turbo_freq;
1018
1019         refresh_frequency_limits(policy);
1020
1021         cpufreq_cpu_release(policy);
1022 }
1023
1024 static void intel_pstate_update_limits(unsigned int cpu)
1025 {
1026         mutex_lock(&intel_pstate_driver_lock);
1027
1028         update_turbo_state();
1029         /*
1030          * If turbo has been turned on or off globally, policy limits for
1031          * all CPUs need to be updated to reflect that.
1032          */
1033         if (global.turbo_disabled_mf != global.turbo_disabled) {
1034                 global.turbo_disabled_mf = global.turbo_disabled;
1035                 arch_set_max_freq_ratio(global.turbo_disabled);
1036                 for_each_possible_cpu(cpu)
1037                         intel_pstate_update_max_freq(cpu);
1038         } else {
1039                 cpufreq_update_policy(cpu);
1040         }
1041
1042         mutex_unlock(&intel_pstate_driver_lock);
1043 }
1044
1045 /************************** sysfs begin ************************/
1046 #define show_one(file_name, object)                                     \
1047         static ssize_t show_##file_name                                 \
1048         (struct kobject *kobj, struct kobj_attribute *attr, char *buf)  \
1049         {                                                               \
1050                 return sprintf(buf, "%u\n", global.object);             \
1051         }
1052
1053 static ssize_t intel_pstate_show_status(char *buf);
1054 static int intel_pstate_update_status(const char *buf, size_t size);
1055
1056 static ssize_t show_status(struct kobject *kobj,
1057                            struct kobj_attribute *attr, char *buf)
1058 {
1059         ssize_t ret;
1060
1061         mutex_lock(&intel_pstate_driver_lock);
1062         ret = intel_pstate_show_status(buf);
1063         mutex_unlock(&intel_pstate_driver_lock);
1064
1065         return ret;
1066 }
1067
1068 static ssize_t store_status(struct kobject *a, struct kobj_attribute *b,
1069                             const char *buf, size_t count)
1070 {
1071         char *p = memchr(buf, '\n', count);
1072         int ret;
1073
1074         mutex_lock(&intel_pstate_driver_lock);
1075         ret = intel_pstate_update_status(buf, p ? p - buf : count);
1076         mutex_unlock(&intel_pstate_driver_lock);
1077
1078         return ret < 0 ? ret : count;
1079 }
1080
1081 static ssize_t show_turbo_pct(struct kobject *kobj,
1082                                 struct kobj_attribute *attr, char *buf)
1083 {
1084         struct cpudata *cpu;
1085         int total, no_turbo, turbo_pct;
1086         uint32_t turbo_fp;
1087
1088         mutex_lock(&intel_pstate_driver_lock);
1089
1090         if (!intel_pstate_driver) {
1091                 mutex_unlock(&intel_pstate_driver_lock);
1092                 return -EAGAIN;
1093         }
1094
1095         cpu = all_cpu_data[0];
1096
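        /*
         * Report the share of the total P-state range that is only reachable
         * with turbo, as a percentage.
         */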
1097         total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1098         no_turbo = cpu->pstate.max_pstate - cpu->pstate.min_pstate + 1;
1099         turbo_fp = div_fp(no_turbo, total);
1100         turbo_pct = 100 - fp_toint(mul_fp(turbo_fp, int_tofp(100)));
1101
1102         mutex_unlock(&intel_pstate_driver_lock);
1103
1104         return sprintf(buf, "%u\n", turbo_pct);
1105 }
1106
1107 static ssize_t show_num_pstates(struct kobject *kobj,
1108                                 struct kobj_attribute *attr, char *buf)
1109 {
1110         struct cpudata *cpu;
1111         int total;
1112
1113         mutex_lock(&intel_pstate_driver_lock);
1114
1115         if (!intel_pstate_driver) {
1116                 mutex_unlock(&intel_pstate_driver_lock);
1117                 return -EAGAIN;
1118         }
1119
1120         cpu = all_cpu_data[0];
1121         total = cpu->pstate.turbo_pstate - cpu->pstate.min_pstate + 1;
1122
1123         mutex_unlock(&intel_pstate_driver_lock);
1124
1125         return sprintf(buf, "%u\n", total);
1126 }
1127
1128 static ssize_t show_no_turbo(struct kobject *kobj,
1129                              struct kobj_attribute *attr, char *buf)
1130 {
1131         ssize_t ret;
1132
1133         mutex_lock(&intel_pstate_driver_lock);
1134
1135         if (!intel_pstate_driver) {
1136                 mutex_unlock(&intel_pstate_driver_lock);
1137                 return -EAGAIN;
1138         }
1139
1140         update_turbo_state();
1141         if (global.turbo_disabled)
1142                 ret = sprintf(buf, "%u\n", global.turbo_disabled);
1143         else
1144                 ret = sprintf(buf, "%u\n", global.no_turbo);
1145
1146         mutex_unlock(&intel_pstate_driver_lock);
1147
1148         return ret;
1149 }
1150
1151 static ssize_t store_no_turbo(struct kobject *a, struct kobj_attribute *b,
1152                               const char *buf, size_t count)
1153 {
1154         unsigned int input;
1155         int ret;
1156
1157         ret = sscanf(buf, "%u", &input);
1158         if (ret != 1)
1159                 return -EINVAL;
1160
1161         mutex_lock(&intel_pstate_driver_lock);
1162
1163         if (!intel_pstate_driver) {
1164                 mutex_unlock(&intel_pstate_driver_lock);
1165                 return -EAGAIN;
1166         }
1167
1168         mutex_lock(&intel_pstate_limits_lock);
1169
1170         update_turbo_state();
1171         if (global.turbo_disabled) {
1172                 pr_notice_once("Turbo disabled by BIOS or unavailable on processor\n");
1173                 mutex_unlock(&intel_pstate_limits_lock);
1174                 mutex_unlock(&intel_pstate_driver_lock);
1175                 return -EPERM;
1176         }
1177
1178         global.no_turbo = clamp_t(int, input, 0, 1);
1179
1180         if (global.no_turbo) {
1181                 struct cpudata *cpu = all_cpu_data[0];
1182                 int pct = cpu->pstate.max_pstate * 100 / cpu->pstate.turbo_pstate;
1183
1184                 /* Squash the global minimum into the permitted range. */
1185                 if (global.min_perf_pct > pct)
1186                         global.min_perf_pct = pct;
1187         }
1188
1189         mutex_unlock(&intel_pstate_limits_lock);
1190
1191         intel_pstate_update_policies();
1192
1193         mutex_unlock(&intel_pstate_driver_lock);
1194
1195         return count;
1196 }
1197
1198 static void update_qos_request(enum freq_qos_req_type type)
1199 {
1200         struct freq_qos_request *req;
1201         struct cpufreq_policy *policy;
1202         int i;
1203
1204         for_each_possible_cpu(i) {
1205                 struct cpudata *cpu = all_cpu_data[i];
1206                 unsigned int freq, perf_pct;
1207
1208                 policy = cpufreq_cpu_get(i);
1209                 if (!policy)
1210                         continue;
1211
1212                 req = policy->driver_data;
1213                 cpufreq_cpu_put(policy);
1214
1215                 if (!req)
1216                         continue;
1217
1218                 if (hwp_active)
1219                         intel_pstate_get_hwp_cap(cpu);
1220
1221                 if (type == FREQ_QOS_MIN) {
1222                         perf_pct = global.min_perf_pct;
1223                 } else {
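                        /* policy->driver_data points at the min request; the max request follows it. */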
1224                         req++;
1225                         perf_pct = global.max_perf_pct;
1226                 }
1227
1228                 freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * perf_pct, 100);
1229
1230                 if (freq_qos_update_request(req, freq) < 0)
1231                         pr_warn("Failed to update freq constraint: CPU%d\n", i);
1232         }
1233 }
1234
1235 static ssize_t store_max_perf_pct(struct kobject *a, struct kobj_attribute *b,
1236                                   const char *buf, size_t count)
1237 {
1238         unsigned int input;
1239         int ret;
1240
1241         ret = sscanf(buf, "%u", &input);
1242         if (ret != 1)
1243                 return -EINVAL;
1244
1245         mutex_lock(&intel_pstate_driver_lock);
1246
1247         if (!intel_pstate_driver) {
1248                 mutex_unlock(&intel_pstate_driver_lock);
1249                 return -EAGAIN;
1250         }
1251
1252         mutex_lock(&intel_pstate_limits_lock);
1253
1254         global.max_perf_pct = clamp_t(int, input, global.min_perf_pct, 100);
1255
1256         mutex_unlock(&intel_pstate_limits_lock);
1257
1258         if (intel_pstate_driver == &intel_pstate)
1259                 intel_pstate_update_policies();
1260         else
1261                 update_qos_request(FREQ_QOS_MAX);
1262
1263         mutex_unlock(&intel_pstate_driver_lock);
1264
1265         return count;
1266 }
1267
1268 static ssize_t store_min_perf_pct(struct kobject *a, struct kobj_attribute *b,
1269                                   const char *buf, size_t count)
1270 {
1271         unsigned int input;
1272         int ret;
1273
1274         ret = sscanf(buf, "%u", &input);
1275         if (ret != 1)
1276                 return -EINVAL;
1277
1278         mutex_lock(&intel_pstate_driver_lock);
1279
1280         if (!intel_pstate_driver) {
1281                 mutex_unlock(&intel_pstate_driver_lock);
1282                 return -EAGAIN;
1283         }
1284
1285         mutex_lock(&intel_pstate_limits_lock);
1286
1287         global.min_perf_pct = clamp_t(int, input,
1288                                       min_perf_pct_min(), global.max_perf_pct);
1289
1290         mutex_unlock(&intel_pstate_limits_lock);
1291
1292         if (intel_pstate_driver == &intel_pstate)
1293                 intel_pstate_update_policies();
1294         else
1295                 update_qos_request(FREQ_QOS_MIN);
1296
1297         mutex_unlock(&intel_pstate_driver_lock);
1298
1299         return count;
1300 }
1301
1302 static ssize_t show_hwp_dynamic_boost(struct kobject *kobj,
1303                                 struct kobj_attribute *attr, char *buf)
1304 {
1305         return sprintf(buf, "%u\n", hwp_boost);
1306 }
1307
1308 static ssize_t store_hwp_dynamic_boost(struct kobject *a,
1309                                        struct kobj_attribute *b,
1310                                        const char *buf, size_t count)
1311 {
1312         unsigned int input;
1313         int ret;
1314
1315         ret = kstrtouint(buf, 10, &input);
1316         if (ret)
1317                 return ret;
1318
1319         mutex_lock(&intel_pstate_driver_lock);
1320         hwp_boost = !!input;
1321         intel_pstate_update_policies();
1322         mutex_unlock(&intel_pstate_driver_lock);
1323
1324         return count;
1325 }
1326
1327 static ssize_t show_energy_efficiency(struct kobject *kobj, struct kobj_attribute *attr,
1328                                       char *buf)
1329 {
1330         u64 power_ctl;
1331         int enable;
1332
1333         rdmsrl(MSR_IA32_POWER_CTL, power_ctl);
1334         enable = !!(power_ctl & BIT(MSR_IA32_POWER_CTL_BIT_EE));
1335         return sprintf(buf, "%d\n", !enable);
1336 }
1337
1338 static ssize_t store_energy_efficiency(struct kobject *a, struct kobj_attribute *b,
1339                                        const char *buf, size_t count)
1340 {
1341         bool input;
1342         int ret;
1343
1344         ret = kstrtobool(buf, &input);
1345         if (ret)
1346                 return ret;
1347
1348         set_power_ctl_ee_state(input);
1349
1350         return count;
1351 }
1352
1353 show_one(max_perf_pct, max_perf_pct);
1354 show_one(min_perf_pct, min_perf_pct);
1355
1356 define_one_global_rw(status);
1357 define_one_global_rw(no_turbo);
1358 define_one_global_rw(max_perf_pct);
1359 define_one_global_rw(min_perf_pct);
1360 define_one_global_ro(turbo_pct);
1361 define_one_global_ro(num_pstates);
1362 define_one_global_rw(hwp_dynamic_boost);
1363 define_one_global_rw(energy_efficiency);
1364
1365 static struct attribute *intel_pstate_attributes[] = {
1366         &status.attr,
1367         &no_turbo.attr,
1368         NULL
1369 };
1370
1371 static const struct attribute_group intel_pstate_attr_group = {
1372         .attrs = intel_pstate_attributes,
1373 };
1374
1375 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[];
1376
1377 static struct kobject *intel_pstate_kobject;
1378
1379 static void __init intel_pstate_sysfs_expose_params(void)
1380 {
1381         int rc;
1382
1383         intel_pstate_kobject = kobject_create_and_add("intel_pstate",
1384                                                 &cpu_subsys.dev_root->kobj);
1385         if (WARN_ON(!intel_pstate_kobject))
1386                 return;
1387
1388         rc = sysfs_create_group(intel_pstate_kobject, &intel_pstate_attr_group);
1389         if (WARN_ON(rc))
1390                 return;
1391
1392         if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1393                 rc = sysfs_create_file(intel_pstate_kobject, &turbo_pct.attr);
1394                 WARN_ON(rc);
1395
1396                 rc = sysfs_create_file(intel_pstate_kobject, &num_pstates.attr);
1397                 WARN_ON(rc);
1398         }
1399
1400         /*
1401          * If per-CPU limits are enforced, there are no global limits, so
1402          * return without creating the max/min_perf_pct attributes.
1403          */
1404         if (per_cpu_limits)
1405                 return;
1406
1407         rc = sysfs_create_file(intel_pstate_kobject, &max_perf_pct.attr);
1408         WARN_ON(rc);
1409
1410         rc = sysfs_create_file(intel_pstate_kobject, &min_perf_pct.attr);
1411         WARN_ON(rc);
1412
1413         if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids)) {
1414                 rc = sysfs_create_file(intel_pstate_kobject, &energy_efficiency.attr);
1415                 WARN_ON(rc);
1416         }
1417 }
1418
1419 static void __init intel_pstate_sysfs_remove(void)
1420 {
1421         if (!intel_pstate_kobject)
1422                 return;
1423
1424         sysfs_remove_group(intel_pstate_kobject, &intel_pstate_attr_group);
1425
1426         if (!boot_cpu_has(X86_FEATURE_HYBRID_CPU)) {
1427                 sysfs_remove_file(intel_pstate_kobject, &num_pstates.attr);
1428                 sysfs_remove_file(intel_pstate_kobject, &turbo_pct.attr);
1429         }
1430
1431         if (!per_cpu_limits) {
1432                 sysfs_remove_file(intel_pstate_kobject, &max_perf_pct.attr);
1433                 sysfs_remove_file(intel_pstate_kobject, &min_perf_pct.attr);
1434
1435                 if (x86_match_cpu(intel_pstate_cpu_ee_disable_ids))
1436                         sysfs_remove_file(intel_pstate_kobject, &energy_efficiency.attr);
1437         }
1438
1439         kobject_put(intel_pstate_kobject);
1440 }
1441
1442 static void intel_pstate_sysfs_expose_hwp_dynamic_boost(void)
1443 {
1444         int rc;
1445
1446         if (!hwp_active)
1447                 return;
1448
1449         rc = sysfs_create_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1450         WARN_ON_ONCE(rc);
1451 }
1452
1453 static void intel_pstate_sysfs_hide_hwp_dynamic_boost(void)
1454 {
1455         if (!hwp_active)
1456                 return;
1457
1458         sysfs_remove_file(intel_pstate_kobject, &hwp_dynamic_boost.attr);
1459 }
1460
1461 /************************** sysfs end ************************/
1462
1463 static void intel_pstate_hwp_enable(struct cpudata *cpudata)
1464 {
1465         /* First disable HWP notification interrupts, since we don't process them */
1466         if (boot_cpu_has(X86_FEATURE_HWP_NOTIFY))
1467                 wrmsrl_on_cpu(cpudata->cpu, MSR_HWP_INTERRUPT, 0x00);
1468
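        /* Enable HWP; once enabled, it stays enabled until the next reset. */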
1469         wrmsrl_on_cpu(cpudata->cpu, MSR_PM_ENABLE, 0x1);
1470         if (cpudata->epp_default == -EINVAL)
1471                 cpudata->epp_default = intel_pstate_get_epp(cpudata, 0);
1472 }
1473
1474 static int atom_get_min_pstate(void)
1475 {
1476         u64 value;
1477
1478         rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1479         return (value >> 8) & 0x7F;
1480 }
1481
1482 static int atom_get_max_pstate(void)
1483 {
1484         u64 value;
1485
1486         rdmsrl(MSR_ATOM_CORE_RATIOS, value);
1487         return (value >> 16) & 0x7F;
1488 }
1489
1490 static int atom_get_turbo_pstate(void)
1491 {
1492         u64 value;
1493
1494         rdmsrl(MSR_ATOM_CORE_TURBO_RATIOS, value);
1495         return value & 0x7F;
1496 }
1497
1498 static u64 atom_get_val(struct cpudata *cpudata, int pstate)
1499 {
1500         u64 val;
1501         int32_t vid_fp;
1502         u32 vid;
1503
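        /*
         * Build the PERF_CTL value: the target ratio goes in bits 15:8, bit 32
         * disengages turbo when "no_turbo" is set, and the VID (interpolated
         * linearly between vid.min and vid.max) goes in the low byte.
         */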
1504         val = (u64)pstate << 8;
1505         if (global.no_turbo && !global.turbo_disabled)
1506                 val |= (u64)1 << 32;
1507
1508         vid_fp = cpudata->vid.min + mul_fp(
1509                 int_tofp(pstate - cpudata->pstate.min_pstate),
1510                 cpudata->vid.ratio);
1511
1512         vid_fp = clamp_t(int32_t, vid_fp, cpudata->vid.min, cpudata->vid.max);
1513         vid = ceiling_fp(vid_fp);
1514
1515         if (pstate > cpudata->pstate.max_pstate)
1516                 vid = cpudata->vid.turbo;
1517
1518         return val | vid;
1519 }
1520
1521 static int silvermont_get_scaling(void)
1522 {
1523         u64 value;
1524         int i;
1525         /* Defined in Table 35-6 from SDM (Sept 2015) */
1526         static int silvermont_freq_table[] = {
1527                 83300, 100000, 133300, 116700, 80000};
1528
1529         rdmsrl(MSR_FSB_FREQ, value);
1530         i = value & 0x7;
1531         WARN_ON(i > 4);
1532
1533         return silvermont_freq_table[i];
1534 }
1535
1536 static int airmont_get_scaling(void)
1537 {
1538         u64 value;
1539         int i;
1540         /* Defined in Table 35-10 from SDM (Sept 2015) */
1541         static int airmont_freq_table[] = {
1542                 83300, 100000, 133300, 116700, 80000,
1543                 93300, 90000, 88900, 87500};
1544
1545         rdmsrl(MSR_FSB_FREQ, value);
1546         i = value & 0xF;
1547         WARN_ON(i > 8);
1548
1549         return airmont_freq_table[i];
1550 }
1551
1552 static void atom_get_vid(struct cpudata *cpudata)
1553 {
1554         u64 value;
1555
1556         rdmsrl(MSR_ATOM_CORE_VIDS, value);
1557         cpudata->vid.min = int_tofp((value >> 8) & 0x7f);
1558         cpudata->vid.max = int_tofp((value >> 16) & 0x7f);
1559         cpudata->vid.ratio = div_fp(
1560                 cpudata->vid.max - cpudata->vid.min,
1561                 int_tofp(cpudata->pstate.max_pstate -
1562                         cpudata->pstate.min_pstate));
1563
1564         rdmsrl(MSR_ATOM_CORE_TURBO_VIDS, value);
1565         cpudata->vid.turbo = value & 0x7f;
1566 }
1567
1568 static int core_get_min_pstate(void)
1569 {
1570         u64 value;
1571
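        /* Bits 47:40 of MSR_PLATFORM_INFO hold the maximum efficiency ratio. */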
1572         rdmsrl(MSR_PLATFORM_INFO, value);
1573         return (value >> 40) & 0xFF;
1574 }
1575
1576 static int core_get_max_pstate_physical(void)
1577 {
1578         u64 value;
1579
1580         rdmsrl(MSR_PLATFORM_INFO, value);
1581         return (value >> 8) & 0xFF;
1582 }
1583
1584 static int core_get_tdp_ratio(u64 plat_info)
1585 {
1586         /* Check how many TDP levels are present */
1587         if (plat_info & 0x600000000) {
1588                 u64 tdp_ctrl;
1589                 u64 tdp_ratio;
1590                 int tdp_msr;
1591                 int err;
1592
1593                 /* Get the TDP level (0, 1, 2) to get ratios */
1594                 err = rdmsrl_safe(MSR_CONFIG_TDP_CONTROL, &tdp_ctrl);
1595                 if (err)
1596                         return err;
1597
1598                 /* The TDP MSRs are contiguous, starting at 0x648 */
1599                 tdp_msr = MSR_CONFIG_TDP_NOMINAL + (tdp_ctrl & 0x03);
1600                 err = rdmsrl_safe(tdp_msr, &tdp_ratio);
1601                 if (err)
1602                         return err;
1603
1604                 /* For level 1 and 2, bits[23:16] contain the ratio */
1605                 if (tdp_ctrl & 0x03)
1606                         tdp_ratio >>= 16;
1607
1608                 tdp_ratio &= 0xff; /* ratios are only 8 bits long */
1609                 pr_debug("tdp_ratio %x\n", (int)tdp_ratio);
1610
1611                 return (int)tdp_ratio;
1612         }
1613
1614         return -ENXIO;
1615 }
1616
1617 static int core_get_max_pstate(void)
1618 {
1619         u64 tar;
1620         u64 plat_info;
1621         int max_pstate;
1622         int tdp_ratio;
1623         int err;
1624
1625         rdmsrl(MSR_PLATFORM_INFO, plat_info);
1626         max_pstate = (plat_info >> 8) & 0xFF;
1627
1628         tdp_ratio = core_get_tdp_ratio(plat_info);
1629         if (tdp_ratio <= 0)
1630                 return max_pstate;
1631
1632         if (hwp_active) {
1633                 /* Turbo activation ratio is not used on HWP platforms */
1634                 return tdp_ratio;
1635         }
1636
1637         err = rdmsrl_safe(MSR_TURBO_ACTIVATION_RATIO, &tar);
1638         if (!err) {
1639                 int tar_levels;
1640
1641                 /* Only trust TAR if it is consistent with the config-TDP ratio */
1642                 tar_levels = tar & 0xff;
1643                 if (tdp_ratio - 1 == tar_levels) {
1644                         max_pstate = tar_levels;
1645                         pr_debug("max_pstate=TAC %x\n", max_pstate);
1646                 }
1647         }
1648
1649         return max_pstate;
1650 }
1651
1652 static int core_get_turbo_pstate(void)
1653 {
1654         u64 value;
1655         int nont, ret;
1656
1657         rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1658         nont = core_get_max_pstate();
1659         ret = (value) & 255;
1660         if (ret <= nont)
1661                 ret = nont;
1662         return ret;
1663 }
1664
1665 static inline int core_get_scaling(void)
1666 {
1667         return 100000;
1668 }
1669
1670 static u64 core_get_val(struct cpudata *cpudata, int pstate)
1671 {
1672         u64 val;
1673
1674         val = (u64)pstate << 8;
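        /*
         * Bit 32 of IA32_PERF_CTL is the turbo disengage bit: set it when
         * turbo has been disabled via sysfs but is not disabled by the
         * platform itself.
         */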
1675         if (global.no_turbo && !global.turbo_disabled)
1676                 val |= (u64)1 << 32;
1677
1678         return val;
1679 }
1680
1681 static int knl_get_aperf_mperf_shift(void)
1682 {
1683         return 10;
1684 }
1685
1686 static int knl_get_turbo_pstate(void)
1687 {
1688         u64 value;
1689         int nont, ret;
1690
1691         rdmsrl(MSR_TURBO_RATIO_LIMIT, value);
1692         nont = core_get_max_pstate();
1693         ret = (((value) >> 8) & 0xFF);
1694         if (ret <= nont)
1695                 ret = nont;
1696         return ret;
1697 }
1698
1699 static void intel_pstate_set_pstate(struct cpudata *cpu, int pstate)
1700 {
1701         trace_cpu_frequency(pstate * cpu->pstate.scaling, cpu->cpu);
1702         cpu->pstate.current_pstate = pstate;
1703         /*
1704          * Generally, there is no guarantee that this code will always run on
1705          * the CPU being updated, so force the register update to run on the
1706          * right CPU.
1707          */
1708         wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
1709                       pstate_funcs.get_val(cpu, pstate));
1710 }
1711
1712 static void intel_pstate_set_min_pstate(struct cpudata *cpu)
1713 {
1714         intel_pstate_set_pstate(cpu, cpu->pstate.min_pstate);
1715 }
1716
1717 static void intel_pstate_max_within_limits(struct cpudata *cpu)
1718 {
1719         int pstate = max(cpu->pstate.min_pstate, cpu->max_perf_ratio);
1720
1721         update_turbo_state();
1722         intel_pstate_set_pstate(cpu, pstate);
1723 }
1724
1725 static void intel_pstate_get_cpu_pstates(struct cpudata *cpu)
1726 {
1727         cpu->pstate.min_pstate = pstate_funcs.get_min();
1728         cpu->pstate.max_pstate_physical = pstate_funcs.get_max_physical();
1729         cpu->pstate.scaling = pstate_funcs.get_scaling();
1730
1731         if (hwp_active && !hwp_mode_bdw) {
1732                 __intel_pstate_get_hwp_cap(cpu);
1733         } else {
1734                 cpu->pstate.max_pstate = pstate_funcs.get_max();
1735                 cpu->pstate.turbo_pstate = pstate_funcs.get_turbo();
1736         }
1737
1738         cpu->pstate.max_freq = cpu->pstate.max_pstate * cpu->pstate.scaling;
1739         cpu->pstate.turbo_freq = cpu->pstate.turbo_pstate * cpu->pstate.scaling;
1740
1741         if (pstate_funcs.get_aperf_mperf_shift)
1742                 cpu->aperf_mperf_shift = pstate_funcs.get_aperf_mperf_shift();
1743
1744         if (pstate_funcs.get_vid)
1745                 pstate_funcs.get_vid(cpu);
1746
1747         intel_pstate_set_min_pstate(cpu);
1748 }
1749
1750 /*
1751  * A long hold time keeps the high performance limits in place for a
1752  * long time, which negatively impacts perf/watt for some workloads,
1753  * like specpower. 3 ms is based on experiments with some
1754  * workloads.
1755  */
1756 static int hwp_boost_hold_time_ns = 3 * NSEC_PER_MSEC;
1757
1758 static inline void intel_pstate_hwp_boost_up(struct cpudata *cpu)
1759 {
1760         u64 hwp_req = READ_ONCE(cpu->hwp_req_cached);
1761         u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
1762         u32 max_limit = (hwp_req & 0xff00) >> 8;
1763         u32 min_limit = (hwp_req & 0xff);
1764         u32 boost_level1;
1765
1766         /*
1767          * Cases to consider (User changes via sysfs or boot time):
1768          * If, P0 (Turbo max) = P1 (Guaranteed max) = min:
1769          *      No boost, return.
1770          * If, P0 (Turbo max) > P1 (Guaranteed max) = min:
1771          *     Should result in one level boost only for P0.
1772          * If, P0 (Turbo max) = P1 (Guaranteed max) > min:
1773          *     Should result in two level boost:
1774          *         (min + p1)/2 and P1.
1775          * If, P0 (Turbo max) > P1 (Guaranteed max) > min:
1776          *     Should result in three level boost:
1777          *        (min + p1)/2, P1 and P0.
1778          */
1779
1780         /* If max and min are equal or already at max, nothing to boost */
1781         if (max_limit == min_limit || cpu->hwp_boost_min >= max_limit)
1782                 return;
1783
1784         if (!cpu->hwp_boost_min)
1785                 cpu->hwp_boost_min = min_limit;
1786
1787         /* boost level 1 is the halfway mark between min and guaranteed */
1788         boost_level1 = (HWP_GUARANTEED_PERF(hwp_cap) + min_limit) >> 1;
1789
1790         if (cpu->hwp_boost_min < boost_level1)
1791                 cpu->hwp_boost_min = boost_level1;
1792         else if (cpu->hwp_boost_min < HWP_GUARANTEED_PERF(hwp_cap))
1793                 cpu->hwp_boost_min = HWP_GUARANTEED_PERF(hwp_cap);
1794         else if (cpu->hwp_boost_min == HWP_GUARANTEED_PERF(hwp_cap) &&
1795                  max_limit != HWP_GUARANTEED_PERF(hwp_cap))
1796                 cpu->hwp_boost_min = max_limit;
1797         else
1798                 return;
1799
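        /*
         * Bits 7:0 of HWP_REQUEST hold the minimum performance level;
         * replace them with the new boost floor computed above.
         */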
1800         hwp_req = (hwp_req & ~GENMASK_ULL(7, 0)) | cpu->hwp_boost_min;
1801         wrmsrl(MSR_HWP_REQUEST, hwp_req);
1802         cpu->last_update = cpu->sample.time;
1803 }
1804
1805 static inline void intel_pstate_hwp_boost_down(struct cpudata *cpu)
1806 {
1807         if (cpu->hwp_boost_min) {
1808                 bool expired;
1809
1810                 /* Check whether we have been idle for the hold time before boosting down */
1811                 expired = time_after64(cpu->sample.time, cpu->last_update +
1812                                        hwp_boost_hold_time_ns);
1813                 if (expired) {
1814                         wrmsrl(MSR_HWP_REQUEST, cpu->hwp_req_cached);
1815                         cpu->hwp_boost_min = 0;
1816                 }
1817         }
1818         cpu->last_update = cpu->sample.time;
1819 }
1820
1821 static inline void intel_pstate_update_util_hwp_local(struct cpudata *cpu,
1822                                                       u64 time)
1823 {
1824         cpu->sample.time = time;
1825
1826         if (cpu->sched_flags & SCHED_CPUFREQ_IOWAIT) {
1827                 bool do_io = false;
1828
1829                 cpu->sched_flags = 0;
1830                 /*
1831                  * Set the iowait_boost flag and update the time. Since the
1832                  * IO WAIT flag is set all the time, a single occurrence is not
1833                  * enough to conclude that IO-bound activity is scheduled on
1834                  * this CPU. Only if at least two occurrences arrive within two
1835                  * consecutive ticks is the CPU treated as a boost candidate.
1836                  */
1837                 if (time_before64(time, cpu->last_io_update + 2 * TICK_NSEC))
1838                         do_io = true;
1839
1840                 cpu->last_io_update = time;
1841
1842                 if (do_io)
1843                         intel_pstate_hwp_boost_up(cpu);
1844
1845         } else {
1846                 intel_pstate_hwp_boost_down(cpu);
1847         }
1848 }
1849
1850 static inline void intel_pstate_update_util_hwp(struct update_util_data *data,
1851                                                 u64 time, unsigned int flags)
1852 {
1853         struct cpudata *cpu = container_of(data, struct cpudata, update_util);
1854
1855         cpu->sched_flags |= flags;
1856
1857         if (smp_processor_id() == cpu->cpu)
1858                 intel_pstate_update_util_hwp_local(cpu, time);
1859 }
1860
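/*
 * core_avg_perf is the EXT_FRAC_BITS fixed-point ratio of the APERF and MPERF
 * deltas from the last sample; get_avg_frequency() scales it by cpu_khz and
 * get_avg_pstate() by max_pstate_physical to obtain the average frequency and
 * P-state over the sampling interval.
 */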
1861 static inline void intel_pstate_calc_avg_perf(struct cpudata *cpu)
1862 {
1863         struct sample *sample = &cpu->sample;
1864
1865         sample->core_avg_perf = div_ext_fp(sample->aperf, sample->mperf);
1866 }
1867
1868 static inline bool intel_pstate_sample(struct cpudata *cpu, u64 time)
1869 {
1870         u64 aperf, mperf;
1871         unsigned long flags;
1872         u64 tsc;
1873
1874         local_irq_save(flags);
1875         rdmsrl(MSR_IA32_APERF, aperf);
1876         rdmsrl(MSR_IA32_MPERF, mperf);
1877         tsc = rdtsc();
1878         if (cpu->prev_mperf == mperf || cpu->prev_tsc == tsc) {
1879                 local_irq_restore(flags);
1880                 return false;
1881         }
1882         local_irq_restore(flags);
1883
1884         cpu->last_sample_time = cpu->sample.time;
1885         cpu->sample.time = time;
1886         cpu->sample.aperf = aperf;
1887         cpu->sample.mperf = mperf;
1888         cpu->sample.tsc = tsc;
1889         cpu->sample.aperf -= cpu->prev_aperf;
1890         cpu->sample.mperf -= cpu->prev_mperf;
1891         cpu->sample.tsc -= cpu->prev_tsc;
1892
1893         cpu->prev_aperf = aperf;
1894         cpu->prev_mperf = mperf;
1895         cpu->prev_tsc = tsc;
1896         /*
1897          * The first time this function is invoked in a given cycle, all of
1898          * the previous sample data fields are zero or stale and must be
1899          * populated with meaningful numbers for things to work, so assume
1900          * that sample.time is always reset before setting the utilization
1901          * update hook and make the caller skip the sample then.
1902          */
1903         if (cpu->last_sample_time) {
1904                 intel_pstate_calc_avg_perf(cpu);
1905                 return true;
1906         }
1907         return false;
1908 }
1909
1910 static inline int32_t get_avg_frequency(struct cpudata *cpu)
1911 {
1912         return mul_ext_fp(cpu->sample.core_avg_perf, cpu_khz);
1913 }
1914
1915 static inline int32_t get_avg_pstate(struct cpudata *cpu)
1916 {
1917         return mul_ext_fp(cpu->pstate.max_pstate_physical,
1918                           cpu->sample.core_avg_perf);
1919 }
1920
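/*
 * Illustrative example of the computation below (numbers not taken from any
 * specific platform): with turbo_pstate = 40 and busy_frac = 0.5, the target
 * is (40 + 40/4) * 0.5 = 25, i.e. the busy fraction is applied to the maximum
 * available P-state plus 25% headroom.
 */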
1921 static inline int32_t get_target_pstate(struct cpudata *cpu)
1922 {
1923         struct sample *sample = &cpu->sample;
1924         int32_t busy_frac;
1925         int target, avg_pstate;
1926
1927         busy_frac = div_fp(sample->mperf << cpu->aperf_mperf_shift,
1928                            sample->tsc);
1929
1930         if (busy_frac < cpu->iowait_boost)
1931                 busy_frac = cpu->iowait_boost;
1932
1933         sample->busy_scaled = busy_frac * 100;
1934
1935         target = global.no_turbo || global.turbo_disabled ?
1936                         cpu->pstate.max_pstate : cpu->pstate.turbo_pstate;
1937         target += target >> 2;
1938         target = mul_fp(target, busy_frac);
1939         if (target < cpu->pstate.min_pstate)
1940                 target = cpu->pstate.min_pstate;
1941
1942         /*
1943          * If the average P-state during the previous cycle was higher than the
1944          * current target, add 50% of the difference to the target to reduce
1945          * possible performance oscillations and offset possible performance
1946          * loss related to moving the workload from one CPU to another within
1947          * a package/module.
1948          */
1949         avg_pstate = get_avg_pstate(cpu);
1950         if (avg_pstate > target)
1951                 target += (avg_pstate - target) >> 1;
1952
1953         return target;
1954 }
1955
1956 static int intel_pstate_prepare_request(struct cpudata *cpu, int pstate)
1957 {
1958         int min_pstate = max(cpu->pstate.min_pstate, cpu->min_perf_ratio);
1959         int max_pstate = max(min_pstate, cpu->max_perf_ratio);
1960
1961         return clamp_t(int, pstate, min_pstate, max_pstate);
1962 }
1963
1964 static void intel_pstate_update_pstate(struct cpudata *cpu, int pstate)
1965 {
1966         if (pstate == cpu->pstate.current_pstate)
1967                 return;
1968
1969         cpu->pstate.current_pstate = pstate;
1970         wrmsrl(MSR_IA32_PERF_CTL, pstate_funcs.get_val(cpu, pstate));
1971 }
1972
1973 static void intel_pstate_adjust_pstate(struct cpudata *cpu)
1974 {
1975         int from = cpu->pstate.current_pstate;
1976         struct sample *sample;
1977         int target_pstate;
1978
1979         update_turbo_state();
1980
1981         target_pstate = get_target_pstate(cpu);
1982         target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
1983         trace_cpu_frequency(target_pstate * cpu->pstate.scaling, cpu->cpu);
1984         intel_pstate_update_pstate(cpu, target_pstate);
1985
1986         sample = &cpu->sample;
1987         trace_pstate_sample(mul_ext_fp(100, sample->core_avg_perf),
1988                 fp_toint(sample->busy_scaled),
1989                 from,
1990                 cpu->pstate.current_pstate,
1991                 sample->mperf,
1992                 sample->aperf,
1993                 sample->tsc,
1994                 get_avg_frequency(cpu),
1995                 fp_toint(cpu->iowait_boost * 100));
1996 }
1997
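/*
 * The I/O wait boost ramps geometrically: it starts at 1/8 (ONE_EIGHTH_FP),
 * doubles on each iowait wakeup that arrives within a tick, saturates at 1,
 * and is halved (or cleared after an idle tick) otherwise.
 * get_target_pstate() uses it as a floor for busy_frac.
 */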
1998 static void intel_pstate_update_util(struct update_util_data *data, u64 time,
1999                                      unsigned int flags)
2000 {
2001         struct cpudata *cpu = container_of(data, struct cpudata, update_util);
2002         u64 delta_ns;
2003
2004         /* Don't allow remote callbacks */
2005         if (smp_processor_id() != cpu->cpu)
2006                 return;
2007
2008         delta_ns = time - cpu->last_update;
2009         if (flags & SCHED_CPUFREQ_IOWAIT) {
2010                 /* Start over if the CPU may have been idle. */
2011                 if (delta_ns > TICK_NSEC) {
2012                         cpu->iowait_boost = ONE_EIGHTH_FP;
2013                 } else if (cpu->iowait_boost >= ONE_EIGHTH_FP) {
2014                         cpu->iowait_boost <<= 1;
2015                         if (cpu->iowait_boost > int_tofp(1))
2016                                 cpu->iowait_boost = int_tofp(1);
2017                 } else {
2018                         cpu->iowait_boost = ONE_EIGHTH_FP;
2019                 }
2020         } else if (cpu->iowait_boost) {
2021                 /* Clear iowait_boost if the CPU may have been idle. */
2022                 if (delta_ns > TICK_NSEC)
2023                         cpu->iowait_boost = 0;
2024                 else
2025                         cpu->iowait_boost >>= 1;
2026         }
2027         cpu->last_update = time;
2028         delta_ns = time - cpu->sample.time;
2029         if ((s64)delta_ns < INTEL_PSTATE_SAMPLING_INTERVAL)
2030                 return;
2031
2032         if (intel_pstate_sample(cpu, time))
2033                 intel_pstate_adjust_pstate(cpu);
2034 }
2035
2036 static struct pstate_funcs core_funcs = {
2037         .get_max = core_get_max_pstate,
2038         .get_max_physical = core_get_max_pstate_physical,
2039         .get_min = core_get_min_pstate,
2040         .get_turbo = core_get_turbo_pstate,
2041         .get_scaling = core_get_scaling,
2042         .get_val = core_get_val,
2043 };
2044
2045 static const struct pstate_funcs silvermont_funcs = {
2046         .get_max = atom_get_max_pstate,
2047         .get_max_physical = atom_get_max_pstate,
2048         .get_min = atom_get_min_pstate,
2049         .get_turbo = atom_get_turbo_pstate,
2050         .get_val = atom_get_val,
2051         .get_scaling = silvermont_get_scaling,
2052         .get_vid = atom_get_vid,
2053 };
2054
2055 static const struct pstate_funcs airmont_funcs = {
2056         .get_max = atom_get_max_pstate,
2057         .get_max_physical = atom_get_max_pstate,
2058         .get_min = atom_get_min_pstate,
2059         .get_turbo = atom_get_turbo_pstate,
2060         .get_val = atom_get_val,
2061         .get_scaling = airmont_get_scaling,
2062         .get_vid = atom_get_vid,
2063 };
2064
2065 static const struct pstate_funcs knl_funcs = {
2066         .get_max = core_get_max_pstate,
2067         .get_max_physical = core_get_max_pstate_physical,
2068         .get_min = core_get_min_pstate,
2069         .get_turbo = knl_get_turbo_pstate,
2070         .get_aperf_mperf_shift = knl_get_aperf_mperf_shift,
2071         .get_scaling = core_get_scaling,
2072         .get_val = core_get_val,
2073 };
2074
2075 #define X86_MATCH(model, policy)                                         \
2076         X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
2077                                            X86_FEATURE_APERFMPERF, &policy)
2078
2079 static const struct x86_cpu_id intel_pstate_cpu_ids[] = {
2080         X86_MATCH(SANDYBRIDGE,          core_funcs),
2081         X86_MATCH(SANDYBRIDGE_X,        core_funcs),
2082         X86_MATCH(ATOM_SILVERMONT,      silvermont_funcs),
2083         X86_MATCH(IVYBRIDGE,            core_funcs),
2084         X86_MATCH(HASWELL,              core_funcs),
2085         X86_MATCH(BROADWELL,            core_funcs),
2086         X86_MATCH(IVYBRIDGE_X,          core_funcs),
2087         X86_MATCH(HASWELL_X,            core_funcs),
2088         X86_MATCH(HASWELL_L,            core_funcs),
2089         X86_MATCH(HASWELL_G,            core_funcs),
2090         X86_MATCH(BROADWELL_G,          core_funcs),
2091         X86_MATCH(ATOM_AIRMONT,         airmont_funcs),
2092         X86_MATCH(SKYLAKE_L,            core_funcs),
2093         X86_MATCH(BROADWELL_X,          core_funcs),
2094         X86_MATCH(SKYLAKE,              core_funcs),
2095         X86_MATCH(BROADWELL_D,          core_funcs),
2096         X86_MATCH(XEON_PHI_KNL,         knl_funcs),
2097         X86_MATCH(XEON_PHI_KNM,         knl_funcs),
2098         X86_MATCH(ATOM_GOLDMONT,        core_funcs),
2099         X86_MATCH(ATOM_GOLDMONT_PLUS,   core_funcs),
2100         X86_MATCH(SKYLAKE_X,            core_funcs),
2101         {}
2102 };
2103 MODULE_DEVICE_TABLE(x86cpu, intel_pstate_cpu_ids);
2104
2105 static const struct x86_cpu_id intel_pstate_cpu_oob_ids[] __initconst = {
2106         X86_MATCH(BROADWELL_D,          core_funcs),
2107         X86_MATCH(BROADWELL_X,          core_funcs),
2108         X86_MATCH(SKYLAKE_X,            core_funcs),
2109         {}
2110 };
2111
2112 static const struct x86_cpu_id intel_pstate_cpu_ee_disable_ids[] = {
2113         X86_MATCH(KABYLAKE,             core_funcs),
2114         {}
2115 };
2116
2117 static const struct x86_cpu_id intel_pstate_hwp_boost_ids[] = {
2118         X86_MATCH(SKYLAKE_X,            core_funcs),
2119         X86_MATCH(SKYLAKE,              core_funcs),
2120         {}
2121 };
2122
2123 static int intel_pstate_init_cpu(unsigned int cpunum)
2124 {
2125         struct cpudata *cpu;
2126
2127         cpu = all_cpu_data[cpunum];
2128
2129         if (!cpu) {
2130                 cpu = kzalloc(sizeof(*cpu), GFP_KERNEL);
2131                 if (!cpu)
2132                         return -ENOMEM;
2133
2134                 all_cpu_data[cpunum] = cpu;
2135
2136                 cpu->cpu = cpunum;
2137
2138                 cpu->epp_default = -EINVAL;
2139
2140                 if (hwp_active) {
2141                         const struct x86_cpu_id *id;
2142
2143                         intel_pstate_hwp_enable(cpu);
2144
2145                         id = x86_match_cpu(intel_pstate_hwp_boost_ids);
2146                         if (id && intel_pstate_acpi_pm_profile_server())
2147                                 hwp_boost = true;
2148                 }
2149         } else if (hwp_active) {
2150                 /*
2151                  * Re-enable HWP in case this happens after a resume from ACPI
2152                  * S3 if the CPU was offline during the whole system
2153                  * suspend/resume cycle.
2154                  */
2155                 intel_pstate_hwp_reenable(cpu);
2156         }
2157
2158         cpu->epp_powersave = -EINVAL;
2159         cpu->epp_policy = 0;
2160
2161         intel_pstate_get_cpu_pstates(cpu);
2162
2163         pr_debug("controlling: cpu %d\n", cpunum);
2164
2165         return 0;
2166 }
2167
2168 static void intel_pstate_set_update_util_hook(unsigned int cpu_num)
2169 {
2170         struct cpudata *cpu = all_cpu_data[cpu_num];
2171
2172         if (hwp_active && !hwp_boost)
2173                 return;
2174
2175         if (cpu->update_util_set)
2176                 return;
2177
2178         /* Prevent intel_pstate_update_util() from using stale data. */
2179         cpu->sample.time = 0;
2180         cpufreq_add_update_util_hook(cpu_num, &cpu->update_util,
2181                                      (hwp_active ?
2182                                       intel_pstate_update_util_hwp :
2183                                       intel_pstate_update_util));
2184         cpu->update_util_set = true;
2185 }
2186
2187 static void intel_pstate_clear_update_util_hook(unsigned int cpu)
2188 {
2189         struct cpudata *cpu_data = all_cpu_data[cpu];
2190
2191         if (!cpu_data->update_util_set)
2192                 return;
2193
2194         cpufreq_remove_update_util_hook(cpu);
2195         cpu_data->update_util_set = false;
2196         synchronize_rcu();
2197 }
2198
2199 static int intel_pstate_get_max_freq(struct cpudata *cpu)
2200 {
2201         return global.turbo_disabled || global.no_turbo ?
2202                         cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2203 }
2204
2205 static void intel_pstate_update_perf_limits(struct cpudata *cpu,
2206                                             unsigned int policy_min,
2207                                             unsigned int policy_max)
2208 {
2209         int scaling = cpu->pstate.scaling;
2210         int32_t max_policy_perf, min_policy_perf;
2211
2212         /*
2213          * HWP needs some special consideration, because HWP_REQUEST uses
2214          * abstract values to represent performance rather than pure ratios.
2215          */
2216         if (hwp_active)
2217                 intel_pstate_get_hwp_cap(cpu);
2218
2219         max_policy_perf = policy_max / scaling;
2220         if (policy_max == policy_min) {
2221                 min_policy_perf = max_policy_perf;
2222         } else {
2223                 min_policy_perf = policy_min / scaling;
2224                 min_policy_perf = clamp_t(int32_t, min_policy_perf,
2225                                           0, max_policy_perf);
2226         }
2227
2228         pr_debug("cpu:%d min_policy_perf:%d max_policy_perf:%d\n",
2229                  cpu->cpu, min_policy_perf, max_policy_perf);
2230
2231         /* Normalize user input to [min_perf, max_perf] */
2232         if (per_cpu_limits) {
2233                 cpu->min_perf_ratio = min_policy_perf;
2234                 cpu->max_perf_ratio = max_policy_perf;
2235         } else {
2236                 int turbo_max = cpu->pstate.turbo_pstate;
2237                 int32_t global_min, global_max;
2238
2239                 /* Global limits are in percent of the maximum turbo P-state. */
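                /*
                 * Illustrative example: turbo_pstate = 40 with max_perf_pct = 50
                 * and min_perf_pct = 25 yields global_max = 20 and global_min = 10.
                 */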
2240                 global_max = DIV_ROUND_UP(turbo_max * global.max_perf_pct, 100);
2241                 global_min = DIV_ROUND_UP(turbo_max * global.min_perf_pct, 100);
2242                 global_min = clamp_t(int32_t, global_min, 0, global_max);
2243
2244                 pr_debug("cpu:%d global_min:%d global_max:%d\n", cpu->cpu,
2245                          global_min, global_max);
2246
2247                 cpu->min_perf_ratio = max(min_policy_perf, global_min);
2248                 cpu->min_perf_ratio = min(cpu->min_perf_ratio, max_policy_perf);
2249                 cpu->max_perf_ratio = min(max_policy_perf, global_max);
2250                 cpu->max_perf_ratio = max(min_policy_perf, cpu->max_perf_ratio);
2251
2252                 /* Make sure min_perf <= max_perf */
2253                 cpu->min_perf_ratio = min(cpu->min_perf_ratio,
2254                                           cpu->max_perf_ratio);
2255
2256         }
2257         pr_debug("cpu:%d max_perf_ratio:%d min_perf_ratio:%d\n", cpu->cpu,
2258                  cpu->max_perf_ratio,
2259                  cpu->min_perf_ratio);
2260 }
2261
2262 static int intel_pstate_set_policy(struct cpufreq_policy *policy)
2263 {
2264         struct cpudata *cpu;
2265
2266         if (!policy->cpuinfo.max_freq)
2267                 return -ENODEV;
2268
2269         pr_debug("set_policy cpuinfo.max %u policy->max %u\n",
2270                  policy->cpuinfo.max_freq, policy->max);
2271
2272         cpu = all_cpu_data[policy->cpu];
2273         cpu->policy = policy->policy;
2274
2275         mutex_lock(&intel_pstate_limits_lock);
2276
2277         intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2278
2279         if (cpu->policy == CPUFREQ_POLICY_PERFORMANCE) {
2280                 /*
2281                  * NOHZ_FULL CPUs need this as the governor callback may not
2282                  * be invoked on them.
2283                  */
2284                 intel_pstate_clear_update_util_hook(policy->cpu);
2285                 intel_pstate_max_within_limits(cpu);
2286         } else {
2287                 intel_pstate_set_update_util_hook(policy->cpu);
2288         }
2289
2290         if (hwp_active) {
2291                 /*
2292                  * If hwp_boost was active before and has been turned off
2293                  * dynamically, the utilization update hook needs to be
2294                  * cleared here.
2295                  */
2296                 if (!hwp_boost)
2297                         intel_pstate_clear_update_util_hook(policy->cpu);
2298                 intel_pstate_hwp_set(policy->cpu);
2299         }
2300
2301         mutex_unlock(&intel_pstate_limits_lock);
2302
2303         return 0;
2304 }
2305
2306 static void intel_pstate_adjust_policy_max(struct cpudata *cpu,
2307                                            struct cpufreq_policy_data *policy)
2308 {
2309         if (!hwp_active &&
2310             cpu->pstate.max_pstate_physical > cpu->pstate.max_pstate &&
2311             policy->max < policy->cpuinfo.max_freq &&
2312             policy->max > cpu->pstate.max_freq) {
2313                 pr_debug("policy->max > max non turbo frequency\n");
2314                 policy->max = policy->cpuinfo.max_freq;
2315         }
2316 }
2317
2318 static void intel_pstate_verify_cpu_policy(struct cpudata *cpu,
2319                                            struct cpufreq_policy_data *policy)
2320 {
2321         int max_freq;
2322
2323         update_turbo_state();
2324         if (hwp_active) {
2325                 intel_pstate_get_hwp_cap(cpu);
2326                 max_freq = global.no_turbo || global.turbo_disabled ?
2327                                 cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2328         } else {
2329                 max_freq = intel_pstate_get_max_freq(cpu);
2330         }
2331         cpufreq_verify_within_limits(policy, policy->cpuinfo.min_freq, max_freq);
2332
2333         intel_pstate_adjust_policy_max(cpu, policy);
2334 }
2335
2336 static int intel_pstate_verify_policy(struct cpufreq_policy_data *policy)
2337 {
2338         intel_pstate_verify_cpu_policy(all_cpu_data[policy->cpu], policy);
2339
2340         return 0;
2341 }
2342
2343 static int intel_pstate_cpu_offline(struct cpufreq_policy *policy)
2344 {
2345         struct cpudata *cpu = all_cpu_data[policy->cpu];
2346
2347         pr_debug("CPU %d going offline\n", cpu->cpu);
2348
2349         if (cpu->suspended)
2350                 return 0;
2351
2352         /*
2353          * If the CPU is an SMT thread and it goes offline with the performance
2354          * settings different from the minimum, it will prevent its sibling
2355          * from getting to lower performance levels, so force the minimum
2356          * performance on CPU offline to prevent that from happening.
2357          */
2358         if (hwp_active)
2359                 intel_pstate_hwp_offline(cpu);
2360         else
2361                 intel_pstate_set_min_pstate(cpu);
2362
2363         intel_pstate_exit_perf_limits(policy);
2364
2365         return 0;
2366 }
2367
2368 static int intel_pstate_cpu_online(struct cpufreq_policy *policy)
2369 {
2370         struct cpudata *cpu = all_cpu_data[policy->cpu];
2371
2372         pr_debug("CPU %d going online\n", cpu->cpu);
2373
2374         intel_pstate_init_acpi_perf_limits(policy);
2375
2376         if (hwp_active) {
2377                 /*
2378                  * Re-enable HWP and clear the "suspended" flag to let "resume"
2379                  * know that it need not do that.
2380                  */
2381                 intel_pstate_hwp_reenable(cpu);
2382                 cpu->suspended = false;
2383         }
2384
2385         return 0;
2386 }
2387
2388 static void intel_pstate_stop_cpu(struct cpufreq_policy *policy)
2389 {
2390         pr_debug("CPU %d stopping\n", policy->cpu);
2391
2392         intel_pstate_clear_update_util_hook(policy->cpu);
2393 }
2394
2395 static int intel_pstate_cpu_exit(struct cpufreq_policy *policy)
2396 {
2397         pr_debug("CPU %d exiting\n", policy->cpu);
2398
2399         policy->fast_switch_possible = false;
2400
2401         return 0;
2402 }
2403
2404 static int __intel_pstate_cpu_init(struct cpufreq_policy *policy)
2405 {
2406         struct cpudata *cpu;
2407         int rc;
2408
2409         rc = intel_pstate_init_cpu(policy->cpu);
2410         if (rc)
2411                 return rc;
2412
2413         cpu = all_cpu_data[policy->cpu];
2414
2415         cpu->max_perf_ratio = 0xFF;
2416         cpu->min_perf_ratio = 0;
2417
2418         /* cpuinfo and default policy values */
2419         policy->cpuinfo.min_freq = cpu->pstate.min_pstate * cpu->pstate.scaling;
2420         update_turbo_state();
2421         global.turbo_disabled_mf = global.turbo_disabled;
2422         policy->cpuinfo.max_freq = global.turbo_disabled ?
2423                         cpu->pstate.max_freq : cpu->pstate.turbo_freq;
2424
2425         policy->min = policy->cpuinfo.min_freq;
2426         policy->max = policy->cpuinfo.max_freq;
2427
2428         intel_pstate_init_acpi_perf_limits(policy);
2429
2430         policy->fast_switch_possible = true;
2431
2432         return 0;
2433 }
2434
2435 static int intel_pstate_cpu_init(struct cpufreq_policy *policy)
2436 {
2437         int ret = __intel_pstate_cpu_init(policy);
2438
2439         if (ret)
2440                 return ret;
2441
2442         /*
2443          * Set the policy to powersave to provide a valid fallback value in case
2444          * the default cpufreq governor is neither powersave nor performance.
2445          */
2446         policy->policy = CPUFREQ_POLICY_POWERSAVE;
2447
2448         if (hwp_active) {
2449                 struct cpudata *cpu = all_cpu_data[policy->cpu];
2450
2451                 cpu->epp_cached = intel_pstate_get_epp(cpu, 0);
2452         }
2453
2454         return 0;
2455 }
2456
2457 static struct cpufreq_driver intel_pstate = {
2458         .flags          = CPUFREQ_CONST_LOOPS,
2459         .verify         = intel_pstate_verify_policy,
2460         .setpolicy      = intel_pstate_set_policy,
2461         .suspend        = intel_pstate_suspend,
2462         .resume         = intel_pstate_resume,
2463         .init           = intel_pstate_cpu_init,
2464         .exit           = intel_pstate_cpu_exit,
2465         .stop_cpu       = intel_pstate_stop_cpu,
2466         .offline        = intel_pstate_cpu_offline,
2467         .online         = intel_pstate_cpu_online,
2468         .update_limits  = intel_pstate_update_limits,
2469         .name           = "intel_pstate",
2470 };
2471
2472 static int intel_cpufreq_verify_policy(struct cpufreq_policy_data *policy)
2473 {
2474         struct cpudata *cpu = all_cpu_data[policy->cpu];
2475
2476         intel_pstate_verify_cpu_policy(cpu, policy);
2477         intel_pstate_update_perf_limits(cpu, policy->min, policy->max);
2478
2479         return 0;
2480 }
2481
2482 /* Use of trace in passive mode:
2483  *
2484  * In passive mode the trace core_busy field (also known as the
2485  * performance field, and labelled as such on the graphs; also known as
2486  * core_avg_perf) is not needed and so is re-assigned to indicate if the
2487  * driver call was via the normal or fast switch path. Various graphs
2488  * output from the intel_pstate_tracer.py utility that include core_busy
2489  * (or performance or core_avg_perf) have a fixed y-axis from 0 to 100%,
2490  * so we use 10 to indicate the normal path through the driver, and
2491  * 90 to indicate the fast switch path through the driver.
2492  * The scaled_busy field is not used, and is set to 0.
2493  */
2494
2495 #define INTEL_PSTATE_TRACE_TARGET 10
2496 #define INTEL_PSTATE_TRACE_FAST_SWITCH 90
2497
2498 static void intel_cpufreq_trace(struct cpudata *cpu, unsigned int trace_type, int old_pstate)
2499 {
2500         struct sample *sample;
2501
2502         if (!trace_pstate_sample_enabled())
2503                 return;
2504
2505         if (!intel_pstate_sample(cpu, ktime_get()))
2506                 return;
2507
2508         sample = &cpu->sample;
2509         trace_pstate_sample(trace_type,
2510                 0,
2511                 old_pstate,
2512                 cpu->pstate.current_pstate,
2513                 sample->mperf,
2514                 sample->aperf,
2515                 sample->tsc,
2516                 get_avg_frequency(cpu),
2517                 fp_toint(cpu->iowait_boost * 100));
2518 }
2519
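/*
 * Update the min, max and desired performance fields of the cached
 * HWP_REQUEST value and write the MSR only if something has changed, either
 * directly (fast switch path) or on the target CPU otherwise.
 */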
2520 static void intel_cpufreq_hwp_update(struct cpudata *cpu, u32 min, u32 max,
2521                                      u32 desired, bool fast_switch)
2522 {
2523         u64 prev = READ_ONCE(cpu->hwp_req_cached), value = prev;
2524
2525         value &= ~HWP_MIN_PERF(~0L);
2526         value |= HWP_MIN_PERF(min);
2527
2528         value &= ~HWP_MAX_PERF(~0L);
2529         value |= HWP_MAX_PERF(max);
2530
2531         value &= ~HWP_DESIRED_PERF(~0L);
2532         value |= HWP_DESIRED_PERF(desired);
2533
2534         if (value == prev)
2535                 return;
2536
2537         WRITE_ONCE(cpu->hwp_req_cached, value);
2538         if (fast_switch)
2539                 wrmsrl(MSR_HWP_REQUEST, value);
2540         else
2541                 wrmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, value);
2542 }
2543
2544 static void intel_cpufreq_perf_ctl_update(struct cpudata *cpu,
2545                                           u32 target_pstate, bool fast_switch)
2546 {
2547         if (fast_switch)
2548                 wrmsrl(MSR_IA32_PERF_CTL,
2549                        pstate_funcs.get_val(cpu, target_pstate));
2550         else
2551                 wrmsrl_on_cpu(cpu->cpu, MSR_IA32_PERF_CTL,
2552                               pstate_funcs.get_val(cpu, target_pstate));
2553 }
2554
2555 static int intel_cpufreq_update_pstate(struct cpufreq_policy *policy,
2556                                        int target_pstate, bool fast_switch)
2557 {
2558         struct cpudata *cpu = all_cpu_data[policy->cpu];
2559         int old_pstate = cpu->pstate.current_pstate;
2560
2561         target_pstate = intel_pstate_prepare_request(cpu, target_pstate);
2562         if (hwp_active) {
2563                 int max_pstate = policy->strict_target ?
2564                                         target_pstate : cpu->max_perf_ratio;
2565
2566                 intel_cpufreq_hwp_update(cpu, target_pstate, max_pstate, 0,
2567                                          fast_switch);
2568         } else if (target_pstate != old_pstate) {
2569                 intel_cpufreq_perf_ctl_update(cpu, target_pstate, fast_switch);
2570         }
2571
2572         cpu->pstate.current_pstate = target_pstate;
2573
2574         intel_cpufreq_trace(cpu, fast_switch ? INTEL_PSTATE_TRACE_FAST_SWITCH :
2575                             INTEL_PSTATE_TRACE_TARGET, old_pstate);
2576
2577         return target_pstate;
2578 }
2579
2580 static int intel_cpufreq_target(struct cpufreq_policy *policy,
2581                                 unsigned int target_freq,
2582                                 unsigned int relation)
2583 {
2584         struct cpudata *cpu = all_cpu_data[policy->cpu];
2585         struct cpufreq_freqs freqs;
2586         int target_pstate;
2587
2588         update_turbo_state();
2589
2590         freqs.old = policy->cur;
2591         freqs.new = target_freq;
2592
2593         cpufreq_freq_transition_begin(policy, &freqs);
2594
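        /*
         * Map the requested frequency to a P-state: RELATION_L rounds up,
         * RELATION_H rounds down and anything else rounds to the closest
         * scaling multiple.
         */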
2595         switch (relation) {
2596         case CPUFREQ_RELATION_L:
2597                 target_pstate = DIV_ROUND_UP(freqs.new, cpu->pstate.scaling);
2598                 break;
2599         case CPUFREQ_RELATION_H:
2600                 target_pstate = freqs.new / cpu->pstate.scaling;
2601                 break;
2602         default:
2603                 target_pstate = DIV_ROUND_CLOSEST(freqs.new, cpu->pstate.scaling);
2604                 break;
2605         }
2606
2607         target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, false);
2608
2609         freqs.new = target_pstate * cpu->pstate.scaling;
2610
2611         cpufreq_freq_transition_end(policy, &freqs, false);
2612
2613         return 0;
2614 }
2615
2616 static unsigned int intel_cpufreq_fast_switch(struct cpufreq_policy *policy,
2617                                               unsigned int target_freq)
2618 {
2619         struct cpudata *cpu = all_cpu_data[policy->cpu];
2620         int target_pstate;
2621
2622         update_turbo_state();
2623
2624         target_pstate = DIV_ROUND_UP(target_freq, cpu->pstate.scaling);
2625
2626         target_pstate = intel_cpufreq_update_pstate(policy, target_pstate, true);
2627
2628         return target_pstate * cpu->pstate.scaling;
2629 }
2630
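/*
 * Fast path used by the schedutil governor with HWP: min_perf and target_perf
 * are given on the scheduler's 0..capacity scale and are mapped onto P-states
 * by scaling them against the highest (or, with turbo disabled, guaranteed)
 * HWP performance level, then clamped to the per-CPU limits.
 */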
2631 static void intel_cpufreq_adjust_perf(unsigned int cpunum,
2632                                       unsigned long min_perf,
2633                                       unsigned long target_perf,
2634                                       unsigned long capacity)
2635 {
2636         struct cpudata *cpu = all_cpu_data[cpunum];
2637         u64 hwp_cap = READ_ONCE(cpu->hwp_cap_cached);
2638         int old_pstate = cpu->pstate.current_pstate;
2639         int cap_pstate, min_pstate, max_pstate, target_pstate;
2640
2641         update_turbo_state();
2642         cap_pstate = global.turbo_disabled ? HWP_GUARANTEED_PERF(hwp_cap) :
2643                                              HWP_HIGHEST_PERF(hwp_cap);
2644
2645         /* Optimization: Avoid unnecessary divisions. */
2646
2647         target_pstate = cap_pstate;
2648         if (target_perf < capacity)
2649                 target_pstate = DIV_ROUND_UP(cap_pstate * target_perf, capacity);
2650
2651         min_pstate = cap_pstate;
2652         if (min_perf < capacity)
2653                 min_pstate = DIV_ROUND_UP(cap_pstate * min_perf, capacity);
2654
2655         if (min_pstate < cpu->pstate.min_pstate)
2656                 min_pstate = cpu->pstate.min_pstate;
2657
2658         if (min_pstate < cpu->min_perf_ratio)
2659                 min_pstate = cpu->min_perf_ratio;
2660
2661         max_pstate = min(cap_pstate, cpu->max_perf_ratio);
2662         if (max_pstate < min_pstate)
2663                 max_pstate = min_pstate;
2664
2665         target_pstate = clamp_t(int, target_pstate, min_pstate, max_pstate);
2666
2667         intel_cpufreq_hwp_update(cpu, min_pstate, max_pstate, target_pstate, true);
2668
2669         cpu->pstate.current_pstate = target_pstate;
2670         intel_cpufreq_trace(cpu, INTEL_PSTATE_TRACE_FAST_SWITCH, old_pstate);
2671 }
2672
2673 static int intel_cpufreq_cpu_init(struct cpufreq_policy *policy)
2674 {
2675         struct freq_qos_request *req;
2676         struct cpudata *cpu;
2677         struct device *dev;
2678         int ret, freq;
2679
2680         dev = get_cpu_device(policy->cpu);
2681         if (!dev)
2682                 return -ENODEV;
2683
2684         ret = __intel_pstate_cpu_init(policy);
2685         if (ret)
2686                 return ret;
2687
2688         policy->cpuinfo.transition_latency = INTEL_CPUFREQ_TRANSITION_LATENCY;
2689         /* This reflects the intel_pstate_get_cpu_pstates() setting. */
2690         policy->cur = policy->cpuinfo.min_freq;
2691
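        /*
         * Allocate a pair of frequency QoS requests (min and max) that pin
         * this policy within the global performance percent limits.
         */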
2692         req = kcalloc(2, sizeof(*req), GFP_KERNEL);
2693         if (!req) {
2694                 ret = -ENOMEM;
2695                 goto pstate_exit;
2696         }
2697
2698         cpu = all_cpu_data[policy->cpu];
2699
2700         if (hwp_active) {
2701                 u64 value;
2702
2703                 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY_HWP;
2704
2705                 intel_pstate_get_hwp_cap(cpu);
2706
2707                 rdmsrl_on_cpu(cpu->cpu, MSR_HWP_REQUEST, &value);
2708                 WRITE_ONCE(cpu->hwp_req_cached, value);
2709
2710                 cpu->epp_cached = intel_pstate_get_epp(cpu, value);
2711         } else {
2712                 policy->transition_delay_us = INTEL_CPUFREQ_TRANSITION_DELAY;
2713         }
2714
2715         freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.min_perf_pct, 100);
2716
2717         ret = freq_qos_add_request(&policy->constraints, req, FREQ_QOS_MIN,
2718                                    freq);
2719         if (ret < 0) {
2720                 dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
2721                 goto free_req;
2722         }
2723
2724         freq = DIV_ROUND_UP(cpu->pstate.turbo_freq * global.max_perf_pct, 100);
2725
2726         ret = freq_qos_add_request(&policy->constraints, req + 1, FREQ_QOS_MAX,
2727                                    freq);
2728         if (ret < 0) {
2729                 dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
2730                 goto remove_min_req;
2731         }
2732
2733         policy->driver_data = req;
2734
2735         return 0;
2736
2737 remove_min_req:
2738         freq_qos_remove_request(req);
2739 free_req:
2740         kfree(req);
2741 pstate_exit:
2742         intel_pstate_exit_perf_limits(policy);
2743
2744         return ret;
2745 }
2746
2747 static int intel_cpufreq_cpu_exit(struct cpufreq_policy *policy)
2748 {
2749         struct freq_qos_request *req;
2750
2751         req = policy->driver_data;
2752
2753         freq_qos_remove_request(req + 1);
2754         freq_qos_remove_request(req);
2755         kfree(req);
2756
2757         return intel_pstate_cpu_exit(policy);
2758 }
2759
2760 static struct cpufreq_driver intel_cpufreq = {
2761         .flags          = CPUFREQ_CONST_LOOPS,
2762         .verify         = intel_cpufreq_verify_policy,
2763         .target         = intel_cpufreq_target,
2764         .fast_switch    = intel_cpufreq_fast_switch,
2765         .init           = intel_cpufreq_cpu_init,
2766         .exit           = intel_cpufreq_cpu_exit,
2767         .offline        = intel_pstate_cpu_offline,
2768         .online         = intel_pstate_cpu_online,
2769         .suspend        = intel_pstate_suspend,
2770         .resume         = intel_pstate_resume,
2771         .update_limits  = intel_pstate_update_limits,
2772         .name           = "intel_cpufreq",
2773 };
2774
2775 static struct cpufreq_driver *default_driver;
2776
2777 static void intel_pstate_driver_cleanup(void)
2778 {
2779         unsigned int cpu;
2780
2781         get_online_cpus();
2782         for_each_online_cpu(cpu) {
2783                 if (all_cpu_data[cpu]) {
2784                         if (intel_pstate_driver == &intel_pstate)
2785                                 intel_pstate_clear_update_util_hook(cpu);
2786
2787                         kfree(all_cpu_data[cpu]);
2788                         all_cpu_data[cpu] = NULL;
2789                 }
2790         }
2791         put_online_cpus();
2792
2793         intel_pstate_driver = NULL;
2794 }
2795
2796 static int intel_pstate_register_driver(struct cpufreq_driver *driver)
2797 {
2798         int ret;
2799
2800         if (driver == &intel_pstate)
2801                 intel_pstate_sysfs_expose_hwp_dynamic_boost();
2802
2803         memset(&global, 0, sizeof(global));
2804         global.max_perf_pct = 100;
2805
2806         intel_pstate_driver = driver;
2807         ret = cpufreq_register_driver(intel_pstate_driver);
2808         if (ret) {
2809                 intel_pstate_driver_cleanup();
2810                 return ret;
2811         }
2812
2813         global.min_perf_pct = min_perf_pct_min();
2814
2815         return 0;
2816 }
2817
2818 static ssize_t intel_pstate_show_status(char *buf)
2819 {
2820         if (!intel_pstate_driver)
2821                 return sprintf(buf, "off\n");
2822
2823         return sprintf(buf, "%s\n", intel_pstate_driver == &intel_pstate ?
2824                                         "active" : "passive");
2825 }
2826
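/*
 * Handle driver status change requests: "off" unregisters the current driver
 * (rejected with -EBUSY while HWP is active), while "active" and "passive"
 * switch to the intel_pstate and intel_cpufreq drivers respectively,
 * re-registering only if the mode actually changes.
 */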
2827 static int intel_pstate_update_status(const char *buf, size_t size)
2828 {
2829         if (size == 3 && !strncmp(buf, "off", size)) {
2830                 if (!intel_pstate_driver)
2831                         return -EINVAL;
2832
2833                 if (hwp_active)
2834                         return -EBUSY;
2835
2836                 cpufreq_unregister_driver(intel_pstate_driver);
2837                 intel_pstate_driver_cleanup();
2838                 return 0;
2839         }
2840
2841         if (size == 6 && !strncmp(buf, "active", size)) {
2842                 if (intel_pstate_driver) {
2843                         if (intel_pstate_driver == &intel_pstate)
2844                                 return 0;
2845
2846                         cpufreq_unregister_driver(intel_pstate_driver);
2847                 }
2848
2849                 return intel_pstate_register_driver(&intel_pstate);
2850         }
2851
2852         if (size == 7 && !strncmp(buf, "passive", size)) {
2853                 if (intel_pstate_driver) {
2854                         if (intel_pstate_driver == &intel_cpufreq)
2855                                 return 0;
2856
2857                         cpufreq_unregister_driver(intel_pstate_driver);
2858                         intel_pstate_sysfs_hide_hwp_dynamic_boost();
2859                 }
2860
2861                 return intel_pstate_register_driver(&intel_cpufreq);
2862         }
2863
2864         return -EINVAL;
2865 }
2866
2867 static int no_load __initdata;
2868 static int no_hwp __initdata;
2869 static int hwp_only __initdata;
2870 static unsigned int force_load __initdata;
2871
2872 static int __init intel_pstate_msrs_not_valid(void)
2873 {
2874         if (!pstate_funcs.get_max() ||
2875             !pstate_funcs.get_min() ||
2876             !pstate_funcs.get_turbo())
2877                 return -ENODEV;
2878
2879         return 0;
2880 }
2881
2882 static void __init copy_cpu_funcs(struct pstate_funcs *funcs)
2883 {
2884         pstate_funcs.get_max   = funcs->get_max;
2885         pstate_funcs.get_max_physical = funcs->get_max_physical;
2886         pstate_funcs.get_min   = funcs->get_min;
2887         pstate_funcs.get_turbo = funcs->get_turbo;
2888         pstate_funcs.get_scaling = funcs->get_scaling;
2889         pstate_funcs.get_val   = funcs->get_val;
2890         pstate_funcs.get_vid   = funcs->get_vid;
2891         pstate_funcs.get_aperf_mperf_shift = funcs->get_aperf_mperf_shift;
2892 }
2893
2894 #ifdef CONFIG_ACPI
2895
2896 static bool __init intel_pstate_no_acpi_pss(void)
2897 {
2898         int i;
2899
2900         for_each_possible_cpu(i) {
2901                 acpi_status status;
2902                 union acpi_object *pss;
2903                 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
2904                 struct acpi_processor *pr = per_cpu(processors, i);
2905
2906                 if (!pr)
2907                         continue;
2908
2909                 status = acpi_evaluate_object(pr->handle, "_PSS", NULL, &buffer);
2910                 if (ACPI_FAILURE(status))
2911                         continue;
2912
2913                 pss = buffer.pointer;
2914                 if (pss && pss->type == ACPI_TYPE_PACKAGE) {
2915                         kfree(pss);
2916                         return false;
2917                 }
2918
2919                 kfree(pss);
2920         }
2921
2922         pr_debug("ACPI _PSS not found\n");
2923         return true;
2924 }
2925
2926 static bool __init intel_pstate_no_acpi_pcch(void)
2927 {
2928         acpi_status status;
2929         acpi_handle handle;
2930
2931         status = acpi_get_handle(NULL, "\\_SB", &handle);
2932         if (ACPI_FAILURE(status))
2933                 goto not_found;
2934
2935         if (acpi_has_method(handle, "PCCH"))
2936                 return false;
2937
2938 not_found:
2939         pr_debug("ACPI PCCH not found\n");
2940         return true;
2941 }
2942
2943 static bool __init intel_pstate_has_acpi_ppc(void)
2944 {
2945         int i;
2946
2947         for_each_possible_cpu(i) {
2948                 struct acpi_processor *pr = per_cpu(processors, i);
2949
2950                 if (!pr)
2951                         continue;
2952                 if (acpi_has_method(pr->handle, "_PPC"))
2953                         return true;
2954         }
2955         pr_debug("ACPI _PPC not found\n");
2956         return false;
2957 }
2958
2959 enum {
2960         PSS,
2961         PPC,
2962 };
2963
2964 /* Hardware vendor-specific info that has its own power management modes */
2965 static struct acpi_platform_list plat_info[] __initdata = {
2966         {"HP    ", "ProLiant", 0, ACPI_SIG_FADT, all_versions, NULL, PSS},
2967         {"ORACLE", "X4-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2968         {"ORACLE", "X4-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2969         {"ORACLE", "X4-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2970         {"ORACLE", "X3-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2971         {"ORACLE", "X3-2L   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2972         {"ORACLE", "X3-2B   ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2973         {"ORACLE", "X4470M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2974         {"ORACLE", "X4270M3 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2975         {"ORACLE", "X4270M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2976         {"ORACLE", "X4170M2 ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2977         {"ORACLE", "X4170 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2978         {"ORACLE", "X4275 M3", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2979         {"ORACLE", "X6-2    ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2980         {"ORACLE", "Sudbury ", 0, ACPI_SIG_FADT, all_versions, NULL, PPC},
2981         { } /* End */
2982 };
2983
2984 #define BITMASK_OOB     (BIT(8) | BIT(18))
2985
2986 static bool __init intel_pstate_platform_pwr_mgmt_exists(void)
2987 {
2988         const struct x86_cpu_id *id;
2989         u64 misc_pwr;
2990         int idx;
2991
2992         id = x86_match_cpu(intel_pstate_cpu_oob_ids);
2993         if (id) {
2994                 rdmsrl(MSR_MISC_PWR_MGMT, misc_pwr);
2995                 if (misc_pwr & BITMASK_OOB) {
2996                         pr_debug("Bit 8 or 18 in the MISC_PWR_MGMT MSR set\n");
2997                         pr_debug("P states are controlled in Out of Band mode by the firmware/hardware\n");
2998                         return true;
2999                 }
3000         }
3001
3002         idx = acpi_match_platform_list(plat_info);
3003         if (idx < 0)
3004                 return false;
3005
3006         switch (plat_info[idx].data) {
3007         case PSS:
3008                 if (!intel_pstate_no_acpi_pss())
3009                         return false;
3010
3011                 return intel_pstate_no_acpi_pcch();
3012         case PPC:
3013                 return intel_pstate_has_acpi_ppc() && !force_load;
3014         }
3015
3016         return false;
3017 }
3018
3019 static void intel_pstate_request_control_from_smm(void)
3020 {
3021         /*
3022          * It may be unsafe to request P-states control from SMM if _PPC support
3023          * has not been enabled.
3024          */
3025         if (acpi_ppc)
3026                 acpi_processor_pstate_control();
3027 }
3028 #else /* CONFIG_ACPI not enabled */
3029 static inline bool intel_pstate_platform_pwr_mgmt_exists(void) { return false; }
3030 static inline bool intel_pstate_has_acpi_ppc(void) { return false; }
3031 static inline void intel_pstate_request_control_from_smm(void) {}
3032 #endif /* CONFIG_ACPI */
3033
3034 #define INTEL_PSTATE_HWP_BROADWELL      0x01
3035
3036 #define X86_MATCH_HWP(model, hwp_mode)                                  \
3037         X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
3038                                            X86_FEATURE_HWP, hwp_mode)
3039
3040 static const struct x86_cpu_id hwp_support_ids[] __initconst = {
3041         X86_MATCH_HWP(BROADWELL_X,      INTEL_PSTATE_HWP_BROADWELL),
3042         X86_MATCH_HWP(BROADWELL_D,      INTEL_PSTATE_HWP_BROADWELL),
3043         X86_MATCH_HWP(ANY,              0),
3044         {}
3045 };
3046
3047 static bool intel_pstate_hwp_is_enabled(void)
3048 {
3049         u64 value;
3050
3051         rdmsrl(MSR_PM_ENABLE, value);
3052         return !!(value & 0x1);
3053 }
3054
3055 static int __init intel_pstate_init(void)
3056 {
3057         const struct x86_cpu_id *id;
3058         int rc;
3059
3060         if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
3061                 return -ENODEV;
3062
3063         if (no_load)
3064                 return -ENODEV;
3065
3066         id = x86_match_cpu(hwp_support_ids);
3067         if (id) {
3068                 copy_cpu_funcs(&core_funcs);
3069                 /*
3070                  * Avoid enabling HWP for processors without EPP support,
3071                  * because that means an incomplete HWP implementation, which is
3072                  * a corner case and supporting it is generally problematic.
3073                  *
3074                  * If HWP is enabled already, though, there is no choice but to
3075                  * deal with it.
3076                  */
3077                 if ((!no_hwp && boot_cpu_has(X86_FEATURE_HWP_EPP)) ||
3078                     intel_pstate_hwp_is_enabled()) {
3079                         hwp_active++;
3080                         hwp_mode_bdw = id->driver_data;
3081                         intel_pstate.attr = hwp_cpufreq_attrs;
3082                         intel_cpufreq.attr = hwp_cpufreq_attrs;
3083                         intel_cpufreq.flags |= CPUFREQ_NEED_UPDATE_LIMITS;
3084                         intel_cpufreq.adjust_perf = intel_cpufreq_adjust_perf;
3085                         if (!default_driver)
3086                                 default_driver = &intel_pstate;
3087
3088                         goto hwp_cpu_matched;
3089                 }
3090         } else {
3091                 id = x86_match_cpu(intel_pstate_cpu_ids);
3092                 if (!id) {
3093                         pr_info("CPU model not supported\n");
3094                         return -ENODEV;
3095                 }
3096
3097                 copy_cpu_funcs((struct pstate_funcs *)id->driver_data);
3098         }
3099
3100         if (intel_pstate_msrs_not_valid()) {
3101                 pr_info("Invalid MSRs\n");
3102                 return -ENODEV;
3103         }
3104         /* Without HWP, start in the passive mode. */
3105         if (!default_driver)
3106                 default_driver = &intel_cpufreq;
3107
3108 hwp_cpu_matched:
3109         /*
3110          * The Intel pstate driver will be ignored if the platform
3111          * firmware has its own power management modes.
3112          */
3113         if (intel_pstate_platform_pwr_mgmt_exists()) {
3114                 pr_info("P-states controlled by the platform\n");
3115                 return -ENODEV;
3116         }
3117
3118         if (!hwp_active && hwp_only)
3119                 return -ENOTSUPP;
3120
3121         pr_info("Intel P-state driver initializing\n");
3122
3123         all_cpu_data = vzalloc(array_size(sizeof(void *), num_possible_cpus()));
3124         if (!all_cpu_data)
3125                 return -ENOMEM;
3126
3127         intel_pstate_request_control_from_smm();
3128
3129         intel_pstate_sysfs_expose_params();
3130
3131         mutex_lock(&intel_pstate_driver_lock);
3132         rc = intel_pstate_register_driver(default_driver);
3133         mutex_unlock(&intel_pstate_driver_lock);
3134         if (rc) {
3135                 intel_pstate_sysfs_remove();
3136                 return rc;
3137         }
3138
3139         if (hwp_active) {
3140                 const struct x86_cpu_id *id;
3141
3142                 id = x86_match_cpu(intel_pstate_cpu_ee_disable_ids);
3143                 if (id) {
3144                         set_power_ctl_ee_state(false);
3145                         pr_info("Disabling energy efficiency optimization\n");
3146                 }
3147
3148                 pr_info("HWP enabled\n");
3149         }
3150
3151         return 0;
3152 }
3153 device_initcall(intel_pstate_init);
3154
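/*
 * Parse the "intel_pstate=" early kernel parameter. Recognized values, as
 * handled below: "disable", "active", "passive", "no_hwp", "force",
 * "hwp_only", "per_cpu_perf_limits" and (with ACPI) "support_acpi_ppc".
 */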
3155 static int __init intel_pstate_setup(char *str)
3156 {
3157         if (!str)
3158                 return -EINVAL;
3159
3160         if (!strcmp(str, "disable"))
3161                 no_load = 1;
3162         else if (!strcmp(str, "active"))
3163                 default_driver = &intel_pstate;
3164         else if (!strcmp(str, "passive"))
3165                 default_driver = &intel_cpufreq;
3166
3167         if (!strcmp(str, "no_hwp")) {
3168                 pr_info("HWP disabled\n");
3169                 no_hwp = 1;
3170         }
3171         if (!strcmp(str, "force"))
3172                 force_load = 1;
3173         if (!strcmp(str, "hwp_only"))
3174                 hwp_only = 1;
3175         if (!strcmp(str, "per_cpu_perf_limits"))
3176                 per_cpu_limits = true;
3177
3178 #ifdef CONFIG_ACPI
3179         if (!strcmp(str, "support_acpi_ppc"))
3180                 acpi_ppc = true;
3181 #endif
3182
3183         return 0;
3184 }
3185 early_param("intel_pstate", intel_pstate_setup);
3186
3187 MODULE_AUTHOR("Dirk Brandewie <dirk.j.brandewie@intel.com>");
3188 MODULE_DESCRIPTION("'intel_pstate' - P state driver for Intel Core processors");
3189 MODULE_LICENSE("GPL");