// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * amd-pstate.c - AMD Processor P-state Frequency Driver
 *
 * Copyright (C) 2021 Advanced Micro Devices, Inc. All Rights Reserved.
 *
 * Author: Huang Rui <ray.huang@amd.com>
 *
 * AMD P-State introduces a new CPU performance scaling design for AMD
 * processors using the ACPI Collaborative Performance and Power Control (CPPC)
 * feature, which works with the AMD SMU firmware to provide a finer-grained
 * frequency control range. It replaces the legacy ACPI P-States control and
 * provides a flexible, low-latency interface for the Linux kernel to directly
 * communicate performance hints to the hardware.
 *
 * AMD P-State is supported on recent AMD Zen-based CPU series, including some
 * Zen2 and Zen3 processors. _CPC needs to be present in the ACPI tables of an
 * AMD P-State supported system. There are two types of hardware
 * implementations: 1) Full MSR Solution and 2) Shared Memory Solution. The
 * X86_FEATURE_CPPC CPU feature flag is used to distinguish between the two.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/sched.h>
#include <linux/cpufreq.h>
#include <linux/compiler.h>
#include <linux/dmi.h>
#include <linux/slab.h>
#include <linux/acpi.h>
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/uaccess.h>
#include <linux/static_call.h>

#include <acpi/processor.h>
#include <acpi/cppc_acpi.h>

#include <asm/msr.h>
#include <asm/processor.h>
#include <asm/cpufeature.h>
#include <asm/cpu_device_id.h>
#include "amd-pstate-trace.h"

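/*
 * The latency is reported to the cpufreq core in nanoseconds (0x20000 ns is
 * roughly 131 us) and the transition delay in microseconds; see the
 * policy->cpuinfo.transition_latency and policy->transition_delay_us
 * assignments in amd_pstate_cpu_init() below.
 */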
#define AMD_PSTATE_TRANSITION_LATENCY   0x20000
#define AMD_PSTATE_TRANSITION_DELAY     500

/*
 * TODO: We need more time to fine tune processors with the shared memory
 * solution together with the community.
 *
 * There are some performance drops on CPU benchmarks reported by SUSE. We are
 * working with them to fine tune the shared memory solution, so it is disabled
 * by default, falling back to acpi-cpufreq on these processors, and a module
 * parameter is provided to enable it manually for debugging.
 */
static bool shared_mem = false;
module_param(shared_mem, bool, 0444);
MODULE_PARM_DESC(shared_mem,
                 "enable amd-pstate on processors with shared memory solution (false = disabled (default), true = enabled)");
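/*
 * For example, boot with "amd_pstate.shared_mem=1", or load the module with
 * "modprobe amd_pstate shared_mem=1", to opt in on shared memory systems.
 */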

static struct cpufreq_driver amd_pstate_driver;

/**
 * struct amd_aperf_mperf
 * @aperf: actual performance frequency clock count
 * @mperf: maximum performance frequency clock count
 * @tsc:   time stamp counter
 */
struct amd_aperf_mperf {
        u64 aperf;
        u64 mperf;
        u64 tsc;
};

/**
 * struct amd_cpudata - private CPU data for AMD P-State
 * @cpu: CPU number
 * @req: constraint request to apply
 * @cppc_req_cached: cached performance request hints
 * @highest_perf: the maximum performance an individual processor may reach,
 *                assuming ideal conditions
 * @nominal_perf: the maximum sustained performance level of the processor,
 *                assuming ideal operating conditions
 * @lowest_nonlinear_perf: the lowest performance level at which nonlinear power
 *                         savings are achieved
 * @lowest_perf: the absolute lowest performance level of the processor
 * @max_freq: the frequency that maps to highest_perf
 * @min_freq: the frequency that maps to lowest_perf
 * @nominal_freq: the frequency that maps to nominal_perf
 * @lowest_nonlinear_freq: the frequency that maps to lowest_nonlinear_perf
 * @cur: difference of APERF/MPERF/TSC counts between the last and current sample
 * @prev: last APERF/MPERF/TSC count values read from registers
 * @freq: current CPU frequency value
 * @boost_supported: whether the processor or SBIOS supports boost mode
 *
 * The amd_cpudata is the key private data for each CPU thread in AMD P-State;
 * it represents all the attributes and goals that AMD P-State requests at
 * runtime.
 */
struct amd_cpudata {
        int     cpu;

        struct  freq_qos_request req[2];
        u64     cppc_req_cached;

        u32     highest_perf;
        u32     nominal_perf;
        u32     lowest_nonlinear_perf;
        u32     lowest_perf;

        u32     max_freq;
        u32     min_freq;
        u32     nominal_freq;
        u32     lowest_nonlinear_freq;

        struct amd_aperf_mperf cur;
        struct amd_aperf_mperf prev;

        u64     freq;
        bool    boost_supported;
};

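/*
 * The pstate_* helpers below implement the full MSR solution, while the
 * cppc_* helpers implement the shared memory solution through the generic
 * ACPI CPPC library; one of the two is wired up at init time via static calls.
 */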
static inline int pstate_enable(bool enable)
{
        return wrmsrl_safe(MSR_AMD_CPPC_ENABLE, enable);
}

static int cppc_enable(bool enable)
{
        int cpu, ret = 0;

        for_each_present_cpu(cpu) {
                ret = cppc_set_enable(cpu, enable);
                if (ret)
                        return ret;
        }

        return ret;
}

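/*
 * The static calls default to the MSR flavour; amd_pstate_init() switches
 * them to the cppc_* flavour on shared memory systems, which avoids an
 * indirect branch on every invocation of these hot-path helpers.
 */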
DEFINE_STATIC_CALL(amd_pstate_enable, pstate_enable);

static inline int amd_pstate_enable(bool enable)
{
        return static_call(amd_pstate_enable)(enable);
}

static int pstate_init_perf(struct amd_cpudata *cpudata)
{
        u64 cap1;

        int ret = rdmsrl_safe_on_cpu(cpudata->cpu, MSR_AMD_CPPC_CAP1,
                                     &cap1);
        if (ret)
                return ret;

        /*
         * TODO: Introduce an AMD specific power feature.
         *
         * The CPPC entry doesn't indicate the highest performance in some ASICs.
         */
        WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

        WRITE_ONCE(cpudata->nominal_perf, AMD_CPPC_NOMINAL_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_nonlinear_perf, AMD_CPPC_LOWNONLIN_PERF(cap1));
        WRITE_ONCE(cpudata->lowest_perf, AMD_CPPC_LOWEST_PERF(cap1));

        return 0;
}

static int cppc_init_perf(struct amd_cpudata *cpudata)
{
        struct cppc_perf_caps cppc_perf;

        int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
        if (ret)
                return ret;

        WRITE_ONCE(cpudata->highest_perf, amd_get_highest_perf());

        WRITE_ONCE(cpudata->nominal_perf, cppc_perf.nominal_perf);
        WRITE_ONCE(cpudata->lowest_nonlinear_perf,
                   cppc_perf.lowest_nonlinear_perf);
        WRITE_ONCE(cpudata->lowest_perf, cppc_perf.lowest_perf);

        return 0;
}

DEFINE_STATIC_CALL(amd_pstate_init_perf, pstate_init_perf);

static inline int amd_pstate_init_perf(struct amd_cpudata *cpudata)
{
        return static_call(amd_pstate_init_perf)(cpudata);
}

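/*
 * Note that the MSR path writes the *cached* request value, so callers must
 * update cppc_req_cached first (as amd_pstate_update() does); the individual
 * min/des/max arguments are only consumed by the shared memory path.
 */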
static void pstate_update_perf(struct amd_cpudata *cpudata, u32 min_perf,
                               u32 des_perf, u32 max_perf, bool fast_switch)
{
        if (fast_switch)
                wrmsrl(MSR_AMD_CPPC_REQ, READ_ONCE(cpudata->cppc_req_cached));
        else
                wrmsrl_on_cpu(cpudata->cpu, MSR_AMD_CPPC_REQ,
                              READ_ONCE(cpudata->cppc_req_cached));
}

static void cppc_update_perf(struct amd_cpudata *cpudata,
                             u32 min_perf, u32 des_perf,
                             u32 max_perf, bool fast_switch)
{
        struct cppc_perf_ctrls perf_ctrls;

        perf_ctrls.max_perf = max_perf;
        perf_ctrls.min_perf = min_perf;
        perf_ctrls.desired_perf = des_perf;

        cppc_set_perf(cpudata->cpu, &perf_ctrls);
}

DEFINE_STATIC_CALL(amd_pstate_update_perf, pstate_update_perf);

static inline void amd_pstate_update_perf(struct amd_cpudata *cpudata,
                                          u32 min_perf, u32 des_perf,
                                          u32 max_perf, bool fast_switch)
{
        static_call(amd_pstate_update_perf)(cpudata, min_perf, des_perf,
                                            max_perf, fast_switch);
}

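/*
 * Sample APERF/MPERF/TSC deltas with interrupts disabled and derive the
 * effective frequency as freq = delta_aperf / delta_mperf * cpu_khz. Returns
 * false when the counters have not advanced since the previous sample.
 */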
static inline bool amd_pstate_sample(struct amd_cpudata *cpudata)
{
        u64 aperf, mperf, tsc;
        unsigned long flags;

        local_irq_save(flags);
        rdmsrl(MSR_IA32_APERF, aperf);
        rdmsrl(MSR_IA32_MPERF, mperf);
        tsc = rdtsc();

        if (cpudata->prev.mperf == mperf || cpudata->prev.tsc == tsc) {
                local_irq_restore(flags);
                return false;
        }

        local_irq_restore(flags);

        cpudata->cur.aperf = aperf;
        cpudata->cur.mperf = mperf;
        cpudata->cur.tsc = tsc;
        cpudata->cur.aperf -= cpudata->prev.aperf;
        cpudata->cur.mperf -= cpudata->prev.mperf;
        cpudata->cur.tsc -= cpudata->prev.tsc;

        cpudata->prev.aperf = aperf;
        cpudata->prev.mperf = mperf;
        cpudata->prev.tsc = tsc;

        cpudata->freq = div64_u64((cpudata->cur.aperf * cpu_khz), cpudata->cur.mperf);

        return true;
}

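/*
 * Pack the min/desired/max performance levels into the CPPC request word,
 * emit an optional trace event, and push the new value to hardware only when
 * it differs from the cached one.
 */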
static void amd_pstate_update(struct amd_cpudata *cpudata, u32 min_perf,
                              u32 des_perf, u32 max_perf, bool fast_switch)
{
        u64 prev = READ_ONCE(cpudata->cppc_req_cached);
        u64 value = prev;

        value &= ~AMD_CPPC_MIN_PERF(~0L);
        value |= AMD_CPPC_MIN_PERF(min_perf);

        value &= ~AMD_CPPC_DES_PERF(~0L);
        value |= AMD_CPPC_DES_PERF(des_perf);

        value &= ~AMD_CPPC_MAX_PERF(~0L);
        value |= AMD_CPPC_MAX_PERF(max_perf);

        if (trace_amd_pstate_perf_enabled() && amd_pstate_sample(cpudata)) {
                trace_amd_pstate_perf(min_perf, des_perf, max_perf, cpudata->freq,
                        cpudata->cur.mperf, cpudata->cur.aperf, cpudata->cur.tsc,
                                cpudata->cpu, (value != prev), fast_switch);
        }

        if (value == prev)
                return;

        WRITE_ONCE(cpudata->cppc_req_cached, value);

        amd_pstate_update_perf(cpudata, min_perf, des_perf,
                               max_perf, fast_switch);
}

static int amd_pstate_verify(struct cpufreq_policy_data *policy)
{
        cpufreq_verify_within_cpu_limits(policy);

        return 0;
}

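/*
 * The ->target() callback maps a frequency request to a desired perf level
 * linearly: des_perf = target_freq * highest_perf / max_freq. For example
 * (hypothetical numbers), with max_freq = 4000000 kHz and highest_perf = 255,
 * a 2 GHz request becomes des_perf = 128.
 */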
static int amd_pstate_target(struct cpufreq_policy *policy,
                             unsigned int target_freq,
                             unsigned int relation)
{
        struct cpufreq_freqs freqs;
        struct amd_cpudata *cpudata = policy->driver_data;
        unsigned long max_perf, min_perf, des_perf, cap_perf;

        if (!cpudata->max_freq)
                return -ENODEV;

        cap_perf = READ_ONCE(cpudata->highest_perf);
        min_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);
        max_perf = cap_perf;

        freqs.old = policy->cur;
        freqs.new = target_freq;

        des_perf = DIV_ROUND_CLOSEST(target_freq * cap_perf,
                                     cpudata->max_freq);

        cpufreq_freq_transition_begin(policy, &freqs);
        amd_pstate_update(cpudata, min_perf, des_perf,
                          max_perf, false);
        cpufreq_freq_transition_end(policy, &freqs, false);

        return 0;
}

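/*
 * Fast-switch path used by the schedutil governor: the utilization and
 * minimum requests arrive in units of 'capacity' and are rescaled against
 * highest_perf, with lowest_nonlinear_perf enforced as the floor.
 */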
static void amd_pstate_adjust_perf(unsigned int cpu,
                                   unsigned long _min_perf,
                                   unsigned long target_perf,
                                   unsigned long capacity)
{
        unsigned long max_perf, min_perf, des_perf,
                      cap_perf, lowest_nonlinear_perf;
        struct cpufreq_policy *policy = cpufreq_cpu_get(cpu);
        struct amd_cpudata *cpudata = policy->driver_data;

        cap_perf = READ_ONCE(cpudata->highest_perf);
        lowest_nonlinear_perf = READ_ONCE(cpudata->lowest_nonlinear_perf);

        des_perf = cap_perf;
        if (target_perf < capacity)
                des_perf = DIV_ROUND_UP(cap_perf * target_perf, capacity);

        /* Default the floor to the lowest perf level, not the highest. */
        min_perf = READ_ONCE(cpudata->lowest_perf);
        if (_min_perf < capacity)
                min_perf = DIV_ROUND_UP(cap_perf * _min_perf, capacity);

        if (min_perf < lowest_nonlinear_perf)
                min_perf = lowest_nonlinear_perf;

        max_perf = cap_perf;
        if (max_perf < min_perf)
                max_perf = min_perf;

        des_perf = clamp_t(unsigned long, des_perf, min_perf, max_perf);

        amd_pstate_update(cpudata, min_perf, des_perf, max_perf, true);

        /* Drop the reference taken by cpufreq_cpu_get() above. */
        cpufreq_cpu_put(policy);
}

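/*
 * The amd_get_*_freq() helpers below derive the driver's frequency range from
 * the ACPI CPPC capabilities; _CPC reports frequencies in MHz, hence the
 * "* 1000" conversions to the kHz units cpufreq expects.
 */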
static int amd_get_min_freq(struct amd_cpudata *cpudata)
{
        struct cppc_perf_caps cppc_perf;

        int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
        if (ret)
                return ret;

        /* Switch to khz */
        return cppc_perf.lowest_freq * 1000;
}

static int amd_get_max_freq(struct amd_cpudata *cpudata)
{
        struct cppc_perf_caps cppc_perf;
        u32 max_perf, max_freq, nominal_freq, nominal_perf;
        u64 boost_ratio;

        int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
        if (ret)
                return ret;

        nominal_freq = cppc_perf.nominal_freq;
        nominal_perf = READ_ONCE(cpudata->nominal_perf);
        max_perf = READ_ONCE(cpudata->highest_perf);

        boost_ratio = div_u64(max_perf << SCHED_CAPACITY_SHIFT,
                              nominal_perf);

        max_freq = nominal_freq * boost_ratio >> SCHED_CAPACITY_SHIFT;

        /* Switch to khz */
        return max_freq * 1000;
}

static int amd_get_nominal_freq(struct amd_cpudata *cpudata)
{
        struct cppc_perf_caps cppc_perf;

        int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
        if (ret)
                return ret;

        /* Switch to khz */
        return cppc_perf.nominal_freq * 1000;
}

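/*
 * The ratio below uses SCHED_CAPACITY_SHIFT (10-bit) fixed-point math. As a
 * worked example with hypothetical values nominal_freq = 2800 MHz,
 * nominal_perf = 170 and lowest_nonlinear_perf = 68:
 * ratio = (68 << 10) / 170 = 409, freq = (2800 * 409) >> 10 ~= 1118 MHz.
 */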
static int amd_get_lowest_nonlinear_freq(struct amd_cpudata *cpudata)
{
        struct cppc_perf_caps cppc_perf;
        u32 lowest_nonlinear_freq, lowest_nonlinear_perf,
            nominal_freq, nominal_perf;
        u64 lowest_nonlinear_ratio;

        int ret = cppc_get_perf_caps(cpudata->cpu, &cppc_perf);
        if (ret)
                return ret;

        nominal_freq = cppc_perf.nominal_freq;
        nominal_perf = READ_ONCE(cpudata->nominal_perf);

        lowest_nonlinear_perf = cppc_perf.lowest_nonlinear_perf;

        lowest_nonlinear_ratio = div_u64(lowest_nonlinear_perf << SCHED_CAPACITY_SHIFT,
                                         nominal_perf);

        lowest_nonlinear_freq = nominal_freq * lowest_nonlinear_ratio >> SCHED_CAPACITY_SHIFT;

        /* Switch to khz */
        return lowest_nonlinear_freq * 1000;
}

436
437 static int amd_pstate_set_boost(struct cpufreq_policy *policy, int state)
438 {
439         struct amd_cpudata *cpudata = policy->driver_data;
440         int ret;
441
442         if (!cpudata->boost_supported) {
443                 pr_err("Boost mode is not supported by this processor or SBIOS\n");
444                 return -EINVAL;
445         }
446
447         if (state)
448                 policy->cpuinfo.max_freq = cpudata->max_freq;
449         else
450                 policy->cpuinfo.max_freq = cpudata->nominal_freq;
451
452         policy->max = policy->cpuinfo.max_freq;
453
454         ret = freq_qos_update_request(&cpudata->req[1],
455                                       policy->cpuinfo.max_freq);
456         if (ret < 0)
457                 return ret;
458
459         return 0;
460 }
461
462 static void amd_pstate_boost_init(struct amd_cpudata *cpudata)
463 {
464         u32 highest_perf, nominal_perf;
465
466         highest_perf = READ_ONCE(cpudata->highest_perf);
467         nominal_perf = READ_ONCE(cpudata->nominal_perf);
468
469         if (highest_perf <= nominal_perf)
470                 return;
471
472         cpudata->boost_supported = true;
473         amd_pstate_driver.boost_enabled = true;
474 }
475
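/*
 * Per-policy init: read the CPPC perf capabilities, translate them into the
 * min/max/nominal/lowest-nonlinear frequencies, register min/max freq QoS
 * requests (used by the boost toggle above), and detect boost support.
 */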
static int amd_pstate_cpu_init(struct cpufreq_policy *policy)
{
        int min_freq, max_freq, nominal_freq, lowest_nonlinear_freq, ret;
        struct device *dev;
        struct amd_cpudata *cpudata;

        dev = get_cpu_device(policy->cpu);
        if (!dev)
                return -ENODEV;

        cpudata = kzalloc(sizeof(*cpudata), GFP_KERNEL);
        if (!cpudata)
                return -ENOMEM;

        cpudata->cpu = policy->cpu;

        ret = amd_pstate_init_perf(cpudata);
        if (ret)
                goto free_cpudata1;

        min_freq = amd_get_min_freq(cpudata);
        max_freq = amd_get_max_freq(cpudata);
        nominal_freq = amd_get_nominal_freq(cpudata);
        lowest_nonlinear_freq = amd_get_lowest_nonlinear_freq(cpudata);

        if (min_freq < 0 || max_freq < 0 || min_freq > max_freq) {
                dev_err(dev, "min_freq(%d) or max_freq(%d) value is incorrect\n",
                        min_freq, max_freq);
                ret = -EINVAL;
                goto free_cpudata1;
        }

        policy->cpuinfo.transition_latency = AMD_PSTATE_TRANSITION_LATENCY;
        policy->transition_delay_us = AMD_PSTATE_TRANSITION_DELAY;

        policy->min = min_freq;
        policy->max = max_freq;

        policy->cpuinfo.min_freq = min_freq;
        policy->cpuinfo.max_freq = max_freq;

        /* It will be updated by governor */
        policy->cur = policy->cpuinfo.min_freq;

        if (boot_cpu_has(X86_FEATURE_CPPC))
                policy->fast_switch_possible = true;

        ret = freq_qos_add_request(&policy->constraints, &cpudata->req[0],
                                   FREQ_QOS_MIN, policy->cpuinfo.min_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add min-freq constraint (%d)\n", ret);
                goto free_cpudata1;
        }

        ret = freq_qos_add_request(&policy->constraints, &cpudata->req[1],
                                   FREQ_QOS_MAX, policy->cpuinfo.max_freq);
        if (ret < 0) {
                dev_err(dev, "Failed to add max-freq constraint (%d)\n", ret);
                goto free_cpudata2;
        }

        /* Initial processor data capability frequencies */
        cpudata->max_freq = max_freq;
        cpudata->min_freq = min_freq;
        cpudata->nominal_freq = nominal_freq;
        cpudata->lowest_nonlinear_freq = lowest_nonlinear_freq;

        policy->driver_data = cpudata;

        amd_pstate_boost_init(cpudata);

        return 0;

free_cpudata2:
        freq_qos_remove_request(&cpudata->req[0]);
free_cpudata1:
        kfree(cpudata);
        return ret;
}

static int amd_pstate_cpu_exit(struct cpufreq_policy *policy)
{
        struct amd_cpudata *cpudata;

        cpudata = policy->driver_data;

        freq_qos_remove_request(&cpudata->req[1]);
        freq_qos_remove_request(&cpudata->req[0]);
        kfree(cpudata);

        return 0;
}

static int amd_pstate_cpu_resume(struct cpufreq_policy *policy)
{
        int ret;

        ret = amd_pstate_enable(true);
        if (ret)
                pr_err("failed to enable amd-pstate during resume, return %d\n", ret);

        return ret;
}

static int amd_pstate_cpu_suspend(struct cpufreq_policy *policy)
{
        int ret;

        ret = amd_pstate_enable(false);
        if (ret)
                pr_err("failed to disable amd-pstate during suspend, return %d\n", ret);

        return ret;
}

/* Sysfs attributes */

/*
 * This frequency indicates the maximum hardware frequency. If boost is
 * supported but not active, it will be larger than the one in cpuinfo.
 */
static ssize_t show_amd_pstate_max_freq(struct cpufreq_policy *policy,
                                        char *buf)
{
        int max_freq;
        struct amd_cpudata *cpudata;

        cpudata = policy->driver_data;

        max_freq = amd_get_max_freq(cpudata);
        if (max_freq < 0)
                return max_freq;

        return sprintf(&buf[0], "%u\n", max_freq);
}

static ssize_t show_amd_pstate_lowest_nonlinear_freq(struct cpufreq_policy *policy,
                                                     char *buf)
{
        int freq;
        struct amd_cpudata *cpudata;

        cpudata = policy->driver_data;

        freq = amd_get_lowest_nonlinear_freq(cpudata);
        if (freq < 0)
                return freq;

        return sprintf(&buf[0], "%u\n", freq);
}

/*
 * On some ASICs the highest_perf is not the one in the _CPC table, so we need
 * to expose it to sysfs.
 */
static ssize_t show_amd_pstate_highest_perf(struct cpufreq_policy *policy,
                                            char *buf)
{
        u32 perf;
        struct amd_cpudata *cpudata = policy->driver_data;

        perf = READ_ONCE(cpudata->highest_perf);

        return sprintf(&buf[0], "%u\n", perf);
}

cpufreq_freq_attr_ro(amd_pstate_max_freq);
cpufreq_freq_attr_ro(amd_pstate_lowest_nonlinear_freq);

cpufreq_freq_attr_ro(amd_pstate_highest_perf);

static struct freq_attr *amd_pstate_attr[] = {
        &amd_pstate_max_freq,
        &amd_pstate_lowest_nonlinear_freq,
        &amd_pstate_highest_perf,
        NULL,
};

static struct cpufreq_driver amd_pstate_driver = {
        .flags          = CPUFREQ_CONST_LOOPS | CPUFREQ_NEED_UPDATE_LIMITS,
        .verify         = amd_pstate_verify,
        .target         = amd_pstate_target,
        .init           = amd_pstate_cpu_init,
        .exit           = amd_pstate_cpu_exit,
        .suspend        = amd_pstate_cpu_suspend,
        .resume         = amd_pstate_cpu_resume,
        .set_boost      = amd_pstate_set_boost,
        .name           = "amd-pstate",
        .attr           = amd_pstate_attr,
};

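/*
 * Module init: bail out unless this is an AMD part with valid _CPC data and
 * no other cpufreq driver already registered, then pick the MSR or shared
 * memory backend before enabling CPPC and registering the driver.
 */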
static int __init amd_pstate_init(void)
{
        int ret;

        if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD)
                return -ENODEV;

        if (!acpi_cpc_valid()) {
                pr_debug("the _CPC object is not present in SBIOS\n");
                return -ENODEV;
        }

        /* don't keep reloading if cpufreq_driver exists */
        if (cpufreq_get_current_driver())
                return -EEXIST;

        /* capability check */
        if (boot_cpu_has(X86_FEATURE_CPPC)) {
                pr_debug("AMD CPPC MSR based functionality is supported\n");
                amd_pstate_driver.adjust_perf = amd_pstate_adjust_perf;
        } else if (shared_mem) {
                static_call_update(amd_pstate_enable, cppc_enable);
                static_call_update(amd_pstate_init_perf, cppc_init_perf);
                static_call_update(amd_pstate_update_perf, cppc_update_perf);
        } else {
                pr_info("This processor supports shared memory solution, you can enable it with amd_pstate.shared_mem=1\n");
                return -ENODEV;
        }

        /* enable amd pstate feature */
        ret = amd_pstate_enable(true);
        if (ret) {
                pr_err("failed to enable amd-pstate with return %d\n", ret);
                return ret;
        }

        ret = cpufreq_register_driver(&amd_pstate_driver);
        if (ret)
                pr_err("failed to register amd_pstate_driver with return %d\n",
                       ret);

        return ret;
}

static void __exit amd_pstate_exit(void)
{
        cpufreq_unregister_driver(&amd_pstate_driver);

        amd_pstate_enable(false);
}

module_init(amd_pstate_init);
module_exit(amd_pstate_exit);

MODULE_AUTHOR("Huang Rui <ray.huang@amd.com>");
MODULE_DESCRIPTION("AMD Processor P-state Frequency Driver");
MODULE_LICENSE("GPL");