// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
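
/*
 * A minimal usage sketch (not part of this file) of the flow described
 * above, as seen from a CPUfreq driver such as cppc_cpufreq; error
 * handling is elided and the chosen perf level is only an example:
 *
 *      struct cppc_perf_caps caps;
 *      struct cppc_perf_ctrls ctrls;
 *
 *      cppc_get_perf_caps(cpu, &caps);         // capabilities from _CPC
 *      ctrls.desired_perf = caps.nominal_perf; // request within bounds
 *      cppc_set_perf(cpu, &ctrls);             // platform may clamp this
 */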

#define pr_fmt(fmt)     "ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>
#include <linux/dmi.h>
#include <linux/units.h>
#include <asm/unaligned.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
        struct pcc_mbox_chan *pcc_channel;
        void __iomem *pcc_comm_addr;
        bool pcc_channel_acquired;
        unsigned int deadline_us;
        unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

        bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
        bool platform_owns_pcc;         /* Ownership of PCC subspace */
        unsigned int pcc_write_cnt;     /* Running count of PCC write commands */

        /*
         * Lock to provide controlled access to the PCC channel.
         *
         * For performance-critical usecases (currently cppc_set_perf):
         *      We need to take the read_lock and check if the channel belongs
         * to OSPM before reading from or writing to the PCC subspace.
         *      We need to take the write_lock before transferring channel
         * ownership to the platform via a doorbell.
         *      This allows us to batch a number of CPPC requests if they
         * happen to originate at about the same time.
         *
         * For non-performance-critical usecases (init):
         *      Take the write_lock for all purposes, which gives exclusive
         * access.
         */
        struct rw_semaphore pcc_lock;

        /* Wait queue for CPUs whose requests were batched */
        wait_queue_head_t pcc_write_wait_q;
        ktime_t last_cmd_cmpl_time;
        ktime_t last_mpar_reset;
        int mpar_count;
        int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];
/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
                                                0x8 + (offs))
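
/*
 * Example: a PCC register at offset 0x10 in subspace 2 resolves to
 * pcc_data[2]->pcc_comm_addr + 0x18, i.e. past the 8-byte generic PCC
 * shared memory region header (signature, command, status).
 */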

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
                                (cpc)->cpc_entry.reg.space_id ==        \
                                ACPI_ADR_SPACE_PLATFORM_COMM)

/* Check if a CPC register is in SystemMemory */
#define CPC_IN_SYSTEM_MEMORY(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&   \
                                (cpc)->cpc_entry.reg.space_id ==        \
                                ACPI_ADR_SPACE_SYSTEM_MEMORY)

/* Check if a CPC register is in SystemIo */
#define CPC_IN_SYSTEM_IO(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&       \
                                (cpc)->cpc_entry.reg.space_id ==        \
                                ACPI_ADR_SPACE_SYSTEM_IO)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
                                (reg)->address == 0 &&                  \
                                (reg)->bit_width == 0 &&                \
                                (reg)->bit_offset == 0 &&               \
                                (reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?          \
                                !!(cpc)->cpc_entry.int_value :          \
                                !IS_NULL_REG(&(cpc)->cpc_entry.reg))
/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL

#define define_one_cppc_ro(_name)               \
static struct kobj_attribute _name =            \
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)             \
        static ssize_t show_##member_name(struct kobject *kobj,        \
                                struct kobj_attribute *attr, char *buf) \
        {                                                               \
                struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
                struct struct_name st_name = {0};                       \
                int ret;                                                \
                                                                        \
                ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
                if (ret)                                                \
                        return ret;                                     \
                                                                        \
                return sysfs_emit(buf, "%llu\n",                        \
                                (u64)st_name.member_name);              \
        }                                                               \
        define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
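
/*
 * Each attribute generated above (and feedback_ctrs below) appears under
 * the per-CPU "acpi_cppc" kobject registered in acpi_cppc_processor_probe(),
 * e.g.:
 *
 *   /sys/devices/system/cpu/cpu0/acpi_cppc/highest_perf
 *   /sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 */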

/* Check for valid access_width; otherwise, fall back to using bit_width */
#define GET_BIT_WIDTH(reg) ((reg)->access_width ? (8 << ((reg)->access_width - 1)) : (reg)->bit_width)
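
/*
 * Example: per the ACPI GAS encoding, access_width values 1/2/3/4 select
 * 8/16/32/64-bit accesses, so access_width 2 yields 8 << (2 - 1) = 16 bits;
 * access_width 0 falls back to bit_width.
 */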

/* Shift and apply the mask for CPC reads/writes */
#define MASK_VAL(reg, val) (((val) >> (reg)->bit_offset) &              \
                                        GENMASK(((reg)->bit_width) - 1, 0))
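
/*
 * Example: for a field with bit_offset 8 and bit_width 8, a raw value of
 * 0xABCD is reduced to (0xABCD >> 8) & GENMASK(7, 0) = 0xAB.
 */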

static ssize_t show_feedback_ctrs(struct kobject *kobj,
                struct kobj_attribute *attr, char *buf)
{
        struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
        struct cppc_perf_fb_ctrs fb_ctrs = {0};
        int ret;

        ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
        if (ret)
                return ret;

        return sysfs_emit(buf, "ref:%llu del:%llu\n",
                        fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
        &feedback_ctrs.attr,
        &reference_perf.attr,
        &wraparound_time.attr,
        &highest_perf.attr,
        &lowest_perf.attr,
        &lowest_nonlinear_perf.attr,
        &nominal_perf.attr,
        &nominal_freq.attr,
        &lowest_freq.attr,
        NULL
};
ATTRIBUTE_GROUPS(cppc);

static const struct kobj_type cppc_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_groups = cppc_groups,
};

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
        int ret, status;
        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
                pcc_ss_data->pcc_comm_addr;

        if (!pcc_ss_data->platform_owns_pcc)
                return 0;

        /*
         * Poll the PCC status register every 3 us (delay_us) for a maximum of
         * deadline_us (timeout_us) until the PCC command complete bit is set
         * (cond).
         */
        ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
                                        status & PCC_CMD_COMPLETE_MASK, 3,
                                        pcc_ss_data->deadline_us);

        if (likely(!ret)) {
                pcc_ss_data->platform_owns_pcc = false;
                if (chk_err_bit && (status & PCC_ERROR_MASK))
                        ret = -EIO;
        }

        if (unlikely(ret))
                pr_err("PCC check channel failed for ss: %d. ret=%d\n",
                       pcc_ss_id, ret);

        return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding the write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
        int ret = -EIO, i;
        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
                pcc_ss_data->pcc_comm_addr;
        unsigned int time_delta;

        /*
         * For CMD_WRITE we know for a fact the caller should have checked
         * the channel before writing to PCC space.
         */
        if (cmd == CMD_READ) {
                /*
                 * If there are pending cpc_writes, then we stole the channel
                 * before write completion, so first send a WRITE command to
                 * the platform.
                 */
                if (pcc_ss_data->pending_pcc_write_cmd)
                        send_pcc_cmd(pcc_ss_id, CMD_WRITE);

                ret = check_pcc_chan(pcc_ss_id, false);
                if (ret)
                        goto end;
        } else /* CMD_WRITE */
                pcc_ss_data->pending_pcc_write_cmd = FALSE;

        /*
         * Handle the Minimum Request Turnaround Time (MRTT):
         * "The minimum amount of time that OSPM must wait after the completion
         * of a command before issuing the next command, in microseconds."
         */
        if (pcc_ss_data->pcc_mrtt) {
                time_delta = ktime_us_delta(ktime_get(),
                                            pcc_ss_data->last_cmd_cmpl_time);
                if (pcc_ss_data->pcc_mrtt > time_delta)
                        udelay(pcc_ss_data->pcc_mrtt - time_delta);
        }

        /*
         * Handle a non-zero Maximum Periodic Access Rate (MPAR):
         * "The maximum number of periodic requests that the subspace channel can
         * support, reported in commands per minute. 0 indicates no limitation."
         *
         * This parameter should ideally be zero or large enough so that it can
         * handle the maximum number of requests that all the cores in the system
         * can collectively generate. If it is not, we will follow the spec and
         * just not send the request to the platform after hitting the MPAR limit
         * in any 60s window.
         */
        if (pcc_ss_data->pcc_mpar) {
                if (pcc_ss_data->mpar_count == 0) {
                        time_delta = ktime_ms_delta(ktime_get(),
                                                    pcc_ss_data->last_mpar_reset);
                        if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
                                pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
                                         pcc_ss_id);
                                ret = -EIO;
                                goto end;
                        }
                        pcc_ss_data->last_mpar_reset = ktime_get();
                        pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
                }
                pcc_ss_data->mpar_count--;
        }
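
        /*
         * Example: with pcc_mpar == 300, at most 300 commands are issued
         * in any 60 s window; once mpar_count reaches zero, subsequent
         * commands fail with -EIO until the window since last_mpar_reset
         * expires and the budget is refilled above.
         */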

        /* Write to the shared comm region. */
        writew_relaxed(cmd, &generic_comm_base->command);

        /* Flip CMD COMPLETE bit */
        writew_relaxed(0, &generic_comm_base->status);

        pcc_ss_data->platform_owns_pcc = true;

        /* Ring doorbell */
        ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
        if (ret < 0) {
                pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
                       pcc_ss_id, cmd, ret);
                goto end;
        }

        /* wait for completion and check for PCC error bit */
        ret = check_pcc_chan(pcc_ss_id, true);

        if (pcc_ss_data->pcc_mrtt)
                pcc_ss_data->last_cmd_cmpl_time = ktime_get();

        if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
                mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
        else
                mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
        if (cmd == CMD_WRITE) {
                if (unlikely(ret)) {
                        for_each_possible_cpu(i) {
                                struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

                                if (!desc)
                                        continue;

                                if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
                                        desc->write_cmd_status = ret;
                        }
                }
                pcc_ss_data->pcc_write_cnt++;
                wake_up_all(&pcc_ss_data->pcc_write_wait_q);
        }

        return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
        if (ret < 0)
                pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
        else
                pr_debug("TX completed. CMD sent:%x, ret:%d\n",
                                *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
        .tx_done = cppc_chan_tx_done,
        .knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
        int result = -EFAULT;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
                                            &buffer, ACPI_TYPE_PACKAGE);
        if (status == AE_NOT_FOUND)     /* _PSD is optional */
                return 0;
        if (ACPI_FAILURE(status))
                return -ENODEV;

        psd = buffer.pointer;
        if (!psd || psd->package.count != 1) {
                pr_debug("Invalid _PSD data\n");
                goto end;
        }

        pdomain = &(cpc_ptr->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                &format, &state);
        if (ACPI_FAILURE(status)) {
                pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        result = 0;
end:
        kfree(buffer.pointer);
        return result;
}

bool acpi_cpc_valid(void)
{
        struct cpc_desc *cpc_ptr;
        int cpu;

        if (acpi_disabled)
                return false;

        for_each_present_cpu(cpu) {
                cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
                if (!cpc_ptr)
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);

bool cppc_allow_fast_switch(void)
{
        struct cpc_register_resource *desired_reg;
        struct cpc_desc *cpc_ptr;
        int cpu;

        for_each_possible_cpu(cpu) {
                cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
                desired_reg = &cpc_ptr->cpc_regs[DESIRED_PERF];
                if (!CPC_IN_SYSTEM_MEMORY(desired_reg) &&
                                !CPC_IN_SYSTEM_IO(desired_reg))
                        return false;
        }

        return true;
}
EXPORT_SYMBOL_GPL(cppc_allow_fast_switch);

/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
        struct cpc_desc *cpc_ptr, *match_cpc_ptr;
        struct acpi_psd_package *match_pdomain;
        struct acpi_psd_package *pdomain;
        int count_target, i;

        /*
         * Now that we have _PSD data from all CPUs, let's setup P-state
         * domain info.
         */
        cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
        if (!cpc_ptr)
                return -EFAULT;

        pdomain = &(cpc_ptr->domain_info);
        cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
        if (pdomain->num_processors <= 1)
                return 0;

        /* Validate the Domain info */
        count_target = pdomain->num_processors;
        if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
        else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
        else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

        for_each_possible_cpu(i) {
                if (i == cpu)
                        continue;

                match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
                if (!match_cpc_ptr)
                        goto err_fault;

                match_pdomain = &(match_cpc_ptr->domain_info);
                if (match_pdomain->domain != pdomain->domain)
                        continue;

                /* Here i and cpu are in the same domain */
                if (match_pdomain->num_processors != count_target)
                        goto err_fault;

                if (pdomain->coord_type != match_pdomain->coord_type)
                        goto err_fault;

                cpumask_set_cpu(i, cpu_data->shared_cpu_map);
        }

        return 0;

err_fault:
        /* Assume no coordination on any error parsing domain info */
        cpumask_clear(cpu_data->shared_cpu_map);
        cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
        cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

        return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
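
/*
 * A minimal calling sketch, assuming a caller-owned cppc_cpudata whose
 * shared_cpu_map has been allocated (the cppc_cpufreq driver follows
 * this pattern):
 *
 *      if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL))
 *              return -ENOMEM;
 *      ret = acpi_get_psd_map(policy->cpu, cpu_data);
 *      // on success, shared_cpu_map holds every CPU in this perf domain
 */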

static int register_pcc_channel(int pcc_ss_idx)
{
        struct pcc_mbox_chan *pcc_chan;
        u64 usecs_lat;

        if (pcc_ss_idx >= 0) {
                pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

                if (IS_ERR(pcc_chan)) {
                        pr_err("Failed to find PCC channel for subspace %d\n",
                               pcc_ss_idx);
                        return -ENODEV;
                }

                pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
                /*
                 * pcc_chan->latency is just a nominal value. In reality
                 * the remote processor could be much slower to reply.
                 * So add an arbitrary amount of wait on top of the nominal
                 * latency.
                 */
                usecs_lat = NUM_RETRIES * pcc_chan->latency;
                pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
                pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
                pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
                pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

                pcc_data[pcc_ss_idx]->pcc_comm_addr =
                        acpi_os_ioremap(pcc_chan->shmem_base_addr,
                                        pcc_chan->shmem_size);
                if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
                        pr_err("Failed to ioremap PCC comm region mem for %d\n",
                               pcc_ss_idx);
                        return -ENOMEM;
                }

                /* Set flag so that we don't come here for each CPU. */
                pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
        }

        return 0;
}
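
/*
 * Example: for a PCCT-reported nominal latency of 200 us, the polling
 * deadline computed above is NUM_RETRIES * 200 us = 100 ms.
 */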

/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
        return false;
}

/**
 * cpc_supported_by_cpu() - check if CPPC is supported by CPU
 *
 * Check if the architectural support for CPPC is present even
 * if the _OSC hasn't prescribed it.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_supported_by_cpu(void)
{
        return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 * @pcc_ss_id: PCC Subspace index as in the PCC client ACPI package.
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
        if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
                return -EINVAL;

        if (pcc_data[pcc_ss_id]) {
                pcc_data[pcc_ss_id]->refcount++;
        } else {
                pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
                                              GFP_KERNEL);
                if (!pcc_data[pcc_ss_id])
                        return -ENOMEM;
                pcc_data[pcc_ss_id]->refcount++;
        }

        return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *  Name (_CPC, Package() {
 *      17,                                                     // NumEntries
 *      1,                                                      // Revision
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},    // Highest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},    // Nominal Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},    // Lowest Nonlinear Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},    // Lowest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},    // Guaranteed Performance Register
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},    // Desired Performance Register
 *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *      ...
 *      ...
 *      ...
 *  })
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *  Register (
 *      PCC,    // AddressSpaceKeyword
 *      8,      // RegisterBitWidth
 *      8,      // RegisterBitOffset
 *      0x30,   // RegisterAddress
 *      9,      // AccessSize (subspace ID)
 *  )
 */

#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif

/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *out_obj, *cpc_obj;
        struct cpc_desc *cpc_ptr;
        struct cpc_reg *gas_t;
        struct device *cpu_dev;
        acpi_handle handle = pr->handle;
        unsigned int num_ent, i, cpc_rev;
        int pcc_subspace_id = -1;
        acpi_status status;
        int ret = -ENODATA;

        if (!osc_sb_cppc2_support_acked) {
                pr_debug("CPPC v2 _OSC not acked\n");
                if (!cpc_supported_by_cpu())
                        return -ENODEV;
        }

        /* Parse the ACPI _CPC table for this CPU. */
        status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
                        ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status)) {
                ret = -ENODEV;
                goto out_buf_free;
        }

        out_obj = (union acpi_object *) output.pointer;

        cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
        if (!cpc_ptr) {
                ret = -ENOMEM;
                goto out_buf_free;
        }

        /* First entry is NumEntries. */
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
                if (num_ent <= 1) {
                        pr_debug("Unexpected _CPC NumEntries value (%d) for CPU:%d\n",
                                 num_ent, pr->id);
                        goto out_free;
                }
        } else {
                pr_debug("Unexpected _CPC NumEntries entry type (%d) for CPU:%d\n",
                         cpc_obj->type, pr->id);
                goto out_free;
        }

        /* Second entry should be revision. */
        cpc_obj = &out_obj->package.elements[1];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected _CPC Revision entry type (%d) for CPU:%d\n",
                         cpc_obj->type, pr->id);
                goto out_free;
        }

        if (cpc_rev < CPPC_V2_REV) {
                pr_debug("Unsupported _CPC Revision (%d) for CPU:%d\n", cpc_rev,
                         pr->id);
                goto out_free;
        }

        /*
         * Disregard _CPC if the number of entries in the return package is not
         * as expected, but support future revisions being proper supersets of
         * v3 and only causing more entries to be returned by _CPC.
         */
        if ((cpc_rev == CPPC_V2_REV && num_ent != CPPC_V2_NUM_ENT) ||
            (cpc_rev == CPPC_V3_REV && num_ent != CPPC_V3_NUM_ENT) ||
            (cpc_rev > CPPC_V3_REV && num_ent <= CPPC_V3_NUM_ENT)) {
                pr_debug("Unexpected number of _CPC return package entries (%d) for CPU:%d\n",
                         num_ent, pr->id);
                goto out_free;
        }
        if (cpc_rev > CPPC_V3_REV) {
                num_ent = CPPC_V3_NUM_ENT;
                cpc_rev = CPPC_V3_REV;
        }

        cpc_ptr->num_entries = num_ent;
        cpc_ptr->version = cpc_rev;

        /* Iterate through remaining entries in _CPC */
        for (i = 2; i < num_ent; i++) {
                cpc_obj = &out_obj->package.elements[i];

                if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
                        cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
                } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
                        gas_t = (struct cpc_reg *)
                                cpc_obj->buffer.pointer;

                        /*
                         * The PCC Subspace index is encoded inside
                         * the CPC table entries. The same PCC index
                         * will be used for all the PCC entries,
                         * so extract it only once.
                         */
                        if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
                                if (pcc_subspace_id < 0) {
                                        pcc_subspace_id = gas_t->access_width;
                                        if (pcc_data_alloc(pcc_subspace_id))
                                                goto out_free;
                                } else if (pcc_subspace_id != gas_t->access_width) {
                                        pr_debug("Mismatched PCC ids in _CPC for CPU:%d\n",
                                                 pr->id);
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                                if (gas_t->address) {
                                        void __iomem *addr;
                                        size_t access_width;

                                        if (!osc_cpc_flexible_adr_space_confirmed) {
                                                pr_debug("Flexible address space capability not supported\n");
                                                if (!cpc_supported_by_cpu())
                                                        goto out_free;
                                        }

                                        access_width = GET_BIT_WIDTH(gas_t) / 8;
                                        addr = ioremap(gas_t->address, access_width);
                                        if (!addr)
                                                goto out_free;
                                        cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                                if (gas_t->access_width < 1 || gas_t->access_width > 3) {
                                        /*
                                         * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
                                         * SystemIO doesn't implement 64-bit
                                         * registers.
                                         */
                                        pr_debug("Invalid access width %d for SystemIO register in _CPC\n",
                                                 gas_t->access_width);
                                        goto out_free;
                                }
                                if (gas_t->address & OVER_16BTS_MASK) {
                                        /* SystemIO registers use 16-bit integer addresses */
                                        pr_debug("Invalid IO port %llu for SystemIO register in _CPC\n",
                                                 gas_t->address);
                                        goto out_free;
                                }
                                if (!osc_cpc_flexible_adr_space_confirmed) {
                                        pr_debug("Flexible address space capability not supported\n");
                                        if (!cpc_supported_by_cpu())
                                                goto out_free;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
                                        pr_debug("Unsupported register type (%d) in _CPC\n",
                                                 gas_t->space_id);
                                        goto out_free;
                                }
                        }

                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
                        pr_debug("Invalid entry type (%d) in _CPC for CPU:%d\n",
                                 i, pr->id);
                        goto out_free;
                }
        }
        per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

        /*
         * Initialize the remaining cpc_regs as unsupported.
         * Example: In case FW exposes CPPC v2, the below loop will initialize
         * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported.
         */
        for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
                cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
                cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
        }

        /* Store CPU Logical ID */
        cpc_ptr->cpu_id = pr->id;

        /* Parse PSD data for this CPU */
        ret = acpi_get_psd(cpc_ptr, handle);
        if (ret)
                goto out_free;

        /* Register the PCC channel once per PCC subspace ID. */
        if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
                ret = register_pcc_channel(pcc_subspace_id);
                if (ret)
                        goto out_free;

                init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
                init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
        }

        /* Everything looks okay */
        pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

        /* Add per logical CPU nodes for reading its feedback counters. */
        cpu_dev = get_cpu_device(pr->id);
        if (!cpu_dev) {
                ret = -EINVAL;
                goto out_free;
        }

        /* Plug PSD data into this CPU's CPC descriptor. */
        per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

        ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
                        "acpi_cppc");
        if (ret) {
                per_cpu(cpc_desc_ptr, pr->id) = NULL;
                kobject_put(&cpc_ptr->kobj);
                goto out_free;
        }

        arch_init_invariance_cppc();

        kfree(output.pointer);
        return 0;

out_free:
        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

                if (addr)
                        iounmap(addr);
        }
        kfree(cpc_ptr);

out_buf_free:
        kfree(output.pointer);
        return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
        struct cpc_desc *cpc_ptr;
        unsigned int i;
        void __iomem *addr;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

        if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
                if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
                        pcc_data[pcc_ss_id]->refcount--;
                        if (!pcc_data[pcc_ss_id]->refcount) {
                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
                                kfree(pcc_data[pcc_ss_id]);
                                pcc_data[pcc_ss_id] = NULL;
                        }
                }
        }

        cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
        if (!cpc_ptr)
                return;

        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
                if (addr)
                        iounmap(addr);
        }

        kobject_put(&cpc_ptr->kobj);
        kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:     CPU number to read
 * @reg:        cppc register information
 * @val:        place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
        return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:     CPU number to write
 * @reg:        cppc register information
 * @val:        value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
        return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
        void __iomem *vaddr = NULL;
        int size;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg_res->type == ACPI_TYPE_INTEGER) {
                *val = reg_res->cpc_entry.int_value;
                return 0;
        }

        *val = 0;

        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                u32 width = GET_BIT_WIDTH(reg);
                u32 val_u32;
                acpi_status status;

                status = acpi_os_read_port((acpi_io_address)reg->address,
                                           &val_u32, width);
                if (ACPI_FAILURE(status)) {
                        pr_debug("Error: Failed to read SystemIO port %llx\n",
                                 reg->address);
                        return -EFAULT;
                }

                *val = val_u32;
                return 0;
        } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_read_ffh(cpu, reg, val);
        else
                return acpi_os_read_memory((acpi_physical_address)reg->address,
                                val, reg->bit_width);

        size = GET_BIT_WIDTH(reg);

        switch (size) {
        case 8:
                *val = readb_relaxed(vaddr);
                break;
        case 16:
                *val = readw_relaxed(vaddr);
                break;
        case 32:
                *val = readl_relaxed(vaddr);
                break;
        case 64:
                *val = readq_relaxed(vaddr);
                break;
        default:
                pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
                         reg->bit_width, pcc_ss_id);
                return -EFAULT;
        }

        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                *val = MASK_VAL(reg, *val);

        return 0;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
        int ret_val = 0;
        int size;
        void __iomem *vaddr = NULL;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
                u32 width = GET_BIT_WIDTH(reg);
                acpi_status status;

                status = acpi_os_write_port((acpi_io_address)reg->address,
                                            (u32)val, width);
                if (ACPI_FAILURE(status)) {
                        pr_debug("Error: Failed to write SystemIO port %llx\n",
                                 reg->address);
                        return -EFAULT;
                }

                return 0;
        } else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_write_ffh(cpu, reg, val);
        else
                return acpi_os_write_memory((acpi_physical_address)reg->address,
                                val, reg->bit_width);

        size = GET_BIT_WIDTH(reg);

        if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                val = MASK_VAL(reg, val);

        switch (size) {
        case 8:
                writeb_relaxed(val, vaddr);
                break;
        case 16:
                writew_relaxed(val, vaddr);
                break;
        case 32:
                writel_relaxed(val, vaddr);
                break;
        case 64:
                writeq_relaxed(val, vaddr);
                break;
        default:
                pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
                         reg->bit_width, pcc_ss_id);
                ret_val = -EFAULT;
                break;
        }

        return ret_val;
}

static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *reg;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        reg = &cpc_desc->cpc_regs[reg_idx];

        if (CPC_IN_PCC(reg)) {
                int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
                struct cppc_pcc_data *pcc_ss_data = NULL;
                int ret = 0;

                if (pcc_ss_id < 0)
                        return -EIO;

                pcc_ss_data = pcc_data[pcc_ss_id];

                down_write(&pcc_ss_data->pcc_lock);

                if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
                        cpc_read(cpunum, reg, perf);
                else
                        ret = -EIO;

                up_write(&pcc_ss_data->pcc_lock);

                return ret;
        }

        cpc_read(cpunum, reg, perf);

        return 0;
}

/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
        return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
        return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_nominal_perf);

/**
 * cppc_get_highest_perf - Get the highest performance register value.
 * @cpunum: CPU from which to get highest performance.
 * @highest_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_highest_perf(int cpunum, u64 *highest_perf)
{
        return cppc_get_perf(cpunum, HIGHEST_PERF, highest_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_highest_perf);

/**
 * cppc_get_epp_perf - Get the epp register value.
 * @cpunum: CPU from which to get epp preference value.
 * @epp_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_epp_perf(int cpunum, u64 *epp_perf)
{
        return cppc_get_perf(cpunum, ENERGY_PERF, epp_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_epp_perf);

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *highest_reg, *lowest_reg,
                *lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
                *low_freq_reg = NULL, *nom_freq_reg = NULL;
        u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
        struct cppc_pcc_data *pcc_ss_data = NULL;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
        nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
        low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
        nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
        guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
                CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
                CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
                if (pcc_ss_id < 0) {
                        pr_debug("Invalid pcc_ss_id\n");
                        return -ENODEV;
                }
                pcc_ss_data = pcc_data[pcc_ss_id];
                regs_in_pcc = 1;
                down_write(&pcc_ss_data->pcc_lock);
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, highest_reg, &high);
        perf_caps->highest_perf = high;

        cpc_read(cpunum, lowest_reg, &low);
        perf_caps->lowest_perf = low;

        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;

        if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
            IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
                perf_caps->guaranteed_perf = 0;
        } else {
                cpc_read(cpunum, guaranteed_reg, &guaranteed);
                perf_caps->guaranteed_perf = guaranteed;
        }

        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;

        if (!high || !low || !nom || !min_nonlinear)
                ret = -EFAULT;

        /* Read optional lowest and nominal frequencies if present */
        if (CPC_SUPPORTED(low_freq_reg))
                cpc_read(cpunum, low_freq_reg, &low_f);

        if (CPC_SUPPORTED(nom_freq_reg))
                cpc_read(cpunum, nom_freq_reg, &nom_f);

        perf_caps->lowest_freq = low_f;
        perf_caps->nominal_freq = nom_f;

out_err:
        if (regs_in_pcc)
                up_write(&pcc_ss_data->pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);

/**
 * cppc_perf_ctrs_in_pcc - Check if any perf counters are in a PCC region.
 *
 * CPPC has flexibility about how CPU performance counters are accessed.
 * One of the choices is PCC regions, which can have a high access latency. This
 * routine allows callers of cppc_get_perf_ctrs() to know this ahead of time.
 *
 * Return: true if any of the counters are in PCC regions, false otherwise
 */
bool cppc_perf_ctrs_in_pcc(void)
{
        int cpu;

        for_each_present_cpu(cpu) {
                struct cpc_register_resource *ref_perf_reg;
                struct cpc_desc *cpc_desc;

                cpc_desc = per_cpu(cpc_desc_ptr, cpu);

                if (CPC_IN_PCC(&cpc_desc->cpc_regs[DELIVERED_CTR]) ||
                    CPC_IN_PCC(&cpc_desc->cpc_regs[REFERENCE_CTR]) ||
                    CPC_IN_PCC(&cpc_desc->cpc_regs[CTR_WRAP_TIME]))
                        return true;

                ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];

                /*
                 * If the reference perf register is not supported then we
                 * should use the nominal perf value.
                 */
                if (!CPC_SUPPORTED(ref_perf_reg))
                        ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

                if (CPC_IN_PCC(ref_perf_reg))
                        return true;
        }

        return false;
}
EXPORT_SYMBOL_GPL(cppc_perf_ctrs_in_pcc);

1339 /**
1340  * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
1341  * @cpunum: CPU from which to read counters.
1342  * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
1343  *
1344  * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
1345  */
1346 int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
1347 {
1348         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1349         struct cpc_register_resource *delivered_reg, *reference_reg,
1350                 *ref_perf_reg, *ctr_wrap_reg;
1351         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1352         struct cppc_pcc_data *pcc_ss_data = NULL;
1353         u64 delivered, reference, ref_perf, ctr_wrap_time;
1354         int ret = 0, regs_in_pcc = 0;
1355
1356         if (!cpc_desc) {
1357                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1358                 return -ENODEV;
1359         }
1360
1361         delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
1362         reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
1363         ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
1364         ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];
1365
1366         /*
1367          * If reference perf register is not supported then we should
1368          * use the nominal perf value
1369          */
1370         if (!CPC_SUPPORTED(ref_perf_reg))
1371                 ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
1372
1373         /* Are any of the regs PCC ?*/
1374         if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
1375                 CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
1376                 if (pcc_ss_id < 0) {
1377                         pr_debug("Invalid pcc_ss_id\n");
1378                         return -ENODEV;
1379                 }
1380                 pcc_ss_data = pcc_data[pcc_ss_id];
1381                 down_write(&pcc_ss_data->pcc_lock);
1382                 regs_in_pcc = 1;
1383                 /* Ring doorbell once to update PCC subspace */
1384                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
1385                         ret = -EIO;
1386                         goto out_err;
1387                 }
1388         }
1389
1390         cpc_read(cpunum, delivered_reg, &delivered);
1391         cpc_read(cpunum, reference_reg, &reference);
1392         cpc_read(cpunum, ref_perf_reg, &ref_perf);
1393
1394         /*
1395          * Per spec, if the optional ctr_wrap_time register is unsupported, the
1396          * performance counters are assumed to never wrap during the lifetime of
1397          * the platform.
1398          */
1399         ctr_wrap_time = U64_MAX;
1400         if (CPC_SUPPORTED(ctr_wrap_reg))
1401                 cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);
1402
1403         if (!delivered || !reference || !ref_perf) {
1404                 ret = -EFAULT;
1405                 goto out_err;
1406         }
1407
1408         perf_fb_ctrs->delivered = delivered;
1409         perf_fb_ctrs->reference = reference;
1410         perf_fb_ctrs->reference_perf = ref_perf;
1411         perf_fb_ctrs->wraparound_time = ctr_wrap_time;
1412 out_err:
1413         if (regs_in_pcc)
1414                 up_write(&pcc_ss_data->pcc_lock);
1415         return ret;
1416 }
1417 EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
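
/*
 * Illustrative sketch (not part of this file): feedback counters are
 * normally consumed as two snapshots taken some interval apart, with
 * delivered performance computed as reference_perf scaled by the ratio of
 * the counter deltas. Wraparound handling via wraparound_time is omitted
 * for brevity; the helper name and sampling window are hypothetical.
 */
#if 0	/* example only */
static u64 example_delivered_perf(int cpu)
{
	struct cppc_perf_fb_ctrs fb0, fb1;
	u64 d_delivered, d_reference;

	if (cppc_get_perf_ctrs(cpu, &fb0))
		return 0;
	udelay(100);	/* sampling window; length is arbitrary here */
	if (cppc_get_perf_ctrs(cpu, &fb1))
		return 0;

	d_delivered = fb1.delivered - fb0.delivered;
	d_reference = fb1.reference - fb0.reference;
	if (!d_reference)
		return 0;

	/* delivered perf = reference perf * delivered delta / reference delta */
	return div64_u64(fb1.reference_perf * d_delivered, d_reference);
}
#endif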
1418
1419 /*
1420  * Set Energy Performance Preference Register value through
1421  * Performance Controls Interface
1422  */
1423 int cppc_set_epp_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls, bool enable)
1424 {
1425         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1426         struct cpc_register_resource *epp_set_reg;
1427         struct cpc_register_resource *auto_sel_reg;
1428         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1429         struct cppc_pcc_data *pcc_ss_data = NULL;
1430         int ret;
1431
1432         if (!cpc_desc) {
1433                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1434                 return -ENODEV;
1435         }
1436
1437         auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1438         epp_set_reg = &cpc_desc->cpc_regs[ENERGY_PERF];
1439
1440         if (CPC_IN_PCC(epp_set_reg) || CPC_IN_PCC(auto_sel_reg)) {
1441                 if (pcc_ss_id < 0) {
1442                         pr_debug("Invalid pcc_ss_id for CPU:%d\n", cpu);
1443                         return -ENODEV;
1444                 }
1445
1446                 if (CPC_SUPPORTED(auto_sel_reg)) {
1447                         ret = cpc_write(cpu, auto_sel_reg, enable);
1448                         if (ret)
1449                                 return ret;
1450                 }
1451
1452                 if (CPC_SUPPORTED(epp_set_reg)) {
1453                         ret = cpc_write(cpu, epp_set_reg, perf_ctrls->energy_perf);
1454                         if (ret)
1455                                 return ret;
1456                 }
1457
1458                 pcc_ss_data = pcc_data[pcc_ss_id];
1459
1460                 down_write(&pcc_ss_data->pcc_lock);
1461                 /* after writing CPC, transfer the ownership of PCC to platform */
1462                 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1463                 up_write(&pcc_ss_data->pcc_lock);
1464         } else {
1465                 ret = -EOPNOTSUPP;
1466                 pr_debug("_CPC registers not in PCC are not supported\n");
1467         }
1468
1469         return ret;
1470 }
1471 EXPORT_SYMBOL_GPL(cppc_set_epp_perf);
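
/*
 * Illustrative sketch (not part of this file): an EPP-aware caller fills
 * in only the energy_perf field and enables autonomous selection in the
 * same call. The 0x80 value is a hypothetical mid-scale "balanced"
 * preference; the register ranges from 0 (performance) to 0xff (energy
 * efficiency). The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_set_balanced_epp(int cpu)
{
	struct cppc_perf_ctrls ctrls = {
		.energy_perf = 0x80,	/* balance performance vs. energy */
	};

	return cppc_set_epp_perf(cpu, &ctrls, true);
}
#endif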
1472
1473 /**
1474  * cppc_get_auto_sel_caps - Read autonomous selection register.
1475  * @cpunum : CPU from which to read the register.
1476  * @perf_caps : struct in which the autonomous selection register value is returned.
1477  */
1478 int cppc_get_auto_sel_caps(int cpunum, struct cppc_perf_caps *perf_caps)
1479 {
1480         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
1481         struct cpc_register_resource *auto_sel_reg;
1482         u64 auto_sel;
1483
1484         if (!cpc_desc) {
1485                 pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
1486                 return -ENODEV;
1487         }
1488
1489         auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1490
1491         if (!CPC_SUPPORTED(auto_sel_reg))
1492                 pr_warn_once("Autonomous mode is not supported!\n");
1493
1494         if (CPC_IN_PCC(auto_sel_reg)) {
1495                 int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
1496                 struct cppc_pcc_data *pcc_ss_data = NULL;
1497                 int ret = 0;
1498
1499                 if (pcc_ss_id < 0)
1500                         return -ENODEV;
1501
1502                 pcc_ss_data = pcc_data[pcc_ss_id];
1503
1504                 down_write(&pcc_ss_data->pcc_lock);
1505
1506                 if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0) {
1507                         cpc_read(cpunum, auto_sel_reg, &auto_sel);
1508                         perf_caps->auto_sel = (bool)auto_sel;
1509                 } else {
1510                         ret = -EIO;
1511                 }
1512
1513                 up_write(&pcc_ss_data->pcc_lock);
1514
1515                 return ret;
1516         }
1517
1518         return 0;
1519 }
1520 EXPORT_SYMBOL_GPL(cppc_get_auto_sel_caps);
1521
1522 /**
1523  * cppc_set_auto_sel - Write autonomous selection register.
1524  * @cpu    : CPU to which to write the register.
1525  * @enable : the desired value to write to the autonomous selection register.
1526  */
1527 int cppc_set_auto_sel(int cpu, bool enable)
1528 {
1529         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1530         struct cpc_register_resource *auto_sel_reg;
1531         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1532         struct cppc_pcc_data *pcc_ss_data = NULL;
1533         int ret = -EINVAL;
1534
1535         if (!cpc_desc) {
1536                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1537                 return -ENODEV;
1538         }
1539
1540         auto_sel_reg = &cpc_desc->cpc_regs[AUTO_SEL_ENABLE];
1541
1542         if (CPC_IN_PCC(auto_sel_reg)) {
1543                 if (pcc_ss_id < 0) {
1544                         pr_debug("Invalid pcc_ss_id\n");
1545                         return -ENODEV;
1546                 }
1547
1548                 if (CPC_SUPPORTED(auto_sel_reg)) {
1549                         ret = cpc_write(cpu, auto_sel_reg, enable);
1550                         if (ret)
1551                                 return ret;
1552                 }
1553
1554                 pcc_ss_data = pcc_data[pcc_ss_id];
1555
1556                 down_write(&pcc_ss_data->pcc_lock);
1557                 /* after writing CPC, transfer the ownership of PCC to platform */
1558                 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1559                 up_write(&pcc_ss_data->pcc_lock);
1560         } else {
1561                 ret = -EOPNOTSUPP;
1562                 pr_debug("_CPC registers not in PCC are not supported\n");
1563         }
1564
1565         return ret;
1566 }
1567 EXPORT_SYMBOL_GPL(cppc_set_auto_sel);
1568
1569 /**
1570  * cppc_set_enable - Set to enable CPPC on the processor by writing the
1571  * Continuous Performance Control package EnableRegister field.
1572  * @cpu: CPU for which to enable CPPC register.
1573  * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
1574  *
1575  * Return: 0 for success, -ERRNO or -EIO otherwise.
1576  */
1577 int cppc_set_enable(int cpu, bool enable)
1578 {
1579         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1580         struct cpc_register_resource *enable_reg;
1581         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1582         struct cppc_pcc_data *pcc_ss_data = NULL;
1583         int ret = -EINVAL;
1584
1585         if (!cpc_desc) {
1586                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1587                 return -EINVAL;
1588         }
1589
1590         enable_reg = &cpc_desc->cpc_regs[ENABLE];
1591
1592         if (CPC_IN_PCC(enable_reg)) {
1594                 if (pcc_ss_id < 0)
1595                         return -EIO;
1596
1597                 ret = cpc_write(cpu, enable_reg, enable);
1598                 if (ret)
1599                         return ret;
1600
1601                 pcc_ss_data = pcc_data[pcc_ss_id];
1602
1603                 down_write(&pcc_ss_data->pcc_lock);
1604                 /* after writing CPC, transfer the ownership of PCC to platform */
1605                 ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1606                 up_write(&pcc_ss_data->pcc_lock);
1607                 return ret;
1608         }
1609
1610         return cpc_write(cpu, enable_reg, enable);
1611 }
1612 EXPORT_SYMBOL_GPL(cppc_set_enable);
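
/*
 * Illustrative sketch (not part of this file): drivers typically enable
 * CPPC once per CPU during init, before issuing any performance requests.
 * The helper name is hypothetical.
 */
#if 0	/* example only */
static int example_cpu_init(int cpu)
{
	/* must succeed before desired/min/max perf writes take effect */
	return cppc_set_enable(cpu, true);
}
#endif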
1613
1614 /**
1615  * cppc_set_perf - Set a CPU's performance controls.
1616  * @cpu: CPU for which to set performance controls.
1617  * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
1618  *
1619  * Return: 0 for success, -ERRNO otherwise.
1620  */
1621 int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
1622 {
1623         struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
1624         struct cpc_register_resource *desired_reg, *min_perf_reg, *max_perf_reg;
1625         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
1626         struct cppc_pcc_data *pcc_ss_data = NULL;
1627         int ret = 0;
1628
1629         if (!cpc_desc) {
1630                 pr_debug("No CPC descriptor for CPU:%d\n", cpu);
1631                 return -ENODEV;
1632         }
1633
1634         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1635         min_perf_reg = &cpc_desc->cpc_regs[MIN_PERF];
1636         max_perf_reg = &cpc_desc->cpc_regs[MAX_PERF];
1637
1638         /*
1639          * This is Phase-I where we want to write to CPC registers
1640          * -> We want all CPUs to be able to execute this phase in parallel
1641          *
1642          * Since read_lock can be acquired by multiple CPUs simultaneously we
1643          * achieve that goal here
1644          */
1645         if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1646                 if (pcc_ss_id < 0) {
1647                         pr_debug("Invalid pcc_ss_id\n");
1648                         return -ENODEV;
1649                 }
1650                 pcc_ss_data = pcc_data[pcc_ss_id];
1651                 down_read(&pcc_ss_data->pcc_lock); /* BEGIN Phase-I */
1652                 if (pcc_ss_data->platform_owns_pcc) {
1653                         ret = check_pcc_chan(pcc_ss_id, false);
1654                         if (ret) {
1655                                 up_read(&pcc_ss_data->pcc_lock);
1656                                 return ret;
1657                         }
1658                 }
1659                 /*
1660                  * Update the pending_write to make sure a PCC CMD_READ will not
1661                  * arrive and steal the channel during the switch to write lock
1662                  */
1663                 pcc_ss_data->pending_pcc_write_cmd = true;
1664                 cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
1665                 cpc_desc->write_cmd_status = 0;
1666         }
1667
1668         cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);
1669
1670         /*
1671          * Only write if min_perf and max_perf not zero. Some drivers pass zero
1672          * value to min and max perf, but they don't mean to set the zero value,
1673          * they just don't want to write to those registers.
1674          */
1675         if (perf_ctrls->min_perf)
1676                 cpc_write(cpu, min_perf_reg, perf_ctrls->min_perf);
1677         if (perf_ctrls->max_perf)
1678                 cpc_write(cpu, max_perf_reg, perf_ctrls->max_perf);
1679
1680         if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg))
1681                 up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */
1682         /*
1683          * This is Phase-II where we transfer the ownership of PCC to Platform
1684          *
1685          * Short Summary: Basically, if we think of a group of cppc_set_perf
1686          * requests that happened in a short overlapping interval, the last CPU
1687          * to come out of Phase-I will enter Phase-II and ring the doorbell.
1688          *
1689          * We have the following requirements for Phase-II:
1690          *     1. We want to execute Phase-II only when there are no CPUs
1691          * currently executing in Phase-I
1692          *     2. Once we start Phase-II we want to prevent all other CPUs from
1693          * entering Phase-I.
1694          *     3. We want only one CPU among all those who went through Phase-I
1695          * to run phase-II
1696          *
1697          * If write_trylock fails to get the lock and doesn't transfer the
1698          * PCC ownership to the platform, then one of the following will be TRUE
1699          *     1. There is at least one CPU in Phase-I which will later execute
1700          * write_trylock, so the CPUs in Phase-I will be responsible for
1701          * executing the Phase-II.
1702          *     2. Some other CPU has beaten this CPU to successfully execute the
1703          * write_trylock and has already acquired the write_lock. We know for a
1704          * fact it (other CPU acquiring the write_lock) couldn't have happened
1705          * before this CPU's Phase-I as we held the read_lock.
1706          *     3. Some other CPU executing a pcc CMD_READ has stolen the
1707          * down_write, in which case send_pcc_cmd will check for pending
1708          * CMD_WRITE commands by checking pending_pcc_write_cmd, so this
1709          * CPU can be certain that its request will be delivered.
1710          *    So in all cases, this CPU knows that its request will be delivered
1711          * by another CPU and can return.
1712          *
1713          * After getting the down_write we still need to check for
1714          * pending_pcc_write_cmd to take care of the following scenario
1715          *    The thread running this code could be scheduled out between
1716          * Phase-I and Phase-II. Before it is scheduled back on, another CPU
1717          * could have delivered the request to Platform by triggering the
1718          * doorbell and transferred the ownership of PCC to platform. So this
1719          * avoids triggering an unnecessary doorbell and more importantly before
1720          * triggering the doorbell it makes sure that the PCC channel ownership
1721          * is still with OSPM.
1722          *   pending_pcc_write_cmd can also be cleared by a different CPU, if
1723          * there was a pcc CMD_READ waiting on down_write and it steals the lock
1724          * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
1725          * case during a CMD_READ and if there are pending writes it delivers
1726          * the write command before servicing the read command
1727          */
1728         if (CPC_IN_PCC(desired_reg) || CPC_IN_PCC(min_perf_reg) || CPC_IN_PCC(max_perf_reg)) {
1729                 if (down_write_trylock(&pcc_ss_data->pcc_lock)) {/* BEGIN Phase-II */
1730                         /* Update only if there are pending write commands */
1731                         if (pcc_ss_data->pending_pcc_write_cmd)
1732                                 send_pcc_cmd(pcc_ss_id, CMD_WRITE);
1733                         up_write(&pcc_ss_data->pcc_lock);       /* END Phase-II */
1734                 } else
1735                         /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
1736                         wait_event(pcc_ss_data->pcc_write_wait_q,
1737                                    cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);
1738
1739                 /* send_pcc_cmd updates the status in case of failure */
1740                 ret = cpc_desc->write_cmd_status;
1741         }
1742         return ret;
1743 }
1744 EXPORT_SYMBOL_GPL(cppc_set_perf);
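
/*
 * Illustrative sketch (not part of this file): a minimal frequency-target
 * callback in the style of a CPUfreq driver, converting a kHz request to
 * an abstract performance level first. The caps argument is assumed to
 * have been populated earlier via cppc_get_perf_caps(); the helper name
 * is hypothetical.
 */
#if 0	/* example only */
static int example_set_target(int cpu, struct cppc_perf_caps *caps,
			      unsigned int target_khz)
{
	struct cppc_perf_ctrls ctrls = {};

	ctrls.desired_perf = cppc_khz_to_perf(caps, target_khz);
	/* min_perf/max_perf left zero: don't touch those registers */
	return cppc_set_perf(cpu, &ctrls);
}
#endif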
1745
1746 /**
1747  * cppc_get_transition_latency - returns frequency transition latency in ns
1748  * @cpu_num: CPU number for per_cpu().
1749  *
1750  * ACPI CPPC does not explicitly specify how a platform can specify the
1751  * transition latency for performance change requests. The closest we have
1752  * is the timing information from the PCCT tables which provides the info
1753  * on the number and frequency of PCC commands the platform can handle.
1754  *
1755  * If desired_reg is in the SystemMemory or SystemIo ACPI address space,
1756  * then assume there is no latency.
1757  */
1758 unsigned int cppc_get_transition_latency(int cpu_num)
1759 {
1760         /*
1761          * Expected transition latency is based on the PCCT timing values
1762          * Below are definitions from the ACPI spec:
1763          * pcc_nominal - Expected latency to process a command, in microseconds
1764          * pcc_mpar   - The maximum number of periodic requests that the subspace
1765          *              channel can support, reported in commands per minute. 0
1766          *              indicates no limitation.
1767          * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
1768          *              completion of a command before issuing the next command,
1769          *              in microseconds.
1770          */
1771         unsigned int latency_ns = 0;
1772         struct cpc_desc *cpc_desc;
1773         struct cpc_register_resource *desired_reg;
1774         int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
1775         struct cppc_pcc_data *pcc_ss_data;
1776
1777         cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
1778         if (!cpc_desc)
1779                 return CPUFREQ_ETERNAL;
1780
1781         desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
1782         if (CPC_IN_SYSTEM_MEMORY(desired_reg) || CPC_IN_SYSTEM_IO(desired_reg))
1783                 return 0;
1784         else if (!CPC_IN_PCC(desired_reg))
1785                 return CPUFREQ_ETERNAL;
1786
1787         if (pcc_ss_id < 0)
1788                 return CPUFREQ_ETERNAL;
1789
1790         pcc_ss_data = pcc_data[pcc_ss_id];
1791         if (pcc_ss_data->pcc_mpar)
1792                 latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);
1793
1794         latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
1795         latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);
1796
1797         return latency_ns;
1798 }
1799 EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
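
/*
 * Worked example with illustrative numbers: pcc_mpar = 600 commands per
 * minute gives an MPAR term of 60 * (10^9 / 600) ns, i.e. roughly 100 ms
 * between requests. With pcc_nominal = 2000 us (2 ms) and pcc_mrtt =
 * 1000 us (1 ms), the returned latency is max(~100 ms, 2 ms, 1 ms),
 * so about 100 ms.
 */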
1800
1801 /* Minimum struct length needed for the DMI processor entry we want */
1802 #define DMI_ENTRY_PROCESSOR_MIN_LENGTH  48
1803
1804 /* Offset in the DMI processor structure for the max frequency */
1805 #define DMI_PROCESSOR_MAX_SPEED         0x14
1806
1807 /* Callback function used to retrieve the max frequency from DMI */
1808 static void cppc_find_dmi_mhz(const struct dmi_header *dm, void *private)
1809 {
1810         const u8 *dmi_data = (const u8 *)dm;
1811         u16 *mhz = (u16 *)private;
1812
1813         if (dm->type == DMI_ENTRY_PROCESSOR &&
1814             dm->length >= DMI_ENTRY_PROCESSOR_MIN_LENGTH) {
1815                 u16 val = (u16)get_unaligned((const u16 *)
1816                                 (dmi_data + DMI_PROCESSOR_MAX_SPEED));
1817                 *mhz = val > *mhz ? val : *mhz;
1818         }
1819 }
1820
1821 /* Look up the max frequency in DMI */
1822 static u64 cppc_get_dmi_max_khz(void)
1823 {
1824         u16 mhz = 0;
1825
1826         dmi_walk(cppc_find_dmi_mhz, &mhz);
1827
1828         /*
1829          * Fall back to 1 MHz if DMI reports no value at all, so that
1830          * callers never end up multiplying or dividing by zero.
1831          */
1832         mhz = mhz ? mhz : 1;
1833
1834         return KHZ_PER_MHZ * mhz;
1835 }
1836
1837 /*
1838  * If CPPC lowest_freq and nominal_freq registers are exposed then we can
1839  * use them to convert perf to freq and vice versa. The conversion is
1840  * extrapolated as an affine function passing by the 2 points:
1841  *  - (Low perf, Low freq)
1842  *  - (Nominal perf, Nominal freq)
1843  */
1844 unsigned int cppc_perf_to_khz(struct cppc_perf_caps *caps, unsigned int perf)
1845 {
1846         s64 retval, offset = 0;
1847         static u64 max_khz;
1848         u64 mul, div;
1849
1850         if (caps->lowest_freq && caps->nominal_freq) {
1851                 mul = caps->nominal_freq - caps->lowest_freq;
1852                 mul *= KHZ_PER_MHZ;
1853                 div = caps->nominal_perf - caps->lowest_perf;
1854                 offset = caps->nominal_freq * KHZ_PER_MHZ -
1855                          div64_u64(caps->nominal_perf * mul, div);
1856         } else {
1857                 if (!max_khz)
1858                         max_khz = cppc_get_dmi_max_khz();
1859                 mul = max_khz;
1860                 div = caps->highest_perf;
1861         }
1862
1863         retval = offset + div64_u64(perf * mul, div);
1864         if (retval >= 0)
1865                 return retval;
1866         return 0;
1867 }
1868 EXPORT_SYMBOL_GPL(cppc_perf_to_khz);
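
/*
 * Worked example with illustrative numbers: lowest_perf = 100 at
 * lowest_freq = 1000 MHz and nominal_perf = 300 at nominal_freq = 3000 MHz
 * give a slope of (3000 - 1000) * 1000 / (300 - 100) = 10000 kHz per perf
 * unit and an offset of 3000000 - 300 * 10000 = 0, so perf = 200 maps to
 * 2000000 kHz (2 GHz).
 */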
1869
1870 unsigned int cppc_khz_to_perf(struct cppc_perf_caps *caps, unsigned int freq)
1871 {
1872         s64 retval, offset = 0;
1873         static u64 max_khz;
1874         u64 mul, div;
1875
1876         if (caps->lowest_freq && caps->nominal_freq) {
1877                 mul = caps->nominal_perf - caps->lowest_perf;
1878                 div = caps->nominal_freq - caps->lowest_freq;
1879                 /*
1880                  * We don't need to convert to kHz for computing offset and can
1881                  * directly use nominal_freq and lowest_freq as the div64_u64
1882                  * will remove the frequency unit.
1883                  */
1884                 offset = caps->nominal_perf -
1885                          div64_u64(caps->nominal_freq * mul, div);
1886                 /* But we need it for computing the perf level. */
1887                 div *= KHZ_PER_MHZ;
1888         } else {
1889                 if (!max_khz)
1890                         max_khz = cppc_get_dmi_max_khz();
1891                 mul = caps->highest_perf;
1892                 div = max_khz;
1893         }
1894
1895         retval = offset + div64_u64(freq * mul, div);
1896         if (retval >= 0)
1897                 return retval;
1898         return 0;
1899 }
1900 EXPORT_SYMBOL_GPL(cppc_khz_to_perf);
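
/*
 * Worked example with the same illustrative numbers as above: the slope is
 * (300 - 100) / (3000 - 1000) perf units per MHz, the offset again works
 * out to 0, and freq = 2000000 kHz maps back to perf = 200, the inverse of
 * cppc_perf_to_khz() up to integer rounding.
 */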