// SPDX-License-Identifier: GPL-2.0-only
/*
 * CPPC (Collaborative Processor Performance Control) methods used by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (it can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC spec are available in the ACPI v5.1
 * and above specifications.
 */
#define pr_fmt(fmt)	"ACPI CPPC: " fmt

#include <linux/delay.h>
#include <linux/iopoll.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>
#include <linux/topology.h>

#include <acpi/cppc_acpi.h>
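
/*
 * Illustrative sketch (not part of the original file): how a CPUfreq driver
 * might drive the request flow described in the header comment above. The
 * function name and the clamping policy are assumptions for illustration;
 * cppc_get_perf_caps() and cppc_set_perf() are the real entry points
 * defined later in this file.
 */
static int __maybe_unused example_cppc_request(int cpu, u32 target_perf)
{
	struct cppc_perf_caps caps;
	struct cppc_perf_ctrls ctrls = {0};
	int ret;

	/* Read the abstract performance-scale bounds for this CPU. */
	ret = cppc_get_perf_caps(cpu, &caps);
	if (ret)
		return ret;

	/* Clamp the request into the platform-advertised range. */
	if (target_perf < caps.lowest_perf)
		target_perf = caps.lowest_perf;
	if (target_perf > caps.highest_perf)
		target_perf = caps.highest_perf;

	/* The platform may pick any operating point within these bounds. */
	ctrls.desired_perf = target_perf;
	return cppc_set_perf(cpu, &ctrls);
}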
struct cppc_pcc_data {
	struct pcc_mbox_chan *pcc_channel;
	void __iomem *pcc_comm_addr;
	bool pcc_channel_acquired;
	unsigned int deadline_us;
	unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

	bool pending_pcc_write_cmd;	/* Any pending/batched PCC write cmds? */
	bool platform_owns_pcc;		/* Ownership of PCC subspace */
	unsigned int pcc_write_cnt;	/* Running count of PCC write commands */

	/*
	 * Lock to provide controlled access to the PCC channel.
	 *
	 * For performance-critical usecases (currently cppc_set_perf):
	 *	Take the read_lock and check that the channel belongs to
	 *	OSPM before reading or writing the PCC subspace; take the
	 *	write_lock before transferring channel ownership to the
	 *	platform via the doorbell. This allows batching of CPPC
	 *	requests that happen to originate at about the same time.
	 *
	 * For non-performance-critical usecases (init):
	 *	Take the write_lock for all purposes, which gives
	 *	exclusive access.
	 */
	struct rw_semaphore pcc_lock;

	/* Wait queue for CPUs whose requests were batched */
	wait_queue_head_t pcc_write_wait_q;
	ktime_t last_cmd_cmpl_time;
	ktime_t last_mpar_reset;
	int mpar_count;
	int refcount;
};
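
/*
 * Illustrative sketch of the locking discipline described above (the calls
 * shown are the real helpers defined later in this file):
 *
 *   Fast path (cppc_set_perf):              Exclusive path (init, reads):
 *     down_read(&pcc_lock);                   down_write(&pcc_lock);
 *     if (platform_owns_pcc)                  send_pcc_cmd(ss_id, CMD_READ);
 *             check_pcc_chan(ss_id, false);   cpc_read(...);
 *     cpc_write(cpu, desired_reg, perf);      up_write(&pcc_lock);
 *     up_read(&pcc_lock);
 *     ...and the last CPU out of the fast path rings the doorbell under
 *     down_write_trylock(); see cppc_set_perf().
 */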
/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains per CPU subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which lets us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);
/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
					0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&		\
				(cpc)->cpc_entry.reg.space_id ==	\
				ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to True if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
				(reg)->address == 0 &&			\
				(reg)->bit_width == 0 &&		\
				(reg)->bit_offset == 0 &&		\
				(reg)->access_width == 0)

/* Evaluates to True if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?		\
				!!(cpc)->cpc_entry.int_value :		\
				!IS_NULL_REG(&(cpc)->cpc_entry.reg))

/*
 * Arbitrary Retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

#define OVER_16BTS_MASK ~0xFFFFULL
#define define_one_cppc_ro(_name)		\
static struct kobj_attribute _name =		\
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)		\
	static ssize_t show_##member_name(struct kobject *kobj,		\
				struct kobj_attribute *attr, char *buf)	\
	{								\
		struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);		\
		struct struct_name st_name = {0};			\
		int ret;						\
									\
		ret = access_fn(cpc_ptr->cpu_id, &st_name);		\
		if (ret)						\
			return ret;					\
									\
		return scnprintf(buf, PAGE_SIZE, "%llu\n",		\
				(u64)st_name.member_name);		\
	}								\
	define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_freq);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_freq);

show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);
static ssize_t show_feedback_ctrs(struct kobject *kobj,
		struct kobj_attribute *attr, char *buf)
{
	struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
	struct cppc_perf_fb_ctrs fb_ctrs = {0};
	int ret;

	ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
	if (ret)
		return ret;

	return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
			fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
	&feedback_ctrs.attr,
	&reference_perf.attr,
	&wraparound_time.attr,
	&highest_perf.attr,
	&lowest_perf.attr,
	&lowest_nonlinear_perf.attr,
	&nominal_perf.attr,
	&nominal_freq.attr,
	&lowest_freq.attr,
	NULL
};
ATTRIBUTE_GROUPS(cppc);

static struct kobj_type cppc_ktype = {
	.sysfs_ops = &kobj_sysfs_ops,
	.default_groups = cppc_groups,
};
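
/*
 * Note: the attributes above are exposed per logical CPU under
 * /sys/devices/system/cpu/cpuN/acpi_cppc/ once acpi_cppc_processor_probe()
 * below registers the "acpi_cppc" kobject against the CPU device.
 */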
static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
	int ret, status;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;

	if (!pcc_ss_data->platform_owns_pcc)
		return 0;

	/*
	 * Poll PCC status register every 3us(delay_us) for maximum of
	 * deadline_us(timeout_us) until PCC command complete bit is set(cond)
	 */
	ret = readw_relaxed_poll_timeout(&generic_comm_base->status, status,
					status & PCC_CMD_COMPLETE_MASK, 3,
					pcc_ss_data->deadline_us);

	if (likely(!ret)) {
		pcc_ss_data->platform_owns_pcc = false;
		if (chk_err_bit && (status & PCC_ERROR_MASK))
			ret = -EIO;
	}

	if (unlikely(ret))
		pr_err("PCC check channel failed for ss: %d. ret=%d\n",
		       pcc_ss_id, ret);

	return ret;
}
/*
 * This function transfers the ownership of the PCC to the platform
 * So it must be called while holding write_lock(pcc_lock)
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
	int ret = -EIO, i;
	struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
	struct acpi_pcct_shared_memory __iomem *generic_comm_base =
		pcc_ss_data->pcc_comm_addr;
	unsigned int time_delta;

	/*
	 * For CMD_WRITE we know for a fact the caller should have checked
	 * the channel before writing to PCC space
	 */
	if (cmd == CMD_READ) {
		/*
		 * If there are pending cpc_writes, then we stole the channel
		 * before write completion, so first send a WRITE command to
		 * platform
		 */
		if (pcc_ss_data->pending_pcc_write_cmd)
			send_pcc_cmd(pcc_ss_id, CMD_WRITE);

		ret = check_pcc_chan(pcc_ss_id, false);
		if (ret)
			goto end;
	} else /* CMD_WRITE */
		pcc_ss_data->pending_pcc_write_cmd = FALSE;

	/*
	 * Handle the Minimum Request Turnaround Time(MRTT)
	 * "The minimum amount of time that OSPM must wait after the completion
	 * of a command before issuing the next command, in microseconds"
	 */
	if (pcc_ss_data->pcc_mrtt) {
		time_delta = ktime_us_delta(ktime_get(),
					    pcc_ss_data->last_cmd_cmpl_time);
		if (pcc_ss_data->pcc_mrtt > time_delta)
			udelay(pcc_ss_data->pcc_mrtt - time_delta);
	}

	/*
	 * Handle the non-zero Maximum Periodic Access Rate(MPAR)
	 * "The maximum number of periodic requests that the subspace channel can
	 * support, reported in commands per minute. 0 indicates no limitation."
	 *
	 * This parameter should be ideally zero or large enough so that it can
	 * handle maximum number of requests that all the cores in the system can
	 * collectively generate. If it is not, we will follow the spec and just
	 * not send the request to the platform after hitting the MPAR limit in
	 * any 60s window
	 */
	if (pcc_ss_data->pcc_mpar) {
		if (pcc_ss_data->mpar_count == 0) {
			time_delta = ktime_ms_delta(ktime_get(),
						    pcc_ss_data->last_mpar_reset);
			if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
				pr_debug("PCC cmd for subspace %d not sent due to MPAR limit",
					 pcc_ss_id);
				ret = -EIO;
				goto end;
			}
			pcc_ss_data->last_mpar_reset = ktime_get();
			pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
		}
		pcc_ss_data->mpar_count--;
	}

	/* Write to the shared comm region. */
	writew_relaxed(cmd, &generic_comm_base->command);

	/* Flip CMD COMPLETE bit */
	writew_relaxed(0, &generic_comm_base->status);

	pcc_ss_data->platform_owns_pcc = true;

	/* Ring doorbell */
	ret = mbox_send_message(pcc_ss_data->pcc_channel->mchan, &cmd);
	if (ret < 0) {
		pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
		       pcc_ss_id, cmd, ret);
		goto end;
	}

	/* wait for completion and check for PCC error bit */
	ret = check_pcc_chan(pcc_ss_id, true);

	if (pcc_ss_data->pcc_mrtt)
		pcc_ss_data->last_cmd_cmpl_time = ktime_get();

	if (pcc_ss_data->pcc_channel->mchan->mbox->txdone_irq)
		mbox_chan_txdone(pcc_ss_data->pcc_channel->mchan, ret);
	else
		mbox_client_txdone(pcc_ss_data->pcc_channel->mchan, ret);

end:
	if (cmd == CMD_WRITE) {
		if (unlikely(ret)) {
			for_each_possible_cpu(i) {
				struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

				if (!desc)
					continue;

				if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
					desc->write_cmd_status = ret;
			}
		}
		pcc_ss_data->pcc_write_cnt++;
		wake_up_all(&pcc_ss_data->pcc_write_wait_q);
	}

	return ret;
}
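
/*
 * Worked example for the MPAR throttling above (values assumed): with
 * pcc_mpar = 600 commands/minute, mpar_count is refilled to 600 at the
 * start of each 60 s window. Once 600 commands have been sent within one
 * window, further commands fail with -EIO until ktime_ms_delta() since
 * last_mpar_reset reaches 60 * MSEC_PER_SEC.
 */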
static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
	if (ret < 0)
		pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
	else
		pr_debug("TX completed. CMD sent:%x, ret:%d\n",
			 *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
	.tx_done = cppc_chan_tx_done,
	.knows_txdone = true,
};
static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
	int result = -EFAULT;
	acpi_status status = AE_OK;
	struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
	struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
	struct acpi_buffer state = {0, NULL};
	union acpi_object *psd = NULL;
	struct acpi_psd_package *pdomain;

	status = acpi_evaluate_object_typed(handle, "_PSD", NULL,
					    &buffer, ACPI_TYPE_PACKAGE);
	if (status == AE_NOT_FOUND)	/* _PSD is optional */
		return 0;
	if (ACPI_FAILURE(status))
		return -ENODEV;

	psd = buffer.pointer;
	if (!psd || psd->package.count != 1) {
		pr_debug("Invalid _PSD data\n");
		goto end;
	}

	pdomain = &(cpc_ptr->domain_info);

	state.length = sizeof(struct acpi_psd_package);
	state.pointer = pdomain;

	status = acpi_extract_package(&(psd->package.elements[0]),
				      &format, &state);
	if (ACPI_FAILURE(status)) {
		pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
		pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
		pr_debug("Unknown _PSD:revision for CPU: %d\n", cpc_ptr->cpu_id);
		goto end;
	}

	if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
	    pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
		pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
		goto end;
	}

	result = 0;
end:
	kfree(buffer.pointer);
	return result;
}
bool acpi_cpc_valid(void)
{
	struct cpc_desc *cpc_ptr;
	int cpu;

	for_each_present_cpu(cpu) {
		cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
		if (!cpc_ptr)
			return false;
	}

	return true;
}
EXPORT_SYMBOL_GPL(acpi_cpc_valid);
/**
 * acpi_get_psd_map - Map the CPUs in the freq domain of a given cpu
 * @cpu: Find all CPUs that share a domain with cpu.
 * @cpu_data: Pointer to CPU specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(unsigned int cpu, struct cppc_cpudata *cpu_data)
{
	struct cpc_desc *cpc_ptr, *match_cpc_ptr;
	struct acpi_psd_package *match_pdomain;
	struct acpi_psd_package *pdomain;
	int count_target, i;

	/*
	 * Now that we have _PSD data from all CPUs, let's setup P-state
	 * domain info.
	 */
	cpc_ptr = per_cpu(cpc_desc_ptr, cpu);
	if (!cpc_ptr)
		return -EFAULT;

	pdomain = &(cpc_ptr->domain_info);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	if (pdomain->num_processors <= 1)
		return 0;

	/* Validate the Domain info */
	count_target = pdomain->num_processors;
	if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ALL;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_HW;
	else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
		cpu_data->shared_type = CPUFREQ_SHARED_TYPE_ANY;

	for_each_possible_cpu(i) {
		if (i == cpu)
			continue;

		match_cpc_ptr = per_cpu(cpc_desc_ptr, i);
		if (!match_cpc_ptr)
			goto err_fault;

		match_pdomain = &(match_cpc_ptr->domain_info);
		if (match_pdomain->domain != pdomain->domain)
			continue;

		/* Here i and cpu are in the same domain */
		if (match_pdomain->num_processors != count_target)
			goto err_fault;

		if (pdomain->coord_type != match_pdomain->coord_type)
			goto err_fault;

		cpumask_set_cpu(i, cpu_data->shared_cpu_map);
	}

	return 0;

err_fault:
	/* Assume no coordination on any error parsing domain info */
	cpumask_clear(cpu_data->shared_cpu_map);
	cpumask_set_cpu(cpu, cpu_data->shared_cpu_map);
	cpu_data->shared_type = CPUFREQ_SHARED_TYPE_NONE;

	return -EFAULT;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);
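
/*
 * Illustrative sketch (assumed caller, loosely modeled on the cppc_cpufreq
 * driver): how a driver might build the frequency-sharing domain for a CPU
 * using acpi_get_psd_map(). The function name is hypothetical.
 */
static int __maybe_unused example_build_psd_domain(unsigned int cpu)
{
	struct cppc_cpudata *cpu_data;
	int ret;

	cpu_data = kzalloc(sizeof(*cpu_data), GFP_KERNEL);
	if (!cpu_data)
		return -ENOMEM;

	if (!zalloc_cpumask_var(&cpu_data->shared_cpu_map, GFP_KERNEL)) {
		kfree(cpu_data);
		return -ENOMEM;
	}

	/* Fills shared_cpu_map and shared_type from the _PSD package. */
	ret = acpi_get_psd_map(cpu, cpu_data);
	if (ret)
		pr_debug("Error parsing PSD data for CPU%d\n", cpu);

	return ret;
}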
static int register_pcc_channel(int pcc_ss_idx)
{
	struct pcc_mbox_chan *pcc_chan;
	u64 usecs_lat;

	if (pcc_ss_idx >= 0) {
		pcc_chan = pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

		if (IS_ERR(pcc_chan)) {
			pr_err("Failed to find PCC channel for subspace %d\n",
			       pcc_ss_idx);
			return -ENODEV;
		}

		pcc_data[pcc_ss_idx]->pcc_channel = pcc_chan;
		/*
		 * cppc_ss->latency is just a Nominal value. In reality
		 * the remote processor could be much slower to reply.
		 * So add an arbitrary amount of wait on top of Nominal.
		 */
		usecs_lat = NUM_RETRIES * pcc_chan->latency;
		pcc_data[pcc_ss_idx]->deadline_us = usecs_lat;
		pcc_data[pcc_ss_idx]->pcc_mrtt = pcc_chan->min_turnaround_time;
		pcc_data[pcc_ss_idx]->pcc_mpar = pcc_chan->max_access_rate;
		pcc_data[pcc_ss_idx]->pcc_nominal = pcc_chan->latency;

		pcc_data[pcc_ss_idx]->pcc_comm_addr =
			acpi_os_ioremap(pcc_chan->shmem_base_addr,
					pcc_chan->shmem_size);
		if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
			pr_err("Failed to ioremap PCC comm region mem for %d\n",
			       pcc_ss_idx);
			return -ENOMEM;
		}

		/* Set flag so that we don't come here for each CPU. */
		pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
	}

	return 0;
}
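
/*
 * Worked example for the deadline computed above (latency value assumed):
 * if the PCCT reports a nominal latency of 100 us, deadline_us becomes
 * NUM_RETRIES * 100 = 50,000 us, i.e. check_pcc_chan() polls for up to
 * 50 ms before declaring the platform unresponsive.
 */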
/**
 * cpc_ffh_supported() - check if FFH reading supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
	return false;
}
/**
 * pcc_data_alloc() - Allocate the pcc_data memory for pcc subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs
 * with hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
static int pcc_data_alloc(int pcc_ss_id)
{
	if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
		return -EINVAL;

	if (pcc_data[pcc_ss_id]) {
		pcc_data[pcc_ss_id]->refcount++;
	} else {
		pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
					      GFP_KERNEL);
		if (!pcc_data[pcc_ss_id])
			return -ENOMEM;
		pcc_data[pcc_ss_id]->refcount++;
	}

	return 0;
}
/* Check if CPPC revision + num_ent combination is supported */
static bool is_cppc_supported(int revision, int num_ent)
{
	int expected_num_ent;

	switch (revision) {
	case CPPC_V2_REV:
		expected_num_ent = CPPC_V2_NUM_ENT;
		break;
	case CPPC_V3_REV:
		expected_num_ent = CPPC_V3_NUM_ENT;
		break;
	default:
		pr_debug("Firmware exports unsupported CPPC revision: %d\n",
			 revision);
		return false;
	}

	if (expected_num_ent != num_ent) {
		pr_debug("Firmware exports %d entries. Expected: %d for CPPC rev:%d\n",
			 num_ent, expected_num_ent, revision);
		return false;
	}

	return true;
}
/*
 * An example CPC table looks like the following.
 *
 *  Name (_CPC, Package() {
 *      17,							// NumEntries
 *      3,							// Revision
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x120, 2)},	// Highest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x124, 2)},	// Nominal Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x128, 2)},	// Lowest Nonlinear Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x12C, 2)},	// Lowest Performance
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x130, 2)},	// Guaranteed Performance Register
 *      ResourceTemplate() {Register(PCC, 32, 0, 0x110, 2)},	// Desired Performance Register
 *      ResourceTemplate() {Register(SystemMemory, 0, 0, 0, 0)},
 *      ...
 *      ...
 *      ...
 *  })
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *  Register (
 *      PCC,	// AddressSpaceKeyword
 *      8,	// RegisterBitWidth
 *      8,	// RegisterBitOffset
 *      0x30,	// RegisterAddress
 *      9,	// AccessSize (subspace ID)
 *  )
 */

#ifndef arch_init_invariance_cppc
static inline void arch_init_invariance_cppc(void) { }
#endif
/**
 * acpi_cppc_processor_probe - Search for per CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
	struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
	union acpi_object *out_obj, *cpc_obj;
	struct cpc_desc *cpc_ptr;
	struct cpc_reg *gas_t;
	struct device *cpu_dev;
	acpi_handle handle = pr->handle;
	unsigned int num_ent, i, cpc_rev;
	int pcc_subspace_id = -1;
	acpi_status status;
	int ret = -ENODATA;

	if (osc_sb_cppc_not_supported)
		return -ENODEV;

	/* Parse the ACPI _CPC table for this CPU. */
	status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
					    ACPI_TYPE_PACKAGE);
	if (ACPI_FAILURE(status)) {
		ret = -ENODEV;
		goto out_buf_free;
	}

	out_obj = (union acpi_object *) output.pointer;

	cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
	if (!cpc_ptr) {
		ret = -ENOMEM;
		goto out_buf_free;
	}

	/* First entry is NumEntries. */
	cpc_obj = &out_obj->package.elements[0];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		num_ent = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for NumEntries\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->num_entries = num_ent;

	/* Second entry should be revision. */
	cpc_obj = &out_obj->package.elements[1];
	if (cpc_obj->type == ACPI_TYPE_INTEGER) {
		cpc_rev = cpc_obj->integer.value;
	} else {
		pr_debug("Unexpected entry type(%d) for Revision\n",
			 cpc_obj->type);
		goto out_free;
	}
	cpc_ptr->version = cpc_rev;

	if (!is_cppc_supported(cpc_rev, num_ent))
		goto out_free;

	/* Iterate through remaining entries in _CPC */
	for (i = 2; i < num_ent; i++) {
		cpc_obj = &out_obj->package.elements[i];

		if (cpc_obj->type == ACPI_TYPE_INTEGER) {
			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
			cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
		} else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
			gas_t = (struct cpc_reg *)
				cpc_obj->buffer.pointer;

			/*
			 * The PCC Subspace index is encoded inside
			 * the CPC table entries. The same PCC index
			 * will be used for all the PCC entries,
			 * so extract it only once.
			 */
			if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
				if (pcc_subspace_id < 0) {
					pcc_subspace_id = gas_t->access_width;
					if (pcc_data_alloc(pcc_subspace_id))
						goto out_free;
				} else if (pcc_subspace_id != gas_t->access_width) {
					pr_debug("Mismatched PCC ids.\n");
					goto out_free;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
				if (gas_t->address) {
					void __iomem *addr;

					addr = ioremap(gas_t->address, gas_t->bit_width/8);
					if (!addr)
						goto out_free;
					cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
				}
			} else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
				if (gas_t->access_width < 1 || gas_t->access_width > 3) {
					/*
					 * 1 = 8-bit, 2 = 16-bit, and 3 = 32-bit.
					 * SystemIO doesn't implement 64-bit
					 * registers.
					 */
					pr_debug("Invalid access width %d for SystemIO register\n",
						 gas_t->access_width);
					goto out_free;
				}
				if (gas_t->address & OVER_16BTS_MASK) {
					/* SystemIO registers use 16-bit integer addresses */
					pr_debug("Invalid IO port %llu for SystemIO register\n",
						 gas_t->address);
					goto out_free;
				}
			} else {
				if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
					/* Support only PCC, SystemMemory, SystemIO, and FFH type regs. */
					pr_debug("Unsupported register type: %d\n", gas_t->space_id);
					goto out_free;
				}
			}

			cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
			memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
		} else {
			pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
			goto out_free;
		}
	}
	per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

	/*
	 * Initialize the remaining cpc_regs as unsupported.
	 * Example: In case FW exposes CPPC v2, the below loop will initialize
	 * LOWEST_FREQ and NOMINAL_FREQ regs as unsupported
	 */
	for (i = num_ent - 2; i < MAX_CPC_REG_ENT; i++) {
		cpc_ptr->cpc_regs[i].type = ACPI_TYPE_INTEGER;
		cpc_ptr->cpc_regs[i].cpc_entry.int_value = 0;
	}

	/* Store CPU Logical ID */
	cpc_ptr->cpu_id = pr->id;

	/* Parse PSD data for this CPU */
	ret = acpi_get_psd(cpc_ptr, handle);
	if (ret)
		goto out_free;

	/* Register PCC channel once for all PCC subspace ID. */
	if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
		ret = register_pcc_channel(pcc_subspace_id);
		if (ret)
			goto out_free;

		init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
		init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
	}

	/* Everything looks okay */
	pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

	/* Add per logical CPU nodes for reading its feedback counters. */
	cpu_dev = get_cpu_device(pr->id);
	if (!cpu_dev) {
		ret = -EINVAL;
		goto out_free;
	}

	/* Plug PSD data into this CPU's CPC descriptor. */
	per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

	ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
				   "acpi_cppc");
	if (ret) {
		per_cpu(cpc_desc_ptr, pr->id) = NULL;
		kobject_put(&cpc_ptr->kobj);
		goto out_free;
	}

	arch_init_invariance_cppc();

	kfree(output.pointer);
	return 0;

out_free:
	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

		if (addr)
			iounmap(addr);
	}
	kfree(cpc_ptr);

out_buf_free:
	kfree(output.pointer);
	return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);
/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
	struct cpc_desc *cpc_ptr;
	unsigned int i;
	void __iomem *addr;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

	if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
		if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
			pcc_data[pcc_ss_id]->refcount--;
			if (!pcc_data[pcc_ss_id]->refcount) {
				pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
				kfree(pcc_data[pcc_ss_id]);
				pcc_data[pcc_ss_id] = NULL;
			}
		}
	}

	cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
	if (!cpc_ptr)
		return;

	/* Free all the mapped sys mem areas for this CPU */
	for (i = 2; i < cpc_ptr->num_entries; i++) {
		addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
		if (addr)
			iounmap(addr);
	}

	kobject_put(&cpc_ptr->kobj);
	kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);
/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	place holder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
	return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success and error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
	return -ENOTSUPP;
}
/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, it should be
 * as fast as possible. We have already mapped the PCC subspace during init, so
 * we can directly write to it.
 */

static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg_res->type == ACPI_TYPE_INTEGER) {
		*val = reg_res->cpc_entry.int_value;
		return 0;
	}

	*val = 0;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 width = 8 << (reg->access_width - 1);
		u32 val_u32;
		acpi_status status;

		status = acpi_os_read_port((acpi_io_address)reg->address,
					   &val_u32, width);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to read SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		*val = val_u32;
		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_read_ffh(cpu, reg, val);
	else
		return acpi_os_read_memory((acpi_physical_address)reg->address,
					   val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		*val = readb_relaxed(vaddr);
		break;
	case 16:
		*val = readw_relaxed(vaddr);
		break;
	case 32:
		*val = readl_relaxed(vaddr);
		break;
	case 64:
		*val = readq_relaxed(vaddr);
		break;
	default:
		pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		return -EFAULT;
	}

	return 0;
}
static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
	int ret_val = 0;
	void __iomem *vaddr = NULL;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_reg *reg = &reg_res->cpc_entry.reg;

	if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_IO) {
		u32 width = 8 << (reg->access_width - 1);
		acpi_status status;

		status = acpi_os_write_port((acpi_io_address)reg->address,
					    (u32)val, width);
		if (ACPI_FAILURE(status)) {
			pr_debug("Error: Failed to write SystemIO port %llx\n",
				 reg->address);
			return -EFAULT;
		}

		return 0;
	} else if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
		vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
	else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
		vaddr = reg_res->sys_mem_vaddr;
	else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
		return cpc_write_ffh(cpu, reg, val);
	else
		return acpi_os_write_memory((acpi_physical_address)reg->address,
					    val, reg->bit_width);

	switch (reg->bit_width) {
	case 8:
		writeb_relaxed(val, vaddr);
		break;
	case 16:
		writew_relaxed(val, vaddr);
		break;
	case 32:
		writel_relaxed(val, vaddr);
		break;
	case 64:
		writeq_relaxed(val, vaddr);
		break;
	default:
		pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
			 reg->bit_width, pcc_ss_id);
		ret_val = -EFAULT;
		break;
	}

	return ret_val;
}
static int cppc_get_perf(int cpunum, enum cppc_regs reg_idx, u64 *perf)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *reg;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	reg = &cpc_desc->cpc_regs[reg_idx];

	if (CPC_IN_PCC(reg)) {
		int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
		struct cppc_pcc_data *pcc_ss_data = NULL;
		int ret = 0;

		if (pcc_ss_id < 0)
			return -EIO;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);

		if (send_pcc_cmd(pcc_ss_id, CMD_READ) >= 0)
			cpc_read(cpunum, reg, perf);
		else
			ret = -EIO;

		up_write(&pcc_ss_data->pcc_lock);

		return ret;
	}

	cpc_read(cpunum, reg, perf);

	return 0;
}
/**
 * cppc_get_desired_perf - Get the desired performance register value.
 * @cpunum: CPU from which to get desired performance.
 * @desired_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_desired_perf(int cpunum, u64 *desired_perf)
{
	return cppc_get_perf(cpunum, DESIRED_PERF, desired_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_desired_perf);

/**
 * cppc_get_nominal_perf - Get the nominal performance register value.
 * @cpunum: CPU from which to get nominal performance.
 * @nominal_perf: Return address.
 *
 * Return: 0 for success, -EIO otherwise.
 */
int cppc_get_nominal_perf(int cpunum, u64 *nominal_perf)
{
	return cppc_get_perf(cpunum, NOMINAL_PERF, nominal_perf);
}
EXPORT_SYMBOL_GPL(cppc_get_nominal_perf);
/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *highest_reg, *lowest_reg,
		*lowest_non_linear_reg, *nominal_reg, *guaranteed_reg,
		*low_freq_reg = NULL, *nom_freq_reg = NULL;
	u64 high, low, guaranteed, nom, min_nonlinear, low_f = 0, nom_f = 0;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
	lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
	lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
	nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];
	low_freq_reg = &cpc_desc->cpc_regs[LOWEST_FREQ];
	nom_freq_reg = &cpc_desc->cpc_regs[NOMINAL_FREQ];
	guaranteed_reg = &cpc_desc->cpc_regs[GUARANTEED_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
		CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg) ||
		CPC_IN_PCC(low_freq_reg) || CPC_IN_PCC(nom_freq_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		regs_in_pcc = 1;
		down_write(&pcc_ss_data->pcc_lock);
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, highest_reg, &high);
	perf_caps->highest_perf = high;

	cpc_read(cpunum, lowest_reg, &low);
	perf_caps->lowest_perf = low;

	cpc_read(cpunum, nominal_reg, &nom);
	perf_caps->nominal_perf = nom;

	if (guaranteed_reg->type != ACPI_TYPE_BUFFER ||
	    IS_NULL_REG(&guaranteed_reg->cpc_entry.reg)) {
		perf_caps->guaranteed_perf = 0;
	} else {
		cpc_read(cpunum, guaranteed_reg, &guaranteed);
		perf_caps->guaranteed_perf = guaranteed;
	}

	cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
	perf_caps->lowest_nonlinear_perf = min_nonlinear;

	if (!high || !low || !nom || !min_nonlinear)
		ret = -EFAULT;

	/* Read optional lowest and nominal frequencies if present */
	if (CPC_SUPPORTED(low_freq_reg))
		cpc_read(cpunum, low_freq_reg, &low_f);

	if (CPC_SUPPORTED(nom_freq_reg))
		cpc_read(cpunum, nom_freq_reg, &nom_f);

	perf_caps->lowest_freq = low_f;
	perf_caps->nominal_freq = nom_f;

out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
	struct cpc_register_resource *delivered_reg, *reference_reg,
		*ref_perf_reg, *ctr_wrap_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	u64 delivered, reference, ref_perf, ctr_wrap_time;
	int ret = 0, regs_in_pcc = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
		return -ENODEV;
	}

	delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
	reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
	ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
	ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

	/*
	 * If reference perf register is not supported then we should
	 * use the nominal perf value
	 */
	if (!CPC_SUPPORTED(ref_perf_reg))
		ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

	/* Are any of the regs PCC ?*/
	if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
		CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_write(&pcc_ss_data->pcc_lock);
		regs_in_pcc = 1;
		/* Ring doorbell once to update PCC subspace */
		if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
			ret = -EIO;
			goto out_err;
		}
	}

	cpc_read(cpunum, delivered_reg, &delivered);
	cpc_read(cpunum, reference_reg, &reference);
	cpc_read(cpunum, ref_perf_reg, &ref_perf);

	/*
	 * Per spec, if ctr_wrap_time optional register is unsupported, then the
	 * performance counters are assumed to never wrap during the lifetime of
	 * platform
	 */
	ctr_wrap_time = (u64)(~((u64)0));
	if (CPC_SUPPORTED(ctr_wrap_reg))
		cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

	if (!delivered || !reference || !ref_perf) {
		ret = -EFAULT;
		goto out_err;
	}

	perf_fb_ctrs->delivered = delivered;
	perf_fb_ctrs->reference = reference;
	perf_fb_ctrs->reference_perf = ref_perf;
	perf_fb_ctrs->wraparound_time = ctr_wrap_time;
out_err:
	if (regs_in_pcc)
		up_write(&pcc_ss_data->pcc_lock);
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
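
/*
 * Illustrative sketch (assumed helper, modeled on the cppc_cpufreq driver):
 * given two feedback-counter snapshots, the average delivered performance
 * over the interval is reference_perf * delta(delivered) / delta(reference).
 * Multiplication overflow and counter wraparound handling are omitted for
 * brevity.
 */
static u64 __maybe_unused example_delivered_perf(struct cppc_perf_fb_ctrs *t0,
						 struct cppc_perf_fb_ctrs *t1)
{
	u64 delta_ref = t1->reference - t0->reference;
	u64 delta_del = t1->delivered - t0->delivered;

	/* No counter movement: fall back to the reference performance. */
	if (!delta_ref)
		return t0->reference_perf;

	return div64_u64(t0->reference_perf * delta_del, delta_ref);
}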
/**
 * cppc_set_enable - Set to enable CPPC on the processor by writing the
 * Continuous Performance Control package EnableRegister field.
 * @cpu: CPU for which to enable CPPC register.
 * @enable: 0 - disable, 1 - enable CPPC feature on the processor.
 *
 * Return: 0 for success, -ERRNO or -EIO otherwise.
 */
int cppc_set_enable(int cpu, bool enable)
{
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cpc_register_resource *enable_reg;
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = -EINVAL;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -EINVAL;
	}

	enable_reg = &cpc_desc->cpc_regs[ENABLE];

	if (CPC_IN_PCC(enable_reg)) {

		if (pcc_ss_id < 0)
			return -EIO;

		ret = cpc_write(cpu, enable_reg, enable);
		if (ret)
			return ret;

		pcc_ss_data = pcc_data[pcc_ss_id];

		down_write(&pcc_ss_data->pcc_lock);
		/* after writing CPC, transfer the ownership of PCC to platform */
		ret = send_pcc_cmd(pcc_ss_id, CMD_WRITE);
		up_write(&pcc_ss_data->pcc_lock);
		return ret;
	}

	return cpc_write(cpu, enable_reg, enable);
}
EXPORT_SYMBOL_GPL(cppc_set_enable);
/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
	struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
	struct cppc_pcc_data *pcc_ss_data = NULL;
	int ret = 0;

	if (!cpc_desc) {
		pr_debug("No CPC descriptor for CPU:%d\n", cpu);
		return -ENODEV;
	}

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

	/*
	 * This is Phase-I where we want to write to CPC registers
	 * -> We want all CPUs to be able to execute this phase in parallel
	 *
	 * Since read_lock can be acquired by multiple CPUs simultaneously we
	 * achieve that goal here
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (pcc_ss_id < 0) {
			pr_debug("Invalid pcc_ss_id\n");
			return -ENODEV;
		}
		pcc_ss_data = pcc_data[pcc_ss_id];
		down_read(&pcc_ss_data->pcc_lock);	/* BEGIN Phase-I */
		if (pcc_ss_data->platform_owns_pcc) {
			ret = check_pcc_chan(pcc_ss_id, false);
			if (ret) {
				up_read(&pcc_ss_data->pcc_lock);
				return ret;
			}
		}
		/*
		 * Update the pending_write to make sure a PCC CMD_READ will not
		 * arrive and steal the channel during the switch to write lock
		 */
		pcc_ss_data->pending_pcc_write_cmd = true;
		cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
		cpc_desc->write_cmd_status = 0;
	}

	/*
	 * Skip writing MIN/MAX until Linux knows how to come up with
	 * useful values.
	 */
	cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

	if (CPC_IN_PCC(desired_reg))
		up_read(&pcc_ss_data->pcc_lock);	/* END Phase-I */
	/*
	 * This is Phase-II where we transfer the ownership of PCC to Platform
	 *
	 * Short summary: think of a group of cppc_set_perf requests that
	 * happen in a short overlapping interval. The last CPU to come out
	 * of Phase-I will enter Phase-II and ring the doorbell.
	 *
	 * We have the following requirements for Phase-II:
	 *     1. We want to execute Phase-II only when there are no CPUs
	 * currently executing in Phase-I
	 *     2. Once we start Phase-II we want to avoid all other CPUs from
	 * entering Phase-I
	 *     3. We want only one CPU among all those who went through Phase-I
	 * to run Phase-II
	 *
	 * If write_trylock fails to get the lock and doesn't transfer the
	 * PCC ownership to the platform, then one of the following will be TRUE
	 *     1. There is at-least one CPU in Phase-I which will later execute
	 * write_trylock, so the CPUs in Phase-I will be responsible for
	 * executing the Phase-II.
	 *     2. Some other CPU has beaten this CPU to successfully execute the
	 * write_trylock and has already acquired the write_lock. We know for a
	 * fact it (other CPU acquiring the write_lock) couldn't have happened
	 * before this CPU's Phase-I as we held the read_lock.
	 *     3. Some other CPU executing pcc CMD_READ has stolen the
	 * down_write, in which case, send_pcc_cmd will check for pending
	 * CMD_WRITE commands by checking the pending_pcc_write_cmd.
	 * So in all cases, this CPU knows that its request will be delivered
	 * by another CPU and can return.
	 *
	 * After getting the down_write we still need to check for
	 * pending_pcc_write_cmd to take care of the following scenario:
	 * The thread running this code could be scheduled out between
	 * Phase-I and Phase-II. Before it is scheduled back on, another CPU
	 * could have delivered the request to Platform by triggering the
	 * doorbell and transferred the ownership of PCC to platform. So this
	 * avoids triggering an unnecessary doorbell and more importantly before
	 * triggering the doorbell it makes sure that the PCC channel ownership
	 * is still with OSPM.
	 * pending_pcc_write_cmd can also be cleared by a different CPU, if
	 * there was a pcc CMD_READ waiting on down_write and it steals the lock
	 * before the pcc CMD_WRITE is completed. send_pcc_cmd checks for this
	 * case during a CMD_READ and if there are pending writes it delivers
	 * the write command before servicing the read command
	 */
	if (CPC_IN_PCC(desired_reg)) {
		if (down_write_trylock(&pcc_ss_data->pcc_lock)) {	/* BEGIN Phase-II */
			/* Update only if there are pending write commands */
			if (pcc_ss_data->pending_pcc_write_cmd)
				send_pcc_cmd(pcc_ss_id, CMD_WRITE);
			up_write(&pcc_ss_data->pcc_lock);		/* END Phase-II */
		} else
			/* Wait until pcc_write_cnt is updated by send_pcc_cmd */
			wait_event(pcc_ss_data->pcc_write_wait_q,
				   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

		/* send_pcc_cmd updates the status in case of failure */
		ret = cpc_desc->write_cmd_status;
	}
	return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);
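
/*
 * Illustrative timeline (assumed interleaving) for the two-phase batching
 * implemented above, with CPUs A and B sharing one PCC subspace:
 *
 *   A: down_read -> cpc_write(desired) -> up_read -> write_trylock fails
 *   B: down_read -> cpc_write(desired) -> up_read -> write_trylock succeeds
 *   B: sees pending_pcc_write_cmd set, rings the doorbell once for both
 *   A: wait_event() until pcc_write_cnt advances, then reads write_cmd_status
 */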
/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can specify the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
	/*
	 * Expected transition latency is based on the PCCT timing values
	 * Below are definition from ACPI spec:
	 * pcc_nominal- Expected latency to process a command, in microseconds
	 * pcc_mpar   - The maximum number of periodic requests that the subspace
	 *              channel can support, reported in commands per minute. 0
	 *              indicates no limitation.
	 * pcc_mrtt   - The minimum amount of time that OSPM must wait after the
	 *              completion of a command before issuing the next command,
	 *              in microseconds.
	 */
	unsigned int latency_ns = 0;
	struct cpc_desc *cpc_desc;
	struct cpc_register_resource *desired_reg;
	int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
	struct cppc_pcc_data *pcc_ss_data;

	cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
	if (!cpc_desc)
		return CPUFREQ_ETERNAL;

	desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
	if (!CPC_IN_PCC(desired_reg))
		return CPUFREQ_ETERNAL;

	if (pcc_ss_id < 0)
		return CPUFREQ_ETERNAL;

	pcc_ss_data = pcc_data[pcc_ss_id];
	if (pcc_ss_data->pcc_mpar)
		latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

	latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
	latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

	return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
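
/*
 * Worked example (PCCT values assumed): for pcc_mpar = 600 commands/min,
 * pcc_nominal = 1000 us and pcc_mrtt = 50 us:
 *   MPAR bound:    60 * (10^9 / 600) ns ~= 100,000,000 ns (100 ms)
 *   nominal bound: 1000 * 1000          =    1,000,000 ns   (1 ms)
 *   MRTT bound:    50 * 1000            =       50,000 ns
 * cppc_get_transition_latency() returns the largest of these, here ~100 ms.
 */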