/*
 * CPPC (Collaborative Processor Performance Control) methods used
 * by CPUfreq drivers.
 *
 * (C) Copyright 2014, 2015 Linaro Ltd.
 * Author: Ashwin Chaugule <ashwin.chaugule@linaro.org>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; version 2
 * of the License.
 *
 * CPPC describes a few methods for controlling CPU performance using
 * information from a per-CPU table called CPC. This table is described in
 * the ACPI v5.0+ specification. The table consists of a list of
 * registers which may be memory mapped or hardware registers and also may
 * include some static integer values.
 *
 * CPU performance is on an abstract continuous scale, as opposed to a
 * discretized P-state scale which is tied to CPU frequency only. In brief,
 * the basic operation involves:
 *
 * - OS makes a CPU performance request (can provide min and max bounds).
 *
 * - Platform (such as a BMC) is free to optimize the request within the
 *   requested bounds depending on power/thermal budgets etc.
 *
 * - Platform conveys its decision back to the OS.
 *
 * The communication between OS and platform occurs through another medium
 * called the Platform Communication Channel (PCC). This is a generic
 * mailbox-like mechanism which includes doorbell semantics to indicate
 * register updates. See drivers/mailbox/pcc.c for details on PCC.
 *
 * Finer details about the PCC and CPPC specs are available in the ACPI
 * v5.1 and above specifications.
 */
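
/*
 * A rough usage sketch (illustrative, not part of this file's API docs):
 * a CPUfreq driver built on these helpers would typically query the
 * performance bounds once and then issue desired-performance requests:
 *
 *	struct cppc_perf_caps caps;
 *	struct cppc_perf_ctrls ctrls = {0};
 *
 *	if (!cppc_get_perf_caps(cpu, &caps)) {
 *		ctrls.desired_perf = caps.nominal_perf;
 *		cppc_set_perf(cpu, &ctrls);
 *	}
 *
 * cppc_get_perf_caps() and cppc_set_perf() are defined further below;
 * the structures come from <acpi/cppc_acpi.h>.
 */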

#define pr_fmt(fmt)     "ACPI CPPC: " fmt

#include <linux/cpufreq.h>
#include <linux/delay.h>
#include <linux/ktime.h>
#include <linux/rwsem.h>
#include <linux/wait.h>

#include <acpi/cppc_acpi.h>

struct cppc_pcc_data {
        struct mbox_chan *pcc_channel;
        void __iomem *pcc_comm_addr;
        bool pcc_channel_acquired;
        ktime_t deadline;
        unsigned int pcc_mpar, pcc_mrtt, pcc_nominal;

        bool pending_pcc_write_cmd;     /* Any pending/batched PCC write cmds? */
        bool platform_owns_pcc;         /* Ownership of PCC subspace */
        unsigned int pcc_write_cnt;     /* Running count of PCC write commands */

        /*
         * Lock to provide controlled access to the PCC channel.
         *
         * For performance-critical usecases (currently cppc_set_perf):
         *      We need to take the read_lock and check if the channel
         *      belongs to OSPM before reading or writing to the PCC
         *      subspace.
         *      We need to take the write_lock before transferring the
         *      channel ownership to the platform via a doorbell. This
         *      allows us to batch a number of CPPC requests if they happen
         *      to originate at about the same time.
         *
         * For non-performance-critical usecases (init):
         *      Take the write_lock for all purposes, which gives
         *      exclusive access.
         */
        struct rw_semaphore pcc_lock;

        /* Wait queue for CPUs whose requests were batched */
        wait_queue_head_t pcc_write_wait_q;
        ktime_t last_cmd_cmpl_time;
        ktime_t last_mpar_reset;
        unsigned int mpar_count;
        int refcount;
};

/* Array to represent the PCC channel per subspace ID */
static struct cppc_pcc_data *pcc_data[MAX_PCC_SUBSPACES];

/* The cpu_pcc_subspace_idx contains the per-CPU PCC subspace ID */
static DEFINE_PER_CPU(int, cpu_pcc_subspace_idx);

/*
 * The cpc_desc structure contains the ACPI register details
 * as described in the per-CPU _CPC tables. The details
 * include the type of register (e.g. PCC, System IO, FFH etc.)
 * and destination addresses which let us READ/WRITE CPU performance
 * information using the appropriate I/O methods.
 */
static DEFINE_PER_CPU(struct cpc_desc *, cpc_desc_ptr);

/* pcc mapped address + header size + offset within PCC subspace */
#define GET_PCC_VADDR(offs, pcc_ss_id) (pcc_data[pcc_ss_id]->pcc_comm_addr + \
                                                0x8 + (offs))

/* Check if a CPC register is in PCC */
#define CPC_IN_PCC(cpc) ((cpc)->type == ACPI_TYPE_BUFFER &&             \
                                (cpc)->cpc_entry.reg.space_id ==        \
                                ACPI_ADR_SPACE_PLATFORM_COMM)

/* Evaluates to true if reg is a NULL register descriptor */
#define IS_NULL_REG(reg) ((reg)->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY && \
                                (reg)->address == 0 &&                  \
                                (reg)->bit_width == 0 &&                \
                                (reg)->bit_offset == 0 &&               \
                                (reg)->access_width == 0)

/* Evaluates to true if an optional cpc field is supported */
#define CPC_SUPPORTED(cpc) ((cpc)->type == ACPI_TYPE_INTEGER ?          \
                                !!(cpc)->cpc_entry.int_value :          \
                                !IS_NULL_REG(&(cpc)->cpc_entry.reg))
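
/*
 * For example (matching the sample _CPC table further below), firmware
 * marks an optional register as unsupported with an entry like
 * ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)}: IS_NULL_REG()
 * evaluates to true for it, so CPC_SUPPORTED() reports the field absent.
 */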

/*
 * Arbitrary retries in case the remote processor is slow to respond
 * to PCC commands. Keeping it high enough to cover emulators where
 * the processors run painfully slow.
 */
#define NUM_RETRIES 500ULL

struct cppc_attr {
        struct attribute attr;
        ssize_t (*show)(struct kobject *kobj,
                        struct attribute *attr, char *buf);
        ssize_t (*store)(struct kobject *kobj,
                        struct attribute *attr, const char *c, ssize_t count);
};

#define define_one_cppc_ro(_name)               \
static struct cppc_attr _name =                 \
__ATTR(_name, 0444, show_##_name, NULL)

#define to_cpc_desc(a) container_of(a, struct cpc_desc, kobj)

#define show_cppc_data(access_fn, struct_name, member_name)             \
        static ssize_t show_##member_name(struct kobject *kobj,         \
                                struct attribute *attr, char *buf)      \
        {                                                               \
                struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);           \
                struct struct_name st_name = {0};                       \
                int ret;                                                \
                                                                        \
                ret = access_fn(cpc_ptr->cpu_id, &st_name);             \
                if (ret)                                                \
                        return ret;                                     \
                                                                        \
                return scnprintf(buf, PAGE_SIZE, "%llu\n",              \
                                (u64)st_name.member_name);              \
        }                                                               \
        define_one_cppc_ro(member_name)

show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, highest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, nominal_perf);
show_cppc_data(cppc_get_perf_caps, cppc_perf_caps, lowest_nonlinear_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, reference_perf);
show_cppc_data(cppc_get_perf_ctrs, cppc_perf_fb_ctrs, wraparound_time);

static ssize_t show_feedback_ctrs(struct kobject *kobj,
                struct attribute *attr, char *buf)
{
        struct cpc_desc *cpc_ptr = to_cpc_desc(kobj);
        struct cppc_perf_fb_ctrs fb_ctrs = {0};
        int ret;

        ret = cppc_get_perf_ctrs(cpc_ptr->cpu_id, &fb_ctrs);
        if (ret)
                return ret;

        return scnprintf(buf, PAGE_SIZE, "ref:%llu del:%llu\n",
                        fb_ctrs.reference, fb_ctrs.delivered);
}
define_one_cppc_ro(feedback_ctrs);

static struct attribute *cppc_attrs[] = {
        &feedback_ctrs.attr,
        &reference_perf.attr,
        &wraparound_time.attr,
        &highest_perf.attr,
        &lowest_perf.attr,
        &lowest_nonlinear_perf.attr,
        &nominal_perf.attr,
        NULL
};

static struct kobj_type cppc_ktype = {
        .sysfs_ops = &kobj_sysfs_ops,
        .default_attrs = cppc_attrs,
};
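
/*
 * These attributes hang off a per-CPU kobject registered in
 * acpi_cppc_processor_probe() below, so (assuming the usual sysfs layout
 * for that "acpi_cppc" directory) the counters can be inspected from
 * userspace with something like:
 *
 *	$ cat /sys/devices/system/cpu/cpu0/acpi_cppc/feedback_ctrs
 *	ref:103 del:54
 *
 * The values shown are illustrative only.
 */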

static int check_pcc_chan(int pcc_ss_id, bool chk_err_bit)
{
        int ret = -EIO, status = 0;
        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
        struct acpi_pcct_shared_memory __iomem *generic_comm_base =
                pcc_ss_data->pcc_comm_addr;
        ktime_t next_deadline = ktime_add(ktime_get(),
                                          pcc_ss_data->deadline);

        if (!pcc_ss_data->platform_owns_pcc)
                return 0;

        /* Retry in case the remote processor was too slow to catch up. */
        while (!ktime_after(ktime_get(), next_deadline)) {
                /*
                 * Per spec, prior to boot the PCC space will be initialized
                 * by the platform and should have the command completion bit
                 * set when PCC can be used by OSPM.
                 */
                status = readw_relaxed(&generic_comm_base->status);
                if (status & PCC_CMD_COMPLETE_MASK) {
                        ret = 0;
                        if (chk_err_bit && (status & PCC_ERROR_MASK))
                                ret = -EIO;
                        break;
                }
                /* Reduce bus traffic in case this loop takes a while. */
                udelay(3);
        }

        if (likely(!ret))
                pcc_ss_data->platform_owns_pcc = false;
        else
                pr_err("PCC check channel failed for ss: %d. Status=%x\n",
                       pcc_ss_id, status);

        return ret;
}

/*
 * This function transfers the ownership of the PCC to the platform,
 * so it must be called while holding write_lock (pcc_lock).
 */
static int send_pcc_cmd(int pcc_ss_id, u16 cmd)
{
        int ret = -EIO, i;
        struct cppc_pcc_data *pcc_ss_data = pcc_data[pcc_ss_id];
        struct acpi_pcct_shared_memory *generic_comm_base =
                (struct acpi_pcct_shared_memory *)pcc_ss_data->pcc_comm_addr;
        unsigned int time_delta;

        /*
         * For CMD_WRITE we know for a fact the caller should have checked
         * the channel before writing to PCC space.
         */
        if (cmd == CMD_READ) {
                /*
                 * If there are pending cpc_writes, then we stole the channel
                 * before write completion, so first send a WRITE command to
                 * the platform.
                 */
                if (pcc_ss_data->pending_pcc_write_cmd)
                        send_pcc_cmd(pcc_ss_id, CMD_WRITE);

                ret = check_pcc_chan(pcc_ss_id, false);
                if (ret)
                        goto end;
        } else /* CMD_WRITE */
                pcc_ss_data->pending_pcc_write_cmd = false;

        /*
         * Handle the Minimum Request Turnaround Time (MRTT):
         * "The minimum amount of time that OSPM must wait after the
         * completion of a command before issuing the next command,
         * in microseconds."
         */
        if (pcc_ss_data->pcc_mrtt) {
                time_delta = ktime_us_delta(ktime_get(),
                                            pcc_ss_data->last_cmd_cmpl_time);
                if (pcc_ss_data->pcc_mrtt > time_delta)
                        udelay(pcc_ss_data->pcc_mrtt - time_delta);
        }

        /*
         * Handle the non-zero Maximum Periodic Access Rate (MPAR):
         * "The maximum number of periodic requests that the subspace channel
         * can support, reported in commands per minute. 0 indicates no
         * limitation."
         *
         * Ideally this parameter is zero or large enough to handle the
         * maximum number of requests that all the cores in the system can
         * collectively generate. If it is not, we will follow the spec and
         * just not send the request to the platform after hitting the MPAR
         * limit in any 60s window.
         */
        if (pcc_ss_data->pcc_mpar) {
                if (pcc_ss_data->mpar_count == 0) {
                        time_delta = ktime_ms_delta(ktime_get(),
                                                    pcc_ss_data->last_mpar_reset);
                        if ((time_delta < 60 * MSEC_PER_SEC) && pcc_ss_data->last_mpar_reset) {
                                pr_debug("PCC cmd for subspace %d not sent due to MPAR limit\n",
                                         pcc_ss_id);
                                ret = -EIO;
                                goto end;
                        }
                        pcc_ss_data->last_mpar_reset = ktime_get();
                        pcc_ss_data->mpar_count = pcc_ss_data->pcc_mpar;
                }
                pcc_ss_data->mpar_count--;
        }
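
        /*
         * Illustrative numbers for the MPAR logic above: with
         * pcc_mpar = 120, the budget is 120 commands per 60s window.
         * mpar_count drains by one per command; if it reaches zero while
         * the window is still open, further commands fail with -EIO until
         * the 60s elapse and the budget is replenished.
         */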

        /* Write to the shared comm region. */
        writew_relaxed(cmd, &generic_comm_base->command);

        /* Flip CMD COMPLETE bit */
        writew_relaxed(0, &generic_comm_base->status);

        pcc_ss_data->platform_owns_pcc = true;

        /* Ring doorbell */
        ret = mbox_send_message(pcc_ss_data->pcc_channel, &cmd);
        if (ret < 0) {
                pr_err("Err sending PCC mbox message. ss: %d cmd:%d, ret:%d\n",
                       pcc_ss_id, cmd, ret);
                goto end;
        }

        /* Wait for completion and check the PCC error bit */
        ret = check_pcc_chan(pcc_ss_id, true);

        if (pcc_ss_data->pcc_mrtt)
                pcc_ss_data->last_cmd_cmpl_time = ktime_get();

        if (pcc_ss_data->pcc_channel->mbox->txdone_irq)
                mbox_chan_txdone(pcc_ss_data->pcc_channel, ret);
        else
                mbox_client_txdone(pcc_ss_data->pcc_channel, ret);

end:
        if (cmd == CMD_WRITE) {
                if (unlikely(ret)) {
                        for_each_possible_cpu(i) {
                                struct cpc_desc *desc = per_cpu(cpc_desc_ptr, i);

                                if (!desc)
                                        continue;

                                if (desc->write_cmd_id == pcc_ss_data->pcc_write_cnt)
                                        desc->write_cmd_status = ret;
                        }
                }
                pcc_ss_data->pcc_write_cnt++;
                wake_up_all(&pcc_ss_data->pcc_write_wait_q);
        }

        return ret;
}

static void cppc_chan_tx_done(struct mbox_client *cl, void *msg, int ret)
{
        if (ret < 0)
                pr_debug("TX did not complete: CMD sent:%x, ret:%d\n",
                         *(u16 *)msg, ret);
        else
                pr_debug("TX completed. CMD sent:%x, ret:%d\n",
                         *(u16 *)msg, ret);
}

static struct mbox_client cppc_mbox_cl = {
        .tx_done = cppc_chan_tx_done,
        .knows_txdone = true,
};

static int acpi_get_psd(struct cpc_desc *cpc_ptr, acpi_handle handle)
{
        int result = -EFAULT;
        acpi_status status = AE_OK;
        struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
        struct acpi_buffer format = {sizeof("NNNNN"), "NNNNN"};
        struct acpi_buffer state = {0, NULL};
        union acpi_object *psd = NULL;
        struct acpi_psd_package *pdomain;

        status = acpi_evaluate_object_typed(handle, "_PSD", NULL, &buffer,
                                            ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status))
                return -ENODEV;

        psd = buffer.pointer;
        if (!psd || psd->package.count != 1) {
                pr_debug("Invalid _PSD data\n");
                goto end;
        }

        pdomain = &(cpc_ptr->domain_info);

        state.length = sizeof(struct acpi_psd_package);
        state.pointer = pdomain;

        status = acpi_extract_package(&(psd->package.elements[0]),
                                      &format, &state);
        if (ACPI_FAILURE(status)) {
                pr_debug("Invalid _PSD data for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->num_entries != ACPI_PSD_REV0_ENTRIES) {
                pr_debug("Unknown _PSD:num_entries for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->revision != ACPI_PSD_REV0_REVISION) {
                pr_debug("Unknown _PSD:revision for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        if (pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ALL &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_SW_ANY &&
            pdomain->coord_type != DOMAIN_COORD_TYPE_HW_ALL) {
                pr_debug("Invalid _PSD:coord_type for CPU:%d\n", cpc_ptr->cpu_id);
                goto end;
        }

        result = 0;
end:
        kfree(buffer.pointer);
        return result;
}
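
/*
 * For reference, a matching _PSD object (hypothetical values, in the same
 * style as the example _CPC table further below) would look roughly like:
 *
 *	Name(_PSD, Package()
 *	{
 *		Package() { 5, 0, 0, 0xFD, 4 }
 *		// NumEntries, Revision, Domain,
 *		// CoordType (0xFD == SW_ANY), NumProcessors
 *	})
 */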

/**
 * acpi_get_psd_map - Map the CPUs in a common freq domain.
 * @all_cpu_data: Ptrs to CPU-specific CPPC data including PSD info.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_get_psd_map(struct cppc_cpudata **all_cpu_data)
{
        int count_target;
        int retval = 0;
        unsigned int i, j;
        cpumask_var_t covered_cpus;
        struct cppc_cpudata *pr, *match_pr;
        struct acpi_psd_package *pdomain;
        struct acpi_psd_package *match_pdomain;
        struct cpc_desc *cpc_ptr, *match_cpc_ptr;

        if (!zalloc_cpumask_var(&covered_cpus, GFP_KERNEL))
                return -ENOMEM;

        /*
         * Now that we have _PSD data from all CPUs, let's set up the
         * P-state domain info.
         */
        for_each_possible_cpu(i) {
                pr = all_cpu_data[i];
                if (!pr)
                        continue;
                if (cpumask_test_cpu(i, covered_cpus))
                        continue;

                cpc_ptr = per_cpu(cpc_desc_ptr, i);
                if (!cpc_ptr) {
                        retval = -EFAULT;
                        goto err_ret;
                }

                pdomain = &(cpc_ptr->domain_info);
                cpumask_set_cpu(i, pr->shared_cpu_map);
                cpumask_set_cpu(i, covered_cpus);
                if (pdomain->num_processors <= 1)
                        continue;

                /* Validate the Domain info */
                count_target = pdomain->num_processors;
                if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ALL)
                        pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_HW_ALL)
                        pr->shared_type = CPUFREQ_SHARED_TYPE_HW;
                else if (pdomain->coord_type == DOMAIN_COORD_TYPE_SW_ANY)
                        pr->shared_type = CPUFREQ_SHARED_TYPE_ANY;

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
                        if (!match_cpc_ptr) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        match_pdomain = &(match_cpc_ptr->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        /* Here i and j are in the same domain */
                        if (match_pdomain->num_processors != count_target) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        if (pdomain->coord_type != match_pdomain->coord_type) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        cpumask_set_cpu(j, covered_cpus);
                        cpumask_set_cpu(j, pr->shared_cpu_map);
                }

                for_each_possible_cpu(j) {
                        if (i == j)
                                continue;

                        match_pr = all_cpu_data[j];
                        if (!match_pr)
                                continue;

                        match_cpc_ptr = per_cpu(cpc_desc_ptr, j);
                        if (!match_cpc_ptr) {
                                retval = -EFAULT;
                                goto err_ret;
                        }

                        match_pdomain = &(match_cpc_ptr->domain_info);
                        if (match_pdomain->domain != pdomain->domain)
                                continue;

                        match_pr->shared_type = pr->shared_type;
                        cpumask_copy(match_pr->shared_cpu_map,
                                     pr->shared_cpu_map);
                }
        }

err_ret:
        for_each_possible_cpu(i) {
                pr = all_cpu_data[i];
                if (!pr)
                        continue;

                /* Assume no coordination on any error parsing domain info */
                if (retval) {
                        cpumask_clear(pr->shared_cpu_map);
                        cpumask_set_cpu(i, pr->shared_cpu_map);
                        pr->shared_type = CPUFREQ_SHARED_TYPE_ALL;
                }
        }

        free_cpumask_var(covered_cpus);
        return retval;
}
EXPORT_SYMBOL_GPL(acpi_get_psd_map);

static int register_pcc_channel(int pcc_ss_idx)
{
        struct acpi_pcct_hw_reduced *cppc_ss;
        u64 usecs_lat;

        if (pcc_ss_idx >= 0) {
                pcc_data[pcc_ss_idx]->pcc_channel =
                        pcc_mbox_request_channel(&cppc_mbox_cl, pcc_ss_idx);

                if (IS_ERR(pcc_data[pcc_ss_idx]->pcc_channel)) {
                        pr_err("Failed to find PCC channel for subspace %d\n",
                               pcc_ss_idx);
                        return -ENODEV;
                }

                /*
                 * The PCC mailbox controller driver should
                 * have parsed the PCCT (global table of all
                 * PCC channels) and stored pointers to the
                 * subspace communication region in con_priv.
                 */
                cppc_ss = (pcc_data[pcc_ss_idx]->pcc_channel)->con_priv;
                if (!cppc_ss) {
                        pr_err("No PCC subspace found for CPPC subspace %d\n",
                               pcc_ss_idx);
                        return -ENODEV;
                }

                /*
                 * cppc_ss->latency is just a Nominal value. In reality
                 * the remote processor could be much slower to reply.
                 * So add an arbitrary amount of wait on top of Nominal.
                 */
                usecs_lat = NUM_RETRIES * cppc_ss->latency;
                pcc_data[pcc_ss_idx]->deadline = ns_to_ktime(usecs_lat * NSEC_PER_USEC);
                pcc_data[pcc_ss_idx]->pcc_mrtt = cppc_ss->min_turnaround_time;
                pcc_data[pcc_ss_idx]->pcc_mpar = cppc_ss->max_access_rate;
                pcc_data[pcc_ss_idx]->pcc_nominal = cppc_ss->latency;

                pcc_data[pcc_ss_idx]->pcc_comm_addr =
                        acpi_os_ioremap(cppc_ss->base_address, cppc_ss->length);
                if (!pcc_data[pcc_ss_idx]->pcc_comm_addr) {
                        pr_err("Failed to ioremap PCC comm region mem for %d\n",
                               pcc_ss_idx);
                        return -ENOMEM;
                }

                /* Set flag so that we don't come here for each CPU. */
                pcc_data[pcc_ss_idx]->pcc_channel_acquired = true;
        }

        return 0;
}

/**
 * cpc_ffh_supported() - check if FFH reading is supported
 *
 * Check if the architecture has support for functional fixed hardware
 * read/write capability.
 *
 * Return: true for supported, false for not supported
 */
bool __weak cpc_ffh_supported(void)
{
        return false;
}

/**
 * pcc_data_alloc() - Allocate the pcc_data memory for a PCC subspace
 *
 * Check and allocate the cppc_pcc_data memory.
 * In some processor configurations it is possible that the same subspace
 * is shared between multiple CPUs. This is seen especially in CPUs with
 * hardware multi-threading support.
 *
 * Return: 0 for success, errno for failure
 */
int pcc_data_alloc(int pcc_ss_id)
{
        if (pcc_ss_id < 0 || pcc_ss_id >= MAX_PCC_SUBSPACES)
                return -EINVAL;

        if (pcc_data[pcc_ss_id]) {
                pcc_data[pcc_ss_id]->refcount++;
        } else {
                pcc_data[pcc_ss_id] = kzalloc(sizeof(struct cppc_pcc_data),
                                              GFP_KERNEL);
                if (!pcc_data[pcc_ss_id])
                        return -ENOMEM;
                pcc_data[pcc_ss_id]->refcount++;
        }

        return 0;
}

/*
 * An example CPC table looks like the following.
 *
 *	Name(_CPC, Package()
 *	{
 *		17,	// NumEntries
 *		1,	// Revision
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x120, 2)},
 *		// Highest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x124, 2)},
 *		// Nominal Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x128, 2)},
 *		// Lowest Nonlinear Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x12C, 2)},
 *		// Lowest Performance
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x130, 2)},
 *		// Guaranteed Performance Register
 *		ResourceTemplate(){Register(PCC, 32, 0, 0x110, 2)},
 *		// Desired Performance Register
 *		ResourceTemplate(){Register(SystemMemory, 0, 0, 0, 0)},
 *		..
 *		..
 *		..
 *	})
 *
 * Each Register() encodes how to access that specific register.
 * e.g. a sample PCC entry has the following encoding:
 *
 *	Register (
 *		PCC,	// AddressSpaceKeyword
 *		8,	// RegisterBitWidth
 *		8,	// RegisterBitOffset
 *		0x30,	// RegisterAddress
 *		9	// AccessSize (subspace ID)
 *	)
 */

/**
 * acpi_cppc_processor_probe - Search for per-CPU _CPC objects.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: 0 for success or negative value for err.
 */
int acpi_cppc_processor_probe(struct acpi_processor *pr)
{
        struct acpi_buffer output = {ACPI_ALLOCATE_BUFFER, NULL};
        union acpi_object *out_obj, *cpc_obj;
        struct cpc_desc *cpc_ptr;
        struct cpc_reg *gas_t;
        struct device *cpu_dev;
        acpi_handle handle = pr->handle;
        unsigned int num_ent, i, cpc_rev;
        int pcc_subspace_id = -1;
        acpi_status status;
        int ret = -EFAULT;

        /* Parse the ACPI _CPC table for this CPU. */
        status = acpi_evaluate_object_typed(handle, "_CPC", NULL, &output,
                                            ACPI_TYPE_PACKAGE);
        if (ACPI_FAILURE(status)) {
                ret = -ENODEV;
                goto out_buf_free;
        }

        out_obj = (union acpi_object *) output.pointer;

        cpc_ptr = kzalloc(sizeof(struct cpc_desc), GFP_KERNEL);
        if (!cpc_ptr) {
                ret = -ENOMEM;
                goto out_buf_free;
        }

        /* First entry is NumEntries. */
        cpc_obj = &out_obj->package.elements[0];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                num_ent = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected entry type(%d) for NumEntries\n",
                         cpc_obj->type);
                goto out_free;
        }

        /* Only support CPPCv2. Bail otherwise. */
        if (num_ent != CPPC_NUM_ENT) {
                pr_debug("Firmware exports %d entries. Expected: %d\n",
                         num_ent, CPPC_NUM_ENT);
                goto out_free;
        }
        cpc_ptr->num_entries = num_ent;

        /* Second entry should be revision. */
        cpc_obj = &out_obj->package.elements[1];
        if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                cpc_rev = cpc_obj->integer.value;
        } else {
                pr_debug("Unexpected entry type(%d) for Revision\n",
                         cpc_obj->type);
                goto out_free;
        }

        if (cpc_rev != CPPC_REV) {
                pr_debug("Firmware exports revision:%d. Expected:%d\n",
                         cpc_rev, CPPC_REV);
                goto out_free;
        }

        /* Iterate through remaining entries in _CPC */
        for (i = 2; i < num_ent; i++) {
                cpc_obj = &out_obj->package.elements[i];

                if (cpc_obj->type == ACPI_TYPE_INTEGER) {
                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_INTEGER;
                        cpc_ptr->cpc_regs[i-2].cpc_entry.int_value = cpc_obj->integer.value;
                } else if (cpc_obj->type == ACPI_TYPE_BUFFER) {
                        gas_t = (struct cpc_reg *)
                                cpc_obj->buffer.pointer;

                        /*
                         * The PCC Subspace index is encoded inside
                         * the CPC table entries. The same PCC index
                         * will be used for all the PCC entries,
                         * so extract it only once.
                         */
                        if (gas_t->space_id == ACPI_ADR_SPACE_PLATFORM_COMM) {
                                if (pcc_subspace_id < 0) {
                                        pcc_subspace_id = gas_t->access_width;
                                        if (pcc_data_alloc(pcc_subspace_id))
                                                goto out_free;
                                } else if (pcc_subspace_id != gas_t->access_width) {
                                        pr_debug("Mismatched PCC ids.\n");
                                        goto out_free;
                                }
                        } else if (gas_t->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY) {
                                if (gas_t->address) {
                                        void __iomem *addr;

                                        addr = ioremap(gas_t->address, gas_t->bit_width/8);
                                        if (!addr)
                                                goto out_free;
                                        cpc_ptr->cpc_regs[i-2].sys_mem_vaddr = addr;
                                }
                        } else {
                                if (gas_t->space_id != ACPI_ADR_SPACE_FIXED_HARDWARE || !cpc_ffh_supported()) {
                                        /* Support only PCC, SYS MEM and FFH type regs */
                                        pr_debug("Unsupported register type: %d\n", gas_t->space_id);
                                        goto out_free;
                                }
                        }

                        cpc_ptr->cpc_regs[i-2].type = ACPI_TYPE_BUFFER;
                        memcpy(&cpc_ptr->cpc_regs[i-2].cpc_entry.reg, gas_t, sizeof(*gas_t));
                } else {
                        pr_debug("Err in entry:%d in CPC table of CPU:%d\n", i, pr->id);
                        goto out_free;
                }
        }

        per_cpu(cpu_pcc_subspace_idx, pr->id) = pcc_subspace_id;

        /* Store CPU Logical ID */
        cpc_ptr->cpu_id = pr->id;

        /* Parse PSD data for this CPU */
        ret = acpi_get_psd(cpc_ptr, handle);
        if (ret)
                goto out_free;

        /* Register the PCC channel once per PCC subspace ID. */
        if (pcc_subspace_id >= 0 && !pcc_data[pcc_subspace_id]->pcc_channel_acquired) {
                ret = register_pcc_channel(pcc_subspace_id);
                if (ret)
                        goto out_free;

                init_rwsem(&pcc_data[pcc_subspace_id]->pcc_lock);
                init_waitqueue_head(&pcc_data[pcc_subspace_id]->pcc_write_wait_q);
        }

        /* Everything looks okay */
        pr_debug("Parsed CPC struct for CPU: %d\n", pr->id);

        /* Add per-logical-CPU nodes for reading its feedback counters. */
        cpu_dev = get_cpu_device(pr->id);
        if (!cpu_dev) {
                ret = -EINVAL;
                goto out_free;
        }

        /* Plug PSD data into this CPU's CPC descriptor. */
        per_cpu(cpc_desc_ptr, pr->id) = cpc_ptr;

        ret = kobject_init_and_add(&cpc_ptr->kobj, &cppc_ktype, &cpu_dev->kobj,
                                   "acpi_cppc");
        if (ret) {
                per_cpu(cpc_desc_ptr, pr->id) = NULL;
                goto out_free;
        }

        kfree(output.pointer);
        return 0;

out_free:
        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                void __iomem *addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;

                if (addr)
                        iounmap(addr);
        }
        kfree(cpc_ptr);

out_buf_free:
        kfree(output.pointer);
        return ret;
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_probe);

/**
 * acpi_cppc_processor_exit - Cleanup CPC structs.
 * @pr: Ptr to acpi_processor containing this CPU's logical ID.
 *
 * Return: Void
 */
void acpi_cppc_processor_exit(struct acpi_processor *pr)
{
        struct cpc_desc *cpc_ptr;
        unsigned int i;
        void __iomem *addr;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, pr->id);

        if (pcc_ss_id >= 0 && pcc_data[pcc_ss_id]) {
                if (pcc_data[pcc_ss_id]->pcc_channel_acquired) {
                        pcc_data[pcc_ss_id]->refcount--;
                        if (!pcc_data[pcc_ss_id]->refcount) {
                                pcc_mbox_free_channel(pcc_data[pcc_ss_id]->pcc_channel);
                                pcc_data[pcc_ss_id]->pcc_channel_acquired = false;
                                kfree(pcc_data[pcc_ss_id]);
                        }
                }
        }

        cpc_ptr = per_cpu(cpc_desc_ptr, pr->id);
        if (!cpc_ptr)
                return;

        /* Free all the mapped sys mem areas for this CPU */
        for (i = 2; i < cpc_ptr->num_entries; i++) {
                addr = cpc_ptr->cpc_regs[i-2].sys_mem_vaddr;
                if (addr)
                        iounmap(addr);
        }

        kobject_put(&cpc_ptr->kobj);
        kfree(cpc_ptr);
}
EXPORT_SYMBOL_GPL(acpi_cppc_processor_exit);

/**
 * cpc_read_ffh() - Read FFH register
 * @cpunum:	CPU number to read
 * @reg:	cppc register information
 * @val:	placeholder for return value
 *
 * Read bit_width bits from a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_read_ffh(int cpunum, struct cpc_reg *reg, u64 *val)
{
        return -ENOTSUPP;
}

/**
 * cpc_write_ffh() - Write FFH register
 * @cpunum:	CPU number to write
 * @reg:	cppc register information
 * @val:	value to write
 *
 * Write value of bit_width bits to a specified address and bit_offset
 *
 * Return: 0 for success, or an error code
 */
int __weak cpc_write_ffh(int cpunum, struct cpc_reg *reg, u64 val)
{
        return -ENOTSUPP;
}

/*
 * Since cpc_read and cpc_write are called while holding pcc_lock, they should
 * be as fast as possible. We have already mapped the PCC subspace during init,
 * so we can directly write to it.
 */
static int cpc_read(int cpu, struct cpc_register_resource *reg_res, u64 *val)
{
        int ret_val = 0;
        void __iomem *vaddr = NULL;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg_res->type == ACPI_TYPE_INTEGER) {
                *val = reg_res->cpc_entry.int_value;
                return ret_val;
        }

        *val = 0;
        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_read_ffh(cpu, reg, val);
        else
                return acpi_os_read_memory((acpi_physical_address)reg->address,
                                           val, reg->bit_width);

        switch (reg->bit_width) {
        case 8:
                *val = readb_relaxed(vaddr);
                break;
        case 16:
                *val = readw_relaxed(vaddr);
                break;
        case 32:
                *val = readl_relaxed(vaddr);
                break;
        case 64:
                *val = readq_relaxed(vaddr);
                break;
        default:
                pr_debug("Error: Cannot read %u bit width from PCC for ss: %d\n",
                         reg->bit_width, pcc_ss_id);
                ret_val = -EFAULT;
        }

        return ret_val;
}

static int cpc_write(int cpu, struct cpc_register_resource *reg_res, u64 val)
{
        int ret_val = 0;
        void __iomem *vaddr = NULL;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cpc_reg *reg = &reg_res->cpc_entry.reg;

        if (reg->space_id == ACPI_ADR_SPACE_PLATFORM_COMM && pcc_ss_id >= 0)
                vaddr = GET_PCC_VADDR(reg->address, pcc_ss_id);
        else if (reg->space_id == ACPI_ADR_SPACE_SYSTEM_MEMORY)
                vaddr = reg_res->sys_mem_vaddr;
        else if (reg->space_id == ACPI_ADR_SPACE_FIXED_HARDWARE)
                return cpc_write_ffh(cpu, reg, val);
        else
                return acpi_os_write_memory((acpi_physical_address)reg->address,
                                            val, reg->bit_width);

        switch (reg->bit_width) {
        case 8:
                writeb_relaxed(val, vaddr);
                break;
        case 16:
                writew_relaxed(val, vaddr);
                break;
        case 32:
                writel_relaxed(val, vaddr);
                break;
        case 64:
                writeq_relaxed(val, vaddr);
                break;
        default:
                pr_debug("Error: Cannot write %u bit width to PCC for ss: %d\n",
                         reg->bit_width, pcc_ss_id);
                ret_val = -EFAULT;
                break;
        }

        return ret_val;
}

/**
 * cppc_get_perf_caps - Get a CPU's performance capabilities.
 * @cpunum: CPU from which to get capabilities info.
 * @perf_caps: ptr to cppc_perf_caps. See cppc_acpi.h
 *
 * Return: 0 for success with perf_caps populated else -ERRNO.
 */
int cppc_get_perf_caps(int cpunum, struct cppc_perf_caps *perf_caps)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *highest_reg, *lowest_reg,
                *lowest_non_linear_reg, *nominal_reg;
        u64 high, low, nom, min_nonlinear;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
        struct cppc_pcc_data *pcc_ss_data;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        pcc_ss_data = pcc_data[pcc_ss_id];
        highest_reg = &cpc_desc->cpc_regs[HIGHEST_PERF];
        lowest_reg = &cpc_desc->cpc_regs[LOWEST_PERF];
        lowest_non_linear_reg = &cpc_desc->cpc_regs[LOW_NON_LINEAR_PERF];
        nominal_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(highest_reg) || CPC_IN_PCC(lowest_reg) ||
            CPC_IN_PCC(lowest_non_linear_reg) || CPC_IN_PCC(nominal_reg)) {
                regs_in_pcc = 1;
                down_write(&pcc_ss_data->pcc_lock);
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, highest_reg, &high);
        perf_caps->highest_perf = high;

        cpc_read(cpunum, lowest_reg, &low);
        perf_caps->lowest_perf = low;

        cpc_read(cpunum, nominal_reg, &nom);
        perf_caps->nominal_perf = nom;

        cpc_read(cpunum, lowest_non_linear_reg, &min_nonlinear);
        perf_caps->lowest_nonlinear_perf = min_nonlinear;

        if (!high || !low || !nom || !min_nonlinear)
                ret = -EFAULT;

out_err:
        if (regs_in_pcc)
                up_write(&pcc_ss_data->pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_caps);
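
/*
 * Note: per the CPPC spec, the capability values read above are expected to
 * satisfy highest_perf >= nominal_perf >= lowest_nonlinear_perf >=
 * lowest_perf, all on the abstract, continuous performance scale described
 * at the top of this file.
 */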

/**
 * cppc_get_perf_ctrs - Read a CPU's performance feedback counters.
 * @cpunum: CPU from which to read counters.
 * @perf_fb_ctrs: ptr to cppc_perf_fb_ctrs. See cppc_acpi.h
 *
 * Return: 0 for success with perf_fb_ctrs populated else -ERRNO.
 */
int cppc_get_perf_ctrs(int cpunum, struct cppc_perf_fb_ctrs *perf_fb_ctrs)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpunum);
        struct cpc_register_resource *delivered_reg, *reference_reg,
                *ref_perf_reg, *ctr_wrap_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpunum);
        struct cppc_pcc_data *pcc_ss_data;
        u64 delivered, reference, ref_perf, ctr_wrap_time;
        int ret = 0, regs_in_pcc = 0;

        if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpunum);
                return -ENODEV;
        }

        pcc_ss_data = pcc_data[pcc_ss_id];
        delivered_reg = &cpc_desc->cpc_regs[DELIVERED_CTR];
        reference_reg = &cpc_desc->cpc_regs[REFERENCE_CTR];
        ref_perf_reg = &cpc_desc->cpc_regs[REFERENCE_PERF];
        ctr_wrap_reg = &cpc_desc->cpc_regs[CTR_WRAP_TIME];

        /*
         * If the reference perf register is not supported then we should
         * use the nominal perf value.
         */
        if (!CPC_SUPPORTED(ref_perf_reg))
                ref_perf_reg = &cpc_desc->cpc_regs[NOMINAL_PERF];

        /* Are any of the regs PCC? */
        if (CPC_IN_PCC(delivered_reg) || CPC_IN_PCC(reference_reg) ||
            CPC_IN_PCC(ctr_wrap_reg) || CPC_IN_PCC(ref_perf_reg)) {
                down_write(&pcc_ss_data->pcc_lock);
                regs_in_pcc = 1;
                /* Ring doorbell once to update PCC subspace */
                if (send_pcc_cmd(pcc_ss_id, CMD_READ) < 0) {
                        ret = -EIO;
                        goto out_err;
                }
        }

        cpc_read(cpunum, delivered_reg, &delivered);
        cpc_read(cpunum, reference_reg, &reference);
        cpc_read(cpunum, ref_perf_reg, &ref_perf);

        /*
         * Per spec, if the ctr_wrap_time optional register is unsupported,
         * then the performance counters are assumed to never wrap during
         * the lifetime of the platform.
         */
        ctr_wrap_time = (u64)(~((u64)0));
        if (CPC_SUPPORTED(ctr_wrap_reg))
                cpc_read(cpunum, ctr_wrap_reg, &ctr_wrap_time);

        if (!delivered || !reference || !ref_perf) {
                ret = -EFAULT;
                goto out_err;
        }

        perf_fb_ctrs->delivered = delivered;
        perf_fb_ctrs->reference = reference;
        perf_fb_ctrs->reference_perf = ref_perf;
        perf_fb_ctrs->wraparound_time = ctr_wrap_time;

out_err:
        if (regs_in_pcc)
                up_write(&pcc_ss_data->pcc_lock);
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_get_perf_ctrs);
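
/*
 * A consumer would typically sample the feedback counters twice and derive
 * the average delivered performance over the interval. A sketch, following
 * the CPPC spec's description of the delivered/reference counters:
 *
 *	delivered_perf = reference_perf *
 *			 (delivered_2 - delivered_1) /
 *			 (reference_2 - reference_1);
 */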

/**
 * cppc_set_perf - Set a CPU's performance controls.
 * @cpu: CPU for which to set performance controls.
 * @perf_ctrls: ptr to cppc_perf_ctrls. See cppc_acpi.h
 *
 * Return: 0 for success, -ERRNO otherwise.
 */
int cppc_set_perf(int cpu, struct cppc_perf_ctrls *perf_ctrls)
{
        struct cpc_desc *cpc_desc = per_cpu(cpc_desc_ptr, cpu);
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu);
        struct cppc_pcc_data *pcc_ss_data;
        int ret = 0;

        if (!cpc_desc || pcc_ss_id < 0) {
                pr_debug("No CPC descriptor for CPU:%d\n", cpu);
                return -ENODEV;
        }

        pcc_ss_data = pcc_data[pcc_ss_id];
        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];

        /*
         * This is Phase-I, where we want to write to the CPC registers:
         * -> We want all CPUs to be able to execute this phase in parallel.
         *
         * Since the read_lock can be acquired by multiple CPUs
         * simultaneously, we achieve that goal here.
         */
        if (CPC_IN_PCC(desired_reg)) {
                down_read(&pcc_ss_data->pcc_lock);      /* BEGIN Phase-I */
                if (pcc_ss_data->platform_owns_pcc) {
                        ret = check_pcc_chan(pcc_ss_id, false);
                        if (ret) {
                                up_read(&pcc_ss_data->pcc_lock);
                                return ret;
                        }
                }
                /*
                 * Update pending_pcc_write_cmd to make sure a PCC CMD_READ
                 * will not arrive and steal the channel during the switch
                 * to the write lock.
                 */
                pcc_ss_data->pending_pcc_write_cmd = true;
                cpc_desc->write_cmd_id = pcc_ss_data->pcc_write_cnt;
                cpc_desc->write_cmd_status = 0;
        }

        /*
         * Skip writing MIN/MAX until Linux knows how to come up with
         * useful values.
         */
        cpc_write(cpu, desired_reg, perf_ctrls->desired_perf);

        if (CPC_IN_PCC(desired_reg))
                up_read(&pcc_ss_data->pcc_lock);        /* END Phase-I */

        /*
         * This is Phase-II, where we transfer the ownership of PCC to
         * the platform.
         *
         * Short summary: consider a group of cppc_set_perf requests that
         * arrive in a short, overlapping interval. The last CPU to come out
         * of Phase-I will enter Phase-II and ring the doorbell.
         *
         * We have the following requirements for Phase-II:
         *     1. We want to execute Phase-II only when there are no CPUs
         * currently executing in Phase-I.
         *     2. Once we start Phase-II we want to avoid all other CPUs
         * entering Phase-I.
         *     3. We want only one CPU among all those who went through
         * Phase-I to run Phase-II.
         *
         * If write_trylock fails to get the lock and doesn't transfer the
         * PCC ownership to the platform, then one of the following will be
         * TRUE:
         *     1. There is at least one CPU in Phase-I which will later
         * execute write_trylock, so the CPUs in Phase-I will be responsible
         * for executing the Phase-II.
         *     2. Some other CPU has beaten this CPU to successfully execute
         * the write_trylock and has already acquired the write_lock. We know
         * for a fact it (the other CPU acquiring the write_lock) couldn't
         * have happened before this CPU's Phase-I as we held the read_lock.
         *     3. Some other CPU executing a PCC CMD_READ has stolen the
         * down_write, in which case send_pcc_cmd will check for pending
         * CMD_WRITE commands by checking pending_pcc_write_cmd.
         * So in all cases, this CPU knows that its request will be delivered
         * by another CPU and can return.
         *
         * After getting the down_write we still need to check for
         * pending_pcc_write_cmd to take care of the following scenario:
         * the thread running this code could be scheduled out between
         * Phase-I and Phase-II. Before it is scheduled back in, another CPU
         * could have delivered the request to the platform by triggering the
         * doorbell and transferred the ownership of PCC to the platform. So
         * this avoids triggering an unnecessary doorbell and, more
         * importantly, before triggering the doorbell it makes sure that the
         * PCC channel ownership is still with OSPM.
         * pending_pcc_write_cmd can also be cleared by a different CPU if
         * there was a PCC CMD_READ waiting on down_write and it stole the
         * lock before the PCC CMD_WRITE was completed. send_pcc_cmd checks
         * for this case during a CMD_READ and, if there are pending writes,
         * it delivers the write command before servicing the read command.
         */
        if (CPC_IN_PCC(desired_reg)) {
                if (down_write_trylock(&pcc_ss_data->pcc_lock)) { /* BEGIN Phase-II */
                        /* Update only if there are pending write commands */
                        if (pcc_ss_data->pending_pcc_write_cmd)
                                send_pcc_cmd(pcc_ss_id, CMD_WRITE);
                        up_write(&pcc_ss_data->pcc_lock);       /* END Phase-II */
                } else
                        /* Wait until pcc_write_cnt is updated by send_pcc_cmd */
                        wait_event(pcc_ss_data->pcc_write_wait_q,
                                   cpc_desc->write_cmd_id != pcc_ss_data->pcc_write_cnt);

                /* send_pcc_cmd updates the status in case of failure */
                ret = cpc_desc->write_cmd_status;
        }
        return ret;
}
EXPORT_SYMBOL_GPL(cppc_set_perf);

/**
 * cppc_get_transition_latency - returns frequency transition latency in ns
 *
 * ACPI CPPC does not explicitly specify how a platform can communicate the
 * transition latency for performance change requests. The closest we have
 * is the timing information from the PCCT tables, which provides the info
 * on the number and frequency of PCC commands the platform can handle.
 */
unsigned int cppc_get_transition_latency(int cpu_num)
{
        /*
         * Expected transition latency is based on the PCCT timing values.
         * Below are the definitions from the ACPI spec:
         * pcc_nominal - Expected latency to process a command, in
         *               microseconds.
         * pcc_mpar    - The maximum number of periodic requests that the
         *               subspace channel can support, reported in commands
         *               per minute. 0 indicates no limitation.
         * pcc_mrtt    - The minimum amount of time that OSPM must wait
         *               after the completion of a command before issuing
         *               the next command, in microseconds.
         */
        unsigned int latency_ns = 0;
        struct cpc_desc *cpc_desc;
        struct cpc_register_resource *desired_reg;
        int pcc_ss_id = per_cpu(cpu_pcc_subspace_idx, cpu_num);
        struct cppc_pcc_data *pcc_ss_data;

        cpc_desc = per_cpu(cpc_desc_ptr, cpu_num);
        if (!cpc_desc)
                return CPUFREQ_ETERNAL;

        desired_reg = &cpc_desc->cpc_regs[DESIRED_PERF];
        if (!CPC_IN_PCC(desired_reg))
                return CPUFREQ_ETERNAL;

        if (pcc_ss_id < 0)
                return CPUFREQ_ETERNAL;

        pcc_ss_data = pcc_data[pcc_ss_id];
        if (pcc_ss_data->pcc_mpar)
                latency_ns = 60 * (1000 * 1000 * 1000 / pcc_ss_data->pcc_mpar);

        latency_ns = max(latency_ns, pcc_ss_data->pcc_nominal * 1000);
        latency_ns = max(latency_ns, pcc_ss_data->pcc_mrtt * 1000);

        return latency_ns;
}
EXPORT_SYMBOL_GPL(cppc_get_transition_latency);
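
/*
 * Worked example for the latency folding above (illustrative numbers, not
 * from any particular platform): with pcc_mpar = 6000 commands/min the MPAR
 * term is 60 * (10^9 / 6000) = 10,000,000 ns, i.e. one command every 10 ms.
 * With pcc_nominal = 500 us (500,000 ns) and pcc_mrtt = 100 us (100,000 ns),
 * the max() calls still report 10 ms as the expected transition latency.
 */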