// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2020 - Google LLC
 * Author: David Brazdil <dbrazdil@google.com>
 */

#include <asm/kvm_asm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <uapi/linux/psci.h>

#include <nvhe/trap_handler.h>

void kvm_hyp_cpu_entry(unsigned long r0);
void kvm_hyp_cpu_resume(unsigned long r0);

void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);

/* Config options set by the host. */
struct kvm_host_psci_config __ro_after_init kvm_host_psci_config;
s64 __ro_after_init hyp_physvirt_offset;

#define __hyp_pa(x) ((phys_addr_t)((x)) + hyp_physvirt_offset)

#define INVALID_CPU_ID	UINT_MAX

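/*
 * Per-CPU arguments handed over from the CPU issuing CPU_ON/CPU_SUSPEND to
 * the CPU being booted or resumed. The lock word serializes concurrent
 * CPU_ON calls targeting the same core.
 */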
struct psci_boot_args {
	atomic_t lock;
	unsigned long pc;
	unsigned long r0;
};

#define PSCI_BOOT_ARGS_UNLOCKED		0
#define PSCI_BOOT_ARGS_LOCKED		1

#define PSCI_BOOT_ARGS_INIT					\
	((struct psci_boot_args){				\
		.lock = ATOMIC_INIT(PSCI_BOOT_ARGS_UNLOCKED),	\
	})

static DEFINE_PER_CPU(struct psci_boot_args, cpu_on_args) = PSCI_BOOT_ARGS_INIT;
static DEFINE_PER_CPU(struct psci_boot_args, suspend_args) = PSCI_BOOT_ARGS_INIT;

#define is_psci_0_1(what, func_id)					\
	(kvm_host_psci_config.psci_0_1_ ## what ## _implemented &&	\
	 (func_id) == kvm_host_psci_config.function_ids_0_1.what)

static bool is_psci_0_1_call(u64 func_id)
{
	return (is_psci_0_1(cpu_suspend, func_id) ||
		is_psci_0_1(cpu_on, func_id) ||
		is_psci_0_1(cpu_off, func_id) ||
		is_psci_0_1(migrate, func_id));
}

static bool is_psci_0_2_call(u64 func_id)
{
	/* SMCCC reserves IDs 0x00-1F with the given 32/64-bit base for PSCI. */
	return (PSCI_0_2_FN(0) <= func_id && func_id <= PSCI_0_2_FN(31)) ||
	       (PSCI_0_2_FN64(0) <= func_id && func_id <= PSCI_0_2_FN64(31));
}

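/* Issue the PSCI call to EL3 firmware via SMC and return its result (x0). */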
static unsigned long psci_call(unsigned long fn, unsigned long arg0,
			       unsigned long arg1, unsigned long arg2)
{
	struct arm_smccc_res res;

	arm_smccc_1_1_smc(fn, arg0, arg1, arg2, &res);
	return res.a0;
}

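/* Pass the host's PSCI call through to EL3 with its arguments unmodified. */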
static unsigned long psci_forward(struct kvm_cpu_context *host_ctxt)
{
	return psci_call(cpu_reg(host_ctxt, 0), cpu_reg(host_ctxt, 1),
			 cpu_reg(host_ctxt, 2), cpu_reg(host_ctxt, 3));
}

static __noreturn unsigned long psci_forward_noreturn(struct kvm_cpu_context *host_ctxt)
{
	psci_forward(host_ctxt);
	hyp_panic(); /* unreachable */
}

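/* Map an MPIDR to the logical CPU ID used to index per-CPU hyp data. */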
static unsigned int find_cpu_id(u64 mpidr)
{
	unsigned int i;

	/* Reject invalid MPIDRs */
	if (mpidr & ~MPIDR_HWID_BITMASK)
		return INVALID_CPU_ID;

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_logical_map(i) == mpidr)
			return i;
	}

	return INVALID_CPU_ID;
}

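/*
 * Atomically take ownership of a boot_args struct. The acquire ordering
 * pairs with the release in release_boot_args(), making the previous
 * owner's pc/r0 writes visible to the new owner.
 */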
static __always_inline bool try_acquire_boot_args(struct psci_boot_args *args)
{
	return atomic_cmpxchg_acquire(&args->lock,
				      PSCI_BOOT_ARGS_UNLOCKED,
				      PSCI_BOOT_ARGS_LOCKED) ==
		PSCI_BOOT_ARGS_UNLOCKED;
}

static __always_inline void release_boot_args(struct psci_boot_args *args)
{
	atomic_set_release(&args->lock, PSCI_BOOT_ARGS_UNLOCKED);
}

static int psci_cpu_on(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, mpidr, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);
	unsigned int cpu_id;
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;
	int ret;

	/*
	 * Find the logical CPU ID for the given MPIDR. The search set is
	 * the set of CPUs that were online at the point of KVM initialization.
	 * Booting other CPUs is rejected because their cpufeatures were not
	 * checked against the finalized capabilities. This could be relaxed
	 * by doing the feature checks in hyp.
	 */
	cpu_id = find_cpu_id(mpidr);
	if (cpu_id == INVALID_CPU_ID)
		return PSCI_RET_INVALID_PARAMS;

	boot_args = per_cpu_ptr(hyp_symbol_addr(cpu_on_args), cpu_id);
	init_params = per_cpu_ptr(hyp_symbol_addr(kvm_init_params), cpu_id);

	/* Check if the target CPU is already being booted. */
	if (!try_acquire_boot_args(boot_args))
		return PSCI_RET_ALREADY_ON;

	/* Publish the host's entry point before issuing the call. */
	boot_args->pc = pc;
	boot_args->r0 = r0;
	wmb();

	ret = psci_call(func_id, mpidr,
			__hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_entry)),
			__hyp_pa(init_params));

	/* If successful, the lock will be released by the target CPU. */
	if (ret != PSCI_RET_SUCCESS)
		release_boot_args(boot_args);

	return ret;
}

static int psci_cpu_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, power_state, host_ctxt, 1);
	DECLARE_REG(unsigned long, pc, host_ctxt, 2);
	DECLARE_REG(unsigned long, r0, host_ctxt, 3);
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/*
	 * Will either return if shallow sleep state, or wake up into the entry
	 * point if it is a deep sleep state.
	 */
	return psci_call(func_id, power_state,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(init_params));
}

static int psci_system_suspend(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(unsigned long, pc, host_ctxt, 1);
	DECLARE_REG(unsigned long, r0, host_ctxt, 2);
	struct psci_boot_args *boot_args;
	struct kvm_nvhe_init_params *init_params;

	boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));
	init_params = this_cpu_ptr(hyp_symbol_addr(kvm_init_params));

	/*
	 * No need to acquire a lock before writing to boot_args because a core
	 * can only suspend itself. Racy CPU_ON calls use a separate struct.
	 */
	boot_args->pc = pc;
	boot_args->r0 = r0;

	/* Will only return on error. */
	return psci_call(func_id,
			 __hyp_pa(hyp_symbol_addr(kvm_hyp_cpu_resume)),
			 __hyp_pa(init_params), 0);
}

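/*
 * C entry point for a CPU that was just powered on, or woke up from a deep
 * sleep state. Loads the pc/r0 the host requested from the matching
 * boot_args struct and exits to the host there.
 */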
asmlinkage void __noreturn kvm_host_psci_cpu_entry(bool is_cpu_on)
{
	struct psci_boot_args *boot_args;
	struct kvm_cpu_context *host_ctxt;

	host_ctxt = &this_cpu_ptr(hyp_symbol_addr(kvm_host_data))->host_ctxt;

	if (is_cpu_on)
		boot_args = this_cpu_ptr(hyp_symbol_addr(cpu_on_args));
	else
		boot_args = this_cpu_ptr(hyp_symbol_addr(suspend_args));

	cpu_reg(host_ctxt, 0) = boot_args->r0;
	write_sysreg_el2(boot_args->pc, SYS_ELR);

	if (is_cpu_on)
		release_boot_args(boot_args);

	__host_enter(host_ctxt);
}

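/*
 * PSCI 0.1 has no fixed function ID space; the IDs below were discovered by
 * the host from firmware and passed to hyp in kvm_host_psci_config.
 */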
static unsigned long psci_0_1_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	if (is_psci_0_1(cpu_off, func_id) || is_psci_0_1(migrate, func_id))
		return psci_forward(host_ctxt);
	if (is_psci_0_1(cpu_on, func_id))
		return psci_cpu_on(func_id, host_ctxt);
	if (is_psci_0_1(cpu_suspend, func_id))
		return psci_cpu_suspend(func_id, host_ctxt);

	return PSCI_RET_NOT_SUPPORTED;
}

static unsigned long psci_0_2_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_0_2_FN_PSCI_VERSION:
	case PSCI_0_2_FN_CPU_OFF:
	case PSCI_0_2_FN64_AFFINITY_INFO:
	case PSCI_0_2_FN64_MIGRATE:
	case PSCI_0_2_FN_MIGRATE_INFO_TYPE:
	case PSCI_0_2_FN64_MIGRATE_INFO_UP_CPU:
		return psci_forward(host_ctxt);
	case PSCI_0_2_FN_SYSTEM_OFF:
	case PSCI_0_2_FN_SYSTEM_RESET:
		psci_forward_noreturn(host_ctxt);
		unreachable();
	case PSCI_0_2_FN64_CPU_SUSPEND:
		return psci_cpu_suspend(func_id, host_ctxt);
	case PSCI_0_2_FN64_CPU_ON:
		return psci_cpu_on(func_id, host_ctxt);
	default:
		return PSCI_RET_NOT_SUPPORTED;
	}
}

static unsigned long psci_1_0_handler(u64 func_id, struct kvm_cpu_context *host_ctxt)
{
	switch (func_id) {
	case PSCI_1_0_FN_PSCI_FEATURES:
	case PSCI_1_0_FN_SET_SUSPEND_MODE:
	case PSCI_1_1_FN64_SYSTEM_RESET2:
		return psci_forward(host_ctxt);
	case PSCI_1_0_FN64_SYSTEM_SUSPEND:
		return psci_system_suspend(func_id, host_ctxt);
	default:
		return psci_0_2_handler(func_id, host_ctxt);
	}
}

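/*
 * Entry point for host SMCs trapped at EL2. Returns true if the SMC was a
 * PSCI call and was handled here (result in x0, x1-x3 zeroed), false if it
 * is not a PSCI call and should be handled elsewhere.
 */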
bool kvm_host_psci_handler(struct kvm_cpu_context *host_ctxt)
{
	DECLARE_REG(u64, func_id, host_ctxt, 0);
	unsigned long ret;

	switch (kvm_host_psci_config.version) {
	case PSCI_VERSION(0, 1):
		if (!is_psci_0_1_call(func_id))
			return false;
		ret = psci_0_1_handler(func_id, host_ctxt);
		break;
	case PSCI_VERSION(0, 2):
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_0_2_handler(func_id, host_ctxt);
		break;
	default:
		if (!is_psci_0_2_call(func_id))
			return false;
		ret = psci_1_0_handler(func_id, host_ctxt);
		break;
	}

	cpu_reg(host_ctxt, 0) = ret;
	cpu_reg(host_ctxt, 1) = 0;
	cpu_reg(host_ctxt, 2) = 0;
	cpu_reg(host_ctxt, 3) = 0;
	return true;
}