// SPDX-License-Identifier: GPL-2.0-only
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <asm/asm-prototypes.h>
#include <asm/dbell.h>
#include <asm/kvm_ppc.h>
#include <asm/pmc.h>
#include <asm/ppc-opcode.h>

#include "book3s_hv.h"
static void freeze_pmu(unsigned long mmcr0, unsigned long mmcra)
{
	if (!(mmcr0 & MMCR0_FC))
		goto do_freeze;
	if (mmcra & MMCRA_SAMPLE_ENABLE)
		goto do_freeze;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		if (!(mmcr0 & MMCR0_PMCCEXT))
			goto do_freeze;
		if (!(mmcra & MMCRA_BHRB_DISABLE))
			goto do_freeze;
	}
	return;

do_freeze:
	mmcr0 = MMCR0_FC;
	mmcra = 0;
	if (cpu_has_feature(CPU_FTR_ARCH_31)) {
		mmcr0 |= MMCR0_PMCCEXT;
		mmcra = MMCRA_BHRB_DISABLE;
	}
	mtspr(SPRN_MMCR0, mmcr0);
	mtspr(SPRN_MMCRA, mmcra);
	isync();
}
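
/*
 * Save the host PMU state and load the guest PMU state around guest
 * entry. The host counters are frozen before being read so the saved
 * values are consistent.
 */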
void switch_pmu_to_guest(struct kvm_vcpu *vcpu,
			 struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int load_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		load_pmu = lp->pmcregs_in_use;

	/* Save host */
	if (ppc_get_pmu_inuse()) {
		/*
		 * It might be better to put PMU handling (at least for the
		 * host) in the perf subsystem because it knows more about what
		 * is happening, but until then this is good enough.
		 */

		/* POWER9, POWER10 do not implement HPMC or SPMC */

		host_os_sprs->mmcr0 = mfspr(SPRN_MMCR0);
		host_os_sprs->mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(host_os_sprs->mmcr0, host_os_sprs->mmcra);

		host_os_sprs->pmc1 = mfspr(SPRN_PMC1);
		host_os_sprs->pmc2 = mfspr(SPRN_PMC2);
		host_os_sprs->pmc3 = mfspr(SPRN_PMC3);
		host_os_sprs->pmc4 = mfspr(SPRN_PMC4);
		host_os_sprs->pmc5 = mfspr(SPRN_PMC5);
		host_os_sprs->pmc6 = mfspr(SPRN_PMC6);
		host_os_sprs->mmcr1 = mfspr(SPRN_MMCR1);
		host_os_sprs->mmcr2 = mfspr(SPRN_MMCR2);
		host_os_sprs->sdar = mfspr(SPRN_SDAR);
		host_os_sprs->siar = mfspr(SPRN_SIAR);
		host_os_sprs->sier1 = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			host_os_sprs->mmcr3 = mfspr(SPRN_MMCR3);
			host_os_sprs->sier2 = mfspr(SPRN_SIER2);
			host_os_sprs->sier3 = mfspr(SPRN_SIER3);
		}
	}

#ifdef CONFIG_PPC_PSERIES
	/* After saving PMU, before loading guest PMU, flip pmcregs_in_use */
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = load_pmu;
		barrier();
	}
#endif

	/*
	 * Load guest. If the VPA said the PMCs are not in use but the guest
	 * tried to access them anyway, HFSCR[PM] will be set by the HFAC
	 * fault so we can make forward progress.
	 */
	if (load_pmu || (vcpu->arch.hfscr & HFSCR_PM)) {
		mtspr(SPRN_PMC1, vcpu->arch.pmc[0]);
		mtspr(SPRN_PMC2, vcpu->arch.pmc[1]);
		mtspr(SPRN_PMC3, vcpu->arch.pmc[2]);
		mtspr(SPRN_PMC4, vcpu->arch.pmc[3]);
		mtspr(SPRN_PMC5, vcpu->arch.pmc[4]);
		mtspr(SPRN_PMC6, vcpu->arch.pmc[5]);
		mtspr(SPRN_MMCR1, vcpu->arch.mmcr[1]);
		mtspr(SPRN_MMCR2, vcpu->arch.mmcr[2]);
		mtspr(SPRN_SDAR, vcpu->arch.sdar);
		mtspr(SPRN_SIAR, vcpu->arch.siar);
		mtspr(SPRN_SIER, vcpu->arch.sier[0]);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, vcpu->arch.mmcr[3]);
			mtspr(SPRN_SIER2, vcpu->arch.sier[1]);
			mtspr(SPRN_SIER3, vcpu->arch.sier[2]);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, vcpu->arch.mmcra);
		mtspr(SPRN_MMCR0, vcpu->arch.mmcr[0]);
		/* No isync necessary because we're starting counters */

		if (!vcpu->arch.nested &&
		    (vcpu->arch.hfscr_permitted & HFSCR_PM))
			vcpu->arch.hfscr |= HFSCR_PM;
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_guest);
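
/*
 * Save the guest PMU state and reload the host PMU state on guest exit,
 * the mirror of switch_pmu_to_guest().
 */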
void switch_pmu_to_host(struct kvm_vcpu *vcpu,
			struct p9_host_os_sprs *host_os_sprs)
{
	struct lppaca *lp;
	int save_pmu = 1;

	lp = vcpu->arch.vpa.pinned_addr;
	if (lp)
		save_pmu = lp->pmcregs_in_use;
	if (IS_ENABLED(CONFIG_KVM_BOOK3S_HV_NESTED_PMU_WORKAROUND)) {
		/*
		 * Save the PMU if this guest is capable of running nested
		 * guests. This option is for old L1s that do not set their
		 * lppaca->pmcregs_in_use properly when entering their L2.
		 */
		save_pmu |= nesting_enabled(vcpu->kvm);
	}

	if (save_pmu) {
		vcpu->arch.mmcr[0] = mfspr(SPRN_MMCR0);
		vcpu->arch.mmcra = mfspr(SPRN_MMCRA);

		freeze_pmu(vcpu->arch.mmcr[0], vcpu->arch.mmcra);

		vcpu->arch.pmc[0] = mfspr(SPRN_PMC1);
		vcpu->arch.pmc[1] = mfspr(SPRN_PMC2);
		vcpu->arch.pmc[2] = mfspr(SPRN_PMC3);
		vcpu->arch.pmc[3] = mfspr(SPRN_PMC4);
		vcpu->arch.pmc[4] = mfspr(SPRN_PMC5);
		vcpu->arch.pmc[5] = mfspr(SPRN_PMC6);
		vcpu->arch.mmcr[1] = mfspr(SPRN_MMCR1);
		vcpu->arch.mmcr[2] = mfspr(SPRN_MMCR2);
		vcpu->arch.sdar = mfspr(SPRN_SDAR);
		vcpu->arch.siar = mfspr(SPRN_SIAR);
		vcpu->arch.sier[0] = mfspr(SPRN_SIER);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			vcpu->arch.mmcr[3] = mfspr(SPRN_MMCR3);
			vcpu->arch.sier[1] = mfspr(SPRN_SIER2);
			vcpu->arch.sier[2] = mfspr(SPRN_SIER3);
		}

	} else if (vcpu->arch.hfscr & HFSCR_PM) {
		/*
		 * The guest accessed PMC SPRs without specifying they should
		 * be preserved, or it cleared pmcregs_in_use after the last
		 * access. Just ensure they are frozen.
		 */
		freeze_pmu(mfspr(SPRN_MMCR0), mfspr(SPRN_MMCRA));

		/*
		 * Demand-fault PMU register access in the guest.
		 *
		 * This is used to grab the guest's VPA pmcregs_in_use value
		 * and reflect it into the host's VPA in the case of a nested
		 * hypervisor.
		 *
		 * It also avoids having to zero-out SPRs after each guest
		 * exit to avoid side-channels.
		 *
		 * HFSCR[PM] is cleared here when we exit the guest, so later
		 * HFSCR interrupt handling can add it back to run the guest
		 * with PM enabled next time.
		 */
		if (!vcpu->arch.nested)
			vcpu->arch.hfscr &= ~HFSCR_PM;
	} /* otherwise the PMU should still be frozen */

#ifdef CONFIG_PPC_PSERIES
	if (kvmhv_on_pseries()) {
		barrier();
		get_lppaca()->pmcregs_in_use = ppc_get_pmu_inuse();
		barrier();
	}
#endif

	if (ppc_get_pmu_inuse()) {
		mtspr(SPRN_PMC1, host_os_sprs->pmc1);
		mtspr(SPRN_PMC2, host_os_sprs->pmc2);
		mtspr(SPRN_PMC3, host_os_sprs->pmc3);
		mtspr(SPRN_PMC4, host_os_sprs->pmc4);
		mtspr(SPRN_PMC5, host_os_sprs->pmc5);
		mtspr(SPRN_PMC6, host_os_sprs->pmc6);
		mtspr(SPRN_MMCR1, host_os_sprs->mmcr1);
		mtspr(SPRN_MMCR2, host_os_sprs->mmcr2);
		mtspr(SPRN_SDAR, host_os_sprs->sdar);
		mtspr(SPRN_SIAR, host_os_sprs->siar);
		mtspr(SPRN_SIER, host_os_sprs->sier1);

		if (cpu_has_feature(CPU_FTR_ARCH_31)) {
			mtspr(SPRN_MMCR3, host_os_sprs->mmcr3);
			mtspr(SPRN_SIER2, host_os_sprs->sier2);
			mtspr(SPRN_SIER3, host_os_sprs->sier3);
		}

		/* Set MMCRA then MMCR0 last */
		mtspr(SPRN_MMCRA, host_os_sprs->mmcra);
		mtspr(SPRN_MMCR0, host_os_sprs->mmcr0);
		isync();
	}
}
EXPORT_SYMBOL_GPL(switch_pmu_to_host);
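
/*
 * Load guest SPRs, skipping the mtspr where the guest value already
 * matches the current host (current->thread) value, since mtspr is
 * relatively expensive.
 */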
static void load_spr_state(struct kvm_vcpu *vcpu,
			   struct p9_host_os_sprs *host_os_sprs)
{
	/* TAR is very fast */
	mtspr(SPRN_TAR, vcpu->arch.tar);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    current->thread.vrsave != vcpu->arch.vrsave)
		mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif

	if (vcpu->arch.hfscr & HFSCR_EBB) {
		if (current->thread.ebbhr != vcpu->arch.ebbhr)
			mtspr(SPRN_EBBHR, vcpu->arch.ebbhr);
		if (current->thread.ebbrr != vcpu->arch.ebbrr)
			mtspr(SPRN_EBBRR, vcpu->arch.ebbrr);
		if (current->thread.bescr != vcpu->arch.bescr)
			mtspr(SPRN_BESCR, vcpu->arch.bescr);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    current->thread.tidr != vcpu->arch.tid)
		mtspr(SPRN_TIDR, vcpu->arch.tid);
	if (host_os_sprs->iamr != vcpu->arch.iamr)
		mtspr(SPRN_IAMR, vcpu->arch.iamr);
	if (host_os_sprs->amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, vcpu->arch.amr);
	if (vcpu->arch.uamor != 0)
		mtspr(SPRN_UAMOR, vcpu->arch.uamor);
	if (current->thread.fscr != vcpu->arch.fscr)
		mtspr(SPRN_FSCR, vcpu->arch.fscr);
	if (current->thread.dscr != vcpu->arch.dscr)
		mtspr(SPRN_DSCR, vcpu->arch.dscr);
	if (vcpu->arch.pspb != 0)
		mtspr(SPRN_PSPB, vcpu->arch.pspb);

	/*
	 * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI]
	 * clear (or hstate set appropriately to catch those registers
	 * being clobbered if we take an MCE or SRESET), so those are done
	 * later.
	 */

	if (!(vcpu->arch.ctrl & 1))
		mtspr(SPRN_CTRLT, 0);
}
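
/* Save the guest SPR values into the vcpu; pairs with load_spr_state(). */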
static void store_spr_state(struct kvm_vcpu *vcpu)
{
	vcpu->arch.tar = mfspr(SPRN_TAR);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif

	if (vcpu->arch.hfscr & HFSCR_EBB) {
		vcpu->arch.ebbhr = mfspr(SPRN_EBBHR);
		vcpu->arch.ebbrr = mfspr(SPRN_EBBRR);
		vcpu->arch.bescr = mfspr(SPRN_BESCR);
	}

	if (cpu_has_feature(CPU_FTR_P9_TIDR))
		vcpu->arch.tid = mfspr(SPRN_TIDR);
	vcpu->arch.iamr = mfspr(SPRN_IAMR);
	vcpu->arch.amr = mfspr(SPRN_AMR);
	vcpu->arch.uamor = mfspr(SPRN_UAMOR);
	vcpu->arch.fscr = mfspr(SPRN_FSCR);
	vcpu->arch.dscr = mfspr(SPRN_DSCR);
	vcpu->arch.pspb = mfspr(SPRN_PSPB);

	vcpu->arch.ctrl = mfspr(SPRN_CTRLF);
}

/* Returns true if current MSR and/or guest MSR may have changed */
bool load_vcpu_state(struct kvm_vcpu *vcpu,
		     struct p9_host_os_sprs *host_os_sprs)
{
	bool ret = false;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		unsigned long guest_msr = vcpu->arch.shregs.msr;
		if (MSR_TM_ACTIVE(guest_msr)) {
			kvmppc_restore_tm_hv(vcpu, guest_msr, true);
			ret = true;
		} else if (vcpu->arch.hfscr & HFSCR_TM) {
			mtspr(SPRN_TEXASR, vcpu->arch.texasr);
			mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
			mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
		}
	}
#endif

	load_spr_state(vcpu, host_os_sprs);

	load_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
	load_vr_state(&vcpu->arch.vr);
#endif

	return ret;
}
EXPORT_SYMBOL_GPL(load_vcpu_state);
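
/* Save guest SPR, FP/VMX, and TM state on exit; pairs with load_vcpu_state(). */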
void store_vcpu_state(struct kvm_vcpu *vcpu)
{
	store_spr_state(vcpu);

	store_fp_state(&vcpu->arch.fp);
#ifdef CONFIG_ALTIVEC
	store_vr_state(&vcpu->arch.vr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	if (cpu_has_feature(CPU_FTR_TM) ||
	    cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		unsigned long guest_msr = vcpu->arch.shregs.msr;
		if (MSR_TM_ACTIVE(guest_msr)) {
			kvmppc_save_tm_hv(vcpu, guest_msr, true);
		} else if (vcpu->arch.hfscr & HFSCR_TM) {
			vcpu->arch.texasr = mfspr(SPRN_TEXASR);
			vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
			vcpu->arch.tfiar = mfspr(SPRN_TFIAR);

			if (!vcpu->arch.nested) {
				vcpu->arch.load_tm++; /* see load_ebb comment */
				if (!vcpu->arch.load_tm)
					vcpu->arch.hfscr &= ~HFSCR_TM;
			}
		}
	}
#endif
}
EXPORT_SYMBOL_GPL(store_vcpu_state);
void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs)
{
	host_os_sprs->iamr = mfspr(SPRN_IAMR);
	host_os_sprs->amr = mfspr(SPRN_AMR);
}
EXPORT_SYMBOL_GPL(save_p9_host_os_sprs);

/* vcpu guest regs must already be saved */
void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu,
			     struct p9_host_os_sprs *host_os_sprs)
{
	/*
	 * current->thread.xxx registers must all be restored to host
	 * values before a potential context switch, otherwise the context
	 * switch itself will overwrite current->thread.xxx with the values
	 * from the guest SPRs.
	 */

	mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso);

	if (cpu_has_feature(CPU_FTR_P9_TIDR) &&
	    current->thread.tidr != vcpu->arch.tid)
		mtspr(SPRN_TIDR, current->thread.tidr);
	if (host_os_sprs->iamr != vcpu->arch.iamr)
		mtspr(SPRN_IAMR, host_os_sprs->iamr);
	if (vcpu->arch.uamor != 0)
		mtspr(SPRN_UAMOR, 0);
	if (host_os_sprs->amr != vcpu->arch.amr)
		mtspr(SPRN_AMR, host_os_sprs->amr);
	if (current->thread.fscr != vcpu->arch.fscr)
		mtspr(SPRN_FSCR, current->thread.fscr);
	if (current->thread.dscr != vcpu->arch.dscr)
		mtspr(SPRN_DSCR, current->thread.dscr);
	if (vcpu->arch.pspb != 0)
		mtspr(SPRN_PSPB, 0);

	/* Set host runlatch back to 1 (guest CTRL was saved in store_spr_state()) */
	if (!(vcpu->arch.ctrl & 1))
		mtspr(SPRN_CTRLT, 1);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC) &&
	    vcpu->arch.vrsave != current->thread.vrsave)
		mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif
	if (vcpu->arch.hfscr & HFSCR_EBB) {
		if (vcpu->arch.bescr != current->thread.bescr)
			mtspr(SPRN_BESCR, current->thread.bescr);
		if (vcpu->arch.ebbhr != current->thread.ebbhr)
			mtspr(SPRN_EBBHR, current->thread.ebbhr);
		if (vcpu->arch.ebbrr != current->thread.ebbrr)
			mtspr(SPRN_EBBRR, current->thread.ebbrr);

		if (!vcpu->arch.nested) {
			/*
			 * This is like load_fp in context switching: turn off
			 * the facility after the counter wraps the u8, to try
			 * to avoid saving and restoring the registers on each
			 * partition switch.
			 */
			vcpu->arch.load_ebb++;
			if (!vcpu->arch.load_ebb)
				vcpu->arch.hfscr &= ~HFSCR_EBB;
		}
	}

	if (vcpu->arch.tar != current->thread.tar)
		mtspr(SPRN_TAR, current->thread.tar);
}
EXPORT_SYMBOL_GPL(restore_p9_host_os_sprs);

#ifdef CONFIG_KVM_BOOK3S_HV_P9_TIMING
void accumulate_time(struct kvm_vcpu *vcpu, struct kvmhv_tb_accumulator *next)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	struct kvmhv_tb_accumulator *curr;
	u64 tb = mftb() - vc->tb_offset_applied;
	u64 prev_tb;
	u64 delta;
	u64 seq;

	curr = vcpu->arch.cur_activity;
	vcpu->arch.cur_activity = next;
	prev_tb = vcpu->arch.cur_tb_start;
	vcpu->arch.cur_tb_start = tb;

	if (!curr)
		return;

	delta = tb - prev_tb;

	/* Odd seqcount marks the accumulator as being updated */
	seq = curr->seqcount;
	curr->seqcount = seq + 1;
	smp_wmb();
	curr->tb_total += delta;
	if (seq == 0 || delta < curr->tb_min)
		curr->tb_min = delta;
	if (delta > curr->tb_max)
		curr->tb_max = delta;
	smp_wmb();
	curr->seqcount = seq + 2;
}
EXPORT_SYMBOL_GPL(accumulate_time);
#endif
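
/*
 * Real-mode SLB accessors: slbmfev/slbmfee read the VSID and ESID halves
 * of an SLB entry, slbmte writes an entry back.
 */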
static inline u64 mfslbv(unsigned int idx)
{
	u64 slbev;

	asm volatile("slbmfev %0,%1" : "=r" (slbev) : "r" (idx));

	return slbev;
}

static inline u64 mfslbe(unsigned int idx)
{
	u64 slbee;

	asm volatile("slbmfee %0,%1" : "=r" (slbee) : "r" (idx));

	return slbee;
}

static inline void mtslb(u64 slbee, u64 slbev)
{
	asm volatile("slbmte %0,%1" :: "r" (slbev), "r" (slbee));
}

static inline void clear_slb_entry(unsigned int idx)
{
	mtslb(idx, 0);
}

static inline void slb_clear_invalidate_partition(void)
{
	clear_slb_entry(0);
	asm volatile(PPC_SLBIA(6));
}

/*
 * Malicious or buggy radix guests may have inserted SLB entries
 * (only 0..3 because radix always runs with UPRT=1), so these must
 * be cleared here to avoid side-channels. slbmte is used rather
 * than slbia, as it won't clear cached translations.
 */
static void radix_clear_slb(void)
{
	int i;

	for (i = 0; i < 4; i++)
		clear_slb_entry(i);
}
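
/* Switch LPIDR/LPCR/PIDR from the host context to the radix guest's values. */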
static void switch_mmu_to_guest_radix(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
	struct kvm_nested_guest *nested = vcpu->arch.nested;
	u32 lpid;
	u32 pid;

	lpid = nested ? nested->shadow_lpid : kvm->arch.lpid;
	pid = vcpu->arch.pid;

	/*
	 * Prior memory accesses to host PID Q3 must be completed before we
	 * start switching, and stores must be drained to avoid not-my-LPAR
	 * logic (see switch_mmu_to_host).
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, pid);
	/*
	 * isync not required here because we are HRFID'ing to guest before
	 * any guest context access, which is context synchronising.
	 */
}

static void switch_mmu_to_guest_hpt(struct kvm *kvm, struct kvm_vcpu *vcpu, u64 lpcr)
{
	u32 lpid;
	u32 pid;
	int i;

	lpid = kvm->arch.lpid;
	pid = vcpu->arch.pid;

	/*
	 * See switch_mmu_to_guest_radix. ptesync should not be required here
	 * even if the host is in HPT mode because speculative accesses would
	 * not cause RC updates (we are in real mode).
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	mtspr(SPRN_PID, pid);

	for (i = 0; i < vcpu->arch.slb_max; i++)
		mtslb(vcpu->arch.slb[i].orige, vcpu->arch.slb[i].origv);
	/*
	 * isync not required here, see switch_mmu_to_guest_radix.
	 */
}

static void switch_mmu_to_host(struct kvm *kvm, u32 pid)
{
	u32 lpid = kvm->arch.host_lpid;
	u64 lpcr = kvm->arch.host_lpcr;

	/*
	 * The guest has exited, so guest MMU context is no longer being
	 * non-speculatively accessed, but a hwsync is needed before the
	 * mtLPIDR / mtPIDR switch, in order to ensure all stores are drained,
	 * so the not-my-LPAR tlbie logic does not overlook them.
	 */
	asm volatile("hwsync" ::: "memory");
	isync();
	mtspr(SPRN_PID, pid);
	mtspr(SPRN_LPID, lpid);
	mtspr(SPRN_LPCR, lpcr);
	/*
	 * isync is not required after the switch, because mtmsrd with L=0
	 * is performed after this switch, which is context synchronising.
	 */

	if (!radix_enabled())
		slb_restore_bolted_realmode();
}

static void save_clear_host_mmu(struct kvm *kvm)
{
	if (!radix_enabled()) {
		/*
		 * Hash host could save and restore host SLB entries to
		 * reduce SLB fault overheads of VM exits, but for now the
		 * existing code clears all entries and restores just the
		 * bolted ones when switching back to host.
		 */
		slb_clear_invalidate_partition();
	}
}
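
/*
 * For a hash guest, save its valid SLB entries into the vcpu and then
 * clear and invalidate the SLB; for a radix guest, just clear the SLB.
 */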
static void save_clear_guest_mmu(struct kvm *kvm, struct kvm_vcpu *vcpu)
{
	if (kvm_is_radix(kvm)) {
		radix_clear_slb();
	} else {
		int i;
		int nr = 0;

		/*
		 * This must run before switching to host (radix host can't
		 * access all SLBs).
		 */
		for (i = 0; i < vcpu->arch.slb_nr; i++) {
			u64 slbee, slbev;

			slbee = mfslbe(i);
			if (slbee & SLB_ESID_V) {
				slbev = mfslbv(i);
				vcpu->arch.slb[nr].orige = slbee | i;
				vcpu->arch.slb[nr].origv = slbev;
				nr++;
			}
		}
		vcpu->arch.slb_max = nr;
		slb_clear_invalidate_partition();
	}
}
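
/*
 * Flush this LPID's entries from the TLB, set by set, using tlbiel.
 * For radix, set 0 is flushed with RIC=2 (everything, including the
 * page-walk cache) and the remaining sets with RIC=0 (TLB only).
 */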
static void flush_guest_tlb(struct kvm *kvm)
{
	unsigned long rb, set;

	rb = PPC_BIT(52);	/* IS = 2 */
	if (kvm_is_radix(kvm)) {
		/* R=1 PRS=1 RIC=2 */
		asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
			     : : "r" (rb), "i" (1), "i" (1), "i" (2),
			       "r" (0) : "memory");
		for (set = 1; set < kvm->arch.tlb_sets; ++set) {
			rb += PPC_BIT(51);	/* increment set number */
			/* R=1 PRS=1 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (1), "i" (1), "i" (0),
				       "r" (0) : "memory");
		}
		asm volatile("ptesync": : :"memory");
		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
		asm volatile(PPC_RADIX_INVALIDATE_ERAT_GUEST : : :"memory");
	} else {
		for (set = 0; set < kvm->arch.tlb_sets; ++set) {
			/* R=0 PRS=0 RIC=0 */
			asm volatile(PPC_TLBIEL(%0, %4, %3, %2, %1)
				     : : "r" (rb), "i" (0), "i" (0), "i" (0),
				       "r" (0) : "memory");
			rb += PPC_BIT(51);	/* increment set number */
		}
		asm volatile("ptesync": : :"memory");
		// POWER9 congruence-class TLBIEL leaves ERAT. Flush it now.
		asm volatile(PPC_ISA_3_0_INVALIDATE_ERAT : : :"memory");
	}
}
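
/*
 * Flush the guest TLB if this physical CPU's bit is set in the LPID's
 * need_tlb_flush mask. The TLB is shared between the threads of a core,
 * so the threads coordinate so that (ideally) only one does the flush.
 */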
static void check_need_tlb_flush(struct kvm *kvm, int pcpu,
				 struct kvm_nested_guest *nested)
{
	cpumask_t *need_tlb_flush;
	bool all_set = true;
	int i;

	if (nested)
		need_tlb_flush = &nested->need_tlb_flush;
	else
		need_tlb_flush = &kvm->arch.need_tlb_flush;

	if (likely(!cpumask_test_cpu(pcpu, need_tlb_flush)))
		return;

	/*
	 * Individual threads can come in here, but the TLB is shared between
	 * the 4 threads in a core, hence invalidating on one thread
	 * invalidates for all, so only invalidate the first time (if all bits
	 * were set). The others must still execute a ptesync.
	 *
	 * If a race occurs and two threads do the TLB flush, that is not a
	 * problem, just sub-optimal.
	 */
	for (i = cpu_first_tlb_thread_sibling(pcpu);
			i <= cpu_last_tlb_thread_sibling(pcpu);
			i += cpu_tlb_thread_sibling_step()) {
		if (!cpumask_test_cpu(i, need_tlb_flush)) {
			all_set = false;
			break;
		}
	}
	if (all_set)
		flush_guest_tlb(kvm);
	else
		asm volatile("ptesync" ::: "memory");

	/* Clear the bit after the TLB flush */
	cpumask_clear_cpu(pcpu, need_tlb_flush);
}

unsigned long kvmppc_msr_hard_disable_set_facilities(struct kvm_vcpu *vcpu, unsigned long msr)
{
	unsigned long msr_needed = 0;

	msr &= ~MSR_EE;

	/* MSR bits may have been cleared by context switch so must recheck */
	if (IS_ENABLED(CONFIG_PPC_FPU))
		msr_needed |= MSR_FP;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		msr_needed |= MSR_VEC;
	if (cpu_has_feature(CPU_FTR_VSX))
		msr_needed |= MSR_VSX;
	if ((cpu_has_feature(CPU_FTR_TM) ||
	     cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) &&
	    (vcpu->arch.hfscr & HFSCR_TM))
		msr_needed |= MSR_TM;

	/*
	 * This could be combined with MSR[RI] clearing, but that expands
	 * the unrecoverable window. It would be better to cover unrecoverable
	 * with KVM bad interrupt handling rather than use MSR[RI] at all.
	 *
	 * Much more difficult and less worthwhile to combine with IR/DR
	 * disable.
	 */
	if ((msr & msr_needed) != msr_needed) {
		msr |= msr_needed;
		__mtmsrd(msr, 0);
	} else {
		__hard_irq_disable();
	}
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	return msr;
}
EXPORT_SYMBOL_GPL(kvmppc_msr_hard_disable_set_facilities);
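
/*
 * The low-level P9 guest entry/exit path: save host state, switch the
 * timebase, MMU and PMU over to the guest, enter the guest via
 * kvmppc_p9_enter_guest(), then undo everything in roughly reverse
 * order and return the trap that caused the exit.
 */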
int kvmhv_vcpu_entry_p9(struct kvm_vcpu *vcpu, u64 time_limit, unsigned long lpcr, u64 *tb)
{
	struct p9_host_os_sprs host_os_sprs;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_nested_guest *nested = vcpu->arch.nested;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	s64 hdec, dec;
	u64 purr, spurr;
	u64 *exsave;
	int trap;
	unsigned long msr;
	unsigned long host_hfscr;
	unsigned long host_ciabr;
	unsigned long host_dawr0;
	unsigned long host_dawrx0;
	unsigned long host_psscr;
	unsigned long host_hpsscr;
	unsigned long host_pidr;
	unsigned long host_dawr1;
	unsigned long host_dawrx1;
	u64 dpdes;

	hdec = time_limit - *tb;
	if (hdec < 0)
		return BOOK3S_INTERRUPT_HV_DECREMENTER;

	WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_HV);
	WARN_ON_ONCE(!(vcpu->arch.shregs.msr & MSR_ME));

	start_timing(vcpu, &vcpu->arch.rm_entry);

	vcpu->arch.ceded = 0;

	/* Save MSR for restore, with EE clear. */
	msr = mfmsr() & ~MSR_EE;

	host_hfscr = mfspr(SPRN_HFSCR);
	host_ciabr = mfspr(SPRN_CIABR);
	host_psscr = mfspr(SPRN_PSSCR_PR);
	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST))
		host_hpsscr = mfspr(SPRN_PSSCR);
	host_pidr = mfspr(SPRN_PID);

	if (dawr_enabled()) {
		host_dawr0 = mfspr(SPRN_DAWR0);
		host_dawrx0 = mfspr(SPRN_DAWRX0);
		if (cpu_has_feature(CPU_FTR_DAWR1)) {
			host_dawr1 = mfspr(SPRN_DAWR1);
			host_dawrx1 = mfspr(SPRN_DAWRX1);
		}
	}

	local_paca->kvm_hstate.host_purr = mfspr(SPRN_PURR);
	local_paca->kvm_hstate.host_spurr = mfspr(SPRN_SPURR);

	save_p9_host_os_sprs(&host_os_sprs);

	msr = kvmppc_msr_hard_disable_set_facilities(vcpu, msr);
	if (lazy_irq_pending()) {
		trap = 0;
		goto out;
	}

	if (unlikely(load_vcpu_state(vcpu, &host_os_sprs)))
		msr = mfmsr(); /* MSR may have been updated */

	if (vc->tb_offset) {
		u64 new_tb = *tb + vc->tb_offset;
		mtspr(SPRN_TBU40, new_tb);
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		*tb = new_tb;
		vc->tb_offset_applied = vc->tb_offset;
	}

	mtspr(SPRN_VTB, vc->vtb);
	mtspr(SPRN_PURR, vcpu->arch.purr);
	mtspr(SPRN_SPURR, vcpu->arch.spurr);

	if (vc->pcr)
		mtspr(SPRN_PCR, vc->pcr | PCR_MASK);
	if (vcpu->arch.doorbell_request) {
		vcpu->arch.doorbell_request = 0;
		mtspr(SPRN_DPDES, 1);
	}

	if (dawr_enabled()) {
		if (vcpu->arch.dawr0 != host_dawr0)
			mtspr(SPRN_DAWR0, vcpu->arch.dawr0);
		if (vcpu->arch.dawrx0 != host_dawrx0)
			mtspr(SPRN_DAWRX0, vcpu->arch.dawrx0);
		if (cpu_has_feature(CPU_FTR_DAWR1)) {
			if (vcpu->arch.dawr1 != host_dawr1)
				mtspr(SPRN_DAWR1, vcpu->arch.dawr1);
			if (vcpu->arch.dawrx1 != host_dawrx1)
				mtspr(SPRN_DAWRX1, vcpu->arch.dawrx1);
		}
	}
	if (vcpu->arch.ciabr != host_ciabr)
		mtspr(SPRN_CIABR, vcpu->arch.ciabr);

	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		mtspr(SPRN_PSSCR, vcpu->arch.psscr | PSSCR_EC |
		      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
	} else {
		if (vcpu->arch.psscr != host_psscr)
			mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr);
	}

	mtspr(SPRN_HFSCR, vcpu->arch.hfscr);

	mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
	mtspr(SPRN_HSRR1, (vcpu->arch.shregs.msr & ~MSR_HV) | MSR_ME);

	/*
	 * On POWER9 DD2.1 and below, sometimes on a Hypervisor Data Storage
	 * Interrupt (HDSI) the HDSISR is not updated at all.
	 *
	 * To work around this we put a canary value into the HDSISR before
	 * returning to a guest and then check for this canary when we take a
	 * HDSI. If we find the canary on a HDSI, we know the hardware didn't
	 * update the HDSISR. In this case we return to the guest to retake
	 * the HDSI, which should correctly update the HDSISR the second time
	 * around.
	 *
	 * The "radix prefetch bug" test can be used to test for this bug, as
	 * it also exists for DD2.1 and below.
	 */
	if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
		mtspr(SPRN_HDSISR, HDSISR_CANARY);

	mtspr(SPRN_SPRG0, vcpu->arch.shregs.sprg0);
	mtspr(SPRN_SPRG1, vcpu->arch.shregs.sprg1);
	mtspr(SPRN_SPRG2, vcpu->arch.shregs.sprg2);
	mtspr(SPRN_SPRG3, vcpu->arch.shregs.sprg3);

	/*
	 * It might be preferable to load_vcpu_state here, in order to get the
	 * GPR/FP register loads executing in parallel with the previous mtSPR
	 * instructions, but for now that can't be done because the TM handling
	 * in load_vcpu_state can change some SPRs and vcpu state (nip, msr).
	 * But TM could be split out if this would be a significant benefit.
	 */

	/*
	 * MSR[RI] does not need to be cleared (and is not, for radix guests
	 * with no prefetch bug), because in_guest is set. If we take a SRESET
	 * or MCE with in_guest set but still in HV mode, then
	 * kvmppc_p9_bad_interrupt handles the interrupt, which effectively
	 * clears MSR[RI] and doesn't return.
	 */
	WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_HV_P9);
	barrier(); /* Open in_guest critical section */

	/*
	 * Hash host, hash guest, or radix guest with prefetch bug, all have
	 * to disable the MMU before switching to guest MMU state.
	 */
	if (!radix_enabled() || !kvm_is_radix(kvm) ||
	    cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG))
		__mtmsrd(msr & ~(MSR_IR|MSR_DR|MSR_RI), 0);

	save_clear_host_mmu(kvm);

	if (kvm_is_radix(kvm))
		switch_mmu_to_guest_radix(kvm, vcpu, lpcr);
	else
		switch_mmu_to_guest_hpt(kvm, vcpu, lpcr);

	/* TLBIEL uses LPID=LPIDR, so run this after setting guest LPID */
	check_need_tlb_flush(kvm, vc->pcpu, nested);

	/*
	 * P9 suppresses the HDEC exception when LPCR[HDICE] = 0,
	 * so set guest LPCR (with HDICE) before writing HDEC.
	 */
	mtspr(SPRN_HDEC, hdec);

	mtspr(SPRN_DEC, vcpu->arch.dec_expires - *tb);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
tm_return_to_guest:
#endif
	mtspr(SPRN_DAR, vcpu->arch.shregs.dar);
	mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr);
	mtspr(SPRN_SRR0, vcpu->arch.shregs.srr0);
	mtspr(SPRN_SRR1, vcpu->arch.shregs.srr1);

	accumulate_time(vcpu, &vcpu->arch.guest_time);

	switch_pmu_to_guest(vcpu, &host_os_sprs);
	kvmppc_p9_enter_guest(vcpu);
	switch_pmu_to_host(vcpu, &host_os_sprs);

	accumulate_time(vcpu, &vcpu->arch.rm_intr);

	/* XXX: Could get these from r11/12 and paca exsave instead */
	vcpu->arch.shregs.srr0 = mfspr(SPRN_SRR0);
	vcpu->arch.shregs.srr1 = mfspr(SPRN_SRR1);
	vcpu->arch.shregs.dar = mfspr(SPRN_DAR);
	vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR);

	/* 0x2 bit for HSRR is only used by PR and P7/8 HV paths, clear it */
	trap = local_paca->kvm_hstate.scratch0 & ~0x2;

	if (likely(trap > BOOK3S_INTERRUPT_MACHINE_CHECK))
		exsave = local_paca->exgen;
	else if (trap == BOOK3S_INTERRUPT_SYSTEM_RESET)
		exsave = local_paca->exnmi;
	else /* trap == 0x200 */
		exsave = local_paca->exmc;

	vcpu->arch.regs.gpr[1] = local_paca->kvm_hstate.scratch1;
	vcpu->arch.regs.gpr[3] = local_paca->kvm_hstate.scratch2;

	/*
	 * After reading machine check regs (DAR, DSISR, SRR0/1) and hstate
	 * scratch (which we need to move into exsave to make re-entrant vs
	 * SRESET/MCE), register state is protected from reentrancy. However
	 * timebase, MMU, among other state is still set to guest, so don't
	 * enable MSR[RI] here. It gets enabled at the end, after in_guest
	 * is cleared.
	 *
	 * It is possible an NMI could come in here, which is why it is
	 * important to save the above state early so it can be debugged.
	 */

	vcpu->arch.regs.gpr[9] = exsave[EX_R9/sizeof(u64)];
	vcpu->arch.regs.gpr[10] = exsave[EX_R10/sizeof(u64)];
	vcpu->arch.regs.gpr[11] = exsave[EX_R11/sizeof(u64)];
	vcpu->arch.regs.gpr[12] = exsave[EX_R12/sizeof(u64)];
	vcpu->arch.regs.gpr[13] = exsave[EX_R13/sizeof(u64)];
	vcpu->arch.ppr = exsave[EX_PPR/sizeof(u64)];
	vcpu->arch.cfar = exsave[EX_CFAR/sizeof(u64)];
	vcpu->arch.regs.ctr = exsave[EX_CTR/sizeof(u64)];

	vcpu->arch.last_inst = KVM_INST_FETCH_FAILED;

	if (unlikely(trap == BOOK3S_INTERRUPT_MACHINE_CHECK)) {
		vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
		vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
		kvmppc_realmode_machine_check(vcpu);

	} else if (unlikely(trap == BOOK3S_INTERRUPT_HMI)) {
		kvmppc_p9_realmode_hmi_handler(vcpu);

	} else if (trap == BOOK3S_INTERRUPT_H_EMUL_ASSIST) {
		vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

	} else if (trap == BOOK3S_INTERRUPT_H_DATA_STORAGE) {
		vcpu->arch.fault_dar = exsave[EX_DAR/sizeof(u64)];
		vcpu->arch.fault_dsisr = exsave[EX_DSISR/sizeof(u64)];
		vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);

	} else if (trap == BOOK3S_INTERRUPT_H_INST_STORAGE) {
		vcpu->arch.fault_gpa = mfspr(SPRN_ASDR);

	} else if (trap == BOOK3S_INTERRUPT_H_FAC_UNAVAIL) {
		vcpu->arch.hfscr = mfspr(SPRN_HFSCR);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Softpatch interrupt for transactional memory emulation cases
	 * on POWER9 DD2.2. This is early in the guest exit path - we
	 * haven't saved registers or done a treclaim yet.
	 */
	} else if (trap == BOOK3S_INTERRUPT_HV_SOFTPATCH) {
		vcpu->arch.emul_inst = mfspr(SPRN_HEIR);

		/*
		 * The cases we want to handle here are those where the guest
		 * is in real suspend mode and is trying to transition to
		 * transactional mode.
		 */
		if (!local_paca->kvm_hstate.fake_suspend &&
		    (vcpu->arch.shregs.msr & MSR_TS_S)) {
			if (kvmhv_p9_tm_emulation_early(vcpu)) {
				/*
				 * Go straight back into the guest with the
				 * new NIP/MSR as set by TM emulation.
				 */
				mtspr(SPRN_HSRR0, vcpu->arch.regs.nip);
				mtspr(SPRN_HSRR1, vcpu->arch.shregs.msr);
				goto tm_return_to_guest;
			}
		}
#endif
	}

	accumulate_time(vcpu, &vcpu->arch.rm_exit);

	/* Advance host PURR/SPURR by the amount used by guest */
	purr = mfspr(SPRN_PURR);
	spurr = mfspr(SPRN_SPURR);
	local_paca->kvm_hstate.host_purr += purr - vcpu->arch.purr;
	local_paca->kvm_hstate.host_spurr += spurr - vcpu->arch.spurr;
	vcpu->arch.purr = purr;
	vcpu->arch.spurr = spurr;

	vcpu->arch.ic = mfspr(SPRN_IC);
	vcpu->arch.pid = mfspr(SPRN_PID);
	vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR);

	vcpu->arch.shregs.sprg0 = mfspr(SPRN_SPRG0);
	vcpu->arch.shregs.sprg1 = mfspr(SPRN_SPRG1);
	vcpu->arch.shregs.sprg2 = mfspr(SPRN_SPRG2);
	vcpu->arch.shregs.sprg3 = mfspr(SPRN_SPRG3);

	dpdes = mfspr(SPRN_DPDES);
	if (dpdes)
		vcpu->arch.doorbell_request = 1;

	vc->vtb = mfspr(SPRN_VTB);

	dec = mfspr(SPRN_DEC);
	if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */
		dec = (s32)dec;
	*tb = mftb();
	vcpu->arch.dec_expires = dec + *tb;

	if (vc->tb_offset_applied) {
		u64 new_tb = *tb - vc->tb_offset_applied;
		mtspr(SPRN_TBU40, new_tb);
		if ((mftb() & 0xffffff) < (new_tb & 0xffffff)) {
			new_tb += 0x1000000;
			mtspr(SPRN_TBU40, new_tb);
		}
		*tb = new_tb;
		vc->tb_offset_applied = 0;
	}

	save_clear_guest_mmu(kvm, vcpu);
	switch_mmu_to_host(kvm, host_pidr);

	/*
	 * Enable MSR here in order to have facilities enabled to save
	 * guest registers. This enables the MMU (if we were in realmode),
	 * so only do this after the MMU has been switched back to the host,
	 * to avoid the P9_RADIX_PREFETCH_BUG or hash guest context.
	 */
	if (IS_ENABLED(CONFIG_PPC_TRANSACTIONAL_MEM) &&
	    vcpu->arch.shregs.msr & MSR_TS_MASK)
		msr |= MSR_TM;
	__mtmsrd(msr, 0);

	store_vcpu_state(vcpu);

	mtspr(SPRN_PURR, local_paca->kvm_hstate.host_purr);
	mtspr(SPRN_SPURR, local_paca->kvm_hstate.host_spurr);

	if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) {
		/* Preserve PSSCR[FAKE_SUSPEND] until we've called kvmppc_save_tm_hv */
		mtspr(SPRN_PSSCR, host_hpsscr |
		      (local_paca->kvm_hstate.fake_suspend << PSSCR_FAKE_SUSPEND_LG));
	}

	mtspr(SPRN_HFSCR, host_hfscr);
	if (vcpu->arch.ciabr != host_ciabr)
		mtspr(SPRN_CIABR, host_ciabr);

	if (dawr_enabled()) {
		if (vcpu->arch.dawr0 != host_dawr0)
			mtspr(SPRN_DAWR0, host_dawr0);
		if (vcpu->arch.dawrx0 != host_dawrx0)
			mtspr(SPRN_DAWRX0, host_dawrx0);
		if (cpu_has_feature(CPU_FTR_DAWR1)) {
			if (vcpu->arch.dawr1 != host_dawr1)
				mtspr(SPRN_DAWR1, host_dawr1);
			if (vcpu->arch.dawrx1 != host_dawrx1)
				mtspr(SPRN_DAWRX1, host_dawrx1);
		}
	}

	if (dpdes)
		mtspr(SPRN_DPDES, 0);
	if (vc->pcr)
		mtspr(SPRN_PCR, PCR_MASK);

	/* HDEC must be at least as large as DEC, so decrementer_max fits */
	mtspr(SPRN_HDEC, decrementer_max);

	timer_rearm_host_dec(*tb);

	restore_p9_host_os_sprs(vcpu, &host_os_sprs);

	barrier(); /* Close in_guest critical section */
	WRITE_ONCE(local_paca->kvm_hstate.in_guest, KVM_GUEST_MODE_NONE);
	/* Interrupts are recoverable at this point */

	/*
	 * cp_abort is required if the processor supports local copy-paste
	 * to clear the copy buffer that was under control of the guest.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		asm volatile(PPC_CP_ABORT);

out:
	end_timing(vcpu);

	return trap;
}
EXPORT_SYMBOL_GPL(kvmhv_vcpu_entry_p9);