/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */
#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * include order mess. */
#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif
/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to implement
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00
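/*
 * Illustrative note (not part of the original header): when the emulator
 * fetches this opcode from the guest, it can abandon normal emulation and
 * exit to user space with KVM_EXIT_DEBUG, so the debugger that planted the
 * breakpoint regains control.
 */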
enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rt, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
		unsigned int rs, unsigned int bytes, int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);
/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);
extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);
extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
		struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
		struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
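/*
 * Descriptive note (added for clarity): kvmppc_ioba_validate() maps the
 * iommu_tce_check_ioba() result onto hcall return codes — H_PARAMETER if
 * the range [ioba, ioba + npages pages) does not fit inside the table's
 * window of (stt)->size pages starting at page offset (stt)->offset,
 * H_SUCCESS otherwise.
 */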
extern long kvmppc_tce_to_ua(struct kvm *kvm, unsigned long tce,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);
union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}
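/*
 * Typical use in an emulation path (illustrative sketch, not from this
 * header):
 *
 *	u32 inst;
 *	int emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *	if (emulated != EMULATE_DONE)
 *		return emulated;
 *	... decode and emulate "inst" here ...
 */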
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);
/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}
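/*
 * Worked example (illustrative): with a 32-bit instruction held in the
 * low word of "inst", e.g. inst = 0x7c642a14 (add r3,r4,r5), the primary
 * opcode occupies doubleword bits 32-37 and the RT field bits 38-42:
 *
 *	kvmppc_get_field(inst, 32, 37) == 31   (primary opcode of "add")
 *	kvmppc_get_field(inst, 38, 42) == 3    (RT = r3)
 */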
/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
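/*
 * Worked example (illustrative), continuing the one above: rewriting the
 * RT field (doubleword bits 38-42) of 0x7c642a14 (add r3,r4,r5) to r10
 * yields "add r10,r4,r5":
 *
 *	kvmppc_set_field(0x7c642a14, 38, 42, 10) == 0x7d442a14
 */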
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
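/*
 * Usage sketch (illustrative, not from this header): a get_one_reg
 * implementation fills the union member matching the size encoded in the
 * register id, e.g.:
 *
 *	case KVM_REG_PPC_DSISR:
 *		*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
 *		break;
 *
 * and the corresponding set path reads it back with
 * kvmppc_set_dsisr(vcpu, set_reg_val(id, *val)).
 */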
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);
#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif
#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}
extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif
#ifdef CONFIG_KVM_XIVE
/*
 * Below the first "xive" is the "eXternal Interrupt Virtualization Engine"
 * ie. P9 new interrupt controller, while the second "xive" is the legacy
 * "eXternal Interrupt Vector Entry" which is the configuration of an
 * interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);
extern void kvmppc_xive_init_module(void);
extern void kvmppc_xive_exit_module(void);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  struct irq_desc *host_desc);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }
static inline void kvmppc_xive_init_module(void) { }
static inline void kvmppc_xive_exit_module(void) { }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
					 struct irq_desc *host_desc) { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval) { return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
				      int level, bool line_status) { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
#endif /* CONFIG_KVM_XIVE */
/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}
#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */
int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);
static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}
/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}
#define SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
{								\
	return mfspr(bookehv_spr);				\
}								\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)			\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{								\
	mtspr(bookehv_spr, val);				\
}								\

#define SHARED_WRAPPER_GET(reg, size)				\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
{								\
	if (kvmppc_shared_big_endian(vcpu))			\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else							\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}								\

#define SHARED_WRAPPER_SET(reg, size)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{								\
	if (kvmppc_shared_big_endian(vcpu))			\
		vcpu->arch.shared->reg = cpu_to_be##size(val);	\
	else							\
		vcpu->arch.shared->reg = cpu_to_le##size(val);	\
}								\

#define SHARED_WRAPPER(reg, size)				\
	SHARED_WRAPPER_GET(reg, size)				\
	SHARED_WRAPPER_SET(reg, size)				\

#define SPRNG_WRAPPER(reg, bookehv_spr)				\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)			\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
	SPRNG_WRAPPER(reg, bookehv_spr)				\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
	SHARED_WRAPPER(reg, size)				\

#endif
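/*
 * For example (illustrative), SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
 * below defines kvmppc_get_srr0()/kvmppc_set_srr0(). With
 * CONFIG_KVM_BOOKE_HV these expand to mfspr/mtspr on the guest SPR
 * SPRN_GSRR0; otherwise they access vcpu->arch.shared->srr0, byteswapped
 * according to kvmppc_shared_big_endian().
 */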
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}
/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
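/*
 * Worked example (illustrative): for an indexed-form access such as
 * "lwzx rt,ra,rb" with ra = 3 and rb = 4, this returns GPR(3) + GPR(4);
 * with ra = 0 the base register is omitted and the result is just GPR(4).
 * The final truncation models 32-bit mode: when the guest MSR's 64-bit
 * bit (MSR_SF on Book3S, MSR_CM on Book3E) is clear, the effective
 * address wraps to 32 bits.
 */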
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */