// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2017-2019, IBM Corporation.
 */

#define pr_fmt(fmt) "xive-kvm: " fmt

#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/gfp.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/file.h>
#include <asm/uaccess.h>
#include <asm/kvm_book3s.h>
#include <asm/kvm_ppc.h>
#include <asm/hvcall.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/debug.h>
#include <asm/debugfs.h>
#include <asm/opal.h>

#include <linux/debugfs.h>
#include <linux/seq_file.h>

#include "book3s_xive.h"

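/*
 * An ESB load from a source management page returns the current PQ
 * state of the source in the low byte. The helper below is a plain
 * MMIO load; the special ESB offsets (XIVE_ESB_SET_PQ_01, ...) make
 * the hardware update the PQ bits as a side effect of the load.
 */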
static u8 xive_vm_esb_load(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/*
	 * The KVM XIVE native device does not use the XIVE_ESB_SET_PQ_10
	 * load operation, so there is no need to enforce load-after-store
	 * ordering.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = in_be64(xd->eoi_mmio + offset);
	return (u8)val;
}

static void kvmppc_xive_native_cleanup_queue(struct kvm_vcpu *vcpu, int prio)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct xive_q *q = &xc->queues[prio];

	xive_native_disable_queue(xc->vp_id, q, prio);
	if (q->qpage) {
		put_page(virt_to_page(q->qpage));
		q->qpage = NULL;
	}
}

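/*
 * Wrapper around xive_native_configure_queue() which releases the
 * previously pinned queue page once the new configuration has been
 * accepted. The old page reference is dropped only on success so a
 * failed reconfiguration leaves the current queue intact.
 */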
static int kvmppc_xive_native_configure_queue(u32 vp_id, struct xive_q *q,
					      u8 prio, __be32 *qpage,
					      u32 order, bool can_escalate)
{
	int rc;
	__be32 *qpage_prev = q->qpage;

	rc = xive_native_configure_queue(vp_id, q, prio, qpage, order,
					 can_escalate);
	if (rc)
		return rc;

	if (qpage_prev)
		put_page(virt_to_page(qpage_prev));

	return rc;
}

void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	int i;

	if (!kvmppc_xive_enabled(vcpu))
		return;

	if (!xc)
		return;

	pr_devel("native_cleanup_vcpu(cpu=%d)\n", xc->server_num);

	/* Ensure no interrupt is still routed to that VP */
	xc->valid = false;
	kvmppc_xive_disable_vcpu_interrupts(vcpu);

	/* Free escalations */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		/* Free the escalation irq */
		if (xc->esc_virq[i]) {
			if (xc->xive->single_escalation)
				xive_cleanup_single_escalation(vcpu, xc,
							       xc->esc_virq[i]);
			free_irq(xc->esc_virq[i], vcpu);
			irq_dispose_mapping(xc->esc_virq[i]);
			kfree(xc->esc_virq_names[i]);
			xc->esc_virq[i] = 0;
		}
	}

	/* Disable the VP */
	xive_native_disable_vp(xc->vp_id);

	/* Clear the cam word so guest entry won't try to push context */
	vcpu->arch.xive_cam_word = 0;

	/* Free the queues */
	for (i = 0; i < KVMPPC_XIVE_Q_COUNT; i++) {
		kvmppc_xive_native_cleanup_queue(vcpu, i);
	}

	/* Free the VP */
	kfree(xc);

	/* Cleanup the vcpu */
	vcpu->arch.irq_type = KVMPPC_IRQ_DEFAULT;
	vcpu->arch.xive_vcpu = NULL;
}

int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 server_num)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_vcpu *xc = NULL;
	int rc;
	u32 vp_id;

	pr_devel("native_connect_vcpu(server=%d)\n", server_num);

	if (dev->ops != &kvm_xive_native_ops) {
		pr_devel("Wrong ops !\n");
		return -EPERM;
	}
	if (xive->kvm != vcpu->kvm)
		return -EPERM;
	if (vcpu->arch.irq_type != KVMPPC_IRQ_DEFAULT)
		return -EBUSY;

	mutex_lock(&xive->lock);

	rc = kvmppc_xive_compute_vp_id(xive, server_num, &vp_id);
	if (rc)
		goto bail;

	xc = kzalloc(sizeof(*xc), GFP_KERNEL);
	if (!xc) {
		rc = -ENOMEM;
		goto bail;
	}

	vcpu->arch.xive_vcpu = xc;
	xc->xive = xive;
	xc->vcpu = vcpu;
	xc->server_num = server_num;
	xc->vp_id = vp_id;
	xc->valid = true;
	vcpu->arch.irq_type = KVMPPC_IRQ_XIVE;

	rc = xive_native_get_vp_info(xc->vp_id, &xc->vp_cam, &xc->vp_chip_id);
	if (rc) {
		pr_err("Failed to get VP info from OPAL: %d\n", rc);
		goto bail;
	}

	/*
	 * Enable the VP first as the single escalation mode will
	 * affect the escalation interrupt numbering
	 */
	rc = xive_native_enable_vp(xc->vp_id, xive->single_escalation);
	if (rc) {
		pr_err("Failed to enable VP in OPAL: %d\n", rc);
		goto bail;
	}

	/* Configure VCPU fields for use by assembly push/pull */
	vcpu->arch.xive_saved_state.w01 = cpu_to_be64(0xff000000);
	vcpu->arch.xive_cam_word = cpu_to_be32(xc->vp_cam | TM_QW1W2_VO);

	/* TODO: reset all queues to a clean state ? */
bail:
	mutex_unlock(&xive->lock);
	if (rc)
		kvmppc_xive_native_cleanup_vcpu(vcpu);

	return rc;
}

/*
 * Device passthrough support
 */
static int kvmppc_xive_native_reset_mapped(struct kvm *kvm, unsigned long irq)
{
	struct kvmppc_xive *xive = kvm->arch.xive;
	pgoff_t esb_pgoff = KVM_XIVE_ESB_PAGE_OFFSET + irq * 2;

	if (irq >= KVMPPC_XIVE_NR_IRQS)
		return -EINVAL;

	/*
	 * Clear the ESB pages of the IRQ number being mapped (or
	 * unmapped) into the guest and let the VM fault handler
	 * repopulate with the appropriate ESB pages (device or IC)
	 */
	pr_debug("clearing esb pages for girq 0x%lx\n", irq);
	mutex_lock(&xive->mapping_lock);
	if (xive->mapping)
		unmap_mapping_range(xive->mapping,
				    esb_pgoff << PAGE_SHIFT,
				    2ull << PAGE_SHIFT, 1);
	mutex_unlock(&xive->mapping_lock);

	return 0;
}

static struct kvmppc_xive_ops kvmppc_xive_native_ops = {
	.reset_mapped = kvmppc_xive_native_reset_mapped,
};

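/*
 * The device fd supports two mmap regions at fixed page offsets: the
 * thread interrupt management area (TIMA) pages at
 * KVM_XIVE_TIMA_PAGE_OFFSET and the per source ESB pages at
 * KVM_XIVE_ESB_PAGE_OFFSET. The fault handlers below install the
 * backing HW pages on demand.
 */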
static vm_fault_t xive_native_esb_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;
	struct kvm_device *dev = vma->vm_file->private_data;
	struct kvmppc_xive *xive = dev->private;
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	u64 page;
	unsigned long irq;
	u64 page_offset;

	/*
	 * Linux/KVM uses a two pages ESB setting, one for trigger and
	 * one for EOI
	 */
	page_offset = vmf->pgoff - vma->vm_pgoff;
	irq = page_offset / 2;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb) {
		pr_devel("%s: source %lx not found !\n", __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	state = &sb->irq_state[src];
	kvmppc_xive_select_irq(state, &hw_num, &xd);

	arch_spin_lock(&sb->lock);

	/*
	 * first/even page is for trigger
	 * second/odd page is for EOI and management.
	 */
	page = page_offset % 2 ? xd->eoi_page : xd->trig_page;
	arch_spin_unlock(&sb->lock);

	if (WARN_ON(!page)) {
		pr_err("%s: accessing invalid ESB page for source %lx !\n",
		       __func__, irq);
		return VM_FAULT_SIGBUS;
	}

	vmf_insert_pfn(vma, vmf->address, page >> PAGE_SHIFT);
	return VM_FAULT_NOPAGE;
}

static const struct vm_operations_struct xive_native_esb_vmops = {
	.fault = xive_native_esb_fault,
};

static vm_fault_t xive_native_tima_fault(struct vm_fault *vmf)
{
	struct vm_area_struct *vma = vmf->vma;

	switch (vmf->pgoff - vma->vm_pgoff) {
	case 0: /* HW - forbid access */
	case 1: /* HV - forbid access */
		return VM_FAULT_SIGBUS;
	case 2: /* OS */
		vmf_insert_pfn(vma, vmf->address, xive_tima_os >> PAGE_SHIFT);
		return VM_FAULT_NOPAGE;
	case 3: /* USER - TODO */
	default:
		return VM_FAULT_SIGBUS;
	}
}

static const struct vm_operations_struct xive_native_tima_vmops = {
	.fault = xive_native_tima_fault,
};

static int kvmppc_xive_native_mmap(struct kvm_device *dev,
				   struct vm_area_struct *vma)
{
	struct kvmppc_xive *xive = dev->private;

	/* We only allow mappings at fixed offset for now */
	if (vma->vm_pgoff == KVM_XIVE_TIMA_PAGE_OFFSET) {
		if (vma_pages(vma) > 4)
			return -EINVAL;
		vma->vm_ops = &xive_native_tima_vmops;
	} else if (vma->vm_pgoff == KVM_XIVE_ESB_PAGE_OFFSET) {
		if (vma_pages(vma) > KVMPPC_XIVE_NR_IRQS * 2)
			return -EINVAL;
		vma->vm_ops = &xive_native_esb_vmops;
	} else {
		return -EINVAL;
	}

	vma->vm_flags |= VM_IO | VM_PFNMAP;
	vma->vm_page_prot = pgprot_noncached_wc(vma->vm_page_prot);

	/*
	 * Grab the KVM device file address_space to be able to clear
	 * the ESB pages mapping when a device is passed-through into
	 * the guest.
	 */
	xive->mapping = vma->vm_file->f_mapping;
	return 0;
}

static int kvmppc_xive_native_set_source(struct kvmppc_xive *xive, long irq,
					 u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u64 val;
	u16 idx;
	int rc;

	pr_devel("%s irq=0x%lx\n", __func__, irq);

	if (irq < KVMPPC_XIVE_FIRST_IRQ || irq >= KVMPPC_XIVE_NR_IRQS)
		return -E2BIG;

	sb = kvmppc_xive_find_source(xive, irq, &idx);
	if (!sb) {
		pr_debug("No source, creating source block...\n");
		sb = kvmppc_xive_create_src_block(xive, irq);
		if (!sb) {
			pr_err("Failed to create block...\n");
			return -ENOMEM;
		}
	}
	state = &sb->irq_state[idx];

	if (get_user(val, ubufp)) {
		pr_err("fault getting user info !\n");
		return -EFAULT;
	}

	arch_spin_lock(&sb->lock);

	/*
	 * If the source doesn't already have an IPI, allocate
	 * one and get the corresponding data
	 */
	if (!state->ipi_number) {
		state->ipi_number = xive_native_alloc_irq();
		if (state->ipi_number == 0) {
			pr_err("Failed to allocate IRQ !\n");
			rc = -ENXIO;
			goto unlock;
		}
		xive_native_populate_irq_data(state->ipi_number,
					      &state->ipi_data);
		pr_debug("%s allocated hw_irq=0x%x for irq=0x%lx\n", __func__,
			 state->ipi_number, irq);
	}

	/* Restore LSI state */
	if (val & KVM_XIVE_LEVEL_SENSITIVE) {
		state->lsi = true;
		if (val & KVM_XIVE_LEVEL_ASSERTED)
			state->asserted = true;
		pr_devel(" LSI ! Asserted=%d\n", state->asserted);
	}

	/* Mask IRQ to start with */
	state->act_server = 0;
	state->act_priority = MASKED;
	xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
	xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);

	/* Increment the number of valid sources and mark this one valid */
	if (!state->valid)
		xive->src_count++;
	state->valid = true;

	rc = 0;

unlock:
	arch_spin_unlock(&sb->lock);

	return rc;
}

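/*
 * Re-route a source to a new server/priority pair or mask it. The
 * guest EISN is pushed down to the HW so events are queued with the
 * data the guest expects.
 */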
static int kvmppc_xive_native_update_source_config(struct kvmppc_xive *xive,
					struct kvmppc_xive_src_block *sb,
					struct kvmppc_xive_irq_state *state,
					u32 server, u8 priority, bool masked,
					u32 eisn)
{
	struct kvm *kvm = xive->kvm;
	u32 hw_num;
	int rc = 0;

	arch_spin_lock(&sb->lock);

	if (state->act_server == server && state->act_priority == priority &&
	    state->eisn == eisn)
		goto unlock;

	pr_devel("new_act_prio=%d new_act_server=%d mask=%d act_server=%d act_prio=%d\n",
		 priority, server, masked, state->act_server,
		 state->act_priority);

	kvmppc_xive_select_irq(state, &hw_num, NULL);

	if (priority != MASKED && !masked) {
		rc = kvmppc_xive_select_target(kvm, &server, priority);
		if (rc)
			goto unlock;

		state->act_priority = priority;
		state->act_server = server;
		state->eisn = eisn;

		rc = xive_native_configure_irq(hw_num,
					       kvmppc_xive_vp(xive, server),
					       priority, eisn);
	} else {
		state->act_priority = MASKED;
		state->act_server = 0;
		state->eisn = 0;

		rc = xive_native_configure_irq(hw_num, 0, MASKED, 0);
	}

unlock:
	arch_spin_unlock(&sb->lock);
	return rc;
}

static int kvmppc_xive_native_set_source_config(struct kvmppc_xive *xive,
						long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	u64 __user *ubufp = (u64 __user *) addr;
	u16 src;
	u64 kvm_cfg;
	u32 server;
	u8 priority;
	bool masked;
	u32 eisn;

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	if (!state->valid)
		return -EINVAL;

	if (get_user(kvm_cfg, ubufp))
		return -EFAULT;

	pr_devel("%s irq=0x%lx cfg=%016llx\n", __func__, irq, kvm_cfg);

	priority = (kvm_cfg & KVM_XIVE_SOURCE_PRIORITY_MASK) >>
		KVM_XIVE_SOURCE_PRIORITY_SHIFT;
	server = (kvm_cfg & KVM_XIVE_SOURCE_SERVER_MASK) >>
		KVM_XIVE_SOURCE_SERVER_SHIFT;
	masked = (kvm_cfg & KVM_XIVE_SOURCE_MASKED_MASK) >>
		KVM_XIVE_SOURCE_MASKED_SHIFT;
	eisn = (kvm_cfg & KVM_XIVE_SOURCE_EISN_MASK) >>
		KVM_XIVE_SOURCE_EISN_SHIFT;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("invalid priority for queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}

	return kvmppc_xive_native_update_source_config(xive, sb, state, server,
						       priority, masked, eisn);
}

static int kvmppc_xive_native_sync_source(struct kvmppc_xive *xive,
					  long irq, u64 addr)
{
	struct kvmppc_xive_src_block *sb;
	struct kvmppc_xive_irq_state *state;
	struct xive_irq_data *xd;
	u32 hw_num;
	u16 src;
	int rc = -EINVAL;

	pr_devel("%s irq=0x%lx\n", __func__, irq);

	sb = kvmppc_xive_find_source(xive, irq, &src);
	if (!sb)
		return -ENOENT;

	state = &sb->irq_state[src];

	arch_spin_lock(&sb->lock);

	if (state->valid) {
		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		rc = 0;
	}

	arch_spin_unlock(&sb->lock);
	return rc;
}

static int xive_native_validate_queue_size(u32 qshift)
{
	/*
	 * We only support 64K pages for the moment. This is also
	 * advertised in the DT property "ibm,xive-eq-sizes"
	 */
	switch (qshift) {
	case 0: /* EQ reset */
	case 16:
		return 0;
	default:
		return -EINVAL;
	}
}

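/*
 * Configure an event queue (END) for a server/priority pair. The
 * kvm_ppc_xive_eq argument carries what the code below consumes:
 * flags, qshift (queue size as a power of two, 0 meaning reset),
 * qaddr (guest real address of the queue page) and the qtoggle/qindex
 * pointer state used when restoring a queue after migration.
 */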
static int kvmppc_xive_native_set_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	void __user *ubufp = (void __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	int rc;
	__be32 *qaddr = 0;
	struct page *page;
	struct xive_q *q;
	gfn_t gfn;
	unsigned long page_size;
	int srcu_idx;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	if (copy_from_user(&kvm_eq, ubufp, sizeof(kvm_eq)))
		return -EFAULT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("Trying to restore invalid queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	/* reset queue and disable queueing */
	if (!kvm_eq.qshift) {
		q->guest_qaddr = 0;
		q->guest_qshift = 0;

		rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
							NULL, 0, true);
		if (rc) {
			pr_err("Failed to reset queue %d for VCPU %d: %d\n",
			       priority, xc->server_num, rc);
			return rc;
		}

		return 0;
	}

	/*
	 * sPAPR specifies an "Unconditional Notify (n) flag" for the
	 * H_INT_SET_QUEUE_CONFIG hcall which forces notification
	 * without using the coalescing mechanisms provided by the
	 * XIVE END ESBs. This is required on KVM as notification
	 * using the END ESBs is not supported.
	 */
	if (kvm_eq.flags != KVM_XIVE_EQ_ALWAYS_NOTIFY) {
		pr_err("invalid flags %d\n", kvm_eq.flags);
		return -EINVAL;
	}

	rc = xive_native_validate_queue_size(kvm_eq.qshift);
	if (rc) {
		pr_err("invalid queue size %d\n", kvm_eq.qshift);
		return rc;
	}

	if (kvm_eq.qaddr & ((1ull << kvm_eq.qshift) - 1)) {
		pr_err("queue page is not aligned %llx/%llx\n", kvm_eq.qaddr,
		       1ull << kvm_eq.qshift);
		return -EINVAL;
	}

	srcu_idx = srcu_read_lock(&kvm->srcu);
	gfn = gpa_to_gfn(kvm_eq.qaddr);

	page_size = kvm_host_page_size(vcpu, gfn);
	if (1ull << kvm_eq.qshift > page_size) {
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		pr_warn("Incompatible host page size %lx!\n", page_size);
		return -EINVAL;
	}

	page = gfn_to_page(kvm, gfn);
	if (is_error_page(page)) {
		srcu_read_unlock(&kvm->srcu, srcu_idx);
		pr_err("Couldn't get queue page %llx!\n", kvm_eq.qaddr);
		return -EINVAL;
	}

	qaddr = page_to_virt(page) + (kvm_eq.qaddr & ~PAGE_MASK);
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	/*
	 * Back up the queue page guest address so that the EQ page
	 * can be marked dirty for migration.
	 */
	q->guest_qaddr = kvm_eq.qaddr;
	q->guest_qshift = kvm_eq.qshift;

	/*
	 * Unconditional Notification is forced by default at the
	 * OPAL level because the use of END ESBs is not supported by
	 * Linux.
	 */
	rc = kvmppc_xive_native_configure_queue(xc->vp_id, q, priority,
					(__be32 *) qaddr, kvm_eq.qshift, true);
	if (rc) {
		pr_err("Failed to configure queue %d for VCPU %d: %d\n",
		       priority, xc->server_num, rc);
		put_page(page);
		return rc;
	}

	/*
	 * Only restore the queue state when needed. When doing the
	 * H_INT_SET_SOURCE_CONFIG hcall, it should not.
	 */
	if (kvm_eq.qtoggle != 1 || kvm_eq.qindex != 0) {
		rc = xive_native_set_queue_state(xc->vp_id, priority,
						 kvm_eq.qtoggle,
						 kvm_eq.qindex);
		if (rc)
			goto error;
	}

	rc = kvmppc_xive_attach_escalation(vcpu, priority,
					   xive->single_escalation);
error:
	if (rc)
		kvmppc_xive_native_cleanup_queue(vcpu, priority);
	return rc;
}

static int kvmppc_xive_native_get_queue_config(struct kvmppc_xive *xive,
					       long eq_idx, u64 addr)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	struct kvmppc_xive_vcpu *xc;
	struct xive_q *q;
	void __user *ubufp = (u64 __user *) addr;
	u32 server;
	u8 priority;
	struct kvm_ppc_xive_eq kvm_eq;
	u64 qaddr;
	u64 qshift;
	u64 qeoi_page;
	u32 escalate_irq;
	u64 qflags;
	int rc;

	/*
	 * Demangle priority/server tuple from the EQ identifier
	 */
	priority = (eq_idx & KVM_XIVE_EQ_PRIORITY_MASK) >>
		KVM_XIVE_EQ_PRIORITY_SHIFT;
	server = (eq_idx & KVM_XIVE_EQ_SERVER_MASK) >>
		KVM_XIVE_EQ_SERVER_SHIFT;

	vcpu = kvmppc_xive_find_server(kvm, server);
	if (!vcpu) {
		pr_err("Can't find server %d\n", server);
		return -ENOENT;
	}
	xc = vcpu->arch.xive_vcpu;

	if (priority != xive_prio_from_guest(priority)) {
		pr_err("invalid priority for queue %d for VCPU %d\n",
		       priority, server);
		return -EINVAL;
	}
	q = &xc->queues[priority];

	memset(&kvm_eq, 0, sizeof(kvm_eq));

	if (!q->qpage)
		return 0;

	rc = xive_native_get_queue_info(xc->vp_id, priority, &qaddr, &qshift,
					&qeoi_page, &escalate_irq, &qflags);
	if (rc)
		return rc;

	kvm_eq.flags = 0;
	if (qflags & OPAL_XIVE_EQ_ALWAYS_NOTIFY)
		kvm_eq.flags |= KVM_XIVE_EQ_ALWAYS_NOTIFY;

	kvm_eq.qshift = q->guest_qshift;
	kvm_eq.qaddr  = q->guest_qaddr;

	rc = xive_native_get_queue_state(xc->vp_id, priority, &kvm_eq.qtoggle,
					 &kvm_eq.qindex);
	if (rc)
		return rc;

	pr_devel("%s VCPU %d priority %d fl:%x shift:%d addr:%llx g:%d idx:%d\n",
		 __func__, server, priority, kvm_eq.flags,
		 kvm_eq.qshift, kvm_eq.qaddr, kvm_eq.qtoggle, kvm_eq.qindex);

	if (copy_to_user(ubufp, &kvm_eq, sizeof(kvm_eq)))
		return -EFAULT;

	return 0;
}

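/*
 * Return all sources of a block to their initial state: masked at
 * the HW level (PQ set to 01) and unrouted (EAS left MASKED).
 */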
static void kvmppc_xive_reset_sources(struct kvmppc_xive_src_block *sb)
{
	int i;

	for (i = 0; i < KVMPPC_XICS_IRQ_PER_ICS; i++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[i];

		if (!state->valid)
			continue;

		if (state->act_priority == MASKED)
			continue;

		state->eisn = 0;
		state->act_server = 0;
		state->act_priority = MASKED;
		xive_vm_esb_load(&state->ipi_data, XIVE_ESB_SET_PQ_01);
		xive_native_configure_irq(state->ipi_number, 0, MASKED, 0);
		if (state->pt_number) {
			xive_vm_esb_load(state->pt_data, XIVE_ESB_SET_PQ_01);
			xive_native_configure_irq(state->pt_number,
						  0, MASKED, 0);
		}
	}
}

static int kvmppc_xive_reset(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned int i;

	pr_devel("%s\n", __func__);

	mutex_lock(&xive->lock);

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
		unsigned int prio;

		if (!xc)
			continue;

		kvmppc_xive_disable_vcpu_interrupts(vcpu);

		for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {

			/* Single escalation, no queue 7 */
			if (prio == 7 && xive->single_escalation)
				break;

			if (xc->esc_virq[prio]) {
				free_irq(xc->esc_virq[prio], vcpu);
				irq_dispose_mapping(xc->esc_virq[prio]);
				kfree(xc->esc_virq_names[prio]);
				xc->esc_virq[prio] = 0;
			}

			kvmppc_xive_native_cleanup_queue(vcpu, prio);
		}
	}

	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_reset_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	mutex_unlock(&xive->lock);

	return 0;
}

static void kvmppc_xive_native_sync_sources(struct kvmppc_xive_src_block *sb)
{
	int j;

	for (j = 0; j < KVMPPC_XICS_IRQ_PER_ICS; j++) {
		struct kvmppc_xive_irq_state *state = &sb->irq_state[j];
		struct xive_irq_data *xd;
		u32 hw_num;

		if (!state->valid)
			continue;

		/*
		 * The struct kvmppc_xive_irq_state reflects the state
		 * of the EAS configuration and not the state of the
		 * source. The source is masked setting the PQ bits to
		 * '-Q', which is what is being done before calling
		 * the KVM_DEV_XIVE_EQ_SYNC control.
		 *
		 * If a source EAS is configured, OPAL syncs the XIVE
		 * IC of the source and the XIVE IC of the previous
		 * target if any.
		 *
		 * So it should be fine ignoring MASKED sources as
		 * they have been synced already.
		 */
		if (state->act_priority == MASKED)
			continue;

		kvmppc_xive_select_irq(state, &hw_num, &xd);
		xive_native_sync_source(hw_num);
		xive_native_sync_queue(hw_num);
	}
}

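/*
 * The EQ sync control flushes in-flight events out to the queue
 * pages and then marks those guest pages dirty so that a migration
 * captures an up to date view of the queues.
 */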
static int kvmppc_xive_native_vcpu_eq_sync(struct kvm_vcpu *vcpu)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	unsigned int prio;
	int srcu_idx;

	if (!xc)
		return -ENOENT;

	for (prio = 0; prio < KVMPPC_XIVE_Q_COUNT; prio++) {
		struct xive_q *q = &xc->queues[prio];

		if (!q->qpage)
			continue;

		/* Mark EQ page dirty for migration */
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		mark_page_dirty(vcpu->kvm, gpa_to_gfn(q->guest_qaddr));
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
	}
	return 0;
}

static int kvmppc_xive_native_eq_sync(struct kvmppc_xive *xive)
{
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned int i;

	pr_devel("%s\n", __func__);

	mutex_lock(&xive->lock);
	for (i = 0; i <= xive->max_sbid; i++) {
		struct kvmppc_xive_src_block *sb = xive->src_blocks[i];

		if (sb) {
			arch_spin_lock(&sb->lock);
			kvmppc_xive_native_sync_sources(sb);
			arch_spin_unlock(&sb->lock);
		}
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvmppc_xive_native_vcpu_eq_sync(vcpu);
	}
	mutex_unlock(&xive->lock);

	return 0;
}

static int kvmppc_xive_native_set_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XIVE_RESET:
			return kvmppc_xive_reset(xive);
		case KVM_DEV_XIVE_EQ_SYNC:
			return kvmppc_xive_native_eq_sync(xive);
		case KVM_DEV_XIVE_NR_SERVERS:
			return kvmppc_xive_set_nr_servers(xive, attr->addr);
		}
		break;
	case KVM_DEV_XIVE_GRP_SOURCE:
		return kvmppc_xive_native_set_source(xive, attr->attr,
						     attr->addr);
	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
		return kvmppc_xive_native_set_source_config(xive, attr->attr,
							    attr->addr);
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return kvmppc_xive_native_set_queue_config(xive, attr->attr,
							   attr->addr);
	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
		return kvmppc_xive_native_sync_source(xive, attr->attr,
						      attr->addr);
	}
	return -ENXIO;
}

static int kvmppc_xive_native_get_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	struct kvmppc_xive *xive = dev->private;

	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return kvmppc_xive_native_get_queue_config(xive, attr->attr,
							   attr->addr);
	}
	return -ENXIO;
}

static int kvmppc_xive_native_has_attr(struct kvm_device *dev,
				       struct kvm_device_attr *attr)
{
	switch (attr->group) {
	case KVM_DEV_XIVE_GRP_CTRL:
		switch (attr->attr) {
		case KVM_DEV_XIVE_RESET:
		case KVM_DEV_XIVE_EQ_SYNC:
		case KVM_DEV_XIVE_NR_SERVERS:
			return 0;
		}
		break;
	case KVM_DEV_XIVE_GRP_SOURCE:
	case KVM_DEV_XIVE_GRP_SOURCE_CONFIG:
	case KVM_DEV_XIVE_GRP_SOURCE_SYNC:
		if (attr->attr >= KVMPPC_XIVE_FIRST_IRQ &&
		    attr->attr < KVMPPC_XIVE_NR_IRQS)
			return 0;
		break;
	case KVM_DEV_XIVE_GRP_EQ_CONFIG:
		return 0;
	}
	return -ENXIO;
}

/*
 * Called when the device fd is closed. kvm->lock is held.
 */
static void kvmppc_xive_native_release(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = dev->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	int i;

	pr_devel("Releasing xive native device\n");

	/*
	 * Clear the KVM device file address_space which is used to
	 * unmap the ESB pages when a device is passed-through.
	 */
	mutex_lock(&xive->mapping_lock);
	xive->mapping = NULL;
	mutex_unlock(&xive->mapping_lock);

	/*
	 * Since this is the device release function, we know that
	 * userspace does not have any open fd or mmap referring to
	 * the device. Therefore none of the device attribute set/get,
	 * mmap, or page fault functions can be executing concurrently,
	 * and similarly the connect_vcpu and set/clr_mapped functions
	 * cannot be executing.
	 */

	debugfs_remove(xive->dentry);

	/*
	 * We should clean up the vCPU interrupt presenters first.
	 */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		/*
		 * Take vcpu->mutex to ensure that no one_reg get/set ioctl
		 * (i.e. kvmppc_xive_native_[gs]et_vp) can be being done.
		 * Holding the vcpu->mutex also means that the vcpu cannot
		 * be executing the KVM_RUN ioctl, and therefore it cannot
		 * be executing the XIVE push or pull code or accessing
		 * the XIVE MMIO regions.
		 */
		mutex_lock(&vcpu->mutex);
		kvmppc_xive_native_cleanup_vcpu(vcpu);
		mutex_unlock(&vcpu->mutex);
	}

	/*
	 * Now that we have cleared vcpu->arch.xive_vcpu, vcpu->arch.irq_type
	 * and vcpu->arch.xive_esc_[vr]addr on each vcpu, we are safe
	 * against xive code getting called during vcpu execution or
	 * set/get one_reg operations.
	 */
	kvm->arch.xive = NULL;

	for (i = 0; i <= xive->max_sbid; i++) {
		if (xive->src_blocks[i])
			kvmppc_xive_free_sources(xive->src_blocks[i]);
		kfree(xive->src_blocks[i]);
		xive->src_blocks[i] = NULL;
	}

	if (xive->vp_base != XIVE_INVALID_VP)
		xive_native_free_vp_block(xive->vp_base);

	/*
	 * A reference of the kvmppc_xive pointer is now kept under
	 * the xive_devices struct of the machine for reuse. It is
	 * freed when the VM is destroyed for now until we fix all the
	 * execution paths.
	 */
	kfree(dev);
}

/*
 * Create a XIVE device. kvm->lock is held.
 */
static int kvmppc_xive_native_create(struct kvm_device *dev, u32 type)
{
	struct kvmppc_xive *xive;
	struct kvm *kvm = dev->kvm;

	pr_devel("Creating xive native device\n");

	if (kvm->arch.xive)
		return -EEXIST;

	xive = kvmppc_xive_get_device(kvm, type);
	if (!xive)
		return -ENOMEM;

	dev->private = xive;
	xive->dev = dev;
	xive->kvm = kvm;
	mutex_init(&xive->mapping_lock);
	mutex_init(&xive->lock);

	/* VP allocation is delayed to the first call to connect_vcpu */
	xive->vp_base = XIVE_INVALID_VP;
	/* KVM_MAX_VCPUS limits the number of VMs to roughly 64 per socket
	 * on a POWER9 system.
	 */
	xive->nr_servers = KVM_MAX_VCPUS;

	xive->single_escalation = xive_native_has_single_escalation();
	xive->ops = &kvmppc_xive_native_ops;

	kvm->arch.xive = xive;
	return 0;
}

/*
 * Interrupt Pending Buffer (IPB) offset
 */
#define TM_IPB_SHIFT 40
#define TM_IPB_MASK  (((u64) 0xFF) << TM_IPB_SHIFT)

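/*
 * The VP state exposed through the one_reg interface combines the
 * thread context word0-1 saved at guest exit with the IPB bits that
 * OPAL has backed up in the NVT structure.
 */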
int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	u64 opal_state;
	int rc;

	if (!kvmppc_xive_enabled(vcpu))
		return -EPERM;

	if (!xc)
		return -ENOENT;

	/* Thread context registers. We only care about IPB and CPPR */
	val->xive_timaval[0] = vcpu->arch.xive_saved_state.w01;

	/* Get the VP state from OPAL */
	rc = xive_native_get_vp_state(xc->vp_id, &opal_state);
	if (rc)
		return rc;

	/*
	 * Capture the backup of the IPB register in the NVT structure
	 * and merge it into our KVM VP state.
	 */
	val->xive_timaval[0] |= cpu_to_be64(opal_state & TM_IPB_MASK);

	pr_devel("%s NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x opal=%016llx\n",
		 __func__,
		 vcpu->arch.xive_saved_state.nsr,
		 vcpu->arch.xive_saved_state.cppr,
		 vcpu->arch.xive_saved_state.ipb,
		 vcpu->arch.xive_saved_state.pipr,
		 vcpu->arch.xive_saved_state.w01,
		 (u32) vcpu->arch.xive_cam_word, opal_state);

	return 0;
}

int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu, union kvmppc_one_reg *val)
{
	struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;
	struct kvmppc_xive *xive = vcpu->kvm->arch.xive;

	pr_devel("%s w01=%016llx vp=%016llx\n", __func__,
		 val->xive_timaval[0], val->xive_timaval[1]);

	if (!kvmppc_xive_enabled(vcpu))
		return -EPERM;

	if (!xc || !xive)
		return -ENOENT;

	/* We can't update the state of a "pushed" VCPU */
	if (WARN_ON(vcpu->arch.xive_pushed))
		return -EBUSY;

	/*
	 * Restore the thread context registers. IPB and CPPR should
	 * be the only ones that matter.
	 */
	vcpu->arch.xive_saved_state.w01 = val->xive_timaval[0];

	/*
	 * There is no need to restore the XIVE internal state (IPB
	 * stored in the NVT) as the IPB register was merged into the
	 * KVM VP state when captured.
	 */
	return 0;
}

bool kvmppc_xive_native_supported(void)
{
	return xive_native_has_queue_state_support();
}

static int xive_native_debug_show(struct seq_file *m, void *private)
{
	struct kvmppc_xive *xive = m->private;
	struct kvm *kvm = xive->kvm;
	struct kvm_vcpu *vcpu;
	unsigned int i;

	if (!kvm)
		return 0;

	seq_puts(m, "=========\nVCPU state\n=========\n");

	kvm_for_each_vcpu(i, vcpu, kvm) {
		struct kvmppc_xive_vcpu *xc = vcpu->arch.xive_vcpu;

		if (!xc)
			continue;

		seq_printf(m, "cpu server %#x VP=%#x NSR=%02x CPPR=%02x IPB=%02x PIPR=%02x w01=%016llx w2=%08x\n",
			   xc->server_num, xc->vp_id,
			   vcpu->arch.xive_saved_state.nsr,
			   vcpu->arch.xive_saved_state.cppr,
			   vcpu->arch.xive_saved_state.ipb,
			   vcpu->arch.xive_saved_state.pipr,
			   vcpu->arch.xive_saved_state.w01,
			   (u32) vcpu->arch.xive_cam_word);

		kvmppc_xive_debug_show_queues(m, vcpu);
	}

	return 0;
}

static int xive_native_debug_open(struct inode *inode, struct file *file)
{
	return single_open(file, xive_native_debug_show, inode->i_private);
}

static const struct file_operations xive_native_debug_fops = {
	.open = xive_native_debug_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static void xive_native_debugfs_init(struct kvmppc_xive *xive)
{
	char *name;

	name = kasprintf(GFP_KERNEL, "kvm-xive-%p", xive);
	if (!name) {
		pr_err("%s: no memory for name\n", __func__);
		return;
	}

	xive->dentry = debugfs_create_file(name, 0444, powerpc_debugfs_root,
					   xive, &xive_native_debug_fops);

	pr_debug("%s: created %s\n", __func__, name);
	kfree(name);
}

static void kvmppc_xive_native_init(struct kvm_device *dev)
{
	struct kvmppc_xive *xive = (struct kvmppc_xive *)dev->private;

	/* Register some debug interfaces */
	xive_native_debugfs_init(xive);
}

struct kvm_device_ops kvm_xive_native_ops = {
	.name = "kvm-xive-native",
	.create = kvmppc_xive_native_create,
	.init = kvmppc_xive_native_init,
	.release = kvmppc_xive_native_release,
	.set_attr = kvmppc_xive_native_set_attr,
	.get_attr = kvmppc_xive_native_get_attr,
	.has_attr = kvmppc_xive_native_has_attr,
	.mmap = kvmppc_xive_native_mmap,
};

void kvmppc_xive_native_init_module(void)
{
	;
}

void kvmppc_xive_native_exit_module(void)
{
	;
}