/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/delay.h>
#include <linux/cpumask.h>
#include <linux/mm.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/opal.h>
#include <asm/kvm_ppc.h>

#include "xive-internal.h"

static u32 xive_provision_size;
static u32 *xive_provision_chips;
static u32 xive_provision_chip_count;
static u32 xive_queue_shift;
static u32 xive_pool_vps = XIVE_INVALID_VP;
static struct kmem_cache *xive_provision_cache;

int xive_native_populate_irq_data(u32 hw_irq, struct xive_irq_data *data)
{
	__be64 flags, eoi_page, trig_page;
	__be32 esb_shift, src_chip;
	u64 opal_flags;
	s64 rc;

	memset(data, 0, sizeof(*data));

	rc = opal_xive_get_irq_info(hw_irq, &flags, &eoi_page, &trig_page,
				    &esb_shift, &src_chip);
	if (rc) {
		pr_err("opal_xive_get_irq_info(0x%x) returned %lld\n",
		       hw_irq, rc);
		return -EINVAL;
	}

	opal_flags = be64_to_cpu(flags);
	if (opal_flags & OPAL_XIVE_IRQ_STORE_EOI)
		data->flags |= XIVE_IRQ_FLAG_STORE_EOI;
	if (opal_flags & OPAL_XIVE_IRQ_LSI)
		data->flags |= XIVE_IRQ_FLAG_LSI;
	if (opal_flags & OPAL_XIVE_IRQ_SHIFT_BUG)
		data->flags |= XIVE_IRQ_FLAG_SHIFT_BUG;
	if (opal_flags & OPAL_XIVE_IRQ_MASK_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_MASK_FW;
	if (opal_flags & OPAL_XIVE_IRQ_EOI_VIA_FW)
		data->flags |= XIVE_IRQ_FLAG_EOI_FW;
	data->eoi_page = be64_to_cpu(eoi_page);
	data->trig_page = be64_to_cpu(trig_page);
	data->esb_shift = be32_to_cpu(esb_shift);
	data->src_chip = be32_to_cpu(src_chip);

	data->eoi_mmio = ioremap(data->eoi_page, 1u << data->esb_shift);
	if (!data->eoi_mmio) {
		pr_err("Failed to map EOI page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}

	data->hw_irq = hw_irq;

	if (!data->trig_page)
		return 0;
	if (data->trig_page == data->eoi_page) {
		data->trig_mmio = data->eoi_mmio;
		return 0;
	}

	data->trig_mmio = ioremap(data->trig_page, 1u << data->esb_shift);
	if (!data->trig_mmio) {
		pr_err("Failed to map trigger page for irq 0x%x\n", hw_irq);
		return -ENOMEM;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_populate_irq_data);

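/*
 * A minimal sketch of how a consumer might use the mappings populated
 * above (assumption for illustration: "eoi_offset" is a placeholder, not
 * a symbol defined in this file). Each ESB page is 1u << data->esb_shift
 * bytes; loads from the EOI page perform EOI/query cycles and stores to
 * the trigger page fire the interrupt:
 *
 *	u64 val = __raw_readq(data->eoi_mmio + eoi_offset);
 *	__raw_writeq(0, data->trig_mmio);
 */
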
int xive_native_configure_irq(u32 hw_irq, u32 target, u8 prio, u32 sw_irq)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_irq_config(hw_irq, target, prio, sw_irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc == 0 ? 0 : -ENXIO;
}
EXPORT_SYMBOL_GPL(xive_native_configure_irq);

/* This can be called multiple times to change a queue configuration */
int xive_native_configure_queue(u32 vp_id, struct xive_q *q, u8 prio,
				__be32 *qpage, u32 order, bool can_escalate)
{
	s64 rc = 0;
	__be64 qeoi_page_be;
	__be32 esc_irq_be;
	u64 flags, qpage_phys;

	/* If there's an actual queue page, clean it */
	if (order) {
		if (WARN_ON(!qpage))
			return -EINVAL;
		qpage_phys = __pa(qpage);
	} else
		qpage_phys = 0;

	/* Initialize the rest of the fields */
	q->msk = order ? ((1u << (order - 2)) - 1) : 0;
	q->idx = 0;
	q->toggle = 0;

	rc = opal_xive_get_queue_info(vp_id, prio, NULL, NULL,
				      &qeoi_page_be,
				      &esc_irq_be,
				      NULL);
	if (rc) {
		pr_err("Error %lld getting queue info prio %d\n", rc, prio);
		rc = -EIO;
		goto fail;
	}
	q->eoi_phys = be64_to_cpu(qeoi_page_be);

	/* Default flags */
	flags = OPAL_XIVE_EQ_ALWAYS_NOTIFY | OPAL_XIVE_EQ_ENABLED;

	/* Escalation needed? */
	if (can_escalate) {
		q->esc_irq = be32_to_cpu(esc_irq_be);
		flags |= OPAL_XIVE_EQ_ESCALATE;
	}

	/* Configure and enable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, qpage_phys, order, flags);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc) {
		pr_err("Error %lld setting queue for prio %d\n", rc, prio);
		rc = -EIO;
	} else {
		/*
		 * KVM code requires all of the above to be visible before
		 * q->qpage is set due to how it manages IPI EOIs
		 */
		wmb();
		q->qpage = qpage;
	}
fail:
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_configure_queue);

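/*
 * Sizing note and usage sketch for the function above. "order" is log2
 * of the queue size in bytes and entries are 4 bytes wide, hence
 * q->msk = (1u << (order - 2)) - 1: a 64kB queue (order 16) holds 16384
 * entries and gets msk = 0x3fff. A typical caller (sketch, mirroring
 * xive_native_setup_queue() below) looks like:
 *
 *	qpage = xive_queue_page_alloc(cpu, shift);
 *	if (!IS_ERR(qpage))
 *		rc = xive_native_configure_queue(vp_id, q, prio, qpage,
 *						 shift, false);
 */
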
static void __xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	s64 rc;

	/* Disable the queue in HW */
	for (;;) {
		rc = opal_xive_set_queue_info(vp_id, prio, 0, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc)
		pr_err("Error %lld disabling queue for prio %d\n", rc, prio);
}

void xive_native_disable_queue(u32 vp_id, struct xive_q *q, u8 prio)
{
	__xive_native_disable_queue(vp_id, q, prio);
}
EXPORT_SYMBOL_GPL(xive_native_disable_queue);

static int xive_native_setup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	__be32 *qpage;

	qpage = xive_queue_page_alloc(cpu, xive_queue_shift);
	if (IS_ERR(qpage))
		return PTR_ERR(qpage);

	return xive_native_configure_queue(get_hard_smp_processor_id(cpu),
					   q, prio, qpage, xive_queue_shift, false);
}

static void xive_native_cleanup_queue(unsigned int cpu, struct xive_cpu *xc, u8 prio)
{
	struct xive_q *q = &xc->queue[prio];
	unsigned int alloc_order;

	/*
	 * We use the variant with no iounmap as this is called on exec
	 * from an IPI and iounmap isn't safe
	 */
	__xive_native_disable_queue(get_hard_smp_processor_id(cpu), q, prio);
	alloc_order = xive_alloc_order(xive_queue_shift);
	free_pages((unsigned long)q->qpage, alloc_order);
	q->qpage = NULL;
}

static bool xive_native_match(struct device_node *node)
{
	return of_device_is_compatible(node, "ibm,opal-xive-vc");
}

#ifdef CONFIG_SMP
static int xive_native_get_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	struct device_node *np;
	unsigned int chip_id = 0;
	s64 irq;

	/* Find the chip ID */
	np = of_get_cpu_node(cpu, NULL);
	if (np) {
		if (of_property_read_u32(np, "ibm,chip-id", &chip_id) < 0)
			chip_id = 0;
	}

	/* Allocate an IPI and populate info about it */
	for (;;) {
		irq = opal_xive_allocate_irq(chip_id);
		if (irq == OPAL_BUSY) {
			msleep(1);
			continue;
		}
		if (irq < 0) {
			pr_err("Failed to allocate IPI on CPU %d\n", cpu);
			return -ENXIO;
		}
		xc->hw_ipi = irq;
		break;
	}
	return 0;
}
#endif /* CONFIG_SMP */

u32 xive_native_alloc_irq(void)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_allocate_irq(OPAL_XIVE_ANY_CHIP);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc < 0)
		return 0;
	return rc;
}
EXPORT_SYMBOL_GPL(xive_native_alloc_irq);

void xive_native_free_irq(u32 irq)
{
	for (;;) {
		s64 rc = opal_xive_free_irq(irq);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
}
EXPORT_SYMBOL_GPL(xive_native_free_irq);

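/*
 * Minimal usage sketch for the allocator pair above (illustrative only,
 * not a call site in this file). xive_native_alloc_irq() returns 0 on
 * failure, so:
 *
 *	u32 hwirq = xive_native_alloc_irq();
 *	if (!hwirq)
 *		return -ENOSPC;
 *	...
 *	xive_native_free_irq(hwirq);
 */
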
#ifdef CONFIG_SMP
static void xive_native_put_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;

	/* Free the IPI */
	if (!xc->hw_ipi)
		return;
	for (;;) {
		rc = opal_xive_free_irq(xc->hw_ipi);
		if (rc == OPAL_BUSY) {
			msleep(1);
			continue;
		}
		xc->hw_ipi = 0;
		break;
	}
}
#endif /* CONFIG_SMP */

static void xive_native_shutdown(void)
{
	/* Switch the XIVE to emulation mode */
	opal_xive_reset(OPAL_XIVE_MODE_EMU);
}

/*
 * Perform an "ack" cycle on the current thread, thus
 * grabbing the pending active priorities and updating
 * the CPPR to the most favored one.
 */
static void xive_native_update_pending(struct xive_cpu *xc)
{
	u8 he, cppr;
	u16 ack;

	/* Perform the acknowledge hypervisor to register cycle */
	ack = be16_to_cpu(__raw_readw(xive_tima + TM_SPC_ACK_HV_REG));

	/* Synchronize subsequent queue accesses */
	mb();

	/*
	 * Grab the CPPR and the "HE" field which indicates the source
	 * of the hypervisor interrupt (if any)
	 */
	cppr = ack & 0xff;
	he = GETFIELD(TM_QW3_NSR_HE, (ack >> 8));
	switch (he) {
	case TM_QW3_NSR_HE_NONE: /* Nothing to see here */
		break;
	case TM_QW3_NSR_HE_PHYS: /* Physical thread interrupt */
		if (cppr == 0xff)
			return;
		/* Mark the priority pending */
		xc->pending_prio |= 1 << cppr;

		/*
		 * A new interrupt should never have a CPPR less favored
		 * than our current one.
		 */
		if (cppr >= xc->cppr)
			pr_err("CPU %d odd ack CPPR, got %d at %d\n",
			       smp_processor_id(), cppr, xc->cppr);

		/* Update our idea of what the CPPR is */
		xc->cppr = cppr;
		break;
	case TM_QW3_NSR_HE_POOL: /* HV Pool interrupt (unused) */
	case TM_QW3_NSR_HE_LSI:  /* Legacy FW LSI (unused) */
		pr_err("CPU %d got unexpected interrupt type HE=%d\n",
		       smp_processor_id(), he);
		break;
	}
}

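/*
 * Note on the "less favored" check above: XIVE priorities are inverted
 * with respect to their numeric value, i.e. CPPR 0 is the most favored
 * and 0xff effectively means "no interrupt". An ack carrying a CPPR
 * numerically >= our current one could not have caused an exception at
 * the current priority, hence the error report.
 */
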
static void xive_native_eoi(u32 hw_irq)
{
	/*
	 * Not normally used except if specific interrupts need
	 * a workaround on EOI.
	 */
	opal_int_eoi(hw_irq);
}

static void xive_native_setup_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;
	__be64 vp_cam_be;
	u64 vp_cam;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Enable the pool VP */
	vp = xive_pool_vps + cpu;
	pr_debug("CPU %d setting up pool VP 0x%x\n", cpu, vp);
	for (;;) {
		rc = opal_xive_set_vp_info(vp, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	if (rc) {
		pr_err("Failed to enable pool VP on CPU %d\n", cpu);
		return;
	}

	/* Grab its CAM value */
	rc = opal_xive_get_vp_info(vp, NULL, &vp_cam_be, NULL, NULL);
	if (rc) {
		pr_err("Failed to get pool VP info CPU %d\n", cpu);
		return;
	}
	vp_cam = be64_to_cpu(vp_cam_be);

	pr_debug("VP CAM = %llx\n", vp_cam);

	/* Push it on the CPU (set LSMFB to 0xff to skip backlog scan) */
	pr_debug("(Old HW value: %08x)\n",
		 in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD0, 0xff);
	out_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2,
		 TM_QW2W2_VP | vp_cam);
	pr_debug("(New HW value: %08x)\n",
		 in_be32(xive_tima + TM_QW2_HV_POOL + TM_WORD2));
}

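/*
 * The TIMA stores above install the pool VP context on the thread:
 * TM_WORD0 sets LSMFB to 0xff so the hardware skips the backlog scan,
 * and TM_WORD2 loads the CAM line with its valid bit (TM_QW2W2_VP) set.
 * xive_native_teardown_cpu() below undoes this by pulling the pool
 * context before disabling the VP.
 */
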
static void xive_native_teardown_cpu(unsigned int cpu, struct xive_cpu *xc)
{
	s64 rc;
	u32 vp;

	if (xive_pool_vps == XIVE_INVALID_VP)
		return;

	/* Pull the pool VP from the CPU */
	in_be64(xive_tima + TM_SPC_PULL_POOL_CTX);

	/* Disable it */
	vp = xive_pool_vps + cpu;
	for (;;) {
		rc = opal_xive_set_vp_info(vp, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
}

void xive_native_sync_source(u32 hw_irq)
{
	opal_xive_sync(XIVE_SYNC_EAS, hw_irq);
}
EXPORT_SYMBOL_GPL(xive_native_sync_source);

static const struct xive_ops xive_native_ops = {
	.populate_irq_data	= xive_native_populate_irq_data,
	.configure_irq		= xive_native_configure_irq,
	.setup_queue		= xive_native_setup_queue,
	.cleanup_queue		= xive_native_cleanup_queue,
	.match			= xive_native_match,
	.shutdown		= xive_native_shutdown,
	.update_pending		= xive_native_update_pending,
	.eoi			= xive_native_eoi,
	.setup_cpu		= xive_native_setup_cpu,
	.teardown_cpu		= xive_native_teardown_cpu,
	.sync_source		= xive_native_sync_source,
#ifdef CONFIG_SMP
	.get_ipi		= xive_native_get_ipi,
	.put_ipi		= xive_native_put_ipi,
#endif /* CONFIG_SMP */
	.name			= "native",
};

static bool xive_parse_provisioning(struct device_node *np)
{
	int rc;

	if (of_property_read_u32(np, "ibm,xive-provision-page-size",
				 &xive_provision_size) < 0)
		return true;
	rc = of_property_count_elems_of_size(np, "ibm,xive-provision-chips", 4);
	if (rc < 0) {
		pr_err("Error %d getting provision chips array\n", rc);
		return false;
	}
	xive_provision_chip_count = rc;
	if (rc == 0)
		return true;

	xive_provision_chips = kzalloc(4 * xive_provision_chip_count,
				       GFP_KERNEL);
	if (WARN_ON(!xive_provision_chips))
		return false;

	rc = of_property_read_u32_array(np, "ibm,xive-provision-chips",
					xive_provision_chips,
					xive_provision_chip_count);
	if (rc < 0) {
		pr_err("Error %d reading provision chips array\n", rc);
		return false;
	}

	xive_provision_cache = kmem_cache_create("xive-provision",
						 xive_provision_size,
						 xive_provision_size,
						 0, NULL);
	if (!xive_provision_cache) {
		pr_err("Failed to allocate provision cache\n");
		return false;
	}
	return true;
}

static void xive_native_setup_pools(void)
{
	/* Allocate a pool big enough */
	pr_debug("XIVE: Allocating VP block for pool size %d\n", nr_cpu_ids);

	xive_pool_vps = xive_native_alloc_vp_block(nr_cpu_ids);
	if (WARN_ON(xive_pool_vps == XIVE_INVALID_VP))
		pr_err("XIVE: Failed to allocate pool VP, KVM might not function\n");

	pr_debug("XIVE: Pool VPs allocated at 0x%x for %d max CPUs\n",
		 xive_pool_vps, nr_cpu_ids);
}

u32 xive_native_default_eq_shift(void)
{
	return xive_queue_shift;
}
EXPORT_SYMBOL_GPL(xive_native_default_eq_shift);

bool __init xive_native_init(void)
{
	struct device_node *np;
	struct resource r;
	void __iomem *tima;
	struct property *prop;
	u8 max_prio = 7;
	const __be32 *p;
	u32 val;
	u32 cpu;
	s64 rc;

	if (xive_cmdline_disabled)
		return false;

	pr_devel("xive_native_init()\n");
	np = of_find_compatible_node(NULL, NULL, "ibm,opal-xive-pe");
	if (!np) {
		pr_devel("not found !\n");
		return false;
	}
	pr_devel("Found %pOF\n", np);

	/* Resource 1 is HV window */
	if (of_address_to_resource(np, 1, &r)) {
		pr_err("Failed to get thread mgmnt area resource\n");
		return false;
	}
	tima = ioremap(r.start, resource_size(&r));
	if (!tima) {
		pr_err("Failed to map thread mgmnt area\n");
		return false;
	}

	/* Read number of priorities */
	if (of_property_read_u32(np, "ibm,xive-#priorities", &val) == 0)
		max_prio = val - 1;

	/* Iterate the EQ sizes and pick one */
	of_property_for_each_u32(np, "ibm,xive-eq-sizes", prop, p, val) {
		xive_queue_shift = val;
		if (val == PAGE_SHIFT)
			break;
	}

	/* Configure Thread Management areas for KVM */
	for_each_possible_cpu(cpu)
		kvmppc_set_xive_tima(cpu, r.start, tima);

	/* Grab size of provisioning pages */
	xive_parse_provisioning(np);

	/* Switch the XIVE to exploitation mode */
	rc = opal_xive_reset(OPAL_XIVE_MODE_EXPL);
	if (rc) {
		pr_err("Switch to exploitation mode failed with error %lld\n", rc);
		return false;
	}

	/* Setup some dummy HV pool VPs */
	xive_native_setup_pools();

	/* Initialize XIVE core with our backend */
	if (!xive_core_init(&xive_native_ops, tima, TM_QW3_HV_PHYS,
			    max_prio)) {
		opal_xive_reset(OPAL_XIVE_MODE_EMU);
		return false;
	}
	pr_info("Using %dkB queues\n", 1 << (xive_queue_shift - 10));
	return true;
}

static bool xive_native_provision_pages(void)
{
	u32 i;
	void *p;

	for (i = 0; i < xive_provision_chip_count; i++) {
		u32 chip = xive_provision_chips[i];

		/*
		 * XXX TODO: Try to make the allocation local to the node where
		 * the chip is located
		 */
		p = kmem_cache_alloc(xive_provision_cache, GFP_KERNEL);
		if (!p) {
			pr_err("Failed to allocate provisioning page\n");
			return false;
		}
		opal_xive_donate_page(chip, __pa(p));
	}
	return true;
}

u32 xive_native_alloc_vp_block(u32 max_vcpus)
{
	s64 rc;
	u32 order;

	order = fls(max_vcpus) - 1;
	if (max_vcpus > (1 << order))
		order++;

	pr_debug("VP block alloc, for max VCPUs %d use order %d\n",
		 max_vcpus, order);

	for (;;) {
		rc = opal_xive_alloc_vp_block(order);
		switch (rc) {
		case OPAL_BUSY:
			msleep(1);
			break;
		case OPAL_XIVE_PROVISIONING:
			if (!xive_native_provision_pages())
				return XIVE_INVALID_VP;
			break;
		default:
			if (rc < 0) {
				pr_err("OPAL failed to allocate VCPUs order %d, err %lld\n",
				       order, rc);
				return XIVE_INVALID_VP;
			}
			return rc;
		}
	}
}
EXPORT_SYMBOL_GPL(xive_native_alloc_vp_block);

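/*
 * Worked example of the rounding above: asking for 6 VCPUs yields
 * fls(6) - 1 = 2, and since 6 > (1 << 2) the order is bumped to 3, i.e.
 * a block of 8 VPs. A caller (sketch, e.g. a hypervisor backend) hands
 * out vp_base + vcpu_id and releases everything in one go with
 * xive_native_free_vp_block(vp_base).
 */
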
void xive_native_free_vp_block(u32 vp_base)
{
	s64 rc;

	if (vp_base == XIVE_INVALID_VP)
		return;

	rc = opal_xive_free_vp_block(vp_base);
	if (rc < 0)
		pr_warn("OPAL error %lld freeing VP block\n", rc);
}
EXPORT_SYMBOL_GPL(xive_native_free_vp_block);

int xive_native_enable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, OPAL_XIVE_VP_ENABLED, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_enable_vp);

int xive_native_disable_vp(u32 vp_id)
{
	s64 rc;

	for (;;) {
		rc = opal_xive_set_vp_info(vp_id, 0, 0);
		if (rc != OPAL_BUSY)
			break;
		msleep(1);
	}
	return rc ? -EIO : 0;
}
EXPORT_SYMBOL_GPL(xive_native_disable_vp);

int xive_native_get_vp_info(u32 vp_id, u32 *out_cam_id, u32 *out_chip_id)
{
	__be64 vp_cam_be;
	__be32 vp_chip_id_be;
	s64 rc;

	rc = opal_xive_get_vp_info(vp_id, NULL, &vp_cam_be, NULL, &vp_chip_id_be);
	if (rc)
		return -EIO;
	*out_cam_id = be64_to_cpu(vp_cam_be) & 0xffffffffu;
	*out_chip_id = be32_to_cpu(vp_chip_id_be);

	return 0;
}
EXPORT_SYMBOL_GPL(xive_native_get_vp_info);