1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2013-2017 ARM Limited, All Rights Reserved.
4 * Author: Marc Zyngier <marc.zyngier@arm.com>
5 */
7 #include <linux/acpi.h>
8 #include <linux/acpi_iort.h>
9 #include <linux/bitfield.h>
10 #include <linux/bitmap.h>
11 #include <linux/cpu.h>
12 #include <linux/crash_dump.h>
13 #include <linux/delay.h>
14 #include <linux/dma-iommu.h>
15 #include <linux/efi.h>
16 #include <linux/interrupt.h>
17 #include <linux/iopoll.h>
18 #include <linux/irqdomain.h>
19 #include <linux/list.h>
20 #include <linux/log2.h>
21 #include <linux/memblock.h>
23 #include <linux/msi.h>
25 #include <linux/of_address.h>
26 #include <linux/of_irq.h>
27 #include <linux/of_pci.h>
28 #include <linux/of_platform.h>
29 #include <linux/percpu.h>
30 #include <linux/slab.h>
31 #include <linux/syscore_ops.h>
33 #include <linux/irqchip.h>
34 #include <linux/irqchip/arm-gic-v3.h>
35 #include <linux/irqchip/arm-gic-v4.h>
37 #include <asm/cputype.h>
38 #include <asm/exception.h>
40 #include "irq-gic-common.h"
42 #define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1ULL << 0)
43 #define ITS_FLAGS_WORKAROUND_CAVIUM_22375 (1ULL << 1)
44 #define ITS_FLAGS_WORKAROUND_CAVIUM_23144 (1ULL << 2)
45 #define ITS_FLAGS_SAVE_SUSPEND_STATE (1ULL << 3)
47 #define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0)
48 #define RDIST_FLAGS_RD_TABLES_PREALLOCATED (1 << 1)
50 static u32 lpi_id_bits;
52 /*
53 * We allocate memory for PROPBASE to cover 2 ^ lpi_id_bits LPIs to
54 * deal with (one configuration byte per interrupt). PENDBASE has to
55 * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI).
56 */
57 #define LPI_NRBITS lpi_id_bits
58 #define LPI_PROPBASE_SZ ALIGN(BIT(LPI_NRBITS), SZ_64K)
59 #define LPI_PENDBASE_SZ ALIGN(BIT(LPI_NRBITS) / 8, SZ_64K)
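/*
 * Worked example (illustrative, not from the original source): with
 * lpi_id_bits == 16, LPI_PROPBASE_SZ covers BIT(16) == 64KB of
 * configuration bytes (already 64KB aligned), while the pending
 * table needs BIT(16) / 8 == 8KB of bits, which ALIGN() rounds up
 * to the 64KB PENDBASE granule: both end up as SZ_64K.
 */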
61 #define LPI_PROP_DEFAULT_PRIO GICD_INT_DEF_PRI
63 /*
64 * Collection structure - just an ID, and a redistributor address to
65 * ping. We use one per CPU as a bag of interrupts assigned to this
66 * CPU.
67 */
68 struct its_collection {
69 u64 target_address;
70 u16 col_id;
71 };
73 /*
74 * The ITS_BASER structure - contains memory information, cached
75 * value of BASER register configuration and ITS page size.
76 */
86 /*
87 * The ITS structure - contains most of the infrastructure, with the
88 * top-level MSI domain, the command queue, the collections, and the
89 * list of devices writing to it.
90 *
91 * dev_alloc_lock has to be taken for device allocations, while the
92 * spinlock must be taken to parse data structures such as the device
93 * list.
94 */
95 struct its_node {
96 raw_spinlock_t lock;
97 struct mutex dev_alloc_lock;
98 struct list_head entry;
99 void __iomem *base;
100 void __iomem *sgir_base;
101 phys_addr_t phys_base;
102 struct its_cmd_block *cmd_base;
103 struct its_cmd_block *cmd_write;
104 struct its_baser tables[GITS_BASER_NR_REGS];
105 struct its_collection *collections;
106 struct fwnode_handle *fwnode_handle;
107 u64 (*get_msi_base)(struct its_device *its_dev);
108 u64 typer;
109 u64 cbaser_save;
110 u32 ctlr_save;
111 u32 mpidr;
112 struct list_head its_device_list;
113 u64 flags;
114 unsigned long list_nr;
115 int numa_node;
116 unsigned int msi_domain_flags;
117 u32 pre_its_base; /* for Socionext Synquacer */
118 int vlpi_redist_offset;
119 };
121 #define is_v4(its) (!!((its)->typer & GITS_TYPER_VLPIS))
122 #define is_v4_1(its) (!!((its)->typer & GITS_TYPER_VMAPP))
123 #define device_ids(its) (FIELD_GET(GITS_TYPER_DEVBITS, (its)->typer) + 1)
125 #define ITS_ITT_ALIGN SZ_256
127 /* The maximum number of VPEID bits supported by VLPI commands */
128 #define ITS_MAX_VPEID_BITS \
129 ({ \
130 int nvpeid = 16; \
131 if (gic_rdists->has_rvpeid && \
132 gic_rdists->gicd_typer2 & GICD_TYPER2_VIL) \
133 nvpeid = 1 + (gic_rdists->gicd_typer2 & \
134 GICD_TYPER2_VID); \
135 \
136 nvpeid; \
137 })
138 #define ITS_MAX_VPEID (1 << (ITS_MAX_VPEID_BITS))
140 /* Convert page order to size in bytes */
141 #define PAGE_ORDER_TO_SIZE(o) (PAGE_SIZE << (o))
143 struct event_lpi_map {
144 unsigned long *lpi_map;
145 u16 *col_map;
146 irq_hw_number_t lpi_base;
147 int nr_lpis;
148 raw_spinlock_t vlpi_lock;
149 struct its_vm *vm;
150 struct its_vlpi_map *vlpi_maps;
151 int nr_vlpis;
152 };
154 /*
155 * The ITS view of a device - belongs to an ITS, owns an interrupt
156 * translation table, and a list of interrupts. If some of its
157 * LPIs are injected into a guest (GICv4), the event_map.vm field
158 * indicates which one.
159 */
160 struct its_device {
161 struct list_head entry;
162 struct its_node *its;
163 struct event_lpi_map event_map;
164 void *itt;
165 u32 nr_ites;
166 u32 device_id;
167 bool shared;
168 };
172 struct its_device *dev;
173 struct its_vpe **vpes;
177 static LIST_HEAD(its_nodes);
178 static DEFINE_RAW_SPINLOCK(its_lock);
179 static struct rdists *gic_rdists;
180 static struct irq_domain *its_parent;
182 static unsigned long its_list_map;
183 static u16 vmovp_seq_num;
184 static DEFINE_RAW_SPINLOCK(vmovp_lock);
186 static DEFINE_IDA(its_vpeid_ida);
188 #define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist))
189 #define gic_data_rdist_cpu(cpu) (per_cpu_ptr(gic_rdists->rdist, cpu))
190 #define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base)
191 #define gic_data_rdist_vlpi_base() (gic_data_rdist_rd_base() + SZ_128K)
193 /*
194 * Skip ITSs that have no vLPIs mapped, unless we're on GICv4.1, as we
195 * always have vSGIs mapped.
196 */
197 static bool require_its_list_vmovp(struct its_vm *vm, struct its_node *its)
199 return (gic_rdists->has_rvpeid || vm->vlpi_count[its->list_nr]);
202 static u16 get_its_list(struct its_vm *vm)
204 struct its_node *its;
205 unsigned long its_list = 0;
207 list_for_each_entry(its, &its_nodes, entry) {
211 if (require_its_list_vmovp(vm, its))
212 __set_bit(its->list_nr, &its_list);
215 return (u16)its_list;
218 static inline u32 its_get_event_id(struct irq_data *d)
220 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
221 return d->hwirq - its_dev->event_map.lpi_base;
224 static struct its_collection *dev_event_to_col(struct its_device *its_dev,
227 struct its_node *its = its_dev->its;
229 return its->collections + its_dev->event_map.col_map[event];
232 static struct its_vlpi_map *dev_event_to_vlpi_map(struct its_device *its_dev,
235 if (WARN_ON_ONCE(event >= its_dev->event_map.nr_lpis))
236 return NULL;
238 return &its_dev->event_map.vlpi_maps[event];
241 static struct its_vlpi_map *get_vlpi_map(struct irq_data *d)
243 if (irqd_is_forwarded_to_vcpu(d)) {
244 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
245 u32 event = its_get_event_id(d);
247 return dev_event_to_vlpi_map(its_dev, event);
253 static int vpe_to_cpuid_lock(struct its_vpe *vpe, unsigned long *flags)
254 {
255 raw_spin_lock_irqsave(&vpe->vpe_lock, *flags);
256 return vpe->col_idx;
257 }
259 static void vpe_to_cpuid_unlock(struct its_vpe *vpe, unsigned long flags)
261 raw_spin_unlock_irqrestore(&vpe->vpe_lock, flags);
264 static int irq_to_cpuid_lock(struct irq_data *d, unsigned long *flags)
266 struct its_vlpi_map *map = get_vlpi_map(d);
267 int cpu;
269 if (map) {
270 cpu = vpe_to_cpuid_lock(map->vpe, flags);
271 } else {
272 /* Physical LPIs are already locked via the irq_desc lock */
273 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
274 cpu = its_dev->event_map.col_map[its_get_event_id(d)];
275 /* Keep GCC quiet... */
276 *flags = 0;
277 }
279 return cpu;
282 static void irq_to_cpuid_unlock(struct irq_data *d, unsigned long flags)
284 struct its_vlpi_map *map = get_vlpi_map(d);
286 if (map)
287 vpe_to_cpuid_unlock(map->vpe, flags);
290 static struct its_collection *valid_col(struct its_collection *col)
292 if (WARN_ON_ONCE(col->target_address & GENMASK_ULL(15, 0)))
298 static struct its_vpe *valid_vpe(struct its_node *its, struct its_vpe *vpe)
300 if (valid_col(its->collections + vpe->col_idx))
306 /*
307 * ITS command descriptors - parameters to be encoded in a command
308 * block.
309 */
310 struct its_cmd_desc {
313 struct its_device *dev;
318 struct its_device *dev;
323 struct its_device *dev;
328 struct its_device *dev;
333 struct its_collection *col;
338 struct its_device *dev;
344 struct its_device *dev;
345 struct its_collection *col;
350 struct its_device *dev;
355 struct its_collection *col;
364 struct its_collection *col;
370 struct its_device *dev;
378 struct its_device *dev;
385 struct its_collection *col;
405 /*
406 * The ITS command block, which is what the ITS actually parses.
407 */
408 struct its_cmd_block {
409 union {
410 u64 raw_cmd[4];
411 __le64 raw_cmd_le[4];
412 };
413 };
415 #define ITS_CMD_QUEUE_SZ SZ_64K
416 #define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block))
418 typedef struct its_collection *(*its_cmd_builder_t)(struct its_node *,
419 struct its_cmd_block *,
420 struct its_cmd_desc *);
422 typedef struct its_vpe *(*its_cmd_vbuilder_t)(struct its_node *,
423 struct its_cmd_block *,
424 struct its_cmd_desc *);
426 static void its_mask_encode(u64 *raw_cmd, u64 val, int h, int l)
428 u64 mask = GENMASK_ULL(h, l);
429 *raw_cmd &= ~mask;
430 *raw_cmd |= (val << l) & mask;
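/*
 * Illustrative example (not from the original source): encoding a
 * device ID of 0x20 via its_encode_devid() below computes
 * mask = GENMASK_ULL(63, 32), clears bits [63:32] of raw_cmd[0],
 * then ORs in (0x20ULL << 32), leaving the command opcode in
 * bits [7:0] untouched.
 */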
433 static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr)
435 its_mask_encode(&cmd->raw_cmd[0], cmd_nr, 7, 0);
438 static void its_encode_devid(struct its_cmd_block *cmd, u32 devid)
440 its_mask_encode(&cmd->raw_cmd[0], devid, 63, 32);
443 static void its_encode_event_id(struct its_cmd_block *cmd, u32 id)
445 its_mask_encode(&cmd->raw_cmd[1], id, 31, 0);
448 static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id)
450 its_mask_encode(&cmd->raw_cmd[1], phys_id, 63, 32);
453 static void its_encode_size(struct its_cmd_block *cmd, u8 size)
455 its_mask_encode(&cmd->raw_cmd[1], size, 4, 0);
458 static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr)
460 its_mask_encode(&cmd->raw_cmd[2], itt_addr >> 8, 51, 8);
463 static void its_encode_valid(struct its_cmd_block *cmd, int valid)
465 its_mask_encode(&cmd->raw_cmd[2], !!valid, 63, 63);
468 static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr)
470 its_mask_encode(&cmd->raw_cmd[2], target_addr >> 16, 51, 16);
473 static void its_encode_collection(struct its_cmd_block *cmd, u16 col)
475 its_mask_encode(&cmd->raw_cmd[2], col, 15, 0);
478 static void its_encode_vpeid(struct its_cmd_block *cmd, u16 vpeid)
480 its_mask_encode(&cmd->raw_cmd[1], vpeid, 47, 32);
483 static void its_encode_virt_id(struct its_cmd_block *cmd, u32 virt_id)
485 its_mask_encode(&cmd->raw_cmd[2], virt_id, 31, 0);
488 static void its_encode_db_phys_id(struct its_cmd_block *cmd, u32 db_phys_id)
490 its_mask_encode(&cmd->raw_cmd[2], db_phys_id, 63, 32);
493 static void its_encode_db_valid(struct its_cmd_block *cmd, bool db_valid)
495 its_mask_encode(&cmd->raw_cmd[2], db_valid, 0, 0);
498 static void its_encode_seq_num(struct its_cmd_block *cmd, u16 seq_num)
500 its_mask_encode(&cmd->raw_cmd[0], seq_num, 47, 32);
503 static void its_encode_its_list(struct its_cmd_block *cmd, u16 its_list)
505 its_mask_encode(&cmd->raw_cmd[1], its_list, 15, 0);
508 static void its_encode_vpt_addr(struct its_cmd_block *cmd, u64 vpt_pa)
510 its_mask_encode(&cmd->raw_cmd[3], vpt_pa >> 16, 51, 16);
513 static void its_encode_vpt_size(struct its_cmd_block *cmd, u8 vpt_size)
515 its_mask_encode(&cmd->raw_cmd[3], vpt_size, 4, 0);
518 static void its_encode_vconf_addr(struct its_cmd_block *cmd, u64 vconf_pa)
520 its_mask_encode(&cmd->raw_cmd[0], vconf_pa >> 16, 51, 16);
523 static void its_encode_alloc(struct its_cmd_block *cmd, bool alloc)
525 its_mask_encode(&cmd->raw_cmd[0], alloc, 8, 8);
528 static void its_encode_ptz(struct its_cmd_block *cmd, bool ptz)
530 its_mask_encode(&cmd->raw_cmd[0], ptz, 9, 9);
533 static void its_encode_vmapp_default_db(struct its_cmd_block *cmd,
536 its_mask_encode(&cmd->raw_cmd[1], vpe_db_lpi, 31, 0);
539 static void its_encode_vmovp_default_db(struct its_cmd_block *cmd,
542 its_mask_encode(&cmd->raw_cmd[3], vpe_db_lpi, 31, 0);
545 static void its_encode_db(struct its_cmd_block *cmd, bool db)
547 its_mask_encode(&cmd->raw_cmd[2], db, 63, 63);
550 static void its_encode_sgi_intid(struct its_cmd_block *cmd, u8 sgi)
552 its_mask_encode(&cmd->raw_cmd[0], sgi, 35, 32);
555 static void its_encode_sgi_priority(struct its_cmd_block *cmd, u8 prio)
557 its_mask_encode(&cmd->raw_cmd[0], prio >> 4, 23, 20);
560 static void its_encode_sgi_group(struct its_cmd_block *cmd, bool grp)
562 its_mask_encode(&cmd->raw_cmd[0], grp, 10, 10);
565 static void its_encode_sgi_clear(struct its_cmd_block *cmd, bool clr)
567 its_mask_encode(&cmd->raw_cmd[0], clr, 9, 9);
570 static void its_encode_sgi_enable(struct its_cmd_block *cmd, bool en)
572 its_mask_encode(&cmd->raw_cmd[0], en, 8, 8);
575 static inline void its_fixup_cmd(struct its_cmd_block *cmd)
577 /* Let's fixup BE commands */
578 cmd->raw_cmd_le[0] = cpu_to_le64(cmd->raw_cmd[0]);
579 cmd->raw_cmd_le[1] = cpu_to_le64(cmd->raw_cmd[1]);
580 cmd->raw_cmd_le[2] = cpu_to_le64(cmd->raw_cmd[2]);
581 cmd->raw_cmd_le[3] = cpu_to_le64(cmd->raw_cmd[3]);
584 static struct its_collection *its_build_mapd_cmd(struct its_node *its,
585 struct its_cmd_block *cmd,
586 struct its_cmd_desc *desc)
588 unsigned long itt_addr;
589 u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites);
591 itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt);
592 itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN);
594 its_encode_cmd(cmd, GITS_CMD_MAPD);
595 its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id);
596 its_encode_size(cmd, size - 1);
597 its_encode_itt(cmd, itt_addr);
598 its_encode_valid(cmd, desc->its_mapd_cmd.valid);
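/*
 * Worked example (illustrative): a device with nr_ites == 32 yields
 * size == ilog2(32) == 5, and MAPD encodes size - 1 == 4, as the
 * Size field holds the number of EventID bits minus one.
 */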
605 static struct its_collection *its_build_mapc_cmd(struct its_node *its,
606 struct its_cmd_block *cmd,
607 struct its_cmd_desc *desc)
609 its_encode_cmd(cmd, GITS_CMD_MAPC);
610 its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id);
611 its_encode_target(cmd, desc->its_mapc_cmd.col->target_address);
612 its_encode_valid(cmd, desc->its_mapc_cmd.valid);
616 return desc->its_mapc_cmd.col;
619 static struct its_collection *its_build_mapti_cmd(struct its_node *its,
620 struct its_cmd_block *cmd,
621 struct its_cmd_desc *desc)
623 struct its_collection *col;
625 col = dev_event_to_col(desc->its_mapti_cmd.dev,
626 desc->its_mapti_cmd.event_id);
628 its_encode_cmd(cmd, GITS_CMD_MAPTI);
629 its_encode_devid(cmd, desc->its_mapti_cmd.dev->device_id);
630 its_encode_event_id(cmd, desc->its_mapti_cmd.event_id);
631 its_encode_phys_id(cmd, desc->its_mapti_cmd.phys_id);
632 its_encode_collection(cmd, col->col_id);
636 return valid_col(col);
639 static struct its_collection *its_build_movi_cmd(struct its_node *its,
640 struct its_cmd_block *cmd,
641 struct its_cmd_desc *desc)
643 struct its_collection *col;
645 col = dev_event_to_col(desc->its_movi_cmd.dev,
646 desc->its_movi_cmd.event_id);
648 its_encode_cmd(cmd, GITS_CMD_MOVI);
649 its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id);
650 its_encode_event_id(cmd, desc->its_movi_cmd.event_id);
651 its_encode_collection(cmd, desc->its_movi_cmd.col->col_id);
655 return valid_col(col);
658 static struct its_collection *its_build_discard_cmd(struct its_node *its,
659 struct its_cmd_block *cmd,
660 struct its_cmd_desc *desc)
662 struct its_collection *col;
664 col = dev_event_to_col(desc->its_discard_cmd.dev,
665 desc->its_discard_cmd.event_id);
667 its_encode_cmd(cmd, GITS_CMD_DISCARD);
668 its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id);
669 its_encode_event_id(cmd, desc->its_discard_cmd.event_id);
673 return valid_col(col);
676 static struct its_collection *its_build_inv_cmd(struct its_node *its,
677 struct its_cmd_block *cmd,
678 struct its_cmd_desc *desc)
680 struct its_collection *col;
682 col = dev_event_to_col(desc->its_inv_cmd.dev,
683 desc->its_inv_cmd.event_id);
685 its_encode_cmd(cmd, GITS_CMD_INV);
686 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
687 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
691 return valid_col(col);
694 static struct its_collection *its_build_int_cmd(struct its_node *its,
695 struct its_cmd_block *cmd,
696 struct its_cmd_desc *desc)
698 struct its_collection *col;
700 col = dev_event_to_col(desc->its_int_cmd.dev,
701 desc->its_int_cmd.event_id);
703 its_encode_cmd(cmd, GITS_CMD_INT);
704 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
705 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
709 return valid_col(col);
712 static struct its_collection *its_build_clear_cmd(struct its_node *its,
713 struct its_cmd_block *cmd,
714 struct its_cmd_desc *desc)
716 struct its_collection *col;
718 col = dev_event_to_col(desc->its_clear_cmd.dev,
719 desc->its_clear_cmd.event_id);
721 its_encode_cmd(cmd, GITS_CMD_CLEAR);
722 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
723 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
727 return valid_col(col);
730 static struct its_collection *its_build_invall_cmd(struct its_node *its,
731 struct its_cmd_block *cmd,
732 struct its_cmd_desc *desc)
734 its_encode_cmd(cmd, GITS_CMD_INVALL);
735 its_encode_collection(cmd, desc->its_invall_cmd.col->col_id);
742 static struct its_vpe *its_build_vinvall_cmd(struct its_node *its,
743 struct its_cmd_block *cmd,
744 struct its_cmd_desc *desc)
746 its_encode_cmd(cmd, GITS_CMD_VINVALL);
747 its_encode_vpeid(cmd, desc->its_vinvall_cmd.vpe->vpe_id);
751 return valid_vpe(its, desc->its_vinvall_cmd.vpe);
754 static struct its_vpe *its_build_vmapp_cmd(struct its_node *its,
755 struct its_cmd_block *cmd,
756 struct its_cmd_desc *desc)
758 unsigned long vpt_addr, vconf_addr;
762 its_encode_cmd(cmd, GITS_CMD_VMAPP);
763 its_encode_vpeid(cmd, desc->its_vmapp_cmd.vpe->vpe_id);
764 its_encode_valid(cmd, desc->its_vmapp_cmd.valid);
766 if (!desc->its_vmapp_cmd.valid) {
768 alloc = !atomic_dec_return(&desc->its_vmapp_cmd.vpe->vmapp_count);
769 its_encode_alloc(cmd, alloc);
775 vpt_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->vpt_page));
776 target = desc->its_vmapp_cmd.col->target_address + its->vlpi_redist_offset;
778 its_encode_target(cmd, target);
779 its_encode_vpt_addr(cmd, vpt_addr);
780 its_encode_vpt_size(cmd, LPI_NRBITS - 1);
785 vconf_addr = virt_to_phys(page_address(desc->its_vmapp_cmd.vpe->its_vm->vprop_page));
787 alloc = !atomic_fetch_inc(&desc->its_vmapp_cmd.vpe->vmapp_count);
789 its_encode_alloc(cmd, alloc);
791 /* We can only signal PTZ when alloc==1. Why do we have two bits? */
792 its_encode_ptz(cmd, alloc);
793 its_encode_vconf_addr(cmd, vconf_addr);
794 its_encode_vmapp_default_db(cmd, desc->its_vmapp_cmd.vpe->vpe_db_lpi);
799 return valid_vpe(its, desc->its_vmapp_cmd.vpe);
802 static struct its_vpe *its_build_vmapti_cmd(struct its_node *its,
803 struct its_cmd_block *cmd,
804 struct its_cmd_desc *desc)
808 if (!is_v4_1(its) && desc->its_vmapti_cmd.db_enabled)
809 db = desc->its_vmapti_cmd.vpe->vpe_db_lpi;
813 its_encode_cmd(cmd, GITS_CMD_VMAPTI);
814 its_encode_devid(cmd, desc->its_vmapti_cmd.dev->device_id);
815 its_encode_vpeid(cmd, desc->its_vmapti_cmd.vpe->vpe_id);
816 its_encode_event_id(cmd, desc->its_vmapti_cmd.event_id);
817 its_encode_db_phys_id(cmd, db);
818 its_encode_virt_id(cmd, desc->its_vmapti_cmd.virt_id);
822 return valid_vpe(its, desc->its_vmapti_cmd.vpe);
825 static struct its_vpe *its_build_vmovi_cmd(struct its_node *its,
826 struct its_cmd_block *cmd,
827 struct its_cmd_desc *desc)
831 if (!is_v4_1(its) && desc->its_vmovi_cmd.db_enabled)
832 db = desc->its_vmovi_cmd.vpe->vpe_db_lpi;
836 its_encode_cmd(cmd, GITS_CMD_VMOVI);
837 its_encode_devid(cmd, desc->its_vmovi_cmd.dev->device_id);
838 its_encode_vpeid(cmd, desc->its_vmovi_cmd.vpe->vpe_id);
839 its_encode_event_id(cmd, desc->its_vmovi_cmd.event_id);
840 its_encode_db_phys_id(cmd, db);
841 its_encode_db_valid(cmd, true);
845 return valid_vpe(its, desc->its_vmovi_cmd.vpe);
848 static struct its_vpe *its_build_vmovp_cmd(struct its_node *its,
849 struct its_cmd_block *cmd,
850 struct its_cmd_desc *desc)
854 target = desc->its_vmovp_cmd.col->target_address + its->vlpi_redist_offset;
855 its_encode_cmd(cmd, GITS_CMD_VMOVP);
856 its_encode_seq_num(cmd, desc->its_vmovp_cmd.seq_num);
857 its_encode_its_list(cmd, desc->its_vmovp_cmd.its_list);
858 its_encode_vpeid(cmd, desc->its_vmovp_cmd.vpe->vpe_id);
859 its_encode_target(cmd, target);
862 its_encode_db(cmd, true);
863 its_encode_vmovp_default_db(cmd, desc->its_vmovp_cmd.vpe->vpe_db_lpi);
868 return valid_vpe(its, desc->its_vmovp_cmd.vpe);
871 static struct its_vpe *its_build_vinv_cmd(struct its_node *its,
872 struct its_cmd_block *cmd,
873 struct its_cmd_desc *desc)
875 struct its_vlpi_map *map;
877 map = dev_event_to_vlpi_map(desc->its_inv_cmd.dev,
878 desc->its_inv_cmd.event_id);
880 its_encode_cmd(cmd, GITS_CMD_INV);
881 its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id);
882 its_encode_event_id(cmd, desc->its_inv_cmd.event_id);
886 return valid_vpe(its, map->vpe);
889 static struct its_vpe *its_build_vint_cmd(struct its_node *its,
890 struct its_cmd_block *cmd,
891 struct its_cmd_desc *desc)
893 struct its_vlpi_map *map;
895 map = dev_event_to_vlpi_map(desc->its_int_cmd.dev,
896 desc->its_int_cmd.event_id);
898 its_encode_cmd(cmd, GITS_CMD_INT);
899 its_encode_devid(cmd, desc->its_int_cmd.dev->device_id);
900 its_encode_event_id(cmd, desc->its_int_cmd.event_id);
904 return valid_vpe(its, map->vpe);
907 static struct its_vpe *its_build_vclear_cmd(struct its_node *its,
908 struct its_cmd_block *cmd,
909 struct its_cmd_desc *desc)
911 struct its_vlpi_map *map;
913 map = dev_event_to_vlpi_map(desc->its_clear_cmd.dev,
914 desc->its_clear_cmd.event_id);
916 its_encode_cmd(cmd, GITS_CMD_CLEAR);
917 its_encode_devid(cmd, desc->its_clear_cmd.dev->device_id);
918 its_encode_event_id(cmd, desc->its_clear_cmd.event_id);
922 return valid_vpe(its, map->vpe);
925 static struct its_vpe *its_build_invdb_cmd(struct its_node *its,
926 struct its_cmd_block *cmd,
927 struct its_cmd_desc *desc)
929 if (WARN_ON(!is_v4_1(its)))
932 its_encode_cmd(cmd, GITS_CMD_INVDB);
933 its_encode_vpeid(cmd, desc->its_invdb_cmd.vpe->vpe_id);
937 return valid_vpe(its, desc->its_invdb_cmd.vpe);
940 static struct its_vpe *its_build_vsgi_cmd(struct its_node *its,
941 struct its_cmd_block *cmd,
942 struct its_cmd_desc *desc)
944 if (WARN_ON(!is_v4_1(its)))
947 its_encode_cmd(cmd, GITS_CMD_VSGI);
948 its_encode_vpeid(cmd, desc->its_vsgi_cmd.vpe->vpe_id);
949 its_encode_sgi_intid(cmd, desc->its_vsgi_cmd.sgi);
950 its_encode_sgi_priority(cmd, desc->its_vsgi_cmd.priority);
951 its_encode_sgi_group(cmd, desc->its_vsgi_cmd.group);
952 its_encode_sgi_clear(cmd, desc->its_vsgi_cmd.clear);
953 its_encode_sgi_enable(cmd, desc->its_vsgi_cmd.enable);
957 return valid_vpe(its, desc->its_vsgi_cmd.vpe);
960 static u64 its_cmd_ptr_to_offset(struct its_node *its,
961 struct its_cmd_block *ptr)
963 return (ptr - its->cmd_base) * sizeof(*ptr);
966 static int its_queue_full(struct its_node *its)
971 widx = its->cmd_write - its->cmd_base;
972 ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block);
974 /* This is incredibly unlikely to happen, unless the ITS locks up. */
975 if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx)
976 return 1;
978 return 0;
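/*
 * Illustrative note: each its_cmd_block is 32 bytes, so the 64KB
 * queue holds 2048 entries. With widx == 2047 and ridx == 0, for
 * instance, (2047 + 1) % 2048 == 0 == ridx and the queue reports
 * full; one slot is sacrificed to tell "full" apart from "empty".
 */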
981 static struct its_cmd_block *its_allocate_entry(struct its_node *its)
983 struct its_cmd_block *cmd;
984 u32 count = 1000000; /* 1s! */
986 while (its_queue_full(its)) {
989 pr_err_ratelimited("ITS queue not draining\n");
996 cmd = its->cmd_write++;
998 /* Handle queue wrapping */
999 if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES))
1000 its->cmd_write = its->cmd_base;
1003 cmd->raw_cmd[0] = 0;
1004 cmd->raw_cmd[1] = 0;
1005 cmd->raw_cmd[2] = 0;
1006 cmd->raw_cmd[3] = 0;
1011 static struct its_cmd_block *its_post_commands(struct its_node *its)
1013 u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write);
1015 writel_relaxed(wr, its->base + GITS_CWRITER);
1017 return its->cmd_write;
1020 static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd)
1023 * Make sure the commands written to memory are observable by
1026 if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING)
1027 gic_flush_dcache_to_poc(cmd, sizeof(*cmd));
1032 static int its_wait_for_range_completion(struct its_node *its,
1034 struct its_cmd_block *to)
1036 u64 rd_idx, to_idx, linear_idx;
1037 u32 count = 1000000; /* 1s! */
1039 /* Linearize to_idx if the command set has wrapped around */
1040 to_idx = its_cmd_ptr_to_offset(its, to);
1041 if (to_idx < prev_idx)
1042 to_idx += ITS_CMD_QUEUE_SZ;
1044 linear_idx = prev_idx;
1049 rd_idx = readl_relaxed(its->base + GITS_CREADR);
1052 * Compute the read pointer progress, taking the
1053 * potential wrap-around into account.
1055 delta = rd_idx - prev_idx;
1056 if (rd_idx < prev_idx)
1057 delta += ITS_CMD_QUEUE_SZ;
1059 linear_idx += delta;
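/*
 * Worked example (illustrative): with prev_idx == 0xFFE0 and a
 * post-wrap read of rd_idx == 0x0040, delta underflows and the
 * correction yields 0x0040 - 0xFFE0 + 0x10000 == 0x60, so
 * linear_idx grows monotonically and the comparison against the
 * linearized to_idx is wrap-safe.
 */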
1060 if (linear_idx >= to_idx)
1065 pr_err_ratelimited("ITS queue timeout (%llu %llu)\n",
1066 to_idx, linear_idx);
1077 /* Warning, macro hell follows */
1078 #define BUILD_SINGLE_CMD_FUNC(name, buildtype, synctype, buildfn) \
1079 void name(struct its_node *its, \
1080 buildtype builder, \
1081 struct its_cmd_desc *desc) \
1083 struct its_cmd_block *cmd, *sync_cmd, *next_cmd; \
1084 synctype *sync_obj; \
1085 unsigned long flags; \
1088 raw_spin_lock_irqsave(&its->lock, flags); \
1090 cmd = its_allocate_entry(its); \
1091 if (!cmd) { /* We're soooooo screwed... */ \
1092 raw_spin_unlock_irqrestore(&its->lock, flags); \
1095 sync_obj = builder(its, cmd, desc); \
1096 its_flush_cmd(its, cmd); \
1099 sync_cmd = its_allocate_entry(its); \
1103 buildfn(its, sync_cmd, sync_obj); \
1104 its_flush_cmd(its, sync_cmd); \
1108 rd_idx = readl_relaxed(its->base + GITS_CREADR); \
1109 next_cmd = its_post_commands(its); \
1110 raw_spin_unlock_irqrestore(&its->lock, flags); \
1112 if (its_wait_for_range_completion(its, rd_idx, next_cmd)) \
1113 pr_err_ratelimited("ITS cmd %ps failed\n", builder); \
1116 static void its_build_sync_cmd(struct its_node *its,
1117 struct its_cmd_block *sync_cmd,
1118 struct its_collection *sync_col)
1120 its_encode_cmd(sync_cmd, GITS_CMD_SYNC);
1121 its_encode_target(sync_cmd, sync_col->target_address);
1123 its_fixup_cmd(sync_cmd);
1126 static BUILD_SINGLE_CMD_FUNC(its_send_single_command, its_cmd_builder_t,
1127 struct its_collection, its_build_sync_cmd)
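/*
 * Illustrative note (an assumption spelled out, not from the original
 * source): the instantiation above expands to
 *
 *   static void its_send_single_command(struct its_node *its,
 *                                       its_cmd_builder_t builder,
 *                                       struct its_cmd_desc *desc);
 *
 * which queues the built command, chases it with a SYNC targeting the
 * collection returned by the builder, and waits for the whole range
 * to be consumed by the ITS.
 */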
1129 static void its_build_vsync_cmd(struct its_node *its,
1130 struct its_cmd_block *sync_cmd,
1131 struct its_vpe *sync_vpe)
1133 its_encode_cmd(sync_cmd, GITS_CMD_VSYNC);
1134 its_encode_vpeid(sync_cmd, sync_vpe->vpe_id);
1136 its_fixup_cmd(sync_cmd);
1139 static BUILD_SINGLE_CMD_FUNC(its_send_single_vcommand, its_cmd_vbuilder_t,
1140 struct its_vpe, its_build_vsync_cmd)
1142 static void its_send_int(struct its_device *dev, u32 event_id)
1144 struct its_cmd_desc desc;
1146 desc.its_int_cmd.dev = dev;
1147 desc.its_int_cmd.event_id = event_id;
1149 its_send_single_command(dev->its, its_build_int_cmd, &desc);
1152 static void its_send_clear(struct its_device *dev, u32 event_id)
1154 struct its_cmd_desc desc;
1156 desc.its_clear_cmd.dev = dev;
1157 desc.its_clear_cmd.event_id = event_id;
1159 its_send_single_command(dev->its, its_build_clear_cmd, &desc);
1162 static void its_send_inv(struct its_device *dev, u32 event_id)
1164 struct its_cmd_desc desc;
1166 desc.its_inv_cmd.dev = dev;
1167 desc.its_inv_cmd.event_id = event_id;
1169 its_send_single_command(dev->its, its_build_inv_cmd, &desc);
1172 static void its_send_mapd(struct its_device *dev, int valid)
1174 struct its_cmd_desc desc;
1176 desc.its_mapd_cmd.dev = dev;
1177 desc.its_mapd_cmd.valid = !!valid;
1179 its_send_single_command(dev->its, its_build_mapd_cmd, &desc);
1182 static void its_send_mapc(struct its_node *its, struct its_collection *col,
1185 struct its_cmd_desc desc;
1187 desc.its_mapc_cmd.col = col;
1188 desc.its_mapc_cmd.valid = !!valid;
1190 its_send_single_command(its, its_build_mapc_cmd, &desc);
1193 static void its_send_mapti(struct its_device *dev, u32 irq_id, u32 id)
1195 struct its_cmd_desc desc;
1197 desc.its_mapti_cmd.dev = dev;
1198 desc.its_mapti_cmd.phys_id = irq_id;
1199 desc.its_mapti_cmd.event_id = id;
1201 its_send_single_command(dev->its, its_build_mapti_cmd, &desc);
1204 static void its_send_movi(struct its_device *dev,
1205 struct its_collection *col, u32 id)
1207 struct its_cmd_desc desc;
1209 desc.its_movi_cmd.dev = dev;
1210 desc.its_movi_cmd.col = col;
1211 desc.its_movi_cmd.event_id = id;
1213 its_send_single_command(dev->its, its_build_movi_cmd, &desc);
1216 static void its_send_discard(struct its_device *dev, u32 id)
1218 struct its_cmd_desc desc;
1220 desc.its_discard_cmd.dev = dev;
1221 desc.its_discard_cmd.event_id = id;
1223 its_send_single_command(dev->its, its_build_discard_cmd, &desc);
1226 static void its_send_invall(struct its_node *its, struct its_collection *col)
1228 struct its_cmd_desc desc;
1230 desc.its_invall_cmd.col = col;
1232 its_send_single_command(its, its_build_invall_cmd, &desc);
1235 static void its_send_vmapti(struct its_device *dev, u32 id)
1237 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1238 struct its_cmd_desc desc;
1240 desc.its_vmapti_cmd.vpe = map->vpe;
1241 desc.its_vmapti_cmd.dev = dev;
1242 desc.its_vmapti_cmd.virt_id = map->vintid;
1243 desc.its_vmapti_cmd.event_id = id;
1244 desc.its_vmapti_cmd.db_enabled = map->db_enabled;
1246 its_send_single_vcommand(dev->its, its_build_vmapti_cmd, &desc);
1249 static void its_send_vmovi(struct its_device *dev, u32 id)
1251 struct its_vlpi_map *map = dev_event_to_vlpi_map(dev, id);
1252 struct its_cmd_desc desc;
1254 desc.its_vmovi_cmd.vpe = map->vpe;
1255 desc.its_vmovi_cmd.dev = dev;
1256 desc.its_vmovi_cmd.event_id = id;
1257 desc.its_vmovi_cmd.db_enabled = map->db_enabled;
1259 its_send_single_vcommand(dev->its, its_build_vmovi_cmd, &desc);
1262 static void its_send_vmapp(struct its_node *its,
1263 struct its_vpe *vpe, bool valid)
1265 struct its_cmd_desc desc;
1267 desc.its_vmapp_cmd.vpe = vpe;
1268 desc.its_vmapp_cmd.valid = valid;
1269 desc.its_vmapp_cmd.col = &its->collections[vpe->col_idx];
1271 its_send_single_vcommand(its, its_build_vmapp_cmd, &desc);
1274 static void its_send_vmovp(struct its_vpe *vpe)
1276 struct its_cmd_desc desc = {};
1277 struct its_node *its;
1278 unsigned long flags;
1279 int col_id = vpe->col_idx;
1281 desc.its_vmovp_cmd.vpe = vpe;
1283 if (!its_list_map) {
1284 its = list_first_entry(&its_nodes, struct its_node, entry);
1285 desc.its_vmovp_cmd.col = &its->collections[col_id];
1286 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1290 /*
1291 * Yet another marvel of the architecture. If using the
1292 * its_list "feature", we need to make sure that all ITSs
1293 * receive all VMOVP commands in the same order. The only way
1294 * to guarantee this is to make vmovp a serialization point.
1295 *
1296 * Wall <-- Head.
1297 */
1298 raw_spin_lock_irqsave(&vmovp_lock, flags);
1300 desc.its_vmovp_cmd.seq_num = vmovp_seq_num++;
1301 desc.its_vmovp_cmd.its_list = get_its_list(vpe->its_vm);
1304 list_for_each_entry(its, &its_nodes, entry) {
1308 if (!require_its_list_vmovp(vpe->its_vm, its))
1311 desc.its_vmovp_cmd.col = &its->collections[col_id];
1312 its_send_single_vcommand(its, its_build_vmovp_cmd, &desc);
1315 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1318 static void its_send_vinvall(struct its_node *its, struct its_vpe *vpe)
1320 struct its_cmd_desc desc;
1322 desc.its_vinvall_cmd.vpe = vpe;
1323 its_send_single_vcommand(its, its_build_vinvall_cmd, &desc);
1326 static void its_send_vinv(struct its_device *dev, u32 event_id)
1328 struct its_cmd_desc desc;
1330 /*
1331 * There is no real VINV command. This is just a normal INV,
1332 * with a VSYNC instead of a SYNC.
1333 */
1334 desc.its_inv_cmd.dev = dev;
1335 desc.its_inv_cmd.event_id = event_id;
1337 its_send_single_vcommand(dev->its, its_build_vinv_cmd, &desc);
1340 static void its_send_vint(struct its_device *dev, u32 event_id)
1342 struct its_cmd_desc desc;
1344 /*
1345 * There is no real VINT command. This is just a normal INT,
1346 * with a VSYNC instead of a SYNC.
1347 */
1348 desc.its_int_cmd.dev = dev;
1349 desc.its_int_cmd.event_id = event_id;
1351 its_send_single_vcommand(dev->its, its_build_vint_cmd, &desc);
1354 static void its_send_vclear(struct its_device *dev, u32 event_id)
1356 struct its_cmd_desc desc;
1358 /*
1359 * There is no real VCLEAR command. This is just a normal CLEAR,
1360 * with a VSYNC instead of a SYNC.
1361 */
1362 desc.its_clear_cmd.dev = dev;
1363 desc.its_clear_cmd.event_id = event_id;
1365 its_send_single_vcommand(dev->its, its_build_vclear_cmd, &desc);
1368 static void its_send_invdb(struct its_node *its, struct its_vpe *vpe)
1370 struct its_cmd_desc desc;
1372 desc.its_invdb_cmd.vpe = vpe;
1373 its_send_single_vcommand(its, its_build_invdb_cmd, &desc);
1376 /*
1377 * irqchip functions - assumes MSI, mostly.
1378 */
1379 static void lpi_write_config(struct irq_data *d, u8 clr, u8 set)
1381 struct its_vlpi_map *map = get_vlpi_map(d);
1382 irq_hw_number_t hwirq;
1387 va = page_address(map->vm->vprop_page);
1388 hwirq = map->vintid;
1390 /* Remember the updated property */
1391 map->properties &= ~clr;
1392 map->properties |= set | LPI_PROP_GROUP1;
1394 va = gic_rdists->prop_table_va;
1398 cfg = va + hwirq - 8192;
1399 *cfg &= ~clr;
1400 *cfg |= set | LPI_PROP_GROUP1;
1402 /*
1403 * Make the above write visible to the redistributors.
1404 * And yes, we're flushing exactly: One. Single. Byte.
1405 */
1407 if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING)
1408 gic_flush_dcache_to_poc(cfg, sizeof(*cfg));
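/*
 * Worked example (illustrative): LPIs start at INTID 8192, so the
 * configuration byte for hwirq 8192 is the very first byte of the
 * property table and hwirq 8200 lives at offset 8. Each byte holds
 * the priority in its upper bits and the enable bit in bit 0.
 */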
1413 static void wait_for_syncr(void __iomem *rdbase)
1414 {
1415 while (readl_relaxed(rdbase + GICR_SYNCR) & 1)
1416 cpu_relax();
1417 }
1419 static void direct_lpi_inv(struct irq_data *d)
1421 struct its_vlpi_map *map = get_vlpi_map(d);
1422 void __iomem *rdbase;
1423 unsigned long flags;
1428 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1430 WARN_ON(!is_v4_1(its_dev->its));
1432 val = GICR_INVLPIR_V;
1433 val |= FIELD_PREP(GICR_INVLPIR_VPEID, map->vpe->vpe_id);
1434 val |= FIELD_PREP(GICR_INVLPIR_INTID, map->vintid);
1439 /* Target the redistributor this LPI is currently routed to */
1440 cpu = irq_to_cpuid_lock(d, &flags);
1441 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
1442 rdbase = per_cpu_ptr(gic_rdists->rdist, cpu)->rd_base;
1443 gic_write_lpir(val, rdbase + GICR_INVLPIR);
1445 wait_for_syncr(rdbase);
1446 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
1447 irq_to_cpuid_unlock(d, flags);
1450 static void lpi_update_config(struct irq_data *d, u8 clr, u8 set)
1452 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1454 lpi_write_config(d, clr, set);
1455 if (gic_rdists->has_direct_lpi &&
1456 (is_v4_1(its_dev->its) || !irqd_is_forwarded_to_vcpu(d)))
1457 direct_lpi_inv(d);
1458 else if (!irqd_is_forwarded_to_vcpu(d))
1459 its_send_inv(its_dev, its_get_event_id(d));
1460 else
1461 its_send_vinv(its_dev, its_get_event_id(d));
1464 static void its_vlpi_set_doorbell(struct irq_data *d, bool enable)
1466 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1467 u32 event = its_get_event_id(d);
1468 struct its_vlpi_map *map;
1470 /*
1471 * GICv4.1 does away with the per-LPI nonsense, nothing to do
1472 * here.
1473 */
1474 if (is_v4_1(its_dev->its))
1477 map = dev_event_to_vlpi_map(its_dev, event);
1479 if (map->db_enabled == enable)
1482 map->db_enabled = enable;
1484 /*
1485 * More fun with the architecture:
1486 *
1487 * Ideally, we'd issue a VMAPTI to set the doorbell to its LPI
1488 * value or to 1023, depending on the enable bit. But that
1489 * would be issuing a mapping for an /existing/ DevID+EventID
1490 * pair, which is UNPREDICTABLE. Instead, let's issue a VMOVI
1491 * to the /same/ vPE, using this opportunity to adjust the
1492 * doorbell. Mouahahahaha. We loves it, Precious.
1493 */
1494 its_send_vmovi(its_dev, event);
1497 static void its_mask_irq(struct irq_data *d)
1499 if (irqd_is_forwarded_to_vcpu(d))
1500 its_vlpi_set_doorbell(d, false);
1502 lpi_update_config(d, LPI_PROP_ENABLED, 0);
1505 static void its_unmask_irq(struct irq_data *d)
1507 if (irqd_is_forwarded_to_vcpu(d))
1508 its_vlpi_set_doorbell(d, true);
1510 lpi_update_config(d, 0, LPI_PROP_ENABLED);
1513 static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val,
1514 bool force)
1515 {
1516 unsigned int cpu;
1517 const struct cpumask *cpu_mask = cpu_online_mask;
1518 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1519 struct its_collection *target_col;
1520 u32 id = its_get_event_id(d);
1522 /* A forwarded interrupt should use irq_set_vcpu_affinity */
1523 if (irqd_is_forwarded_to_vcpu(d))
1526 /* lpi cannot be routed to a redistributor that is on a foreign node */
1527 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
1528 if (its_dev->its->numa_node >= 0) {
1529 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
1530 if (!cpumask_intersects(mask_val, cpu_mask))
1531 return -EINVAL;
1535 cpu = cpumask_any_and(mask_val, cpu_mask);
1537 if (cpu >= nr_cpu_ids)
1538 return -EINVAL;
1540 /* don't set the affinity when the target cpu is the same as the current one */
1541 if (cpu != its_dev->event_map.col_map[id]) {
1542 target_col = &its_dev->its->collections[cpu];
1543 its_send_movi(its_dev, target_col, id);
1544 its_dev->event_map.col_map[id] = cpu;
1545 irq_data_update_effective_affinity(d, cpumask_of(cpu));
1548 return IRQ_SET_MASK_OK_DONE;
1551 static u64 its_irq_get_msi_base(struct its_device *its_dev)
1553 struct its_node *its = its_dev->its;
1555 return its->phys_base + GITS_TRANSLATER;
1558 static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
1560 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1561 struct its_node *its;
1562 u64 addr;
1564 its = its_dev->its;
1565 addr = its->get_msi_base(its_dev);
1567 msg->address_lo = lower_32_bits(addr);
1568 msg->address_hi = upper_32_bits(addr);
1569 msg->data = its_get_event_id(d);
1571 iommu_dma_compose_msi_msg(irq_data_get_msi_desc(d), msg);
1574 static int its_irq_set_irqchip_state(struct irq_data *d,
1575 enum irqchip_irq_state which,
1576 bool state)
1577 {
1578 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1579 u32 event = its_get_event_id(d);
1581 if (which != IRQCHIP_STATE_PENDING)
1582 return -EINVAL;
1584 if (irqd_is_forwarded_to_vcpu(d)) {
1585 if (state)
1586 its_send_vint(its_dev, event);
1587 else
1588 its_send_vclear(its_dev, event);
1589 } else {
1590 if (state)
1591 its_send_int(its_dev, event);
1592 else
1593 its_send_clear(its_dev, event);
1594 }
1596 return 0;
1599 /*
1600 * Two favourable cases:
1601 *
1602 * (a) Either we have a GICv4.1, and all vPEs have to be mapped at all times
1603 * with VPENDBASER.Valid set, so we never need to issue a VMAPP on demand
1604 *
1605 * (b) Or the ITSs do not use a list map, meaning that VMOVP is cheap enough
1606 * and we're better off mapping all VPEs always
1607 *
1608 * If neither (a) nor (b) is true, then we map vPEs on demand.
1609 */
1611 static bool gic_requires_eager_mapping(void)
1613 if (!its_list_map || gic_rdists->has_rvpeid)
1619 static void its_map_vm(struct its_node *its, struct its_vm *vm)
1621 unsigned long flags;
1623 if (gic_requires_eager_mapping())
1626 raw_spin_lock_irqsave(&vmovp_lock, flags);
1628 /*
1629 * If the VM wasn't mapped yet, iterate over the vpes and get
1630 * them mapped now.
1631 */
1632 vm->vlpi_count[its->list_nr]++;
1634 if (vm->vlpi_count[its->list_nr] == 1) {
1637 for (i = 0; i < vm->nr_vpes; i++) {
1638 struct its_vpe *vpe = vm->vpes[i];
1639 struct irq_data *d = irq_get_irq_data(vpe->irq);
1641 /* Map the VPE to the first possible CPU */
1642 vpe->col_idx = cpumask_first(cpu_online_mask);
1643 its_send_vmapp(its, vpe, true);
1644 its_send_vinvall(its, vpe);
1645 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
1649 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1652 static void its_unmap_vm(struct its_node *its, struct its_vm *vm)
1654 unsigned long flags;
1656 /* Not using the ITS list? Everything is always mapped. */
1657 if (gic_requires_eager_mapping())
1660 raw_spin_lock_irqsave(&vmovp_lock, flags);
1662 if (!--vm->vlpi_count[its->list_nr]) {
1665 for (i = 0; i < vm->nr_vpes; i++)
1666 its_send_vmapp(its, vm->vpes[i], false);
1669 raw_spin_unlock_irqrestore(&vmovp_lock, flags);
1672 static int its_vlpi_map(struct irq_data *d, struct its_cmd_info *info)
1674 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1675 u32 event = its_get_event_id(d);
1681 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1683 if (!its_dev->event_map.vm) {
1684 struct its_vlpi_map *maps;
1686 maps = kcalloc(its_dev->event_map.nr_lpis, sizeof(*maps),
1687 GFP_ATOMIC);
1693 its_dev->event_map.vm = info->map->vm;
1694 its_dev->event_map.vlpi_maps = maps;
1695 } else if (its_dev->event_map.vm != info->map->vm) {
1700 /* Get our private copy of the mapping information */
1701 its_dev->event_map.vlpi_maps[event] = *info->map;
1703 if (irqd_is_forwarded_to_vcpu(d)) {
1704 /* Already mapped, move it around */
1705 its_send_vmovi(its_dev, event);
1707 /* Ensure all the VPEs are mapped on this ITS */
1708 its_map_vm(its_dev->its, info->map->vm);
1711 * Flag the interrupt as forwarded so that we can
1712 * start poking the virtual property table.
1714 irqd_set_forwarded_to_vcpu(d);
1716 /* Write out the property to the prop table */
1717 lpi_write_config(d, 0xff, info->map->properties);
1719 /* Drop the physical mapping */
1720 its_send_discard(its_dev, event);
1722 /* and install the virtual one */
1723 its_send_vmapti(its_dev, event);
1725 /* Increment the number of VLPIs */
1726 its_dev->event_map.nr_vlpis++;
1730 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1734 static int its_vlpi_get(struct irq_data *d, struct its_cmd_info *info)
1736 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1737 struct its_vlpi_map *map;
1740 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1742 map = get_vlpi_map(d);
1744 if (!its_dev->event_map.vm || !map) {
1749 /* Copy our mapping information to the incoming request */
1750 *info->map = *map;
1753 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1757 static int its_vlpi_unmap(struct irq_data *d)
1759 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1760 u32 event = its_get_event_id(d);
1763 raw_spin_lock(&its_dev->event_map.vlpi_lock);
1765 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d)) {
1770 /* Drop the virtual mapping */
1771 its_send_discard(its_dev, event);
1773 /* and restore the physical one */
1774 irqd_clr_forwarded_to_vcpu(d);
1775 its_send_mapti(its_dev, d->hwirq, event);
1776 lpi_update_config(d, 0xff, (LPI_PROP_DEFAULT_PRIO |
1777 LPI_PROP_ENABLED |
1778 LPI_PROP_GROUP1));
1780 /* Potentially unmap the VM from this ITS */
1781 its_unmap_vm(its_dev->its, its_dev->event_map.vm);
1783 /*
1784 * Drop the refcount and make the device available again if
1785 * this was the last VLPI.
1786 */
1787 if (!--its_dev->event_map.nr_vlpis) {
1788 its_dev->event_map.vm = NULL;
1789 kfree(its_dev->event_map.vlpi_maps);
1793 raw_spin_unlock(&its_dev->event_map.vlpi_lock);
1797 static int its_vlpi_prop_update(struct irq_data *d, struct its_cmd_info *info)
1799 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1801 if (!its_dev->event_map.vm || !irqd_is_forwarded_to_vcpu(d))
1802 return -EINVAL;
1804 if (info->cmd_type == PROP_UPDATE_AND_INV_VLPI)
1805 lpi_update_config(d, 0xff, info->config);
1806 else
1807 lpi_write_config(d, 0xff, info->config);
1808 its_vlpi_set_doorbell(d, !!(info->config & LPI_PROP_ENABLED));
1813 static int its_irq_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
1815 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
1816 struct its_cmd_info *info = vcpu_info;
1819 if (!is_v4(its_dev->its))
1820 return -EINVAL;
1822 /* Unmap request? */
1823 if (!info)
1824 return its_vlpi_unmap(d);
1826 switch (info->cmd_type) {
1827 case MAP_VLPI:
1828 return its_vlpi_map(d, info);
1830 case GET_VLPI:
1831 return its_vlpi_get(d, info);
1833 case PROP_UPDATE_VLPI:
1834 case PROP_UPDATE_AND_INV_VLPI:
1835 return its_vlpi_prop_update(d, info);
1842 static struct irq_chip its_irq_chip = {
1843 .name = "ITS",
1844 .irq_mask = its_mask_irq,
1845 .irq_unmask = its_unmask_irq,
1846 .irq_eoi = irq_chip_eoi_parent,
1847 .irq_set_affinity = its_set_affinity,
1848 .irq_compose_msi_msg = its_irq_compose_msi_msg,
1849 .irq_set_irqchip_state = its_irq_set_irqchip_state,
1850 .irq_set_vcpu_affinity = its_irq_set_vcpu_affinity,
1851 };
1854 /*
1855 * How we allocate LPIs:
1856 *
1857 * lpi_range_list contains ranges of LPIs that are available to
1858 * allocate from. To allocate LPIs, just pick the first range that
1859 * fits the required allocation, and reduce it by the required
1860 * amount. Once empty, remove the range from the list.
1861 *
1862 * To free a range of LPIs, add a free range to the list, sort it and
1863 * merge the result if the new range happens to be adjacent to an
1864 * already free block.
1865 *
1866 * The consequence of the above is that allocation cost is low, but
1867 * freeing is expensive. We assume that freeing rarely occurs.
1868 */
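/*
 * Illustrative example (not from the original source): starting from
 * a single free range [8192, 65535], alloc_lpi_range(32, &base)
 * returns base == 8192 and shrinks the range to [8224, 65535].
 * Freeing [8192, 8223] later inserts a new range that the merge
 * logic coalesces back into one contiguous block.
 */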
1869 #define ITS_MAX_LPI_NRBITS 16 /* 64K LPIs */
1871 static DEFINE_MUTEX(lpi_range_lock);
1872 static LIST_HEAD(lpi_range_list);
1874 struct lpi_range {
1875 struct list_head entry;
1876 u32 base_id;
1877 u32 span;
1878 };
1880 static struct lpi_range *mk_lpi_range(u32 base, u32 span)
1882 struct lpi_range *range;
1884 range = kmalloc(sizeof(*range), GFP_KERNEL);
1885 if (range) {
1886 range->base_id = base;
1887 range->span = span;
1888 }
1890 return range;
1893 static int alloc_lpi_range(u32 nr_lpis, u32 *base)
1895 struct lpi_range *range, *tmp;
1896 int err = -ENOSPC;
1898 mutex_lock(&lpi_range_lock);
1900 list_for_each_entry_safe(range, tmp, &lpi_range_list, entry) {
1901 if (range->span >= nr_lpis) {
1902 *base = range->base_id;
1903 range->base_id += nr_lpis;
1904 range->span -= nr_lpis;
1906 if (range->span == 0) {
1907 list_del(&range->entry);
1908 kfree(range);
1909 }
1911 err = 0;
1912 break;
1913 }
1914 }
1916 mutex_unlock(&lpi_range_lock);
1918 pr_debug("ITS: alloc %u:%u\n", *base, nr_lpis);
1919 return err;
1922 static void merge_lpi_ranges(struct lpi_range *a, struct lpi_range *b)
1924 if (&a->entry == &lpi_range_list || &b->entry == &lpi_range_list)
1925 return;
1926 if (a->base_id + a->span != b->base_id)
1927 return;
1928 b->base_id = a->base_id;
1929 b->span += a->span;
1930 list_del(&a->entry);
1931 kfree(a);
1934 static int free_lpi_range(u32 base, u32 nr_lpis)
1936 struct lpi_range *new, *old;
1938 new = mk_lpi_range(base, nr_lpis);
1939 if (!new)
1940 return -ENOMEM;
1942 mutex_lock(&lpi_range_lock);
1944 list_for_each_entry_reverse(old, &lpi_range_list, entry) {
1945 if (old->base_id < base)
1946 break;
1948 /*
1949 * old is the last element with ->base_id smaller than base,
1950 * so new goes right after it. If there are no elements with
1951 * ->base_id smaller than base, &old->entry ends up pointing
1952 * at the head of the list, and inserting new at the start of
1953 * the list is the right thing to do in that case as well.
1954 */
1955 list_add(&new->entry, &old->entry);
1956 /*
1957 * Now check if we can merge with the preceding and/or
1958 * following ranges.
1959 */
1960 merge_lpi_ranges(old, new);
1961 merge_lpi_ranges(new, list_next_entry(new, entry));
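/*
 * Illustrative example: with free ranges [8192, 8223] and
 * [8256, 8287] already on the list, freeing [8224, 8255] inserts
 * the new range between them and both merge_lpi_ranges() calls
 * above fire, leaving a single [8192, 8287] range.
 */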
1963 mutex_unlock(&lpi_range_lock);
1965 return 0;
1967 static int __init its_lpi_init(u32 id_bits)
1969 u32 lpis = (1UL << id_bits) - 8192;
1973 numlpis = 1UL << GICD_TYPER_NUM_LPIS(gic_rdists->gicd_typer);
1975 if (numlpis > 2 && !WARN_ON(numlpis > lpis)) {
1976 lpis = numlpis;
1977 pr_info("ITS: Using hypervisor restricted LPI range [%u]\n",
1978 lpis);
1979 }
1981 /*
1982 * Initializing the allocator is just the same as freeing the
1983 * full range of LPIs.
1984 */
1985 err = free_lpi_range(8192, lpis);
1986 pr_debug("ITS: Allocator initialized for %u LPIs\n", lpis);
1990 static unsigned long *its_lpi_alloc(int nr_irqs, u32 *base, int *nr_ids)
1992 unsigned long *bitmap = NULL;
1993 int err = 0;
1995 do {
1996 err = alloc_lpi_range(nr_irqs, base);
1997 if (!err)
1998 break;
2000 nr_irqs /= 2;
2001 } while (nr_irqs > 0);
2009 bitmap = kcalloc(BITS_TO_LONGS(nr_irqs), sizeof (long), GFP_ATOMIC);
2017 *base = *nr_ids = 0;
2022 static void its_lpi_free(unsigned long *bitmap, u32 base, u32 nr_ids)
2024 WARN_ON(free_lpi_range(base, nr_ids));
2025 kfree(bitmap);
2028 static void gic_reset_prop_table(void *va)
2030 /* Priority 0xa0, Group-1, disabled */
2031 memset(va, LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, LPI_PROPBASE_SZ);
2033 /* Make sure the GIC will observe the written configuration */
2034 gic_flush_dcache_to_poc(va, LPI_PROPBASE_SZ);
2037 static struct page *its_allocate_prop_table(gfp_t gfp_flags)
2039 struct page *prop_page;
2041 prop_page = alloc_pages(gfp_flags, get_order(LPI_PROPBASE_SZ));
2042 if (!prop_page)
2043 return NULL;
2045 gic_reset_prop_table(page_address(prop_page));
2050 static void its_free_prop_table(struct page *prop_page)
2052 free_pages((unsigned long)page_address(prop_page),
2053 get_order(LPI_PROPBASE_SZ));
2056 static bool gic_check_reserved_range(phys_addr_t addr, unsigned long size)
2058 phys_addr_t start, end, addr_end;
2061 /*
2062 * We don't bother checking for a kdump kernel as by
2063 * construction, the LPI tables are out of this kernel's
2064 * memory map.
2065 */
2066 if (is_kdump_kernel())
2069 addr_end = addr + size - 1;
2071 for_each_reserved_mem_region(i, &start, &end) {
2072 if (addr >= start && addr_end <= end)
2073 return true;
2076 /* Not found, not a good sign... */
2077 pr_warn("GICv3: Expected reserved range [%pa:%pa], not found\n",
2078 &addr, &addr_end);
2079 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2080 return false;
2083 static int gic_reserve_range(phys_addr_t addr, unsigned long size)
2085 if (efi_enabled(EFI_CONFIG_TABLES))
2086 return efi_mem_reserve_persistent(addr, size);
2091 static int __init its_setup_lpi_prop_table(void)
2093 if (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) {
2096 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2097 lpi_id_bits = (val & GICR_PROPBASER_IDBITS_MASK) + 1;
2099 gic_rdists->prop_table_pa = val & GENMASK_ULL(51, 12);
2100 gic_rdists->prop_table_va = memremap(gic_rdists->prop_table_pa,
2101 LPI_PROPBASE_SZ,
2102 MEMREMAP_WB);
2103 gic_reset_prop_table(gic_rdists->prop_table_va);
2107 lpi_id_bits = min_t(u32,
2108 GICD_TYPER_ID_BITS(gic_rdists->gicd_typer),
2109 ITS_MAX_LPI_NRBITS);
2110 page = its_allocate_prop_table(GFP_NOWAIT);
2111 if (!page) {
2112 pr_err("Failed to allocate PROPBASE\n");
2113 return -ENOMEM;
2114 }
2116 gic_rdists->prop_table_pa = page_to_phys(page);
2117 gic_rdists->prop_table_va = page_address(page);
2118 WARN_ON(gic_reserve_range(gic_rdists->prop_table_pa,
2119 LPI_PROPBASE_SZ));
2120 }
2122 pr_info("GICv3: using LPI property table @%pa\n",
2123 &gic_rdists->prop_table_pa);
2125 return its_lpi_init(lpi_id_bits);
2128 static const char *its_base_type_string[] = {
2129 [GITS_BASER_TYPE_DEVICE] = "Devices",
2130 [GITS_BASER_TYPE_VCPU] = "Virtual CPUs",
2131 [GITS_BASER_TYPE_RESERVED3] = "Reserved (3)",
2132 [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections",
2133 [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)",
2134 [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)",
2135 [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)",
2136 };
2138 static u64 its_read_baser(struct its_node *its, struct its_baser *baser)
2140 u32 idx = baser - its->tables;
2142 return gits_read_baser(its->base + GITS_BASER + (idx << 3));
2145 static void its_write_baser(struct its_node *its, struct its_baser *baser,
2148 u32 idx = baser - its->tables;
2150 gits_write_baser(val, its->base + GITS_BASER + (idx << 3));
2151 baser->val = its_read_baser(its, baser);
2154 static int its_setup_baser(struct its_node *its, struct its_baser *baser,
2155 u64 cache, u64 shr, u32 order, bool indirect)
2157 u64 val = its_read_baser(its, baser);
2158 u64 esz = GITS_BASER_ENTRY_SIZE(val);
2159 u64 type = GITS_BASER_TYPE(val);
2160 u64 baser_phys, tmp;
2161 u32 alloc_pages, psz;
2166 alloc_pages = (PAGE_ORDER_TO_SIZE(order) / psz);
2167 if (alloc_pages > GITS_BASER_PAGES_MAX) {
2168 pr_warn("ITS@%pa: %s too large, reduce ITS pages %u->%u\n",
2169 &its->phys_base, its_base_type_string[type],
2170 alloc_pages, GITS_BASER_PAGES_MAX);
2171 alloc_pages = GITS_BASER_PAGES_MAX;
2172 order = get_order(GITS_BASER_PAGES_MAX * psz);
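/*
 * Worked example (illustrative): with 4KB kernel pages, order == 9
 * and an ITS page size of 64KB, alloc_pages == (4KB << 9) / 64KB
 * == 32 ITS pages, comfortably below GITS_BASER_PAGES_MAX.
 */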
2175 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO, order);
2176 if (!page)
2177 return -ENOMEM;
2179 base = (void *)page_address(page);
2180 baser_phys = virt_to_phys(base);
2182 /* Check if the physical address of the memory is above 48bits */
2183 if (IS_ENABLED(CONFIG_ARM64_64K_PAGES) && (baser_phys >> 48)) {
2185 /* 52bit PA is supported only when PageSize=64K */
2186 if (psz != SZ_64K) {
2187 pr_err("ITS: no 52bit PA support when psz=%d\n", psz);
2188 free_pages((unsigned long)base, order);
2192 /* Convert 52bit PA to 48bit field */
2193 baser_phys = GITS_BASER_PHYS_52_to_48(baser_phys);
2196 retry_baser:
2197 val = (baser_phys |
2198 (type << GITS_BASER_TYPE_SHIFT) |
2199 ((esz - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) |
2200 ((alloc_pages - 1) << GITS_BASER_PAGES_SHIFT) |
2201 cache |
2202 shr |
2203 GITS_BASER_VALID);
2205 val |= indirect ? GITS_BASER_INDIRECT : 0x0;
2207 switch (psz) {
2208 case SZ_4K:
2209 val |= GITS_BASER_PAGE_SIZE_4K;
2210 break;
2211 case SZ_16K:
2212 val |= GITS_BASER_PAGE_SIZE_16K;
2213 break;
2214 case SZ_64K:
2215 val |= GITS_BASER_PAGE_SIZE_64K;
2216 break;
2217 }
2219 its_write_baser(its, baser, val);
2220 tmp = baser->val;
2222 if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) {
2223 /*
2224 * Shareability didn't stick. Just use
2225 * whatever the read reported, which is likely
2226 * to be the only thing this ITS
2227 * supports. If that's zero, make it
2228 * non-cacheable as well.
2229 */
2230 shr = tmp & GITS_BASER_SHAREABILITY_MASK;
2231 if (!shr) {
2232 cache = GITS_BASER_nC;
2233 gic_flush_dcache_to_poc(base, PAGE_ORDER_TO_SIZE(order));
2234 }
2235 goto retry_baser;
2239 pr_err("ITS@%pa: %s doesn't stick: %llx %llx\n",
2240 &its->phys_base, its_base_type_string[type],
2241 val, tmp);
2242 free_pages((unsigned long)base, order);
2243 return -ENXIO;
2246 baser->order = order;
2247 baser->base = base;
2248 baser->psz = psz;
2249 tmp = indirect ? GITS_LVL1_ENTRY_SIZE : esz;
2251 pr_info("ITS@%pa: allocated %d %s @%lx (%s, esz %d, psz %dK, shr %d)\n",
2252 &its->phys_base, (int)(PAGE_ORDER_TO_SIZE(order) / (int)tmp),
2253 its_base_type_string[type],
2254 (unsigned long)virt_to_phys(base),
2255 indirect ? "indirect" : "flat", (int)esz,
2256 psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT);
2261 static bool its_parse_indirect_baser(struct its_node *its,
2262 struct its_baser *baser,
2263 u32 *order, u32 ids)
2265 u64 tmp = its_read_baser(its, baser);
2266 u64 type = GITS_BASER_TYPE(tmp);
2267 u64 esz = GITS_BASER_ENTRY_SIZE(tmp);
2268 u64 val = GITS_BASER_InnerShareable | GITS_BASER_RaWaWb;
2269 u32 new_order = *order;
2270 u32 psz = baser->psz;
2271 bool indirect = false;
2273 /* No need to enable Indirection if memory requirement < (psz*2)bytes */
2274 if ((esz << ids) > (psz * 2)) {
2275 /*
2276 * Find out whether hw supports a single or two-level table by
2277 * reading bit at offset '62' after writing '1' to it.
2278 */
2279 its_write_baser(its, baser, val | GITS_BASER_INDIRECT);
2280 indirect = !!(baser->val & GITS_BASER_INDIRECT);
2283 /*
2284 * The size of the lvl2 table is equal to the ITS page size,
2285 * which is 'psz'. For computing the lvl1 table size,
2286 * subtract the ID bits covered by a lvl2 table from 'ids',
2287 * and multiply the resulting entry count by the lvl1 table
2288 * entry size.
2289 */
2290 ids -= ilog2(psz / (int)esz);
2291 esz = GITS_LVL1_ENTRY_SIZE;
2295 /*
2296 * Allocate as many entries as required to fit the
2297 * range of device IDs that the ITS can grok... The ID
2298 * space being incredibly sparse, this results in a
2299 * massive waste of memory if the two-level device table
2300 * feature is not supported by hardware.
2301 */
2302 new_order = max_t(u32, get_order(esz << ids), new_order);
2303 if (new_order >= MAX_ORDER) {
2304 new_order = MAX_ORDER - 1;
2305 ids = ilog2(PAGE_ORDER_TO_SIZE(new_order) / (int)esz);
2306 pr_warn("ITS@%pa: %s Table too large, reduce ids %llu->%u\n",
2307 &its->phys_base, its_base_type_string[type],
2308 device_ids(its), ids);
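/*
 * Worked example (illustrative): with an 8-byte entry size, ids ==
 * 20 and psz == SZ_64K, a flat table would need 8 << 20 == 8MB.
 * With indirection, each lvl2 page covers ilog2(64K / 8) == 13 ID
 * bits, so ids drops to 7 and the lvl1 table shrinks to
 * 8 << 7 == 1KB of GITS_LVL1_ENTRY_SIZE descriptors.
 */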
2316 static u32 compute_common_aff(u64 val)
2320 aff = FIELD_GET(GICR_TYPER_AFFINITY, val);
2321 clpiaff = FIELD_GET(GICR_TYPER_COMMON_LPI_AFF, val);
2323 return aff & ~(GENMASK(31, 0) >> (clpiaff * 8));
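/*
 * Worked example (illustrative): CommonLPIAff == 1 means "common at
 * Aff3 level", so GENMASK(31, 0) >> 8 masks everything below bits
 * [31:24] and only Aff3 survives; CommonLPIAff == 0 keeps no bits,
 * i.e. every redistributor shares the same (zero) affinity.
 */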
2326 static u32 compute_its_aff(struct its_node *its)
2328 u64 svpet;
2329 u32 aff;
2331 /*
2332 * Reencode the ITS SVPET and MPIDR as a GICR_TYPER, and compute
2333 * the resulting affinity. We then use that to see if this matches
2334 * our own affinity.
2335 */
2336 svpet = FIELD_GET(GITS_TYPER_SVPET, its->typer);
2337 val = FIELD_PREP(GICR_TYPER_COMMON_LPI_AFF, svpet);
2338 val |= FIELD_PREP(GICR_TYPER_AFFINITY, its->mpidr);
2339 return compute_common_aff(val);
2342 static struct its_node *find_sibling_its(struct its_node *cur_its)
2344 struct its_node *its;
2347 if (!FIELD_GET(GITS_TYPER_SVPET, cur_its->typer))
2348 return NULL;
2350 aff = compute_its_aff(cur_its);
2352 list_for_each_entry(its, &its_nodes, entry) {
2355 if (!is_v4_1(its) || its == cur_its)
2356 continue;
2358 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2359 continue;
2361 if (aff != compute_its_aff(its))
2362 continue;
2364 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2365 baser = its->tables[2].val;
2366 if (!(baser & GITS_BASER_VALID))
2375 static void its_free_tables(struct its_node *its)
2379 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2380 if (its->tables[i].base) {
2381 free_pages((unsigned long)its->tables[i].base,
2382 its->tables[i].order);
2383 its->tables[i].base = NULL;
2388 static int its_probe_baser_psz(struct its_node *its, struct its_baser *baser)
2395 val = its_read_baser(its, baser);
2396 val &= ~GITS_BASER_PAGE_SIZE_MASK;
2398 switch (psz) {
2399 case SZ_64K:
2400 gpsz = GITS_BASER_PAGE_SIZE_64K;
2401 break;
2402 case SZ_16K:
2403 gpsz = GITS_BASER_PAGE_SIZE_16K;
2404 break;
2405 case SZ_4K:
2406 default:
2407 gpsz = GITS_BASER_PAGE_SIZE_4K;
2408 break;
2409 }
2411 gpsz >>= GITS_BASER_PAGE_SIZE_SHIFT;
2413 val |= FIELD_PREP(GITS_BASER_PAGE_SIZE_MASK, gpsz);
2414 its_write_baser(its, baser, val);
2416 if (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser->val) == gpsz)
2436 static int its_alloc_tables(struct its_node *its)
2438 u64 shr = GITS_BASER_InnerShareable;
2439 u64 cache = GITS_BASER_RaWaWb;
2442 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_22375)
2443 /* erratum 24313: ignore memory access type */
2444 cache = GITS_BASER_nCnB;
2446 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
2447 struct its_baser *baser = its->tables + i;
2448 u64 val = its_read_baser(its, baser);
2449 u64 type = GITS_BASER_TYPE(val);
2450 bool indirect = false;
2453 if (type == GITS_BASER_TYPE_NONE)
2456 if (its_probe_baser_psz(its, baser)) {
2457 its_free_tables(its);
2461 order = get_order(baser->psz);
2464 case GITS_BASER_TYPE_DEVICE:
2465 indirect = its_parse_indirect_baser(its, baser, &order,
2466 device_ids(its));
2467 break;
2469 case GITS_BASER_TYPE_VCPU:
2471 struct its_node *sibling;
2474 if ((sibling = find_sibling_its(its))) {
2475 *baser = sibling->tables[2];
2476 its_write_baser(its, baser, baser->val);
2481 indirect = its_parse_indirect_baser(its, baser, &order,
2482 ITS_MAX_VPEID_BITS);
2486 err = its_setup_baser(its, baser, cache, shr, order, indirect);
2488 its_free_tables(its);
2492 /* Update settings which will be used for next BASERn */
2493 cache = baser->val & GITS_BASER_CACHEABILITY_MASK;
2494 shr = baser->val & GITS_BASER_SHAREABILITY_MASK;
2500 static u64 inherit_vpe_l1_table_from_its(void)
2502 struct its_node *its;
2506 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2507 aff = compute_common_aff(val);
2509 list_for_each_entry(its, &its_nodes, entry) {
2515 if (!FIELD_GET(GITS_TYPER_SVPET, its->typer))
2518 if (aff != compute_its_aff(its))
2521 /* GICv4.1 guarantees that the vPE table is GITS_BASER2 */
2522 baser = its->tables[2].val;
2523 if (!(baser & GITS_BASER_VALID))
2526 /* We have a winner! */
2527 gic_data_rdist()->vpe_l1_base = its->tables[2].base;
2529 val = GICR_VPROPBASER_4_1_VALID;
2530 if (baser & GITS_BASER_INDIRECT)
2531 val |= GICR_VPROPBASER_4_1_INDIRECT;
2532 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE,
2533 FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser));
2534 switch (FIELD_GET(GITS_BASER_PAGE_SIZE_MASK, baser)) {
2535 case GIC_PAGE_SIZE_64K:
2536 addr = GITS_BASER_ADDR_48_to_52(baser);
2539 addr = baser & GENMASK_ULL(47, 12);
2542 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, addr >> 12);
2543 val |= FIELD_PREP(GICR_VPROPBASER_SHAREABILITY_MASK,
2544 FIELD_GET(GITS_BASER_SHAREABILITY_MASK, baser));
2545 val |= FIELD_PREP(GICR_VPROPBASER_INNER_CACHEABILITY_MASK,
2546 FIELD_GET(GITS_BASER_INNER_CACHEABILITY_MASK, baser));
2547 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, GITS_BASER_NR_PAGES(baser) - 1);
2555 static u64 inherit_vpe_l1_table_from_rd(cpumask_t **mask)
2561 val = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
2562 aff = compute_common_aff(val);
2564 for_each_possible_cpu(cpu) {
2565 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2567 if (!base || cpu == smp_processor_id())
2570 val = gic_read_typer(base + GICR_TYPER);
2571 if (aff != compute_common_aff(val))
2575 * At this point, we have a victim. This particular CPU
2576 * has already booted, and has an affinity that matches
2577 * ours wrt CommonLPIAff. Let's use its own VPROPBASER.
2578 * Make sure we don't write the Z bit in that case.
2580 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2581 val &= ~GICR_VPROPBASER_4_1_Z;
2583 gic_data_rdist()->vpe_l1_base = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2584 *mask = gic_data_rdist_cpu(cpu)->vpe_table_mask;
2592 static bool allocate_vpe_l2_table(int cpu, u32 id)
2594 void __iomem *base = gic_data_rdist_cpu(cpu)->rd_base;
2595 unsigned int psz, esz, idx, npg, gpsz;
2600 if (!gic_rdists->has_rvpeid)
2603 /* Skip non-present CPUs */
2607 val = gicr_read_vpropbaser(base + SZ_128K + GICR_VPROPBASER);
2609 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val) + 1;
2610 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2611 npg = FIELD_GET(GICR_VPROPBASER_4_1_SIZE, val) + 1;
2617 case GIC_PAGE_SIZE_4K:
2620 case GIC_PAGE_SIZE_16K:
2623 case GIC_PAGE_SIZE_64K:
2628 /* Don't allow vpe_id that exceeds single, flat table limit */
2629 if (!(val & GICR_VPROPBASER_4_1_INDIRECT))
2630 return (id < (npg * psz / (esz * SZ_8)));
2632 /* Compute 1st level table index & check if that exceeds table limit */
2633 idx = id >> ilog2(psz / (esz * SZ_8));
2634 if (idx >= (npg * psz / GITS_LVL1_ENTRY_SIZE))
2637 table = gic_data_rdist_cpu(cpu)->vpe_l1_base;
2639 /* Allocate memory for 2nd level table */
2641 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(psz));
2645 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
2646 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2647 gic_flush_dcache_to_poc(page_address(page), psz);
2649 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
2651 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
2652 if (!(val & GICR_VPROPBASER_SHAREABILITY_MASK))
2653 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
2655 /* Ensure updated table contents are visible to RD hardware */
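/*
 * A worked walk-through of the two-level lookup above (hypothetical
 * numbers): with 64kB GIC pages (psz = SZ_64K) and an entry size of
 * esz = 2 units of 8 bytes (16 bytes per entry), one L2 page covers
 * psz / (esz * SZ_8) = 4096 vPEs. vpe_id 5000 therefore selects L1
 * slot idx = 5000 >> ilog2(4096) = 1, which must point to a valid
 * (possibly freshly allocated) L2 page before the vPE may be used.
 */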
2662 static int allocate_vpe_l1_table(void)
2664 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2665 u64 val, gpsz, npg, pa;
2666 unsigned int psz = SZ_64K;
2667 unsigned int np, epp, esz;
2670 if (!gic_rdists->has_rvpeid)
2674 * if VPENDBASER.Valid is set, disable any previously programmed
2675 * VPE by setting PendingLast while clearing Valid. This has the
2676 * effect of making sure no doorbell will be generated and we can
2677 * then safely clear VPROPBASER.Valid.
2679 if (gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER) & GICR_VPENDBASER_Valid)
2680 gicr_write_vpendbaser(GICR_VPENDBASER_PendingLast,
2681 vlpi_base + GICR_VPENDBASER);
2684 * If we can inherit the configuration from another RD, let's do
2685 * so. Otherwise, we have to go through the allocation process. We
2686 * assume that all RDs have the exact same requirements, as
2687 * nothing will work otherwise.
2689 val = inherit_vpe_l1_table_from_rd(&gic_data_rdist()->vpe_table_mask);
2690 if (val & GICR_VPROPBASER_4_1_VALID)
2693 gic_data_rdist()->vpe_table_mask = kzalloc(sizeof(cpumask_t), GFP_KERNEL);
2694 if (!gic_data_rdist()->vpe_table_mask)
2697 val = inherit_vpe_l1_table_from_its();
2698 if (val & GICR_VPROPBASER_4_1_VALID)
2701 /* First probe the page size */
2702 val = FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, GIC_PAGE_SIZE_64K);
2703 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2704 val = gicr_read_vpropbaser(vlpi_base + GICR_VPROPBASER);
2705 gpsz = FIELD_GET(GICR_VPROPBASER_4_1_PAGE_SIZE, val);
2706 esz = FIELD_GET(GICR_VPROPBASER_4_1_ENTRY_SIZE, val);
2710 gpsz = GIC_PAGE_SIZE_4K;
2712 case GIC_PAGE_SIZE_4K:
2715 case GIC_PAGE_SIZE_16K:
2718 case GIC_PAGE_SIZE_64K:
2724 * Start populating the register from scratch, including RO fields
2725 * (which we want to print in debug cases...)
2728 val |= FIELD_PREP(GICR_VPROPBASER_4_1_PAGE_SIZE, gpsz);
2729 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ENTRY_SIZE, esz);
2731 /* How many entries per GIC page? */
2733 epp = psz / (esz * SZ_8);
2736 * If we need more than just a single L1 page, flag the table
2737 * as indirect and compute the number of required L1 pages.
2739 if (epp < ITS_MAX_VPEID) {
2742 val |= GICR_VPROPBASER_4_1_INDIRECT;
2744 /* Number of L2 pages required to cover the VPEID space */
2745 nl2 = DIV_ROUND_UP(ITS_MAX_VPEID, epp);
2747 /* Number of L1 pages to point to the L2 pages */
2748 npg = DIV_ROUND_UP(nl2 * SZ_8, psz);
2753 val |= FIELD_PREP(GICR_VPROPBASER_4_1_SIZE, npg - 1);
2755 /* Right, that's the number of CPU pages we need for L1 */
2756 np = DIV_ROUND_UP(npg * psz, PAGE_SIZE);
2758 pr_debug("np = %d, npg = %lld, psz = %d, epp = %d, esz = %d\n",
2759 np, npg, psz, epp, esz);
2760 page = alloc_pages(GFP_KERNEL | __GFP_ZERO, get_order(np * PAGE_SIZE));
2764 gic_data_rdist()->vpe_l1_base = page_address(page);
2765 pa = virt_to_phys(page_address(page));
2766 WARN_ON(!IS_ALIGNED(pa, psz));
2768 val |= FIELD_PREP(GICR_VPROPBASER_4_1_ADDR, pa >> 12);
2769 val |= GICR_VPROPBASER_RaWb;
2770 val |= GICR_VPROPBASER_InnerShareable;
2771 val |= GICR_VPROPBASER_4_1_Z;
2772 val |= GICR_VPROPBASER_4_1_VALID;
2775 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
2776 cpumask_set_cpu(smp_processor_id(), gic_data_rdist()->vpe_table_mask);
2778 pr_debug("CPU%d: VPROPBASER = %llx %*pbl\n",
2779 smp_processor_id(), val,
2780 cpumask_pr_args(gic_data_rdist()->vpe_table_mask));
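/*
 * Sizing example for the allocation above (hypothetical numbers): with
 * psz = SZ_64K and 16-byte entries, epp = 4096 entries fit in one GIC
 * page. If ITS_MAX_VPEID = 65536, a flat table is too small, so the
 * table goes indirect: nl2 = DIV_ROUND_UP(65536, 4096) = 16 L2 pages,
 * and npg = DIV_ROUND_UP(16 * SZ_8, SZ_64K) = 1 L1 page, i.e. np = 16
 * kernel pages of 4kB back the single 64kB L1 page.
 */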
2785 static int its_alloc_collections(struct its_node *its)
2789 its->collections = kcalloc(nr_cpu_ids, sizeof(*its->collections),
2791 if (!its->collections)
2794 for (i = 0; i < nr_cpu_ids; i++)
2795 its->collections[i].target_address = ~0ULL;
2800 static struct page *its_allocate_pending_table(gfp_t gfp_flags)
2802 struct page *pend_page;
2804 pend_page = alloc_pages(gfp_flags | __GFP_ZERO,
2805 get_order(LPI_PENDBASE_SZ));
2809 /* Make sure the GIC will observe the zeroed page */
2810 gic_flush_dcache_to_poc(page_address(pend_page), LPI_PENDBASE_SZ);
2815 static void its_free_pending_table(struct page *pt)
2817 free_pages((unsigned long)page_address(pt), get_order(LPI_PENDBASE_SZ));
2821 * Booting with kdump and LPIs enabled is generally fine. Any other
2822 * case is wrong in the absence of firmware/EFI support.
2824 static bool enabled_lpis_allowed(void)
2829 /* Check whether the property table is in a reserved region */
2830 val = gicr_read_propbaser(gic_data_rdist_rd_base() + GICR_PROPBASER);
2831 addr = val & GENMASK_ULL(51, 12);
2833 return gic_check_reserved_range(addr, LPI_PROPBASE_SZ);
2836 static int __init allocate_lpi_tables(void)
2842 * If LPIs are enabled while we run this from the boot CPU,
2843 * flag the RD tables as pre-allocated if the stars do align.
2845 val = readl_relaxed(gic_data_rdist_rd_base() + GICR_CTLR);
2846 if ((val & GICR_CTLR_ENABLE_LPIS) && enabled_lpis_allowed()) {
2847 gic_rdists->flags |= (RDIST_FLAGS_RD_TABLES_PREALLOCATED |
2848 RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING);
2849 pr_info("GICv3: Using preallocated redistributor tables\n");
2852 err = its_setup_lpi_prop_table();
2857 * We allocate all the pending tables anyway, as we may have a
2858 * mix of RDs that have had LPIs enabled, and some that
2859 * don't. We'll free the unused ones as each CPU comes online.
2861 for_each_possible_cpu(cpu) {
2862 struct page *pend_page;
2864 pend_page = its_allocate_pending_table(GFP_NOWAIT);
2866 pr_err("Failed to allocate PENDBASE for CPU%d\n", cpu);
2870 gic_data_rdist_cpu(cpu)->pend_page = pend_page;
2876 static u64 its_clear_vpend_valid(void __iomem *vlpi_base, u64 clr, u64 set)
2878 u32 count = 1000000; /* 1s! */
2882 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2883 val &= ~GICR_VPENDBASER_Valid;
2886 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
2889 val = gicr_read_vpendbaser(vlpi_base + GICR_VPENDBASER);
2890 clean = !(val & GICR_VPENDBASER_Dirty);
2896 } while (!clean && count);
2898 if (unlikely(val & GICR_VPENDBASER_Dirty)) {
2899 pr_err_ratelimited("ITS virtual pending table not cleaning\n");
2900 val |= GICR_VPENDBASER_PendingLast;
2906 static void its_cpu_init_lpis(void)
2908 void __iomem *rbase = gic_data_rdist_rd_base();
2909 struct page *pend_page;
2913 if (gic_data_rdist()->lpi_enabled)
2916 val = readl_relaxed(rbase + GICR_CTLR);
2917 if ((gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED) &&
2918 (val & GICR_CTLR_ENABLE_LPIS)) {
2920 * Check that we get the same property table on all
2921 * RDs. If we don't, this is hopeless.
2923 paddr = gicr_read_propbaser(rbase + GICR_PROPBASER);
2924 paddr &= GENMASK_ULL(51, 12);
2925 if (WARN_ON(gic_rdists->prop_table_pa != paddr))
2926 add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);
2928 paddr = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2929 paddr &= GENMASK_ULL(51, 16);
2931 WARN_ON(!gic_check_reserved_range(paddr, LPI_PENDBASE_SZ));
2932 its_free_pending_table(gic_data_rdist()->pend_page);
2933 gic_data_rdist()->pend_page = NULL;
2938 pend_page = gic_data_rdist()->pend_page;
2939 paddr = page_to_phys(pend_page);
2940 WARN_ON(gic_reserve_range(paddr, LPI_PENDBASE_SZ));
2943 val = (gic_rdists->prop_table_pa |
2944 GICR_PROPBASER_InnerShareable |
2945 GICR_PROPBASER_RaWaWb |
2946 ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK));
2948 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2949 tmp = gicr_read_propbaser(rbase + GICR_PROPBASER);
2951 if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) {
2952 if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) {
2954 * The HW reports non-shareable, we must
2955 * remove the cacheability attributes as well.
2958 val &= ~(GICR_PROPBASER_SHAREABILITY_MASK |
2959 GICR_PROPBASER_CACHEABILITY_MASK);
2960 val |= GICR_PROPBASER_nC;
2961 gicr_write_propbaser(val, rbase + GICR_PROPBASER);
2963 pr_info_once("GIC: using cache flushing for LPI property table\n");
2964 gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING;
2968 val = (page_to_phys(pend_page) |
2969 GICR_PENDBASER_InnerShareable |
2970 GICR_PENDBASER_RaWaWb);
2972 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2973 tmp = gicr_read_pendbaser(rbase + GICR_PENDBASER);
2975 if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) {
2977 * The HW reports non-shareable, we must remove the
2978 * cacheability attributes as well.
2980 val &= ~(GICR_PENDBASER_SHAREABILITY_MASK |
2981 GICR_PENDBASER_CACHEABILITY_MASK);
2982 val |= GICR_PENDBASER_nC;
2983 gicr_write_pendbaser(val, rbase + GICR_PENDBASER);
2987 val = readl_relaxed(rbase + GICR_CTLR);
2988 val |= GICR_CTLR_ENABLE_LPIS;
2989 writel_relaxed(val, rbase + GICR_CTLR);
2991 if (gic_rdists->has_vlpis && !gic_rdists->has_rvpeid) {
2992 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
2995 * It's possible for a CPU to receive VLPIs before it is
2996 * scheduled as a vPE, especially for the first CPU, and a
2997 * VLPI with an INTID larger than 2^(IDbits+1) will be considered
2998 * as out of range and dropped by the GIC.
2999 * So we initialize IDbits to a known value to avoid VLPI drops.
3001 val = (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3002 pr_debug("GICv4: CPU%d: Init IDbits to 0x%llx for GICR_VPROPBASER\n",
3003 smp_processor_id(), val);
3004 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3007 * Also clear the Valid bit of GICR_VPENDBASER, in case some
3008 * ancient programming gets left in and has the possibility of
3009 * corrupting memory.
3011 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3014 if (allocate_vpe_l1_table()) {
3016 * If the allocation has failed, we're in massive trouble.
3017 * Disable direct injection, and pray that no VM was
3018 * already running...
3020 gic_rdists->has_rvpeid = false;
3021 gic_rdists->has_vlpis = false;
3024 /* Make sure the GIC has seen the above */
3027 gic_data_rdist()->lpi_enabled = true;
3028 pr_info("GICv3: CPU%d: using %s LPI pending table @%pa\n",
3030 gic_data_rdist()->pend_page ? "allocated" : "reserved",
3034 static void its_cpu_init_collection(struct its_node *its)
3036 int cpu = smp_processor_id();
3039 /* avoid cross-node collections and their mapping */
3040 if (its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144) {
3041 struct device_node *cpu_node;
3043 cpu_node = of_get_cpu_node(cpu, NULL);
3044 if (its->numa_node != NUMA_NO_NODE &&
3045 its->numa_node != of_node_to_nid(cpu_node))
3050 * We now have to bind each collection to its target
3053 if (gic_read_typer(its->base + GITS_TYPER) & GITS_TYPER_PTA) {
3055 * This ITS wants the physical address of the redistributor.
3058 target = gic_data_rdist()->phys_base;
3060 /* This ITS wants a linear CPU number. */
3061 target = gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER);
3062 target = GICR_TYPER_CPU_NUMBER(target) << 16;
3065 /* Perform collection mapping */
3066 its->collections[cpu].target_address = target;
3067 its->collections[cpu].col_id = cpu;
3069 its_send_mapc(its, &its->collections[cpu], 1);
3070 its_send_invall(its, &its->collections[cpu]);
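/*
 * A minimal sketch of the two RDbase encodings used above (assumes the
 * same GITS_TYPER.PTA semantics; illustrative only, the driver does
 * this inline): PTA set means MAPC takes the redistributor's physical
 * address, PTA clear means it takes the linear processor number from
 * GICR_TYPER, placed in bits [31:16].
 */
static u64 __maybe_unused example_collection_target(bool pta)
{
	if (pta)
		return gic_data_rdist()->phys_base;

	return GICR_TYPER_CPU_NUMBER(gic_read_typer(gic_data_rdist_rd_base() +
						    GICR_TYPER)) << 16;
}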
3073 static void its_cpu_init_collections(void)
3075 struct its_node *its;
3077 raw_spin_lock(&its_lock);
3079 list_for_each_entry(its, &its_nodes, entry)
3080 its_cpu_init_collection(its);
3082 raw_spin_unlock(&its_lock);
3085 static struct its_device *its_find_device(struct its_node *its, u32 dev_id)
3087 struct its_device *its_dev = NULL, *tmp;
3088 unsigned long flags;
3090 raw_spin_lock_irqsave(&its->lock, flags);
3092 list_for_each_entry(tmp, &its->its_device_list, entry) {
3093 if (tmp->device_id == dev_id) {
3099 raw_spin_unlock_irqrestore(&its->lock, flags);
3104 static struct its_baser *its_get_baser(struct its_node *its, u32 type)
3108 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
3109 if (GITS_BASER_TYPE(its->tables[i].val) == type)
3110 return &its->tables[i];
3116 static bool its_alloc_table_entry(struct its_node *its,
3117 struct its_baser *baser, u32 id)
3123 /* Don't allow device id that exceeds single, flat table limit */
3124 esz = GITS_BASER_ENTRY_SIZE(baser->val);
3125 if (!(baser->val & GITS_BASER_INDIRECT))
3126 return (id < (PAGE_ORDER_TO_SIZE(baser->order) / esz));
3128 /* Compute 1st level table index & check if that exceeds table limit */
3129 idx = id >> ilog2(baser->psz / esz);
3130 if (idx >= (PAGE_ORDER_TO_SIZE(baser->order) / GITS_LVL1_ENTRY_SIZE))
3133 table = baser->base;
3135 /* Allocate memory for 2nd level table */
3137 page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
3138 get_order(baser->psz));
3142 /* Flush Lvl2 table to PoC if hw doesn't support coherency */
3143 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3144 gic_flush_dcache_to_poc(page_address(page), baser->psz);
3146 table[idx] = cpu_to_le64(page_to_phys(page) | GITS_BASER_VALID);
3148 /* Flush Lvl1 entry to PoC if hw doesn't support coherency */
3149 if (!(baser->val & GITS_BASER_SHAREABILITY_MASK))
3150 gic_flush_dcache_to_poc(table + idx, GITS_LVL1_ENTRY_SIZE);
3152 /* Ensure updated table contents are visible to ITS hardware */
3159 static bool its_alloc_device_table(struct its_node *its, u32 dev_id)
3161 struct its_baser *baser;
3163 baser = its_get_baser(its, GITS_BASER_TYPE_DEVICE);
3165 /* Don't allow device id that exceeds ITS hardware limit */
3167 return (ilog2(dev_id) < device_ids(its));
3169 return its_alloc_table_entry(its, baser, dev_id);
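/*
 * Range-check example for the case where no device-table BASER is
 * exposed (hypothetical values): an ITS advertising
 * GITS_TYPER.Devbits = 19 supports device_ids(its) = 20 bits of
 * DeviceID, so ilog2(dev_id) < 20 accepts any dev_id below BIT(20)
 * and rejects larger ones without touching the tables.
 */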
3172 static bool its_alloc_vpe_table(u32 vpe_id)
3174 struct its_node *its;
3178 * Make sure the L2 tables are allocated on *all* v4 ITSs. We
3179 * could try and only do it on ITSs corresponding to devices
3180 * that have interrupts targeted at this VPE, but the
3181 * complexity becomes crazy (and you have tons of memory anyway).
3184 list_for_each_entry(its, &its_nodes, entry) {
3185 struct its_baser *baser;
3190 baser = its_get_baser(its, GITS_BASER_TYPE_VCPU);
3194 if (!its_alloc_table_entry(its, baser, vpe_id))
3198 /* Non v4.1? No need to iterate the RDs, return early. */
3199 if (!gic_rdists->has_rvpeid)
3203 * Make sure the L2 tables are allocated for all copies of
3204 * the L1 table on *all* v4.1 RDs.
3206 for_each_possible_cpu(cpu) {
3207 if (!allocate_vpe_l2_table(cpu, vpe_id))
3214 static struct its_device *its_create_device(struct its_node *its, u32 dev_id,
3215 int nvecs, bool alloc_lpis)
3217 struct its_device *dev;
3218 unsigned long *lpi_map = NULL;
3219 unsigned long flags;
3220 u16 *col_map = NULL;
3227 if (!its_alloc_device_table(its, dev_id))
3230 if (WARN_ON(!is_power_of_2(nvecs)))
3231 nvecs = roundup_pow_of_two(nvecs);
3233 dev = kzalloc(sizeof(*dev), GFP_KERNEL);
3235 * Even if the device wants a single LPI, the ITT must be
3236 * sized as a power of two (and you need at least one bit...).
3238 nr_ites = max(2, nvecs);
3239 sz = nr_ites * (FIELD_GET(GITS_TYPER_ITT_ENTRY_SIZE, its->typer) + 1);
3240 sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1;
3241 itt = kzalloc_node(sz, GFP_KERNEL, its->numa_node);
3243 lpi_map = its_lpi_alloc(nvecs, &lpi_base, &nr_lpis);
3245 col_map = kcalloc(nr_lpis, sizeof(*col_map),
3248 col_map = kcalloc(nr_ites, sizeof(*col_map), GFP_KERNEL);
3253 if (!dev || !itt || !col_map || (!lpi_map && alloc_lpis)) {
3261 gic_flush_dcache_to_poc(itt, sz);
3265 dev->nr_ites = nr_ites;
3266 dev->event_map.lpi_map = lpi_map;
3267 dev->event_map.col_map = col_map;
3268 dev->event_map.lpi_base = lpi_base;
3269 dev->event_map.nr_lpis = nr_lpis;
3270 raw_spin_lock_init(&dev->event_map.vlpi_lock);
3271 dev->device_id = dev_id;
3272 INIT_LIST_HEAD(&dev->entry);
3274 raw_spin_lock_irqsave(&its->lock, flags);
3275 list_add(&dev->entry, &its->its_device_list);
3276 raw_spin_unlock_irqrestore(&its->lock, flags);
3278 /* Map device to its ITT */
3279 its_send_mapd(dev, 1);
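/*
 * ITT sizing example for the allocation above (hypothetical numbers):
 * nvecs = 30 is rounded up to 32 and nr_ites = max(2, 32) = 32. With
 * GITS_TYPER.ITT_entry_size = 3 (4-byte entries), sz = 32 * 4 = 128
 * bytes, padded to max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1 = 511
 * bytes so that a 256-byte-aligned ITT window always fits inside the
 * kzalloc'd buffer.
 */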
3284 static void its_free_device(struct its_device *its_dev)
3286 unsigned long flags;
3288 raw_spin_lock_irqsave(&its_dev->its->lock, flags);
3289 list_del(&its_dev->entry);
3290 raw_spin_unlock_irqrestore(&its_dev->its->lock, flags);
3291 kfree(its_dev->event_map.col_map);
3292 kfree(its_dev->itt);
3296 static int its_alloc_device_irq(struct its_device *dev, int nvecs, irq_hw_number_t *hwirq)
3300 /* Find a free LPI region in lpi_map and allocate it. */
3301 idx = bitmap_find_free_region(dev->event_map.lpi_map,
3302 dev->event_map.nr_lpis,
3303 get_count_order(nvecs));
3307 *hwirq = dev->event_map.lpi_base + idx;
3312 static int its_msi_prepare(struct irq_domain *domain, struct device *dev,
3313 int nvec, msi_alloc_info_t *info)
3315 struct its_node *its;
3316 struct its_device *its_dev;
3317 struct msi_domain_info *msi_info;
3322 * We ignore "dev" entirely, and rely on the dev_id that has
3323 * been passed via the scratchpad. This limits this domain's
3324 * usefulness to upper layers that definitely know that they
3325 * are built on top of the ITS.
3327 dev_id = info->scratchpad[0].ul;
3329 msi_info = msi_get_domain_info(domain);
3330 its = msi_info->data;
3332 if (!gic_rdists->has_direct_lpi &&
3334 vpe_proxy.dev->its == its &&
3335 dev_id == vpe_proxy.dev->device_id) {
3336 /* Bad luck. Get yourself a better implementation */
3337 WARN_ONCE(1, "DevId %x clashes with GICv4 VPE proxy device\n",
3342 mutex_lock(&its->dev_alloc_lock);
3343 its_dev = its_find_device(its, dev_id);
3346 * We already have seen this ID, probably through
3347 * another alias (PCI bridge of some sort). No need to
3348 * create the device.
3350 its_dev->shared = true;
3351 pr_debug("Reusing ITT for devID %x\n", dev_id);
3355 its_dev = its_create_device(its, dev_id, nvec, true);
3361 pr_debug("ITT %d entries, %d bits\n", nvec, ilog2(nvec));
3363 mutex_unlock(&its->dev_alloc_lock);
3364 info->scratchpad[0].ptr = its_dev;
3368 static struct msi_domain_ops its_msi_domain_ops = {
3369 .msi_prepare = its_msi_prepare,
3372 static int its_irq_gic_domain_alloc(struct irq_domain *domain,
3374 irq_hw_number_t hwirq)
3376 struct irq_fwspec fwspec;
3378 if (irq_domain_get_of_node(domain->parent)) {
3379 fwspec.fwnode = domain->parent->fwnode;
3380 fwspec.param_count = 3;
3381 fwspec.param[0] = GIC_IRQ_TYPE_LPI;
3382 fwspec.param[1] = hwirq;
3383 fwspec.param[2] = IRQ_TYPE_EDGE_RISING;
3384 } else if (is_fwnode_irqchip(domain->parent->fwnode)) {
3385 fwspec.fwnode = domain->parent->fwnode;
3386 fwspec.param_count = 2;
3387 fwspec.param[0] = hwirq;
3388 fwspec.param[1] = IRQ_TYPE_EDGE_RISING;
3393 return irq_domain_alloc_irqs_parent(domain, virq, 1, &fwspec);
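/*
 * For reference, the two fwspecs built above encode (hedged reading of
 * the parent bindings): a DT parent gets the three-cell GIC binding
 * <GIC_IRQ_TYPE_LPI hwirq IRQ_TYPE_EDGE_RISING>, while an ACPI/fwnode
 * parent only gets <hwirq IRQ_TYPE_EDGE_RISING>, the parent inferring
 * the LPI nature from the hwirq range (LPIs start at 8192).
 */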
3396 static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
3397 unsigned int nr_irqs, void *args)
3399 msi_alloc_info_t *info = args;
3400 struct its_device *its_dev = info->scratchpad[0].ptr;
3401 struct its_node *its = its_dev->its;
3402 irq_hw_number_t hwirq;
3406 err = its_alloc_device_irq(its_dev, nr_irqs, &hwirq);
3410 err = iommu_dma_prepare_msi(info->desc, its->get_msi_base(its_dev));
3414 for (i = 0; i < nr_irqs; i++) {
3415 err = its_irq_gic_domain_alloc(domain, virq + i, hwirq + i);
3419 irq_domain_set_hwirq_and_chip(domain, virq + i,
3420 hwirq + i, &its_irq_chip, its_dev);
3421 irqd_set_single_target(irq_desc_get_irq_data(irq_to_desc(virq + i)));
3422 pr_debug("ID:%d pID:%d vID:%d\n",
3423 (int)(hwirq + i - its_dev->event_map.lpi_base),
3424 (int)(hwirq + i), virq + i);
3430 static int its_irq_domain_activate(struct irq_domain *domain,
3431 struct irq_data *d, bool reserve)
3433 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3434 u32 event = its_get_event_id(d);
3435 const struct cpumask *cpu_mask = cpu_online_mask;
3438 /* get the cpu_mask of the local node */
3439 if (its_dev->its->numa_node >= 0)
3440 cpu_mask = cpumask_of_node(its_dev->its->numa_node);
3442 /* Bind the LPI to the first possible CPU */
3443 cpu = cpumask_first_and(cpu_mask, cpu_online_mask);
3444 if (cpu >= nr_cpu_ids) {
3445 if (its_dev->its->flags & ITS_FLAGS_WORKAROUND_CAVIUM_23144)
3448 cpu = cpumask_first(cpu_online_mask);
3451 its_dev->event_map.col_map[event] = cpu;
3452 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3454 /* Map the GIC IRQ and event to the device */
3455 its_send_mapti(its_dev, d->hwirq, event);
3459 static void its_irq_domain_deactivate(struct irq_domain *domain,
3462 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3463 u32 event = its_get_event_id(d);
3465 /* Stop the delivery of interrupts */
3466 its_send_discard(its_dev, event);
3469 static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq,
3470 unsigned int nr_irqs)
3472 struct irq_data *d = irq_domain_get_irq_data(domain, virq);
3473 struct its_device *its_dev = irq_data_get_irq_chip_data(d);
3474 struct its_node *its = its_dev->its;
3477 bitmap_release_region(its_dev->event_map.lpi_map,
3478 its_get_event_id(irq_domain_get_irq_data(domain, virq)),
3479 get_count_order(nr_irqs));
3481 for (i = 0; i < nr_irqs; i++) {
3482 struct irq_data *data = irq_domain_get_irq_data(domain,
3484 /* Nuke the entry in the domain */
3485 irq_domain_reset_irq_data(data);
3488 mutex_lock(&its->dev_alloc_lock);
3491 * If all interrupts have been freed, start mopping the
3492 * floor. This is conditioned on the device not being shared.
3494 if (!its_dev->shared &&
3495 bitmap_empty(its_dev->event_map.lpi_map,
3496 its_dev->event_map.nr_lpis)) {
3497 its_lpi_free(its_dev->event_map.lpi_map,
3498 its_dev->event_map.lpi_base,
3499 its_dev->event_map.nr_lpis);
3501 /* Unmap device/itt */
3502 its_send_mapd(its_dev, 0);
3503 its_free_device(its_dev);
3506 mutex_unlock(&its->dev_alloc_lock);
3508 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
3511 static const struct irq_domain_ops its_domain_ops = {
3512 .alloc = its_irq_domain_alloc,
3513 .free = its_irq_domain_free,
3514 .activate = its_irq_domain_activate,
3515 .deactivate = its_irq_domain_deactivate,
3521 * If a GICv4.0 doesn't implement Direct LPIs (which is extremely
3522 * likely), the only way to perform an invalidate is to use a fake
3523 * device to issue an INV command, implying that the LPI has first
3524 * been mapped to some event on that device. Since this is not exactly
3525 * cheap, we try to keep that mapping around as long as possible, and
3526 * only issue an UNMAP if we're short on available slots.
3528 * Broken by design(tm).
3530 * GICv4.1, on the other hand, mandates that we're able to invalidate
3531 * by writing to a MMIO register. It doesn't implement the whole of
3532 * DirectLPI, but that's good enough. And most of the time, we don't
3533 * even have to invalidate anything, as the redistributor can be told
3534 * whether to generate a doorbell or not (we thus leave it enabled, always).
3537 static void its_vpe_db_proxy_unmap_locked(struct its_vpe *vpe)
3539 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3540 if (gic_rdists->has_rvpeid)
3543 /* Already unmapped? */
3544 if (vpe->vpe_proxy_event == -1)
3547 its_send_discard(vpe_proxy.dev, vpe->vpe_proxy_event);
3548 vpe_proxy.vpes[vpe->vpe_proxy_event] = NULL;
3551 * We don't track empty slots at all, so let's move the
3552 * next_victim pointer if we can quickly reuse that slot
3553 * instead of nuking an existing entry. Not clear that this is
3554 * always a win though, and this might just generate a ripple
3555 * effect... Let's just hope VPEs don't migrate too often.
3557 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3558 vpe_proxy.next_victim = vpe->vpe_proxy_event;
3560 vpe->vpe_proxy_event = -1;
3563 static void its_vpe_db_proxy_unmap(struct its_vpe *vpe)
3565 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3566 if (gic_rdists->has_rvpeid)
3569 if (!gic_rdists->has_direct_lpi) {
3570 unsigned long flags;
3572 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3573 its_vpe_db_proxy_unmap_locked(vpe);
3574 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3578 static void its_vpe_db_proxy_map_locked(struct its_vpe *vpe)
3580 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3581 if (gic_rdists->has_rvpeid)
3584 /* Already mapped? */
3585 if (vpe->vpe_proxy_event != -1)
3588 /* This slot was already allocated. Kick the other VPE out. */
3589 if (vpe_proxy.vpes[vpe_proxy.next_victim])
3590 its_vpe_db_proxy_unmap_locked(vpe_proxy.vpes[vpe_proxy.next_victim]);
3592 /* Map the new VPE instead */
3593 vpe_proxy.vpes[vpe_proxy.next_victim] = vpe;
3594 vpe->vpe_proxy_event = vpe_proxy.next_victim;
3595 vpe_proxy.next_victim = (vpe_proxy.next_victim + 1) % vpe_proxy.dev->nr_ites;
3597 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = vpe->col_idx;
3598 its_send_mapti(vpe_proxy.dev, vpe->vpe_db_lpi, vpe->vpe_proxy_event);
3601 static void its_vpe_db_proxy_move(struct its_vpe *vpe, int from, int to)
3603 unsigned long flags;
3604 struct its_collection *target_col;
3606 /* GICv4.1 doesn't use a proxy, so nothing to do here */
3607 if (gic_rdists->has_rvpeid)
3610 if (gic_rdists->has_direct_lpi) {
3611 void __iomem *rdbase;
3613 rdbase = per_cpu_ptr(gic_rdists->rdist, from)->rd_base;
3614 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3615 wait_for_syncr(rdbase);
3620 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3622 its_vpe_db_proxy_map_locked(vpe);
3624 target_col = &vpe_proxy.dev->its->collections[to];
3625 its_send_movi(vpe_proxy.dev, target_col, vpe->vpe_proxy_event);
3626 vpe_proxy.dev->event_map.col_map[vpe->vpe_proxy_event] = to;
3628 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3631 static int its_vpe_set_affinity(struct irq_data *d,
3632 const struct cpumask *mask_val,
3635 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3636 int from, cpu = cpumask_first(mask_val);
3637 unsigned long flags;
3640 * Changing affinity is mega expensive, so let's be as lazy as
3641 * we can and only do it if we really have to. Also, if mapped
3642 * into the proxy device, we need to move the doorbell
3643 * interrupt to its new location.
3645 * Another thing is that changing the affinity of a vPE affects
3646 * *other interrupts* such as all the vLPIs that are routed to
3647 * this vPE. This means that the irq_desc lock is not enough to
3648 * protect us, and that we must ensure nobody samples vpe->col_idx
3649 * during the update, hence the lock below which must also be
3650 * taken on any vLPI handling path that evaluates vpe->col_idx.
3652 from = vpe_to_cpuid_lock(vpe, &flags);
3659 * GICv4.1 allows us to skip VMOVP if moving to a cpu whose RD
3660 * is sharing its VPE table with the current one.
3662 if (gic_data_rdist_cpu(cpu)->vpe_table_mask &&
3663 cpumask_test_cpu(from, gic_data_rdist_cpu(cpu)->vpe_table_mask))
3666 its_send_vmovp(vpe);
3667 its_vpe_db_proxy_move(vpe, from, cpu);
3670 irq_data_update_effective_affinity(d, cpumask_of(cpu));
3671 vpe_to_cpuid_unlock(vpe, flags);
3673 return IRQ_SET_MASK_OK_DONE;
3676 static void its_wait_vpt_parse_complete(void)
3678 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3681 if (!gic_rdists->has_vpend_valid_dirty)
3684 WARN_ON_ONCE(readq_relaxed_poll_timeout(vlpi_base + GICR_VPENDBASER,
3686 !(val & GICR_VPENDBASER_Dirty),
3690 static void its_vpe_schedule(struct its_vpe *vpe)
3692 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3695 /* Schedule the VPE */
3696 val = virt_to_phys(page_address(vpe->its_vm->vprop_page)) &
3697 GENMASK_ULL(51, 12);
3698 val |= (LPI_NRBITS - 1) & GICR_VPROPBASER_IDBITS_MASK;
3699 val |= GICR_VPROPBASER_RaWb;
3700 val |= GICR_VPROPBASER_InnerShareable;
3701 gicr_write_vpropbaser(val, vlpi_base + GICR_VPROPBASER);
3703 val = virt_to_phys(page_address(vpe->vpt_page)) &
3704 GENMASK_ULL(51, 16);
3705 val |= GICR_VPENDBASER_RaWaWb;
3706 val |= GICR_VPENDBASER_InnerShareable;
3708 * There is no good way of finding out if the pending table is
3709 * empty as we can race against the doorbell interrupt very
3710 * easily. So in the end, vpe->pending_last is only an
3711 * indication that the vcpu has something pending, not one
3712 * that the pending table is empty. A good implementation
3713 * would be able to read its coarse map pretty quickly anyway,
3714 * making this a tolerable issue.
3716 val |= GICR_VPENDBASER_PendingLast;
3717 val |= vpe->idai ? GICR_VPENDBASER_IDAI : 0;
3718 val |= GICR_VPENDBASER_Valid;
3719 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3721 its_wait_vpt_parse_complete();
3724 static void its_vpe_deschedule(struct its_vpe *vpe)
3726 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3729 val = its_clear_vpend_valid(vlpi_base, 0, 0);
3731 vpe->idai = !!(val & GICR_VPENDBASER_IDAI);
3732 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3735 static void its_vpe_invall(struct its_vpe *vpe)
3737 struct its_node *its;
3739 list_for_each_entry(its, &its_nodes, entry) {
3743 if (its_list_map && !vpe->its_vm->vlpi_count[its->list_nr])
3747 * Sending a VINVALL to a single ITS is enough, as all
3748 * we need is to reach the redistributors.
3750 its_send_vinvall(its, vpe);
3755 static int its_vpe_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3757 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3758 struct its_cmd_info *info = vcpu_info;
3760 switch (info->cmd_type) {
3762 its_vpe_schedule(vpe);
3765 case DESCHEDULE_VPE:
3766 its_vpe_deschedule(vpe);
3770 its_vpe_invall(vpe);
3778 static void its_vpe_send_cmd(struct its_vpe *vpe,
3779 void (*cmd)(struct its_device *, u32))
3781 unsigned long flags;
3783 raw_spin_lock_irqsave(&vpe_proxy.lock, flags);
3785 its_vpe_db_proxy_map_locked(vpe);
3786 cmd(vpe_proxy.dev, vpe->vpe_proxy_event);
3788 raw_spin_unlock_irqrestore(&vpe_proxy.lock, flags);
3791 static void its_vpe_send_inv(struct irq_data *d)
3793 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3795 if (gic_rdists->has_direct_lpi) {
3796 void __iomem *rdbase;
3798 /* Target the redistributor this VPE is currently known on */
3799 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3800 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3801 gic_write_lpir(d->parent_data->hwirq, rdbase + GICR_INVLPIR);
3802 wait_for_syncr(rdbase);
3803 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3805 its_vpe_send_cmd(vpe, its_send_inv);
3809 static void its_vpe_mask_irq(struct irq_data *d)
3812 * We need to mask the LPI, which is described by the parent
3813 * irq_data. Instead of calling into the parent (which won't
3814 * exactly do the right thing), let's simply use the
3815 * parent_data pointer. Yes, I'm naughty.
3817 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3818 its_vpe_send_inv(d);
3821 static void its_vpe_unmask_irq(struct irq_data *d)
3823 /* Same hack as above... */
3824 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3825 its_vpe_send_inv(d);
3828 static int its_vpe_set_irqchip_state(struct irq_data *d,
3829 enum irqchip_irq_state which,
3832 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3834 if (which != IRQCHIP_STATE_PENDING)
3837 if (gic_rdists->has_direct_lpi) {
3838 void __iomem *rdbase;
3840 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3842 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_SETLPIR);
3844 gic_write_lpir(vpe->vpe_db_lpi, rdbase + GICR_CLRLPIR);
3845 wait_for_syncr(rdbase);
3849 its_vpe_send_cmd(vpe, its_send_int);
3851 its_vpe_send_cmd(vpe, its_send_clear);
3857 static int its_vpe_retrigger(struct irq_data *d)
3859 return !its_vpe_set_irqchip_state(d, IRQCHIP_STATE_PENDING, true);
3862 static struct irq_chip its_vpe_irq_chip = {
3863 .name = "GICv4-vpe",
3864 .irq_mask = its_vpe_mask_irq,
3865 .irq_unmask = its_vpe_unmask_irq,
3866 .irq_eoi = irq_chip_eoi_parent,
3867 .irq_set_affinity = its_vpe_set_affinity,
3868 .irq_retrigger = its_vpe_retrigger,
3869 .irq_set_irqchip_state = its_vpe_set_irqchip_state,
3870 .irq_set_vcpu_affinity = its_vpe_set_vcpu_affinity,
3873 static struct its_node *find_4_1_its(void)
3875 static struct its_node *its = NULL;
3878 list_for_each_entry(its, &its_nodes, entry) {
3890 static void its_vpe_4_1_send_inv(struct irq_data *d)
3892 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3893 struct its_node *its;
3896 * GICv4.1 wants doorbells to be invalidated using the
3897 * INVDB command in order to be broadcast to all RDs. Send
3898 * it to the first valid ITS, and let the HW do its magic.
3900 its = find_4_1_its();
3902 its_send_invdb(its, vpe);
3905 static void its_vpe_4_1_mask_irq(struct irq_data *d)
3907 lpi_write_config(d->parent_data, LPI_PROP_ENABLED, 0);
3908 its_vpe_4_1_send_inv(d);
3911 static void its_vpe_4_1_unmask_irq(struct irq_data *d)
3913 lpi_write_config(d->parent_data, 0, LPI_PROP_ENABLED);
3914 its_vpe_4_1_send_inv(d);
3917 static void its_vpe_4_1_schedule(struct its_vpe *vpe,
3918 struct its_cmd_info *info)
3920 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3923 /* Schedule the VPE */
3924 val |= GICR_VPENDBASER_Valid;
3925 val |= info->g0en ? GICR_VPENDBASER_4_1_VGRP0EN : 0;
3926 val |= info->g1en ? GICR_VPENDBASER_4_1_VGRP1EN : 0;
3927 val |= FIELD_PREP(GICR_VPENDBASER_4_1_VPEID, vpe->vpe_id);
3929 gicr_write_vpendbaser(val, vlpi_base + GICR_VPENDBASER);
3931 its_wait_vpt_parse_complete();
3934 static void its_vpe_4_1_deschedule(struct its_vpe *vpe,
3935 struct its_cmd_info *info)
3937 void __iomem *vlpi_base = gic_data_rdist_vlpi_base();
3942 * vPE is going to block: make the vPE non-resident with
3943 * PendingLast clear and DB set. The GIC guarantees that if
3944 * we read back PendingLast clear, then a doorbell will be
3945 * delivered when an interrupt comes.
3947 val = its_clear_vpend_valid(vlpi_base,
3948 GICR_VPENDBASER_PendingLast,
3949 GICR_VPENDBASER_4_1_DB);
3950 vpe->pending_last = !!(val & GICR_VPENDBASER_PendingLast);
3953 * We're not blocking, so just make the vPE non-resident
3954 * with PendingLast set, indicating that we'll be back.
3956 val = its_clear_vpend_valid(vlpi_base,
3958 GICR_VPENDBASER_PendingLast);
3959 vpe->pending_last = true;
3963 static void its_vpe_4_1_invall(struct its_vpe *vpe)
3965 void __iomem *rdbase;
3968 val = GICR_INVALLR_V;
3969 val |= FIELD_PREP(GICR_INVALLR_VPEID, vpe->vpe_id);
3971 /* Target the redistributor this vPE is currently known on */
3972 raw_spin_lock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3973 rdbase = per_cpu_ptr(gic_rdists->rdist, vpe->col_idx)->rd_base;
3974 gic_write_lpir(val, rdbase + GICR_INVALLR);
3976 wait_for_syncr(rdbase);
3977 raw_spin_unlock(&gic_data_rdist_cpu(vpe->col_idx)->rd_lock);
3980 static int its_vpe_4_1_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
3982 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
3983 struct its_cmd_info *info = vcpu_info;
3985 switch (info->cmd_type) {
3987 its_vpe_4_1_schedule(vpe, info);
3990 case DESCHEDULE_VPE:
3991 its_vpe_4_1_deschedule(vpe, info);
3995 its_vpe_4_1_invall(vpe);
4003 static struct irq_chip its_vpe_4_1_irq_chip = {
4004 .name = "GICv4.1-vpe",
4005 .irq_mask = its_vpe_4_1_mask_irq,
4006 .irq_unmask = its_vpe_4_1_unmask_irq,
4007 .irq_eoi = irq_chip_eoi_parent,
4008 .irq_set_affinity = its_vpe_set_affinity,
4009 .irq_set_vcpu_affinity = its_vpe_4_1_set_vcpu_affinity,
4012 static void its_configure_sgi(struct irq_data *d, bool clear)
4014 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4015 struct its_cmd_desc desc;
4017 desc.its_vsgi_cmd.vpe = vpe;
4018 desc.its_vsgi_cmd.sgi = d->hwirq;
4019 desc.its_vsgi_cmd.priority = vpe->sgi_config[d->hwirq].priority;
4020 desc.its_vsgi_cmd.enable = vpe->sgi_config[d->hwirq].enabled;
4021 desc.its_vsgi_cmd.group = vpe->sgi_config[d->hwirq].group;
4022 desc.its_vsgi_cmd.clear = clear;
4025 * GICv4.1 allows us to send VSGI commands to any ITS as long as the
4026 * destination VPE is mapped there. Since we map them eagerly at
4027 * activation time, we're pretty sure the first GICv4.1 ITS will do.
4029 its_send_single_vcommand(find_4_1_its(), its_build_vsgi_cmd, &desc);
4032 static void its_sgi_mask_irq(struct irq_data *d)
4034 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4036 vpe->sgi_config[d->hwirq].enabled = false;
4037 its_configure_sgi(d, false);
4040 static void its_sgi_unmask_irq(struct irq_data *d)
4042 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4044 vpe->sgi_config[d->hwirq].enabled = true;
4045 its_configure_sgi(d, false);
4048 static int its_sgi_set_affinity(struct irq_data *d,
4049 const struct cpumask *mask_val,
4053 * There is no notion of affinity for virtual SGIs, at least
4054 * not on the host (since they can only be targeting a vPE).
4055 * Tell the kernel we've done whatever it asked for.
4057 irq_data_update_effective_affinity(d, mask_val);
4058 return IRQ_SET_MASK_OK;
4061 static int its_sgi_set_irqchip_state(struct irq_data *d,
4062 enum irqchip_irq_state which,
4065 if (which != IRQCHIP_STATE_PENDING)
4069 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4070 struct its_node *its = find_4_1_its();
4073 val = FIELD_PREP(GITS_SGIR_VPEID, vpe->vpe_id);
4074 val |= FIELD_PREP(GITS_SGIR_VINTID, d->hwirq);
4075 writeq_relaxed(val, its->sgir_base + GITS_SGIR - SZ_128K);
4077 its_configure_sgi(d, true);
4083 static int its_sgi_get_irqchip_state(struct irq_data *d,
4084 enum irqchip_irq_state which, bool *val)
4086 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4088 unsigned long flags;
4089 u32 count = 1000000; /* 1s! */
4093 if (which != IRQCHIP_STATE_PENDING)
4097 * Locking galore! We can race against two different events:
4099 * - Concurrent vPE affinity change: we must make sure it cannot
4100 * happen, or we'll talk to the wrong redistributor. This is
4101 * identical to what happens with vLPIs.
4103 * - Concurrent VSGIPENDR access: As it involves accessing two
4104 * MMIO registers, this must be made atomic one way or another.
4106 cpu = vpe_to_cpuid_lock(vpe, &flags);
4107 raw_spin_lock(&gic_data_rdist_cpu(cpu)->rd_lock);
4108 base = gic_data_rdist_cpu(cpu)->rd_base + SZ_128K;
4109 writel_relaxed(vpe->vpe_id, base + GICR_VSGIR);
4111 status = readl_relaxed(base + GICR_VSGIPENDR);
4112 if (!(status & GICR_VSGIPENDR_BUSY))
4117 pr_err_ratelimited("Unable to get SGI status\n");
4125 raw_spin_unlock(&gic_data_rdist_cpu(cpu)->rd_lock);
4126 vpe_to_cpuid_unlock(vpe, flags);
4131 *val = !!(status & (1 << d->hwirq));
4136 static int its_sgi_set_vcpu_affinity(struct irq_data *d, void *vcpu_info)
4138 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4139 struct its_cmd_info *info = vcpu_info;
4141 switch (info->cmd_type) {
4142 case PROP_UPDATE_VSGI:
4143 vpe->sgi_config[d->hwirq].priority = info->priority;
4144 vpe->sgi_config[d->hwirq].group = info->group;
4145 its_configure_sgi(d, false);
4153 static struct irq_chip its_sgi_irq_chip = {
4154 .name = "GICv4.1-sgi",
4155 .irq_mask = its_sgi_mask_irq,
4156 .irq_unmask = its_sgi_unmask_irq,
4157 .irq_set_affinity = its_sgi_set_affinity,
4158 .irq_set_irqchip_state = its_sgi_set_irqchip_state,
4159 .irq_get_irqchip_state = its_sgi_get_irqchip_state,
4160 .irq_set_vcpu_affinity = its_sgi_set_vcpu_affinity,
4163 static int its_sgi_irq_domain_alloc(struct irq_domain *domain,
4164 unsigned int virq, unsigned int nr_irqs,
4167 struct its_vpe *vpe = args;
4170 /* Yes, we do want 16 SGIs */
4171 WARN_ON(nr_irqs != 16);
4173 for (i = 0; i < 16; i++) {
4174 vpe->sgi_config[i].priority = 0;
4175 vpe->sgi_config[i].enabled = false;
4176 vpe->sgi_config[i].group = false;
4178 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4179 &its_sgi_irq_chip, vpe);
4180 irq_set_status_flags(virq + i, IRQ_DISABLE_UNLAZY);
4186 static void its_sgi_irq_domain_free(struct irq_domain *domain,
4188 unsigned int nr_irqs)
4193 static int its_sgi_irq_domain_activate(struct irq_domain *domain,
4194 struct irq_data *d, bool reserve)
4196 /* Write out the initial SGI configuration */
4197 its_configure_sgi(d, false);
4201 static void its_sgi_irq_domain_deactivate(struct irq_domain *domain,
4204 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4207 * The VSGI command is awkward:
4209 * - To change the configuration, CLEAR must be set to false,
4210 * leaving the pending bit unchanged.
4211 * - To clear the pending bit, CLEAR must be set to true, leaving
4212 * the configuration unchanged.
4214 * You just can't do both at once, hence the two commands below.
4216 vpe->sgi_config[d->hwirq].enabled = false;
4217 its_configure_sgi(d, false);
4218 its_configure_sgi(d, true);
4221 static const struct irq_domain_ops its_sgi_domain_ops = {
4222 .alloc = its_sgi_irq_domain_alloc,
4223 .free = its_sgi_irq_domain_free,
4224 .activate = its_sgi_irq_domain_activate,
4225 .deactivate = its_sgi_irq_domain_deactivate,
4228 static int its_vpe_id_alloc(void)
4230 return ida_simple_get(&its_vpeid_ida, 0, ITS_MAX_VPEID, GFP_KERNEL);
4233 static void its_vpe_id_free(u16 id)
4235 ida_simple_remove(&its_vpeid_ida, id);
4238 static int its_vpe_init(struct its_vpe *vpe)
4240 struct page *vpt_page;
4243 /* Allocate vpe_id */
4244 vpe_id = its_vpe_id_alloc();
4249 vpt_page = its_allocate_pending_table(GFP_KERNEL);
4251 its_vpe_id_free(vpe_id);
4255 if (!its_alloc_vpe_table(vpe_id)) {
4256 its_vpe_id_free(vpe_id);
4257 its_free_pending_table(vpt_page);
4261 raw_spin_lock_init(&vpe->vpe_lock);
4262 vpe->vpe_id = vpe_id;
4263 vpe->vpt_page = vpt_page;
4264 if (gic_rdists->has_rvpeid)
4265 atomic_set(&vpe->vmapp_count, 0);
4267 vpe->vpe_proxy_event = -1;
4272 static void its_vpe_teardown(struct its_vpe *vpe)
4274 its_vpe_db_proxy_unmap(vpe);
4275 its_vpe_id_free(vpe->vpe_id);
4276 its_free_pending_table(vpe->vpt_page);
4279 static void its_vpe_irq_domain_free(struct irq_domain *domain,
4281 unsigned int nr_irqs)
4283 struct its_vm *vm = domain->host_data;
4286 irq_domain_free_irqs_parent(domain, virq, nr_irqs);
4288 for (i = 0; i < nr_irqs; i++) {
4289 struct irq_data *data = irq_domain_get_irq_data(domain,
4291 struct its_vpe *vpe = irq_data_get_irq_chip_data(data);
4293 BUG_ON(vm != vpe->its_vm);
4295 clear_bit(data->hwirq, vm->db_bitmap);
4296 its_vpe_teardown(vpe);
4297 irq_domain_reset_irq_data(data);
4300 if (bitmap_empty(vm->db_bitmap, vm->nr_db_lpis)) {
4301 its_lpi_free(vm->db_bitmap, vm->db_lpi_base, vm->nr_db_lpis);
4302 its_free_prop_table(vm->vprop_page);
4306 static int its_vpe_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
4307 unsigned int nr_irqs, void *args)
4309 struct irq_chip *irqchip = &its_vpe_irq_chip;
4310 struct its_vm *vm = args;
4311 unsigned long *bitmap;
4312 struct page *vprop_page;
4313 int base, nr_ids, i, err = 0;
4317 bitmap = its_lpi_alloc(roundup_pow_of_two(nr_irqs), &base, &nr_ids);
4321 if (nr_ids < nr_irqs) {
4322 its_lpi_free(bitmap, base, nr_ids);
4326 vprop_page = its_allocate_prop_table(GFP_KERNEL);
4328 its_lpi_free(bitmap, base, nr_ids);
4332 vm->db_bitmap = bitmap;
4333 vm->db_lpi_base = base;
4334 vm->nr_db_lpis = nr_ids;
4335 vm->vprop_page = vprop_page;
4337 if (gic_rdists->has_rvpeid)
4338 irqchip = &its_vpe_4_1_irq_chip;
4340 for (i = 0; i < nr_irqs; i++) {
4341 vm->vpes[i]->vpe_db_lpi = base + i;
4342 err = its_vpe_init(vm->vpes[i]);
4345 err = its_irq_gic_domain_alloc(domain, virq + i,
4346 vm->vpes[i]->vpe_db_lpi);
4349 irq_domain_set_hwirq_and_chip(domain, virq + i, i,
4350 irqchip, vm->vpes[i]);
4356 its_vpe_irq_domain_free(domain, virq, i - 1);
4358 its_lpi_free(bitmap, base, nr_ids);
4359 its_free_prop_table(vprop_page);
4365 static int its_vpe_irq_domain_activate(struct irq_domain *domain,
4366 struct irq_data *d, bool reserve)
4368 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4369 struct its_node *its;
4372 * If we use the list map, we issue VMAPP on demand... Unless
4373 * we're on a GICv4.1 and we eagerly map the VPE on all ITSs
4374 * so that VSGIs can work.
4376 if (!gic_requires_eager_mapping())
4379 /* Map the VPE to the first possible CPU */
4380 vpe->col_idx = cpumask_first(cpu_online_mask);
4382 list_for_each_entry(its, &its_nodes, entry) {
4386 its_send_vmapp(its, vpe, true);
4387 its_send_vinvall(its, vpe);
4390 irq_data_update_effective_affinity(d, cpumask_of(vpe->col_idx));
4395 static void its_vpe_irq_domain_deactivate(struct irq_domain *domain,
4398 struct its_vpe *vpe = irq_data_get_irq_chip_data(d);
4399 struct its_node *its;
4402 * If we use the list map on GICv4.0, we unmap the VPE once no
4403 * VLPIs are associated with the VM.
4405 if (!gic_requires_eager_mapping())
4408 list_for_each_entry(its, &its_nodes, entry) {
4412 its_send_vmapp(its, vpe, false);
4416 static const struct irq_domain_ops its_vpe_domain_ops = {
4417 .alloc = its_vpe_irq_domain_alloc,
4418 .free = its_vpe_irq_domain_free,
4419 .activate = its_vpe_irq_domain_activate,
4420 .deactivate = its_vpe_irq_domain_deactivate,
4423 static int its_force_quiescent(void __iomem *base)
4425 u32 count = 1000000; /* 1s */
4428 val = readl_relaxed(base + GITS_CTLR);
4430 * GIC architecture specification requires the ITS to be both
4431 * disabled and quiescent for writes to GITS_BASER<n> or
4432 * GITS_CBASER to not have UNPREDICTABLE results.
4434 if ((val & GITS_CTLR_QUIESCENT) && !(val & GITS_CTLR_ENABLE))
4437 /* Disable the generation of all interrupts to this ITS */
4438 val &= ~(GITS_CTLR_ENABLE | GITS_CTLR_ImDe);
4439 writel_relaxed(val, base + GITS_CTLR);
4441 /* Poll GITS_CTLR and wait until ITS becomes quiescent */
4443 val = readl_relaxed(base + GITS_CTLR);
4444 if (val & GITS_CTLR_QUIESCENT)
4456 static bool __maybe_unused its_enable_quirk_cavium_22375(void *data)
4458 struct its_node *its = data;
4460 /* erratum 22375: only alloc 8MB table size (20 bits) */
4461 its->typer &= ~GITS_TYPER_DEVBITS;
4462 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, 20 - 1);
4463 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_22375;
4468 static bool __maybe_unused its_enable_quirk_cavium_23144(void *data)
4470 struct its_node *its = data;
4472 its->flags |= ITS_FLAGS_WORKAROUND_CAVIUM_23144;
4477 static bool __maybe_unused its_enable_quirk_qdf2400_e0065(void *data)
4479 struct its_node *its = data;
4481 /* On QDF2400, the size of the ITE is 16 bytes */
4482 its->typer &= ~GITS_TYPER_ITT_ENTRY_SIZE;
4483 its->typer |= FIELD_PREP(GITS_TYPER_ITT_ENTRY_SIZE, 16 - 1);
4488 static u64 its_irq_get_msi_base_pre_its(struct its_device *its_dev)
4490 struct its_node *its = its_dev->its;
4493 * The Socionext Synquacer SoC has a so-called 'pre-ITS',
4494 * which maps 32-bit writes targeted at a separate window of
4495 * size '4 << device_id_bits' onto writes to GITS_TRANSLATER
4496 * with device ID taken from bits [device_id_bits + 1:2] of
4497 * the window offset.
4499 return its->pre_its_base + (its_dev->device_id << 2);
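/*
 * Worked example (hypothetical addresses): with pre_its_base =
 * 0x58000000, DeviceID 0x42 gets doorbell address
 * 0x58000000 + (0x42 << 2) = 0x58000108; the pre-ITS forwards the
 * write to GITS_TRANSLATER and recovers DeviceID 0x42 from bits
 * [device_id_bits + 1:2] of the window offset.
 */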
4502 static bool __maybe_unused its_enable_quirk_socionext_synquacer(void *data)
4504 struct its_node *its = data;
4505 u32 pre_its_window[2];
4508 if (!fwnode_property_read_u32_array(its->fwnode_handle,
4509 "socionext,synquacer-pre-its",
4511 ARRAY_SIZE(pre_its_window))) {
4513 its->pre_its_base = pre_its_window[0];
4514 its->get_msi_base = its_irq_get_msi_base_pre_its;
4516 ids = ilog2(pre_its_window[1]) - 2;
4517 if (device_ids(its) > ids) {
4518 its->typer &= ~GITS_TYPER_DEVBITS;
4519 its->typer |= FIELD_PREP(GITS_TYPER_DEVBITS, ids - 1);
4522 /* the pre-ITS breaks isolation, so disable MSI remapping */
4523 its->msi_domain_flags &= ~IRQ_DOMAIN_FLAG_MSI_REMAP;
4529 static bool __maybe_unused its_enable_quirk_hip07_161600802(void *data)
4531 struct its_node *its = data;
4534 * Hip07 insists on using the wrong address for the VLPI
4535 * page. Trick it into doing the right thing...
4537 its->vlpi_redist_offset = SZ_128K;
4541 static const struct gic_quirk its_quirks[] = {
4542 #ifdef CONFIG_CAVIUM_ERRATUM_22375
4544 .desc = "ITS: Cavium errata 22375, 24313",
4545 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4547 .init = its_enable_quirk_cavium_22375,
4550 #ifdef CONFIG_CAVIUM_ERRATUM_23144
4552 .desc = "ITS: Cavium erratum 23144",
4553 .iidr = 0xa100034c, /* ThunderX pass 1.x */
4555 .init = its_enable_quirk_cavium_23144,
4558 #ifdef CONFIG_QCOM_QDF2400_ERRATUM_0065
4560 .desc = "ITS: QDF2400 erratum 0065",
4561 .iidr = 0x00001070, /* QDF2400 ITS rev 1.x */
4563 .init = its_enable_quirk_qdf2400_e0065,
4566 #ifdef CONFIG_SOCIONEXT_SYNQUACER_PREITS
4569 * The Socionext Synquacer SoC incorporates ARM's own GIC-500
4570 * implementation, but with a 'pre-ITS' added that requires
4571 * special handling in software.
4573 .desc = "ITS: Socionext Synquacer pre-ITS",
4576 .init = its_enable_quirk_socionext_synquacer,
4579 #ifdef CONFIG_HISILICON_ERRATUM_161600802
4581 .desc = "ITS: Hip07 erratum 161600802",
4584 .init = its_enable_quirk_hip07_161600802,
4591 static void its_enable_quirks(struct its_node *its)
4593 u32 iidr = readl_relaxed(its->base + GITS_IIDR);
4595 gic_enable_quirks(iidr, its_quirks, its);
4598 static int its_save_disable(void)
4600 struct its_node *its;
4603 raw_spin_lock(&its_lock);
4604 list_for_each_entry(its, &its_nodes, entry) {
4607 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4611 its->ctlr_save = readl_relaxed(base + GITS_CTLR);
4612 err = its_force_quiescent(base);
4614 pr_err("ITS@%pa: failed to quiesce: %d\n",
4615 &its->phys_base, err);
4616 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4620 its->cbaser_save = gits_read_cbaser(base + GITS_CBASER);
4625 list_for_each_entry_continue_reverse(its, &its_nodes, entry) {
4628 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4632 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4635 raw_spin_unlock(&its_lock);
4640 static void its_restore_enable(void)
4642 struct its_node *its;
4645 raw_spin_lock(&its_lock);
4646 list_for_each_entry(its, &its_nodes, entry) {
4650 if (!(its->flags & ITS_FLAGS_SAVE_SUSPEND_STATE))
4656 * Make sure that the ITS is disabled. If it fails to quiesce,
4657 * don't restore it since writing to CBASER or BASER<n>
4658 * registers is undefined according to the GIC v3 ITS
4661 ret = its_force_quiescent(base);
4663 pr_err("ITS@%pa: failed to quiesce on resume: %d\n",
4664 &its->phys_base, ret);
4668 gits_write_cbaser(its->cbaser_save, base + GITS_CBASER);
4671 * Writing CBASER resets CREADR to 0, so make CWRITER and
4672 * cmd_write line up with it.
4674 its->cmd_write = its->cmd_base;
4675 gits_write_cwriter(0, base + GITS_CWRITER);
4677 /* Restore GITS_BASER from the value cache. */
4678 for (i = 0; i < GITS_BASER_NR_REGS; i++) {
4679 struct its_baser *baser = &its->tables[i];
4681 if (!(baser->val & GITS_BASER_VALID))
4684 its_write_baser(its, baser, baser->val);
4686 writel_relaxed(its->ctlr_save, base + GITS_CTLR);
4689 * Reinit the collection if it's stored in the ITS. This is
4690 * indicated by the col_id being less than the HCC field.
4691 * CID < HCC as specified in the GIC v3 Documentation.
4693 if (its->collections[smp_processor_id()].col_id <
4694 GITS_TYPER_HCC(gic_read_typer(base + GITS_TYPER)))
4695 its_cpu_init_collection(its);
4697 raw_spin_unlock(&its_lock);
4700 static struct syscore_ops its_syscore_ops = {
4701 .suspend = its_save_disable,
4702 .resume = its_restore_enable,
4705 static int its_init_domain(struct fwnode_handle *handle, struct its_node *its)
4707 struct irq_domain *inner_domain;
4708 struct msi_domain_info *info;
4710 info = kzalloc(sizeof(*info), GFP_KERNEL);
4714 inner_domain = irq_domain_create_tree(handle, &its_domain_ops, its);
4715 if (!inner_domain) {
4720 inner_domain->parent = its_parent;
4721 irq_domain_update_bus_token(inner_domain, DOMAIN_BUS_NEXUS);
4722 inner_domain->flags |= its->msi_domain_flags;
4723 info->ops = &its_msi_domain_ops;
4725 inner_domain->host_data = info;
4730 static int its_init_vpe_domain(void)
4732 struct its_node *its;
4736 if (gic_rdists->has_direct_lpi) {
4737 pr_info("ITS: Using DirectLPI for VPE invalidation\n");
4741 /* Any ITS will do, even if not v4 */
4742 its = list_first_entry(&its_nodes, struct its_node, entry);
4744 entries = roundup_pow_of_two(nr_cpu_ids);
4745 vpe_proxy.vpes = kcalloc(entries, sizeof(*vpe_proxy.vpes),
4747 if (!vpe_proxy.vpes) {
4748 pr_err("ITS: Can't allocate GICv4 proxy device array\n");
4752 /* Use the last possible DevID */
4753 devid = GENMASK(device_ids(its) - 1, 0);
4754 vpe_proxy.dev = its_create_device(its, devid, entries, false);
4755 if (!vpe_proxy.dev) {
4756 kfree(vpe_proxy.vpes);
4757 pr_err("ITS: Can't allocate GICv4 proxy device\n");
4761 BUG_ON(entries > vpe_proxy.dev->nr_ites);
4763 raw_spin_lock_init(&vpe_proxy.lock);
4764 vpe_proxy.next_victim = 0;
4765 pr_info("ITS: Allocated DevID %x as GICv4 proxy device (%d slots)\n",
4766 devid, vpe_proxy.dev->nr_ites);
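/*
 * Proxy sizing example (hypothetical numbers): with nr_cpu_ids = 6,
 * entries = roundup_pow_of_two(6) = 8 doorbell slots are reserved, and
 * with device_ids(its) = 20 the proxy claims DevID
 * GENMASK(19, 0) = 0xfffff, i.e. the last DeviceID the ITS can accept.
 */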
4771 static int __init its_compute_its_list_map(struct resource *res,
4772 void __iomem *its_base)
4778 * This is assumed to be done early enough that we're
4779 * guaranteed to be single-threaded, hence no
4780 * locking. Should this change, we should address that.
4783 its_number = find_first_zero_bit(&its_list_map, GICv4_ITS_LIST_MAX);
4784 if (its_number >= GICv4_ITS_LIST_MAX) {
4785 pr_err("ITS@%pa: No ITSList entry available!\n",
4790 ctlr = readl_relaxed(its_base + GITS_CTLR);
4791 ctlr &= ~GITS_CTLR_ITS_NUMBER;
4792 ctlr |= its_number << GITS_CTLR_ITS_NUMBER_SHIFT;
4793 writel_relaxed(ctlr, its_base + GITS_CTLR);
4794 ctlr = readl_relaxed(its_base + GITS_CTLR);
4795 if ((ctlr & GITS_CTLR_ITS_NUMBER) != (its_number << GITS_CTLR_ITS_NUMBER_SHIFT)) {
4796 its_number = ctlr & GITS_CTLR_ITS_NUMBER;
4797 its_number >>= GITS_CTLR_ITS_NUMBER_SHIFT;
4800 if (test_and_set_bit(its_number, &its_list_map)) {
4801 pr_err("ITS@%pa: Duplicate ITSList entry %d\n",
4802 &res->start, its_number);
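/*
 * The detection pattern above, spelled out (a sketch of the idea, not
 * extra driver logic): write a candidate ITSList number to GITS_CTLR,
 * read it back, and if the field did not stick (it can be read-only
 * when the implementation provides a fixed number), adopt the value
 * the hardware reports; its_list_map then catches any duplicates.
 */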
static int __init its_probe_one(struct resource *res,
				struct fwnode_handle *handle, int numa_node)
{
	struct its_node *its;
	void __iomem *its_base;
	u32 val, ctlr;
	u64 baser, tmp, typer;
	struct page *page;
	int err;

	its_base = ioremap(res->start, SZ_64K);
	if (!its_base) {
		pr_warn("ITS@%pa: Unable to map ITS registers\n", &res->start);
		return -ENOMEM;
	}

	val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK;
	if (val != 0x30 && val != 0x40) {
		pr_warn("ITS@%pa: No ITS detected, giving up\n", &res->start);
		err = -ENODEV;
		goto out_unmap;
	}

	err = its_force_quiescent(its_base);
	if (err) {
		pr_warn("ITS@%pa: Failed to quiesce, giving up\n", &res->start);
		goto out_unmap;
	}

	pr_info("ITS %pR\n", res);

	its = kzalloc(sizeof(*its), GFP_KERNEL);
	if (!its) {
		err = -ENOMEM;
		goto out_unmap;
	}

	raw_spin_lock_init(&its->lock);
	mutex_init(&its->dev_alloc_lock);
	INIT_LIST_HEAD(&its->entry);
	INIT_LIST_HEAD(&its->its_device_list);
	typer = gic_read_typer(its_base + GITS_TYPER);
	its->typer = typer;
	its->base = its_base;
	its->phys_base = res->start;
	if (is_v4(its)) {
		if (!(typer & GITS_TYPER_VMOVP)) {
			err = its_compute_its_list_map(res, its_base);
			if (err < 0)
				goto out_free_its;

			its->list_nr = err;

			pr_info("ITS@%pa: Using ITS number %d\n",
				&res->start, err);
		} else {
			pr_info("ITS@%pa: Single VMOVP capable\n", &res->start);
		}

		if (is_v4_1(its)) {
			u32 svpet = FIELD_GET(GITS_TYPER_SVPET, typer);

			its->sgir_base = ioremap(res->start + SZ_128K, SZ_64K);
			if (!its->sgir_base) {
				err = -ENOMEM;
				goto out_free_its;
			}

			its->mpidr = readl_relaxed(its_base + GITS_MPIDR);

			pr_info("ITS@%pa: Using GICv4.1 mode %08x %08x\n",
				&res->start, its->mpidr, svpet);
		}
	}

	its->numa_node = numa_node;

	page = alloc_pages_node(its->numa_node, GFP_KERNEL | __GFP_ZERO,
				get_order(ITS_CMD_QUEUE_SZ));
	if (!page) {
		err = -ENOMEM;
		goto out_unmap_sgir;
	}
	its->cmd_base = (void *)page_address(page);
	its->cmd_write = its->cmd_base;
	its->fwnode_handle = handle;
	its->get_msi_base = its_irq_get_msi_base;
	its->msi_domain_flags = IRQ_DOMAIN_FLAG_MSI_REMAP;

	its_enable_quirks(its);

	err = its_alloc_tables(its);
	if (err)
		goto out_free_cmd;

	err = its_alloc_collections(its);
	if (err)
		goto out_free_tables;
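
	/*
	 * Program GITS_CBASER with the command queue base and the preferred
	 * inner-shareable, write-back attributes, then read it back: if the
	 * ITS downgraded shareability, fall back to non-cacheable mappings
	 * and explicit cache flushing of the queue.
	 */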
	baser = (virt_to_phys(its->cmd_base)	|
		 GITS_CBASER_RaWaWb		|
		 GITS_CBASER_InnerShareable	|
		 (ITS_CMD_QUEUE_SZ / SZ_4K - 1)	|
		 GITS_CBASER_VALID);

	gits_write_cbaser(baser, its->base + GITS_CBASER);
	tmp = gits_read_cbaser(its->base + GITS_CBASER);

	if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) {
		if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) {
			/*
			 * The HW reports non-shareable, we must
			 * remove the cacheability attributes as
			 * well.
			 */
			baser &= ~(GITS_CBASER_SHAREABILITY_MASK |
				   GITS_CBASER_CACHEABILITY_MASK);
			baser |= GITS_CBASER_nC;
			gits_write_cbaser(baser, its->base + GITS_CBASER);
		}
		pr_info("ITS: using cache flushing for cmd queue\n");
		its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING;
	}

	gits_write_cwriter(0, its->base + GITS_CWRITER);
	ctlr = readl_relaxed(its->base + GITS_CTLR);
	ctlr |= GITS_CTLR_ENABLE;
	if (is_v4(its))
		ctlr |= GITS_CTLR_ImDe;
	writel_relaxed(ctlr, its->base + GITS_CTLR);

	if (GITS_TYPER_HCC(typer))
		its->flags |= ITS_FLAGS_SAVE_SUSPEND_STATE;

	err = its_init_domain(handle, its);
	if (err)
		goto out_free_tables;

	raw_spin_lock(&its_lock);
	list_add(&its->entry, &its_nodes);
	raw_spin_unlock(&its_lock);

	return 0;

out_free_tables:
	its_free_tables(its);
out_free_cmd:
	free_pages((unsigned long)its->cmd_base, get_order(ITS_CMD_QUEUE_SZ));
out_unmap_sgir:
	if (its->sgir_base)
		iounmap(its->sgir_base);
out_free_its:
	kfree(its);
out_unmap:
	iounmap(its_base);
	pr_err("ITS@%pa: failed probing (%d)\n", &res->start, err);
	return err;
}
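
/*
 * Per-redistributor LPI checks: before a CPU's LPI tables can be programmed,
 * any firmware-enabled LPI state must be detected and, where the architecture
 * allows it, switched off.
 */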
static bool gic_rdists_supports_plpis(void)
{
	return !!(gic_read_typer(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS);
}

static int redist_disable_lpis(void)
{
	void __iomem *rbase = gic_data_rdist_rd_base();
	u64 timeout = USEC_PER_SEC;
	u64 val;

	if (!gic_rdists_supports_plpis()) {
		pr_info("CPU%d: LPIs not supported\n", smp_processor_id());
		return -ENXIO;
	}

	val = readl_relaxed(rbase + GICR_CTLR);
	if (!(val & GICR_CTLR_ENABLE_LPIS))
		return 0;

	/*
	 * If coming via a CPU hotplug event, we don't need to disable
	 * LPIs before trying to re-enable them. They are already
	 * configured and all is well in the world.
	 *
	 * If running with preallocated tables, there is nothing to do.
	 */
	if (gic_data_rdist()->lpi_enabled ||
	    (gic_rdists->flags & RDIST_FLAGS_RD_TABLES_PREALLOCATED))
		return 0;

	/*
	 * From that point on, we only try to do some damage control.
	 */
	pr_warn("GICv3: CPU%d: Booted with LPIs enabled, memory probably corrupted\n",
		smp_processor_id());
	add_taint(TAINT_CRAP, LOCKDEP_STILL_OK);

	/* Disable LPIs */
	val &= ~GICR_CTLR_ENABLE_LPIS;
	writel_relaxed(val, rbase + GICR_CTLR);

	/* Make sure any change to GICR_CTLR is observable by the GIC */
	dsb(sy);

	/*
	 * Software must observe RWP==0 after clearing GICR_CTLR.EnableLPIs
	 * from 1 to 0 before programming GICR_PEND{PROP}BASER registers.
	 * Error out if we time out waiting for RWP to clear.
	 */
	while (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_RWP) {
		if (!timeout) {
			pr_err("CPU%d: Timeout while disabling LPIs\n",
			       smp_processor_id());
			return -ETIMEDOUT;
		}
		udelay(1);
		timeout--;
	}

	/*
	 * After it has been written to 1, it is IMPLEMENTATION
	 * DEFINED whether GICR_CTLR.EnableLPI becomes RES1 or can be
	 * cleared to 0. Error out if clearing the bit failed.
	 */
	if (readl_relaxed(rbase + GICR_CTLR) & GICR_CTLR_ENABLE_LPIS) {
		pr_err("CPU%d: Failed to disable LPIs\n", smp_processor_id());
		return -EBUSY;
	}

	return 0;
}
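
/*
 * Per-CPU bring-up, called by the GICv3 core on each online CPU: make sure
 * LPIs are off (or sanely inherited), then set up this CPU's LPI tables and
 * ITS collections.
 */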
int its_cpu_init(void)
{
	if (!list_empty(&its_nodes)) {
		int ret;

		ret = redist_disable_lpis();
		if (ret)
			return ret;

		its_cpu_init_lpis();
		its_cpu_init_collections();
	}

	return 0;
}
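
/*
 * Device-tree probing. Each ITS node must carry an "msi-controller"
 * property to be considered. A minimal sketch of such a node (unit address
 * and sizes are illustrative only):
 *
 *	its: msi-controller@2f020000 {
 *		compatible = "arm,gic-v3-its";
 *		msi-controller;
 *		#msi-cells = <1>;
 *		reg = <0x0 0x2f020000 0x0 0x20000>;
 *	};
 */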
static const struct of_device_id its_device_id[] = {
	{	.compatible	= "arm,gic-v3-its",	},
	{},
};

static int __init its_of_probe(struct device_node *node)
{
	struct device_node *np;
	struct resource res;

	for (np = of_find_matching_node(node, its_device_id); np;
	     np = of_find_matching_node(np, its_device_id)) {
		if (!of_device_is_available(np))
			continue;
		if (!of_property_read_bool(np, "msi-controller")) {
			pr_warn("%pOF: no msi-controller property, ITS ignored\n",
				np);
			continue;
		}

		if (of_address_to_resource(np, 0, &res)) {
			pr_warn("%pOF: no regs?\n", np);
			continue;
		}

		its_probe_one(&res, &np->fwnode, of_node_to_nid(np));
	}
	return 0;
}
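
/*
 * ACPI probing. Each ITS is described by a GIC ITS (generic translator)
 * entry in the MADT; with CONFIG_ACPI_NUMA, SRAT GIC ITS affinity entries
 * are parsed first to map each ITS to its NUMA node.
 */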
#ifdef CONFIG_ACPI
#define ACPI_GICV3_ITS_MEM_SIZE (SZ_128K)

#ifdef CONFIG_ACPI_NUMA
struct its_srat_map {
	/* numa node id */
	int	numa_node;
	/* GIC ITS ID */
	u32	its_id;
};

static struct its_srat_map *its_srat_maps __initdata;
static int its_in_srat __initdata;

static int __init acpi_get_its_numa_node(u32 its_id)
{
	int i;

	for (i = 0; i < its_in_srat; i++) {
		if (its_id == its_srat_maps[i].its_id)
			return its_srat_maps[i].numa_node;
	}
	return NUMA_NO_NODE;
}

static int __init gic_acpi_match_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	return 0;
}

static int __init gic_acpi_parse_srat_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	int node;
	struct acpi_srat_gic_its_affinity *its_affinity;

	its_affinity = (struct acpi_srat_gic_its_affinity *)header;
	if (!its_affinity)
		return -EINVAL;

	if (its_affinity->header.length < sizeof(*its_affinity)) {
		pr_err("SRAT: Invalid header length %d in ITS affinity\n",
			its_affinity->header.length);
		return -EINVAL;
	}

	node = acpi_map_pxm_to_node(its_affinity->proximity_domain);

	if (node == NUMA_NO_NODE || node >= MAX_NUMNODES) {
		pr_err("SRAT: Invalid NUMA node %d in ITS affinity\n", node);
		return 0;
	}

	its_srat_maps[its_in_srat].numa_node = node;
	its_srat_maps[its_in_srat].its_id = its_affinity->its_id;
	its_in_srat++;
	pr_info("SRAT: PXM %d -> ITS %d -> Node %d\n",
		its_affinity->proximity_domain, its_affinity->its_id, node);

	return 0;
}

static void __init acpi_table_parse_srat_its(void)
{
	int count;

	count = acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_match_srat_its, 0);
	if (count <= 0)
		return;

	its_srat_maps = kmalloc_array(count, sizeof(struct its_srat_map),
				      GFP_KERNEL);
	if (!its_srat_maps) {
		pr_warn("SRAT: Failed to allocate memory for its_srat_maps!\n");
		return;
	}

	acpi_table_parse_entries(ACPI_SIG_SRAT,
			sizeof(struct acpi_table_srat),
			ACPI_SRAT_TYPE_GIC_ITS_AFFINITY,
			gic_acpi_parse_srat_its, 0);
}

/* free the its_srat_maps after ITS probing */
static void __init acpi_its_srat_maps_free(void)
{
	kfree(its_srat_maps);
}
#else
static void __init acpi_table_parse_srat_its(void)	{ }
static int __init acpi_get_its_numa_node(u32 its_id) { return NUMA_NO_NODE; }
static void __init acpi_its_srat_maps_free(void) { }
#endif /* CONFIG_ACPI_NUMA */
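
/*
 * One MADT generic translator entry maps to one ITS: carve out its 128K
 * register window, register a domain token with IORT so devices can find
 * it, then probe it.
 */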
static int __init gic_acpi_parse_madt_its(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_madt_generic_translator *its_entry;
	struct fwnode_handle *dom_handle;
	struct resource res;
	int err;

	its_entry = (struct acpi_madt_generic_translator *)header;
	memset(&res, 0, sizeof(res));
	res.start = its_entry->base_address;
	res.end = its_entry->base_address + ACPI_GICV3_ITS_MEM_SIZE - 1;
	res.flags = IORESOURCE_MEM;

	dom_handle = irq_domain_alloc_fwnode(&res.start);
	if (!dom_handle) {
		pr_err("ITS@%pa: Unable to allocate GICv3 ITS domain token\n",
		       &res.start);
		return -ENOMEM;
	}

	err = iort_register_domain_token(its_entry->translation_id, res.start,
					 dom_handle);
	if (err) {
		pr_err("ITS@%pa: Unable to register GICv3 ITS domain token (ITS ID %d) to IORT\n",
		       &res.start, its_entry->translation_id);
		goto dom_err;
	}

	err = its_probe_one(&res, dom_handle,
			    acpi_get_its_numa_node(its_entry->translation_id));
	if (!err)
		return 0;

	iort_deregister_domain_token(its_entry->translation_id);
dom_err:
	irq_domain_free_fwnode(dom_handle);
	return err;
}

static void __init its_acpi_probe(void)
{
	acpi_table_parse_srat_its();
	acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_TRANSLATOR,
			      gic_acpi_parse_madt_its, 0);
	acpi_its_srat_maps_free();
}
#else
static void __init its_acpi_probe(void) { }
#endif /* CONFIG_ACPI */

int __init its_init(struct fwnode_handle *handle, struct rdists *rdists,
		    struct irq_domain *parent_domain)
{
	struct device_node *of_node;
	struct its_node *its;
	bool has_v4 = false;
	bool has_v4_1 = false;
	int err;

	gic_rdists = rdists;

	its_parent = parent_domain;
	of_node = to_of_node(handle);
	if (of_node)
		its_of_probe(of_node);
	else
		its_acpi_probe();

	if (list_empty(&its_nodes)) {
		pr_warn("ITS: No ITS available, not enabling LPIs\n");
		return -ENXIO;
	}

	err = allocate_lpi_tables();
	if (err)
		return err;

	list_for_each_entry(its, &its_nodes, entry) {
		has_v4 |= is_v4(its);
		has_v4_1 |= is_v4_1(its);
	}

	/* Don't bother with inconsistent systems */
	if (WARN_ON(!has_v4_1 && rdists->has_rvpeid))
		rdists->has_rvpeid = false;
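
	/*
	 * GICv4 is only usable if both the ITSes and the redistributors
	 * support VLPIs; GICv4.1 additionally brings HW-backed vSGIs, hence
	 * the choice of SGI domain ops below.
	 */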
	if (has_v4 && rdists->has_vlpis) {
		const struct irq_domain_ops *sgi_ops;

		if (has_v4_1)
			sgi_ops = &its_sgi_domain_ops;
		else
			sgi_ops = NULL;

		if (its_init_vpe_domain() ||
		    its_init_v4(parent_domain, &its_vpe_domain_ops, sgi_ops)) {
			rdists->has_vlpis = false;
			pr_err("ITS: Disabling GICv4 support\n");
		}
	}

	register_syscore_ops(&its_syscore_ops);

	return 0;
}